/*
 * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
static const char *const __rcs_file_version__ = "$Revision: 23748 $";
#include "launchd_runtime.h"

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/boolean.h>
#include <mach/message.h>
#include <mach/notify.h>
#include <mach/mig_errors.h>
#include <mach/mach_traps.h>
#include <mach/mach_interface.h>
#include <mach/host_info.h>
#include <mach/mach_host.h>
#include <mach/exception.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/reboot.h>
#include <sys/fcntl.h>
#include <bsm/libbsm.h>
#include <malloc/malloc.h>
#include <pthread.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <syslog.h>
#include <unistd.h>
#include <dlfcn.h>
#include <limits.h>

#include "launchd_internalServer.h"
#include "launchd_internal.h"
#include "notifyServer.h"
#include "excServer.h"

/* We shouldn't be including these */
#include "launchd_core_logic.h"
#include "vproc_internal.h"
#include "protocol_job_reply.h"
static mach_port_t ipc_port_set;
static mach_port_t demand_port_set;
static mach_port_t launchd_internal_port;
static int mainkq;

#define BULK_KEV_MAX 100
static struct kevent *bulk_kev;
static int bulk_kev_i;
static int bulk_kev_cnt;

static pthread_t kqueue_demand_thread;
static pthread_t demand_thread;
static void *mport_demand_loop(void *arg);
static void *kqueue_demand_loop(void *arg);
static void log_kevent_struct(int level, struct kevent *kev, int indx);

static boolean_t launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply);
static void record_caller_creds(mach_msg_header_t *mh);
static void launchd_runtime2(mach_msg_size_t msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply);
static mach_msg_size_t max_msg_size;
static mig_callback *mig_cb_table;
static size_t mig_cb_table_sz;
static timeout_callback runtime_idle_callback;
static mach_msg_timeout_t runtime_idle_timeout;
static audit_token_t *au_tok;
static size_t runtime_busy_cnt;
static STAILQ_HEAD(, logmsg_s) logmsg_queue = STAILQ_HEAD_INITIALIZER(logmsg_queue);
static size_t logmsg_queue_sz;
static size_t logmsg_queue_cnt;
static mach_port_t drain_reply_port;
static void runtime_log_uncork_pending_drain(void);
static kern_return_t runtime_log_pack(vm_offset_t *outval, mach_msg_type_number_t *outvalCnt);
static void runtime_log_push(void);

static bool logmsg_add(struct runtime_syslog_attr *attr, int err_num, const char *msg);
static void logmsg_remove(struct logmsg_s *lm);
static const int sigigns[] = { SIGHUP, SIGINT, SIGPIPE, SIGALRM, SIGTERM,
	SIGURG, SIGTSTP, SIGCONT, SIGTTIN, SIGTTOU, SIGIO, SIGXCPU,
	SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGINFO, SIGUSR1, SIGUSR2 };

static sigset_t sigign_set;
mach_port_t
runtime_get_kernel_port(void)
{
	return launchd_internal_port;
}
void
launchd_runtime_init(void)
{
	mach_msg_size_t mxmsgsz;
	pthread_attr_t attr;

	launchd_assert((mainkq = kqueue()) != -1);

	launchd_assert((errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &demand_port_set)) == KERN_SUCCESS);
	launchd_assert((errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &ipc_port_set)) == KERN_SUCCESS);

	launchd_assert(launchd_mport_create_recv(&launchd_internal_port) == KERN_SUCCESS);
	launchd_assert(launchd_mport_make_send(launchd_internal_port) == KERN_SUCCESS);

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = sizeof(union __RequestUnion__x_launchd_internal_subsystem);
	if (x_launchd_internal_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = x_launchd_internal_subsystem.maxsize;
	}

	launchd_assert(runtime_add_mport(launchd_internal_port, launchd_internal_demux, mxmsgsz) == KERN_SUCCESS);

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
	launchd_assert(pthread_create(&kqueue_demand_thread, &attr, kqueue_demand_loop, NULL) == 0);
	pthread_attr_destroy(&attr);

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
	launchd_assert(pthread_create(&demand_thread, &attr, mport_demand_loop, NULL) == 0);
	pthread_attr_destroy(&attr);
}
void
launchd_runtime_init2(void)
{
	size_t i;

	for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
		sigaddset(&sigign_set, sigigns[i]);
		launchd_assumes(signal(sigigns[i], SIG_IGN) != SIG_ERR);
	}
}
void *
mport_demand_loop(void *arg __attribute__((unused)))
{
	mach_msg_empty_rcv_t dummy;
	kern_return_t kr;

	for (;;) {
		kr = mach_msg(&dummy.header, MACH_RCV_MSG|MACH_RCV_LARGE, 0, 0, demand_port_set, 0, MACH_PORT_NULL);
		if (kr == MACH_RCV_PORT_CHANGED) {
			break;
		} else if (!launchd_assumes(kr == MACH_RCV_TOO_LARGE)) {
			continue;
		}
		launchd_assumes(handle_mport(launchd_internal_port) == 0);
	}

	return NULL;
}
const char *
proc_flags_to_C_names(unsigned int flags)
{
#define MAX_PFLAG_STR "P_ADVLOCK|P_CONTROLT|P_LP64|P_NOCLDSTOP|P_PPWAIT|P_PROFIL|P_SELECT|P_CONTINUED|P_SUGID|P_SYSTEM|P_TIMEOUT|P_TRACED|P_RESV3|P_WEXIT|P_EXEC|P_OWEUPC|P_AFFINITY|P_TRANSLATED|P_RESV5|P_CHECKOPENEVT|P_DEPENDENCY_CAPABLE|P_REBOOT|P_TBE|P_RESV7|P_THCWD|P_RESV9|P_RESV10|P_RESV11|P_NOSHLIB|P_FORCEQUOTA|P_NOCLDWAIT|P_NOREMOTEHANG|0xdeadbeeffeedface"

	static char flags_buf[sizeof(MAX_PFLAG_STR)];
	char *flags_off = NULL;

	if (!flags) {
		return "";
	}

	while (flags) {
		if (flags_off) {
			*flags_off = '|';
			flags_off++;
			*flags_off = '\0';
		} else {
			flags_off = flags_buf;
		}

#define FLAGIF(f) if (flags & f) { flags_off += sprintf(flags_off, #f); flags &= ~f; }

		FLAGIF(P_ADVLOCK)
		else FLAGIF(P_CONTROLT)
		else FLAGIF(P_LP64)
		else FLAGIF(P_NOCLDSTOP)
		else FLAGIF(P_PPWAIT)
		else FLAGIF(P_PROFIL)
		else FLAGIF(P_SELECT)
		else FLAGIF(P_CONTINUED)
		else FLAGIF(P_SUGID)
		else FLAGIF(P_SYSTEM)
		else FLAGIF(P_TIMEOUT)
		else FLAGIF(P_TRACED)
		else FLAGIF(P_RESV3)
		else FLAGIF(P_WEXIT)
		else FLAGIF(P_EXEC)
		else FLAGIF(P_OWEUPC)
		else FLAGIF(P_AFFINITY)
		else FLAGIF(P_TRANSLATED)
		else FLAGIF(P_RESV5)
		else FLAGIF(P_CHECKOPENEVT)
		else FLAGIF(P_DEPENDENCY_CAPABLE)
		else FLAGIF(P_REBOOT)
		else FLAGIF(P_TBE)
		else FLAGIF(P_RESV7)
		else FLAGIF(P_THCWD)
		else FLAGIF(P_RESV9)
		else FLAGIF(P_RESV10)
		else FLAGIF(P_RESV11)
		else FLAGIF(P_NOSHLIB)
		else FLAGIF(P_FORCEQUOTA)
		else FLAGIF(P_NOCLDWAIT)
		else FLAGIF(P_NOREMOTEHANG)
		else {
			flags_off += sprintf(flags_off, "0x%x", flags);
			flags = 0;
		}
	}

	return flags_buf;
}
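/*
 * Editorial note: FLAGIF(f) leans on the preprocessor's stringizing operator,
 * so each link in the else-chains above both names and clears one flag bit.
 * For example, FLAGIF(P_TRACED) expands to:
 *
 *	if (flags & P_TRACED) { flags_off += sprintf(flags_off, "P_TRACED"); flags &= ~P_TRACED; }
 */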
const char *
reboot_flags_to_C_names(unsigned int flags)
{
#define MAX_RB_STR "RB_ASKNAME|RB_SINGLE|RB_NOSYNC|RB_KDB|RB_HALT|RB_INITNAME|RB_DFLTROOT|RB_ALTBOOT|RB_UNIPROC|RB_SAFEBOOT|RB_UPSDELAY|0xdeadbeeffeedface"

	static char flags_buf[sizeof(MAX_RB_STR)];
	char *flags_off = NULL;

	if (flags == 0) {
		return "RB_AUTOBOOT";
	}

	while (flags) {
		if (flags_off) {
			*flags_off = '|';
			flags_off++;
			*flags_off = '\0';
		} else {
			flags_off = flags_buf;
		}

		FLAGIF(RB_ASKNAME)
		else FLAGIF(RB_SINGLE)
		else FLAGIF(RB_NOSYNC)
		else FLAGIF(RB_KDB)
		else FLAGIF(RB_HALT)
		else FLAGIF(RB_INITNAME)
		else FLAGIF(RB_DFLTROOT)
		else FLAGIF(RB_ALTBOOT)
		else FLAGIF(RB_UNIPROC)
		else FLAGIF(RB_SAFEBOOT)
		else FLAGIF(RB_UPSDELAY)
		else {
			flags_off += sprintf(flags_off, "0x%x", flags);
			flags = 0;
		}
	}

	return flags_buf;
}
const char *
signal_to_C_name(unsigned int sig)
{
	static char unknown[25];

#define SIG2CASE(sg)	case sg: return #sg

	switch (sig) {
	SIG2CASE(SIGHUP);
	SIG2CASE(SIGINT);
	SIG2CASE(SIGTERM);
	/* ... one SIG2CASE per remaining standard BSD signal ... */
	default:
		snprintf(unknown, sizeof(unknown), "%u", sig);
		return unknown;
	}
}
void
log_kevent_struct(int level, struct kevent *kev, int indx)
{
	const char *filter_str;
	char ident_buf[100];
	char filter_buf[100];
	char fflags_buf[1000];
	char flags_buf[1000] = "0x0";
	char *flags_off = NULL;
	char *fflags_off = NULL;
	unsigned short flags = kev->flags;
	unsigned int fflags = kev->fflags;

	while (flags) {
		if (flags_off) {
			*flags_off = '|';
			flags_off++;
			*flags_off = '\0';
		} else {
			flags_off = flags_buf;
		}

		FLAGIF(EV_ADD)
		else FLAGIF(EV_RECEIPT)
		else FLAGIF(EV_DELETE)
		else FLAGIF(EV_ENABLE)
		else FLAGIF(EV_DISABLE)
		else FLAGIF(EV_CLEAR)
		else FLAGIF(EV_EOF)
		else FLAGIF(EV_ONESHOT)
		else FLAGIF(EV_ERROR)
		else {
			flags_off += sprintf(flags_off, "0x%x", flags);
			flags = 0;
		}
	}

	snprintf(ident_buf, sizeof(ident_buf), "%ld", kev->ident);
	snprintf(fflags_buf, sizeof(fflags_buf), "0x%x", fflags);

#define FFLAGIF(ff) if (fflags & ff) { fflags_off += sprintf(fflags_off, #ff); fflags &= ~ff; }

	switch (kev->filter) {
	case EVFILT_READ:
		filter_str = "EVFILT_READ";
		break;
	case EVFILT_WRITE:
		filter_str = "EVFILT_WRITE";
		break;
	case EVFILT_AIO:
		filter_str = "EVFILT_AIO";
		break;
	case EVFILT_VNODE:
		filter_str = "EVFILT_VNODE";
		while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(NOTE_DELETE)
			else FFLAGIF(NOTE_WRITE)
			else FFLAGIF(NOTE_EXTEND)
			else FFLAGIF(NOTE_ATTRIB)
			else FFLAGIF(NOTE_LINK)
			else FFLAGIF(NOTE_RENAME)
			else FFLAGIF(NOTE_REVOKE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_PROC:
		filter_str = "EVFILT_PROC";
		while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(NOTE_EXIT)
			else FFLAGIF(NOTE_REAP)
			else FFLAGIF(NOTE_FORK)
			else FFLAGIF(NOTE_EXEC)
			else FFLAGIF(NOTE_SIGNAL)
			else FFLAGIF(NOTE_TRACK)
			else FFLAGIF(NOTE_TRACKERR)
			else FFLAGIF(NOTE_CHILD)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_SIGNAL:
		filter_str = "EVFILT_SIGNAL";
		strcpy(ident_buf, signal_to_C_name(kev->ident));
		break;
	case EVFILT_TIMER:
		filter_str = "EVFILT_TIMER";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(NOTE_SECONDS)
			else FFLAGIF(NOTE_USECONDS)
			else FFLAGIF(NOTE_NSECONDS)
			else FFLAGIF(NOTE_ABSOLUTE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_MACHPORT:
		filter_str = "EVFILT_MACHPORT";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		break;
	case EVFILT_FS:
		filter_str = "EVFILT_FS";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(VQ_NOTRESP)
			else FFLAGIF(VQ_NEEDAUTH)
			else FFLAGIF(VQ_LOWDISK)
			else FFLAGIF(VQ_MOUNT)
			else FFLAGIF(VQ_UNMOUNT)
			else FFLAGIF(VQ_DEAD)
			else FFLAGIF(VQ_ASSIST)
			else FFLAGIF(VQ_NOTRESPLOCK)
			else FFLAGIF(VQ_UPDATE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	default:
		snprintf(filter_buf, sizeof(filter_buf), "%d", kev->filter);
		filter_str = filter_buf;
		break;
	}

	runtime_syslog(level, "KEVENT[%d]: udata = %p data = 0x%lx ident = %s filter = %s flags = %s fflags = %s",
			indx, kev->udata, kev->data, ident_buf, filter_str, flags_buf, fflags_buf);
}
kern_return_t
x_handle_mport(mach_port_t junk __attribute__((unused)))
{
	mach_port_name_array_t members;
	mach_msg_type_number_t membersCnt;
	mach_port_status_t status;
	mach_msg_type_number_t statusCnt;
	struct kevent kev;
	unsigned int i;

	if (!launchd_assumes((errno = mach_port_get_set_status(mach_task_self(), demand_port_set, &members, &membersCnt)) == KERN_SUCCESS)) {
		return 1;
	}

	for (i = 0; i < membersCnt; i++) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), members[i], MACH_PORT_RECEIVE_STATUS, (mach_port_info_t)&status,
					&statusCnt) != KERN_SUCCESS) {
			continue;
		}

		if (status.mps_msgcount) {
			EV_SET(&kev, members[i], EVFILT_MACHPORT, 0, 0, 0, job_find_by_service_port(members[i]));
			if (launchd_assumes(kev.udata != NULL)) {
				log_kevent_struct(LOG_DEBUG, &kev, 0);
				(*((kq_callback *)kev.udata))(kev.udata, &kev);
			} else {
				log_kevent_struct(LOG_ERR, &kev, 0);
			}
			/* the callback may have tainted our ability to continue this for loop */
			break;
		}
	}

	launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)members,
				(vm_size_t) membersCnt * sizeof(mach_port_name_t)) == KERN_SUCCESS);

	return 0;
}
void *
kqueue_demand_loop(void *arg __attribute__((unused)))
{
	fd_set rfds;

	/*
	 * Yes, at first glance, calling select() on a kqueue seems silly.
	 *
	 * This avoids a race condition between the main thread and this helper
	 * thread by ensuring that we drain kqueue events on the same thread
	 * that manipulates the kqueue.
	 */

	for (;;) {
		FD_ZERO(&rfds);
		FD_SET(mainkq, &rfds);
		if (launchd_assumes(select(mainkq + 1, &rfds, NULL, NULL, NULL) == 1)) {
			launchd_assumes(handle_kqueue(launchd_internal_port, mainkq) == 0);
		}
	}

	return NULL;
}
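/*
 * Editorial sketch (not part of the original source): the property the loop
 * above relies on is that a kqueue descriptor is itself selectable and
 * becomes readable while events are pending. That lets this helper thread
 * block cheaply in select() while the main thread remains the only caller of
 * kevent() on mainkq. The function below is a hypothetical, single-threaded
 * illustration of the same select-then-drain pattern.
 */
#if 0
static void
select_then_drain_sketch(int kq)
{
	struct timespec ts = { 0, 0 };	/* zero timeout: drain without blocking */
	struct kevent ev;
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(kq, &rfds);

	if (select(kq + 1, &rfds, NULL, NULL, NULL) == 1) {
		while (kevent(kq, NULL, 0, &ev, 1, &ts) == 1) {
			/* dispatch ev */
		}
	}
}
#endif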
kern_return_t
x_handle_kqueue(mach_port_t junk __attribute__((unused)), integer_t fd)
{
	struct timespec ts = { 0, 0 };
	struct kevent kev[BULK_KEV_MAX];
	int i;

	bulk_kev = kev;

	launchd_assumes((bulk_kev_cnt = kevent(fd, NULL, 0, kev, BULK_KEV_MAX, &ts)) != -1);

	if (bulk_kev_cnt > 0) {
#if 0
		/* disabled sanity check: kev is an array here, so kev.udata doesn't compile */
		Dl_info dli;

		if (launchd_assumes(malloc_size(kev.udata) || dladdr(kev.udata, &dli))) {
#endif
		for (i = 0; i < bulk_kev_cnt; i++) {
			log_kevent_struct(LOG_DEBUG, &kev[i], i);
		}
		for (i = 0; i < bulk_kev_cnt; i++) {
			bulk_kev_i = i;
			if (kev[i].filter) {
				(*((kq_callback *)kev[i].udata))(kev[i].udata, &kev[i]);
			}
		}
#if 0
		} else {
			log_kevent_struct(LOG_ERR, &kev[0], 0);
		}
#endif
	}

	bulk_kev = NULL;
	bulk_kev_i = 0;
	bulk_kev_cnt = 0;

	return 0;
}
void
launchd_runtime(void)
{
	mig_reply_error_t *req = NULL, *resp = NULL;
	mach_msg_size_t mz = max_msg_size;
	int flags = VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|TRUE;

	for (;;) {
		if (req) {
			launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)req, mz) == KERN_SUCCESS);
			req = NULL;
		}
		if (resp) {
			launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)resp, mz) == KERN_SUCCESS);
			resp = NULL;
		}

		mz = max_msg_size;

		if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t *)&req, mz, flags) == KERN_SUCCESS)) {
			continue;
		}
		if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t *)&resp, mz, flags) == KERN_SUCCESS)) {
			continue;
		}

		launchd_runtime2(mz, req, resp);

		/* If we get here, max_msg_size probably changed... */
	}
}
kern_return_t
launchd_set_bport(mach_port_t name)
{
	return errno = task_set_bootstrap_port(mach_task_self(), name);
}

kern_return_t
launchd_get_bport(mach_port_t *name)
{
	return errno = task_get_bootstrap_port(mach_task_self(), name);
}
kern_return_t
launchd_mport_notify_req(mach_port_t name, mach_msg_id_t which)
{
	mach_port_mscount_t msgc = (which == MACH_NOTIFY_PORT_DESTROYED) ? 0 : 1;
	mach_port_t previous, where = (which == MACH_NOTIFY_NO_SENDERS) ? name : launchd_internal_port;

	if (which == MACH_NOTIFY_NO_SENDERS) {
		/* Always make sure the send count is zero, in case a receive right is reused */
		errno = mach_port_set_mscount(mach_task_self(), name, 0);
		if (errno != KERN_SUCCESS) {
			return errno;
		}
	}

	errno = mach_port_request_notification(mach_task_self(), name, which, msgc, where,
			MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);

	if (errno == 0 && previous != MACH_PORT_NULL) {
		launchd_assumes(launchd_mport_deallocate(previous) == KERN_SUCCESS);
	}

	return errno;
}
pid_t
runtime_fork(mach_port_t bsport)
{
	sigset_t emptyset, oset;
	pid_t r = -1;
	int saved_errno;
	size_t i;

	sigemptyset(&emptyset);

	launchd_assumes(launchd_mport_make_send(bsport) == KERN_SUCCESS);
	launchd_assumes(launchd_set_bport(bsport) == KERN_SUCCESS);
	launchd_assumes(launchd_mport_deallocate(bsport) == KERN_SUCCESS);

	launchd_assumes(sigprocmask(SIG_BLOCK, &sigign_set, &oset) != -1);
	for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
		launchd_assumes(signal(sigigns[i], SIG_DFL) != SIG_ERR);
	}

	r = fork();
	saved_errno = errno;

	if (r != 0) {
		for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
			launchd_assumes(signal(sigigns[i], SIG_IGN) != SIG_ERR);
		}
		launchd_assumes(sigprocmask(SIG_SETMASK, &oset, NULL) != -1);
		launchd_assumes(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
	} else {
		launchd_assumes(sigprocmask(SIG_SETMASK, &emptyset, NULL) != -1);
	}

	errno = saved_errno;

	return r;
}
void
runtime_set_timeout(timeout_callback to_cb, unsigned int sec)
{
	if (sec == 0 || to_cb == NULL) {
		runtime_idle_callback = NULL;
		runtime_idle_timeout = 0;
		return;
	}

	runtime_idle_callback = to_cb;
	runtime_idle_timeout = sec * 1000;
}
kern_return_t
runtime_add_mport(mach_port_t name, mig_callback demux, mach_msg_size_t msg_size)
{
	size_t needed_table_sz = (MACH_PORT_INDEX(name) + 1) * sizeof(mig_callback);
	mach_port_t target_set = demux ? ipc_port_set : demand_port_set;

	msg_size = round_page(msg_size + MAX_TRAILER_SIZE);

	if (needed_table_sz > mig_cb_table_sz) {
		needed_table_sz *= 2; /* Let's try and avoid realloc'ing for a while */
		/* calloc so that slots for unregistered ports read back as NULL */
		mig_callback *new_table = calloc(1, needed_table_sz);

		if (!launchd_assumes(new_table != NULL)) {
			return KERN_RESOURCE_SHORTAGE;
		}

		if (mig_cb_table) {
			memcpy(new_table, mig_cb_table, mig_cb_table_sz);
			free(mig_cb_table);
		}

		mig_cb_table_sz = needed_table_sz;
		mig_cb_table = new_table;
	}

	mig_cb_table[MACH_PORT_INDEX(name)] = demux;

	if (msg_size > max_msg_size) {
		max_msg_size = msg_size;
	}

	return errno = mach_port_move_member(mach_task_self(), name, target_set);
}
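/*
 * Editorial sketch (not part of the original source): the two ways this
 * registration is used. A non-NULL demux files the port in ipc_port_set,
 * which launchd_runtime2() services in-line; a NULL demux files it in
 * demand_port_set, which mport_demand_loop() merely watches for pending
 * messages. All names below are hypothetical.
 */
#if 0
static boolean_t my_demux(mach_msg_header_t *request, mach_msg_header_t *reply);

static kern_return_t
register_ipc_port_sketch(mach_port_t p)
{
	/* serviced by launchd_runtime2() via my_demux() */
	return runtime_add_mport(p, my_demux, sizeof(mach_msg_header_t) + 1024);
}

static kern_return_t
register_demand_port_sketch(mach_port_t p)
{
	/* demand-only: merely wakes launchd when a message arrives */
	return runtime_add_mport(p, NULL, 0);
}
#endif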
kern_return_t
runtime_remove_mport(mach_port_t name)
{
	mig_cb_table[MACH_PORT_INDEX(name)] = NULL;

	return errno = mach_port_move_member(mach_task_self(), name, MACH_PORT_NULL);
}
kern_return_t
launchd_mport_make_send(mach_port_t name)
{
	return errno = mach_port_insert_right(mach_task_self(), name, name, MACH_MSG_TYPE_MAKE_SEND);
}

kern_return_t
launchd_mport_close_recv(mach_port_t name)
{
	return errno = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE, -1);
}

kern_return_t
launchd_mport_create_recv(mach_port_t *name)
{
	return errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, name);
}

kern_return_t
launchd_mport_deallocate(mach_port_t name)
{
	return errno = mach_port_deallocate(mach_task_self(), name);
}
int
kevent_bulk_mod(struct kevent *kev, size_t kev_cnt)
{
	size_t i;

	for (i = 0; i < kev_cnt; i++) {
		kev[i].flags |= EV_CLEAR|EV_RECEIPT;
	}

	return kevent(mainkq, kev, kev_cnt, kev, kev_cnt, NULL);
}
int
kevent_mod(uintptr_t ident, short filter, u_short flags, u_int fflags, intptr_t data, void *udata)
{
	struct kevent kev;
	int r;

	/* EV_RECEIPT forces the result of this change back as an EV_ERROR kevent */
	flags |= EV_RECEIPT;

	if (flags & EV_ADD && !launchd_assumes(udata != NULL)) {
		errno = EINVAL;
		return -1;
	}

	EV_SET(&kev, ident, filter, flags, fflags, data, udata);

	r = kevent(mainkq, &kev, 1, &kev, 1, NULL);

	if (!launchd_assumes(r == 1)) {
		return -1;
	}

	if (launchd_assumes(kev.flags & EV_ERROR)) {
		if ((flags & EV_ADD) && kev.data) {
			runtime_syslog(LOG_DEBUG, "%s(): See the next line...", __func__);
			log_kevent_struct(LOG_DEBUG, &kev, 0);
			errno = kev.data;
			return -1;
		}
	}

	return r;
}
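/*
 * Editorial sketch (not part of the original source): typical use of
 * kevent_mod(). The callback is hypothetical; the runtime's only contract is
 * that udata points at a kq_callback, since the dispatch loops invoke
 * (*(kq_callback *)udata)(udata, &kev).
 */
#if 0
static void fd_ready_sketch(void *obj, struct kevent *kev);
static kq_callback fd_ready_kqc = fd_ready_sketch;

static int
watch_fd_sketch(int fd)
{
	/* one call arms the event; EV_ERROR receipts surface through errno */
	return kevent_mod(fd, EVFILT_READ, EV_ADD, 0, 0, &fd_ready_kqc);
}
#endif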
boolean_t
launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply)
{
	if (launchd_internal_server_routine(Request)) {
		return launchd_internal_server(Request, Reply);
	} else if (notify_server_routine(Request)) {
		return notify_server(Request, Reply);
	} else {
		return exc_server(Request, Reply);
	}
}
kern_return_t
do_mach_notify_port_destroyed(mach_port_t notify __attribute__((unused)),
		mach_port_t rights)
{
	/* This message is sent to us when a receive right is returned to us. */

	if (!launchd_assumes(job_ack_port_destruction(rights))) {
		launchd_assumes(launchd_mport_close_recv(rights) == KERN_SUCCESS);
	}

	return KERN_SUCCESS;
}
kern_return_t
do_mach_notify_port_deleted(mach_port_t notify __attribute__((unused)),
		mach_port_name_t name __attribute__((unused)))
{
	/* If we deallocate/destroy/mod_ref away a port with a pending
	 * notification, the original notification message is replaced with
	 * this message. To quote a Mach kernel expert, "the kernel has a
	 * send-once right that has to be used somehow."
	 */
	return KERN_SUCCESS;
}
kern_return_t
do_mach_notify_no_senders(mach_port_t notify,
		mach_port_mscount_t mscount __attribute__((unused)))
{
	job_t j = job_mig_intran(notify);

	/* This message is sent to us when the last customer of one of our
	 * objects goes away.
	 */

	if (!launchd_assumes(j != NULL)) {
		return KERN_FAILURE;
	}

	job_ack_no_senders(j);

	return KERN_SUCCESS;
}
kern_return_t
do_mach_notify_send_once(mach_port_t notify __attribute__((unused)))
{
	/* This message is sent to us every time we close a port that we have
	 * outstanding Mach notification requests on. We can safely ignore this
	 * message.
	 */
	return KERN_SUCCESS;
}
kern_return_t
do_mach_notify_dead_name(mach_port_t notify __attribute__((unused)),
		mach_port_name_t name)
{
	/* This message is sent to us when one of our send rights no longer has
	 * a receiver somewhere else on the system.
	 */

	if (name == drain_reply_port) {
		launchd_assumes(launchd_mport_deallocate(name) == KERN_SUCCESS);
		drain_reply_port = MACH_PORT_NULL;
	}

	if (launchd_assumes(root_jobmgr != NULL)) {
		root_jobmgr = jobmgr_delete_anything_with_port(root_jobmgr, name);
	}

	/* A dead-name notification about a port appears to increment the
	 * rights on said port. Let's deallocate it so that we don't leak
	 * dead-name rights.
	 */
	launchd_assumes(launchd_mport_deallocate(name) == KERN_SUCCESS);

	return KERN_SUCCESS;
}
void
record_caller_creds(mach_msg_header_t *mh)
{
	mach_msg_max_trailer_t *tp;
	mach_msg_size_t trailer_size;

	tp = (mach_msg_max_trailer_t *)((vm_offset_t)mh + round_msg(mh->msgh_size));

	trailer_size = tp->msgh_trailer_size - (mach_msg_size_t)(sizeof(mach_msg_trailer_type_t) - sizeof(mach_msg_trailer_size_t));

	if (trailer_size < (mach_msg_size_t)sizeof(audit_token_t)) {
		au_tok = NULL;
		return;
	}

	au_tok = &tp->msgh_audit;
}
void
runtime_get_caller_creds(struct ldcred *ldc)
{
	memset(ldc, 0, sizeof(struct ldcred));

	if (!au_tok) {
		return;
	}

	audit_token_to_au32(*au_tok, /* audit UID */ NULL, &ldc->euid,
			&ldc->egid, &ldc->uid, &ldc->gid, &ldc->pid,
			&ldc->asid, /* au_tid_t */ NULL);
}
void
launchd_runtime2(mach_msg_size_t msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply)
{
	mach_msg_options_t options, tmp_options;
	mig_reply_error_t *bufTemp;
	mig_callback the_demux;
	mach_msg_timeout_t to;
	mach_msg_return_t mr;

	options = MACH_RCV_MSG|MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT) |
		MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0);

	tmp_options = options;

	for (;;) {
		to = MACH_MSG_TIMEOUT_NONE;

		if (msg_size != max_msg_size) {
			/* The buffer isn't big enough to receive messages anymore... */
			tmp_options &= ~MACH_RCV_MSG;
			options &= ~MACH_RCV_MSG;
			if (!(tmp_options & MACH_SEND_MSG)) {
				return;
			}
		}

		if ((tmp_options & MACH_RCV_MSG) && (runtime_idle_callback || (runtime_busy_cnt == 0))) {
			tmp_options |= MACH_RCV_TIMEOUT;

			if (!(tmp_options & MACH_SEND_TIMEOUT)) {
				to = runtime_busy_cnt ? runtime_idle_timeout : (RUNTIME_ADVISABLE_IDLE_TIMEOUT * 1000);
			}
		}

		mr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
				msg_size, ipc_port_set, to, MACH_PORT_NULL);

		tmp_options = options;

		if (mr == MACH_SEND_INVALID_DEST || mr == MACH_SEND_TIMED_OUT) {
			/* We need to clean up and start over. */
			if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
				mach_msg_destroy(&bufReply->Head);
			}
			continue;
		} else if (mr == MACH_RCV_TIMED_OUT) {
			if (to != MACH_MSG_TIMEOUT_NONE) {
				if (runtime_busy_cnt == 0) {
					launchd_shutdown();
				} else if (runtime_idle_callback) {
					runtime_idle_callback();
				}
			}
			continue;
		} else if (!launchd_assumes(mr == MACH_MSG_SUCCESS)) {
			continue;
		}

		bufTemp = bufRequest;
		bufRequest = bufReply;
		bufReply = bufTemp;

		if (!(tmp_options & MACH_RCV_MSG)) {
			continue;
		}

		/* we have another request message */

		if (!launchd_assumes(mig_cb_table != NULL)) {
			break;
		}

		the_demux = mig_cb_table[MACH_PORT_INDEX(bufRequest->Head.msgh_local_port)];

		if (!launchd_assumes(the_demux != NULL)) {
			break;
		}

		record_caller_creds(&bufRequest->Head);

		/*
		 * This is a total hack. We really need a bit in the kernel's proc
		 * struct to declare our intent.
		 */
		static int no_hang_fd = -1;
		if (no_hang_fd == -1) {
			no_hang_fd = _fd(open("/dev/autofs_nowait", 0));
		}

		if (the_demux(&bufRequest->Head, &bufReply->Head) == FALSE) {
			/* XXX - also gross */
			if (bufRequest->Head.msgh_id == MACH_NOTIFY_NO_SENDERS) {
				notify_server(&bufRequest->Head, &bufReply->Head);
			}
		}

		if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
			if (bufReply->RetCode == MIG_NO_REPLY) {
				bufReply->Head.msgh_remote_port = MACH_PORT_NULL;
			} else if ((bufReply->RetCode != KERN_SUCCESS) && (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
				/* destroy the request - but not the reply port */
				bufRequest->Head.msgh_remote_port = MACH_PORT_NULL;
				mach_msg_destroy(&bufRequest->Head);
			}
		}

		if (bufReply->Head.msgh_remote_port != MACH_PORT_NULL) {
			tmp_options |= MACH_SEND_MSG;

			if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != MACH_MSG_TYPE_MOVE_SEND_ONCE) {
				tmp_options |= MACH_SEND_TIMEOUT;
			}
		}
	}
}
void
runtime_close(int fd)
{
	int i;

	if (bulk_kev) for (i = bulk_kev_i + 1; i < bulk_kev_cnt; i++) {
		switch (bulk_kev[i].filter) {
		case EVFILT_VNODE:
		case EVFILT_READ:
		case EVFILT_WRITE:
			if ((int)bulk_kev[i].ident == fd) {
				runtime_syslog(LOG_DEBUG, "Skipping kevent index: %d", i);
				bulk_kev[i].filter = 0;
			}
			break;
		default:
			break;
		}
	}

	launchd_assumes(close(fd) != -1);
}
static FILE *ourlogfile;

void
runtime_closelog(void)
{
	if (ourlogfile) {
		launchd_assumes(fflush(ourlogfile) == 0);
		launchd_assumes(runtime_fsync(fileno(ourlogfile)) != -1);
	}
}
int
runtime_fsync(int fd)
{
	if (debug_shutdown_hangs) {
		return fcntl(fd, F_FULLFSYNC, NULL);
	} else {
		return fsync(fd);
	}
}
static int internal_mask_pri = LOG_UPTO(LOG_NOTICE);
//static int internal_mask_pri = LOG_UPTO(LOG_DEBUG);
int
runtime_setlogmask(int maskpri)
{
	internal_mask_pri = maskpri;

	return internal_mask_pri;
}
void
runtime_syslog(int pri, const char *message, ...)
{
	struct runtime_syslog_attr attr = {
		"com.apple.launchd", "com.apple.launchd",
		getpid() == 1 ? "System" : "Background",
		pri, getuid(), getpid(), getpid()
	};
	va_list ap;

	va_start(ap, message);

	runtime_vsyslog(&attr, message, ap);

	va_end(ap);
}
void
runtime_vsyslog(struct runtime_syslog_attr *attr, const char *message, va_list args)
{
	static pthread_mutex_t ourlock = PTHREAD_MUTEX_INITIALIZER;
	static struct timeval shutdown_start;
	static struct timeval prev_msg;
	static int apple_internal_logging = 1;
	struct timeval tvnow, tvd_total, tvd_msg_delta = { 0, 0 };
	struct stat sb;
	int saved_errno = errno;
	char newmsg[10000];
	size_t i, j;

	if (!(LOG_MASK(attr->priority) & internal_mask_pri)) {
		goto out;
	}

	if (apple_internal_logging == 1) {
		apple_internal_logging = stat("/AppleInternal", &sb);
	}

	if (!(debug_shutdown_hangs && getpid() == 1)) {
		if (attr->priority == LOG_APPLEONLY) {
			if (apple_internal_logging == -1) {
				goto out;
			}
			attr->priority = LOG_NOTICE;
		}
		vsnprintf(newmsg, sizeof(newmsg), message, args);
		logmsg_add(attr, saved_errno, newmsg);
		goto out;
	}

	if (shutdown_start.tv_sec == 0) {
		gettimeofday(&shutdown_start, NULL);
	}

	if (gettimeofday(&tvnow, NULL) == -1) {
		tvnow.tv_sec = 0;
		tvnow.tv_usec = 0;
	}

	pthread_mutex_lock(&ourlock);

	if (ourlogfile == NULL) {
		rename("/var/log/launchd-shutdown.log", "/var/log/launchd-shutdown.log.1");
		ourlogfile = fopen("/var/log/launchd-shutdown.log", "a");
	}

	pthread_mutex_unlock(&ourlock);

	if (ourlogfile == NULL) {
		goto out;
	}

	if (message == NULL) {
		goto out;
	}

	timersub(&tvnow, &shutdown_start, &tvd_total);

	if (prev_msg.tv_sec != 0) {
		timersub(&tvnow, &prev_msg, &tvd_msg_delta);
	}

	prev_msg = tvnow;

	snprintf(newmsg, sizeof(newmsg), "%3ld.%06d%4ld.%06d%6u %-40s%6u %-40s ",
			tvd_total.tv_sec, tvd_total.tv_usec,
			tvd_msg_delta.tv_sec, tvd_msg_delta.tv_usec,
			attr->from_pid, attr->from_name,
			attr->about_pid, attr->about_name);

	for (i = 0, j = strlen(newmsg); message[i];) {
		if (message[i] == '%' && message[i + 1] == 'm') {
			char *errs = strerror(saved_errno);
			strcpy(newmsg + j, errs ? errs : "unknown error");
			j += strlen(newmsg + j);
			i += 2;
		} else {
			newmsg[j] = message[i];
			j++;
			i++;
		}
	}

	strcpy(newmsg + j, "\n");

	vfprintf(ourlogfile, newmsg, args);

out:
	runtime_log_uncork_pending_drain();
}
bool
logmsg_add(struct runtime_syslog_attr *attr, int err_num, const char *msg)
{
	size_t lm_sz = sizeof(struct logmsg_s) + strlen(msg) + strlen(attr->from_name) +
		strlen(attr->about_name) + strlen(attr->session_name) + 4;
	char *data_off;
	struct logmsg_s *lm;

#define ROUND_TO_64BIT_WORD_SIZE(x)	((x + 7) & ~7)

	/* round up so that the unpacking in the log drain doesn't cause alignment faults */
	lm_sz = ROUND_TO_64BIT_WORD_SIZE(lm_sz);
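	/* e.g. ROUND_TO_64BIT_WORD_SIZE(41) == 48 and (48) == 48: adding 7 and
	 * masking off the low three bits rounds up to the next multiple of 8. */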
	if (!(lm = calloc(1, lm_sz))) {
		return false;
	}

	data_off = lm->data;

	launchd_assumes(gettimeofday(&lm->when, NULL) != -1);
	lm->from_pid = attr->from_pid;
	lm->about_pid = attr->about_pid;
	lm->err_num = err_num;
	lm->pri = attr->priority;
	lm->obj_sz = lm_sz;
	lm->msg = data_off;
	data_off += sprintf(data_off, "%s", msg) + 1;
	lm->from_name = data_off;
	data_off += sprintf(data_off, "%s", attr->from_name) + 1;
	lm->about_name = data_off;
	data_off += sprintf(data_off, "%s", attr->about_name) + 1;
	lm->session_name = data_off;
	data_off += sprintf(data_off, "%s", attr->session_name) + 1;

	STAILQ_INSERT_TAIL(&logmsg_queue, lm, sqe);
	logmsg_queue_sz += lm_sz;
	logmsg_queue_cnt++;

	return true;
}
void
logmsg_remove(struct logmsg_s *lm)
{
	STAILQ_REMOVE(&logmsg_queue, lm, logmsg_s, sqe);
	logmsg_queue_sz -= lm->obj_sz;
	logmsg_queue_cnt--;

	free(lm);
}
kern_return_t
runtime_log_pack(vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	struct logmsg_s *lm;
	void *offset;

	*outvalCnt = logmsg_queue_sz;

	mig_allocate(outval, *outvalCnt);

	if (*outval == 0) {
		return KERN_FAILURE;
	}

	offset = (void *)*outval;

	while ((lm = STAILQ_FIRST(&logmsg_queue))) {
		lm->from_name -= (size_t)lm;
		lm->about_name -= (size_t)lm;
		lm->msg -= (size_t)lm;
		lm->session_name -= (size_t)lm;

		memcpy(offset, lm, lm->obj_sz);

		offset += lm->obj_sz;

		logmsg_remove(lm);
	}

	return 0;
}
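/*
 * Editorial note (not part of the original source): runtime_log_pack()
 * serializes each record by turning its embedded pointers into offsets
 * relative to the record itself (ptr - (size_t)lm). The receiving side
 * rebases them against wherever the record landed in its buffer, as
 * runtime_log_forward() does below, e.g.:
 *
 *	lm->msg += (size_t)lm;	// offset back to pointer
 */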
void
runtime_log_uncork_pending_drain(void)
{
	mach_msg_type_number_t outvalCnt;
	mach_port_t tmp_port;
	vm_offset_t outval;

	if (!drain_reply_port) {
		return;
	}

	if (logmsg_queue_cnt == 0) {
		return;
	}

	if (runtime_log_pack(&outval, &outvalCnt) != 0) {
		return;
	}

	tmp_port = drain_reply_port;
	drain_reply_port = MACH_PORT_NULL;

	if ((errno = job_mig_log_drain_reply(tmp_port, 0, outval, outvalCnt))) {
		launchd_assumes(errno == MACH_SEND_INVALID_DEST);
		launchd_assumes(launchd_mport_deallocate(tmp_port) == KERN_SUCCESS);
	}

	mig_deallocate(outval, outvalCnt);
}
void
runtime_log_push(void)
{
	mach_msg_type_number_t outvalCnt;
	vm_offset_t outval;

	if (logmsg_queue_cnt == 0) {
		launchd_assumes(STAILQ_EMPTY(&logmsg_queue));
		return;
	} else if (getpid() == 1) {
		return;
	}

	if (runtime_log_pack(&outval, &outvalCnt) != 0) {
		return;
	}

	launchd_assumes(_vprocmgr_log_forward(inherited_bootstrap_port, (void *)outval, outvalCnt) == NULL);

	mig_deallocate(outval, outvalCnt);
}
kern_return_t
runtime_log_forward(uid_t forward_uid, gid_t forward_gid, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	struct logmsg_s *lm, *lm_walk;
	mach_msg_type_number_t data_left = invalCnt;

	if (inval == 0) {
		return 0;
	}

	for (lm_walk = (struct logmsg_s *)inval; (data_left > 0) && (lm_walk->obj_sz <= data_left); lm_walk = ((void *)lm_walk + lm_walk->obj_sz)) {
		if (!launchd_assumes(lm = malloc(lm_walk->obj_sz))) {
			continue;
		}

		memcpy(lm, lm_walk, lm_walk->obj_sz);
		lm->sender_uid = forward_uid;
		lm->sender_gid = forward_gid;

		lm->from_name += (size_t)lm;
		lm->about_name += (size_t)lm;
		lm->msg += (size_t)lm;
		lm->session_name += (size_t)lm;

		STAILQ_INSERT_TAIL(&logmsg_queue, lm, sqe);
		logmsg_queue_sz += lm->obj_sz;
		logmsg_queue_cnt++;

		data_left -= lm->obj_sz;
	}

	mig_deallocate(inval, invalCnt);

	return 0;
}
kern_return_t
runtime_log_drain(mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	if (logmsg_queue_cnt == 0) {
		launchd_assumes(STAILQ_EMPTY(&logmsg_queue));
		launchd_assumes(drain_reply_port == 0);

		drain_reply_port = srp;
		launchd_assumes(launchd_mport_notify_req(drain_reply_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);

		return MIG_NO_REPLY;
	}

	return runtime_log_pack(outval, outvalCnt);
}
/*
 * We should break this into two reference counts.
 *
 * One for hard references that would prevent exiting.
 * One for soft references that would only prevent idle exiting.
 *
 * In the long run, reference counting should completely automate when a
 * process can and should exit.
 */
void
runtime_add_ref(void)
{
	runtime_busy_cnt++;
}

void
runtime_del_ref(void)
{
	runtime_busy_cnt--;
}
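/*
 * Editorial sketch (not part of the original source): the two-counter scheme
 * the comment above proposes. Hard references would gate exiting entirely;
 * soft references would only gate the idle-exit path that launchd_runtime2()
 * takes when runtime_busy_cnt reaches zero. All names are hypothetical.
 */
#if 0
static size_t runtime_hard_refs;	/* would prevent exiting at all */
static size_t runtime_soft_refs;	/* would only prevent idle exit */

static bool
runtime_may_idle_exit_sketch(void)
{
	return runtime_hard_refs == 0 && runtime_soft_refs == 0;
}
#endif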
kern_return_t
catch_exception_raise(mach_port_t exception_port __attribute__((unused)),
		mach_port_t thread, mach_port_t task,
		exception_type_t exception, exception_data_t code,
		mach_msg_type_number_t codeCnt)
{
	runtime_syslog(LOG_NOTICE, "%s(): thread: 0x%x task: 0x%x type: 0x%x code: %p codeCnt: 0x%x",
			__func__, thread, task, exception, code, codeCnt);

	launchd_assumes(launchd_mport_deallocate(thread) == KERN_SUCCESS);
	launchd_assumes(launchd_mport_deallocate(task) == KERN_SUCCESS);

	return KERN_SUCCESS;
}
kern_return_t
catch_exception_raise_state(mach_port_t exception_port __attribute__((unused)),
		exception_type_t exception,
		const exception_data_t code, mach_msg_type_number_t codeCnt,
		int *flavor,
		const thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
		thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
{
	runtime_syslog(LOG_NOTICE, "%s(): type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
			__func__, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);

	memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
	*new_stateCnt = old_stateCnt;

	return KERN_SUCCESS;
}
kern_return_t
catch_exception_raise_state_identity(mach_port_t exception_port __attribute__((unused)),
		mach_port_t thread, mach_port_t task,
		exception_type_t exception,
		exception_data_t code, mach_msg_type_number_t codeCnt,
		int *flavor,
		thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
		thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
{
	runtime_syslog(LOG_NOTICE, "%s(): thread: 0x%x task: 0x%x type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
			__func__, thread, task, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);

	memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
	*new_stateCnt = old_stateCnt;

	launchd_assumes(launchd_mport_deallocate(thread) == KERN_SUCCESS);
	launchd_assumes(launchd_mport_deallocate(task) == KERN_SUCCESS);

	return KERN_SUCCESS;
}