/*
 * Copyright (c) 1999-2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
/* RCS keyword embedded in the binary so the source revision can be
 * recovered from a built launchd with strings(1). */
static const char *const __rcs_file_version__ = "$Revision: 24003 $";
24 #include "launchd_runtime.h"
26 #include <mach/mach.h>
27 #include <mach/mach_error.h>
28 #include <mach/boolean.h>
29 #include <mach/message.h>
30 #include <mach/notify.h>
31 #include <mach/mig_errors.h>
32 #include <mach/mach_traps.h>
33 #include <mach/mach_interface.h>
34 #include <mach/host_info.h>
35 #include <mach/mach_host.h>
36 #include <mach/mach_time.h>
37 #include <mach/exception.h>
38 #include <sys/types.h>
40 #include <sys/sysctl.h>
43 #include <sys/event.h>
44 #include <sys/queue.h>
45 #include <sys/socket.h>
46 #include <sys/mount.h>
47 #include <sys/reboot.h>
48 #include <sys/fcntl.h>
49 #include <sys/kdebug.h>
50 #include <bsm/libbsm.h>
51 #include <malloc/malloc.h>
64 #include "launchd_internalServer.h"
65 #include "launchd_internal.h"
66 #include "notifyServer.h"
67 #include "mach_excServer.h"
69 /* We shouldn't be including these */
72 #include "launchd_core_logic.h"
74 #include "vproc_priv.h"
75 #include "vproc_internal.h"
76 #include "protocol_job_reply.h"
78 static mach_port_t ipc_port_set
;
79 static mach_port_t demand_port_set
;
80 static mach_port_t launchd_internal_port
;
83 #define BULK_KEV_MAX 100
84 static struct kevent
*bulk_kev
;
85 static int bulk_kev_i
;
86 static int bulk_kev_cnt
;
88 static pthread_t kqueue_demand_thread
;
90 static void mportset_callback(void);
91 static kq_callback kqmportset_callback
= (kq_callback
)mportset_callback
;
92 static void *kqueue_demand_loop(void *arg
);
94 boolean_t
launchd_internal_demux(mach_msg_header_t
*Request
, mach_msg_header_t
*Reply
);
95 static void record_caller_creds(mach_msg_header_t
*mh
);
96 static void launchd_runtime2(mach_msg_size_t msg_size
, mig_reply_error_t
*bufRequest
, mig_reply_error_t
*bufReply
);
97 static mach_msg_size_t max_msg_size
;
98 static mig_callback
*mig_cb_table
;
99 static size_t mig_cb_table_sz
;
100 static timeout_callback runtime_idle_callback
;
101 static mach_msg_timeout_t runtime_idle_timeout
;
102 static struct ldcred ldc
;
103 static size_t runtime_standby_cnt
;
105 static STAILQ_HEAD(, logmsg_s
) logmsg_queue
= STAILQ_HEAD_INITIALIZER(logmsg_queue
);
106 static size_t logmsg_queue_sz
;
107 static size_t logmsg_queue_cnt
;
108 static mach_port_t drain_reply_port
;
109 static void runtime_log_uncork_pending_drain(void);
110 static kern_return_t
runtime_log_pack(vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
);
112 static bool logmsg_add(struct runtime_syslog_attr
*attr
, int err_num
, const char *msg
);
113 static void logmsg_remove(struct logmsg_s
*lm
);
115 static void do_file_init(void) __attribute__((constructor
));
116 static mach_timebase_info_data_t tbi
;
117 static uint64_t tbi_safe_math_max
;
118 static uint64_t time_of_mach_msg_return
;
119 static double tbi_float_val
;
121 static const int sigigns
[] = { SIGHUP
, SIGINT
, SIGPIPE
, SIGALRM
, SIGTERM
,
122 SIGURG
, SIGTSTP
, SIGTSTP
, SIGCONT
, SIGTTIN
, SIGTTOU
, SIGIO
, SIGXCPU
,
123 SIGXFSZ
, SIGVTALRM
, SIGPROF
, SIGWINCH
, SIGINFO
, SIGUSR1
, SIGUSR2
125 static sigset_t sigign_set
;
/* Per-user shutdown log file (opened in runtime_log_pack()). */
static FILE *ourlogfile;

/* Global behavior flags, configured at startup. */
bool do_apple_internal_logging;
bool low_level_debug;
bool g_force_old_kill_path = false;
bool g_flat_mach_namespace = true;
bool g_simulate_pid1_crash = false;
bool g_use_gmalloc = false;
bool g_log_per_user_shutdown = false;
#if !TARGET_OS_EMBEDDED
bool g_log_pid1_shutdown = true;
#else
/* NOTE(review): #else/#endif restored — the extraction dropped them, and two
 * unconditional definitions of g_log_pid1_shutdown cannot compile. */
bool g_log_pid1_shutdown = false;
#endif
bool g_log_strict_usage = false;

/* Count of outstanding "busy" references that keep launchd from idle-exiting. */
size_t runtime_busy_cnt;
145 runtime_get_kernel_port(void)
147 return launchd_internal_port
;
// static const char *__crashreporter_info__ = "";

/* Current syslog priority mask; messages whose priority falls outside this
 * mask are dropped (see runtime_setlogmask()/runtime_vsyslog()). */
static int internal_mask_pri = LOG_UPTO(LOG_NOTICE);
156 launchd_runtime_init(void)
158 mach_msg_size_t mxmsgsz
;
161 launchd_assert((mainkq
= kqueue()) != -1);
163 launchd_assert((errno
= mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET
, &demand_port_set
)) == KERN_SUCCESS
);
164 launchd_assert((errno
= mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET
, &ipc_port_set
)) == KERN_SUCCESS
);
166 launchd_assert(kevent_mod(demand_port_set
, EVFILT_MACHPORT
, EV_ADD
, 0, 0, &kqmportset_callback
) != -1);
168 launchd_assert(launchd_mport_create_recv(&launchd_internal_port
) == KERN_SUCCESS
);
169 launchd_assert(launchd_mport_make_send(launchd_internal_port
) == KERN_SUCCESS
);
171 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
172 mxmsgsz
= sizeof(union __RequestUnion__x_launchd_internal_subsystem
);
173 if (x_launchd_internal_subsystem
.maxsize
> mxmsgsz
) {
174 mxmsgsz
= x_launchd_internal_subsystem
.maxsize
;
177 launchd_assert(runtime_add_mport(launchd_internal_port
, launchd_internal_demux
, mxmsgsz
) == KERN_SUCCESS
);
178 launchd_assert(pthread_create(&kqueue_demand_thread
, NULL
, kqueue_demand_loop
, NULL
) == 0);
179 launchd_assert(pthread_detach(kqueue_demand_thread
) == 0);
181 launchd_assumes(sysctlbyname("vfs.generic.noremotehang", NULL
, NULL
, &p
, sizeof(p
)) != -1);
185 launchd_runtime_init2(void)
189 for (i
= 0; i
< (sizeof(sigigns
) / sizeof(int)); i
++) {
190 sigaddset(&sigign_set
, sigigns
[i
]);
191 launchd_assumes(signal(sigigns
[i
], SIG_IGN
) != SIG_ERR
);
196 proc_flags_to_C_names(unsigned int flags
)
198 #define MAX_PFLAG_STR "P_ADVLOCK|P_CONTROLT|P_LP64|P_NOCLDSTOP|P_PPWAIT|P_PROFIL|P_SELECT|P_CONTINUED|P_SUGID|P_SYSTEM|P_TIMEOUT|P_TRACED|P_RESV3|P_WEXIT|P_EXEC|P_OWEUPC|P_AFFINITY|P_TRANSLATED|P_RESV5|P_CHECKOPENEVT|P_DEPENDENCY_CAPABLE|P_REBOOT|P_TBE|P_RESV7|P_THCWD|P_RESV9|P_RESV10|P_RESV11|P_NOSHLIB|P_FORCEQUOTA|P_NOCLDWAIT|P_NOREMOTEHANG|0xdeadbeeffeedface"
200 static char flags_buf
[sizeof(MAX_PFLAG_STR
)];
201 char *flags_off
= NULL
;
213 flags_off
= flags_buf
;
216 #define FLAGIF(f) if (flags & f) { flags_off += sprintf(flags_off, #f); flags &= ~f; }
219 else FLAGIF(P_CONTROLT
)
221 else FLAGIF(P_NOCLDSTOP
)
222 else FLAGIF(P_PPWAIT
)
223 else FLAGIF(P_PROFIL
)
224 else FLAGIF(P_SELECT
)
225 else FLAGIF(P_CONTINUED
)
227 else FLAGIF(P_SYSTEM
)
228 else FLAGIF(P_TIMEOUT
)
229 else FLAGIF(P_TRACED
)
233 else FLAGIF(P_OWEUPC
)
234 else FLAGIF(P_AFFINITY
)
235 else FLAGIF(P_TRANSLATED
)
237 else FLAGIF(P_CHECKOPENEVT
)
238 else FLAGIF(P_DEPENDENCY_CAPABLE
)
239 else FLAGIF(P_REBOOT
)
244 else FLAGIF(P_RESV10
)
245 else FLAGIF(P_RESV11
)
246 else FLAGIF(P_NOSHLIB
)
247 else FLAGIF(P_FORCEQUOTA
)
248 else FLAGIF(P_NOCLDWAIT
)
249 else FLAGIF(P_NOREMOTEHANG
)
251 flags_off
+= sprintf(flags_off
, "0x%x", flags
);
260 reboot_flags_to_C_names(unsigned int flags
)
262 #define MAX_RB_STR "RB_ASKNAME|RB_SINGLE|RB_NOSYNC|RB_HALT|RB_INITNAME|RB_DFLTROOT|RB_ALTBOOT|RB_UNIPROC|RB_SAFEBOOT|RB_UPSDELAY|0xdeadbeeffeedface"
263 static char flags_buf
[sizeof(MAX_RB_STR
)];
264 char *flags_off
= NULL
;
267 return "RB_AUTOBOOT";
276 flags_off
= flags_buf
;
280 else FLAGIF(RB_SINGLE
)
281 else FLAGIF(RB_NOSYNC
)
283 else FLAGIF(RB_INITNAME
)
284 else FLAGIF(RB_DFLTROOT
)
285 else FLAGIF(RB_ALTBOOT
)
286 else FLAGIF(RB_UNIPROC
)
287 else FLAGIF(RB_SAFEBOOT
)
288 else FLAGIF(RB_UPSDELAY
)
290 flags_off
+= sprintf(flags_off
, "0x%x", flags
);
299 signal_to_C_name(unsigned int sig
)
301 static char unknown
[25];
303 #define SIG2CASE(sg) case sg: return #sg
337 snprintf(unknown
, sizeof(unknown
), "%u", sig
);
343 log_kevent_struct(int level
, struct kevent
*kev_base
, int indx
)
345 struct kevent
*kev
= &kev_base
[indx
];
346 const char *filter_str
;
348 char filter_buf
[100];
349 char fflags_buf
[1000];
350 char flags_buf
[1000] = "0x0";
351 char *flags_off
= NULL
;
352 char *fflags_off
= NULL
;
353 unsigned short flags
= kev
->flags
;
354 unsigned int fflags
= kev
->fflags
;
356 if (likely(!(LOG_MASK(level
) & internal_mask_pri
))) {
360 if (flags
) while (flags
) {
366 flags_off
= flags_buf
;
370 else FLAGIF(EV_RECEIPT
)
371 else FLAGIF(EV_DELETE
)
372 else FLAGIF(EV_ENABLE
)
373 else FLAGIF(EV_DISABLE
)
374 else FLAGIF(EV_CLEAR
)
376 else FLAGIF(EV_ONESHOT
)
377 else FLAGIF(EV_ERROR
)
379 flags_off
+= sprintf(flags_off
, "0x%hx", flags
);
384 snprintf(ident_buf
, sizeof(ident_buf
), "%ld", kev
->ident
);
385 snprintf(fflags_buf
, sizeof(fflags_buf
), "0x%x", fflags
);
387 switch (kev
->filter
) {
389 filter_str
= "EVFILT_READ";
392 filter_str
= "EVFILT_WRITE";
395 filter_str
= "EVFILT_AIO";
398 filter_str
= "EVFILT_VNODE";
399 if (fflags
) while (fflags
) {
405 fflags_off
= fflags_buf
;
408 #define FFLAGIF(ff) if (fflags & ff) { fflags_off += sprintf(fflags_off, #ff); fflags &= ~ff; }
411 else FFLAGIF(NOTE_WRITE
)
412 else FFLAGIF(NOTE_EXTEND
)
413 else FFLAGIF(NOTE_ATTRIB
)
414 else FFLAGIF(NOTE_LINK
)
415 else FFLAGIF(NOTE_RENAME
)
416 else FFLAGIF(NOTE_REVOKE
)
418 fflags_off
+= sprintf(fflags_off
, "0x%x", fflags
);
424 filter_str
= "EVFILT_PROC";
425 if (fflags
) while (fflags
) {
431 fflags_off
= fflags_buf
;
435 else FFLAGIF(NOTE_REAP
)
436 else FFLAGIF(NOTE_FORK
)
437 else FFLAGIF(NOTE_EXEC
)
438 else FFLAGIF(NOTE_SIGNAL
)
439 else FFLAGIF(NOTE_TRACK
)
440 else FFLAGIF(NOTE_TRACKERR
)
441 else FFLAGIF(NOTE_CHILD
)
443 fflags_off
+= sprintf(fflags_off
, "0x%x", fflags
);
449 filter_str
= "EVFILT_SIGNAL";
450 strcpy(ident_buf
, signal_to_C_name(kev
->ident
));
453 filter_str
= "EVFILT_TIMER";
454 snprintf(ident_buf
, sizeof(ident_buf
), "0x%lx", kev
->ident
);
455 if (fflags
) while (fflags
) {
461 fflags_off
= fflags_buf
;
464 FFLAGIF(NOTE_SECONDS
)
465 else FFLAGIF(NOTE_USECONDS
)
466 else FFLAGIF(NOTE_NSECONDS
)
467 else FFLAGIF(NOTE_ABSOLUTE
)
469 fflags_off
+= sprintf(fflags_off
, "0x%x", fflags
);
474 case EVFILT_MACHPORT
:
475 filter_str
= "EVFILT_MACHPORT";
476 snprintf(ident_buf
, sizeof(ident_buf
), "0x%lx", kev
->ident
);
479 filter_str
= "EVFILT_FS";
480 snprintf(ident_buf
, sizeof(ident_buf
), "0x%lx", kev
->ident
);
481 if (fflags
) while (fflags
) {
487 fflags_off
= fflags_buf
;
491 else FFLAGIF(VQ_NEEDAUTH
)
492 else FFLAGIF(VQ_LOWDISK
)
493 else FFLAGIF(VQ_MOUNT
)
494 else FFLAGIF(VQ_UNMOUNT
)
495 else FFLAGIF(VQ_DEAD
)
496 else FFLAGIF(VQ_ASSIST
)
497 else FFLAGIF(VQ_NOTRESPLOCK
)
498 else FFLAGIF(VQ_UPDATE
)
500 fflags_off
+= sprintf(fflags_off
, "0x%x", fflags
);
506 snprintf(filter_buf
, sizeof(filter_buf
), "%hd", kev
->filter
);
507 filter_str
= filter_buf
;
511 runtime_syslog(level
, "KEVENT[%d]: udata = %p data = 0x%lx ident = %s filter = %s flags = %s fflags = %s",
512 indx
, kev
->udata
, kev
->data
, ident_buf
, filter_str
, flags_buf
, fflags_buf
);
516 mportset_callback(void)
518 mach_port_name_array_t members
;
519 mach_msg_type_number_t membersCnt
;
520 mach_port_status_t status
;
521 mach_msg_type_number_t statusCnt
;
525 if (!launchd_assumes((errno
= mach_port_get_set_status(mach_task_self(), demand_port_set
, &members
, &membersCnt
)) == KERN_SUCCESS
)) {
529 for (i
= 0; i
< membersCnt
; i
++) {
530 statusCnt
= MACH_PORT_RECEIVE_STATUS_COUNT
;
531 if (mach_port_get_attributes(mach_task_self(), members
[i
], MACH_PORT_RECEIVE_STATUS
, (mach_port_info_t
)&status
,
532 &statusCnt
) != KERN_SUCCESS
) {
535 if (status
.mps_msgcount
) {
536 EV_SET(&kev
, members
[i
], EVFILT_MACHPORT
, 0, 0, 0, job_find_by_service_port(members
[i
]));
538 if (launchd_assumes(kev
.udata
!= NULL
)) {
540 log_kevent_struct(LOG_DEBUG
, &kev
, 0);
541 (*((kq_callback
*)kev
.udata
))(kev
.udata
, &kev
);
544 log_kevent_struct(LOG_ERR
, &kev
, 0);
547 /* the callback may have tainted our ability to continue this for loop */
552 launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t
)members
,
553 (vm_size_t
) membersCnt
* sizeof(mach_port_name_t
)) == KERN_SUCCESS
);
557 kqueue_demand_loop(void *arg
__attribute__((unused
)))
562 * Yes, at first glance, calling select() on a kqueue seems silly.
564 * This avoids a race condition between the main thread and this helper
565 * thread by ensuring that we drain kqueue events on the same thread
566 * that manipulates the kqueue.
571 FD_SET(mainkq
, &rfds
);
572 if (launchd_assumes(select(mainkq
+ 1, &rfds
, NULL
, NULL
, NULL
) == 1)) {
573 launchd_assumes(handle_kqueue(launchd_internal_port
, mainkq
) == 0);
581 x_handle_kqueue(mach_port_t junk
__attribute__((unused
)), integer_t fd
)
583 struct timespec ts
= { 0, 0 };
584 struct kevent
*kevi
, kev
[BULK_KEV_MAX
];
589 if (launchd_assumes((bulk_kev_cnt
= kevent(fd
, NULL
, 0, kev
, BULK_KEV_MAX
, &ts
)) != -1)) {
591 for (i
= 0; i
< bulk_kev_cnt
; i
++) {
592 log_kevent_struct(LOG_DEBUG
, &kev
[0], i
);
595 for (i
= 0; i
< bulk_kev_cnt
; i
++) {
600 runtime_syslog(LOG_DEBUG
, "Dispatching kevent...");
601 log_kevent_struct(LOG_DEBUG
, kev
, i
);
603 /* Check if kevi->udata was either malloc(3)ed or is a valid function pointer.
604 * If neither, it's probably an invalid pointer and we should log it.
607 if (launchd_assumes(malloc_size(kevi
->udata
) || dladdr(kevi
->udata
, &dli
))) {
608 runtime_ktrace(RTKT_LAUNCHD_BSD_KEVENT
|DBG_FUNC_START
, kevi
->ident
, kevi
->filter
, kevi
->fflags
);
609 (*((kq_callback
*)kevi
->udata
))(kevi
->udata
, kevi
);
610 runtime_ktrace0(RTKT_LAUNCHD_BSD_KEVENT
|DBG_FUNC_END
);
612 runtime_syslog(LOG_ERR
, "The following kevent had invalid context data.");
613 log_kevent_struct(LOG_EMERG
, &kev
[0], i
);
616 runtime_ktrace(RTKT_LAUNCHD_BSD_KEVENT
|DBG_FUNC_START
, kevi
->ident
, kevi
->filter
, kevi
->fflags
);
617 (*((kq_callback
*)kevi
->udata
))(kevi
->udata
, kevi
);
618 runtime_ktrace0(RTKT_LAUNCHD_BSD_KEVENT
|DBG_FUNC_END
);
630 launchd_runtime(void)
632 mig_reply_error_t
*req
= NULL
, *resp
= NULL
;
633 mach_msg_size_t mz
= max_msg_size
;
634 int flags
= VM_MAKE_TAG(VM_MEMORY_MACH_MSG
)|TRUE
;
638 launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t
)req
, mz
) == KERN_SUCCESS
);
642 launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t
)resp
, mz
) == KERN_SUCCESS
);
648 if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t
*)&req
, mz
, flags
) == KERN_SUCCESS
)) {
651 if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t
*)&resp
, mz
, flags
) == KERN_SUCCESS
)) {
655 launchd_runtime2(mz
, req
, resp
);
657 /* If we get here, max_msg_size probably changed... */
662 launchd_set_bport(mach_port_t name
)
664 return errno
= task_set_bootstrap_port(mach_task_self(), name
);
668 launchd_get_bport(mach_port_t
*name
)
670 return errno
= task_get_bootstrap_port(mach_task_self(), name
);
674 launchd_mport_notify_req(mach_port_t name
, mach_msg_id_t which
)
676 mach_port_mscount_t msgc
= (which
== MACH_NOTIFY_PORT_DESTROYED
) ? 0 : 1;
677 mach_port_t previous
, where
= (which
== MACH_NOTIFY_NO_SENDERS
) ? name
: launchd_internal_port
;
679 if (which
== MACH_NOTIFY_NO_SENDERS
) {
680 /* Always make sure the send count is zero, in case a receive right is reused */
681 errno
= mach_port_set_mscount(mach_task_self(), name
, 0);
682 if (unlikely(errno
!= KERN_SUCCESS
)) {
687 errno
= mach_port_request_notification(mach_task_self(), name
, which
, msgc
, where
,
688 MACH_MSG_TYPE_MAKE_SEND_ONCE
, &previous
);
690 if (likely(errno
== 0) && previous
!= MACH_PORT_NULL
) {
691 launchd_assumes(launchd_mport_deallocate(previous
) == KERN_SUCCESS
);
698 runtime_fork(mach_port_t bsport
)
700 sigset_t emptyset
, oset
;
705 sigemptyset(&emptyset
);
707 launchd_assumes(launchd_mport_make_send(bsport
) == KERN_SUCCESS
);
708 launchd_assumes(launchd_set_bport(bsport
) == KERN_SUCCESS
);
709 launchd_assumes(launchd_mport_deallocate(bsport
) == KERN_SUCCESS
);
711 launchd_assumes(sigprocmask(SIG_BLOCK
, &sigign_set
, &oset
) != -1);
712 for (i
= 0; i
< (sizeof(sigigns
) / sizeof(int)); i
++) {
713 launchd_assumes(signal(sigigns
[i
], SIG_DFL
) != SIG_ERR
);
720 for (i
= 0; i
< (sizeof(sigigns
) / sizeof(int)); i
++) {
721 launchd_assumes(signal(sigigns
[i
], SIG_IGN
) != SIG_ERR
);
723 launchd_assumes(sigprocmask(SIG_SETMASK
, &oset
, NULL
) != -1);
724 launchd_assumes(launchd_set_bport(MACH_PORT_NULL
) == KERN_SUCCESS
);
727 launchd_assumes(sysctlbyname("vfs.generic.noremotehang", NULL
, NULL
, &p
, sizeof(p
)) != -1);
729 launchd_assumes(sigprocmask(SIG_SETMASK
, &emptyset
, NULL
) != -1);
739 runtime_set_timeout(timeout_callback to_cb
, unsigned int sec
)
741 if (sec
== 0 || to_cb
== NULL
) {
742 runtime_idle_callback
= NULL
;
743 runtime_idle_timeout
= 0;
746 runtime_idle_callback
= to_cb
;
747 runtime_idle_timeout
= sec
* 1000;
751 runtime_add_mport(mach_port_t name
, mig_callback demux
, mach_msg_size_t msg_size
)
753 size_t needed_table_sz
= (MACH_PORT_INDEX(name
) + 1) * sizeof(mig_callback
);
754 mach_port_t target_set
= demux
? ipc_port_set
: demand_port_set
;
756 msg_size
= round_page(msg_size
+ MAX_TRAILER_SIZE
);
758 if (unlikely(needed_table_sz
> mig_cb_table_sz
)) {
759 needed_table_sz
*= 2; /* Let's try and avoid realloc'ing for a while */
760 mig_callback
*new_table
= malloc(needed_table_sz
);
762 if (!launchd_assumes(new_table
!= NULL
)) {
763 return KERN_RESOURCE_SHORTAGE
;
766 if (likely(mig_cb_table
)) {
767 memcpy(new_table
, mig_cb_table
, mig_cb_table_sz
);
771 mig_cb_table_sz
= needed_table_sz
;
772 mig_cb_table
= new_table
;
775 mig_cb_table
[MACH_PORT_INDEX(name
)] = demux
;
777 if (msg_size
> max_msg_size
) {
778 max_msg_size
= msg_size
;
781 return errno
= mach_port_move_member(mach_task_self(), name
, target_set
);
785 runtime_remove_mport(mach_port_t name
)
787 mig_cb_table
[MACH_PORT_INDEX(name
)] = NULL
;
789 return errno
= mach_port_move_member(mach_task_self(), name
, MACH_PORT_NULL
);
793 launchd_mport_make_send(mach_port_t name
)
795 return errno
= mach_port_insert_right(mach_task_self(), name
, name
, MACH_MSG_TYPE_MAKE_SEND
);
799 launchd_mport_copy_send(mach_port_t name
)
801 return errno
= mach_port_insert_right(mach_task_self(), name
, name
, MACH_MSG_TYPE_COPY_SEND
);
805 launchd_mport_close_recv(mach_port_t name
)
807 return errno
= mach_port_mod_refs(mach_task_self(), name
, MACH_PORT_RIGHT_RECEIVE
, -1);
811 launchd_mport_create_recv(mach_port_t
*name
)
813 return errno
= mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE
, name
);
817 launchd_mport_deallocate(mach_port_t name
)
819 return errno
= mach_port_deallocate(mach_task_self(), name
);
/* Submits kev_cnt changes to the main kqueue in one call, forcing
 * EV_CLEAR|EV_RECEIPT on each so per-entry results come back in place. */
int
kevent_bulk_mod(struct kevent *kev, size_t kev_cnt)
{
	size_t i;

	for (i = 0; i < kev_cnt; i++) {
		kev[i].flags |= EV_CLEAR|EV_RECEIPT;
	}

	return kevent(mainkq, kev, kev_cnt, kev, kev_cnt, NULL);
}
835 kevent_mod(uintptr_t ident
, short filter
, u_short flags
, u_int fflags
, intptr_t data
, void *udata
)
845 /* Workaround 5225889 */
846 if (flags
& EV_ADD
) {
847 kevent_mod(ident
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
857 if (flags
& EV_ADD
&& !launchd_assumes(udata
!= NULL
)) {
860 } else if( (flags
& EV_DELETE
) && bulk_kev
) {
862 for( i
= bulk_kev_i
+ 1; i
< bulk_kev_cnt
; i
++ ) {
863 if( bulk_kev
[i
].filter
== filter
&& bulk_kev
[i
].ident
== ident
) {
864 runtime_syslog(LOG_DEBUG
, "Pruning the following kevent:");
865 log_kevent_struct(LOG_DEBUG
, &bulk_kev
[0], i
);
866 bulk_kev
[i
].filter
= (short)0;
871 EV_SET(&kev
, ident
, filter
, flags
, fflags
, data
, udata
);
873 r
= kevent(mainkq
, &kev
, 1, &kev
, 1, NULL
);
875 if (!launchd_assumes(r
== 1)) {
879 if (launchd_assumes(kev
.flags
& EV_ERROR
)) {
880 if ((flags
& EV_ADD
) && kev
.data
) {
881 runtime_syslog(LOG_DEBUG
, "%s(): See the next line...", __func__
);
882 log_kevent_struct(LOG_DEBUG
, &kev
, 0);
892 launchd_internal_demux(mach_msg_header_t
*Request
, mach_msg_header_t
*Reply
)
894 if (launchd_internal_server_routine(Request
)) {
895 return launchd_internal_server(Request
, Reply
);
896 } else if (notify_server_routine(Request
)) {
897 return notify_server(Request
, Reply
);
899 return mach_exc_server(Request
, Reply
);
904 do_mach_notify_port_destroyed(mach_port_t notify
__attribute__((unused
)), mach_port_t rights
)
906 /* This message is sent to us when a receive right is returned to us. */
908 if (!launchd_assumes(job_ack_port_destruction(rights
))) {
909 launchd_assumes(launchd_mport_close_recv(rights
) == KERN_SUCCESS
);
916 do_mach_notify_port_deleted(mach_port_t notify
__attribute__((unused
)), mach_port_name_t name
__attribute__((unused
)))
918 /* If we deallocate/destroy/mod_ref away a port with a pending
919 * notification, the original notification message is replaced with
920 * this message. To quote a Mach kernel expert, "the kernel has a
921 * send-once right that has to be used somehow."
927 do_mach_notify_no_senders(mach_port_t notify
, mach_port_mscount_t mscount
__attribute__((unused
)))
929 job_t j
= job_mig_intran(notify
);
931 /* This message is sent to us when the last customer of one of our
935 if (!launchd_assumes(j
!= NULL
)) {
939 job_ack_no_senders(j
);
945 do_mach_notify_send_once(mach_port_t notify
__attribute__((unused
)))
948 * This message is sent for each send-once right that is deallocated
949 * without being used.
956 do_mach_notify_dead_name(mach_port_t notify
__attribute__((unused
)), mach_port_name_t name
)
958 /* This message is sent to us when one of our send rights no longer has
959 * a receiver somewhere else on the system.
962 if (name
== drain_reply_port
) {
963 launchd_assumes(launchd_mport_deallocate(name
) == KERN_SUCCESS
);
964 drain_reply_port
= MACH_PORT_NULL
;
967 if (launchd_assumes(root_jobmgr
!= NULL
)) {
968 root_jobmgr
= jobmgr_delete_anything_with_port(root_jobmgr
, name
);
971 /* A dead-name notification about a port appears to increment the
972 * rights on said port. Let's deallocate it so that we don't leak
975 launchd_assumes(launchd_mport_deallocate(name
) == KERN_SUCCESS
);
981 record_caller_creds(mach_msg_header_t
*mh
)
983 mach_msg_max_trailer_t
*tp
;
986 tp
= (mach_msg_max_trailer_t
*)((vm_offset_t
)mh
+ round_msg(mh
->msgh_size
));
988 trailer_size
= tp
->msgh_trailer_size
- (mach_msg_size_t
)(sizeof(mach_msg_trailer_type_t
) - sizeof(mach_msg_trailer_size_t
));
990 if (launchd_assumes(trailer_size
>= (mach_msg_size_t
)sizeof(audit_token_t
))) {
991 audit_token_to_au32(tp
->msgh_audit
, /* audit UID */ NULL
, &ldc
.euid
,
992 &ldc
.egid
, &ldc
.uid
, &ldc
.gid
, &ldc
.pid
,
993 /* au_asid_t */ NULL
, /* au_tid_t */ NULL
);
999 runtime_get_caller_creds(void)
1005 launchd_exc_runtime_once(mach_port_t port
, mach_msg_size_t rcv_msg_size
, mach_msg_size_t send_msg_size
, mig_reply_error_t
*bufRequest
, mig_reply_error_t
*bufReply
, mach_msg_timeout_t to
)
1007 mach_msg_return_t mr
= ~MACH_MSG_SUCCESS
;
1008 mach_msg_option_t rcv_options
= MACH_RCV_MSG
|
1010 MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT
) |
1011 MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0
) ;
1014 mr
= mach_msg(&bufRequest
->Head
, rcv_options
, 0, rcv_msg_size
, port
, to
, MACH_PORT_NULL
);
1016 case MACH_RCV_TIMED_OUT
:
1017 runtime_syslog(LOG_DEBUG
, "Message queue is empty.");
1019 case MACH_RCV_TOO_LARGE
:
1020 runtime_syslog(LOG_INFO
, "Message is larger than %u bytes.", rcv_msg_size
);
1023 launchd_assumes(mr
== MACH_MSG_SUCCESS
);
1026 if( mr
== MACH_MSG_SUCCESS
) {
1027 if( !launchd_assumes(mach_exc_server(&bufRequest
->Head
, &bufReply
->Head
) == TRUE
) ) {
1028 runtime_syslog(LOG_WARNING
, "Exception server routine failed.");
1032 mach_msg_return_t smr
= ~MACH_MSG_SUCCESS
;
1033 mach_msg_option_t send_options
= MACH_SEND_MSG
|
1036 launchd_assumes(bufReply
->Head
.msgh_size
<= send_msg_size
);
1037 smr
= mach_msg(&bufReply
->Head
, send_options
, bufReply
->Head
.msgh_size
, 0, MACH_PORT_NULL
, to
+ 100, MACH_PORT_NULL
);
1039 case MACH_SEND_TIMED_OUT
:
1040 runtime_syslog(LOG_WARNING
, "Timed out while trying to send reply to exception message.");
1042 case MACH_SEND_INVALID_DEST
:
1043 runtime_syslog(LOG_WARNING
, "Tried sending a message to a port that we don't possess a send right to.");
1046 if( !launchd_assumes(smr
== MACH_MSG_SUCCESS
) ) {
1047 runtime_syslog(LOG_WARNING
, "Couldn't deliver exception reply: 0x%x", smr
);
1058 launchd_runtime2(mach_msg_size_t msg_size
, mig_reply_error_t
*bufRequest
, mig_reply_error_t
*bufReply
)
1060 mach_msg_options_t options
, tmp_options
;
1061 mig_reply_error_t
*bufTemp
;
1062 mig_callback the_demux
;
1063 mach_msg_timeout_t to
;
1064 mach_msg_return_t mr
;
1067 options
= MACH_RCV_MSG
|MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT
) |
1068 MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0
);
1070 tmp_options
= options
;
1073 busy_cnt
= runtime_busy_cnt
+ runtime_standby_cnt
;
1074 to
= MACH_MSG_TIMEOUT_NONE
;
1076 if (unlikely(msg_size
!= max_msg_size
)) {
1077 /* The buffer isn't big enough to receive messages anymore... */
1078 tmp_options
&= ~MACH_RCV_MSG
;
1079 options
&= ~MACH_RCV_MSG
;
1080 if (!(tmp_options
& MACH_SEND_MSG
)) {
1085 if ((tmp_options
& MACH_RCV_MSG
) && (runtime_idle_callback
|| (busy_cnt
== 0))) {
1086 tmp_options
|= MACH_RCV_TIMEOUT
;
1088 if (!(tmp_options
& MACH_SEND_TIMEOUT
)) {
1089 #if !TARGET_OS_EMBEDDED
1090 to
= busy_cnt
? runtime_idle_timeout
: (_vproc_standby_timeout() * 1000);
1092 to
= runtime_idle_timeout
;
1099 mr
= mach_msg(&bufReply
->Head
, tmp_options
, bufReply
->Head
.msgh_size
,
1100 msg_size
, ipc_port_set
, to
, MACH_PORT_NULL
);
1102 time_of_mach_msg_return
= runtime_get_opaque_time();
1104 tmp_options
= options
;
1106 /* It looks like the compiler doesn't optimize switch(unlikely(...)) See: 5691066 */
1107 if (unlikely(mr
)) switch (mr
) {
1108 case MACH_SEND_INVALID_DEST
:
1109 case MACH_SEND_TIMED_OUT
:
1110 /* We need to clean up and start over. */
1111 if (bufReply
->Head
.msgh_bits
& MACH_MSGH_BITS_COMPLEX
) {
1112 mach_msg_destroy(&bufReply
->Head
);
1115 case MACH_RCV_TIMED_OUT
:
1116 if (to
!= MACH_MSG_TIMEOUT_NONE
) {
1117 if (busy_cnt
== 0) {
1118 runtime_syslog(LOG_INFO
, "Idle exiting.");
1120 } else if (runtime_idle_callback
) {
1121 runtime_idle_callback();
1126 if( !launchd_assumes(mr
== MACH_MSG_SUCCESS
) ) {
1127 runtime_syslog(LOG_ERR
, "mach_msg(): %u: %s", mr
, mach_error_string(mr
));
1132 bufTemp
= bufRequest
;
1133 bufRequest
= bufReply
;
1136 if (unlikely(!(tmp_options
& MACH_RCV_MSG
))) {
1140 /* we have another request message */
1142 if (!launchd_assumes(mig_cb_table
!= NULL
)) {
1147 the_demux
= mig_cb_table
[MACH_PORT_INDEX(bufRequest
->Head
.msgh_local_port
)];
1150 if (!launchd_assumes(the_demux
!= NULL
)) {
1155 record_caller_creds(&bufRequest
->Head
);
1156 runtime_ktrace(RTKT_LAUNCHD_MACH_IPC
|DBG_FUNC_START
, bufRequest
->Head
.msgh_local_port
, bufRequest
->Head
.msgh_id
, (long)the_demux
);
1158 if (the_demux(&bufRequest
->Head
, &bufReply
->Head
) == FALSE
) {
1159 /* XXX - also gross */
1160 if (likely(bufRequest
->Head
.msgh_id
== MACH_NOTIFY_NO_SENDERS
)) {
1161 notify_server(&bufRequest
->Head
, &bufReply
->Head
);
1165 runtime_ktrace(RTKT_LAUNCHD_MACH_IPC
|DBG_FUNC_END
, bufReply
->Head
.msgh_remote_port
, bufReply
->Head
.msgh_bits
, bufReply
->RetCode
);
1167 /* bufReply is a union. If MACH_MSGH_BITS_COMPLEX is set, then bufReply->RetCode is assumed to be zero. */
1168 if (!(bufReply
->Head
.msgh_bits
& MACH_MSGH_BITS_COMPLEX
)) {
1169 if (unlikely(bufReply
->RetCode
!= KERN_SUCCESS
)) {
1170 if (likely(bufReply
->RetCode
== MIG_NO_REPLY
)) {
1171 bufReply
->Head
.msgh_remote_port
= MACH_PORT_NULL
;
1172 } else if (bufRequest
->Head
.msgh_bits
& MACH_MSGH_BITS_COMPLEX
) {
1173 /* destroy the request - but not the reply port */
1174 bufRequest
->Head
.msgh_remote_port
= MACH_PORT_NULL
;
1175 mach_msg_destroy(&bufRequest
->Head
);
1180 if (likely(bufReply
->Head
.msgh_remote_port
!= MACH_PORT_NULL
)) {
1181 tmp_options
|= MACH_SEND_MSG
;
1183 if (unlikely(MACH_MSGH_BITS_REMOTE(bufReply
->Head
.msgh_bits
) != MACH_MSG_TYPE_MOVE_SEND_ONCE
)) {
1184 tmp_options
|= MACH_SEND_TIMEOUT
;
1191 runtime_close(int fd
)
1195 if (bulk_kev
) for (i
= bulk_kev_i
+ 1; i
< bulk_kev_cnt
; i
++) {
1196 switch (bulk_kev
[i
].filter
) {
1200 if (unlikely((int)bulk_kev
[i
].ident
== fd
)) {
1201 runtime_syslog(LOG_DEBUG
, "Skipping kevent index: %d", i
);
1202 bulk_kev
[i
].filter
= 0;
1213 runtime_closelog(void)
1218 launchd_assumes(fflush(ourlogfile
) == 0);
1219 launchd_assumes(runtime_fsync(fileno(ourlogfile
)) != -1);
1224 runtime_fsync(int fd
)
1227 if (do_apple_internal_logging
) {
1228 return fcntl(fd
, F_FULLFSYNC
, NULL
);
1238 runtime_setlogmask(int maskpri
)
1240 internal_mask_pri
= maskpri
;
1242 return internal_mask_pri
;
1246 runtime_syslog(int pri
, const char *message
, ...)
1248 struct runtime_syslog_attr attr
= {
1251 pid1_magic
? "System" : "Background",
1259 va_start(ap
, message
);
1260 runtime_vsyslog(&attr
, message
, ap
);
1266 runtime_vsyslog(struct runtime_syslog_attr
*attr
, const char *message
, va_list args
)
1268 int saved_errno
= errno
;
1270 bool echo_to_console
= false;
1272 if (attr
->priority
== LOG_APPLEONLY
) {
1273 if (do_apple_internal_logging
) {
1274 attr
->priority
= LOG_NOTICE
;
1278 } else if( attr
->priority
== LOG_SCOLDING
) {
1279 attr
->priority
= g_log_strict_usage
? LOG_NOTICE
: LOG_DEBUG
;
1282 if( attr
->priority
& LOG_CONSOLE
) {
1283 echo_to_console
= true;
1284 attr
->priority
&= ~LOG_CONSOLE
;
1287 if (!(LOG_MASK(attr
->priority
) & internal_mask_pri
)) {
1291 vsnprintf(newmsg
, sizeof(newmsg
), message
, args
);
1293 if( g_console
&& (unlikely(low_level_debug
) || echo_to_console
) ) {
1294 fprintf(g_console
, "%s %u\t%s %u\t%s\n", attr
->from_name
, attr
->from_pid
, attr
->about_name
, attr
->about_pid
, newmsg
);
1297 logmsg_add(attr
, saved_errno
, newmsg
);
1301 logmsg_add(struct runtime_syslog_attr
*attr
, int err_num
, const char *msg
)
1303 size_t lm_sz
= sizeof(struct logmsg_s
) + strlen(msg
) + strlen(attr
->from_name
) + strlen(attr
->about_name
) + strlen(attr
->session_name
) + 4;
1305 struct logmsg_s
*lm
;
1307 #define ROUND_TO_64BIT_WORD_SIZE(x) ((x + 7) & ~7)
1309 /* we do this to make the unpacking for the log_drain cause unalignment faults */
1310 lm_sz
= ROUND_TO_64BIT_WORD_SIZE(lm_sz
);
1312 if (unlikely((lm
= calloc(1, lm_sz
)) == NULL
)) {
1316 data_off
= lm
->data
;
1318 lm
->when
= runtime_get_wall_time();
1319 lm
->from_pid
= attr
->from_pid
;
1320 lm
->about_pid
= attr
->about_pid
;
1321 lm
->err_num
= err_num
;
1322 lm
->pri
= attr
->priority
;
1325 data_off
+= sprintf(data_off
, "%s", msg
) + 1;
1326 lm
->from_name
= data_off
;
1327 data_off
+= sprintf(data_off
, "%s", attr
->from_name
) + 1;
1328 lm
->about_name
= data_off
;
1329 data_off
+= sprintf(data_off
, "%s", attr
->about_name
) + 1;
1330 lm
->session_name
= data_off
;
1331 data_off
+= sprintf(data_off
, "%s", attr
->session_name
) + 1;
1333 STAILQ_INSERT_TAIL(&logmsg_queue
, lm
, sqe
);
1334 logmsg_queue_sz
+= lm_sz
;
1341 logmsg_remove(struct logmsg_s
*lm
)
1343 STAILQ_REMOVE(&logmsg_queue
, lm
, logmsg_s
, sqe
);
1344 logmsg_queue_sz
-= lm
->obj_sz
;
1351 runtime_log_pack(vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
1353 struct logmsg_s
*lm
;
1356 *outvalCnt
= logmsg_queue_sz
;
1358 mig_allocate(outval
, *outvalCnt
);
1360 if (unlikely(*outval
== 0)) {
1364 offset
= (void *)*outval
;
1366 if( g_log_per_user_shutdown
&& !ourlogfile
&& !pid1_magic
&& shutdown_in_progress
) {
1367 char logfile
[NAME_MAX
];
1368 snprintf(logfile
, sizeof(logfile
), "/var/tmp/launchd-%s.shutdown.log", g_username
);
1370 char logfile1
[NAME_MAX
];
1371 snprintf(logfile1
, sizeof(logfile1
), "/var/tmp/launchd-%s.shutdown.log.1", g_username
);
1373 rename(logfile
, logfile1
);
1374 ourlogfile
= fopen(logfile
, "a");
1377 static int64_t shutdown_start
= 0;
1378 if( shutdown_start
== 0 ) {
1379 shutdown_start
= runtime_get_wall_time();
1382 while ((lm
= STAILQ_FIRST(&logmsg_queue
))) {
1383 int64_t log_delta
= lm
->when
- shutdown_start
;
1384 if( !pid1_magic
&& ourlogfile
) {
1385 fprintf(ourlogfile
, "%8lld%6u %-40s%6u %-40s %s\n", log_delta
,
1386 lm
->from_pid
, lm
->from_name
, lm
->about_pid
, lm
->about_name
, lm
->msg
);
1389 lm
->from_name_offset
= lm
->from_name
- (char *)lm
;
1390 lm
->about_name_offset
= lm
->about_name
- (char *)lm
;
1391 lm
->msg_offset
= lm
->msg
- (char *)lm
;
1392 lm
->session_name_offset
= lm
->session_name
- (char *)lm
;
1394 memcpy(offset
, lm
, lm
->obj_sz
);
1396 offset
+= lm
->obj_sz
;
1409 runtime_log_uncork_pending_drain(void)
1411 mach_msg_type_number_t outvalCnt
;
1412 mach_port_t tmp_port
;
1415 if (!drain_reply_port
) {
1419 if (logmsg_queue_cnt
== 0) {
1423 if (runtime_log_pack(&outval
, &outvalCnt
) != 0) {
1427 tmp_port
= drain_reply_port
;
1428 drain_reply_port
= MACH_PORT_NULL
;
1430 if (unlikely(errno
= job_mig_log_drain_reply(tmp_port
, 0, outval
, outvalCnt
))) {
1431 launchd_assumes(errno
== MACH_SEND_INVALID_DEST
);
1432 launchd_assumes(launchd_mport_deallocate(tmp_port
) == KERN_SUCCESS
);
1435 mig_deallocate(outval
, outvalCnt
);
1439 runtime_log_push(void)
1441 static pthread_mutex_t ourlock
= PTHREAD_MUTEX_INITIALIZER
;
1442 static int64_t shutdown_start
, log_delta
;
1443 mach_msg_type_number_t outvalCnt
;
1444 struct logmsg_s
*lm
;
1447 if (logmsg_queue_cnt
== 0) {
1448 launchd_assumes(STAILQ_EMPTY(&logmsg_queue
));
1450 } else if (!pid1_magic
) {
1451 if (runtime_log_pack(&outval
, &outvalCnt
) == 0) {
1452 launchd_assumes(_vprocmgr_log_forward(inherited_bootstrap_port
, (void *)outval
, outvalCnt
) == NULL
);
1453 mig_deallocate(outval
, outvalCnt
);
1458 if (likely(!shutdown_in_progress
&& !fake_shutdown_in_progress
)) {
1459 runtime_log_uncork_pending_drain();
1463 if (unlikely(shutdown_start
== 0)) {
1464 shutdown_start
= runtime_get_wall_time();
1465 launchd_log_vm_stats();
1468 pthread_mutex_lock(&ourlock
);
1470 if( unlikely(ourlogfile
== NULL
) && g_log_pid1_shutdown
) {
1471 rename("/var/log/launchd-shutdown.log", "/var/log/launchd-shutdown.log.1");
1472 ourlogfile
= fopen("/var/log/launchd-shutdown.log", "a");
1475 pthread_mutex_unlock(&ourlock
);
1477 if (unlikely(!ourlogfile
)) {
1481 while ((lm
= STAILQ_FIRST(&logmsg_queue
))) {
1482 log_delta
= lm
->when
- shutdown_start
;
1484 fprintf(ourlogfile
, "%8lld%6u %-40s%6u %-40s %s\n", log_delta
,
1485 lm
->from_pid
, lm
->from_name
, lm
->about_pid
, lm
->about_name
, lm
->msg
);
1494 runtime_log_forward(uid_t forward_uid
, gid_t forward_gid
, vm_offset_t inval
, mach_msg_type_number_t invalCnt
)
1496 struct logmsg_s
*lm
, *lm_walk
;
1497 mach_msg_type_number_t data_left
= invalCnt
;
1503 for (lm_walk
= (struct logmsg_s
*)inval
; (data_left
> 0) && (lm_walk
->obj_sz
<= data_left
); lm_walk
= ((void *)lm_walk
+ lm_walk
->obj_sz
)) {
1504 /* malloc() does not return NULL if you ask it for an allocation of size 0.
1505 * It will return a valid pointer that can be passed to free(). If we don't
1506 * do this check, we'll wind up corrupting our heap in the subsequent
1509 * We break out if this check fails because, obj_sz is supposed to include
1510 * the size of the logmsg_s struct. If it claims to be of zero size, we
1511 * can't safely increment our counter because something obviously got screwed
1512 * up along the way, since this should always be at least sizeof(struct logmsg_s).
1514 if( !launchd_assumes(lm_walk
->obj_sz
> 0) ) {
1515 runtime_syslog(LOG_WARNING
, "Encountered a log message of size 0 with %u bytes left in forwarded data. Ignoring remaining messages.", data_left
);
1519 /* If malloc() keeps failing, we shouldn't put additional pressure on the system
1520 * by attempting to add more messages to the log until it returns success
1521 * log a failure, hope pressure lets off, and move on.
1523 if (!launchd_assumes(lm
= malloc(lm_walk
->obj_sz
))) {
1524 runtime_syslog(LOG_WARNING
, "Failed to allocate %llu bytes for log message with %u bytes left in forwarded data. Ignoring remaining messages.", lm_walk
->obj_sz
, data_left
);
1528 memcpy(lm
, lm_walk
, lm_walk
->obj_sz
);
1529 lm
->sender_uid
= forward_uid
;
1530 lm
->sender_gid
= forward_gid
;
1532 lm
->from_name
+= (size_t)lm
;
1533 lm
->about_name
+= (size_t)lm
;
1534 lm
->msg
+= (size_t)lm
;
1535 lm
->session_name
+= (size_t)lm
;
1537 STAILQ_INSERT_TAIL(&logmsg_queue
, lm
, sqe
);
1538 logmsg_queue_sz
+= lm
->obj_sz
;
1541 data_left
-= lm
->obj_sz
;
1544 mig_deallocate(inval
, invalCnt
);
1550 runtime_log_drain(mach_port_t srp
, vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
1552 launchd_assumes(drain_reply_port
== 0);
1554 if ((logmsg_queue_cnt
== 0) || shutdown_in_progress
|| fake_shutdown_in_progress
) {
1555 drain_reply_port
= srp
;
1556 launchd_assumes(launchd_mport_notify_req(drain_reply_port
, MACH_NOTIFY_DEAD_NAME
) == KERN_SUCCESS
);
1558 return MIG_NO_REPLY
;
1561 return runtime_log_pack(outval
, outvalCnt
);
/*
 * We should break this into two reference counts.
 *
 * One for hard references that would prevent exiting.
 * One for soft references that would only prevent idle exiting.
 *
 * In the long run, reference counting should completely automate when a
 * process can and should exit.
 */
1574 runtime_add_ref(void)
1577 #if !TARGET_OS_EMBEDDED
1578 _vproc_transaction_begin();
1583 runtime_remove_timer();
1587 runtime_del_ref(void)
1590 #if !TARGET_OS_EMBEDDED
1591 if( _vproc_transaction_count() == 0 ) {
1592 runtime_syslog(LOG_INFO
, "Exiting cleanly.");
1596 _vproc_transaction_end();
1601 runtime_install_timer();
1605 runtime_add_weak_ref(void)
1608 #if !TARGET_OS_EMBEDDED
1609 _vproc_standby_begin();
1612 runtime_standby_cnt
++;
1616 runtime_del_weak_ref(void)
1619 #if !TARGET_OS_EMBEDDED
1620 _vproc_standby_end();
1623 runtime_standby_cnt
--;
/*
 * Arm the 30-second idle-exit timer for a non-PID-1 launchd once the last
 * busy reference is gone.  PID 1 never idle-exits.
 */
void
runtime_install_timer(void)
{
	if( !pid1_magic && runtime_busy_cnt == 0 ) {
		launchd_assumes(kevent_mod((uintptr_t)&g_runtime_busy_time, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 30, root_jobmgr) != -1);
	}
}
/*
 * Cancel the idle-exit timer when a non-PID-1 launchd becomes busy again.
 * The runtime_busy_cnt > 0 check ensures we only delete a timer that
 * runtime_install_timer() could actually have added.
 */
void
runtime_remove_timer(void)
{
	if( !pid1_magic && runtime_busy_cnt > 0 ) {
		launchd_assumes(kevent_mod((uintptr_t)&g_runtime_busy_time, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
	}
}
1643 catch_mach_exception_raise(mach_port_t exception_port
__attribute__((unused
)), mach_port_t thread
, mach_port_t task
,
1644 exception_type_t exception
, mach_exception_data_t code
, mach_msg_type_number_t codeCnt
)
1648 launchd_assumes(pid_for_task(task
, &p4t
) == 0);
1650 runtime_syslog(LOG_NOTICE
, "%s(): PID: %u thread: 0x%x type: 0x%x code: %p codeCnt: 0x%x",
1651 __func__
, p4t
, thread
, exception
, code
, codeCnt
);
1653 launchd_assumes(launchd_mport_deallocate(thread
) == KERN_SUCCESS
);
1654 launchd_assumes(launchd_mport_deallocate(task
) == KERN_SUCCESS
);
1656 return KERN_SUCCESS
;
1660 catch_mach_exception_raise_state(mach_port_t exception_port
__attribute__((unused
)),
1661 exception_type_t exception
, const mach_exception_data_t code
, mach_msg_type_number_t codeCnt
,
1662 int *flavor
, const thread_state_t old_state
, mach_msg_type_number_t old_stateCnt
,
1663 thread_state_t new_state
, mach_msg_type_number_t
*new_stateCnt
)
1665 runtime_syslog(LOG_NOTICE
, "%s(): type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
1666 __func__
, exception
, code
, codeCnt
, flavor
, old_state
, old_stateCnt
, new_state
, new_stateCnt
);
1668 memcpy(new_state
, old_state
, old_stateCnt
* sizeof(old_state
[0]));
1669 *new_stateCnt
= old_stateCnt
;
1671 return KERN_SUCCESS
;
1675 catch_mach_exception_raise_state_identity(mach_port_t exception_port
__attribute__((unused
)), mach_port_t thread
, mach_port_t task
,
1676 exception_type_t exception
, mach_exception_data_t code
, mach_msg_type_number_t codeCnt
,
1677 int *flavor
, thread_state_t old_state
, mach_msg_type_number_t old_stateCnt
,
1678 thread_state_t new_state
, mach_msg_type_number_t
*new_stateCnt
)
1682 launchd_assumes(pid_for_task(task
, &p4t
) == 0);
1684 runtime_syslog(LOG_NOTICE
, "%s(): PID: %u thread: 0x%x type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
1685 __func__
, p4t
, thread
, exception
, code
, codeCnt
, flavor
, old_state
, old_stateCnt
, new_state
, new_stateCnt
);
1687 memcpy(new_state
, old_state
, old_stateCnt
* sizeof(old_state
[0]));
1688 *new_stateCnt
= old_stateCnt
;
1690 launchd_assumes(launchd_mport_deallocate(thread
) == KERN_SUCCESS
);
1691 launchd_assumes(launchd_mport_deallocate(task
) == KERN_SUCCESS
);
1693 return KERN_SUCCESS
;
1697 launchd_log_vm_stats(void)
1699 static struct vm_statistics orig_stats
;
1700 static bool did_first_pass
;
1701 unsigned int count
= HOST_VM_INFO_COUNT
;
1702 struct vm_statistics stats
, *statsp
;
1703 mach_port_t mhs
= mach_host_self();
1705 statsp
= did_first_pass
? &stats
: &orig_stats
;
1707 if (!launchd_assumes(host_statistics(mhs
, HOST_VM_INFO
, (host_info_t
)statsp
, &count
) == KERN_SUCCESS
)) {
1711 launchd_assumes(count
== HOST_VM_INFO_COUNT
);
1713 if (did_first_pass
) {
1714 runtime_syslog(LOG_DEBUG
, "VM statistics (now - orig): Free: %d Active: %d Inactive: %d Reactivations: %d PageIns: %d PageOuts: %d Faults: %d COW-Faults: %d Purgeable: %d Purges: %d",
1715 stats
.free_count
- orig_stats
.free_count
,
1716 stats
.active_count
- orig_stats
.active_count
,
1717 stats
.inactive_count
- orig_stats
.inactive_count
,
1718 stats
.reactivations
- orig_stats
.reactivations
,
1719 stats
.pageins
- orig_stats
.pageins
,
1720 stats
.pageouts
- orig_stats
.pageouts
,
1721 stats
.faults
- orig_stats
.faults
,
1722 stats
.cow_faults
- orig_stats
.cow_faults
,
1723 stats
.purgeable_count
- orig_stats
.purgeable_count
,
1724 stats
.purges
- orig_stats
.purges
);
1726 runtime_syslog(LOG_DEBUG
, "VM statistics (now): Free: %d Active: %d Inactive: %d Reactivations: %d PageIns: %d PageOuts: %d Faults: %d COW-Faults: %d Purgeable: %d Purges: %d",
1727 orig_stats
.free_count
,
1728 orig_stats
.active_count
,
1729 orig_stats
.inactive_count
,
1730 orig_stats
.reactivations
,
1732 orig_stats
.pageouts
,
1734 orig_stats
.cow_faults
,
1735 orig_stats
.purgeable_count
,
1738 did_first_pass
= true;
1741 launchd_mport_deallocate(mhs
);
1745 runtime_get_wall_time(void)
1750 launchd_assumes(gettimeofday(&tv
, NULL
) != -1);
/*
 * Current time in opaque Mach absolute-time units; convert with
 * runtime_opaque_time_to_nano() when a real duration is needed.
 */
uint64_t
runtime_get_opaque_time(void)
{
	return mach_absolute_time();
}
1766 runtime_get_opaque_time_of_event(void)
1768 return time_of_mach_msg_return
;
/*
 * Nanoseconds elapsed between opaque timestamp 'o' and the arrival time of
 * the event currently being serviced (not "now" — see
 * runtime_get_opaque_time_of_event()).
 */
uint64_t
runtime_get_nanoseconds_since(uint64_t o)
{
	return runtime_opaque_time_to_nano(runtime_get_opaque_time_of_event() - o);
}
1778 runtime_opaque_time_to_nano(uint64_t o
)
1780 #if defined(__i386__) || defined(__x86_64__)
1781 if (unlikely(tbi
.numer
!= tbi
.denom
)) {
1782 #elif defined(__ppc__) || defined(__ppc64__)
1783 if (likely(tbi
.numer
!= tbi
.denom
)) {
1785 if (tbi
.numer
!= tbi
.denom
) {
1788 __uint128_t tmp
= o
;
1793 if (o
<= tbi_safe_math_max
) {
1812 launchd_assert(mach_timebase_info(&tbi
) == 0);
1813 tbi_float_val
= tbi
.numer
;
1814 tbi_float_val
/= tbi
.denom
;
1815 tbi_safe_math_max
= UINT64_MAX
/ tbi
.numer
;
1817 if (getpid() == 1) {
1821 if (stat("/AppleInternal", &sb
) == 0 && stat("/var/db/disableAppleInternal", &sb
) == -1) {
1822 do_apple_internal_logging
= true;
1825 if (stat("/var/db/.debug_launchd", &sb
) == 0) {
1826 internal_mask_pri
= LOG_UPTO(LOG_DEBUG
);
1827 low_level_debug
= true;
1830 if( stat("/var/db/.launchd_disable_sudden_termination", &sb
) == 0 ) {
1831 g_force_old_kill_path
= true;
1834 if( stat("/var/db/.launchd_log_per_user_shutdown", &sb
) == 0 ) {
1835 g_log_per_user_shutdown
= true;
1838 if( !pid1_magic
&& stat("/var/db/.launchd_no_flat_per_user_namespace", &sb
) == 0 ) {
1839 g_flat_mach_namespace
= false;
1842 if( pid1_magic
&& stat("/var/db/.launchd_simulate_pid1_crash", &sb
) == 0 ) {
1843 g_simulate_pid1_crash
= true;
1846 if( pid1_magic
&& stat("/var/db/.launchd_use_gmalloc", &sb
) == 0 ) {
1847 g_use_gmalloc
= true;
1850 if( pid1_magic
&& stat("/var/db/.launchd_log_pid1_shutdown", &sb
) == 0 ) {
1851 g_log_pid1_shutdown
= true;
1855 size_t len
= sizeof(bootargs
) - 1;
1856 int r
= pid1_magic
? sysctlbyname("kern.bootargs", bootargs
, &len
, NULL
, 0) : -1;
1857 if( r
== 0 && strnstr(bootargs
, "-v", len
) != NULL
) {
1858 g_verbose_boot
= true;
1861 if( pid1_magic
&& g_verbose_boot
&& stat("/var/db/.launchd_shutdown_debugging", &sb
) == 0 ) {
1862 g_shutdown_debugging
= true;
1865 if( stat("/var/db/.launchd_log_strict_usage", &sb
) == 0 ) {
1866 g_log_strict_usage
= true;