apple/launchd.git: src/runtime.c (from launchd-842.92.1)
1 /*
2 * Copyright (c) 1999-2008 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 #include "config.h"
22 #include "runtime.h"
23
24 #include <mach/mach.h>
25 #include <mach/mach_error.h>
26 #include <mach/boolean.h>
27 #include <mach/message.h>
28 #include <mach/notify.h>
29 #include <mach/mig_errors.h>
30 #include <mach/mach_traps.h>
31 #include <mach/mach_interface.h>
32 #include <mach/host_info.h>
33 #include <mach/mach_host.h>
34 #include <mach/mach_time.h>
35 #include <mach/exception.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <sys/sysctl.h>
39 #include <sys/time.h>
40 #include <sys/proc.h>
41 #include <sys/proc_info.h>
42 #include <libproc.h>
43 #include <sys/event.h>
44 #include <sys/queue.h>
45 #include <sys/socket.h>
46 #include <sys/mount.h>
47 #include <sys/reboot.h>
48 #include <sys/fcntl.h>
49 #include <sys/kdebug.h>
50 #include <bsm/libbsm.h>
51 #include <malloc/malloc.h>
52 #include <unistd.h>
53 #include <pthread.h>
54 #include <errno.h>
55 #include <string.h>
56 #include <ctype.h>
57 #include <stdio.h>
58 #include <stdlib.h>
59 #include <stdbool.h>
60 #include <syslog.h>
61 #include <signal.h>
62 #include <dlfcn.h>
63 #include <os/assumes.h>
64
65 #include "internalServer.h"
66 #include "internal.h"
67 #include "notifyServer.h"
68 #include "mach_excServer.h"
69
70 /* We shouldn't be including these */
71 #include "launch.h"
72 #include "launchd.h"
73 #include "core.h"
74 #include "vproc.h"
75 #include "vproc_priv.h"
76 #include "vproc_internal.h"
77 #include "jobServer.h"
78 #include "job_reply.h"
79
80 #include <xpc/launchd.h>
81
82 static mach_port_t ipc_port_set;
83 static mach_port_t demand_port_set;
84 static mach_port_t launchd_internal_port;
85 static int mainkq;
86
87 #define BULK_KEV_MAX 100
88 static struct kevent *bulk_kev;
89 static int bulk_kev_i;
90 static int bulk_kev_cnt;
91
92 static pthread_t kqueue_demand_thread;
93
94 static void mportset_callback(void);
95 static kq_callback kqmportset_callback = (kq_callback)mportset_callback;
96 static void *kqueue_demand_loop(void *arg);
97
98 boolean_t launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply);
99 static void launchd_runtime2(mach_msg_size_t msg_size);
100 static mach_msg_size_t max_msg_size;
101 static mig_callback *mig_cb_table;
102 static size_t mig_cb_table_sz;
103 static timeout_callback runtime_idle_callback;
104 static mach_msg_timeout_t runtime_idle_timeout;
105 static struct ldcred ldc;
106 static audit_token_t ldc_token;
107 static size_t runtime_standby_cnt;
108
109 static void do_file_init(void) __attribute__((constructor));
110 static mach_timebase_info_data_t tbi;
111 static uint64_t tbi_safe_math_max;
112 static uint64_t time_of_mach_msg_return;
113 static double tbi_float_val;
114
115 static const int sigigns[] = { SIGHUP, SIGINT, SIGPIPE, SIGALRM, SIGTERM,
116 SIGURG, SIGTSTP, SIGTSTP, SIGCONT, SIGTTIN, SIGTTOU, SIGIO, SIGXCPU,
117 SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGINFO, SIGUSR1, SIGUSR2
118 };
119 static sigset_t sigign_set;
120 bool pid1_magic;
121 bool launchd_apple_internal;
122 bool launchd_flat_mach_namespace = true;
123 bool launchd_malloc_log_stacks = false;
124 bool launchd_use_gmalloc = false;
125 bool launchd_log_per_user_shutdown = false;
126 #if !TARGET_OS_EMBEDDED
127 bool launchd_log_shutdown = true;
128 #else
129 bool launchd_log_shutdown = false;
130 #endif
131 bool launchd_log_perf = false;
132 bool launchd_log_debug = false;
133 bool launchd_trap_sigkill_bugs = false;
134 bool launchd_no_jetsam_perm_check = false;
135 bool launchd_osinstaller = false;
136 bool launchd_allow_global_dyld_envvars = false;
137 #if TARGET_OS_EMBEDDED
138 bool launchd_appletv = false;
139 #endif
140 pid_t launchd_wsp = 0;
141 size_t runtime_busy_cnt;
142
143 #if TARGET_OS_EMBEDDED
144 #define LAUNCHD_CONFIG_PREFIX "/"
145 #else
146 #define LAUNCHD_CONFIG_PREFIX "/private/var/db/"
147 #endif
148
149 #define config_check(s, sb) (stat(LAUNCHD_CONFIG_PREFIX s, &sb) == 0)
150
151 mach_port_t
152 runtime_get_kernel_port(void)
153 {
154 return launchd_internal_port;
155 }
156
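/* These unions exist only so that sizeof() yields the size of the largest
 * request or reply message in each MIG subsystem the runtime serves; see how
 * max_msg_size is chosen in launchd_runtime_init().
 */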
157 union vproc_mig_max_sz {
158 union __RequestUnion__job_mig_job_subsystem req;
159 union __ReplyUnion__job_mig_job_subsystem rep;
160 };
161
162 union internal_max_sz {
163 union __RequestUnion__x_internal_subsystem req;
164 union __ReplyUnion__internal_subsystem rep;
165 };
166
167 union xpc_domain_max_sz {
168 union __RequestUnion__xpc_domain_xpc_domain_subsystem req;
169 union __ReplyUnion__xpc_domain_xpc_domain_subsystem rep;
170 };
171
172 union mach_exc_max_sz {
173 union __RequestUnion__catch_mach_exc_subsystem req;
174 union __ReplyUnion__catch_mach_exc_subsystem rep;
175 };
176
177 union do_notify_max_sz {
178 union __RequestUnion__do_notify_subsystem req;
179 union __ReplyUnion__do_notify_subsystem rep;
180 };
181
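/* First-stage runtime setup: create the main kqueue, the demand and IPC port
 * sets, and launchd's internal MIG port, then start the helper thread that
 * watches the kqueue on the runtime's behalf.
 */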
182 void
183 launchd_runtime_init(void)
184 {
185 pid_t p = getpid();
186
187 (void)posix_assert_zero((mainkq = kqueue()));
188
189 os_assert_zero(mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &demand_port_set));
190 os_assert_zero(mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &ipc_port_set));
191 posix_assert_zero(kevent_mod(demand_port_set, EVFILT_MACHPORT, EV_ADD, 0, 0, &kqmportset_callback));
192
193 os_assert_zero(launchd_mport_create_recv(&launchd_internal_port));
194 os_assert_zero(launchd_mport_make_send(launchd_internal_port));
195
196 max_msg_size = sizeof(union vproc_mig_max_sz);
197 if (sizeof(union xpc_domain_max_sz) > max_msg_size) {
198 max_msg_size = sizeof(union xpc_domain_max_sz);
199 }
200
201 os_assert_zero(runtime_add_mport(launchd_internal_port, launchd_internal_demux));
202 os_assert_zero(pthread_create(&kqueue_demand_thread, NULL, kqueue_demand_loop, NULL));
203 os_assert_zero(pthread_detach(kqueue_demand_thread));
204
205 (void)posix_assumes_zero(sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &p, sizeof(p)));
206 }
207
208 void
209 launchd_runtime_init2(void)
210 {
211 size_t i;
212
213 __OS_COMPILETIME_ASSERT__(SIG_ERR == (typeof(SIG_ERR))-1);
214 for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
215 sigaddset(&sigign_set, sigigns[i]);
216 (void)posix_assumes_zero(signal(sigigns[i], SIG_IGN));
217 }
218 }
219
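/* FLAGIF() appends the flag's symbolic name at flags_off and clears that bit,
 * so the decoding loops below run until the flags word is exhausted.
 */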
220 #define FLAGIF(f) if (flags & f) { flags_off += sprintf(flags_off, #f); flags &= ~f; }
221 const char *
222 reboot_flags_to_C_names(unsigned int flags)
223 {
224 #define MAX_RB_STR "RB_ASKNAME|RB_SINGLE|RB_NOSYNC|RB_HALT|RB_INITNAME|RB_DFLTROOT|RB_ALTBOOT|RB_UNIPROC|RB_SAFEBOOT|RB_UPSDELAY|0xdeadbeeffeedface"
225 static char flags_buf[sizeof(MAX_RB_STR)];
226 char *flags_off = NULL;
227
228 if (flags == 0) {
229 return "RB_AUTOBOOT";
230 }
231
232 while (flags) {
233 if (flags_off) {
234 *flags_off = '|';
235 flags_off++;
236 *flags_off = '\0';
237 } else {
238 flags_off = flags_buf;
239 }
240
241 FLAGIF(RB_ASKNAME)
242 else FLAGIF(RB_SINGLE)
243 else FLAGIF(RB_NOSYNC)
244 else FLAGIF(RB_HALT)
245 else FLAGIF(RB_INITNAME)
246 else FLAGIF(RB_DFLTROOT)
247 else FLAGIF(RB_ALTBOOT)
248 else FLAGIF(RB_UNIPROC)
249 else FLAGIF(RB_SAFEBOOT)
250 else FLAGIF(RB_UPSDELAY)
251 else {
252 flags_off += sprintf(flags_off, "0x%x", flags);
253 flags = 0;
254 }
255 }
256
257 return flags_buf;
258 }
259
260 const char *
261 signal_to_C_name(unsigned int sig)
262 {
263 static char unknown[25];
264
265 #define SIG2CASE(sg) case sg: return #sg
266
267 switch (sig) {
268 SIG2CASE(SIGHUP);
269 SIG2CASE(SIGINT);
270 SIG2CASE(SIGQUIT);
271 SIG2CASE(SIGILL);
272 SIG2CASE(SIGTRAP);
273 SIG2CASE(SIGABRT);
274 SIG2CASE(SIGFPE);
275 SIG2CASE(SIGKILL);
276 SIG2CASE(SIGBUS);
277 SIG2CASE(SIGSEGV);
278 SIG2CASE(SIGSYS);
279 SIG2CASE(SIGPIPE);
280 SIG2CASE(SIGALRM);
281 SIG2CASE(SIGTERM);
282 SIG2CASE(SIGURG);
283 SIG2CASE(SIGSTOP);
284 SIG2CASE(SIGTSTP);
285 SIG2CASE(SIGCONT);
286 SIG2CASE(SIGCHLD);
287 SIG2CASE(SIGTTIN);
288 SIG2CASE(SIGTTOU);
289 SIG2CASE(SIGIO);
290 SIG2CASE(SIGXCPU);
291 SIG2CASE(SIGXFSZ);
292 SIG2CASE(SIGVTALRM);
293 SIG2CASE(SIGPROF);
294 SIG2CASE(SIGWINCH);
295 SIG2CASE(SIGINFO);
296 SIG2CASE(SIGUSR1);
297 SIG2CASE(SIGUSR2);
298 default:
299 snprintf(unknown, sizeof(unknown), "%u", sig);
300 return unknown;
301 }
302 }
303
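/* Pretty-print one kevent (entry indx of kev_base), decoding the filter, the
 * flags, and the filter-specific fflags into symbolic names. Does nothing
 * unless the (non-console) level is LOG_DEBUG.
 */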
304 void
305 log_kevent_struct(int level, struct kevent *kev_base, int indx)
306 {
307 struct kevent *kev = &kev_base[indx];
308 const char *filter_str;
309 char ident_buf[100];
310 char filter_buf[100];
311 char fflags_buf[1000];
312 char flags_buf[1000] = "0x0";
313 char *flags_off = NULL;
314 char *fflags_off = NULL;
315 unsigned short flags = kev->flags;
316 unsigned int fflags = kev->fflags;
317
318 if (likely(!(LOG_MASK(level & ~LOG_CONSOLE) & LOG_DEBUG))) {
319 return;
320 }
321
322 if (flags) while (flags) {
323 if (flags_off) {
324 *flags_off = '|';
325 flags_off++;
326 *flags_off = '\0';
327 } else {
328 flags_off = flags_buf;
329 }
330
331 FLAGIF(EV_ADD)
332 else FLAGIF(EV_RECEIPT)
333 else FLAGIF(EV_DELETE)
334 else FLAGIF(EV_ENABLE)
335 else FLAGIF(EV_DISABLE)
336 else FLAGIF(EV_CLEAR)
337 else FLAGIF(EV_EOF)
338 else FLAGIF(EV_ONESHOT)
339 else FLAGIF(EV_ERROR)
340 else {
341 flags_off += sprintf(flags_off, "0x%hx", flags);
342 flags = 0;
343 }
344 }
345
346 snprintf(ident_buf, sizeof(ident_buf), "%ld", kev->ident);
347 snprintf(fflags_buf, sizeof(fflags_buf), "0x%x", fflags);
348
349 switch (kev->filter) {
350 case EVFILT_READ:
351 filter_str = "EVFILT_READ";
352 break;
353 case EVFILT_WRITE:
354 filter_str = "EVFILT_WRITE";
355 break;
356 case EVFILT_AIO:
357 filter_str = "EVFILT_AIO";
358 break;
359 case EVFILT_VNODE:
360 filter_str = "EVFILT_VNODE";
361 if (fflags) while (fflags) {
362 if (fflags_off) {
363 *fflags_off = '|';
364 fflags_off++;
365 *fflags_off = '\0';
366 } else {
367 fflags_off = fflags_buf;
368 }
369
370 #define FFLAGIF(ff) if (fflags & ff) { fflags_off += sprintf(fflags_off, #ff); fflags &= ~ff; }
371
372 FFLAGIF(NOTE_DELETE)
373 else FFLAGIF(NOTE_WRITE)
374 else FFLAGIF(NOTE_EXTEND)
375 else FFLAGIF(NOTE_ATTRIB)
376 else FFLAGIF(NOTE_LINK)
377 else FFLAGIF(NOTE_RENAME)
378 else FFLAGIF(NOTE_REVOKE)
379 else {
380 fflags_off += sprintf(fflags_off, "0x%x", fflags);
381 fflags = 0;
382 }
383 }
384 break;
385 case EVFILT_PROC:
386 filter_str = "EVFILT_PROC";
387 if (fflags) while (fflags) {
388 if (fflags_off) {
389 *fflags_off = '|';
390 fflags_off++;
391 *fflags_off = '\0';
392 } else {
393 fflags_off = fflags_buf;
394 }
395
396 FFLAGIF(NOTE_EXIT)
397 else FFLAGIF(NOTE_REAP)
398 else FFLAGIF(NOTE_FORK)
399 else FFLAGIF(NOTE_EXEC)
400 else FFLAGIF(NOTE_SIGNAL)
401 else FFLAGIF(NOTE_TRACK)
402 else FFLAGIF(NOTE_TRACKERR)
403 else FFLAGIF(NOTE_CHILD)
404 else {
405 fflags_off += sprintf(fflags_off, "0x%x", fflags);
406 fflags = 0;
407 }
408 }
409 break;
410 case EVFILT_SIGNAL:
411 filter_str = "EVFILT_SIGNAL";
412 strcpy(ident_buf, signal_to_C_name(kev->ident));
413 break;
414 case EVFILT_TIMER:
415 filter_str = "EVFILT_TIMER";
416 snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
417 if (fflags) while (fflags) {
418 if (fflags_off) {
419 *fflags_off = '|';
420 fflags_off++;
421 *fflags_off = '\0';
422 } else {
423 fflags_off = fflags_buf;
424 }
425
426 FFLAGIF(NOTE_SECONDS)
427 else FFLAGIF(NOTE_USECONDS)
428 else FFLAGIF(NOTE_NSECONDS)
429 else FFLAGIF(NOTE_ABSOLUTE)
430 else {
431 fflags_off += sprintf(fflags_off, "0x%x", fflags);
432 fflags = 0;
433 }
434 }
435 break;
436 case EVFILT_MACHPORT:
437 filter_str = "EVFILT_MACHPORT";
438 snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
439 break;
440 case EVFILT_FS:
441 filter_str = "EVFILT_FS";
442 snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
443 if (fflags) while (fflags) {
444 if (fflags_off) {
445 *fflags_off = '|';
446 fflags_off++;
447 *fflags_off = '\0';
448 } else {
449 fflags_off = fflags_buf;
450 }
451
452 FFLAGIF(VQ_NOTRESP)
453 else FFLAGIF(VQ_NEEDAUTH)
454 else FFLAGIF(VQ_LOWDISK)
455 else FFLAGIF(VQ_MOUNT)
456 else FFLAGIF(VQ_UNMOUNT)
457 else FFLAGIF(VQ_DEAD)
458 else FFLAGIF(VQ_ASSIST)
459 else FFLAGIF(VQ_NOTRESPLOCK)
460 else FFLAGIF(VQ_UPDATE)
461 else {
462 fflags_off += sprintf(fflags_off, "0x%x", fflags);
463 fflags = 0;
464 }
465 }
466 break;
467 default:
468 snprintf(filter_buf, sizeof(filter_buf), "%hd", kev->filter);
469 filter_str = filter_buf;
470 break;
471 }
472
473 launchd_syslog(level, "KEVENT[%d]: udata = %p data = 0x%lx ident = %s filter = %s flags = %s fflags = %s",
474 indx, kev->udata, kev->data, ident_buf, filter_str, flags_buf, fflags_buf);
475 }
476
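/* Called when the demand port set becomes readable: walk its members and, for
 * the first port that has messages queued, synthesize an EVFILT_MACHPORT
 * kevent and invoke the owning job's callback (the kevent's udata).
 */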
477 void
478 mportset_callback(void)
479 {
480 mach_port_name_array_t members;
481 mach_msg_type_number_t membersCnt;
482 mach_port_status_t status;
483 mach_msg_type_number_t statusCnt;
484 struct kevent kev;
485 unsigned int i;
486
487 if (os_assumes_zero(mach_port_get_set_status(mach_task_self(), demand_port_set, &members, &membersCnt)) != 0) {
488 return;
489 }
490
491 for (i = 0; i < membersCnt; i++) {
492 statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
493 if (mach_port_get_attributes(mach_task_self(), members[i], MACH_PORT_RECEIVE_STATUS, (mach_port_info_t)&status,
494 &statusCnt) != KERN_SUCCESS) {
495 continue;
496 }
497 if (status.mps_msgcount) {
498 EV_SET(&kev, members[i], EVFILT_MACHPORT, 0, 0, 0, job_find_by_service_port(members[i]));
499 #if 0
500 if (kev.udata != NULL) {
501 #endif
502 log_kevent_struct(LOG_DEBUG, &kev, 0);
503 (*((kq_callback *)kev.udata))(kev.udata, &kev);
504 #if 0
505 } else {
506 log_kevent_struct(LOG_ERR, &kev, 0);
507 }
508 #endif
509 /* the callback may have tainted our ability to continue this for loop */
510 break;
511 }
512 }
513
514 (void)os_assumes_zero(vm_deallocate(mach_task_self(), (vm_address_t)members, (vm_size_t) membersCnt * sizeof(mach_port_name_t)));
515 }
516
517 void *
518 kqueue_demand_loop(void *arg __attribute__((unused)))
519 {
520 fd_set rfds;
521
522 /*
523 * Yes, at first glance, calling select() on a kqueue seems silly.
524 *
525 * This avoids a race condition between the main thread and this helper
526 * thread by ensuring that we drain kqueue events on the same thread
527 * that manipulates the kqueue.
528 */
529
530 for (;;) {
531 FD_ZERO(&rfds);
532 FD_SET(mainkq, &rfds);
533 int r = select(mainkq + 1, &rfds, NULL, NULL, NULL);
534 if (r == 1) {
535 (void)os_assumes_zero(handle_kqueue(launchd_internal_port, mainkq));
536 } else if (posix_assumes_zero(r) != -1) {
537 (void)os_assumes_zero(r);
538 }
539 }
540
541 return NULL;
542 }
543
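/* MIG routine reached through the internal port when the demand thread sees
 * the main kqueue become readable: drain up to BULK_KEV_MAX events with a zero
 * timeout and dispatch each surviving event to the kq_callback in its udata.
 */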
544 kern_return_t
545 x_handle_kqueue(mach_port_t junk __attribute__((unused)), integer_t fd)
546 {
547 struct timespec ts = { 0, 0 };
548 struct kevent *kevi, kev[BULK_KEV_MAX];
549 int i;
550
551 bulk_kev = kev;
552
553 if ((bulk_kev_cnt = kevent(fd, NULL, 0, kev, BULK_KEV_MAX, &ts)) != -1) {
554 #if 0
555 for (i = 0; i < bulk_kev_cnt; i++) {
556 log_kevent_struct(LOG_DEBUG, &kev[0], i);
557 }
558 #endif
559 for (i = 0; i < bulk_kev_cnt; i++) {
560 bulk_kev_i = i;
561 kevi = &kev[i];
562
563 if (kevi->filter) {
564 launchd_syslog(LOG_DEBUG, "Dispatching kevent (ident/filter): %lu/%hd", kevi->ident, kevi->filter);
565 log_kevent_struct(LOG_DEBUG, kev, i);
566
567 struct job_check_s {
568 kq_callback kqc;
569 };
570
571 struct job_check_s *check = kevi->udata;
572 if (check && check->kqc) {
573 runtime_ktrace(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_START, kevi->ident, kevi->filter, kevi->fflags);
574 (*((kq_callback *)kevi->udata))(kevi->udata, kevi);
575 runtime_ktrace0(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_END);
576 } else {
577 launchd_syslog(LOG_ERR, "The following kevent had invalid context data. Please file a bug with the following information:");
578 log_kevent_struct(LOG_EMERG, &kev[0], i);
579 }
580 launchd_syslog(LOG_DEBUG, "Handled kevent.");
581 }
582 }
583 } else {
584 (void)os_assumes_zero(errno);
585 }
586
587 bulk_kev = NULL;
588
589 return 0;
590 }
591
592 void
593 launchd_runtime(void)
594 {
595 launchd_runtime2(max_msg_size);
596 dispatch_main();
597 }
598
599 kern_return_t
600 launchd_set_bport(mach_port_t name)
601 {
602 return errno = task_set_bootstrap_port(mach_task_self(), name);
603 }
604
605 kern_return_t
606 launchd_get_bport(mach_port_t *name)
607 {
608 return errno = task_get_bootstrap_port(mach_task_self(), name);
609 }
610
611 kern_return_t
612 launchd_mport_notify_req(mach_port_t name, mach_msg_id_t which)
613 {
614 mach_port_mscount_t msgc = (which == MACH_NOTIFY_PORT_DESTROYED) ? 0 : 1;
615 mach_port_t previous, where = (which == MACH_NOTIFY_NO_SENDERS) ? name : launchd_internal_port;
616
617 if (which == MACH_NOTIFY_NO_SENDERS) {
618 /* Always make sure the send count is zero, in case a receive right is
619 * reused
620 */
621 errno = mach_port_set_mscount(mach_task_self(), name, 0);
622 if (unlikely(errno != KERN_SUCCESS)) {
623 return errno;
624 }
625 }
626
627 errno = mach_port_request_notification(mach_task_self(), name, which, msgc, where, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
628
629 if (likely(errno == 0) && previous != MACH_PORT_NULL) {
630 (void)os_assumes_zero(launchd_mport_deallocate(previous));
631 }
632
633 return errno;
634 }
635
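/* fork(2) wrapper: temporarily install the given bootstrap port and restore
 * default dispositions for the signals launchd normally ignores so the child
 * starts with a sane signal state, then undo both in the parent.
 */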
636 pid_t
637 runtime_fork(mach_port_t bsport)
638 {
639 sigset_t emptyset, oset;
640 pid_t r = -1;
641 int saved_errno;
642 size_t i;
643
644 sigemptyset(&emptyset);
645
646 (void)os_assumes_zero(launchd_mport_make_send(bsport));
647 (void)os_assumes_zero(launchd_set_bport(bsport));
648 (void)os_assumes_zero(launchd_mport_deallocate(bsport));
649
650 __OS_COMPILETIME_ASSERT__(SIG_ERR == (typeof(SIG_ERR))-1);
651 (void)posix_assumes_zero(sigprocmask(SIG_BLOCK, &sigign_set, &oset));
652 for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
653 (void)posix_assumes_zero(signal(sigigns[i], SIG_DFL));
654 }
655
656 r = fork();
657 saved_errno = errno;
658
659 if (r != 0) {
660 for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
661 (void)posix_assumes_zero(signal(sigigns[i], SIG_IGN));
662 }
663 (void)posix_assumes_zero(sigprocmask(SIG_SETMASK, &oset, NULL));
664 (void)os_assumes_zero(launchd_set_bport(MACH_PORT_NULL));
665 } else {
666 pid_t p = -getpid();
667 (void)posix_assumes_zero(sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &p, sizeof(p)));
668 (void)posix_assumes_zero(sigprocmask(SIG_SETMASK, &emptyset, NULL));
669 }
670
671 errno = saved_errno;
672
673 return r;
674 }
675
676
677 void
678 runtime_set_timeout(timeout_callback to_cb, unsigned int sec)
679 {
680 if (sec == 0 || to_cb == NULL) {
681 runtime_idle_callback = NULL;
682 runtime_idle_timeout = 0;
683 	} else {
684 		runtime_idle_callback = to_cb;
685 		runtime_idle_timeout = sec * 1000;
686 	}
687 }
688
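/* Register a Mach port with the runtime: remember its demux function in a
 * table indexed by MACH_PORT_INDEX(name) and move the port into either the
 * IPC port set (when a demux is supplied) or the demand port set.
 */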
689 kern_return_t
690 runtime_add_mport(mach_port_t name, mig_callback demux)
691 {
692 size_t needed_table_sz = (MACH_PORT_INDEX(name) + 1) * sizeof(mig_callback);
693 mach_port_t target_set = demux ? ipc_port_set : demand_port_set;
694
695 if (unlikely(needed_table_sz > mig_cb_table_sz)) {
696 needed_table_sz *= 2; /* Let's try and avoid realloc'ing for a while */
697 mig_callback *new_table = malloc(needed_table_sz);
698
699 if (!new_table) {
700 return KERN_RESOURCE_SHORTAGE;
701 }
702
703 if (likely(mig_cb_table)) {
704 memcpy(new_table, mig_cb_table, mig_cb_table_sz);
705 free(mig_cb_table);
706 }
707
708 mig_cb_table_sz = needed_table_sz;
709 mig_cb_table = new_table;
710 }
711
712 mig_cb_table[MACH_PORT_INDEX(name)] = demux;
713
714 return errno = mach_port_move_member(mach_task_self(), name, target_set);
715 }
716
717 kern_return_t
718 runtime_remove_mport(mach_port_t name)
719 {
720 mig_cb_table[MACH_PORT_INDEX(name)] = NULL;
721
722 return errno = mach_port_move_member(mach_task_self(), name, MACH_PORT_NULL);
723 }
724
725 kern_return_t
726 launchd_mport_make_send(mach_port_t name)
727 {
728 return errno = mach_port_insert_right(mach_task_self(), name, name, MACH_MSG_TYPE_MAKE_SEND);
729 }
730
731 kern_return_t
732 launchd_mport_copy_send(mach_port_t name)
733 {
734 return errno = mach_port_insert_right(mach_task_self(), name, name, MACH_MSG_TYPE_COPY_SEND);
735 }
736
737 kern_return_t
738 launchd_mport_make_send_once(mach_port_t name, mach_port_t *so)
739 {
740 mach_msg_type_name_t right = 0;
741 return errno = mach_port_extract_right(mach_task_self(), name, MACH_MSG_TYPE_MAKE_SEND_ONCE, so, &right);
742 }
743
744 kern_return_t
745 launchd_mport_close_recv(mach_port_t name)
746 {
747 return errno = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE, -1);
748 }
749
750 kern_return_t
751 launchd_mport_create_recv(mach_port_t *name)
752 {
753 return errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, name);
754 }
755
756 kern_return_t
757 launchd_mport_deallocate(mach_port_t name)
758 {
759 return errno = mach_port_deallocate(mach_task_self(), name);
760 }
761
762 int
763 kevent_bulk_mod(struct kevent *kev, size_t kev_cnt)
764 {
765 size_t i;
766
767 for (i = 0; i < kev_cnt; i++) {
768 kev[i].flags |= EV_CLEAR|EV_RECEIPT;
769 }
770
771 return kevent(mainkq, kev, kev_cnt, kev, kev_cnt, NULL);
772 }
773
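/* Single-change wrapper around kevent(2). It forces EV_RECEIPT so any error is
 * reported in the returned kevent, deletes and re-adds EVFILT_TIMER entries to
 * work around 5225889, and neutralizes any matching entry still pending in the
 * bulk-event array so a deletion cannot race with its own dispatch.
 */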
774 int
775 kevent_mod(uintptr_t ident, short filter, u_short flags, u_int fflags, intptr_t data, void *udata)
776 {
777 struct kevent kev;
778 int r;
779
780 switch (filter) {
781 case EVFILT_READ:
782 case EVFILT_WRITE:
783 break;
784 case EVFILT_TIMER:
785 /* Workaround 5225889 */
786 if (flags & EV_ADD) {
787 (void)kevent_mod(ident, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
788 }
789 /* fall through */
790 default:
791 flags |= EV_CLEAR;
792 break;
793 }
794
795 flags |= EV_RECEIPT;
796
797 if (flags & EV_ADD && !udata) {
798 errno = EINVAL;
799 return -1;
800 } else if ((flags & EV_DELETE) && bulk_kev) {
801 int i = 0;
802 for (i = bulk_kev_i + 1; i < bulk_kev_cnt; i++) {
803 if (bulk_kev[i].filter == filter && bulk_kev[i].ident == ident) {
804 launchd_syslog(LOG_DEBUG, "Pruning the following kevent:");
805 log_kevent_struct(LOG_DEBUG, &bulk_kev[0], i);
806 bulk_kev[i].filter = (short)0;
807 }
808 }
809 }
810
811 EV_SET(&kev, ident, filter, flags, fflags, data, udata);
812
813 r = kevent(mainkq, &kev, 1, &kev, 1, NULL);
814
815 if (r != 1) {
816 return -1;
817 }
818
819 if (kev.flags & EV_ERROR) {
820 if ((flags & EV_ADD) && kev.data) {
821 launchd_syslog(LOG_DEBUG, "%s(): See the next line...", __func__);
822 log_kevent_struct(LOG_DEBUG, &kev, 0);
823 errno = kev.data;
824 return -1;
825 }
826 } else {
827 (void)os_assert_zero(kev.flags);
828 }
829
830 return r;
831 }
832
833 boolean_t
834 launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply)
835 {
836 if (internal_server_routine(Request)) {
837 return internal_server(Request, Reply);
838 } else if (notify_server_routine(Request)) {
839 return notify_server(Request, Reply);
840 } else {
841 return mach_exc_server(Request, Reply);
842 }
843 }
844
845 kern_return_t
846 do_mach_notify_port_destroyed(mach_port_t notify __attribute__((unused)), mach_port_t rights)
847 {
848 /* This message is sent to us when a receive right is returned to us. */
849 if (!job_ack_port_destruction(rights)) {
850 (void)os_assumes_zero(launchd_mport_close_recv(rights));
851 }
852
853 return KERN_SUCCESS;
854 }
855
856 kern_return_t
857 do_mach_notify_port_deleted(mach_port_t notify __attribute__((unused)), mach_port_name_t name __attribute__((unused)))
858 {
859 /* If we deallocate/destroy/mod_ref away a port with a pending
860 * notification, the original notification message is replaced with
861 * this message. To quote a Mach kernel expert, "the kernel has a
862 * send-once right that has to be used somehow."
863 */
864 return KERN_SUCCESS;
865 }
866
867 kern_return_t
868 do_mach_notify_no_senders(mach_port_t notify, mach_port_mscount_t mscount __attribute__((unused)))
869 {
870 job_t j = job_mig_intran(notify);
871
872 /* This message is sent to us when the last customer of one of our objects
873 * goes away.
874 */
875
876 if (!j) {
877 return KERN_FAILURE;
878 }
879
880 job_ack_no_senders(j);
881
882 return KERN_SUCCESS;
883 }
884
885 kern_return_t
886 do_mach_notify_send_once(mach_port_t notify __attribute__((unused)))
887 {
888 /*
889 * This message is sent for each send-once right that is deallocated without
890 * being used.
891 */
892
893 return KERN_SUCCESS;
894 }
895
896 kern_return_t
897 do_mach_notify_dead_name(mach_port_t notify __attribute__((unused)), mach_port_name_t name)
898 {
899 /* This message is sent to us when one of our send rights no longer has a
900 * receiver somewhere else on the system.
901 */
902 if (name == launchd_drain_reply_port) {
903 (void)os_assumes_zero(launchd_mport_deallocate(name));
904 launchd_drain_reply_port = MACH_PORT_NULL;
905 }
906
907 if (root_jobmgr) {
908 root_jobmgr = jobmgr_delete_anything_with_port(root_jobmgr, name);
909 }
910
911 /* A dead-name notification about a port appears to increment the rights on
912 * said port. Let's deallocate it so that we don't leak dead-name ports.
913 */
914 (void)os_assumes_zero(launchd_mport_deallocate(name));
915
916 return KERN_SUCCESS;
917 }
918
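/* Receive at most one exception message from the given port (bounded by the
 * timeout), run it through mach_exc_server(), and send the reply, logging
 * rather than aborting on the common timed-out and invalid-destination cases.
 */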
919 mach_msg_return_t
920 launchd_exc_runtime_once(mach_port_t port, mach_msg_size_t rcv_msg_size, mach_msg_size_t send_msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply, mach_msg_timeout_t to)
921 {
922 mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
923 mach_msg_option_t rcv_options = MACH_RCV_MSG
924 | MACH_RCV_TIMEOUT
925 | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT)
926 | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0);
927
928 do {
929 mr = mach_msg(&bufRequest->Head, rcv_options, 0, rcv_msg_size, port, to, MACH_PORT_NULL);
930 switch (mr) {
931 case MACH_RCV_TIMED_OUT:
932 launchd_syslog(LOG_DEBUG, "Message queue is empty.");
933 break;
934 case MACH_RCV_TOO_LARGE:
935 launchd_syslog(LOG_INFO, "Message is larger than %u bytes.", rcv_msg_size);
936 break;
937 default:
938 (void)os_assumes_zero(mr);
939 }
940
941 if (mr == MACH_MSG_SUCCESS) {
942 if (!mach_exc_server(&bufRequest->Head, &bufReply->Head)) {
943 launchd_syslog(LOG_WARNING, "Exception server routine failed.");
944 break;
945 }
946
947 mach_msg_return_t smr = ~MACH_MSG_SUCCESS;
948 mach_msg_option_t send_options = MACH_SEND_MSG | MACH_SEND_TIMEOUT;
949
950 (void)os_assumes(bufReply->Head.msgh_size <= send_msg_size);
951 smr = mach_msg(&bufReply->Head, send_options, bufReply->Head.msgh_size, 0, MACH_PORT_NULL, to + 100, MACH_PORT_NULL);
952 switch (smr) {
953 case MACH_SEND_TIMED_OUT:
954 launchd_syslog(LOG_WARNING, "Timed out while trying to send reply to exception message.");
955 break;
956 case MACH_SEND_INVALID_DEST:
957 launchd_syslog(LOG_WARNING, "Tried sending a message to a port that we don't possess a send right to.");
958 break;
959 default:
960 if (smr) {
961 launchd_syslog(LOG_WARNING, "Couldn't deliver exception reply: 0x%x", smr);
962 }
963 break;
964 }
965 }
966 } while (0);
967
968 return mr;
969 }
970
971 void
972 runtime_record_caller_creds(audit_token_t *token)
973 {
974 (void)memcpy(&ldc_token, token, sizeof(*token));
975 audit_token_to_au32(*token, NULL, &ldc.euid,&ldc.egid, &ldc.uid, &ldc.gid,
976 &ldc.pid, &ldc.asid, NULL);
977 }
978
979 struct ldcred *
980 runtime_get_caller_creds(void)
981 {
982 return &ldc;
983 }
984
985 audit_token_t *
986 runtime_get_caller_token(void)
987 {
988 return &ldc_token;
989 }
990
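/* Dispatch one received MIG message: record the caller's audit credentials
 * from the trailer, call the demux registered for the destination port, and
 * fall back to the notify subsystem (for no-senders notifications) or the XPC
 * domain subsystem (for job ports) if that demux declines the message.
 */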
991 static boolean_t
992 launchd_mig_demux(mach_msg_header_t *request, mach_msg_header_t *reply)
993 {
994 boolean_t result = false;
995
996 time_of_mach_msg_return = runtime_get_opaque_time();
997 launchd_syslog(LOG_DEBUG, "MIG callout: %u", request->msgh_id);
998 mig_callback the_demux = mig_cb_table[MACH_PORT_INDEX(request->msgh_local_port)];
999 mach_msg_audit_trailer_t *tp = (mach_msg_audit_trailer_t *)((vm_offset_t)request + round_msg(request->msgh_size));
1000 runtime_record_caller_creds(&tp->msgh_audit);
1001
1002 result = the_demux(request, reply);
1003 if (!result) {
1004 launchd_syslog(LOG_DEBUG, "Demux failed. Trying other subsystems...");
1005 if (request->msgh_id == MACH_NOTIFY_NO_SENDERS) {
1006 launchd_syslog(LOG_DEBUG, "MACH_NOTIFY_NO_SENDERS");
1007 result = notify_server(request, reply);
1008 } else if (the_demux == job_server) {
1009 launchd_syslog(LOG_DEBUG, "Trying domain subsystem...");
1010 result = xpc_domain_server(request, reply);
1011 } else {
1012 launchd_syslog(LOG_ERR, "Cannot handle MIG request with ID: 0x%x", request->msgh_id);
1013 }
1014 } else {
1015 launchd_syslog(LOG_DEBUG, "MIG demux succeeded.");
1016 }
1017
1018 return result;
1019 }
1020
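/* Main receive loop: xpc_pipe_try_receive() either hands a raw MIG message to
 * launchd_mig_demux() or returns a decoded XPC request, which is offered to
 * the event and process demuxers and answered via xpc_pipe_routine_reply().
 */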
1021 void
1022 launchd_runtime2(mach_msg_size_t msg_size)
1023 {
1024 for (;;) {
1025 launchd_log_push();
1026
1027 mach_port_t recvp = MACH_PORT_NULL;
1028 xpc_object_t request = NULL;
1029 int result = xpc_pipe_try_receive(ipc_port_set, &request, &recvp, launchd_mig_demux, msg_size, 0);
1030 if (result == 0 && request) {
1031 boolean_t handled = false;
1032 time_of_mach_msg_return = runtime_get_opaque_time();
1033 launchd_syslog(LOG_DEBUG, "XPC request.");
1034
1035 xpc_object_t reply = NULL;
1036 if (xpc_event_demux(recvp, request, &reply)) {
1037 handled = true;
1038 } else if (xpc_process_demux(recvp, request, &reply)) {
1039 handled = true;
1040 }
1041
1042 if (!handled) {
1043 launchd_syslog(LOG_DEBUG, "XPC routine could not be handled.");
1044 xpc_release(request);
1045 continue;
1046 }
1047
1048 launchd_syslog(LOG_DEBUG, "XPC routine was handled.");
1049 if (reply) {
1050 launchd_syslog(LOG_DEBUG, "Sending reply.");
1051 result = xpc_pipe_routine_reply(reply);
1052 if (result == 0) {
1053 launchd_syslog(LOG_DEBUG, "Reply sent successfully.");
1054 } else if (result != EPIPE) {
1055 launchd_syslog(LOG_ERR, "Failed to send reply message: 0x%x", result);
1056 }
1057
1058 xpc_release(reply);
1059 }
1060
1061 xpc_release(request);
1062 } else if (result == 0) {
1063 launchd_syslog(LOG_DEBUG, "MIG request.");
1064 } else if (result == EINVAL) {
1065 launchd_syslog(LOG_ERR, "Rejected invalid request message.");
1066 }
1067 }
1068 }
1069
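/* close(2) wrapper: if we are in the middle of dispatching a batch of kevents,
 * neutralize any later entries in the batch that still refer to this fd so a
 * stale event is not delivered for a descriptor that no longer exists.
 */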
1070 int
1071 runtime_close(int fd)
1072 {
1073 int i;
1074
1075 if (bulk_kev) for (i = bulk_kev_i + 1; i < bulk_kev_cnt; i++) {
1076 switch (bulk_kev[i].filter) {
1077 case EVFILT_VNODE:
1078 case EVFILT_WRITE:
1079 case EVFILT_READ:
1080 if (unlikely((int)bulk_kev[i].ident == fd)) {
1081 launchd_syslog(LOG_DEBUG, "Skipping kevent index: %d", i);
1082 bulk_kev[i].filter = 0;
1083 }
1084 default:
1085 break;
1086 }
1087 }
1088
1089 return close(fd);
1090 }
1091
1092 int
1093 runtime_fsync(int fd)
1094 {
1095 #if 0
1096 if (launchd_apple_internal) {
1097 return fcntl(fd, F_FULLFSYNC, NULL);
1098 } else {
1099 return fsync(fd);
1100 }
1101 #else
1102 return fsync(fd);
1103 #endif
1104 }
1105
1106 /*
1107 * We should break this into two reference counts.
1108 *
1109 * One for hard references that would prevent exiting.
1110 * One for soft references that would only prevent idle exiting.
1111 *
1112 * In the long run, reference counting should completely automate when a
1113 * process can and should exit.
1114 */
1115 void
1116 runtime_add_ref(void)
1117 {
1118 if (!pid1_magic) {
1119 #if !TARGET_OS_EMBEDDED
1120 vproc_transaction_begin(NULL);
1121 #endif
1122 }
1123
1124 runtime_busy_cnt++;
1125 launchd_syslog(LOG_PERF, "Incremented busy count. Now: %lu", runtime_busy_cnt);
1126 runtime_remove_timer();
1127 }
1128
1129 void
1130 runtime_del_ref(void)
1131 {
1132 if (!pid1_magic) {
1133 #if !TARGET_OS_EMBEDDED
1134 if (_vproc_transaction_count() == 0) {
1135 launchd_syslog(LOG_PERF, "Exiting cleanly.");
1136 }
1137
1138 vproc_transaction_end(NULL, NULL);
1139 #endif
1140 }
1141
1142 runtime_busy_cnt--;
1143 launchd_syslog(LOG_PERF, "Decremented busy count. Now: %lu", runtime_busy_cnt);
1144 runtime_install_timer();
1145 }
1146
1147 void
1148 runtime_add_weak_ref(void)
1149 {
1150 if (!pid1_magic) {
1151 #if !TARGET_OS_EMBEDDED
1152 _vproc_standby_begin();
1153 #endif
1154 }
1155 runtime_standby_cnt++;
1156 }
1157
1158 void
1159 runtime_del_weak_ref(void)
1160 {
1161 if (!pid1_magic) {
1162 #if !TARGET_OS_EMBEDDED
1163 _vproc_standby_end();
1164 #endif
1165 }
1166 runtime_standby_cnt--;
1167 }
1168
1169 void
1170 runtime_install_timer(void)
1171 {
1172 if (!pid1_magic && runtime_busy_cnt == 0) {
1173 launchd_syslog(LOG_PERF, "Gone idle. Installing idle-exit timer.");
1174 (void)posix_assumes_zero(kevent_mod((uintptr_t)&launchd_runtime_busy_time, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 10, root_jobmgr));
1175 }
1176 }
1177
1178 void
1179 runtime_remove_timer(void)
1180 {
1181 if (!pid1_magic && runtime_busy_cnt > 0) {
1182 if (runtime_busy_cnt == 1) {
1183 launchd_syslog(LOG_PERF, "No longer idle. Removing idle-exit timer.");
1184 }
1185 (void)posix_assumes_zero(kevent_mod((uintptr_t)&launchd_runtime_busy_time, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
1186 }
1187 }
1188
1189 kern_return_t
1190 catch_mach_exception_raise(mach_port_t exception_port __attribute__((unused)), mach_port_t thread, mach_port_t task,
1191 exception_type_t exception, mach_exception_data_t code, mach_msg_type_number_t codeCnt)
1192 {
1193 pid_t p4t = -1;
1194
1195 (void)os_assumes_zero(pid_for_task(task, &p4t));
1196
1197 launchd_syslog(LOG_NOTICE, "%s(): PID: %u thread: 0x%x type: 0x%x code: %p codeCnt: 0x%x",
1198 __func__, p4t, thread, exception, code, codeCnt);
1199
1200 (void)os_assumes_zero(launchd_mport_deallocate(thread));
1201 (void)os_assumes_zero(launchd_mport_deallocate(task));
1202
1203 return KERN_SUCCESS;
1204 }
1205
1206 kern_return_t
1207 catch_mach_exception_raise_state(mach_port_t exception_port __attribute__((unused)),
1208 exception_type_t exception, const mach_exception_data_t code, mach_msg_type_number_t codeCnt,
1209 int *flavor, const thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
1210 thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
1211 {
1212 launchd_syslog(LOG_NOTICE, "%s(): type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
1213 __func__, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);
1214
1215 memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
1216 *new_stateCnt = old_stateCnt;
1217
1218 return KERN_SUCCESS;
1219 }
1220
1221 kern_return_t
1222 catch_mach_exception_raise_state_identity(mach_port_t exception_port __attribute__((unused)), mach_port_t thread, mach_port_t task,
1223 exception_type_t exception, mach_exception_data_t code, mach_msg_type_number_t codeCnt,
1224 int *flavor, thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
1225 thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
1226 {
1227 pid_t p4t = -1;
1228
1229 (void)os_assumes_zero(pid_for_task(task, &p4t));
1230
1231 launchd_syslog(LOG_NOTICE, "%s(): PID: %u thread: 0x%x type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
1232 __func__, p4t, thread, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);
1233
1234 memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
1235 *new_stateCnt = old_stateCnt;
1236
1237 (void)os_assumes_zero(launchd_mport_deallocate(thread));
1238 (void)os_assumes_zero(launchd_mport_deallocate(task));
1239
1240 return KERN_SUCCESS;
1241 }
1242
1243 // FIXME: should this be thread safe? With dispatch_once?
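/* One thread-safe shape this could take, sketched only (it is not what the
 * function below does, and it would need <dispatch/dispatch.h>):
 *
 *   static dispatch_once_t once;
 *   static uint64_t uniqueid;
 *   dispatch_once(&once, ^{
 *       struct proc_uniqidentifierinfo info;
 *       if (proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &info,
 *               sizeof(info)) == PROC_PIDUNIQIDENTIFIERINFO_SIZE) {
 *           uniqueid = info.p_uniqueid;
 *       }
 *   });
 */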
1244 uint64_t
1245 runtime_get_uniqueid(void)
1246 {
1247 static bool once;
1248 static uint64_t uniqueid;
1249 if (unlikely(!once)) {
1250 once = true;
1251
1252 struct proc_uniqidentifierinfo info;
1253 int size;
1254 size = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &info, sizeof(info));
1255 if (size == PROC_PIDUNIQIDENTIFIERINFO_SIZE) {
1256 uniqueid = info.p_uniqueid;
1257 }
1258 }
1259 return uniqueid;
1260 }
1261
1262 void
1263 launchd_log_vm_stats(void)
1264 {
1265 static struct vm_statistics orig_stats;
1266 static bool did_first_pass;
1267 unsigned int count = HOST_VM_INFO_COUNT;
1268 struct vm_statistics stats, *statsp;
1269 mach_port_t mhs = mach_host_self();
1270
1271 statsp = did_first_pass ? &stats : &orig_stats;
1272
1273 if (os_assumes_zero(host_statistics(mhs, HOST_VM_INFO, (host_info_t)statsp, &count)) != KERN_SUCCESS) {
1274 return;
1275 }
1276
1277 if (count != HOST_VM_INFO_COUNT) {
1278 (void)os_assumes_zero(count);
1279 }
1280
1281 if (did_first_pass) {
1282 launchd_syslog(LOG_DEBUG, "VM statistics (now - orig): Free: %d Active: %d Inactive: %d Reactivations: %d PageIns: %d PageOuts: %d Faults: %d COW-Faults: %d Purgeable: %d Purges: %d",
1283 stats.free_count - orig_stats.free_count,
1284 stats.active_count - orig_stats.active_count,
1285 stats.inactive_count - orig_stats.inactive_count,
1286 stats.reactivations - orig_stats.reactivations,
1287 stats.pageins - orig_stats.pageins,
1288 stats.pageouts - orig_stats.pageouts,
1289 stats.faults - orig_stats.faults,
1290 stats.cow_faults - orig_stats.cow_faults,
1291 stats.purgeable_count - orig_stats.purgeable_count,
1292 stats.purges - orig_stats.purges);
1293 } else {
1294 launchd_syslog(LOG_DEBUG, "VM statistics (now): Free: %d Active: %d Inactive: %d Reactivations: %d PageIns: %d PageOuts: %d Faults: %d COW-Faults: %d Purgeable: %d Purges: %d",
1295 orig_stats.free_count,
1296 orig_stats.active_count,
1297 orig_stats.inactive_count,
1298 orig_stats.reactivations,
1299 orig_stats.pageins,
1300 orig_stats.pageouts,
1301 orig_stats.faults,
1302 orig_stats.cow_faults,
1303 orig_stats.purgeable_count,
1304 orig_stats.purges);
1305
1306 did_first_pass = true;
1307 }
1308
1309 launchd_mport_deallocate(mhs);
1310 }
1311
1312 int64_t
1313 runtime_get_wall_time(void)
1314 {
1315 struct timeval tv;
1316 int64_t r;
1317
1318 (void)posix_assumes_zero(gettimeofday(&tv, NULL));
1319
1320 r = tv.tv_sec;
1321 r *= USEC_PER_SEC;
1322 r += tv.tv_usec;
1323
1324 return r;
1325 }
1326
1327 uint64_t
1328 runtime_get_opaque_time(void)
1329 {
1330 return mach_absolute_time();
1331 }
1332
1333 uint64_t
1334 runtime_get_opaque_time_of_event(void)
1335 {
1336 return time_of_mach_msg_return;
1337 }
1338
1339 uint64_t
1340 runtime_get_nanoseconds_since(uint64_t o)
1341 {
1342 return runtime_opaque_time_to_nano(runtime_get_opaque_time_of_event() - o);
1343 }
1344
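/* Convert mach_absolute_time() ticks to nanoseconds with the cached timebase:
 * a no-op when numer == denom, a 128-bit multiply/divide on LP64, and a
 * floating-point fallback on 32-bit once the multiplication would overflow
 * 64 bits.
 */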
1345 uint64_t
1346 runtime_opaque_time_to_nano(uint64_t o)
1347 {
1348 #if defined(__i386__) || defined(__x86_64__)
1349 if (unlikely(tbi.numer != tbi.denom)) {
1350 #elif defined(__ppc__) || defined(__ppc64__)
1351 if (likely(tbi.numer != tbi.denom)) {
1352 #else
1353 if (tbi.numer != tbi.denom) {
1354 #endif
1355 #ifdef __LP64__
1356 __uint128_t tmp = o;
1357 tmp *= tbi.numer;
1358 tmp /= tbi.denom;
1359 o = tmp;
1360 #else
1361 if (o <= tbi_safe_math_max) {
1362 o *= tbi.numer;
1363 o /= tbi.denom;
1364 } else {
1365 double d = o;
1366 d *= tbi_float_val;
1367 o = d;
1368 }
1369 #endif
1370 }
1371
1372 return o;
1373 }
1374
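/* Constructor run before main(): cache mach_timebase_info, note whether we are
 * PID 1, and derive the launchd_* policy globals from the presence of files
 * under LAUNCHD_CONFIG_PREFIX, from /AppleInternal, and from kern.bootargs.
 */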
1375 void
1376 do_file_init(void)
1377 {
1378 struct stat sb;
1379
1380 os_assert_zero(mach_timebase_info(&tbi));
1381 tbi_float_val = tbi.numer;
1382 tbi_float_val /= tbi.denom;
1383 tbi_safe_math_max = UINT64_MAX / tbi.numer;
1384
1385 launchd_system_start = runtime_get_wall_time();
1386
1387 if (getpid() == 1) {
1388 pid1_magic = true;
1389 }
1390
1391 if (stat("/AppleInternal", &sb) == 0 && stat("/var/db/disableAppleInternal", &sb) == -1) {
1392 launchd_apple_internal = true;
1393 }
1394
1395 if (config_check(".launchd_use_gmalloc", sb)) {
1396 launchd_use_gmalloc = true;
1397 }
1398
1399 if (config_check(".launchd_log_shutdown", sb)) {
1400 launchd_log_shutdown = true;
1401 }
1402
1403 if (config_check(".launchd_log_debug", sb)) {
1404 launchd_log_debug = true;
1405 }
1406
1407 if (config_check(".launchd_log_perf", sb)) {
1408 launchd_log_perf = true;
1409 }
1410
1411 if (config_check("/etc/rc.cdrom", sb)) {
1412 launchd_osinstaller = true;
1413 }
1414
1415 if (!pid1_magic && config_check(".launchd_allow_global_dyld_envvars", sb)) {
1416 launchd_allow_global_dyld_envvars = true;
1417 }
1418
1419 char buff[1024];
1420 size_t len = sizeof(buff) - 1;
1421 int r = pid1_magic ? sysctlbyname("kern.bootargs", buff, &len, NULL, 0) : -1;
1422 if (r == 0) {
1423 if (strnstr(buff, "-v", len)) {
1424 launchd_verbose_boot = true;
1425 }
1426 if (strnstr(buff, "launchd_trap_sigkill_bugs", len)) {
1427 launchd_trap_sigkill_bugs = true;
1428 }
1429 if (strnstr(buff, "launchd_no_jetsam_perm_check", len)) {
1430 launchd_no_jetsam_perm_check = true;
1431 }
1432 }
1433
1434 len = sizeof(buff) - 1;
1435 #if TARGET_OS_EMBEDDED
1436 r = sysctlbyname("hw.machine", buff, &len, NULL, 0);
1437 if (r == 0) {
1438 if (strnstr(buff, "AppleTV", len)) {
1439 launchd_appletv = true;
1440 }
1441 }
1442 #endif
1443
1444 #if !TARGET_OS_EMBEDDED
1445 if (pid1_magic && launchd_verbose_boot && config_check(".launchd_shutdown_debugging", sb)) {
1446 launchd_shutdown_debugging = true;
1447 }
1448 #else
1449 if (pid1_magic && config_check(".launchd_shutdown_debugging", sb)) {
1450 launchd_shutdown_debugging = true;
1451 }
1452 #endif
1453 }