/* launchd/src/launchd_runtime.c — from the launchd-392.18 distribution. */
1 /*
2 * Copyright (c) 1999-2008 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
/* SCM revision tag embedded in the binary so shipped builds can be identified. */
static const char *const __rcs_file_version__ = "$Revision: 24912 $";
22
23 #include "config.h"
24 #include "launchd_runtime.h"
25
26 #include <mach/mach.h>
27 #include <mach/mach_error.h>
28 #include <mach/boolean.h>
29 #include <mach/message.h>
30 #include <mach/notify.h>
31 #include <mach/mig_errors.h>
32 #include <mach/mach_traps.h>
33 #include <mach/mach_interface.h>
34 #include <mach/host_info.h>
35 #include <mach/mach_host.h>
36 #include <mach/mach_time.h>
37 #include <mach/exception.h>
38 #include <sys/types.h>
39 #include <sys/stat.h>
40 #include <sys/sysctl.h>
41 #include <sys/time.h>
42 #include <sys/proc.h>
43 #include <sys/event.h>
44 #include <sys/queue.h>
45 #include <sys/socket.h>
46 #include <sys/mount.h>
47 #include <sys/reboot.h>
48 #include <sys/fcntl.h>
49 #include <sys/kdebug.h>
50 #include <bsm/libbsm.h>
51 #include <malloc/malloc.h>
52 #include <unistd.h>
53 #include <pthread.h>
54 #include <errno.h>
55 #include <string.h>
56 #include <ctype.h>
57 #include <stdio.h>
58 #include <stdlib.h>
59 #include <stdbool.h>
60 #include <syslog.h>
61 #include <signal.h>
62 #include <dlfcn.h>
63
64 #include "launchd_internalServer.h"
65 #include "launchd_internal.h"
66 #include "notifyServer.h"
67 #include "mach_excServer.h"
68
69 /* We shouldn't be including these */
70 #include "launch.h"
71 #include "launchd.h"
72 #include "launchd_core_logic.h"
73 #include "vproc.h"
74 #include "vproc_priv.h"
75 #include "vproc_internal.h"
76 #include "protocol_vprocServer.h"
77 #include "protocol_job_reply.h"
78
79 #if !TARGET_OS_EMBEDDED
80 #include "domainServer.h"
81 #endif
82 #include "eventsServer.h"
83
/* Port set for receive rights with registered MIG demuxers; drained by launchd_runtime2(). */
static mach_port_t ipc_port_set;
/* Port set for on-demand service ports; a queued message here wakes the owning job. */
static mach_port_t demand_port_set;
/* Receive right backing launchd's internal MIG subsystem (kqueue/notify/exception). */
static mach_port_t launchd_internal_port;
/* The main kqueue; all kevent registration and draining goes through this fd. */
static int mainkq;

/* Upper bound on kevents drained per pass in x_handle_kqueue(). */
#define BULK_KEV_MAX 100
/* In-flight batch state, published so kevent_mod() can prune pending events mid-drain. */
static struct kevent *bulk_kev;
static int bulk_kev_i;
static int bulk_kev_cnt;

/* Helper thread that select()s on mainkq and pings the main thread (kqueue_demand_loop). */
static pthread_t kqueue_demand_thread;

static void mportset_callback(void);
static kq_callback kqmportset_callback = (kq_callback)mportset_callback;
static void *kqueue_demand_loop(void *arg);

boolean_t launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply);
static void record_caller_creds(mach_msg_header_t *mh);
static void launchd_runtime2(mach_msg_size_t msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply);
/* Largest registered (page-rounded) receive size; sizes the runtime's message buffers. */
static mach_msg_size_t max_msg_size;
/* Demux table indexed by MACH_PORT_INDEX(port); grown on demand by runtime_add_mport(). */
static mig_callback *mig_cb_table;
static size_t mig_cb_table_sz;
/* Optional idle-timeout hook for the main mach_msg() loop (runtime_set_timeout()). */
static timeout_callback runtime_idle_callback;
static mach_msg_timeout_t runtime_idle_timeout;
/* Audit-derived credentials of the most recent MIG caller (record_caller_creds()). */
static struct ldcred ldc;
static size_t runtime_standby_cnt;

/* Queue of syslog messages pending delivery (see runtime_log_pack/push elsewhere). */
static STAILQ_HEAD(, logmsg_s) logmsg_queue = STAILQ_HEAD_INITIALIZER(logmsg_queue);
static size_t logmsg_queue_sz;
static size_t logmsg_queue_cnt;
static mach_port_t drain_reply_port;
static void runtime_log_uncork_pending_drain(void);
static kern_return_t runtime_log_pack(vm_offset_t *outval, mach_msg_type_number_t *outvalCnt);

static bool logmsg_add(struct runtime_syslog_attr *attr, int err_num, const char *msg);
static void logmsg_remove(struct logmsg_s *lm);

static void do_file_init(void) __attribute__((constructor));
/* Cached Mach timebase info for absolute-time conversions. */
static mach_timebase_info_data_t tbi;
static uint64_t tbi_safe_math_max;
static uint64_t time_of_mach_msg_return;
static double tbi_float_val;

/* Signals launchd ignores while running and restores to SIG_DFL around fork().
 * NOTE(review): SIGTSTP appears twice; the duplicate is harmless as written
 * (ignoring twice is idempotent), but it looks like one entry was intended to
 * be a different signal — worth confirming against upstream.
 */
static const int sigigns[] = { SIGHUP, SIGINT, SIGPIPE, SIGALRM, SIGTERM,
	SIGURG, SIGTSTP, SIGTSTP, SIGCONT, SIGTTIN, SIGTTOU, SIGIO, SIGXCPU,
	SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGINFO, SIGUSR1, SIGUSR2
};
/* Block mask built from sigigns[] in launchd_runtime_init2(). */
static sigset_t sigign_set;
static FILE *ourlogfile;
/* True when this launchd is PID 1 (the system instance). */
bool pid1_magic;
bool do_apple_internal_logging;
bool low_level_debug;
bool g_flat_mach_namespace = true;
bool g_simulate_pid1_crash = false;
bool g_malloc_log_stacks = false;
bool g_use_gmalloc = false;
bool g_log_per_user_shutdown = false;
#if !TARGET_OS_EMBEDDED
bool g_log_pid1_shutdown = true;
#else
bool g_log_pid1_shutdown = false;
#endif
bool g_log_strict_usage = false;
bool g_trap_sigkill_bugs = false;
pid_t g_wsp = 0;
/* Count of in-flight work items; the runtime idle-exits when this reaches zero. */
size_t runtime_busy_cnt;
150
151 mach_port_t
152 runtime_get_kernel_port(void)
153 {
154 return launchd_internal_port;
155 }
156
// static const char *__crashreporter_info__ = "";

/* Syslog priority mask consulted by log_kevent_struct() before formatting. */
static int internal_mask_pri = LOG_UPTO(LOG_NOTICE);
160
161
/* One-time runtime bring-up: create the main kqueue, the demand and IPC port
 * sets, and the internal MIG port, then start the kqueue demand thread.
 * Every step is fatal on failure (launchd_assert) — nothing else works
 * without this state.
 */
void
launchd_runtime_init(void)
{
	mach_msg_size_t mxmsgsz;
	pid_t p = getpid();

	launchd_assert((mainkq = kqueue()) != -1);

	launchd_assert((errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &demand_port_set)) == KERN_SUCCESS);
	launchd_assert((errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &ipc_port_set)) == KERN_SUCCESS);

	/* Fire mportset_callback() whenever any port in the demand set has a queued message. */
	launchd_assert(kevent_mod(demand_port_set, EVFILT_MACHPORT, EV_ADD, 0, 0, &kqmportset_callback) != -1);

	launchd_assert(launchd_mport_create_recv(&launchd_internal_port) == KERN_SUCCESS);
	launchd_assert(launchd_mport_make_send(launchd_internal_port) == KERN_SUCCESS);

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = sizeof(union __RequestUnion__x_launchd_internal_subsystem);
	if (x_launchd_internal_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = x_launchd_internal_subsystem.maxsize;
	}

	launchd_assert(runtime_add_mport(launchd_internal_port, launchd_internal_demux, mxmsgsz) == KERN_SUCCESS);
	launchd_assert(pthread_create(&kqueue_demand_thread, NULL, kqueue_demand_loop, NULL) == 0);
	launchd_assert(pthread_detach(kqueue_demand_thread) == 0);

	/* Opt this process out of hanging on unresponsive remote filesystems. */
	(void)launchd_assumes(sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &p, sizeof(p)) != -1);
}
190
191 void
192 launchd_runtime_init2(void)
193 {
194 size_t i;
195
196 for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
197 sigaddset(&sigign_set, sigigns[i]);
198 (void)launchd_assumes(signal(sigigns[i], SIG_IGN) != SIG_ERR);
199 }
200 }
201
202 #define FLAGIF(f) if (flags & f) { flags_off += sprintf(flags_off, #f); flags &= ~f; }
203 const char *
204 reboot_flags_to_C_names(unsigned int flags)
205 {
206 #define MAX_RB_STR "RB_ASKNAME|RB_SINGLE|RB_NOSYNC|RB_HALT|RB_INITNAME|RB_DFLTROOT|RB_ALTBOOT|RB_UNIPROC|RB_SAFEBOOT|RB_UPSDELAY|0xdeadbeeffeedface"
207 static char flags_buf[sizeof(MAX_RB_STR)];
208 char *flags_off = NULL;
209
210 if (flags == 0) {
211 return "RB_AUTOBOOT";
212 }
213
214 while (flags) {
215 if (flags_off) {
216 *flags_off = '|';
217 flags_off++;
218 *flags_off = '\0';
219 } else {
220 flags_off = flags_buf;
221 }
222
223 FLAGIF(RB_ASKNAME)
224 else FLAGIF(RB_SINGLE)
225 else FLAGIF(RB_NOSYNC)
226 else FLAGIF(RB_HALT)
227 else FLAGIF(RB_INITNAME)
228 else FLAGIF(RB_DFLTROOT)
229 else FLAGIF(RB_ALTBOOT)
230 else FLAGIF(RB_UNIPROC)
231 else FLAGIF(RB_SAFEBOOT)
232 else FLAGIF(RB_UPSDELAY)
233 else {
234 flags_off += sprintf(flags_off, "0x%x", flags);
235 flags = 0;
236 }
237 }
238
239 return flags_buf;
240 }
241
/* Map a signal number to its "SIGXXX" name.  Unknown values are formatted as
 * decimal into a static buffer — not thread-safe, but callers here are on the
 * single runtime thread.  NOTE: SIG2CASE remains defined for the rest of the
 * translation unit after this point.
 */
const char *
signal_to_C_name(unsigned int sig)
{
	static char unknown[25];

#define SIG2CASE(sg)	case sg: return #sg

	switch (sig) {
	SIG2CASE(SIGHUP);
	SIG2CASE(SIGINT);
	SIG2CASE(SIGQUIT);
	SIG2CASE(SIGILL);
	SIG2CASE(SIGTRAP);
	SIG2CASE(SIGABRT);
	SIG2CASE(SIGFPE);
	SIG2CASE(SIGKILL);
	SIG2CASE(SIGBUS);
	SIG2CASE(SIGSEGV);
	SIG2CASE(SIGSYS);
	SIG2CASE(SIGPIPE);
	SIG2CASE(SIGALRM);
	SIG2CASE(SIGTERM);
	SIG2CASE(SIGURG);
	SIG2CASE(SIGSTOP);
	SIG2CASE(SIGTSTP);
	SIG2CASE(SIGCONT);
	SIG2CASE(SIGCHLD);
	SIG2CASE(SIGTTIN);
	SIG2CASE(SIGTTOU);
	SIG2CASE(SIGIO);
	SIG2CASE(SIGXCPU);
	SIG2CASE(SIGXFSZ);
	SIG2CASE(SIGVTALRM);
	SIG2CASE(SIGPROF);
	SIG2CASE(SIGWINCH);
	SIG2CASE(SIGINFO);
	SIG2CASE(SIGUSR1);
	SIG2CASE(SIGUSR2);
	default:
		/* No symbolic name known; fall back to the raw number. */
		snprintf(unknown, sizeof(unknown), "%u", sig);
		return unknown;
	}
}
285
/* Pretty-print one kevent (kev_base[indx]) at syslog priority `level`.
 * Cheap no-op unless `level` passes internal_mask_pri.  kev->flags and
 * kev->fflags are decoded into '|'-joined symbolic strings via the FLAGIF /
 * FFLAGIF macros (FFLAGIF is defined mid-function, first use below); any
 * unrecognized residue is appended in hex.
 */
void
log_kevent_struct(int level, struct kevent *kev_base, int indx)
{
	struct kevent *kev = &kev_base[indx];
	const char *filter_str;
	char ident_buf[100];
	char filter_buf[100];
	char fflags_buf[1000];
	char flags_buf[1000] = "0x0";
	char *flags_off = NULL;
	char *fflags_off = NULL;
	unsigned short flags = kev->flags;
	unsigned int fflags = kev->fflags;

	/* Skip all the string building when this priority is filtered out. */
	if (likely(!(LOG_MASK(level) & internal_mask_pri))) {
		return;
	}

	/* Decode kev->flags: one flag per iteration, '|'-separated; first
	 * iteration points flags_off at the buffer start (no separator). */
	if (flags) while (flags) {
		if (flags_off) {
			*flags_off = '|';
			flags_off++;
			*flags_off = '\0';
		} else {
			flags_off = flags_buf;
		}

		FLAGIF(EV_ADD)
		else FLAGIF(EV_RECEIPT)
		else FLAGIF(EV_DELETE)
		else FLAGIF(EV_ENABLE)
		else FLAGIF(EV_DISABLE)
		else FLAGIF(EV_CLEAR)
		else FLAGIF(EV_EOF)
		else FLAGIF(EV_ONESHOT)
		else FLAGIF(EV_ERROR)
		else {
			/* Bits with no symbolic name: dump residue in hex and stop. */
			flags_off += sprintf(flags_off, "0x%hx", flags);
			flags = 0;
		}
	}

	/* Defaults; several filters below re-render ident/fflags more usefully.
	 * NOTE(review): ident is unsigned (uintptr_t) but printed with %ld —
	 * cosmetic only. */
	snprintf(ident_buf, sizeof(ident_buf), "%ld", kev->ident);
	snprintf(fflags_buf, sizeof(fflags_buf), "0x%x", fflags);

	switch (kev->filter) {
	case EVFILT_READ:
		filter_str = "EVFILT_READ";
		break;
	case EVFILT_WRITE:
		filter_str = "EVFILT_WRITE";
		break;
	case EVFILT_AIO:
		filter_str = "EVFILT_AIO";
		break;
	case EVFILT_VNODE:
		filter_str = "EVFILT_VNODE";
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

/* Same shape as FLAGIF() but consumes the fflags word; file-scoped from here on. */
#define FFLAGIF(ff) if (fflags & ff) { fflags_off += sprintf(fflags_off, #ff); fflags &= ~ff; }

			FFLAGIF(NOTE_DELETE)
			else FFLAGIF(NOTE_WRITE)
			else FFLAGIF(NOTE_EXTEND)
			else FFLAGIF(NOTE_ATTRIB)
			else FFLAGIF(NOTE_LINK)
			else FFLAGIF(NOTE_RENAME)
			else FFLAGIF(NOTE_REVOKE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_PROC:
		filter_str = "EVFILT_PROC";
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(NOTE_EXIT)
			else FFLAGIF(NOTE_REAP)
			else FFLAGIF(NOTE_FORK)
			else FFLAGIF(NOTE_EXEC)
			else FFLAGIF(NOTE_SIGNAL)
			else FFLAGIF(NOTE_TRACK)
			else FFLAGIF(NOTE_TRACKERR)
			else FFLAGIF(NOTE_CHILD)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_SIGNAL:
		filter_str = "EVFILT_SIGNAL";
		/* For signal events the ident is the signal number; show its name. */
		strcpy(ident_buf, signal_to_C_name(kev->ident));
		break;
	case EVFILT_TIMER:
		filter_str = "EVFILT_TIMER";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(NOTE_SECONDS)
			else FFLAGIF(NOTE_USECONDS)
			else FFLAGIF(NOTE_NSECONDS)
			else FFLAGIF(NOTE_ABSOLUTE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_MACHPORT:
		filter_str = "EVFILT_MACHPORT";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		break;
	case EVFILT_FS:
		filter_str = "EVFILT_FS";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(VQ_NOTRESP)
			else FFLAGIF(VQ_NEEDAUTH)
			else FFLAGIF(VQ_LOWDISK)
			else FFLAGIF(VQ_MOUNT)
			else FFLAGIF(VQ_UNMOUNT)
			else FFLAGIF(VQ_DEAD)
			else FFLAGIF(VQ_ASSIST)
			else FFLAGIF(VQ_NOTRESPLOCK)
			else FFLAGIF(VQ_UPDATE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	default:
		/* Unknown filter: show the raw number. */
		snprintf(filter_buf, sizeof(filter_buf), "%hd", kev->filter);
		filter_str = filter_buf;
		break;
	}

	runtime_syslog(level, "KEVENT[%d]: udata = %p data = 0x%lx ident = %s filter = %s flags = %s fflags = %s",
			indx, kev->udata, kev->data, ident_buf, filter_str, flags_buf, fflags_buf);
}
458
/* kqueue callback fired when any port in demand_port_set has a queued
 * message.  Scans the set's members, finds a port with pending messages, and
 * dispatches to the job's kq_callback (stashed in the fabricated kevent's
 * udata).  Only one port is serviced per invocation — the callback may
 * mutate the set, so we bail out of the scan afterwards.
 */
void
mportset_callback(void)
{
	mach_port_name_array_t members;
	mach_msg_type_number_t membersCnt;
	mach_port_status_t status;
	mach_msg_type_number_t statusCnt;
	struct kevent kev;
	unsigned int i;

	if (!launchd_assumes((errno = mach_port_get_set_status(mach_task_self(), demand_port_set, &members, &membersCnt)) == KERN_SUCCESS)) {
		return;
	}

	for (i = 0; i < membersCnt; i++) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), members[i], MACH_PORT_RECEIVE_STATUS, (mach_port_info_t)&status,
					&statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			/* Fabricate a kevent so the job callback sees the usual shape. */
			EV_SET(&kev, members[i], EVFILT_MACHPORT, 0, 0, 0, job_find_by_service_port(members[i]));
#if 0
			if (launchd_assumes(kev.udata != NULL)) {
#endif
			log_kevent_struct(LOG_DEBUG, &kev, 0);
			(*((kq_callback *)kev.udata))(kev.udata, &kev);
#if 0
			} else {
				log_kevent_struct(LOG_ERR, &kev, 0);
			}
#endif
			/* the callback may have tainted our ability to continue this for loop */
			break;
		}
	}

	/* mach_port_get_set_status() hands back kernel-allocated memory; free it. */
	(void)launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)members,
			(vm_size_t) membersCnt * sizeof(mach_port_name_t)) == KERN_SUCCESS);
}
499
500 void *
501 kqueue_demand_loop(void *arg __attribute__((unused)))
502 {
503 fd_set rfds;
504
505 /*
506 * Yes, at first glance, calling select() on a kqueue seems silly.
507 *
508 * This avoids a race condition between the main thread and this helper
509 * thread by ensuring that we drain kqueue events on the same thread
510 * that manipulates the kqueue.
511 */
512
513 for (;;) {
514 FD_ZERO(&rfds);
515 FD_SET(mainkq, &rfds);
516 if (launchd_assumes(select(mainkq + 1, &rfds, NULL, NULL, NULL) == 1)) {
517 (void)launchd_assumes(handle_kqueue(launchd_internal_port, mainkq) == 0);
518 }
519 }
520
521 return NULL;
522 }
523
/* MIG handler on the internal port: drain up to BULK_KEV_MAX pending kevents
 * from the main kqueue without blocking and dispatch each through its udata
 * callback.  bulk_kev / bulk_kev_i / bulk_kev_cnt are published so that
 * kevent_mod() can prune not-yet-dispatched events a callback deletes
 * mid-drain (pruned entries get filter = 0 and are skipped below).
 */
kern_return_t
x_handle_kqueue(mach_port_t junk __attribute__((unused)), integer_t fd)
{
	struct timespec ts = { 0, 0 };	/* zero timeout: poll, never block */
	struct kevent *kevi, kev[BULK_KEV_MAX];
	int i;

	bulk_kev = kev;

	if (launchd_assumes((bulk_kev_cnt = kevent(fd, NULL, 0, kev, BULK_KEV_MAX, &ts)) != -1)) {
#if 0
		for (i = 0; i < bulk_kev_cnt; i++) {
			log_kevent_struct(LOG_DEBUG, &kev[0], i);
		}
#endif
		for (i = 0; i < bulk_kev_cnt; i++) {
			bulk_kev_i = i;
			kevi = &kev[i];

			/* filter == 0 means kevent_mod() pruned this entry mid-drain. */
			if (kevi->filter) {
				runtime_syslog(LOG_DEBUG, "Dispatching kevent...");
				log_kevent_struct(LOG_DEBUG, kev, i);
#if 0
				/* Check if kevi->udata was either malloc(3)ed or is a valid function pointer.
				 * If neither, it's probably an invalid pointer and we should log it.
				 */
				Dl_info dli;
				if (launchd_assumes(malloc_size(kevi->udata) || dladdr(kevi->udata, &dli))) {
					runtime_ktrace(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_START, kevi->ident, kevi->filter, kevi->fflags);
					(*((kq_callback *)kevi->udata))(kevi->udata, kevi);
					runtime_ktrace0(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_END);
				} else {
					runtime_syslog(LOG_ERR, "The following kevent had invalid context data.");
					log_kevent_struct(LOG_EMERG, &kev[0], i);
				}
#else
				runtime_ktrace(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_START, kevi->ident, kevi->filter, kevi->fflags);
				(*((kq_callback *)kevi->udata))(kevi->udata, kevi);
				runtime_ktrace0(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_END);
#endif
			}
		}
	}

	/* Batch over: stop advertising the (stack-allocated) array. */
	bulk_kev = NULL;

	return 0;
}
572
/* Top-level IPC loop.  Allocates fresh request/reply buffers sized to
 * max_msg_size and hands them to launchd_runtime2(); when that returns the
 * registered maximum message size has grown, so both buffers are released
 * and reallocated at the new size on the next pass.
 */
void
launchd_runtime(void)
{
	mig_reply_error_t *req = NULL, *resp = NULL;
	mach_msg_size_t mz = max_msg_size;
	/* Tag the allocations for vmmap; TRUE here is the anywhere flag
	 * (presumably VM_FLAGS_ANYWHERE — TODO confirm). */
	int flags = VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|TRUE;

	for (;;) {
		/* Release last iteration's buffers (sized with the old mz). */
		if (likely(req)) {
			(void)launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)req, mz) == KERN_SUCCESS);
			req = NULL;
		}
		if (likely(resp)) {
			(void)launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)resp, mz) == KERN_SUCCESS);
			resp = NULL;
		}

		mz = max_msg_size;

		if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t *)&req, mz, flags) == KERN_SUCCESS)) {
			continue;
		}
		if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t *)&resp, mz, flags) == KERN_SUCCESS)) {
			continue;
		}

		launchd_runtime2(mz, req, resp);

		/* If we get here, max_msg_size probably changed... */
	}
}
604
605 kern_return_t
606 launchd_set_bport(mach_port_t name)
607 {
608 return errno = task_set_bootstrap_port(mach_task_self(), name);
609 }
610
611 kern_return_t
612 launchd_get_bport(mach_port_t *name)
613 {
614 return errno = task_get_bootstrap_port(mach_task_self(), name);
615 }
616
/* Request Mach notification `which` for port `name`.
 * No-senders notifications are delivered to the port itself; everything else
 * goes to launchd_internal_port.  Port-destroyed requests use a sync mscount
 * of 0, others 1.  A previously-armed notification right, if any, is
 * deallocated.  Returns (and sets errno to) the kernel result.
 */
kern_return_t
launchd_mport_notify_req(mach_port_t name, mach_msg_id_t which)
{
	mach_port_mscount_t msgc = (which == MACH_NOTIFY_PORT_DESTROYED) ? 0 : 1;
	mach_port_t previous, where = (which == MACH_NOTIFY_NO_SENDERS) ? name : launchd_internal_port;

	if (which == MACH_NOTIFY_NO_SENDERS) {
		/* Always make sure the send count is zero, in case a receive right is reused */
		errno = mach_port_set_mscount(mach_task_self(), name, 0);
		if (unlikely(errno != KERN_SUCCESS)) {
			return errno;
		}
	}

	errno = mach_port_request_notification(mach_task_self(), name, which, msgc, where,
			MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);

	/* Drop the previously-registered notification right so it doesn't leak. */
	if (likely(errno == 0) && previous != MACH_PORT_NULL) {
		(void)launchd_assumes(launchd_mport_deallocate(previous) == KERN_SUCCESS);
	}

	return errno;
}
640
/* fork(2) wrapper used when spawning jobs.
 * Temporarily installs `bsport` as the task bootstrap port so the child
 * inherits it, and resets the sigigns[] signals to SIG_DFL (with the set
 * blocked) so the child starts with default dispositions.
 * Parent path: re-ignore the signals, restore the saved mask, clear the
 * borrowed bootstrap port.  Child path: opt out of remote-FS hangs, unblock
 * everything.  fork()'s errno survives the cleanup calls.
 */
pid_t
runtime_fork(mach_port_t bsport)
{
	sigset_t emptyset, oset;
	pid_t r = -1;
	int saved_errno;
	size_t i;

	sigemptyset(&emptyset);

	(void)launchd_assumes(launchd_mport_make_send(bsport) == KERN_SUCCESS);
	(void)launchd_assumes(launchd_set_bport(bsport) == KERN_SUCCESS);
	(void)launchd_assumes(launchd_mport_deallocate(bsport) == KERN_SUCCESS);

	/* Block the signals while their handlers are briefly SIG_DFL. */
	(void)launchd_assumes(sigprocmask(SIG_BLOCK, &sigign_set, &oset) != -1);
	for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
		(void)launchd_assumes(signal(sigigns[i], SIG_DFL) != SIG_ERR);
	}

	r = fork();
	saved_errno = errno;

	if (r != 0) {
		/* Parent (or failed fork): restore launchd's own signal state. */
		for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
			(void)launchd_assumes(signal(sigigns[i], SIG_IGN) != SIG_ERR);
		}
		(void)launchd_assumes(sigprocmask(SIG_SETMASK, &oset, NULL) != -1);
		(void)launchd_assumes(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
	} else {
		/* Child: pass the negated pid — presumably how this sysctl clears
		 * the flag for the target process (parent sets it with +pid above);
		 * TODO confirm against xnu. */
		pid_t p = -getpid();
		(void)launchd_assumes(sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &p, sizeof(p)) != -1);

		(void)launchd_assumes(sigprocmask(SIG_SETMASK, &emptyset, NULL) != -1);
	}

	errno = saved_errno;

	return r;
}
680
681
682 void
683 runtime_set_timeout(timeout_callback to_cb, unsigned int sec)
684 {
685 if (sec == 0 || to_cb == NULL) {
686 runtime_idle_callback = NULL;
687 runtime_idle_timeout = 0;
688 }
689
690 runtime_idle_callback = to_cb;
691 runtime_idle_timeout = sec * 1000;
692 }
693
694 kern_return_t
695 runtime_add_mport(mach_port_t name, mig_callback demux, mach_msg_size_t msg_size)
696 {
697 size_t needed_table_sz = (MACH_PORT_INDEX(name) + 1) * sizeof(mig_callback);
698 mach_port_t target_set = demux ? ipc_port_set : demand_port_set;
699
700 msg_size = round_page(msg_size + MAX_TRAILER_SIZE);
701
702 if (unlikely(needed_table_sz > mig_cb_table_sz)) {
703 needed_table_sz *= 2; /* Let's try and avoid realloc'ing for a while */
704 mig_callback *new_table = malloc(needed_table_sz);
705
706 if (!launchd_assumes(new_table != NULL)) {
707 return KERN_RESOURCE_SHORTAGE;
708 }
709
710 if (likely(mig_cb_table)) {
711 memcpy(new_table, mig_cb_table, mig_cb_table_sz);
712 free(mig_cb_table);
713 }
714
715 mig_cb_table_sz = needed_table_sz;
716 mig_cb_table = new_table;
717 }
718
719 mig_cb_table[MACH_PORT_INDEX(name)] = demux;
720
721 if (msg_size > max_msg_size) {
722 max_msg_size = msg_size;
723 }
724
725 return errno = mach_port_move_member(mach_task_self(), name, target_set);
726 }
727
728 kern_return_t
729 runtime_remove_mport(mach_port_t name)
730 {
731 mig_cb_table[MACH_PORT_INDEX(name)] = NULL;
732
733 return errno = mach_port_move_member(mach_task_self(), name, MACH_PORT_NULL);
734 }
735
736 kern_return_t
737 launchd_mport_make_send(mach_port_t name)
738 {
739 return errno = mach_port_insert_right(mach_task_self(), name, name, MACH_MSG_TYPE_MAKE_SEND);
740 }
741
742 kern_return_t
743 launchd_mport_copy_send(mach_port_t name)
744 {
745 return errno = mach_port_insert_right(mach_task_self(), name, name, MACH_MSG_TYPE_COPY_SEND);
746 }
747
748 kern_return_t
749 launchd_mport_make_send_once(mach_port_t name, mach_port_t *so)
750 {
751 mach_msg_type_name_t right = 0;
752 return errno = mach_port_extract_right(mach_task_self(), name, MACH_MSG_TYPE_MAKE_SEND_ONCE, so, &right);
753 }
754
755 kern_return_t
756 launchd_mport_close_recv(mach_port_t name)
757 {
758 return errno = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE, -1);
759 }
760
761 kern_return_t
762 launchd_mport_create_recv(mach_port_t *name)
763 {
764 return errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, name);
765 }
766
767 kern_return_t
768 launchd_mport_deallocate(mach_port_t name)
769 {
770 return errno = mach_port_deallocate(mach_task_self(), name);
771 }
772
773 int
774 kevent_bulk_mod(struct kevent *kev, size_t kev_cnt)
775 {
776 size_t i;
777
778 for (i = 0; i < kev_cnt; i++) {
779 kev[i].flags |= EV_CLEAR|EV_RECEIPT;
780 }
781
782 return kevent(mainkq, kev, kev_cnt, kev, kev_cnt, NULL);
783 }
784
/* Single-change kevent() wrapper for the main kqueue.
 * - EVFILT_READ/WRITE stay level-triggered; all other filters get EV_CLEAR.
 * - EVFILT_TIMER adds are preceded by a delete (workaround for 5225889).
 * - EV_RECEIPT is always set, so the syscall's output slot is an EV_ERROR
 *   receipt whose data field carries the errno (0 on success).
 * - Deletes issued while x_handle_kqueue() is mid-drain also prune matching,
 *   not-yet-dispatched events from the in-flight batch (filter zeroed).
 * Returns the kevent() result, or -1 with errno set on failure.
 */
int
kevent_mod(uintptr_t ident, short filter, u_short flags, u_int fflags, intptr_t data, void *udata)
{
	struct kevent kev;
	int r;

	switch (filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	case EVFILT_TIMER:
		/* Workaround 5225889 */
		if (flags & EV_ADD) {
			kevent_mod(ident, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
		}
		/* fall through */
	default:
		flags |= EV_CLEAR;
		break;
	}

	flags |= EV_RECEIPT;

	/* Adds must carry a callback in udata; deletes during a drain prune the batch. */
	if (flags & EV_ADD && !launchd_assumes(udata != NULL)) {
		errno = EINVAL;
		return -1;
	} else if ((flags & EV_DELETE) && bulk_kev) {
		int i = 0;
		/* Only events after the one currently dispatching can still fire. */
		for (i = bulk_kev_i + 1; i < bulk_kev_cnt; i++) {
			if (bulk_kev[i].filter == filter && bulk_kev[i].ident == ident) {
				runtime_syslog(LOG_DEBUG, "Pruning the following kevent:");
				log_kevent_struct(LOG_DEBUG, &bulk_kev[0], i);
				bulk_kev[i].filter = (short)0;
			}
		}
	}

	EV_SET(&kev, ident, filter, flags, fflags, data, udata);

	/* With EV_RECEIPT set, the result comes back in kev itself. */
	r = kevent(mainkq, &kev, 1, &kev, 1, NULL);

	if (!launchd_assumes(r == 1)) {
		return -1;
	}

	if (launchd_assumes(kev.flags & EV_ERROR)) {
		if ((flags & EV_ADD) && kev.data) {
			/* Receipt's data field holds the per-change errno. */
			runtime_syslog(LOG_DEBUG, "%s(): See the next line...", __func__);
			log_kevent_struct(LOG_DEBUG, &kev, 0);
			errno = kev.data;
			return -1;
		}
	}

	return r;
}
841
842 boolean_t
843 launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply)
844 {
845 if (launchd_internal_server_routine(Request)) {
846 return launchd_internal_server(Request, Reply);
847 } else if (notify_server_routine(Request)) {
848 return notify_server(Request, Reply);
849 } else {
850 return mach_exc_server(Request, Reply);
851 }
852 }
853
854 kern_return_t
855 do_mach_notify_port_destroyed(mach_port_t notify __attribute__((unused)), mach_port_t rights)
856 {
857 /* This message is sent to us when a receive right is returned to us. */
858
859 if (!launchd_assumes(job_ack_port_destruction(rights))) {
860 (void)launchd_assumes(launchd_mport_close_recv(rights) == KERN_SUCCESS);
861 }
862
863 return KERN_SUCCESS;
864 }
865
/* Port-deleted notification handler; intentionally a no-op (see below). */
kern_return_t
do_mach_notify_port_deleted(mach_port_t notify __attribute__((unused)), mach_port_name_t name __attribute__((unused)))
{
	/* If we deallocate/destroy/mod_ref away a port with a pending
	 * notification, the original notification message is replaced with
	 * this message. To quote a Mach kernel expert, "the kernel has a
	 * send-once right that has to be used somehow."
	 */
	return KERN_SUCCESS;
}
876
/* No-senders notification: the last send right to one of our job objects
 * went away.  Resolve the port back to its job and let the job layer react.
 */
kern_return_t
do_mach_notify_no_senders(mach_port_t notify, mach_port_mscount_t mscount __attribute__((unused)))
{
	job_t j = job_mig_intran(notify);

	/* This message is sent to us when the last customer of one of our
	 * objects goes away.
	 */

	if (!launchd_assumes(j != NULL)) {
		return KERN_FAILURE;
	}

	job_ack_no_senders(j);

	return KERN_SUCCESS;
}
894
/* Send-once notification handler; nothing to clean up on our side. */
kern_return_t
do_mach_notify_send_once(mach_port_t notify __attribute__((unused)))
{
	/*
	 * This message is sent for each send-once right that is deallocated
	 * without being used.
	 */

	return KERN_SUCCESS;
}
905
/* Dead-name notification: a send right we hold lost its receiver.  Clear the
 * pending log-drain reply port if that's what died, purge the name from the
 * job manager tree, and drop the extra dead-name reference the notification
 * itself added.
 */
kern_return_t
do_mach_notify_dead_name(mach_port_t notify __attribute__((unused)), mach_port_name_t name)
{
	/* This message is sent to us when one of our send rights no longer has
	 * a receiver somewhere else on the system.
	 */

	if (name == drain_reply_port) {
		(void)launchd_assumes(launchd_mport_deallocate(name) == KERN_SUCCESS);
		drain_reply_port = MACH_PORT_NULL;
	}

	if (launchd_assumes(root_jobmgr != NULL)) {
		root_jobmgr = jobmgr_delete_anything_with_port(root_jobmgr, name);
	}

	/* A dead-name notification about a port appears to increment the
	 * rights on said port. Let's deallocate it so that we don't leak
	 * dead-name ports.
	 */
	(void)launchd_assumes(launchd_mport_deallocate(name) == KERN_SUCCESS);

	return KERN_SUCCESS;
}
930
/* Capture the audit token from a received message's trailer into the
 * file-scope `ldc` credential cache (read back via
 * runtime_get_caller_creds()).  The trailer follows the message body at the
 * next natural boundary (round_msg).
 */
void
record_caller_creds(mach_msg_header_t *mh)
{
	mach_msg_max_trailer_t *tp;
	size_t trailer_size;

	tp = (mach_msg_max_trailer_t *)((vm_offset_t)mh + round_msg(mh->msgh_size));

	/* NOTE(review): the subtracted term is sizeof(type) - sizeof(size),
	 * which is typically zero — looks odd, but this matches the shipped
	 * computation; confirm before "fixing". */
	trailer_size = tp->msgh_trailer_size - (mach_msg_size_t)(sizeof(mach_msg_trailer_type_t) - sizeof(mach_msg_trailer_size_t));

	/* Only trust the token if the trailer is big enough to contain one. */
	if (launchd_assumes(trailer_size >= (mach_msg_size_t)sizeof(audit_token_t))) {
		audit_token_to_au32(tp->msgh_audit, /* audit UID */ NULL, &ldc.euid,
				&ldc.egid, &ldc.uid, &ldc.gid, &ldc.pid,
				&ldc.asid, /* au_tid_t */ NULL);
	}

}
948
949 struct ldcred *
950 runtime_get_caller_creds(void)
951 {
952 return &ldc;
953 }
954
/* Receive and service at most one exception message on `port`.
 * Receives with timeout `to` (audit trailer requested), runs
 * mach_exc_server() over it, then sends the reply with a slightly longer
 * send timeout (to + 100ms).  Returns the receive-side mach_msg() result;
 * MACH_RCV_TIMED_OUT just means the queue was empty.  The do/while(0) is a
 * single-pass block used for early 'break' exits.
 */
mach_msg_return_t
launchd_exc_runtime_once(mach_port_t port, mach_msg_size_t rcv_msg_size, mach_msg_size_t send_msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply, mach_msg_timeout_t to)
{
	mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
	mach_msg_option_t rcv_options =	MACH_RCV_MSG		|
					MACH_RCV_TIMEOUT	|
					MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT)	|
					MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)	;

	do {
		mr = mach_msg(&bufRequest->Head, rcv_options, 0, rcv_msg_size, port, to, MACH_PORT_NULL);
		switch (mr) {
		case MACH_RCV_TIMED_OUT	:
			runtime_syslog(LOG_DEBUG, "Message queue is empty.");
			break;
		case MACH_RCV_TOO_LARGE	:
			runtime_syslog(LOG_INFO, "Message is larger than %u bytes.", rcv_msg_size);
			break;
		default			:
			(void)launchd_assumes(mr == MACH_MSG_SUCCESS);
		}

		if (mr == MACH_MSG_SUCCESS) {
			/* Build the reply in bufReply via the MIG-generated server. */
			if (!launchd_assumes(mach_exc_server(&bufRequest->Head, &bufReply->Head) == TRUE)) {
				runtime_syslog(LOG_WARNING, "Exception server routine failed.");
				break;
			}

			mach_msg_return_t smr = ~MACH_MSG_SUCCESS;
			mach_msg_option_t send_options =	MACH_SEND_MSG	|
								MACH_SEND_TIMEOUT	;

			(void)launchd_assumes(bufReply->Head.msgh_size <= send_msg_size);
			/* Give the reply send a little more slack than the receive. */
			smr = mach_msg(&bufReply->Head, send_options, bufReply->Head.msgh_size, 0, MACH_PORT_NULL, to + 100, MACH_PORT_NULL);
			switch (smr) {
			case MACH_SEND_TIMED_OUT	:
				runtime_syslog(LOG_WARNING, "Timed out while trying to send reply to exception message.");
				break;
			case MACH_SEND_INVALID_DEST	:
				runtime_syslog(LOG_WARNING, "Tried sending a message to a port that we don't possess a send right to.");
				break;
			default				:
				if (!launchd_assumes(smr == MACH_MSG_SUCCESS)) {
					runtime_syslog(LOG_WARNING, "Couldn't deliver exception reply: 0x%x", smr);
				}
				break;
			}
		}
	} while (0);

	return mr;
}
1007
1008 void
1009 launchd_runtime2(mach_msg_size_t msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply)
1010 {
1011 mach_msg_options_t options, tmp_options;
1012 mig_reply_error_t *bufTemp;
1013 mig_callback the_demux;
1014 mach_msg_timeout_t to;
1015 mach_msg_return_t mr;
1016 size_t busy_cnt;
1017
1018 options = MACH_RCV_MSG|MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT) |
1019 MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0);
1020
1021 tmp_options = options;
1022
1023 for (;;) {
1024 busy_cnt = runtime_busy_cnt + runtime_standby_cnt;
1025 to = MACH_MSG_TIMEOUT_NONE;
1026
1027 if (unlikely(msg_size != max_msg_size)) {
1028 /* The buffer isn't big enough to receive messages anymore... */
1029 tmp_options &= ~MACH_RCV_MSG;
1030 options &= ~MACH_RCV_MSG;
1031 if (!(tmp_options & MACH_SEND_MSG)) {
1032 return;
1033 }
1034 }
1035
1036 if ((tmp_options & MACH_RCV_MSG) && (runtime_idle_callback || (busy_cnt == 0))) {
1037 tmp_options |= MACH_RCV_TIMEOUT;
1038
1039 if (!(tmp_options & MACH_SEND_TIMEOUT)) {
1040 #if !TARGET_OS_EMBEDDED
1041 to = busy_cnt ? runtime_idle_timeout : (_vproc_standby_timeout() * 1000);
1042 #else
1043 to = runtime_idle_timeout;
1044 #endif
1045 }
1046 }
1047
1048 runtime_log_push();
1049
1050 mr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
1051 msg_size, ipc_port_set, to, MACH_PORT_NULL);
1052
1053 time_of_mach_msg_return = runtime_get_opaque_time();
1054
1055 tmp_options = options;
1056
1057 /* It looks like the compiler doesn't optimize switch(unlikely(...)) See: 5691066 */
1058 if (unlikely(mr)) switch (mr) {
1059 case MACH_SEND_INVALID_DEST:
1060 case MACH_SEND_TIMED_OUT:
1061 /* We need to clean up and start over. */
1062 if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
1063 mach_msg_destroy(&bufReply->Head);
1064 }
1065 continue;
1066 case MACH_RCV_TIMED_OUT:
1067 if (to != MACH_MSG_TIMEOUT_NONE) {
1068 if (busy_cnt == 0) {
1069 runtime_syslog(LOG_INFO, "Idle exiting.");
1070 launchd_shutdown();
1071 } else if (runtime_idle_callback) {
1072 runtime_idle_callback();
1073 }
1074 }
1075 continue;
1076 default:
1077 if (!launchd_assumes(mr == MACH_MSG_SUCCESS)) {
1078 runtime_syslog(LOG_ERR, "mach_msg(): %u: %s", mr, mach_error_string(mr));
1079 }
1080 continue;
1081 }
1082
1083 bufTemp = bufRequest;
1084 bufRequest = bufReply;
1085 bufReply = bufTemp;
1086
1087 if (unlikely(!(tmp_options & MACH_RCV_MSG))) {
1088 continue;
1089 }
1090
1091 /* we have another request message */
1092 #if 0
1093 if (!launchd_assumes(mig_cb_table != NULL)) {
1094 break;
1095 }
1096 #endif
1097
1098 the_demux = mig_cb_table[MACH_PORT_INDEX(bufRequest->Head.msgh_local_port)];
1099
1100 #if 0
1101 if (!launchd_assumes(the_demux != NULL)) {
1102 break;
1103 }
1104 #endif
1105
1106 record_caller_creds(&bufRequest->Head);
1107 runtime_ktrace(RTKT_LAUNCHD_MACH_IPC|DBG_FUNC_START, bufRequest->Head.msgh_local_port, bufRequest->Head.msgh_id, (long)the_demux);
1108
1109 if (the_demux(&bufRequest->Head, &bufReply->Head) == FALSE) {
1110 /* XXX - also gross */
1111 if (likely(bufRequest->Head.msgh_id == MACH_NOTIFY_NO_SENDERS)) {
1112 notify_server(&bufRequest->Head, &bufReply->Head);
1113 } else if (the_demux == protocol_vproc_server) {
1114
1115 #if !TARGET_OS_EMBEDDED
1116 /* Similarly gross. */
1117 if (xpc_domain_server(&bufRequest->Head, &bufReply->Head) == FALSE) {
1118 (void)xpc_events_server(&bufRequest->Head, &bufReply->Head);
1119 }
1120 #else
1121 (void)xpc_events_server(&bufRequest->Head, &bufReply->Head);
1122 #endif
1123 }
1124 }
1125
1126 runtime_ktrace(RTKT_LAUNCHD_MACH_IPC|DBG_FUNC_END, bufReply->Head.msgh_remote_port, bufReply->Head.msgh_bits, bufReply->RetCode);
1127
1128 /* bufReply is a union. If MACH_MSGH_BITS_COMPLEX is set, then bufReply->RetCode is assumed to be zero. */
1129 if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
1130 if (unlikely(bufReply->RetCode != KERN_SUCCESS)) {
1131 if (likely(bufReply->RetCode == MIG_NO_REPLY)) {
1132 bufReply->Head.msgh_remote_port = MACH_PORT_NULL;
1133 } else if (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
1134 /* destroy the request - but not the reply port */
1135 bufRequest->Head.msgh_remote_port = MACH_PORT_NULL;
1136 mach_msg_destroy(&bufRequest->Head);
1137 }
1138 }
1139 }
1140
1141 if (likely(bufReply->Head.msgh_remote_port != MACH_PORT_NULL)) {
1142 tmp_options |= MACH_SEND_MSG;
1143
1144 if (unlikely(MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != MACH_MSG_TYPE_MOVE_SEND_ONCE)) {
1145 tmp_options |= MACH_SEND_TIMEOUT;
1146 }
1147 }
1148 }
1149 }
1150
/* Close a file descriptor, first neutralizing any not-yet-dispatched kevents
 * in the current bulk batch that reference it, so we never act on a stale
 * (or recycled) descriptor.
 */
int
runtime_close(int fd)
{
	int idx;

	if (bulk_kev) {
		for (idx = bulk_kev_i + 1; idx < bulk_kev_cnt; idx++) {
			switch (bulk_kev[idx].filter) {
			case EVFILT_VNODE:
			case EVFILT_WRITE:
			case EVFILT_READ:
				/* For fd-based filters, ident is the descriptor. */
				if (unlikely((int)bulk_kev[idx].ident == fd)) {
					runtime_syslog(LOG_DEBUG, "Skipping kevent index: %d", idx);
					bulk_kev[idx].filter = 0;
				}
			default:
				break;
			}
		}
	}

	return close(fd);
}
1172
1173 void
1174 runtime_closelog(void)
1175 {
1176 runtime_log_push();
1177
1178 if (ourlogfile) {
1179 (void)launchd_assumes(fflush(ourlogfile) == 0);
1180 (void)launchd_assumes(runtime_fsync(fileno(ourlogfile)) != -1);
1181 }
1182 }
1183
/* Sync a descriptor to disk. Returns fsync(2)'s result: 0 on success,
 * -1 (with errno set) on failure.
 */
int
runtime_fsync(int fd)
{
#if 0
	/* Disabled: F_FULLFSYNC forces the drive to flush its track cache, which
	 * is much more expensive than a plain fsync(); the plain path is used
	 * unconditionally for now.
	 */
	if (do_apple_internal_logging) {
		return fcntl(fd, F_FULLFSYNC, NULL);
	}

	return fsync(fd);
#else
	return fsync(fd);
#endif
}
1197
1198 int
1199 runtime_setlogmask(int maskpri)
1200 {
1201 internal_mask_pri = maskpri;
1202
1203 return internal_mask_pri;
1204 }
1205
1206 void
1207 runtime_syslog(int pri, const char *message, ...)
1208 {
1209 struct runtime_syslog_attr attr = {
1210 g_my_label,
1211 g_my_label,
1212 pid1_magic ? "System" : "Background",
1213 pri,
1214 getuid(),
1215 getpid(),
1216 getpid()
1217 };
1218 va_list ap;
1219
1220 va_start(ap, message);
1221 runtime_vsyslog(&attr, message, ap);
1222
1223 va_end(ap);
1224 }
1225
/* Core logging routine: filter the message by priority, optionally echo it
 * to the console, and queue it via logmsg_add() for later forwarding or
 * flushing.  NOTE: this may rewrite attr->priority in place, so callers must
 * not assume attr is untouched on return.
 */
void
runtime_vsyslog(struct runtime_syslog_attr *attr, const char *message, va_list args)
{
	/* Save errno immediately so the queued message reflects the caller's
	 * errno, not anything clobbered by the work below.
	 */
	int saved_errno = errno;
	char newmsg[10000];
	bool echo_to_console= false;

	if (attr->priority == LOG_APPLEONLY) {
		/* LOG_APPLEONLY messages are dropped entirely unless Apple-internal
		 * logging was enabled (see do_file_init()).
		 */
		if (do_apple_internal_logging) {
			attr->priority = LOG_NOTICE;
		} else {
			return;
		}
	} else if (attr->priority == LOG_SCOLDING) {
		attr->priority = g_log_strict_usage ? LOG_NOTICE : LOG_DEBUG;
	}

	/* LOG_CONSOLE is a flag bit piggybacked onto the priority value; strip
	 * it before the mask test.
	 */
	if (attr->priority & LOG_CONSOLE) {
		echo_to_console = true;
		attr->priority &= ~LOG_CONSOLE;
	}

	if (!(LOG_MASK(attr->priority) & internal_mask_pri)) {
		return;
	}

	vsnprintf(newmsg, sizeof(newmsg), message, args);

	if (g_console && (unlikely(low_level_debug) || echo_to_console)) {
		fprintf(g_console, "%s %u\t%s %u\t%s\n", attr->from_name, attr->from_pid, attr->about_name, attr->about_pid, newmsg);
	}

	logmsg_add(attr, saved_errno, newmsg);
}
1260
/* Package a log message and its metadata strings into one contiguous heap
 * allocation and append it to the global log queue.
 * Returns false only if the allocation fails.
 */
bool
logmsg_add(struct runtime_syslog_attr *attr, int err_num, const char *msg)
{
	/* One block holds the struct plus all four strings; the +4 covers the
	 * four NUL terminators written below.
	 */
	size_t lm_sz = sizeof(struct logmsg_s) + strlen(msg) + strlen(attr->from_name) + strlen(attr->about_name) + strlen(attr->session_name) + 4;
	char *data_off;
	struct logmsg_s *lm;

#define ROUND_TO_64BIT_WORD_SIZE(x) ((x + 7) & ~7)

	/* we do this to make the unpacking for the log_drain not cause unalignment faults */
	lm_sz = ROUND_TO_64BIT_WORD_SIZE(lm_sz);

	if (unlikely((lm = calloc(1, lm_sz)) == NULL)) {
		return false;
	}

	/* Copy the strings back to back after the struct and point the struct's
	 * members at them (converted to offsets later by runtime_log_pack()).
	 */
	data_off = lm->data;

	lm->when = runtime_get_wall_time();
	lm->from_pid = attr->from_pid;
	lm->about_pid = attr->about_pid;
	lm->err_num = err_num;
	lm->pri = attr->priority;
	lm->obj_sz = lm_sz;
	lm->msg = data_off;
	data_off += sprintf(data_off, "%s", msg) + 1;
	lm->from_name = data_off;
	data_off += sprintf(data_off, "%s", attr->from_name) + 1;
	lm->about_name = data_off;
	data_off += sprintf(data_off, "%s", attr->about_name) + 1;
	lm->session_name = data_off;
	data_off += sprintf(data_off, "%s", attr->session_name) + 1;

	STAILQ_INSERT_TAIL(&logmsg_queue, lm, sqe);
	logmsg_queue_sz += lm_sz;
	logmsg_queue_cnt++;

	return true;
}
1300
1301 void
1302 logmsg_remove(struct logmsg_s *lm)
1303 {
1304 STAILQ_REMOVE(&logmsg_queue, lm, logmsg_s, sqe);
1305 logmsg_queue_sz -= lm->obj_sz;
1306 logmsg_queue_cnt--;
1307
1308 free(lm);
1309 }
1310
1311 kern_return_t
1312 runtime_log_pack(vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
1313 {
1314 struct logmsg_s *lm;
1315 void *offset;
1316
1317 *outvalCnt = logmsg_queue_sz;
1318
1319 mig_allocate(outval, *outvalCnt);
1320
1321 if (unlikely(*outval == 0)) {
1322 return 1;
1323 }
1324
1325 offset = (void *)*outval;
1326
1327 if (g_log_per_user_shutdown && !ourlogfile && !pid1_magic && shutdown_in_progress) {
1328 char logfile[NAME_MAX];
1329 snprintf(logfile, sizeof(logfile), "/var/tmp/launchd-%s.shutdown.log", g_username);
1330
1331 char logfile1[NAME_MAX];
1332 snprintf(logfile1, sizeof(logfile1), "/var/tmp/launchd-%s.shutdown.log.1", g_username);
1333
1334 rename(logfile, logfile1);
1335 ourlogfile = fopen(logfile, "a");
1336 }
1337
1338 static int64_t shutdown_start = 0;
1339 if (shutdown_start == 0) {
1340 shutdown_start = runtime_get_wall_time();
1341 }
1342
1343 while ((lm = STAILQ_FIRST(&logmsg_queue))) {
1344 int64_t log_delta = lm->when - shutdown_start;
1345 if (!pid1_magic && ourlogfile) {
1346 fprintf(ourlogfile, "%8lld%6u %-40s%6u %-40s %s\n", log_delta,
1347 lm->from_pid, lm->from_name, lm->about_pid, lm->about_name, lm->msg);
1348 fflush(ourlogfile);
1349 }
1350
1351 lm->from_name_offset = lm->from_name - (char *)lm;
1352 lm->about_name_offset = lm->about_name - (char *)lm;
1353 lm->msg_offset = lm->msg - (char *)lm;
1354 lm->session_name_offset = lm->session_name - (char *)lm;
1355
1356 memcpy(offset, lm, lm->obj_sz);
1357
1358 offset += lm->obj_sz;
1359
1360 logmsg_remove(lm);
1361 }
1362
1363 if (ourlogfile) {
1364 fflush(ourlogfile);
1365 }
1366
1367 return 0;
1368 }
1369
1370 void
1371 runtime_log_uncork_pending_drain(void)
1372 {
1373 mach_msg_type_number_t outvalCnt;
1374 mach_port_t tmp_port;
1375 vm_offset_t outval;
1376
1377 if (!drain_reply_port) {
1378 return;
1379 }
1380
1381 if (logmsg_queue_cnt == 0) {
1382 return;
1383 }
1384
1385 if (runtime_log_pack(&outval, &outvalCnt) != 0) {
1386 return;
1387 }
1388
1389 tmp_port = drain_reply_port;
1390 drain_reply_port = MACH_PORT_NULL;
1391
1392 if (unlikely(errno = job_mig_log_drain_reply(tmp_port, 0, outval, outvalCnt))) {
1393 (void)launchd_assumes(errno == MACH_SEND_INVALID_DEST);
1394 (void)launchd_assumes(launchd_mport_deallocate(tmp_port) == KERN_SUCCESS);
1395 }
1396
1397 mig_deallocate(outval, outvalCnt);
1398 }
1399
/* Flush the pending log-message queue to its current destination:
 *  - per-user launchd: forward the packed queue to PID 1 over the bootstrap
 *    port;
 *  - PID 1, normal operation: answer any client parked in runtime_log_drain();
 *  - PID 1, shutdown: append the messages to /var/log/launchd-shutdown.log.
 */
void
runtime_log_push(void)
{
	static pthread_mutex_t ourlock = PTHREAD_MUTEX_INITIALIZER;
	static int64_t shutdown_start, log_delta;
	mach_msg_type_number_t outvalCnt;
	struct logmsg_s *lm;
	vm_offset_t outval;

	if (logmsg_queue_cnt == 0) {
		(void)launchd_assumes(STAILQ_EMPTY(&logmsg_queue));
		return;
	} else if (!pid1_magic) {
		/* Per-user instances hand their messages up to PID 1. */
		if (runtime_log_pack(&outval, &outvalCnt) == 0) {
			(void)launchd_assumes(_vprocmgr_log_forward(inherited_bootstrap_port, (void *)outval, outvalCnt) == NULL);
			mig_deallocate(outval, outvalCnt);
		}
		return;
	}

	if (likely(!shutdown_in_progress && !fake_shutdown_in_progress)) {
		runtime_log_uncork_pending_drain();
		return;
	}

	/* Shutdown path: timestamps logged below are deltas from shutdown start. */
	if (unlikely(shutdown_start == 0)) {
		shutdown_start = runtime_get_wall_time();
		launchd_log_vm_stats();
	}

	/* The lock only guards the one-time rotate/open of the shutdown log. */
	pthread_mutex_lock(&ourlock);

	if (unlikely(ourlogfile == NULL) && g_log_pid1_shutdown) {
		rename("/var/log/launchd-shutdown.log", "/var/log/launchd-shutdown.log.1");
		ourlogfile = fopen("/var/log/launchd-shutdown.log", "a");
	}

	pthread_mutex_unlock(&ourlock);

	if (unlikely(!ourlogfile)) {
		return;
	}

	while ((lm = STAILQ_FIRST(&logmsg_queue))) {
		log_delta = lm->when - shutdown_start;

		fprintf(ourlogfile, "%8lld%6u %-40s%6u %-40s %s\n", log_delta,
				lm->from_pid, lm->from_name, lm->about_pid, lm->about_name, lm->msg);

		logmsg_remove(lm);
	}

	fflush(ourlogfile);
}
1454
/* Ingest a packed buffer of log messages forwarded by a per-user launchd
 * (the receiving counterpart of runtime_log_pack()) and splice the
 * individual messages into our own queue, tagging each with the sender's
 * uid/gid.  Always consumes (deallocates) the inval buffer.
 */
kern_return_t
runtime_log_forward(uid_t forward_uid, gid_t forward_gid, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	struct logmsg_s *lm, *lm_walk;
	mach_msg_type_number_t data_left = invalCnt;

	if (inval == 0) {
		return 0;
	}

	/* Walk the variable-length records laid back to back; each record
	 * declares its own size in obj_sz.
	 */
	for (lm_walk = (struct logmsg_s *)inval; (data_left > 0) && (lm_walk->obj_sz <= data_left); lm_walk = ((void *)lm_walk + lm_walk->obj_sz)) {
		/* malloc() does not return NULL if you ask it for an allocation of size 0.
		 * It will return a valid pointer that can be passed to free(). If we don't
		 * do this check, we'll wind up corrupting our heap in the subsequent
		 * assignments.
		 *
		 * We break out if this check fails because, obj_sz is supposed to include
		 * the size of the logmsg_s struct. If it claims to be of zero size, we
		 * can't safely increment our counter because something obviously got screwed
		 * up along the way, since this should always be at least sizeof(struct logmsg_s).
		 */
		if (!launchd_assumes(lm_walk->obj_sz > 0)) {
			runtime_syslog(LOG_WARNING, "Encountered a log message of size 0 with %u bytes left in forwarded data. Ignoring remaining messages.", data_left);
			break;
		}

		/* If malloc() keeps failing, we shouldn't put additional pressure on the system
		 * by attempting to add more messages to the log until it returns success
		 * log a failure, hope pressure lets off, and move on.
		 */
		if (!launchd_assumes(lm = malloc(lm_walk->obj_sz))) {
			runtime_syslog(LOG_WARNING, "Failed to allocate %llu bytes for log message with %u bytes left in forwarded data. Ignoring remaining messages.", lm_walk->obj_sz, data_left);
			break;
		}

		memcpy(lm, lm_walk, lm_walk->obj_sz);
		lm->sender_uid = forward_uid;
		lm->sender_gid = forward_gid;

		/* The sender stored record-relative string offsets in these fields
		 * (see runtime_log_pack()); rebase them into real pointers in our
		 * address space by adding the record's base address.
		 */
		lm->from_name += (size_t)lm;
		lm->about_name += (size_t)lm;
		lm->msg += (size_t)lm;
		lm->session_name += (size_t)lm;

		STAILQ_INSERT_TAIL(&logmsg_queue, lm, sqe);
		logmsg_queue_sz += lm->obj_sz;
		logmsg_queue_cnt++;

		data_left -= lm->obj_sz;
	}

	mig_deallocate(inval, invalCnt);

	return 0;
}
1510
1511 kern_return_t
1512 runtime_log_drain(mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
1513 {
1514 (void)launchd_assumes(drain_reply_port == 0);
1515
1516 if ((logmsg_queue_cnt == 0) || shutdown_in_progress || fake_shutdown_in_progress) {
1517 drain_reply_port = srp;
1518 (void)launchd_assumes(launchd_mport_notify_req(drain_reply_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);
1519
1520 return MIG_NO_REPLY;
1521 }
1522
1523 return runtime_log_pack(outval, outvalCnt);
1524 }
1525
1526 /*
1527 * We should break this into two reference counts.
1528 *
1529 * One for hard references that would prevent exiting.
1530 * One for soft references that would only prevent idle exiting.
1531 *
1532 * In the long run, reference counting should completely automate when a
1533 * process can and should exit.
1534 */
1535 void
1536 runtime_add_ref(void)
1537 {
1538 if (!pid1_magic) {
1539 #if !TARGET_OS_EMBEDDED
1540 _vproc_transaction_begin();
1541 #endif
1542 }
1543
1544 runtime_busy_cnt++;
1545 runtime_remove_timer();
1546 }
1547
1548 void
1549 runtime_del_ref(void)
1550 {
1551 if (!pid1_magic) {
1552 #if !TARGET_OS_EMBEDDED
1553 if (_vproc_transaction_count() == 0) {
1554 runtime_syslog(LOG_INFO, "Exiting cleanly.");
1555 }
1556
1557 runtime_closelog();
1558 _vproc_transaction_end();
1559 #endif
1560 }
1561
1562 runtime_busy_cnt--;
1563 runtime_install_timer();
1564 }
1565
1566 void
1567 runtime_add_weak_ref(void)
1568 {
1569 if (!pid1_magic) {
1570 #if !TARGET_OS_EMBEDDED
1571 _vproc_standby_begin();
1572 #endif
1573 }
1574 runtime_standby_cnt++;
1575 }
1576
1577 void
1578 runtime_del_weak_ref(void)
1579 {
1580 if (!pid1_magic) {
1581 #if !TARGET_OS_EMBEDDED
1582 _vproc_standby_end();
1583 #endif
1584 }
1585 runtime_standby_cnt--;
1586 }
1587
/* Arm a 30-second timer when a per-user launchd becomes idle, driving the
 * idle-exit bookkeeping.  PID 1 never idle-exits, so it skips this.
 */
void
runtime_install_timer(void)
{
	if (pid1_magic || runtime_busy_cnt != 0) {
		return;
	}

	(void)launchd_assumes(kevent_mod((uintptr_t)&g_runtime_busy_time, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 30, root_jobmgr) != -1);
}
1595
/* Cancel the idle-exit timer as soon as a busy reference exists again. */
void
runtime_remove_timer(void)
{
	if (pid1_magic || runtime_busy_cnt == 0) {
		return;
	}

	(void)launchd_assumes(kevent_mod((uintptr_t)&g_runtime_busy_time, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
}
1603
1604 kern_return_t
1605 catch_mach_exception_raise(mach_port_t exception_port __attribute__((unused)), mach_port_t thread, mach_port_t task,
1606 exception_type_t exception, mach_exception_data_t code, mach_msg_type_number_t codeCnt)
1607 {
1608 pid_t p4t = -1;
1609
1610 (void)launchd_assumes(pid_for_task(task, &p4t) == 0);
1611
1612 runtime_syslog(LOG_NOTICE, "%s(): PID: %u thread: 0x%x type: 0x%x code: %p codeCnt: 0x%x",
1613 __func__, p4t, thread, exception, code, codeCnt);
1614
1615 (void)launchd_assumes(launchd_mport_deallocate(thread) == KERN_SUCCESS);
1616 (void)launchd_assumes(launchd_mport_deallocate(task) == KERN_SUCCESS);
1617
1618 return KERN_SUCCESS;
1619 }
1620
/* MIG handler for EXCEPTION_STATE-style exception messages.  launchd logs
 * the event and passes the thread state through unchanged.
 */
kern_return_t
catch_mach_exception_raise_state(mach_port_t exception_port __attribute__((unused)),
		exception_type_t exception, const mach_exception_data_t code, mach_msg_type_number_t codeCnt,
		int *flavor, const thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
		thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
{
	runtime_syslog(LOG_NOTICE, "%s(): type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
			__func__, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);

	/* Propagate the old state as the new state, i.e. no modification. */
	memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
	*new_stateCnt = old_stateCnt;

	return KERN_SUCCESS;
}
1635
/* MIG handler for EXCEPTION_STATE_IDENTITY-style exception messages: logs
 * the faulting process/thread, passes the thread state through unchanged,
 * and releases the received port references.
 */
kern_return_t
catch_mach_exception_raise_state_identity(mach_port_t exception_port __attribute__((unused)), mach_port_t thread, mach_port_t task,
		exception_type_t exception, mach_exception_data_t code, mach_msg_type_number_t codeCnt,
		int *flavor, thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
		thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
{
	pid_t p4t = -1;

	(void)launchd_assumes(pid_for_task(task, &p4t) == 0);

	runtime_syslog(LOG_NOTICE, "%s(): PID: %u thread: 0x%x type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
			__func__, p4t, thread, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);

	/* Propagate the old state as the new state, i.e. no modification. */
	memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
	*new_stateCnt = old_stateCnt;

	/* Release the port references the exception message handed us. */
	(void)launchd_assumes(launchd_mport_deallocate(thread) == KERN_SUCCESS);
	(void)launchd_assumes(launchd_mport_deallocate(task) == KERN_SUCCESS);

	return KERN_SUCCESS;
}
1657
/* Log host VM statistics.  The first call records a baseline snapshot;
 * subsequent calls log the deltas relative to that baseline.
 */
void
launchd_log_vm_stats(void)
{
	static struct vm_statistics orig_stats;
	static bool did_first_pass;
	unsigned int count = HOST_VM_INFO_COUNT;
	struct vm_statistics stats, *statsp;
	mach_port_t mhs = mach_host_self();

	/* The first pass fills orig_stats (the baseline); later passes fill
	 * stats so deltas can be computed.
	 */
	statsp = did_first_pass ? &stats : &orig_stats;

	if (!launchd_assumes(host_statistics(mhs, HOST_VM_INFO, (host_info_t)statsp, &count) == KERN_SUCCESS)) {
		return;
	}

	(void)launchd_assumes(count == HOST_VM_INFO_COUNT);

	if (did_first_pass) {
		runtime_syslog(LOG_DEBUG, "VM statistics (now - orig): Free: %d Active: %d Inactive: %d Reactivations: %d PageIns: %d PageOuts: %d Faults: %d COW-Faults: %d Purgeable: %d Purges: %d",
				stats.free_count - orig_stats.free_count,
				stats.active_count - orig_stats.active_count,
				stats.inactive_count - orig_stats.inactive_count,
				stats.reactivations - orig_stats.reactivations,
				stats.pageins - orig_stats.pageins,
				stats.pageouts - orig_stats.pageouts,
				stats.faults - orig_stats.faults,
				stats.cow_faults - orig_stats.cow_faults,
				stats.purgeable_count - orig_stats.purgeable_count,
				stats.purges - orig_stats.purges);
	} else {
		runtime_syslog(LOG_DEBUG, "VM statistics (now): Free: %d Active: %d Inactive: %d Reactivations: %d PageIns: %d PageOuts: %d Faults: %d COW-Faults: %d Purgeable: %d Purges: %d",
				orig_stats.free_count,
				orig_stats.active_count,
				orig_stats.inactive_count,
				orig_stats.reactivations,
				orig_stats.pageins,
				orig_stats.pageouts,
				orig_stats.faults,
				orig_stats.cow_faults,
				orig_stats.purgeable_count,
				orig_stats.purges);

		did_first_pass = true;
	}

	/* mach_host_self() returns a port reference that must be released. */
	launchd_mport_deallocate(mhs);
}
1705
1706 int64_t
1707 runtime_get_wall_time(void)
1708 {
1709 struct timeval tv;
1710 int64_t r;
1711
1712 (void)launchd_assumes(gettimeofday(&tv, NULL) != -1);
1713
1714 r = tv.tv_sec;
1715 r *= USEC_PER_SEC;
1716 r += tv.tv_usec;
1717
1718 return r;
1719 }
1720
/* Current time in opaque (mach_absolute_time) units; convert with
 * runtime_opaque_time_to_nano().
 */
uint64_t
runtime_get_opaque_time(void)
{
	return mach_absolute_time();
}
1726
/* Opaque timestamp captured immediately after the most recent mach_msg()
 * return in the IPC loop (time_of_mach_msg_return).
 */
uint64_t
runtime_get_opaque_time_of_event(void)
{
	return time_of_mach_msg_return;
}
1732
/* Nanoseconds elapsed between opaque timestamp 'o' and the last recorded
 * mach_msg() event time.
 */
uint64_t
runtime_get_nanoseconds_since(uint64_t o)
{
	return runtime_opaque_time_to_nano(runtime_get_opaque_time_of_event() - o);
}
1738
/* Convert an interval in mach_absolute_time units to nanoseconds using the
 * timebase cached by do_file_init().  When numer == denom the units already
 * are nanoseconds and no math is needed; the branch-prediction hints below
 * encode which case each architecture is expected to hit.
 */
uint64_t
runtime_opaque_time_to_nano(uint64_t o)
{
#if defined(__i386__) || defined(__x86_64__)
	if (unlikely(tbi.numer != tbi.denom)) {
#elif defined(__ppc__) || defined(__ppc64__)
	if (likely(tbi.numer != tbi.denom)) {
#else
	if (tbi.numer != tbi.denom) {
#endif
#ifdef __LP64__
		/* 128-bit intermediate avoids overflow in o * numer. */
		__uint128_t tmp = o;
		tmp *= tbi.numer;
		tmp /= tbi.denom;
		o = tmp;
#else
		/* 32-bit: exact integer math while o * numer cannot overflow
		 * (tbi_safe_math_max = UINT64_MAX / numer), otherwise fall back to
		 * slightly lossy floating point.
		 */
		if (o <= tbi_safe_math_max) {
			o *= tbi.numer;
			o /= tbi.denom;
		} else {
			double d = o;
			d *= tbi_float_val;
			o = d;
		}
#endif
	}

	return o;
}
1768
/* One-time process initialization: cache the mach timebase, detect PID 1,
 * and latch the various debug/logging switches from marker files and
 * boot-args.
 */
void
do_file_init(void)
{
	struct stat sb;

	/* Cache timebase conversion factors for runtime_opaque_time_to_nano(). */
	launchd_assert(mach_timebase_info(&tbi) == 0);
	tbi_float_val = tbi.numer;
	tbi_float_val /= tbi.denom;
	tbi_safe_math_max = UINT64_MAX / tbi.numer;

	if (getpid() == 1) {
		pid1_magic = true;
	}

	/* Apple-internal logging: enabled on internal installs unless explicitly
	 * disabled by the marker file.
	 */
	if (stat("/AppleInternal", &sb) == 0 && stat("/var/db/disableAppleInternal", &sb) == -1) {
		do_apple_internal_logging = true;
	}

	/* Debug marker: open the log mask to LOG_DEBUG and echo to console. */
	if (stat("/var/db/.debug_launchd", &sb) == 0) {
		internal_mask_pri = LOG_UPTO(LOG_DEBUG);
		low_level_debug = true;
	}

	if (stat("/var/db/.launchd_log_per_user_shutdown", &sb) == 0) {
		g_log_per_user_shutdown = true;
	}

	if (stat("/var/db/.launchd_use_gmalloc", &sb) == 0) {
		g_use_gmalloc = true;
	}

	/* malloc stack logging supersedes (and disables) gmalloc. */
	if (stat("/var/db/.launchd_malloc_log_stacks", &sb) == 0) {
		g_malloc_log_stacks = true;
		g_use_gmalloc = false;
	}

	if (pid1_magic && stat("/var/db/.launchd_log_pid1_shutdown", &sb) == 0) {
		g_log_pid1_shutdown = true;
	}

	/* Scan kernel boot-args (PID 1 only) for the verbose-boot and
	 * SIGKILL-trap flags; len is bounded so strnstr never runs off the end.
	 */
	char bootargs[128];
	size_t len = sizeof(bootargs) - 1;
	int r = pid1_magic ? sysctlbyname("kern.bootargs", bootargs, &len, NULL, 0) : -1;
	if (r == 0) {
		if (strnstr(bootargs, "-v", len)) {
			g_verbose_boot = true;
		}
		if (strnstr(bootargs, "launchd_trap_sigkill_bugs", len)) {
			g_trap_sigkill_bugs = true;
		}
	}

	if (pid1_magic && g_verbose_boot && stat("/var/db/.launchd_shutdown_debugging", &sb) == 0) {
		g_shutdown_debugging = true;
	}

	if (stat("/var/db/.launchd_log_strict_usage", &sb) == 0) {
		g_log_strict_usage = true;
	}
}