/*
 * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

static const char *const __rcs_file_version__ = "$Revision: 23432 $";

#include "config.h"
#include "launchd_runtime.h"

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/boolean.h>
#include <mach/message.h>
#include <mach/notify.h>
#include <mach/mig_errors.h>
#include <mach/mach_traps.h>
#include <mach/mach_interface.h>
#include <mach/host_info.h>
#include <mach/mach_host.h>
#include <mach/exception.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/reboot.h>
#include <sys/fcntl.h>
#include <bsm/libbsm.h>
#include <malloc/malloc.h>
#include <unistd.h>
#include <pthread.h>
#include <errno.h>
#include <string.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <syslog.h>
#include <signal.h>
#include <dlfcn.h>

#include "launchd_internalServer.h"
#include "launchd_internal.h"
#include "notifyServer.h"

/* We shouldn't be including these */
#include "launch.h"
#include "launchd.h"
#include "launchd_core_logic.h"
#include "libvproc_internal.h"
#include "job_reply.h"

static mach_port_t ipc_port_set;
static mach_port_t demand_port_set;
static mach_port_t launchd_internal_port;
static int mainkq;

#define BULK_KEV_MAX 100
static struct kevent *bulk_kev;
static int bulk_kev_i;
static int bulk_kev_cnt;
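/* bulk_kev/bulk_kev_i/bulk_kev_cnt describe the batch of kevents currently
 * being dispatched by x_handle_kqueue(); runtime_close() consults them so it
 * can invalidate events still pending for a descriptor that is being closed. */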

static pthread_t kqueue_demand_thread;
static pthread_t demand_thread;

static void *mport_demand_loop(void *arg);
static void *kqueue_demand_loop(void *arg);
static void log_kevent_struct(int level, struct kevent *kev, int indx);

static void record_caller_creds(mach_msg_header_t *mh);
static void launchd_runtime2(mach_msg_size_t msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply);
static mach_msg_size_t max_msg_size;
static mig_callback *mig_cb_table;
static size_t mig_cb_table_sz;
static timeout_callback runtime_idle_callback;
static mach_msg_timeout_t runtime_idle_timeout;
static audit_token_t *au_tok;
static size_t runtime_busy_cnt;


static STAILQ_HEAD(, logmsg_s) logmsg_queue = STAILQ_HEAD_INITIALIZER(logmsg_queue);
static size_t logmsg_queue_sz;
static size_t logmsg_queue_cnt;
static mach_port_t drain_reply_port;
static void runtime_log_uncork_pending_drain(void);
static kern_return_t runtime_log_pack(vm_offset_t *outval, mach_msg_type_number_t *outvalCnt);
static void runtime_log_push(void);

static bool logmsg_add(struct runtime_syslog_attr *attr, int err_num, const char *msg);
static void logmsg_remove(struct logmsg_s *lm);

static const int sigigns[] = { SIGHUP, SIGINT, SIGPIPE, SIGALRM, SIGTERM,
    SIGURG, SIGTSTP, SIGCONT, SIGTTIN, SIGTTOU, SIGIO, SIGXCPU,
    SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGINFO, SIGUSR1, SIGUSR2
};
static sigset_t sigign_set;

void
launchd_runtime_init(void)
{
    mach_msg_size_t mxmsgsz;
    pthread_attr_t attr;

    launchd_assert((mainkq = kqueue()) != -1);

    launchd_assert((errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &demand_port_set)) == KERN_SUCCESS);
    launchd_assert((errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &ipc_port_set)) == KERN_SUCCESS);

    launchd_assert(launchd_mport_create_recv(&launchd_internal_port) == KERN_SUCCESS);
    launchd_assert(launchd_mport_make_send(launchd_internal_port) == KERN_SUCCESS);

    /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
    mxmsgsz = sizeof(union __RequestUnion__x_launchd_internal_subsystem);
    if (x_launchd_internal_subsystem.maxsize > mxmsgsz) {
        mxmsgsz = x_launchd_internal_subsystem.maxsize;
    }

    launchd_assert(runtime_add_mport(launchd_internal_port, launchd_internal_demux, mxmsgsz) == KERN_SUCCESS);

    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
    launchd_assert(pthread_create(&kqueue_demand_thread, &attr, kqueue_demand_loop, NULL) == 0);
    pthread_attr_destroy(&attr);

    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
    launchd_assert(pthread_create(&demand_thread, &attr, mport_demand_loop, NULL) == 0);
    pthread_attr_destroy(&attr);
}

void
launchd_runtime_init2(void)
{
    size_t i;

    for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
        sigaddset(&sigign_set, sigigns[i]);
        launchd_assumes(signal(sigigns[i], SIG_IGN) != SIG_ERR);
    }
}

void *
mport_demand_loop(void *arg __attribute__((unused)))
{
    mach_msg_empty_rcv_t dummy;
    kern_return_t kr;

    for (;;) {
        kr = mach_msg(&dummy.header, MACH_RCV_MSG|MACH_RCV_LARGE, 0, 0, demand_port_set, 0, MACH_PORT_NULL);
        if (kr == MACH_RCV_PORT_CHANGED) {
            break;
        } else if (!launchd_assumes(kr == MACH_RCV_TOO_LARGE)) {
            continue;
        }
        launchd_assumes(handle_mport(launchd_internal_port) == 0);
    }

    return NULL;
}

const char *
proc_flags_to_C_names(unsigned int flags)
{
#define MAX_PFLAG_STR "P_ADVLOCK|P_CONTROLT|P_LP64|P_NOCLDSTOP|P_PPWAIT|P_PROFIL|P_SELECT|P_CONTINUED|P_SUGID|P_SYSTEM|P_TIMEOUT|P_TRACED|P_RESV3|P_WEXIT|P_EXEC|P_OWEUPC|P_AFFINITY|P_TRANSLATED|P_RESV5|P_CHECKOPENEVT|P_DEPENDENCY_CAPABLE|P_REBOOT|P_TBE|P_RESV7|P_THCWD|P_RESV9|P_RESV10|P_RESV11|P_NOSHLIB|P_FORCEQUOTA|P_NOCLDWAIT|P_NOREMOTEHANG|0xdeadbeeffeedface"
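    /* The literal above concatenates every flag name plus a worst-case hex
     * fallback, so sizeof(MAX_PFLAG_STR) bounds the longest string this
     * function can ever write into flags_buf. */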

    static char flags_buf[sizeof(MAX_PFLAG_STR)];
    char *flags_off = NULL;

    if (!flags) {
        return "";
    }

    while (flags) {
        if (flags_off) {
            *flags_off = '|';
            flags_off++;
            *flags_off = '\0';
        } else {
            flags_off = flags_buf;
        }

#define FLAGIF(f) if (flags & f) { flags_off += sprintf(flags_off, #f); flags &= ~f; }
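        /* FLAGIF() appends the flag's name to the buffer and clears that bit
         * from `flags`, so the enclosing while loop terminates once every set
         * bit has been either named or dumped as hex by the trailing else. */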

        FLAGIF(P_ADVLOCK)
        else FLAGIF(P_CONTROLT)
        else FLAGIF(P_LP64)
        else FLAGIF(P_NOCLDSTOP)
        else FLAGIF(P_PPWAIT)
        else FLAGIF(P_PROFIL)
        else FLAGIF(P_SELECT)
        else FLAGIF(P_CONTINUED)
        else FLAGIF(P_SUGID)
        else FLAGIF(P_SYSTEM)
        else FLAGIF(P_TIMEOUT)
        else FLAGIF(P_TRACED)
        else FLAGIF(P_RESV3)
        else FLAGIF(P_WEXIT)
        else FLAGIF(P_EXEC)
        else FLAGIF(P_OWEUPC)
        else FLAGIF(P_AFFINITY)
        else FLAGIF(P_TRANSLATED)
        else FLAGIF(P_RESV5)
        else FLAGIF(P_CHECKOPENEVT)
        else FLAGIF(P_DEPENDENCY_CAPABLE)
        else FLAGIF(P_REBOOT)
        else FLAGIF(P_TBE)
        else FLAGIF(P_RESV7)
        else FLAGIF(P_THCWD)
        else FLAGIF(P_RESV9)
        else FLAGIF(P_RESV10)
        else FLAGIF(P_RESV11)
        else FLAGIF(P_NOSHLIB)
        else FLAGIF(P_FORCEQUOTA)
        else FLAGIF(P_NOCLDWAIT)
        else FLAGIF(P_NOREMOTEHANG)
        else {
            flags_off += sprintf(flags_off, "0x%x", flags);
            flags = 0;
        }
    }

    return flags_buf;
}
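/* For example, proc_flags_to_C_names(P_LP64|P_EXEC) yields "P_LP64|P_EXEC";
 * any bits with no matching name are rendered once as hex, which also clears
 * the remainder and ends the loop. */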

const char *
reboot_flags_to_C_names(unsigned int flags)
{
#define MAX_RB_STR "RB_ASKNAME|RB_SINGLE|RB_NOSYNC|RB_KDB|RB_HALT|RB_INITNAME|RB_DFLTROOT|RB_ALTBOOT|RB_UNIPROC|RB_SAFEBOOT|RB_UPSDELAY|0xdeadbeeffeedface"
    static char flags_buf[sizeof(MAX_RB_STR)];
    char *flags_off = NULL;

    if (flags) while (flags) {
        if (flags_off) {
            *flags_off = '|';
            flags_off++;
            *flags_off = '\0';
        } else {
            flags_off = flags_buf;
        }

        FLAGIF(RB_ASKNAME)
        else FLAGIF(RB_SINGLE)
        else FLAGIF(RB_NOSYNC)
        else FLAGIF(RB_KDB)
        else FLAGIF(RB_HALT)
        else FLAGIF(RB_INITNAME)
        else FLAGIF(RB_DFLTROOT)
        else FLAGIF(RB_ALTBOOT)
        else FLAGIF(RB_UNIPROC)
        else FLAGIF(RB_SAFEBOOT)
        else FLAGIF(RB_UPSDELAY)
        else {
            flags_off += sprintf(flags_off, "0x%x", flags);
            flags = 0;
        }
    } else {
        return "RB_AUTOBOOT";
    }

    return flags_buf;
}

const char *
signal_to_C_name(unsigned int sig)
{
    static char unknown[25];

#define SIG2CASE(sg) case sg: return #sg

    switch (sig) {
    SIG2CASE(SIGHUP);
    SIG2CASE(SIGINT);
    SIG2CASE(SIGQUIT);
    SIG2CASE(SIGILL);
    SIG2CASE(SIGTRAP);
    SIG2CASE(SIGABRT);
    SIG2CASE(SIGFPE);
    SIG2CASE(SIGKILL);
    SIG2CASE(SIGBUS);
    SIG2CASE(SIGSEGV);
    SIG2CASE(SIGSYS);
    SIG2CASE(SIGPIPE);
    SIG2CASE(SIGALRM);
    SIG2CASE(SIGTERM);
    SIG2CASE(SIGURG);
    SIG2CASE(SIGSTOP);
    SIG2CASE(SIGTSTP);
    SIG2CASE(SIGCONT);
    SIG2CASE(SIGCHLD);
    SIG2CASE(SIGTTIN);
    SIG2CASE(SIGTTOU);
    SIG2CASE(SIGIO);
    SIG2CASE(SIGXCPU);
    SIG2CASE(SIGXFSZ);
    SIG2CASE(SIGVTALRM);
    SIG2CASE(SIGPROF);
    SIG2CASE(SIGWINCH);
    SIG2CASE(SIGINFO);
    SIG2CASE(SIGUSR1);
    SIG2CASE(SIGUSR2);
    default:
        snprintf(unknown, sizeof(unknown), "%u", sig);
        return unknown;
    }
}

void
log_kevent_struct(int level, struct kevent *kev, int indx)
{
    const char *filter_str;
    char ident_buf[100];
    char filter_buf[100];
    char fflags_buf[1000];
    char flags_buf[1000] = "0x0";
    char *flags_off = NULL;
    char *fflags_off = NULL;
    unsigned short flags = kev->flags;
    unsigned int fflags = kev->fflags;

    if (flags) while (flags) {
        if (flags_off) {
            *flags_off = '|';
            flags_off++;
            *flags_off = '\0';
        } else {
            flags_off = flags_buf;
        }

        FLAGIF(EV_ADD)
        else FLAGIF(EV_RECEIPT)
        else FLAGIF(EV_DELETE)
        else FLAGIF(EV_ENABLE)
        else FLAGIF(EV_DISABLE)
        else FLAGIF(EV_CLEAR)
        else FLAGIF(EV_EOF)
        else FLAGIF(EV_ONESHOT)
        else FLAGIF(EV_ERROR)
        else {
            flags_off += sprintf(flags_off, "0x%x", flags);
            flags = 0;
        }
    }

    snprintf(ident_buf, sizeof(ident_buf), "%ld", kev->ident);
    snprintf(fflags_buf, sizeof(fflags_buf), "0x%x", fflags);

    switch (kev->filter) {
    case EVFILT_READ:
        filter_str = "EVFILT_READ";
        break;
    case EVFILT_WRITE:
        filter_str = "EVFILT_WRITE";
        break;
    case EVFILT_AIO:
        filter_str = "EVFILT_AIO";
        break;
    case EVFILT_VNODE:
        filter_str = "EVFILT_VNODE";
        if (fflags) while (fflags) {
            if (fflags_off) {
                *fflags_off = '|';
                fflags_off++;
                *fflags_off = '\0';
            } else {
                fflags_off = fflags_buf;
            }

#define FFLAGIF(ff) if (fflags & ff) { fflags_off += sprintf(fflags_off, #ff); fflags &= ~ff; }

            FFLAGIF(NOTE_DELETE)
            else FFLAGIF(NOTE_WRITE)
            else FFLAGIF(NOTE_EXTEND)
            else FFLAGIF(NOTE_ATTRIB)
            else FFLAGIF(NOTE_LINK)
            else FFLAGIF(NOTE_RENAME)
            else FFLAGIF(NOTE_REVOKE)
            else {
                fflags_off += sprintf(fflags_off, "0x%x", fflags);
                fflags = 0;
            }
        }
        break;
    case EVFILT_PROC:
        filter_str = "EVFILT_PROC";
        if (fflags) while (fflags) {
            if (fflags_off) {
                *fflags_off = '|';
                fflags_off++;
                *fflags_off = '\0';
            } else {
                fflags_off = fflags_buf;
            }

            FFLAGIF(NOTE_EXIT)
            else FFLAGIF(NOTE_REAP)
            else FFLAGIF(NOTE_FORK)
            else FFLAGIF(NOTE_EXEC)
            else FFLAGIF(NOTE_SIGNAL)
            else FFLAGIF(NOTE_TRACK)
            else FFLAGIF(NOTE_TRACKERR)
            else FFLAGIF(NOTE_CHILD)
            else {
                fflags_off += sprintf(fflags_off, "0x%x", fflags);
                fflags = 0;
            }
        }
        break;
    case EVFILT_SIGNAL:
        filter_str = "EVFILT_SIGNAL";
        strcpy(ident_buf, signal_to_C_name(kev->ident));
        break;
    case EVFILT_TIMER:
        filter_str = "EVFILT_TIMER";
        snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
        if (fflags) while (fflags) {
            if (fflags_off) {
                *fflags_off = '|';
                fflags_off++;
                *fflags_off = '\0';
            } else {
                fflags_off = fflags_buf;
            }

            FFLAGIF(NOTE_SECONDS)
            else FFLAGIF(NOTE_USECONDS)
            else FFLAGIF(NOTE_NSECONDS)
            else FFLAGIF(NOTE_ABSOLUTE)
            else {
                fflags_off += sprintf(fflags_off, "0x%x", fflags);
                fflags = 0;
            }
        }
        break;
    case EVFILT_MACHPORT:
        filter_str = "EVFILT_MACHPORT";
        snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
        break;
    case EVFILT_FS:
        filter_str = "EVFILT_FS";
        snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
        if (fflags) while (fflags) {
            if (fflags_off) {
                *fflags_off = '|';
                fflags_off++;
                *fflags_off = '\0';
            } else {
                fflags_off = fflags_buf;
            }

            FFLAGIF(VQ_NOTRESP)
            else FFLAGIF(VQ_NEEDAUTH)
            else FFLAGIF(VQ_LOWDISK)
            else FFLAGIF(VQ_MOUNT)
            else FFLAGIF(VQ_UNMOUNT)
            else FFLAGIF(VQ_DEAD)
            else FFLAGIF(VQ_ASSIST)
            else FFLAGIF(VQ_NOTRESPLOCK)
            else FFLAGIF(VQ_UPDATE)
            else {
                fflags_off += sprintf(fflags_off, "0x%x", fflags);
                fflags = 0;
            }
        }
        break;
    default:
        snprintf(filter_buf, sizeof(filter_buf), "%d", kev->filter);
        filter_str = filter_buf;
        break;
    }

    runtime_syslog(level, "KEVENT[%d]: udata = %p data = 0x%lx ident = %s filter = %s flags = %s fflags = %s",
            indx, kev->udata, kev->data, ident_buf, filter_str, flags_buf, fflags_buf);
}

kern_return_t
x_handle_mport(mach_port_t junk __attribute__((unused)))
{
    mach_port_name_array_t members;
    mach_msg_type_number_t membersCnt;
    mach_port_status_t status;
    mach_msg_type_number_t statusCnt;
    struct kevent kev;
    unsigned int i;

    if (!launchd_assumes((errno = mach_port_get_set_status(mach_task_self(), demand_port_set, &members, &membersCnt)) == KERN_SUCCESS)) {
        return 1;
    }

    for (i = 0; i < membersCnt; i++) {
        statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
        if (mach_port_get_attributes(mach_task_self(), members[i], MACH_PORT_RECEIVE_STATUS, (mach_port_info_t)&status,
                &statusCnt) != KERN_SUCCESS) {
            continue;
        }
        if (status.mps_msgcount) {
            EV_SET(&kev, members[i], EVFILT_MACHPORT, 0, 0, 0, job_find_by_service_port(members[i]));
#if 0
            if (launchd_assumes(kev.udata != NULL)) {
#endif
                log_kevent_struct(LOG_DEBUG, &kev, 0);
                (*((kq_callback *)kev.udata))(kev.udata, &kev);
#if 0
            } else {
                log_kevent_struct(LOG_ERR, &kev, 0);
            }
#endif
            /* the callback may have tainted our ability to continue this for loop */
            break;
        }
    }

    launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)members,
            (vm_size_t) membersCnt * sizeof(mach_port_name_t)) == KERN_SUCCESS);

    return 0;
}

void *
kqueue_demand_loop(void *arg __attribute__((unused)))
{
    fd_set rfds;

    /*
     * Yes, at first glance, calling select() on a kqueue seems silly.
     *
     * This avoids a race condition between the main thread and this helper
     * thread by ensuring that we drain kqueue events on the same thread
     * that manipulates the kqueue.
     */
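    /* Concretely: this thread only blocks in select() until the kqueue
     * becomes readable, then notifies the main thread over MIG via
     * handle_kqueue(); the main thread performs the actual zero-timeout
     * kevent() drain in x_handle_kqueue(). */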

    for (;;) {
        FD_ZERO(&rfds);
        FD_SET(mainkq, &rfds);
        if (launchd_assumes(select(mainkq + 1, &rfds, NULL, NULL, NULL) == 1)) {
            launchd_assumes(handle_kqueue(launchd_internal_port, mainkq) == 0);
        }
    }

    return NULL;
}

kern_return_t
x_handle_kqueue(mach_port_t junk __attribute__((unused)), integer_t fd)
{
    struct timespec ts = { 0, 0 };
    struct kevent kev[BULK_KEV_MAX];
    int i;

    bulk_kev = kev;

    launchd_assumes((bulk_kev_cnt = kevent(fd, NULL, 0, kev, BULK_KEV_MAX, &ts)) != -1);

    if (bulk_kev_cnt > 0) {
#if 0
        Dl_info dli;

        if (launchd_assumes(malloc_size(kev.udata) || dladdr(kev.udata, &dli))) {
#endif
        for (i = 0; i < bulk_kev_cnt; i++) {
            log_kevent_struct(LOG_DEBUG, &kev[i], i);
        }
        for (i = 0; i < bulk_kev_cnt; i++) {
            bulk_kev_i = i;
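            /* A zeroed filter marks an event that runtime_close()
             * invalidated after this batch was read; skip it. */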
            if (kev[i].filter) {
                (*((kq_callback *)kev[i].udata))(kev[i].udata, &kev[i]);
            }
        }
#if 0
        } else {
            log_kevent_struct(LOG_ERR, &kev[0], 0);
        }
#endif
    }

    bulk_kev = NULL;

    return 0;
}


void
launchd_runtime(void)
{
    mig_reply_error_t *req = NULL, *resp = NULL;
    mach_msg_size_t mz = max_msg_size;
    int flags = VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|TRUE; /* TRUE doubles as VM_FLAGS_ANYWHERE (both are 1) */

    for (;;) {
        if (req) {
            launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)req, mz) == KERN_SUCCESS);
            req = NULL;
        }
        if (resp) {
            launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)resp, mz) == KERN_SUCCESS);
            resp = NULL;
        }

        mz = max_msg_size;

        if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t *)&req, mz, flags) == KERN_SUCCESS)) {
            continue;
        }
        if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t *)&resp, mz, flags) == KERN_SUCCESS)) {
            continue;
        }

        launchd_runtime2(mz, req, resp);

        /* If we get here, max_msg_size probably changed... */
    }
}

kern_return_t
launchd_set_bport(mach_port_t name)
{
    return errno = task_set_bootstrap_port(mach_task_self(), name);
}

kern_return_t
launchd_get_bport(mach_port_t *name)
{
    return errno = task_get_bootstrap_port(mach_task_self(), name);
}

kern_return_t
launchd_mport_notify_req(mach_port_t name, mach_msg_id_t which)
{
    mach_port_mscount_t msgc = (which == MACH_NOTIFY_NO_SENDERS) ? 1 : 0;
    mach_port_t previous, where = (which == MACH_NOTIFY_NO_SENDERS) ? name : launchd_internal_port;

    if (which == MACH_NOTIFY_NO_SENDERS) {
        /* Always make sure the send count is zero, in case a receive right is reused */
        errno = mach_port_set_mscount(mach_task_self(), name, 0);
        if (errno != KERN_SUCCESS) {
            return errno;
        }
    }

    errno = mach_port_request_notification(mach_task_self(), name, which, msgc, where,
            MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);

    if (errno == 0 && previous != MACH_PORT_NULL) {
        launchd_assumes(launchd_mport_deallocate(previous) == KERN_SUCCESS);
    }

    return errno;
}

pid_t
runtime_fork(mach_port_t bsport)
{
    sigset_t emptyset, oset;
    pid_t r = -1;
    int saved_errno;
    size_t i;

    sigemptyset(&emptyset);

    launchd_assumes(launchd_mport_make_send(bsport) == KERN_SUCCESS);
    launchd_assumes(launchd_set_bport(bsport) == KERN_SUCCESS);
    launchd_assumes(launchd_mport_deallocate(bsport) == KERN_SUCCESS);

    launchd_assumes(sigprocmask(SIG_BLOCK, &sigign_set, &oset) != -1);
    for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
        launchd_assumes(signal(sigigns[i], SIG_DFL) != SIG_ERR);
    }

    r = fork();
    saved_errno = errno;

    if (r != 0) {
        for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
            launchd_assumes(signal(sigigns[i], SIG_IGN) != SIG_ERR);
        }
        launchd_assumes(sigprocmask(SIG_SETMASK, &oset, NULL) != -1);
        launchd_assumes(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
    } else {
        launchd_assumes(sigprocmask(SIG_SETMASK, &emptyset, NULL) != -1);
    }

    errno = saved_errno;

    return r;
}

void
runtime_set_timeout(timeout_callback to_cb, unsigned int sec)
{
    if (sec == 0 || to_cb == NULL) {
        runtime_idle_callback = NULL;
        runtime_idle_timeout = 0;
        return;
    }

    runtime_idle_callback = to_cb;
    runtime_idle_timeout = sec * 1000;
}

kern_return_t
runtime_add_mport(mach_port_t name, mig_callback demux, mach_msg_size_t msg_size)
{
    size_t needed_table_sz = (MACH_PORT_INDEX(name) + 1) * sizeof(mig_callback);
    mach_port_t target_set = demux ? ipc_port_set : demand_port_set;

    msg_size = round_page(msg_size + MAX_TRAILER_SIZE);

    if (needed_table_sz > mig_cb_table_sz) {
        needed_table_sz *= 2; /* Let's try and avoid realloc'ing for a while */
        mig_callback *new_table = malloc(needed_table_sz);

        if (!launchd_assumes(new_table != NULL)) {
            return KERN_RESOURCE_SHORTAGE;
        }

        if (mig_cb_table) {
            memcpy(new_table, mig_cb_table, mig_cb_table_sz);
            free(mig_cb_table);
        }

        mig_cb_table_sz = needed_table_sz;
        mig_cb_table = new_table;
    }

    mig_cb_table[MACH_PORT_INDEX(name)] = demux;

    if (msg_size > max_msg_size) {
        max_msg_size = msg_size;
    }

    return errno = mach_port_move_member(mach_task_self(), name, target_set);
}

kern_return_t
runtime_remove_mport(mach_port_t name)
{
    mig_cb_table[MACH_PORT_INDEX(name)] = NULL;

    return errno = mach_port_move_member(mach_task_self(), name, MACH_PORT_NULL);
}

kern_return_t
launchd_mport_make_send(mach_port_t name)
{
    return errno = mach_port_insert_right(mach_task_self(), name, name, MACH_MSG_TYPE_MAKE_SEND);
}

kern_return_t
launchd_mport_close_recv(mach_port_t name)
{
    return errno = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE, -1);
}

kern_return_t
launchd_mport_create_recv(mach_port_t *name)
{
    return errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, name);
}

kern_return_t
launchd_mport_deallocate(mach_port_t name)
{
    return errno = mach_port_deallocate(mach_task_self(), name);
}

int
kevent_bulk_mod(struct kevent *kev, size_t kev_cnt)
{
    size_t i;

    for (i = 0; i < kev_cnt; i++) {
        kev[i].flags |= EV_CLEAR|EV_RECEIPT;
    }

    return kevent(mainkq, kev, kev_cnt, kev, kev_cnt, NULL);
}

int
kevent_mod(uintptr_t ident, short filter, u_short flags, u_int fflags, intptr_t data, void *udata)
{
    struct kevent kev;
    int r;

    switch (filter) {
    case EVFILT_READ:
    case EVFILT_WRITE:
        break;
    default:
        flags |= EV_CLEAR;
        break;
    }

    flags |= EV_RECEIPT;

    if (flags & EV_ADD && !launchd_assumes(udata != NULL)) {
        errno = EINVAL;
        return -1;
    }

    EV_SET(&kev, ident, filter, flags, fflags, data, udata);

    r = kevent(mainkq, &kev, 1, &kev, 1, NULL);

    if (!launchd_assumes(r == 1)) {
        return -1;
    }

    if (launchd_assumes(kev.flags & EV_ERROR)) {
        if ((flags & EV_ADD) && kev.data) {
            runtime_syslog(LOG_DEBUG, "%s(): See the next line...", __func__);
            log_kevent_struct(LOG_DEBUG, &kev, 0);
            errno = kev.data;
            return -1;
        }
    }

    return r;
}
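/* Typical use (hypothetical callback name): register for SIGUSR1 delivery on
 * the main kqueue; EV_CLEAR and EV_RECEIPT are applied automatically above.
 *
 *     static kq_callback my_signal_cb = some_handler;  // assumed helper
 *     kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, &my_signal_cb);
 */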

boolean_t
launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply)
{
    if (launchd_internal_server_routine(Request)) {
        return launchd_internal_server(Request, Reply);
    }

    return notify_server(Request, Reply);
}

kern_return_t
do_mach_notify_port_destroyed(mach_port_t notify, mach_port_t rights)
{
    /* This message is sent to us when a receive right is returned to us. */

    if (!launchd_assumes(job_ack_port_destruction(rights))) {
        launchd_assumes(launchd_mport_close_recv(rights) == KERN_SUCCESS);
    }

    return KERN_SUCCESS;
}

kern_return_t
do_mach_notify_port_deleted(mach_port_t notify, mach_port_name_t name)
{
    /* If we deallocate/destroy/mod_ref away a port with a pending
     * notification, the original notification message is replaced with
     * this message. To quote a Mach kernel expert, "the kernel has a
     * send-once right that has to be used somehow."
     */
    return KERN_SUCCESS;
}

kern_return_t
do_mach_notify_no_senders(mach_port_t notify, mach_port_mscount_t mscount)
{
    job_t j = job_mig_intran(notify);

    /* This message is sent to us when the last customer of one of our
     * objects goes away.
     */

    if (!launchd_assumes(j != NULL)) {
        return KERN_FAILURE;
    }

    job_ack_no_senders(j);

    return KERN_SUCCESS;
}

kern_return_t
do_mach_notify_send_once(mach_port_t notify)
{
    /* This message is sent to us every time we close a port that we have
     * outstanding Mach notification requests on. We can safely ignore this
     * message.
     */

    return KERN_SUCCESS;
}

kern_return_t
do_mach_notify_dead_name(mach_port_t notify, mach_port_name_t name)
{
    /* This message is sent to us when one of our send rights no longer has
     * a receiver somewhere else on the system.
     */

    if (name == drain_reply_port) {
        launchd_assumes(launchd_mport_deallocate(name) == KERN_SUCCESS);
        drain_reply_port = MACH_PORT_NULL;
    }

    if (launchd_assumes(root_jobmgr != NULL)) {
        root_jobmgr = jobmgr_delete_anything_with_port(root_jobmgr, name);
    }

    /* A dead-name notification about a port appears to increment the
     * rights on said port. Let's deallocate it so that we don't leak
     * dead-name ports.
     */
    launchd_assumes(launchd_mport_deallocate(name) == KERN_SUCCESS);

    return KERN_SUCCESS;
}

void
record_caller_creds(mach_msg_header_t *mh)
{
    mach_msg_max_trailer_t *tp;
    size_t trailer_size;

    tp = (mach_msg_max_trailer_t *)((vm_offset_t)mh + round_msg(mh->msgh_size));

    trailer_size = tp->msgh_trailer_size - (mach_msg_size_t)(sizeof(mach_msg_trailer_type_t) - sizeof(mach_msg_trailer_size_t));

    if (trailer_size < (mach_msg_size_t)sizeof(audit_token_t)) {
        au_tok = NULL;
        return;
    }

    au_tok = &tp->msgh_audit;
}

bool
runtime_get_caller_creds(struct ldcred *ldc)
{
    if (!au_tok) {
        return false;
    }

    audit_token_to_au32(*au_tok, /* audit UID */ NULL, &ldc->euid,
            &ldc->egid, &ldc->uid, &ldc->gid, &ldc->pid,
            &ldc->asid, /* au_tid_t */ NULL);

    return true;
}
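/* Example (sketch): a MIG routine can authorize its caller with
 *
 *     struct ldcred ldc;
 *     if (runtime_get_caller_creds(&ldc) && ldc.euid == 0) {
 *         // request came from root
 *     }
 *
 * This is only valid while handling the current message, since au_tok points
 * into the request buffer's audit trailer recorded by record_caller_creds(). */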

void
launchd_runtime2(mach_msg_size_t msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply)
{
    mach_msg_options_t options, tmp_options;
    mig_reply_error_t *bufTemp;
    mig_callback the_demux;
    mach_msg_timeout_t to;
    mach_msg_return_t mr;

    options = MACH_RCV_MSG|MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT) |
            MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0);

    tmp_options = options;

    for (;;) {
        to = MACH_MSG_TIMEOUT_NONE;

        if (msg_size != max_msg_size) {
            /* The buffer isn't big enough to receive messages anymore... */
            tmp_options &= ~MACH_RCV_MSG;
            options &= ~MACH_RCV_MSG;
            if (!(tmp_options & MACH_SEND_MSG)) {
                return;
            }
        }

        if ((tmp_options & MACH_RCV_MSG) && (runtime_idle_callback || (runtime_busy_cnt == 0))) {
            tmp_options |= MACH_RCV_TIMEOUT;

            if (!(tmp_options & MACH_SEND_TIMEOUT)) {
                to = runtime_busy_cnt ? runtime_idle_timeout : (RUNTIME_ADVISABLE_IDLE_TIMEOUT * 1000);
            }
        }

        runtime_log_push();

        mr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
                msg_size, ipc_port_set, to, MACH_PORT_NULL);

        tmp_options = options;

        if (mr == MACH_SEND_INVALID_DEST || mr == MACH_SEND_TIMED_OUT) {
            /* We need to clean up and start over. */
            if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
                mach_msg_destroy(&bufReply->Head);
            }
            continue;
        } else if (mr == MACH_RCV_TIMED_OUT) {
            if (to != MACH_MSG_TIMEOUT_NONE) {
                if (runtime_busy_cnt == 0) {
                    launchd_shutdown();
                } else if (runtime_idle_callback) {
                    runtime_idle_callback();
                }
            }
            continue;
        } else if (!launchd_assumes(mr == MACH_MSG_SUCCESS)) {
            continue;
        }

        bufTemp = bufRequest;
        bufRequest = bufReply;
        bufReply = bufTemp;

        if (!(tmp_options & MACH_RCV_MSG)) {
            continue;
        }

        /* we have another request message */

        if (!launchd_assumes(mig_cb_table != NULL)) {
            break;
        }

        the_demux = mig_cb_table[MACH_PORT_INDEX(bufRequest->Head.msgh_local_port)];

        if (!launchd_assumes(the_demux != NULL)) {
            break;
        }

        record_caller_creds(&bufRequest->Head);

        /*
         * This is a total hack. We really need a bit in the kernel's proc
         * struct to declare our intent.
         */
        static int no_hang_fd = -1;
        if (no_hang_fd == -1) {
            no_hang_fd = _fd(open("/dev/autofs_nowait", 0));
        }

        if (the_demux(&bufRequest->Head, &bufReply->Head) == FALSE) {
            /* XXX - also gross */
            if (bufRequest->Head.msgh_id == MACH_NOTIFY_NO_SENDERS) {
                notify_server(&bufRequest->Head, &bufReply->Head);
            }
        }

        if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
            if (bufReply->RetCode == MIG_NO_REPLY) {
                bufReply->Head.msgh_remote_port = MACH_PORT_NULL;
            } else if ((bufReply->RetCode != KERN_SUCCESS) && (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
                /* destroy the request - but not the reply port */
                bufRequest->Head.msgh_remote_port = MACH_PORT_NULL;
                mach_msg_destroy(&bufRequest->Head);
            }
        }

        if (bufReply->Head.msgh_remote_port != MACH_PORT_NULL) {
            tmp_options |= MACH_SEND_MSG;

            if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != MACH_MSG_TYPE_MOVE_SEND_ONCE) {
                tmp_options |= MACH_SEND_TIMEOUT;
            }
        }
    }
}

int
runtime_close(int fd)
{
    int i;

    if (bulk_kev) for (i = bulk_kev_i + 1; i < bulk_kev_cnt; i++) {
        switch (bulk_kev[i].filter) {
        case EVFILT_VNODE:
        case EVFILT_WRITE:
        case EVFILT_READ:
            if ((int)bulk_kev[i].ident == fd) {
                runtime_syslog(LOG_DEBUG, "Skipping kevent index: %d", i);
                bulk_kev[i].filter = 0;
            }
        default:
            break;
        }
    }

    return close(fd);
}

static FILE *ourlogfile;

void
runtime_closelog(void)
{
    if (ourlogfile) {
        launchd_assumes(fflush(ourlogfile) == 0);
        launchd_assumes(runtime_fsync(fileno(ourlogfile)) != -1);
    }
}

int
runtime_fsync(int fd)
{
    if (debug_shutdown_hangs) {
        return fcntl(fd, F_FULLFSYNC, NULL);
    } else {
        return fsync(fd);
    }
}

static int internal_mask_pri = LOG_UPTO(LOG_NOTICE);
//static int internal_mask_pri = LOG_UPTO(LOG_DEBUG);

int
runtime_setlogmask(int maskpri)
{
    internal_mask_pri = maskpri;

    return internal_mask_pri;
}

void
runtime_syslog(int pri, const char *message, ...)
{
    struct runtime_syslog_attr attr = {
        "com.apple.launchd", "com.apple.launchd",
        getpid() == 1 ? "System" : "Background",
        pri, getuid(), getpid(), getpid()
    };
    va_list ap;

    va_start(ap, message);

    runtime_vsyslog(&attr, message, ap);

    va_end(ap);
}

void
runtime_vsyslog(struct runtime_syslog_attr *attr, const char *message, va_list args)
{
    static pthread_mutex_t ourlock = PTHREAD_MUTEX_INITIALIZER;
    static struct timeval shutdown_start;
    static struct timeval prev_msg;
    static int apple_internal_logging = 1;
    struct timeval tvnow, tvd_total, tvd_msg_delta = { 0, 0 };
    struct stat sb;
    int saved_errno = errno;
    char newmsg[10000];
    size_t i, j;

    if (!(LOG_MASK(attr->priority) & internal_mask_pri)) {
        goto out;
    }

    if (apple_internal_logging == 1) {
        apple_internal_logging = stat("/AppleInternal", &sb);
    }

    if (!(debug_shutdown_hangs && getpid() == 1)) {
        if (attr->priority == LOG_APPLEONLY) {
            if (apple_internal_logging == -1) {
                goto out;
            }
            attr->priority = LOG_NOTICE;
        }
        vsnprintf(newmsg, sizeof(newmsg), message, args);
        logmsg_add(attr, saved_errno, newmsg);
        goto out;
    }

    if (shutdown_start.tv_sec == 0) {
        gettimeofday(&shutdown_start, NULL);
    }

    if (gettimeofday(&tvnow, NULL) == -1) {
        tvnow.tv_sec = 0;
        tvnow.tv_usec = 0;
    }

    pthread_mutex_lock(&ourlock);

    if (ourlogfile == NULL) {
        rename("/var/log/launchd-shutdown.log", "/var/log/launchd-shutdown.log.1");
        ourlogfile = fopen("/var/log/launchd-shutdown.log", "a");
    }

    pthread_mutex_unlock(&ourlock);

    if (ourlogfile == NULL) {
        goto out;
    }

    if (message == NULL) {
        goto out;
    }

    timersub(&tvnow, &shutdown_start, &tvd_total);

    if (prev_msg.tv_sec != 0) {
        timersub(&tvnow, &prev_msg, &tvd_msg_delta);
    }

    prev_msg = tvnow;

    snprintf(newmsg, sizeof(newmsg), "%3ld.%06d%4ld.%06d%6u %-40s%6u %-40s ",
            tvd_total.tv_sec, tvd_total.tv_usec,
            tvd_msg_delta.tv_sec, tvd_msg_delta.tv_usec,
            attr->from_pid, attr->from_name,
            attr->about_pid, attr->about_name);

    for (i = 0, j = strlen(newmsg); message[i];) {
        if (message[i] == '%' && message[i + 1] == 'm') {
            char *errs = strerror(saved_errno);
            strcpy(newmsg + j, errs ? errs : "unknown error");
            j += strlen(newmsg + j);
            i += 2;
        } else {
            newmsg[j] = message[i];
            j++;
            i++;
        }
    }

    strcpy(newmsg + j, "\n");

    vfprintf(ourlogfile, newmsg, args);

out:
    runtime_log_uncork_pending_drain();
}

bool
logmsg_add(struct runtime_syslog_attr *attr, int err_num, const char *msg)
{
    size_t lm_sz = sizeof(struct logmsg_s) + strlen(msg) + strlen(attr->from_name) + strlen(attr->about_name) + strlen(attr->session_name) + 4;
    char *data_off;
    struct logmsg_s *lm;

#define ROUND_TO_64BIT_WORD_SIZE(x) ((x + 7) & ~7)
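    /* e.g. ROUND_TO_64BIT_WORD_SIZE(13) == (13 + 7) & ~7 == 16 */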
1252
    /* round up so that unpacking these objects in log_drain consumers doesn't cause unalignment faults */
    lm_sz = ROUND_TO_64BIT_WORD_SIZE(lm_sz);

    if (!(lm = calloc(1, lm_sz))) {
        return false;
    }

    data_off = lm->data;

    launchd_assumes(gettimeofday(&lm->when, NULL) != -1);
    lm->from_pid = attr->from_pid;
    lm->about_pid = attr->about_pid;
    lm->err_num = err_num;
    lm->pri = attr->priority;
    lm->obj_sz = lm_sz;
    lm->msg = data_off;
    data_off += sprintf(data_off, "%s", msg) + 1;
    lm->from_name = data_off;
    data_off += sprintf(data_off, "%s", attr->from_name) + 1;
    lm->about_name = data_off;
    data_off += sprintf(data_off, "%s", attr->about_name) + 1;
    lm->session_name = data_off;
    data_off += sprintf(data_off, "%s", attr->session_name) + 1;

    STAILQ_INSERT_TAIL(&logmsg_queue, lm, sqe);
    logmsg_queue_sz += lm_sz;
    logmsg_queue_cnt++;

    return true;
}

void
logmsg_remove(struct logmsg_s *lm)
{
    STAILQ_REMOVE(&logmsg_queue, lm, logmsg_s, sqe);
    logmsg_queue_sz -= lm->obj_sz;
    logmsg_queue_cnt--;

    free(lm);
}

kern_return_t
runtime_log_pack(vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
    struct logmsg_s *lm;
    void *offset;

    *outvalCnt = logmsg_queue_sz;

    mig_allocate(outval, *outvalCnt);

    if (*outval == 0) {
        return 1;
    }

    offset = (void *)*outval;

    while ((lm = STAILQ_FIRST(&logmsg_queue))) {
        lm->from_name -= (size_t)lm;
        lm->about_name -= (size_t)lm;
        lm->msg -= (size_t)lm;
        lm->session_name -= (size_t)lm;

        memcpy(offset, lm, lm->obj_sz);

        offset += lm->obj_sz;

        logmsg_remove(lm);
    }

    return 0;
}
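/* Packing convention: before each logmsg_s is copied out, its string pointers
 * are rebased to offsets from the start of the object; runtime_log_forward()
 * reverses this by adding the new base address back in on the receiving side. */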

void
runtime_log_uncork_pending_drain(void)
{
    mach_msg_type_number_t outvalCnt;
    mach_port_t tmp_port;
    vm_offset_t outval;

    if (!drain_reply_port) {
        return;
    }

    if (logmsg_queue_cnt == 0) {
        return;
    }

    if (runtime_log_pack(&outval, &outvalCnt) != 0) {
        return;
    }

    tmp_port = drain_reply_port;
    drain_reply_port = MACH_PORT_NULL;

    if ((errno = job_mig_log_drain_reply(tmp_port, 0, outval, outvalCnt))) {
        launchd_assumes(errno == MACH_SEND_INVALID_DEST);
        launchd_assumes(launchd_mport_deallocate(tmp_port) == KERN_SUCCESS);
    }

    mig_deallocate(outval, outvalCnt);
}

void
runtime_log_push(void)
{
    mach_msg_type_number_t outvalCnt;
    vm_offset_t outval;

    if (logmsg_queue_cnt == 0) {
        launchd_assumes(STAILQ_EMPTY(&logmsg_queue));
        return;
    } else if (getpid() == 1) {
        return;
    }

    if (runtime_log_pack(&outval, &outvalCnt) != 0) {
        return;
    }

    launchd_assumes(_vprocmgr_log_forward(inherited_bootstrap_port, (void *)outval, outvalCnt) == NULL);

    mig_deallocate(outval, outvalCnt);
}

kern_return_t
runtime_log_forward(uid_t forward_uid, gid_t forward_gid, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
    struct logmsg_s *lm, *lm_walk;
    mach_msg_type_number_t data_left = invalCnt;

    if (inval == 0) {
        return 0;
    }

    for (lm_walk = (struct logmsg_s *)inval; (data_left > 0) && (lm_walk->obj_sz <= data_left); lm_walk = ((void *)lm_walk + lm_walk->obj_sz)) {
        if (!launchd_assumes(lm = malloc(lm_walk->obj_sz))) {
            continue;
        }

        memcpy(lm, lm_walk, lm_walk->obj_sz);
        lm->sender_uid = forward_uid;
        lm->sender_gid = forward_gid;

        lm->from_name += (size_t)lm;
        lm->about_name += (size_t)lm;
        lm->msg += (size_t)lm;
        lm->session_name += (size_t)lm;

        STAILQ_INSERT_TAIL(&logmsg_queue, lm, sqe);
        logmsg_queue_sz += lm->obj_sz;
        logmsg_queue_cnt++;

        data_left -= lm->obj_sz;
    }

    mig_deallocate(inval, invalCnt);

    return 0;
}

kern_return_t
runtime_log_drain(mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
    if (logmsg_queue_cnt == 0) {
        launchd_assumes(STAILQ_EMPTY(&logmsg_queue));
        launchd_assumes(drain_reply_port == 0);

        drain_reply_port = srp;
        launchd_assumes(launchd_mport_notify_req(drain_reply_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);

        return MIG_NO_REPLY;
    }

    return runtime_log_pack(outval, outvalCnt);
}

/*
 * We should break this into two reference counts.
 *
 * One for hard references that would prevent exiting.
 * One for soft references that would only prevent idle exiting.
 *
 * In the long run, reference counting should completely automate when a
 * process can and should exit.
 */
void
runtime_add_ref(void)
{
    runtime_busy_cnt++;
}

void
runtime_del_ref(void)
{
    runtime_busy_cnt--;
}