]> git.saurik.com Git - apple/launchd.git/blame - launchd/src/launchd_runtime.c
launchd-258.25.tar.gz
[apple/launchd.git] / launchd / src / launchd_runtime.c
CommitLineData
5b0a4722
A
1/*
2 * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
ef398931 21static const char *const __rcs_file_version__ = "$Revision: 23748 $";
5b0a4722
A
22
23#include "config.h"
24#include "launchd_runtime.h"
25
26#include <mach/mach.h>
27#include <mach/mach_error.h>
28#include <mach/boolean.h>
29#include <mach/message.h>
30#include <mach/notify.h>
31#include <mach/mig_errors.h>
32#include <mach/mach_traps.h>
33#include <mach/mach_interface.h>
34#include <mach/host_info.h>
35#include <mach/mach_host.h>
36#include <mach/exception.h>
37#include <sys/types.h>
38#include <sys/stat.h>
39#include <sys/time.h>
40#include <sys/proc.h>
41#include <sys/event.h>
42#include <sys/queue.h>
43#include <sys/socket.h>
44#include <sys/mount.h>
45#include <sys/reboot.h>
46#include <sys/fcntl.h>
47#include <bsm/libbsm.h>
48#include <malloc/malloc.h>
49#include <unistd.h>
50#include <pthread.h>
51#include <errno.h>
52#include <string.h>
53#include <ctype.h>
54#include <stdio.h>
55#include <stdlib.h>
56#include <stdbool.h>
57#include <syslog.h>
58#include <signal.h>
59#include <dlfcn.h>
60
61#include "launchd_internalServer.h"
62#include "launchd_internal.h"
63#include "notifyServer.h"
ef398931 64#include "excServer.h"
5b0a4722
A
65
66/* We shouldn't be including these */
67#include "launch.h"
68#include "launchd.h"
69#include "launchd_core_logic.h"
ef398931
A
70#include "vproc_internal.h"
71#include "protocol_job_reply.h"
5b0a4722
A
72
/* Mach port sets: "demand" ports are only watched for message arrival;
 * "ipc" ports are actively serviced by launchd_runtime2(). */
static mach_port_t ipc_port_set;
static mach_port_t demand_port_set;
static mach_port_t launchd_internal_port;
static int mainkq;

/* Up to BULK_KEV_MAX kevents are drained per pass; runtime_close() uses
 * these globals to cancel not-yet-dispatched events on a closed fd. */
#define BULK_KEV_MAX 100
static struct kevent *bulk_kev;
static int bulk_kev_i;
static int bulk_kev_cnt;

static pthread_t kqueue_demand_thread;
static pthread_t demand_thread;

static void *mport_demand_loop(void *arg);
static void *kqueue_demand_loop(void *arg);
static void log_kevent_struct(int level, struct kevent *kev, int indx);

static boolean_t launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply);
static void record_caller_creds(mach_msg_header_t *mh);
static void launchd_runtime2(mach_msg_size_t msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply);
static mach_msg_size_t max_msg_size;
static mig_callback *mig_cb_table;
static size_t mig_cb_table_sz;
static timeout_callback runtime_idle_callback;
static mach_msg_timeout_t runtime_idle_timeout;
static audit_token_t *au_tok;
static size_t runtime_busy_cnt;


/* Queue of buffered log messages awaiting a drain request. */
static STAILQ_HEAD(, logmsg_s) logmsg_queue = STAILQ_HEAD_INITIALIZER(logmsg_queue);
static size_t logmsg_queue_sz;
static size_t logmsg_queue_cnt;
static mach_port_t drain_reply_port;
static void runtime_log_uncork_pending_drain(void);
static kern_return_t runtime_log_pack(vm_offset_t *outval, mach_msg_type_number_t *outvalCnt);
static void runtime_log_push(void);

static bool logmsg_add(struct runtime_syslog_attr *attr, int err_num, const char *msg);
static void logmsg_remove(struct logmsg_s *lm);


/* Signals launchd ignores while running; runtime_fork() resets them to
 * SIG_DFL in children. A redundant duplicate SIGTSTP entry was removed
 * (sigaddset()/signal() are idempotent, so behavior is unchanged). */
static const int sigigns[] = { SIGHUP, SIGINT, SIGPIPE, SIGALRM, SIGTERM,
	SIGURG, SIGTSTP, SIGCONT, SIGTTIN, SIGTTOU, SIGIO, SIGXCPU,
	SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGINFO, SIGUSR1, SIGUSR2
};
static sigset_t sigign_set;
119
fe044cc9
A
120mach_port_t
121runtime_get_kernel_port(void)
122{
123 return launchd_internal_port;
124}
125
5b0a4722
A
/* First-stage runtime bring-up: create the main kqueue, the two Mach
 * port sets, and the internal MIG service port, then spawn the two
 * detached helper threads that watch for demand. Any failure here is
 * fatal (launchd_assert). */
void
launchd_runtime_init(void)
{
	mach_msg_size_t mxmsgsz;
	pthread_attr_t attr;

	launchd_assert((mainkq = kqueue()) != -1);

	launchd_assert((errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &demand_port_set)) == KERN_SUCCESS);
	launchd_assert((errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &ipc_port_set)) == KERN_SUCCESS);

	launchd_assert(launchd_mport_create_recv(&launchd_internal_port) == KERN_SUCCESS);
	launchd_assert(launchd_mport_make_send(launchd_internal_port) == KERN_SUCCESS);

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = sizeof(union __RequestUnion__x_launchd_internal_subsystem);
	if (x_launchd_internal_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = x_launchd_internal_subsystem.maxsize;
	}

	launchd_assert(runtime_add_mport(launchd_internal_port, launchd_internal_demux, mxmsgsz) == KERN_SUCCESS);

	/* Both helpers run detached on a minimal stack; they only block in
	 * mach_msg()/select() and bounce work back via the internal port. */
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
	launchd_assert(pthread_create(&kqueue_demand_thread, &attr, kqueue_demand_loop, NULL) == 0);
	pthread_attr_destroy(&attr);

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
	launchd_assert(pthread_create(&demand_thread, &attr, mport_demand_loop, NULL) == 0);
	pthread_attr_destroy(&attr);
}
160
161void
162launchd_runtime_init2(void)
163{
164 size_t i;
165
166 for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
167 sigaddset(&sigign_set, sigigns[i]);
168 launchd_assumes(signal(sigigns[i], SIG_IGN) != SIG_ERR);
169 }
170}
171
/* Helper thread: block until some port in demand_port_set has a queued
 * message, then ask the main event machinery (via the internal MIG port)
 * to deal with it. The receive uses a zero-length buffer, so
 * MACH_RCV_TOO_LARGE is the expected "a message is waiting" result;
 * MACH_RCV_PORT_CHANGED means the port set went away (shutdown). */
void *
mport_demand_loop(void *arg __attribute__((unused)))
{
	mach_msg_empty_rcv_t dummy;
	kern_return_t kr;

	for (;;) {
		kr = mach_msg(&dummy.header, MACH_RCV_MSG|MACH_RCV_LARGE, 0, 0, demand_port_set, 0, MACH_PORT_NULL);
		if (kr == MACH_RCV_PORT_CHANGED) {
			break;
		} else if (!launchd_assumes(kr == MACH_RCV_TOO_LARGE)) {
			/* Unexpected return; log via launchd_assumes and retry. */
			continue;
		}
		launchd_assumes(handle_mport(launchd_internal_port) == 0);
	}

	return NULL;
}
190
/* Render a proc p_flag bitmask as a '|'-separated list of P_* names;
 * any residual unknown bits are appended as a single hex constant.
 * NOTE: returns a pointer to a static buffer -- not reentrant, and the
 * result must be consumed before the next call. */
const char *
proc_flags_to_C_names(unsigned int flags)
{
/* Worst-case output (every known flag plus hex residue); only used to size the buffer. */
#define MAX_PFLAG_STR "P_ADVLOCK|P_CONTROLT|P_LP64|P_NOCLDSTOP|P_PPWAIT|P_PROFIL|P_SELECT|P_CONTINUED|P_SUGID|P_SYSTEM|P_TIMEOUT|P_TRACED|P_RESV3|P_WEXIT|P_EXEC|P_OWEUPC|P_AFFINITY|P_TRANSLATED|P_RESV5|P_CHECKOPENEVT|P_DEPENDENCY_CAPABLE|P_REBOOT|P_TBE|P_RESV7|P_THCWD|P_RESV9|P_RESV10|P_RESV11|P_NOSHLIB|P_FORCEQUOTA|P_NOCLDWAIT|P_NOREMOTEHANG|0xdeadbeeffeedface"

	static char flags_buf[sizeof(MAX_PFLAG_STR)];
	char *flags_off = NULL;

	if (!flags) {
		return "";
	}

	/* One flag name is emitted per iteration; '|' separates entries
	 * after the first. The hex fallback clears "flags" and ends the loop. */
	while (flags) {
		if (flags_off) {
			*flags_off = '|';
			flags_off++;
			*flags_off = '\0';
		} else {
			flags_off = flags_buf;
		}

/* Emit the flag's name and clear its bit; shared with the other *_to_C_names helpers below. */
#define FLAGIF(f) if (flags & f) { flags_off += sprintf(flags_off, #f); flags &= ~f; }

		FLAGIF(P_ADVLOCK)
		else FLAGIF(P_CONTROLT)
		else FLAGIF(P_LP64)
		else FLAGIF(P_NOCLDSTOP)
		else FLAGIF(P_PPWAIT)
		else FLAGIF(P_PROFIL)
		else FLAGIF(P_SELECT)
		else FLAGIF(P_CONTINUED)
		else FLAGIF(P_SUGID)
		else FLAGIF(P_SYSTEM)
		else FLAGIF(P_TIMEOUT)
		else FLAGIF(P_TRACED)
		else FLAGIF(P_RESV3)
		else FLAGIF(P_WEXIT)
		else FLAGIF(P_EXEC)
		else FLAGIF(P_OWEUPC)
		else FLAGIF(P_AFFINITY)
		else FLAGIF(P_TRANSLATED)
		else FLAGIF(P_RESV5)
		else FLAGIF(P_CHECKOPENEVT)
		else FLAGIF(P_DEPENDENCY_CAPABLE)
		else FLAGIF(P_REBOOT)
		else FLAGIF(P_TBE)
		else FLAGIF(P_RESV7)
		else FLAGIF(P_THCWD)
		else FLAGIF(P_RESV9)
		else FLAGIF(P_RESV10)
		else FLAGIF(P_RESV11)
		else FLAGIF(P_NOSHLIB)
		else FLAGIF(P_FORCEQUOTA)
		else FLAGIF(P_NOCLDWAIT)
		else FLAGIF(P_NOREMOTEHANG)
		else {
			flags_off += sprintf(flags_off, "0x%x", flags);
			flags = 0;
		}
	}

	return flags_buf;
}
254
/* Render reboot(2) howto flags as a '|'-separated list of RB_* names.
 * Zero maps to "RB_AUTOBOOT"; unknown residual bits are emitted in hex.
 * NOTE: static buffer -- not reentrant.
 * NOTE(review): MAX_RB_STR mentions RB_KDB but the chain below never
 * tests it, so that bit falls through to the hex fallback -- confirm
 * the omission is intentional. */
const char *
reboot_flags_to_C_names(unsigned int flags)
{
/* Worst-case output; only used to size the buffer. */
#define MAX_RB_STR "RB_ASKNAME|RB_SINGLE|RB_NOSYNC|RB_KDB|RB_HALT|RB_INITNAME|RB_DFLTROOT|RB_ALTBOOT|RB_UNIPROC|RB_SAFEBOOT|RB_UPSDELAY|0xdeadbeeffeedface"
	static char flags_buf[sizeof(MAX_RB_STR)];
	char *flags_off = NULL;

	if (flags == 0) {
		return "RB_AUTOBOOT";
	}

	while (flags) {
		if (flags_off) {
			*flags_off = '|';
			flags_off++;
			*flags_off = '\0';
		} else {
			flags_off = flags_buf;
		}

		FLAGIF(RB_ASKNAME)
		else FLAGIF(RB_SINGLE)
		else FLAGIF(RB_NOSYNC)
		else FLAGIF(RB_HALT)
		else FLAGIF(RB_INITNAME)
		else FLAGIF(RB_DFLTROOT)
		else FLAGIF(RB_ALTBOOT)
		else FLAGIF(RB_UNIPROC)
		else FLAGIF(RB_SAFEBOOT)
		else FLAGIF(RB_UPSDELAY)
		else {
			flags_off += sprintf(flags_off, "0x%x", flags);
			flags = 0;
		}
	}

	return flags_buf;
}
293
/* Map a signal number to its symbolic name ("SIGHUP", ...). Unknown
 * signals are formatted as a decimal string.
 * NOTE: the fallback uses a static buffer, so the result for unknown
 * signals must be consumed before the next call (not reentrant).
 * SIGINFO is a BSD extension; it is guarded so this file also compiles
 * on platforms that lack it (no behavior change where it exists). */
const char *
signal_to_C_name(unsigned int sig)
{
	static char unknown[25];

#define SIG2CASE(sg)	case sg: return #sg

	switch (sig) {
	SIG2CASE(SIGHUP);
	SIG2CASE(SIGINT);
	SIG2CASE(SIGQUIT);
	SIG2CASE(SIGILL);
	SIG2CASE(SIGTRAP);
	SIG2CASE(SIGABRT);
	SIG2CASE(SIGFPE);
	SIG2CASE(SIGKILL);
	SIG2CASE(SIGBUS);
	SIG2CASE(SIGSEGV);
	SIG2CASE(SIGSYS);
	SIG2CASE(SIGPIPE);
	SIG2CASE(SIGALRM);
	SIG2CASE(SIGTERM);
	SIG2CASE(SIGURG);
	SIG2CASE(SIGSTOP);
	SIG2CASE(SIGTSTP);
	SIG2CASE(SIGCONT);
	SIG2CASE(SIGCHLD);
	SIG2CASE(SIGTTIN);
	SIG2CASE(SIGTTOU);
	SIG2CASE(SIGIO);
	SIG2CASE(SIGXCPU);
	SIG2CASE(SIGXFSZ);
	SIG2CASE(SIGVTALRM);
	SIG2CASE(SIGPROF);
	SIG2CASE(SIGWINCH);
#ifdef SIGINFO
	SIG2CASE(SIGINFO);
#endif
	SIG2CASE(SIGUSR1);
	SIG2CASE(SIGUSR2);
	default:
		snprintf(unknown, sizeof(unknown), "%u", sig);
		return unknown;
	}
}
337
/* Pretty-print a kevent to the runtime log at "level". "indx" is the
 * event's position within the current bulk batch. Flags and
 * filter-specific fflags are decoded to symbolic names; anything
 * unrecognized is shown in hex. */
void
log_kevent_struct(int level, struct kevent *kev, int indx)
{
	const char *filter_str;
	char ident_buf[100];
	char filter_buf[100];
	char fflags_buf[1000];
	char flags_buf[1000] = "0x0";
	char *flags_off = NULL;
	char *fflags_off = NULL;
	unsigned short flags = kev->flags;
	unsigned int fflags = kev->fflags;

	/* The leading "if (flags)" keeps the "0x0" default when no flags are set. */
	if (flags) while (flags) {
		if (flags_off) {
			*flags_off = '|';
			flags_off++;
			*flags_off = '\0';
		} else {
			flags_off = flags_buf;
		}

		FLAGIF(EV_ADD)
		else FLAGIF(EV_RECEIPT)
		else FLAGIF(EV_DELETE)
		else FLAGIF(EV_ENABLE)
		else FLAGIF(EV_DISABLE)
		else FLAGIF(EV_CLEAR)
		else FLAGIF(EV_EOF)
		else FLAGIF(EV_ONESHOT)
		else FLAGIF(EV_ERROR)
		else {
			flags_off += sprintf(flags_off, "0x%x", flags);
			flags = 0;
		}
	}

	/* Defaults; filter-specific cases below may overwrite either buffer. */
	snprintf(ident_buf, sizeof(ident_buf), "%ld", kev->ident);
	snprintf(fflags_buf, sizeof(fflags_buf), "0x%x", fflags);

	switch (kev->filter) {
	case EVFILT_READ:
		filter_str = "EVFILT_READ";
		break;
	case EVFILT_WRITE:
		filter_str = "EVFILT_WRITE";
		break;
	case EVFILT_AIO:
		filter_str = "EVFILT_AIO";
		break;
	case EVFILT_VNODE:
		filter_str = "EVFILT_VNODE";
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

/* Like FLAGIF but for the filter-specific fflags word. */
#define FFLAGIF(ff) if (fflags & ff) { fflags_off += sprintf(fflags_off, #ff); fflags &= ~ff; }

			FFLAGIF(NOTE_DELETE)
			else FFLAGIF(NOTE_WRITE)
			else FFLAGIF(NOTE_EXTEND)
			else FFLAGIF(NOTE_ATTRIB)
			else FFLAGIF(NOTE_LINK)
			else FFLAGIF(NOTE_RENAME)
			else FFLAGIF(NOTE_REVOKE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_PROC:
		filter_str = "EVFILT_PROC";
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(NOTE_EXIT)
			else FFLAGIF(NOTE_REAP)
			else FFLAGIF(NOTE_FORK)
			else FFLAGIF(NOTE_EXEC)
			else FFLAGIF(NOTE_SIGNAL)
			else FFLAGIF(NOTE_TRACK)
			else FFLAGIF(NOTE_TRACKERR)
			else FFLAGIF(NOTE_CHILD)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_SIGNAL:
		filter_str = "EVFILT_SIGNAL";
		strcpy(ident_buf, signal_to_C_name(kev->ident));
		break;
	case EVFILT_TIMER:
		filter_str = "EVFILT_TIMER";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(NOTE_SECONDS)
			else FFLAGIF(NOTE_USECONDS)
			else FFLAGIF(NOTE_NSECONDS)
			else FFLAGIF(NOTE_ABSOLUTE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_MACHPORT:
		filter_str = "EVFILT_MACHPORT";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		break;
	case EVFILT_FS:
		filter_str = "EVFILT_FS";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(VQ_NOTRESP)
			else FFLAGIF(VQ_NEEDAUTH)
			else FFLAGIF(VQ_LOWDISK)
			else FFLAGIF(VQ_MOUNT)
			else FFLAGIF(VQ_UNMOUNT)
			else FFLAGIF(VQ_DEAD)
			else FFLAGIF(VQ_ASSIST)
			else FFLAGIF(VQ_NOTRESPLOCK)
			else FFLAGIF(VQ_UPDATE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	default:
		snprintf(filter_buf, sizeof(filter_buf), "%d", kev->filter);
		filter_str = filter_buf;
		break;
	}

	runtime_syslog(level, "KEVENT[%d]: udata = %p data = 0x%lx ident = %s filter = %s flags = %s fflags = %s",
			indx, kev->udata, kev->data, ident_buf, filter_str, flags_buf, fflags_buf);
}
505
/* MIG handler invoked when a port in demand_port_set has traffic.
 * Walks the set's members, finds the first port with queued messages,
 * and fires the kq_callback registered for that service port. Only one
 * port is serviced per call because the callback may mutate the port
 * set out from under the loop. */
kern_return_t
x_handle_mport(mach_port_t junk __attribute__((unused)))
{
	mach_port_name_array_t members;
	mach_msg_type_number_t membersCnt;
	mach_port_status_t status;
	mach_msg_type_number_t statusCnt;
	struct kevent kev;
	unsigned int i;

	if (!launchd_assumes((errno = mach_port_get_set_status(mach_task_self(), demand_port_set, &members, &membersCnt)) == KERN_SUCCESS)) {
		return 1;
	}

	for (i = 0; i < membersCnt; i++) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), members[i], MACH_PORT_RECEIVE_STATUS, (mach_port_info_t)&status,
					&statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			/* Synthesize a machport kevent so the job callback sees the
			 * same shape of event it would get from the kqueue path. */
			EV_SET(&kev, members[i], EVFILT_MACHPORT, 0, 0, 0, job_find_by_service_port(members[i]));
#if 0
			if (launchd_assumes(kev.udata != NULL)) {
#endif
				log_kevent_struct(LOG_DEBUG, &kev, 0);
				(*((kq_callback *)kev.udata))(kev.udata, &kev);
#if 0
			} else {
				log_kevent_struct(LOG_ERR, &kev);
			}
#endif
			/* the callback may have tainted our ability to continue this for loop */
			break;
		}
	}

	/* The member array is vm_allocate()d by the kernel on our behalf. */
	launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)members,
				(vm_size_t) membersCnt * sizeof(mach_port_name_t)) == KERN_SUCCESS);

	return 0;
}
548
549void *
550kqueue_demand_loop(void *arg __attribute__((unused)))
551{
552 fd_set rfds;
553
554 /*
555 * Yes, at first glance, calling select() on a kqueue seems silly.
556 *
557 * This avoids a race condition between the main thread and this helper
558 * thread by ensuring that we drain kqueue events on the same thread
559 * that manipulates the kqueue.
560 */
561
562 for (;;) {
563 FD_ZERO(&rfds);
564 FD_SET(mainkq, &rfds);
565 if (launchd_assumes(select(mainkq + 1, &rfds, NULL, NULL, NULL) == 1)) {
566 launchd_assumes(handle_kqueue(launchd_internal_port, mainkq) == 0);
567 }
568 }
569
570 return NULL;
571}
572
/* MIG handler invoked when the main kqueue is readable. Drains up to
 * BULK_KEV_MAX events with a zero timeout, logs the whole batch, then
 * dispatches each event to the kq_callback stored in its udata. The
 * bulk_kev globals let runtime_close() zero the filter of a pending
 * event to cancel its dispatch (hence the kev[i].filter test). */
kern_return_t
x_handle_kqueue(mach_port_t junk __attribute__((unused)), integer_t fd)
{
	struct timespec ts = { 0, 0 };
	struct kevent kev[BULK_KEV_MAX];
	int i;

	bulk_kev = kev;

	launchd_assumes((bulk_kev_cnt = kevent(fd, NULL, 0, kev, BULK_KEV_MAX, &ts)) != -1);

	if (bulk_kev_cnt > 0) {
#if 0
		Dl_info dli;

		if (launchd_assumes(malloc_size(kev.udata) || dladdr(kev.udata, &dli))) {
#endif
		/* Log first, then dispatch, so the log shows the full batch even
		 * if an early callback changes later events. */
		for (i = 0; i < bulk_kev_cnt; i++) {
			log_kevent_struct(LOG_DEBUG, &kev[i], i);
		}
		for (i = 0; i < bulk_kev_cnt; i++) {
			bulk_kev_i = i;
			if (kev[i].filter) {
				(*((kq_callback *)kev[i].udata))(kev[i].udata, &kev[i]);
			}
		}
#if 0
		} else {
			log_kevent_struct(LOG_ERR, &kev);
		}
#endif
	}

	bulk_kev = NULL;

	return 0;
}
610
611
612
/* Top-level Mach message loop. Allocates page-aligned request/reply
 * buffers of max_msg_size bytes and hands them to launchd_runtime2();
 * that call returns when the buffers are no longer large enough (a port
 * with a bigger message size was registered), so they are reallocated
 * and the loop repeats forever. */
void
launchd_runtime(void)
{
	mig_reply_error_t *req = NULL, *resp = NULL;
	mach_msg_size_t mz = max_msg_size;
	/* VM_MAKE_TAG labels the allocations for vmmap; the |TRUE presumably
	 * stands in for VM_FLAGS_ANYWHERE -- confirm against <mach/vm_statistics.h>. */
	int flags = VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|TRUE;

	for (;;) {
		if (req) {
			launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)req, mz) == KERN_SUCCESS);
			req = NULL;
		}
		if (resp) {
			launchd_assumes(vm_deallocate(mach_task_self(), (vm_address_t)resp, mz) == KERN_SUCCESS);
			resp = NULL;
		}

		mz = max_msg_size;

		if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t *)&req, mz, flags) == KERN_SUCCESS)) {
			continue;
		}
		if (!launchd_assumes(vm_allocate(mach_task_self(), (vm_address_t *)&resp, mz, flags) == KERN_SUCCESS)) {
			continue;
		}

		launchd_runtime2(mz, req, resp);

		/* If we get here, max_msg_size probably changed... */
	}
}
644
645kern_return_t
646launchd_set_bport(mach_port_t name)
647{
648 return errno = task_set_bootstrap_port(mach_task_self(), name);
649}
650
651kern_return_t
652launchd_get_bport(mach_port_t *name)
653{
654 return errno = task_get_bootstrap_port(mach_task_self(), name);
655}
656
/* Arm a Mach notification "which" on port "name". Port-destroyed
 * notifications use a sync value of 0, others 1; no-senders
 * notifications are delivered to the port itself, everything else to
 * launchd's internal port. Any previously-armed send-once right is
 * deallocated so it doesn't leak. */
kern_return_t
launchd_mport_notify_req(mach_port_t name, mach_msg_id_t which)
{
	mach_port_mscount_t msgc = (which == MACH_NOTIFY_PORT_DESTROYED) ? 0 : 1;
	mach_port_t previous, where = (which == MACH_NOTIFY_NO_SENDERS) ? name : launchd_internal_port;

	if (which == MACH_NOTIFY_NO_SENDERS) {
		/* Always make sure the send count is zero, in case a receive right is reused */
		errno = mach_port_set_mscount(mach_task_self(), name, 0);
		if (errno != KERN_SUCCESS) {
			return errno;
		}
	}

	errno = mach_port_request_notification(mach_task_self(), name, which, msgc, where,
			MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);

	if (errno == 0 && previous != MACH_PORT_NULL) {
		launchd_assumes(launchd_mport_deallocate(previous) == KERN_SUCCESS);
	}

	return errno;
}
680
/* fork(2) wrapper: temporarily installs "bsport" as the task bootstrap
 * port and resets launchd's ignored signals to SIG_DFL so the child
 * starts with default handlers and an empty signal mask; the parent
 * restores its signal state and clears the bootstrap port afterwards.
 * errno from fork() itself is preserved for the caller. */
pid_t
runtime_fork(mach_port_t bsport)
{
	sigset_t emptyset, oset;
	pid_t r = -1;
	int saved_errno;
	size_t i;

	sigemptyset(&emptyset);

	/* Give the child-to-be its bootstrap port, then drop our extra ref. */
	launchd_assumes(launchd_mport_make_send(bsport) == KERN_SUCCESS);
	launchd_assumes(launchd_set_bport(bsport) == KERN_SUCCESS);
	launchd_assumes(launchd_mport_deallocate(bsport) == KERN_SUCCESS);

	/* Block the managed signals while their handlers are temporarily
	 * reset, so none fire in launchd with SIG_DFL installed. */
	launchd_assumes(sigprocmask(SIG_BLOCK, &sigign_set, &oset) != -1);
	for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
		launchd_assumes(signal(sigigns[i], SIG_DFL) != SIG_ERR);
	}

	r = fork();
	saved_errno = errno;

	if (r != 0) {
		/* Parent (or failed fork): restore ignore handlers, mask, and
		 * clear the bootstrap port again. */
		for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
			launchd_assumes(signal(sigigns[i], SIG_IGN) != SIG_ERR);
		}
		launchd_assumes(sigprocmask(SIG_SETMASK, &oset, NULL) != -1);
		launchd_assumes(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
	} else {
		/* Child: start with nothing blocked. */
		launchd_assumes(sigprocmask(SIG_SETMASK, &emptyset, NULL) != -1);
	}

	errno = saved_errno;

	return r;
}
717
718
719void
720runtime_set_timeout(timeout_callback to_cb, unsigned int sec)
721{
722 if (sec == 0 || to_cb == NULL) {
723 runtime_idle_callback = NULL;
724 runtime_idle_timeout = 0;
725 }
726
727 runtime_idle_callback = to_cb;
728 runtime_idle_timeout = sec * 1000;
729}
730
731kern_return_t
732runtime_add_mport(mach_port_t name, mig_callback demux, mach_msg_size_t msg_size)
733{
734 size_t needed_table_sz = (MACH_PORT_INDEX(name) + 1) * sizeof(mig_callback);
735 mach_port_t target_set = demux ? ipc_port_set : demand_port_set;
736
737 msg_size = round_page(msg_size + MAX_TRAILER_SIZE);
738
739 if (needed_table_sz > mig_cb_table_sz) {
740 needed_table_sz *= 2; /* Let's try and avoid realloc'ing for a while */
741 mig_callback *new_table = malloc(needed_table_sz);
742
743 if (!launchd_assumes(new_table != NULL)) {
744 return KERN_RESOURCE_SHORTAGE;
745 }
746
747 if (mig_cb_table) {
748 memcpy(new_table, mig_cb_table, mig_cb_table_sz);
749 free(mig_cb_table);
750 }
751
752 mig_cb_table_sz = needed_table_sz;
753 mig_cb_table = new_table;
754 }
755
756 mig_cb_table[MACH_PORT_INDEX(name)] = demux;
757
758 if (msg_size > max_msg_size) {
759 max_msg_size = msg_size;
760 }
761
762 return errno = mach_port_move_member(mach_task_self(), name, target_set);
763}
764
/* Unregister a port: forget its demux callback and pull it out of
 * whichever port set it belonged to.
 * NOTE(review): unlike runtime_add_mport(), there is no bounds check of
 * MACH_PORT_INDEX(name) against mig_cb_table_sz -- presumably callers
 * only pass ports that were previously added; confirm. */
kern_return_t
runtime_remove_mport(mach_port_t name)
{
	mig_cb_table[MACH_PORT_INDEX(name)] = NULL;

	return errno = mach_port_move_member(mach_task_self(), name, MACH_PORT_NULL);
}
772
773kern_return_t
774launchd_mport_make_send(mach_port_t name)
775{
776 return errno = mach_port_insert_right(mach_task_self(), name, name, MACH_MSG_TYPE_MAKE_SEND);
777}
778
779kern_return_t
780launchd_mport_close_recv(mach_port_t name)
781{
782 return errno = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE, -1);
783}
784
785kern_return_t
786launchd_mport_create_recv(mach_port_t *name)
787{
788 return errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, name);
789}
790
791kern_return_t
792launchd_mport_deallocate(mach_port_t name)
793{
794 return errno = mach_port_deallocate(mach_task_self(), name);
795}
796
/* Apply an array of kevent changes to the main kqueue in one call.
 * EV_CLEAR and EV_RECEIPT are forced on every entry so results come
 * back as per-entry receipts rather than being consumed as events. */
int
kevent_bulk_mod(struct kevent *kev, size_t kev_cnt)
{
	size_t idx = 0;

	while (idx < kev_cnt) {
		kev[idx].flags |= EV_CLEAR|EV_RECEIPT;
		idx++;
	}

	return kevent(mainkq, kev, kev_cnt, kev, kev_cnt, NULL);
}
808
/* Add/modify a single kevent on the main kqueue. EV_CLEAR is forced
 * for every filter except read/write; EV_RECEIPT is always set so the
 * result is returned as an EV_ERROR receipt instead of being consumed.
 * Returns kevent()'s count (1) on success, or -1 with errno set. */
int
kevent_mod(uintptr_t ident, short filter, u_short flags, u_int fflags, intptr_t data, void *udata)
{
	struct kevent kev;
	int r;

	switch (filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		flags |= EV_CLEAR;
		break;
	}

	flags |= EV_RECEIPT;

	/* Every registered event must carry a kq_callback in udata; the
	 * dispatch paths blindly call through it. */
	if (flags & EV_ADD && !launchd_assumes(udata != NULL)) {
		errno = EINVAL;
		return -1;
	}

	EV_SET(&kev, ident, filter, flags, fflags, data, udata);

	r = kevent(mainkq, &kev, 1, &kev, 1, NULL);

	if (!launchd_assumes(r == 1)) {
		return -1;
	}

	/* With EV_RECEIPT the returned event is always EV_ERROR; a non-zero
	 * data field carries the errno for the change. */
	if (launchd_assumes(kev.flags & EV_ERROR)) {
		if ((flags & EV_ADD) && kev.data) {
			runtime_syslog(LOG_DEBUG, "%s(): See the next line...", __func__);
			log_kevent_struct(LOG_DEBUG, &kev, 0);
			errno = kev.data;
			return -1;
		}
	}

	return r;
}
850
/* Route an incoming message on the internal port to the right MIG
 * server: launchd's own subsystem, the Mach notify subsystem, or
 * (failing both) the exception server. */
boolean_t
launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply)
{
	if (launchd_internal_server_routine(Request)) {
		return launchd_internal_server(Request, Reply);
	} else if (notify_server_routine(Request)) {
		return notify_server(Request, Reply);
	} else {
		return exc_server(Request, Reply);
	}
}
862
/* Mach notify callback: a receive right launchd gave out has been
 * returned to us. Offer it to the job layer; if no job claims it, close
 * the receive right so it doesn't leak. */
kern_return_t
do_mach_notify_port_destroyed(mach_port_t notify __attribute__((unused)),
		mach_port_t rights)
{
	/* This message is sent to us when a receive right is returned to us. */

	if (!launchd_assumes(job_ack_port_destruction(rights))) {
		launchd_assumes(launchd_mport_close_recv(rights) == KERN_SUCCESS);
	}

	return KERN_SUCCESS;
}
875
/* Mach notify callback: intentionally a no-op (see comment below). */
kern_return_t
do_mach_notify_port_deleted(mach_port_t notify __attribute__((unused)),
		mach_port_name_t name __attribute__((unused)))
{
	/* If we deallocate/destroy/mod_ref away a port with a pending
	 * notification, the original notification message is replaced with
	 * this message. To quote a Mach kernel expert, "the kernel has a
	 * send-once right that has to be used somehow."
	 */
	return KERN_SUCCESS;
}
887
/* Mach notify callback: the last send right to one of our job ports is
 * gone; tell the owning job so it can clean up. */
kern_return_t
do_mach_notify_no_senders(mach_port_t notify,
		mach_port_mscount_t mscount __attribute__((unused)))
{
	job_t j = job_mig_intran(notify);

	/* This message is sent to us when the last customer of one of our
	 * objects goes away.
	 */

	if (!launchd_assumes(j != NULL)) {
		return KERN_FAILURE;
	}

	job_ack_no_senders(j);

	return KERN_SUCCESS;
}
906
/* Mach notify callback: intentionally a no-op (see comment below). */
kern_return_t
do_mach_notify_send_once(mach_port_t notify __attribute__((unused)))
{
	/* This message is sent to us every time we close a port that we have
	 * outstanding Mach notification requests on. We can safely ignore this
	 * message.
	 */

	return KERN_SUCCESS;
}
917
/* Mach notify callback: one of our send rights lost its receiver.
 * Unblocks a pending log drain if the dead name was its reply port,
 * then lets the job manager tear down anything keyed on the port. */
kern_return_t
do_mach_notify_dead_name(mach_port_t notify __attribute__((unused)),
		mach_port_name_t name)
{
	/* This message is sent to us when one of our send rights no longer has
	 * a receiver somewhere else on the system.
	 */

	if (name == drain_reply_port) {
		launchd_assumes(launchd_mport_deallocate(name) == KERN_SUCCESS);
		drain_reply_port = MACH_PORT_NULL;
	}

	if (launchd_assumes(root_jobmgr != NULL)) {
		root_jobmgr = jobmgr_delete_anything_with_port(root_jobmgr, name);
	}

	/* A dead-name notification about a port appears to increment the
	 * rights on said port. Let's deallocate it so that we don't leak
	 * dead-name ports.
	 */
	launchd_assumes(launchd_mport_deallocate(name) == KERN_SUCCESS);

	return KERN_SUCCESS;
}
943
/* Stash a pointer to the audit trailer of the message just received so
 * runtime_get_caller_creds() can identify the sender. The trailer lives
 * inside the receive buffer, so the pointer is valid only until the
 * next receive; au_tok is NULLed when no audit token is present.
 * NOTE(review): the adjustment subtracts (sizeof(trailer_type) -
 * sizeof(trailer_size)), which is zero on common ABIs -- confirm the
 * intended formula. */
void
record_caller_creds(mach_msg_header_t *mh)
{
	mach_msg_max_trailer_t *tp;
	size_t trailer_size;

	/* The trailer follows the message body, which is padded to a
	 * natural boundary by round_msg(). */
	tp = (mach_msg_max_trailer_t *)((vm_offset_t)mh + round_msg(mh->msgh_size));

	trailer_size = tp->msgh_trailer_size - (mach_msg_size_t)(sizeof(mach_msg_trailer_type_t) - sizeof(mach_msg_trailer_size_t));

	if (trailer_size < (mach_msg_size_t)sizeof(audit_token_t)) {
		au_tok = NULL;
		return;
	}

	au_tok = &tp->msgh_audit;
}
961
/* Copy the credentials from the audit token recorded by
 * record_caller_creds() into *ldc. Returns false when the last message
 * received carried no audit token. */
bool
runtime_get_caller_creds(struct ldcred *ldc)
{
	if (!au_tok) {
		return false;
	}

	audit_token_to_au32(*au_tok, /* audit UID */ NULL, &ldc->euid,
			&ldc->egid, &ldc->uid, &ldc->gid, &ldc->pid,
			&ldc->asid, /* au_tid_t */ NULL);

	return true;
}
975
/* Core receive/dispatch/reply loop over ipc_port_set.
 *
 * Each pass uses the combined send/receive form of mach_msg(): the
 * reply from the previous iteration (if any) is sent while the next
 * request is received. The two caller-supplied buffers swap roles every
 * iteration. Returns when max_msg_size has outgrown the caller's
 * buffers (so launchd_runtime() can reallocate) or, abnormally, if the
 * demux table is corrupt. */
void
launchd_runtime2(mach_msg_size_t msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply)
{
	mach_msg_options_t options, tmp_options;
	mig_reply_error_t *bufTemp;
	mig_callback the_demux;
	mach_msg_timeout_t to;
	mach_msg_return_t mr;

	/* Request the audit trailer so record_caller_creds() can identify senders. */
	options = MACH_RCV_MSG|MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT) |
		MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0);

	tmp_options = options;

	for (;;) {
		to = MACH_MSG_TIMEOUT_NONE;

		if (msg_size != max_msg_size) {
			/* The buffer isn't big enough to receive messages anymore...
			 * Stop receiving; once any pending reply has been sent, bail
			 * out so the caller can reallocate. */
			tmp_options &= ~MACH_RCV_MSG;
			options &= ~MACH_RCV_MSG;
			if (!(tmp_options & MACH_SEND_MSG)) {
				return;
			}
		}

		/* Apply a receive timeout when idle work may be pending, so we
		 * can run the idle callback or shut down an idle launchd. */
		if ((tmp_options & MACH_RCV_MSG) && (runtime_idle_callback || (runtime_busy_cnt == 0))) {
			tmp_options |= MACH_RCV_TIMEOUT;

			if (!(tmp_options & MACH_SEND_TIMEOUT)) {
				to = runtime_busy_cnt ? runtime_idle_timeout : (RUNTIME_ADVISABLE_IDLE_TIMEOUT * 1000);
			}
		}

		runtime_log_push();

		mr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
				msg_size, ipc_port_set, to, MACH_PORT_NULL);

		tmp_options = options;

		if (mr == MACH_SEND_INVALID_DEST || mr == MACH_SEND_TIMED_OUT) {
			/* We need to clean up and start over. */
			if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
				mach_msg_destroy(&bufReply->Head);
			}
			continue;
		} else if (mr == MACH_RCV_TIMED_OUT) {
			if (to != MACH_MSG_TIMEOUT_NONE) {
				if (runtime_busy_cnt == 0) {
					launchd_shutdown();
				} else if (runtime_idle_callback) {
					runtime_idle_callback();
				}
			}
			continue;
		} else if (!launchd_assumes(mr == MACH_MSG_SUCCESS)) {
			continue;
		}

		/* Swap roles: what we just received becomes the request, and the
		 * old request buffer is reused for building the reply. */
		bufTemp = bufRequest;
		bufRequest = bufReply;
		bufReply = bufTemp;

		if (!(tmp_options & MACH_RCV_MSG)) {
			continue;
		}

		/* we have another request message */

		if (!launchd_assumes(mig_cb_table != NULL)) {
			break;
		}

		the_demux = mig_cb_table[MACH_PORT_INDEX(bufRequest->Head.msgh_local_port)];

		if (!launchd_assumes(the_demux != NULL)) {
			break;
		}

		record_caller_creds(&bufRequest->Head);

		/*
		 * This is a total hack. We really need a bit in the kernel's proc
		 * struct to declare our intent.
		 */
		static int no_hang_fd = -1;
		if (no_hang_fd == -1) {
			no_hang_fd = _fd(open("/dev/autofs_nowait", 0));
		}

		if (the_demux(&bufRequest->Head, &bufReply->Head) == FALSE) {
			/* XXX - also gross */
			if (bufRequest->Head.msgh_id == MACH_NOTIFY_NO_SENDERS) {
				notify_server(&bufRequest->Head, &bufReply->Head);
			}
		}

		if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
			if (bufReply->RetCode == MIG_NO_REPLY) {
				/* The handler will reply later; send nothing now. */
				bufReply->Head.msgh_remote_port = MACH_PORT_NULL;
			} else if ((bufReply->RetCode != KERN_SUCCESS) && (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
				/* destroy the request - but not the reply port */
				bufRequest->Head.msgh_remote_port = MACH_PORT_NULL;
				mach_msg_destroy(&bufRequest->Head);
			}
		}

		if (bufReply->Head.msgh_remote_port != MACH_PORT_NULL) {
			tmp_options |= MACH_SEND_MSG;

			/* Only plain send rights get a send timeout; a send-once
			 * reply right can never block the sender. */
			if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != MACH_MSG_TYPE_MOVE_SEND_ONCE) {
				tmp_options |= MACH_SEND_TIMEOUT;
			}
		}
	}
}
1093
/* close(2) wrapper that also cancels any not-yet-dispatched kevents for
 * this fd in the bulk batch currently being processed (by zeroing their
 * filter -- see x_handle_kqueue()), so their callbacks don't fire on a
 * recycled descriptor. */
int
runtime_close(int fd)
{
	int i;

	/* Only entries after the one currently being dispatched can still fire. */
	if (bulk_kev) for (i = bulk_kev_i + 1; i < bulk_kev_cnt; i++) {
		switch (bulk_kev[i].filter) {
		case EVFILT_VNODE:
		case EVFILT_WRITE:
		case EVFILT_READ:
			if ((int)bulk_kev[i].ident == fd) {
				runtime_syslog(LOG_DEBUG, "Skipping kevent index: %d", i);
				bulk_kev[i].filter = 0;
			}
			/* fallthrough */
		default:
			break;
		}
	}

	return close(fd);
}
1115
/* Shutdown-debug log file; opened lazily by runtime_vsyslog() when
 * debug_shutdown_hangs is in effect. */
static FILE *ourlogfile;

/* Flush the shutdown-debug log and force it to stable storage; a no-op
 * if the log was never opened. */
void
runtime_closelog(void)
{
	if (ourlogfile) {
		launchd_assumes(fflush(ourlogfile) == 0);
		launchd_assumes(runtime_fsync(fileno(ourlogfile)) != -1);
	}
}
1126
/* fsync(2) wrapper: while debugging shutdown hangs, use F_FULLFSYNC so
 * the drive flushes its own cache too and the log survives a power cut. */
int
runtime_fsync(int fd)
{
	if (debug_shutdown_hangs) {
		return fcntl(fd, F_FULLFSYNC, NULL);
	} else {
		return fsync(fd);
	}
}
1136
/* Priority mask applied by runtime_vsyslog(), in setlogmask(3) format;
 * defaults to LOG_NOTICE and more severe. */
static int internal_mask_pri = LOG_UPTO(LOG_NOTICE);
//static int internal_mask_pri = LOG_UPTO(LOG_DEBUG);

/* Replace the runtime log mask and return the newly installed value. */
int
runtime_setlogmask(int maskpri)
{
	internal_mask_pri = maskpri;

	return internal_mask_pri;
}
1147
/* printf-style logging front end for launchd itself: fills in launchd's
 * own syslog attributes (name, facility chosen by whether we are PID 1,
 * ids) and forwards to runtime_vsyslog(). */
void
runtime_syslog(int pri, const char *message, ...)
{
	struct runtime_syslog_attr attr = {
		"com.apple.launchd", "com.apple.launchd",
		getpid() == 1 ? "System" : "Background",
		pri, getuid(), getpid(), getpid()
	};
	va_list ap;

	va_start(ap, message);

	runtime_vsyslog(&attr, message, ap);

	va_end(ap);
}
1164
/*
 * Core log-message formatter.
 *
 * Two output paths:
 *  - Normal operation: render the message with vsnprintf() and queue it via
 *    logmsg_add() for later draining/forwarding.
 *  - Shutdown debugging (debug_shutdown_hangs set and we are PID 1): append a
 *    timestamped line directly to /var/log/launchd-shutdown.log.
 *
 * NOTE(review): attr->priority may be rewritten in place (LOG_APPLEONLY ->
 * LOG_NOTICE), so the caller's attr struct can be modified by this call.
 */
void
runtime_vsyslog(struct runtime_syslog_attr *attr, const char *message, va_list args)
{
	static pthread_mutex_t ourlock = PTHREAD_MUTEX_INITIALIZER;
	static struct timeval shutdown_start;	/* time of first shutdown-log write; zero until then */
	static struct timeval prev_msg;		/* time of previous message, for per-line deltas */
	static int apple_internal_logging = 1;	/* 1 = not yet probed; afterwards holds the stat() result (0 = /AppleInternal exists, -1 = it doesn't) */
	struct timeval tvnow, tvd_total, tvd_msg_delta = { 0, 0 };
	struct stat sb;
	int saved_errno = errno;	/* preserved for the manual "%m" expansion below */
	char newmsg[10000];
	size_t i, j;

	/* Drop messages filtered out by the current log mask. */
	if (!(LOG_MASK(attr->priority) & internal_mask_pri)) {
		goto out;
	}

	/* Probe for an AppleInternal system exactly once. */
	if (apple_internal_logging == 1) {
		apple_internal_logging = stat("/AppleInternal", &sb);
	}


	/* Normal (non-shutdown-debug) path: queue the rendered message. */
	if (!(debug_shutdown_hangs && getpid() == 1)) {
		if (attr->priority == LOG_APPLEONLY) {
			/* Apple-internal-only messages are dropped on customer systems. */
			if (apple_internal_logging == -1) {
				goto out;
			}
			attr->priority = LOG_NOTICE;
		}
		vsnprintf(newmsg, sizeof(newmsg), message, args);
		logmsg_add(attr, saved_errno, newmsg);
		goto out;
	}

	/* Shutdown-debug path: write directly to the shutdown log with timings. */
	if (shutdown_start.tv_sec == 0) {
		gettimeofday(&shutdown_start, NULL);
	}

	if (gettimeofday(&tvnow, NULL) == -1) {
		tvnow.tv_sec = 0;
		tvnow.tv_usec = 0;
	}

	/* Serialize log-file creation; only the first caller actually opens it. */
	pthread_mutex_lock(&ourlock);

	if (ourlogfile == NULL) {
		rename("/var/log/launchd-shutdown.log", "/var/log/launchd-shutdown.log.1");
		ourlogfile = fopen("/var/log/launchd-shutdown.log", "a");
	}

	pthread_mutex_unlock(&ourlock);

	if (ourlogfile == NULL) {
		goto out;
	}

	if (message == NULL) {
		goto out;
	}

	/* Elapsed time since shutdown began, and since the previous message. */
	timersub(&tvnow, &shutdown_start, &tvd_total);

	if (prev_msg.tv_sec != 0) {
		timersub(&tvnow, &prev_msg, &tvd_msg_delta);
	}

	prev_msg = tvnow;

	snprintf(newmsg, sizeof(newmsg), "%3ld.%06d%4ld.%06d%6u %-40s%6u %-40s ",
			tvd_total.tv_sec, tvd_total.tv_usec,
			tvd_msg_delta.tv_sec, tvd_msg_delta.tv_usec,
			attr->from_pid, attr->from_name,
			attr->about_pid, attr->about_name);

	/* Append the caller's format string, expanding "%m" (strerror of the
	 * saved errno) ourselves; any other conversions are left intact for
	 * vfprintf() below.
	 * NOTE(review): this copy loop does not bound-check against
	 * sizeof(newmsg); a sufficiently long message could overflow the
	 * buffer — verify upstream message lengths. */
	for (i = 0, j = strlen(newmsg); message[i];) {
		if (message[i] == '%' && message[i + 1] == 'm') {
			char *errs = strerror(saved_errno);
			strcpy(newmsg + j, errs ? errs : "unknown error");
			j += strlen(newmsg + j);
			i += 2;
		} else {
			newmsg[j] = message[i];
			j++;
			i++;
		}
	}

	strcpy(newmsg + j, "\n");

	/* newmsg still contains the remaining conversions; expand them now. */
	vfprintf(ourlogfile, newmsg, args);

out:
	/* A corked log-drain reply may now be satisfiable. */
	runtime_log_uncork_pending_drain();
}
1259
/*
 * Append one rendered message to the in-memory log queue.
 *
 * The record header and its four NUL-terminated strings are packed into a
 * single calloc'd allocation so the queue can later be serialized wholesale
 * by runtime_log_pack().  Returns false if the allocation fails.
 */
bool
logmsg_add(struct runtime_syslog_attr *attr, int err_num, const char *msg)
{
	/* header + four strings + their four NUL terminators */
	size_t lm_sz = sizeof(struct logmsg_s) + strlen(msg) + strlen(attr->from_name) + strlen(attr->about_name) + strlen(attr->session_name) + 4;
	char *data_off;
	struct logmsg_s *lm;

#define ROUND_TO_64BIT_WORD_SIZE(x) ((x + 7) & ~7)

	/* round up so that unpacking on the log_drain side doesn't cause unaligned accesses */
	lm_sz = ROUND_TO_64BIT_WORD_SIZE(lm_sz);

	if (!(lm = calloc(1, lm_sz))) {
		return false;
	}

	data_off = lm->data;

	launchd_assumes(gettimeofday(&lm->when, NULL) != -1);
	lm->from_pid = attr->from_pid;
	lm->about_pid = attr->about_pid;
	lm->err_num = err_num;
	lm->pri = attr->priority;
	lm->obj_sz = lm_sz;
	/* copy the strings back-to-back after the header, recording where each lands */
	lm->msg = data_off;
	data_off += sprintf(data_off, "%s", msg) + 1;
	lm->from_name = data_off;
	data_off += sprintf(data_off, "%s", attr->from_name) + 1;
	lm->about_name = data_off;
	data_off += sprintf(data_off, "%s", attr->about_name) + 1;
	lm->session_name = data_off;
	data_off += sprintf(data_off, "%s", attr->session_name) + 1;

	STAILQ_INSERT_TAIL(&logmsg_queue, lm, sqe);
	logmsg_queue_sz += lm_sz;
	logmsg_queue_cnt++;

	return true;
}
1299
1300void
1301logmsg_remove(struct logmsg_s *lm)
1302{
1303 STAILQ_REMOVE(&logmsg_queue, lm, logmsg_s, sqe);
1304 logmsg_queue_sz -= lm->obj_sz;
1305 logmsg_queue_cnt--;
1306
1307 free(lm);
1308}
1309
/*
 * Serialize the entire log queue into one MIG-allocated buffer.
 *
 * Each record's embedded string pointers are rewritten as offsets relative
 * to the record's own base address so the receiver can relocate them (see
 * runtime_log_forward()).  The queue is emptied as a side effect.
 *
 * Returns 0 on success; 1 if the buffer could not be allocated.
 */
kern_return_t
runtime_log_pack(vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	struct logmsg_s *lm;
	void *offset;

	*outvalCnt = logmsg_queue_sz;

	mig_allocate(outval, *outvalCnt);

	if (*outval == 0) {
		return 1;
	}

	offset = (void *)*outval;

	while ((lm = STAILQ_FIRST(&logmsg_queue))) {
		/* convert absolute pointers to record-relative offsets */
		lm->from_name -= (size_t)lm;
		lm->about_name -= (size_t)lm;
		lm->msg -= (size_t)lm;
		lm->session_name -= (size_t)lm;

		memcpy(offset, lm, lm->obj_sz);

		offset += lm->obj_sz;

		logmsg_remove(lm);
	}

	return 0;
}
1341
1342void
1343runtime_log_uncork_pending_drain(void)
1344{
1345 mach_msg_type_number_t outvalCnt;
1346 mach_port_t tmp_port;
1347 vm_offset_t outval;
1348
1349 if (!drain_reply_port) {
1350 return;
1351 }
1352
1353 if (logmsg_queue_cnt == 0) {
1354 return;
1355 }
1356
1357 if (runtime_log_pack(&outval, &outvalCnt) != 0) {
1358 return;
1359 }
1360
1361 tmp_port = drain_reply_port;
1362 drain_reply_port = MACH_PORT_NULL;
1363
1364 if ((errno = job_mig_log_drain_reply(tmp_port, 0, outval, outvalCnt))) {
1365 launchd_assumes(errno == MACH_SEND_INVALID_DEST);
1366 launchd_assumes(launchd_mport_deallocate(tmp_port) == KERN_SUCCESS);
1367 }
1368
1369 mig_deallocate(outval, outvalCnt);
1370}
1371
1372void
1373runtime_log_push(void)
1374{
1375 mach_msg_type_number_t outvalCnt;
1376 vm_offset_t outval;
1377
1378 if (logmsg_queue_cnt == 0) {
1379 launchd_assumes(STAILQ_EMPTY(&logmsg_queue));
1380 return;
1381 } else if (getpid() == 1) {
1382 return;
1383 }
1384
1385 if (runtime_log_pack(&outval, &outvalCnt) != 0) {
1386 return;
1387 }
1388
1389 launchd_assumes(_vprocmgr_log_forward(inherited_bootstrap_port, (void *)outval, outvalCnt) == NULL);
1390
1391 mig_deallocate(outval, outvalCnt);
1392}
1393
1394kern_return_t
1395runtime_log_forward(uid_t forward_uid, gid_t forward_gid, vm_offset_t inval, mach_msg_type_number_t invalCnt)
1396{
1397 struct logmsg_s *lm, *lm_walk;
1398 mach_msg_type_number_t data_left = invalCnt;
1399
1400 if (inval == 0) {
1401 return 0;
1402 }
1403
1404 for (lm_walk = (struct logmsg_s *)inval; (data_left > 0) && (lm_walk->obj_sz <= data_left); lm_walk = ((void *)lm_walk + lm_walk->obj_sz)) {
1405 if (!launchd_assumes(lm = malloc(lm_walk->obj_sz))) {
1406 continue;
1407 }
1408
1409 memcpy(lm, lm_walk, lm_walk->obj_sz);
1410 lm->sender_uid = forward_uid;
1411 lm->sender_gid = forward_gid;
1412
1413 lm->from_name += (size_t)lm;
1414 lm->about_name += (size_t)lm;
1415 lm->msg += (size_t)lm;
1416 lm->session_name += (size_t)lm;
1417
1418 STAILQ_INSERT_TAIL(&logmsg_queue, lm, sqe);
1419 logmsg_queue_sz += lm->obj_sz;
1420 logmsg_queue_cnt++;
1421
1422 data_left -= lm->obj_sz;
1423 }
1424
1425 mig_deallocate(inval, invalCnt);
1426
1427 return 0;
1428}
1429
/*
 * MIG handler for a log-drain request.
 *
 * If messages are queued, pack and return them immediately.  Otherwise park
 * the client's send-once reply port in drain_reply_port and answer later
 * from runtime_log_uncork_pending_drain(); a dead-name notification is
 * requested so a dying client can be detected.
 *
 * NOTE(review): if a drain is already pending, drain_reply_port is simply
 * overwritten (the launchd_assumes fires, but the earlier send-once right
 * would be orphaned) — verify that only one client ever drains at a time.
 */
kern_return_t
runtime_log_drain(mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	if (logmsg_queue_cnt == 0) {
		launchd_assumes(STAILQ_EMPTY(&logmsg_queue));
		launchd_assumes(drain_reply_port == 0);

		drain_reply_port = srp;
		launchd_assumes(launchd_mport_notify_req(drain_reply_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);

		/* MIG_NO_REPLY suppresses the reply message for now. */
		return MIG_NO_REPLY;
	}

	return runtime_log_pack(outval, outvalCnt);
}
1445
1446/*
1447 * We should break this into two reference counts.
1448 *
1449 * One for hard references that would prevent exiting.
1450 * One for soft references that would only prevent idle exiting.
1451 *
1452 * In the long run, reference counting should completely automate when a
1453 * process can and should exit.
1454 */
1455void
1456runtime_add_ref(void)
1457{
1458 runtime_busy_cnt++;
1459}
1460
1461void
1462runtime_del_ref(void)
1463{
1464 runtime_busy_cnt--;
1465}
fe044cc9
A
1466
/*
 * EXCEPTION_DEFAULT-style Mach exception handler: log the event, then
 * release the thread and task port rights the kernel sent along with the
 * exception message.  Returning 0 reports the exception as handled.
 */
kern_return_t
catch_exception_raise(mach_port_t exception_port __attribute__((unused)),
		mach_port_t thread, mach_port_t task,
		exception_type_t exception, exception_data_t code,
		mach_msg_type_number_t codeCnt)
{
	runtime_syslog(LOG_NOTICE, "%s(): thread: 0x%x task: 0x%x type: 0x%x code: %p codeCnt: 0x%x",
			__func__, thread, task, exception, code, codeCnt);

	/* the kernel gave us a right for each port; drop them or they leak */
	launchd_assumes(launchd_mport_deallocate(thread) == KERN_SUCCESS);
	launchd_assumes(launchd_mport_deallocate(task) == KERN_SUCCESS);

	return 0;
}
1481
/*
 * EXCEPTION_STATE-style Mach exception handler: log the event, then echo
 * the old thread state back as the new state so the thread resumes
 * unmodified.
 */
kern_return_t
catch_exception_raise_state(mach_port_t exception_port __attribute__((unused)),
		exception_type_t exception,
		const exception_data_t code, mach_msg_type_number_t codeCnt,
		int *flavor,
		const thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
		thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
{
	runtime_syslog(LOG_NOTICE, "%s(): type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
			__func__, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);

	/* pass the state through unchanged */
	memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
	*new_stateCnt = old_stateCnt;

	return 0;
}
1498
/*
 * EXCEPTION_STATE_IDENTITY-style Mach exception handler: log the event,
 * echo the old thread state back unchanged, and release the thread/task
 * port rights that accompanied the exception message.
 */
kern_return_t
catch_exception_raise_state_identity(mach_port_t exception_port __attribute__((unused)),
		mach_port_t thread, mach_port_t task,
		exception_type_t exception,
		exception_data_t code, mach_msg_type_number_t codeCnt,
		int *flavor,
		thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
		thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
{
	runtime_syslog(LOG_NOTICE, "%s(): thread: 0x%x task: 0x%x type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
			__func__, thread, task, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);

	/* pass the state through unchanged */
	memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
	*new_stateCnt = old_stateCnt;

	/* the kernel gave us a right for each port; drop them or they leak */
	launchd_assumes(launchd_mport_deallocate(thread) == KERN_SUCCESS);
	launchd_assumes(launchd_mport_deallocate(task) == KERN_SUCCESS);

	return 0;
}