2 * @APPLE_APACHE_LICENSE_HEADER_START@
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
16 * @APPLE_APACHE_LICENSE_HEADER_END@
19 static const char *const __rcs_file_version__
= "$Revision: 23585 $";
22 #include "launchd_core_logic.h"
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/mach_time.h>
28 #include <mach/boolean.h>
29 #include <mach/message.h>
30 #include <mach/notify.h>
31 #include <mach/mig_errors.h>
32 #include <mach/mach_traps.h>
33 #include <mach/mach_interface.h>
34 #include <mach/host_info.h>
35 #include <mach/mach_host.h>
36 #include <mach/exception.h>
37 #include <mach/host_reboot.h>
38 #include <sys/types.h>
39 #include <sys/queue.h>
40 #include <sys/event.h>
42 #include <sys/ucred.h>
43 #include <sys/fcntl.h>
45 #include <sys/reboot.h>
47 #include <sys/sysctl.h>
48 #include <sys/sockio.h>
50 #include <sys/resource.h>
51 #include <sys/ioctl.h>
52 #include <sys/mount.h>
55 #include <netinet/in.h>
56 #include <netinet/in_var.h>
57 #include <netinet6/nd6.h>
58 #include <bsm/libbsm.h>
81 #include <quarantine.h>
84 #include "liblaunch_public.h"
85 #include "liblaunch_private.h"
86 #include "liblaunch_internal.h"
87 #include "libbootstrap_public.h"
88 #include "libbootstrap_private.h"
89 #include "libvproc_public.h"
90 #include "libvproc_internal.h"
95 #include "launchd_runtime.h"
96 #include "launchd_unix_ipc.h"
97 #include "protocol_vproc.h"
98 #include "protocol_vprocServer.h"
99 #include "job_reply.h"
101 #define LAUNCHD_MIN_JOB_RUN_TIME 10
102 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
103 #define LAUNCHD_SIGKILL_TIMER 5
106 #define TAKE_SUBSET_NAME "TakeSubsetName"
107 #define TAKE_SUBSET_PID "TakeSubsetPID"
108 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
/* True iff v is a nonzero power of two.
 * The parameter is fully parenthesized (CERT PRE01-C): without the
 * parentheses, a compound argument such as IS_POWER_OF_TWO(a|b) would
 * misassociate with the '&', '-', and '&&' operators in the expansion. */
#define IS_POWER_OF_TWO(v) (!((v) & ((v) - 1)) && (v))
112 extern char **environ
;
114 struct waiting_for_removal
{
115 SLIST_ENTRY(waiting_for_removal
) sle
;
116 mach_port_t reply_port
;
119 static bool waiting4removal_new(job_t j
, mach_port_t rp
);
120 static void waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
);
123 SLIST_ENTRY(mspolicy
) sle
;
124 unsigned int allow
:1, per_pid
:1;
128 static bool mspolicy_new(job_t j
, const char *name
, bool allow
, bool pid_local
, bool skip_check
);
129 static bool mspolicy_copy(job_t j_to
, job_t j_from
);
130 static void mspolicy_setup(launch_data_t obj
, const char *key
, void *context
);
131 static bool mspolicy_check(job_t j
, const char *name
, bool pid_local
);
132 static void mspolicy_delete(job_t j
, struct mspolicy
*msp
);
135 SLIST_ENTRY(machservice
) sle
;
136 SLIST_ENTRY(machservice
) special_port_sle
;
137 LIST_ENTRY(machservice
) name_hash_sle
;
138 LIST_ENTRY(machservice
) port_hash_sle
;
140 uint64_t bad_perf_cnt
;
141 unsigned int gen_num
;
142 mach_port_name_t port
;
143 unsigned int isActive
:1, reset
:1, recv
:1, hide
:1, kUNCServer
:1, per_user_hack
:1, debug_on_close
:1, per_pid
:1, special_port_num
:10;
147 static SLIST_HEAD(, machservice
) special_ports
; /* hack, this should be per jobmgr_t */
149 #define PORT_HASH_SIZE 32
150 #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
152 static LIST_HEAD(, machservice
) port_hash
[PORT_HASH_SIZE
];
154 static void machservice_setup(launch_data_t obj
, const char *key
, void *context
);
155 static void machservice_setup_options(launch_data_t obj
, const char *key
, void *context
);
156 static void machservice_resetport(job_t j
, struct machservice
*ms
);
157 static struct machservice
*machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
);
158 static void machservice_ignore(job_t j
, struct machservice
*ms
);
159 static void machservice_watch(job_t j
, struct machservice
*ms
);
160 static void machservice_delete(job_t j
, struct machservice
*, bool port_died
);
161 static void machservice_request_notifications(struct machservice
*);
162 static mach_port_t
machservice_port(struct machservice
*);
163 static job_t
machservice_job(struct machservice
*);
164 static bool machservice_hidden(struct machservice
*);
165 static bool machservice_active(struct machservice
*);
166 static const char *machservice_name(struct machservice
*);
167 static bootstrap_status_t
machservice_status(struct machservice
*);
170 SLIST_ENTRY(socketgroup
) sle
;
172 unsigned int junkfds
:1, fd_cnt
:31;
179 static bool socketgroup_new(job_t j
, const char *name
, int *fds
, unsigned int fd_cnt
, bool junkfds
);
180 static void socketgroup_delete(job_t j
, struct socketgroup
*sg
);
181 static void socketgroup_watch(job_t j
, struct socketgroup
*sg
);
182 static void socketgroup_ignore(job_t j
, struct socketgroup
*sg
);
183 static void socketgroup_callback(job_t j
);
184 static void socketgroup_setup(launch_data_t obj
, const char *key
, void *context
);
185 static void socketgroup_kevent_mod(job_t j
, struct socketgroup
*sg
, bool do_add
);
187 struct calendarinterval
{
188 LIST_ENTRY(calendarinterval
) global_sle
;
189 SLIST_ENTRY(calendarinterval
) sle
;
195 static LIST_HEAD(, calendarinterval
) sorted_calendar_events
;
197 static bool calendarinterval_new(job_t j
, struct tm
*w
);
198 static bool calendarinterval_new_from_obj(job_t j
, launch_data_t obj
);
199 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
);
200 static void calendarinterval_delete(job_t j
, struct calendarinterval
*ci
);
201 static void calendarinterval_setalarm(job_t j
, struct calendarinterval
*ci
);
202 static void calendarinterval_callback(void);
203 static void calendarinterval_sanity_check(void);
206 SLIST_ENTRY(envitem
) sle
;
214 static bool envitem_new(job_t j
, const char *k
, const char *v
, bool global
);
215 static void envitem_delete(job_t j
, struct envitem
*ei
, bool global
);
216 static void envitem_setup(launch_data_t obj
, const char *key
, void *context
);
219 SLIST_ENTRY(limititem
) sle
;
221 unsigned int setsoft
:1, sethard
:1, which
:30;
224 static bool limititem_update(job_t j
, int w
, rlim_t r
);
225 static void limititem_delete(job_t j
, struct limititem
*li
);
226 static void limititem_setup(launch_data_t obj
, const char *key
, void *context
);
228 static void seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
);
244 // FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
245 } semaphore_reason_t
;
247 struct semaphoreitem
{
248 SLIST_ENTRY(semaphoreitem
) sle
;
249 semaphore_reason_t why
;
257 struct semaphoreitem_dict_iter_context
{
259 semaphore_reason_t why_true
;
260 semaphore_reason_t why_false
;
263 static bool semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
);
264 static void semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
);
265 static void semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
);
266 static void semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
);
267 static void semaphoreitem_callback(job_t j
, struct kevent
*kev
);
268 static void semaphoreitem_watch(job_t j
, struct semaphoreitem
*si
);
269 static void semaphoreitem_ignore(job_t j
, struct semaphoreitem
*si
);
270 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
);
#define ACTIVE_JOB_HASH_SIZE 32
/* Hash a PID into the active-jobs table: bitmask when the table size is a
 * power of two (compile-time constant fold), modulo otherwise.
 * The parameter is parenthesized (CERT PRE01-C): without it, an expression
 * argument such as ACTIVE_JOB_HASH(a + b) would expand to
 * 'a + b % ACTIVE_JOB_HASH_SIZE', applying '%' to b alone. */
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? ((x) & (ACTIVE_JOB_HASH_SIZE - 1)) : ((x) % ACTIVE_JOB_HASH_SIZE))
#define MACHSERVICE_HASH_SIZE 37
277 kq_callback kqjobmgr_callback
;
278 SLIST_ENTRY(jobmgr_s
) sle
;
279 SLIST_HEAD(, jobmgr_s
) submgrs
;
280 LIST_HEAD(, job_s
) jobs
;
281 LIST_HEAD(, job_s
) active_jobs
[ACTIVE_JOB_HASH_SIZE
];
282 LIST_HEAD(, machservice
) ms_hash
[MACHSERVICE_HASH_SIZE
];
284 mach_port_t req_port
;
287 unsigned int global_on_demand_cnt
;
288 unsigned int hopefully_first_cnt
;
289 unsigned int normal_active_cnt
;
290 unsigned int sent_stop_to_normal_jobs
:1, sent_stop_to_hopefully_last_jobs
:1, shutting_down
:1, session_initialized
:1;
297 #define jobmgr_assumes(jm, e) \
298 (__builtin_expect(!(e), 0) ? jobmgr_log_bug(jm, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
300 static jobmgr_t
jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
);
301 static job_t
jobmgr_import2(jobmgr_t jm
, launch_data_t pload
);
302 static jobmgr_t
jobmgr_parent(jobmgr_t jm
);
303 static jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm
);
304 static bool jobmgr_label_test(jobmgr_t jm
, const char *str
);
305 static void jobmgr_reap_bulk(jobmgr_t jm
, struct kevent
*kev
);
306 static void jobmgr_log_stray_children(jobmgr_t jm
);
307 static void jobmgr_remove(jobmgr_t jm
);
308 static void jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
);
309 static job_t
jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
);
310 static job_t
jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
);
311 static job_t
job_mig_intran2(jobmgr_t jm
, mach_port_t mport
, pid_t upid
);
312 static void job_export_all2(jobmgr_t jm
, launch_data_t where
);
313 static void jobmgr_callback(void *obj
, struct kevent
*kev
);
314 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm
);
315 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
);
316 static struct machservice
*jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
);
317 static void jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
318 static void jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
319 /* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
320 static void jobmgr_log_bug(jobmgr_t jm
, const char *rcs_rev
, const char *path
, unsigned int line
, const char *test
);
322 #define DO_RUSAGE_SUMMATION 0
324 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
327 kq_callback kqjob_callback
;
328 LIST_ENTRY(job_s
) sle
;
329 LIST_ENTRY(job_s
) pid_hash_sle
;
330 LIST_ENTRY(job_s
) label_hash_sle
;
331 SLIST_HEAD(, socketgroup
) sockets
;
332 SLIST_HEAD(, calendarinterval
) cal_intervals
;
333 SLIST_HEAD(, envitem
) global_env
;
334 SLIST_HEAD(, envitem
) env
;
335 SLIST_HEAD(, limititem
) limits
;
336 SLIST_HEAD(, mspolicy
) mspolicies
;
337 SLIST_HEAD(, machservice
) machservices
;
338 SLIST_HEAD(, semaphoreitem
) semaphores
;
339 SLIST_HEAD(, waiting_for_removal
) removal_watchers
;
340 #if DO_RUSAGE_SUMMATION
343 cpu_type_t
*j_binpref
;
344 size_t j_binpref_cnt
;
346 mach_port_t wait_reply_port
; /* we probably should switch to a list of waiters */
357 char *alt_exc_handler
;
358 struct machservice
*lastlookup
;
359 unsigned int lastlookup_gennum
;
361 char *seatbelt_profile
;
362 uint64_t seatbelt_flags
;
365 void *quarantine_data
;
366 size_t quarantine_data_sz
;
370 int last_exit_status
;
374 unsigned int timeout
;
375 unsigned int exit_timeout
;
377 uint64_t sent_sigterm_time
;
379 uint32_t min_run_time
;
380 uint32_t start_interval
;
381 unsigned int checkedin
:1, anonymous
:1, debug
:1, inetcompat
:1, inetcompat_wait
:1,
382 ondemand
:1, session_create
:1, low_pri_io
:1, no_init_groups
:1, priv_port_has_senders
:1,
383 importing_global_env
:1, importing_hard_limits
:1, setmask
:1, legacy_mach_job
:1, start_pending
:1;
385 unsigned int globargv
:1, wait4debugger
:1, unload_at_exit
:1, stall_before_exec
:1, only_once
:1,
386 currently_ignored
:1, forced_peers_to_demand_mode
:1, setnice
:1, hopefully_exits_last
:1, removal_pending
:1,
387 wait4pipe_eof
:1, sent_sigkill
:1, debug_before_kill
:1, weird_bootstrap
:1, start_on_mount
:1,
388 per_user
:1, hopefully_exits_first
:1, deny_unknown_mslookups
:1, unload_at_mig_return
:1, abandon_pg
:1,
389 poll_for_vfs_changes
:1, internal_exc_handler
:1, deny_job_creation
:1;
393 #define LABEL_HASH_SIZE 53
395 static LIST_HEAD(, job_s
) label_hash
[LABEL_HASH_SIZE
];
396 static size_t hash_label(const char *label
) __attribute__((pure
));
397 static size_t hash_ms(const char *msstr
) __attribute__((pure
));
400 #define job_assumes(j, e) \
401 (__builtin_expect(!(e), 0) ? job_log_bug(j, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
403 static void job_import_keys(launch_data_t obj
, const char *key
, void *context
);
404 static void job_import_bool(job_t j
, const char *key
, bool value
);
405 static void job_import_string(job_t j
, const char *key
, const char *value
);
406 static void job_import_integer(job_t j
, const char *key
, long long value
);
407 static void job_import_dictionary(job_t j
, const char *key
, launch_data_t value
);
408 static void job_import_array(job_t j
, const char *key
, launch_data_t value
);
409 static void job_import_opaque(job_t j
, const char *key
, launch_data_t value
);
410 static bool job_set_global_on_demand(job_t j
, bool val
);
411 static const char *job_active(job_t j
);
412 static void job_watch(job_t j
);
413 static void job_ignore(job_t j
);
414 static void job_reap(job_t j
);
415 static bool job_useless(job_t j
);
416 static bool job_keepalive(job_t j
);
417 static void job_start(job_t j
);
418 static void job_start_child(job_t j
) __attribute__((noreturn
));
419 static void job_setup_attributes(job_t j
);
420 static bool job_setup_machport(job_t j
);
421 static void job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
);
422 static void job_postfork_become_user(job_t j
);
423 static void job_find_and_blame_pids_with_weird_uids(job_t j
);
424 static void job_force_sampletool(job_t j
);
425 static void job_setup_exception_port(job_t j
, task_t target_task
);
426 static void job_reparent_hack(job_t j
, const char *where
);
427 static void job_callback(void *obj
, struct kevent
*kev
);
428 static void job_callback_proc(job_t j
, int flags
, int fflags
);
429 static void job_callback_timer(job_t j
, void *ident
);
430 static void job_callback_read(job_t j
, int ident
);
431 static void job_log_stray_pg(job_t j
);
432 static job_t
job_new_anonymous(jobmgr_t jm
, pid_t anonpid
);
433 static job_t
job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
);
434 static job_t
job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
);
435 static const char *job_prog(job_t j
);
436 static jobmgr_t
job_get_bs(job_t j
);
437 static void job_kill(job_t j
);
438 static void job_uncork_fork(job_t j
);
439 static void job_log_stdouterr(job_t j
);
440 static void job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
441 static void job_log_error(job_t j
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
442 static void job_log_bug(job_t j
, const char *rcs_rev
, const char *path
, unsigned int line
, const char *test
);
443 static void job_log_stdouterr2(job_t j
, const char *msg
, ...);
444 static void job_set_exeception_port(job_t j
, mach_port_t port
);
445 static kern_return_t
job_handle_mpm_wait(job_t j
, mach_port_t srp
, int *waitstatus
);
449 static const struct {
452 } launchd_keys2limits
[] = {
453 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE
, RLIMIT_CORE
},
454 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU
, RLIMIT_CPU
},
455 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA
, RLIMIT_DATA
},
456 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE
, RLIMIT_FSIZE
},
457 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK
, RLIMIT_MEMLOCK
},
458 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE
, RLIMIT_NOFILE
},
459 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC
, RLIMIT_NPROC
},
460 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS
, RLIMIT_RSS
},
461 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK
, RLIMIT_STACK
},
464 static time_t cronemu(int mon
, int mday
, int hour
, int min
);
465 static time_t cronemu_wday(int wday
, int hour
, int min
);
466 static bool cronemu_mon(struct tm
*wtm
, int mon
, int mday
, int hour
, int min
);
467 static bool cronemu_mday(struct tm
*wtm
, int mday
, int hour
, int min
);
468 static bool cronemu_hour(struct tm
*wtm
, int hour
, int min
);
469 static bool cronemu_min(struct tm
*wtm
, int min
);
471 /* miscellaneous file local functions */
472 static void ensure_root_bkgd_setup(void);
473 static int dir_has_files(job_t j
, const char *path
);
474 static char **mach_cmd2argv(const char *string
);
475 static size_t our_strhash(const char *s
) __attribute__((pure
));
476 static void extract_rcsid_substr(const char *i
, char *o
, size_t osz
);
477 static void do_first_per_user_launchd_hack(void);
478 static size_t get_kern_max_proc(void);
479 static void do_file_init(void) __attribute__((constructor
));
481 /* file local globals */
482 static bool do_apple_internal_magic
;
483 static size_t total_children
;
484 static size_t total_anon_children
;
485 static mach_port_t the_exception_server
;
486 static bool did_first_per_user_launchd_BootCache_hack
;
/* One-shot per-user launchd BootCache hack: applies only to per-user jobs
 * for real login users (mach_uid >= 500, excluding 'nobody' == (uid_t)-2)
 * and only until did_first_per_user_launchd_BootCache_hack is set.
 * The parameter is parenthesized (CERT PRE01-C) so any expression argument
 * associates correctly with '->'. */
#define JOB_BOOTCACHE_HACK_CHECK(j) ((j)->per_user && !did_first_per_user_launchd_BootCache_hack && ((j)->mach_uid >= 500) && ((j)->mach_uid != (uid_t)-2))
488 static jobmgr_t background_jobmgr
;
489 static job_t workaround_5477111
;
490 static mach_timebase_info_data_t tbi
;
492 /* process wide globals */
493 mach_port_t inherited_bootstrap_port
;
494 jobmgr_t root_jobmgr
;
500 struct semaphoreitem
*si
;
501 struct socketgroup
*sg
;
502 struct machservice
*ms
;
504 if (j
->currently_ignored
) {
508 job_log(j
, LOG_DEBUG
, "Ignoring...");
510 j
->currently_ignored
= true;
512 if (j
->poll_for_vfs_changes
) {
513 j
->poll_for_vfs_changes
= false;
514 job_assumes(j
, kevent_mod((uintptr_t)&j
->semaphores
, EVFILT_TIMER
, EV_DELETE
, 0, 0, j
) != -1);
517 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
518 socketgroup_ignore(j
, sg
);
521 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
522 machservice_ignore(j
, ms
);
525 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
526 semaphoreitem_ignore(j
, si
);
533 struct semaphoreitem
*si
;
534 struct socketgroup
*sg
;
535 struct machservice
*ms
;
537 if (!j
->currently_ignored
) {
541 job_log(j
, LOG_DEBUG
, "Watching...");
543 j
->currently_ignored
= false;
545 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
546 socketgroup_watch(j
, sg
);
549 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
550 machservice_watch(j
, ms
);
553 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
554 semaphoreitem_watch(j
, si
);
561 if (!j
->p
|| j
->anonymous
) {
565 job_assumes(j
, runtime_kill(j
->p
, SIGTERM
) != -1);
566 j
->sent_sigterm_time
= mach_absolute_time();
568 if (j
->exit_timeout
) {
569 job_assumes(j
, kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
,
570 EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, j
->exit_timeout
, j
) != -1);
573 job_log(j
, LOG_DEBUG
, "Sent SIGTERM signal");
579 launch_data_t tmp
, tmp2
, tmp3
, r
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
585 if ((tmp
= launch_data_new_string(j
->label
))) {
586 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LABEL
);
588 if ((tmp
= launch_data_new_string(j
->mgr
->name
))) {
589 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
591 if ((tmp
= launch_data_new_bool(j
->ondemand
))) {
592 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_ONDEMAND
);
594 if ((tmp
= launch_data_new_integer(j
->last_exit_status
))) {
595 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LASTEXITSTATUS
);
597 if (j
->p
&& (tmp
= launch_data_new_integer(j
->p
))) {
598 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PID
);
600 if ((tmp
= launch_data_new_integer(j
->timeout
))) {
601 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_TIMEOUT
);
603 if (j
->prog
&& (tmp
= launch_data_new_string(j
->prog
))) {
604 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAM
);
606 if (j
->stdoutpath
&& (tmp
= launch_data_new_string(j
->stdoutpath
))) {
607 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDOUTPATH
);
609 if (j
->stderrpath
&& (tmp
= launch_data_new_string(j
->stderrpath
))) {
610 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDERRORPATH
);
612 if (j
->argv
&& (tmp
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
615 for (i
= 0; i
< j
->argc
; i
++) {
616 if ((tmp2
= launch_data_new_string(j
->argv
[i
]))) {
617 launch_data_array_set_index(tmp
, tmp2
, i
);
621 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
);
624 if (j
->session_create
&& (tmp
= launch_data_new_bool(true))) {
625 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SESSIONCREATE
);
628 if (j
->inetcompat
&& (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
629 if ((tmp2
= launch_data_new_bool(j
->inetcompat_wait
))) {
630 launch_data_dict_insert(tmp
, tmp2
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
);
632 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
);
635 if (!SLIST_EMPTY(&j
->sockets
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
636 struct socketgroup
*sg
;
639 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
643 if ((tmp2
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
644 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
645 if ((tmp3
= launch_data_new_fd(sg
->fds
[i
]))) {
646 launch_data_array_set_index(tmp2
, tmp3
, i
);
649 launch_data_dict_insert(tmp
, tmp2
, sg
->name
);
653 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SOCKETS
);
656 if (!SLIST_EMPTY(&j
->machservices
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
657 struct machservice
*ms
;
661 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
664 tmp3
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
667 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
668 launch_data_dict_insert(tmp3
, tmp2
, ms
->name
);
671 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
672 launch_data_dict_insert(tmp
, tmp2
, ms
->name
);
676 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_MACHSERVICES
);
679 launch_data_dict_insert(r
, tmp3
, LAUNCH_JOBKEY_PERJOBMACHSERVICES
);
687 jobmgr_log_active_jobs(jobmgr_t jm
)
689 const char *why_active
;
693 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
694 jobmgr_log_active_jobs(jmi
);
697 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
698 why_active
= job_active(ji
);
700 job_log(ji
, LOG_DEBUG
, "%s", why_active
? why_active
: "Inactive");
706 still_alive_with_check(void)
708 jobmgr_log(root_jobmgr
, LOG_NOTICE
, "Still alive with %lu/%lu children", total_children
, total_anon_children
);
710 jobmgr_log_active_jobs(root_jobmgr
);
712 runtime_closelog(); /* hack to flush logs */
716 jobmgr_shutdown(jobmgr_t jm
)
721 jobmgr_log(jm
, LOG_DEBUG
, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm
->reboot_flags
));
723 jm
->shutting_down
= true;
725 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
726 jobmgr_shutdown(jmi
);
729 if (jm
->hopefully_first_cnt
) {
730 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
731 if (ji
->p
&& ji
->hopefully_exits_first
) {
737 if (debug_shutdown_hangs
&& jm
->parentmgr
== NULL
&& getpid() == 1) {
738 runtime_set_timeout(still_alive_with_check
, 5);
741 return jobmgr_do_garbage_collection(jm
);
745 jobmgr_remove(jobmgr_t jm
)
750 jobmgr_log(jm
, LOG_DEBUG
, "Removed job manager");
752 if (!jobmgr_assumes(jm
, SLIST_EMPTY(&jm
->submgrs
))) {
753 while ((jmi
= SLIST_FIRST(&jm
->submgrs
))) {
758 while ((ji
= LIST_FIRST(&jm
->jobs
))) {
759 /* We should only have anonymous jobs left */
760 job_assumes(ji
, ji
->anonymous
);
765 jobmgr_assumes(jm
, launchd_mport_deallocate(jm
->req_port
) == KERN_SUCCESS
);
769 jobmgr_assumes(jm
, launchd_mport_close_recv(jm
->jm_port
) == KERN_SUCCESS
);
772 if (jm
== background_jobmgr
) {
773 background_jobmgr
= NULL
;
778 SLIST_REMOVE(&jm
->parentmgr
->submgrs
, jm
, jobmgr_s
, sle
);
779 } else if (getpid() == 1) {
780 jobmgr_log(jm
, LOG_DEBUG
, "About to call: reboot(%s)", reboot_flags_to_C_names(jm
->reboot_flags
));
782 jobmgr_assumes(jm
, reboot(jm
->reboot_flags
) != -1);
786 jobmgr_log(jm
, LOG_DEBUG
, "About to exit");
796 struct waiting_for_removal
*w4r
;
797 struct calendarinterval
*ci
;
798 struct semaphoreitem
*si
;
799 struct socketgroup
*sg
;
800 struct machservice
*ms
;
801 struct limititem
*li
;
802 struct mspolicy
*msp
;
805 if (j
->p
&& j
->anonymous
) {
808 job_log(j
, LOG_DEBUG
, "Removal pended until the job exits");
810 if (!j
->removal_pending
) {
811 j
->removal_pending
= true;
818 ipc_close_all_with_job(j
);
820 if (j
->forced_peers_to_demand_mode
) {
821 job_set_global_on_demand(j
, false);
824 if (!job_assumes(j
, j
->forkfd
== 0)) {
825 job_assumes(j
, runtime_close(j
->forkfd
) != -1);
828 if (!job_assumes(j
, j
->log_redirect_fd
== 0)) {
829 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
833 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
836 if (!job_assumes(j
, j
->wait_reply_port
== MACH_PORT_NULL
)) {
837 job_assumes(j
, launchd_mport_deallocate(j
->wait_reply_port
) == KERN_SUCCESS
);
840 while ((msp
= SLIST_FIRST(&j
->mspolicies
))) {
841 mspolicy_delete(j
, msp
);
843 while ((sg
= SLIST_FIRST(&j
->sockets
))) {
844 socketgroup_delete(j
, sg
);
846 while ((ci
= SLIST_FIRST(&j
->cal_intervals
))) {
847 calendarinterval_delete(j
, ci
);
849 while ((ei
= SLIST_FIRST(&j
->env
))) {
850 envitem_delete(j
, ei
, false);
852 while ((ei
= SLIST_FIRST(&j
->global_env
))) {
853 envitem_delete(j
, ei
, true);
855 while ((li
= SLIST_FIRST(&j
->limits
))) {
856 limititem_delete(j
, li
);
858 while ((ms
= SLIST_FIRST(&j
->machservices
))) {
859 machservice_delete(j
, ms
, false);
861 while ((si
= SLIST_FIRST(&j
->semaphores
))) {
862 semaphoreitem_delete(j
, si
);
864 while ((w4r
= SLIST_FIRST(&j
->removal_watchers
))) {
865 waiting4removal_delete(j
, w4r
);
892 if (j
->alt_exc_handler
) {
893 free(j
->alt_exc_handler
);
896 if (j
->seatbelt_profile
) {
897 free(j
->seatbelt_profile
);
901 if (j
->quarantine_data
) {
902 free(j
->quarantine_data
);
908 if (j
->start_interval
) {
910 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
) != -1);
912 if (j
->poll_for_vfs_changes
) {
913 job_assumes(j
, kevent_mod((uintptr_t)&j
->semaphores
, EVFILT_TIMER
, EV_DELETE
, 0, 0, j
) != -1);
916 kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
919 LIST_REMOVE(j
, label_hash_sle
);
921 job_log(j
, LOG_DEBUG
, "Removed");
927 socketgroup_setup(launch_data_t obj
, const char *key
, void *context
)
929 launch_data_t tmp_oai
;
931 unsigned int i
, fd_cnt
= 1;
934 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
935 fd_cnt
= launch_data_array_get_count(obj
);
938 fds
= alloca(fd_cnt
* sizeof(int));
940 for (i
= 0; i
< fd_cnt
; i
++) {
941 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
942 tmp_oai
= launch_data_array_get_index(obj
, i
);
947 fds
[i
] = launch_data_get_fd(tmp_oai
);
950 socketgroup_new(j
, key
, fds
, fd_cnt
, strcmp(key
, LAUNCH_JOBKEY_BONJOURFDS
) == 0);
956 job_set_global_on_demand(job_t j
, bool val
)
958 if (j
->forced_peers_to_demand_mode
&& val
) {
960 } else if (!j
->forced_peers_to_demand_mode
&& !val
) {
964 if ((j
->forced_peers_to_demand_mode
= val
)) {
965 j
->mgr
->global_on_demand_cnt
++;
967 j
->mgr
->global_on_demand_cnt
--;
970 if (j
->mgr
->global_on_demand_cnt
== 0) {
971 jobmgr_dispatch_all(j
->mgr
, false);
978 job_setup_machport(job_t j
)
980 mach_msg_size_t mxmsgsz
;
982 if (!job_assumes(j
, launchd_mport_create_recv(&j
->j_port
) == KERN_SUCCESS
)) {
986 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
987 mxmsgsz
= sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem
);
988 if (job_mig_protocol_vproc_subsystem
.maxsize
> mxmsgsz
) {
989 mxmsgsz
= job_mig_protocol_vproc_subsystem
.maxsize
;
992 if (!job_assumes(j
, runtime_add_mport(j
->j_port
, protocol_vproc_server
, mxmsgsz
) == KERN_SUCCESS
)) {
996 if (!job_assumes(j
, launchd_mport_notify_req(j
->j_port
, MACH_NOTIFY_NO_SENDERS
) == KERN_SUCCESS
)) {
997 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
1003 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
1009 job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
)
1011 const char **argv
= (const char **)mach_cmd2argv(cmd
);
1014 if (!job_assumes(j
, argv
!= NULL
)) {
1018 jr
= job_new(j
->mgr
, AUTO_PICK_LEGACY_LABEL
, NULL
, argv
);
1022 /* jobs can easily be denied creation during shutdown */
1029 jr
->legacy_mach_job
= true;
1030 jr
->abandon_pg
= true;
1031 jr
->priv_port_has_senders
= true; /* the IPC that called us will make-send on this port */
1033 if (!job_setup_machport(jr
)) {
1037 job_log(jr
, LOG_INFO
, "Legacy%s server created", ond
? " on-demand" : "");
1049 job_handle_mpm_wait(job_t j
, mach_port_t srp
, int *waitstatus
)
1052 j
->wait_reply_port
= srp
;
1053 return MIG_NO_REPLY
;
1056 *waitstatus
= j
->last_exit_status
;
1062 job_new_anonymous(jobmgr_t jm
, pid_t anonpid
)
1064 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, anonpid
};
1065 struct kinfo_proc kp
;
1066 size_t len
= sizeof(kp
);
1067 const char *zombie
= NULL
;
1068 bool shutdown_state
;
1069 job_t jp
= NULL
, jr
= NULL
;
1071 if (!jobmgr_assumes(jm
, anonpid
!= 0)) {
1075 if (!jobmgr_assumes(jm
, anonpid
< 100000)) {
1076 /* The kernel current defines PID_MAX to be 99999, but that define isn't exported */
1080 if (!jobmgr_assumes(jm
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1)) {
1084 if (len
!= sizeof(kp
)) {
1085 jobmgr_log(jm
, LOG_DEBUG
, "Tried to create an anonymous job for nonexistent PID: %u", anonpid
);
1089 if (!jobmgr_assumes(jm
, kp
.kp_proc
.p_comm
[0] != '\0')) {
1093 if (kp
.kp_proc
.p_stat
== SZOMB
) {
1094 jobmgr_log(jm
, LOG_DEBUG
, "Tried to create an anonymous job for zombie PID: %u", anonpid
);
1098 switch (kp
.kp_eproc
.e_ppid
) {
1103 if (getpid() != 1) {
1104 /* we cannot possibly find a parent job_t that is useful in this function */
1109 jp
= jobmgr_find_by_pid(jm
, kp
.kp_eproc
.e_ppid
, true);
1110 jobmgr_assumes(jm
, jp
!= NULL
);
1114 /* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
1115 if ((shutdown_state
= jm
->shutting_down
)) {
1116 jm
->shutting_down
= false;
1119 if (jobmgr_assumes(jm
, (jr
= job_new(jm
, AUTO_PICK_LEGACY_LABEL
, zombie
? zombie
: kp
.kp_proc
.p_comm
, NULL
)) != NULL
)) {
1120 u_int proc_fflags
= NOTE_EXEC
|NOTE_EXIT
/* |NOTE_REAP */;
1122 total_anon_children
++;
1123 jr
->anonymous
= true;
1126 /* anonymous process reaping is messy */
1127 LIST_INSERT_HEAD(&jm
->active_jobs
[ACTIVE_JOB_HASH(jr
->p
)], jr
, pid_hash_sle
);
1129 if (kevent_mod(jr
->p
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
) == -1 && job_assumes(jr
, errno
== ESRCH
)) {
1130 /* zombies are weird */
1131 job_log(jr
, LOG_ERR
, "Failed to add kevent for PID %u. Will unload at MIG return", jr
->p
);
1132 jr
->unload_at_mig_return
= true;
1136 job_assumes(jr
, mspolicy_copy(jr
, jp
));
1139 if (shutdown_state
&& jm
->hopefully_first_cnt
== 0) {
1140 job_log(jr
, LOG_APPLEONLY
, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time");
1143 job_log(jr
, LOG_DEBUG
, "Created PID %u anonymously by PPID %u%s%s", anonpid
, kp
.kp_eproc
.e_ppid
, jp
? ": " : "", jp
? jp
->label
: "");
1146 if (shutdown_state
) {
1147 jm
->shutting_down
= true;
1154 job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
)
1156 const char *const *argv_tmp
= argv
;
1157 char auto_label
[1000];
1158 const char *bn
= NULL
;
1160 size_t minlabel_len
;
1164 launchd_assert(offsetof(struct job_s
, kqjob_callback
) == 0);
1166 if (jm
->shutting_down
) {
1171 if (prog
== NULL
&& argv
== NULL
) {
1176 if (label
== AUTO_PICK_LEGACY_LABEL
) {
1177 bn
= prog
? prog
: basename((char *)argv
[0]); /* prog for auto labels is kp.kp_kproc.p_comm */
1178 snprintf(auto_label
, sizeof(auto_label
), "%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", bn
);
1180 /* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
1181 minlabel_len
= strlen(label
) + MAXCOMLEN
;
1183 minlabel_len
= strlen(label
);
1186 j
= calloc(1, sizeof(struct job_s
) + minlabel_len
+ 1);
1188 if (!jobmgr_assumes(jm
, j
!= NULL
)) {
1192 if (label
== auto_label
) {
1193 snprintf((char *)j
->label
, strlen(label
) + 1, "%p.%s", j
, bn
);
1195 strcpy((char *)j
->label
, label
);
1197 j
->kqjob_callback
= job_callback
;
1199 j
->min_run_time
= LAUNCHD_MIN_JOB_RUN_TIME
;
1200 j
->timeout
= RUNTIME_ADVISABLE_IDLE_TIMEOUT
;
1201 j
->exit_timeout
= LAUNCHD_DEFAULT_EXIT_TIMEOUT
;
1202 j
->currently_ignored
= true;
1204 j
->checkedin
= true;
1207 j
->prog
= strdup(prog
);
1208 if (!job_assumes(j
, j
->prog
!= NULL
)) {
1217 for (i
= 0; i
< j
->argc
; i
++) {
1218 cc
+= strlen(argv
[i
]) + 1;
1221 j
->argv
= malloc((j
->argc
+ 1) * sizeof(char *) + cc
);
1223 if (!job_assumes(j
, j
->argv
!= NULL
)) {
1227 co
= ((char *)j
->argv
) + ((j
->argc
+ 1) * sizeof(char *));
1229 for (i
= 0; i
< j
->argc
; i
++) {
1231 strcpy(co
, argv
[i
]);
1232 co
+= strlen(argv
[i
]) + 1;
1237 LIST_INSERT_HEAD(&jm
->jobs
, j
, sle
);
1238 LIST_INSERT_HEAD(&label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
1240 job_log(j
, LOG_DEBUG
, "Conceived");
1254 job_import(launch_data_t pload
)
1256 job_t j
= jobmgr_import2(root_jobmgr
, pload
);
1262 return job_dispatch(j
, false);
1266 job_import_bulk(launch_data_t pload
)
1268 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
1270 size_t i
, c
= launch_data_array_get_count(pload
);
1272 ja
= alloca(c
* sizeof(job_t
));
1274 for (i
= 0; i
< c
; i
++) {
1275 if ((ja
[i
] = jobmgr_import2(root_jobmgr
, launch_data_array_get_index(pload
, i
)))) {
1278 launch_data_array_set_index(resp
, launch_data_new_errno(errno
), i
);
1281 for (i
= 0; i
< c
; i
++) {
1282 if (ja
[i
] == NULL
) {
1285 job_dispatch(ja
[i
], false);
1292 job_import_bool(job_t j
, const char *key
, bool value
)
1294 bool found_key
= false;
1299 if (strcasecmp(key
, LAUNCH_JOBKEY_ABANDONPROCESSGROUP
) == 0) {
1300 j
->abandon_pg
= value
;
1306 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
1307 j
->ondemand
= !value
;
1313 if (strcasecmp(key
, LAUNCH_JOBKEY_ONDEMAND
) == 0) {
1314 j
->ondemand
= value
;
1320 if (strcasecmp(key
, LAUNCH_JOBKEY_DEBUG
) == 0) {
1323 } else if (strcasecmp(key
, LAUNCH_JOBKEY_DISABLED
) == 0) {
1324 job_assumes(j
, !value
);
1330 if (strcasecmp(key
, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST
) == 0) {
1331 j
->hopefully_exits_last
= value
;
1333 } else if (strcasecmp(key
, LAUNCH_JOBKEY_HOPEFULLYEXITSFIRST
) == 0) {
1334 j
->hopefully_exits_first
= value
;
1340 if (strcasecmp(key
, LAUNCH_JOBKEY_SESSIONCREATE
) == 0) {
1341 j
->session_create
= value
;
1343 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTONMOUNT
) == 0) {
1344 j
->start_on_mount
= value
;
1346 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SERVICEIPC
) == 0) {
1347 /* this only does something on Mac OS X 10.4 "Tiger" */
1353 if (strcasecmp(key
, LAUNCH_JOBKEY_LOWPRIORITYIO
) == 0) {
1354 j
->low_pri_io
= value
;
1356 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LAUNCHONLYONCE
) == 0) {
1357 j
->only_once
= value
;
1363 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
1364 j
->internal_exc_handler
= value
;
1370 if (strcasecmp(key
, LAUNCH_JOBKEY_INITGROUPS
) == 0) {
1371 if (getuid() != 0) {
1372 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1375 j
->no_init_groups
= !value
;
1381 if (strcasecmp(key
, LAUNCH_JOBKEY_RUNATLOAD
) == 0) {
1383 /* We don't want value == false to change j->start_pending */
1384 j
->start_pending
= true;
1391 if (strcasecmp(key
, LAUNCH_JOBKEY_ENABLEGLOBBING
) == 0) {
1392 j
->globargv
= value
;
1394 } else if (strcasecmp(key
, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL
) == 0) {
1395 j
->debug_before_kill
= value
;
1401 if (strcasecmp(key
, LAUNCH_JOBKEY_WAITFORDEBUGGER
) == 0) {
1402 j
->wait4debugger
= value
;
1411 job_log(j
, LOG_WARNING
, "Unknown key for boolean: %s", key
);
1416 job_import_string(job_t j
, const char *key
, const char *value
)
1418 char **where2put
= NULL
;
1423 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
1424 where2put
= &j
->alt_exc_handler
;
1429 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAM
) == 0) {
1435 if (strcasecmp(key
, LAUNCH_JOBKEY_LABEL
) == 0) {
1437 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
1439 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
1441 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
1442 job_reparent_hack(j
, value
);
1448 if (strcasecmp(key
, LAUNCH_JOBKEY_ROOTDIRECTORY
) == 0) {
1449 if (getuid() != 0) {
1450 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1453 where2put
= &j
->rootdir
;
1458 if (strcasecmp(key
, LAUNCH_JOBKEY_WORKINGDIRECTORY
) == 0) {
1459 where2put
= &j
->workingdir
;
1464 if (strcasecmp(key
, LAUNCH_JOBKEY_USERNAME
) == 0) {
1465 if (getuid() != 0) {
1466 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1468 } else if (strcmp(value
, "root") == 0) {
1471 where2put
= &j
->username
;
1476 if (strcasecmp(key
, LAUNCH_JOBKEY_GROUPNAME
) == 0) {
1477 if (getuid() != 0) {
1478 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1480 } else if (strcmp(value
, "wheel") == 0) {
1483 where2put
= &j
->groupname
;
1488 if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDOUTPATH
) == 0) {
1489 where2put
= &j
->stdoutpath
;
1490 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDERRORPATH
) == 0) {
1491 where2put
= &j
->stderrpath
;
1493 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXPROFILE
) == 0) {
1494 where2put
= &j
->seatbelt_profile
;
1499 job_log(j
, LOG_WARNING
, "Unknown key for string: %s", key
);
1504 job_assumes(j
, (*where2put
= strdup(value
)) != NULL
);
1506 job_log(j
, LOG_WARNING
, "Unknown key: %s", key
);
1511 job_import_integer(job_t j
, const char *key
, long long value
)
1516 if (strcasecmp(key
, LAUNCH_JOBKEY_EXITTIMEOUT
) == 0) {
1518 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
1519 } else if (value
> UINT32_MAX
) {
1520 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
1522 j
->exit_timeout
= value
;
1528 if (strcasecmp(key
, LAUNCH_JOBKEY_NICE
) == 0) {
1535 if (strcasecmp(key
, LAUNCH_JOBKEY_TIMEOUT
) == 0) {
1537 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
1538 } else if (value
> UINT32_MAX
) {
1539 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
1543 } else if (strcasecmp(key
, LAUNCH_JOBKEY_THROTTLEINTERVAL
) == 0) {
1545 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
1546 } else if (value
> UINT32_MAX
) {
1547 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
1549 j
->min_run_time
= value
;
1555 if (strcasecmp(key
, LAUNCH_JOBKEY_UMASK
) == 0) {
1562 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTINTERVAL
) == 0) {
1564 job_log(j
, LOG_WARNING
, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
1565 } else if (value
> UINT32_MAX
) {
1566 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
1569 j
->start_interval
= value
;
1571 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, value
, j
) != -1);
1574 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
1575 j
->seatbelt_flags
= value
;
1581 job_log(j
, LOG_WARNING
, "Unknown key for integer: %s", key
);
1587 job_import_opaque(job_t j
, const char *key
, launch_data_t value
)
1593 if (strcasecmp(key
, LAUNCH_JOBKEY_QUARANTINEDATA
) == 0) {
1594 size_t tmpsz
= launch_data_get_opaque_size(value
);
1596 if (job_assumes(j
, j
->quarantine_data
= malloc(tmpsz
))) {
1597 memcpy(j
->quarantine_data
, launch_data_get_opaque(value
), tmpsz
);
1598 j
->quarantine_data_sz
= tmpsz
;
1609 policy_setup(launch_data_t obj
, const char *key
, void *context
)
1612 bool found_key
= false;
1617 if (strcasecmp(key
, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS
) == 0) {
1618 j
->deny_job_creation
= launch_data_get_bool(obj
);
1626 if (unlikely(!found_key
)) {
1627 job_log(j
, LOG_WARNING
, "Unknown policy: %s", key
);
1632 job_import_dictionary(job_t j
, const char *key
, launch_data_t value
)
1639 if (strcasecmp(key
, LAUNCH_JOBKEY_POLICIES
) == 0) {
1640 launch_data_dict_iterate(value
, policy_setup
, j
);
1645 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
1646 launch_data_dict_iterate(value
, semaphoreitem_setup
, j
);
1651 if (strcasecmp(key
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
) == 0) {
1652 j
->inetcompat
= true;
1653 j
->abandon_pg
= true;
1654 if ((tmp
= launch_data_dict_lookup(value
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
))) {
1655 j
->inetcompat_wait
= launch_data_get_bool(tmp
);
1661 if (strcasecmp(key
, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES
) == 0) {
1662 launch_data_dict_iterate(value
, envitem_setup
, j
);
1667 if (strcasecmp(key
, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES
) == 0) {
1668 j
->importing_global_env
= true;
1669 launch_data_dict_iterate(value
, envitem_setup
, j
);
1670 j
->importing_global_env
= false;
1675 if (strcasecmp(key
, LAUNCH_JOBKEY_SOCKETS
) == 0) {
1676 launch_data_dict_iterate(value
, socketgroup_setup
, j
);
1677 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
1678 calendarinterval_new_from_obj(j
, value
);
1679 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SOFTRESOURCELIMITS
) == 0) {
1680 launch_data_dict_iterate(value
, limititem_setup
, j
);
1682 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
1683 launch_data_dict_iterate(value
, seatbelt_setup_flags
, j
);
1689 if (strcasecmp(key
, LAUNCH_JOBKEY_HARDRESOURCELIMITS
) == 0) {
1690 j
->importing_hard_limits
= true;
1691 launch_data_dict_iterate(value
, limititem_setup
, j
);
1692 j
->importing_hard_limits
= false;
1697 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHSERVICES
) == 0) {
1698 launch_data_dict_iterate(value
, machservice_setup
, j
);
1699 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACHSERVICELOOKUPPOLICIES
) == 0) {
1700 launch_data_dict_iterate(value
, mspolicy_setup
, j
);
1704 job_log(j
, LOG_WARNING
, "Unknown key for dictionary: %s", key
);
1710 job_import_array(job_t j
, const char *key
, launch_data_t value
)
1712 size_t i
, value_cnt
= launch_data_array_get_count(value
);
1718 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
) == 0) {
1724 if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
1726 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
1728 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
1729 job_log(j
, LOG_NOTICE
, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
1735 if (strcasecmp(key
, LAUNCH_JOBKEY_QUEUEDIRECTORIES
) == 0) {
1736 for (i
= 0; i
< value_cnt
; i
++) {
1737 str
= launch_data_get_string(launch_data_array_get_index(value
, i
));
1738 if (job_assumes(j
, str
!= NULL
)) {
1739 semaphoreitem_new(j
, DIR_NOT_EMPTY
, str
);
1747 if (strcasecmp(key
, LAUNCH_JOBKEY_WATCHPATHS
) == 0) {
1748 for (i
= 0; i
< value_cnt
; i
++) {
1749 str
= launch_data_get_string(launch_data_array_get_index(value
, i
));
1750 if (job_assumes(j
, str
!= NULL
)) {
1751 semaphoreitem_new(j
, PATH_CHANGES
, str
);
1758 if (strcasecmp(key
, LAUNCH_JOBKEY_BONJOURFDS
) == 0) {
1759 socketgroup_setup(value
, LAUNCH_JOBKEY_BONJOURFDS
, j
);
1760 } else if (strcasecmp(key
, LAUNCH_JOBKEY_BINARYORDERPREFERENCE
) == 0) {
1761 if (job_assumes(j
, j
->j_binpref
= malloc(value_cnt
* sizeof(*j
->j_binpref
)))) {
1762 j
->j_binpref_cnt
= value_cnt
;
1763 for (i
= 0; i
< value_cnt
; i
++) {
1764 j
->j_binpref
[i
] = launch_data_get_integer(launch_data_array_get_index(value
, i
));
1771 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
1772 for (i
= 0; i
< value_cnt
; i
++) {
1773 calendarinterval_new_from_obj(j
, launch_data_array_get_index(value
, i
));
1778 job_log(j
, LOG_WARNING
, "Unknown key for array: %s", key
);
1784 job_import_keys(launch_data_t obj
, const char *key
, void *context
)
1787 launch_data_type_t kind
;
1793 kind
= launch_data_get_type(obj
);
1796 case LAUNCH_DATA_BOOL
:
1797 job_import_bool(j
, key
, launch_data_get_bool(obj
));
1799 case LAUNCH_DATA_STRING
:
1800 job_import_string(j
, key
, launch_data_get_string(obj
));
1802 case LAUNCH_DATA_INTEGER
:
1803 job_import_integer(j
, key
, launch_data_get_integer(obj
));
1805 case LAUNCH_DATA_DICTIONARY
:
1806 job_import_dictionary(j
, key
, obj
);
1808 case LAUNCH_DATA_ARRAY
:
1809 job_import_array(j
, key
, obj
);
1811 case LAUNCH_DATA_OPAQUE
:
1812 job_import_opaque(j
, key
, obj
);
1815 job_log(j
, LOG_WARNING
, "Unknown value type '%d' for key: %s", kind
, key
);
1821 jobmgr_import2(jobmgr_t jm
, launch_data_t pload
)
1823 launch_data_t tmp
, ldpa
;
1824 const char *label
= NULL
, *prog
= NULL
;
1825 const char **argv
= NULL
;
1828 if (pload
== NULL
) {
1833 if (launch_data_get_type(pload
) != LAUNCH_DATA_DICTIONARY
) {
1838 if (!(tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LABEL
))) {
1843 if (launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
) {
1848 if (!(label
= launch_data_get_string(tmp
))) {
1853 if ((tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAM
)) &&
1854 (launch_data_get_type(tmp
) == LAUNCH_DATA_STRING
)) {
1855 prog
= launch_data_get_string(tmp
);
1858 if ((ldpa
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
))) {
1861 if (launch_data_get_type(ldpa
) != LAUNCH_DATA_ARRAY
) {
1866 c
= launch_data_array_get_count(ldpa
);
1868 argv
= alloca((c
+ 1) * sizeof(char *));
1870 for (i
= 0; i
< c
; i
++) {
1871 tmp
= launch_data_array_get_index(ldpa
, i
);
1873 if (launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
) {
1878 argv
[i
] = launch_data_get_string(tmp
);
1884 if ((j
= job_find(label
)) != NULL
) {
1887 } else if (!jobmgr_label_test(jm
, label
)) {
1892 if ((j
= job_new(jm
, label
, prog
, argv
))) {
1893 launch_data_dict_iterate(pload
, job_import_keys
, j
);
1900 jobmgr_label_test(jobmgr_t jm
, const char *str
)
1902 char *endstr
= NULL
;
1905 if (str
[0] == '\0') {
1906 jobmgr_log(jm
, LOG_ERR
, "Empty job labels are not allowed");
1910 for (ptr
= str
; *ptr
; ptr
++) {
1911 if (iscntrl(*ptr
)) {
1912 jobmgr_log(jm
, LOG_ERR
, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr
- str
, *ptr
);
1917 strtoll(str
, &endstr
, 0);
1919 if (str
!= endstr
) {
1920 jobmgr_log(jm
, LOG_ERR
, "Job labels are not allowed to begin with numbers: %s", str
);
1924 if ((strncasecmp(str
, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
1925 (strncasecmp(str
, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
1926 jobmgr_log(jm
, LOG_ERR
, "Job labels are not allowed to use a reserved prefix: %s", str
);
1934 job_find(const char *label
)
1938 LIST_FOREACH(ji
, &label_hash
[hash_label(label
)], label_hash_sle
) {
1939 if (ji
->removal_pending
) {
1940 continue; /* 5351245 */
1941 } else if (ji
->mgr
->shutting_down
) {
1942 continue; /* 5488633 */
1945 if (strcmp(ji
->label
, label
) == 0) {
1955 jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
)
1959 LIST_FOREACH(ji
, &jm
->active_jobs
[ACTIVE_JOB_HASH(p
)], pid_hash_sle
) {
1967 } else if (create_anon
) {
1968 return job_new_anonymous(jm
, p
);
1975 job_mig_intran2(jobmgr_t jm
, mach_port_t mport
, pid_t upid
)
1980 if (jm
->jm_port
== mport
) {
1981 jobmgr_assumes(jm
, (ji
= jobmgr_find_by_pid(jm
, upid
, true)) != NULL
);
1985 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
1988 if ((jr
= job_mig_intran2(jmi
, mport
, upid
))) {
1993 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
1994 if (ji
->j_port
== mport
) {
2003 job_mig_intran(mach_port_t p
)
2008 runtime_get_caller_creds(&ldc
);
2010 jr
= job_mig_intran2(root_jobmgr
, p
, ldc
.pid
);
2012 if (!jobmgr_assumes(root_jobmgr
, jr
!= NULL
)) {
2013 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, 0 };
2014 struct kinfo_proc kp
;
2015 size_t len
= sizeof(kp
);
2019 if (jobmgr_assumes(root_jobmgr
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1) && jobmgr_assumes(root_jobmgr
, len
== sizeof(kp
))) {
2020 jobmgr_log(root_jobmgr
, LOG_ERR
, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__
, ldc
.pid
, ldc
.uid
, ldc
.euid
, p
, kp
.kp_proc
.p_comm
);
2028 job_find_by_service_port(mach_port_t p
)
2030 struct machservice
*ms
;
2032 LIST_FOREACH(ms
, &port_hash
[HASH_PORT(p
)], port_hash_sle
) {
2033 if (ms
->recv
&& (ms
->port
== p
)) {
2042 job_mig_destructor(job_t j
)
2047 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
2050 if (j
&& j
!= workaround_5477111
&& j
->unload_at_mig_return
) {
2051 job_log(j
, LOG_NOTICE
, "Unloading PID %u at MIG return.", j
->p
);
2055 workaround_5477111
= NULL
;
2057 calendarinterval_sanity_check();
2061 job_export_all2(jobmgr_t jm
, launch_data_t where
)
2066 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
2067 job_export_all2(jmi
, where
);
2070 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
2073 if (jobmgr_assumes(jm
, (tmp
= job_export(ji
)) != NULL
)) {
2074 launch_data_dict_insert(where
, tmp
, ji
->label
);
2080 job_export_all(void)
2082 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
2084 if (launchd_assumes(resp
!= NULL
)) {
2085 job_export_all2(root_jobmgr
, resp
);
2092 job_log_stray_pg(job_t j
)
2094 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PGRP
, j
->p
};
2095 size_t i
, kp_cnt
, len
= sizeof(struct kinfo_proc
) * get_kern_max_proc();
2096 struct kinfo_proc
*kp
;
2098 #if TARGET_OS_EMBEDDED
2099 if (!do_apple_internal_magic
) {
2104 if (!job_assumes(j
, (kp
= malloc(len
)) != NULL
)) {
2107 if (!job_assumes(j
, sysctl(mib
, 4, kp
, &len
, NULL
, 0) != -1)) {
2111 kp_cnt
= len
/ sizeof(struct kinfo_proc
);
2113 for (i
= 0; i
< kp_cnt
; i
++) {
2114 pid_t p_i
= kp
[i
].kp_proc
.p_pid
;
2115 pid_t pp_i
= kp
[i
].kp_eproc
.e_ppid
;
2116 const char *z
= (kp
[i
].kp_proc
.p_stat
== SZOMB
) ? "zombie " : "";
2117 const char *n
= kp
[i
].kp_proc
.p_comm
;
2121 } else if (!job_assumes(j
, p_i
!= 0 && p_i
!= 1)) {
2125 job_log(j
, LOG_WARNING
, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u %s", z
, p_i
, pp_i
, n
);
2138 job_log(j
, LOG_DEBUG
, "Reaping");
2140 if (j
->weird_bootstrap
) {
2141 mach_msg_size_t mxmsgsz
= sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem
);
2143 if (job_mig_protocol_vproc_subsystem
.maxsize
> mxmsgsz
) {
2144 mxmsgsz
= job_mig_protocol_vproc_subsystem
.maxsize
;
2147 job_assumes(j
, runtime_add_mport(j
->mgr
->jm_port
, protocol_vproc_server
, mxmsgsz
) == KERN_SUCCESS
);
2148 j
->weird_bootstrap
= false;
2151 if (j
->log_redirect_fd
&& !j
->wait4pipe_eof
) {
2152 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
2153 j
->log_redirect_fd
= 0;
2157 job_assumes(j
, runtime_close(j
->forkfd
) != -1);
2163 memset(&ru
, 0, sizeof(ru
));
2166 * The job is dead. While the PID/PGID is still known to be
2167 * valid, try to kill abandoned descendant processes.
2169 job_log_stray_pg(j
);
2170 if (!j
->abandon_pg
) {
2171 job_assumes(j
, runtime_killpg(j
->p
, SIGTERM
) != -1 || errno
== ESRCH
);
2177 * The current implementation of ptrace() causes the traced process to
2178 * be abducted away from the true parent and adopted by the tracer.
2180 * Once the tracing process relinquishes control, the kernel then
2181 * restores the true parent/child relationship.
2183 * Unfortunately, the wait*() family of APIs is unaware of the temporarily
2184 * data structures changes, and they return an error if reality hasn't
2185 * been restored by the time they are called.
2187 if (!job_assumes(j
, wait4(j
->p
, &status
, 0, &ru
) != -1)) {
2188 job_log(j
, LOG_NOTICE
, "Working around 5020256. Assuming the job crashed.");
2190 status
= W_EXITCODE(0, SIGSEGV
);
2191 memset(&ru
, 0, sizeof(ru
));
2195 if (j
->exit_timeout
) {
2196 kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
2200 total_anon_children
--;
2205 LIST_REMOVE(j
, pid_hash_sle
);
2207 if (j
->wait_reply_port
) {
2208 job_log(j
, LOG_DEBUG
, "MPM wait reply being sent");
2209 job_assumes(j
, job_mig_wait_reply(j
->wait_reply_port
, 0, status
) == 0);
2210 j
->wait_reply_port
= MACH_PORT_NULL
;
2213 if (j
->sent_sigterm_time
) {
2214 uint64_t td_sec
, td_usec
, td
= (mach_absolute_time() - j
->sent_sigterm_time
) * tbi
.numer
/ tbi
.denom
;
2216 td_sec
= td
/ NSEC_PER_SEC
;
2217 td_usec
= (td
% NSEC_PER_SEC
) / NSEC_PER_USEC
;
2219 job_log(j
, LOG_INFO
, "Exited %lld.%06lld seconds after %s was sent",
2220 td_sec
, td_usec
, signal_to_C_name(j
->sent_sigkill
? SIGKILL
: SIGTERM
));
2223 #if DO_RUSAGE_SUMMATION
2224 timeradd(&ru
.ru_utime
, &j
->ru
.ru_utime
, &j
->ru
.ru_utime
);
2225 timeradd(&ru
.ru_stime
, &j
->ru
.ru_stime
, &j
->ru
.ru_stime
);
2226 j
->ru
.ru_maxrss
+= ru
.ru_maxrss
;
2227 j
->ru
.ru_ixrss
+= ru
.ru_ixrss
;
2228 j
->ru
.ru_idrss
+= ru
.ru_idrss
;
2229 j
->ru
.ru_isrss
+= ru
.ru_isrss
;
2230 j
->ru
.ru_minflt
+= ru
.ru_minflt
;
2231 j
->ru
.ru_majflt
+= ru
.ru_majflt
;
2232 j
->ru
.ru_nswap
+= ru
.ru_nswap
;
2233 j
->ru
.ru_inblock
+= ru
.ru_inblock
;
2234 j
->ru
.ru_oublock
+= ru
.ru_oublock
;
2235 j
->ru
.ru_msgsnd
+= ru
.ru_msgsnd
;
2236 j
->ru
.ru_msgrcv
+= ru
.ru_msgrcv
;
2237 j
->ru
.ru_nsignals
+= ru
.ru_nsignals
;
2238 j
->ru
.ru_nvcsw
+= ru
.ru_nvcsw
;
2239 j
->ru
.ru_nivcsw
+= ru
.ru_nivcsw
;
2242 if (WIFEXITED(status
) && WEXITSTATUS(status
) != 0) {
2243 job_log(j
, LOG_WARNING
, "Exited with exit code: %d", WEXITSTATUS(status
));
2246 if (WIFSIGNALED(status
)) {
2247 int s
= WTERMSIG(status
);
2248 if (SIGKILL
== s
|| SIGTERM
== s
) {
2249 job_log(j
, LOG_NOTICE
, "Exited: %s", strsignal(s
));
2251 job_log(j
, LOG_WARNING
, "Exited abnormally: %s", strsignal(s
));
2255 if (j
->hopefully_exits_first
) {
2256 j
->mgr
->hopefully_first_cnt
--;
2257 } else if (!j
->anonymous
&& !j
->hopefully_exits_last
) {
2258 j
->mgr
->normal_active_cnt
--;
2260 j
->last_exit_status
= status
;
2261 j
->sent_sigkill
= false;
2265 * We need to someday evaluate other jobs and find those who wish to track the
2266 * active/inactive state of this job. The current job_dispatch() logic makes
2267 * this messy, given that jobs can be deleted at dispatch.
2272 jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
)
2277 if (jm
->shutting_down
) {
2281 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
2282 jobmgr_dispatch_all(jmi
, newmounthack
);
2285 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
2286 if (newmounthack
&& ji
->start_on_mount
) {
2287 ji
->start_pending
= true;
2290 job_dispatch(ji
, false);
2295 job_dispatch(job_t j
, bool kickstart
)
2298 * The whole job removal logic needs to be consolidated. The fact that
2299 * a job can be removed from just about anywhere makes it easy to have
2300 * stale pointers left behind somewhere on the stack that might get
2301 * used after the deallocation. In particular, during job iteration.
2303 * This is a classic example. The act of dispatching a job may delete it.
2305 if (!job_active(j
)) {
2306 if (job_useless(j
)) {
2309 } else if (kickstart
|| job_keepalive(j
)) {
2317 * Path checking and monitoring is really racy right now.
2318 * We should clean this up post Leopard.
2320 if (job_keepalive(j
)) {
2325 job_log(j
, LOG_DEBUG
, "Tried to dispatch an already active job.");
2332 job_log_stdouterr2(job_t j
, const char *msg
, ...)
2334 struct runtime_syslog_attr attr
= { j
->label
, j
->label
, j
->mgr
->name
, LOG_NOTICE
, getuid(), j
->p
, j
->p
};
2338 runtime_vsyslog(&attr
, msg
, ap
);
2343 job_log_stdouterr(job_t j
)
2345 char *msg
, *bufindex
, *buf
= malloc(BIG_PIPE_SIZE
+ 1);
2346 bool close_log_redir
= false;
2349 if (!job_assumes(j
, buf
!= NULL
)) {
2355 rsz
= read(j
->log_redirect_fd
, buf
, BIG_PIPE_SIZE
);
2358 job_log(j
, LOG_DEBUG
, "Standard out/error pipe closed");
2359 close_log_redir
= true;
2360 } else if (!job_assumes(j
, rsz
!= -1)) {
2361 close_log_redir
= true;
2365 while ((msg
= strsep(&bufindex
, "\n\r"))) {
2367 job_log_stdouterr2(j
, "%s", msg
);
2374 if (close_log_redir
) {
2375 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
2376 j
->log_redirect_fd
= 0;
2377 job_dispatch(j
, false);
2384 if (!j
->p
|| j
->anonymous
) {
2388 job_assumes(j
, runtime_kill(j
->p
, SIGKILL
) != -1);
2390 j
->sent_sigkill
= true;
2392 job_assumes(j
, kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
,
2393 EV_ADD
, NOTE_SECONDS
, LAUNCHD_SIGKILL_TIMER
, j
) != -1);
2395 job_log(j
, LOG_DEBUG
, "Sent SIGKILL signal.");
2399 job_callback_proc(job_t j
, int flags
, int fflags
)
2401 if ((fflags
& NOTE_EXEC
) && j
->anonymous
) {
2402 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, j
->p
};
2403 struct kinfo_proc kp
;
2404 size_t len
= sizeof(kp
);
2406 if (job_assumes(j
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1)) {
2407 char newlabel
[1000];
2409 snprintf(newlabel
, sizeof(newlabel
), "%p.%s", j
, kp
.kp_proc
.p_comm
);
2411 job_log(j
, LOG_DEBUG
, "Program changed. Updating the label to: %s", newlabel
);
2413 LIST_REMOVE(j
, label_hash_sle
);
2414 strcpy((char *)j
->label
, newlabel
);
2415 LIST_INSERT_HEAD(&label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
2419 if (fflags
& NOTE_FORK
) {
2420 job_log(j
, LOG_DEBUG
, "Called fork()");
2423 if (fflags
& NOTE_EXIT
) {
2430 j
= job_dispatch(j
, false);
2434 /* NOTE_REAP sanity checking is disabled for now while we try and diagnose 5289559 */
2436 if (j
&& (fflags
& NOTE_REAP
)) {
2437 job_assumes(j
, flags
& EV_ONESHOT
);
2438 job_assumes(j
, flags
& EV_EOF
);
2440 job_assumes(j
, j
->p
== 0);
2446 job_callback_timer(job_t j
, void *ident
)
2449 job_dispatch(j
, true);
2450 } else if (&j
->semaphores
== ident
) {
2451 job_dispatch(j
, false);
2452 } else if (&j
->start_interval
== ident
) {
2453 j
->start_pending
= true;
2454 job_dispatch(j
, false);
2455 } else if (&j
->exit_timeout
== ident
) {
2456 if (j
->sent_sigkill
) {
2457 uint64_t td
= (mach_absolute_time() - j
->sent_sigterm_time
) * tbi
.numer
/ tbi
.denom
;
2460 td
-= j
->exit_timeout
;
2462 job_log(j
, LOG_ERR
, "Did not die after sending SIGKILL %llu seconds ago...", td
);
2464 job_force_sampletool(j
);
2465 if (j
->debug_before_kill
) {
2466 job_log(j
, LOG_NOTICE
, "Exit timeout elapsed. Entering the kernel debugger.");
2467 job_assumes(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
) == KERN_SUCCESS
);
2469 job_log(j
, LOG_WARNING
, "Exit timeout elapsed (%u seconds). Killing.", j
->exit_timeout
);
2473 job_assumes(j
, false);
2478 job_callback_read(job_t j
, int ident
)
2480 if (ident
== j
->log_redirect_fd
) {
2481 job_log_stdouterr(j
);
2483 socketgroup_callback(j
);
2488 jobmgr_reap_bulk(jobmgr_t jm
, struct kevent
*kev
)
2493 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
2494 jobmgr_reap_bulk(jmi
, kev
);
2497 if ((j
= jobmgr_find_by_pid(jm
, kev
->ident
, false))) {
2499 job_callback(j
, kev
);
2504 jobmgr_callback(void *obj
, struct kevent
*kev
)
2508 switch (kev
->filter
) {
2510 jobmgr_reap_bulk(jm
, kev
);
2511 if (launchd_assumes(root_jobmgr
!= NULL
)) {
2512 root_jobmgr
= jobmgr_do_garbage_collection(root_jobmgr
);
2516 switch (kev
->ident
) {
2518 return launchd_shutdown();
2520 return calendarinterval_callback();
2522 return (void)jobmgr_assumes(jm
, false);
2526 if (kev
->fflags
& VQ_MOUNT
) {
2527 jobmgr_dispatch_all(jm
, true);
2529 jobmgr_dispatch_all_semaphores(jm
);
2532 if (jobmgr_assumes(jm
, kev
->ident
== (uintptr_t)&sorted_calendar_events
)) {
2533 calendarinterval_callback();
2537 return (void)jobmgr_assumes(jm
, false);
2542 job_callback(void *obj
, struct kevent
*kev
)
2546 job_log(j
, LOG_DEBUG
, "Dispatching kevent callback.");
2548 switch (kev
->filter
) {
2550 return job_callback_proc(j
, kev
->flags
, kev
->fflags
);
2552 return job_callback_timer(j
, (void *)kev
->ident
);
2554 return semaphoreitem_callback(j
, kev
);
2556 return job_callback_read(j
, kev
->ident
);
2557 case EVFILT_MACHPORT
:
2558 return (void)job_dispatch(j
, true);
2560 return (void)job_assumes(j
, false);
2567 uint64_t td
, tnow
= mach_absolute_time();
2574 u_int proc_fflags
= /* NOTE_EXEC|NOTE_FORK| */ NOTE_EXIT
/* |NOTE_REAP */;
2576 if (!job_assumes(j
, j
->mgr
!= NULL
)) {
2580 if (job_active(j
)) {
2581 job_log(j
, LOG_DEBUG
, "Already started");
2585 job_assumes(j
, tnow
> j
->start_time
);
2588 * Some users adjust the wall-clock and then expect software to not notice.
2589 * Therefore, launchd must use an absolute clock instead of gettimeofday()
2590 * or time() wherever possible.
2592 td
= (tnow
- j
->start_time
) * tbi
.numer
/ tbi
.denom
;
2595 if (j
->start_time
&& (td
< j
->min_run_time
) && !j
->legacy_mach_job
&& !j
->inetcompat
) {
2596 time_t respawn_delta
= j
->min_run_time
- (uint32_t)td
;
2599 * We technically should ref-count throttled jobs to prevent idle exit,
2600 * but we're not directly tracking the 'throttled' state at the moment.
2603 job_log(j
, LOG_WARNING
, "Throttling respawn: Will start in %ld seconds", respawn_delta
);
2604 job_assumes(j
, kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, respawn_delta
, j
) != -1);
2609 j
->sent_sigterm_time
= 0;
2611 if (!j
->legacy_mach_job
) {
2612 sipc
= (!SLIST_EMPTY(&j
->sockets
) || !SLIST_EMPTY(&j
->machservices
));
2615 j
->checkedin
= false;
2618 job_assumes(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, spair
) != -1);
2621 job_assumes(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, execspair
) != -1);
2623 if (!j
->legacy_mach_job
&& job_assumes(j
, pipe(oepair
) != -1)) {
2624 j
->log_redirect_fd
= _fd(oepair
[0]);
2625 job_assumes(j
, fcntl(j
->log_redirect_fd
, F_SETFL
, O_NONBLOCK
) != -1);
2626 job_assumes(j
, kevent_mod(j
->log_redirect_fd
, EVFILT_READ
, EV_ADD
, 0, 0, j
) != -1);
2629 j
->start_time
= tnow
;
2631 switch (c
= runtime_fork(j
->weird_bootstrap
? j
->j_port
: j
->mgr
->jm_port
)) {
2633 job_log_error(j
, LOG_ERR
, "fork() failed, will try again in one second");
2634 job_assumes(j
, runtime_close(execspair
[0]) == 0);
2635 job_assumes(j
, runtime_close(execspair
[1]) == 0);
2637 job_assumes(j
, runtime_close(spair
[0]) == 0);
2638 job_assumes(j
, runtime_close(spair
[1]) == 0);
2640 if (!j
->legacy_mach_job
) {
2641 job_assumes(j
, runtime_close(oepair
[0]) != -1);
2642 job_assumes(j
, runtime_close(oepair
[1]) != -1);
2643 j
->log_redirect_fd
= 0;
2647 if (_vproc_post_fork_ping()) {
2648 _exit(EXIT_FAILURE
);
2650 if (!j
->legacy_mach_job
) {
2651 job_assumes(j
, dup2(oepair
[1], STDOUT_FILENO
) != -1);
2652 job_assumes(j
, dup2(oepair
[1], STDERR_FILENO
) != -1);
2653 job_assumes(j
, runtime_close(oepair
[1]) != -1);
2655 job_assumes(j
, runtime_close(execspair
[0]) == 0);
2656 /* wait for our parent to say they've attached a kevent to us */
2657 read(_fd(execspair
[1]), &c
, sizeof(c
));
2660 job_assumes(j
, runtime_close(spair
[0]) == 0);
2661 snprintf(nbuf
, sizeof(nbuf
), "%d", spair
[1]);
2662 setenv(LAUNCHD_TRUSTED_FD_ENV
, nbuf
, 1);
2667 job_log(j
, LOG_DEBUG
, "Started as PID: %u", c
);
2669 j
->start_pending
= false;
2673 LIST_INSERT_HEAD(&j
->mgr
->active_jobs
[ACTIVE_JOB_HASH(c
)], j
, pid_hash_sle
);
2675 if (JOB_BOOTCACHE_HACK_CHECK(j
)) {
2676 did_first_per_user_launchd_BootCache_hack
= true;
2679 if (!j
->legacy_mach_job
) {
2680 job_assumes(j
, runtime_close(oepair
[1]) != -1);
2683 if (j
->hopefully_exits_first
) {
2684 j
->mgr
->hopefully_first_cnt
++;
2685 } else if (!j
->hopefully_exits_last
) {
2686 j
->mgr
->normal_active_cnt
++;
2688 j
->forkfd
= _fd(execspair
[0]);
2689 job_assumes(j
, runtime_close(execspair
[1]) == 0);
2691 job_assumes(j
, runtime_close(spair
[1]) == 0);
2692 ipc_open(_fd(spair
[0]), j
);
2694 if (job_assumes(j
, kevent_mod(c
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
? root_jobmgr
: j
->mgr
) != -1)) {
2700 if (!j
->stall_before_exec
) {
2708 do_first_per_user_launchd_hack(void)
2710 char *bcct_tool
[] = { "/usr/sbin/BootCacheControl", "tag", NULL
};
2714 if (launchd_assumes((bcp
= vfork()) != -1)) {
2716 execve(bcct_tool
[0], bcct_tool
, environ
);
2717 _exit(EXIT_FAILURE
);
2719 launchd_assumes(waitpid(bcp
, &dummystatus
, 0) != -1);
2725 job_start_child(job_t j
)
2727 const char *file2exec
= "/usr/libexec/launchproxy";
2729 posix_spawnattr_t spattr
;
2730 int gflags
= GLOB_NOSORT
|GLOB_NOCHECK
|GLOB_TILDE
|GLOB_DOOFFS
;
2733 short spflags
= POSIX_SPAWN_SETEXEC
;
2734 size_t binpref_out_cnt
= 0;
2737 if (JOB_BOOTCACHE_HACK_CHECK(j
)) {
2738 do_first_per_user_launchd_hack();
2741 job_assumes(j
, posix_spawnattr_init(&spattr
) == 0);
2743 job_setup_attributes(j
);
2745 if (j
->argv
&& j
->globargv
) {
2747 for (i
= 0; i
< j
->argc
; i
++) {
2749 gflags
|= GLOB_APPEND
;
2751 if (glob(j
->argv
[i
], gflags
, NULL
, &g
) != 0) {
2752 job_log_error(j
, LOG_ERR
, "glob(\"%s\")", j
->argv
[i
]);
2756 g
.gl_pathv
[0] = (char *)file2exec
;
2757 argv
= (const char **)g
.gl_pathv
;
2758 } else if (j
->argv
) {
2759 argv
= alloca((j
->argc
+ 2) * sizeof(char *));
2760 argv
[0] = file2exec
;
2761 for (i
= 0; i
< j
->argc
; i
++) {
2762 argv
[i
+ 1] = j
->argv
[i
];
2766 argv
= alloca(3 * sizeof(char *));
2767 argv
[0] = file2exec
;
2772 if (!j
->inetcompat
) {
2776 if (j
->wait4debugger
) {
2777 job_log(j
, LOG_WARNING
, "Spawned and waiting for the debugger to attach before continuing...");
2778 spflags
|= POSIX_SPAWN_START_SUSPENDED
;
2781 job_assumes(j
, posix_spawnattr_setflags(&spattr
, spflags
) == 0);
2783 if (j
->j_binpref_cnt
) {
2784 job_assumes(j
, posix_spawnattr_setbinpref_np(&spattr
, j
->j_binpref_cnt
, j
->j_binpref
, &binpref_out_cnt
) == 0);
2785 job_assumes(j
, binpref_out_cnt
== j
->j_binpref_cnt
);
2789 if (j
->quarantine_data
) {
2792 if (job_assumes(j
, qp
= qtn_proc_alloc())) {
2793 if (job_assumes(j
, qtn_proc_init_with_data(qp
, j
->quarantine_data
, j
->quarantine_data_sz
) == 0)) {
2794 job_assumes(j
, qtn_proc_apply_to_self(qp
) == 0);
2801 if (j
->seatbelt_profile
) {
2802 char *seatbelt_err_buf
= NULL
;
2804 if (!job_assumes(j
, sandbox_init(j
->seatbelt_profile
, j
->seatbelt_flags
, &seatbelt_err_buf
) != -1)) {
2805 if (seatbelt_err_buf
) {
2806 job_log(j
, LOG_ERR
, "Sandbox failed to init: %s", seatbelt_err_buf
);
2814 errno
= posix_spawn(&junk_pid
, j
->inetcompat
? file2exec
: j
->prog
, NULL
, &spattr
, (char *const*)argv
, environ
);
2815 job_log_error(j
, LOG_ERR
, "posix_spawn(\"%s\", ...)", j
->prog
);
2817 errno
= posix_spawnp(&junk_pid
, j
->inetcompat
? file2exec
: argv
[0], NULL
, &spattr
, (char *const*)argv
, environ
);
2818 job_log_error(j
, LOG_ERR
, "posix_spawnp(\"%s\", ...)", argv
[0]);
2822 _exit(EXIT_FAILURE
);
2826 jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
)
2832 if (jm
->parentmgr
) {
2833 jobmgr_export_env_from_other_jobs(jm
->parentmgr
, dict
);
2835 char **tmpenviron
= environ
;
2836 for (; *tmpenviron
; tmpenviron
++) {
2838 launch_data_t s
= launch_data_alloc(LAUNCH_DATA_STRING
);
2839 launch_data_set_string(s
, strchr(*tmpenviron
, '=') + 1);
2840 strncpy(envkey
, *tmpenviron
, sizeof(envkey
));
2841 *(strchr(envkey
, '=')) = '\0';
2842 launch_data_dict_insert(dict
, s
, envkey
);
2846 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
2847 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
2848 if ((tmp
= launch_data_new_string(ei
->value
))) {
2849 launch_data_dict_insert(dict
, tmp
, ei
->key
);
2856 jobmgr_setup_env_from_other_jobs(jobmgr_t jm
)
2861 if (jm
->parentmgr
) {
2862 jobmgr_setup_env_from_other_jobs(jm
->parentmgr
);
2865 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
2866 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
2867 setenv(ei
->key
, ei
->value
, 1);
2873 job_find_and_blame_pids_with_weird_uids(job_t j
)
2875 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_ALL
};
2876 size_t i
, kp_cnt
, len
= sizeof(struct kinfo_proc
) * get_kern_max_proc();
2877 struct kinfo_proc
*kp
;
2878 uid_t u
= j
->mach_uid
;
2880 #if TARGET_OS_EMBEDDED
2881 if (!do_apple_internal_magic
) {
2887 if (!job_assumes(j
, kp
!= NULL
)) {
2890 if (!job_assumes(j
, sysctl(mib
, 3, kp
, &len
, NULL
, 0) != -1)) {
2894 kp_cnt
= len
/ sizeof(struct kinfo_proc
);
2896 for (i
= 0; i
< kp_cnt
; i
++) {
2897 uid_t i_euid
= kp
[i
].kp_eproc
.e_ucred
.cr_uid
;
2898 uid_t i_uid
= kp
[i
].kp_eproc
.e_pcred
.p_ruid
;
2899 uid_t i_svuid
= kp
[i
].kp_eproc
.e_pcred
.p_svuid
;
2900 pid_t i_pid
= kp
[i
].kp_proc
.p_pid
;
2902 if (i_euid
!= u
&& i_uid
!= u
&& i_svuid
!= u
) {
2906 job_log(j
, LOG_ERR
, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u",
2907 i_pid
, kp
[i
].kp_proc
.p_comm
, i_uid
, i_euid
, i_svuid
);
2909 /* Temporarily disabled due to 5423935 and 4946119. */
2911 /* Ask the accountless process to exit. */
2912 job_assumes(j
, runtime_kill(i_pid
, SIGTERM
) != -1);
2921 job_postfork_become_user(job_t j
)
2923 char loginname
[2000];
2924 char tmpdirpath
[PATH_MAX
];
2925 char shellpath
[PATH_MAX
];
2926 char homedir
[PATH_MAX
];
2929 gid_t desired_gid
= -1;
2930 uid_t desired_uid
= -1;
2932 if (getuid() != 0) {
2937 * I contend that having UID == 0 and GID != 0 is of dubious value.
2938 * Nevertheless, this used to work in Tiger. See: 5425348
2940 if (j
->groupname
&& !j
->username
) {
2941 j
->username
= "root";
2945 if ((pwe
= getpwnam(j
->username
)) == NULL
) {
2946 job_log(j
, LOG_ERR
, "getpwnam(\"%s\") failed", j
->username
);
2947 _exit(EXIT_FAILURE
);
2949 } else if (j
->mach_uid
) {
2950 if ((pwe
= getpwuid(j
->mach_uid
)) == NULL
) {
2951 job_log(j
, LOG_ERR
, "getpwuid(\"%u\") failed", j
->mach_uid
);
2952 job_find_and_blame_pids_with_weird_uids(j
);
2953 _exit(EXIT_FAILURE
);
2960 * We must copy the results of getpw*().
2962 * Why? Because subsequent API calls may call getpw*() as a part of
2963 * their implementation. Since getpw*() returns a [now thread scoped]
2964 * global, we must therefore cache the results before continuing.
2967 desired_uid
= pwe
->pw_uid
;
2968 desired_gid
= pwe
->pw_gid
;
2970 strlcpy(shellpath
, pwe
->pw_shell
, sizeof(shellpath
));
2971 strlcpy(loginname
, pwe
->pw_name
, sizeof(loginname
));
2972 strlcpy(homedir
, pwe
->pw_dir
, sizeof(homedir
));
2974 if (pwe
->pw_expire
&& time(NULL
) >= pwe
->pw_expire
) {
2975 job_log(j
, LOG_ERR
, "Expired account");
2976 _exit(EXIT_FAILURE
);
2980 if (j
->username
&& strcmp(j
->username
, loginname
) != 0) {
2981 job_log(j
, LOG_WARNING
, "Suspicious setup: User \"%s\" maps to user: %s", j
->username
, loginname
);
2982 } else if (j
->mach_uid
&& (j
->mach_uid
!= desired_uid
)) {
2983 job_log(j
, LOG_WARNING
, "Suspicious setup: UID %u maps to UID %u", j
->mach_uid
, desired_uid
);
2989 if ((gre
= getgrnam(j
->groupname
)) == NULL
) {
2990 job_log(j
, LOG_ERR
, "getgrnam(\"%s\") failed", j
->groupname
);
2991 _exit(EXIT_FAILURE
);
2994 desired_gid
= gre
->gr_gid
;
2997 if (!job_assumes(j
, setlogin(loginname
) != -1)) {
2998 _exit(EXIT_FAILURE
);
3001 if (!job_assumes(j
, setgid(desired_gid
) != -1)) {
3002 _exit(EXIT_FAILURE
);
3006 * The kernel team and the DirectoryServices team want initgroups()
3007 * called after setgid(). See 4616864 for more information.
3010 if (!j
->no_init_groups
) {
3011 if (!job_assumes(j
, initgroups(loginname
, desired_gid
) != -1)) {
3012 _exit(EXIT_FAILURE
);
3016 if (!job_assumes(j
, setuid(desired_uid
) != -1)) {
3017 _exit(EXIT_FAILURE
);
3020 r
= confstr(_CS_DARWIN_USER_TEMP_DIR
, tmpdirpath
, sizeof(tmpdirpath
));
3022 if (r
> 0 && r
< sizeof(tmpdirpath
)) {
3023 setenv("TMPDIR", tmpdirpath
, 0);
3026 setenv("SHELL", shellpath
, 0);
3027 setenv("HOME", homedir
, 0);
3028 setenv("USER", loginname
, 0);
3029 setenv("LOGNAME", loginname
, 0);
3033 job_setup_attributes(job_t j
)
3035 struct limititem
*li
;
3039 job_assumes(j
, setpriority(PRIO_PROCESS
, 0, j
->nice
) != -1);
3042 SLIST_FOREACH(li
, &j
->limits
, sle
) {
3045 if (!job_assumes(j
, getrlimit(li
->which
, &rl
) != -1)) {
3050 rl
.rlim_max
= li
->lim
.rlim_max
;
3053 rl
.rlim_cur
= li
->lim
.rlim_cur
;
3056 if (setrlimit(li
->which
, &rl
) == -1) {
3057 job_log_error(j
, LOG_WARNING
, "setrlimit()");
3061 if (!j
->inetcompat
&& j
->session_create
) {
3062 launchd_SessionCreate();
3065 if (j
->low_pri_io
) {
3066 job_assumes(j
, setiopolicy_np(IOPOL_TYPE_DISK
, IOPOL_SCOPE_PROCESS
, IOPOL_THROTTLE
) != -1);
3069 job_assumes(j
, chroot(j
->rootdir
) != -1);
3070 job_assumes(j
, chdir(".") != -1);
3073 job_postfork_become_user(j
);
3075 if (j
->workingdir
) {
3076 job_assumes(j
, chdir(j
->workingdir
) != -1);
3083 job_setup_fd(j
, STDOUT_FILENO
, j
->stdoutpath
, O_WRONLY
|O_APPEND
|O_CREAT
);
3084 job_setup_fd(j
, STDERR_FILENO
, j
->stderrpath
, O_WRONLY
|O_APPEND
|O_CREAT
);
3086 jobmgr_setup_env_from_other_jobs(j
->mgr
);
3088 SLIST_FOREACH(ei
, &j
->env
, sle
) {
3089 setenv(ei
->key
, ei
->value
, 1);
3093 * We'd like to call setsid() unconditionally, but we have reason to
3094 * believe that prevents launchd from being able to send signals to
3095 * setuid children. We'll settle for process-groups.
3097 if (getppid() != 1) {
3098 job_assumes(j
, setpgid(0, 0) != -1);
3100 job_assumes(j
, setsid() != -1);
3105 job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
)
3113 if ((fd
= open(path
, flags
|O_NOCTTY
, DEFFILEMODE
)) == -1) {
3114 job_log_error(j
, LOG_WARNING
, "open(\"%s\", ...)", path
);
3118 job_assumes(j
, dup2(fd
, target_fd
) != -1);
3119 job_assumes(j
, runtime_close(fd
) == 0);
3123 dir_has_files(job_t j
, const char *path
)
3125 DIR *dd
= opendir(path
);
3133 while ((de
= readdir(dd
))) {
3134 if (strcmp(de
->d_name
, ".") && strcmp(de
->d_name
, "..")) {
3140 job_assumes(j
, closedir(dd
) == 0);
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	/* Compute the next fire time for `ci` via the cron emulator, insert
	 * it into the globally sorted event list, and (re)arm the single
	 * absolute-time kevent timer for the list head. */
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	/* Keep sorted_calendar_events ordered by when_next. */
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		/* ci must want to fire after every other timer, or there are no timers */

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	/* Workaround 5225889 */
	kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_DELETE, 0, 0, root_jobmgr);

	if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER,
					EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
		char time_string[100];
		size_t time_string_len;

		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		/* strip ctime_r()'s trailing newline */
		if (time_string_len && time_string[time_string_len - 1] == '\n') {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
void
extract_rcsid_substr(const char *i, char *o, size_t osz)
{
	/* Pull the bare revision number out of an RCS id string such as
	 * "$Revision: 23585 $": copy everything after the first space, then
	 * truncate at the next space. Falls back to copying the whole input
	 * when no space is present. */
	char *rcs_rev_tmp = strchr(i, ' ');

	if (!rcs_rev_tmp) {
		snprintf(o, osz, "%s", i);
	} else {
		snprintf(o, osz, "%s", rcs_rev_tmp + 1);
		rcs_rev_tmp = strchr(o, ' ');
		if (rcs_rev_tmp) {
			*rcs_rev_tmp = '\0';
		}
	}
}
3220 jobmgr_log_bug(jobmgr_t jm
, const char *rcs_rev
, const char *path
, unsigned int line
, const char *test
)
3222 int saved_errno
= errno
;
3223 const char *file
= strrchr(path
, '/');
3226 extract_rcsid_substr(rcs_rev
, buf
, sizeof(buf
));
3234 jobmgr_log(jm
, LOG_NOTICE
, "Bug: %s:%u (%s):%u: %s", file
, line
, buf
, saved_errno
, test
);
3238 job_log_bug(job_t j
, const char *rcs_rev
, const char *path
, unsigned int line
, const char *test
)
3240 int saved_errno
= errno
;
3241 const char *file
= strrchr(path
, '/');
3244 extract_rcsid_substr(rcs_rev
, buf
, sizeof(buf
));
3252 job_log(j
, LOG_NOTICE
, "Bug: %s:%u (%s):%u: %s", file
, line
, buf
, saved_errno
, test
);
3256 job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
)
3258 struct runtime_syslog_attr attr
= { "com.apple.launchd", j
->label
, j
->mgr
->name
, pri
, getuid(), getpid(), j
->p
};
3264 * Hack: If bootstrap_port is set, we must be on the child side of a
3265 * fork(), but before the exec*(). Let's route the log message back to
3268 if (bootstrap_port
) {
3269 return _vproc_logv(pri
, err
, msg
, ap
);
3272 newmsgsz
= strlen(msg
) + 200;
3273 newmsg
= alloca(newmsgsz
);
3276 snprintf(newmsg
, newmsgsz
, "%s: %s", msg
, strerror(err
));
3278 snprintf(newmsg
, newmsgsz
, "%s", msg
);
3282 oldmask
= setlogmask(LOG_UPTO(LOG_DEBUG
));
3285 runtime_vsyslog(&attr
, newmsg
, ap
);
3288 setlogmask(oldmask
);
3293 job_log_error(job_t j
, int pri
, const char *msg
, ...)
3298 job_logv(j
, pri
, errno
, msg
, ap
);
3303 job_log(job_t j
, int pri
, const char *msg
, ...)
3308 job_logv(j
, pri
, 0, msg
, ap
);
3314 jobmgr_log_error(jobmgr_t jm
, int pri
, const char *msg
, ...)
3319 jobmgr_logv(jm
, pri
, errno
, msg
, ap
);
3325 jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...)
3330 jobmgr_logv(jm
, pri
, 0, msg
, ap
);
3335 jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
)
3339 size_t i
, o
, jmname_len
= strlen(jm
->name
), newmsgsz
;
3341 newname
= alloca((jmname_len
+ 1) * 2);
3342 newmsgsz
= (jmname_len
+ 1) * 2 + strlen(msg
) + 100;
3343 newmsg
= alloca(newmsgsz
);
3345 for (i
= 0, o
= 0; i
< jmname_len
; i
++, o
++) {
3346 if (jm
->name
[i
] == '%') {
3350 newname
[o
] = jm
->name
[i
];
3355 snprintf(newmsg
, newmsgsz
, "%s: %s: %s", newname
, msg
, strerror(err
));
3357 snprintf(newmsg
, newmsgsz
, "%s: %s", newname
, msg
);
3360 if (jm
->parentmgr
) {
3361 jobmgr_logv(jm
->parentmgr
, pri
, 0, newmsg
, ap
);
3363 struct runtime_syslog_attr attr
= { "com.apple.launchd", "com.apple.launchd", jm
->name
, pri
, getuid(), getpid(), getpid() };
3365 runtime_vsyslog(&attr
, newmsg
, ap
);
void
semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
{
	/* Stop watching the semaphore item's vnode, if it has one open. */
	if (si->fd != -1) {
		job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
		job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
	}
}
void
semaphoreitem_watch(job_t j, struct semaphoreitem *si)
{
	/* Attach an EVFILT_VNODE kevent to the semaphore path (or, if the
	 * path is absent, to its parent directory). Falls back to a 3-second
	 * polling timer on filesystems without kqueue support. */
	char *parentdir, tmp_path[PATH_MAX];
	const char *which_path = si->what;
	int saved_errno = 0;
	int fflags = 0;

	switch (si->why) {
	case PATH_EXISTS:
		fflags = NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
		break;
	case PATH_MISSING:
		fflags = NOTE_DELETE|NOTE_RENAME;
		break;
	case DIR_NOT_EMPTY:
	case PATH_CHANGES:
		fflags = NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE|NOTE_ATTRIB|NOTE_LINK;
		break;
	default:
		break;
	}

	/* dirname() may modify tmp_path */
	strlcpy(tmp_path, si->what, sizeof(tmp_path));

	if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
		return;
	}

	/* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
	do {
		if (si->fd == -1) {
			if ((si->fd = _fd(open(which_path, O_EVTONLY|O_NOCTTY))) == -1) {
				which_path = parentdir;
				si->fd = _fd(open(which_path, O_EVTONLY|O_NOCTTY));
			}
		}

		if (si->fd == -1) {
			return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", which_path);
		}

		job_log(j, LOG_DEBUG, "Watching Vnode: %d", si->fd);

		if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
			saved_errno = errno;
			/*
			 * The FD can be revoked between the open() and kevent().
			 * This is similar to the inability for kevents to be
			 * attached to short lived zombie processes after fork()
			 * but before kevent().
			 */
			job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1;
		}
	} while ((si->fd == -1) && (saved_errno == ENOENT));

	if (saved_errno == ENOTSUP) {
		/*
		 * 3524219 NFS needs kqueue support
		 * 4124079 VFS needs generic kqueue support
		 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
		 */
		job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);

		if (!j->poll_for_vfs_changes) {
			j->poll_for_vfs_changes = true;
			job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
		}
	}
}
void
semaphoreitem_callback(job_t j, struct kevent *kev)
{
	/* Vnode kevent fired for one of this job's path semaphores: find the
	 * matching item, note whether the path became invalid (deleted,
	 * renamed, revoked), and dispatch the job. */
	char invalidation_reason[100] = "";
	struct semaphoreitem *si;

	SLIST_FOREACH(si, &j->semaphores, sle) {
		switch (si->why) {
		case PATH_CHANGES:
		case PATH_EXISTS:
		case PATH_MISSING:
		case DIR_NOT_EMPTY:
			break;
		default:
			continue;
		}

		if (si->fd == (int)kev->ident) {
			break;
		}
	}

	if (!job_assumes(j, si != NULL)) {
		return;
	}

	if (NOTE_DELETE & kev->fflags) {
		strcat(invalidation_reason, "deleted");
	}

	if (NOTE_RENAME & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/renamed");
		} else {
			strcat(invalidation_reason, "renamed");
		}
	}

	if (NOTE_REVOKE & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/revoked");
		} else {
			strcat(invalidation_reason, "revoked");
		}
	}

	if (invalidation_reason[0]) {
		job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
		job_assumes(j, runtime_close(si->fd) == 0);
		si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
	}

	job_log(j, LOG_DEBUG, "Watch path modified: %s", si->what);

	if (si->why == PATH_CHANGES) {
		j->start_pending = true;
	}

	job_dispatch(j, false);
}
3513 calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
)
3515 struct tm
*tmptm
= context
;
3518 if (LAUNCH_DATA_INTEGER
!= launch_data_get_type(obj
)) {
3519 /* hack to let caller know something went wrong */
3524 val
= launch_data_get_integer(obj
);
3526 if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MINUTE
) == 0) {
3527 tmptm
->tm_min
= val
;
3528 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_HOUR
) == 0) {
3529 tmptm
->tm_hour
= val
;
3530 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_DAY
) == 0) {
3531 tmptm
->tm_mday
= val
;
3532 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_WEEKDAY
) == 0) {
3533 tmptm
->tm_wday
= val
;
3534 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MONTH
) == 0) {
3535 tmptm
->tm_mon
= val
;
3536 tmptm
->tm_mon
-= 1; /* 4798263 cron compatibility */
3541 calendarinterval_new_from_obj(job_t j
, launch_data_t obj
)
3545 memset(&tmptm
, 0, sizeof(0));
3553 if (!job_assumes(j
, obj
!= NULL
)) {
3557 if (LAUNCH_DATA_DICTIONARY
!= launch_data_get_type(obj
)) {
3561 launch_data_dict_iterate(obj
, calendarinterval_new_from_obj_dict_walk
, &tmptm
);
3563 if (tmptm
.tm_sec
== -1) {
3567 return calendarinterval_new(j
, &tmptm
);
3571 calendarinterval_new(job_t j
, struct tm
*w
)
3573 struct calendarinterval
*ci
= calloc(1, sizeof(struct calendarinterval
));
3575 if (!job_assumes(j
, ci
!= NULL
)) {
3582 SLIST_INSERT_HEAD(&j
->cal_intervals
, ci
, sle
);
3584 calendarinterval_setalarm(j
, ci
);
3592 calendarinterval_delete(job_t j
, struct calendarinterval
*ci
)
3594 SLIST_REMOVE(&j
->cal_intervals
, ci
, calendarinterval
, sle
);
3595 LIST_REMOVE(ci
, global_sle
);
3603 calendarinterval_sanity_check(void)
3605 struct calendarinterval
*ci
= LIST_FIRST(&sorted_calendar_events
);
3606 time_t now
= time(NULL
);
3608 if (ci
&& (ci
->when_next
< now
)) {
3609 jobmgr_assumes(root_jobmgr
, raise(SIGUSR1
) != -1);
3614 calendarinterval_callback(void)
3616 struct calendarinterval
*ci
, *ci_next
;
3617 time_t now
= time(NULL
);
3619 LIST_FOREACH_SAFE(ci
, &sorted_calendar_events
, global_sle
, ci_next
) {
3622 if (ci
->when_next
> now
) {
3626 LIST_REMOVE(ci
, global_sle
);
3627 calendarinterval_setalarm(j
, ci
);
3629 j
->start_pending
= true;
3630 job_dispatch(j
, false);
3635 socketgroup_new(job_t j
, const char *name
, int *fds
, unsigned int fd_cnt
, bool junkfds
)
3637 struct socketgroup
*sg
= calloc(1, sizeof(struct socketgroup
) + strlen(name
) + 1);
3639 if (!job_assumes(j
, sg
!= NULL
)) {
3643 sg
->fds
= calloc(1, fd_cnt
* sizeof(int));
3644 sg
->fd_cnt
= fd_cnt
;
3645 sg
->junkfds
= junkfds
;
3647 if (!job_assumes(j
, sg
->fds
!= NULL
)) {
3652 memcpy(sg
->fds
, fds
, fd_cnt
* sizeof(int));
3653 strcpy(sg
->name_init
, name
);
3655 SLIST_INSERT_HEAD(&j
->sockets
, sg
, sle
);
3663 socketgroup_delete(job_t j
, struct socketgroup
*sg
)
3667 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
3669 struct sockaddr_storage ss
;
3670 struct sockaddr_un
*sun
= (struct sockaddr_un
*)&ss
;
3671 socklen_t ss_len
= sizeof(ss
);
3674 if (job_assumes(j
, getsockname(sg
->fds
[i
], (struct sockaddr
*)&ss
, &ss_len
) != -1)
3675 && job_assumes(j
, ss_len
> 0) && (ss
.ss_family
== AF_UNIX
)) {
3676 job_assumes(j
, unlink(sun
->sun_path
) != -1);
3677 /* We might conditionally need to delete a directory here */
3680 job_assumes(j
, runtime_close(sg
->fds
[i
]) != -1);
3683 SLIST_REMOVE(&j
->sockets
, sg
, socketgroup
, sle
);
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	/* Add or delete EVFILT_READ kevents for every socket in the group in
	 * one bulk call, then verify each per-fd result. */
	struct kevent kev[sg->fd_cnt];
	char buf[10000];
	unsigned int i, buf_off = 0;

	if (sg->junkfds) {
		return;
	}

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);

	for (i = 0; i < sg->fd_cnt; i++) {
		/* kevent_bulk_mod() uses EV_RECEIPT: every entry comes back with
		 * EV_ERROR set and the per-fd status in `data` (0 == success). */
		job_assumes(j, kev[i].flags & EV_ERROR);
		errno = kev[i].data;
		job_assumes(j, kev[i].data == 0);
	}
}
3719 socketgroup_ignore(job_t j
, struct socketgroup
*sg
)
3721 socketgroup_kevent_mod(j
, sg
, false);
3725 socketgroup_watch(job_t j
, struct socketgroup
*sg
)
3727 socketgroup_kevent_mod(j
, sg
, true);
3731 socketgroup_callback(job_t j
)
3733 job_dispatch(j
, true);
3737 envitem_new(job_t j
, const char *k
, const char *v
, bool global
)
3739 struct envitem
*ei
= calloc(1, sizeof(struct envitem
) + strlen(k
) + 1 + strlen(v
) + 1);
3741 if (!job_assumes(j
, ei
!= NULL
)) {
3745 strcpy(ei
->key_init
, k
);
3746 ei
->value
= ei
->key_init
+ strlen(k
) + 1;
3747 strcpy(ei
->value
, v
);
3750 SLIST_INSERT_HEAD(&j
->global_env
, ei
, sle
);
3752 SLIST_INSERT_HEAD(&j
->env
, ei
, sle
);
3755 job_log(j
, LOG_DEBUG
, "Added environmental variable: %s=%s", k
, v
);
3761 envitem_delete(job_t j
, struct envitem
*ei
, bool global
)
3764 SLIST_REMOVE(&j
->global_env
, ei
, envitem
, sle
);
3766 SLIST_REMOVE(&j
->env
, ei
, envitem
, sle
);
3773 envitem_setup(launch_data_t obj
, const char *key
, void *context
)
3777 if (launch_data_get_type(obj
) != LAUNCH_DATA_STRING
) {
3781 envitem_new(j
, key
, launch_data_get_string(obj
), j
->importing_global_env
);
3785 limititem_update(job_t j
, int w
, rlim_t r
)
3787 struct limititem
*li
;
3789 SLIST_FOREACH(li
, &j
->limits
, sle
) {
3790 if (li
->which
== w
) {
3796 li
= calloc(1, sizeof(struct limititem
));
3798 if (!job_assumes(j
, li
!= NULL
)) {
3802 SLIST_INSERT_HEAD(&j
->limits
, li
, sle
);
3807 if (j
->importing_hard_limits
) {
3808 li
->lim
.rlim_max
= r
;
3811 li
->lim
.rlim_cur
= r
;
3819 limititem_delete(job_t j
, struct limititem
*li
)
3821 SLIST_REMOVE(&j
->limits
, li
, limititem
, sle
);
3828 seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
)
3832 if (launch_data_get_type(obj
) != LAUNCH_DATA_BOOL
) {
3833 job_log(j
, LOG_WARNING
, "Sandbox flag value must be boolean: %s", key
);
3837 if (launch_data_get_bool(obj
) == false) {
3841 if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOX_NAMED
) == 0) {
3842 j
->seatbelt_flags
|= SANDBOX_NAMED
;
3848 limititem_setup(launch_data_t obj
, const char *key
, void *context
)
3851 int i
, limits_cnt
= (sizeof(launchd_keys2limits
) / sizeof(launchd_keys2limits
[0]));
3854 if (launch_data_get_type(obj
) != LAUNCH_DATA_INTEGER
) {
3858 rl
= launch_data_get_integer(obj
);
3860 for (i
= 0; i
< limits_cnt
; i
++) {
3861 if (strcasecmp(launchd_keys2limits
[i
].key
, key
) == 0) {
3866 if (i
== limits_cnt
) {
3870 limititem_update(j
, launchd_keys2limits
[i
].val
, rl
);
3874 job_useless(job_t j
)
3876 /* Yes, j->unload_at_exit and j->only_once seem the same, but they'll differ someday... */
3878 if ((j
->unload_at_exit
|| j
->only_once
) && j
->start_time
!= 0) {
3879 if (j
->unload_at_exit
&& j
->j_port
) {
3882 job_log(j
, LOG_INFO
, "Exited. Was only configured to run once.");
3884 } else if (j
->removal_pending
) {
3885 job_log(j
, LOG_DEBUG
, "Exited while removal was pending.");
3887 } else if (j
->mgr
->shutting_down
&& (j
->hopefully_exits_first
|| j
->mgr
->hopefully_first_cnt
== 0)) {
3888 job_log(j
, LOG_DEBUG
, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children
, total_anon_children
);
3890 } else if (j
->legacy_mach_job
) {
3891 if (SLIST_EMPTY(&j
->machservices
)) {
3892 job_log(j
, LOG_INFO
, "Garbage collecting");
3894 } else if (!j
->checkedin
) {
3895 job_log(j
, LOG_WARNING
, "Failed to check-in!");
3904 job_keepalive(job_t j
)
3906 mach_msg_type_number_t statusCnt
;
3907 mach_port_status_t status
;
3908 struct semaphoreitem
*si
;
3909 struct machservice
*ms
;
3911 bool good_exit
= (WIFEXITED(j
->last_exit_status
) && WEXITSTATUS(j
->last_exit_status
) == 0);
3913 if (j
->mgr
->shutting_down
) {
3920 * We definitely need to revisit this after Leopard ships. Please see
3921 * launchctl.c for the other half of this hack.
3923 if (j
->mgr
->global_on_demand_cnt
> 0 && strcmp(j
->label
, "com.apple.kextd") != 0) {
3927 if (j
->start_pending
) {
3928 job_log(j
, LOG_DEBUG
, "KeepAlive check: Pent-up non-IPC launch criteria.");
3933 job_log(j
, LOG_DEBUG
, "KeepAlive check: job configured to run continuously.");
3937 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
3938 statusCnt
= MACH_PORT_RECEIVE_STATUS_COUNT
;
3939 if (mach_port_get_attributes(mach_task_self(), ms
->port
, MACH_PORT_RECEIVE_STATUS
,
3940 (mach_port_info_t
)&status
, &statusCnt
) != KERN_SUCCESS
) {
3943 if (status
.mps_msgcount
) {
3944 job_log(j
, LOG_DEBUG
, "KeepAlive check: job restarted due to %d queued Mach messages on service: %s",
3945 status
.mps_msgcount
, ms
->name
);
3951 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
3952 bool wanted_state
= false;
3958 wanted_state
= true;
3960 if (network_up
== wanted_state
) {
3961 job_log(j
, LOG_DEBUG
, "KeepAlive: The network is %s.", wanted_state
? "up" : "down");
3965 case SUCCESSFUL_EXIT
:
3966 wanted_state
= true;
3968 if (good_exit
== wanted_state
) {
3969 job_log(j
, LOG_DEBUG
, "KeepAlive: The exit state was %s.", wanted_state
? "successful" : "failure");
3973 case OTHER_JOB_ENABLED
:
3974 wanted_state
= true;
3975 case OTHER_JOB_DISABLED
:
3976 if ((bool)job_find(si
->what
) == wanted_state
) {
3977 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "enabled" : "disabled", si
->what
);
3981 case OTHER_JOB_ACTIVE
:
3982 wanted_state
= true;
3983 case OTHER_JOB_INACTIVE
:
3984 if ((other_j
= job_find(si
->what
))) {
3985 if ((bool)other_j
->p
== wanted_state
) {
3986 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "active" : "inactive", si
->what
);
3992 wanted_state
= true;
3994 if ((bool)(stat(si
->what
, &sb
) == 0) == wanted_state
) {
3996 job_assumes(j
, runtime_close(si
->fd
) == 0);
3999 job_log(j
, LOG_DEBUG
, "KeepAlive: The following path %s: %s", wanted_state
? "exists" : "is missing", si
->what
);
4006 if (-1 == (qdir_file_cnt
= dir_has_files(j
, si
->what
))) {
4007 job_log_error(j
, LOG_ERR
, "Failed to count the number of files in \"%s\"", si
->what
);
4008 } else if (qdir_file_cnt
> 0) {
4009 job_log(j
, LOG_DEBUG
, "KeepAlive: Directory is not empty: %s", si
->what
);
4024 } else if (j
->argv
) {
4034 struct machservice
*ms
;
4037 return "PID is still valid";
4040 if (j
->mgr
->shutting_down
&& j
->log_redirect_fd
) {
4041 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
4042 j
->log_redirect_fd
= 0;
4045 if (j
->log_redirect_fd
) {
4046 if (job_assumes(j
, j
->wait4pipe_eof
)) {
4047 return "Standard out/error is still valid";
4049 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
4050 j
->log_redirect_fd
= 0;
4054 if (j
->priv_port_has_senders
) {
4055 return "Privileged Port still has outstanding senders";
4058 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
4059 if (ms
->recv
&& ms
->isActive
) {
4060 return "Mach service is still active";
4068 machservice_watch(job_t j
, struct machservice
*ms
)
4071 job_assumes(j
, runtime_add_mport(ms
->port
, NULL
, 0) == KERN_SUCCESS
);
4076 machservice_ignore(job_t j
, struct machservice
*ms
)
4078 job_assumes(j
, runtime_remove_mport(ms
->port
) == KERN_SUCCESS
);
4082 machservice_resetport(job_t j
, struct machservice
*ms
)
4084 LIST_REMOVE(ms
, port_hash_sle
);
4085 job_assumes(j
, launchd_mport_close_recv(ms
->port
) == KERN_SUCCESS
);
4086 job_assumes(j
, launchd_mport_deallocate(ms
->port
) == KERN_SUCCESS
);
4088 job_assumes(j
, launchd_mport_create_recv(&ms
->port
) == KERN_SUCCESS
);
4089 job_assumes(j
, launchd_mport_make_send(ms
->port
) == KERN_SUCCESS
);
4090 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
4093 struct machservice
*
4094 machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
)
4096 struct machservice
*ms
;
4098 if ((ms
= calloc(1, sizeof(struct machservice
) + strlen(name
) + 1)) == NULL
) {
4102 strcpy((char *)ms
->name
, name
);
4104 ms
->per_pid
= pid_local
;
4106 if (*serviceport
== MACH_PORT_NULL
) {
4107 if (!job_assumes(j
, launchd_mport_create_recv(&ms
->port
) == KERN_SUCCESS
)) {
4111 if (!job_assumes(j
, launchd_mport_make_send(ms
->port
) == KERN_SUCCESS
)) {
4114 *serviceport
= ms
->port
;
4117 ms
->port
= *serviceport
;
4118 ms
->isActive
= true;
4121 SLIST_INSERT_HEAD(&j
->machservices
, ms
, sle
);
4122 LIST_INSERT_HEAD(&j
->mgr
->ms_hash
[hash_ms(ms
->name
)], ms
, name_hash_sle
);
4123 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
4125 job_log(j
, LOG_INFO
, "Mach service added: %s", name
);
4129 job_assumes(j
, launchd_mport_close_recv(ms
->port
) == KERN_SUCCESS
);
4136 machservice_status(struct machservice
*ms
)
4139 return BOOTSTRAP_STATUS_ACTIVE
;
4140 } else if (ms
->job
->ondemand
) {
4141 return BOOTSTRAP_STATUS_ON_DEMAND
;
4143 return BOOTSTRAP_STATUS_INACTIVE
;
4148 job_setup_exception_port(job_t j
, task_t target_task
)
4150 struct machservice
*ms
;
4151 thread_state_flavor_t f
= 0;
4152 mach_port_t exc_port
= the_exception_server
;
4154 if (j
->alt_exc_handler
) {
4155 ms
= jobmgr_lookup_service(j
->mgr
, j
->alt_exc_handler
, true, 0);
4157 exc_port
= machservice_port(ms
);
4159 job_log(j
, LOG_WARNING
, "Falling back to default Mach exception handler. Could not find: %s", j
->alt_exc_handler
);
4161 } else if (j
->internal_exc_handler
) {
4162 exc_port
= runtime_get_kernel_port();
4163 } else if (!exc_port
) {
4167 #if defined (__ppc__)
4168 f
= PPC_THREAD_STATE64
;
4169 #elif defined(__i386__)
4170 f
= x86_THREAD_STATE
;
4171 #elif defined(__arm__)
4172 f
= ARM_THREAD_STATE
;
4174 #error "unknown architecture"
4178 job_assumes(j
, task_set_exception_ports(target_task
, EXC_MASK_CRASH
, exc_port
,
4179 EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
) == KERN_SUCCESS
);
4180 } else if (getpid() == 1 && the_exception_server
) {
4181 mach_port_t mhp
= mach_host_self();
4182 job_assumes(j
, host_set_exception_ports(mhp
, EXC_MASK_CRASH
, the_exception_server
,
4183 EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
) == KERN_SUCCESS
);
4184 job_assumes(j
, launchd_mport_deallocate(mhp
) == KERN_SUCCESS
);
4190 job_set_exeception_port(job_t j
, mach_port_t port
)
4192 if (!the_exception_server
) {
4193 the_exception_server
= port
;
4194 job_setup_exception_port(j
, 0);
4196 job_log(j
, LOG_WARNING
, "The exception server is already claimed!");
4201 machservice_setup_options(launch_data_t obj
, const char *key
, void *context
)
4203 struct machservice
*ms
= context
;
4204 mach_port_t mhp
= mach_host_self();
4208 if (!job_assumes(ms
->job
, mhp
!= MACH_PORT_NULL
)) {
4212 switch (launch_data_get_type(obj
)) {
4213 case LAUNCH_DATA_INTEGER
:
4214 which_port
= launch_data_get_integer(obj
);
4215 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT
) == 0) {
4216 switch (which_port
) {
4217 case TASK_KERNEL_PORT
:
4218 case TASK_HOST_PORT
:
4219 case TASK_NAME_PORT
:
4220 case TASK_BOOTSTRAP_PORT
:
4221 /* I find it a little odd that zero isn't reserved in the header */
4223 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved task special port: %d", which_port
);
4226 ms
->special_port_num
= which_port
;
4227 SLIST_INSERT_HEAD(&special_ports
, ms
, special_port_sle
);
4230 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT
) == 0 && getpid() == 1) {
4231 if (which_port
> HOST_MAX_SPECIAL_KERNEL_PORT
) {
4232 job_assumes(ms
->job
, (errno
= host_set_special_port(mhp
, which_port
, ms
->port
)) == KERN_SUCCESS
);
4234 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved host special port: %d", which_port
);
4237 case LAUNCH_DATA_BOOL
:
4238 b
= launch_data_get_bool(obj
);
4239 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE
) == 0) {
4240 ms
->debug_on_close
= b
;
4241 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_RESETATCLOSE
) == 0) {
4243 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN
) == 0) {
4245 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER
) == 0) {
4246 job_set_exeception_port(ms
->job
, ms
->port
);
4247 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_KUNCSERVER
) == 0) {
4249 job_assumes(ms
->job
, host_set_UNDServer(mhp
, ms
->port
) == KERN_SUCCESS
);
4252 case LAUNCH_DATA_DICTIONARY
:
4253 job_set_exeception_port(ms
->job
, ms
->port
);
4259 job_assumes(ms
->job
, launchd_mport_deallocate(mhp
) == KERN_SUCCESS
);
4263 machservice_setup(launch_data_t obj
, const char *key
, void *context
)
4266 struct machservice
*ms
;
4267 mach_port_t p
= MACH_PORT_NULL
;
4269 if ((ms
= jobmgr_lookup_service(j
->mgr
, key
, false, 0))) {
4270 job_log(j
, LOG_WARNING
, "Conflict with job: %s over Mach service: %s", ms
->job
->label
, key
);
4274 if ((ms
= machservice_new(j
, key
, &p
, false)) == NULL
) {
4275 job_log_error(j
, LOG_WARNING
, "Cannot add service: %s", key
);
4279 ms
->isActive
= false;
4281 if (launch_data_get_type(obj
) == LAUNCH_DATA_DICTIONARY
) {
4282 launch_data_dict_iterate(obj
, machservice_setup_options
, ms
);
4287 jobmgr_do_garbage_collection(jobmgr_t jm
)
4292 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
4293 jobmgr_do_garbage_collection(jmi
);
4296 if (!jm
->shutting_down
) {
4300 jobmgr_log(jm
, LOG_DEBUG
, "Garbage collecting.");
4303 * Normally, we wait for all resources of a job (Unix PIDs/FDs and Mach ports)
4304 * to reset before we conider the job truly dead and ready to be spawned again.
4306 * In order to work around 5487724 and 3456090, we're going to call reboot()
4307 * when the last PID dies and not wait for the associated resources to reset.
4309 if (getpid() == 1 && jm
->parentmgr
== NULL
&& total_children
== 0) {
4310 jobmgr_log(jm
, LOG_DEBUG
, "About to force a call to: reboot(%s)", reboot_flags_to_C_names(jm
->reboot_flags
));
4312 jobmgr_assumes(jm
, reboot(jm
->reboot_flags
) != -1);
4315 if (jm
->hopefully_first_cnt
) {
4319 if (jm
->parentmgr
&& jm
->parentmgr
->shutting_down
&& jm
->parentmgr
->hopefully_first_cnt
) {
4323 if (!jm
->sent_stop_to_normal_jobs
) {
4324 jobmgr_log(jm
, LOG_DEBUG
, "Asking \"normal\" jobs to exit.");
4326 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
4327 if (!job_active(ji
)) {
4329 } else if (!ji
->hopefully_exits_last
) {
4334 jm
->sent_stop_to_normal_jobs
= true;
4337 if (jm
->normal_active_cnt
) {
4341 if (!jm
->sent_stop_to_hopefully_last_jobs
) {
4342 jobmgr_log(jm
, LOG_DEBUG
, "Asking \"hopefully last\" jobs to exit.");
4344 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
4345 if (ji
->p
&& ji
->anonymous
) {
4347 } else if (ji
->p
&& job_assumes(ji
, ji
->hopefully_exits_last
)) {
4352 jm
->sent_stop_to_hopefully_last_jobs
= true;
4355 if (!SLIST_EMPTY(&jm
->submgrs
)) {
4359 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
4360 if (!ji
->anonymous
) {
4365 jobmgr_log_stray_children(jm
);
4371 jobmgr_log_stray_children(jobmgr_t jm
)
4373 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_ALL
};
4374 size_t i
, kp_cnt
, len
= sizeof(struct kinfo_proc
) * get_kern_max_proc();
4375 struct kinfo_proc
*kp
;
4377 #if TARGET_OS_EMBEDDED
4378 if (!do_apple_internal_magic
) {
4382 if (jm
->parentmgr
|| getpid() != 1) {
4386 if (!jobmgr_assumes(jm
, (kp
= malloc(len
)) != NULL
)) {
4389 if (!jobmgr_assumes(jm
, sysctl(mib
, 3, kp
, &len
, NULL
, 0) != -1)) {
4393 kp_cnt
= len
/ sizeof(struct kinfo_proc
);
4395 for (i
= 0; i
< kp_cnt
; i
++) {
4396 pid_t p_i
= kp
[i
].kp_proc
.p_pid
;
4397 pid_t pp_i
= kp
[i
].kp_eproc
.e_ppid
;
4398 pid_t pg_i
= kp
[i
].kp_eproc
.e_pgid
;
4399 const char *z
= (kp
[i
].kp_proc
.p_stat
== SZOMB
) ? "zombie " : "";
4400 const char *n
= kp
[i
].kp_proc
.p_comm
;
4402 if (p_i
== 0 || p_i
== 1) {
4406 jobmgr_log(jm
, LOG_WARNING
, "Stray %sprocess at shutdown: PID %u PPID %u PGID %u %s", z
, p_i
, pp_i
, pg_i
, n
);
4409 * The kernel team requested that launchd not do this for Leopard.
4410 * jobmgr_assumes(jm, runtime_kill(p_i, SIGKILL) != -1);
4419 jobmgr_parent(jobmgr_t jm
)
4421 return jm
->parentmgr
;
4425 job_uncork_fork(job_t j
)
4429 job_log(j
, LOG_DEBUG
, "Uncorking the fork().");
4430 /* this unblocks the child and avoids a race
4431 * between the above fork() and the kevent_mod() */
4432 job_assumes(j
, write(j
->forkfd
, &c
, sizeof(c
)) == sizeof(c
));
4433 job_assumes(j
, runtime_close(j
->forkfd
) != -1);
4438 jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
)
4440 mach_msg_size_t mxmsgsz
;
4441 job_t bootstrapper
= NULL
;
4444 launchd_assert(offsetof(struct jobmgr_s
, kqjobmgr_callback
) == 0);
4446 if (jm
&& requestorport
== MACH_PORT_NULL
) {
4447 jobmgr_log(jm
, LOG_ERR
, "Mach sub-bootstrap create request requires a requester port");
4451 jmr
= calloc(1, sizeof(struct jobmgr_s
) + (name
? (strlen(name
) + 1) : 128));
4457 jmr
->kqjobmgr_callback
= jobmgr_callback
;
4458 strcpy(jmr
->name_init
, name
? name
: "Under construction");
4460 jmr
->req_port
= requestorport
;
4462 if ((jmr
->parentmgr
= jm
)) {
4463 SLIST_INSERT_HEAD(&jm
->submgrs
, jmr
, sle
);
4466 if (jm
&& !jobmgr_assumes(jmr
, launchd_mport_notify_req(jmr
->req_port
, MACH_NOTIFY_DEAD_NAME
) == KERN_SUCCESS
)) {
4470 if (transfer_port
!= MACH_PORT_NULL
) {
4471 jobmgr_assumes(jmr
, jm
!= NULL
);
4472 jmr
->jm_port
= transfer_port
;
4473 } else if (!jm
&& getpid() != 1) {
4474 char *trusted_fd
= getenv(LAUNCHD_TRUSTED_FD_ENV
);
4477 snprintf(service_buf
, sizeof(service_buf
), "com.apple.launchd.peruser.%u", getuid());
4479 if (!jobmgr_assumes(jmr
, bootstrap_check_in(bootstrap_port
, service_buf
, &jmr
->jm_port
) == 0)) {
4484 int dfd
, lfd
= strtol(trusted_fd
, NULL
, 10);
4486 if ((dfd
= dup(lfd
)) >= 0) {
4487 jobmgr_assumes(jmr
, runtime_close(dfd
) != -1);
4488 jobmgr_assumes(jmr
, runtime_close(lfd
) != -1);
4491 unsetenv(LAUNCHD_TRUSTED_FD_ENV
);
4494 /* cut off the Libc cache, we don't want to deadlock against ourself */
4495 inherited_bootstrap_port
= bootstrap_port
;
4496 bootstrap_port
= MACH_PORT_NULL
;
4497 launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port
, MACH_NOTIFY_DEAD_NAME
) == KERN_SUCCESS
);
4499 /* We set this explicitly as we start each child */
4500 launchd_assert(launchd_set_bport(MACH_PORT_NULL
) == KERN_SUCCESS
);
4501 } else if (!jobmgr_assumes(jmr
, launchd_mport_create_recv(&jmr
->jm_port
) == KERN_SUCCESS
)) {
4506 sprintf(jmr
->name_init
, "%u", MACH_PORT_INDEX(jmr
->jm_port
));
4509 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
4510 mxmsgsz
= sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem
);
4511 if (job_mig_protocol_vproc_subsystem
.maxsize
> mxmsgsz
) {
4512 mxmsgsz
= job_mig_protocol_vproc_subsystem
.maxsize
;
4516 jobmgr_assumes(jmr
, kevent_mod(SIGTERM
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
) != -1);
4517 jobmgr_assumes(jmr
, kevent_mod(SIGUSR1
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
) != -1);
4518 jobmgr_assumes(jmr
, kevent_mod(0, EVFILT_FS
, EV_ADD
, VQ_MOUNT
|VQ_UNMOUNT
|VQ_UPDATE
, 0, jmr
) != -1);
4522 bootstrapper
= jobmgr_init_session(jmr
, name
, sflag
);
4525 if (!bootstrapper
|| !bootstrapper
->weird_bootstrap
) {
4526 if (!jobmgr_assumes(jmr
, runtime_add_mport(jmr
->jm_port
, protocol_vproc_server
, mxmsgsz
) == KERN_SUCCESS
)) {
4531 jobmgr_log(jmr
, LOG_DEBUG
, "Created job manager%s%s", jm
? " with parent: " : ".", jm
? jm
->name
: "");
4534 jobmgr_assumes(jmr
, job_dispatch(bootstrapper
, true) != NULL
);
4537 if (jmr
->parentmgr
) {
4551 jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
)
4553 const char *bootstrap_tool
[] = { "/bin/launchctl", "bootstrap", "-S", session_type
, sflag
? "-s" : NULL
, NULL
};
4554 char thelabel
[1000];
4557 snprintf(thelabel
, sizeof(thelabel
), "com.apple.launchctl.%s", session_type
);
4558 bootstrapper
= job_new(jm
, thelabel
, NULL
, bootstrap_tool
);
4559 if (jobmgr_assumes(jm
, bootstrapper
!= NULL
) && (jm
->parentmgr
|| getuid())) {
4562 /* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
4563 snprintf(buf
, sizeof(buf
), "0x%X:0:0", getuid());
4564 envitem_new(bootstrapper
, "__CF_USER_TEXT_ENCODING", buf
, false);
4565 bootstrapper
->weird_bootstrap
= true;
4566 jobmgr_assumes(jm
, job_setup_machport(bootstrapper
));
4569 jm
->session_initialized
= true;
4571 return bootstrapper
;
4575 jobmgr_delete_anything_with_port(jobmgr_t jm
, mach_port_t port
)
4577 struct machservice
*ms
, *next_ms
;
4580 /* Mach ports, unlike Unix descriptors, are reference counted. In other
4581 * words, when some program hands us a second or subsequent send right
4582 * to a port we already have open, the Mach kernel gives us the same
4583 * port number back and increments an reference count associated with
4584 * the port. This forces us, when discovering that a receive right at
4585 * the other end has been deleted, to wander all of our objects to see
4586 * what weird places clients might have handed us the same send right
4590 if (jm
== root_jobmgr
) {
4591 if (port
== inherited_bootstrap_port
) {
4592 launchd_assumes(launchd_mport_deallocate(port
) == KERN_SUCCESS
);
4593 inherited_bootstrap_port
= MACH_PORT_NULL
;
4595 return jobmgr_shutdown(jm
);
4598 LIST_FOREACH_SAFE(ms
, &port_hash
[HASH_PORT(port
)], port_hash_sle
, next_ms
) {
4599 if (ms
->port
== port
) {
4600 machservice_delete(ms
->job
, ms
, true);
4605 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
4606 jobmgr_delete_anything_with_port(jmi
, port
);
4609 if (jm
->req_port
== port
) {
4610 jobmgr_log(jm
, LOG_DEBUG
, "Request port died: 0x%x", port
);
4611 return jobmgr_shutdown(jm
);
4617 struct machservice
*
4618 jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
)
4620 struct machservice
*ms
;
4623 jobmgr_assumes(jm
, !check_parent
);
4626 LIST_FOREACH(ms
, &jm
->ms_hash
[hash_ms(name
)], name_hash_sle
) {
4627 if ((target_pid
&& ms
->per_pid
&& ms
->job
->p
== target_pid
) || (!target_pid
&& !ms
->per_pid
)) {
4628 if (strcmp(name
, ms
->name
) == 0) {
4634 if (jm
->parentmgr
== NULL
) {
4638 if (!check_parent
) {
4642 return jobmgr_lookup_service(jm
->parentmgr
, name
, true, 0);
4646 machservice_port(struct machservice
*ms
)
4652 machservice_job(struct machservice
*ms
)
4658 machservice_hidden(struct machservice
*ms
)
4664 machservice_active(struct machservice
*ms
)
4666 return ms
->isActive
;
4670 machservice_name(struct machservice
*ms
)
4676 machservice_delete(job_t j
, struct machservice
*ms
, bool port_died
)
4678 if (ms
->debug_on_close
) {
4679 job_log(j
, LOG_NOTICE
, "About to enter kernel debugger because of Mach port: 0x%x", ms
->port
);
4680 job_assumes(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
) == KERN_SUCCESS
);
4683 if (ms
->recv
&& job_assumes(j
, !ms
->isActive
)) {
4684 job_assumes(j
, launchd_mport_close_recv(ms
->port
) == KERN_SUCCESS
);
4687 job_assumes(j
, launchd_mport_deallocate(ms
->port
) == KERN_SUCCESS
);
4689 if (ms
->port
== the_exception_server
) {
4690 the_exception_server
= 0;
4693 job_log(j
, LOG_INFO
, "Mach service deleted%s: %s", port_died
? " (port died)" : "", ms
->name
);
4695 if (ms
->special_port_num
) {
4696 SLIST_REMOVE(&special_ports
, ms
, machservice
, special_port_sle
);
4699 SLIST_REMOVE(&j
->machservices
, ms
, machservice
, sle
);
4700 LIST_REMOVE(ms
, name_hash_sle
);
4701 LIST_REMOVE(ms
, port_hash_sle
);
4707 machservice_request_notifications(struct machservice
*ms
)
4709 mach_msg_id_t which
= MACH_NOTIFY_DEAD_NAME
;
4711 ms
->isActive
= true;
4714 which
= MACH_NOTIFY_PORT_DESTROYED
;
4715 job_checkin(ms
->job
);
4718 job_assumes(ms
->job
, launchd_mport_notify_req(ms
->port
, which
) == KERN_SUCCESS
);
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
#define END_OF(x) (&(x)[NELEM(x)])

/*
 * Split a mach_init-style command string into a NULL-terminated argv
 * allocated as a single block (pointer table followed by the copied
 * strings). Supports double-quoted arguments and backslash escapes.
 * Caller frees the returned pointer with free(); returns NULL on empty
 * input or allocation failure.
 *
 * NOTE(review): reconstructed from a corrupted extraction (the tokenizer's
 * inner-loop body was lost); verify against upstream launchd.
 */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		while (isspace(*cp))
			cp++;
		term = (*cp == '"') ? *cp++ : '\0';
		if (nargs < NELEM(argv)) {
			argv[nargs++] = argp;
		}
		while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args)) {
			if (*cp == '\\') {
				cp++;
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		*argp++ = '\0';
		if (*cp) {
			cp++;
		}
	}

	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	/* One allocation: (nargs + 1) pointers, then the string bytes. */
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!launchd_assumes(argv_ret != NULL)) {
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
4775 job_checkin(job_t j
)
4777 j
->checkedin
= true;
4781 job_ack_port_destruction(mach_port_t p
)
4783 struct machservice
*ms
;
4785 LIST_FOREACH(ms
, &port_hash
[HASH_PORT(p
)], port_hash_sle
) {
4786 if (ms
->recv
&& (ms
->port
== p
)) {
4795 ms
->isActive
= false;
4798 machservice_resetport(ms
->job
, ms
);
4801 job_log(ms
->job
, LOG_DEBUG
, "Receive right returned to us: %s", ms
->name
);
4802 job_dispatch(ms
->job
, false);
4804 root_jobmgr
= jobmgr_do_garbage_collection(root_jobmgr
);
4810 job_ack_no_senders(job_t j
)
4812 j
->priv_port_has_senders
= false;
4814 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
4817 job_log(j
, LOG_DEBUG
, "No more senders on privileged Mach bootstrap port");
4819 job_dispatch(j
, false);
4825 if (job_assumes(j
, j
->mgr
!= NULL
)) {
4833 job_is_anonymous(job_t j
)
4835 return j
->anonymous
;
4839 job_force_sampletool(job_t j
)
4842 char logfile
[PATH_MAX
];
4844 char *sample_args
[] = { "sample", pidstr
, "1", "-mayDie", "-file", logfile
, NULL
};
4845 char *contents
= NULL
;
4846 int logfile_fd
= -1;
4847 int console_fd
= -1;
4851 if (!debug_shutdown_hangs
) {
4855 snprintf(pidstr
, sizeof(pidstr
), "%u", j
->p
);
4856 snprintf(logfile
, sizeof(logfile
), SHUTDOWN_LOG_DIR
"/%s-%u.sample.txt", j
->label
, j
->p
);
4858 if (!job_assumes(j
, unlink(logfile
) != -1 || errno
== ENOENT
)) {
4863 * This will stall launchd for as long as the 'sample' tool runs.
4865 * We didn't give the 'sample' tool a bootstrap port, so it therefore
4866 * can't deadlock against launchd.
4868 if (!job_assumes(j
, (errno
= posix_spawnp(&sp
, sample_args
[0], NULL
, NULL
, sample_args
, environ
)) == 0)) {
4872 job_log(j
, LOG_DEBUG
, "Waiting for 'sample' to finish.");
4874 if (!job_assumes(j
, waitpid(sp
, &wstatus
, 0) != -1)) {
4879 * This won't work if the VFS or filesystems are sick:
4883 if (!job_assumes(j
, WIFEXITED(wstatus
) && WEXITSTATUS(wstatus
) == 0)) {
4887 if (!job_assumes(j
, (logfile_fd
= open(logfile
, O_RDONLY
|O_NOCTTY
)) != -1)) {
4891 if (!job_assumes(j
, (console_fd
= open(_PATH_CONSOLE
, O_WRONLY
|O_APPEND
|O_NOCTTY
)) != -1)) {
4895 if (!job_assumes(j
, fstat(logfile_fd
, &sb
) != -1)) {
4899 contents
= malloc(sb
.st_size
);
4901 if (!job_assumes(j
, contents
!= NULL
)) {
4905 if (!job_assumes(j
, read(logfile_fd
, contents
, sb
.st_size
) == sb
.st_size
)) {
4909 job_assumes(j
, write(console_fd
, contents
, sb
.st_size
) == sb
.st_size
);
4916 if (logfile_fd
!= -1) {
4917 job_assumes(j
, runtime_fsync(logfile_fd
) != -1);
4918 job_assumes(j
, runtime_close(logfile_fd
) != -1);
4921 if (console_fd
!= -1) {
4922 job_assumes(j
, runtime_close(console_fd
) != -1);
4925 job_log(j
, LOG_DEBUG
, "Finished sampling.");
4929 semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
)
4931 struct semaphoreitem
*si
;
4932 size_t alloc_sz
= sizeof(struct semaphoreitem
);
4935 alloc_sz
+= strlen(what
) + 1;
4938 if (!job_assumes(j
, si
= calloc(1, alloc_sz
))) {
4946 strcpy(si
->what_init
, what
);
4949 SLIST_INSERT_HEAD(&j
->semaphores
, si
, sle
);
4951 semaphoreitem_runtime_mod_ref(si
, true);
4957 semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
)
4960 * External events need to be tracked.
4961 * Internal events do NOT need to be tracked.
4965 case SUCCESSFUL_EXIT
:
4967 case OTHER_JOB_ENABLED
:
4968 case OTHER_JOB_DISABLED
:
4969 case OTHER_JOB_ACTIVE
:
4970 case OTHER_JOB_INACTIVE
:
4984 semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
)
4986 semaphoreitem_runtime_mod_ref(si
, false);
4988 SLIST_REMOVE(&j
->semaphores
, si
, semaphoreitem
, sle
);
4991 job_assumes(j
, runtime_close(si
->fd
) != -1);
4998 semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
)
5000 struct semaphoreitem_dict_iter_context
*sdic
= context
;
5001 semaphore_reason_t why
;
5003 why
= launch_data_get_bool(obj
) ? sdic
->why_true
: sdic
->why_false
;
5005 semaphoreitem_new(sdic
->j
, why
, key
);
5009 semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
)
5011 struct semaphoreitem_dict_iter_context sdic
= { context
, 0, 0 };
5013 semaphore_reason_t why
;
5015 switch (launch_data_get_type(obj
)) {
5016 case LAUNCH_DATA_BOOL
:
5017 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE
) == 0) {
5018 why
= launch_data_get_bool(obj
) ? NETWORK_UP
: NETWORK_DOWN
;
5019 semaphoreitem_new(j
, why
, NULL
);
5020 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT
) == 0) {
5021 why
= launch_data_get_bool(obj
) ? SUCCESSFUL_EXIT
: FAILED_EXIT
;
5022 semaphoreitem_new(j
, why
, NULL
);
5023 j
->start_pending
= true;
5025 job_assumes(j
, false);
5028 case LAUNCH_DATA_DICTIONARY
:
5029 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE
) == 0) {
5030 sdic
.why_true
= PATH_EXISTS
;
5031 sdic
.why_false
= PATH_MISSING
;
5032 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE
) == 0) {
5033 sdic
.why_true
= OTHER_JOB_ACTIVE
;
5034 sdic
.why_false
= OTHER_JOB_INACTIVE
;
5035 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED
) == 0) {
5036 sdic
.why_true
= OTHER_JOB_ENABLED
;
5037 sdic
.why_false
= OTHER_JOB_DISABLED
;
5039 job_assumes(j
, false);
5043 launch_data_dict_iterate(obj
, semaphoreitem_setup_dict_iter
, &sdic
);
5046 job_assumes(j
, false);
5052 jobmgr_dispatch_all_semaphores(jobmgr_t jm
)
5058 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
5059 jobmgr_dispatch_all_semaphores(jmi
);
5062 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
5063 if (!SLIST_EMPTY(&ji
->semaphores
)) {
5064 job_dispatch(ji
, false);
/*
 * Compute the next calendar time matching the given cron-style fields
 * (-1 = wildcard), starting strictly after "now". Advances a working
 * struct tm year by year until cronemu_mon() finds a match.
 */
time_t
cronemu(int mon, int mday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;	/* never fire "now" — start at the next minute */

	while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
		workingtm.tm_year++;
		workingtm.tm_mon = 0;
		workingtm.tm_mday = 1;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
	}

	return mktime(&workingtm);
}
/*
 * Like cronemu(), but matching on weekday: find the next time after "now"
 * on weekday `wday` (7 is normalized to 0 = Sunday, per cron convention)
 * satisfying the hour/min fields.
 */
time_t
cronemu_wday(int wday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;	/* never fire "now" — start at the next minute */

	if (wday == 7) {
		wday = 0;
	}

	while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
		workingtm.tm_mday++;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		mktime(&workingtm);	/* normalize and update tm_wday */
	}

	return mktime(&workingtm);
}
/*
 * Month-level step of the cron emulator. With mon == -1, advance a working
 * copy month by month until the day/hour/min fields match, using mktime()
 * normalization (carrytest) to detect year rollover (= no match this year).
 * Otherwise require/advance to the specific month and recurse downward.
 * Returns true iff a match was written back to *wtm.
 */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mon < wtm->tm_mon) {
		return false;
	}

	if (mon > wtm->tm_mon) {
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
/*
 * Day-of-month step of the cron emulator; same wildcard/carry scheme as
 * cronemu_mon(), one level down. Returns true iff a match was written
 * back to *wtm.
 */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mday < wtm->tm_mday) {
		return false;
	}

	if (mday > wtm->tm_mday) {
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
/*
 * Hour step of the cron emulator; same wildcard/carry scheme as
 * cronemu_mon(), one level down. Returns true iff a match was written
 * back to *wtm.
 */
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_min(&workingtm, min)) {
			workingtm.tm_hour++;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_hour;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_hour) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (hour < wtm->tm_hour) {
		return false;
	}

	if (hour > wtm->tm_hour) {
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
/*
 * Minute step (leaf) of the cron emulator: -1 matches anything; a minute
 * earlier than the working time cannot match; a later minute advances the
 * working time. Returns true iff *wtm satisfies `min`.
 */
bool
cronemu_min(struct tm *wtm, int min)
{
	if (min == -1) {
		return true;
	}

	if (min < wtm->tm_min) {
		return false;
	}

	if (min > wtm->tm_min) {
		wtm->tm_min = min;
	}

	return true;
}
5242 job_mig_create_server(job_t j
, cmd_t server_cmd
, uid_t server_uid
, boolean_t on_demand
, mach_port_t
*server_portp
)
5247 if (!launchd_assumes(j
!= NULL
)) {
5248 return BOOTSTRAP_NO_MEMORY
;
5251 if (unlikely(j
->deny_job_creation
)) {
5252 return BOOTSTRAP_NOT_PRIVILEGED
;
5255 runtime_get_caller_creds(&ldc
);
5257 job_log(j
, LOG_DEBUG
, "Server create attempt: %s", server_cmd
);
5259 #define LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
5260 /* XXX - This code should go away once the per session launchd is integrated with the rest of the system */
5261 #ifdef LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
5262 if (getpid() == 1) {
5263 if (ldc
.euid
&& server_uid
&& (ldc
.euid
!= server_uid
)) {
5264 job_log(j
, LOG_WARNING
, "Server create: \"%s\": Will run as UID %d, not UID %d as they told us to",
5265 server_cmd
, ldc
.euid
, server_uid
);
5266 server_uid
= ldc
.euid
;
5271 if (server_uid
!= getuid()) {
5272 job_log(j
, LOG_WARNING
, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
5273 server_cmd
, getuid(), server_uid
);
5275 server_uid
= 0; /* zero means "do nothing" */
5278 js
= job_new_via_mach_init(j
, server_cmd
, server_uid
, on_demand
);
5281 return BOOTSTRAP_NO_MEMORY
;
5284 *server_portp
= js
->j_port
;
5285 return BOOTSTRAP_SUCCESS
;
5289 job_mig_send_signal(job_t j
, mach_port_t srp
, name_t targetlabel
, int sig
)
5294 if (!launchd_assumes(j
!= NULL
)) {
5295 return BOOTSTRAP_NO_MEMORY
;
5298 runtime_get_caller_creds(&ldc
);
5300 if (ldc
.euid
!= 0 && ldc
.euid
!= getuid()) {
5301 return BOOTSTRAP_NOT_PRIVILEGED
;
5304 if (!(otherj
= job_find(targetlabel
))) {
5305 return BOOTSTRAP_UNKNOWN_SERVICE
;
5308 if (sig
== VPROC_MAGIC_UNLOAD_SIGNAL
) {
5309 bool do_block
= otherj
->p
;
5311 if (otherj
->anonymous
) {
5312 return BOOTSTRAP_NOT_PRIVILEGED
;
5318 job_log(j
, LOG_DEBUG
, "Blocking MIG return of job_remove(): %s", otherj
->label
);
5319 /* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
5320 job_assumes(otherj
, waiting4removal_new(otherj
, srp
));
5321 return MIG_NO_REPLY
;
5325 } else if (otherj
->p
) {
5326 job_assumes(j
, runtime_kill(otherj
->p
, sig
) != -1);
5333 job_mig_log_forward(job_t j
, vm_offset_t inval
, mach_msg_type_number_t invalCnt
)
5337 if (!launchd_assumes(j
!= NULL
)) {
5338 return BOOTSTRAP_NO_MEMORY
;
5341 if (!job_assumes(j
, j
->per_user
)) {
5342 return BOOTSTRAP_NOT_PRIVILEGED
;
5345 runtime_get_caller_creds(&ldc
);
5347 return runtime_log_forward(ldc
.euid
, ldc
.egid
, inval
, invalCnt
);
5351 job_mig_log_drain(job_t j
, mach_port_t srp
, vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
5355 if (!launchd_assumes(j
!= NULL
)) {
5356 return BOOTSTRAP_NO_MEMORY
;
5359 runtime_get_caller_creds(&ldc
);
5362 return BOOTSTRAP_NOT_PRIVILEGED
;
5365 return runtime_log_drain(srp
, outval
, outvalCnt
);
5369 job_mig_swap_complex(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
,
5370 vm_offset_t inval
, mach_msg_type_number_t invalCnt
,
5371 vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
5374 launch_data_t input_obj
, output_obj
;
5375 size_t data_offset
= 0;
5379 runtime_get_caller_creds(&ldc
);
5381 if (!launchd_assumes(j
!= NULL
)) {
5382 return BOOTSTRAP_NO_MEMORY
;
5385 if (inkey
&& ldc
.euid
&& ldc
.euid
!= getuid()) {
5386 return BOOTSTRAP_NOT_PRIVILEGED
;
5389 if (inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
)) {
5393 if (inkey
&& outkey
) {
5394 action
= "Swapping";
5401 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
5403 *outvalCnt
= 20 * 1024 * 1024;
5404 mig_allocate(outval
, *outvalCnt
);
5405 if (!job_assumes(j
, *outval
!= 0)) {
5409 if (invalCnt
&& !job_assumes(j
, (input_obj
= launch_data_unpack((void *)inval
, invalCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
)) {
5414 case VPROC_GSK_ENVIRONMENT
:
5415 if (!job_assumes(j
, (output_obj
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
5418 jobmgr_export_env_from_other_jobs(j
->mgr
, output_obj
);
5419 if (!job_assumes(j
, launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
) != 0)) {
5422 launch_data_free(output_obj
);
5424 case VPROC_GSK_ALLJOBS
:
5425 if (!job_assumes(j
, (output_obj
= job_export_all()) != NULL
)) {
5428 ipc_revoke_fds(output_obj
);
5429 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
5430 if (!job_assumes(j
, packed_size
!= 0)) {
5433 launch_data_free(output_obj
);
5436 mig_deallocate(*outval
, *outvalCnt
);
5444 if (invalCnt
) switch (inkey
) {
5445 case VPROC_GSK_ENVIRONMENT
:
5446 job_assumes(j
, false);
5454 mig_deallocate(inval
, invalCnt
);
5460 mig_deallocate(*outval
, *outvalCnt
);
5466 job_mig_swap_integer(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
, int64_t inval
, int64_t *outval
)
5469 kern_return_t kr
= 0;
5473 runtime_get_caller_creds(&ldc
);
5475 if (!launchd_assumes(j
!= NULL
)) {
5476 return BOOTSTRAP_NO_MEMORY
;
5479 if (inkey
&& ldc
.euid
&& ldc
.euid
!= getuid()) {
5480 return BOOTSTRAP_NOT_PRIVILEGED
;
5483 if (inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
)) {
5487 if (inkey
&& outkey
) {
5488 action
= "Swapping";
5495 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
5498 case VPROC_GSK_LAST_EXIT_STATUS
:
5499 *outval
= j
->last_exit_status
;
5501 case VPROC_GSK_MGR_UID
:
5504 case VPROC_GSK_MGR_PID
:
5507 case VPROC_GSK_IS_MANAGED
:
5508 *outval
= j
->anonymous
? 0 : 1;
5510 case VPROC_GSK_BASIC_KEEPALIVE
:
5511 *outval
= !j
->ondemand
;
5513 case VPROC_GSK_START_INTERVAL
:
5514 *outval
= j
->start_interval
;
5516 case VPROC_GSK_IDLE_TIMEOUT
:
5517 *outval
= j
->timeout
;
5519 case VPROC_GSK_EXIT_TIMEOUT
:
5520 *outval
= j
->exit_timeout
;
5522 case VPROC_GSK_GLOBAL_LOG_MASK
:
5523 oldmask
= runtime_setlogmask(LOG_UPTO(LOG_DEBUG
));
5525 runtime_setlogmask(oldmask
);
5527 case VPROC_GSK_GLOBAL_UMASK
:
5541 case VPROC_GSK_GLOBAL_ON_DEMAND
:
5542 kr
= job_set_global_on_demand(j
, (bool)inval
) ? 0 : 1;
5544 case VPROC_GSK_BASIC_KEEPALIVE
:
5545 j
->ondemand
= !inval
;
5547 case VPROC_GSK_START_INTERVAL
:
5548 if ((uint64_t)inval
> UINT32_MAX
) {
5551 if (j
->start_interval
== 0) {
5554 /* Workaround 5225889 */
5555 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, j
) != -1);
5557 j
->start_interval
= inval
;
5558 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, j
->start_interval
, j
) != -1);
5559 } else if (j
->start_interval
) {
5560 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
) != -1);
5561 if (j
->start_interval
!= 0) {
5564 j
->start_interval
= 0;
5567 case VPROC_GSK_IDLE_TIMEOUT
:
5568 if ((unsigned int)inval
> 0) {
5572 case VPROC_GSK_EXIT_TIMEOUT
:
5573 if ((unsigned int)inval
> 0) {
5574 j
->exit_timeout
= inval
;
5577 case VPROC_GSK_GLOBAL_LOG_MASK
:
5578 runtime_setlogmask(inval
);
5580 case VPROC_GSK_GLOBAL_UMASK
:
5594 job_mig_post_fork_ping(job_t j
, task_t child_task
)
5596 struct machservice
*ms
;
5598 if (!launchd_assumes(j
!= NULL
)) {
5599 return BOOTSTRAP_NO_MEMORY
;
5602 job_log(j
, LOG_DEBUG
, "Post fork ping.");
5604 job_setup_exception_port(j
, child_task
);
5606 SLIST_FOREACH(ms
, &special_ports
, special_port_sle
) {
5607 if (j
->per_user
&& (ms
->special_port_num
!= TASK_ACCESS_PORT
)) {
5608 /* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
5612 errno
= task_set_special_port(child_task
, ms
->special_port_num
, ms
->port
);
5615 int desired_log_level
= LOG_ERR
;
5620 desired_log_level
= LOG_WARNING
;
5622 if (ms
->special_port_num
== TASK_SEATBELT_PORT
) {
5623 desired_log_level
= LOG_DEBUG
;
5627 job_log(j
, desired_log_level
, "Could not setup Mach task special port %u: %s", ms
->special_port_num
, mach_error_string(errno
));
5631 job_assumes(j
, launchd_mport_deallocate(child_task
) == KERN_SUCCESS
);
5637 job_mig_reboot2(job_t j
, uint64_t flags
)
5639 char who_started_the_reboot
[2048] = "";
5640 struct kinfo_proc kp
;
5644 if (!launchd_assumes(j
!= NULL
)) {
5645 return BOOTSTRAP_NO_MEMORY
;
5648 if (getpid() != 1) {
5649 return BOOTSTRAP_NOT_PRIVILEGED
;
5652 runtime_get_caller_creds(&ldc
);
5655 return BOOTSTRAP_NOT_PRIVILEGED
;
5658 for (pid_to_log
= ldc
.pid
; pid_to_log
; pid_to_log
= kp
.kp_eproc
.e_ppid
) {
5659 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, pid_to_log
};
5660 size_t who_offset
, len
= sizeof(kp
);
5662 if (!job_assumes(j
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1)) {
5666 who_offset
= strlen(who_started_the_reboot
);
5667 snprintf(who_started_the_reboot
+ who_offset
, sizeof(who_started_the_reboot
) - who_offset
,
5668 " %s[%u]%s", kp
.kp_proc
.p_comm
, pid_to_log
, kp
.kp_eproc
.e_ppid
? " ->" : "");
5671 root_jobmgr
->reboot_flags
= (int)flags
;
5675 job_log(j
, LOG_DEBUG
, "reboot2() initiated by:%s", who_started_the_reboot
);
5681 job_mig_getsocket(job_t j
, name_t spr
)
5683 if (!launchd_assumes(j
!= NULL
)) {
5684 return BOOTSTRAP_NO_MEMORY
;
5690 return BOOTSTRAP_NO_MEMORY
;
5693 strncpy(spr
, sockpath
, sizeof(name_t
));
5695 return BOOTSTRAP_SUCCESS
;
5699 job_mig_log(job_t j
, int pri
, int err
, logmsg_t msg
)
5701 if (!launchd_assumes(j
!= NULL
)) {
5702 return BOOTSTRAP_NO_MEMORY
;
5705 if ((errno
= err
)) {
5706 job_log_error(j
, pri
, "%s", msg
);
5708 job_log(j
, pri
, "%s", msg
);
5715 ensure_root_bkgd_setup(void)
5717 if (background_jobmgr
|| getpid() != 1) {
5721 if (!jobmgr_assumes(root_jobmgr
, (background_jobmgr
= jobmgr_new(root_jobmgr
, mach_task_self(), MACH_PORT_NULL
, false, VPROCMGR_SESSION_BACKGROUND
)) != NULL
)) {
5725 background_jobmgr
->req_port
= 0;
5726 jobmgr_assumes(root_jobmgr
, launchd_mport_make_send(background_jobmgr
->jm_port
) == KERN_SUCCESS
);
5730 job_mig_lookup_per_user_context(job_t j
, uid_t which_user
, mach_port_t
*up_cont
)
5735 if (!launchd_assumes(j
!= NULL
)) {
5736 return BOOTSTRAP_NO_MEMORY
;
5739 job_log(j
, LOG_DEBUG
, "Looking up per user launchd for UID: %u", which_user
);
5741 runtime_get_caller_creds(&ldc
);
5743 if (getpid() != 1) {
5744 job_log(j
, LOG_ERR
, "Only PID 1 supports per user launchd lookups.");
5745 return BOOTSTRAP_NOT_PRIVILEGED
;
5748 if (ldc
.euid
|| ldc
.uid
) {
5749 which_user
= ldc
.euid
? ldc
.euid
: ldc
.uid
;
5752 *up_cont
= MACH_PORT_NULL
;
5754 if (which_user
== 0) {
5755 ensure_root_bkgd_setup();
5757 *up_cont
= background_jobmgr
->jm_port
;
5762 LIST_FOREACH(ji
, &root_jobmgr
->jobs
, sle
) {
5763 if (!ji
->per_user
) {
5766 if (ji
->mach_uid
!= which_user
) {
5769 if (SLIST_EMPTY(&ji
->machservices
)) {
5772 if (!SLIST_FIRST(&ji
->machservices
)->per_user_hack
) {
5779 struct machservice
*ms
;
5782 job_log(j
, LOG_DEBUG
, "Creating per user launchd job for UID: %u", which_user
);
5784 sprintf(lbuf
, "com.apple.launchd.peruser.%u", which_user
);
5786 ji
= job_new(root_jobmgr
, lbuf
, "/sbin/launchd", NULL
);
5789 return BOOTSTRAP_NO_MEMORY
;
5792 ji
->mach_uid
= which_user
;
5793 ji
->per_user
= true;
5795 if ((ms
= machservice_new(ji
, lbuf
, up_cont
, false)) == NULL
) {
5797 return BOOTSTRAP_NO_MEMORY
;
5800 ms
->per_user_hack
= true;
5803 ji
= job_dispatch(ji
, false);
5805 job_log(j
, LOG_DEBUG
, "Per user launchd job found for UID: %u", which_user
);
5808 if (job_assumes(j
, ji
!= NULL
)) {
5809 *up_cont
= machservice_port(SLIST_FIRST(&ji
->machservices
));
5816 job_mig_check_in(job_t j
, name_t servicename
, mach_port_t
*serviceportp
)
5818 static pid_t last_warned_pid
= 0;
5819 struct machservice
*ms
;
5822 if (!launchd_assumes(j
!= NULL
)) {
5823 return BOOTSTRAP_NO_MEMORY
;
5826 runtime_get_caller_creds(&ldc
);
5828 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, true, 0);
5831 job_log(j
, LOG_DEBUG
, "Check-in of Mach service failed. Unknown: %s", servicename
);
5832 return BOOTSTRAP_UNKNOWN_SERVICE
;
5834 if (machservice_job(ms
) != j
) {
5835 if (last_warned_pid
!= ldc
.pid
) {
5836 job_log(j
, LOG_NOTICE
, "Check-in of Mach service failed. PID %d is not privileged: %s",
5837 ldc
.pid
, servicename
);
5838 last_warned_pid
= ldc
.pid
;
5840 return BOOTSTRAP_NOT_PRIVILEGED
;
5842 if (machservice_active(ms
)) {
5843 job_log(j
, LOG_WARNING
, "Check-in of Mach service failed. Already active: %s", servicename
);
5844 return BOOTSTRAP_SERVICE_ACTIVE
;
5847 machservice_request_notifications(ms
);
5849 job_log(j
, LOG_INFO
, "Check-in of service: %s", servicename
);
5851 *serviceportp
= machservice_port(ms
);
5852 return BOOTSTRAP_SUCCESS
;
5856 job_mig_register2(job_t j
, name_t servicename
, mach_port_t serviceport
, uint64_t flags
)
5858 struct machservice
*ms
;
5861 if (!launchd_assumes(j
!= NULL
)) {
5862 return BOOTSTRAP_NO_MEMORY
;
5865 runtime_get_caller_creds(&ldc
);
5868 job_log(j
, LOG_APPLEONLY
, "bootstrap_register() is deprecated. Service: %s", servicename
);
5871 job_log(j
, LOG_DEBUG
, "%sMach service registration attempt: %s", flags
& BOOTSTRAP_PER_PID_SERVICE
? "Per PID " : "", servicename
);
5873 /* 5641783 for the embedded hack */
5874 #if !TARGET_OS_EMBEDDED
5876 * From a per-user/session launchd's perspective, SecurityAgent (UID
5877 * 92) is a rogue application (not our UID, not root and not a child of
5878 * us). We'll have to reconcile this design friction at a later date.
5880 if (j
->anonymous
&& job_get_bs(j
)->parentmgr
== NULL
&& ldc
.uid
!= 0 && ldc
.uid
!= getuid() && ldc
.uid
!= 92) {
5881 if (getpid() == 1) {
5882 return VPROC_ERR_TRY_PER_USER
;
5884 return BOOTSTRAP_NOT_PRIVILEGED
;
5889 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, flags
& BOOTSTRAP_PER_PID_SERVICE
? ldc
.pid
: 0);
5892 if (machservice_job(ms
) != j
) {
5893 return BOOTSTRAP_NOT_PRIVILEGED
;
5895 if (machservice_active(ms
)) {
5896 job_log(j
, LOG_DEBUG
, "Mach service registration failed. Already active: %s", servicename
);
5897 return BOOTSTRAP_SERVICE_ACTIVE
;
5900 machservice_delete(j
, ms
, false);
5903 if (serviceport
!= MACH_PORT_NULL
) {
5904 if ((ms
= machservice_new(j
, servicename
, &serviceport
, flags
& BOOTSTRAP_PER_PID_SERVICE
? true : false))) {
5905 machservice_request_notifications(ms
);
5907 return BOOTSTRAP_NO_MEMORY
;
5911 return BOOTSTRAP_SUCCESS
;
5915 job_mig_look_up2(job_t j
, name_t servicename
, mach_port_t
*serviceportp
, mach_msg_type_name_t
*ptype
, pid_t target_pid
, uint64_t flags
)
5917 struct machservice
*ms
;
5921 if (!launchd_assumes(j
!= NULL
)) {
5922 return BOOTSTRAP_NO_MEMORY
;
5925 runtime_get_caller_creds(&ldc
);
5927 /* 5641783 for the embedded hack */
5928 #if !TARGET_OS_EMBEDDED
5929 if (getpid() == 1 && j
->anonymous
&& job_get_bs(j
)->parentmgr
== NULL
&& ldc
.uid
!= 0 && ldc
.euid
!= 0) {
5930 return VPROC_ERR_TRY_PER_USER
;
5934 if (!mspolicy_check(j
, servicename
, flags
& BOOTSTRAP_PER_PID_SERVICE
)) {
5935 job_log(j
, LOG_NOTICE
, "Policy denied Mach service lookup: %s", servicename
);
5936 return BOOTSTRAP_NOT_PRIVILEGED
;
5939 if (flags
& BOOTSTRAP_PER_PID_SERVICE
) {
5940 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, target_pid
);
5942 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, true, 0);
5945 if (ms
&& machservice_hidden(ms
) && !job_active(machservice_job(ms
))) {
5947 } else if (ms
&& ms
->per_user_hack
) {
5952 launchd_assumes(machservice_port(ms
) != MACH_PORT_NULL
);
5953 job_log(j
, LOG_DEBUG
, "%sMach service lookup: %s", flags
& BOOTSTRAP_PER_PID_SERVICE
? "Per PID " : "", servicename
);
5955 /* After Leopard ships, we should enable this */
5956 if (j
->lastlookup
== ms
&& j
->lastlookup_gennum
== ms
->gen_num
&& !j
->per_user
) {
5958 job_log(j
, LOG_APPLEONLY
, "Performance opportunity: Number of bootstrap_lookup(... \"%s\" ...) calls that should have been cached: %llu",
5959 servicename
, ms
->bad_perf_cnt
);
5962 j
->lastlookup_gennum
= ms
->gen_num
;
5964 *serviceportp
= machservice_port(ms
);
5965 *ptype
= MACH_MSG_TYPE_COPY_SEND
;
5966 kr
= BOOTSTRAP_SUCCESS
;
5967 } else if (!(flags
& BOOTSTRAP_PER_PID_SERVICE
) && (inherited_bootstrap_port
!= MACH_PORT_NULL
)) {
5968 job_log(j
, LOG_DEBUG
, "Mach service lookup forwarded: %s", servicename
);
5969 *ptype
= MACH_MSG_TYPE_MOVE_SEND
;
5970 kr
= bootstrap_look_up(inherited_bootstrap_port
, servicename
, serviceportp
);
5971 } else if (getpid() == 1 && j
->anonymous
&& ldc
.euid
>= 500 && strcasecmp(job_get_bs(j
)->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
5973 * 5240036 Should start background session when a lookup of CCacheServer occurs
5975 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
5976 * If we find a EUID that isn't root, we force it over to the per-user context.
5978 return VPROC_ERR_TRY_PER_USER
;
5980 job_log(j
, LOG_DEBUG
, "%sMach service lookup failed: %s", flags
& BOOTSTRAP_PER_PID_SERVICE
? "Per PID " : "", servicename
);
5981 kr
= BOOTSTRAP_UNKNOWN_SERVICE
;
5988 job_mig_parent(job_t j
, mach_port_t
*parentport
, mach_msg_type_name_t
*pptype
)
5990 if (!launchd_assumes(j
!= NULL
)) {
5991 return BOOTSTRAP_NO_MEMORY
;
5994 job_log(j
, LOG_DEBUG
, "Requested parent bootstrap port");
5995 jobmgr_t jm
= j
->mgr
;
5997 *pptype
= MACH_MSG_TYPE_MAKE_SEND
;
5999 if (jobmgr_parent(jm
)) {
6000 *parentport
= jobmgr_parent(jm
)->jm_port
;
6001 } else if (MACH_PORT_NULL
== inherited_bootstrap_port
) {
6002 *parentport
= jm
->jm_port
;
6004 *pptype
= MACH_MSG_TYPE_COPY_SEND
;
6005 *parentport
= inherited_bootstrap_port
;
6007 return BOOTSTRAP_SUCCESS
;
6011 job_mig_info(job_t j
, name_array_t
*servicenamesp
, unsigned int *servicenames_cnt
,
6012 bootstrap_status_array_t
*serviceactivesp
, unsigned int *serviceactives_cnt
)
6014 name_array_t service_names
= NULL
;
6015 bootstrap_status_array_t service_actives
= NULL
;
6016 unsigned int cnt
= 0, cnt2
= 0;
6017 struct machservice
*ms
;
6021 if (!launchd_assumes(j
!= NULL
)) {
6022 return BOOTSTRAP_NO_MEMORY
;
6027 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
6028 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
6039 mig_allocate((vm_address_t
*)&service_names
, cnt
* sizeof(service_names
[0]));
6040 if (!launchd_assumes(service_names
!= NULL
)) {
6044 mig_allocate((vm_address_t
*)&service_actives
, cnt
* sizeof(service_actives
[0]));
6045 if (!launchd_assumes(service_actives
!= NULL
)) {
6049 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
6050 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
6052 strlcpy(service_names
[cnt2
], machservice_name(ms
), sizeof(service_names
[0]));
6053 service_actives
[cnt2
] = machservice_status(ms
);
6059 launchd_assumes(cnt
== cnt2
);
6062 *servicenamesp
= service_names
;
6063 *serviceactivesp
= service_actives
;
6064 *servicenames_cnt
= *serviceactives_cnt
= cnt
;
6066 return BOOTSTRAP_SUCCESS
;
6069 if (service_names
) {
6070 mig_deallocate((vm_address_t
)service_names
, cnt
* sizeof(service_names
[0]));
6072 if (service_actives
) {
6073 mig_deallocate((vm_address_t
)service_actives
, cnt
* sizeof(service_actives
[0]));
6076 return BOOTSTRAP_NO_MEMORY
;
6080 job_reparent_hack(job_t j
, const char *where
)
6084 ensure_root_bkgd_setup();
6086 /* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
6087 if (where
== NULL
) {
6088 if (strcasecmp(j
->mgr
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
6089 where
= VPROCMGR_SESSION_LOGINWINDOW
;
6091 where
= VPROCMGR_SESSION_AQUA
;
6095 if (strcasecmp(j
->mgr
->name
, where
) == 0) {
6099 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
6100 if (jmi
->shutting_down
) {
6102 } else if (strcasecmp(jmi
->name
, where
) == 0) {
6104 } else if (strcasecmp(jmi
->name
, VPROCMGR_SESSION_BACKGROUND
) == 0 && getpid() == 1) {
6105 SLIST_FOREACH(jmi2
, &jmi
->submgrs
, sle
) {
6106 if (strcasecmp(jmi2
->name
, where
) == 0) {
6115 if (job_assumes(j
, jmi
!= NULL
)) {
6116 struct machservice
*msi
;
6118 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
6119 LIST_REMOVE(msi
, name_hash_sle
);
6122 LIST_REMOVE(j
, sle
);
6123 LIST_INSERT_HEAD(&jmi
->jobs
, j
, sle
);
6126 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
6127 LIST_INSERT_HEAD(&j
->mgr
->ms_hash
[hash_ms(msi
->name
)], msi
, name_hash_sle
);
6133 job_mig_move_subset(job_t j
, mach_port_t target_subset
, name_t session_type
)
6135 mach_msg_type_number_t l2l_i
, l2l_port_cnt
= 0;
6136 mach_port_array_t l2l_ports
= NULL
;
6137 mach_port_t reqport
, rcvright
;
6138 kern_return_t kr
= 1;
6139 launch_data_t out_obj_array
= NULL
;
6141 jobmgr_t jmr
= NULL
;
6143 if (!launchd_assumes(j
!= NULL
)) {
6144 return BOOTSTRAP_NO_MEMORY
;
6147 runtime_get_caller_creds(&ldc
);
6149 if (target_subset
== MACH_PORT_NULL
) {
6152 if (j
->mgr
->session_initialized
) {
6153 if (ldc
.uid
== 0 && getpid() == 1) {
6154 if (strcmp(j
->mgr
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
6157 LIST_FOREACH_SAFE(ji
, &j
->mgr
->jobs
, sle
, jn
) {
6158 if (!ji
->anonymous
) {
6163 ensure_root_bkgd_setup();
6165 SLIST_REMOVE(&j
->mgr
->parentmgr
->submgrs
, j
->mgr
, jobmgr_s
, sle
);
6166 j
->mgr
->parentmgr
= background_jobmgr
;
6167 SLIST_INSERT_HEAD(&j
->mgr
->parentmgr
->submgrs
, j
->mgr
, sle
);
6170 * We really should wait for all the jobs to die before proceeding. See 5351245 for more info.
6172 * We have hacked around this in job_find() by ignoring jobs that are pending removal.
6175 } else if (strcmp(j
->mgr
->name
, VPROCMGR_SESSION_AQUA
) == 0) {
6176 job_log(j
, LOG_DEBUG
, "Tried to move the Aqua session.");
6178 } else if (strcmp(j
->mgr
->name
, VPROCMGR_SESSION_BACKGROUND
) == 0) {
6179 job_log(j
, LOG_DEBUG
, "Tried to move the background session.");
6182 job_log(j
, LOG_ERR
, "Tried to initialize an already setup session!");
6183 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
6187 job_log(j
, LOG_ERR
, "Tried to initialize an already setup session!");
6188 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
6191 } else if (ldc
.uid
== 0 && getpid() == 1 && strcmp(session_type
, VPROCMGR_SESSION_STANDARDIO
) == 0) {
6192 ensure_root_bkgd_setup();
6194 SLIST_REMOVE(&j
->mgr
->parentmgr
->submgrs
, j
->mgr
, jobmgr_s
, sle
);
6195 j
->mgr
->parentmgr
= background_jobmgr
;
6196 SLIST_INSERT_HEAD(&j
->mgr
->parentmgr
->submgrs
, j
->mgr
, sle
);
6197 } else if (strcmp(session_type
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
6203 * We're working around LoginWindow and the WindowServer.
6205 * In practice, there is only one LoginWindow session. Unfortunately, for certain
6206 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
6207 * spawns a replacement loginwindow session before cleaning up the previous one.
6209 * We're going to use the creation of a new LoginWindow context as a clue that the
6210 * previous LoginWindow context is on the way out and therefore we should just
6211 * kick-start the shutdown of it.
6214 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
6215 if (jmi
->shutting_down
) {
6217 } else if (strcasecmp(jmi
->name
, session_type
) == 0) {
6218 jobmgr_shutdown(jmi
);
6224 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Renaming to: %s", session_type
);
6225 strcpy(j
->mgr
->name_init
, session_type
);
6227 if (job_assumes(j
, (j2
= jobmgr_init_session(j
->mgr
, session_type
, false)))) {
6228 job_assumes(j
, job_dispatch(j2
, true));
6233 } else if (job_mig_intran2(root_jobmgr
, target_subset
, ldc
.pid
)) {
6234 job_log(j
, LOG_ERR
, "Moving a session to ourself is bogus.");
6236 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
6240 job_log(j
, LOG_DEBUG
, "Move subset attempt: 0x%x", target_subset
);
6242 errno
= kr
= _vproc_grab_subset(target_subset
, &reqport
, &rcvright
, &out_obj_array
, &l2l_ports
, &l2l_port_cnt
);
6244 if (!job_assumes(j
, kr
== 0)) {
6248 launchd_assert(launch_data_array_get_count(out_obj_array
) == l2l_port_cnt
);
6250 if (!job_assumes(j
, (jmr
= jobmgr_new(j
->mgr
, reqport
, rcvright
, false, session_type
)) != NULL
)) {
6251 kr
= BOOTSTRAP_NO_MEMORY
;
6255 for (l2l_i
= 0; l2l_i
< l2l_port_cnt
; l2l_i
++) {
6256 launch_data_t tmp
, obj_at_idx
;
6257 struct machservice
*ms
;
6258 job_t j_for_service
;
6259 const char *serv_name
;
6263 job_assumes(j
, obj_at_idx
= launch_data_array_get_index(out_obj_array
, l2l_i
));
6264 job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PID
));
6265 target_pid
= (pid_t
)launch_data_get_integer(tmp
);
6266 job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PERPID
));
6267 serv_perpid
= launch_data_get_bool(tmp
);
6268 job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_NAME
));
6269 serv_name
= launch_data_get_string(tmp
);
6271 j_for_service
= jobmgr_find_by_pid(jmr
, target_pid
, true);
6273 if (!j_for_service
) {
6274 /* The PID probably exited */
6275 job_assumes(j
, launchd_mport_deallocate(l2l_ports
[l2l_i
]) == KERN_SUCCESS
);
6279 if ((ms
= machservice_new(j_for_service
, serv_name
, &l2l_ports
[l2l_i
], serv_perpid
))) {
6280 machservice_request_notifications(ms
);
6287 if (out_obj_array
) {
6288 launch_data_free(out_obj_array
);
6292 mig_deallocate((vm_address_t
)l2l_ports
, l2l_port_cnt
* sizeof(l2l_ports
[0]));
6296 if (target_subset
) {
6297 job_assumes(j
, launchd_mport_deallocate(target_subset
) == KERN_SUCCESS
);
6300 jobmgr_shutdown(jmr
);
6307 job_mig_take_subset(job_t j
, mach_port_t
*reqport
, mach_port_t
*rcvright
,
6308 vm_offset_t
*outdata
, mach_msg_type_number_t
*outdataCnt
,
6309 mach_port_array_t
*portsp
, unsigned int *ports_cnt
)
6311 launch_data_t tmp_obj
, tmp_dict
, outdata_obj_array
= NULL
;
6312 mach_port_array_t ports
= NULL
;
6313 unsigned int cnt
= 0, cnt2
= 0;
6315 struct machservice
*ms
;
6319 if (!launchd_assumes(j
!= NULL
)) {
6320 return BOOTSTRAP_NO_MEMORY
;
6325 if (getpid() != 1) {
6326 job_log(j
, LOG_ERR
, "Only the system launchd will transfer Mach sub-bootstraps.");
6327 return BOOTSTRAP_NOT_PRIVILEGED
;
6328 } else if (jobmgr_parent(jm
) == NULL
) {
6329 job_log(j
, LOG_ERR
, "Root Mach bootstrap cannot be transferred.");
6330 return BOOTSTRAP_NOT_PRIVILEGED
;
6331 } else if (strcasecmp(jm
->name
, VPROCMGR_SESSION_AQUA
) == 0) {
6332 job_log(j
, LOG_ERR
, "Cannot transfer a setup GUI session.");
6333 return BOOTSTRAP_NOT_PRIVILEGED
;
6334 } else if (!j
->anonymous
) {
6335 job_log(j
, LOG_ERR
, "Only the anonymous job can transfer Mach sub-bootstraps.");
6336 return BOOTSTRAP_NOT_PRIVILEGED
;
6339 job_log(j
, LOG_DEBUG
, "Transferring sub-bootstrap to the per session launchd.");
6341 outdata_obj_array
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
6342 if (!job_assumes(j
, outdata_obj_array
)) {
6346 *outdataCnt
= 20 * 1024 * 1024;
6347 mig_allocate(outdata
, *outdataCnt
);
6348 if (!job_assumes(j
, *outdata
!= 0)) {
6352 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
6353 if (!ji
->anonymous
) {
6356 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
6361 mig_allocate((vm_address_t
*)&ports
, cnt
* sizeof(ports
[0]));
6362 if (!launchd_assumes(ports
!= NULL
)) {
6366 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
6367 if (!ji
->anonymous
) {
6371 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
6372 if (job_assumes(j
, (tmp_dict
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
6373 job_assumes(j
, launch_data_array_set_index(outdata_obj_array
, tmp_dict
, cnt2
));
6378 if (job_assumes(j
, (tmp_obj
= launch_data_new_string(machservice_name(ms
))))) {
6379 job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_NAME
));
6384 if (job_assumes(j
, (tmp_obj
= launch_data_new_integer((ms
->job
->p
))))) {
6385 job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PID
));
6390 if (job_assumes(j
, (tmp_obj
= launch_data_new_bool((ms
->per_pid
))))) {
6391 job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PERPID
));
6396 ports
[cnt2
] = machservice_port(ms
);
6398 /* Increment the send right by one so we can shutdown the jobmgr cleanly */
6399 jobmgr_assumes(jm
, (errno
= mach_port_mod_refs(mach_task_self(), ports
[cnt2
], MACH_PORT_RIGHT_SEND
, 1)) == 0);
6404 launchd_assumes(cnt
== cnt2
);
6406 packed_size
= launch_data_pack(outdata_obj_array
, (void *)*outdata
, *outdataCnt
, NULL
, NULL
);
6407 if (!job_assumes(j
, packed_size
!= 0)) {
6411 launch_data_free(outdata_obj_array
);
6416 *reqport
= jm
->req_port
;
6417 *rcvright
= jm
->jm_port
;
6422 workaround_5477111
= j
;
6424 jobmgr_shutdown(jm
);
6426 return BOOTSTRAP_SUCCESS
;
6429 if (outdata_obj_array
) {
6430 launch_data_free(outdata_obj_array
);
6433 mig_deallocate(*outdata
, *outdataCnt
);
6436 mig_deallocate((vm_address_t
)ports
, cnt
* sizeof(ports
[0]));
6439 return BOOTSTRAP_NO_MEMORY
;
6443 job_mig_subset(job_t j
, mach_port_t requestorport
, mach_port_t
*subsetportp
)
6448 if (!launchd_assumes(j
!= NULL
)) {
6449 return BOOTSTRAP_NO_MEMORY
;
6454 while ((jmr
= jobmgr_parent(jmr
)) != NULL
) {
6458 /* Since we use recursion, we need an artificial depth for subsets */
6459 if (bsdepth
> 100) {
6460 job_log(j
, LOG_ERR
, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth
);
6461 return BOOTSTRAP_NO_MEMORY
;
6464 if ((jmr
= jobmgr_new(j
->mgr
, requestorport
, MACH_PORT_NULL
, false, NULL
)) == NULL
) {
6465 if (requestorport
== MACH_PORT_NULL
) {
6466 return BOOTSTRAP_NOT_PRIVILEGED
;
6468 return BOOTSTRAP_NO_MEMORY
;
6471 *subsetportp
= jmr
->jm_port
;
6472 return BOOTSTRAP_SUCCESS
;
6476 job_mig_create_service(job_t j
, name_t servicename
, mach_port_t
*serviceportp
)
6478 struct machservice
*ms
;
6480 if (!launchd_assumes(j
!= NULL
)) {
6481 return BOOTSTRAP_NO_MEMORY
;
6484 if (job_prog(j
)[0] == '\0') {
6485 job_log(j
, LOG_ERR
, "Mach service creation requires a target server: %s", servicename
);
6486 return BOOTSTRAP_NOT_PRIVILEGED
;
6489 if (!j
->legacy_mach_job
) {
6490 job_log(j
, LOG_ERR
, "bootstrap_create_service() is only allowed against legacy Mach jobs: %s", servicename
);
6491 return BOOTSTRAP_NOT_PRIVILEGED
;
6494 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, 0);
6496 job_log(j
, LOG_DEBUG
, "Mach service creation attempt for failed. Already exists: %s", servicename
);
6497 return BOOTSTRAP_NAME_IN_USE
;
6502 *serviceportp
= MACH_PORT_NULL
;
6503 ms
= machservice_new(j
, servicename
, serviceportp
, false);
6505 if (!launchd_assumes(ms
!= NULL
)) {
6509 return BOOTSTRAP_SUCCESS
;
6512 launchd_assumes(launchd_mport_close_recv(*serviceportp
) == KERN_SUCCESS
);
6513 return BOOTSTRAP_NO_MEMORY
;
6517 job_mig_embedded_wait(job_t j
, name_t targetlabel
, integer_t
*waitstatus
)
6521 if (!launchd_assumes(j
!= NULL
)) {
6522 return BOOTSTRAP_NO_MEMORY
;
6525 if (unlikely(!(otherj
= job_find(targetlabel
)))) {
6526 return BOOTSTRAP_UNKNOWN_SERVICE
;
6529 *waitstatus
= j
->last_exit_status
;
6535 job_mig_embedded_kickstart(job_t j
, name_t targetlabel
, pid_t
*out_pid
, mach_port_t
*out_name_port
)
6541 if (!launchd_assumes(j
!= NULL
)) {
6542 return BOOTSTRAP_NO_MEMORY
;
6545 if (unlikely(!(otherj
= job_find(targetlabel
)))) {
6546 return BOOTSTRAP_UNKNOWN_SERVICE
;
6549 runtime_get_caller_creds(&ldc
);
6551 if (ldc
.euid
!= 0 && ldc
.euid
!= geteuid()
6552 #if TARGET_OS_EMBEDDED
6553 && j
->username
&& otherj
->username
6554 && strcmp(j
->username
, otherj
->username
) != 0
6557 return BOOTSTRAP_NOT_PRIVILEGED
;
6560 otherj
= job_dispatch(otherj
, true);
6562 if (!job_assumes(j
, otherj
&& otherj
->p
)) {
6563 return BOOTSTRAP_NO_MEMORY
;
6566 kr
= task_name_for_pid(mach_task_self(), otherj
->p
, out_name_port
);
6567 if (!job_assumes(j
, kr
== 0)) {
6571 *out_pid
= otherj
->p
;
6577 job_mig_wait(job_t j
, mach_port_t srp
, integer_t
*waitstatus
)
6579 if (!launchd_assumes(j
!= NULL
)) {
6580 return BOOTSTRAP_NO_MEMORY
;
6584 runtime_get_caller_creds(&ldc
);
6586 return job_handle_mpm_wait(j
, srp
, waitstatus
);
6590 job_mig_uncork_fork(job_t j
)
6592 if (!launchd_assumes(j
!= NULL
)) {
6593 return BOOTSTRAP_NO_MEMORY
;
6596 if (!j
->stall_before_exec
) {
6597 job_log(j
, LOG_WARNING
, "Attempt to uncork a job that isn't in the middle of a fork().");
6602 j
->stall_before_exec
= false;
6607 job_mig_set_service_policy(job_t j
, pid_t target_pid
, uint64_t flags
, name_t target_service
)
6611 if (!launchd_assumes(j
!= NULL
)) {
6612 return BOOTSTRAP_NO_MEMORY
;
6615 if (!job_assumes(j
, (target_j
= jobmgr_find_by_pid(j
->mgr
, target_pid
, true)) != NULL
)) {
6616 return BOOTSTRAP_NO_MEMORY
;
6619 if (SLIST_EMPTY(&j
->mspolicies
)) {
6620 job_log(j
, LOG_DEBUG
, "Setting policy on job \"%s\" for Mach service: %s", target_j
->label
, target_service
);
6621 if (target_service
[0]) {
6622 job_assumes(j
, mspolicy_new(target_j
, target_service
, flags
& BOOTSTRAP_ALLOW_LOOKUP
, flags
& BOOTSTRAP_PER_PID_SERVICE
, false));
6624 target_j
->deny_unknown_mslookups
= !(flags
& BOOTSTRAP_ALLOW_LOOKUP
);
6625 target_j
->deny_job_creation
= (bool)(flags
& BOOTSTRAP_DENY_JOB_CREATION
);
6628 job_log(j
, LOG_WARNING
, "Jobs that have policies assigned to them may not set policies.");
6629 return BOOTSTRAP_NOT_PRIVILEGED
;
6636 job_mig_spawn(job_t j
, vm_offset_t indata
, mach_msg_type_number_t indataCnt
, pid_t
*child_pid
, mach_port_t
*obsvr_port
)
6638 launch_data_t input_obj
= NULL
;
6639 size_t data_offset
= 0;
6643 runtime_get_caller_creds(&ldc
);
6645 if (!launchd_assumes(j
!= NULL
)) {
6646 return BOOTSTRAP_NO_MEMORY
;
6649 if (unlikely(j
->deny_job_creation
)) {
6650 return BOOTSTRAP_NOT_PRIVILEGED
;
6653 if (getpid() == 1 && ldc
.euid
&& ldc
.uid
) {
6654 job_log(j
, LOG_DEBUG
, "Punting spawn to per-user-context");
6655 return VPROC_ERR_TRY_PER_USER
;
6658 if (!job_assumes(j
, indataCnt
!= 0)) {
6662 if (!job_assumes(j
, (input_obj
= launch_data_unpack((void *)indata
, indataCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
)) {
6666 jr
= jobmgr_import2(j
->mgr
, input_obj
);
6668 if (!job_assumes(j
, jr
!= NULL
)) {
6671 return BOOTSTRAP_NAME_IN_USE
;
6673 return BOOTSTRAP_NO_MEMORY
;
6677 job_reparent_hack(jr
, NULL
);
6679 if (getpid() == 1) {
6680 jr
->mach_uid
= ldc
.uid
;
6683 jr
->unload_at_exit
= true;
6684 jr
->wait4pipe_eof
= true;
6685 jr
->abandon_pg
= true;
6686 jr
->stall_before_exec
= jr
->wait4debugger
;
6687 jr
->wait4debugger
= false;
6689 jr
= job_dispatch(jr
, true);
6691 if (!job_assumes(j
, jr
!= NULL
)) {
6692 return BOOTSTRAP_NO_MEMORY
;
6695 if (!job_assumes(jr
, jr
->p
)) {
6697 return BOOTSTRAP_NO_MEMORY
;
6700 if (!job_setup_machport(jr
)) {
6702 return BOOTSTRAP_NO_MEMORY
;
6705 job_log(jr
, LOG_DEBUG
, "Spawned by PID %u: %s", j
->p
, j
->label
);
6708 *obsvr_port
= jr
->j_port
;
6710 mig_deallocate(indata
, indataCnt
);
6712 return BOOTSTRAP_SUCCESS
;
6716 jobmgr_init(bool sflag
)
6718 const char *root_session_type
= getpid() == 1 ? VPROCMGR_SESSION_SYSTEM
: VPROCMGR_SESSION_BACKGROUND
;
6720 launchd_assert((root_jobmgr
= jobmgr_new(NULL
, MACH_PORT_NULL
, MACH_PORT_NULL
, sflag
, root_session_type
)) != NULL
);
size_t
our_strhash(const char *s)
{
	/*
	 * djb2: hash = hash * 33 + c.
	 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
	 */
	size_t hashval = 5381;

	for (int c = *s; c != 0; c = *++s) {
		hashval = hashval * 33 + c; /* equivalent to ((h << 5) + h) + c */
	}

	return hashval;
}
6740 hash_label(const char *label
)
6742 return our_strhash(label
) % LABEL_HASH_SIZE
;
6746 hash_ms(const char *msstr
)
6748 return our_strhash(msstr
) % MACHSERVICE_HASH_SIZE
;
6752 mspolicy_copy(job_t j_to
, job_t j_from
)
6754 struct mspolicy
*msp
;
6756 SLIST_FOREACH(msp
, &j_from
->mspolicies
, sle
) {
6757 if (!mspolicy_new(j_to
, msp
->name
, msp
->allow
, msp
->per_pid
, true)) {
6766 mspolicy_new(job_t j
, const char *name
, bool allow
, bool pid_local
, bool skip_check
)
6768 struct mspolicy
*msp
;
6770 if (!skip_check
) SLIST_FOREACH(msp
, &j
->mspolicies
, sle
) {
6771 if (msp
->per_pid
!= pid_local
) {
6773 } else if (strcmp(msp
->name
, name
) == 0) {
6778 if ((msp
= calloc(1, sizeof(struct mspolicy
) + strlen(name
) + 1)) == NULL
) {
6782 strcpy((char *)msp
->name
, name
);
6783 msp
->per_pid
= pid_local
;
6786 SLIST_INSERT_HEAD(&j
->mspolicies
, msp
, sle
);
6792 mspolicy_setup(launch_data_t obj
, const char *key
, void *context
)
6796 if (launch_data_get_type(obj
) != LAUNCH_DATA_BOOL
) {
6797 job_log(j
, LOG_WARNING
, "Invalid object type for Mach service policy key: %s", key
);
6801 job_assumes(j
, mspolicy_new(j
, key
, launch_data_get_bool(obj
), false, false));
6805 mspolicy_check(job_t j
, const char *name
, bool pid_local
)
6807 struct mspolicy
*mspi
;
6809 SLIST_FOREACH(mspi
, &j
->mspolicies
, sle
) {
6810 if (mspi
->per_pid
!= pid_local
) {
6812 } else if (strcmp(mspi
->name
, name
) != 0) {
6818 return !j
->deny_unknown_mslookups
;
6822 mspolicy_delete(job_t j
, struct mspolicy
*msp
)
6824 SLIST_REMOVE(&j
->mspolicies
, msp
, mspolicy
, sle
);
6830 waiting4removal_new(job_t j
, mach_port_t rp
)
6832 struct waiting_for_removal
*w4r
;
6834 if (!job_assumes(j
, (w4r
= malloc(sizeof(struct waiting_for_removal
))) != NULL
)) {
6838 w4r
->reply_port
= rp
;
6840 SLIST_INSERT_HEAD(&j
->removal_watchers
, w4r
, sle
);
6846 waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
)
6848 job_assumes(j
, job_mig_send_signal_reply(w4r
->reply_port
, 0) == 0);
6850 SLIST_REMOVE(&j
->removal_watchers
, w4r
, waiting_for_removal
, sle
);
size_t
get_kern_max_proc(void)
{
	/* Ask the kernel for its process-table limit (kern.maxproc). */
	int mib[] = { CTL_KERN, KERN_MAXPROC };
	int max = 100; /* conservative fallback if the sysctl fails */
	size_t max_sz = sizeof(max);

	launchd_assumes(sysctl(mib, 2, &max, &max_sz, NULL, 0) != -1);

	return max;
}
6872 launchd_assert(mach_timebase_info(&tbi
) == 0);
6874 if (stat("/AppleInternal", &sb
) == 0) {
6875 do_apple_internal_magic
= true;