2 * @APPLE_APACHE_LICENSE_HEADER_START@
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
16 * @APPLE_APACHE_LICENSE_HEADER_END@
19 static const char *const __rcs_file_version__
= "$Revision: 23923 $";
22 #include "launchd_core_logic.h"
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/mach_time.h>
28 #include <mach/boolean.h>
29 #include <mach/message.h>
30 #include <mach/notify.h>
31 #include <mach/mig_errors.h>
32 #include <mach/mach_traps.h>
33 #include <mach/mach_interface.h>
34 #include <mach/host_info.h>
35 #include <mach/mach_host.h>
36 #include <mach/exception.h>
37 #include <mach/host_reboot.h>
38 #include <sys/types.h>
39 #include <sys/queue.h>
40 #include <sys/event.h>
42 #include <sys/ucred.h>
43 #include <sys/fcntl.h>
45 #include <sys/reboot.h>
47 #include <sys/sysctl.h>
48 #include <sys/sockio.h>
50 #include <sys/resource.h>
51 #include <sys/ioctl.h>
52 #include <sys/mount.h>
55 #include <netinet/in.h>
56 #include <netinet/in_var.h>
57 #include <netinet6/nd6.h>
58 #include <bsm/libbsm.h>
81 #include <quarantine.h>
85 #include "launch_priv.h"
86 #include "launch_internal.h"
87 #include "bootstrap.h"
88 #include "bootstrap_priv.h"
90 #include "vproc_internal.h"
95 #include "launchd_runtime.h"
96 #include "launchd_unix_ipc.h"
97 #include "protocol_vproc.h"
98 #include "protocol_vprocServer.h"
99 #include "protocol_job_reply.h"
/* Minimum time a job must stay alive before a respawn is considered
 * healthy (throttles crash loops).  NOTE(review): presumably seconds,
 * matching min_run_time's use elsewhere -- confirm against callers. */
#define LAUNCHD_MIN_JOB_RUN_TIME 10
/* Default time to wait after SIGTERM for a job to exit before
 * escalating; fed to an EVFILT_TIMER with NOTE_SECONDS, so seconds. */
#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
/* NOTE(review): presumably the delay, in seconds, before following up
 * with SIGKILL -- not referenced in this chunk; verify at call sites. */
#define LAUNCHD_SIGKILL_TIMER 5
/* NOTE(review): dictionary keys, presumably for the take-subset
 * bootstrap handoff IPC -- not referenced in this chunk; confirm. */
#define TAKE_SUBSET_NAME "TakeSubsetName"
#define TAKE_SUBSET_PID "TakeSubsetPID"
#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
110 #define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
112 extern char **environ
;
114 struct waiting_for_removal
{
115 SLIST_ENTRY(waiting_for_removal
) sle
;
116 mach_port_t reply_port
;
119 static bool waiting4removal_new(job_t j
, mach_port_t rp
);
120 static void waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
);
123 SLIST_ENTRY(mspolicy
) sle
;
124 unsigned int allow
:1, per_pid
:1;
128 static bool mspolicy_new(job_t j
, const char *name
, bool allow
, bool pid_local
, bool skip_check
);
129 static bool mspolicy_copy(job_t j_to
, job_t j_from
);
130 static void mspolicy_setup(launch_data_t obj
, const char *key
, void *context
);
131 static bool mspolicy_check(job_t j
, const char *name
, bool pid_local
);
132 static void mspolicy_delete(job_t j
, struct mspolicy
*msp
);
135 SLIST_ENTRY(machservice
) sle
;
136 SLIST_ENTRY(machservice
) special_port_sle
;
137 LIST_ENTRY(machservice
) name_hash_sle
;
138 LIST_ENTRY(machservice
) port_hash_sle
;
140 uint64_t bad_perf_cnt
;
141 unsigned int gen_num
;
142 mach_port_name_t port
;
143 unsigned int isActive
:1, reset
:1, recv
:1, hide
:1, kUNCServer
:1, per_user_hack
:1, debug_on_close
:1, per_pid
:1, special_port_num
:10;
147 static SLIST_HEAD(, machservice
) special_ports
; /* hack, this should be per jobmgr_t */
/* Bucket count for the port -> machservice hash table; kept a power of
 * two so HASH_PORT reduces to a cheap mask. */
#define PORT_HASH_SIZE 32
/* Map a mach port name to a port_hash[] bucket.  The IS_POWER_OF_TWO
 * test is on a compile-time constant, so the ternary constant-folds:
 * mask when the size is a power of two, modulo otherwise.  `x` is
 * passed straight through to MACH_PORT_INDEX, so no extra parens are
 * needed here. */
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
152 static LIST_HEAD(, machservice
) port_hash
[PORT_HASH_SIZE
];
154 static void machservice_setup(launch_data_t obj
, const char *key
, void *context
);
155 static void machservice_setup_options(launch_data_t obj
, const char *key
, void *context
);
156 static void machservice_resetport(job_t j
, struct machservice
*ms
);
157 static struct machservice
*machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
);
158 static void machservice_ignore(job_t j
, struct machservice
*ms
);
159 static void machservice_watch(job_t j
, struct machservice
*ms
);
160 static void machservice_delete(job_t j
, struct machservice
*, bool port_died
);
161 static void machservice_request_notifications(struct machservice
*);
162 static mach_port_t
machservice_port(struct machservice
*);
163 static job_t
machservice_job(struct machservice
*);
164 static bool machservice_hidden(struct machservice
*);
165 static bool machservice_active(struct machservice
*);
166 static const char *machservice_name(struct machservice
*);
167 static bootstrap_status_t
machservice_status(struct machservice
*);
170 SLIST_ENTRY(socketgroup
) sle
;
172 unsigned int junkfds
:1, fd_cnt
:31;
179 static bool socketgroup_new(job_t j
, const char *name
, int *fds
, unsigned int fd_cnt
, bool junkfds
);
180 static void socketgroup_delete(job_t j
, struct socketgroup
*sg
);
181 static void socketgroup_watch(job_t j
, struct socketgroup
*sg
);
182 static void socketgroup_ignore(job_t j
, struct socketgroup
*sg
);
183 static void socketgroup_callback(job_t j
);
184 static void socketgroup_setup(launch_data_t obj
, const char *key
, void *context
);
185 static void socketgroup_kevent_mod(job_t j
, struct socketgroup
*sg
, bool do_add
);
187 struct calendarinterval
{
188 LIST_ENTRY(calendarinterval
) global_sle
;
189 SLIST_ENTRY(calendarinterval
) sle
;
195 static LIST_HEAD(, calendarinterval
) sorted_calendar_events
;
197 static bool calendarinterval_new(job_t j
, struct tm
*w
);
198 static bool calendarinterval_new_from_obj(job_t j
, launch_data_t obj
);
199 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
);
200 static void calendarinterval_delete(job_t j
, struct calendarinterval
*ci
);
201 static void calendarinterval_setalarm(job_t j
, struct calendarinterval
*ci
);
202 static void calendarinterval_callback(void);
203 static void calendarinterval_sanity_check(void);
206 SLIST_ENTRY(envitem
) sle
;
214 static bool envitem_new(job_t j
, const char *k
, const char *v
, bool global
);
215 static void envitem_delete(job_t j
, struct envitem
*ei
, bool global
);
216 static void envitem_setup(launch_data_t obj
, const char *key
, void *context
);
219 SLIST_ENTRY(limititem
) sle
;
221 unsigned int setsoft
:1, sethard
:1, which
:30;
224 static bool limititem_update(job_t j
, int w
, rlim_t r
);
225 static void limititem_delete(job_t j
, struct limititem
*li
);
226 static void limititem_setup(launch_data_t obj
, const char *key
, void *context
);
228 static void seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
);
244 // FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
245 } semaphore_reason_t
;
247 struct semaphoreitem
{
248 SLIST_ENTRY(semaphoreitem
) sle
;
249 semaphore_reason_t why
;
257 struct semaphoreitem_dict_iter_context
{
259 semaphore_reason_t why_true
;
260 semaphore_reason_t why_false
;
263 static bool semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
);
264 static void semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
);
265 static void semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
);
266 static void semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
);
267 static void semaphoreitem_callback(job_t j
, struct kevent
*kev
);
268 static void semaphoreitem_watch(job_t j
, struct semaphoreitem
*si
);
269 static void semaphoreitem_ignore(job_t j
, struct semaphoreitem
*si
);
270 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
);
/* Bucket count for each jobmgr's active_jobs[] PID hash table. */
#define ACTIVE_JOB_HASH_SIZE 32
/* Map a PID to an active_jobs[] bucket.  The IS_POWER_OF_TWO test is
 * on a compile-time constant, so this constant-folds to the mask form.
 * `x` is now parenthesized (CERT PRE01-C) so expression arguments
 * group correctly; all current callers pass a plain pid. */
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? ((x) & (ACTIVE_JOB_HASH_SIZE - 1)) : ((x) % ACTIVE_JOB_HASH_SIZE))
/* Bucket count for each jobmgr's ms_hash[]; 37 is prime, which spreads
 * string-hash values across buckets regardless of hash quality. */
#define MACHSERVICE_HASH_SIZE 37
277 kq_callback kqjobmgr_callback
;
278 SLIST_ENTRY(jobmgr_s
) sle
;
279 SLIST_HEAD(, jobmgr_s
) submgrs
;
280 LIST_HEAD(, job_s
) jobs
;
281 LIST_HEAD(, job_s
) active_jobs
[ACTIVE_JOB_HASH_SIZE
];
282 LIST_HEAD(, machservice
) ms_hash
[MACHSERVICE_HASH_SIZE
];
284 mach_port_t req_port
;
287 unsigned int global_on_demand_cnt
;
288 unsigned int hopefully_first_cnt
;
289 unsigned int normal_active_cnt
;
290 unsigned int sent_stop_to_normal_jobs
:1, sent_stop_to_hopefully_last_jobs
:1, shutting_down
:1, session_initialized
:1;
/* Non-fatal assertion for jobmgr_t contexts.  When `e` is false
 * (branch hinted unlikely via __builtin_expect), logs the stringified
 * expression with revision/file/line through jobmgr_log_bug() and the
 * whole macro evaluates to false; otherwise it evaluates to true, so
 * callers can write `if (!jobmgr_assumes(jm, cond)) { recover; }`. */
#define jobmgr_assumes(jm, e) \
	(__builtin_expect(!(e), 0) ? jobmgr_log_bug(jm, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
300 static jobmgr_t
jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
);
301 static job_t
jobmgr_import2(jobmgr_t jm
, launch_data_t pload
);
302 static jobmgr_t
jobmgr_parent(jobmgr_t jm
);
303 static jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm
);
304 static bool jobmgr_label_test(jobmgr_t jm
, const char *str
);
305 static void jobmgr_reap_bulk(jobmgr_t jm
, struct kevent
*kev
);
306 static void jobmgr_log_stray_children(jobmgr_t jm
);
307 static void jobmgr_remove(jobmgr_t jm
);
308 static void jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
);
309 static job_t
jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
);
310 static job_t
jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
);
311 static job_t
job_mig_intran2(jobmgr_t jm
, mach_port_t mport
, pid_t upid
);
312 static void job_export_all2(jobmgr_t jm
, launch_data_t where
);
313 static void jobmgr_callback(void *obj
, struct kevent
*kev
);
314 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm
);
315 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
);
316 static struct machservice
*jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
);
317 static void jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
318 static void jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
319 /* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
320 static void jobmgr_log_bug(jobmgr_t jm
, const char *rcs_rev
, const char *path
, unsigned int line
, const char *test
);
/* Compile-time switch: when nonzero, job_s carries rusage accounting
 * fields (see the #if DO_RUSAGE_SUMMATION member below). */
#define DO_RUSAGE_SUMMATION 0
/* Sentinel label pointer (all bits set, never a valid string): passed
 * as `label` to job_new() to request an auto-generated label -- used
 * by job_new_anonymous() and job_new_via_mach_init(). */
#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
327 kq_callback kqjob_callback
;
328 LIST_ENTRY(job_s
) sle
;
329 LIST_ENTRY(job_s
) pid_hash_sle
;
330 LIST_ENTRY(job_s
) label_hash_sle
;
331 SLIST_HEAD(, socketgroup
) sockets
;
332 SLIST_HEAD(, calendarinterval
) cal_intervals
;
333 SLIST_HEAD(, envitem
) global_env
;
334 SLIST_HEAD(, envitem
) env
;
335 SLIST_HEAD(, limititem
) limits
;
336 SLIST_HEAD(, mspolicy
) mspolicies
;
337 SLIST_HEAD(, machservice
) machservices
;
338 SLIST_HEAD(, semaphoreitem
) semaphores
;
339 SLIST_HEAD(, waiting_for_removal
) removal_watchers
;
340 #if DO_RUSAGE_SUMMATION
343 cpu_type_t
*j_binpref
;
344 size_t j_binpref_cnt
;
346 mach_port_t wait_reply_port
; /* we probably should switch to a list of waiters */
357 char *alt_exc_handler
;
358 struct machservice
*lastlookup
;
359 unsigned int lastlookup_gennum
;
361 char *seatbelt_profile
;
362 uint64_t seatbelt_flags
;
365 void *quarantine_data
;
366 size_t quarantine_data_sz
;
370 int last_exit_status
;
374 unsigned int timeout
;
375 unsigned int exit_timeout
;
377 uint64_t sent_sigterm_time
;
379 uint32_t min_run_time
;
380 uint32_t start_interval
;
381 unsigned int checkedin
:1, anonymous
:1, debug
:1, inetcompat
:1, inetcompat_wait
:1,
382 ondemand
:1, session_create
:1, low_pri_io
:1, no_init_groups
:1, priv_port_has_senders
:1,
383 importing_global_env
:1, importing_hard_limits
:1, setmask
:1, legacy_mach_job
:1, start_pending
:1;
385 unsigned int globargv
:1, wait4debugger
:1, unload_at_exit
:1, stall_before_exec
:1, only_once
:1,
386 currently_ignored
:1, forced_peers_to_demand_mode
:1, setnice
:1, hopefully_exits_last
:1, removal_pending
:1,
387 wait4pipe_eof
:1, sent_sigkill
:1, debug_before_kill
:1, weird_bootstrap
:1, start_on_mount
:1,
388 per_user
:1, hopefully_exits_first
:1, deny_unknown_mslookups
:1, unload_at_mig_return
:1, abandon_pg
:1,
389 poll_for_vfs_changes
:1, internal_exc_handler
:1, deny_job_creation
:1;
393 #define LABEL_HASH_SIZE 53
395 static LIST_HEAD(, job_s
) label_hash
[LABEL_HASH_SIZE
];
396 static size_t hash_label(const char *label
) __attribute__((pure
));
397 static size_t hash_ms(const char *msstr
) __attribute__((pure
));
/* Non-fatal assertion for job_t contexts.  When `e` is false (branch
 * hinted unlikely), logs the stringified expression with
 * revision/file/line through job_log_bug() and evaluates to false;
 * otherwise evaluates to true.  Mirrors jobmgr_assumes() above. */
#define job_assumes(j, e) \
	(__builtin_expect(!(e), 0) ? job_log_bug(j, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
403 static void job_import_keys(launch_data_t obj
, const char *key
, void *context
);
404 static void job_import_bool(job_t j
, const char *key
, bool value
);
405 static void job_import_string(job_t j
, const char *key
, const char *value
);
406 static void job_import_integer(job_t j
, const char *key
, long long value
);
407 static void job_import_dictionary(job_t j
, const char *key
, launch_data_t value
);
408 static void job_import_array(job_t j
, const char *key
, launch_data_t value
);
409 static void job_import_opaque(job_t j
, const char *key
, launch_data_t value
);
410 static bool job_set_global_on_demand(job_t j
, bool val
);
411 static const char *job_active(job_t j
);
412 static void job_watch(job_t j
);
413 static void job_ignore(job_t j
);
414 static void job_reap(job_t j
);
415 static bool job_useless(job_t j
);
416 static bool job_keepalive(job_t j
);
417 static void job_start(job_t j
);
418 static void job_start_child(job_t j
) __attribute__((noreturn
));
419 static void job_setup_attributes(job_t j
);
420 static bool job_setup_machport(job_t j
);
421 static void job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
);
422 static void job_postfork_become_user(job_t j
);
423 #if !TARGET_OS_EMBEDDED
424 static void job_enable_audit_for_user(job_t j
, uid_t u
, char *name
);
426 static void job_find_and_blame_pids_with_weird_uids(job_t j
);
427 static void job_force_sampletool(job_t j
);
428 static void job_setup_exception_port(job_t j
, task_t target_task
);
429 static void job_reparent_hack(job_t j
, const char *where
);
430 static void job_callback(void *obj
, struct kevent
*kev
);
431 static void job_callback_proc(job_t j
, int flags
, int fflags
);
432 static void job_callback_timer(job_t j
, void *ident
);
433 static void job_callback_read(job_t j
, int ident
);
434 static void job_log_stray_pg(job_t j
);
435 static job_t
job_new_anonymous(jobmgr_t jm
, pid_t anonpid
);
436 static job_t
job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
);
437 static job_t
job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
);
438 static const char *job_prog(job_t j
);
439 static jobmgr_t
job_get_bs(job_t j
);
440 static void job_kill(job_t j
);
441 static void job_uncork_fork(job_t j
);
442 static void job_log_stdouterr(job_t j
);
443 static void job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
444 static void job_log_error(job_t j
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
445 static void job_log_bug(job_t j
, const char *rcs_rev
, const char *path
, unsigned int line
, const char *test
);
446 static void job_log_stdouterr2(job_t j
, const char *msg
, ...);
447 static void job_set_exeception_port(job_t j
, mach_port_t port
);
448 static kern_return_t
job_handle_mpm_wait(job_t j
, mach_port_t srp
, int *waitstatus
);
452 static const struct {
455 } launchd_keys2limits
[] = {
456 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE
, RLIMIT_CORE
},
457 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU
, RLIMIT_CPU
},
458 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA
, RLIMIT_DATA
},
459 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE
, RLIMIT_FSIZE
},
460 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK
, RLIMIT_MEMLOCK
},
461 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE
, RLIMIT_NOFILE
},
462 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC
, RLIMIT_NPROC
},
463 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS
, RLIMIT_RSS
},
464 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK
, RLIMIT_STACK
},
467 static time_t cronemu(int mon
, int mday
, int hour
, int min
);
468 static time_t cronemu_wday(int wday
, int hour
, int min
);
469 static bool cronemu_mon(struct tm
*wtm
, int mon
, int mday
, int hour
, int min
);
470 static bool cronemu_mday(struct tm
*wtm
, int mday
, int hour
, int min
);
471 static bool cronemu_hour(struct tm
*wtm
, int hour
, int min
);
472 static bool cronemu_min(struct tm
*wtm
, int min
);
474 /* miscellaneous file local functions */
475 static void ensure_root_bkgd_setup(void);
476 static int dir_has_files(job_t j
, const char *path
);
477 static char **mach_cmd2argv(const char *string
);
478 static size_t our_strhash(const char *s
) __attribute__((pure
));
479 static void extract_rcsid_substr(const char *i
, char *o
, size_t osz
);
480 static void do_first_per_user_launchd_hack(void);
481 static size_t get_kern_max_proc(void);
482 static void do_file_init(void) __attribute__((constructor
));
484 /* file local globals */
485 static bool do_apple_internal_magic
;
486 static size_t total_children
;
487 static size_t total_anon_children
;
488 static mach_port_t the_exception_server
;
489 static bool did_first_per_user_launchd_BootCache_hack
;
490 #define JOB_BOOTCACHE_HACK_CHECK(j) (j->per_user && !did_first_per_user_launchd_BootCache_hack && (j->mach_uid >= 500) && (j->mach_uid != (uid_t)-2))
491 static jobmgr_t background_jobmgr
;
492 static job_t workaround_5477111
;
493 static mach_timebase_info_data_t tbi
;
495 /* process wide globals */
496 mach_port_t inherited_bootstrap_port
;
497 jobmgr_t root_jobmgr
;
503 struct semaphoreitem
*si
;
504 struct socketgroup
*sg
;
505 struct machservice
*ms
;
507 if (j
->currently_ignored
) {
511 job_log(j
, LOG_DEBUG
, "Ignoring...");
513 j
->currently_ignored
= true;
515 if (j
->poll_for_vfs_changes
) {
516 j
->poll_for_vfs_changes
= false;
517 job_assumes(j
, kevent_mod((uintptr_t)&j
->semaphores
, EVFILT_TIMER
, EV_DELETE
, 0, 0, j
) != -1);
520 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
521 socketgroup_ignore(j
, sg
);
524 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
525 machservice_ignore(j
, ms
);
528 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
529 semaphoreitem_ignore(j
, si
);
536 struct semaphoreitem
*si
;
537 struct socketgroup
*sg
;
538 struct machservice
*ms
;
540 if (!j
->currently_ignored
) {
544 job_log(j
, LOG_DEBUG
, "Watching...");
546 j
->currently_ignored
= false;
548 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
549 socketgroup_watch(j
, sg
);
552 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
553 machservice_watch(j
, ms
);
556 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
557 semaphoreitem_watch(j
, si
);
564 if (!j
->p
|| j
->anonymous
) {
568 job_assumes(j
, runtime_kill(j
->p
, SIGTERM
) != -1);
569 j
->sent_sigterm_time
= mach_absolute_time();
571 if (j
->exit_timeout
) {
572 job_assumes(j
, kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
,
573 EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, j
->exit_timeout
, j
) != -1);
576 job_log(j
, LOG_DEBUG
, "Sent SIGTERM signal");
582 launch_data_t tmp
, tmp2
, tmp3
, r
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
588 if ((tmp
= launch_data_new_string(j
->label
))) {
589 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LABEL
);
591 if ((tmp
= launch_data_new_string(j
->mgr
->name
))) {
592 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
594 if ((tmp
= launch_data_new_bool(j
->ondemand
))) {
595 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_ONDEMAND
);
597 if ((tmp
= launch_data_new_integer(j
->last_exit_status
))) {
598 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LASTEXITSTATUS
);
600 if (j
->p
&& (tmp
= launch_data_new_integer(j
->p
))) {
601 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PID
);
603 if ((tmp
= launch_data_new_integer(j
->timeout
))) {
604 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_TIMEOUT
);
606 if (j
->prog
&& (tmp
= launch_data_new_string(j
->prog
))) {
607 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAM
);
609 if (j
->stdoutpath
&& (tmp
= launch_data_new_string(j
->stdoutpath
))) {
610 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDOUTPATH
);
612 if (j
->stderrpath
&& (tmp
= launch_data_new_string(j
->stderrpath
))) {
613 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDERRORPATH
);
615 if (j
->argv
&& (tmp
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
618 for (i
= 0; i
< j
->argc
; i
++) {
619 if ((tmp2
= launch_data_new_string(j
->argv
[i
]))) {
620 launch_data_array_set_index(tmp
, tmp2
, i
);
624 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
);
627 if (j
->session_create
&& (tmp
= launch_data_new_bool(true))) {
628 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SESSIONCREATE
);
631 if (j
->inetcompat
&& (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
632 if ((tmp2
= launch_data_new_bool(j
->inetcompat_wait
))) {
633 launch_data_dict_insert(tmp
, tmp2
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
);
635 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
);
638 if (!SLIST_EMPTY(&j
->sockets
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
639 struct socketgroup
*sg
;
642 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
646 if ((tmp2
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
647 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
648 if ((tmp3
= launch_data_new_fd(sg
->fds
[i
]))) {
649 launch_data_array_set_index(tmp2
, tmp3
, i
);
652 launch_data_dict_insert(tmp
, tmp2
, sg
->name
);
656 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SOCKETS
);
659 if (!SLIST_EMPTY(&j
->machservices
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
660 struct machservice
*ms
;
664 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
667 tmp3
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
670 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
671 launch_data_dict_insert(tmp3
, tmp2
, ms
->name
);
674 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
675 launch_data_dict_insert(tmp
, tmp2
, ms
->name
);
679 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_MACHSERVICES
);
682 launch_data_dict_insert(r
, tmp3
, LAUNCH_JOBKEY_PERJOBMACHSERVICES
);
690 jobmgr_log_active_jobs(jobmgr_t jm
)
692 const char *why_active
;
696 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
697 jobmgr_log_active_jobs(jmi
);
700 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
701 why_active
= job_active(ji
);
703 job_log(ji
, LOG_DEBUG
, "%s", why_active
? why_active
: "Inactive");
709 still_alive_with_check(void)
711 jobmgr_log(root_jobmgr
, LOG_NOTICE
, "Still alive with %lu/%lu children", total_children
, total_anon_children
);
713 jobmgr_log_active_jobs(root_jobmgr
);
715 runtime_closelog(); /* hack to flush logs */
719 jobmgr_shutdown(jobmgr_t jm
)
724 jobmgr_log(jm
, LOG_DEBUG
, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm
->reboot_flags
));
726 jm
->shutting_down
= true;
728 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
729 jobmgr_shutdown(jmi
);
732 if (jm
->hopefully_first_cnt
) {
733 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
734 if (ji
->p
&& ji
->hopefully_exits_first
) {
740 if (debug_shutdown_hangs
&& jm
->parentmgr
== NULL
&& getpid() == 1) {
741 runtime_set_timeout(still_alive_with_check
, 5);
744 return jobmgr_do_garbage_collection(jm
);
748 jobmgr_remove(jobmgr_t jm
)
753 jobmgr_log(jm
, LOG_DEBUG
, "Removed job manager");
755 if (!jobmgr_assumes(jm
, SLIST_EMPTY(&jm
->submgrs
))) {
756 while ((jmi
= SLIST_FIRST(&jm
->submgrs
))) {
761 while ((ji
= LIST_FIRST(&jm
->jobs
))) {
762 /* We should only have anonymous jobs left */
763 job_assumes(ji
, ji
->anonymous
);
768 jobmgr_assumes(jm
, launchd_mport_deallocate(jm
->req_port
) == KERN_SUCCESS
);
772 jobmgr_assumes(jm
, launchd_mport_close_recv(jm
->jm_port
) == KERN_SUCCESS
);
775 if (jm
== background_jobmgr
) {
776 background_jobmgr
= NULL
;
781 SLIST_REMOVE(&jm
->parentmgr
->submgrs
, jm
, jobmgr_s
, sle
);
782 } else if (getpid() == 1) {
783 jobmgr_log(jm
, LOG_DEBUG
, "About to call: reboot(%s)", reboot_flags_to_C_names(jm
->reboot_flags
));
785 jobmgr_assumes(jm
, reboot(jm
->reboot_flags
) != -1);
789 jobmgr_log(jm
, LOG_DEBUG
, "About to exit");
799 struct waiting_for_removal
*w4r
;
800 struct calendarinterval
*ci
;
801 struct semaphoreitem
*si
;
802 struct socketgroup
*sg
;
803 struct machservice
*ms
;
804 struct limititem
*li
;
805 struct mspolicy
*msp
;
808 if (j
->p
&& j
->anonymous
) {
811 job_log(j
, LOG_DEBUG
, "Removal pended until the job exits");
813 if (!j
->removal_pending
) {
814 j
->removal_pending
= true;
821 ipc_close_all_with_job(j
);
823 if (j
->forced_peers_to_demand_mode
) {
824 job_set_global_on_demand(j
, false);
827 if (!job_assumes(j
, j
->forkfd
== 0)) {
828 job_assumes(j
, runtime_close(j
->forkfd
) != -1);
831 if (!job_assumes(j
, j
->log_redirect_fd
== 0)) {
832 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
836 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
839 if (!job_assumes(j
, j
->wait_reply_port
== MACH_PORT_NULL
)) {
840 job_assumes(j
, launchd_mport_deallocate(j
->wait_reply_port
) == KERN_SUCCESS
);
843 while ((msp
= SLIST_FIRST(&j
->mspolicies
))) {
844 mspolicy_delete(j
, msp
);
846 while ((sg
= SLIST_FIRST(&j
->sockets
))) {
847 socketgroup_delete(j
, sg
);
849 while ((ci
= SLIST_FIRST(&j
->cal_intervals
))) {
850 calendarinterval_delete(j
, ci
);
852 while ((ei
= SLIST_FIRST(&j
->env
))) {
853 envitem_delete(j
, ei
, false);
855 while ((ei
= SLIST_FIRST(&j
->global_env
))) {
856 envitem_delete(j
, ei
, true);
858 while ((li
= SLIST_FIRST(&j
->limits
))) {
859 limititem_delete(j
, li
);
861 while ((ms
= SLIST_FIRST(&j
->machservices
))) {
862 machservice_delete(j
, ms
, false);
864 while ((si
= SLIST_FIRST(&j
->semaphores
))) {
865 semaphoreitem_delete(j
, si
);
867 while ((w4r
= SLIST_FIRST(&j
->removal_watchers
))) {
868 waiting4removal_delete(j
, w4r
);
895 if (j
->alt_exc_handler
) {
896 free(j
->alt_exc_handler
);
899 if (j
->seatbelt_profile
) {
900 free(j
->seatbelt_profile
);
904 if (j
->quarantine_data
) {
905 free(j
->quarantine_data
);
911 if (j
->start_interval
) {
913 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
) != -1);
915 if (j
->poll_for_vfs_changes
) {
916 job_assumes(j
, kevent_mod((uintptr_t)&j
->semaphores
, EVFILT_TIMER
, EV_DELETE
, 0, 0, j
) != -1);
919 kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
922 LIST_REMOVE(j
, label_hash_sle
);
924 job_log(j
, LOG_DEBUG
, "Removed");
930 socketgroup_setup(launch_data_t obj
, const char *key
, void *context
)
932 launch_data_t tmp_oai
;
934 unsigned int i
, fd_cnt
= 1;
937 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
938 fd_cnt
= launch_data_array_get_count(obj
);
941 fds
= alloca(fd_cnt
* sizeof(int));
943 for (i
= 0; i
< fd_cnt
; i
++) {
944 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
945 tmp_oai
= launch_data_array_get_index(obj
, i
);
950 fds
[i
] = launch_data_get_fd(tmp_oai
);
953 socketgroup_new(j
, key
, fds
, fd_cnt
, strcmp(key
, LAUNCH_JOBKEY_BONJOURFDS
) == 0);
959 job_set_global_on_demand(job_t j
, bool val
)
961 if (j
->forced_peers_to_demand_mode
&& val
) {
963 } else if (!j
->forced_peers_to_demand_mode
&& !val
) {
967 if ((j
->forced_peers_to_demand_mode
= val
)) {
968 j
->mgr
->global_on_demand_cnt
++;
970 j
->mgr
->global_on_demand_cnt
--;
973 if (j
->mgr
->global_on_demand_cnt
== 0) {
974 jobmgr_dispatch_all(j
->mgr
, false);
981 job_setup_machport(job_t j
)
983 mach_msg_size_t mxmsgsz
;
985 if (!job_assumes(j
, launchd_mport_create_recv(&j
->j_port
) == KERN_SUCCESS
)) {
989 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
990 mxmsgsz
= sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem
);
991 if (job_mig_protocol_vproc_subsystem
.maxsize
> mxmsgsz
) {
992 mxmsgsz
= job_mig_protocol_vproc_subsystem
.maxsize
;
995 if (!job_assumes(j
, runtime_add_mport(j
->j_port
, protocol_vproc_server
, mxmsgsz
) == KERN_SUCCESS
)) {
999 if (!job_assumes(j
, launchd_mport_notify_req(j
->j_port
, MACH_NOTIFY_NO_SENDERS
) == KERN_SUCCESS
)) {
1000 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
1006 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
1012 job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
)
1014 const char **argv
= (const char **)mach_cmd2argv(cmd
);
1017 if (!job_assumes(j
, argv
!= NULL
)) {
1021 jr
= job_new(j
->mgr
, AUTO_PICK_LEGACY_LABEL
, NULL
, argv
);
1025 /* jobs can easily be denied creation during shutdown */
1032 jr
->legacy_mach_job
= true;
1033 jr
->abandon_pg
= true;
1034 jr
->priv_port_has_senders
= true; /* the IPC that called us will make-send on this port */
1036 if (!job_setup_machport(jr
)) {
1040 job_log(jr
, LOG_INFO
, "Legacy%s server created", ond
? " on-demand" : "");
1052 job_handle_mpm_wait(job_t j
, mach_port_t srp
, int *waitstatus
)
1055 j
->wait_reply_port
= srp
;
1056 return MIG_NO_REPLY
;
1059 *waitstatus
= j
->last_exit_status
;
1065 job_new_anonymous(jobmgr_t jm
, pid_t anonpid
)
1067 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, anonpid
};
1068 struct kinfo_proc kp
;
1069 size_t len
= sizeof(kp
);
1070 const char *zombie
= NULL
;
1071 bool shutdown_state
;
1072 job_t jp
= NULL
, jr
= NULL
;
1074 if (!jobmgr_assumes(jm
, anonpid
!= 0)) {
1078 if (!jobmgr_assumes(jm
, anonpid
< 100000)) {
1079 /* The kernel current defines PID_MAX to be 99999, but that define isn't exported */
1083 if (!jobmgr_assumes(jm
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1)) {
1087 if (len
!= sizeof(kp
)) {
1088 jobmgr_log(jm
, LOG_DEBUG
, "Tried to create an anonymous job for nonexistent PID: %u", anonpid
);
1092 if (!jobmgr_assumes(jm
, kp
.kp_proc
.p_comm
[0] != '\0')) {
1096 if (kp
.kp_proc
.p_stat
== SZOMB
) {
1097 jobmgr_log(jm
, LOG_DEBUG
, "Tried to create an anonymous job for zombie PID: %u", anonpid
);
1101 switch (kp
.kp_eproc
.e_ppid
) {
1106 if (getpid() != 1) {
1107 /* we cannot possibly find a parent job_t that is useful in this function */
1112 jp
= jobmgr_find_by_pid(jm
, kp
.kp_eproc
.e_ppid
, true);
1113 jobmgr_assumes(jm
, jp
!= NULL
);
1117 /* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
1118 if ((shutdown_state
= jm
->shutting_down
)) {
1119 jm
->shutting_down
= false;
1122 if (jobmgr_assumes(jm
, (jr
= job_new(jm
, AUTO_PICK_LEGACY_LABEL
, zombie
? zombie
: kp
.kp_proc
.p_comm
, NULL
)) != NULL
)) {
1123 u_int proc_fflags
= NOTE_EXEC
|NOTE_EXIT
/* |NOTE_REAP */;
1125 total_anon_children
++;
1126 jr
->anonymous
= true;
1129 /* anonymous process reaping is messy */
1130 LIST_INSERT_HEAD(&jm
->active_jobs
[ACTIVE_JOB_HASH(jr
->p
)], jr
, pid_hash_sle
);
1132 if (kevent_mod(jr
->p
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
) == -1 && job_assumes(jr
, errno
== ESRCH
)) {
1133 /* zombies are weird */
1134 job_log(jr
, LOG_ERR
, "Failed to add kevent for PID %u. Will unload at MIG return", jr
->p
);
1135 jr
->unload_at_mig_return
= true;
1139 job_assumes(jr
, mspolicy_copy(jr
, jp
));
1142 if (shutdown_state
&& jm
->hopefully_first_cnt
== 0) {
1143 job_log(jr
, LOG_APPLEONLY
, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time");
1146 job_log(jr
, LOG_DEBUG
, "Created PID %u anonymously by PPID %u%s%s", anonpid
, kp
.kp_eproc
.e_ppid
, jp
? ": " : "", jp
? jp
->label
: "");
1149 if (shutdown_state
) {
1150 jm
->shutting_down
= true;
1157 job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
)
1159 const char *const *argv_tmp
= argv
;
1160 char auto_label
[1000];
1161 const char *bn
= NULL
;
1163 size_t minlabel_len
;
1167 launchd_assert(offsetof(struct job_s
, kqjob_callback
) == 0);
1169 if (jm
->shutting_down
) {
1174 if (prog
== NULL
&& argv
== NULL
) {
1179 if (label
== AUTO_PICK_LEGACY_LABEL
) {
1180 bn
= prog
? prog
: basename((char *)argv
[0]); /* prog for auto labels is kp.kp_kproc.p_comm */
1181 snprintf(auto_label
, sizeof(auto_label
), "%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", bn
);
1183 /* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
1184 minlabel_len
= strlen(label
) + MAXCOMLEN
;
1186 minlabel_len
= strlen(label
);
1189 j
= calloc(1, sizeof(struct job_s
) + minlabel_len
+ 1);
1191 if (!jobmgr_assumes(jm
, j
!= NULL
)) {
1195 if (label
== auto_label
) {
1196 snprintf((char *)j
->label
, strlen(label
) + 1, "%p.%s", j
, bn
);
1198 strcpy((char *)j
->label
, label
);
1200 j
->kqjob_callback
= job_callback
;
1202 j
->min_run_time
= LAUNCHD_MIN_JOB_RUN_TIME
;
1203 j
->timeout
= RUNTIME_ADVISABLE_IDLE_TIMEOUT
;
1204 j
->exit_timeout
= LAUNCHD_DEFAULT_EXIT_TIMEOUT
;
1205 j
->currently_ignored
= true;
1207 j
->checkedin
= true;
1210 j
->prog
= strdup(prog
);
1211 if (!job_assumes(j
, j
->prog
!= NULL
)) {
1220 for (i
= 0; i
< j
->argc
; i
++) {
1221 cc
+= strlen(argv
[i
]) + 1;
1224 j
->argv
= malloc((j
->argc
+ 1) * sizeof(char *) + cc
);
1226 if (!job_assumes(j
, j
->argv
!= NULL
)) {
1230 co
= ((char *)j
->argv
) + ((j
->argc
+ 1) * sizeof(char *));
1232 for (i
= 0; i
< j
->argc
; i
++) {
1234 strcpy(co
, argv
[i
]);
1235 co
+= strlen(argv
[i
]) + 1;
1240 LIST_INSERT_HEAD(&jm
->jobs
, j
, sle
);
1241 LIST_INSERT_HEAD(&label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
1243 job_log(j
, LOG_DEBUG
, "Conceived");
1257 job_import(launch_data_t pload
)
1259 job_t j
= jobmgr_import2(root_jobmgr
, pload
);
1265 return job_dispatch(j
, false);
1269 job_import_bulk(launch_data_t pload
)
1271 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
1273 size_t i
, c
= launch_data_array_get_count(pload
);
1275 ja
= alloca(c
* sizeof(job_t
));
1277 for (i
= 0; i
< c
; i
++) {
1278 if ((ja
[i
] = jobmgr_import2(root_jobmgr
, launch_data_array_get_index(pload
, i
)))) {
1281 launch_data_array_set_index(resp
, launch_data_new_errno(errno
), i
);
1284 for (i
= 0; i
< c
; i
++) {
1285 if (ja
[i
] == NULL
) {
1288 job_dispatch(ja
[i
], false);
1295 job_import_bool(job_t j
, const char *key
, bool value
)
1297 bool found_key
= false;
1302 if (strcasecmp(key
, LAUNCH_JOBKEY_ABANDONPROCESSGROUP
) == 0) {
1303 j
->abandon_pg
= value
;
1309 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
1310 j
->ondemand
= !value
;
1316 if (strcasecmp(key
, LAUNCH_JOBKEY_ONDEMAND
) == 0) {
1317 j
->ondemand
= value
;
1323 if (strcasecmp(key
, LAUNCH_JOBKEY_DEBUG
) == 0) {
1326 } else if (strcasecmp(key
, LAUNCH_JOBKEY_DISABLED
) == 0) {
1327 job_assumes(j
, !value
);
1333 if (strcasecmp(key
, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST
) == 0) {
1334 j
->hopefully_exits_last
= value
;
1336 } else if (strcasecmp(key
, LAUNCH_JOBKEY_HOPEFULLYEXITSFIRST
) == 0) {
1337 j
->hopefully_exits_first
= value
;
1343 if (strcasecmp(key
, LAUNCH_JOBKEY_SESSIONCREATE
) == 0) {
1344 j
->session_create
= value
;
1346 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTONMOUNT
) == 0) {
1347 j
->start_on_mount
= value
;
1349 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SERVICEIPC
) == 0) {
1350 /* this only does something on Mac OS X 10.4 "Tiger" */
1356 if (strcasecmp(key
, LAUNCH_JOBKEY_LOWPRIORITYIO
) == 0) {
1357 j
->low_pri_io
= value
;
1359 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LAUNCHONLYONCE
) == 0) {
1360 j
->only_once
= value
;
1366 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
1367 j
->internal_exc_handler
= value
;
1373 if (strcasecmp(key
, LAUNCH_JOBKEY_INITGROUPS
) == 0) {
1374 if (getuid() != 0) {
1375 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1378 j
->no_init_groups
= !value
;
1384 if (strcasecmp(key
, LAUNCH_JOBKEY_RUNATLOAD
) == 0) {
1386 /* We don't want value == false to change j->start_pending */
1387 j
->start_pending
= true;
1394 if (strcasecmp(key
, LAUNCH_JOBKEY_ENABLEGLOBBING
) == 0) {
1395 j
->globargv
= value
;
1397 } else if (strcasecmp(key
, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL
) == 0) {
1398 j
->debug_before_kill
= value
;
1404 if (strcasecmp(key
, LAUNCH_JOBKEY_WAITFORDEBUGGER
) == 0) {
1405 j
->wait4debugger
= value
;
1414 job_log(j
, LOG_WARNING
, "Unknown key for boolean: %s", key
);
1419 job_import_string(job_t j
, const char *key
, const char *value
)
1421 char **where2put
= NULL
;
1426 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
1427 where2put
= &j
->alt_exc_handler
;
1432 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAM
) == 0) {
1438 if (strcasecmp(key
, LAUNCH_JOBKEY_LABEL
) == 0) {
1440 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
1442 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
1444 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
1445 job_reparent_hack(j
, value
);
1451 if (strcasecmp(key
, LAUNCH_JOBKEY_ROOTDIRECTORY
) == 0) {
1452 if (getuid() != 0) {
1453 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1456 where2put
= &j
->rootdir
;
1461 if (strcasecmp(key
, LAUNCH_JOBKEY_WORKINGDIRECTORY
) == 0) {
1462 where2put
= &j
->workingdir
;
1467 if (strcasecmp(key
, LAUNCH_JOBKEY_USERNAME
) == 0) {
1468 if (getuid() != 0) {
1469 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1471 } else if (strcmp(value
, "root") == 0) {
1474 where2put
= &j
->username
;
1479 if (strcasecmp(key
, LAUNCH_JOBKEY_GROUPNAME
) == 0) {
1480 if (getuid() != 0) {
1481 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1483 } else if (strcmp(value
, "wheel") == 0) {
1486 where2put
= &j
->groupname
;
1491 if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDOUTPATH
) == 0) {
1492 where2put
= &j
->stdoutpath
;
1493 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDERRORPATH
) == 0) {
1494 where2put
= &j
->stderrpath
;
1496 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXPROFILE
) == 0) {
1497 where2put
= &j
->seatbelt_profile
;
1502 job_log(j
, LOG_WARNING
, "Unknown key for string: %s", key
);
1507 job_assumes(j
, (*where2put
= strdup(value
)) != NULL
);
1509 job_log(j
, LOG_WARNING
, "Unknown key: %s", key
);
1514 job_import_integer(job_t j
, const char *key
, long long value
)
1519 if (strcasecmp(key
, LAUNCH_JOBKEY_EXITTIMEOUT
) == 0) {
1521 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
1522 } else if (value
> UINT32_MAX
) {
1523 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
1525 j
->exit_timeout
= value
;
1531 if (strcasecmp(key
, LAUNCH_JOBKEY_NICE
) == 0) {
1538 if (strcasecmp(key
, LAUNCH_JOBKEY_TIMEOUT
) == 0) {
1540 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
1541 } else if (value
> UINT32_MAX
) {
1542 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
1546 } else if (strcasecmp(key
, LAUNCH_JOBKEY_THROTTLEINTERVAL
) == 0) {
1548 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
1549 } else if (value
> UINT32_MAX
) {
1550 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
1552 j
->min_run_time
= value
;
1558 if (strcasecmp(key
, LAUNCH_JOBKEY_UMASK
) == 0) {
1565 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTINTERVAL
) == 0) {
1567 job_log(j
, LOG_WARNING
, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
1568 } else if (value
> UINT32_MAX
) {
1569 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
1572 j
->start_interval
= value
;
1574 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, value
, j
) != -1);
1577 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
1578 j
->seatbelt_flags
= value
;
1584 job_log(j
, LOG_WARNING
, "Unknown key for integer: %s", key
);
1590 job_import_opaque(job_t j
__attribute__((unused
)),
1591 const char *key
, launch_data_t value
__attribute__((unused
)))
1597 if (strcasecmp(key
, LAUNCH_JOBKEY_QUARANTINEDATA
) == 0) {
1598 size_t tmpsz
= launch_data_get_opaque_size(value
);
1600 if (job_assumes(j
, j
->quarantine_data
= malloc(tmpsz
))) {
1601 memcpy(j
->quarantine_data
, launch_data_get_opaque(value
), tmpsz
);
1602 j
->quarantine_data_sz
= tmpsz
;
1613 policy_setup(launch_data_t obj
, const char *key
, void *context
)
1616 bool found_key
= false;
1621 if (strcasecmp(key
, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS
) == 0) {
1622 j
->deny_job_creation
= launch_data_get_bool(obj
);
1630 if (unlikely(!found_key
)) {
1631 job_log(j
, LOG_WARNING
, "Unknown policy: %s", key
);
1636 job_import_dictionary(job_t j
, const char *key
, launch_data_t value
)
1643 if (strcasecmp(key
, LAUNCH_JOBKEY_POLICIES
) == 0) {
1644 launch_data_dict_iterate(value
, policy_setup
, j
);
1649 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
1650 launch_data_dict_iterate(value
, semaphoreitem_setup
, j
);
1655 if (strcasecmp(key
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
) == 0) {
1656 j
->inetcompat
= true;
1657 j
->abandon_pg
= true;
1658 if ((tmp
= launch_data_dict_lookup(value
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
))) {
1659 j
->inetcompat_wait
= launch_data_get_bool(tmp
);
1665 if (strcasecmp(key
, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES
) == 0) {
1666 launch_data_dict_iterate(value
, envitem_setup
, j
);
1671 if (strcasecmp(key
, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES
) == 0) {
1672 j
->importing_global_env
= true;
1673 launch_data_dict_iterate(value
, envitem_setup
, j
);
1674 j
->importing_global_env
= false;
1679 if (strcasecmp(key
, LAUNCH_JOBKEY_SOCKETS
) == 0) {
1680 launch_data_dict_iterate(value
, socketgroup_setup
, j
);
1681 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
1682 calendarinterval_new_from_obj(j
, value
);
1683 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SOFTRESOURCELIMITS
) == 0) {
1684 launch_data_dict_iterate(value
, limititem_setup
, j
);
1686 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
1687 launch_data_dict_iterate(value
, seatbelt_setup_flags
, j
);
1693 if (strcasecmp(key
, LAUNCH_JOBKEY_HARDRESOURCELIMITS
) == 0) {
1694 j
->importing_hard_limits
= true;
1695 launch_data_dict_iterate(value
, limititem_setup
, j
);
1696 j
->importing_hard_limits
= false;
1701 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHSERVICES
) == 0) {
1702 launch_data_dict_iterate(value
, machservice_setup
, j
);
1703 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACHSERVICELOOKUPPOLICIES
) == 0) {
1704 launch_data_dict_iterate(value
, mspolicy_setup
, j
);
1708 job_log(j
, LOG_WARNING
, "Unknown key for dictionary: %s", key
);
1714 job_import_array(job_t j
, const char *key
, launch_data_t value
)
1716 size_t i
, value_cnt
= launch_data_array_get_count(value
);
1722 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
) == 0) {
1728 if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
1730 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
1732 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
1733 job_log(j
, LOG_NOTICE
, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
1739 if (strcasecmp(key
, LAUNCH_JOBKEY_QUEUEDIRECTORIES
) == 0) {
1740 for (i
= 0; i
< value_cnt
; i
++) {
1741 str
= launch_data_get_string(launch_data_array_get_index(value
, i
));
1742 if (job_assumes(j
, str
!= NULL
)) {
1743 semaphoreitem_new(j
, DIR_NOT_EMPTY
, str
);
1751 if (strcasecmp(key
, LAUNCH_JOBKEY_WATCHPATHS
) == 0) {
1752 for (i
= 0; i
< value_cnt
; i
++) {
1753 str
= launch_data_get_string(launch_data_array_get_index(value
, i
));
1754 if (job_assumes(j
, str
!= NULL
)) {
1755 semaphoreitem_new(j
, PATH_CHANGES
, str
);
1762 if (strcasecmp(key
, LAUNCH_JOBKEY_BONJOURFDS
) == 0) {
1763 socketgroup_setup(value
, LAUNCH_JOBKEY_BONJOURFDS
, j
);
1764 } else if (strcasecmp(key
, LAUNCH_JOBKEY_BINARYORDERPREFERENCE
) == 0) {
1765 if (job_assumes(j
, j
->j_binpref
= malloc(value_cnt
* sizeof(*j
->j_binpref
)))) {
1766 j
->j_binpref_cnt
= value_cnt
;
1767 for (i
= 0; i
< value_cnt
; i
++) {
1768 j
->j_binpref
[i
] = launch_data_get_integer(launch_data_array_get_index(value
, i
));
1775 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
1776 for (i
= 0; i
< value_cnt
; i
++) {
1777 calendarinterval_new_from_obj(j
, launch_data_array_get_index(value
, i
));
1782 job_log(j
, LOG_WARNING
, "Unknown key for array: %s", key
);
1788 job_import_keys(launch_data_t obj
, const char *key
, void *context
)
1791 launch_data_type_t kind
;
1797 kind
= launch_data_get_type(obj
);
1800 case LAUNCH_DATA_BOOL
:
1801 job_import_bool(j
, key
, launch_data_get_bool(obj
));
1803 case LAUNCH_DATA_STRING
:
1804 job_import_string(j
, key
, launch_data_get_string(obj
));
1806 case LAUNCH_DATA_INTEGER
:
1807 job_import_integer(j
, key
, launch_data_get_integer(obj
));
1809 case LAUNCH_DATA_DICTIONARY
:
1810 job_import_dictionary(j
, key
, obj
);
1812 case LAUNCH_DATA_ARRAY
:
1813 job_import_array(j
, key
, obj
);
1815 case LAUNCH_DATA_OPAQUE
:
1816 job_import_opaque(j
, key
, obj
);
1819 job_log(j
, LOG_WARNING
, "Unknown value type '%d' for key: %s", kind
, key
);
1825 jobmgr_import2(jobmgr_t jm
, launch_data_t pload
)
1827 launch_data_t tmp
, ldpa
;
1828 const char *label
= NULL
, *prog
= NULL
;
1829 const char **argv
= NULL
;
1832 if (pload
== NULL
) {
1837 if (launch_data_get_type(pload
) != LAUNCH_DATA_DICTIONARY
) {
1842 if (!(tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LABEL
))) {
1847 if (launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
) {
1852 if (!(label
= launch_data_get_string(tmp
))) {
1857 if ((tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAM
)) &&
1858 (launch_data_get_type(tmp
) == LAUNCH_DATA_STRING
)) {
1859 prog
= launch_data_get_string(tmp
);
1862 if ((ldpa
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
))) {
1865 if (launch_data_get_type(ldpa
) != LAUNCH_DATA_ARRAY
) {
1870 c
= launch_data_array_get_count(ldpa
);
1872 argv
= alloca((c
+ 1) * sizeof(char *));
1874 for (i
= 0; i
< c
; i
++) {
1875 tmp
= launch_data_array_get_index(ldpa
, i
);
1877 if (launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
) {
1882 argv
[i
] = launch_data_get_string(tmp
);
1888 if ((j
= job_find(label
)) != NULL
) {
1891 } else if (!jobmgr_label_test(jm
, label
)) {
1896 if ((j
= job_new(jm
, label
, prog
, argv
))) {
1897 launch_data_dict_iterate(pload
, job_import_keys
, j
);
1904 jobmgr_label_test(jobmgr_t jm
, const char *str
)
1906 char *endstr
= NULL
;
1909 if (str
[0] == '\0') {
1910 jobmgr_log(jm
, LOG_ERR
, "Empty job labels are not allowed");
1914 for (ptr
= str
; *ptr
; ptr
++) {
1915 if (iscntrl(*ptr
)) {
1916 jobmgr_log(jm
, LOG_ERR
, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr
- str
, *ptr
);
1921 strtoll(str
, &endstr
, 0);
1923 if (str
!= endstr
) {
1924 jobmgr_log(jm
, LOG_ERR
, "Job labels are not allowed to begin with numbers: %s", str
);
1928 if ((strncasecmp(str
, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
1929 (strncasecmp(str
, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
1930 jobmgr_log(jm
, LOG_ERR
, "Job labels are not allowed to use a reserved prefix: %s", str
);
1938 job_find(const char *label
)
1942 LIST_FOREACH(ji
, &label_hash
[hash_label(label
)], label_hash_sle
) {
1943 if (ji
->removal_pending
) {
1944 continue; /* 5351245 */
1945 } else if (ji
->mgr
->shutting_down
) {
1946 continue; /* 5488633 */
1949 if (strcmp(ji
->label
, label
) == 0) {
1959 jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
)
1963 LIST_FOREACH(ji
, &jm
->active_jobs
[ACTIVE_JOB_HASH(p
)], pid_hash_sle
) {
1971 } else if (create_anon
) {
1972 return job_new_anonymous(jm
, p
);
1979 job_mig_intran2(jobmgr_t jm
, mach_port_t mport
, pid_t upid
)
1984 if (jm
->jm_port
== mport
) {
1985 jobmgr_assumes(jm
, (ji
= jobmgr_find_by_pid(jm
, upid
, true)) != NULL
);
1989 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
1992 if ((jr
= job_mig_intran2(jmi
, mport
, upid
))) {
1997 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
1998 if (ji
->j_port
== mport
) {
/*
 * MIG intran routine: map an incoming request port to a job_t.
 *
 * Resolves via job_mig_intran2() using the caller's audit-derived PID.
 * On failure, logs a diagnostic that includes the caller's process name
 * (fetched via sysctl KERN_PROC_PID) to aid debugging.
 *
 * NOTE(review): the "mib[3] = ldc.pid;" assignment before the sysctl was
 * elided in the extraction and is reconstructed — the mib template ends
 * in 0 and must be patched with the PID for KERN_PROC_PID to work.
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred ldc;
	job_t jr;

	runtime_get_caller_creds(&ldc);

	jr = job_mig_intran2(root_jobmgr, p, ldc.pid);

	if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		mib[3] = ldc.pid;

		if (jobmgr_assumes(root_jobmgr, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) &&
		    jobmgr_assumes(root_jobmgr, len == sizeof(kp))) {
			jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__,
					ldc.pid, ldc.uid, ldc.euid, p, kp.kp_proc.p_comm);
		}
	}

	return jr;
}
2032 job_find_by_service_port(mach_port_t p
)
2034 struct machservice
*ms
;
2036 LIST_FOREACH(ms
, &port_hash
[HASH_PORT(p
)], port_hash_sle
) {
2037 if (ms
->recv
&& (ms
->port
== p
)) {
2046 job_mig_destructor(job_t j
)
2051 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
2054 if (j
&& j
!= workaround_5477111
&& j
->unload_at_mig_return
) {
2055 job_log(j
, LOG_NOTICE
, "Unloading PID %u at MIG return.", j
->p
);
2059 workaround_5477111
= NULL
;
2061 calendarinterval_sanity_check();
2065 job_export_all2(jobmgr_t jm
, launch_data_t where
)
2070 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
2071 job_export_all2(jmi
, where
);
2074 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
2077 if (jobmgr_assumes(jm
, (tmp
= job_export(ji
)) != NULL
)) {
2078 launch_data_dict_insert(where
, tmp
, ji
->label
);
2084 job_export_all(void)
2086 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
2088 if (launchd_assumes(resp
!= NULL
)) {
2089 job_export_all2(root_jobmgr
, resp
);
/*
 * After a job dies, log any surviving processes that still share its
 * process group (candidates for cleanup via killpg in job_reap).
 *
 * Uses sysctl KERN_PROC_PGRP with the dead job's PID as the PGID to
 * enumerate group members; skips the job itself and sanity-checks that
 * PIDs 0/1 never show up. On embedded builds this is gated behind the
 * Apple-internal flag.
 *
 * NOTE(review): reconstructed from an elided extraction; the
 * skip-self continue and the free()/early-return error paths were
 * inferred — confirm against canonical source.
 */
void
job_log_stray_pg(job_t j)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PGRP, j->p };
	size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;

#if TARGET_OS_EMBEDDED
	if (!do_apple_internal_magic) {
		return;
	}
#endif

	if (!job_assumes(j, (kp = malloc(len)) != NULL)) {
		return;
	}
	if (!job_assumes(j, sysctl(mib, 4, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = kp[i].kp_proc.p_pid;
		pid_t pp_i = kp[i].kp_eproc.e_ppid;
		const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
		const char *n = kp[i].kp_proc.p_comm;

		if (p_i == j->p) {
			continue;
		} else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
			continue;
		}

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u %s", z, p_i, pp_i, n);
	}

out:
	free(kp);
}
2142 job_log(j
, LOG_DEBUG
, "Reaping");
2144 if (j
->weird_bootstrap
) {
2145 mach_msg_size_t mxmsgsz
= sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem
);
2147 if (job_mig_protocol_vproc_subsystem
.maxsize
> mxmsgsz
) {
2148 mxmsgsz
= job_mig_protocol_vproc_subsystem
.maxsize
;
2151 job_assumes(j
, runtime_add_mport(j
->mgr
->jm_port
, protocol_vproc_server
, mxmsgsz
) == KERN_SUCCESS
);
2152 j
->weird_bootstrap
= false;
2155 if (j
->log_redirect_fd
&& !j
->wait4pipe_eof
) {
2156 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
2157 j
->log_redirect_fd
= 0;
2161 job_assumes(j
, runtime_close(j
->forkfd
) != -1);
2167 memset(&ru
, 0, sizeof(ru
));
2170 * The job is dead. While the PID/PGID is still known to be
2171 * valid, try to kill abandoned descendant processes.
2173 job_log_stray_pg(j
);
2174 if (!j
->abandon_pg
) {
2175 job_assumes(j
, runtime_killpg(j
->p
, SIGTERM
) != -1 || errno
== ESRCH
);
2181 * The current implementation of ptrace() causes the traced process to
2182 * be abducted away from the true parent and adopted by the tracer.
2184 * Once the tracing process relinquishes control, the kernel then
2185 * restores the true parent/child relationship.
2187 * Unfortunately, the wait*() family of APIs is unaware of the temporarily
2188 * data structures changes, and they return an error if reality hasn't
2189 * been restored by the time they are called.
2191 if (!job_assumes(j
, wait4(j
->p
, &status
, 0, &ru
) != -1)) {
2192 job_log(j
, LOG_NOTICE
, "Working around 5020256. Assuming the job crashed.");
2194 status
= W_EXITCODE(0, SIGSEGV
);
2195 memset(&ru
, 0, sizeof(ru
));
2199 if (j
->exit_timeout
) {
2200 kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
2204 total_anon_children
--;
2209 LIST_REMOVE(j
, pid_hash_sle
);
2211 if (j
->wait_reply_port
) {
2212 job_log(j
, LOG_DEBUG
, "MPM wait reply being sent");
2213 job_assumes(j
, job_mig_wait_reply(j
->wait_reply_port
, 0, status
) == 0);
2214 j
->wait_reply_port
= MACH_PORT_NULL
;
2217 if (j
->sent_sigterm_time
) {
2218 uint64_t td_sec
, td_usec
, td
= (mach_absolute_time() - j
->sent_sigterm_time
) * tbi
.numer
/ tbi
.denom
;
2220 td_sec
= td
/ NSEC_PER_SEC
;
2221 td_usec
= (td
% NSEC_PER_SEC
) / NSEC_PER_USEC
;
2223 job_log(j
, LOG_INFO
, "Exited %lld.%06lld seconds after %s was sent",
2224 td_sec
, td_usec
, signal_to_C_name(j
->sent_sigkill
? SIGKILL
: SIGTERM
));
2227 #if DO_RUSAGE_SUMMATION
2228 timeradd(&ru
.ru_utime
, &j
->ru
.ru_utime
, &j
->ru
.ru_utime
);
2229 timeradd(&ru
.ru_stime
, &j
->ru
.ru_stime
, &j
->ru
.ru_stime
);
2230 j
->ru
.ru_maxrss
+= ru
.ru_maxrss
;
2231 j
->ru
.ru_ixrss
+= ru
.ru_ixrss
;
2232 j
->ru
.ru_idrss
+= ru
.ru_idrss
;
2233 j
->ru
.ru_isrss
+= ru
.ru_isrss
;
2234 j
->ru
.ru_minflt
+= ru
.ru_minflt
;
2235 j
->ru
.ru_majflt
+= ru
.ru_majflt
;
2236 j
->ru
.ru_nswap
+= ru
.ru_nswap
;
2237 j
->ru
.ru_inblock
+= ru
.ru_inblock
;
2238 j
->ru
.ru_oublock
+= ru
.ru_oublock
;
2239 j
->ru
.ru_msgsnd
+= ru
.ru_msgsnd
;
2240 j
->ru
.ru_msgrcv
+= ru
.ru_msgrcv
;
2241 j
->ru
.ru_nsignals
+= ru
.ru_nsignals
;
2242 j
->ru
.ru_nvcsw
+= ru
.ru_nvcsw
;
2243 j
->ru
.ru_nivcsw
+= ru
.ru_nivcsw
;
2246 if (WIFEXITED(status
) && WEXITSTATUS(status
) != 0) {
2247 job_log(j
, LOG_WARNING
, "Exited with exit code: %d", WEXITSTATUS(status
));
2250 if (WIFSIGNALED(status
)) {
2251 int s
= WTERMSIG(status
);
2252 if (SIGKILL
== s
|| SIGTERM
== s
) {
2253 job_log(j
, LOG_NOTICE
, "Exited: %s", strsignal(s
));
2255 job_log(j
, LOG_WARNING
, "Exited abnormally: %s", strsignal(s
));
2259 if (j
->hopefully_exits_first
) {
2260 j
->mgr
->hopefully_first_cnt
--;
2261 } else if (!j
->anonymous
&& !j
->hopefully_exits_last
) {
2262 j
->mgr
->normal_active_cnt
--;
2264 j
->last_exit_status
= status
;
2265 j
->sent_sigkill
= false;
2266 j
->lastlookup
= NULL
;
2267 j
->lastlookup_gennum
= 0;
2271 * We need to someday evaluate other jobs and find those who wish to track the
2272 * active/inactive state of this job. The current job_dispatch() logic makes
2273 * this messy, given that jobs can be deleted at dispatch.
2278 jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
)
2283 if (jm
->shutting_down
) {
2287 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
2288 jobmgr_dispatch_all(jmi
, newmounthack
);
2291 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
2292 if (newmounthack
&& ji
->start_on_mount
) {
2293 ji
->start_pending
= true;
2296 job_dispatch(ji
, false);
2301 job_dispatch(job_t j
, bool kickstart
)
2304 * The whole job removal logic needs to be consolidated. The fact that
2305 * a job can be removed from just about anywhere makes it easy to have
2306 * stale pointers left behind somewhere on the stack that might get
2307 * used after the deallocation. In particular, during job iteration.
2309 * This is a classic example. The act of dispatching a job may delete it.
2311 if (!job_active(j
)) {
2312 if (job_useless(j
)) {
2315 } else if (kickstart
|| job_keepalive(j
)) {
2323 * Path checking and monitoring is really racy right now.
2324 * We should clean this up post Leopard.
2326 if (job_keepalive(j
)) {
2331 job_log(j
, LOG_DEBUG
, "Tried to dispatch an already active job.");
2338 job_log_stdouterr2(job_t j
, const char *msg
, ...)
2340 struct runtime_syslog_attr attr
= { j
->label
, j
->label
, j
->mgr
->name
, LOG_NOTICE
, getuid(), j
->p
, j
->p
};
2344 runtime_vsyslog(&attr
, msg
, ap
);
2349 job_log_stdouterr(job_t j
)
2351 char *msg
, *bufindex
, *buf
= malloc(BIG_PIPE_SIZE
+ 1);
2352 bool close_log_redir
= false;
2355 if (!job_assumes(j
, buf
!= NULL
)) {
2361 rsz
= read(j
->log_redirect_fd
, buf
, BIG_PIPE_SIZE
);
2364 job_log(j
, LOG_DEBUG
, "Standard out/error pipe closed");
2365 close_log_redir
= true;
2366 } else if (!job_assumes(j
, rsz
!= -1)) {
2367 close_log_redir
= true;
2371 while ((msg
= strsep(&bufindex
, "\n\r"))) {
2373 job_log_stdouterr2(j
, "%s", msg
);
2380 if (close_log_redir
) {
2381 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
2382 j
->log_redirect_fd
= 0;
2383 job_dispatch(j
, false);
/*
 * Forcibly kill a job with SIGKILL and arm a follow-up timer.
 *
 * No-op for jobs without a PID or anonymous jobs (we don't own their
 * lifetime). Marks sent_sigkill so the exit-timeout callback knows the
 * kill was already delivered, and re-arms the exit_timeout kevent for
 * LAUNCHD_SIGKILL_TIMER seconds to detect a process that will not die.
 *
 * NOTE(review): this function's own header line was elided by the
 * extraction; the name job_kill is inferred from the body and callers —
 * confirm against canonical source.
 */
void
job_kill(job_t j)
{
	if (!j->p || j->anonymous) {
		return;
	}

	job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);

	j->sent_sigkill = true;

	job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
				EV_ADD, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j) != -1);

	job_log(j, LOG_DEBUG, "Sent SIGKILL signal.");
}
/*
 * EVFILT_PROC kevent handler for a job's process.
 *
 * NOTE_EXEC on an anonymous job: the process image changed, so rename
 * the job's label to "<job ptr>.<new p_comm>" and rehash it. (The label
 * buffer was allocated with MAXCOMLEN slack in job_new() for exactly
 * this purpose, so the strcpy fits.)
 * NOTE_FORK: debug log only. NOTE_EXIT: reap the process and either
 * remove (anonymous) or re-dispatch the job.
 *
 * NOTE(review): the NOTE_EXIT interior (job_reap + anonymous-removal
 * split) was elided by the extraction and is reconstructed — confirm
 * against canonical source. The trailing NOTE_REAP sanity checks are
 * kept as visible, but the comment says they were disabled while
 * diagnosing rdar 5289559 — verify whether they were #if 0'd out.
 */
void
job_callback_proc(job_t j, int flags __attribute__((unused)), int fflags)
{
	if ((fflags & NOTE_EXEC) && j->anonymous) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		if (job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
			char newlabel[1000];

			snprintf(newlabel, sizeof(newlabel), "%p.%s", j, kp.kp_proc.p_comm);

			job_log(j, LOG_DEBUG, "Program changed. Updating the label to: %s", newlabel);

			LIST_REMOVE(j, label_hash_sle);
			strcpy((char *)j->label, newlabel);
			LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "Called fork()");
	}

	if (fflags & NOTE_EXIT) {
		job_reap(j);

		if (j->anonymous) {
			job_remove(j);
			j = NULL;
		} else {
			j = job_dispatch(j, false);
		}
	}

	/* NOTE_REAP sanity checking is disabled for now while we try and diagnose 5289559 */
	if (j && (fflags & NOTE_REAP)) {
		job_assumes(j, flags & EV_ONESHOT);
		job_assumes(j, flags & EV_EOF);

		job_assumes(j, j->p == 0);
	}
}
2452 job_callback_timer(job_t j
, void *ident
)
2455 job_dispatch(j
, true);
2456 } else if (&j
->semaphores
== ident
) {
2457 job_dispatch(j
, false);
2458 } else if (&j
->start_interval
== ident
) {
2459 j
->start_pending
= true;
2460 job_dispatch(j
, false);
2461 } else if (&j
->exit_timeout
== ident
) {
2462 if (j
->sent_sigkill
) {
2463 uint64_t td
= (mach_absolute_time() - j
->sent_sigterm_time
) * tbi
.numer
/ tbi
.denom
;
2466 td
-= j
->exit_timeout
;
2468 job_log(j
, LOG_ERR
, "Did not die after sending SIGKILL %llu seconds ago...", td
);
2470 job_force_sampletool(j
);
2471 if (j
->debug_before_kill
) {
2472 job_log(j
, LOG_NOTICE
, "Exit timeout elapsed. Entering the kernel debugger.");
2473 job_assumes(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
) == KERN_SUCCESS
);
2475 job_log(j
, LOG_WARNING
, "Exit timeout elapsed (%u seconds). Killing.", j
->exit_timeout
);
2479 job_assumes(j
, false);
2484 job_callback_read(job_t j
, int ident
)
2486 if (ident
== j
->log_redirect_fd
) {
2487 job_log_stdouterr(j
);
2489 socketgroup_callback(j
);
/*
 * Deliver a process-exit kevent to whichever job (in 'jm' or any
 * sub-manager) owns the PID in kev->ident. Recurses depth-first; the
 * false create_anon means unknown PIDs are simply ignored.
 */
void
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
{
	jobmgr_t submgr;
	job_t owner;

	SLIST_FOREACH(submgr, &jm->submgrs, sle) {
		jobmgr_reap_bulk(submgr, kev);
	}

	if ((owner = jobmgr_find_by_pid(jm, kev->ident, false))) {
		job_callback(owner, kev);
	}
}
/*
 * Top-level kevent handler for a job manager.
 *
 * EVFILT_PROC: fan the exit out to the owning job, then run a garbage
 * collection pass from the root. EVFILT_FS: a mount triggers a
 * StartOnMount dispatch pass plus semaphore re-evaluation. EVFILT_TIMER:
 * only the sorted calendar-events timer is expected. Unknown filters or
 * signal idents trip an assumes().
 *
 * NOTE(review): every case label here (EVFILT_PROC/SIGNAL/FS/TIMER and
 * the SIGTERM/SIGUSR1 idents) was elided by the extraction and is
 * reconstructed from the visible branch bodies — confirm against the
 * canonical source before relying on the signal mapping.
 */
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	jobmgr_t jm = obj;

	switch (kev->filter) {
	case EVFILT_PROC:
		jobmgr_reap_bulk(jm, kev);
		if (launchd_assumes(root_jobmgr != NULL)) {
			root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		}
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		default:
			return (void)jobmgr_assumes(jm, false);
		}
		break;
	case EVFILT_FS:
		if (kev->fflags & VQ_MOUNT) {
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		if (jobmgr_assumes(jm, kev->ident == (uintptr_t)&sorted_calendar_events)) {
			calendarinterval_callback();
		}
		break;
	default:
		return (void)jobmgr_assumes(jm, false);
	}
}
/*
 * Top-level kevent handler for an individual job: route the event to
 * the filter-specific handler (process, timer, vnode semaphore, fd
 * read, or a Mach port demand that kickstarts the job).
 */
void
job_callback(void *obj, struct kevent *kev)
{
	job_t j = obj;

	job_log(j, LOG_DEBUG, "Dispatching kevent callback.");

	switch (kev->filter) {
	case EVFILT_PROC:
		return job_callback_proc(j, kev->flags, kev->fflags);
	case EVFILT_TIMER:
		return job_callback_timer(j, (void *)kev->ident);
	case EVFILT_VNODE:
		return semaphoreitem_callback(j, kev);
	case EVFILT_READ:
		return job_callback_read(j, kev->ident);
	case EVFILT_MACHPORT:
		return (void)job_dispatch(j, true);
	default:
		return (void)job_assumes(j, false);
	}
}
2573 uint64_t td
, tnow
= mach_absolute_time();
2580 u_int proc_fflags
= /* NOTE_EXEC|NOTE_FORK| */ NOTE_EXIT
/* |NOTE_REAP */;
2582 if (!job_assumes(j
, j
->mgr
!= NULL
)) {
2586 if (job_active(j
)) {
2587 job_log(j
, LOG_DEBUG
, "Already started");
2591 job_assumes(j
, tnow
> j
->start_time
);
2594 * Some users adjust the wall-clock and then expect software to not notice.
2595 * Therefore, launchd must use an absolute clock instead of gettimeofday()
2596 * or time() wherever possible.
2598 td
= (tnow
- j
->start_time
) * tbi
.numer
/ tbi
.denom
;
2601 if (j
->start_time
&& (td
< j
->min_run_time
) && !j
->legacy_mach_job
&& !j
->inetcompat
) {
2602 time_t respawn_delta
= j
->min_run_time
- (uint32_t)td
;
2605 * We technically should ref-count throttled jobs to prevent idle exit,
2606 * but we're not directly tracking the 'throttled' state at the moment.
2609 job_log(j
, LOG_WARNING
, "Throttling respawn: Will start in %ld seconds", respawn_delta
);
2610 job_assumes(j
, kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, respawn_delta
, j
) != -1);
2615 j
->sent_sigterm_time
= 0;
2617 if (!j
->legacy_mach_job
) {
2618 sipc
= (!SLIST_EMPTY(&j
->sockets
) || !SLIST_EMPTY(&j
->machservices
));
2619 #if TARGET_OS_EMBEDDED
2620 if (j
->username
&& strcmp(j
->username
, "mobile") == 0 && strncmp(j
->label
, "com.apple.", strlen("com.apple.")) != 0) {
2626 j
->checkedin
= false;
2629 job_assumes(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, spair
) != -1);
2632 job_assumes(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, execspair
) != -1);
2634 if (!j
->legacy_mach_job
&& job_assumes(j
, pipe(oepair
) != -1)) {
2635 j
->log_redirect_fd
= _fd(oepair
[0]);
2636 job_assumes(j
, fcntl(j
->log_redirect_fd
, F_SETFL
, O_NONBLOCK
) != -1);
2637 job_assumes(j
, kevent_mod(j
->log_redirect_fd
, EVFILT_READ
, EV_ADD
, 0, 0, j
) != -1);
2640 j
->start_time
= tnow
;
2642 switch (c
= runtime_fork(j
->weird_bootstrap
? j
->j_port
: j
->mgr
->jm_port
)) {
2644 job_log_error(j
, LOG_ERR
, "fork() failed, will try again in one second");
2645 job_assumes(j
, runtime_close(execspair
[0]) == 0);
2646 job_assumes(j
, runtime_close(execspair
[1]) == 0);
2648 job_assumes(j
, runtime_close(spair
[0]) == 0);
2649 job_assumes(j
, runtime_close(spair
[1]) == 0);
2651 if (!j
->legacy_mach_job
) {
2652 job_assumes(j
, runtime_close(oepair
[0]) != -1);
2653 job_assumes(j
, runtime_close(oepair
[1]) != -1);
2654 j
->log_redirect_fd
= 0;
2656 job_assumes(j
, kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, 1, j
) != -1);
2660 if (_vproc_post_fork_ping()) {
2661 _exit(EXIT_FAILURE
);
2663 if (!j
->legacy_mach_job
) {
2664 job_assumes(j
, dup2(oepair
[1], STDOUT_FILENO
) != -1);
2665 job_assumes(j
, dup2(oepair
[1], STDERR_FILENO
) != -1);
2666 job_assumes(j
, runtime_close(oepair
[1]) != -1);
2668 job_assumes(j
, runtime_close(execspair
[0]) == 0);
2669 /* wait for our parent to say they've attached a kevent to us */
2670 read(_fd(execspair
[1]), &c
, sizeof(c
));
2673 job_assumes(j
, runtime_close(spair
[0]) == 0);
2674 snprintf(nbuf
, sizeof(nbuf
), "%d", spair
[1]);
2675 setenv(LAUNCHD_TRUSTED_FD_ENV
, nbuf
, 1);
2680 job_log(j
, LOG_DEBUG
, "Started as PID: %u", c
);
2682 j
->start_pending
= false;
2686 LIST_INSERT_HEAD(&j
->mgr
->active_jobs
[ACTIVE_JOB_HASH(c
)], j
, pid_hash_sle
);
2688 if (JOB_BOOTCACHE_HACK_CHECK(j
)) {
2689 did_first_per_user_launchd_BootCache_hack
= true;
2692 if (!j
->legacy_mach_job
) {
2693 job_assumes(j
, runtime_close(oepair
[1]) != -1);
2696 if (j
->hopefully_exits_first
) {
2697 j
->mgr
->hopefully_first_cnt
++;
2698 } else if (!j
->hopefully_exits_last
) {
2699 j
->mgr
->normal_active_cnt
++;
2701 j
->forkfd
= _fd(execspair
[0]);
2702 job_assumes(j
, runtime_close(execspair
[1]) == 0);
2704 job_assumes(j
, runtime_close(spair
[1]) == 0);
2705 ipc_open(_fd(spair
[0]), j
);
2707 if (job_assumes(j
, kevent_mod(c
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
? root_jobmgr
: j
->mgr
) != -1)) {
2713 if (!j
->stall_before_exec
) {
2721 do_first_per_user_launchd_hack(void)
2723 char *bcct_tool
[] = { "/usr/sbin/BootCacheControl", "tag", NULL
};
2727 if (launchd_assumes((bcp
= vfork()) != -1)) {
2729 execve(bcct_tool
[0], bcct_tool
, environ
);
2730 _exit(EXIT_FAILURE
);
2732 launchd_assumes(waitpid(bcp
, &dummystatus
, 0) != -1);
2738 job_start_child(job_t j
)
2740 const char *file2exec
= "/usr/libexec/launchproxy";
2742 posix_spawnattr_t spattr
;
2743 int gflags
= GLOB_NOSORT
|GLOB_NOCHECK
|GLOB_TILDE
|GLOB_DOOFFS
;
2746 short spflags
= POSIX_SPAWN_SETEXEC
;
2747 size_t binpref_out_cnt
= 0;
2750 if (JOB_BOOTCACHE_HACK_CHECK(j
)) {
2751 do_first_per_user_launchd_hack();
2754 job_assumes(j
, posix_spawnattr_init(&spattr
) == 0);
2756 job_setup_attributes(j
);
2758 if (j
->argv
&& j
->globargv
) {
2760 for (i
= 0; i
< j
->argc
; i
++) {
2762 gflags
|= GLOB_APPEND
;
2764 if (glob(j
->argv
[i
], gflags
, NULL
, &g
) != 0) {
2765 job_log_error(j
, LOG_ERR
, "glob(\"%s\")", j
->argv
[i
]);
2769 g
.gl_pathv
[0] = (char *)file2exec
;
2770 argv
= (const char **)g
.gl_pathv
;
2771 } else if (j
->argv
) {
2772 argv
= alloca((j
->argc
+ 2) * sizeof(char *));
2773 argv
[0] = file2exec
;
2774 for (i
= 0; i
< j
->argc
; i
++) {
2775 argv
[i
+ 1] = j
->argv
[i
];
2779 argv
= alloca(3 * sizeof(char *));
2780 argv
[0] = file2exec
;
2785 if (!j
->inetcompat
) {
2789 if (j
->wait4debugger
) {
2790 job_log(j
, LOG_WARNING
, "Spawned and waiting for the debugger to attach before continuing...");
2791 spflags
|= POSIX_SPAWN_START_SUSPENDED
;
2794 job_assumes(j
, posix_spawnattr_setflags(&spattr
, spflags
) == 0);
2796 if (j
->j_binpref_cnt
) {
2797 job_assumes(j
, posix_spawnattr_setbinpref_np(&spattr
, j
->j_binpref_cnt
, j
->j_binpref
, &binpref_out_cnt
) == 0);
2798 job_assumes(j
, binpref_out_cnt
== j
->j_binpref_cnt
);
2802 if (j
->quarantine_data
) {
2805 if (job_assumes(j
, qp
= qtn_proc_alloc())) {
2806 if (job_assumes(j
, qtn_proc_init_with_data(qp
, j
->quarantine_data
, j
->quarantine_data_sz
) == 0)) {
2807 job_assumes(j
, qtn_proc_apply_to_self(qp
) == 0);
2814 if (j
->seatbelt_profile
) {
2815 char *seatbelt_err_buf
= NULL
;
2817 if (!job_assumes(j
, sandbox_init(j
->seatbelt_profile
, j
->seatbelt_flags
, &seatbelt_err_buf
) != -1)) {
2818 if (seatbelt_err_buf
) {
2819 job_log(j
, LOG_ERR
, "Sandbox failed to init: %s", seatbelt_err_buf
);
2827 errno
= posix_spawn(&junk_pid
, j
->inetcompat
? file2exec
: j
->prog
, NULL
, &spattr
, (char *const*)argv
, environ
);
2828 job_log_error(j
, LOG_ERR
, "posix_spawn(\"%s\", ...)", j
->prog
);
2830 errno
= posix_spawnp(&junk_pid
, j
->inetcompat
? file2exec
: argv
[0], NULL
, &spattr
, (char *const*)argv
, environ
);
2831 job_log_error(j
, LOG_ERR
, "posix_spawnp(\"%s\", ...)", argv
[0]);
2837 _exit(EXIT_FAILURE
);
2841 jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
)
2847 if (jm
->parentmgr
) {
2848 jobmgr_export_env_from_other_jobs(jm
->parentmgr
, dict
);
2850 char **tmpenviron
= environ
;
2851 for (; *tmpenviron
; tmpenviron
++) {
2853 launch_data_t s
= launch_data_alloc(LAUNCH_DATA_STRING
);
2854 launch_data_set_string(s
, strchr(*tmpenviron
, '=') + 1);
2855 strncpy(envkey
, *tmpenviron
, sizeof(envkey
));
2856 *(strchr(envkey
, '=')) = '\0';
2857 launch_data_dict_insert(dict
, s
, envkey
);
2861 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
2862 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
2863 if ((tmp
= launch_data_new_string(ei
->value
))) {
2864 launch_data_dict_insert(dict
, tmp
, ei
->key
);
2871 jobmgr_setup_env_from_other_jobs(jobmgr_t jm
)
2876 if (jm
->parentmgr
) {
2877 jobmgr_setup_env_from_other_jobs(jm
->parentmgr
);
2880 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
2881 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
2882 setenv(ei
->key
, ei
->value
, 1);
2888 job_find_and_blame_pids_with_weird_uids(job_t j
)
2890 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_ALL
};
2891 size_t i
, kp_cnt
, len
= sizeof(struct kinfo_proc
) * get_kern_max_proc();
2892 struct kinfo_proc
*kp
;
2893 uid_t u
= j
->mach_uid
;
2895 #if TARGET_OS_EMBEDDED
2896 if (!do_apple_internal_magic
) {
2902 if (!job_assumes(j
, kp
!= NULL
)) {
2905 if (!job_assumes(j
, sysctl(mib
, 3, kp
, &len
, NULL
, 0) != -1)) {
2909 kp_cnt
= len
/ sizeof(struct kinfo_proc
);
2911 for (i
= 0; i
< kp_cnt
; i
++) {
2912 uid_t i_euid
= kp
[i
].kp_eproc
.e_ucred
.cr_uid
;
2913 uid_t i_uid
= kp
[i
].kp_eproc
.e_pcred
.p_ruid
;
2914 uid_t i_svuid
= kp
[i
].kp_eproc
.e_pcred
.p_svuid
;
2915 pid_t i_pid
= kp
[i
].kp_proc
.p_pid
;
2917 if (i_euid
!= u
&& i_uid
!= u
&& i_svuid
!= u
) {
2921 job_log(j
, LOG_ERR
, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u",
2922 i_pid
, kp
[i
].kp_proc
.p_comm
, i_uid
, i_euid
, i_svuid
);
2924 /* Temporarily disabled due to 5423935 and 4946119. */
2926 /* Ask the accountless process to exit. */
2927 job_assumes(j
, runtime_kill(i_pid
, SIGTERM
) != -1);
2935 #if !TARGET_OS_EMBEDDED
2937 job_enable_audit_for_user(job_t j
, uid_t u
, char *name
)
2939 auditinfo_t auinfo
= {
2945 if (!job_assumes(j
, auditon(A_GETCOND
, &au_cond
, sizeof(long)) == 0)) {
2946 _exit(EXIT_FAILURE
);
2949 if (au_cond
!= AUC_NOAUDIT
) {
2950 if (!job_assumes(j
, au_user_mask(name
, &auinfo
.ai_mask
) == 0)) {
2951 _exit(EXIT_FAILURE
);
2952 } else if (!job_assumes(j
, setaudit(&auinfo
) == 0)) {
2953 _exit(EXIT_FAILURE
);
2960 job_postfork_become_user(job_t j
)
2962 char loginname
[2000];
2963 char tmpdirpath
[PATH_MAX
];
2964 char shellpath
[PATH_MAX
];
2965 char homedir
[PATH_MAX
];
2968 gid_t desired_gid
= -1;
2969 uid_t desired_uid
= -1;
2971 if (getuid() != 0) {
2976 * I contend that having UID == 0 and GID != 0 is of dubious value.
2977 * Nevertheless, this used to work in Tiger. See: 5425348
2979 if (j
->groupname
&& !j
->username
) {
2980 j
->username
= "root";
2984 if ((pwe
= getpwnam(j
->username
)) == NULL
) {
2985 job_log(j
, LOG_ERR
, "getpwnam(\"%s\") failed", j
->username
);
2986 _exit(EXIT_FAILURE
);
2988 } else if (j
->mach_uid
) {
2989 if ((pwe
= getpwuid(j
->mach_uid
)) == NULL
) {
2990 job_log(j
, LOG_ERR
, "getpwuid(\"%u\") failed", j
->mach_uid
);
2991 job_find_and_blame_pids_with_weird_uids(j
);
2992 _exit(EXIT_FAILURE
);
2999 * We must copy the results of getpw*().
3001 * Why? Because subsequent API calls may call getpw*() as a part of
3002 * their implementation. Since getpw*() returns a [now thread scoped]
3003 * global, we must therefore cache the results before continuing.
3006 desired_uid
= pwe
->pw_uid
;
3007 desired_gid
= pwe
->pw_gid
;
3009 strlcpy(shellpath
, pwe
->pw_shell
, sizeof(shellpath
));
3010 strlcpy(loginname
, pwe
->pw_name
, sizeof(loginname
));
3011 strlcpy(homedir
, pwe
->pw_dir
, sizeof(homedir
));
3013 if (pwe
->pw_expire
&& time(NULL
) >= pwe
->pw_expire
) {
3014 job_log(j
, LOG_ERR
, "Expired account");
3015 _exit(EXIT_FAILURE
);
3019 if (j
->username
&& strcmp(j
->username
, loginname
) != 0) {
3020 job_log(j
, LOG_WARNING
, "Suspicious setup: User \"%s\" maps to user: %s", j
->username
, loginname
);
3021 } else if (j
->mach_uid
&& (j
->mach_uid
!= desired_uid
)) {
3022 job_log(j
, LOG_WARNING
, "Suspicious setup: UID %u maps to UID %u", j
->mach_uid
, desired_uid
);
3028 if ((gre
= getgrnam(j
->groupname
)) == NULL
) {
3029 job_log(j
, LOG_ERR
, "getgrnam(\"%s\") failed", j
->groupname
);
3030 _exit(EXIT_FAILURE
);
3033 desired_gid
= gre
->gr_gid
;
3036 #if !TARGET_OS_EMBEDDED
3037 job_enable_audit_for_user(j
, desired_uid
, loginname
);
3040 if (!job_assumes(j
, setlogin(loginname
) != -1)) {
3041 _exit(EXIT_FAILURE
);
3044 if (!job_assumes(j
, setgid(desired_gid
) != -1)) {
3045 _exit(EXIT_FAILURE
);
3049 * The kernel team and the DirectoryServices team want initgroups()
3050 * called after setgid(). See 4616864 for more information.
3053 if (!j
->no_init_groups
) {
3054 if (!job_assumes(j
, initgroups(loginname
, desired_gid
) != -1)) {
3055 _exit(EXIT_FAILURE
);
3059 if (!job_assumes(j
, setuid(desired_uid
) != -1)) {
3060 _exit(EXIT_FAILURE
);
3063 r
= confstr(_CS_DARWIN_USER_TEMP_DIR
, tmpdirpath
, sizeof(tmpdirpath
));
3065 if (r
> 0 && r
< sizeof(tmpdirpath
)) {
3066 setenv("TMPDIR", tmpdirpath
, 0);
3069 setenv("SHELL", shellpath
, 0);
3070 setenv("HOME", homedir
, 0);
3071 setenv("USER", loginname
, 0);
3072 setenv("LOGNAME", loginname
, 0);
3076 job_setup_attributes(job_t j
)
3078 struct limititem
*li
;
3082 job_assumes(j
, setpriority(PRIO_PROCESS
, 0, j
->nice
) != -1);
3085 SLIST_FOREACH(li
, &j
->limits
, sle
) {
3088 if (!job_assumes(j
, getrlimit(li
->which
, &rl
) != -1)) {
3093 rl
.rlim_max
= li
->lim
.rlim_max
;
3096 rl
.rlim_cur
= li
->lim
.rlim_cur
;
3099 if (setrlimit(li
->which
, &rl
) == -1) {
3100 job_log_error(j
, LOG_WARNING
, "setrlimit()");
3104 if (!j
->inetcompat
&& j
->session_create
) {
3105 launchd_SessionCreate();
3108 if (j
->low_pri_io
) {
3109 job_assumes(j
, setiopolicy_np(IOPOL_TYPE_DISK
, IOPOL_SCOPE_PROCESS
, IOPOL_THROTTLE
) != -1);
3112 job_assumes(j
, chroot(j
->rootdir
) != -1);
3113 job_assumes(j
, chdir(".") != -1);
3116 job_postfork_become_user(j
);
3118 if (j
->workingdir
) {
3119 job_assumes(j
, chdir(j
->workingdir
) != -1);
3126 job_setup_fd(j
, STDOUT_FILENO
, j
->stdoutpath
, O_WRONLY
|O_APPEND
|O_CREAT
);
3127 job_setup_fd(j
, STDERR_FILENO
, j
->stderrpath
, O_WRONLY
|O_APPEND
|O_CREAT
);
3129 jobmgr_setup_env_from_other_jobs(j
->mgr
);
3131 SLIST_FOREACH(ei
, &j
->env
, sle
) {
3132 setenv(ei
->key
, ei
->value
, 1);
3136 * We'd like to call setsid() unconditionally, but we have reason to
3137 * believe that prevents launchd from being able to send signals to
3138 * setuid children. We'll settle for process-groups.
3140 if (getppid() != 1) {
3141 job_assumes(j
, setpgid(0, 0) != -1);
3143 job_assumes(j
, setsid() != -1);
3148 job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
)
3156 if ((fd
= open(path
, flags
|O_NOCTTY
, DEFFILEMODE
)) == -1) {
3157 job_log_error(j
, LOG_WARNING
, "open(\"%s\", ...)", path
);
3161 job_assumes(j
, dup2(fd
, target_fd
) != -1);
3162 job_assumes(j
, runtime_close(fd
) == 0);
3166 dir_has_files(job_t j
, const char *path
)
3168 DIR *dd
= opendir(path
);
3176 while ((de
= readdir(dd
))) {
3177 if (strcmp(de
->d_name
, ".") && strcmp(de
->d_name
, "..")) {
3183 job_assumes(j
, closedir(dd
) == 0);
3188 calendarinterval_setalarm(job_t j
, struct calendarinterval
*ci
)
3190 struct calendarinterval
*ci_iter
, *ci_prev
= NULL
;
3191 time_t later
, head_later
;
3193 later
= cronemu(ci
->when
.tm_mon
, ci
->when
.tm_mday
, ci
->when
.tm_hour
, ci
->when
.tm_min
);
3195 if (ci
->when
.tm_wday
!= -1) {
3196 time_t otherlater
= cronemu_wday(ci
->when
.tm_wday
, ci
->when
.tm_hour
, ci
->when
.tm_min
);
3198 if (ci
->when
.tm_mday
== -1) {
3201 later
= later
< otherlater
? later
: otherlater
;
3205 ci
->when_next
= later
;
3207 LIST_FOREACH(ci_iter
, &sorted_calendar_events
, global_sle
) {
3208 if (ci
->when_next
< ci_iter
->when_next
) {
3209 LIST_INSERT_BEFORE(ci_iter
, ci
, global_sle
);
3216 if (ci_iter
== NULL
) {
3217 /* ci must want to fire after every other timer, or there are no timers */
3219 if (LIST_EMPTY(&sorted_calendar_events
)) {
3220 LIST_INSERT_HEAD(&sorted_calendar_events
, ci
, global_sle
);
3222 LIST_INSERT_AFTER(ci_prev
, ci
, global_sle
);
3226 head_later
= LIST_FIRST(&sorted_calendar_events
)->when_next
;
3228 /* Workaround 5225889 */
3229 kevent_mod((uintptr_t)&sorted_calendar_events
, EVFILT_TIMER
, EV_DELETE
, 0, 0, root_jobmgr
);
3231 if (job_assumes(j
, kevent_mod((uintptr_t)&sorted_calendar_events
, EVFILT_TIMER
, EV_ADD
, NOTE_ABSOLUTE
|NOTE_SECONDS
, head_later
, root_jobmgr
) != -1)) {
3232 char time_string
[100];
3233 size_t time_string_len
;
3235 ctime_r(&later
, time_string
);
3236 time_string_len
= strlen(time_string
);
3238 if (time_string_len
&& time_string
[time_string_len
- 1] == '\n') {
3239 time_string
[time_string_len
- 1] = '\0';
3242 job_log(j
, LOG_INFO
, "Scheduled to run again at %s", time_string
);
/*
 * Pull the revision number out of an RCS "$Revision: NNN $" keyword string
 * into 'o' (at most osz bytes, always NUL-terminated). If the input has no
 * space (no keyword expansion), the whole input is copied through.
 */
void
extract_rcsid_substr(const char *i, char *o, size_t osz)
{
	const char *after_space = strchr(i, ' ');
	char *trailing;

	if (after_space == NULL) {
		/* Unexpanded keyword: pass it through, truncating if needed. */
		snprintf(o, osz, "%s", i);
		return;
	}

	/* Copy everything past the first space... */
	snprintf(o, osz, "%s", after_space + 1);

	/* ...and cut at the next space, leaving only the revision number. */
	if ((trailing = strchr(o, ' ')) != NULL) {
		*trailing = '\0';
	}
}
3263 jobmgr_log_bug(jobmgr_t jm
, const char *rcs_rev
, const char *path
, unsigned int line
, const char *test
)
3265 int saved_errno
= errno
;
3266 const char *file
= strrchr(path
, '/');
3269 extract_rcsid_substr(rcs_rev
, buf
, sizeof(buf
));
3277 jobmgr_log(jm
, LOG_NOTICE
, "Bug: %s:%u (%s):%u: %s", file
, line
, buf
, saved_errno
, test
);
3281 job_log_bug(job_t j
, const char *rcs_rev
, const char *path
, unsigned int line
, const char *test
)
3283 int saved_errno
= errno
;
3284 const char *file
= strrchr(path
, '/');
3287 extract_rcsid_substr(rcs_rev
, buf
, sizeof(buf
));
3295 job_log(j
, LOG_NOTICE
, "Bug: %s:%u (%s):%u: %s", file
, line
, buf
, saved_errno
, test
);
3299 job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
)
3301 struct runtime_syslog_attr attr
= { "com.apple.launchd", j
->label
, j
->mgr
->name
, pri
, getuid(), getpid(), j
->p
};
3307 * Hack: If bootstrap_port is set, we must be on the child side of a
3308 * fork(), but before the exec*(). Let's route the log message back to
3311 if (bootstrap_port
) {
3312 return _vproc_logv(pri
, err
, msg
, ap
);
3315 newmsgsz
= strlen(msg
) + 200;
3316 newmsg
= alloca(newmsgsz
);
3319 snprintf(newmsg
, newmsgsz
, "%s: %s", msg
, strerror(err
));
3321 snprintf(newmsg
, newmsgsz
, "%s", msg
);
3325 oldmask
= setlogmask(LOG_UPTO(LOG_DEBUG
));
3328 runtime_vsyslog(&attr
, newmsg
, ap
);
3331 setlogmask(oldmask
);
3336 job_log_error(job_t j
, int pri
, const char *msg
, ...)
3341 job_logv(j
, pri
, errno
, msg
, ap
);
3346 job_log(job_t j
, int pri
, const char *msg
, ...)
3351 job_logv(j
, pri
, 0, msg
, ap
);
3357 jobmgr_log_error(jobmgr_t jm
, int pri
, const char *msg
, ...)
3362 jobmgr_logv(jm
, pri
, errno
, msg
, ap
);
3368 jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...)
3373 jobmgr_logv(jm
, pri
, 0, msg
, ap
);
3378 jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
)
3382 size_t i
, o
, jmname_len
= strlen(jm
->name
), newmsgsz
;
3384 newname
= alloca((jmname_len
+ 1) * 2);
3385 newmsgsz
= (jmname_len
+ 1) * 2 + strlen(msg
) + 100;
3386 newmsg
= alloca(newmsgsz
);
3388 for (i
= 0, o
= 0; i
< jmname_len
; i
++, o
++) {
3389 if (jm
->name
[i
] == '%') {
3393 newname
[o
] = jm
->name
[i
];
3398 snprintf(newmsg
, newmsgsz
, "%s: %s: %s", newname
, msg
, strerror(err
));
3400 snprintf(newmsg
, newmsgsz
, "%s: %s", newname
, msg
);
3403 if (jm
->parentmgr
) {
3404 jobmgr_logv(jm
->parentmgr
, pri
, 0, newmsg
, ap
);
3406 struct runtime_syslog_attr attr
= { "com.apple.launchd", "com.apple.launchd", jm
->name
, pri
, getuid(), getpid(), getpid() };
3408 runtime_vsyslog(&attr
, newmsg
, ap
);
/*
 * Stop watching a path-based KeepAlive semaphore: drop the EVFILT_VNODE
 * registration for its open descriptor, if one exists.
 */
void
semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
{
	if (si->fd == -1) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
	job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
}
3422 semaphoreitem_watch(job_t j
, struct semaphoreitem
*si
)
3424 char *parentdir
, tmp_path
[PATH_MAX
];
3425 const char *which_path
= si
->what
;
3426 int saved_errno
= 0;
3431 fflags
= NOTE_DELETE
|NOTE_RENAME
|NOTE_REVOKE
|NOTE_EXTEND
|NOTE_WRITE
;
3434 fflags
= NOTE_DELETE
|NOTE_RENAME
;
3438 fflags
= NOTE_DELETE
|NOTE_RENAME
|NOTE_REVOKE
|NOTE_EXTEND
|NOTE_WRITE
|NOTE_ATTRIB
|NOTE_LINK
;
3444 /* dirname() may modify tmp_path */
3445 strlcpy(tmp_path
, si
->what
, sizeof(tmp_path
));
3447 if (!job_assumes(j
, (parentdir
= dirname(tmp_path
)))) {
3451 /* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
3454 if ((si
->fd
= _fd(open(which_path
, O_EVTONLY
|O_NOCTTY
))) == -1) {
3455 which_path
= parentdir
;
3456 si
->fd
= _fd(open(which_path
, O_EVTONLY
|O_NOCTTY
));
3461 return job_log_error(j
, LOG_ERR
, "Path monitoring failed on \"%s\"", which_path
);
3464 job_log(j
, LOG_DEBUG
, "Watching Vnode: %d", si
->fd
);
3466 if (kevent_mod(si
->fd
, EVFILT_VNODE
, EV_ADD
, fflags
, 0, j
) == -1) {
3467 saved_errno
= errno
;
3469 * The FD can be revoked between the open() and kevent().
3470 * This is similar to the inability for kevents to be
3471 * attached to short lived zombie processes after fork()
3472 * but before kevent().
3474 job_assumes(j
, runtime_close(si
->fd
) == 0);
3477 } while ((si
->fd
== -1) && (saved_errno
== ENOENT
));
3479 if (saved_errno
== ENOTSUP
) {
3481 * 3524219 NFS needs kqueue support
3482 * 4124079 VFS needs generic kqueue support
3483 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
3485 job_log(j
, LOG_DEBUG
, "Falling back to polling for path: %s", si
->what
);
3487 if (!j
->poll_for_vfs_changes
) {
3488 j
->poll_for_vfs_changes
= true;
3489 job_assumes(j
, kevent_mod((uintptr_t)&j
->semaphores
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, 3, j
) != -1);
3495 semaphoreitem_callback(job_t j
, struct kevent
*kev
)
3497 char invalidation_reason
[100] = "";
3498 struct semaphoreitem
*si
;
3500 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
3511 if (si
->fd
== (int)kev
->ident
) {
3516 if (!job_assumes(j
, si
!= NULL
)) {
3520 if (NOTE_DELETE
& kev
->fflags
) {
3521 strcat(invalidation_reason
, "deleted");
3524 if (NOTE_RENAME
& kev
->fflags
) {
3525 if (invalidation_reason
[0]) {
3526 strcat(invalidation_reason
, "/renamed");
3528 strcat(invalidation_reason
, "renamed");
3532 if (NOTE_REVOKE
& kev
->fflags
) {
3533 if (invalidation_reason
[0]) {
3534 strcat(invalidation_reason
, "/revoked");
3536 strcat(invalidation_reason
, "revoked");
3540 if (invalidation_reason
[0]) {
3541 job_log(j
, LOG_DEBUG
, "Path %s: %s", invalidation_reason
, si
->what
);
3542 job_assumes(j
, runtime_close(si
->fd
) == 0);
3543 si
->fd
= -1; /* this will get fixed in semaphoreitem_watch() */
3546 job_log(j
, LOG_DEBUG
, "Watch path modified: %s", si
->what
);
3548 if (si
->why
== PATH_CHANGES
) {
3549 j
->start_pending
= true;
3552 job_dispatch(j
, false);
3556 calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
)
3558 struct tm
*tmptm
= context
;
3561 if (LAUNCH_DATA_INTEGER
!= launch_data_get_type(obj
)) {
3562 /* hack to let caller know something went wrong */
3567 val
= launch_data_get_integer(obj
);
3569 if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MINUTE
) == 0) {
3570 tmptm
->tm_min
= val
;
3571 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_HOUR
) == 0) {
3572 tmptm
->tm_hour
= val
;
3573 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_DAY
) == 0) {
3574 tmptm
->tm_mday
= val
;
3575 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_WEEKDAY
) == 0) {
3576 tmptm
->tm_wday
= val
;
3577 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MONTH
) == 0) {
3578 tmptm
->tm_mon
= val
;
3579 tmptm
->tm_mon
-= 1; /* 4798263 cron compatibility */
3584 calendarinterval_new_from_obj(job_t j
, launch_data_t obj
)
3588 memset(&tmptm
, 0, sizeof(0));
3596 if (!job_assumes(j
, obj
!= NULL
)) {
3600 if (LAUNCH_DATA_DICTIONARY
!= launch_data_get_type(obj
)) {
3604 launch_data_dict_iterate(obj
, calendarinterval_new_from_obj_dict_walk
, &tmptm
);
3606 if (tmptm
.tm_sec
== -1) {
3610 return calendarinterval_new(j
, &tmptm
);
3614 calendarinterval_new(job_t j
, struct tm
*w
)
3616 struct calendarinterval
*ci
= calloc(1, sizeof(struct calendarinterval
));
3618 if (!job_assumes(j
, ci
!= NULL
)) {
3625 SLIST_INSERT_HEAD(&j
->cal_intervals
, ci
, sle
);
3627 calendarinterval_setalarm(j
, ci
);
3635 calendarinterval_delete(job_t j
, struct calendarinterval
*ci
)
3637 SLIST_REMOVE(&j
->cal_intervals
, ci
, calendarinterval
, sle
);
3638 LIST_REMOVE(ci
, global_sle
);
3646 calendarinterval_sanity_check(void)
3648 struct calendarinterval
*ci
= LIST_FIRST(&sorted_calendar_events
);
3649 time_t now
= time(NULL
);
3651 if (ci
&& (ci
->when_next
< now
)) {
3652 jobmgr_assumes(root_jobmgr
, raise(SIGUSR1
) != -1);
3657 calendarinterval_callback(void)
3659 struct calendarinterval
*ci
, *ci_next
;
3660 time_t now
= time(NULL
);
3662 LIST_FOREACH_SAFE(ci
, &sorted_calendar_events
, global_sle
, ci_next
) {
3665 if (ci
->when_next
> now
) {
3669 LIST_REMOVE(ci
, global_sle
);
3670 calendarinterval_setalarm(j
, ci
);
3672 j
->start_pending
= true;
3673 job_dispatch(j
, false);
3678 socketgroup_new(job_t j
, const char *name
, int *fds
, unsigned int fd_cnt
, bool junkfds
)
3680 struct socketgroup
*sg
= calloc(1, sizeof(struct socketgroup
) + strlen(name
) + 1);
3682 if (!job_assumes(j
, sg
!= NULL
)) {
3686 sg
->fds
= calloc(1, fd_cnt
* sizeof(int));
3687 sg
->fd_cnt
= fd_cnt
;
3688 sg
->junkfds
= junkfds
;
3690 if (!job_assumes(j
, sg
->fds
!= NULL
)) {
3695 memcpy(sg
->fds
, fds
, fd_cnt
* sizeof(int));
3696 strcpy(sg
->name_init
, name
);
3698 SLIST_INSERT_HEAD(&j
->sockets
, sg
, sle
);
3706 socketgroup_delete(job_t j
, struct socketgroup
*sg
)
3710 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
3712 struct sockaddr_storage ss
;
3713 struct sockaddr_un
*sun
= (struct sockaddr_un
*)&ss
;
3714 socklen_t ss_len
= sizeof(ss
);
3717 if (job_assumes(j
, getsockname(sg
->fds
[i
], (struct sockaddr
*)&ss
, &ss_len
) != -1)
3718 && job_assumes(j
, ss_len
> 0) && (ss
.ss_family
== AF_UNIX
)) {
3719 job_assumes(j
, unlink(sun
->sun_path
) != -1);
3720 /* We might conditionally need to delete a directory here */
3723 job_assumes(j
, runtime_close(sg
->fds
[i
]) != -1);
3726 SLIST_REMOVE(&j
->sockets
, sg
, socketgroup
, sle
);
3735 socketgroup_kevent_mod(job_t j
, struct socketgroup
*sg
, bool do_add
)
3737 struct kevent kev
[sg
->fd_cnt
];
3739 unsigned int i
, buf_off
= 0;
3745 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
3746 EV_SET(&kev
[i
], sg
->fds
[i
], EVFILT_READ
, do_add
? EV_ADD
: EV_DELETE
, 0, 0, j
);
3747 buf_off
+= snprintf(buf
+ buf_off
, sizeof(buf
) - buf_off
, " %d", sg
->fds
[i
]);
3750 job_log(j
, LOG_DEBUG
, "%s Sockets:%s", do_add
? "Watching" : "Ignoring", buf
);
3752 job_assumes(j
, kevent_bulk_mod(kev
, sg
->fd_cnt
) != -1);
3754 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
3755 job_assumes(j
, kev
[i
].flags
& EV_ERROR
);
3756 errno
= kev
[i
].data
;
3757 job_assumes(j
, kev
[i
].data
== 0);
3762 socketgroup_ignore(job_t j
, struct socketgroup
*sg
)
3764 socketgroup_kevent_mod(j
, sg
, false);
3768 socketgroup_watch(job_t j
, struct socketgroup
*sg
)
3770 socketgroup_kevent_mod(j
, sg
, true);
3774 socketgroup_callback(job_t j
)
3776 job_dispatch(j
, true);
3780 envitem_new(job_t j
, const char *k
, const char *v
, bool global
)
3782 struct envitem
*ei
= calloc(1, sizeof(struct envitem
) + strlen(k
) + 1 + strlen(v
) + 1);
3784 if (!job_assumes(j
, ei
!= NULL
)) {
3788 strcpy(ei
->key_init
, k
);
3789 ei
->value
= ei
->key_init
+ strlen(k
) + 1;
3790 strcpy(ei
->value
, v
);
3793 SLIST_INSERT_HEAD(&j
->global_env
, ei
, sle
);
3795 SLIST_INSERT_HEAD(&j
->env
, ei
, sle
);
3798 job_log(j
, LOG_DEBUG
, "Added environmental variable: %s=%s", k
, v
);
3804 envitem_delete(job_t j
, struct envitem
*ei
, bool global
)
3807 SLIST_REMOVE(&j
->global_env
, ei
, envitem
, sle
);
3809 SLIST_REMOVE(&j
->env
, ei
, envitem
, sle
);
3816 envitem_setup(launch_data_t obj
, const char *key
, void *context
)
3820 if (launch_data_get_type(obj
) != LAUNCH_DATA_STRING
) {
3824 envitem_new(j
, key
, launch_data_get_string(obj
), j
->importing_global_env
);
3828 limititem_update(job_t j
, int w
, rlim_t r
)
3830 struct limititem
*li
;
3832 SLIST_FOREACH(li
, &j
->limits
, sle
) {
3833 if (li
->which
== w
) {
3839 li
= calloc(1, sizeof(struct limititem
));
3841 if (!job_assumes(j
, li
!= NULL
)) {
3845 SLIST_INSERT_HEAD(&j
->limits
, li
, sle
);
3850 if (j
->importing_hard_limits
) {
3851 li
->lim
.rlim_max
= r
;
3854 li
->lim
.rlim_cur
= r
;
3862 limititem_delete(job_t j
, struct limititem
*li
)
3864 SLIST_REMOVE(&j
->limits
, li
, limititem
, sle
);
3871 seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
)
3875 if (launch_data_get_type(obj
) != LAUNCH_DATA_BOOL
) {
3876 job_log(j
, LOG_WARNING
, "Sandbox flag value must be boolean: %s", key
);
3880 if (launch_data_get_bool(obj
) == false) {
3884 if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOX_NAMED
) == 0) {
3885 j
->seatbelt_flags
|= SANDBOX_NAMED
;
3891 limititem_setup(launch_data_t obj
, const char *key
, void *context
)
3894 int i
, limits_cnt
= (sizeof(launchd_keys2limits
) / sizeof(launchd_keys2limits
[0]));
3897 if (launch_data_get_type(obj
) != LAUNCH_DATA_INTEGER
) {
3901 rl
= launch_data_get_integer(obj
);
3903 for (i
= 0; i
< limits_cnt
; i
++) {
3904 if (strcasecmp(launchd_keys2limits
[i
].key
, key
) == 0) {
3909 if (i
== limits_cnt
) {
3913 limititem_update(j
, launchd_keys2limits
[i
].val
, rl
);
3917 job_useless(job_t j
)
3919 /* Yes, j->unload_at_exit and j->only_once seem the same, but they'll differ someday... */
3921 if ((j
->unload_at_exit
|| j
->only_once
) && j
->start_time
!= 0) {
3922 if (j
->unload_at_exit
&& j
->j_port
) {
3925 job_log(j
, LOG_INFO
, "Exited. Was only configured to run once.");
3927 } else if (j
->removal_pending
) {
3928 job_log(j
, LOG_DEBUG
, "Exited while removal was pending.");
3930 } else if (j
->mgr
->shutting_down
&& (j
->hopefully_exits_first
|| j
->mgr
->hopefully_first_cnt
== 0)) {
3931 job_log(j
, LOG_DEBUG
, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children
, total_anon_children
);
3933 } else if (j
->legacy_mach_job
) {
3934 if (SLIST_EMPTY(&j
->machservices
)) {
3935 job_log(j
, LOG_INFO
, "Garbage collecting");
3937 } else if (!j
->checkedin
) {
3938 job_log(j
, LOG_WARNING
, "Failed to check-in!");
3947 job_keepalive(job_t j
)
3949 mach_msg_type_number_t statusCnt
;
3950 mach_port_status_t status
;
3951 struct semaphoreitem
*si
;
3952 struct machservice
*ms
;
3954 bool good_exit
= (WIFEXITED(j
->last_exit_status
) && WEXITSTATUS(j
->last_exit_status
) == 0);
3956 if (j
->mgr
->shutting_down
) {
3963 * We definitely need to revisit this after Leopard ships. Please see
3964 * launchctl.c for the other half of this hack.
3966 if (j
->mgr
->global_on_demand_cnt
> 0 && strcmp(j
->label
, "com.apple.kextd") != 0) {
3970 if (j
->start_pending
) {
3971 job_log(j
, LOG_DEBUG
, "KeepAlive check: Pent-up non-IPC launch criteria.");
3976 job_log(j
, LOG_DEBUG
, "KeepAlive check: job configured to run continuously.");
3980 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
3981 statusCnt
= MACH_PORT_RECEIVE_STATUS_COUNT
;
3982 if (mach_port_get_attributes(mach_task_self(), ms
->port
, MACH_PORT_RECEIVE_STATUS
,
3983 (mach_port_info_t
)&status
, &statusCnt
) != KERN_SUCCESS
) {
3986 if (status
.mps_msgcount
) {
3987 job_log(j
, LOG_DEBUG
, "KeepAlive check: job restarted due to %d queued Mach messages on service: %s",
3988 status
.mps_msgcount
, ms
->name
);
3994 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
3995 bool wanted_state
= false;
4001 wanted_state
= true;
4003 if (network_up
== wanted_state
) {
4004 job_log(j
, LOG_DEBUG
, "KeepAlive: The network is %s.", wanted_state
? "up" : "down");
4008 case SUCCESSFUL_EXIT
:
4009 wanted_state
= true;
4011 if (good_exit
== wanted_state
) {
4012 job_log(j
, LOG_DEBUG
, "KeepAlive: The exit state was %s.", wanted_state
? "successful" : "failure");
4016 case OTHER_JOB_ENABLED
:
4017 wanted_state
= true;
4018 case OTHER_JOB_DISABLED
:
4019 if ((bool)job_find(si
->what
) == wanted_state
) {
4020 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "enabled" : "disabled", si
->what
);
4024 case OTHER_JOB_ACTIVE
:
4025 wanted_state
= true;
4026 case OTHER_JOB_INACTIVE
:
4027 if ((other_j
= job_find(si
->what
))) {
4028 if ((bool)other_j
->p
== wanted_state
) {
4029 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "active" : "inactive", si
->what
);
4035 wanted_state
= true;
4037 if ((bool)(stat(si
->what
, &sb
) == 0) == wanted_state
) {
4039 job_assumes(j
, runtime_close(si
->fd
) == 0);
4042 job_log(j
, LOG_DEBUG
, "KeepAlive: The following path %s: %s", wanted_state
? "exists" : "is missing", si
->what
);
4049 if (-1 == (qdir_file_cnt
= dir_has_files(j
, si
->what
))) {
4050 job_log_error(j
, LOG_ERR
, "Failed to count the number of files in \"%s\"", si
->what
);
4051 } else if (qdir_file_cnt
> 0) {
4052 job_log(j
, LOG_DEBUG
, "KeepAlive: Directory is not empty: %s", si
->what
);
4067 } else if (j
->argv
) {
4077 struct machservice
*ms
;
4080 return "PID is still valid";
4083 if (j
->mgr
->shutting_down
&& j
->log_redirect_fd
) {
4084 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
4085 j
->log_redirect_fd
= 0;
4088 if (j
->log_redirect_fd
) {
4089 if (job_assumes(j
, j
->wait4pipe_eof
)) {
4090 return "Standard out/error is still valid";
4092 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
4093 j
->log_redirect_fd
= 0;
4097 if (j
->priv_port_has_senders
) {
4098 return "Privileged Port still has outstanding senders";
4101 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
4102 if (ms
->recv
&& ms
->isActive
) {
4103 return "Mach service is still active";
4111 machservice_watch(job_t j
, struct machservice
*ms
)
4114 job_assumes(j
, runtime_add_mport(ms
->port
, NULL
, 0) == KERN_SUCCESS
);
4119 machservice_ignore(job_t j
, struct machservice
*ms
)
4121 job_assumes(j
, runtime_remove_mport(ms
->port
) == KERN_SUCCESS
);
4125 machservice_resetport(job_t j
, struct machservice
*ms
)
4127 LIST_REMOVE(ms
, port_hash_sle
);
4128 job_assumes(j
, launchd_mport_close_recv(ms
->port
) == KERN_SUCCESS
);
4129 job_assumes(j
, launchd_mport_deallocate(ms
->port
) == KERN_SUCCESS
);
4131 job_assumes(j
, launchd_mport_create_recv(&ms
->port
) == KERN_SUCCESS
);
4132 job_assumes(j
, launchd_mport_make_send(ms
->port
) == KERN_SUCCESS
);
4133 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
4136 struct machservice
*
4137 machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
)
4139 struct machservice
*ms
;
4141 if ((ms
= calloc(1, sizeof(struct machservice
) + strlen(name
) + 1)) == NULL
) {
4145 strcpy((char *)ms
->name
, name
);
4147 ms
->per_pid
= pid_local
;
4149 if (*serviceport
== MACH_PORT_NULL
) {
4150 if (!job_assumes(j
, launchd_mport_create_recv(&ms
->port
) == KERN_SUCCESS
)) {
4154 if (!job_assumes(j
, launchd_mport_make_send(ms
->port
) == KERN_SUCCESS
)) {
4157 *serviceport
= ms
->port
;
4160 ms
->port
= *serviceport
;
4161 ms
->isActive
= true;
4164 SLIST_INSERT_HEAD(&j
->machservices
, ms
, sle
);
4165 LIST_INSERT_HEAD(&j
->mgr
->ms_hash
[hash_ms(ms
->name
)], ms
, name_hash_sle
);
4166 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
4168 job_log(j
, LOG_INFO
, "Mach service added: %s", name
);
4172 job_assumes(j
, launchd_mport_close_recv(ms
->port
) == KERN_SUCCESS
);
4179 machservice_status(struct machservice
*ms
)
4182 return BOOTSTRAP_STATUS_ACTIVE
;
4183 } else if (ms
->job
->ondemand
) {
4184 return BOOTSTRAP_STATUS_ON_DEMAND
;
4186 return BOOTSTRAP_STATUS_INACTIVE
;
4191 job_setup_exception_port(job_t j
, task_t target_task
)
4193 struct machservice
*ms
;
4194 thread_state_flavor_t f
= 0;
4195 mach_port_t exc_port
= the_exception_server
;
4197 if (j
->alt_exc_handler
) {
4198 ms
= jobmgr_lookup_service(j
->mgr
, j
->alt_exc_handler
, true, 0);
4200 exc_port
= machservice_port(ms
);
4202 job_log(j
, LOG_WARNING
, "Falling back to default Mach exception handler. Could not find: %s", j
->alt_exc_handler
);
4204 } else if (j
->internal_exc_handler
) {
4205 exc_port
= runtime_get_kernel_port();
4206 } else if (!exc_port
) {
4210 #if defined (__ppc__) || defined (__ppc64__)
4211 f
= PPC_THREAD_STATE64
;
4212 #elif defined(__i386__) || defined(__x86_64__)
4213 f
= x86_THREAD_STATE
;
4214 #elif defined(__arm__)
4215 f
= ARM_THREAD_STATE
;
4217 #error "unknown architecture"
4221 job_assumes(j
, task_set_exception_ports(target_task
, EXC_MASK_CRASH
, exc_port
,
4222 EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
) == KERN_SUCCESS
);
4223 } else if (getpid() == 1 && the_exception_server
) {
4224 mach_port_t mhp
= mach_host_self();
4225 job_assumes(j
, host_set_exception_ports(mhp
, EXC_MASK_CRASH
, the_exception_server
,
4226 EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
) == KERN_SUCCESS
);
4227 job_assumes(j
, launchd_mport_deallocate(mhp
) == KERN_SUCCESS
);
4233 job_set_exeception_port(job_t j
, mach_port_t port
)
4235 if (!the_exception_server
) {
4236 the_exception_server
= port
;
4237 job_setup_exception_port(j
, 0);
4239 job_log(j
, LOG_WARNING
, "The exception server is already claimed!");
4244 machservice_setup_options(launch_data_t obj
, const char *key
, void *context
)
4246 struct machservice
*ms
= context
;
4247 mach_port_t mhp
= mach_host_self();
4251 if (!job_assumes(ms
->job
, mhp
!= MACH_PORT_NULL
)) {
4255 switch (launch_data_get_type(obj
)) {
4256 case LAUNCH_DATA_INTEGER
:
4257 which_port
= launch_data_get_integer(obj
);
4258 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT
) == 0) {
4259 switch (which_port
) {
4260 case TASK_KERNEL_PORT
:
4261 case TASK_HOST_PORT
:
4262 case TASK_NAME_PORT
:
4263 case TASK_BOOTSTRAP_PORT
:
4264 /* I find it a little odd that zero isn't reserved in the header */
4266 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved task special port: %d", which_port
);
4269 ms
->special_port_num
= which_port
;
4270 SLIST_INSERT_HEAD(&special_ports
, ms
, special_port_sle
);
4273 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT
) == 0 && getpid() == 1) {
4274 if (which_port
> HOST_MAX_SPECIAL_KERNEL_PORT
) {
4275 job_assumes(ms
->job
, (errno
= host_set_special_port(mhp
, which_port
, ms
->port
)) == KERN_SUCCESS
);
4277 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved host special port: %d", which_port
);
4280 case LAUNCH_DATA_BOOL
:
4281 b
= launch_data_get_bool(obj
);
4282 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE
) == 0) {
4283 ms
->debug_on_close
= b
;
4284 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_RESETATCLOSE
) == 0) {
4286 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN
) == 0) {
4288 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER
) == 0) {
4289 job_set_exeception_port(ms
->job
, ms
->port
);
4290 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_KUNCSERVER
) == 0) {
4292 job_assumes(ms
->job
, host_set_UNDServer(mhp
, ms
->port
) == KERN_SUCCESS
);
4295 case LAUNCH_DATA_DICTIONARY
:
4296 job_set_exeception_port(ms
->job
, ms
->port
);
4302 job_assumes(ms
->job
, launchd_mport_deallocate(mhp
) == KERN_SUCCESS
);
4306 machservice_setup(launch_data_t obj
, const char *key
, void *context
)
4309 struct machservice
*ms
;
4310 mach_port_t p
= MACH_PORT_NULL
;
4312 if ((ms
= jobmgr_lookup_service(j
->mgr
, key
, false, 0))) {
4313 job_log(j
, LOG_WARNING
, "Conflict with job: %s over Mach service: %s", ms
->job
->label
, key
);
4317 if ((ms
= machservice_new(j
, key
, &p
, false)) == NULL
) {
4318 job_log_error(j
, LOG_WARNING
, "Cannot add service: %s", key
);
4322 ms
->isActive
= false;
4324 if (launch_data_get_type(obj
) == LAUNCH_DATA_DICTIONARY
) {
4325 launch_data_dict_iterate(obj
, machservice_setup_options
, ms
);
4330 jobmgr_do_garbage_collection(jobmgr_t jm
)
4335 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
4336 jobmgr_do_garbage_collection(jmi
);
4339 if (!jm
->shutting_down
) {
4343 jobmgr_log(jm
, LOG_DEBUG
, "Garbage collecting.");
4346 * Normally, we wait for all resources of a job (Unix PIDs/FDs and Mach ports)
4347 * to reset before we conider the job truly dead and ready to be spawned again.
4349 * In order to work around 5487724 and 3456090, we're going to call reboot()
4350 * when the last PID dies and not wait for the associated resources to reset.
4352 if (getpid() == 1 && jm
->parentmgr
== NULL
&& total_children
== 0) {
4353 jobmgr_log(jm
, LOG_DEBUG
, "About to force a call to: reboot(%s)", reboot_flags_to_C_names(jm
->reboot_flags
));
4355 jobmgr_assumes(jm
, reboot(jm
->reboot_flags
) != -1);
4358 if (jm
->hopefully_first_cnt
) {
4362 if (jm
->parentmgr
&& jm
->parentmgr
->shutting_down
&& jm
->parentmgr
->hopefully_first_cnt
) {
4366 if (!jm
->sent_stop_to_normal_jobs
) {
4367 jobmgr_log(jm
, LOG_DEBUG
, "Asking \"normal\" jobs to exit.");
4369 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
4370 if (!job_active(ji
)) {
4372 } else if (!ji
->hopefully_exits_last
) {
4377 jm
->sent_stop_to_normal_jobs
= true;
4380 if (jm
->normal_active_cnt
) {
4384 if (!jm
->sent_stop_to_hopefully_last_jobs
) {
4385 jobmgr_log(jm
, LOG_DEBUG
, "Asking \"hopefully last\" jobs to exit.");
4387 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
4388 if (ji
->p
&& ji
->anonymous
) {
4390 } else if (ji
->p
&& job_assumes(ji
, ji
->hopefully_exits_last
)) {
4395 jm
->sent_stop_to_hopefully_last_jobs
= true;
4398 if (!SLIST_EMPTY(&jm
->submgrs
)) {
4402 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
4403 if (!ji
->anonymous
) {
4408 jobmgr_log_stray_children(jm
);
4414 jobmgr_log_stray_children(jobmgr_t jm
)
4416 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_ALL
};
4417 size_t i
, kp_cnt
, len
= sizeof(struct kinfo_proc
) * get_kern_max_proc();
4418 struct kinfo_proc
*kp
;
4420 #if TARGET_OS_EMBEDDED
4421 if (!do_apple_internal_magic
) {
4425 if (jm
->parentmgr
|| getpid() != 1) {
4429 if (!jobmgr_assumes(jm
, (kp
= malloc(len
)) != NULL
)) {
4432 if (!jobmgr_assumes(jm
, sysctl(mib
, 3, kp
, &len
, NULL
, 0) != -1)) {
4436 kp_cnt
= len
/ sizeof(struct kinfo_proc
);
4438 for (i
= 0; i
< kp_cnt
; i
++) {
4439 pid_t p_i
= kp
[i
].kp_proc
.p_pid
;
4440 pid_t pp_i
= kp
[i
].kp_eproc
.e_ppid
;
4441 pid_t pg_i
= kp
[i
].kp_eproc
.e_pgid
;
4442 const char *z
= (kp
[i
].kp_proc
.p_stat
== SZOMB
) ? "zombie " : "";
4443 const char *n
= kp
[i
].kp_proc
.p_comm
;
4445 if (p_i
== 0 || p_i
== 1) {
4449 jobmgr_log(jm
, LOG_WARNING
, "Stray %sprocess at shutdown: PID %u PPID %u PGID %u %s", z
, p_i
, pp_i
, pg_i
, n
);
4452 * The kernel team requested that launchd not do this for Leopard.
4453 * jobmgr_assumes(jm, runtime_kill(p_i, SIGKILL) != -1);
4462 jobmgr_parent(jobmgr_t jm
)
4464 return jm
->parentmgr
;
4468 job_uncork_fork(job_t j
)
4472 job_log(j
, LOG_DEBUG
, "Uncorking the fork().");
4473 /* this unblocks the child and avoids a race
4474 * between the above fork() and the kevent_mod() */
4475 job_assumes(j
, write(j
->forkfd
, &c
, sizeof(c
)) == sizeof(c
));
4476 job_assumes(j
, runtime_close(j
->forkfd
) != -1);
4481 jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
)
4483 mach_msg_size_t mxmsgsz
;
4484 job_t bootstrapper
= NULL
;
4487 launchd_assert(offsetof(struct jobmgr_s
, kqjobmgr_callback
) == 0);
4489 if (jm
&& requestorport
== MACH_PORT_NULL
) {
4490 jobmgr_log(jm
, LOG_ERR
, "Mach sub-bootstrap create request requires a requester port");
4494 jmr
= calloc(1, sizeof(struct jobmgr_s
) + (name
? (strlen(name
) + 1) : 128));
4500 jmr
->kqjobmgr_callback
= jobmgr_callback
;
4501 strcpy(jmr
->name_init
, name
? name
: "Under construction");
4503 jmr
->req_port
= requestorport
;
4505 if ((jmr
->parentmgr
= jm
)) {
4506 SLIST_INSERT_HEAD(&jm
->submgrs
, jmr
, sle
);
4509 if (jm
&& !jobmgr_assumes(jmr
, launchd_mport_notify_req(jmr
->req_port
, MACH_NOTIFY_DEAD_NAME
) == KERN_SUCCESS
)) {
4513 if (transfer_port
!= MACH_PORT_NULL
) {
4514 jobmgr_assumes(jmr
, jm
!= NULL
);
4515 jmr
->jm_port
= transfer_port
;
4516 } else if (!jm
&& getpid() != 1) {
4517 char *trusted_fd
= getenv(LAUNCHD_TRUSTED_FD_ENV
);
4520 snprintf(service_buf
, sizeof(service_buf
), "com.apple.launchd.peruser.%u", getuid());
4522 if (!jobmgr_assumes(jmr
, bootstrap_check_in(bootstrap_port
, service_buf
, &jmr
->jm_port
) == 0)) {
4527 int dfd
, lfd
= strtol(trusted_fd
, NULL
, 10);
4529 if ((dfd
= dup(lfd
)) >= 0) {
4530 jobmgr_assumes(jmr
, runtime_close(dfd
) != -1);
4531 jobmgr_assumes(jmr
, runtime_close(lfd
) != -1);
4534 unsetenv(LAUNCHD_TRUSTED_FD_ENV
);
4537 /* cut off the Libc cache, we don't want to deadlock against ourself */
4538 inherited_bootstrap_port
= bootstrap_port
;
4539 bootstrap_port
= MACH_PORT_NULL
;
4540 launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port
, MACH_NOTIFY_DEAD_NAME
) == KERN_SUCCESS
);
4542 /* We set this explicitly as we start each child */
4543 launchd_assert(launchd_set_bport(MACH_PORT_NULL
) == KERN_SUCCESS
);
4544 } else if (!jobmgr_assumes(jmr
, launchd_mport_create_recv(&jmr
->jm_port
) == KERN_SUCCESS
)) {
4549 sprintf(jmr
->name_init
, "%u", MACH_PORT_INDEX(jmr
->jm_port
));
4552 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
4553 mxmsgsz
= sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem
);
4554 if (job_mig_protocol_vproc_subsystem
.maxsize
> mxmsgsz
) {
4555 mxmsgsz
= job_mig_protocol_vproc_subsystem
.maxsize
;
4559 jobmgr_assumes(jmr
, kevent_mod(SIGTERM
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
) != -1);
4560 jobmgr_assumes(jmr
, kevent_mod(SIGUSR1
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
) != -1);
4561 jobmgr_assumes(jmr
, kevent_mod(0, EVFILT_FS
, EV_ADD
, VQ_MOUNT
|VQ_UNMOUNT
|VQ_UPDATE
, 0, jmr
) != -1);
4565 bootstrapper
= jobmgr_init_session(jmr
, name
, sflag
);
4568 if (!bootstrapper
|| !bootstrapper
->weird_bootstrap
) {
4569 if (!jobmgr_assumes(jmr
, runtime_add_mport(jmr
->jm_port
, protocol_vproc_server
, mxmsgsz
) == KERN_SUCCESS
)) {
4574 jobmgr_log(jmr
, LOG_DEBUG
, "Created job manager%s%s", jm
? " with parent: " : ".", jm
? jm
->name
: "");
4577 jobmgr_assumes(jmr
, job_dispatch(bootstrapper
, true) != NULL
);
4580 if (jmr
->parentmgr
) {
4594 jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
)
4596 const char *bootstrap_tool
[] = { "/bin/launchctl", "bootstrap", "-S", session_type
, sflag
? "-s" : NULL
, NULL
};
4597 char thelabel
[1000];
4600 snprintf(thelabel
, sizeof(thelabel
), "com.apple.launchctl.%s", session_type
);
4601 bootstrapper
= job_new(jm
, thelabel
, NULL
, bootstrap_tool
);
4602 if (jobmgr_assumes(jm
, bootstrapper
!= NULL
) && (jm
->parentmgr
|| getuid())) {
4605 /* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
4606 snprintf(buf
, sizeof(buf
), "0x%X:0:0", getuid());
4607 envitem_new(bootstrapper
, "__CF_USER_TEXT_ENCODING", buf
, false);
4608 bootstrapper
->weird_bootstrap
= true;
4609 jobmgr_assumes(jm
, job_setup_machport(bootstrapper
));
4612 jm
->session_initialized
= true;
4614 return bootstrapper
;
4618 jobmgr_delete_anything_with_port(jobmgr_t jm
, mach_port_t port
)
4620 struct machservice
*ms
, *next_ms
;
4623 /* Mach ports, unlike Unix descriptors, are reference counted. In other
4624 * words, when some program hands us a second or subsequent send right
4625 * to a port we already have open, the Mach kernel gives us the same
4626 * port number back and increments an reference count associated with
4627 * the port. This forces us, when discovering that a receive right at
4628 * the other end has been deleted, to wander all of our objects to see
4629 * what weird places clients might have handed us the same send right
4633 if (jm
== root_jobmgr
) {
4634 if (port
== inherited_bootstrap_port
) {
4635 launchd_assumes(launchd_mport_deallocate(port
) == KERN_SUCCESS
);
4636 inherited_bootstrap_port
= MACH_PORT_NULL
;
4638 return jobmgr_shutdown(jm
);
4641 LIST_FOREACH_SAFE(ms
, &port_hash
[HASH_PORT(port
)], port_hash_sle
, next_ms
) {
4642 if (ms
->port
== port
) {
4643 machservice_delete(ms
->job
, ms
, true);
4648 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
4649 jobmgr_delete_anything_with_port(jmi
, port
);
4652 if (jm
->req_port
== port
) {
4653 jobmgr_log(jm
, LOG_DEBUG
, "Request port died: 0x%x", port
);
4654 return jobmgr_shutdown(jm
);
4660 struct machservice
*
4661 jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
)
4663 struct machservice
*ms
;
4666 jobmgr_assumes(jm
, !check_parent
);
4669 LIST_FOREACH(ms
, &jm
->ms_hash
[hash_ms(name
)], name_hash_sle
) {
4670 if ((target_pid
&& ms
->per_pid
&& ms
->job
->p
== target_pid
) || (!target_pid
&& !ms
->per_pid
)) {
4671 if (strcmp(name
, ms
->name
) == 0) {
4677 if (jm
->parentmgr
== NULL
) {
4681 if (!check_parent
) {
4685 return jobmgr_lookup_service(jm
->parentmgr
, name
, true, 0);
4689 machservice_port(struct machservice
*ms
)
4695 machservice_job(struct machservice
*ms
)
4701 machservice_hidden(struct machservice
*ms
)
4707 machservice_active(struct machservice
*ms
)
4709 return ms
->isActive
;
4713 machservice_name(struct machservice
*ms
)
4719 machservice_delete(job_t j
, struct machservice
*ms
, bool port_died
)
4721 if (ms
->debug_on_close
) {
4722 job_log(j
, LOG_NOTICE
, "About to enter kernel debugger because of Mach port: 0x%x", ms
->port
);
4723 job_assumes(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
) == KERN_SUCCESS
);
4726 if (ms
->recv
&& job_assumes(j
, !ms
->isActive
)) {
4727 job_assumes(j
, launchd_mport_close_recv(ms
->port
) == KERN_SUCCESS
);
4730 job_assumes(j
, launchd_mport_deallocate(ms
->port
) == KERN_SUCCESS
);
4732 if (ms
->port
== the_exception_server
) {
4733 the_exception_server
= 0;
4736 job_log(j
, LOG_INFO
, "Mach service deleted%s: %s", port_died
? " (port died)" : "", ms
->name
);
4738 if (ms
->special_port_num
) {
4739 SLIST_REMOVE(&special_ports
, ms
, machservice
, special_port_sle
);
4742 SLIST_REMOVE(&j
->machservices
, ms
, machservice
, sle
);
4743 LIST_REMOVE(ms
, name_hash_sle
);
4744 LIST_REMOVE(ms
, port_hash_sle
);
4750 machservice_request_notifications(struct machservice
*ms
)
4752 mach_msg_id_t which
= MACH_NOTIFY_DEAD_NAME
;
4754 ms
->isActive
= true;
4757 which
= MACH_NOTIFY_PORT_DESTROYED
;
4758 job_checkin(ms
->job
);
4761 job_assumes(ms
->job
, launchd_mport_notify_req(ms
->port
, which
) == KERN_SUCCESS
);
4764 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
4765 #define END_OF(x) (&(x)[NELEM(x)])
/*
 * Split a command string into an argv vector.  Whitespace separates
 * arguments; double quotes group an argument.  The result is a single
 * malloc()'d block: the pointer array followed by the string storage —
 * the caller frees it with one free().  Returns NULL for an empty string
 * or allocation failure.
 *
 * NOTE(review): reconstructed from a garbled extraction — the interior of
 * the tokenizing loop (escape/terminator advancement) was missing and is
 * restored per upstream launchd; verify the quoting behavior carefully.
 */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		while (isspace(*cp))
			cp++;
		term = (*cp == '"') ? *cp++ : '\0';
		if (nargs < NELEM(argv)) {
			argv[nargs++] = argp;
		}
		while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args)) {
			if (*cp == '\\') {
				cp++;
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		*argp++ = '\0';
		if (*cp) {
			cp++;
		}
	}

	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	/* One allocation: pointer table first, then the copied strings. */
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!launchd_assumes(argv_ret != NULL)) {
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}

	argv_ret[i] = NULL;

	return argv_ret;
}
4818 job_checkin(job_t j
)
4820 j
->checkedin
= true;
4824 job_ack_port_destruction(mach_port_t p
)
4826 struct machservice
*ms
;
4828 LIST_FOREACH(ms
, &port_hash
[HASH_PORT(p
)], port_hash_sle
) {
4829 if (ms
->recv
&& (ms
->port
== p
)) {
4838 ms
->isActive
= false;
4841 machservice_resetport(ms
->job
, ms
);
4844 job_log(ms
->job
, LOG_DEBUG
, "Receive right returned to us: %s", ms
->name
);
4845 job_dispatch(ms
->job
, false);
4847 root_jobmgr
= jobmgr_do_garbage_collection(root_jobmgr
);
4853 job_ack_no_senders(job_t j
)
4855 j
->priv_port_has_senders
= false;
4857 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
4860 job_log(j
, LOG_DEBUG
, "No more senders on privileged Mach bootstrap port");
4862 job_dispatch(j
, false);
4868 if (job_assumes(j
, j
->mgr
!= NULL
)) {
4876 job_is_anonymous(job_t j
)
4878 return j
->anonymous
;
4882 job_force_sampletool(job_t j
)
4885 char logfile
[PATH_MAX
];
4887 char *sample_args
[] = { "sample", pidstr
, "1", "-mayDie", "-file", logfile
, NULL
};
4888 char *contents
= NULL
;
4889 int logfile_fd
= -1;
4890 int console_fd
= -1;
4894 if (!debug_shutdown_hangs
) {
4898 snprintf(pidstr
, sizeof(pidstr
), "%u", j
->p
);
4899 snprintf(logfile
, sizeof(logfile
), SHUTDOWN_LOG_DIR
"/%s-%u.sample.txt", j
->label
, j
->p
);
4901 if (!job_assumes(j
, unlink(logfile
) != -1 || errno
== ENOENT
)) {
4906 * This will stall launchd for as long as the 'sample' tool runs.
4908 * We didn't give the 'sample' tool a bootstrap port, so it therefore
4909 * can't deadlock against launchd.
4911 if (!job_assumes(j
, (errno
= posix_spawnp(&sp
, sample_args
[0], NULL
, NULL
, sample_args
, environ
)) == 0)) {
4915 job_log(j
, LOG_DEBUG
, "Waiting for 'sample' to finish.");
4917 if (!job_assumes(j
, waitpid(sp
, &wstatus
, 0) != -1)) {
4922 * This won't work if the VFS or filesystems are sick:
4926 if (!job_assumes(j
, WIFEXITED(wstatus
) && WEXITSTATUS(wstatus
) == 0)) {
4930 if (!job_assumes(j
, (logfile_fd
= open(logfile
, O_RDONLY
|O_NOCTTY
)) != -1)) {
4934 if (!job_assumes(j
, (console_fd
= open(_PATH_CONSOLE
, O_WRONLY
|O_APPEND
|O_NOCTTY
)) != -1)) {
4938 if (!job_assumes(j
, fstat(logfile_fd
, &sb
) != -1)) {
4942 contents
= malloc(sb
.st_size
);
4944 if (!job_assumes(j
, contents
!= NULL
)) {
4948 if (!job_assumes(j
, read(logfile_fd
, contents
, sb
.st_size
) == sb
.st_size
)) {
4952 job_assumes(j
, write(console_fd
, contents
, sb
.st_size
) == sb
.st_size
);
4959 if (logfile_fd
!= -1) {
4960 job_assumes(j
, runtime_fsync(logfile_fd
) != -1);
4961 job_assumes(j
, runtime_close(logfile_fd
) != -1);
4964 if (console_fd
!= -1) {
4965 job_assumes(j
, runtime_close(console_fd
) != -1);
4968 job_log(j
, LOG_DEBUG
, "Finished sampling.");
4972 semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
)
4974 struct semaphoreitem
*si
;
4975 size_t alloc_sz
= sizeof(struct semaphoreitem
);
4978 alloc_sz
+= strlen(what
) + 1;
4981 if (!job_assumes(j
, si
= calloc(1, alloc_sz
))) {
4989 strcpy(si
->what_init
, what
);
4992 SLIST_INSERT_HEAD(&j
->semaphores
, si
, sle
);
4994 semaphoreitem_runtime_mod_ref(si
, true);
5000 semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
)
5003 * External events need to be tracked.
5004 * Internal events do NOT need to be tracked.
5008 case SUCCESSFUL_EXIT
:
5010 case OTHER_JOB_ENABLED
:
5011 case OTHER_JOB_DISABLED
:
5012 case OTHER_JOB_ACTIVE
:
5013 case OTHER_JOB_INACTIVE
:
5027 semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
)
5029 semaphoreitem_runtime_mod_ref(si
, false);
5031 SLIST_REMOVE(&j
->semaphores
, si
, semaphoreitem
, sle
);
5034 job_assumes(j
, runtime_close(si
->fd
) != -1);
5041 semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
)
5043 struct semaphoreitem_dict_iter_context
*sdic
= context
;
5044 semaphore_reason_t why
;
5046 why
= launch_data_get_bool(obj
) ? sdic
->why_true
: sdic
->why_false
;
5048 semaphoreitem_new(sdic
->j
, why
, key
);
5052 semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
)
5054 struct semaphoreitem_dict_iter_context sdic
= { context
, 0, 0 };
5056 semaphore_reason_t why
;
5058 switch (launch_data_get_type(obj
)) {
5059 case LAUNCH_DATA_BOOL
:
5060 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE
) == 0) {
5061 why
= launch_data_get_bool(obj
) ? NETWORK_UP
: NETWORK_DOWN
;
5062 semaphoreitem_new(j
, why
, NULL
);
5063 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT
) == 0) {
5064 why
= launch_data_get_bool(obj
) ? SUCCESSFUL_EXIT
: FAILED_EXIT
;
5065 semaphoreitem_new(j
, why
, NULL
);
5066 j
->start_pending
= true;
5068 job_assumes(j
, false);
5071 case LAUNCH_DATA_DICTIONARY
:
5072 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE
) == 0) {
5073 sdic
.why_true
= PATH_EXISTS
;
5074 sdic
.why_false
= PATH_MISSING
;
5075 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE
) == 0) {
5076 sdic
.why_true
= OTHER_JOB_ACTIVE
;
5077 sdic
.why_false
= OTHER_JOB_INACTIVE
;
5078 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED
) == 0) {
5079 sdic
.why_true
= OTHER_JOB_ENABLED
;
5080 sdic
.why_false
= OTHER_JOB_DISABLED
;
5082 job_assumes(j
, false);
5086 launch_data_dict_iterate(obj
, semaphoreitem_setup_dict_iter
, &sdic
);
5089 job_assumes(j
, false);
5095 jobmgr_dispatch_all_semaphores(jobmgr_t jm
)
5101 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
5102 jobmgr_dispatch_all_semaphores(jmi
);
5105 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
5106 if (!SLIST_EMPTY(&ji
->semaphores
)) {
5107 job_dispatch(ji
, false);
/*
 * Compute the next absolute time matching a cron-style (month, mday, hour,
 * minute) spec, starting from one minute after now.  Any field may be -1
 * for "wildcard".  Each failed year is retried from January 1st 00:00.
 *
 * NOTE(review): reconstructed from a garbled extraction (the tm_min++
 * advance and the loop's isdst reset were missing); verify against
 * upstream launchd.
 */
time_t
cronemu(int mon, int mday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
		workingtm.tm_year++;
		workingtm.tm_mon = 0;
		workingtm.tm_mday = 1;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		workingtm.tm_isdst = -1;
	}

	return mktime(&workingtm);
}
/*
 * Compute the next absolute time falling on weekday `wday` (0=Sunday;
 * 7 is normalized to 0) at the given hour/minute spec, starting one minute
 * after now.  Advances day by day until both the weekday and the
 * hour/minute constraints are satisfiable.
 *
 * NOTE(review): reconstructed from a garbled extraction (the tm_min++
 * advance and the wday==7 normalization were missing); verify.
 */
time_t
cronemu_wday(int wday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	if (wday == 7) {
		wday = 0;
	}

	while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
		workingtm.tm_mday++;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		workingtm.tm_isdst = -1;
	}

	return mktime(&workingtm);
}
/*
 * Constrain *wtm to the next time matching month `mon` (or -1 = any) and
 * the finer mday/hour/min fields.  With a wildcard month, months are
 * advanced one at a time; mktime() normalization is used as a carry
 * detector — if the month changed under normalization, the year rolled
 * over and false is returned so the caller can retry.
 *
 * NOTE(review): reconstructed from a garbled extraction (the tm_mon++
 * advance, the mktime() call, and the wildcard prologue were missing);
 * verify against upstream.
 */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mon < wtm->tm_mon) {
		return false;
	}

	if (mon > wtm->tm_mon) {
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
/*
 * Constrain *wtm to the next time matching day-of-month `mday` (or -1 =
 * any) plus hour/min.  Wildcard days advance one at a time, using
 * mktime() normalization as a month-rollover (carry) detector.
 *
 * NOTE(review): reconstructed from a garbled extraction (the tm_mday++
 * advance, the mktime() call, and parts of the fixed-day branch were
 * missing); verify against upstream.
 */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mday < wtm->tm_mday) {
		return false;
	}

	if (mday > wtm->tm_mday) {
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
/*
 * Constrain *wtm to the next time matching hour `hour` (or -1 = any) plus
 * the minute spec.  Wildcard hours advance one at a time, using mktime()
 * normalization as a day-rollover (carry) detector.
 *
 * NOTE(review): reconstructed from a garbled extraction (the tm_hour++
 * advance, the mktime() call, and the tm_min reset in the fixed-hour
 * branch were missing); verify against upstream.
 */
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_min(&workingtm, min)) {
			workingtm.tm_hour++;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_hour;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_hour) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (hour < wtm->tm_hour) {
		return false;
	}

	if (hour > wtm->tm_hour) {
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
/*
 * Base of the cron-emulation recursion: constrain *wtm's minute field.
 * -1 matches any minute; a target earlier than the current minute fails
 * (the caller carries into the next hour); a later target advances tm_min.
 */
bool
cronemu_min(struct tm *wtm, int min)
{
	if (min == -1) {
		return true;
	}

	if (min < wtm->tm_min) {
		return false;
	}

	if (min > wtm->tm_min) {
		wtm->tm_min = min;
	}

	return true;
}
5285 job_mig_create_server(job_t j
, cmd_t server_cmd
, uid_t server_uid
, boolean_t on_demand
, mach_port_t
*server_portp
)
5290 #if TARGET_OS_EMBEDDED
5291 return BOOTSTRAP_NOT_PRIVILEGED
;
5294 if (!launchd_assumes(j
!= NULL
)) {
5295 return BOOTSTRAP_NO_MEMORY
;
5298 if (unlikely(j
->deny_job_creation
)) {
5299 return BOOTSTRAP_NOT_PRIVILEGED
;
5302 runtime_get_caller_creds(&ldc
);
5304 job_log(j
, LOG_DEBUG
, "Server create attempt: %s", server_cmd
);
5306 #define LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
5307 /* XXX - This code should go away once the per session launchd is integrated with the rest of the system */
5308 #ifdef LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
5309 if (getpid() == 1) {
5310 if (ldc
.euid
&& server_uid
&& (ldc
.euid
!= server_uid
)) {
5311 job_log(j
, LOG_WARNING
, "Server create: \"%s\": Will run as UID %d, not UID %d as they told us to",
5312 server_cmd
, ldc
.euid
, server_uid
);
5313 server_uid
= ldc
.euid
;
5318 if (server_uid
!= getuid()) {
5319 job_log(j
, LOG_WARNING
, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
5320 server_cmd
, getuid(), server_uid
);
5322 server_uid
= 0; /* zero means "do nothing" */
5325 js
= job_new_via_mach_init(j
, server_cmd
, server_uid
, on_demand
);
5328 return BOOTSTRAP_NO_MEMORY
;
5331 *server_portp
= js
->j_port
;
5332 return BOOTSTRAP_SUCCESS
;
5336 job_mig_send_signal(job_t j
, mach_port_t srp
, name_t targetlabel
, int sig
)
5341 if (!launchd_assumes(j
!= NULL
)) {
5342 return BOOTSTRAP_NO_MEMORY
;
5345 runtime_get_caller_creds(&ldc
);
5347 if (ldc
.euid
!= 0 && ldc
.euid
!= getuid()) {
5348 return BOOTSTRAP_NOT_PRIVILEGED
;
5351 if (!(otherj
= job_find(targetlabel
))) {
5352 return BOOTSTRAP_UNKNOWN_SERVICE
;
5355 if (sig
== VPROC_MAGIC_UNLOAD_SIGNAL
) {
5356 bool do_block
= otherj
->p
;
5358 if (otherj
->anonymous
) {
5359 return BOOTSTRAP_NOT_PRIVILEGED
;
5365 job_log(j
, LOG_DEBUG
, "Blocking MIG return of job_remove(): %s", otherj
->label
);
5366 /* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
5367 job_assumes(otherj
, waiting4removal_new(otherj
, srp
));
5368 return MIG_NO_REPLY
;
5372 } else if (otherj
->p
) {
5373 job_assumes(j
, runtime_kill(otherj
->p
, sig
) != -1);
5380 job_mig_log_forward(job_t j
, vm_offset_t inval
, mach_msg_type_number_t invalCnt
)
5384 if (!launchd_assumes(j
!= NULL
)) {
5385 return BOOTSTRAP_NO_MEMORY
;
5388 if (!job_assumes(j
, j
->per_user
)) {
5389 return BOOTSTRAP_NOT_PRIVILEGED
;
5392 runtime_get_caller_creds(&ldc
);
5394 return runtime_log_forward(ldc
.euid
, ldc
.egid
, inval
, invalCnt
);
5398 job_mig_log_drain(job_t j
, mach_port_t srp
, vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
5402 if (!launchd_assumes(j
!= NULL
)) {
5403 return BOOTSTRAP_NO_MEMORY
;
5406 runtime_get_caller_creds(&ldc
);
5409 return BOOTSTRAP_NOT_PRIVILEGED
;
5412 return runtime_log_drain(srp
, outval
, outvalCnt
);
5416 job_mig_swap_complex(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
,
5417 vm_offset_t inval
, mach_msg_type_number_t invalCnt
,
5418 vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
5421 launch_data_t input_obj
, output_obj
;
5422 size_t data_offset
= 0;
5426 runtime_get_caller_creds(&ldc
);
5428 if (!launchd_assumes(j
!= NULL
)) {
5429 return BOOTSTRAP_NO_MEMORY
;
5432 if (inkey
&& ldc
.euid
&& ldc
.euid
!= getuid()) {
5433 return BOOTSTRAP_NOT_PRIVILEGED
;
5436 if (inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
)) {
5440 if (inkey
&& outkey
) {
5441 action
= "Swapping";
5448 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
5450 *outvalCnt
= 20 * 1024 * 1024;
5451 mig_allocate(outval
, *outvalCnt
);
5452 if (!job_assumes(j
, *outval
!= 0)) {
5456 if (invalCnt
&& !job_assumes(j
, (input_obj
= launch_data_unpack((void *)inval
, invalCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
)) {
5461 case VPROC_GSK_ENVIRONMENT
:
5462 if (!job_assumes(j
, (output_obj
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
5465 jobmgr_export_env_from_other_jobs(j
->mgr
, output_obj
);
5466 if (!job_assumes(j
, launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
) != 0)) {
5469 launch_data_free(output_obj
);
5471 case VPROC_GSK_ALLJOBS
:
5472 if (!job_assumes(j
, (output_obj
= job_export_all()) != NULL
)) {
5475 ipc_revoke_fds(output_obj
);
5476 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
5477 if (!job_assumes(j
, packed_size
!= 0)) {
5480 launch_data_free(output_obj
);
5483 mig_deallocate(*outval
, *outvalCnt
);
5491 if (invalCnt
) switch (inkey
) {
5492 case VPROC_GSK_ENVIRONMENT
:
5493 job_assumes(j
, false);
5501 mig_deallocate(inval
, invalCnt
);
5507 mig_deallocate(*outval
, *outvalCnt
);
5513 job_mig_swap_integer(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
, int64_t inval
, int64_t *outval
)
5516 kern_return_t kr
= 0;
5520 runtime_get_caller_creds(&ldc
);
5522 if (!launchd_assumes(j
!= NULL
)) {
5523 return BOOTSTRAP_NO_MEMORY
;
5526 if (inkey
&& ldc
.euid
&& ldc
.euid
!= getuid()) {
5527 return BOOTSTRAP_NOT_PRIVILEGED
;
5530 if (inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
)) {
5534 if (inkey
&& outkey
) {
5535 action
= "Swapping";
5542 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
5545 case VPROC_GSK_LAST_EXIT_STATUS
:
5546 *outval
= j
->last_exit_status
;
5548 case VPROC_GSK_MGR_UID
:
5551 case VPROC_GSK_MGR_PID
:
5554 case VPROC_GSK_IS_MANAGED
:
5555 *outval
= j
->anonymous
? 0 : 1;
5557 case VPROC_GSK_BASIC_KEEPALIVE
:
5558 *outval
= !j
->ondemand
;
5560 case VPROC_GSK_START_INTERVAL
:
5561 *outval
= j
->start_interval
;
5563 case VPROC_GSK_IDLE_TIMEOUT
:
5564 *outval
= j
->timeout
;
5566 case VPROC_GSK_EXIT_TIMEOUT
:
5567 *outval
= j
->exit_timeout
;
5569 case VPROC_GSK_GLOBAL_LOG_MASK
:
5570 oldmask
= runtime_setlogmask(LOG_UPTO(LOG_DEBUG
));
5572 runtime_setlogmask(oldmask
);
5574 case VPROC_GSK_GLOBAL_UMASK
:
5588 case VPROC_GSK_GLOBAL_ON_DEMAND
:
5589 kr
= job_set_global_on_demand(j
, (bool)inval
) ? 0 : 1;
5591 case VPROC_GSK_BASIC_KEEPALIVE
:
5592 j
->ondemand
= !inval
;
5594 case VPROC_GSK_START_INTERVAL
:
5595 if ((uint64_t)inval
> UINT32_MAX
) {
5598 if (j
->start_interval
== 0) {
5601 /* Workaround 5225889 */
5602 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, j
) != -1);
5604 j
->start_interval
= inval
;
5605 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, j
->start_interval
, j
) != -1);
5606 } else if (j
->start_interval
) {
5607 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
) != -1);
5608 if (j
->start_interval
!= 0) {
5611 j
->start_interval
= 0;
5614 case VPROC_GSK_IDLE_TIMEOUT
:
5615 if ((unsigned int)inval
> 0) {
5619 case VPROC_GSK_EXIT_TIMEOUT
:
5620 if ((unsigned int)inval
> 0) {
5621 j
->exit_timeout
= inval
;
5624 case VPROC_GSK_GLOBAL_LOG_MASK
:
5625 runtime_setlogmask(inval
);
5627 case VPROC_GSK_GLOBAL_UMASK
:
5641 job_mig_post_fork_ping(job_t j
, task_t child_task
)
5643 struct machservice
*ms
;
5645 if (!launchd_assumes(j
!= NULL
)) {
5646 return BOOTSTRAP_NO_MEMORY
;
5649 job_log(j
, LOG_DEBUG
, "Post fork ping.");
5651 job_setup_exception_port(j
, child_task
);
5653 SLIST_FOREACH(ms
, &special_ports
, special_port_sle
) {
5654 if (j
->per_user
&& (ms
->special_port_num
!= TASK_ACCESS_PORT
)) {
5655 /* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
5659 errno
= task_set_special_port(child_task
, ms
->special_port_num
, ms
->port
);
5662 int desired_log_level
= LOG_ERR
;
5667 desired_log_level
= LOG_WARNING
;
5669 if (ms
->special_port_num
== TASK_SEATBELT_PORT
) {
5670 desired_log_level
= LOG_DEBUG
;
5674 job_log(j
, desired_log_level
, "Could not setup Mach task special port %u: %s", ms
->special_port_num
, mach_error_string(errno
));
5678 job_assumes(j
, launchd_mport_deallocate(child_task
) == KERN_SUCCESS
);
5684 job_mig_reboot2(job_t j
, uint64_t flags
)
5686 char who_started_the_reboot
[2048] = "";
5687 struct kinfo_proc kp
;
5691 if (!launchd_assumes(j
!= NULL
)) {
5692 return BOOTSTRAP_NO_MEMORY
;
5695 if (getpid() != 1) {
5696 return BOOTSTRAP_NOT_PRIVILEGED
;
5699 runtime_get_caller_creds(&ldc
);
5702 return BOOTSTRAP_NOT_PRIVILEGED
;
5705 for (pid_to_log
= ldc
.pid
; pid_to_log
; pid_to_log
= kp
.kp_eproc
.e_ppid
) {
5706 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, pid_to_log
};
5707 size_t who_offset
, len
= sizeof(kp
);
5709 if (!job_assumes(j
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1)) {
5713 who_offset
= strlen(who_started_the_reboot
);
5714 snprintf(who_started_the_reboot
+ who_offset
, sizeof(who_started_the_reboot
) - who_offset
,
5715 " %s[%u]%s", kp
.kp_proc
.p_comm
, pid_to_log
, kp
.kp_eproc
.e_ppid
? " ->" : "");
5718 root_jobmgr
->reboot_flags
= (int)flags
;
5722 job_log(j
, LOG_DEBUG
, "reboot2() initiated by:%s", who_started_the_reboot
);
5728 job_mig_getsocket(job_t j
, name_t spr
)
5730 if (!launchd_assumes(j
!= NULL
)) {
5731 return BOOTSTRAP_NO_MEMORY
;
5737 return BOOTSTRAP_NO_MEMORY
;
5740 strncpy(spr
, sockpath
, sizeof(name_t
));
5742 return BOOTSTRAP_SUCCESS
;
5746 job_mig_log(job_t j
, int pri
, int err
, logmsg_t msg
)
5748 if (!launchd_assumes(j
!= NULL
)) {
5749 return BOOTSTRAP_NO_MEMORY
;
5752 if ((errno
= err
)) {
5753 job_log_error(j
, pri
, "%s", msg
);
5755 job_log(j
, pri
, "%s", msg
);
5762 ensure_root_bkgd_setup(void)
5764 if (background_jobmgr
|| getpid() != 1) {
5768 if (!jobmgr_assumes(root_jobmgr
, (background_jobmgr
= jobmgr_new(root_jobmgr
, mach_task_self(), MACH_PORT_NULL
, false, VPROCMGR_SESSION_BACKGROUND
)) != NULL
)) {
5772 background_jobmgr
->req_port
= 0;
5773 jobmgr_assumes(root_jobmgr
, launchd_mport_make_send(background_jobmgr
->jm_port
) == KERN_SUCCESS
);
5777 job_mig_lookup_per_user_context(job_t j
, uid_t which_user
, mach_port_t
*up_cont
)
5782 #if TARGET_OS_EMBEDDED
5783 return BOOTSTRAP_NOT_PRIVILEGED
;
5786 if (!launchd_assumes(j
!= NULL
)) {
5787 return BOOTSTRAP_NO_MEMORY
;
5790 job_log(j
, LOG_DEBUG
, "Looking up per user launchd for UID: %u", which_user
);
5792 runtime_get_caller_creds(&ldc
);
5794 if (getpid() != 1) {
5795 job_log(j
, LOG_ERR
, "Only PID 1 supports per user launchd lookups.");
5796 return BOOTSTRAP_NOT_PRIVILEGED
;
5799 if (ldc
.euid
|| ldc
.uid
) {
5800 which_user
= ldc
.euid
? ldc
.euid
: ldc
.uid
;
5803 *up_cont
= MACH_PORT_NULL
;
5805 if (which_user
== 0) {
5806 ensure_root_bkgd_setup();
5808 *up_cont
= background_jobmgr
->jm_port
;
5813 LIST_FOREACH(ji
, &root_jobmgr
->jobs
, sle
) {
5814 if (!ji
->per_user
) {
5817 if (ji
->mach_uid
!= which_user
) {
5820 if (SLIST_EMPTY(&ji
->machservices
)) {
5823 if (!SLIST_FIRST(&ji
->machservices
)->per_user_hack
) {
5830 struct machservice
*ms
;
5833 job_log(j
, LOG_DEBUG
, "Creating per user launchd job for UID: %u", which_user
);
5835 sprintf(lbuf
, "com.apple.launchd.peruser.%u", which_user
);
5837 ji
= job_new(root_jobmgr
, lbuf
, "/sbin/launchd", NULL
);
5840 return BOOTSTRAP_NO_MEMORY
;
5843 ji
->mach_uid
= which_user
;
5844 ji
->per_user
= true;
5846 if ((ms
= machservice_new(ji
, lbuf
, up_cont
, false)) == NULL
) {
5848 return BOOTSTRAP_NO_MEMORY
;
5851 ms
->per_user_hack
= true;
5854 ji
= job_dispatch(ji
, false);
5856 job_log(j
, LOG_DEBUG
, "Per user launchd job found for UID: %u", which_user
);
5859 if (job_assumes(j
, ji
!= NULL
)) {
5860 *up_cont
= machservice_port(SLIST_FIRST(&ji
->machservices
));
5867 job_mig_check_in(job_t j
, name_t servicename
, mach_port_t
*serviceportp
)
5869 static pid_t last_warned_pid
= 0;
5870 struct machservice
*ms
;
5873 if (!launchd_assumes(j
!= NULL
)) {
5874 return BOOTSTRAP_NO_MEMORY
;
5877 runtime_get_caller_creds(&ldc
);
5879 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, true, 0);
5882 job_log(j
, LOG_DEBUG
, "Check-in of Mach service failed. Unknown: %s", servicename
);
5883 return BOOTSTRAP_UNKNOWN_SERVICE
;
5885 if (machservice_job(ms
) != j
) {
5886 if (last_warned_pid
!= ldc
.pid
) {
5887 job_log(j
, LOG_NOTICE
, "Check-in of Mach service failed. PID %d is not privileged: %s",
5888 ldc
.pid
, servicename
);
5889 last_warned_pid
= ldc
.pid
;
5891 return BOOTSTRAP_NOT_PRIVILEGED
;
5893 if (machservice_active(ms
)) {
5894 job_log(j
, LOG_WARNING
, "Check-in of Mach service failed. Already active: %s", servicename
);
5895 return BOOTSTRAP_SERVICE_ACTIVE
;
5898 machservice_request_notifications(ms
);
5900 job_log(j
, LOG_INFO
, "Check-in of service: %s", servicename
);
5902 *serviceportp
= machservice_port(ms
);
5903 return BOOTSTRAP_SUCCESS
;
5907 job_mig_register2(job_t j
, name_t servicename
, mach_port_t serviceport
, uint64_t flags
)
5909 struct machservice
*ms
;
5912 if (!launchd_assumes(j
!= NULL
)) {
5913 return BOOTSTRAP_NO_MEMORY
;
5916 runtime_get_caller_creds(&ldc
);
5919 job_log(j
, LOG_APPLEONLY
, "bootstrap_register() is deprecated. Service: %s", servicename
);
5922 job_log(j
, LOG_DEBUG
, "%sMach service registration attempt: %s", flags
& BOOTSTRAP_PER_PID_SERVICE
? "Per PID " : "", servicename
);
5924 /* 5641783 for the embedded hack */
5925 #if !TARGET_OS_EMBEDDED
5927 * From a per-user/session launchd's perspective, SecurityAgent (UID
5928 * 92) is a rogue application (not our UID, not root and not a child of
5929 * us). We'll have to reconcile this design friction at a later date.
5931 if (j
->anonymous
&& job_get_bs(j
)->parentmgr
== NULL
&& ldc
.uid
!= 0 && ldc
.uid
!= getuid() && ldc
.uid
!= 92) {
5932 if (getpid() == 1) {
5933 return VPROC_ERR_TRY_PER_USER
;
5935 return BOOTSTRAP_NOT_PRIVILEGED
;
5940 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, flags
& BOOTSTRAP_PER_PID_SERVICE
? ldc
.pid
: 0);
5943 if (machservice_job(ms
) != j
) {
5944 return BOOTSTRAP_NOT_PRIVILEGED
;
5946 if (machservice_active(ms
)) {
5947 job_log(j
, LOG_DEBUG
, "Mach service registration failed. Already active: %s", servicename
);
5948 return BOOTSTRAP_SERVICE_ACTIVE
;
5951 machservice_delete(j
, ms
, false);
5954 if (serviceport
!= MACH_PORT_NULL
) {
5955 if ((ms
= machservice_new(j
, servicename
, &serviceport
, flags
& BOOTSTRAP_PER_PID_SERVICE
? true : false))) {
5956 machservice_request_notifications(ms
);
5958 return BOOTSTRAP_NO_MEMORY
;
5962 return BOOTSTRAP_SUCCESS
;
5966 job_mig_look_up2(job_t j
, name_t servicename
, mach_port_t
*serviceportp
, mach_msg_type_name_t
*ptype
, pid_t target_pid
, uint64_t flags
)
5968 struct machservice
*ms
;
5972 if (!launchd_assumes(j
!= NULL
)) {
5973 return BOOTSTRAP_NO_MEMORY
;
5976 runtime_get_caller_creds(&ldc
);
5978 /* 5641783 for the embedded hack */
5979 #if !TARGET_OS_EMBEDDED
5980 if (getpid() == 1 && j
->anonymous
&& job_get_bs(j
)->parentmgr
== NULL
&& ldc
.uid
!= 0 && ldc
.euid
!= 0) {
5981 return VPROC_ERR_TRY_PER_USER
;
5985 if (!mspolicy_check(j
, servicename
, flags
& BOOTSTRAP_PER_PID_SERVICE
)) {
5986 job_log(j
, LOG_NOTICE
, "Policy denied Mach service lookup: %s", servicename
);
5987 return BOOTSTRAP_NOT_PRIVILEGED
;
5990 if (flags
& BOOTSTRAP_PER_PID_SERVICE
) {
5991 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, target_pid
);
5993 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, true, 0);
5996 if (ms
&& machservice_hidden(ms
) && !machservice_active(ms
)) {
5998 } else if (ms
&& ms
->per_user_hack
) {
6003 launchd_assumes(machservice_port(ms
) != MACH_PORT_NULL
);
6004 job_log(j
, LOG_DEBUG
, "%sMach service lookup: %s", flags
& BOOTSTRAP_PER_PID_SERVICE
? "Per PID " : "", servicename
);
6006 /* After Leopard ships, we should enable this */
6007 if (j
->lastlookup
== ms
&& j
->lastlookup_gennum
== ms
->gen_num
&& !j
->per_user
) {
6009 job_log(j
, LOG_APPLEONLY
, "Performance opportunity: Number of bootstrap_lookup(... \"%s\" ...) calls that should have been cached: %llu",
6010 servicename
, ms
->bad_perf_cnt
);
6013 j
->lastlookup_gennum
= ms
->gen_num
;
6015 *serviceportp
= machservice_port(ms
);
6016 *ptype
= MACH_MSG_TYPE_COPY_SEND
;
6017 kr
= BOOTSTRAP_SUCCESS
;
6018 } else if (!(flags
& BOOTSTRAP_PER_PID_SERVICE
) && (inherited_bootstrap_port
!= MACH_PORT_NULL
)) {
6019 job_log(j
, LOG_DEBUG
, "Mach service lookup forwarded: %s", servicename
);
6020 *ptype
= MACH_MSG_TYPE_MOVE_SEND
;
6021 kr
= bootstrap_look_up(inherited_bootstrap_port
, servicename
, serviceportp
);
6022 } else if (getpid() == 1 && j
->anonymous
&& ldc
.euid
>= 500 && strcasecmp(job_get_bs(j
)->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
6024 * 5240036 Should start background session when a lookup of CCacheServer occurs
6026 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
6027 * If we find a EUID that isn't root, we force it over to the per-user context.
6029 return VPROC_ERR_TRY_PER_USER
;
6031 job_log(j
, LOG_DEBUG
, "%sMach service lookup failed: %s", flags
& BOOTSTRAP_PER_PID_SERVICE
? "Per PID " : "", servicename
);
6032 kr
= BOOTSTRAP_UNKNOWN_SERVICE
;
6039 job_mig_parent(job_t j
, mach_port_t
*parentport
, mach_msg_type_name_t
*pptype
)
6041 if (!launchd_assumes(j
!= NULL
)) {
6042 return BOOTSTRAP_NO_MEMORY
;
6045 job_log(j
, LOG_DEBUG
, "Requested parent bootstrap port");
6046 jobmgr_t jm
= j
->mgr
;
6048 *pptype
= MACH_MSG_TYPE_MAKE_SEND
;
6050 if (jobmgr_parent(jm
)) {
6051 *parentport
= jobmgr_parent(jm
)->jm_port
;
6052 } else if (MACH_PORT_NULL
== inherited_bootstrap_port
) {
6053 *parentport
= jm
->jm_port
;
6055 *pptype
= MACH_MSG_TYPE_COPY_SEND
;
6056 *parentport
= inherited_bootstrap_port
;
6058 return BOOTSTRAP_SUCCESS
;
6062 job_mig_info(job_t j
, name_array_t
*servicenamesp
, unsigned int *servicenames_cnt
,
6063 bootstrap_status_array_t
*serviceactivesp
, unsigned int *serviceactives_cnt
)
6065 name_array_t service_names
= NULL
;
6066 bootstrap_status_array_t service_actives
= NULL
;
6067 unsigned int cnt
= 0, cnt2
= 0;
6068 struct machservice
*ms
;
6072 #if TARGET_OS_EMBEDDED
6073 return BOOTSTRAP_NOT_PRIVILEGED
;
6076 if (!launchd_assumes(j
!= NULL
)) {
6077 return BOOTSTRAP_NO_MEMORY
;
6082 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
6083 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
6094 mig_allocate((vm_address_t
*)&service_names
, cnt
* sizeof(service_names
[0]));
6095 if (!launchd_assumes(service_names
!= NULL
)) {
6099 mig_allocate((vm_address_t
*)&service_actives
, cnt
* sizeof(service_actives
[0]));
6100 if (!launchd_assumes(service_actives
!= NULL
)) {
6104 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
6105 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
6107 strlcpy(service_names
[cnt2
], machservice_name(ms
), sizeof(service_names
[0]));
6108 service_actives
[cnt2
] = machservice_status(ms
);
6114 launchd_assumes(cnt
== cnt2
);
6117 *servicenamesp
= service_names
;
6118 *serviceactivesp
= service_actives
;
6119 *servicenames_cnt
= *serviceactives_cnt
= cnt
;
6121 return BOOTSTRAP_SUCCESS
;
6124 if (service_names
) {
6125 mig_deallocate((vm_address_t
)service_names
, cnt
* sizeof(service_names
[0]));
6127 if (service_actives
) {
6128 mig_deallocate((vm_address_t
)service_actives
, cnt
* sizeof(service_actives
[0]));
6131 return BOOTSTRAP_NO_MEMORY
;
6135 job_reparent_hack(job_t j
, const char *where
)
6139 ensure_root_bkgd_setup();
6141 /* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
6142 if (where
== NULL
) {
6143 if (strcasecmp(j
->mgr
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
6144 where
= VPROCMGR_SESSION_LOGINWINDOW
;
6146 where
= VPROCMGR_SESSION_AQUA
;
6150 if (strcasecmp(j
->mgr
->name
, where
) == 0) {
6154 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
6155 if (jmi
->shutting_down
) {
6157 } else if (strcasecmp(jmi
->name
, where
) == 0) {
6159 } else if (strcasecmp(jmi
->name
, VPROCMGR_SESSION_BACKGROUND
) == 0 && getpid() == 1) {
6160 SLIST_FOREACH(jmi2
, &jmi
->submgrs
, sle
) {
6161 if (strcasecmp(jmi2
->name
, where
) == 0) {
6170 if (job_assumes(j
, jmi
!= NULL
)) {
6171 struct machservice
*msi
;
6173 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
6174 LIST_REMOVE(msi
, name_hash_sle
);
6177 LIST_REMOVE(j
, sle
);
6178 LIST_INSERT_HEAD(&jmi
->jobs
, j
, sle
);
6181 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
6182 LIST_INSERT_HEAD(&j
->mgr
->ms_hash
[hash_ms(msi
->name
)], msi
, name_hash_sle
);
6188 job_mig_move_subset(job_t j
, mach_port_t target_subset
, name_t session_type
)
6190 mach_msg_type_number_t l2l_i
, l2l_port_cnt
= 0;
6191 mach_port_array_t l2l_ports
= NULL
;
6192 mach_port_t reqport
, rcvright
;
6193 kern_return_t kr
= 1;
6194 launch_data_t out_obj_array
= NULL
;
6196 jobmgr_t jmr
= NULL
;
6198 #if TARGET_OS_EMBEDDED
6199 return BOOTSTRAP_NOT_PRIVILEGED
;
6202 if (!launchd_assumes(j
!= NULL
)) {
6203 return BOOTSTRAP_NO_MEMORY
;
6206 runtime_get_caller_creds(&ldc
);
6208 if (target_subset
== MACH_PORT_NULL
) {
6211 if (j
->mgr
->session_initialized
) {
6212 if (ldc
.uid
== 0 && getpid() == 1) {
6213 if (strcmp(j
->mgr
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
6216 LIST_FOREACH_SAFE(ji
, &j
->mgr
->jobs
, sle
, jn
) {
6217 if (!ji
->anonymous
) {
6222 ensure_root_bkgd_setup();
6224 SLIST_REMOVE(&j
->mgr
->parentmgr
->submgrs
, j
->mgr
, jobmgr_s
, sle
);
6225 j
->mgr
->parentmgr
= background_jobmgr
;
6226 SLIST_INSERT_HEAD(&j
->mgr
->parentmgr
->submgrs
, j
->mgr
, sle
);
6229 * We really should wait for all the jobs to die before proceeding. See 5351245 for more info.
6231 * We have hacked around this in job_find() by ignoring jobs that are pending removal.
6234 } else if (strcmp(j
->mgr
->name
, VPROCMGR_SESSION_AQUA
) == 0) {
6235 job_log(j
, LOG_DEBUG
, "Tried to move the Aqua session.");
6237 } else if (strcmp(j
->mgr
->name
, VPROCMGR_SESSION_BACKGROUND
) == 0) {
6238 job_log(j
, LOG_DEBUG
, "Tried to move the background session.");
6241 job_log(j
, LOG_ERR
, "Tried to initialize an already setup session!");
6242 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
6246 job_log(j
, LOG_ERR
, "Tried to initialize an already setup session!");
6247 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
6250 } else if (ldc
.uid
== 0 && getpid() == 1 && strcmp(session_type
, VPROCMGR_SESSION_STANDARDIO
) == 0) {
6251 ensure_root_bkgd_setup();
6253 SLIST_REMOVE(&j
->mgr
->parentmgr
->submgrs
, j
->mgr
, jobmgr_s
, sle
);
6254 j
->mgr
->parentmgr
= background_jobmgr
;
6255 SLIST_INSERT_HEAD(&j
->mgr
->parentmgr
->submgrs
, j
->mgr
, sle
);
6256 } else if (strcmp(session_type
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
6262 * We're working around LoginWindow and the WindowServer.
6264 * In practice, there is only one LoginWindow session. Unfortunately, for certain
6265 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
6266 * spawns a replacement loginwindow session before cleaning up the previous one.
6268 * We're going to use the creation of a new LoginWindow context as a clue that the
6269 * previous LoginWindow context is on the way out and therefore we should just
6270 * kick-start the shutdown of it.
6273 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
6274 if (jmi
->shutting_down
) {
6276 } else if (strcasecmp(jmi
->name
, session_type
) == 0) {
6277 jobmgr_shutdown(jmi
);
6283 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Renaming to: %s", session_type
);
6284 strcpy(j
->mgr
->name_init
, session_type
);
6286 if (job_assumes(j
, (j2
= jobmgr_init_session(j
->mgr
, session_type
, false)))) {
6287 job_assumes(j
, job_dispatch(j2
, true));
6292 } else if (job_mig_intran2(root_jobmgr
, target_subset
, ldc
.pid
)) {
6293 job_log(j
, LOG_ERR
, "Moving a session to ourself is bogus.");
6295 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
6299 job_log(j
, LOG_DEBUG
, "Move subset attempt: 0x%x", target_subset
);
6301 errno
= kr
= _vproc_grab_subset(target_subset
, &reqport
, &rcvright
, &out_obj_array
, &l2l_ports
, &l2l_port_cnt
);
6303 if (!job_assumes(j
, kr
== 0)) {
6307 launchd_assert(launch_data_array_get_count(out_obj_array
) == l2l_port_cnt
);
6309 if (!job_assumes(j
, (jmr
= jobmgr_new(j
->mgr
, reqport
, rcvright
, false, session_type
)) != NULL
)) {
6310 kr
= BOOTSTRAP_NO_MEMORY
;
6314 for (l2l_i
= 0; l2l_i
< l2l_port_cnt
; l2l_i
++) {
6315 launch_data_t tmp
, obj_at_idx
;
6316 struct machservice
*ms
;
6317 job_t j_for_service
;
6318 const char *serv_name
;
6322 job_assumes(j
, obj_at_idx
= launch_data_array_get_index(out_obj_array
, l2l_i
));
6323 job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PID
));
6324 target_pid
= (pid_t
)launch_data_get_integer(tmp
);
6325 job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PERPID
));
6326 serv_perpid
= launch_data_get_bool(tmp
);
6327 job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_NAME
));
6328 serv_name
= launch_data_get_string(tmp
);
6330 j_for_service
= jobmgr_find_by_pid(jmr
, target_pid
, true);
6332 if (!j_for_service
) {
6333 /* The PID probably exited */
6334 job_assumes(j
, launchd_mport_deallocate(l2l_ports
[l2l_i
]) == KERN_SUCCESS
);
6338 if ((ms
= machservice_new(j_for_service
, serv_name
, &l2l_ports
[l2l_i
], serv_perpid
))) {
6339 machservice_request_notifications(ms
);
6346 if (out_obj_array
) {
6347 launch_data_free(out_obj_array
);
6351 mig_deallocate((vm_address_t
)l2l_ports
, l2l_port_cnt
* sizeof(l2l_ports
[0]));
6355 if (target_subset
) {
6356 job_assumes(j
, launchd_mport_deallocate(target_subset
) == KERN_SUCCESS
);
6359 jobmgr_shutdown(jmr
);
6366 job_mig_take_subset(job_t j
, mach_port_t
*reqport
, mach_port_t
*rcvright
,
6367 vm_offset_t
*outdata
, mach_msg_type_number_t
*outdataCnt
,
6368 mach_port_array_t
*portsp
, unsigned int *ports_cnt
)
6370 launch_data_t tmp_obj
, tmp_dict
, outdata_obj_array
= NULL
;
6371 mach_port_array_t ports
= NULL
;
6372 unsigned int cnt
= 0, cnt2
= 0;
6374 struct machservice
*ms
;
6378 #if TARGET_OS_EMBEDDED
6379 return BOOTSTRAP_NOT_PRIVILEGED
;
6382 if (!launchd_assumes(j
!= NULL
)) {
6383 return BOOTSTRAP_NO_MEMORY
;
6388 if (getpid() != 1) {
6389 job_log(j
, LOG_ERR
, "Only the system launchd will transfer Mach sub-bootstraps.");
6390 return BOOTSTRAP_NOT_PRIVILEGED
;
6391 } else if (jobmgr_parent(jm
) == NULL
) {
6392 job_log(j
, LOG_ERR
, "Root Mach bootstrap cannot be transferred.");
6393 return BOOTSTRAP_NOT_PRIVILEGED
;
6394 } else if (strcasecmp(jm
->name
, VPROCMGR_SESSION_AQUA
) == 0) {
6395 job_log(j
, LOG_ERR
, "Cannot transfer a setup GUI session.");
6396 return BOOTSTRAP_NOT_PRIVILEGED
;
6397 } else if (!j
->anonymous
) {
6398 job_log(j
, LOG_ERR
, "Only the anonymous job can transfer Mach sub-bootstraps.");
6399 return BOOTSTRAP_NOT_PRIVILEGED
;
6402 job_log(j
, LOG_DEBUG
, "Transferring sub-bootstrap to the per session launchd.");
6404 outdata_obj_array
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
6405 if (!job_assumes(j
, outdata_obj_array
)) {
6409 *outdataCnt
= 20 * 1024 * 1024;
6410 mig_allocate(outdata
, *outdataCnt
);
6411 if (!job_assumes(j
, *outdata
!= 0)) {
6415 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
6416 if (!ji
->anonymous
) {
6419 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
6424 mig_allocate((vm_address_t
*)&ports
, cnt
* sizeof(ports
[0]));
6425 if (!launchd_assumes(ports
!= NULL
)) {
6429 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
6430 if (!ji
->anonymous
) {
6434 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
6435 if (job_assumes(j
, (tmp_dict
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
6436 job_assumes(j
, launch_data_array_set_index(outdata_obj_array
, tmp_dict
, cnt2
));
6441 if (job_assumes(j
, (tmp_obj
= launch_data_new_string(machservice_name(ms
))))) {
6442 job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_NAME
));
6447 if (job_assumes(j
, (tmp_obj
= launch_data_new_integer((ms
->job
->p
))))) {
6448 job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PID
));
6453 if (job_assumes(j
, (tmp_obj
= launch_data_new_bool((ms
->per_pid
))))) {
6454 job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PERPID
));
6459 ports
[cnt2
] = machservice_port(ms
);
6461 /* Increment the send right by one so we can shutdown the jobmgr cleanly */
6462 jobmgr_assumes(jm
, (errno
= mach_port_mod_refs(mach_task_self(), ports
[cnt2
], MACH_PORT_RIGHT_SEND
, 1)) == 0);
6467 launchd_assumes(cnt
== cnt2
);
6469 packed_size
= launch_data_pack(outdata_obj_array
, (void *)*outdata
, *outdataCnt
, NULL
, NULL
);
6470 if (!job_assumes(j
, packed_size
!= 0)) {
6474 launch_data_free(outdata_obj_array
);
6479 *reqport
= jm
->req_port
;
6480 *rcvright
= jm
->jm_port
;
6485 workaround_5477111
= j
;
6487 jobmgr_shutdown(jm
);
6489 return BOOTSTRAP_SUCCESS
;
6492 if (outdata_obj_array
) {
6493 launch_data_free(outdata_obj_array
);
6496 mig_deallocate(*outdata
, *outdataCnt
);
6499 mig_deallocate((vm_address_t
)ports
, cnt
* sizeof(ports
[0]));
6502 return BOOTSTRAP_NO_MEMORY
;
6506 job_mig_subset(job_t j
, mach_port_t requestorport
, mach_port_t
*subsetportp
)
6511 if (!launchd_assumes(j
!= NULL
)) {
6512 return BOOTSTRAP_NO_MEMORY
;
6517 while ((jmr
= jobmgr_parent(jmr
)) != NULL
) {
6521 /* Since we use recursion, we need an artificial depth for subsets */
6522 if (bsdepth
> 100) {
6523 job_log(j
, LOG_ERR
, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth
);
6524 return BOOTSTRAP_NO_MEMORY
;
6527 if ((jmr
= jobmgr_new(j
->mgr
, requestorport
, MACH_PORT_NULL
, false, NULL
)) == NULL
) {
6528 if (requestorport
== MACH_PORT_NULL
) {
6529 return BOOTSTRAP_NOT_PRIVILEGED
;
6531 return BOOTSTRAP_NO_MEMORY
;
6534 *subsetportp
= jmr
->jm_port
;
6535 return BOOTSTRAP_SUCCESS
;
6539 job_mig_create_service(job_t j
, name_t servicename
, mach_port_t
*serviceportp
)
6541 struct machservice
*ms
;
6543 if (!launchd_assumes(j
!= NULL
)) {
6544 return BOOTSTRAP_NO_MEMORY
;
6547 if (job_prog(j
)[0] == '\0') {
6548 job_log(j
, LOG_ERR
, "Mach service creation requires a target server: %s", servicename
);
6549 return BOOTSTRAP_NOT_PRIVILEGED
;
6552 if (!j
->legacy_mach_job
) {
6553 job_log(j
, LOG_ERR
, "bootstrap_create_service() is only allowed against legacy Mach jobs: %s", servicename
);
6554 return BOOTSTRAP_NOT_PRIVILEGED
;
6557 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, 0);
6559 job_log(j
, LOG_DEBUG
, "Mach service creation attempt for failed. Already exists: %s", servicename
);
6560 return BOOTSTRAP_NAME_IN_USE
;
6565 *serviceportp
= MACH_PORT_NULL
;
6566 ms
= machservice_new(j
, servicename
, serviceportp
, false);
6568 if (!launchd_assumes(ms
!= NULL
)) {
6572 return BOOTSTRAP_SUCCESS
;
6575 launchd_assumes(launchd_mport_close_recv(*serviceportp
) == KERN_SUCCESS
);
6576 return BOOTSTRAP_NO_MEMORY
;
6580 job_mig_embedded_wait(job_t j
, name_t targetlabel
, integer_t
*waitstatus
)
6584 if (!launchd_assumes(j
!= NULL
)) {
6585 return BOOTSTRAP_NO_MEMORY
;
6588 if (unlikely(!(otherj
= job_find(targetlabel
)))) {
6589 return BOOTSTRAP_UNKNOWN_SERVICE
;
6592 *waitstatus
= j
->last_exit_status
;
6598 job_mig_embedded_kickstart(job_t j
, name_t targetlabel
, pid_t
*out_pid
, mach_port_t
*out_name_port
)
6604 if (!launchd_assumes(j
!= NULL
)) {
6605 return BOOTSTRAP_NO_MEMORY
;
6608 if (unlikely(!(otherj
= job_find(targetlabel
)))) {
6609 return BOOTSTRAP_UNKNOWN_SERVICE
;
6612 runtime_get_caller_creds(&ldc
);
6614 if (ldc
.euid
!= 0 && ldc
.euid
!= geteuid()
6615 #if TARGET_OS_EMBEDDED
6616 && j
->username
&& otherj
->username
6617 && strcmp(j
->username
, otherj
->username
) != 0
6620 return BOOTSTRAP_NOT_PRIVILEGED
;
6623 otherj
= job_dispatch(otherj
, true);
6625 if (!job_assumes(j
, otherj
&& otherj
->p
)) {
6626 return BOOTSTRAP_NO_MEMORY
;
6629 kr
= task_name_for_pid(mach_task_self(), otherj
->p
, out_name_port
);
6630 if (!job_assumes(j
, kr
== 0)) {
6634 *out_pid
= otherj
->p
;
6640 job_mig_wait(job_t j
, mach_port_t srp
, integer_t
*waitstatus
)
6642 if (!launchd_assumes(j
!= NULL
)) {
6643 return BOOTSTRAP_NO_MEMORY
;
6647 runtime_get_caller_creds(&ldc
);
6649 return job_handle_mpm_wait(j
, srp
, waitstatus
);
6653 job_mig_uncork_fork(job_t j
)
6655 if (!launchd_assumes(j
!= NULL
)) {
6656 return BOOTSTRAP_NO_MEMORY
;
6659 if (!j
->stall_before_exec
) {
6660 job_log(j
, LOG_WARNING
, "Attempt to uncork a job that isn't in the middle of a fork().");
6665 j
->stall_before_exec
= false;
6670 job_mig_set_service_policy(job_t j
, pid_t target_pid
, uint64_t flags
, name_t target_service
)
6675 if (!launchd_assumes(j
!= NULL
)) {
6676 return BOOTSTRAP_NO_MEMORY
;
6679 runtime_get_caller_creds(&ldc
);
6681 #if TARGET_OS_EMBEDDED
6683 return BOOTSTRAP_NOT_PRIVILEGED
;
6686 if( ldc
.euid
&& (ldc
.euid
!= getuid()) ) {
6687 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, target_pid
};
6688 struct kinfo_proc kp
;
6689 size_t len
= sizeof(kp
);
6691 job_assumes(j
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1);
6692 job_assumes(j
, len
== sizeof(kp
));
6694 uid_t kp_euid
= kp
.kp_eproc
.e_ucred
.cr_uid
;
6695 uid_t kp_uid
= kp
.kp_eproc
.e_pcred
.p_ruid
;
6697 if( ldc
.euid
== kp_euid
) {
6698 job_log(j
, LOG_DEBUG
, "Working around rdar://problem/5982485 and allowing job to set policy for PID %u.", target_pid
);
6700 job_log(j
, LOG_ERR
, "Denied Mach service policy update requested by UID/EUID %u/%u against PID %u with UID/EUID %u/%u due to mismatched credentials.", ldc
.uid
, ldc
.euid
, target_pid
, kp_uid
, kp_euid
);
6702 return BOOTSTRAP_NOT_PRIVILEGED
;
6707 if (!job_assumes(j
, (target_j
= jobmgr_find_by_pid(j
->mgr
, target_pid
, true)) != NULL
)) {
6708 return BOOTSTRAP_NO_MEMORY
;
6711 if (SLIST_EMPTY(&j
->mspolicies
)) {
6712 job_log(j
, LOG_DEBUG
, "Setting policy on job \"%s\" for Mach service: %s", target_j
->label
, target_service
);
6713 if (target_service
[0]) {
6714 job_assumes(j
, mspolicy_new(target_j
, target_service
, flags
& BOOTSTRAP_ALLOW_LOOKUP
, flags
& BOOTSTRAP_PER_PID_SERVICE
, false));
6716 target_j
->deny_unknown_mslookups
= !(flags
& BOOTSTRAP_ALLOW_LOOKUP
);
6717 target_j
->deny_job_creation
= (bool)(flags
& BOOTSTRAP_DENY_JOB_CREATION
);
6720 job_log(j
, LOG_WARNING
, "Jobs that have policies assigned to them may not set policies.");
6721 return BOOTSTRAP_NOT_PRIVILEGED
;
6728 job_mig_spawn(job_t j
, vm_offset_t indata
, mach_msg_type_number_t indataCnt
, pid_t
*child_pid
, mach_port_t
*obsvr_port
)
6730 launch_data_t input_obj
= NULL
;
6731 size_t data_offset
= 0;
6735 #if TARGET_OS_EMBEDDED
6736 return BOOTSTRAP_NOT_PRIVILEGED
;
6739 runtime_get_caller_creds(&ldc
);
6741 if (!launchd_assumes(j
!= NULL
)) {
6742 return BOOTSTRAP_NO_MEMORY
;
6745 if (unlikely(j
->deny_job_creation
)) {
6746 return BOOTSTRAP_NOT_PRIVILEGED
;
6749 if (getpid() == 1 && ldc
.euid
&& ldc
.uid
) {
6750 job_log(j
, LOG_DEBUG
, "Punting spawn to per-user-context");
6751 return VPROC_ERR_TRY_PER_USER
;
6754 if (!job_assumes(j
, indataCnt
!= 0)) {
6758 if (!job_assumes(j
, (input_obj
= launch_data_unpack((void *)indata
, indataCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
)) {
6762 jr
= jobmgr_import2(j
->mgr
, input_obj
);
6764 if (!job_assumes(j
, jr
!= NULL
)) {
6767 return BOOTSTRAP_NAME_IN_USE
;
6769 return BOOTSTRAP_NO_MEMORY
;
6773 job_reparent_hack(jr
, NULL
);
6775 if (getpid() == 1) {
6776 jr
->mach_uid
= ldc
.uid
;
6779 jr
->unload_at_exit
= true;
6780 jr
->wait4pipe_eof
= true;
6781 jr
->abandon_pg
= true;
6782 jr
->stall_before_exec
= jr
->wait4debugger
;
6783 jr
->wait4debugger
= false;
6785 jr
= job_dispatch(jr
, true);
6787 if (!job_assumes(j
, jr
!= NULL
)) {
6788 return BOOTSTRAP_NO_MEMORY
;
6791 if (!job_assumes(jr
, jr
->p
)) {
6793 return BOOTSTRAP_NO_MEMORY
;
6796 if (!job_setup_machport(jr
)) {
6798 return BOOTSTRAP_NO_MEMORY
;
6801 job_log(jr
, LOG_DEBUG
, "Spawned by PID %u: %s", j
->p
, j
->label
);
6804 *obsvr_port
= jr
->j_port
;
6806 mig_deallocate(indata
, indataCnt
);
6808 return BOOTSTRAP_SUCCESS
;
6812 jobmgr_init(bool sflag
)
6814 const char *root_session_type
= getpid() == 1 ? VPROCMGR_SESSION_SYSTEM
: VPROCMGR_SESSION_BACKGROUND
;
6816 launchd_assert((root_jobmgr
= jobmgr_new(NULL
, MACH_PORT_NULL
, MACH_PORT_NULL
, sflag
, root_session_type
)) != NULL
);
size_t
our_strhash(const char *s)
{
	/*
	 * djb2 string hash (first reported by Dan Bernstein in comp.lang.c):
	 * start at 5381, then r = r * 33 + c for each byte of the string.
	 */
	size_t r = 5381;
	size_t c;

	for (c = *s; c != 0; c = *++s) {
		r = (r << 5) + r + c; /* r * 33 + c */
	}

	return r;
}
6836 hash_label(const char *label
)
6838 return our_strhash(label
) % LABEL_HASH_SIZE
;
6842 hash_ms(const char *msstr
)
6844 return our_strhash(msstr
) % MACHSERVICE_HASH_SIZE
;
6848 mspolicy_copy(job_t j_to
, job_t j_from
)
6850 struct mspolicy
*msp
;
6852 SLIST_FOREACH(msp
, &j_from
->mspolicies
, sle
) {
6853 if (!mspolicy_new(j_to
, msp
->name
, msp
->allow
, msp
->per_pid
, true)) {
6862 mspolicy_new(job_t j
, const char *name
, bool allow
, bool pid_local
, bool skip_check
)
6864 struct mspolicy
*msp
;
6866 if (!skip_check
) SLIST_FOREACH(msp
, &j
->mspolicies
, sle
) {
6867 if (msp
->per_pid
!= pid_local
) {
6869 } else if (strcmp(msp
->name
, name
) == 0) {
6874 if ((msp
= calloc(1, sizeof(struct mspolicy
) + strlen(name
) + 1)) == NULL
) {
6878 strcpy((char *)msp
->name
, name
);
6879 msp
->per_pid
= pid_local
;
6882 SLIST_INSERT_HEAD(&j
->mspolicies
, msp
, sle
);
6888 mspolicy_setup(launch_data_t obj
, const char *key
, void *context
)
6892 if (launch_data_get_type(obj
) != LAUNCH_DATA_BOOL
) {
6893 job_log(j
, LOG_WARNING
, "Invalid object type for Mach service policy key: %s", key
);
6897 job_assumes(j
, mspolicy_new(j
, key
, launch_data_get_bool(obj
), false, false));
6901 mspolicy_check(job_t j
, const char *name
, bool pid_local
)
6903 struct mspolicy
*mspi
;
6905 SLIST_FOREACH(mspi
, &j
->mspolicies
, sle
) {
6906 if (mspi
->per_pid
!= pid_local
) {
6908 } else if (strcmp(mspi
->name
, name
) != 0) {
6914 return !j
->deny_unknown_mslookups
;
6918 mspolicy_delete(job_t j
, struct mspolicy
*msp
)
6920 SLIST_REMOVE(&j
->mspolicies
, msp
, mspolicy
, sle
);
6926 waiting4removal_new(job_t j
, mach_port_t rp
)
6928 struct waiting_for_removal
*w4r
;
6930 if (!job_assumes(j
, (w4r
= malloc(sizeof(struct waiting_for_removal
))) != NULL
)) {
6934 w4r
->reply_port
= rp
;
6936 SLIST_INSERT_HEAD(&j
->removal_watchers
, w4r
, sle
);
6942 waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
)
6944 job_assumes(j
, job_mig_send_signal_reply(w4r
->reply_port
, 0) == 0);
6946 SLIST_REMOVE(&j
->removal_watchers
, w4r
, waiting_for_removal
, sle
);
size_t
get_kern_max_proc(void)
{
	/* Ask the kernel for its process-count limit (kern.maxproc). */
	int mib[] = { CTL_KERN, KERN_MAXPROC };
	int max = 100; /* fallback value, used only if the sysctl fails */
	size_t max_sz = sizeof(max);

	launchd_assumes(sysctl(mib, 2, &max, &max_sz, NULL, 0) != -1);

	return max;
}
6968 launchd_assert(mach_timebase_info(&tbi
) == 0);
6970 if (stat("/AppleInternal", &sb
) == 0) {
6971 do_apple_internal_magic
= true;