/*
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
static const char *const __rcs_file_version__ = "$Revision: 25693 $";
#include "launchd_core_logic.h"
#include "launch_internal.h"
#include "launchd_helper.h"
#include <TargetConditionals.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/boolean.h>
#include <mach/message.h>
#include <mach/notify.h>
#include <mach/mig_errors.h>
#include <mach/mach_traps.h>
#include <mach/mach_interface.h>
#include <mach/host_info.h>
#include <mach/mach_host.h>
#include <mach/exception.h>
#include <mach/host_reboot.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/ucred.h>
#include <sys/fcntl.h>
#include <sys/reboot.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/nd6.h>
#include <bsm/libbsm.h>
#include <System/sys/spawn.h>
#include <malloc/malloc.h>
#define __APPLE_API_PRIVATE
#include <quarantine.h>
#if TARGET_OS_EMBEDDED
#include <sys/kern_memorystatus.h>

extern int gL1CacheEnabled;

/* To make my life easier. */
typedef struct jetsam_priority_entry {
    pid_t pid;
    uint32_t priority;
    uint32_t flags;
    int32_t hiwat_pages;
    int32_t hiwat_reserved1;
    int32_t hiwat_reserved2;
    int32_t hiwat_reserved3;
} jetsam_priority_entry_t;

enum {
    kJetsamFlagsFrontmost = (1 << 0),
    kJetsamFlagsKilled = (1 << 1),
};
#endif /* TARGET_OS_EMBEDDED */
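/* Illustrative use of the entries above (a sketch, assuming the private
 * memorystatus interface of this era; field names beyond those declared here
 * are hypothetical):
 *
 *     jetsam_priority_entry_t entries[count];
 *     // one entry per Jetsam-managed job, ordered by kill priority
 *     entries[i].pid = ji->p;
 *     entries[i].flags = ji->jetsam_frontmost ? kJetsamFlagsFrontmost : 0;
 *     sysctlbyname("kern.memorystatus_priority_list", NULL, NULL, entries, sizeof(entries));
 *
 * Under memory pressure the kernel then kills from the low-priority end of
 * the list first.
 */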
#include "launch_priv.h"
#include "launch_internal.h"
#include "bootstrap.h"
#include "bootstrap_priv.h"
#include "vproc_internal.h"
#include "launchd_runtime.h"
#include "launchd_unix_ipc.h"
#include "protocol_vproc.h"
#include "protocol_vprocServer.h"
#include "protocol_job_reply.h"
#include "protocol_job_forward.h"
#include "mach_excServer.h"
#if !TARGET_OS_EMBEDDED
#include "domainServer.h"
#endif /* !TARGET_OS_EMBEDDED */
#include "eventsServer.h"
#ifndef POSIX_SPAWN_OSX_TALAPP_START
#define POSIX_SPAWN_OSX_TALAPP_START 0x0400
#endif

#ifndef POSIX_SPAWN_OSX_WIDGET_START
#define POSIX_SPAWN_OSX_WIDGET_START 0x0800
#endif

#ifndef POSIX_SPAWN_IOS_APP_START
#define POSIX_SPAWN_IOS_APP_START 0x1000
#endif
/* LAUNCHD_DEFAULT_EXIT_TIMEOUT
 * If the job hasn't exited in the given number of seconds after sending
 * it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
 */
#define LAUNCHD_MIN_JOB_RUN_TIME 10
#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
#define LAUNCHD_SIGKILL_TIMER 2
#define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
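/* Timeline sketch, assuming the defaults above and a job that ignores SIGTERM:
 * at t=0s job_stop() sends SIGTERM and arms a one-shot EVFILT_TIMER for
 * exit_timeout seconds; at t=20s the timer fires and the job is SIGKILLed.
 * A job may raise ExitTimeOut in its plist, or set it to 0 for an infinite
 * timeout (see the "infinite exit timeout" branch in job_stop() below).
 */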
#define SHUTDOWN_LOG_DIR "/var/log/shutdown"

#define TAKE_SUBSET_NAME "TakeSubsetName"
#define TAKE_SUBSET_PID "TakeSubsetPID"
#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"

#define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
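/* Why this works: a power of two has exactly one bit set, so v & (v - 1)
 * clears that bit and yields 0. For example, 32 & 31 == 0b100000 & 0b011111 == 0,
 * while 36 & 35 != 0. The trailing "&& v" rejects v == 0, which would
 * otherwise pass the first test.
 */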
extern char **environ;
struct waiting_for_removal {
    SLIST_ENTRY(waiting_for_removal) sle;
    mach_port_t reply_port;
};

static bool waiting4removal_new(job_t j, mach_port_t rp);
static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
struct machservice {
    SLIST_ENTRY(machservice) sle;
    SLIST_ENTRY(machservice) special_port_sle;
    LIST_ENTRY(machservice) name_hash_sle;
    LIST_ENTRY(machservice) port_hash_sle;
    struct machservice *alias;
    job_t job;
    unsigned int gen_num;
    mach_port_name_t port;
    unsigned int
        isActive:1,
        reset:1,
        recv:1,
        hide:1,
        kUNCServer:1,
        per_user_hack:1,
        debug_on_close:1,
        per_pid:1,
        delete_on_destruction:1,
        drain_one_on_crash:1,
        drain_all_on_crash:1,
        event_update_port:1, /* The job which owns this port is the event monitor. */
        upfront:1, /* This service was declared in the plist. */
        event_channel:1, /* The job is to receive events on this channel. */
        /* Don't let the size of this field get too small. It has to be large
         * enough to represent the reasonable range of special port numbers.
         */
        special_port_num:18;
    const char name[0];
};

static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */
#define PORT_HASH_SIZE 32
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
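/* Since PORT_HASH_SIZE is a compile-time constant, the IS_POWER_OF_TWO branch
 * folds away at compile time. For a power-of-two table, (index & (size - 1))
 * and (index % size) pick the same bucket, but the mask avoids a division:
 * with size 32, index 70 maps to 70 & 31 == 70 % 32 == 6.
 */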
static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
static void machservice_setup(launch_data_t obj, const char *key, void *context);
static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
static void machservice_resetport(job_t j, struct machservice *ms);
static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
static void machservice_ignore(job_t j, struct machservice *ms);
static void machservice_watch(job_t j, struct machservice *ms);
static void machservice_delete(job_t j, struct machservice *, bool port_died);
static void machservice_request_notifications(struct machservice *);
static mach_port_t machservice_port(struct machservice *);
static job_t machservice_job(struct machservice *);
static bool machservice_hidden(struct machservice *);
static bool machservice_active(struct machservice *);
static const char *machservice_name(struct machservice *);
static bootstrap_status_t machservice_status(struct machservice *);
void machservice_drain_port(struct machservice *);
static struct machservice *xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p);
struct socketgroup {
    SLIST_ENTRY(socketgroup) sle;
    int *fds;
    unsigned int junkfds:1, fd_cnt:31;
    union {
        const char name[0];
        char name_init[0];
    };
};
static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds);
static void socketgroup_delete(job_t j, struct socketgroup *sg);
static void socketgroup_watch(job_t j, struct socketgroup *sg);
static void socketgroup_ignore(job_t j, struct socketgroup *sg);
static void socketgroup_callback(job_t j);
static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
struct calendarinterval {
    LIST_ENTRY(calendarinterval) global_sle;
    SLIST_ENTRY(calendarinterval) sle;
    job_t job;
    struct tm when;
    time_t when_next;
};

static LIST_HEAD(, calendarinterval) sorted_calendar_events;
static bool calendarinterval_new(job_t j, struct tm *w);
static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
static void calendarinterval_callback(void);
static void calendarinterval_sanity_check(void);
struct envitem {
    SLIST_ENTRY(envitem) sle;
    bool one_shot;
    char *value;
    union {
        const char key[0];
        char key_init[0];
    };
};
static bool envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot);
static void envitem_delete(job_t j, struct envitem *ei, bool global);
static void envitem_setup(launch_data_t obj, const char *key, void *context);
static void envitem_setup_one_shot(launch_data_t obj, const char *key, void *context);
struct limititem {
    SLIST_ENTRY(limititem) sle;
    struct rlimit lim;
    unsigned int setsoft:1, sethard:1, which:30;
};
static bool limititem_update(job_t j, int w, rlim_t r);
static void limititem_delete(job_t j, struct limititem *li);
static void limititem_setup(launch_data_t obj, const char *key, void *context);
#if HAVE_SANDBOX
static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
#endif

static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
typedef enum {
    NETWORK_UP = 1,
    NETWORK_DOWN,
    SUCCESSFUL_EXIT,
    FAILED_EXIT,
    CRASHED,
    DID_NOT_CRASH,
    PATH_EXISTS,
    PATH_MISSING,
    OTHER_JOB_ENABLED,
    OTHER_JOB_DISABLED,
    OTHER_JOB_ACTIVE,
    OTHER_JOB_INACTIVE,
    DIR_NOT_EMPTY,
    // FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
} semaphore_reason_t;
struct semaphoreitem {
    SLIST_ENTRY(semaphoreitem) sle;
    semaphore_reason_t why;
    bool watching_parent;
    int fd;
    union {
        const char what[0];
        char what_init[0];
    };
};
struct semaphoreitem_dict_iter_context {
    job_t j;
    semaphore_reason_t why_true;
    semaphore_reason_t why_false;
};
static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_callback(job_t j, struct kevent *kev);
static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
struct externalevent {
    LIST_ENTRY(externalevent) sys_le;
    LIST_ENTRY(externalevent) job_le;
    struct eventsystem *sys;

    uint64_t id;
    job_t job;
    bool state;
    bool wanted_state;
    launch_data_t event;

    char name[0];
};
struct externalevent_iter_ctx {
    job_t j;
    struct eventsystem *sys;
};
static bool externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event);
static void externalevent_delete(struct externalevent *ee);
static void externalevent_setup(launch_data_t obj, const char *key, void *context);
static struct externalevent *externalevent_find(const char *sysname, uint64_t id);
struct eventsystem {
    LIST_ENTRY(eventsystem) global_le;
    LIST_HEAD(, externalevent) events;
    uint64_t curid;
    char name[0];
};
static struct eventsystem *eventsystem_new(const char *name);
static void eventsystem_delete(struct eventsystem *sys);
static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
static struct eventsystem *eventsystem_find(const char *name);
static void eventsystem_ping(void);
#define ACTIVE_JOB_HASH_SIZE 32
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))

#define MACHSERVICE_HASH_SIZE 37

#define LABEL_HASH_SIZE 53
struct jobmgr_s {
    kq_callback kqjobmgr_callback;
    LIST_ENTRY(jobmgr_s) xpc_le;
    SLIST_ENTRY(jobmgr_s) sle;
    SLIST_HEAD(, jobmgr_s) submgrs;
    LIST_HEAD(, job_s) jobs;
    LIST_HEAD(, job_s) jetsam_jobs;

    /* For legacy reasons, we keep all job labels that are imported in the
     * root job manager's label hash. If a job manager is an XPC domain, then
     * it gets its own label hash that is separate from the "global" one
     * stored in the root job manager.
     */
    LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
    LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
    LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
    LIST_HEAD(, job_s) global_env_jobs;
    mach_port_t jm_port;
    mach_port_t req_port;
    jobmgr_t parentmgr;
    int reboot_flags;
    time_t shutdown_time;
    unsigned int global_on_demand_cnt;
    unsigned int normal_active_cnt;
    unsigned int jetsam_jobs_cnt;
    unsigned int
        shutting_down:1,
        session_initialized:1,
        killed_stray_jobs:1,
        monitor_shutdown:1,
        shutdown_jobs_dirtied:1,
        shutdown_jobs_cleaned:1,
        xpc_singleton:1;
    uint32_t properties;
    /* XPC-specific properties. */
    char owner[MAXCOMLEN];
    mach_port_t req_bsport;
    mach_port_t req_excport;
    mach_port_t req_asport;
    pid_t req_pid;
    uid_t req_euid;
    gid_t req_egid;
    au_asid_t req_asid;
    vm_offset_t req_ctx;
    mach_msg_type_number_t req_ctx_sz;
    mach_port_t req_rport;
    kern_return_t error;
    union {
        const char name[0];
        char name_init[0];
    };
};
/* Global XPC domains. */
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static jobmgr_t _s_xpc_system_domain;
static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
#define jobmgr_assumes(jm, e) \
    (unlikely(!(e)) ? jobmgr_log_bug(jm, __LINE__), false : true)
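/* The comma operator makes this macro both log and produce a value: when the
 * expression e fails, jobmgr_log_bug() runs for its side effect and the macro
 * evaluates to false, so callers can write
 *
 *     if (!jobmgr_assumes(jm, kr == KERN_SUCCESS)) { ... }
 *
 * and get a bug-log entry (tagged with __LINE__) plus normal error handling
 * in a single expression.
 */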
static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
static job_t xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
static jobmgr_t jobmgr_parent(jobmgr_t jm);
static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
static bool jobmgr_label_test(jobmgr_t jm, const char *str);
static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
static void jobmgr_remove(jobmgr_t jm);
static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
static void job_export_all2(jobmgr_t jm, launch_data_t where);
static void jobmgr_callback(void *obj, struct kevent *kev);
static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
/* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
static void jobmgr_log_bug(jobmgr_t jm, unsigned int line);

#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
#define AUTO_PICK_XPC_LABEL (const char *)(~2)
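/* These sentinels exploit the fact that ~0, ~1, and ~2 can never be valid
 * string pointers, so job_new() can distinguish "pick a label for me"
 * requests from real label strings with plain pointer comparisons, e.g.:
 *
 *     if (label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL) ...
 */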
struct suspended_peruser {
    LIST_ENTRY(suspended_peruser) sle;
    job_t j;
};
struct job_s {
    kq_callback kqjob_callback; /* MUST be first element of this structure for benefit of launchd's run loop. */
    LIST_ENTRY(job_s) sle;
    LIST_ENTRY(job_s) subjob_sle;
    LIST_ENTRY(job_s) needing_session_sle;
    LIST_ENTRY(job_s) jetsam_sle;
    LIST_ENTRY(job_s) pid_hash_sle;
    LIST_ENTRY(job_s) label_hash_sle;
    LIST_ENTRY(job_s) global_env_sle;
    SLIST_ENTRY(job_s) curious_jobs_sle;
    LIST_HEAD(, suspended_peruser) suspended_perusers;
    LIST_HEAD(, waiting_for_exit) exit_watchers;
    LIST_HEAD(, job_s) subjobs;
    LIST_HEAD(, externalevent) events;
    SLIST_HEAD(, socketgroup) sockets;
    SLIST_HEAD(, calendarinterval) cal_intervals;
    SLIST_HEAD(, envitem) global_env;
    SLIST_HEAD(, envitem) env;
    SLIST_HEAD(, limititem) limits;
    SLIST_HEAD(, machservice) machservices;
    SLIST_HEAD(, semaphoreitem) semaphores;
    SLIST_HEAD(, waiting_for_removal) removal_watchers;
    job_t alias;
    struct rusage ru;
    cpu_type_t *j_binpref;
    size_t j_binpref_cnt;
    mach_port_t j_port;
    mach_port_t exit_status_dest;
    mach_port_t exit_status_port;
    mach_port_t spawn_reply_port;
    uid_t mach_uid;
    jobmgr_t mgr;
    size_t argc;
    char **argv;
    char *prog;
    char *rootdir;
    char *workingdir;
    char *username;
    char *groupname;
    char *stdinpath;
    char *stdoutpath;
    char *stderrpath;
    char *alt_exc_handler;
    struct vproc_shmem_s *shmem;
    struct machservice *lastlookup;
    unsigned int lastlookup_gennum;
#if HAVE_SANDBOX
    char *seatbelt_profile;
    uint64_t seatbelt_flags;
#endif
#if HAVE_QUARANTINE
    void *quarantine_data;
    size_t quarantine_data_sz;
#endif
    pid_t p;
    int last_exit_status;
    int stdin_fd;
    int fork_fd;
    int log_redirect_fd;
    int nice;
    unsigned int timeout;
    int32_t jetsam_priority;
    int32_t jetsam_memlimit;
    int32_t main_thread_priority;
    uint32_t exit_timeout;
    uint64_t sent_signal_time;
    uint32_t min_run_time;
    uint32_t start_interval;
    uint32_t peruser_suspend_count; /* The number of jobs that have disabled this per-user launchd. */
    uuid_t instance_id;
    enum {
        J_TYPE_ANONYMOUS = 1,
        J_TYPE_LANCHSERVICES,
    } j_type;
    bool
        debug:1, /* man launchd.plist --> Debug */
        ondemand:1, /* man launchd.plist --> KeepAlive == false */
        session_create:1, /* man launchd.plist --> SessionCreate */
        low_pri_io:1, /* man launchd.plist --> LowPriorityIO */
        no_init_groups:1, /* man launchd.plist --> InitGroups */
        priv_port_has_senders:1, /* a legacy mach_init concept to make bootstrap_create_server/service() work */
        importing_global_env:1, /* a hack during job importing */
        importing_hard_limits:1, /* a hack during job importing */
        setmask:1, /* man launchd.plist --> Umask */
        anonymous:1, /* a process that launchd knows about, but isn't managed by launchd */
        checkedin:1, /* a legacy mach_init concept to detect sick jobs */
        legacy_mach_job:1, /* a job created via bootstrap_create_server() */
        legacy_LS_job:1, /* a job created via spawn_via_launchd() */
        inetcompat:1, /* a legacy job that wants inetd compatible semantics */
        inetcompat_wait:1, /* a twist on inetd compatibility */
        start_pending:1, /* an event fired and the job should start, but not necessarily right away */
        globargv:1, /* man launchd.plist --> EnableGlobbing */
        wait4debugger:1, /* man launchd.plist --> WaitForDebugger */
        wait4debugger_oneshot:1, /* One-shot WaitForDebugger. */
        internal_exc_handler:1, /* MachExceptionHandler == true */
        stall_before_exec:1, /* a hack to support an option of spawn_via_launchd() */
        only_once:1, /* man launchd.plist --> LaunchOnlyOnce. Note: 5465184 Rename this to "HopefullyNeverExits" */
        currently_ignored:1, /* Make job_ignore() / job_watch() work. If these calls were balanced, then this wouldn't be necessary. */
        forced_peers_to_demand_mode:1, /* A job that forced all other jobs to be temporarily launch-on-demand */
        setnice:1, /* man launchd.plist --> Nice */
        removal_pending:1, /* a job was asked to be unloaded/removed while running, we'll remove it after it exits */
        sent_sigkill:1, /* job_kill() was called */
        debug_before_kill:1, /* enter the kernel debugger before killing a job */
        weird_bootstrap:1, /* a hack that launchd+launchctl use during jobmgr_t creation */
        start_on_mount:1, /* man launchd.plist --> StartOnMount */
        per_user:1, /* This job is a per-user launchd managed by the PID 1 launchd */
        unload_at_mig_return:1, /* A job thoroughly confused launchd. We need to unload it ASAP */
        abandon_pg:1, /* man launchd.plist --> AbandonProcessGroup */
        ignore_pg_at_shutdown:1, /* During shutdown, do not send SIGTERM to stray processes in the process group of this job. */
        poll_for_vfs_changes:1, /* a hack to work around the fact that kqueues don't work on all filesystems */
        deny_job_creation:1, /* Don't let this job create new 'job_t' objects in launchd */
        kill_via_shmem:1, /* man launchd.plist --> EnableTransactions */
        sent_kill_via_shmem:1, /* We need to 'kill_via_shmem' once-and-only-once */
        clean_kill:1, /* The job was sent SIGKILL because it was clean. */
        kill_after_sample:1, /* The job is to be killed after sampling. */
        reap_after_trace:1, /* The job exited before sample did, so we should reap it after sample is done. */
        nosy:1, /* The job has an OtherJobEnabled KeepAlive criterion. */
        crashed:1, /* The job is the default Mach exception handler, and it crashed. */
        reaped:1, /* We've received NOTE_EXIT for the job. */
        stopped:1, /* job_stop() was called. */
        jetsam_frontmost:1, /* The job is considered "frontmost" by Jetsam. */
        needs_kickoff:1, /* The job is to be kept alive continuously, but it must be initially kicked off. */
        is_bootstrapper:1, /* The job is a bootstrapper. */
        has_console:1, /* The job owns the console. */
        embedded_special_privileges:1, /* The job runs as a non-root user on embedded but has select privileges of the root user. */
        did_exec:1, /* The job exec(2)ed successfully. */
        xpcproxy_did_exec:1, /* The job is an XPC service, and XPC proxy successfully exec(3)ed. */
        holds_ref:1, /* The (anonymous) job called vprocmgr_switch_to_session(). */
        jetsam_properties:1, /* The job has Jetsam limits in place. */
        dedicated_instance:1, /* This job was created as the result of a look up of a service provided by a per-lookup job. */
        multiple_instances:1, /* The job supports creating additional instances of itself. */
        former_subjob:1, /* The sub-job was already removed from the parent's list of sub-jobs. */
        event_monitor:1, /* The job is responsible for monitoring external events for this launchd. */
        removing:1, /* A lame hack. */
        disable_aslr:1, /* Disable ASLR when launching this job. */
        xpc_service:1, /* The job is an XPC Service. */
        shutdown_monitor:1, /* The job is the Performance team's shutdown monitor. */
        dirty_at_shutdown:1, /* We should open a transaction for the job when shutdown begins. */
        workaround9359725:1, /* The job was sent SIGKILL but did not exit in a timely fashion, indicating a kernel bug. */
        xpc_bootstrapper:1;
    mode_t mask;
    mach_port_t asport;
    /* Only set for per-user launchd's. */
    au_asid_t asid;
    uuid_t expected_audit_uuid;
    const char label[0];
};
static size_t hash_label(const char *label) __attribute__((pure));
static size_t hash_ms(const char *msstr) __attribute__((pure));
static SLIST_HEAD(, job_s) s_curious_jobs;
#define job_assumes(j, e) \
    (unlikely(!(e)) ? job_log_bug(j, __LINE__), false : true)
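/* Same pattern as jobmgr_assumes() above, scoped to a job. The (void) casts
 * sprinkled at call sites below, e.g.
 *
 *     (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
 *
 * mark spots where the result is deliberately ignored and the macro is used
 * purely for its log-on-failure side effect.
 */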
static void job_import_keys(launch_data_t obj, const char *key, void *context);
static void job_import_bool(job_t j, const char *key, bool value);
static void job_import_string(job_t j, const char *key, const char *value);
static void job_import_integer(job_t j, const char *key, long long value);
static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
static void job_import_array(job_t j, const char *key, launch_data_t value);
static void job_import_opaque(job_t j, const char *key, launch_data_t value);
static bool job_set_global_on_demand(job_t j, bool val);
static const char *job_active(job_t j);
static void job_watch(job_t j);
static void job_ignore(job_t j);
static void job_cleanup_after_tracer(job_t j);
static void job_reap(job_t j);
static bool job_useless(job_t j);
static bool job_keepalive(job_t j);
static void job_dispatch_curious_jobs(job_t j);
static void job_start(job_t j);
static void job_start_child(job_t j) __attribute__((noreturn));
static void job_setup_attributes(job_t j);
static bool job_setup_machport(job_t j);
static kern_return_t job_setup_exit_port(job_t j);
static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
static void job_postfork_become_user(job_t j);
static void job_postfork_test_user(job_t j);
static void job_log_pids_with_weird_uids(job_t j);
static void job_setup_exception_port(job_t j, task_t target_task);
static void job_callback(void *obj, struct kevent *kev);
static void job_callback_proc(job_t j, struct kevent *kev);
static void job_callback_timer(job_t j, void *ident);
static void job_callback_read(job_t j, int ident);
static void job_log_stray_pg(job_t j);
static void job_log_children_without_exec(job_t j);
static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static job_t job_new_alias(jobmgr_t jm, job_t src);
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new_subjob(job_t j, uuid_t identifier);
static void job_kill(job_t j);
static void job_uncork_fork(job_t j);
static void job_log_stdouterr(job_t j);
static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static void job_log_bug(job_t j, unsigned int line);
static void job_log_stdouterr2(job_t j, const char *msg, ...);
static void job_set_exception_port(job_t j, mach_port_t port);
static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
static void job_open_shutdown_transaction(job_t ji);
static void job_close_shutdown_transaction(job_t ji);
static const struct {
    const char *key;
    int val;
} launchd_keys2limits[] = {
    { LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
    { LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
    { LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
    { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
    { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
    { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
    { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
    { LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
    { LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
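/* This table drives limititem_setup(): when a job's plist contains, say,
 *
 *     <key>SoftResourceLimits</key>
 *     <dict><key>NumberOfFiles</key><integer>512</integer></dict>
 *
 * the dictionary walker matches "NumberOfFiles"
 * (LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE) against this array and records an
 * RLIMIT_NOFILE entry, which is later applied with setrlimit(2) in the child
 * after fork.
 */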
static time_t cronemu(int mon, int mday, int hour, int min);
static time_t cronemu_wday(int wday, int hour, int min);
static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
static bool cronemu_hour(struct tm *wtm, int hour, int min);
static bool cronemu_min(struct tm *wtm, int min);
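/* These emulate cron-style StartCalendarInterval matching: each parameter is
 * either a field value or -1 for "any". For example, cronemu(-1, -1, 3, 30)
 * yields the next wall-clock time whose hour is 3 and minute is 30, which
 * calendarinterval_setalarm() then arms as an EVFILT_TIMER kevent.
 */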
/* These functions are a total nightmare to get to through headers.
 * See rdar://problem/8223092.
 */
typedef __darwin_mach_port_t fileport_t;
#define FILEPORT_NULL ((fileport_t)0)
extern int fileport_makeport(int, fileport_t *);
extern int fileport_makefd(fileport_t);
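/* Usage sketch (an assumption; fileports are a private interface here): a
 * fileport wraps a file descriptor in a Mach send right so it can ride in a
 * Mach message and be turned back into a descriptor on the other side:
 *
 *     fileport_t fp = FILEPORT_NULL;
 *     if (fileport_makeport(fd, &fp) == 0) {
 *         // ... transfer fp in a Mach message ...
 *         int newfd = fileport_makefd(fp); // receiver's equivalent fd
 *     }
 */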
/* miscellaneous file local functions */
static size_t get_kern_max_proc(void);
static int dir_has_files(job_t j, const char *path);
static char **mach_cmd2argv(const char *string);
static size_t our_strhash(const char *s) __attribute__((pure));
static void extract_rcsid_substr(const char *i, char *o, size_t osz);

void eliminate_double_reboot(void);
/* file local globals */
static size_t total_children;
static size_t total_anon_children;
static mach_port_t the_exception_server;
static job_t workaround_5477111;
static LIST_HEAD(, job_s) s_needing_sessions;
static LIST_HEAD(, eventsystem) _s_event_systems;
static job_t _s_event_monitor;
static job_t _s_xpc_bootstrapper;
static job_t _s_shutdown_monitor;
static mach_port_t _s_event_update_port;
mach_port_t g_audit_session_port = MACH_PORT_NULL;
static uint32_t s_jetsam_sequence_id;

#if !TARGET_OS_EMBEDDED
static job_t s_embedded_privileged_job = (job_t)&root_jobmgr;
au_asid_t g_audit_session = AU_DEFAUDITSID;
#else
static job_t s_embedded_privileged_job = NULL;
pid_t g_audit_session = 0;
#endif

static int s_no_hang_fd = -1;

/* process wide globals */
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool g_shutdown_debugging = false;
bool g_verbose_boot = false;
bool g_embedded_privileged_action = false;
bool g_runtime_busy_time = false;
static void
job_ignore(job_t j)
{
    struct semaphoreitem *si;
    struct socketgroup *sg;
    struct machservice *ms;

    if (j->currently_ignored) {
        return;
    }

    job_log(j, LOG_DEBUG, "Ignoring...");

    j->currently_ignored = true;

    if (j->poll_for_vfs_changes) {
        j->poll_for_vfs_changes = false;
        (void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
    }

    SLIST_FOREACH(sg, &j->sockets, sle) {
        socketgroup_ignore(j, sg);
    }

    SLIST_FOREACH(ms, &j->machservices, sle) {
        machservice_ignore(j, ms);
    }

    SLIST_FOREACH(si, &j->semaphores, sle) {
        semaphoreitem_ignore(j, si);
    }
}
static void
job_watch(job_t j)
{
    struct semaphoreitem *si;
    struct socketgroup *sg;
    struct machservice *ms;

    if (!j->currently_ignored) {
        return;
    }

    job_log(j, LOG_DEBUG, "Watching...");

    j->currently_ignored = false;

    SLIST_FOREACH(sg, &j->sockets, sle) {
        socketgroup_watch(j, sg);
    }

    SLIST_FOREACH(ms, &j->machservices, sle) {
        machservice_watch(j, ms);
    }

    SLIST_FOREACH(si, &j->semaphores, sle) {
        semaphoreitem_watch(j, si);
    }
}
void
job_stop(job_t j)
{
    char extralog[100];
    int32_t newval = 1;

    if (unlikely(!j->p || j->stopped || j->anonymous)) {
        return;
    }

#if TARGET_OS_EMBEDDED
    if (g_embedded_privileged_action && s_embedded_privileged_job) {
        if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
            errno = EPERM;
            return;
        }

        if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
            errno = EPERM;
            return;
        }
    } else if (g_embedded_privileged_action) {
        errno = EPERM;
        return;
    }
#endif

    if (j->kill_via_shmem) {
        if (j->shmem) {
            if (!j->sent_kill_via_shmem) {
                j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
                newval = __sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
                j->sent_kill_via_shmem = true;
            } else {
                newval = j->shmem->vp_shmem_transaction_cnt;
            }
        } else {
            newval = -1;
        }
    }

    j->sent_signal_time = runtime_get_opaque_time();

    if (newval < 0) {
        j->clean_kill = true;
        job_kill(j);
    } else {
        (void)job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);

        if (j->exit_timeout) {
            (void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j) != -1);
        } else {
            job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
        }

        if (j->kill_via_shmem) {
            snprintf(extralog, sizeof(extralog), ": %d remaining transactions", newval + 1);
        } else {
            extralog[0] = '\0';
        }

        job_log(j, LOG_DEBUG, "Sent SIGTERM signal%s", extralog);
    }

    j->stopped = true;
}
launch_data_t
job_export(job_t j)
{
    launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
    size_t i;

    if (r == NULL) {
        return NULL;
    }

    if ((tmp = launch_data_new_string(j->label))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
    }
    if ((tmp = launch_data_new_string(j->mgr->name))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
    }
    if ((tmp = launch_data_new_bool(j->ondemand))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
    }
    if ((tmp = launch_data_new_integer(j->last_exit_status))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
    }
    if (j->p && (tmp = launch_data_new_integer(j->p))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
    }
    if ((tmp = launch_data_new_integer(j->timeout))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
    }
    if (j->prog && (tmp = launch_data_new_string(j->prog))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
    }
    if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
    }
    if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
    }
    if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
    }
    if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
        for (i = 0; i < j->argc; i++) {
            if ((tmp2 = launch_data_new_string(j->argv[i]))) {
                launch_data_array_set_index(tmp, tmp2, i);
            }
        }

        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
    }

    if (j->kill_via_shmem && (tmp = launch_data_new_bool(true))) {
        int32_t tmp_cnt = -1;

        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);

        if (j->shmem) {
            tmp_cnt = j->shmem->vp_shmem_transaction_cnt;
        }

        if (j->sent_kill_via_shmem) {
            tmp_cnt++;
        }

        if ((tmp = launch_data_new_integer(tmp_cnt))) {
            launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TRANSACTIONCOUNT);
        }
    }

    if (j->session_create && (tmp = launch_data_new_bool(true))) {
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
    }

    if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
        if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
            launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
        }
        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
    }

    if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
        struct socketgroup *sg;

        SLIST_FOREACH(sg, &j->sockets, sle) {
            if (sg->junkfds) {
                continue;
            }
            if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
                for (i = 0; i < sg->fd_cnt; i++) {
                    if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
                        launch_data_array_set_index(tmp2, tmp3, i);
                    }
                }
                launch_data_dict_insert(tmp, tmp2, sg->name);
            }
        }

        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
    }

    if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
        struct machservice *ms;

        tmp3 = NULL;

        SLIST_FOREACH(ms, &j->machservices, sle) {
            if (ms->per_pid) {
                if (tmp3 == NULL) {
                    tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
                }
                if (tmp3) {
                    tmp2 = launch_data_new_machport(MACH_PORT_NULL);
                    launch_data_dict_insert(tmp3, tmp2, ms->name);
                }
            } else {
                tmp2 = launch_data_new_machport(MACH_PORT_NULL);
                launch_data_dict_insert(tmp, tmp2, ms->name);
            }
        }

        launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

        if (tmp3) {
            launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
        }
    }

    return r;
}
static void
jobmgr_log_active_jobs(jobmgr_t jm)
{
    const char *why_active;
    jobmgr_t jmi;
    job_t ji;

    SLIST_FOREACH(jmi, &jm->submgrs, sle) {
        jobmgr_log_active_jobs(jmi);
    }

    LIST_FOREACH(ji, &jm->jobs, sle) {
        if ((why_active = job_active(ji))) {
            job_log(ji, LOG_DEBUG | LOG_CONSOLE, "%s", why_active);
        }
    }
}

static void
jobmgr_still_alive_with_check(jobmgr_t jm)
{
    jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
    jobmgr_log_active_jobs(jm);
}
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
    jobmgr_t jmi, jmn;
    jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

    jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

    struct tm curtime;
    (void)localtime_r(&jm->shutdown_time, &curtime);

    char date[26];
    (void)asctime_r(&curtime, date);
    /* Trim the new line that asctime_r(3) puts there for some reason. */
    date[24] = 0;

    if (jm == root_jobmgr && pid1_magic) {
        jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
    } else {
        jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
    }

    jm->shutting_down = true;

    SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
        jobmgr_shutdown(jmi);
    }

    if (jm->parentmgr == NULL && pid1_magic) {
        (void)jobmgr_assumes(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));

        /* Spawn the shutdown monitor. */
        if (_s_shutdown_monitor && !_s_shutdown_monitor->p) {
            job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
            job_dispatch(_s_shutdown_monitor, true);
        }
    }

    return jobmgr_do_garbage_collection(jm);
}
static void
jobmgr_remove(jobmgr_t jm)
{
    jobmgr_t jmi;
    job_t ji;

    jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
    if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
        while ((jmi = SLIST_FIRST(&jm->submgrs))) {
            jobmgr_remove(jmi);
        }
    }

    while ((ji = LIST_FIRST(&jm->jobs))) {
        if (!ji->anonymous && !job_assumes(ji, ji->p == 0)) {
            ji->p = 0;
        }
        job_remove(ji);
    }

    if (jm->req_port) {
        (void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
    }
    if (jm->jm_port) {
        (void)jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
    }

    if (jm->req_bsport) {
        (void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_bsport) == KERN_SUCCESS);
    }
    if (jm->req_excport) {
        (void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_excport) == KERN_SUCCESS);
    }
    if (jm->req_asport) {
        (void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_asport) == KERN_SUCCESS);
    }
#if !TARGET_OS_EMBEDDED
    if (jm->req_rport) {
        kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
        if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
            /* If the originator went away, the reply port will be a dead name,
             * and we expect this to fail.
             */
            (void)jobmgr_assumes(jm, kr == KERN_SUCCESS);
        }
    }
#endif /* !TARGET_OS_EMBEDDED */
    if (jm->req_ctx) {
        (void)jobmgr_assumes(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz) == KERN_SUCCESS);
    }

    time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
    struct tm curtime;
    (void)localtime_r(&ts, &curtime);

    char date[26];
    (void)asctime_r(&curtime, date);
    date[24] = 0;

    time_t delta = ts - jm->shutdown_time;
    if (jm == root_jobmgr && pid1_magic) {
        jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
        jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
    } else {
        jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
        jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
    }

    if (jm->parentmgr) {
        runtime_del_weak_ref();
        SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
    } else if (pid1_magic) {
        eliminate_double_reboot();
        launchd_log_vm_stats();
        jobmgr_log_stray_children(jm, true);
        jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
        (void)jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
    } else {
        jobmgr_log(jm, LOG_DEBUG, "About to exit");
        exit(EXIT_SUCCESS);
    }

    free(jm);
}
void
job_remove(job_t j)
{
    struct waiting_for_removal *w4r;
    struct calendarinterval *ci;
    struct semaphoreitem *si;
    struct socketgroup *sg;
    struct machservice *ms;
    struct limititem *li;
    struct envitem *ei;

    if (j->alias) {
        /* HACK: Egregious code duplication. But as with machservice_delete(),
         * job aliases can't (and shouldn't) have any complex behaviors
         * associated with them.
         */
        while ((ms = SLIST_FIRST(&j->machservices))) {
            machservice_delete(j, ms, false);
        }

        LIST_REMOVE(j, sle);
        LIST_REMOVE(j, label_hash_sle);
        free(j);
        return;
    }

#if TARGET_OS_EMBEDDED
    if (g_embedded_privileged_action && s_embedded_privileged_job) {
        if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
            errno = EPERM;
            return;
        }

        if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
            errno = EPERM;
            return;
        }
    } else if (g_embedded_privileged_action) {
        errno = EPERM;
        return;
    }
#endif

    /* Do this BEFORE we check and see whether the job is still active. If we're a
     * sub-job, we're being removed due to the parent job removing us. Therefore, the
     * parent job will free itself after this call completes. So if we defer removing
     * ourselves from the parent's list, we'll crash when we finally get around to it.
     */
    if (j->dedicated_instance && !j->former_subjob) {
        LIST_REMOVE(j, subjob_sle);
        j->former_subjob = true;
    }

    if (unlikely(j->p)) {
        if (j->anonymous) {
            job_reap(j);
        } else {
            job_log(j, LOG_DEBUG, "Removal pended until the job exits");

            if (!j->removal_pending) {
                j->removal_pending = true;
                job_stop(j);
            }

            return;
        }
    }

    if (!j->removing) {
        j->removing = true;
        job_dispatch_curious_jobs(j);
    }

    ipc_close_all_with_job(j);

    job_log(j, LOG_INFO, "Total rusage: utime %ld.%06u stime %ld.%06u maxrss %lu ixrss %lu idrss %lu isrss %lu minflt %lu majflt %lu nswap %lu inblock %lu oublock %lu msgsnd %lu msgrcv %lu nsignals %lu nvcsw %lu nivcsw %lu",
            j->ru.ru_utime.tv_sec, j->ru.ru_utime.tv_usec,
            j->ru.ru_stime.tv_sec, j->ru.ru_stime.tv_usec,
            j->ru.ru_maxrss, j->ru.ru_ixrss, j->ru.ru_idrss, j->ru.ru_isrss,
            j->ru.ru_minflt, j->ru.ru_majflt,
            j->ru.ru_nswap, j->ru.ru_inblock, j->ru.ru_oublock,
            j->ru.ru_msgsnd, j->ru.ru_msgrcv,
            j->ru.ru_nsignals, j->ru.ru_nvcsw, j->ru.ru_nivcsw);

    if (j->forced_peers_to_demand_mode) {
        job_set_global_on_demand(j, false);
    }

    if (!job_assumes(j, j->fork_fd == 0)) {
        (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
    }

    if (j->stdin_fd) {
        (void)job_assumes(j, runtime_close(j->stdin_fd) != -1);
    }

    if (!job_assumes(j, j->log_redirect_fd == 0)) {
        (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
    }

    if (j->j_port) {
        (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
    }

    while ((sg = SLIST_FIRST(&j->sockets))) {
        socketgroup_delete(j, sg);
    }
    while ((ci = SLIST_FIRST(&j->cal_intervals))) {
        calendarinterval_delete(j, ci);
    }
    while ((ei = SLIST_FIRST(&j->env))) {
        envitem_delete(j, ei, false);
    }
    while ((ei = SLIST_FIRST(&j->global_env))) {
        envitem_delete(j, ei, true);
    }
    while ((li = SLIST_FIRST(&j->limits))) {
        limititem_delete(j, li);
    }
    while ((ms = SLIST_FIRST(&j->machservices))) {
        machservice_delete(j, ms, false);
    }
    while ((si = SLIST_FIRST(&j->semaphores))) {
        semaphoreitem_delete(j, si);
    }
    while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
        waiting4removal_delete(j, w4r);
    }

    struct externalevent *eei = NULL;
    while ((eei = LIST_FIRST(&j->events))) {
        externalevent_delete(eei);
    }

#if 0
    /* Event systems exist independently of an actual monitor job. They're
     * created on-demand when a job has a LaunchEvents dictionary. So we
     * really don't need to get rid of them.
     */
    if (j->event_monitor) {
        struct eventsystem *esi = NULL;
        while ((esi = LIST_FIRST(&_s_event_systems))) {
            eventsystem_delete(esi);
        }
    }
#else
    if (false) {
        /* Make gcc happy. */
        eventsystem_delete(NULL);
    }
    if (j->event_monitor) {
        if (_s_event_update_port != MACH_PORT_NULL) {
            (void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
            _s_event_update_port = MACH_PORT_NULL;
        }
        _s_event_monitor = NULL;
    }
#endif

    if (j->prog) {
        free(j->prog);
    }
    if (j->argv) {
        free(j->argv);
    }
    if (j->rootdir) {
        free(j->rootdir);
    }
    if (j->workingdir) {
        free(j->workingdir);
    }
    if (j->username) {
        free(j->username);
    }
    if (j->groupname) {
        free(j->groupname);
    }
    if (j->stdinpath) {
        free(j->stdinpath);
    }
    if (j->stdoutpath) {
        free(j->stdoutpath);
    }
    if (j->stderrpath) {
        free(j->stderrpath);
    }
    if (j->alt_exc_handler) {
        free(j->alt_exc_handler);
    }
#if HAVE_SANDBOX
    if (j->seatbelt_profile) {
        free(j->seatbelt_profile);
    }
#endif
#if HAVE_QUARANTINE
    if (j->quarantine_data) {
        free(j->quarantine_data);
    }
#endif
    if (j->j_binpref) {
        free(j->j_binpref);
    }
    if (j->start_interval) {
        runtime_del_weak_ref();
        (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
    }
    if (j->poll_for_vfs_changes) {
        (void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
    }
    if (j->exit_timeout) {
        /* Not a big deal if this fails. It means that the timer's already been freed. */
        kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
    }
    if (j->jetsam_properties) {
        LIST_REMOVE(j, jetsam_sle);
        j->mgr->jetsam_jobs_cnt--;
    }
    if (j->asport != MACH_PORT_NULL) {
        (void)job_assumes(j, launchd_mport_deallocate(j->asport) == KERN_SUCCESS);
    }
    if (!uuid_is_null(j->expected_audit_uuid)) {
        LIST_REMOVE(j, needing_session_sle);
    }
    if (j->embedded_special_privileges) {
        s_embedded_privileged_job = NULL;
    }
    if (j->shutdown_monitor) {
        _s_shutdown_monitor = NULL;
    }

    kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

    LIST_REMOVE(j, sle);
    LIST_REMOVE(j, label_hash_sle);

    job_t ji = NULL;
    job_t jit = NULL;
    LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
        job_remove(ji);
    }

    job_log(j, LOG_DEBUG, "Removed");

    j->kqjob_callback = (kq_callback)0x8badf00d;
    free(j);
}
static void
socketgroup_setup(launch_data_t obj, const char *key, void *context)
{
    launch_data_t tmp_oai;
    job_t j = context;
    size_t i, fd_cnt = 1;
    int *fds;

    if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
        fd_cnt = launch_data_array_get_count(obj);
    }

    fds = alloca(fd_cnt * sizeof(int));

    for (i = 0; i < fd_cnt; i++) {
        if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
            tmp_oai = launch_data_array_get_index(obj, i);
        } else {
            tmp_oai = obj;
        }

        fds[i] = launch_data_get_fd(tmp_oai);
    }

    socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);

    ipc_revoke_fds(obj);
}
static bool
job_set_global_on_demand(job_t j, bool val)
{
    if (j->forced_peers_to_demand_mode && val) {
        return false;
    } else if (!j->forced_peers_to_demand_mode && !val) {
        return false;
    }

    if ((j->forced_peers_to_demand_mode = val)) {
        j->mgr->global_on_demand_cnt++;
    } else {
        j->mgr->global_on_demand_cnt--;
    }

    if (j->mgr->global_on_demand_cnt == 0) {
        jobmgr_dispatch_all(j->mgr, false);
    }

    return true;
}
static bool
job_setup_machport(job_t j)
{
    mach_msg_size_t mxmsgsz;

    if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
        goto out_bad;
    }

    /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
    mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
    if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
        mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
    }

    if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
        goto out_bad2;
    }

    if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
        (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
        goto out_bad;
    }

    return true;
out_bad2:
    (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
out_bad:
    return false;
}
static kern_return_t
job_setup_exit_port(job_t j)
{
    kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
    if (!job_assumes(j, kr == KERN_SUCCESS)) {
        return MACH_PORT_NULL;
    }

    struct mach_port_limits limits = {
        .mpl_qlimit = 1,
    };
    kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
    (void)job_assumes(j, kr == KERN_SUCCESS);

    kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
    if (!job_assumes(j, kr == KERN_SUCCESS)) {
        (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
        j->exit_status_port = MACH_PORT_NULL;
    }

    return kr;
}
static job_t
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
{
    const char **argv = (const char **)mach_cmd2argv(cmd);
    job_t jr = NULL;

    if (!job_assumes(j, argv != NULL)) {
        goto out_bad;
    }

    jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
    free(argv);

    /* jobs can easily be denied creation during shutdown */
    if (unlikely(jr == NULL)) {
        goto out_bad;
    }

    jr->mach_uid = uid;
    jr->ondemand = ond;
    jr->legacy_mach_job = true;
    jr->abandon_pg = true;
    jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */

    if (!job_setup_machport(jr)) {
        goto out_bad;
    }

    job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");

    return jr;

out_bad:
    if (jr) {
        job_remove(jr);
    }
    return NULL;
}
static job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
    struct proc_bsdshortinfo proc;
    bool shutdown_state;
    job_t jp = NULL, jr = NULL;
    uid_t kp_euid, kp_uid, kp_svuid;
    gid_t kp_egid, kp_gid, kp_svgid;

    if (!jobmgr_assumes(jm, anonpid != 0)) {
        errno = EINVAL;
        return NULL;
    }

    if (!jobmgr_assumes(jm, anonpid < 100000)) {
        /* The kernel currently defines PID_MAX to be 99999, but that define isn't exported */
        errno = EINVAL;
        return NULL;
    }

    /* libproc returns the number of bytes written into the buffer upon success,
     * zero on failure.
     */
    if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
        if (errno != ESRCH) {
            (void)jobmgr_assumes(jm, errno == 0);
        }
        return NULL;
    }

    if (!jobmgr_assumes(jm, proc.pbsi_comm[0] != '\0')) {
        errno = EINVAL;
        return NULL;
    }

    if (unlikely(proc.pbsi_status == SZOMB)) {
        jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
    }

    if (unlikely(proc.pbsi_flags & P_SUGID)) {
        jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
    }

    kp_euid = proc.pbsi_uid;
    kp_uid = proc.pbsi_ruid;
    kp_svuid = proc.pbsi_svuid;
    kp_egid = proc.pbsi_gid;
    kp_gid = proc.pbsi_rgid;
    kp_svgid = proc.pbsi_svgid;

    if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
        jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
                kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
    }

    /* "Fix" for a problem that shouldn't even exist.
     * See rdar://problem/7264615 for the symptom and rdar://problem/5020256
     * as to why this can happen.
     */
    if (!jobmgr_assumes(jm, (pid_t)proc.pbsi_ppid != anonpid)) {
        jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). It should find a different way to do whatever it's doing. Setting PPID to 0: %s", proc.pbsi_comm);
        proc.pbsi_ppid = 0;
    }

    /* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
    if (unlikely(shutdown_state = jm->shutting_down)) {
        jm->shutting_down = false;
    }

    /* We only set requestor_pid for XPC domains. */
    const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
    if (jobmgr_assumes(jm, (jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL)) != NULL)) {
        u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;

        total_anon_children++;
        jr->anonymous = true;
        jr->p = anonpid;

        /* anonymous process reaping is messy */
        LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

        if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1) && job_assumes(jr, errno == ESRCH)) {
            /* zombies are weird */
            job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
            jr->unload_at_mig_return = true;
        }

        if (unlikely(shutdown_state)) {
            job_log(jr, LOG_SCOLDING, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
        }

        job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
    }

    if (unlikely(shutdown_state)) {
        jm->shutting_down = true;
    }

    /* This is down here to mitigate the effects of rdar://problem/7264615, in which a process
     * attaches to its own parent. We need to make sure that the anonymous job has been added
     * to the process list so that, if it's used ptrace(3) to cause a cycle in the process
     * tree (thereby making it not a tree anymore), we'll find the tracing parent PID of the
     * parent process, which is the child, when we go looking for it in jobmgr_find_by_pid().
     */
    switch (proc.pbsi_ppid) {
    case 0:
        /* the kernel */
        break;
    case 1:
        if (pid1_magic) {
            /* we cannot possibly find a parent job_t that is useful in this function */
            break;
        }
        /* fall through */
    default:
        jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
        if (jobmgr_assumes(jm, jp != NULL)) {
            if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
                job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
            }
        }
        break;
    }

    return jr;
}
static job_t
job_new_subjob(job_t j, uuid_t identifier)
{
    char label[0];
    uuid_string_t idstr;
    uuid_unparse(identifier, idstr);
    size_t label_sz = snprintf(label, 0, "%s.%s", j->label, idstr);

    job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
    if (launchd_assumes(nj != NULL)) {
        nj->kqjob_callback = job_callback;
        nj->mgr = j->mgr;
        nj->min_run_time = j->min_run_time;
        nj->timeout = j->timeout;
        nj->exit_timeout = j->exit_timeout;

        snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);

        /* Set all our simple Booleans that are applicable. */
        nj->debug = j->debug;
        nj->ondemand = j->ondemand;
        nj->checkedin = true;
        nj->low_pri_io = j->low_pri_io;
        nj->setmask = j->setmask;
        nj->wait4debugger = j->wait4debugger;
        nj->internal_exc_handler = j->internal_exc_handler;
        nj->setnice = j->setnice;
        nj->abandon_pg = j->abandon_pg;
        nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
        nj->deny_job_creation = j->deny_job_creation;
        nj->kill_via_shmem = j->kill_via_shmem;
        nj->needs_kickoff = j->needs_kickoff;
        nj->currently_ignored = true;
        nj->dedicated_instance = true;
        nj->xpc_service = j->xpc_service;
        nj->xpc_bootstrapper = j->xpc_bootstrapper;

        nj->mask = j->mask;
        uuid_copy(nj->instance_id, identifier);

        /* These jobs are purely on-demand Mach jobs. */

        /* {Hard | Soft}ResourceLimits are not supported. */

        struct machservice *msi = NULL;
        SLIST_FOREACH(msi, &j->machservices, sle) {
            /* Only copy MachServices that were actually declared in the plist.
             * So skip over per-PID ones and ones that were created via
             * bootstrap_register().
             */
            if (msi->upfront) {
                mach_port_t mp = MACH_PORT_NULL;
                struct machservice *msj = machservice_new(nj, msi->name, &mp, msi->per_pid);
                if (job_assumes(nj, msj != NULL)) {
                    msj->reset = msi->reset;
                    msj->delete_on_destruction = msi->delete_on_destruction;
                    msj->drain_one_on_crash = msi->drain_one_on_crash;
                    msj->drain_all_on_crash = msi->drain_all_on_crash;
                }
            }
        }

        if (j->prog) {
            nj->prog = strdup(j->prog);
        }
        if (j->argv) {
            size_t sz = malloc_size(j->argv);
            nj->argv = (char **)malloc(sz);
            if (job_assumes(nj, nj->argv != NULL)) {
                /* This is the start of our strings. */
                char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));

                size_t i = 0;
                for (i = 0; i < j->argc; i++) {
                    (void)strcpy(p, j->argv[i]);
                    nj->argv[i] = p;
                    p += (strlen(j->argv[i]) + 1);
                }
                nj->argv[i] = NULL;
            }

            nj->argc = j->argc;
        }

        /* We ignore global environment variables. */
        struct envitem *ei = NULL;
        SLIST_FOREACH(ei, &j->env, sle) {
            (void)job_assumes(nj, envitem_new(nj, ei->key, ei->value, false, false));
        }
        uuid_string_t val;
        uuid_unparse(identifier, val);
        (void)job_assumes(nj, envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false, false));

        if (j->rootdir) {
            nj->rootdir = strdup(j->rootdir);
        }
        if (j->workingdir) {
            nj->workingdir = strdup(j->workingdir);
        }
        if (j->username) {
            nj->username = strdup(j->username);
        }
        if (j->groupname) {
            nj->groupname = strdup(j->groupname);
        }
        /* FIXME: We shouldn't redirect all the output from these jobs to the same
         * file. We should uniquify the file names.
         */
        if (j->stdinpath) {
            nj->stdinpath = strdup(j->stdinpath);
        }
        if (j->stdoutpath) {
            nj->stdoutpath = strdup(j->stdoutpath);
        }
        if (j->stderrpath) {
            nj->stderrpath = strdup(j->stderrpath);
        }
        if (j->alt_exc_handler) {
            nj->alt_exc_handler = strdup(j->alt_exc_handler);
        }
#if HAVE_SANDBOX
        if (j->seatbelt_profile) {
            nj->seatbelt_profile = strdup(j->seatbelt_profile);
        }
#endif
#if HAVE_QUARANTINE
        if (j->quarantine_data) {
            nj->quarantine_data = strdup(j->quarantine_data);
        }
        nj->quarantine_data_sz = j->quarantine_data_sz;
#endif
        if (j->j_binpref) {
            size_t sz = malloc_size(j->j_binpref);
            nj->j_binpref = (cpu_type_t *)malloc(sz);
            if (job_assumes(nj, nj->j_binpref)) {
                memcpy(nj->j_binpref, j->j_binpref, sz);
            }
            nj->j_binpref_cnt = j->j_binpref_cnt;
        }

        /* JetsamPriority is unsupported. */

        if (j->asport != MACH_PORT_NULL) {
            (void)job_assumes(nj, launchd_mport_copy_send(j->asport) == KERN_SUCCESS);
            nj->asport = j->asport;
        }

        LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);

        jobmgr_t where2put = root_jobmgr;
        if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
            where2put = j->mgr;
        }
        LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
        LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
    }

    return nj;
}
job_t
job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
{
    const char *const *argv_tmp = argv;
    char tmp_path[PATH_MAX];
    char auto_label[1000];
    const char *bn = NULL;
    char *co;
    size_t minlabel_len;
    size_t i, cc = 0;
    job_t j;

    launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);

    if (unlikely(jm->shutting_down)) {
        errno = EINVAL;
        return NULL;
    }

    if (unlikely(prog == NULL && argv == NULL)) {
        errno = EINVAL;
        return NULL;
    }

    char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
    if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
        if (prog) {
            bn = prog;
        } else {
            strlcpy(tmp_path, argv[0], sizeof(tmp_path));
            bn = basename(tmp_path); /* prog for auto labels is kp.kp_kproc.p_comm */
        }
        snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
        label = auto_label;
        /* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs. */
        minlabel_len = strlen(label) + MAXCOMLEN;
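        /* The extra MAXCOMLEN bytes give the label room to grow in place:
         * when an anonymous job exec(2)s, the NOTE_EXEC handler in
         * job_callback_proc() rewrites j->label from the process's new
         * p_comm (at most MAXCOMLEN bytes) without reallocating the job. */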
    } else {
        if (label == AUTO_PICK_XPC_LABEL) {
            minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
        } else {
            minlabel_len = strlen(label);
        }
    }

    j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);

    if (!jobmgr_assumes(jm, j != NULL)) {
        return NULL;
    }

    if (unlikely(label == auto_label)) {
        snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
    } else {
        strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
    }

    j->kqjob_callback = job_callback;
    j->mgr = jm;
    j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
    j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
    j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
    j->currently_ignored = true;
    j->ondemand = true;
    j->checkedin = true;
    j->jetsam_priority = -1;
    j->jetsam_memlimit = -1;
    uuid_clear(j->expected_audit_uuid);

    if (prog) {
        j->prog = strdup(prog);
        if (!job_assumes(j, j->prog != NULL)) {
            goto out_bad;
        }
    }

    if (likely(argv)) {
        while (*argv_tmp++) {
            j->argc++;
        }

        for (i = 0; i < j->argc; i++) {
            cc += strlen(argv[i]) + 1;
        }

        j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);

        if (!job_assumes(j, j->argv != NULL)) {
            goto out_bad;
        }

        co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));

        for (i = 0; i < j->argc; i++) {
            j->argv[i] = co;
            strcpy(co, argv[i]);
            co += strlen(argv[i]) + 1;
        }
        j->argv[i] = NULL;
    }

    if (strcmp(j->label, "com.apple.WindowServer") == 0) {
        j->has_console = true;
    }

    LIST_INSERT_HEAD(&jm->jobs, j, sle);

    jobmgr_t where2put_label = root_jobmgr;
    if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
        where2put_label = j->mgr;
    }
    LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
    uuid_clear(j->expected_audit_uuid);

    job_log(j, LOG_DEBUG, "Conceived");

    return j;

out_bad:
    if (j->prog) {
        free(j->prog);
    }
    free(j);

    return NULL;
}
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
job_t
job_new_alias(jobmgr_t jm, job_t src)
{
    job_t j = NULL;
    if (job_find(jm, src->label)) {
        errno = EEXIST;
    } else {
        j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
        if (jobmgr_assumes(jm, j != NULL)) {
            strcpy((char *)j->label, src->label);
            LIST_INSERT_HEAD(&jm->jobs, j, sle);
            LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
            /* Bad jump address. The kqueue callback for aliases should never be
             * invoked.
             */
            j->kqjob_callback = (kq_callback)0xfa1afe1;
            j->alias = src;
            j->mgr = jm;

            struct machservice *msi = NULL;
            SLIST_FOREACH(msi, &src->machservices, sle) {
                if (!machservice_new_alias(j, msi)) {
                    jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
                    job_remove(j);
                    j = NULL;
                    break;
                }
            }
        }
    }

    if (j) {
        job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
    }

    return j;
}
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
job_t
job_import(launch_data_t pload)
{
    job_t j = jobmgr_import2(root_jobmgr, pload);

    if (unlikely(j == NULL)) {
        return NULL;
    }

    /* Since jobs are effectively stalled until they get security sessions assigned
     * to them, we may wish to reconsider this behavior of calling the job "enabled"
     * as far as other jobs with the OtherJobEnabled KeepAlive criterion set.
     */
    job_dispatch_curious_jobs(j);
    return job_dispatch(j, false);
}
launch_data_t
job_import_bulk(launch_data_t pload)
{
    launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
    job_t *ja = NULL;
    size_t i, c = launch_data_array_get_count(pload);

    ja = alloca(c * sizeof(job_t));

    for (i = 0; i < c; i++) {
        if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
            errno = 0;
        }
        launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
    }

    for (i = 0; i < c; i++) {
        if (likely(ja[i])) {
            job_dispatch_curious_jobs(ja[i]);
            job_dispatch(ja[i], false);
        }
    }

    return resp;
}
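/* job_import_bulk() above deliberately runs two passes: every job in the
 * payload is imported before any of them is dispatched, so that KeepAlive
 * criteria such as OtherJobEnabled evaluate against the complete set rather
 * than against whichever jobs happened to come earlier in the array. */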
void
job_import_bool(job_t j, const char *key, bool value)
{
    bool found_key = false;

    switch (key[0]) {
    case 'a':
    case 'A':
        if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
            j->abandon_pg = value;
            found_key = true;
        }
        break;
    case 'b':
    case 'B':
        if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
            j->dirty_at_shutdown = value;
            found_key = true;
        }
        break;
    case 'k':
    case 'K':
        if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
            j->ondemand = !value;
            found_key = true;
        }
        break;
    case 'o':
    case 'O':
        if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
            j->ondemand = value;
            found_key = true;
        }
        break;
    case 'd':
    case 'D':
        if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
            j->debug = value;
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
            (void)job_assumes(j, !value);
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
            j->disable_aslr = value;
            found_key = true;
        }
        break;
    case 'h':
    case 'H':
        if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
            job_log(j, LOG_INFO, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
            j->dirty_at_shutdown = value;
            found_key = true;
        }
        break;
    case 's':
    case 'S':
        if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
            j->session_create = value;
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
            j->start_on_mount = value;
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
            /* this only does something on Mac OS X 10.4 "Tiger" */
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
            if (_s_shutdown_monitor) {
                job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
            } else {
                j->shutdown_monitor = true;
                _s_shutdown_monitor = j;
            }
            found_key = true;
        }
        break;
    case 'l':
    case 'L':
        if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
            j->low_pri_io = value;
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
            j->only_once = value;
            found_key = true;
        }
        break;
    case 'm':
    case 'M':
        if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
            j->internal_exc_handler = value;
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
            j->multiple_instances = value;
            found_key = true;
        }
        break;
    case 'i':
    case 'I':
        if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
            if (getuid() != 0) {
                job_log(j, LOG_WARNING, "Ignored this key: %s", key);
                return;
            }
            j->no_init_groups = !value;
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
            j->ignore_pg_at_shutdown = value;
            found_key = true;
        }
        break;
    case 'r':
    case 'R':
        if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
            if (value) {
                /* We don't want value == false to change j->start_pending */
                j->start_pending = true;
            }
            found_key = true;
        }
        break;
    case 'e':
    case 'E':
        if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
            j->globargv = value;
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
            j->kill_via_shmem = value;
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
            j->debug_before_kill = value;
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
            if (!s_embedded_privileged_job) {
                j->embedded_special_privileges = value;
                s_embedded_privileged_job = j;
            } else {
                job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
            }
            found_key = true;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
            if (job_assumes(j, _s_event_monitor == NULL)) {
                j->event_monitor = value;
                if (value) {
                    _s_event_monitor = j;
                }
            } else {
                job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility!");
            }
            found_key = true;
        }
        break;
    case 'w':
    case 'W':
        if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
            j->wait4debugger = value;
            found_key = true;
        }
        break;
    case 'x':
    case 'X':
        if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
            if (pid1_magic) {
                if (_s_xpc_bootstrapper) {
                    job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _s_xpc_bootstrapper->label);
                } else {
                    _s_xpc_bootstrapper = j;
                    j->xpc_bootstrapper = value;
                }
            } else {
                job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
            }
            found_key = true;
        }
        break;
    default:
        break;
    }

    if (unlikely(!found_key)) {
        job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
    }
}
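/* Illustrative launchd.plist fragment (not taken from this file) showing how
 * the boolean keys above land on struct job_s fields:
 *
 *   <key>OnDemand</key><false/>            -> j->ondemand = false
 *   <key>KeepAlive</key><true/>            -> j->ondemand = false (inverted)
 *   <key>AbandonProcessGroup</key><true/>  -> j->abandon_pg = true
 *   <key>EnableTransactions</key><true/>   -> j->kill_via_shmem = true
 */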
void
job_import_string(job_t j, const char *key, const char *value)
{
    char **where2put = NULL;

    switch (key[0]) {
    case 'm':
    case 'M':
        if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
            where2put = &j->alt_exc_handler;
        }
        break;
    case 'p':
    case 'P':
        if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
            return;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0) {
            if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
                j->pstype = POSIX_SPAWN_OSX_TALAPP_START;
            } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_WIDGET) == 0) {
                j->pstype = POSIX_SPAWN_OSX_WIDGET_START;
            }
#if TARGET_OS_EMBEDDED
            else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_IOSAPP) == 0) {
                j->pstype = POSIX_SPAWN_IOS_APP_START;
            }
#endif /* TARGET_OS_EMBEDDED */
            else {
                job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
            }
            return;
        }
        break;
    case 'l':
    case 'L':
        if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
            return;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
            return;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
            return;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
            return;
        }
        break;
    case 'r':
    case 'R':
        if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
            if (getuid() != 0) {
                job_log(j, LOG_WARNING, "Ignored this key: %s", key);
                return;
            }
            where2put = &j->rootdir;
        }
        break;
    case 'w':
    case 'W':
        if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
            where2put = &j->workingdir;
        }
        break;
    case 'u':
    case 'U':
        if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
            if (getuid() != 0) {
                job_log(j, LOG_WARNING, "Ignored this key: %s", key);
                return;
            } else if (strcmp(value, "root") == 0) {
                return;
            }
            where2put = &j->username;
        }
        break;
    case 'g':
    case 'G':
        if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
            if (getuid() != 0) {
                job_log(j, LOG_WARNING, "Ignored this key: %s", key);
                return;
            } else if (strcmp(value, "wheel") == 0) {
                return;
            }
            where2put = &j->groupname;
        }
        break;
    case 's':
    case 'S':
        if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
            where2put = &j->stdoutpath;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
            where2put = &j->stderrpath;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
            where2put = &j->stdinpath;
            j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
            if (job_assumes(j, j->stdin_fd != -1)) {
                /* open() should not block, but regular IO by the job should */
                (void)job_assumes(j, fcntl(j->stdin_fd, F_SETFL, 0) != -1);
                /* XXX -- EV_CLEAR should make named pipes happy? */
                (void)job_assumes(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j) != -1);
            }
        } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
            where2put = &j->seatbelt_profile;
        }
        break;
    case 'x':
    case 'X':
        if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
            return;
        }
        break;
    default:
        job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
        break;
    }

    if (likely(where2put)) {
        (void)job_assumes(j, (*where2put = strdup(value)) != NULL);
    } else {
        /* See rdar://problem/5496612. These two are okay. */
        if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) != 0 && strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) != 0) {
            job_log(j, LOG_WARNING, "Unknown key: %s", key);
        }
    }
}
void
job_import_integer(job_t j, const char *key, long long value)
{
    switch (key[0]) {
    case 'e':
    case 'E':
        if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
            if (unlikely(value < 0)) {
                job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
            } else if (unlikely(value > UINT32_MAX)) {
                job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
            } else {
                j->exit_timeout = (typeof(j->exit_timeout)) value;
            }
        } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
            j->main_thread_priority = value;
        }
        break;
    case 'j':
    case 'J':
        if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
            job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");

            launch_data_t pri = launch_data_new_integer(value);
            if (job_assumes(j, pri != NULL)) {
                jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
                launch_data_free(pri);
            }
        }
        break;
    case 'n':
    case 'N':
        if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
            if (unlikely(value < PRIO_MIN)) {
                job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
            } else if (unlikely(value > PRIO_MAX)) {
                job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
            } else {
                j->nice = (typeof(j->nice)) value;
                j->setnice = true;
            }
        }
        break;
    case 't':
    case 'T':
        if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
            if (unlikely(value < 0)) {
                job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
            } else if (unlikely(value > UINT32_MAX)) {
                job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
            } else {
                j->timeout = (typeof(j->timeout)) value;
            }
        } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
            if (value < 0) {
                job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
            } else if (value > UINT32_MAX) {
                job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
            } else {
                j->min_run_time = (typeof(j->min_run_time)) value;
            }
        }
        break;
    case 'u':
    case 'U':
        if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
            j->mask = value;
            j->setmask = true;
        }
        break;
    case 's':
    case 'S':
        if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
            if (unlikely(value <= 0)) {
                job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
            } else if (unlikely(value > UINT32_MAX)) {
                job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
            } else {
                runtime_add_weak_ref();
                j->start_interval = (typeof(j->start_interval)) value;

                (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
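                /* The timer's kevent ident is the address of j->start_interval
                 * itself. job_callback_timer() below tells its timers apart by
                 * comparing the incoming ident against &j->semaphores,
                 * &j->start_interval and &j->exit_timeout, so no per-timer
                 * bookkeeping is needed in the job structure. */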
            }
        } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
            j->seatbelt_flags = value;
        }
        break;
    default:
        job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
        break;
    }
}

void
job_import_opaque(job_t j __attribute__((unused)),
    const char *key, launch_data_t value __attribute__((unused)))
{
    switch (key[0]) {
    case 'q':
    case 'Q':
        if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
            size_t tmpsz = launch_data_get_opaque_size(value);

            if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
                memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
                j->quarantine_data_sz = tmpsz;
            }
        }
        break;
    case 's':
    case 'S':
        if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
            size_t tmpsz = launch_data_get_opaque_size(value);
            if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
                memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
            }
        }
        break;
    default:
        break;
    }
}
void
policy_setup(launch_data_t obj, const char *key, void *context)
{
    job_t j = context;
    bool found_key = false;

    switch (key[0]) {
    case 'd':
    case 'D':
        if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
            j->deny_job_creation = launch_data_get_bool(obj);
            found_key = true;
        }
        break;
    default:
        break;
    }

    if (unlikely(!found_key)) {
        job_log(j, LOG_WARNING, "Unknown policy: %s", key);
    }
}
void
job_import_dictionary(job_t j, const char *key, launch_data_t value)
{
    launch_data_t tmp;

    switch (key[0]) {
    case 'p':
    case 'P':
        if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
            launch_data_dict_iterate(value, policy_setup, j);
        }
        break;
    case 'k':
    case 'K':
        if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
            launch_data_dict_iterate(value, semaphoreitem_setup, j);
        }
        break;
    case 'i':
    case 'I':
        if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
            j->inetcompat = true;
            j->abandon_pg = true;
            if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
                j->inetcompat_wait = launch_data_get_bool(tmp);
            }
        }
        break;
    case 'j':
    case 'J':
        if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
            launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
        }
        break;
    case 'e':
    case 'E':
        if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
            launch_data_dict_iterate(value, envitem_setup, j);
        }
        break;
    case 'u':
    case 'U':
        if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
            j->importing_global_env = true;
            launch_data_dict_iterate(value, envitem_setup, j);
            j->importing_global_env = false;
        }
        break;
    case 's':
    case 'S':
        if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
            launch_data_dict_iterate(value, socketgroup_setup, j);
        } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
            calendarinterval_new_from_obj(j, value);
        } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
            launch_data_dict_iterate(value, limititem_setup, j);
        } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
            launch_data_dict_iterate(value, seatbelt_setup_flags, j);
        }
        break;
    case 'h':
    case 'H':
        if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
            j->importing_hard_limits = true;
            launch_data_dict_iterate(value, limititem_setup, j);
            j->importing_hard_limits = false;
        }
        break;
    case 'm':
    case 'M':
        if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
            launch_data_dict_iterate(value, machservice_setup, j);
        }
        break;
    case 'l':
    case 'L':
        if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
            launch_data_dict_iterate(value, eventsystem_setup, j);
        } else {
            if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
                return;
            }
            if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
                return;
            }
        }
        break;
    default:
        job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
        break;
    }
}
void
job_import_array(job_t j, const char *key, launch_data_t value)
{
    size_t i, value_cnt = launch_data_array_get_count(value);
    const char *str;

    switch (key[0]) {
    case 'p':
    case 'P':
        if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
            return;
        }
        break;
    case 'l':
    case 'L':
        if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
            return;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
            return;
        } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
            job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
            return;
        }
        break;
    case 'q':
    case 'Q':
        if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
            for (i = 0; i < value_cnt; i++) {
                str = launch_data_get_string(launch_data_array_get_index(value, i));
                if (job_assumes(j, str != NULL)) {
                    semaphoreitem_new(j, DIR_NOT_EMPTY, str);
                }
            }
        }
        break;
    case 'w':
    case 'W':
        if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
            for (i = 0; i < value_cnt; i++) {
                str = launch_data_get_string(launch_data_array_get_index(value, i));
                if (job_assumes(j, str != NULL)) {
                    semaphoreitem_new(j, PATH_CHANGES, str);
                }
            }
        }
        break;
    case 'b':
    case 'B':
        if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
            socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
        } else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
            if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
                j->j_binpref_cnt = value_cnt;
                for (i = 0; i < value_cnt; i++) {
                    j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
                }
            }
        }
        break;
    case 's':
    case 'S':
        if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
            for (i = 0; i < value_cnt; i++) {
                calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
            }
        }
        break;
    default:
        job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
        break;
    }
}
void
job_import_keys(launch_data_t obj, const char *key, void *context)
{
    job_t j = context;
    launch_data_type_t kind;

    if (!launchd_assumes(obj != NULL)) {
        return;
    }

    kind = launch_data_get_type(obj);

    switch (kind) {
    case LAUNCH_DATA_BOOL:
        job_import_bool(j, key, launch_data_get_bool(obj));
        break;
    case LAUNCH_DATA_STRING:
        job_import_string(j, key, launch_data_get_string(obj));
        break;
    case LAUNCH_DATA_INTEGER:
        job_import_integer(j, key, launch_data_get_integer(obj));
        break;
    case LAUNCH_DATA_DICTIONARY:
        job_import_dictionary(j, key, obj);
        break;
    case LAUNCH_DATA_ARRAY:
        job_import_array(j, key, obj);
        break;
    case LAUNCH_DATA_OPAQUE:
        job_import_opaque(j, key, obj);
        break;
    default:
        job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
        break;
    }
}
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
    launch_data_t tmp, ldpa;
    const char *label = NULL, *prog = NULL;
    const char **argv = NULL;
    size_t argc = 0;
    job_t j;

    if (!jobmgr_assumes(jm, pload != NULL)) {
        errno = EINVAL;
        return NULL;
    }

    if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
        errno = EINVAL;
        return NULL;
    }

    if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
        errno = EINVAL;
        return NULL;
    }

    if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
        errno = EINVAL;
        return NULL;
    }

    if (unlikely(!(label = launch_data_get_string(tmp)))) {
        errno = EINVAL;
        return NULL;
    }

#if TARGET_OS_EMBEDDED
    if (unlikely(g_embedded_privileged_action && s_embedded_privileged_job)) {
        if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
            errno = EPERM;
            return NULL;
        }

        const char *username = NULL;
        if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
            username = launch_data_get_string(tmp);
        } else {
            errno = EPERM;
            return NULL;
        }

        if (!jobmgr_assumes(jm, s_embedded_privileged_job->username != NULL && username != NULL)) {
            errno = EPERM;
            return NULL;
        }

        if (unlikely(strcmp(s_embedded_privileged_job->username, username) != 0)) {
            errno = EPERM;
            return NULL;
        }
    } else if (g_embedded_privileged_action) {
        errno = EPERM;
        return NULL;
    }
#endif

    if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
        (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
        prog = launch_data_get_string(tmp);
    }

    if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
        size_t i, c;

        if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
            errno = EINVAL;
            return NULL;
        }

        c = launch_data_array_get_count(ldpa);

        argv = alloca((c + 1) * sizeof(char *));

        for (i = 0; i < c; i++) {
            tmp = launch_data_array_get_index(ldpa, i);

            if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
                errno = EINVAL;
                return NULL;
            }

            argv[i] = launch_data_get_string(tmp);
        }
        argv[i] = NULL;
        argc = c;
    }

    if (!prog && argc == 0) {
        jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
        errno = EINVAL;
        return NULL;
    }

    /* Find the requested session. You cannot load services into XPC domains in
     * this manner.
     */
    launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
    if (session) {
        jobmgr_t jmt = NULL;
        if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
            jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
            if (!jmt) {
                jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
                errno = EINVAL;
                return NULL;
            }
        } else {
            jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
            errno = EINVAL;
            return NULL;
        }
        jm = jmt;
    }

    /* For legacy reasons, we have a global hash of all labels in all job
     * managers. So rather than make it a global, we store it in the root job
     * manager. But for an XPC domain, we store a local hash of all services in
     * the domain.
     */
    jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
    if (unlikely((j = job_find(where2look, label)) != NULL)) {
        if (jm->xpc_singleton) {
            /* There can (and probably will be) multiple attempts to import the
             * same XPC service from the same framework. This is okay. It's
             * treated as a singleton, so just return the existing one so that
             * it may be aliased into the requesting process' XPC domain.
             */
            return j;
        } else {
            /* If we're not a global XPC domain, then it's an error to try
             * importing the same job/service multiple times.
             */
            errno = EEXIST;
            return NULL;
        }
    } else if (unlikely(!jobmgr_label_test(where2look, label))) {
        errno = EINVAL;
        return NULL;
    }
    jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);

    if (likely(j = job_new(jm, label, prog, argv))) {
        launch_data_dict_iterate(pload, job_import_keys, j);
        if (!uuid_is_null(j->expected_audit_uuid)) {
            uuid_string_t uuid_str;
            uuid_unparse(j->expected_audit_uuid, uuid_str);
            job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
            LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
            errno = ENEEDAUTH;
        } else {
            job_log(j, LOG_DEBUG, "No security session specified.");
            j->asport = MACH_PORT_NULL;
        }

        if (j->event_monitor) {
            if (job_assumes(j, LIST_FIRST(&j->events) == NULL)) {
                struct machservice *msi = NULL;
                SLIST_FOREACH(msi, &j->machservices, sle) {
                    if (msi->event_update_port) {
                        break;
                    }
                }

                if (job_assumes(j, msi != NULL)) {
                    /* Create our send-once right so we can kick things off. */
                    (void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
                    if (!LIST_EMPTY(&_s_event_systems)) {
                        eventsystem_ping();
                    }
                }
            } else {
                job_log(j, LOG_ERR, "The event monitor job may not have a LaunchEvents dictionary.");
                job_remove(j);
                j = NULL;
            }
        }
    }

    return j;
}
bool
jobmgr_label_test(jobmgr_t jm, const char *str)
{
    char *endstr = NULL;
    const char *ptr;

    if (str[0] == '\0') {
        jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
        return false;
    }

    for (ptr = str; *ptr; ptr++) {
        if (iscntrl(*ptr)) {
            jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
            return false;
        }
    }

    strtoll(str, &endstr, 0);

    if (str != endstr) {
        jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to begin with numbers: %s", str);
        return false;
    }

    if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
        (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
        jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
        return false;
    }

    return true;
}
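/* Concretely, jobmgr_label_test() rejects: "" (empty), any label containing
 * an ASCII control character, labels with a leading number such as
 * "123.example" (they parse under strtoll()), and anything under the reserved
 * "com.apple.launchd" or "com.apple.launchctl" prefixes. Everything else is
 * accepted. */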
job_t
job_find(jobmgr_t jm, const char *label)
{
    job_t ji;

    if (!jm) {
        jm = root_jobmgr;
    }

    LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
        if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
            continue; /* 5351245 and 5488633 respectively */
        }

        if (strcmp(ji->label, label) == 0) {
            return ji;
        }
    }

    errno = ESRCH;
    return NULL;
}

/* Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid(). */
job_t
jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
{
    job_t ji = NULL;
    LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
        if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay))) {
            return ji;
        }
    }

    jobmgr_t jmi = NULL;
    SLIST_FOREACH(jmi, &jm->submgrs, sle) {
        if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
            break;
        }
    }

    return ji;
}

job_t
jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
{
    job_t ji;

    LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
        if (ji->p == p) {
            return ji;
        }
    }

    return create_anon ? job_new_anonymous(jm, p) : NULL;
}
job_t
job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
{
    jobmgr_t jmi;
    job_t ji;

    if (jm->jm_port == mport) {
        return jobmgr_find_by_pid(jm, upid, true);
    }

    SLIST_FOREACH(jmi, &jm->submgrs, sle) {
        job_t jr;

        if ((jr = job_mig_intran2(jmi, mport, upid))) {
            return jr;
        }
    }

    LIST_FOREACH(ji, &jm->jobs, sle) {
        if (ji->j_port == mport) {
            return ji;
        }
    }

    return NULL;
}

job_t
job_mig_intran(mach_port_t p)
{
    struct ldcred *ldc = runtime_get_caller_creds();
    job_t jr;

    jr = job_mig_intran2(root_jobmgr, p, ldc->pid);

    if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
        struct proc_bsdshortinfo proc;
        if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
            if (errno != ESRCH) {
                (void)jobmgr_assumes(root_jobmgr, errno == 0);
            }
        } else {
            jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc->pid, ldc->uid, ldc->euid, p, proc.pbsi_comm);
        }
    }

    return jr;
}
job_t
job_find_by_service_port(mach_port_t p)
{
    struct machservice *ms;

    LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
        if (ms->recv && (ms->port == p)) {
            return ms->job;
        }
    }

    return NULL;
}

void
job_mig_destructor(job_t j)
{
    /* 5477111
     *
     * 'j' can be invalid at this point. We should fix this up after Leopard ships.
     */
    if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
        job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
        job_remove(j);
    }

    workaround_5477111 = NULL;

    calendarinterval_sanity_check();
}
void
job_export_all2(jobmgr_t jm, launch_data_t where)
{
    jobmgr_t jmi;
    job_t ji;

    SLIST_FOREACH(jmi, &jm->submgrs, sle) {
        job_export_all2(jmi, where);
    }

    LIST_FOREACH(ji, &jm->jobs, sle) {
        launch_data_t tmp;

        if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
            launch_data_dict_insert(where, tmp, ji->label);
        }
    }
}

launch_data_t
job_export_all(void)
{
    launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

    if (launchd_assumes(resp != NULL)) {
        job_export_all2(root_jobmgr, resp);
    }

    return resp;
}
void
job_log_stray_pg(job_t j)
{
    pid_t *pids = NULL;
    size_t len = sizeof(pid_t) * get_kern_max_proc();
    int i = 0, kp_cnt = 0;

    if (!do_apple_internal_logging) {
        return;
    }

    runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);

    if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
        return;
    }
    if (!job_assumes(j, (kp_cnt = proc_listpgrppids(j->p, pids, len)) != -1)) {
        goto out;
    }

    for (i = 0; i < kp_cnt; i++) {
        pid_t p_i = pids[i];
        if (p_i == j->p) {
            continue;
        } else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
            continue;
        }

        struct proc_bsdshortinfo proc;
        if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
            if (errno != ESRCH) {
                job_assumes(j, errno == 0);
            }
            continue;
        }

        pid_t pp_i = proc.pbsi_ppid;
        const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
        const char *n = proc.pbsi_comm;

        job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
    }

out:
    free(pids);
}
void
job_reap(job_t j)
{
    struct rusage ru;
    int status = 0;

    bool is_system_bootstrapper = j->is_bootstrapper && pid1_magic && !j->mgr->parentmgr;

    job_log(j, LOG_DEBUG, "Reaping");

    if (j->shmem) {
        (void)job_assumes(j, vm_deallocate(mach_task_self(), (vm_address_t)j->shmem, getpagesize()) == 0);
        j->shmem = NULL;
    }

    if (unlikely(j->weird_bootstrap)) {
        int64_t junk = 0;
        job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
    }

    if (j->log_redirect_fd && !j->legacy_LS_job) {
        job_log_stdouterr(j); /* one last chance */

        if (j->log_redirect_fd) {
            (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
            j->log_redirect_fd = 0;
        }
    }

    if (j->fork_fd) {
        (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
        j->fork_fd = 0;
    }
    memset(&ru, 0, sizeof(ru));

    if (!j->anonymous) {
        /* The job is dead. While the PID/PGID is still known to be
         * valid, try to kill abandoned descendant processes.
         */
        job_log_stray_pg(j);
        if (!j->abandon_pg) {
            if (unlikely(runtime_killpg(j->p, SIGTERM) == -1 && errno != ESRCH)) {
                job_log(j, LOG_APPLEONLY, "Bug: 5487498");
                (void)job_assumes(j, false);
            }
        }
    }

    /* We have to work around one of two kernel bugs here. ptrace(3) may
     * have abducted the child away from us and reparented it to the tracing
     * process. If the process then exits, we still get NOTE_EXIT, but we
     * cannot reap it because the kernel may not have restored the true
     * parent/child relationship in time.
     *
     * See <rdar://problem/5020256>.
     *
     * The other bug is if the shutdown monitor has suspended a task and not
     * resumed it before exiting. In this case, the kernel will not clean up
     * after the shutdown monitor. It will, instead, leave the task suspended
     * and not process any pending signals on the event loop for the task.
     *
     * There are a variety of other kernel bugs that could prevent a process
     * from exiting, usually having to do with faulty hardware or talking to
     * misbehaving drivers that mark a thread as uninterruptible and
     * deadlock/hang before unmarking it as such. So we have to work around
     * that too.
     *
     * See <rdar://problem/9284889&9359725>.
     */
    if (j->workaround9359725) {
        job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
        status = W_EXITCODE(-1, SIGSEGV);
        memset(&ru, 0, sizeof(ru));
    } else if (wait4(j->p, &status, 0, &ru) == -1) {
        job_log(j, LOG_NOTICE, "Assuming job exited: <rdar://problem/5020256>: %d: %s", errno, strerror(errno));
        status = W_EXITCODE(-1, SIGSEGV);
        memset(&ru, 0, sizeof(ru));
    }

    if (j->exit_timeout) {
        kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
    }

    LIST_REMOVE(j, pid_hash_sle);

    if (j->sent_signal_time) {
        uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);

        td_sec = td / NSEC_PER_SEC;
        td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

        job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
    }
    timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
    timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
    j->ru.ru_maxrss += ru.ru_maxrss;
    j->ru.ru_ixrss += ru.ru_ixrss;
    j->ru.ru_idrss += ru.ru_idrss;
    j->ru.ru_isrss += ru.ru_isrss;
    j->ru.ru_minflt += ru.ru_minflt;
    j->ru.ru_majflt += ru.ru_majflt;
    j->ru.ru_nswap += ru.ru_nswap;
    j->ru.ru_inblock += ru.ru_inblock;
    j->ru.ru_oublock += ru.ru_oublock;
    j->ru.ru_msgsnd += ru.ru_msgsnd;
    j->ru.ru_msgrcv += ru.ru_msgrcv;
    j->ru.ru_nsignals += ru.ru_nsignals;
    j->ru.ru_nvcsw += ru.ru_nvcsw;
    j->ru.ru_nivcsw += ru.ru_nivcsw;
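    /* These totals accumulate across every incarnation of the job: wait4()
     * filled `ru` for the instance that just exited, and folding it into
     * j->ru means the counters survive respawns until the job itself is
     * removed. */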
    if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
        int level = LOG_WARNING;
        if (!j->did_exec && (j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
            level = LOG_DEBUG;
        }

        job_log(j, level, "Exited with code: %d", WEXITSTATUS(status));
    }

    if (WIFSIGNALED(status)) {
        int s = WTERMSIG(status);
        if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
            job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
        } else if (!j->stopped && !j->clean_kill) {
            switch (s) {
            /* Signals which indicate a crash. */
            case SIGILL:
            case SIGABRT:
            case SIGFPE:
            case SIGBUS:
            case SIGSEGV:
            case SIGSYS:
            /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
             * SIGTRAP, assume that it's a crash.
             */
            case SIGTRAP:
                j->crashed = true;
                job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
                break;
            default:
                job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
                break;
            }

            if (is_system_bootstrapper && j->crashed) {
                job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
            }
        }
    }

    struct machservice *msi = NULL;
    if (j->crashed || !(j->did_exec || j->anonymous)) {
        SLIST_FOREACH(msi, &j->machservices, sle) {
            if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
                machservice_drain_port(msi);
            }

            if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
                machservice_resetport(j, msi);
            }
        }
    }

    /* HACK: Essentially duplicating the logic directly above. But this has
     * gotten really hairy, and I don't want to try consolidating it right now.
     */
    if (j->xpc_service && !j->xpcproxy_did_exec) {
        job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
        SLIST_FOREACH(msi, &j->machservices, sle) {
            /* Drain the messages but do not reset the port. If xpcproxy could
             * not exec(3), then we don't want to continue trying, since there
             * is very likely a serious configuration error with the service.
             *
             * <rdar://problem/8986802>
             */
            machservice_resetport(j, msi);
        }
    }

    struct suspended_peruser *spi = NULL;
    while ((spi = LIST_FIRST(&j->suspended_perusers))) {
        job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
        spi->j->peruser_suspend_count--;
        if (spi->j->peruser_suspend_count == 0) {
            job_dispatch(spi->j, false);
        }
        LIST_REMOVE(spi, sle);
        free(spi);
    }

    j->last_exit_status = status;

    if (j->exit_status_dest) {
        errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
        if (errno && errno != MACH_SEND_INVALID_DEST) {
            (void)job_assumes(j, errno == 0);
        }

        j->exit_status_dest = MACH_PORT_NULL;
    }

    if (j->spawn_reply_port) {
        /* If the child never called exec(3), we must send a spawn() reply so
         * that the requestor can get exit status from it. If we fail to send
         * the reply for some reason, we have to deallocate the exit status port
         * ourselves.
         */
        kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
        if (kr != MACH_SEND_INVALID_DEST) {
            errno = kr;
            (void)job_assumes(j, errno == KERN_SUCCESS);
        } else {
            (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
        }

        j->exit_status_port = MACH_PORT_NULL;
        j->spawn_reply_port = MACH_PORT_NULL;
    }

    if (j->anonymous) {
        total_anon_children--;
    }

    if (j->has_console) {
        g_wsp = 0;
    }

    if (j->shutdown_monitor) {
        job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
        _s_shutdown_monitor = NULL;
        j->shutdown_monitor = false;
    }

    if (j->event_monitor && !j->mgr->shutting_down) {
        msi = NULL;
        SLIST_FOREACH(msi, &j->machservices, sle) {
            if (msi->event_update_port) {
                break;
            }
        }
        /* Only do this if we've gotten the port-destroyed notification already.
         * If we haven't yet, the port destruction handler will do this.
         */
        if (job_assumes(j, msi != NULL) && !msi->isActive) {
            if (_s_event_update_port == MACH_PORT_NULL) {
                (void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
            }
            eventsystem_ping();
        }
    }

    if (!j->anonymous) {
        j->mgr->normal_active_cnt--;
    }
    j->sent_signal_time = 0;
    j->sent_sigkill = false;
    j->clean_kill = false;
    j->sent_kill_via_shmem = false;
    j->lastlookup = NULL;
    j->lastlookup_gennum = 0;
    j->p = 0;
}
, bool newmounthack
)
3492 if (jm
->shutting_down
) {
3496 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
3497 jobmgr_dispatch_all(jmi
, newmounthack
);
3500 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
3501 if (newmounthack
&& ji
->start_on_mount
) {
3502 ji
->start_pending
= true;
3505 job_dispatch(ji
, false);
void
job_dispatch_curious_jobs(job_t j)
{
    job_t ji = NULL, jt = NULL;
    SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
        struct semaphoreitem *si = NULL;
        SLIST_FOREACH(si, &ji->semaphores, sle) {
            if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
                continue;
            }

            if (strcmp(si->what, j->label) == 0) {
                job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);

                if (!ji->removing) {
                    job_dispatch(ji, false);
                } else {
                    job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
                }

                /* ji could be removed here, so don't do anything with it or its semaphores
                 * after this point.
                 */
                break;
            }
        }
    }
}
job_t
job_dispatch(job_t j, bool kickstart)
{
    /* Don't dispatch a job if it has no audit session set. */
    if (!uuid_is_null(j->expected_audit_uuid)) {
        return NULL;
    }
    if (j->alias) {
        j = j->alias;
    }

#if TARGET_OS_EMBEDDED
    if (g_embedded_privileged_action && s_embedded_privileged_job) {
        if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
            errno = EPERM;
            return NULL;
        }

        if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
            errno = EPERM;
            return NULL;
        }
    } else if (g_embedded_privileged_action) {
        errno = EPERM;
        return NULL;
    }
#endif

    /*
     * The whole job removal logic needs to be consolidated. The fact that
     * a job can be removed from just about anywhere makes it easy to have
     * stale pointers left behind somewhere on the stack that might get
     * used after the deallocation. In particular, during job iteration.
     *
     * This is a classic example. The act of dispatching a job may delete it.
     */
    if (!job_active(j)) {
        if (job_useless(j)) {
            job_remove(j);
            return NULL;
        }
        if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
            return NULL;
        }

        if (kickstart || job_keepalive(j)) {
            job_log(j, LOG_DEBUG, "Starting job (kickstart = %s)", kickstart ? "true" : "false");
            job_start(j);
        } else {
            job_log(j, LOG_DEBUG, "Watching job (kickstart = %s)", kickstart ? "true" : "false");
            job_watch(j);

            /*
             * Path checking and monitoring is really racy right now.
             * We should clean this up post Leopard.
             */
            if (job_keepalive(j)) {
                job_start(j);
            }
        }
    } else {
        job_log(j, LOG_DEBUG, "Tried to dispatch an already active job (%s).", job_active(j));
    }

    return j;
}
void
job_log_stdouterr2(job_t j, const char *msg, ...)
{
    struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
    va_list ap;

    va_start(ap, msg);
    runtime_vsyslog(&attr, msg, ap);
    va_end(ap);
}
void
job_log_stdouterr(job_t j)
{
    char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
    bool close_log_redir = false;
    ssize_t rsz;

    if (!job_assumes(j, buf != NULL)) {
        return;
    }

    bufindex = buf;
    rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);

    if (unlikely(rsz == 0)) {
        job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
        close_log_redir = true;
    } else if (rsz == -1) {
        if (!job_assumes(j, errno == EAGAIN)) {
            close_log_redir = true;
        }
    } else {
        buf[rsz] = '\0';

        while ((msg = strsep(&bufindex, "\n\r"))) {
            if (msg[0]) {
                job_log_stdouterr2(j, "%s", msg);
            }
        }
    }

    free(buf);

    if (unlikely(close_log_redir)) {
        (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
        j->log_redirect_fd = 0;
        job_dispatch(j, false);
    }
}
void
job_kill(job_t j)
{
    if (unlikely(!j->p || j->anonymous)) {
        return;
    }

    (void)job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);

    j->sent_sigkill = true;
    (void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j) != -1);

    job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
}
void
job_open_shutdown_transaction(job_t j)
{
    if (j->kill_via_shmem) {
        if (j->shmem) {
            job_log(j, LOG_DEBUG, "Opening shutdown transaction for job.");
            (void)__sync_add_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
        } else {
            job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it has not set up shared memory. Treating normally.");
            j->dirty_at_shutdown = false;
        }
    } else {
        job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
        j->dirty_at_shutdown = false;
    }
}

void
job_close_shutdown_transaction(job_t j)
{
    if (j->dirty_at_shutdown) {
        job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
        if (__sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1) == -1) {
            job_log(j, LOG_DEBUG, "Job is now clean. Killing.");
            job_kill(j);
        }
        j->dirty_at_shutdown = false;
    }
}
void
job_log_children_without_exec(job_t j)
{
    pid_t *pids = NULL;
    size_t len = sizeof(pid_t) * get_kern_max_proc();
    int i = 0, kp_cnt = 0;

    if (!do_apple_internal_logging || j->anonymous || j->per_user) {
        return;
    }

    if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
        return;
    }
    if (!job_assumes(j, (kp_cnt = proc_listchildpids(j->p, pids, len)) != -1)) {
        goto out;
    }

    for (i = 0; i < kp_cnt; i++) {
        struct proc_bsdshortinfo proc;
        if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
            if (errno != ESRCH) {
                job_assumes(j, errno == 0);
            }
            continue;
        }
        if (proc.pbsi_flags & P_EXEC) {
            continue;
        }

        job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
    }

out:
    free(pids);
}
void
job_cleanup_after_tracer(job_t j)
{
    j->tracing_pid = 0;
    if (j->reap_after_trace) {
        job_log(j, LOG_DEBUG | LOG_CONSOLE, "Reaping job now that attached tracer is gone.");
        struct kevent kev;
        EV_SET(&kev, j->p, 0, 0, NOTE_EXIT, 0, 0);

        /* Fake a kevent to keep our logic consistent. */
        job_callback_proc(j, &kev);

        /* Normally, after getting a EVFILT_PROC event, we do garbage collection
         * on the root job manager. To make our fakery complete, we will do garbage
         * collection at the beginning of the next run loop cycle (after we're done
         * draining the current queue of kevents).
         */
        (void)job_assumes(j, kevent_mod((uintptr_t)&root_jobmgr->reboot_flags, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, root_jobmgr) != -1);
    }
}
void
job_callback_proc(job_t j, struct kevent *kev)
{
    bool program_changed = false;
    int fflags = kev->fflags;

    job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
    log_kevent_struct(LOG_DEBUG, kev, 0);

    if (fflags & NOTE_EXIT) {
        if (j->p == (pid_t)kev->ident && !j->anonymous) {
            /* Note that the third argument to proc_pidinfo() is a magic argument for
             * PROC_PIDT_SHORTBSDINFO. Specifically, passing 1 means "don't fail on a zombie
             * PID".
             */
            struct proc_bsdshortinfo proc;
            if (job_assumes(j, proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0)) {
                if (!job_assumes(j, (pid_t)proc.pbsi_ppid == getpid())) {
                    /* Someone has attached to the process with ptrace(). There's a race here.
                     * If we determine that we are not the parent process and then fail to attach
                     * a kevent to the parent PID (who is probably using ptrace()), we can take that as an
                     * indication that the parent exited between sysctl(3) and kevent_mod(). The
                     * reparenting of the PID should be atomic to us, so in that case, we reap the
                     * job as normal.
                     *
                     * Otherwise, we wait for the death of the parent tracer and then reap, just as we
                     * would if a job died while we were sampling it at shutdown.
                     *
                     * Note that we foolishly assume that in the process *tree* a node cannot be its
                     * own parent. Apparently, that is not correct. If this is the case, we forsake
                     * the process to its own devices. Let it reap itself.
                     */
                    if (!job_assumes(j, proc.pbsi_ppid != kev->ident)) {
                        job_log(j, LOG_WARNING, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
                        return;
                    }
                    if (job_assumes(j, kevent_mod(proc.pbsi_ppid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j) != -1)) {
                        j->tracing_pid = proc.pbsi_ppid;
                        j->reap_after_trace = true;
                        return;
                    }
                }
            }
        } else if (!j->anonymous) {
            if (j->tracing_pid == (pid_t)kev->ident) {
                job_cleanup_after_tracer(j);

                return;
            } else if (j->tracing_pid && !j->reap_after_trace) {
                /* The job exited before our sample completed. */
                job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job has exited. Will reap after tracing PID %i exits.", j->tracing_pid);
                j->reap_after_trace = true;
                return;
            }
        }
    }

    if (fflags & NOTE_EXEC) {
        program_changed = true;

        if (j->anonymous) {
            struct proc_bsdshortinfo proc;
            if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
                char newlabel[1000];

                snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);

                job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);
                j->lastlookup = NULL;
                j->lastlookup_gennum = 0;

                LIST_REMOVE(j, label_hash_sle);
                strcpy((char *)j->label, newlabel);
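                /* The in-place strcpy() relies on job_new() having sized
                 * auto-generated labels with MAXCOMLEN bytes of slack (the
                 * "gross things later" comment there); the replacement label
                 * is rebuilt from pbsi_comm, which is bounded by MAXCOMLEN. */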
                jobmgr_t where2put = root_jobmgr;
                if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
                    where2put = j->mgr;
                }
                LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
            } else if (errno != ESRCH) {
                job_assumes(j, errno == 0);
            }
        } else {
            j->did_exec = true;

            if (j->spawn_reply_port) {
                errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
                if (errno) {
                    if (errno != MACH_SEND_INVALID_DEST) {
                        (void)job_assumes(j, errno == KERN_SUCCESS);
                    }
                    (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
                }

                j->spawn_reply_port = MACH_PORT_NULL;
                j->exit_status_port = MACH_PORT_NULL;
            }

            if (j->xpc_service && j->did_exec) {
                j->xpcproxy_did_exec = true;
            }

            job_log(j, LOG_DEBUG, "Program changed");
        }
    }
    if (fflags & NOTE_FORK) {
        job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
        job_log_children_without_exec(j);
    }

    if (fflags & NOTE_EXIT) {
        job_reap(j);

        if (j->anonymous) {
            job_remove(j);
            j = NULL;
        } else {
            j = job_dispatch(j, false);
        }
    }
}
void
job_callback_timer(job_t j, void *ident)
{
    if (j == ident) {
        job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
        job_dispatch(j, true);
    } else if (&j->semaphores == ident) {
        job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
        job_dispatch(j, false);
    } else if (&j->start_interval == ident) {
        job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
        j->start_pending = true;
        job_dispatch(j, false);
    } else if (&j->exit_timeout == ident) {
        if (!job_assumes(j, j->p != 0)) {
            return;
        }

        if (j->sent_sigkill) {
            uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);

            td /= NSEC_PER_SEC;
            td -= j->clean_kill ? 0 : j->exit_timeout;

            job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
            j->workaround9359725 = true;

            if (g_trap_sigkill_bugs) {
                job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
                (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
            }

            /* We've simulated the exit, so we have to cancel the kevent for
             * this job, otherwise we may get a kevent later down the road that
             * has a stale context pointer (if we've removed the job). Or worse,
             * it'll corrupt our data structures if the job still exists or the
             * allocation was recycled.
             *
             * If the failing process had a tracer attached to it, we need to
             * remove our NOTE_EXIT for that tracer too, otherwise the same
             * thing might happen.
             *
             * Note that, if we're not shutting down, this will result in a
             * zombie process just hanging around forever. But if the process
             * didn't exit after receiving SIGKILL, odds are it would've just
             * stuck around forever anyway.
             *
             * See <rdar://problem/9481630>.
             */
            kevent_mod((uintptr_t)j->p, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
            if (j->tracing_pid) {
                kevent_mod((uintptr_t)j->tracing_pid, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
            }

            struct kevent bogus_exit;
            EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
            jobmgr_callback(j->mgr, &bogus_exit);
        } else {
            if (unlikely(j->debug_before_kill)) {
                job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
                (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
            }

            job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
            job_kill(j);
        }
    } else {
        (void)job_assumes(j, false);
    }
}
void
job_callback_read(job_t j, int ident)
{
    if (ident == j->log_redirect_fd) {
        job_log_stdouterr(j);
    } else if (ident == j->stdin_fd) {
        job_dispatch(j, true);
    } else {
        socketgroup_callback(j);
    }
}
void
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
{
    jobmgr_t jmi;
    job_t j;

    SLIST_FOREACH(jmi, &jm->submgrs, sle) {
        jobmgr_reap_bulk(jmi, kev);
    }

    if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
        kev->udata = j;
        job_callback(j, kev);
    }
}
void
jobmgr_callback(void *obj, struct kevent *kev)
{
    jobmgr_t jm = obj;
    job_t ji;

    switch (kev->filter) {
    case EVFILT_PROC:
        jobmgr_reap_bulk(jm, kev);
        root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
        break;
    case EVFILT_SIGNAL:
        switch (kev->ident) {
        case SIGTERM:
            jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
            return launchd_shutdown();
        case SIGUSR1:
            return calendarinterval_callback();
        case SIGUSR2:
            fake_shutdown_in_progress = true;
            runtime_setlogmask(LOG_UPTO(LOG_DEBUG));

            runtime_closelog(); /* HACK -- force 'start' time to be set */

            if (pid1_magic) {
                int64_t now = runtime_get_wall_time();

                jobmgr_log(jm, LOG_NOTICE, "Anticipatory shutdown began at: %lld.%06llu", now / USEC_PER_SEC, now % USEC_PER_SEC);

                LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
                    if (ji->per_user && ji->p) {
                        (void)job_assumes(ji, runtime_kill(ji->p, SIGUSR2) != -1);
                    }
                }
            } else {
                jobmgr_log(jm, LOG_NOTICE, "Anticipatory per-user launchd shutdown");
            }
            break;
        default:
            return (void)jobmgr_assumes(jm, false);
        }
        break;
    case EVFILT_FS:
        if (kev->fflags & VQ_MOUNT) {
            jobmgr_dispatch_all(jm, true);
        }
        jobmgr_dispatch_all_semaphores(jm);
        break;
    case EVFILT_TIMER:
        if (kev->ident == (uintptr_t)&sorted_calendar_events) {
            calendarinterval_callback();
        } else if (kev->ident == (uintptr_t)jm) {
            jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
            jobmgr_still_alive_with_check(jm);
        } else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
            jobmgr_do_garbage_collection(jm);
        } else if (kev->ident == (uintptr_t)&g_runtime_busy_time) {
            jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
            if (jobmgr_assumes(jm, runtime_busy_cnt == 0)) {
                return launchd_shutdown();
            }
        }
        break;
    case EVFILT_VNODE:
        if (kev->ident == (uintptr_t)s_no_hang_fd) {
            int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
            if (unlikely(_no_hang_fd != -1)) {
                jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
                (void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
                (void)jobmgr_assumes(root_jobmgr, runtime_close(s_no_hang_fd) != -1);
                s_no_hang_fd = _fd(_no_hang_fd);
            }
        } else if (pid1_magic && g_console && kev->ident == (uintptr_t)fileno(g_console)) {
            int cfd = -1;

            if (launchd_assumes((cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1)) {
                if (!launchd_assumes((g_console = fdopen(cfd, "w")) != NULL)) {
                    (void)launchd_assumes(runtime_close(cfd) != -1);
                }
            }
        }
        break;
    default:
        return (void)jobmgr_assumes(jm, false);
    }
}
void
job_callback(void *obj, struct kevent *kev)
{
    job_t j = obj;

    job_log(j, LOG_DEBUG, "Dispatching kevent callback.");

    switch (kev->filter) {
    case EVFILT_PROC:
        return job_callback_proc(j, kev);
    case EVFILT_TIMER:
        return job_callback_timer(j, (void *) kev->ident);
    case EVFILT_VNODE:
        return semaphoreitem_callback(j, kev);
    case EVFILT_READ:
        return job_callback_read(j, (int) kev->ident);
    case EVFILT_MACHPORT:
        return (void)job_dispatch(j, true);
    default:
        return (void)job_assumes(j, false);
    }
}
void
job_start(job_t j)
{
    uint64_t td;
    int spair[2];
    int execspair[2];
    int oepair[2];
    char nbuf[64];
    pid_t c;
    bool sipc = false;
    u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC;

    if (!job_assumes(j, j->mgr != NULL)) {
        return;
    }

    if (unlikely(job_active(j))) {
        job_log(j, LOG_DEBUG, "Already started");
        return;
    }

    /*
     * Some users adjust the wall-clock and then expect software to not notice.
     * Therefore, launchd must use an absolute clock instead of the wall clock
     * wherever possible.
     */
    td = runtime_get_nanoseconds_since(j->start_time);
    td /= NSEC_PER_SEC;

    if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
        time_t respawn_delta = j->min_run_time - (uint32_t)td;

        /*
         * We technically should ref-count throttled jobs to prevent idle exit,
         * but we're not directly tracking the 'throttled' state at the moment.
         */
        int level = LOG_WARNING;
        if (!j->did_exec && ((j->fail_cnt - 1) % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
            level = LOG_DEBUG;
        }

        job_log(j, level, "Throttling respawn: Will start in %ld seconds", respawn_delta);
        (void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
        job_ignore(j);
        return;
    }

    if (likely(!j->legacy_mach_job)) {
        sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_special_privileges;
    }

    if (sipc) {
        (void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
    }

    (void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);

    if (likely(!j->legacy_mach_job) && job_assumes(j, pipe(oepair) != -1)) {
        j->log_redirect_fd = _fd(oepair[0]);
        (void)job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
        (void)job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
    }

    switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
    case -1:
        job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
        (void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j) != -1);

        (void)job_assumes(j, runtime_close(execspair[0]) == 0);
        (void)job_assumes(j, runtime_close(execspair[1]) == 0);
        if (sipc) {
            (void)job_assumes(j, runtime_close(spair[0]) == 0);
            (void)job_assumes(j, runtime_close(spair[1]) == 0);
        }
        if (likely(!j->legacy_mach_job)) {
            (void)job_assumes(j, runtime_close(oepair[0]) != -1);
            (void)job_assumes(j, runtime_close(oepair[1]) != -1);
            j->log_redirect_fd = 0;
        }
        break;
    case 0:
        if (unlikely(_vproc_post_fork_ping())) {
            _exit(EXIT_FAILURE);
        }
        if (!j->legacy_mach_job) {
            (void)job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
            (void)job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
            (void)job_assumes(j, runtime_close(oepair[1]) != -1);
        }
        (void)job_assumes(j, runtime_close(execspair[0]) == 0);
        /* wait for our parent to say they've attached a kevent to us */
        read(_fd(execspair[1]), &c, sizeof(c));

        if (sipc) {
            (void)job_assumes(j, runtime_close(spair[0]) == 0);
            snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
            setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
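            /* The child advertises its end of the socketpair by number in
             * LAUNCHD_TRUSTED_FD_ENV; liblaunch reads that variable back
             * during check-in to find its already-open IPC channel to
             * launchd, rather than creating a fresh connection. */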
4196 j
->start_time
= runtime_get_opaque_time();
4198 job_log(j
, LOG_DEBUG
, "Started as PID: %u", c
);
4200 j
->did_exec
= false;
4201 j
->xpcproxy_did_exec
= false;
4202 j
->checkedin
= false;
4203 j
->start_pending
= false;
4207 if (j
->needs_kickoff
) {
4208 j
->needs_kickoff
= false;
4210 if (SLIST_EMPTY(&j
->semaphores
)) {
4211 j
->ondemand
= false;
4215 if (j
->has_console
) {
4221 LIST_INSERT_HEAD(&j
->mgr
->active_jobs
[ACTIVE_JOB_HASH(c
)], j
, pid_hash_sle
);
4223 if (likely(!j
->legacy_mach_job
)) {
4224 (void)job_assumes(j
, runtime_close(oepair
[1]) != -1);
4228 j
->mgr
->normal_active_cnt
++;
4229 j
->fork_fd
= _fd(execspair
[0]);
4230 (void)job_assumes(j
, runtime_close(execspair
[1]) == 0);
4232 (void)job_assumes(j
, runtime_close(spair
[1]) == 0);
4233 ipc_open(_fd(spair
[0]), j
);
4235 if (job_assumes(j
, kevent_mod(c
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
? root_jobmgr
: j
->mgr
) != -1)) {
4241 j
->wait4debugger_oneshot
= false;
4243 struct envitem
*ei
= NULL
, *et
= NULL
;
4244 SLIST_FOREACH_SAFE(ei
, &j
->env
, sle
, et
) {
4246 SLIST_REMOVE(&j
->env
, ei
, envitem
, sle
);
4250 if (likely(!j
->stall_before_exec
)) {
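/* The execspair socketpair gives job_start() a small handshake: the child
 * blocks in read(2) on execspair[1] until the parent has registered its
 * EVFILT_PROC kevent and calls job_uncork_fork(), which writes one value
 * into the other end. This closes the window where a short-lived child
 * could exit before the parent was watching for NOTE_EXIT.
 */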
void
job_start_child(job_t j)
{
	typeof(posix_spawn) *psf;
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC;
	size_t binpref_out_cnt = 0;
	size_t i;

	(void)job_assumes(j, posix_spawnattr_init(&spattr) == 0);

	job_setup_attributes(j);

	if (unlikely(j->argv && j->globargv)) {
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (likely(j->argv)) {
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	if (likely(!j->inetcompat)) {
		argv++;
	}

	if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
		if (!j->legacy_LS_job) {
			job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		}
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

	if (unlikely(j->disable_aslr)) {
		spflags |= _POSIX_SPAWN_DISABLE_ASLR;
	}
	spflags |= j->pstype;

	(void)job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);

	if (unlikely(j->j_binpref_cnt)) {
		(void)job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
		(void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
				(void)job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
			}
		}
	}

	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}

	psf = j->prog ? posix_spawn : posix_spawnp;

	if (likely(!j->inetcompat)) {
		file2exec = j->prog ? j->prog : argv[0];
	}

	errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);
	if (errno != EBADARCH) {
		int level = LOG_ERR;
		if ((j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
			level = LOG_DEBUG;
		}
		job_log_error(j, level, "posix_spawn(\"%s\", ...)", file2exec);
		errno = EXIT_FAILURE;
	}

out_bad:
	_exit(errno);
}
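/* Because spflags always contains POSIX_SPAWN_SETEXEC, psf() behaves like
 * exec(2): on success it replaces this process image and never returns.
 * Reaching the trailing _exit(errno) therefore always means the spawn
 * failed; EBADARCH is deliberately not logged at LOG_ERR here because the
 * unsupported-architecture case is reported when job_useless() unloads the
 * job.
 */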
void
jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
{
	launch_data_t tmp;
	struct envitem *ei;
	job_t ji;

	if (jm->parentmgr) {
		jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
	} else {
		char **tmpenviron = environ;
		for (; *tmpenviron; tmpenviron++) {
			char envkey[1024];
			launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
			launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
			strncpy(envkey, *tmpenviron, sizeof(envkey));
			*(strchr(envkey, '=')) = '\0';
			launch_data_dict_insert(dict, s, envkey);
		}
	}

	LIST_FOREACH(ji, &jm->jobs, sle) {
		SLIST_FOREACH(ei, &ji->global_env, sle) {
			if ((tmp = launch_data_new_string(ei->value))) {
				launch_data_dict_insert(dict, tmp, ei->key);
			}
		}
	}
}

void
jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
{
	struct envitem *ei;
	job_t ji;

	if (jm->parentmgr) {
		jobmgr_setup_env_from_other_jobs(jm->parentmgr);
	}

	LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
		SLIST_FOREACH(ei, &ji->global_env, sle) {
			setenv(ei->key, ei->value, 1);
		}
	}
}
void
job_log_pids_with_weird_uids(job_t j)
{
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	pid_t *pids = NULL;
	uid_t u = j->mach_uid;
	int i = 0, kp_cnt = 0;

	if (!do_apple_internal_logging) {
		return;
	}

	pids = malloc(len);
	if (!job_assumes(j, pids != NULL)) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);

	/* libproc actually has some serious performance drawbacks when used over sysctl(3) in
	 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
	 * one kernel call, libproc requires that we get a list of PIDs we're interested in
	 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
	 * struct back in a single call for each one.
	 *
	 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
	 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
	 * libproc could go stale before we call proc_pidinfo().
	 *
	 * Note that proc_list*() APIs return the number of PIDs given back, not the number
	 * of bytes written to the buffer.
	 */
	if (!job_assumes(j, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		/* We perhaps should not log a bug here if we get ESRCH back, due to the race
		 * detailed above.
		 */
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				job_assumes(j, errno == 0);
			}
			continue;
		}

		uid_t i_euid = proc.pbsi_uid;
		uid_t i_uid = proc.pbsi_ruid;
		uid_t i_svuid = proc.pbsi_svuid;
		pid_t i_pid = pids[i];

		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);

		/* Temporarily disabled due to 5423935 and 4946119. */
#if 0
		/* Ask the accountless process to exit. */
		(void)job_assumes(j, runtime_kill(i_pid, SIGTERM) != -1);
#endif
	}

out:
	free(pids);
}
static struct passwd *
job_getpwnam(job_t j, const char *name)
{
	/*
	 * methodology for system daemons
	 *
	 * first lookup user record without any opendirectoryd interaction,
	 * we don't know what interprocess dependencies might be in flight.
	 * if that fails, we re-enable opendirectoryd interaction and
	 * re-issue the lookup. We have to disable the libinfo L1 cache
	 * otherwise libinfo will return the negative cache entry on the retry
	 */
#if !TARGET_OS_EMBEDDED
	struct passwd *pw = NULL;

	if (pid1_magic && j->mgr == root_jobmgr) {
		si_search_module_set_flags("ds", 1 /* SEARCH_MODULE_FLAG_DISABLED */);
		gL1CacheEnabled = false;

		pw = getpwnam(name);
		si_search_module_set_flags("ds", 0);
	}

	if (pw == NULL) {
		pw = getpwnam(name);
	}

	return pw;
#else
	return getpwnam(name);
#endif
}

static struct group *
job_getgrnam(job_t j, const char *name)
{
#if !TARGET_OS_EMBEDDED
	struct group *gr = NULL;

	if (pid1_magic && j->mgr == root_jobmgr) {
		si_search_module_set_flags("ds", 1 /* SEARCH_MODULE_FLAG_DISABLED */);
		gL1CacheEnabled = false;

		gr = getgrnam(name);
		si_search_module_set_flags("ds", 0);
	}

	if (gr == NULL) {
		gr = getgrnam(name);
	}

	return gr;
#else
	return getgrnam(name);
#endif
}
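/* job_getgrnam() deliberately mirrors job_getpwnam() above: first try the
 * lookup with the "ds" (opendirectoryd) module disabled and the libinfo L1
 * cache off, then fall back to a normal lookup, so launchd never deadlocks
 * on a directory-services daemon it may itself be responsible for starting.
 */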
void
job_postfork_test_user(job_t j)
{
	/* This function is all about 5201578 */

	const char *home_env_var = getenv("HOME");
	const char *user_env_var = getenv("USER");
	const char *logname_env_var = getenv("LOGNAME");
	uid_t tmp_uid, local_uid = getuid();
	gid_t tmp_gid, local_gid = getgid();
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	char loginname[2000];
	struct passwd *pwe;

	if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
				&& strcmp(user_env_var, logname_env_var) == 0)) {
		goto out_bad;
	}

	if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
		job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
		goto out_bad;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	tmp_uid = pwe->pw_uid;
	tmp_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	if (strcmp(loginname, logname_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
		goto out_bad;
	}
	if (strcmp(homedir, home_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
		goto out_bad;
	}
	if (local_uid != tmp_uid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'U', tmp_uid, local_uid);
		goto out_bad;
	}
	if (local_gid != tmp_gid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'G', tmp_gid, local_gid);
		goto out_bad;
	}

	return;
out_bad:
#if 0
	(void)job_assumes(j, runtime_kill(getppid(), SIGTERM) != -1);
	_exit(EXIT_FAILURE);
#else
	job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
#endif
}
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	if (getuid() != 0) {
		return job_postfork_test_user(j);
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = job_getpwnam(j, j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(EXIT_FAILURE);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_log_pids_with_weird_uids(j);
			_exit(EXIT_FAILURE);
		}
	} else {
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}

	if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	if (j->groupname) {
		struct group *gre;

		if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(EXIT_FAILURE);
		}

		desired_gid = gre->gr_gid;
	}

	if (!job_assumes(j, setlogin(loginname) != -1)) {
		_exit(EXIT_FAILURE);
	}

	if (!job_assumes(j, setgid(desired_gid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (likely(!j->no_init_groups)) {
#if 1
		if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
			_exit(EXIT_FAILURE);
		}
#else
		/* Do our own little initgroups(). We do this to guarantee that we're
		 * always opted into dynamic group resolution in the kernel. initgroups(3)
		 * does not make this guarantee.
		 */
		int groups[NGROUPS], ngroups;

		/* A failure here isn't fatal, and we'll still get data we can use. */
		(void)job_assumes(j, getgrouplist(j->username, desired_gid, groups, &ngroups) != -1);

		if (!job_assumes(j, syscall(SYS_initgroups, ngroups, groups, desired_uid) != -1)) {
			_exit(EXIT_FAILURE);
		}
#endif
	}

	if (!job_assumes(j, setuid(desired_uid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (likely(r > 0 && r < sizeof(tmpdirpath))) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
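/* The ordering above is load-bearing: setlogin(2) and setgid(2) must run
 * while we are still root, initgroups(3) must follow setgid() (see 4616864),
 * and setuid(2) comes last because it permanently drops the privileges the
 * earlier calls need. The setenv() calls pass 0 as the overwrite flag, so
 * values already present in the environment are preserved.
 */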
void
job_setup_attributes(job_t j)
{
	struct limititem *li;
	struct envitem *ei;

	if (unlikely(j->setnice)) {
		(void)job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
	}

	SLIST_FOREACH(li, &j->limits, sle) {
		struct rlimit rl;

		if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
			continue;
		}

		if (li->sethard) {
			rl.rlim_max = li->lim.rlim_max;
		}
		if (li->setsoft) {
			rl.rlim_cur = li->lim.rlim_cur;
		}

		if (setrlimit(li->which, &rl) == -1) {
			job_log_error(j, LOG_WARNING, "setrlimit()");
		}
	}

	if (unlikely(!j->inetcompat && j->session_create)) {
		launchd_SessionCreate();
	}

	if (unlikely(j->low_pri_io)) {
		(void)job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
	}
	if (unlikely(j->rootdir)) {
		(void)job_assumes(j, chroot(j->rootdir) != -1);
		(void)job_assumes(j, chdir(".") != -1);
	}

	job_postfork_become_user(j);

	if (unlikely(j->workingdir)) {
		(void)job_assumes(j, chdir(j->workingdir) != -1);
	}

	if (unlikely(j->setmask)) {
		umask(j->mask);
	}

	if (j->stdin_fd) {
		(void)job_assumes(j, dup2(j->stdin_fd, STDIN_FILENO) != -1);
	} else {
		job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
	}
	job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
	job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);

	jobmgr_setup_env_from_other_jobs(j->mgr);

	SLIST_FOREACH(ei, &j->env, sle) {
		setenv(ei->key, ei->value, 1);
	}

	if (do_apple_internal_logging) {
		setenv(LAUNCHD_DO_APPLE_INTERNAL_LOGGING, "true", 1);
	}

#if !TARGET_OS_EMBEDDED
	if (j->jetsam_properties) {
		(void)job_assumes(j, proc_setpcontrol(PROC_SETPC_TERMINATE) == 0);
	}
#endif

#if TARGET_OS_EMBEDDED
	if (j->main_thread_priority != 0) {
		struct sched_param params;
		bzero(&params, sizeof(params));
		params.sched_priority = j->main_thread_priority;
		(void)job_assumes(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params) != -1);
	}
#endif

	/*
	 * We'd like to call setsid() unconditionally, but we have reason to
	 * believe that prevents launchd from being able to send signals to
	 * setuid children. We'll settle for process-groups.
	 */
	if (getppid() != 1) {
		(void)job_assumes(j, setpgid(0, 0) != -1);
	} else {
		(void)job_assumes(j, setsid() != -1);
	}
}
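/* Sequencing note: chroot(2) happens before job_postfork_become_user()
 * because it requires root, and the WorkingDirectory chdir(2) happens after
 * it so a relative path is resolved inside the new root with the job's own
 * credentials.
 */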
void
job_setup_fd(job_t j, int target_fd, const char *path, int flags)
{
	int fd;

	if (!path) {
		return;
	}

	if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
		job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
		return;
	}

	(void)job_assumes(j, dup2(fd, target_fd) != -1);
	(void)job_assumes(j, runtime_close(fd) == 0);
}

int
dir_has_files(job_t j, const char *path)
{
	DIR *dd = opendir(path);
	struct dirent *de;
	bool r = 0;

	if (unlikely(!dd)) {
		return -1;
	}

	while ((de = readdir(dd))) {
		if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
			r = 1;
			break;
		}
	}

	(void)job_assumes(j, closedir(dd) == 0);
	return r;
}
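/* dir_has_files() returns -1 if the directory cannot be opened, a nonzero
 * value if any entry besides "." and ".." exists, and 0 otherwise. It backs
 * the DIR_NOT_EMPTY (QueueDirectories) KeepAlive check in job_keepalive()
 * below.
 */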
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		/* ci must want to fire after every other timer, or there are no timers */

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
		char time_string[100];
		size_t time_string_len;

		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
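/* All calendar jobs share one absolute-time kevent timer whose ident is the
 * address of sorted_calendar_events; only the earliest when_next in the
 * sorted list is ever armed. A sketch of the resulting registration, using
 * the same constants as above:
 *
 *	EV_SET(&kev, (uintptr_t)&sorted_calendar_events, EVFILT_TIMER,
 *	       EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr);
 *
 * When it fires, calendarinterval_callback() starts every job whose
 * deadline has passed and re-arms the timer for the new head of the list.
 */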
void
extract_rcsid_substr(const char *i, char *o, size_t osz)
{
	char *rcs_rev_tmp = strchr(i, ' ');

	if (!rcs_rev_tmp) {
		strlcpy(o, i, osz);
	} else {
		strlcpy(o, rcs_rev_tmp + 1, osz);
		rcs_rev_tmp = strchr(o, ' ');
		if (rcs_rev_tmp) {
			*rcs_rev_tmp = '\0';
		}
	}
}

void
jobmgr_log_bug(jobmgr_t jm, unsigned int line)
{
	static const char *file;
	int saved_errno = errno;
	char buf[100];

	runtime_ktrace1(RTKT_LAUNCHD_BUG);

	extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));

	if (!file) {
		file = strrchr(__FILE__, '/');
		if (!file) {
			file = __FILE__;
		} else {
			file += 1;
		}
	}

	/* the only time 'jm' should not be set is if setting up the first bootstrap fails for some reason */
	if (likely(jm)) {
		jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
	} else {
		runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
	}
}

void
job_log_bug(job_t j, unsigned int line)
{
	static const char *file;
	int saved_errno = errno;
	char buf[100];

	runtime_ktrace1(RTKT_LAUNCHD_BUG);

	extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));

	if (!file) {
		file = strrchr(__FILE__, '/');
		if (!file) {
			file = __FILE__;
		} else {
			file += 1;
		}
	}

	if (likely(j)) {
		job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
	} else {
		runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
	}
}
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	const char *label2use = j ? j->label : "com.apple.launchd.NULL";
	const char *mgr2use = j ? j->mgr->name : "NULL";
	struct runtime_syslog_attr attr = { g_my_label, label2use, mgr2use, pri, getuid(), getpid(), j ? j->p : 0 };
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	/*
	 * Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(), but before the exec*(). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
#else
		snprintf(newmsg, newmsgsz, "(%s) %s: %s", label2use, msg, strerror(err));
#endif
	} else {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s", msg);
#else
		snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
#endif
	}

	if (j && unlikely(j->debug)) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	runtime_vsyslog(&attr, newmsg, ap);

	if (j && unlikely(j->debug)) {
		setlogmask(oldmask);
	}
}

void
job_log_error(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, errno, msg, ap);
	va_end(ap);
}

void
job_log(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, 0, msg, ap);
	va_end(ap);
}

void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}

void
jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, 0, msg, ap);
	va_end(ap);
}

void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct runtime_syslog_attr attr = { g_my_label, g_my_label, jm->name, pri, getuid(), getpid(), getpid() };

		runtime_vsyslog(&attr, newmsg, ap);
	}
}
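/* The doubling loop above escapes '%' characters in job manager names
 * ("50%" becomes "50%%") because newmsg is later consumed as a printf-style
 * format string, both by runtime_vsyslog() and by the recursive call into
 * the parent manager; an unescaped '%' would be misread as a conversion
 * specifier.
 */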
void
semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
{
	if (si->fd != -1) {
		job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
		(void)job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
	}
}

void
semaphoreitem_watch(job_t j, struct semaphoreitem *si)
{
	char *parentdir, tmp_path[PATH_MAX];
	int saved_errno = 0;
	int fflags = NOTE_DELETE|NOTE_RENAME;

	switch (si->why) {
	case DIR_NOT_EMPTY:
	case PATH_CHANGES:
		fflags |= NOTE_ATTRIB|NOTE_LINK;
		/* FALLTHROUGH */
	case PATH_EXISTS:
		fflags |= NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
		/* FALLTHROUGH */
	case PATH_MISSING:
		break;
	default:
		return;
	}

	/* dirname() may modify tmp_path */
	strlcpy(tmp_path, si->what, sizeof(tmp_path));

	if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
		return;
	}

	/* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
	do {
		if (si->fd == -1) {
			struct stat sb;
			if (stat(si->what, &sb) == 0) {
				/* If we're watching a character or block device, only watch the parent directory.
				 * See rdar://problem/6489900 for the gory details. Basically, holding an open file
				 * descriptor to a devnode could end up (a) blocking us on open(2) until someone else
				 * open(2)s the file (like a character device that waits for a carrier signal) or
				 * (b) preventing other processes from obtaining an exclusive lock on the file, even
				 * though we're opening it with O_EVTONLY.
				 *
				 * The main point of contention is that O_EVTONLY doesn't actually mean "event only".
				 * It means "Don't prevent unmounts of this descriptor's volume". We work around this
				 * for dev nodes by only watching the parent directory and stat(2)ing our desired file
				 * each time the parent changes to see if it appeared or disappeared.
				 */
				if (S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode)) {
					si->fd = _fd(open(si->what, O_EVTONLY | O_NOCTTY | O_NONBLOCK));
				}
			}

			if (si->fd == -1) {
				si->watching_parent = job_assumes(j, (si->fd = _fd(open(parentdir, O_EVTONLY | O_NOCTTY | O_NONBLOCK))) != -1);
			} else {
				si->watching_parent = false;
			}
		}

		if (si->fd == -1) {
			return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", si->what);
		}

		job_log(j, LOG_DEBUG, "Watching %svnode (%s): %d", si->watching_parent ? "parent ": "", si->what, si->fd);

		if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
			saved_errno = errno;
			/*
			 * The FD can be revoked between the open() and kevent().
			 * This is similar to the inability for kevents to be
			 * attached to short lived zombie processes after fork()
			 * but before kevent().
			 */
			(void)job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1;
		}
	} while (unlikely((si->fd == -1) && (saved_errno == ENOENT)));

	if (saved_errno == ENOTSUP) {
		/*
		 * 3524219 NFS needs kqueue support
		 * 4124079 VFS needs generic kqueue support
		 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
		 */
		job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);

		if (!j->poll_for_vfs_changes) {
			j->poll_for_vfs_changes = true;
			(void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
		}
	}
}
void
semaphoreitem_callback(job_t j, struct kevent *kev)
{
	char invalidation_reason[100] = "";
	struct semaphoreitem *si;

	SLIST_FOREACH(si, &j->semaphores, sle) {
		switch (si->why) {
		case PATH_CHANGES:
		case PATH_EXISTS:
		case PATH_MISSING:
		case DIR_NOT_EMPTY:
			job_log(j, LOG_DEBUG, "P%s changed (%u): %s", si->watching_parent ? "arent path" : "ath", si->why, si->what);
			break;
		default:
			continue;
		}

		if (si->fd == (int)kev->ident) {
			break;
		}
	}

	if (!job_assumes(j, si != NULL)) {
		return;
	}

	if (NOTE_DELETE & kev->fflags) {
		strcat(invalidation_reason, "deleted");
	}

	if (NOTE_RENAME & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/renamed");
		} else {
			strcat(invalidation_reason, "renamed");
		}
	}

	if (NOTE_REVOKE & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/revoked");
		} else {
			strcat(invalidation_reason, "revoked");
		}
	}

	if (invalidation_reason[0]) {
		job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
		(void)job_assumes(j, runtime_close(si->fd) == 0);
		si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
	}

	if (!si->watching_parent) {
		if (si->why == PATH_CHANGES) {
			j->start_pending = true;
		} else {
			semaphoreitem_watch(j, si);
		}
	} else { /* Something happened to the parent directory. See if our target file appeared. */
		if (!invalidation_reason[0]) {
			(void)job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
			semaphoreitem_watch(j, si);
		}
		/* Need to think about what should happen if the parent directory goes invalid. */
	}

	job_dispatch(j, false);
}
struct cal_dict_walk {
	job_t j;
	struct tm tmptm;
};

void
calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
{
	struct cal_dict_walk *cdw = context;
	struct tm *tmptm = &cdw->tmptm;
	job_t j = cdw->j;
	int64_t val;

	if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
		/* hack to let caller know something went wrong */
		tmptm->tm_sec = -1;
		return;
	}

	val = launch_data_get_integer(obj);

	if (val < 0) {
		job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
		if (val > 59) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_min = (typeof(tmptm->tm_min)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
		if (val > 23) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
		if (val < 1 || val > 31) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
		if (val > 7) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
		if (val > 12) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
			tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
		}
	}
}
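/* For reference, this walker consumes a job's StartCalendarInterval
 * dictionary. A representative (hypothetical) plist fragment:
 *
 *	<key>StartCalendarInterval</key>
 *	<dict>
 *		<key>Hour</key>    <integer>3</integer>
 *		<key>Minute</key>  <integer>15</integer>
 *		<key>Weekday</key> <integer>1</integer>
 *	</dict>
 *
 * Keys left unset stay -1 and act as wildcards, matching cron semantics;
 * Month is shifted from the human 1-12 range into tm_mon's 0-11 range (see
 * 4798263 above).
 */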
bool
calendarinterval_new_from_obj(job_t j, launch_data_t obj)
{
	struct cal_dict_walk cdw;

	cdw.j = j;
	memset(&cdw.tmptm, 0, sizeof(cdw.tmptm));

	cdw.tmptm.tm_min = -1;
	cdw.tmptm.tm_hour = -1;
	cdw.tmptm.tm_mday = -1;
	cdw.tmptm.tm_wday = -1;
	cdw.tmptm.tm_mon = -1;

	if (!job_assumes(j, obj != NULL)) {
		return false;
	}

	if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
		return false;
	}

	launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);

	if (unlikely(cdw.tmptm.tm_sec == -1)) {
		return false;
	}

	return calendarinterval_new(j, &cdw.tmptm);
}
bool
calendarinterval_new(job_t j, struct tm *w)
{
	struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));

	if (!job_assumes(j, ci != NULL)) {
		return false;
	}

	ci->when = *w;
	ci->job = j;

	SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);

	calendarinterval_setalarm(j, ci);

	runtime_add_weak_ref();

	return true;
}

void
calendarinterval_delete(job_t j, struct calendarinterval *ci)
{
	SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
	LIST_REMOVE(ci, global_sle);

	free(ci);

	runtime_del_weak_ref();
}

void
calendarinterval_sanity_check(void)
{
	struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
	time_t now = time(NULL);

	if (unlikely(ci && (ci->when_next < now))) {
		(void)jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
	}
}

void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		if (ci->when_next > now) {
			break;
		}

		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
bool
socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds)
{
	struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);

	if (!job_assumes(j, sg != NULL)) {
		return false;
	}

	sg->fds = calloc(1, fd_cnt * sizeof(int));
	sg->fd_cnt = fd_cnt;
	sg->junkfds = junkfds;

	if (!job_assumes(j, sg->fds != NULL)) {
		free(sg);
		return false;
	}

	memcpy(sg->fds, fds, fd_cnt * sizeof(int));
	strcpy(sg->name_init, name);

	SLIST_INSERT_HEAD(&j->sockets, sg, sle);

	runtime_add_weak_ref();

	return true;
}

void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			(void)job_assumes(j, unlink(sun->sun_path) != -1);
			/* We might conditionally need to delete a directory here */
		}

		(void)job_assumes(j, runtime_close(sg->fds[i]) != -1);
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	runtime_del_weak_ref();
}
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	struct kevent kev[sg->fd_cnt];
	char buf[10000];
	unsigned int i, buf_off = 0;

	if (unlikely(sg->junkfds)) {
		return;
	}

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	(void)job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);

	for (i = 0; i < sg->fd_cnt; i++) {
		(void)job_assumes(j, kev[i].flags & EV_ERROR);
		errno = (typeof(errno)) kev[i].data;
		(void)job_assumes(j, kev[i].data == 0);
	}
}
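/* kevent_bulk_mod() (provided by the launchd runtime) submits the whole
 * array in a single kevent(2) call and, judging by the checks above, uses
 * receipt semantics: each entry comes back with EV_ERROR set in flags and
 * the per-descriptor result in data, 0 meaning success. That way one bad
 * socket can be diagnosed without losing the registrations of the others.
 */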
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, false);
}

void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, true);
}

void
socketgroup_callback(job_t j)
{
	job_dispatch(j, true);
}
bool
envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot)
{
	struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);

	if (!job_assumes(j, ei != NULL)) {
		return false;
	}

	strcpy(ei->key_init, k);
	ei->value = ei->key_init + strlen(k) + 1;
	strcpy(ei->value, v);
	ei->one_shot = one_shot;

	if (global) {
		if (SLIST_EMPTY(&j->global_env)) {
			LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
		}
		SLIST_INSERT_HEAD(&j->global_env, ei, sle);
	} else {
		SLIST_INSERT_HEAD(&j->env, ei, sle);
	}

	job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);

	return true;
}

void
envitem_delete(job_t j, struct envitem *ei, bool global)
{
	if (global) {
		SLIST_REMOVE(&j->global_env, ei, envitem, sle);
		if (SLIST_EMPTY(&j->global_env)) {
			LIST_REMOVE(j, global_env_sle);
		}
	} else {
		SLIST_REMOVE(&j->env, ei, envitem, sle);
	}

	free(ei);
}

void
envitem_setup(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
		return;
	}

	if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
		envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, false);
	} else {
		job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
	}
}

void
envitem_setup_one_shot(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
		return;
	}

	if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
		envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, true);
	} else {
		job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
	}
}
bool
limititem_update(job_t j, int w, rlim_t r)
{
	struct limititem *li;

	SLIST_FOREACH(li, &j->limits, sle) {
		if (li->which == w) {
			break;
		}
	}

	if (li == NULL) {
		li = calloc(1, sizeof(struct limititem));

		if (!job_assumes(j, li != NULL)) {
			return false;
		}

		SLIST_INSERT_HEAD(&j->limits, li, sle);

		li->which = w;
	}

	if (j->importing_hard_limits) {
		li->lim.rlim_max = r;
		li->sethard = true;
	} else {
		li->lim.rlim_cur = r;
		li->setsoft = true;
	}

	return true;
}

void
limititem_delete(job_t j, struct limititem *li)
{
	SLIST_REMOVE(&j->limits, li, limititem, sle);

	free(li);
}

void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	if (launch_data_get_bool(obj) == false) {
		return;
	}

	if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}

void
limititem_setup(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;
	size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
	rlim_t rl;

	if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
		return;
	}

	rl = launch_data_get_integer(obj);

	for (i = 0; i < limits_cnt; i++) {
		if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
			break;
		}
	}

	if (i == limits_cnt) {
		return;
	}

	limititem_update(j, launchd_keys2limits[i].val, rl);
}
bool
job_useless(job_t j)
{
	if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
		if (j->legacy_LS_job && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->shutdown_monitor) {
		return false;
	} else if (j->mgr->shutting_down) {
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		if (total_children == 0 && !j->anonymous) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
		}
		return true;
	} else if (j->legacy_mach_job) {
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	} else {
		/* If the job's executable does not have any valid architectures (for
		 * example, if it's a PowerPC-only job), then we don't even bother
		 * trying to relaunch it, as we have no reasonable expectation that
		 * the situation will change.
		 *
		 * <rdar://problem/9106979>
		 */
		if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
			job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
			return true;
		}
	}

	return false;
}
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	struct stat sb;
	job_t other_j;
	int qdir_file_cnt;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
	bool is_not_kextd = (do_apple_internal_logging || (strcmp(j->label, "com.apple.kextd") != 0));

	if (unlikely(j->mgr->shutting_down)) {
		return false;
	}

	/*
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
		return false;
	}

	if (unlikely(j->needs_kickoff)) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}

	/* TODO: Coalesce external events and semaphore items, since they're basically
	 * the same thing.
	 */
	struct externalevent *ei = NULL;
	LIST_FOREACH(ei, &j->events, job_le) {
		if (ei->state == ei->wanted_state) {
			return true;
		}
	}

	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case CRASHED:
			wanted_state = true;
		case DID_NOT_CRASH:
			if (j->crashed == wanted_state) {
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(NULL, si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(NULL, si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		case PATH_EXISTS:
			wanted_state = true;
		case PATH_MISSING:
			if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
				return true;
			} else {
				if (wanted_state) { /* File is not there but we wish it was. */
					if (si->fd != -1 && !si->watching_parent) { /* Need to be watching the parent now. */
						(void)job_assumes(j, runtime_close(si->fd) == 0);
						si->fd = -1;
						semaphoreitem_watch(j, si);
					}
				} else { /* File is there but we wish it wasn't. */
					if (si->fd != -1 && si->watching_parent) { /* Need to watch the file now. */
						(void)job_assumes(j, runtime_close(si->fd) == 0);
						si->fd = -1;
						semaphoreitem_watch(j, si);
					}
				}
			}
			break;
		case PATH_CHANGES:
			break;
		case DIR_NOT_EMPTY:
			if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
				job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
			} else if (qdir_file_cnt > 0) {
				job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
				return true;
			}
			break;
		}
	}

	return false;
}
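/* job_keepalive() answers a single question: given the job's KeepAlive
 * criteria, should it be (re)launched right now? Any satisfied criterion
 * returns true immediately. The PATH_EXISTS/PATH_MISSING cases additionally
 * repair their vnode watches, flipping between watching the file itself and
 * watching its parent directory as the file appears and disappears.
 */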
const char *
job_active(job_t j)
{
	struct machservice *ms;

	if (j->p && j->shutdown_monitor) {
		return "Monitoring shutdown";
	}
	if (j->p) {
		return "PID is still valid";
	}

	if (j->mgr->shutting_down && j->log_redirect_fd) {
		(void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
	}

	if (j->log_redirect_fd) {
		if (job_assumes(j, j->legacy_LS_job)) {
			return "Standard out/error is still valid";
		} else {
			(void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
			j->log_redirect_fd = 0;
		}
	}

	if (j->priv_port_has_senders) {
		return "Privileged Port still has outstanding senders";
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		if (ms->recv && machservice_active(ms)) {
			return "Mach service is still active";
		}
	}

	return NULL;
}

void
machservice_watch(job_t j, struct machservice *ms)
{
	if (ms->recv) {
		(void)job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
	}
}

void
machservice_ignore(job_t j, struct machservice *ms)
{
	(void)job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
}

void
machservice_resetport(job_t j, struct machservice *ms)
{
	LIST_REMOVE(ms, port_hash_sle);
	(void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	(void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);

	(void)job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
	(void)job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);

	if (!job_assumes(j, ms != NULL)) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->per_pid = pid_local;

	if (likely(*serviceport == MACH_PORT_NULL)) {
		if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
			goto out_bad;
		}

		if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);

	jobmgr_t where2put = j->mgr;
	/* XPC domains are separate from Mach bootstraps. */
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		if (g_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2put = root_jobmgr;
		}
	}

	/* Don't allow MachServices added by multiple-instance jobs to be looked up by others.
	 * We could just do this with a simple bit, but then we'd have to uniquify the
	 * names ourselves to avoid collisions. This is just easier.
	 */
	if (!j->dedicated_instance) {
		LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	}
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);

	return ms;
out_bad2:
	(void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
out_bad:
	free(ms);
	return NULL;
}
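/* Placement summary: with a flat Mach namespace, services are hashed into
 * root_jobmgr so every bootstrap can look them up, except for explicit
 * subsets and XPC domains, which keep their own tables. Dedicated-instance
 * jobs skip the name hash entirely so their services cannot be looked up by
 * other jobs.
 */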
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
struct machservice *
machservice_new_alias(job_t j, struct machservice *orig)
{
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
	if (job_assumes(j, ms != NULL)) {
		strcpy((char *)ms->name, orig->name);
		ms->alias = orig;
		ms->job = j;

		LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
		SLIST_INSERT_HEAD(&j->machservices, ms, sle);
		jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
	}

	return ms;
}
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */

bootstrap_status_t
machservice_status(struct machservice *ms)
{
	ms = ms->alias ? ms->alias : ms;
	if (ms->isActive) {
		return BOOTSTRAP_STATUS_ACTIVE;
	} else if (ms->job->ondemand) {
		return BOOTSTRAP_STATUS_ON_DEMAND;
	} else {
		return BOOTSTRAP_STATUS_INACTIVE;
	}
}
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (unlikely(j->alt_exc_handler)) {
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (ms) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (unlikely(j->internal_exc_handler)) {
		exc_port = runtime_get_kernel_port();
	} else if (unlikely(!exc_port)) {
		return;
	}

#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (likely(target_task)) {
		(void)job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
	} else if (pid1_magic && the_exception_server) {
		mach_port_t mhp = mach_host_self();
		(void)job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
		job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
	}
}

void
job_set_exception_port(job_t j, mach_port_t port)
{
	if (unlikely(!the_exception_server)) {
		the_exception_server = port;
		job_setup_exception_port(j, 0);
	} else {
		job_log(j, LOG_WARNING, "The exception server is already claimed!");
	}
}
void
machservice_setup_options(launch_data_t obj, const char *key, void *context)
{
	struct machservice *ms = context;
	mach_port_t mhp = mach_host_self();
	int which_port;
	bool b;

	if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
		return;
	}

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_INTEGER:
		which_port = (int)launch_data_get_integer(obj); /* XXX we should bound check this... */
		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
			switch (which_port) {
			case TASK_KERNEL_PORT:
			case TASK_HOST_PORT:
			case TASK_NAME_PORT:
			case TASK_BOOTSTRAP_PORT:
			/* I find it a little odd that zero isn't reserved in the header.
			 * Normally Mach is fairly good about this convention... */
			case 0:
				job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
				break;
			default:
				ms->special_port_num = which_port;
				SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
				break;
			}
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
			if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
				(void)job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
			} else {
				job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
			}
		}
		break;
	case LAUNCH_DATA_BOOL:
		b = launch_data_get_bool(obj);
		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
			ms->debug_on_close = b;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
			ms->reset = b;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
			ms->hide = b;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
			job_set_exception_port(ms->job, ms->port);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
			(void)job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_PINGEVENTUPDATES) == 0) {
			ms->event_update_port = b;
		}
		break;
	case LAUNCH_DATA_STRING:
		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
			const char *option = launch_data_get_string(obj);
			if (strcasecmp(option, "One") == 0) {
				ms->drain_one_on_crash = true;
			} else if (strcasecmp(option, "All") == 0) {
				ms->drain_all_on_crash = true;
			}
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		job_set_exception_port(ms->job, ms->port);
		break;
	default:
		break;
	}

	job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
}
void
machservice_setup(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;
	struct machservice *ms;
	mach_port_t p = MACH_PORT_NULL;

	if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
		job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
		return;
	}

	if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
		return;
	}

	ms->isActive = false;

	if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
		launch_data_dict_iterate(obj, machservice_setup_options, ms);
	}
}
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	jobmgr_t jmi = NULL, jmn = NULL;
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	if (!jm->shutting_down) {
		return jm;
	}

	if (SLIST_EMPTY(&jm->submgrs)) {
		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
		SLIST_FOREACH(jmi, &jm->submgrs, sle) {
			jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
		}
	}

	size_t actives = 0;
	job_t ji = NULL, jn = NULL;
	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
		if (ji->anonymous) {
			continue;
		}

		/* Let the shutdown monitor be up until the very end. */
		if (ji->shutdown_monitor) {
			continue;
		}

		/* On our first pass through, open a transaction for all the jobs that
		 * need to be dirty at shutdown. We'll close these transactions once the
		 * jobs that do not need to be dirty at shutdown have all exited.
		 */
		if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
			job_open_shutdown_transaction(ji);
		}

		const char *active = job_active(ji);
		if (!active) {
			job_remove(ji);
		} else {
			job_log(ji, LOG_DEBUG, "Job is active: %s", active);
			job_stop(ji);

			if (ji->p && !ji->dirty_at_shutdown) {
				/* We really only care if the job has not yet been reaped.
				 * There's no reason to delay shutdown if a Mach port has not
				 * yet been sent back to us, for example. While we're shutting
				 * all the "normal" jobs down, do not count the
				 * dirty-at-shutdown jobs toward the total of actives.
				 *
				 * Note that there's a potential race here where we may not get
				 * a port back in time, so that when we hit jobmgr_remove(), we
				 * end up removing the job and then our attempt to close the
				 * Mach port will fail. But at that point, the failure won't
				 * even make it to the syslog, so not a big deal.
				 */
				actives++;
			}

			if (ji->clean_kill) {
				job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
			} else {
				job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
			}
		}
	}

	jm->shutdown_jobs_dirtied = true;
	if (actives == 0) {
		if (!jm->shutdown_jobs_cleaned) {
			LIST_FOREACH(ji, &jm->jobs, sle) {
				if (!ji->anonymous) {
					job_close_shutdown_transaction(ji);
					actives++;
				}
			}

			jm->shutdown_jobs_cleaned = true;
		} else if (jm->monitor_shutdown && _s_shutdown_monitor) {
			/* The rest of shutdown has completed, so we can kill the shutdown
			 * monitor now like it was any other job.
			 */
			_s_shutdown_monitor->shutdown_monitor = false;
			actives = 1;

			job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
			job_stop(_s_shutdown_monitor);
			_s_shutdown_monitor = NULL;
		}
	}

	jobmgr_t r = jm;
	if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
		jobmgr_log(jm, LOG_DEBUG, "Removing.");
		jobmgr_remove(jm);
		r = NULL;
	}

	return r;
}
void
jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
{
	/* I maintain that stray processes should be at the mercy of launchd during shutdown,
	 * but nevertheless, things like diskimages-helper can stick around, and SIGKILLing
	 * them can result in data loss. So we send SIGTERM to all the strays and don't wait
	 * for them to exit before moving on.
	 *
	 * See rdar://problem/6562592
	 */
	size_t i = 0;
	for (i = 0; i < np; i++) {
		if (p[i] != 0) {
			jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
			(void)jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
		}
	}
}

void
jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
{
	size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
	pid_t *pids = NULL;
	int i = 0, kp_cnt = 0;

	if (likely(jm->parentmgr || !pid1_magic)) {
		return;
	}

	if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
		return;
	}

	runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);

	if (!jobmgr_assumes(jm, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
		goto out;
	}

	pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				jobmgr_assumes(jm, errno == 0);
			}

			kp_skipped++;
			continue;
		}

		pid_t p_i = pids[i];
		pid_t pp_i = proc.pbsi_ppid;
		pid_t pg_i = proc.pbsi_pgid;
		const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
		const char *n = proc.pbsi_comm;

		if (unlikely(p_i == 0 || p_i == 1)) {
			kp_skipped++;
			continue;
		}

		if (_s_shutdown_monitor && pp_i == _s_shutdown_monitor->p) {
			kp_skipped++;
			continue;
		}

		/* We might have some jobs hanging around that we've decided to shut down in spite of. */
		job_t j = jobmgr_find_by_pid(jm, p_i, false);
		if (!j || (j && j->anonymous)) {
			jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);

			int status = 0;
			if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
				if (jobmgr_assumes(jm, waitpid(p_i, &status, WNOHANG) == 0)) {
					jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
				}
				kp_skipped++;
			} else {
				job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
				/* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
				 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
				 * hints to the kernel along the way, so that it could shutdown certain subsystems when
				 * their userspace emissaries go away, before the call to reboot(2).
				 */
				if (leader && leader->ignore_pg_at_shutdown) {
					kp_skipped++;
				} else {
					ps[i] = p_i;
				}
			}
		} else {
			kp_skipped++;
		}
	}

	if ((kp_cnt - kp_skipped > 0) && kill_strays) {
		jobmgr_kill_stray_children(jm, ps, kp_cnt - kp_skipped);
	}

	free(ps);
out:
	free(pids);
}
jobmgr_t
jobmgr_parent(jobmgr_t jm)
{
	return jm->parentmgr;
}

void
job_uncork_fork(job_t j)
{
	pid_t c = j->p;

	job_log(j, LOG_DEBUG, "Uncorking the fork().");
	/* this unblocks the child and avoids a race
	 * between the above fork() and the kevent_mod() */
	(void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
	(void)job_assumes(j, runtime_close(j->fork_fd) != -1);
	j->fork_fd = 0;
}
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
{
	mach_msg_size_t mxmsgsz;
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));

	if (!jobmgr_assumes(jm, jmr != NULL)) {
		return NULL;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		(void)jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && !pid1_magic) {
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);

			if ((dfd = dup(lfd)) >= 0) {
				(void)jobmgr_assumes(jmr, runtime_close(dfd) != -1);
				(void)jobmgr_assumes(jmr, runtime_close(lfd) != -1);
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		/* cut off the Libc cache, we don't want to deadlock against ourself */
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);

		/* We set this explicitly as we start each child */
		launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
	} else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (!name) {
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	/* Total hacks. But the MIG server loop is too generic, and the more dynamic
	 * parts of it haven't been tested, or if they have, it was a very long time
	 * ago.
	 */
	if (xpc_events_xpc_events_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = xpc_events_xpc_events_subsystem.maxsize;
	}
	if (xpc_domain_xpc_domain_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = xpc_domain_xpc_domain_subsystem.maxsize;
	}

	(void)jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
	(void)jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
	(void)jobmgr_assumes(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
	(void)jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);

	if (name && !skip_init) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		bootstrapper->asport = asport;

		jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
		(void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	} else {
		jmr->req_asport = asport;
	}

	if (asport != MACH_PORT_NULL) {
		(void)jobmgr_assumes(jmr, launchd_mport_copy_send(asport) == KERN_SUCCESS);
	}

	if (jmr->parentmgr) {
		runtime_add_weak_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
	}
	return NULL;
}
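/* A note on the requestor-port bookkeeping above (a sketch of the mechanism,
 * not upstream commentary): a sub-bootstrap holds a send right to the port of
 * whoever requested its creation, and registering that right for
 * MACH_NOTIFY_DEAD_NAME asks the kernel for a notification when the
 * requestor's receive right is destroyed. That notification is what later
 * funnels into jobmgr_delete_anything_with_port() below and tears the job
 * manager down via jobmgr_shutdown(). */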
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
jobmgr_t
jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
{
	jobmgr_t new = NULL;

	/* These job managers are basically singletons, so we use the root Mach
	 * bootstrap port as their requestor ports so they'll never go away.
	 */
	mach_port_t req_port = root_jobmgr->jm_port;
	if (jobmgr_assumes(jm, launchd_mport_make_send(req_port) == KERN_SUCCESS)) {
		new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
		if (new) {
			new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
			new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
			new->xpc_singleton = true;
		}
	}

	return new;
}
jobmgr_t
jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
{
	jobmgr_t jmi = NULL;
	LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
		if (jmi->req_euid == uid) {
			return jmi;
		}
	}

	name_t name;
	(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
	jmi = jobmgr_new_xpc_singleton_domain(jm, name);
	if (jobmgr_assumes(jm, jmi != NULL)) {
		/* We need to create a per-user launchd for this UID if there isn't one
		 * already so we can grab the bootstrap port.
		 */
		job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
		if (jobmgr_assumes(jmi, puj != NULL)) {
			(void)jobmgr_assumes(jmi, launchd_mport_copy_send(puj->asport) == KERN_SUCCESS);
			(void)jobmgr_assumes(jmi, launchd_mport_copy_send(jmi->req_bsport) == KERN_SUCCESS);
			jmi->shortdesc = "per-user";
			jmi->req_asport = puj->asport;
			jmi->req_asid = puj->asid;
			jmi->req_euid = uid;

			LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
		}
	}

	return jmi;
}
jobmgr_t
jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
{
	jobmgr_t jmi = NULL;
	LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
		if (jmi->req_asid == asid) {
			return jmi;
		}
	}

	name_t name;
	(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
	jmi = jobmgr_new_xpc_singleton_domain(jm, name);
	if (jobmgr_assumes(jm, jmi != NULL)) {
		(void)jobmgr_assumes(jmi, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
		jmi->shortdesc = "per-session";
		jmi->req_bsport = root_jobmgr->jm_port;
		(void)jobmgr_assumes(jmi, audit_session_port(asid, &jmi->req_asport) == 0);
		jmi->req_asid = asid;

		LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
	}

	return jmi;
}
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper = NULL;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);

	if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
		bootstrapper->is_bootstrapper = true;
		char buf[100];

		/* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false, false);
		bootstrapper->weird_bootstrap = true;
		(void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	} else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
		bootstrapper->is_bootstrapper = true;
		if (jobmgr_assumes(jm, pid1_magic)) {
			/* Have our system bootstrapper print out to the console. */
			bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
			bootstrapper->stderrpath = strdup(_PATH_CONSOLE);

			if (g_console) {
				(void)jobmgr_assumes(jm, kevent_mod((uintptr_t)fileno(g_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
			}
		}
	}

	jm->session_initialized = true;
	return bootstrapper;
}
void
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right
	 * to a port we already have open, the Mach kernel gives us the same
	 * port number back and increments a reference count associated with
	 * the port. This forces us, when discovering that a receive right at
	 * the other end has been deleted, to wander all of our objects to see
	 * what weird places clients might have handed us the same send right
	 * to use.
	 */

	if (jm == root_jobmgr) {
		if (port == inherited_bootstrap_port) {
			(void)jobmgr_assumes(jm, launchd_mport_deallocate(port) == KERN_SUCCESS);
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port && !ms->recv) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
		return jobmgr_shutdown(jm);
	}
}
struct machservice *
jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
{
	struct machservice *ms;
	job_t target_j;

	jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);

	if (target_pid) {
		/* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
		 * bootstrap in other bootstraps.
		 */

		/* Start in the given bootstrap. */
		if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
			/* If we fail, do a deep traversal. */
			if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
				jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
				return NULL;
			}
		}

		SLIST_FOREACH(ms, &target_j->machservices, sle) {
			if (ms->per_pid && strcmp(name, ms->name) == 0) {
				return ms;
			}
		}

		job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
		return NULL;
	}

	jobmgr_t where2look = jm;
	/* XPC domains are separate from Mach bootstraps. */
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		if (g_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2look = root_jobmgr;
		}
	}

	LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
		if (!ms->per_pid && strcmp(name, ms->name) == 0) {
			return ms;
		}
	}

	if (jm->parentmgr == NULL || !check_parent) {
		return NULL;
	}

	return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
}
mach_port_t
machservice_port(struct machservice *ms)
{
	return ms->port;
}

job_t
machservice_job(struct machservice *ms)
{
	return ms->job;
}

bool
machservice_hidden(struct machservice *ms)
{
	return ms->hide;
}

bool
machservice_active(struct machservice *ms)
{
	return ms->isActive;
}

const char *
machservice_name(struct machservice *ms)
{
	return ms->name;
}
void
machservice_drain_port(struct machservice *ms)
{
	bool drain_one = ms->drain_one_on_crash;
	bool drain_all = ms->drain_all_on_crash;

	if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
		return;
	}

	job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);

	char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
	char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
	mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
	mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;

	mach_msg_return_t mr = ~MACH_MSG_SUCCESS;

	do {
		/* This should be a direct check on the Mach service to see if it's an exception-handling
		 * port, and it will break things if ReportCrash or SafetyNet start advertising other
		 * Mach services. But for now, it should be okay.
		 */
		if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
			mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
		} else {
			mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT;

			mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
			switch (mr) {
			case MACH_MSG_SUCCESS:
				mach_msg_destroy((mach_msg_header_t *)req_hdr);
				break;
			case MACH_RCV_TIMED_OUT:
				break;
			case MACH_RCV_TOO_LARGE:
				runtime_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
				break;
			default:
				break;
			}
		}
	} while (drain_all && mr != MACH_RCV_TIMED_OUT);
}
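/* A note on the drain loop above: passing MACH_RCV_TIMEOUT with a timeout of 0
 * turns mach_msg() into a non-blocking poll, so "drain all" is simply "receive
 * and destroy until MACH_RCV_TIMED_OUT" and never blocks launchd. Using
 * mach_msg_destroy() rather than just discarding the buffer is what releases
 * any port rights and out-of-line memory carried in each drained message. */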
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	if (ms->alias) {
		/* HACK: Egregious code duplication. But dealing with aliases is a
		 * pretty simple affair since they can't and shouldn't have any complex
		 * behaviors associated with them.
		 */
		LIST_REMOVE(ms, name_hash_sle);
		SLIST_REMOVE(&j->machservices, ms, machservice, sle);
		free(ms);
		return;
	}

	if (unlikely(ms->debug_on_close)) {
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		(void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
	}

	if (ms->recv && job_assumes(j, !machservice_active(ms))) {
		job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
		(void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	}

	(void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);

	if (unlikely(ms->port == the_exception_server)) {
		the_exception_server = 0;
	}

	job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}
	SLIST_REMOVE(&j->machservices, ms, machservice, sle);

	if (!(j->dedicated_instance || ms->event_channel)) {
		LIST_REMOVE(ms, name_hash_sle);
	}
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
void
machservice_request_notifications(struct machservice *ms)
{
	mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;

	ms->isActive = true;

	if (ms->recv) {
		which = MACH_NOTIFY_PORT_DESTROYED;
		job_checkin(ms->job);
	}

	(void)job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
}
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
#define END_OF(x) (&(x)[NELEM(x)])

char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		while (isspace(*cp))
			cp++;
		term = (*cp == '"') ? *cp++ : '\0';
		if (nargs < NELEM(argv)) {
			argv[nargs++] = argp;
		}
		while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args)) {
			*argp++ = *cp++;
		}
		*argp++ = '\0';
		if (*cp) {
			cp++;
		}
	}

	if (nargs == 0) {
		return NULL;
	}

	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!launchd_assumes(argv_ret != NULL)) {
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
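/* A minimal usage sketch for mach_cmd2argv() (illustrative only; the path and
 * arguments are made up): the returned vector and its strings live in a single
 * heap block, so one free() releases everything.
 *
 *	char **argv = mach_cmd2argv("/usr/sbin/somed -d \"some arg\"");
 *	if (argv) {
 *		// argv[0] == "/usr/sbin/somed", argv[1] == "-d", argv[2] == "some arg"
 *		free(argv);
 *	}
 */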
void
job_checkin(job_t j)
{
	j->checkedin = true;
}

bool job_is_god(job_t j)
{
	return j->embedded_special_privileges;
}
bool
job_ack_port_destruction(mach_port_t p)
{
	struct machservice *ms;
	job_t j;

	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->recv && (ms->port == p)) {
			break;
		}
	}

	if (!jobmgr_assumes(root_jobmgr, ms != NULL)) {
		return false;
	}

	j = ms->job;

	jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);

	/* Without being the exception handler, NOTE_EXIT is our only way to tell if the job
	 * crashed, and we can't rely on NOTE_EXIT always being processed after all the job's
	 * receive rights have been returned.
	 *
	 * So when we get receive rights back, check to see if the job has been reaped yet. If
	 * not, then we add this service to a list of services to be drained on crash if it's
	 * requested that behavior. So, for a job with N receive rights all requesting that they
	 * be drained on crash, we can safely handle the following sequence of events.
	 *
	 * ReceiveRight0Returned
	 * ReceiveRight1Returned
	 * ReceiveRight2Returned
	 * NOTE_EXIT (reap, get exit status)
	 * ReceiveRight3Returned
	 * ...
	 * ReceiveRight(N - 1)Returned
	 */
	if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
		if (j->crashed && j->reaped) {
			job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
			machservice_drain_port(ms);
		} else if (!(j->crashed || j->reaped)) {
			job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
		}
	}

	/* If we get this notification after the job has been reaped, then we want to ping
	 * the event port to keep things going.
	 */
	if (ms->event_update_port && !j->p && job_assumes(j, j->event_monitor)) {
		if (_s_event_update_port == MACH_PORT_NULL) {
			(void)job_assumes(j, launchd_mport_make_send_once(ms->port, &_s_event_update_port) == KERN_SUCCESS);
		}
		eventsystem_ping();
	}

	ms->isActive = false;
	if (ms->delete_on_destruction) {
		machservice_delete(j, ms, false);
	} else if (ms->reset) {
		machservice_resetport(j, ms);
	}

	job_dispatch(j, false);

	root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);

	return true;
}
void
job_ack_no_senders(job_t j)
{
	j->priv_port_has_senders = false;

	(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);

	job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");

	job_dispatch(j, false);
}
bool
semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
{
	struct semaphoreitem *si;
	size_t alloc_sz = sizeof(struct semaphoreitem);

	if (what) {
		alloc_sz += strlen(what) + 1;
	}

	if (!job_assumes(j, si = calloc(1, alloc_sz))) {
		return false;
	}

	si->fd = -1;
	si->why = why;

	if (what) {
		strcpy(si->what_init, what);
	}

	SLIST_INSERT_HEAD(&j->semaphores, si, sle);

	if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
		job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
		SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
		j->nosy = true;
	}

	semaphoreitem_runtime_mod_ref(si, true);

	return true;
}
void
semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
{
	/*
	 * External events need to be tracked.
	 * Internal events do NOT need to be tracked.
	 */
	switch (si->why) {
	case SUCCESSFUL_EXIT:
	case FAILED_EXIT:
	case OTHER_JOB_ENABLED:
	case OTHER_JOB_DISABLED:
	case OTHER_JOB_ACTIVE:
	case OTHER_JOB_INACTIVE:
		return;
	default:
		break;
	}

	if (add) {
		runtime_add_weak_ref();
	} else {
		runtime_del_weak_ref();
	}
}
void
semaphoreitem_delete(job_t j, struct semaphoreitem *si)
{
	semaphoreitem_runtime_mod_ref(si, false);

	SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);

	if (si->fd != -1) {
		(void)job_assumes(j, runtime_close(si->fd) != -1);
	}

	/* We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores. */
	if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
		j->nosy = false;
		SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
	}

	free(si);
}
void
semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context *sdic = context;
	semaphore_reason_t why;

	why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;

	semaphoreitem_new(sdic->j, why, key);
}
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			j->start_pending = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
			j->needs_kickoff = launch_data_get_bool(obj);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
			why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
			semaphoreitem_new(j, why, NULL);
			j->start_pending = true;
		} else {
			(void)job_assumes(j, false);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
			sdic.why_true = PATH_EXISTS;
			sdic.why_false = PATH_MISSING;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			(void)job_assumes(j, false);
			break;
		}

		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		(void)job_assumes(j, false);
		break;
	}
}
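/* For reference, the launchd.plist fragment that drives the switch above looks
 * roughly like this (a sketch; keys per launchd.plist(5), the path is made up):
 *
 *	<key>KeepAlive</key>
 *	<dict>
 *		<key>SuccessfulExit</key>
 *		<false/>
 *		<key>PathState</key>
 *		<dict>
 *			<key>/var/run/example.flag</key>
 *			<true/>
 *		</dict>
 *	</dict>
 *
 * Booleans map directly to a semaphore reason; dictionaries are iterated, with
 * each entry's boolean selecting between sdic.why_true and sdic.why_false. */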
bool
externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event)
{
	struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
	if (job_assumes(j, ee != NULL)) {
		ee->event = launch_data_copy(event);
		if (job_assumes(j, ee->event != NULL)) {
			strcpy(ee->name, evname);
			ee->job = j;
			ee->id = sys->curid;
			ee->sys = sys;
			ee->wanted_state = true;
			sys->curid++;

			LIST_INSERT_HEAD(&j->events, ee, job_le);
			LIST_INSERT_HEAD(&sys->events, ee, sys_le);

			job_log(j, LOG_DEBUG, "New event: %s:%s", sys->name, evname);

			return true;
		} else {
			free(ee);
		}
	}

	return false;
}

void
externalevent_delete(struct externalevent *ee)
{
	launch_data_free(ee->event);
	LIST_REMOVE(ee, job_le);
	LIST_REMOVE(ee, sys_le);

	free(ee);
}
void
externalevent_setup(launch_data_t obj, const char *key, void *context)
{
	struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
	(void)job_assumes(ctx->j, externalevent_new(ctx->j, ctx->sys, (char *)key, obj));
}

struct externalevent *
externalevent_find(const char *sysname, uint64_t id)
{
	struct externalevent *ei = NULL;

	struct eventsystem *es = eventsystem_find(sysname);
	if (launchd_assumes(es != NULL)) {
		LIST_FOREACH(ei, &es->events, sys_le) {
			if (ei->id == id) {
				break;
			}
		}
	}

	return ei;
}
struct eventsystem *
eventsystem_new(const char *name)
{
	struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
	if (launchd_assumes(es != NULL)) {
		strcpy(es->name, name);
		LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
	}

	return es;
}

void
eventsystem_delete(struct eventsystem *es)
{
	struct externalevent *ei = NULL;
	while ((ei = LIST_FIRST(&es->events))) {
		externalevent_delete(ei);
	}

	LIST_REMOVE(es, global_le);

	free(es);
}
void
eventsystem_setup(launch_data_t obj, const char *key, void *context)
{
	job_t j = (job_t)context;
	if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
		return;
	}

	struct eventsystem *sys = eventsystem_find(key);
	if (unlikely(sys == NULL)) {
		sys = eventsystem_new(key);
		job_log(j, LOG_DEBUG, "New event system: %s", key);
	}

	if (job_assumes(j, sys != NULL)) {
		struct externalevent_iter_ctx ctx = {
			.j = j,
			.sys = sys,
		};
		launch_data_dict_iterate(obj, externalevent_setup, &ctx);
		sys->has_updates = true;
	}
}

struct eventsystem *
eventsystem_find(const char *name)
{
	struct eventsystem *esi = NULL;
	LIST_FOREACH(esi, &_s_event_systems, global_le) {
		if (strcmp(name, esi->name) == 0) {
			break;
		}
	}

	return esi;
}
void
eventsystem_ping(void)
{
	/* We don't wrap this in an assumes() macro because we could potentially
	 * call this function many times before the helper job gets back to us
	 * and gives us another send-once right. So if it's MACH_PORT_NULL, that
	 * means that we've sent a ping, but the helper hasn't yet checked in to
	 * get the new set of notifications.
	 */
	if (_s_event_update_port != MACH_PORT_NULL) {
		kern_return_t kr = helper_downcall_ping(_s_event_update_port);
		if (kr != KERN_SUCCESS) {
			runtime_syslog(LOG_NOTICE, "helper_downcall_ping(): kr = 0x%x", kr);
		}

		_s_event_update_port = MACH_PORT_NULL;
	}
}
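/* Presumably the reason for unconditionally resetting _s_event_update_port:
 * moving a send-once right into a message consumes it, so after
 * helper_downcall_ping() the cached right is gone whether or not the call
 * succeeded, and the helper must hand launchd a fresh send-once right the
 * next time it checks in (see job_ack_port_destruction() above). */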
void
jobmgr_dispatch_all_semaphores(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	job_t ji, jn;

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_dispatch_all_semaphores(jmi);
	}

	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
		if (!SLIST_EMPTY(&ji->semaphores)) {
			job_dispatch(ji, false);
		}
	}
}
time_t
cronemu(int mon, int mday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
		workingtm.tm_year++;
		workingtm.tm_mon = 0;
		workingtm.tm_mday = 1;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
	}

	return mktime(&workingtm);
}
time_t
cronemu_wday(int wday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	if (wday == 7) {
		wday = 0;
	}

	while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
		workingtm.tm_mday++;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
	}

	return mktime(&workingtm);
}
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mon < wtm->tm_mon) {
		return false;
	}

	if (mon > wtm->tm_mon) {
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mday < wtm->tm_mday) {
		return false;
	}

	if (mday > wtm->tm_mday) {
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_min(&workingtm, min)) {
			workingtm.tm_hour++;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_hour;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_hour) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (hour < wtm->tm_hour) {
		return false;
	}

	if (hour > wtm->tm_hour) {
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
bool
cronemu_min(struct tm *wtm, int min)
{
	if (min == -1) {
		return true;
	}

	if (min < wtm->tm_min) {
		return false;
	}

	if (min > wtm->tm_min) {
		wtm->tm_min = min;
	}

	return true;
}
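/* How the carry test used throughout the cronemu_*() family works, by example
 * (a sketch, not launchd source): mktime() normalizes its struct tm, so an
 * overflowed candidate date rolls a coarser field forward. Saving the field
 * beforehand and comparing afterward detects that roll-over:
 *
 *	struct tm t = { .tm_year = 111, .tm_mon = 5, .tm_mday = 31, .tm_isdst = -1 };
 *	int carrytest = t.tm_mon;
 *	(void)mktime(&t);		// June 31, 2011 normalizes to July 1, 2011
 *	// t.tm_mon != carrytest, so the candidate overflowed the month and the
 *	// caller advances to the next coarser unit and tries again.
 */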
kern_return_t
job_mig_setup_shmem(job_t j, mach_port_t *shmem_port)
{
	memory_object_size_t size_of_page, size_of_page_orig;
	vm_address_t vm_addr;
	kern_return_t kr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->anonymous)) {
		job_log(j, LOG_DEBUG, "Anonymous job tried to setup shared memory");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (unlikely(j->shmem)) {
		job_log(j, LOG_ERR, "Tried to setup shared memory more than once");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_of_page_orig = size_of_page = getpagesize();

	kr = vm_allocate(mach_task_self(), &vm_addr, size_of_page, true);

	if (!job_assumes(j, kr == 0)) {
		return kr;
	}

	j->shmem = (typeof(j->shmem))vm_addr;
	j->shmem->vp_shmem_standby_timeout = j->timeout;

	kr = mach_make_memory_entry_64(mach_task_self(), &size_of_page,
			(memory_object_offset_t)vm_addr, VM_PROT_READ|VM_PROT_WRITE, shmem_port, 0);

	if (job_assumes(j, kr == 0)) {
		(void)job_assumes(j, size_of_page == size_of_page_orig);
	}

	/* no need to inherit this in child processes */
	(void)job_assumes(j, vm_inherit(mach_task_self(), (vm_address_t)j->shmem, size_of_page_orig, VM_INHERIT_NONE) == 0);

	return kr;
}
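/* What a client is expected to do with the returned memory-entry port (a
 * sketch, not launchd/libvproc source): map it into its own address space with
 * vm_map(), after which both sides share the page backing j->shmem:
 *
 *	vm_address_t addr = 0;
 *	kern_return_t kr = vm_map(mach_task_self(), &addr, getpagesize(), 0,
 *			VM_FLAGS_ANYWHERE, shmem_port, 0, false,
 *			VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE,
 *			VM_INHERIT_NONE);
 */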
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t js;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	const char **argv = (const char **)mach_cmd2argv(server_cmd);
	if (unlikely(argv == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
		free(argv);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	free(argv);

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

	if (pid1_magic) {
		if (ldc->euid || ldc->uid) {
			job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
			return VPROC_ERR_TRY_PER_USER;
		}
	} else {
		if (unlikely(server_uid != getuid())) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; /* zero means "do nothing" */
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (unlikely(js == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t otherj;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
#if TARGET_OS_EMBEDDED
		if (!j->embedded_special_privileges) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
#else
		return BOOTSTRAP_NOT_PRIVILEGED;
#endif
	}

	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	if (j->embedded_special_privileges && strcmp(j->username, otherj->username) != 0) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		bool do_block = otherj->p;

		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			/* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
			(void)job_assumes(otherj, waiting4removal_new(otherj, srp));
			return MIG_NO_REPLY;
		}
	} else if (sig == VPROC_MAGIC_TRYKILL_SIGNAL) {
		if (!j->kill_via_shmem) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		if (!j->shmem) {
			j->sent_kill_via_shmem = true;
			(void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
			return 0;
		}

#if !TARGET_OS_EMBEDDED
		if (__sync_bool_compare_and_swap(&j->shmem->vp_shmem_transaction_cnt, 0, -1)) {
			j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
			j->sent_kill_via_shmem = true;
			(void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
			return 0;
		}
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	} else if (otherj->p) {
		(void)job_assumes(j, runtime_kill(otherj->p, sig) != -1);
	}

	return 0;
}
kern_return_t
job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(j, j->per_user)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return runtime_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
}
kern_return_t
job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(ldc->euid)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return runtime_log_drain(srp, outval, outvalCnt);
}
kern_return_t
job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	const char *action;
	launch_data_t input_obj = NULL, output_obj = NULL;
	size_t data_offset = 0;
	size_t packed_size;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	*outvalCnt = 20 * 1024 * 1024;
	mig_allocate(outval, *outvalCnt);
	if (!job_assumes(j, *outval != 0)) {
		return 1;
	}

	/* Note to future maintainers: launch_data_unpack() does NOT return a heap object. The data
	 * is decoded in-place. So do not call launch_data_free() on input_obj.
	 */
	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
		goto out_bad;
	}

	switch (outkey) {
	case VPROC_GSK_ENVIRONMENT:
		if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
			goto out_bad;
		}
		jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ALLJOBS:
		if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
			goto out_bad;
		}
		ipc_revoke_fds(output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_MGR_NAME:
		if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_JOB_OVERRIDES_DB:
		if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_OVERRIDES))) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_JOB_CACHE_DB:
		if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_JOBCACHE))) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		job_log(j, LOG_DEBUG, "Location of job cache database: %s", launch_data_get_string(output_obj));

		launch_data_free(output_obj);
		break;
	case 0:
		mig_deallocate(*outval, *outvalCnt);
		*outval = 0;
		*outvalCnt = 0;
		break;
	default:
		goto out_bad;
	}

	if (invalCnt) switch (inkey) {
	case VPROC_GSK_ENVIRONMENT:
		if (launch_data_get_type(input_obj) == LAUNCH_DATA_DICTIONARY) {
			if (j->p) {
				job_log(j, LOG_INFO, "Setting environment for a currently active job. This environment will take effect on the next invocation of the job.");
			}
			launch_data_dict_iterate(input_obj, envitem_setup_one_shot, j);
		}
		break;
	case 0:
		break;
	default:
		goto out_bad;
	}

	mig_deallocate(inval, invalCnt);
	return 0;

out_bad:
	mig_deallocate(inval, invalCnt);
	if (*outval) {
		mig_deallocate(*outval, *outvalCnt);
	}
	if (output_obj) {
		launch_data_free(output_obj);
	}

	return 1;
}
kern_return_t
job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
{
	const char *action;
	kern_return_t kr = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	int oldmask;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	switch (outkey) {
	case VPROC_GSK_ABANDON_PROCESS_GROUP:
		*outval = j->abandon_pg;
		break;
	case VPROC_GSK_LAST_EXIT_STATUS:
		*outval = j->last_exit_status;
		break;
	case VPROC_GSK_MGR_UID:
		*outval = getuid();
		break;
	case VPROC_GSK_MGR_PID:
		*outval = getpid();
		break;
	case VPROC_GSK_IS_MANAGED:
		*outval = j->anonymous ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		*outval = !j->ondemand;
		break;
	case VPROC_GSK_START_INTERVAL:
		*outval = j->start_interval;
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		*outval = j->timeout;
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		*outval = j->exit_timeout;
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
		*outval = oldmask;
		runtime_setlogmask(oldmask);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		oldmask = umask(0);
		*outval = oldmask;
		umask(oldmask);
		break;
	case VPROC_GSK_TRANSACTIONS_ENABLED:
		job_log(j, LOG_DEBUG, "Reading transaction model status.");
		*outval = j->kill_via_shmem;
		break;
	case VPROC_GSK_WAITFORDEBUGGER:
		*outval = j->wait4debugger;
		break;
	case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
		*outval = j->embedded_special_privileges;
		break;
	case 0:
		*outval = 0;
		break;
	default:
		kr = 1;
		break;
	}

	switch (inkey) {
	case VPROC_GSK_ABANDON_PROCESS_GROUP:
		j->abandon_pg = (bool)inval;
		break;
	case VPROC_GSK_GLOBAL_ON_DEMAND:
		job_log(j, LOG_DEBUG, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
		kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		j->ondemand = !inval;
		break;
	case VPROC_GSK_START_INTERVAL:
		if (inval > UINT32_MAX || inval < 0) {
			kr = 1;
		} else if (inval) {
			if (j->start_interval == 0) {
				runtime_add_weak_ref();
			}
			j->start_interval = (typeof(j->start_interval)) inval;
			(void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
		} else if (j->start_interval) {
			(void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
			if (j->start_interval != 0) {
				runtime_del_weak_ref();
			}
			j->start_interval = 0;
		}
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			j->timeout = (typeof(j->timeout)) inval;
		}
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			j->exit_timeout = (typeof(j->exit_timeout)) inval;
		}
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			runtime_setlogmask((int) inval);
		}
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		launchd_assert(sizeof (mode_t) == 2);
		if (inval < 0 || inval > UINT16_MAX) {
			kr = 1;
		} else if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
			kr = 1;
		} else {
			umask((mode_t) inval);
		}
		break;
	case VPROC_GSK_TRANSACTIONS_ENABLED:
		if (!job_assumes(j, inval != 0)) {
			job_log(j, LOG_WARNING, "Attempt to unregister from transaction model. This is not supported.");
			kr = 1;
		} else {
			j->kill_via_shmem = (bool)inval;
		}
		break;
	case VPROC_GSK_WEIRD_BOOTSTRAP:
		if (job_assumes(j, j->weird_bootstrap)) {
			job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");

			mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);

			if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
				mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
			}

			(void)job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
			j->weird_bootstrap = false;
		}
		break;
	case VPROC_GSK_WAITFORDEBUGGER:
		j->wait4debugger_oneshot = inval;
		break;
	case VPROC_GSK_PERUSER_SUSPEND:
		if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
			mach_port_t junk = MACH_PORT_NULL;
			job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
			if (job_assumes(j, jpu != NULL)) {
				struct suspended_peruser *spi = NULL;
				LIST_FOREACH(spi, &j->suspended_perusers, sle) {
					if ((int64_t)(spi->j->mach_uid) == inval) {
						job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
						break;
					}
				}

				if (spi == NULL) {
					job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
					spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
					if (job_assumes(j, spi != NULL)) {
						/* Stop listening for events.
						 *
						 * See <rdar://problem/9014146>.
						 */
						if (jpu->peruser_suspend_count == 0) {
							job_ignore(jpu);
						}

						spi->j = jpu;
						spi->j->peruser_suspend_count++;
						LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
					} else {
						kr = BOOTSTRAP_NO_MEMORY;
					}
				}
			}
		} else {
			kr = 1;
		}
		break;
	case VPROC_GSK_PERUSER_RESUME:
		if (job_assumes(j, pid1_magic == true)) {
			struct suspended_peruser *spi = NULL, *spt = NULL;
			LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
				if ((int64_t)(spi->j->mach_uid) == inval) {
					spi->j->peruser_suspend_count--;
					LIST_REMOVE(spi, sle);
					job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
					break;
				}
			}

			if (!job_assumes(j, spi != NULL)) {
				job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
				kr = BOOTSTRAP_NOT_PRIVILEGED;
			} else if (spi->j->peruser_suspend_count == 0) {
				job_dispatch(spi->j, false);
				free(spi);
			}
		} else {
			kr = 1;
		}
		break;
	case 0:
		break;
	default:
		kr = 1;
		break;
	}

	return kr;
}
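/* The userspace face of this routine is vproc_swap_integer() in libvproc; a
 * typical read-only call (sketch) passes NULL for the input value and checks
 * for the NULL (success) return:
 *
 *	int64_t val = 0;
 *	if (vproc_swap_integer(NULL, VPROC_GSK_IS_MANAGED, NULL, &val) == NULL) {
 *		// val != 0 => the calling process is managed by launchd
 *	}
 */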
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
{
	struct machservice *ms;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	job_setup_exception_port(j, child_task);

	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			/* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
			continue;
		}

		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);

		if (unlikely(errno)) {
			int desired_log_level = LOG_ERR;

			if (j->anonymous) {
				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* MIG will not zero-initialize this pointer, so we must always do so. See
	 * <rdar://problem/8562593>.
	 */
	*asport = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
	if (!j->anonymous) {
		/* XPC services will spawn into the root security session by default.
		 * xpcproxy will switch them away if needed.
		 */
		if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
			job_log(j, LOG_DEBUG, "Returning j->asport: %u", j->asport);
			*asport = j->asport;
		}
	}
#endif
	(void)job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);

	return 0;
}
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct proc_bsdshortinfo proc;
	struct ldcred *ldc = runtime_get_caller_creds();
	pid_t pid_to_log;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!pid1_magic)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if !TARGET_OS_EMBEDDED
	if (unlikely(ldc->euid)) {
#else
	if (unlikely(ldc->euid) && !j->embedded_special_privileges) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
		size_t who_offset;
		if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				job_assumes(j, errno == 0);
			}
			return 1;
		}

		if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
			job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
			snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
			break;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;
	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);

	return 0;
}
kern_return_t
job_mig_getsocket(job_t j, name_t spr)
{
	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->deny_job_creation) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (unlikely(!sockpath)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	strncpy(spr, sockpath, sizeof(name_t));

	return BOOTSTRAP_SUCCESS;
}
kern_return_t
job_mig_log(job_t j, int pri, int err, logmsg_t msg)
{
	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if ((errno = err)) {
		job_log_error(j, pri, "%s", msg);
	} else {
		job_log(j, pri, "%s", msg);
	}

	return 0;
}
job_t
jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
{
	job_t ji = NULL;
	LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
		if (!ji->per_user) {
			continue;
		}
		if (ji->mach_uid != which_user) {
			continue;
		}
		if (SLIST_EMPTY(&ji->machservices)) {
			continue;
		}
		if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
			continue;
		}
		break;
	}

	if (unlikely(ji == NULL)) {
		struct machservice *ms;
		char lbuf[1024];

		job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);

		sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);

		ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
		if (ji == NULL) {
			return NULL;
		}

		auditinfo_addr_t auinfo = {
			.ai_termid = { .at_type = AU_IPv4 },
			.ai_auid = which_user,
			.ai_asid = AU_ASSIGN_ASID,
		};

		if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
			job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
			(void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);

			/* Kinda lame that we have to do this, but we can't create an
			 * audit session without joining it.
			 */
			(void)job_assumes(ji, audit_session_join(g_audit_session_port));
			ji->asid = auinfo.ai_asid;
		} else {
			job_log(ji, LOG_WARNING, "Could not set audit session!");
		}

		ji->mach_uid = which_user;
		ji->per_user = true;
		ji->kill_via_shmem = true;

		struct stat sb;
		char pu_db[PATH_MAX];
		snprintf(pu_db, sizeof(pu_db), LAUNCHD_DB_PREFIX "/%s", lbuf);

		bool created = false;
		int err = stat(pu_db, &sb);
		if ((err == -1 && errno == ENOENT) || (err == 0 && !S_ISDIR(sb.st_mode))) {
			if (err == 0) {
				char move_aside[PATH_MAX];
				snprintf(move_aside, sizeof(move_aside), LAUNCHD_DB_PREFIX "/%s.movedaside", lbuf);

				(void)job_assumes(ji, rename(pu_db, move_aside) != -1);
			}

			(void)job_assumes(ji, mkdir(pu_db, S_IRWXU) != -1);
			(void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
			created = true;
		}

		if (!created) {
			if (!job_assumes(ji, sb.st_uid == which_user)) {
				(void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
			}
			if (!job_assumes(ji, sb.st_gid == 0)) {
				(void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
			}
			if (!job_assumes(ji, sb.st_mode == (S_IRWXU | S_IFDIR))) {
				(void)job_assumes(ji, chmod(pu_db, S_IRWXU) != -1);
			}
		}

		if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
			job_remove(ji);
			ji = NULL;
		} else {
			ms->per_user_hack = true;

			ji = job_dispatch(ji, false);
		}
	} else {
		*mp = machservice_port(SLIST_FIRST(&ji->machservices));
		job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
	}

	return ji;
}
kern_return_t
job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jpu;

#if TARGET_OS_EMBEDDED
	/* There is no need for per-user launchds on embedded. */
	job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
	return BOOTSTRAP_NOT_PRIVILEGED;
#endif

	if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);

	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (ldc->euid || ldc->uid) {
		which_user = ldc->euid ?: ldc->uid;
	}

	*up_cont = MACH_PORT_NULL;

	jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);

	return 0;
}
kern_return_t
job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
{
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
	struct ldcred *ldc = runtime_get_caller_creds();
	struct machservice *ms = NULL;
	job_t jo;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->dedicated_instance) {
		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
				uuid_copy(instance_id, j->instance_id);
				ms = msi;
				break;
			}
		}
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
	}

	if (strict) {
		if (likely(ms != NULL)) {
			if (ms->job != j) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			} else if (ms->isActive) {
				return BOOTSTRAP_SERVICE_ACTIVE;
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else if (ms == NULL) {
		if (job_assumes(j, !j->dedicated_instance)) {
			*serviceportp = MACH_PORT_NULL;

			if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
				return BOOTSTRAP_NO_MEMORY;
			}

			/* Treat this like a legacy job. */
			if (!j->legacy_mach_job) {
				ms->isActive = true;
			}

			if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
				job_log(j, LOG_SCOLDING, "Please add the following service to the configuration file for this job: %s", servicename);
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else {
		if (unlikely((jo = machservice_job(ms)) != j)) {
			static pid_t last_warned_pid;

			if (last_warned_pid != ldc->pid) {
				job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
				last_warned_pid = ldc->pid;
			}

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (unlikely(machservice_active(ms))) {
			job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
	}

	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
		job_log(j, LOG_SCOLDING, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
	}

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
		if (pid1_magic) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);

	if (ms) {
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		if (ms->recv && (serviceport != MACH_PORT_NULL)) {
			job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		machservice_delete(j, ms, false);
	}

	if (likely(serviceport != MACH_PORT_NULL)) {
		if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	return BOOTSTRAP_SUCCESS;
}
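/* The check-in/register distinction in brief: with bootstrap_check_in(),
 * launchd creates and keeps the receive right for the service and hands it to
 * the job each time it starts, so the name survives crashes; with
 * bootstrap_register(), the job allocates its own port and gives launchd only
 * a send right. That is why the ms->recv case above is flagged as a misuse:
 * a launchd-owned receive right should be claimed via check-in instead. */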
kern_return_t
job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
{
	struct machservice *ms = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr;
	bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
	bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
	bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	bool xpc_req = j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN;

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

	/* We don't do sandbox checking for XPC domains because, by definition, all
	 * the services within your domain should be accessible to you.
	 */
	if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (per_pid_lookup) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else if (xpc_req) {
		/* Requests from XPC domains stay local. */
		ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
	} else {
		/* A strict lookup which is privileged won't even bother trying to
		 * find a service if we're not hosting the root Mach bootstrap.
		 */
		if (strict_lookup && privileged) {
			if (inherited_bootstrap_port == MACH_PORT_NULL) {
				ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
			}
		} else {
			ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
		}
	}

	if (likely(ms)) {
		ms = ms->alias ? ms->alias : ms;
		if (unlikely(specific_instance && ms->job->multiple_instances)) {
			job_t ji = NULL;
			job_t instance = NULL;
			LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
				if (uuid_compare(instance_id, ji->instance_id) == 0) {
					instance = ji;
					break;
				}
			}

			if (unlikely(instance == NULL)) {
				job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
				instance = job_new_subjob(ms->job, instance_id);
				if (job_assumes(j, instance != NULL)) {
					/* Disable this support for now. We only support having
					 * multi-instance jobs within private XPC domains.
					 */
					/* If the job is multi-instance, in a singleton XPC domain
					 * and the request is not coming from within that singleton
					 * domain, we need to alias the new job into the requesting
					 * domain.
					 */
					if (!j->mgr->xpc_singleton && xpc_req) {
						(void)job_assumes(instance, job_new_alias(j->mgr, instance));
					}
					job_dispatch(instance, false);
				}
			}

			ms = NULL;
			if (job_assumes(j, instance != NULL)) {
				struct machservice *msi = NULL;
				SLIST_FOREACH(msi, &instance->machservices, sle) {
					/* sizeof(servicename) will return the size of a pointer, even though it's
					 * an array type, because when passing arrays as parameters in C, they
					 * implicitly degrade to pointers.
					 */
					if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
						ms = msi;
						break;
					}
				}
			}
		} else if (machservice_hidden(ms) && !machservice_active(ms)) {
			ms = NULL;
		} else if (unlikely(ms->per_user_hack)) {
			ms = NULL;
		}
	}

	if (likely(ms)) {
		(void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);

		if (unlikely(!per_pid_lookup && j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user)) {
			/* we need to think more about the per_pid_lookup logic before logging about repeated lookups */
			job_log(j, LOG_DEBUG, "Performance: Please fix the framework that talks to \"%s\" to cache the Mach port for service: %s", ms->job->label, servicename);
		}

		j->lastlookup = ms;
		j->lastlookup_gennum = ms->gen_num;

		*serviceportp = machservice_port(ms);

		kr = BOOTSTRAP_SUCCESS;
	} else if (strict_lookup && !privileged) {
		/* Hack: We need to simulate XPC's desire not to establish a hierarchy. So if
		 * XPC is doing the lookup, and it's not a privileged lookup, we won't forward.
		 * But if it is a privileged lookup (that is, was looked up in XPC_DOMAIN_LOCAL_SYSTEM)
		 * then we must forward.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	} else if (inherited_bootstrap_port != MACH_PORT_NULL) {
		/* Requests from within an XPC domain don't get forwarded. */
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		/* Clients potentially check the audit token of the reply to verify that the returned send right is trustworthy. */
		(void)job_assumes(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags) == 0);
		/* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
		return MIG_NO_REPLY;
	} else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/*
		 * 5240036 Should start background session when a lookup of CCacheServer occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
		 * If we find a EUID that isn't root, we force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
kern_return_t
job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
{
	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
	jobmgr_t jm = j->mgr;

	if (jobmgr_parent(jm)) {
		*parentport = jobmgr_parent(jm)->jm_port;
	} else if (MACH_PORT_NULL == inherited_bootstrap_port) {
		*parentport = jm->jm_port;
	} else {
		(void)job_assumes(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp) == 0);
		/* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
		return MIG_NO_REPLY;
	}

	return BOOTSTRAP_SUCCESS;
}

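/* Illustrative sketch (not part of launchd): walking up the bootstrap
 * hierarchy via bootstrap_parent(), the public wrapper for the MIG routine
 * above. At the root, job_mig_parent() hands back the same port it was asked
 * about, so the loop stops when the parent equals the current port. Hedged
 * example; error handling is minimal, and the call may require privilege.
 */
#if 0
#include <mach/mach.h>
#include <servers/bootstrap.h>

static mach_port_t
find_root_bootstrap(mach_port_t bp)
{
	mach_port_t cur = bp;
	mach_port_t parent = MACH_PORT_NULL;

	while (bootstrap_parent(cur, &parent) == KERN_SUCCESS) {
		if (parent == cur) {
			/* The root bootstrap is its own parent. */
			break;
		}
		cur = parent;
	}

	return cur;
}
#endif
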
kern_return_t
job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (inherited_bootstrap_port == MACH_PORT_NULL) {
		*rootbsp = root_jobmgr->jm_port;
		(void)job_assumes(j, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
	} else {
		*rootbsp = inherited_bootstrap_port;
		(void)job_assumes(j, launchd_mport_copy_send(inherited_bootstrap_port) == KERN_SUCCESS);
	}

	return BOOTSTRAP_SUCCESS;
}

kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt, name_array_t *servicejobsp, unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt, uint64_t flags)
{
	name_array_t service_names = NULL;
	name_array_t service_jobs = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	unsigned int i = 0;
	jobmgr_t jm;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (g_flat_mach_namespace) {
		if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
			jm = j->mgr;
		} else {
			jm = root_jobmgr;
		}
	} else {
		jm = j->mgr;
	}

	struct machservice *msi = NULL;
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			cnt += !msi->per_pid ? 1 : 0;
		}
	}

	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!job_assumes(j, service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
	if (!job_assumes(j, service_jobs != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!job_assumes(j, service_actives != NULL)) {
		goto out_bad;
	}

	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			if (!msi->per_pid) {
				strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
				msi = msi->alias ? msi->alias : msi;
				if (msi->job->mgr->shortdesc) {
					strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
				} else {
					strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
				}
				service_actives[cnt2] = machservice_status(msi);
				cnt2++;
			}
		}
	}

	(void)job_assumes(j, cnt == cnt2);

out:
	*servicenamesp = service_names;
	*servicejobsp = service_jobs;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_jobs) {
		mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}

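/* Illustrative sketch (not part of launchd): how a client consumes the
 * MIG-allocated arrays that job_mig_info() returns. On success the arrays are
 * out-of-line memory mapped into the caller's address space and must be
 * released with vm_deallocate(); on failure the server path above already
 * cleaned them up. Hedged example against the bootstrap_info() wrapper.
 */
#if 0
#include <mach/mach.h>
#include <servers/bootstrap.h>
#include <stdio.h>

static void
dump_services(mach_port_t bp)
{
	name_array_t names = NULL;
	name_array_t jobs = NULL;
	bootstrap_status_array_t actives = NULL;
	mach_msg_type_number_t ncnt = 0, jcnt = 0, acnt = 0;

	if (bootstrap_info(bp, &names, &ncnt, &jobs, &jcnt, &actives, &acnt) != BOOTSTRAP_SUCCESS) {
		return;
	}

	for (mach_msg_type_number_t i = 0; i < ncnt; i++) {
		printf("%s -> %s (%d)\n", names[i], jobs[i], actives[i]);
	}

	/* Give the out-of-line memory back to the kernel. */
	(void)vm_deallocate(mach_task_self(), (vm_address_t)names, ncnt * sizeof(names[0]));
	(void)vm_deallocate(mach_task_self(), (vm_address_t)jobs, jcnt * sizeof(jobs[0]));
	(void)vm_deallocate(mach_task_self(), (vm_address_t)actives, acnt * sizeof(actives[0]));
}
#endif
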
kern_return_t
job_mig_lookup_children(job_t j, mach_port_array_t *child_ports, mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names, mach_msg_type_number_t *child_names_cnt, bootstrap_property_array_t *child_properties, mach_msg_type_number_t *child_properties_cnt)
{
	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	struct ldcred *ldc = runtime_get_caller_creds();

	/* Only allow root processes to look up children, even if we're in the per-user launchd.
	 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
	 * in a non-flat namespace.
	 */
	if (ldc->euid != 0) {
		job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	unsigned int cnt = 0;

	jobmgr_t jmr = j->mgr;
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		cnt++;
	}

	/* Find our per-user launchds if we're PID 1. */
	job_t ji = NULL;
	if (pid1_magic) {
		LIST_FOREACH(ji, &jmr->jobs, sle) {
			cnt += ji->per_user ? 1 : 0;
		}
	}

	if (cnt == 0) {
		return BOOTSTRAP_NO_CHILDREN;
	}

	mach_port_array_t _child_ports = NULL;
	mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
	if (!job_assumes(j, _child_ports != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	name_array_t _child_names = NULL;
	mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
	if (!job_assumes(j, _child_names != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	bootstrap_property_array_t _child_properties = NULL;
	mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
	if (!job_assumes(j, _child_properties != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	unsigned int cnt2 = 0;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		if (jobmgr_assumes(jmi, launchd_mport_make_send(jmi->jm_port) == KERN_SUCCESS)) {
			_child_ports[cnt2] = jmi->jm_port;
		} else {
			_child_ports[cnt2] = MACH_PORT_NULL;
		}

		strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
		_child_properties[cnt2] = jmi->properties;

		cnt2++;
	}

	if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
		if (ji->per_user) {
			if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
				mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));

				if (job_assumes(ji, launchd_mport_copy_send(port) == KERN_SUCCESS)) {
					_child_ports[cnt2] = port;
				} else {
					_child_ports[cnt2] = MACH_PORT_NULL;
				}
			} else {
				_child_ports[cnt2] = MACH_PORT_NULL;
			}

			strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
			_child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;

			cnt2++;
		}
	}

	*child_names_cnt = cnt;
	*child_ports_cnt = cnt;
	*child_properties_cnt = cnt;

	*child_names = _child_names;
	*child_ports = _child_ports;
	*child_properties = _child_properties;

	unsigned int i = 0;
	for (i = 0; i < cnt; i++) {
		job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
	}

	return BOOTSTRAP_SUCCESS;

out_bad:
	if (_child_ports) {
		mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
	}
	if (_child_names) {
		mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
	}
	if (_child_properties) {
		mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
	}

	return kr;
}

kern_return_t
job_mig_transaction_count_for_pid(job_t j, pid_t p, int32_t *cnt, boolean_t *condemned)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	kern_return_t kr = KERN_FAILURE;
	struct ldcred *ldc = runtime_get_caller_creds();
	if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	job_t j_for_pid = jobmgr_find_by_pid_deep(j->mgr, p, false);
	if (j_for_pid) {
		if (j_for_pid->kill_via_shmem) {
			if (j_for_pid->shmem) {
				*cnt = j_for_pid->shmem->vp_shmem_transaction_cnt;
				*condemned = j_for_pid->shmem->vp_shmem_flags & VPROC_SHMEM_EXITING;
				*cnt += *condemned ? 1 : 0;
			} else {
				*cnt = 0;
				*condemned = false;
			}

			kr = BOOTSTRAP_SUCCESS;
		} else {
			kr = BOOTSTRAP_NO_MEMORY;
		}
	} else {
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}

kern_return_t
job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* This is so loginwindow doesn't try to quit GUI apps that have been launched
	 * directly by launchd as agents.
	 */
	job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
	if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
		*managed = true;
	}

	return BOOTSTRAP_SUCCESS;
}

kern_return_t
job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	mach_port_t _mp = MACH_PORT_NULL;
	if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
		job_t target_j = job_find(NULL, label);
		if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
			if (target_j->j_port == MACH_PORT_NULL) {
				(void)job_assumes(target_j, job_setup_machport(target_j) == true);
			}

			_mp = target_j->j_port;
			kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
		} else {
			kr = BOOTSTRAP_NO_MEMORY;
		}
	}

	*mp = _mp;
	return kr;
}

#if !TARGET_OS_EMBEDDED
kern_return_t
job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	uuid_string_t uuid_str;
	uuid_unparse(uuid, uuid_str);
	job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);

	job_t ji = NULL, jt = NULL;
	LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
		uuid_string_t uuid_str2;
		uuid_unparse(ji->expected_audit_uuid, uuid_str2);

		if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
			uuid_clear(ji->expected_audit_uuid);
			if (asport != MACH_PORT_NULL) {
				job_log(ji, LOG_DEBUG, "Job should join session with port %u", asport);
				(void)job_assumes(j, launchd_mport_copy_send(asport) == KERN_SUCCESS);
			} else {
				job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
			}

			ji->asport = asport;
			LIST_REMOVE(ji, needing_session_sle);
			job_dispatch(ji, false);
		}
	}

	/* Each job that the session port was set for holds a reference. At the end of
	 * the loop, there will be one extra reference belonging to this MiG protocol.
	 * We need to release it so that the session goes away when all the jobs
	 * referencing it are unloaded.
	 */
	(void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);

	return KERN_SUCCESS;
}
#else
kern_return_t
job_mig_set_security_session(job_t j __attribute__((unused)), uuid_t uuid __attribute__((unused)), mach_port_t session __attribute__((unused)))
{
	return KERN_SUCCESS;
}
#endif

jobmgr_t
jobmgr_find_by_name(jobmgr_t jm, const char *where)
{
	jobmgr_t jmi, jmi2;

	/* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
	if (where == NULL) {
		if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
			where = VPROCMGR_SESSION_LOGINWINDOW;
		} else {
			where = VPROCMGR_SESSION_AQUA;
		}
	}

	if (strcasecmp(jm->name, where) == 0) {
		return jm;
	}

	if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
		jmi = root_jobmgr;
		goto jm_found;
	}

	SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
		if (unlikely(jmi->shutting_down)) {
			continue;
		} else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
			continue;
		} else if (strcasecmp(jmi->name, where) == 0) {
			goto jm_found;
		} else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
			SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
				if (strcasecmp(jmi2->name, where) == 0) {
					jmi = jmi2;
					goto jm_found;
				}
			}
		}
	}

jm_found:
	return jmi;
}

kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	jobmgr_t jmr = NULL;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);

	if (!job_assumes(j, kr == 0)) {
		goto out;
	}

	launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;

	/* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
	 * processing an IPC request, we'll do this action before the new job manager can get any IPC
	 * requests. This serialization is guaranteed since we are single-threaded in that respect.
	 */
	if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
		/* This is so awful. */
		/* Remove the job from its current job manager. */
		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, pid_hash_sle);

		/* Put the job into the target job manager. */
		LIST_INSERT_HEAD(&jmr->jobs, j, sle);
		LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

		j->mgr = jmr;
		job_set_global_on_demand(j, true);

		if (!j->holds_ref) {
			j->holds_ref = true;
			runtime_add_ref();
		}
	}

	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		(void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (unlikely(!j_for_service)) {
			/* The PID probably exited */
			(void)job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
			continue;
		}

		if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	if (target_subset) {
		(void)job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
	}
	if (asport) {
		(void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
	}
	if (kr != 0 && jmr) {
		jobmgr_shutdown(jmr);
	}

	return kr;
}

kern_return_t
job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_t j2;

	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (j->mgr->session_initialized) {
		job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
		kr = BOOTSTRAP_NOT_PRIVILEGED;
	} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		jobmgr_t jmi;

		/*
		 * We're working around LoginWindow and the WindowServer.
		 *
		 * In practice, there is only one LoginWindow session. Unfortunately, for certain
		 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
		 * spawns a replacement loginwindow session before cleaning up the previous one.
		 *
		 * We're going to use the creation of a new LoginWindow context as a clue that the
		 * previous LoginWindow context is on the way out and therefore we should just
		 * kick-start the shutdown of it.
		 */

		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if (unlikely(jmi->shutting_down)) {
				continue;
			} else if (strcasecmp(jmi->name, session_type) == 0) {
				jobmgr_shutdown(jmi);
				break;
			}
		}
	}

	jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
	strcpy(j->mgr->name_init, session_type);

	if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
		j2->asport = asport;
		(void)job_assumes(j, job_dispatch(j2, true));
		kr = BOOTSTRAP_SUCCESS;
	}

	return kr;
}

kern_return_t
job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);

	if (!job_assumes(j, pid1_magic == false)) {
		job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!j->anonymous) {
		job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
	if (target_jm == j->mgr) {
		job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
		*new_bsport = target_jm->jm_port;
		return BOOTSTRAP_SUCCESS;
	}

	if (!target_jm) {
		target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
		if (target_jm) {
			target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
			(void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
		}
	}

	if (!job_assumes(j, target_jm != NULL)) {
		job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Remove the job from its current job manager. */
	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, pid_hash_sle);

	job_t ji = NULL, jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
		if (ji == j) {
			LIST_REMOVE(ji, global_env_sle);
			break;
		}
	}

	/* Put the job into the target job manager. */
	LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
	LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

	if (ji) {
		LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
	}

	/* Move our Mach services over if we're not in a flat namespace. */
	if (!g_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
		struct machservice *msi = NULL, *msit = NULL;
		SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
			LIST_REMOVE(msi, name_hash_sle);
			LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}

	j->mgr = target_jm;

	if (!j->holds_ref) {
		/* Anonymous jobs which move around are particularly interesting to us, so we want to
		 * stick around while they're still around.
		 * For example, login calls into the PAM launchd module, which moves the process into
		 * the StandardIO session by default. So we'll hold a reference on that job to prevent
		 * ourselves from going away.
		 */
		j->holds_ref = true;
		runtime_add_ref();
	}

	*new_bsport = target_jm->jm_port;

	return KERN_SUCCESS;
}

kern_return_t
job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
		vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
		mach_port_array_t *portsp, unsigned int *ports_cnt)
{
	launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
	mach_port_array_t ports = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	size_t packed_size;
	struct machservice *ms;
	jobmgr_t jm;
	job_t ji;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	jm = j->mgr;

	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (unlikely(jobmgr_parent(jm) == NULL)) {
		job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
		job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (unlikely(!j->anonymous)) {
		job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");

	outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
	if (!job_assumes(j, outdata_obj_array)) {
		goto out_bad;
	}

	*outdataCnt = 20 * 1024 * 1024;
	mig_allocate(outdata, *outdataCnt);
	if (!job_assumes(j, *outdata != 0)) {
		goto out_bad;
	}

	LIST_FOREACH(ji, &j->mgr->jobs, sle) {
		if (!ji->anonymous) {
			continue;
		}
		SLIST_FOREACH(ms, &ji->machservices, sle) {
			cnt++;
		}
	}

	mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
	if (!job_assumes(j, ports != NULL)) {
		goto out_bad;
	}

	LIST_FOREACH(ji, &j->mgr->jobs, sle) {
		if (!ji->anonymous) {
			continue;
		}

		SLIST_FOREACH(ms, &ji->machservices, sle) {
			if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
				(void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
			} else {
				goto out_bad;
			}

			if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
				(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
			} else {
				goto out_bad;
			}

			if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
				(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
			} else {
				goto out_bad;
			}

			if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
				(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
			} else {
				goto out_bad;
			}

			ports[cnt2] = machservice_port(ms);

			/* Increment the send right by one so we can shutdown the jobmgr cleanly */
			(void)jobmgr_assumes(jm, (errno = launchd_mport_copy_send(ports[cnt2])) == KERN_SUCCESS);
			cnt2++;
		}
	}

	(void)job_assumes(j, cnt == cnt2);

	runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
	packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
	if (!job_assumes(j, packed_size != 0)) {
		goto out_bad;
	}

	launch_data_free(outdata_obj_array);

	*portsp = ports;
	*ports_cnt = cnt;

	*reqport = jm->req_port;
	*rcvright = jm->jm_port;

	jm->req_port = 0;
	jm->jm_port = 0;

	workaround_5477111 = j;

	jobmgr_shutdown(jm);

	return BOOTSTRAP_SUCCESS;

out_bad:
	if (outdata_obj_array) {
		launch_data_free(outdata_obj_array);
	}
	if (*outdata) {
		mig_deallocate(*outdata, *outdataCnt);
	}
	if (ports) {
		mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}

kern_return_t
job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
{
	int bsdepth = 0;
	jobmgr_t jmr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jmr = j->mgr;

	while ((jmr = jobmgr_parent(jmr)) != NULL) {
		bsdepth++;
	}

	/* Since we use recursion, we need an artificial depth for subsets */
	if (unlikely(bsdepth > 100)) {
		job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
		return BOOTSTRAP_NO_MEMORY;
	}

	char name[NAME_MAX];
	snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
		if (unlikely(requestorport == MACH_PORT_NULL)) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		return BOOTSTRAP_NO_MEMORY;
	}

	*subsetportp = jmr->jm_port;
	jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;

	/* A job could create multiple subsets, so only add a reference the first time
	 * it does so we don't have to keep a count.
	 */
	if (j->anonymous && !j->holds_ref) {
		j->holds_ref = true;
		runtime_add_ref();
	}

	job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
	return BOOTSTRAP_SUCCESS;
}

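/* Illustrative sketch (not part of launchd): creating a subset bootstrap with
 * bootstrap_subset(), the public wrapper for the MIG routine above.
 * Registrations made in the subset shadow the parent namespace and disappear
 * when the requestor port dies. Hedged example; here the requestor is simply
 * the calling task's own port, tying the subset's lifetime to the task.
 */
#if 0
#include <mach/mach.h>
#include <servers/bootstrap.h>

static mach_port_t
make_private_namespace(void)
{
	mach_port_t subset = MACH_PORT_NULL;

	/* Tie the subset's lifetime to this task. */
	if (bootstrap_subset(bootstrap_port, mach_task_self(), &subset) != KERN_SUCCESS) {
		return MACH_PORT_NULL;
	}

	return subset;
}
#endif
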
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
job_t
xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
{
	jobmgr_t where2put = NULL;

	launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
	if (destname) {
		if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
			const char *str = launch_data_get_string(destname);
			if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
				where2put = _s_xpc_system_domain;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
				where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
				where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
			} else {
				jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
				errno = EINVAL;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
			errno = EINVAL;
		}
	} else {
		where2put = jm;
	}

	job_t j = NULL;
	if (where2put) {
		launch_data_t mi = NULL;
		if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
			if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
				jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
				return NULL;
			}
		}

		jobmgr_log(where2put, LOG_DEBUG, "Importing service...");
		j = jobmgr_import2(where2put, pload);
		if (j) {
			j->xpc_service = true;
			if (where2put->xpc_singleton) {
				/* If the service was destined for one of the global domains,
				 * then we have to alias it into our local domain to reserve the
				 * name.
				 */
				job_t ja = job_new_alias(jm, j);
				if (!ja) {
					/* If we failed to alias the job because of a conflict over
					 * the label, then we remove it from the global domain. We
					 * don't want to risk having imported a malicious job into
					 * one of the global domains.
					 */
					if (errno != EEXIST) {
						job_assumes(j, errno == 0);
					} else {
						job_log(j, LOG_ERR, "Failed to alias job into: %s", where2put->name);
					}

					job_remove(j);
					j = NULL;
				} else {
					ja->xpc_service = true;
					j = ja;
				}
			}
		}
	}

	return j;
}

kern_return_t
xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
{
	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (!j || !MACH_PORT_VALID(reqport)) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}
	if (root_jobmgr->shutting_down) {
		jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (!j->xpc_bootstrapper) {
		job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	/* All XPC domains are children of the root job manager. What we're creating
	 * here is really just a skeleton. By creating it, we're adding reqp to our
	 * port set. It will have two messages on it. The first specifies the
	 * environment of the originator. This is so we can cache it and hand it to
	 * xpcproxy to bootstrap our services. The second is the set of jobs that is
	 * to be bootstrapped in.
	 */
	jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
	if (job_assumes(j, jm != NULL)) {
		jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
		jm->shortdesc = "private";
		kr = BOOTSTRAP_SUCCESS;
	}

	return kr;
}

kern_return_t
xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
{
	if (!j) {
		/* Due to the whacky nature of XPC service bootstrapping, we can end up
		 * getting this message long after the requesting process has gone away.
		 * See <rdar://problem/8593143>.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jobmgr_t jm = j->mgr;
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (jm->req_asport != MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	struct proc_bsdshortinfo proc;
	if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			jobmgr_assumes(jm, errno == 0);
		}

		return BOOTSTRAP_NO_MEMORY;
	}

	if (!jobmgr_assumes(jm, audit_session_port(ldc->asid, &jm->req_asport) == 0)) {
		job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	(void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s[%i]", proc.pbsi_comm, ldc->pid);
	strlcpy(jm->owner, proc.pbsi_comm, sizeof(jm->owner));
	jm->req_bsport = bsport;
	jm->req_excport = excport;
	jm->req_rport = rp;
	jm->req_ctx = ctx;
	jm->req_ctx_sz = ctx_sz;
	jm->req_pid = ldc->pid;
	jm->req_euid = ldc->euid;
	jm->req_egid = ldc->egid;
	jm->req_asid = ldc->asid;

	return KERN_SUCCESS;
}

kern_return_t
xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!(rootj && rootj->xpc_bootstrapper)) {
		job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* This is just for XPC domains (for now). */
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (j->mgr->session_initialized) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!jobmgr_assumes(j->mgr, services != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	size_t i = 0;
	size_t c = launch_data_array_get_count(services);
	for (i = 0; i < c; i++) {
		job_t nj = NULL;
		launch_data_t ploadi = launch_data_array_get_index(services, i);
		if (!(nj = xpc_domain_import_service(j->mgr, ploadi))) {
			/* If loading one job fails, just fail the whole thing. At this
			 * point, xpchelper should receive the failure and then just refuse
			 * to launch the application, since its XPC services could not be
			 * fully bootstrapped.
			 *
			 * Take care to not reference the job or its manager after this
			 * point.
			 */
			if (errno == EINVAL) {
				jobmgr_log(j->mgr, LOG_ERR, "Service at index is not valid: %lu", i);
			} else if (errno == EEXIST) {
				/* If we get back EEXIST, we know that the payload was a
				 * dictionary with a label. But, well, I guess it never hurts to
				 * check.
				 */
				char *label = "(bogus)";
				if (launch_data_get_type(ploadi) == LAUNCH_DATA_DICTIONARY) {
					launch_data_t llabel = launch_data_dict_lookup(ploadi, LAUNCH_JOBKEY_LABEL);
					if (launch_data_get_type(llabel) == LAUNCH_DATA_STRING) {
						label = (char *)launch_data_get_string(llabel);
					}
				}
				jobmgr_log(j->mgr, LOG_ERR, "Service name conflict: %s", label);
			}

			j->mgr->error = errno;
			jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
			jobmgr_remove(j->mgr);
			break;
		} else {
			jobmgr_log(j->mgr, LOG_DEBUG, "Imported service %s", nj->label);
			job_dispatch(nj, false);
		}
	}

	kern_return_t result = BOOTSTRAP_NO_MEMORY;
	if (i == c) {
		j->mgr->session_initialized = true;
		(void)jobmgr_assumes(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS) == KERN_SUCCESS);
		j->mgr->req_rport = MACH_PORT_NULL;

		/* Returning a failure code will destroy the message, whereas returning
		 * success will not, so we need to clean up here.
		 */
		mig_deallocate(services_buff, services_sz);
		result = BOOTSTRAP_SUCCESS;
	}

	return result;
}

kern_return_t
xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport, mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid, int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
{
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}
	jobmgr_t jm = j->mgr;
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (jm->req_asport == MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	*bsport = jm->req_bsport;
	*sbsport = root_jobmgr->jm_port;
	*excport = jm->req_excport;
	*asport = jm->req_asport;
	*uid = jm->req_euid;
	*gid = jm->req_egid;
	*asid = jm->req_asid;

	*ctx = jm->req_ctx;
	*ctx_sz = jm->req_ctx_sz;

	return KERN_SUCCESS;
}

kern_return_t
xpc_domain_get_service_name(job_t j, event_name_t name)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (!j->xpc_service) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct machservice *ms = SLIST_FIRST(&j->machservices);
	if (!ms) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name of job with no machservices: %s", j->label);
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	(void)strlcpy(name, ms->name, sizeof(event_name_t));
	return BOOTSTRAP_SUCCESS;
}
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */

kern_return_t
xpc_events_get_channel_name(job_t j __attribute__((unused)), event_name_t stream __attribute__((unused)), uint64_t token __attribute__((unused)), event_name_t name __attribute__((unused)))
{
	return KERN_FAILURE;
}

kern_return_t
xpc_events_get_event_name(job_t j, event_name_t stream, uint64_t token, event_name_t name)
{
	struct externalevent *event = externalevent_find(stream, token);
	if (event && j->event_monitor) {
		(void)strcpy(name, event->name);
	} else {
		event = NULL;
	}

	return event ? BOOTSTRAP_SUCCESS : BOOTSTRAP_UNKNOWN_SERVICE;
}

kern_return_t
xpc_events_set_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t event, mach_msg_type_number_t eventCnt)
{
	if (!j) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			externalevent_delete(eei);
			break;
		}
	}

	bool success = false;
	struct eventsystem *es = eventsystem_find(stream);
	if (!es) {
		es = eventsystem_new(stream);
		(void)job_assumes(j, es != NULL);
	}

	if (es) {
		size_t offset = 0;
		launch_data_t unpacked = launch_data_unpack((void *)event, eventCnt, NULL, 0, &offset, 0);
		if (unpacked && launch_data_get_type(unpacked) == LAUNCH_DATA_DICTIONARY) {
			success = externalevent_new(j, es, key, unpacked);
		}
	}

	if (!success) {
		mig_deallocate(event, eventCnt);
	}

	return KERN_SUCCESS;
}

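/* Illustrative sketch (not part of launchd): the launch_data_pack()/
 * launch_data_unpack() round trip used by the event routines above. The
 * buffer below is stack-allocated instead of mig_allocate()d, and the
 * dictionary key "ExampleKey" is made up; the API usage mirrors this file.
 */
#if 0
static void
pack_unpack_example(void)
{
	char buff[1024];
	size_t offset = 0;

	launch_data_t dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
	launch_data_dict_insert(dict, launch_data_new_string("hello"), "ExampleKey");

	/* Serialize into a flat buffer... */
	size_t sz = launch_data_pack(dict, buff, sizeof(buff), NULL, NULL);
	if (sz == 0) {
		launch_data_free(dict);
		return; /* Buffer too small. */
	}

	/* ...and reconstitute it, as xpc_events_set_event() does with the
	 * out-of-line memory it receives from MIG.
	 */
	launch_data_t unpacked = launch_data_unpack(buff, sz, NULL, 0, &offset, NULL);
	if (unpacked && launch_data_get_type(unpacked) == LAUNCH_DATA_DICTIONARY) {
		/* Use the dictionary... */
	}

	launch_data_free(dict);
}
#endif
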
kern_return_t
xpc_events_get_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t *event, mach_msg_type_number_t *eventCnt)
{
	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			/* Big enough. */
			*eventCnt = 10 * 1024;
			mig_allocate(event, *eventCnt);

			size_t sz = launch_data_pack(eei->event, (void *)*event, *eventCnt, NULL, NULL);
			if (!job_assumes(j, sz != 0)) {
				mig_deallocate(*event, *eventCnt);
				return BOOTSTRAP_NO_MEMORY;
			}

			return BOOTSTRAP_SUCCESS;
		}
	}

	return BOOTSTRAP_UNKNOWN_SERVICE;
}

struct machservice *
xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p)
{
	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (strcmp(stream, msi->name) == 0) {
			break;
		}
	}

	if (!msi) {
		mach_port_t sp = MACH_PORT_NULL;
		msi = machservice_new(j, stream, &sp, false);
		if (job_assumes(j, msi)) {
			/* Hack to keep this from being publicly accessible through
			 * bootstrap_look_up().
			 */
			LIST_REMOVE(msi, name_hash_sle);
			msi->event_channel = true;
			*p = sp;

			/* If we call job_dispatch() here before the audit session for the
			 * job has been set, we'll end up not watching this service. But we
			 * also have to take care not to watch the port if the job is
			 * active.
			 *
			 * See <rdar://problem/10357855>.
			 */
			if (!j->currently_ignored) {
				machservice_watch(j, msi);
			}
		} else {
			errno = BOOTSTRAP_NO_MEMORY;
		}
	} else {
		if (!msi->event_channel) {
			job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
			msi = NULL;
			errno = BOOTSTRAP_NAME_IN_USE;
		} else {
			*p = machservice_port(msi);
		}
	}

	return msi;
}

kern_return_t
xpc_events_channel_check_in(job_t j, event_name_t stream, uint64_t flags __attribute__((unused)), mach_port_t *p)
{
	struct machservice *ms = xpc_events_find_channel(j, stream, p);
	if (ms) {
		if (ms->isActive) {
			job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
			*p = MACH_PORT_NULL;
			errno = BOOTSTRAP_SERVICE_ACTIVE;
		} else {
			machservice_request_notifications(ms);
			errno = BOOTSTRAP_SUCCESS;
		}
	}

	return errno;
}

kern_return_t
xpc_events_channel_look_up(job_t j, event_name_t stream, event_token_t token, uint64_t flags __attribute__((unused)), mach_port_t *p)
{
	if (!j->event_monitor) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct externalevent *ee = externalevent_find(stream, token);
	if (!ee) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	struct machservice *ms = xpc_events_find_channel(ee->job, stream, p);
	if (ms) {
		errno = BOOTSTRAP_SUCCESS;
	}

	return errno;
}

kern_return_t
job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t otherj;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
#else
	bool allow_non_root_kickstart = false;
#endif

	if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
		return BOOTSTRAP_SERVICE_ACTIVE;
	}

	otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
	otherj = job_dispatch(otherj, true);

	if (!job_assumes(j, otherj && otherj->p)) {
		/* <rdar://problem/6787083> Clear this flag if we failed to start the job. */
		if (otherj) {
			otherj->stall_before_exec = false;
		}
		return BOOTSTRAP_NO_MEMORY;
	}

	*out_pid = otherj->p;

	return 0;
}

kern_return_t
job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
{
	launch_data_t jobdata = NULL;
	size_t data_offset = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
	if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
		jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);

	launch_data_t label = NULL;
	launch_data_t wait4debugger = NULL;
	if (!jr) {
		if (errno == EEXIST) {
			/* If EEXIST was returned, we know that there is a label string in
			 * the dictionary. So we don't need to check the types here; that
			 * has already been done.
			 */
			label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
			jr = job_find(NULL, launch_data_get_string(label));
			if (job_assumes(j, jr != NULL) && !jr->p) {
				wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
				if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
					if (launch_data_get_bool(wait4debugger)) {
						/* If the job exists, we're going to kick-start it, but
						 * we need to give the caller the opportunity to start
						 * it suspended if it so desires. But this will only
						 * take effect if the job isn't running.
						 */
						jr->wait4debugger_oneshot = true;
					}
				}
			}

			*outj = jr;
			return BOOTSTRAP_NAME_IN_USE;
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	if (pid1_magic) {
		jr->mach_uid = ldc->uid;
	}

	jr->legacy_LS_job = true;
	jr->abandon_pg = true;
	jr->asport = asport;
	uuid_clear(jr->expected_audit_uuid);
	jr = job_dispatch(jr, true);

	if (!job_assumes(j, jr != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);

	*outj = jr;
	return BOOTSTRAP_SUCCESS;
}

kern_return_t
job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
{
	job_t nj = NULL;
	kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
	if (likely(kr == KERN_SUCCESS)) {
		if (job_setup_exit_port(nj) != KERN_SUCCESS) {
			kr = BOOTSTRAP_NO_MEMORY;
		} else {
			/* Do not return until the job has called exec(3), thereby making it
			 * safe for the caller to send it SIGCONT.
			 *
			 * <rdar://problem/9042798>
			 */
			nj->spawn_reply_port = rp;
			kr = MIG_NO_REPLY;
		}
	} else if (kr == BOOTSTRAP_NAME_IN_USE) {
		bool was_running = nj->p;
		if (job_dispatch(nj, true)) {
			if (!was_running) {
				job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");

				if (job_setup_exit_port(nj) == KERN_SUCCESS) {
					nj->spawn_reply_port = rp;
					kr = MIG_NO_REPLY;
				} else {
					kr = BOOTSTRAP_NO_MEMORY;
				}
			} else {
				*obsvr_port = MACH_PORT_NULL;
				*child_pid = nj->p;
				kr = KERN_SUCCESS;
			}
		} else {
			job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
			kr = BOOTSTRAP_UNKNOWN_SERVICE;
		}
	}

	mig_deallocate(indata, indataCnt);
	return kr;
}

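/* Illustrative sketch (not part of launchd): the deferred-reply pattern used
 * by job_mig_spawn2() above. A MIG server routine may stash the reply port and
 * return MIG_NO_REPLY; the runtime then skips sending a reply, and the server
 * answers later by invoking the generated reply routine itself. All names
 * below (example_reply, example_server_routine, pending_reply_port) are
 * hypothetical stand-ins for MIG-generated code.
 */
#if 0
static mach_port_t pending_reply_port = MACH_PORT_NULL;

/* Hypothetical MIG-generated reply stub. */
extern kern_return_t example_reply(mach_port_t reply_port, kern_return_t code);

kern_return_t
example_server_routine(mach_port_t rp)
{
	/* Not ready to answer yet: remember the reply port... */
	pending_reply_port = rp;
	return MIG_NO_REPLY;
}

static void
example_event_happened(void)
{
	/* ...and answer once the event we were waiting for occurs. */
	(void)example_reply(pending_reply_port, KERN_SUCCESS);
	pending_reply_port = MACH_PORT_NULL;
}
#endif
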
kern_return_t
job_mig_event_source_check_in(job_t j, name_t name, mach_port_t ping_port, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt, uint64_t *tokens)
{
	if (!j || !j->event_monitor) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Update our ping-port. One ping will force all the notification systems
	 * to check in, so they'll all give us send-once rights. It doesn't really
	 * matter which one we keep around. It's not the most efficient thing ever,
	 * but keep in mind that, by doing this over one channel, we can do it over
	 * the job's MachService. This means that we'll get it back when the job dies,
	 * and we can create ourselves a send-once right if we didn't have one already,
	 * and we can just keep the helper alive without it needing to bootstrap
	 * communication.
	 *
	 * So we're trading efficiency for robustness. In this case, the checkins
	 * should happen pretty infrequently, so it's pretty worth it.
	 */
	if (_s_event_update_port != MACH_PORT_NULL) {
		(void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
	}
	_s_event_update_port = ping_port;

	kern_return_t result = BOOTSTRAP_NO_MEMORY;
	launch_data_t arr = launch_data_alloc(LAUNCH_DATA_ARRAY);
	if (job_assumes(j, arr != NULL)) {
		struct eventsystem *es = eventsystem_find(name);
		if (unlikely(es == NULL)) {
			es = eventsystem_new(name);
		}

		if (job_assumes(j, es != NULL)) {
			struct externalevent *ei = NULL;
			size_t i = 0;
			LIST_FOREACH(ei, &es->events, sys_le) {
				(void)job_assumes(j, launch_data_array_set_index(arr, ei->event, i));
				if (job_assumes(j, i < 1024)) {
					tokens[i] = ei->id;
				} else {
					break;
				}
				i++;
			}

			/* Big enough. */
			*outvalCnt = 10 * 1024;
			mig_allocate(outval, *outvalCnt);

			size_t sz = launch_data_pack(arr, (void *)*outval, *outvalCnt, NULL, NULL);
			if (job_assumes(j, sz != 0)) {
				result = BOOTSTRAP_SUCCESS;
			} else {
				mig_deallocate(*outval, *outvalCnt);
			}
		}

		/* Total hack, but launch_data doesn't do ref-counting. */
		struct _launch_data *hack = (struct _launch_data *)arr;
		free(hack->_array);
		free(arr);
	}

	return result;
}

kern_return_t
job_mig_event_set_state(job_t j, name_t name, uint64_t token, boolean_t state)
{
	if (!j || !j->event_monitor) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct externalevent *ei = externalevent_find(name, token);
	if (job_assumes(j, ei != NULL)) {
		ei->state = state;
		if (job_dispatch(ei->job, false) == NULL) {
			if (errno == EPERM) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
			return BOOTSTRAP_NO_MEMORY;
		}
	} else {
		return BOOTSTRAP_NO_MEMORY;
	}

	return BOOTSTRAP_SUCCESS;
}

void
jobmgr_init(bool sflag)
{
	const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
	SLIST_INIT(&s_curious_jobs);
	LIST_INIT(&s_needing_sessions);

	launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
	launchd_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
	_s_xpc_system_domain->req_asid = g_audit_session;
	_s_xpc_system_domain->req_asport = g_audit_session_port;
	_s_xpc_system_domain->shortdesc = "system";
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */

	root_jobmgr->monitor_shutdown = true;

	uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
	s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
	if (likely(s_no_hang_fd == -1)) {
		if (jobmgr_assumes(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK)) != -1)) {
			(void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr) != -1);
		}
	}
	s_no_hang_fd = _fd(s_no_hang_fd);
}

size_t
our_strhash(const char *s)
{
	size_t c, r = 5381;

	/*
	 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
	 */

	while ((c = *s++)) {
		r = ((r << 5) + r) + c; /* hash*33 + c */
	}

	return r;
}

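/* Worked example (not part of launchd): the djb2 recurrence above starts at
 * 5381 and folds each byte in as hash = hash * 33 + c. For the two-character
 * string "ab": 5381 * 33 + 'a' (97) = 177670, then 177670 * 33 + 'b' (98)
 * = 5863208. The odd seed and multiplier 33 give a cheap, well-dispersed hash
 * for short ASCII keys such as the job labels and Mach service names hashed
 * by the two wrappers below.
 */
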
size_t
hash_label(const char *label)
{
	return our_strhash(label) % LABEL_HASH_SIZE;
}

size_t
hash_ms(const char *msstr)
{
	return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
}

bool
waiting4removal_new(job_t j, mach_port_t rp)
{
	struct waiting_for_removal *w4r;

	if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
		return false;
	}

	w4r->reply_port = rp;

	SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);

	return true;
}

void
waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
{
	(void)job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);

	SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);

	free(w4r);
}

int
get_kern_max_proc(void)
{
	int mib[] = { CTL_KERN, KERN_MAXPROC };
	int max = 100;
	size_t max_sz = sizeof(max);

	(void)launchd_assumes(sysctl(mib, 2, &max, &max_sz, NULL, 0) != -1);

	return max;
}

/* See rdar://problem/6271234 */
void
eliminate_double_reboot(void)
{
	if (unlikely(!pid1_magic)) {
		return;
	}

	struct stat sb;
	const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
	char *try_again = "Will try again at next boot.";
	int result = -1;

	if (unlikely(stat(argv[1], &sb) != -1)) {
		jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");

		int wstatus;
		pid_t p;

		(void)jobmgr_assumes(root_jobmgr, (errno = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ)) == 0);

		if (errno) {
			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script! %s", try_again);
			goto out;
		}

		if (!jobmgr_assumes(root_jobmgr, waitpid(p, &wstatus, 0) != -1)) {
			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't confirm that deferred install script exited successfully! %s", try_again);
			goto out;
		}

		if (jobmgr_assumes(root_jobmgr, WIFEXITED(wstatus) != 0)) {
			if (jobmgr_assumes(root_jobmgr, (result = WEXITSTATUS(wstatus)) == EXIT_SUCCESS)) {
				jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
			} else {
				jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script exited with status %d. %s", WEXITSTATUS(wstatus), try_again);
			}
		} else {
			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Confirmed that deferred install script exited, but couldn't confirm that it was successful. %s", try_again);
		}
	}
out:
	if (result == 0) {
		/* If the unlink(2) was to fail, it would be most likely fail with EBUSY. All the other
		 * failure cases for unlink(2) don't apply when we're running under PID 1 and have verified
		 * that the file exists. Outside of someone deliberately messing with us (like if /etc/rc.deferredinstall
		 * is actually a looping sym-link or a mount point for a filesystem) and I/O errors, we should be good.
		 */
		if (!jobmgr_assumes(root_jobmgr, unlink(argv[1]) != -1)) {
			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script couldn't be removed!");
		}
	}
}

void
jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
{
	job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
	if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
		j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
		job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
		j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
		job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
		/* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
		 * You can't set this in a plist.
		 */
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
		/* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
		 * complain about it.
		 */
	} else {
		job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
	}

	if (unlikely(!j->jetsam_properties)) {
		j->jetsam_properties = true;
		LIST_INSERT_HEAD(&j->mgr->jetsam_jobs, j, jetsam_sle);
		j->mgr->jetsam_jobs_cnt++;
	}

	j->jetsam_seq = s_jetsam_sequence_id++;
}

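/* Illustrative sketch (not part of launchd): the shape of one entry in the
 * priority array that launchd_set_jetsam_priorities() below consumes, built
 * with launch_data. The label string and the literal priority/limit values
 * are made up; the keys mirror the ones checked above and below.
 */
#if 0
static launch_data_t
make_jetsam_priorities(void)
{
	launch_data_t entry = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
	launch_data_dict_insert(entry, launch_data_new_string("com.example.app"), LAUNCH_KEY_JETSAMLABEL);
	launch_data_dict_insert(entry, launch_data_new_integer(10), LAUNCH_JOBKEY_JETSAMPRIORITY);
	launch_data_dict_insert(entry, launch_data_new_integer(2048), LAUNCH_JOBKEY_JETSAMMEMORYLIMIT);

	/* The routine below expects an array of such dictionaries. */
	launch_data_t arr = launch_data_alloc(LAUNCH_DATA_ARRAY);
	launch_data_array_set_index(arr, entry, 0);
	return arr;
}
#endif
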
int
launchd_set_jetsam_priorities(launch_data_t priorities)
{
	if (!launchd_assumes(launch_data_get_type(priorities) == LAUNCH_DATA_ARRAY)) {
		return EINVAL;
	}

	jobmgr_t jm = NULL;
#if !TARGET_OS_EMBEDDED
	jm = jobmgr_find_by_name(root_jobmgr, VPROCMGR_SESSION_AQUA);
	if (!launchd_assumes(jm != NULL)) {
		return EINVAL;
	}
#else
	/* Since this is for embedded, we can assume that the root job manager holds the Jetsam jobs. */
	jm = root_jobmgr;

	if (!g_embedded_privileged_action) {
		return EPERM;
	}
#endif

	size_t npris = launch_data_array_get_count(priorities);

	job_t ji = NULL;
	size_t i = 0;
	for (i = 0; i < npris; i++) {
		launch_data_t ldi = launch_data_array_get_index(priorities, i);
		if (!launchd_assumes(launch_data_get_type(ldi) == LAUNCH_DATA_DICTIONARY)) {
			continue;
		}

		launch_data_t label = NULL;
		if (!launchd_assumes(label = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMLABEL))) {
			continue;
		}
		const char *_label = launch_data_get_string(label);

		ji = job_find(NULL, _label);
		if (!launchd_assumes(ji != NULL)) {
			continue;
		}

		launch_data_dict_iterate(ldi, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, ji);

		launch_data_t frontmost = NULL;
		if ((frontmost = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMFRONTMOST)) && launch_data_get_type(frontmost) == LAUNCH_DATA_BOOL) {
			ji->jetsam_frontmost = launch_data_get_bool(frontmost);
		}
	}

	i = 0;
	job_t *jobs = (job_t *)calloc(jm->jetsam_jobs_cnt, sizeof(job_t));
	if (launchd_assumes(jobs != NULL)) {
		LIST_FOREACH(ji, &jm->jetsam_jobs, jetsam_sle) {
			if (ji->p) {
				jobs[i] = ji;
				i++;
			}
		}
	}

	size_t totalpris = i;

	int result = EINVAL;

	/* It is conceivable that there could be no Jetsam jobs running. */
	if (totalpris > 0) {
		qsort_b((void *)jobs, totalpris, sizeof(job_t), ^ int (const void *lhs, const void *rhs) {
			job_t _lhs = *(job_t *)lhs;
			job_t _rhs = *(job_t *)rhs;
			/* Sort in descending order. (Priority correlates to the soonishness with which you will be killed.) */
			if (_lhs->jetsam_priority > _rhs->jetsam_priority) {
				return -1;
			} else if (_lhs->jetsam_priority < _rhs->jetsam_priority) {
				return 1;
			}

			/* Priority is equal, so sort by sequence ID to maintain LRU order */
			if ((int)(_lhs->jetsam_seq - _rhs->jetsam_seq) > 0) {
				return 1;
			} else if ((int)(_lhs->jetsam_seq - _rhs->jetsam_seq) < 0) {
				return -1;
			}

			return 0;
		});

		jetsam_priority_entry_t *jpris = (jetsam_priority_entry_t *)calloc(totalpris, sizeof(jetsam_priority_entry_t));
		if (!launchd_assumes(jpris != NULL)) {
			result = ENOMEM;
		} else {
			for (i = 0; i < totalpris; i++) {
				jpris[i].pid = jobs[i]->p; /* Subject to time-of-use vs. time-of-check, obviously. */
				jpris[i].flags |= jobs[i]->jetsam_frontmost ? kJetsamFlagsFrontmost : 0;
				jpris[i].hiwat_pages = jobs[i]->jetsam_memlimit;
			}

			(void)launchd_assumes((result = sysctlbyname("kern.memorystatus_priority_list", NULL, NULL, &jpris[0], totalpris * sizeof(jetsam_priority_entry_t))) != -1);
			result = result != 0 ? errno : 0;