2 * @APPLE_APACHE_LICENSE_HEADER_START@
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
16 * @APPLE_APACHE_LICENSE_HEADER_END@
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/boolean.h>
28 #include <mach/message.h>
29 #include <mach/notify.h>
30 #include <mach/mig_errors.h>
31 #include <mach/mach_traps.h>
32 #include <mach/mach_interface.h>
33 #include <mach/host_info.h>
34 #include <mach/mach_host.h>
35 #include <mach/exception.h>
36 #include <mach/host_reboot.h>
37 #include <sys/types.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
41 #include <sys/ucred.h>
42 #include <sys/fcntl.h>
44 #include <sys/reboot.h>
46 #include <sys/sysctl.h>
47 #include <sys/sockio.h>
49 #include <sys/resource.h>
50 #include <sys/ioctl.h>
51 #include <sys/mount.h>
54 #include <sys/socket.h>
55 #include <sys/syscall.h>
56 #include <sys/kern_memorystatus.h>
58 #include <netinet/in.h>
59 #include <netinet/in_var.h>
60 #include <netinet6/nd6.h>
61 #include <bsm/libbsm.h>
79 #include <System/sys/spawn.h>
80 #include <System/sys/spawn_internal.h>
82 #include <spawn_private.h>
85 #include <os/assumes.h>
86 #include <xpc/launchd.h>
91 #include <libproc_internal.h>
92 #include <System/sys/proc_info.h>
93 #include <malloc/malloc.h>
96 #define __APPLE_API_PRIVATE
100 #include <quarantine.h>
102 #if HAVE_RESPONSIBILITY
103 #include <responsibility.h>
105 #if !TARGET_OS_EMBEDDED
106 extern int gL1CacheEnabled
;
109 #include <systemstats/systemstats.h>
113 #include "launch_priv.h"
114 #include "launch_internal.h"
115 #include "bootstrap.h"
116 #include "bootstrap_priv.h"
118 #include "vproc_internal.h"
126 #include "jobServer.h"
127 #include "job_reply.h"
128 #include "job_forward.h"
129 #include "mach_excServer.h"
131 #define POSIX_SPAWN_IOS_INTERACTIVE 0
133 #if TARGET_OS_EMBEDDED
134 /* Default memory highwatermark for daemons as set out in <rdar://problem/10307788>. */
135 #define DEFAULT_JETSAM_DAEMON_HIGHWATERMARK 5
138 /* LAUNCHD_DEFAULT_EXIT_TIMEOUT
139 * If the job hasn't exited in the given number of seconds after sending
140 * it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
142 #define LAUNCHD_MIN_JOB_RUN_TIME 10
143 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
144 #define LAUNCHD_SIGKILL_TIMER 4
145 #define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
147 #define SHUTDOWN_LOG_DIR "/var/log/shutdown"
149 #define TAKE_SUBSET_NAME "TakeSubsetName"
150 #define TAKE_SUBSET_PID "TakeSubsetPID"
151 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
153 #define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
155 extern char **environ
;
157 struct waiting_for_removal
{
158 SLIST_ENTRY(waiting_for_removal
) sle
;
159 mach_port_t reply_port
;
162 static bool waiting4removal_new(job_t j
, mach_port_t rp
);
163 static void waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
);
166 SLIST_ENTRY(machservice
) sle
;
167 SLIST_ENTRY(machservice
) special_port_sle
;
168 LIST_ENTRY(machservice
) name_hash_sle
;
169 LIST_ENTRY(machservice
) port_hash_sle
;
170 struct machservice
*alias
;
172 unsigned int gen_num
;
173 mach_port_name_t port
;
183 delete_on_destruction
:1,
184 drain_one_on_crash
:1,
185 drain_all_on_crash
:1,
189 /* Don't let the size of this field get too small. It has to be large
190 * enough to represent the reasonable range of special port numbers.
196 // HACK: This should be per jobmgr_t
197 static SLIST_HEAD(, machservice
) special_ports
;
199 #define PORT_HASH_SIZE 32
200 #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
202 static LIST_HEAD(, machservice
) port_hash
[PORT_HASH_SIZE
];
204 static void machservice_setup(launch_data_t obj
, const char *key
, void *context
);
205 static void machservice_setup_options(launch_data_t obj
, const char *key
, void *context
);
206 static void machservice_resetport(job_t j
, struct machservice
*ms
);
207 static void machservice_stamp_port(job_t j
, struct machservice
*ms
);
208 static struct machservice
*machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
);
209 static struct machservice
*machservice_new_alias(job_t aj
, struct machservice
*orig
);
210 static void machservice_ignore(job_t j
, struct machservice
*ms
);
211 static void machservice_watch(job_t j
, struct machservice
*ms
);
212 static void machservice_delete(job_t j
, struct machservice
*, bool port_died
);
213 static void machservice_request_notifications(struct machservice
*);
214 static mach_port_t
machservice_port(struct machservice
*);
215 static job_t
machservice_job(struct machservice
*);
216 static bool machservice_hidden(struct machservice
*);
217 static bool machservice_active(struct machservice
*);
218 static const char *machservice_name(struct machservice
*);
219 static bootstrap_status_t
machservice_status(struct machservice
*);
220 void machservice_drain_port(struct machservice
*);
223 SLIST_ENTRY(socketgroup
) sle
;
232 static bool socketgroup_new(job_t j
, const char *name
, int *fds
, size_t fd_cnt
);
233 static void socketgroup_delete(job_t j
, struct socketgroup
*sg
);
234 static void socketgroup_watch(job_t j
, struct socketgroup
*sg
);
235 static void socketgroup_ignore(job_t j
, struct socketgroup
*sg
);
236 static void socketgroup_callback(job_t j
);
237 static void socketgroup_setup(launch_data_t obj
, const char *key
, void *context
);
238 static void socketgroup_kevent_mod(job_t j
, struct socketgroup
*sg
, bool do_add
);
240 struct calendarinterval
{
241 LIST_ENTRY(calendarinterval
) global_sle
;
242 SLIST_ENTRY(calendarinterval
) sle
;
248 static LIST_HEAD(, calendarinterval
) sorted_calendar_events
;
250 static bool calendarinterval_new(job_t j
, struct tm
*w
);
251 static bool calendarinterval_new_from_obj(job_t j
, launch_data_t obj
);
252 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
);
253 static void calendarinterval_delete(job_t j
, struct calendarinterval
*ci
);
254 static void calendarinterval_setalarm(job_t j
, struct calendarinterval
*ci
);
255 static void calendarinterval_callback(void);
256 static void calendarinterval_sanity_check(void);
259 SLIST_ENTRY(envitem
) sle
;
267 static bool envitem_new(job_t j
, const char *k
, const char *v
, bool global
);
268 static void envitem_delete(job_t j
, struct envitem
*ei
, bool global
);
269 static void envitem_setup(launch_data_t obj
, const char *key
, void *context
);
272 SLIST_ENTRY(limititem
) sle
;
274 unsigned int setsoft
:1, sethard
:1, which
:30;
277 static bool limititem_update(job_t j
, int w
, rlim_t r
);
278 static void limititem_delete(job_t j
, struct limititem
*li
);
279 static void limititem_setup(launch_data_t obj
, const char *key
, void *context
);
281 static void seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
);
284 static void jetsam_property_setup(launch_data_t obj
, const char *key
, job_t j
);
297 } semaphore_reason_t
;
299 struct semaphoreitem
{
300 SLIST_ENTRY(semaphoreitem
) sle
;
301 semaphore_reason_t why
;
309 struct semaphoreitem_dict_iter_context
{
311 semaphore_reason_t why_true
;
312 semaphore_reason_t why_false
;
315 static bool semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
);
316 static void semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
);
317 static void semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
);
318 static void semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
);
319 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
);
321 struct externalevent
{
322 LIST_ENTRY(externalevent
) sys_le
;
323 LIST_ENTRY(externalevent
) job_le
;
324 struct eventsystem
*sys
;
332 xpc_object_t entitlements
;
337 struct externalevent_iter_ctx
{
339 struct eventsystem
*sys
;
342 static bool externalevent_new(job_t j
, struct eventsystem
*sys
, const char *evname
, xpc_object_t event
, uint64_t flags
);
343 static void externalevent_delete(struct externalevent
*ee
);
344 static void externalevent_setup(launch_data_t obj
, const char *key
, void *context
);
345 static struct externalevent
*externalevent_find(const char *sysname
, uint64_t id
);
348 LIST_ENTRY(eventsystem
) global_le
;
349 LIST_HEAD(, externalevent
) events
;
354 static struct eventsystem
*eventsystem_new(const char *name
);
355 static void eventsystem_delete(struct eventsystem
*sys
) __attribute__((unused
));
356 static void eventsystem_setup(launch_data_t obj
, const char *key
, void *context
);
357 static struct eventsystem
*eventsystem_find(const char *name
);
358 static void eventsystem_ping(void);
360 struct waiting4attach
{
361 LIST_ENTRY(waiting4attach
) le
;
364 xpc_service_type_t type
;
368 static LIST_HEAD(, waiting4attach
) _launchd_domain_waiters
;
370 static struct waiting4attach
*waiting4attach_new(jobmgr_t jm
, const char *name
, mach_port_t port
, pid_t dest
, xpc_service_type_t type
);
371 static void waiting4attach_delete(jobmgr_t jm
, struct waiting4attach
*w4a
);
372 static struct waiting4attach
*waiting4attach_find(jobmgr_t jm
, job_t j
);
374 #define ACTIVE_JOB_HASH_SIZE 32
375 #define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))
377 #define MACHSERVICE_HASH_SIZE 37
379 #define LABEL_HASH_SIZE 53
381 kq_callback kqjobmgr_callback
;
382 LIST_ENTRY(jobmgr_s
) xpc_le
;
383 SLIST_ENTRY(jobmgr_s
) sle
;
384 SLIST_HEAD(, jobmgr_s
) submgrs
;
385 LIST_HEAD(, job_s
) jobs
;
386 LIST_HEAD(, waiting4attach
) attaches
;
388 /* For legacy reasons, we keep all job labels that are imported in the root
389 * job manager's label hash. If a job manager is an XPC domain, then it gets
390 * its own label hash that is separate from the "global" one stored in the
393 LIST_HEAD(, job_s
) label_hash
[LABEL_HASH_SIZE
];
394 LIST_HEAD(, job_s
) active_jobs
[ACTIVE_JOB_HASH_SIZE
];
395 LIST_HEAD(, machservice
) ms_hash
[MACHSERVICE_HASH_SIZE
];
396 LIST_HEAD(, job_s
) global_env_jobs
;
398 mach_port_t req_port
;
401 time_t shutdown_time
;
402 unsigned int global_on_demand_cnt
;
403 unsigned int normal_active_cnt
;
406 session_initialized
:1,
409 shutdown_jobs_dirtied
:1,
410 shutdown_jobs_cleaned
:1,
413 // XPC-specific properties.
414 char owner
[MAXCOMLEN
];
416 mach_port_t req_bsport
;
417 mach_port_t req_excport
;
418 mach_port_t req_asport
;
419 mach_port_t req_gui_asport
;
425 mach_msg_type_number_t req_ctx_sz
;
426 mach_port_t req_rport
;
427 uint64_t req_uniqueid
;
435 // Global XPC domains.
436 static jobmgr_t _s_xpc_system_domain
;
437 static LIST_HEAD(, jobmgr_s
) _s_xpc_user_domains
;
438 static LIST_HEAD(, jobmgr_s
) _s_xpc_session_domains
;
440 #define jobmgr_assumes(jm, e) os_assumes_ctx(jobmgr_log_bug, jm, (e))
441 #define jobmgr_assumes_zero(jm, e) os_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
442 #define jobmgr_assumes_zero_p(jm, e) posix_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
444 static jobmgr_t
jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
, bool no_init
, mach_port_t asport
);
445 static jobmgr_t
jobmgr_new_xpc_singleton_domain(jobmgr_t jm
, name_t name
);
446 static jobmgr_t
jobmgr_find_xpc_per_user_domain(jobmgr_t jm
, uid_t uid
);
447 static jobmgr_t
jobmgr_find_xpc_per_session_domain(jobmgr_t jm
, au_asid_t asid
);
448 static job_t
jobmgr_import2(jobmgr_t jm
, launch_data_t pload
);
449 static jobmgr_t
jobmgr_parent(jobmgr_t jm
);
450 static jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm
);
451 static bool jobmgr_label_test(jobmgr_t jm
, const char *str
);
452 static void jobmgr_reap_bulk(jobmgr_t jm
, struct kevent
*kev
);
453 static void jobmgr_log_stray_children(jobmgr_t jm
, bool kill_strays
);
454 static void jobmgr_kill_stray_children(jobmgr_t jm
, pid_t
*p
, size_t np
);
455 static void jobmgr_remove(jobmgr_t jm
);
456 static void jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
);
457 static job_t
jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
);
458 static job_t
jobmgr_find_by_pid_deep(jobmgr_t jm
, pid_t p
, bool anon_okay
);
459 static job_t
jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
);
460 static job_t
managed_job(pid_t p
);
461 static jobmgr_t
jobmgr_find_by_name(jobmgr_t jm
, const char *where
);
462 static job_t
job_mig_intran2(jobmgr_t jm
, mach_port_t mport
, pid_t upid
);
463 static job_t
jobmgr_lookup_per_user_context_internal(job_t j
, uid_t which_user
, mach_port_t
*mp
);
464 static void job_export_all2(jobmgr_t jm
, launch_data_t where
);
465 static void jobmgr_callback(void *obj
, struct kevent
*kev
);
466 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm
);
467 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
);
468 static struct machservice
*jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
);
469 static void jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
470 static void jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
471 static void jobmgr_log_perf_statistics(jobmgr_t jm
, bool signal_children
);
472 // static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
473 static bool jobmgr_log_bug(_SIMPLE_STRING asl_message
, void *ctx
, const char *message
);
475 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
476 #define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
477 #define AUTO_PICK_XPC_LABEL (const char *)(~2)
479 struct suspended_peruser
{
480 LIST_ENTRY(suspended_peruser
) sle
;
485 // MUST be first element of this structure.
486 kq_callback kqjob_callback
;
487 LIST_ENTRY(job_s
) sle
;
488 LIST_ENTRY(job_s
) subjob_sle
;
489 LIST_ENTRY(job_s
) needing_session_sle
;
490 LIST_ENTRY(job_s
) jetsam_sle
;
491 LIST_ENTRY(job_s
) pid_hash_sle
;
492 LIST_ENTRY(job_s
) global_pid_hash_sle
;
493 LIST_ENTRY(job_s
) label_hash_sle
;
494 LIST_ENTRY(job_s
) global_env_sle
;
495 SLIST_ENTRY(job_s
) curious_jobs_sle
;
496 LIST_HEAD(, suspended_peruser
) suspended_perusers
;
497 LIST_HEAD(, waiting_for_exit
) exit_watchers
;
498 LIST_HEAD(, job_s
) subjobs
;
499 LIST_HEAD(, externalevent
) events
;
500 SLIST_HEAD(, socketgroup
) sockets
;
501 SLIST_HEAD(, calendarinterval
) cal_intervals
;
502 SLIST_HEAD(, envitem
) global_env
;
503 SLIST_HEAD(, envitem
) env
;
504 SLIST_HEAD(, limititem
) limits
;
505 SLIST_HEAD(, machservice
) machservices
;
506 SLIST_HEAD(, semaphoreitem
) semaphores
;
507 SLIST_HEAD(, waiting_for_removal
) removal_watchers
;
508 struct waiting4attach
*w4a
;
511 cpu_type_t
*j_binpref
;
512 size_t j_binpref_cnt
;
514 mach_port_t exit_status_dest
;
515 mach_port_t exit_status_port
;
516 mach_port_t spawn_reply_port
;
529 char *alt_exc_handler
;
530 char *cfbundleidentifier
;
534 char *seatbelt_profile
;
535 uint64_t seatbelt_flags
;
536 char *container_identifier
;
539 void *quarantine_data
;
540 size_t quarantine_data_sz
;
544 int last_exit_status
;
550 int32_t jetsam_priority
;
551 int32_t jetsam_memlimit
;
552 int32_t main_thread_priority
;
554 uint32_t exit_timeout
;
555 uint64_t sent_signal_time
;
557 uint32_t min_run_time
;
559 uint32_t start_interval
;
560 uint32_t peruser_suspend_count
;
565 uuid_t expected_audit_uuid
;
567 // man launchd.plist --> Debug
569 // man launchd.plist --> KeepAlive == false
571 // man launchd.plist --> SessionCreate
573 // man launchd.plist --> LowPriorityIO
575 // man launchd.plist --> InitGroups
577 /* A legacy mach_init concept to make bootstrap_create_server/service()
580 priv_port_has_senders
:1,
581 // A hack during job importing
582 importing_global_env
:1,
583 // A hack during job importing
584 importing_hard_limits
:1,
585 // man launchd.plist --> Umask
587 // A process that launchd knows about but doesn't manage.
589 // A legacy mach_init concept to detect sick jobs
591 // A job created via bootstrap_create_server()
593 // A job created via spawn_via_launchd()
595 // A legacy job that wants inetd compatible semantics
597 // A twist on inetd compatibility
599 /* An event fired and the job should start, but not necessarily right
603 // man launchd.plist --> EnableGlobbing
605 // man launchd.plist --> WaitForDebugger
607 // One-shot WaitForDebugger.
608 wait4debugger_oneshot
:1,
609 // MachExceptionHandler == true
610 internal_exc_handler
:1,
611 // A hack to support an option of spawn_via_launchd()
613 /* man launchd.plist --> LaunchOnlyOnce.
615 * Note: <rdar://problem/5465184> Rename this to "HopefullyNeverExits".
618 /* Make job_ignore() / job_watch() work. If these calls were balanced,
619 * then this wouldn't be necessary.
622 /* A job that forced all other jobs to be temporarily launch-on-
625 forced_peers_to_demand_mode
:1,
626 // man launchd.plist --> Nice
628 /* A job was asked to be unloaded/removed while running, we'll remove it
632 // job_kill() was called.
634 // Enter the kernel debugger before killing a job.
636 // A hack that launchd+launchctl use during jobmgr_t creation.
638 // man launchd.plist --> StartOnMount
640 // This job is a per-user launchd managed by the PID 1 launchd.
642 // A job thoroughly confused launchd. We need to unload it ASAP.
643 unload_at_mig_return
:1,
644 // man launchd.plist --> AbandonProcessGroup
646 /* During shutdown, do not send SIGTERM to stray processes in the
647 * process group of this job.
649 ignore_pg_at_shutdown
:1,
650 /* Don't let this job create new 'job_t' objects in launchd. Has been
651 * seriously overloaded for the purposes of sandboxing.
654 // man launchd.plist --> EnableTransactions
655 enable_transactions
:1,
656 // The job was sent SIGKILL because it was clean.
658 // The job has an OtherJobEnabled KeepAlive criterion.
660 // The job exited due to a crash.
662 // We've received NOTE_EXIT for the job and reaped it.
664 // job_stop() was called.
666 /* The job is to be kept alive continuously, but it must first get an
670 // The job is a bootstrapper.
672 // The job owns the console.
674 /* The job runs as a non-root user on embedded but has select privileges
675 * of the root user. This is SpringBoard.
678 // The job is responsible for drawing the home screen on embedded.
680 // We got NOTE_EXEC for the job.
682 // The job is an XPC service, and XPC proxy successfully exec(3)ed.
684 // The (anonymous) job called vprocmgr_switch_to_session().
686 // The job has Jetsam limits in place.
688 // The job's Jetsam memory limits should only be applied in the background
689 jetsam_memory_limit_background
:1,
690 /* This job was created as the result of a look up of a service provided
691 * by a MultipleInstance job.
693 dedicated_instance
:1,
694 // The job supports creating additional instances of itself.
695 multiple_instances
:1,
696 /* The sub-job was already removed from the parent's list of
700 /* The job is responsible for monitoring external events for this
704 // The event monitor job has retrieved the initial list of events.
705 event_monitor_ready2signal
:1,
708 // Disable ASLR when launching this job.
710 // The job is an XPC Service.
712 // The job is the Performance team's shutdown monitor.
714 // We should open a transaction for the job when shutdown begins.
716 /* The job was sent SIGKILL but did not exit in a timely fashion,
717 * indicating a kernel bug.
720 // The job is the XPC domain bootstrapper.
722 // The job is an app (on either iOS or OS X) and has different resource
725 // FairPlay decryption failed on the job. This should only ever happen
728 // The job failed to exec(3) for reasons that may be transient, so we're
729 // waiting for UserEventAgent to tell us when it's okay to try spawning
730 // again (i.e. when the executable path appears, when the UID appears,
733 // The job exited due to memory pressure.
735 // The job supports idle-exit.
737 // The job was implicitly reaped by the kernel.
740 joins_gui_session
:1,
741 low_priority_background_io
:1,
747 static size_t hash_label(const char *label
) __attribute__((pure
));
748 static size_t hash_ms(const char *msstr
) __attribute__((pure
));
749 static SLIST_HEAD(, job_s
) s_curious_jobs
;
750 static LIST_HEAD(, job_s
) managed_actives
[ACTIVE_JOB_HASH_SIZE
];
752 #define job_assumes(j, e) os_assumes_ctx(job_log_bug, j, (e))
753 #define job_assumes_zero(j, e) os_assumes_zero_ctx(job_log_bug, j, (e))
754 #define job_assumes_zero_p(j, e) posix_assumes_zero_ctx(job_log_bug, j, (e))
756 static void job_import_keys(launch_data_t obj
, const char *key
, void *context
);
757 static void job_import_bool(job_t j
, const char *key
, bool value
);
758 static void job_import_string(job_t j
, const char *key
, const char *value
);
759 static void job_import_integer(job_t j
, const char *key
, long long value
);
760 static void job_import_dictionary(job_t j
, const char *key
, launch_data_t value
);
761 static void job_import_array(job_t j
, const char *key
, launch_data_t value
);
762 static void job_import_opaque(job_t j
, const char *key
, launch_data_t value
);
763 static bool job_set_global_on_demand(job_t j
, bool val
);
764 static const char *job_active(job_t j
);
765 static void job_watch(job_t j
);
766 static void job_ignore(job_t j
);
767 static void job_reap(job_t j
);
768 static bool job_useless(job_t j
);
769 static bool job_keepalive(job_t j
);
770 static void job_dispatch_curious_jobs(job_t j
);
771 static void job_start(job_t j
);
772 static void job_start_child(job_t j
) __attribute__((noreturn
));
773 static void job_setup_attributes(job_t j
);
774 static bool job_setup_machport(job_t j
);
775 static kern_return_t
job_setup_exit_port(job_t j
);
776 static void job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
);
777 static void job_postfork_become_user(job_t j
);
778 static void job_postfork_test_user(job_t j
);
779 static void job_log_pids_with_weird_uids(job_t j
);
780 static void job_setup_exception_port(job_t j
, task_t target_task
);
781 static void job_callback(void *obj
, struct kevent
*kev
);
782 static void job_callback_proc(job_t j
, struct kevent
*kev
);
783 static void job_callback_timer(job_t j
, void *ident
);
784 static void job_callback_read(job_t j
, int ident
);
785 static void job_log_stray_pg(job_t j
);
786 static void job_log_children_without_exec(job_t j
);
787 static job_t
job_new_anonymous(jobmgr_t jm
, pid_t anonpid
) __attribute__((malloc
, nonnull
, warn_unused_result
));
788 static job_t
job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
) __attribute__((malloc
, nonnull(1,2), warn_unused_result
));
789 static job_t
job_new_alias(jobmgr_t jm
, job_t src
);
790 static job_t
job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
) __attribute__((malloc
, nonnull
, warn_unused_result
));
791 static job_t
job_new_subjob(job_t j
, uuid_t identifier
);
792 static void job_kill(job_t j
);
793 static void job_uncork_fork(job_t j
);
794 static void job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
795 static void job_log_error(job_t j
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
796 static bool job_log_bug(_SIMPLE_STRING asl_message
, void *ctx
, const char *message
);
797 static void job_log_perf_statistics(job_t j
, struct rusage_info_v1
*ri
, int64_t exit_status
);
799 static void job_log_systemstats(pid_t pid
, uint64_t uniqueid
, uint64_t parent_uniqueid
, pid_t req_pid
, uint64_t req_uniqueid
, const char *name
, struct rusage_info_v1
*ri
, int64_t exit_status
);
801 static void job_set_exception_port(job_t j
, mach_port_t port
);
802 static kern_return_t
job_mig_spawn_internal(job_t j
, vm_offset_t indata
, mach_msg_type_number_t indataCnt
, mach_port_t asport
, job_t
*outj
);
803 static void job_open_shutdown_transaction(job_t ji
);
804 static void job_close_shutdown_transaction(job_t ji
);
805 static launch_data_t
job_do_legacy_ipc_request(job_t j
, launch_data_t request
, mach_port_t asport
);
806 static void job_setup_per_user_directory(job_t j
, uid_t uid
, const char *path
);
807 static void job_setup_per_user_directories(job_t j
, uid_t uid
, const char *label
);
808 static void job_update_jetsam_properties(job_t j
, xpc_jetsam_band_t band
, uint64_t user_data
);
809 static void job_update_jetsam_memory_limit(job_t j
, int32_t limit
);
811 #if TARGET_OS_EMBEDDED
812 static bool job_import_defaults(launch_data_t pload
);
815 static struct priority_properties_t
{
818 } _launchd_priority_map
[] = {
819 { XPC_JETSAM_BAND_SUSPENDED
, JETSAM_PRIORITY_IDLE
},
820 { XPC_JETSAM_BAND_BACKGROUND_OPPORTUNISTIC
, JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC
},
821 { XPC_JETSAM_BAND_BACKGROUND
, JETSAM_PRIORITY_BACKGROUND
},
822 { XPC_JETSAM_BAND_MAIL
, JETSAM_PRIORITY_MAIL
},
823 { XPC_JETSAM_BAND_PHONE
, JETSAM_PRIORITY_PHONE
},
824 { XPC_JETSAM_BAND_UI_SUPPORT
, JETSAM_PRIORITY_UI_SUPPORT
},
825 { XPC_JETSAM_BAND_FOREGROUND_SUPPORT
, JETSAM_PRIORITY_FOREGROUND_SUPPORT
},
826 { XPC_JETSAM_BAND_FOREGROUND
, JETSAM_PRIORITY_FOREGROUND
},
827 { XPC_JETSAM_BAND_AUDIO
, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY
},
828 { XPC_JETSAM_BAND_ACCESSORY
, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY
},
829 { XPC_JETSAM_BAND_CRITICAL
, JETSAM_PRIORITY_CRITICAL
},
830 { XPC_JETSAM_BAND_TELEPHONY
, JETSAM_PRIORITY_TELEPHONY
},
833 static const struct {
836 } launchd_keys2limits
[] = {
837 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE
, RLIMIT_CORE
},
838 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU
, RLIMIT_CPU
},
839 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA
, RLIMIT_DATA
},
840 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE
, RLIMIT_FSIZE
},
841 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK
, RLIMIT_MEMLOCK
},
842 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE
, RLIMIT_NOFILE
},
843 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC
, RLIMIT_NPROC
},
844 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS
, RLIMIT_RSS
},
845 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK
, RLIMIT_STACK
},
848 static time_t cronemu(int mon
, int mday
, int hour
, int min
);
849 static time_t cronemu_wday(int wday
, int hour
, int min
);
850 static bool cronemu_mon(struct tm
*wtm
, int mon
, int mday
, int hour
, int min
);
851 static bool cronemu_mday(struct tm
*wtm
, int mday
, int hour
, int min
);
852 static bool cronemu_hour(struct tm
*wtm
, int hour
, int min
);
853 static bool cronemu_min(struct tm
*wtm
, int min
);
855 // miscellaneous file local functions
856 static size_t get_kern_max_proc(void);
857 static char **mach_cmd2argv(const char *string
);
858 static size_t our_strhash(const char *s
) __attribute__((pure
));
860 void eliminate_double_reboot(void);
862 #pragma mark XPC Domain Forward Declarations
863 static job_t
_xpc_domain_import_service(jobmgr_t jm
, launch_data_t pload
);
864 static int _xpc_domain_import_services(job_t j
, launch_data_t services
);
866 #pragma mark XPC Event Forward Declarations
867 static int xpc_event_find_channel(job_t j
, const char *stream
, struct machservice
**ms
);
868 static int xpc_event_get_event_name(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
869 static int xpc_event_set_event(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
870 static int xpc_event_copy_event(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
871 static int xpc_event_channel_check_in(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
872 static int xpc_event_channel_look_up(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
873 static int xpc_event_provider_check_in(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
874 static int xpc_event_provider_set_state(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
876 #pragma mark XPC Process Forward Declarations
877 static int xpc_process_set_jetsam_band(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
878 static int xpc_process_set_jetsam_memory_limit(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
880 // file local globals
881 static job_t _launchd_embedded_god
= NULL
;
882 static job_t _launchd_embedded_home
= NULL
;
883 static size_t total_children
;
884 static size_t total_anon_children
;
885 static mach_port_t the_exception_server
;
886 static job_t workaround_5477111
;
887 static LIST_HEAD(, job_s
) s_needing_sessions
;
888 static LIST_HEAD(, eventsystem
) _s_event_systems
;
889 static struct eventsystem
*_launchd_support_system
;
890 static job_t _launchd_event_monitor
;
891 static job_t _launchd_xpc_bootstrapper
;
892 static job_t _launchd_shutdown_monitor
;
894 #if TARGET_OS_EMBEDDED
895 static xpc_object_t _launchd_defaults_cache
;
897 mach_port_t launchd_audit_port
= MACH_PORT_DEAD
;
898 pid_t launchd_audit_session
= 0;
900 mach_port_t launchd_audit_port
= MACH_PORT_NULL
;
901 au_asid_t launchd_audit_session
= AU_DEFAUDITSID
;
904 static int s_no_hang_fd
= -1;
906 // process wide globals
907 mach_port_t inherited_bootstrap_port
;
908 jobmgr_t root_jobmgr
;
909 bool launchd_shutdown_debugging
= false;
910 bool launchd_verbose_boot
= false;
911 bool launchd_embedded_handofgod
= false;
912 bool launchd_runtime_busy_time
= false;
917 struct socketgroup
*sg
;
918 struct machservice
*ms
;
920 if (j
->currently_ignored
) {
924 job_log(j
, LOG_DEBUG
, "Ignoring...");
926 j
->currently_ignored
= true;
928 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
929 socketgroup_ignore(j
, sg
);
932 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
933 machservice_ignore(j
, ms
);
940 struct socketgroup
*sg
;
941 struct machservice
*ms
;
943 if (!j
->currently_ignored
) {
947 job_log(j
, LOG_DEBUG
, "Watching...");
949 j
->currently_ignored
= false;
951 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
952 socketgroup_watch(j
, sg
);
955 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
956 machservice_watch(j
, ms
);
965 if (unlikely(!j
->p
|| j
->stopped
|| j
->anonymous
)) {
969 #if TARGET_OS_EMBEDDED
970 if (launchd_embedded_handofgod
&& _launchd_embedded_god
) {
971 if (!_launchd_embedded_god
->username
|| !j
->username
) {
976 if (strcmp(j
->username
, _launchd_embedded_god
->username
) != 0) {
980 } else if (launchd_embedded_handofgod
) {
986 j
->sent_signal_time
= runtime_get_opaque_time();
988 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Stopping job...");
991 error
= proc_terminate(j
->p
, &sig
);
993 job_log(j
, LOG_ERR
| LOG_CONSOLE
, "Could not terminate job: %d: %s", error
, strerror(error
));
994 job_log(j
, LOG_NOTICE
| LOG_CONSOLE
, "Using fallback option to terminate job...");
995 error
= kill2(j
->p
, SIGTERM
);
997 job_log(j
, LOG_ERR
, "Could not signal job: %d: %s", error
, strerror(error
));
1006 j
->sent_sigkill
= true;
1007 j
->clean_kill
= true;
1009 /* We cannot effectively simulate an exit for jobs during the course
1010 * of a normal run. Even if we pretend that the job exited, we will
1011 * still not have gotten the receive rights associated with the
1012 * job's MachServices back, so we cannot safely respawn it.
1014 if (j
->mgr
->shutting_down
) {
1015 error
= kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, LAUNCHD_SIGKILL_TIMER
, j
);
1016 (void)job_assumes_zero_p(j
, error
);
1019 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Sent job SIGKILL.");
1022 if (j
->exit_timeout
) {
1023 error
= kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, j
->exit_timeout
, j
);
1024 (void)job_assumes_zero_p(j
, error
);
1026 job_log(j
, LOG_NOTICE
, "This job has an infinite exit timeout");
1028 job_log(j
, LOG_DEBUG
, "Sent job SIGTERM.");
1031 job_log(j
, LOG_ERR
| LOG_CONSOLE
, "Job was sent unexpected signal: %d: %s", sig
, strsignal(sig
));
1042 launch_data_t tmp
, tmp2
, tmp3
, r
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
1048 if ((tmp
= launch_data_new_string(j
->label
))) {
1049 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LABEL
);
1051 if ((tmp
= launch_data_new_string(j
->mgr
->name
))) {
1052 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
1054 if ((tmp
= launch_data_new_bool(j
->ondemand
))) {
1055 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_ONDEMAND
);
1058 long long status
= j
->last_exit_status
;
1060 status
= LAUNCH_EXITSTATUS_FAIRPLAY_FAIL
;
1062 if ((tmp
= launch_data_new_integer(status
))) {
1063 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LASTEXITSTATUS
);
1066 if (j
->p
&& (tmp
= launch_data_new_integer(j
->p
))) {
1067 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PID
);
1069 if ((tmp
= launch_data_new_integer(j
->timeout
))) {
1070 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_TIMEOUT
);
1072 if (j
->prog
&& (tmp
= launch_data_new_string(j
->prog
))) {
1073 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAM
);
1075 if (j
->stdinpath
&& (tmp
= launch_data_new_string(j
->stdinpath
))) {
1076 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDINPATH
);
1078 if (j
->stdoutpath
&& (tmp
= launch_data_new_string(j
->stdoutpath
))) {
1079 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDOUTPATH
);
1081 if (j
->stderrpath
&& (tmp
= launch_data_new_string(j
->stderrpath
))) {
1082 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDERRORPATH
);
1084 if (likely(j
->argv
) && (tmp
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
1087 for (i
= 0; i
< j
->argc
; i
++) {
1088 if ((tmp2
= launch_data_new_string(j
->argv
[i
]))) {
1089 launch_data_array_set_index(tmp
, tmp2
, i
);
1093 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
);
1096 if (j
->enable_transactions
&& (tmp
= launch_data_new_bool(true))) {
1097 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_ENABLETRANSACTIONS
);
1100 if (j
->session_create
&& (tmp
= launch_data_new_bool(true))) {
1101 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SESSIONCREATE
);
1104 if (j
->inetcompat
&& (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
1105 if ((tmp2
= launch_data_new_bool(j
->inetcompat_wait
))) {
1106 launch_data_dict_insert(tmp
, tmp2
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
);
1108 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
);
1111 if (!SLIST_EMPTY(&j
->sockets
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
1112 struct socketgroup
*sg
;
1115 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
1116 if ((tmp2
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
1117 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
1118 if ((tmp3
= launch_data_new_fd(sg
->fds
[i
]))) {
1119 launch_data_array_set_index(tmp2
, tmp3
, i
);
1122 launch_data_dict_insert(tmp
, tmp2
, sg
->name
);
1126 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SOCKETS
);
1129 if (!SLIST_EMPTY(&j
->machservices
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
1130 struct machservice
*ms
;
1134 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
1137 tmp3
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
1140 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
1141 launch_data_dict_insert(tmp3
, tmp2
, ms
->name
);
1144 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
1145 launch_data_dict_insert(tmp
, tmp2
, ms
->name
);
1149 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_MACHSERVICES
);
1152 launch_data_dict_insert(r
, tmp3
, LAUNCH_JOBKEY_PERJOBMACHSERVICES
);
1160 jobmgr_log_active_jobs(jobmgr_t jm
)
1162 const char *why_active
;
1166 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
1167 jobmgr_log_active_jobs(jmi
);
1170 int level
= LOG_DEBUG
;
1172 level
|= LOG_CONSOLE
;
1175 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
1176 if ((why_active
= job_active(ji
))) {
1178 job_log(ji
, level
, "%s", why_active
);
1181 (void)proc_get_dirty(ji
->p
, &flags
);
1182 if (!(flags
& PROC_DIRTY_TRACKED
)) {
1186 char *dirty
= "clean";
1187 if (flags
& PROC_DIRTY_IS_DIRTY
) {
1191 char *idle_exit
= "idle-exit unsupported";
1192 if (flags
& PROC_DIRTY_ALLOWS_IDLE_EXIT
) {
1193 idle_exit
= "idle-exit supported";
1196 job_log(ji
, level
, "Killability: %s/%s", dirty
, idle_exit
);
1203 jobmgr_still_alive_with_check(jobmgr_t jm
)
1205 int level
= LOG_DEBUG
;
1207 level
|= LOG_CONSOLE
;
1210 jobmgr_log(jm
, level
, "Still alive with %lu/%lu (normal/anonymous) children.", total_children
, total_anon_children
);
1211 jobmgr_log_active_jobs(jm
);
1216 jobmgr_shutdown(jobmgr_t jm
)
1219 jobmgr_log(jm
, LOG_DEBUG
, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm
->reboot_flags
));
1221 jm
->shutdown_time
= runtime_get_wall_time() / USEC_PER_SEC
;
1224 (void)localtime_r(&jm
->shutdown_time
, &curtime
);
1227 (void)asctime_r(&curtime
, date
);
1228 // Trim the new line that asctime_r(3) puts there for some reason.
1231 if (jm
== root_jobmgr
&& pid1_magic
) {
1232 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Userspace shutdown begun at: %s", date
);
1234 jobmgr_log(jm
, LOG_DEBUG
, "Job manager shutdown begun at: %s", date
);
1237 jm
->shutting_down
= true;
1239 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
1240 jobmgr_shutdown(jmi
);
1243 if (!jm
->parentmgr
) {
1245 // Spawn the shutdown monitor.
1246 if (_launchd_shutdown_monitor
&& !_launchd_shutdown_monitor
->p
) {
1247 job_log(_launchd_shutdown_monitor
, LOG_NOTICE
| LOG_CONSOLE
, "Starting shutdown monitor.");
1248 job_dispatch(_launchd_shutdown_monitor
, true);
1252 (void)jobmgr_assumes_zero_p(jm
, kevent_mod((uintptr_t)jm
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, 5, jm
));
1255 return jobmgr_do_garbage_collection(jm
);
1259 jobmgr_remove(jobmgr_t jm
)
1264 jobmgr_log(jm
, LOG_DEBUG
, "Removing job manager.");
1265 if (!SLIST_EMPTY(&jm
->submgrs
)) {
1267 while ((jmi
= SLIST_FIRST(&jm
->submgrs
))) {
1272 (void)jobmgr_assumes_zero(jm
, cnt
);
1275 while ((ji
= LIST_FIRST(&jm
->jobs
))) {
1276 if (!ji
->anonymous
&& ji
->p
!= 0) {
1277 job_log(ji
, LOG_ERR
, "Job is still active at job manager teardown.");
1284 struct waiting4attach
*w4ai
= NULL
;
1285 while ((w4ai
= LIST_FIRST(&jm
->attaches
))) {
1286 waiting4attach_delete(jm
, w4ai
);
1290 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(jm
->req_port
));
1293 (void)jobmgr_assumes_zero(jm
, launchd_mport_close_recv(jm
->jm_port
));
1296 if (jm
->req_bsport
) {
1297 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(jm
->req_bsport
));
1299 if (jm
->req_excport
) {
1300 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(jm
->req_excport
));
1302 if (MACH_PORT_VALID(jm
->req_asport
)) {
1303 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(jm
->req_asport
));
1305 if (jm
->req_rport
) {
1306 kern_return_t kr
= xpc_call_wakeup(jm
->req_rport
, jm
->error
);
1307 if (!(kr
== KERN_SUCCESS
|| kr
== MACH_SEND_INVALID_DEST
)) {
1308 /* If the originator went away, the reply port will be a dead name,
1309 * and we expect this to fail.
1311 (void)jobmgr_assumes_zero(jm
, kr
);
1315 (void)jobmgr_assumes_zero(jm
, vm_deallocate(mach_task_self(), jm
->req_ctx
, jm
->req_ctx_sz
));
1318 time_t ts
= runtime_get_wall_time() / USEC_PER_SEC
;
1320 (void)localtime_r(&ts
, &curtime
);
1323 (void)asctime_r(&curtime
, date
);
1326 time_t delta
= ts
- jm
->shutdown_time
;
1327 if (jm
== root_jobmgr
&& pid1_magic
) {
1328 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Userspace shutdown finished at: %s", date
);
1329 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Userspace shutdown took approximately %ld second%s.", delta
, (delta
!= 1) ? "s" : "");
1331 jobmgr_log(jm
, LOG_DEBUG
, "Job manager shutdown finished at: %s", date
);
1332 jobmgr_log(jm
, LOG_DEBUG
, "Job manager shutdown took approximately %ld second%s.", delta
, (delta
!= 1) ? "s" : "");
1335 if (jm
->parentmgr
) {
1336 runtime_del_weak_ref();
1337 SLIST_REMOVE(&jm
->parentmgr
->submgrs
, jm
, jobmgr_s
, sle
);
1339 // Hack for the guest user so that its stuff doesn't persist.
1341 // <rdar://problem/14527875>
1342 if (strcmp(jm
->name
, VPROCMGR_SESSION_AQUA
) == 0 && getuid() == 201) {
1345 } else if (pid1_magic
) {
1346 eliminate_double_reboot();
1347 launchd_log_vm_stats();
1348 jobmgr_log_stray_children(jm
, true);
1349 jobmgr_log(root_jobmgr
, LOG_NOTICE
| LOG_CONSOLE
, "About to call: reboot(%s).", reboot_flags_to_C_names(jm
->reboot_flags
));
1351 (void)jobmgr_assumes_zero_p(jm
, reboot(jm
->reboot_flags
));
1353 jobmgr_log(jm
, LOG_DEBUG
, "About to exit");
1364 struct waiting_for_removal
*w4r
;
1365 struct calendarinterval
*ci
;
1366 struct semaphoreitem
*si
;
1367 struct socketgroup
*sg
;
1368 struct machservice
*ms
;
1369 struct limititem
*li
;
1373 /* HACK: Egregious code duplication. But as with machservice_delete(),
1374 * job aliases can't (and shouldn't) have any complex behaviors
1375 * associated with them.
1377 while ((ms
= SLIST_FIRST(&j
->machservices
))) {
1378 machservice_delete(j
, ms
, false);
1381 LIST_REMOVE(j
, sle
);
1382 LIST_REMOVE(j
, label_hash_sle
);
1387 #if TARGET_OS_EMBEDDED
1388 if (launchd_embedded_handofgod
&& _launchd_embedded_god
) {
1389 if (!(_launchd_embedded_god
->username
&& j
->username
)) {
1394 if (strcmp(j
->username
, _launchd_embedded_god
->username
) != 0) {
1398 } else if (launchd_embedded_handofgod
) {
1404 /* Do this BEFORE we check and see whether the job is still active. If we're
1405 * a sub-job, we're being removed due to the parent job removing us.
1406 * Therefore, the parent job will free itself after this call completes. So
1407 * if we defer removing ourselves from the parent's list, we'll crash when
1408 * we finally get around to it.
1410 if (j
->dedicated_instance
&& !j
->former_subjob
) {
1411 LIST_REMOVE(j
, subjob_sle
);
1412 j
->former_subjob
= true;
1415 if (unlikely(j
->p
)) {
1419 job_log(j
, LOG_DEBUG
, "Removal pended until the job exits");
1421 if (!j
->removal_pending
) {
1422 j
->removal_pending
= true;
1432 job_dispatch_curious_jobs(j
);
1435 ipc_close_all_with_job(j
);
1437 if (j
->forced_peers_to_demand_mode
) {
1438 job_set_global_on_demand(j
, false);
1441 if (job_assumes_zero(j
, j
->fork_fd
)) {
1442 (void)posix_assumes_zero(runtime_close(j
->fork_fd
));
1446 (void)posix_assumes_zero(runtime_close(j
->stdin_fd
));
1450 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->j_port
));
1453 while ((sg
= SLIST_FIRST(&j
->sockets
))) {
1454 socketgroup_delete(j
, sg
);
1456 while ((ci
= SLIST_FIRST(&j
->cal_intervals
))) {
1457 calendarinterval_delete(j
, ci
);
1459 while ((ei
= SLIST_FIRST(&j
->env
))) {
1460 envitem_delete(j
, ei
, false);
1462 while ((ei
= SLIST_FIRST(&j
->global_env
))) {
1463 envitem_delete(j
, ei
, true);
1465 while ((li
= SLIST_FIRST(&j
->limits
))) {
1466 limititem_delete(j
, li
);
1468 while ((ms
= SLIST_FIRST(&j
->machservices
))) {
1469 machservice_delete(j
, ms
, false);
1471 while ((si
= SLIST_FIRST(&j
->semaphores
))) {
1472 semaphoreitem_delete(j
, si
);
1474 while ((w4r
= SLIST_FIRST(&j
->removal_watchers
))) {
1475 waiting4removal_delete(j
, w4r
);
1478 struct externalevent
*eei
= NULL
;
1479 while ((eei
= LIST_FIRST(&j
->events
))) {
1480 externalevent_delete(eei
);
1483 if (j
->event_monitor
) {
1484 _launchd_event_monitor
= NULL
;
1486 if (j
->xpc_bootstrapper
) {
1487 _launchd_xpc_bootstrapper
= NULL
;
1499 if (j
->workingdir
) {
1500 free(j
->workingdir
);
1511 if (j
->stdoutpath
) {
1512 free(j
->stdoutpath
);
1514 if (j
->stderrpath
) {
1515 free(j
->stderrpath
);
1517 if (j
->alt_exc_handler
) {
1518 free(j
->alt_exc_handler
);
1520 if (j
->cfbundleidentifier
) {
1521 free(j
->cfbundleidentifier
);
1524 if (j
->seatbelt_profile
) {
1525 free(j
->seatbelt_profile
);
1527 if (j
->container_identifier
) {
1528 free(j
->container_identifier
);
1532 if (j
->quarantine_data
) {
1533 free(j
->quarantine_data
);
1539 if (j
->start_interval
) {
1540 runtime_del_weak_ref();
1541 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
));
1543 if (j
->exit_timeout
) {
1544 /* If this fails, it just means the timer's already fired, so no need to
1545 * wrap it in an assumes() macro.
1547 (void)kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
1549 if (j
->asport
!= MACH_PORT_NULL
) {
1550 (void)job_assumes_zero(j
, launchd_mport_deallocate(j
->asport
));
1552 if (!uuid_is_null(j
->expected_audit_uuid
)) {
1553 LIST_REMOVE(j
, needing_session_sle
);
1555 if (j
->embedded_god
) {
1556 _launchd_embedded_god
= NULL
;
1558 if (j
->embedded_home
) {
1559 _launchd_embedded_home
= NULL
;
1561 if (j
->shutdown_monitor
) {
1562 _launchd_shutdown_monitor
= NULL
;
1565 (void)kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
1567 LIST_REMOVE(j
, sle
);
1568 LIST_REMOVE(j
, label_hash_sle
);
1572 LIST_FOREACH_SAFE(ji
, &j
->subjobs
, subjob_sle
, jit
) {
1576 job_log(j
, LOG_DEBUG
, "Removed");
1578 j
->kqjob_callback
= (kq_callback
)0x8badf00d;
1583 socketgroup_setup(launch_data_t obj
, const char *key
, void *context
)
1585 launch_data_t tmp_oai
;
1587 size_t i
, fd_cnt
= 1;
1590 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
1591 fd_cnt
= launch_data_array_get_count(obj
);
1594 fds
= alloca(fd_cnt
* sizeof(int));
1596 for (i
= 0; i
< fd_cnt
; i
++) {
1597 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
1598 tmp_oai
= launch_data_array_get_index(obj
, i
);
1603 fds
[i
] = launch_data_get_fd(tmp_oai
);
1606 socketgroup_new(j
, key
, fds
, fd_cnt
);
1608 ipc_revoke_fds(obj
);
1612 job_set_global_on_demand(job_t j
, bool val
)
1614 if (j
->forced_peers_to_demand_mode
&& val
) {
1616 } else if (!j
->forced_peers_to_demand_mode
&& !val
) {
1620 if ((j
->forced_peers_to_demand_mode
= val
)) {
1621 j
->mgr
->global_on_demand_cnt
++;
1623 j
->mgr
->global_on_demand_cnt
--;
1626 if (j
->mgr
->global_on_demand_cnt
== 0) {
1627 jobmgr_dispatch_all(j
->mgr
, false);
1634 job_setup_machport(job_t j
)
1636 if (job_assumes_zero(j
, launchd_mport_create_recv(&j
->j_port
)) != KERN_SUCCESS
) {
1640 if (job_assumes_zero(j
, runtime_add_mport(j
->j_port
, job_server
)) != KERN_SUCCESS
) {
1644 if (job_assumes_zero(j
, launchd_mport_notify_req(j
->j_port
, MACH_NOTIFY_NO_SENDERS
)) != KERN_SUCCESS
) {
1645 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->j_port
));
1651 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->j_port
));
1657 job_setup_exit_port(job_t j
)
1659 kern_return_t kr
= launchd_mport_create_recv(&j
->exit_status_port
);
1660 if (job_assumes_zero(j
, kr
) != KERN_SUCCESS
) {
1661 return MACH_PORT_NULL
;
1664 struct mach_port_limits limits
= {
1667 kr
= mach_port_set_attributes(mach_task_self(), j
->exit_status_port
, MACH_PORT_LIMITS_INFO
, (mach_port_info_t
)&limits
, sizeof(limits
));
1668 (void)job_assumes_zero(j
, kr
);
1670 kr
= launchd_mport_make_send_once(j
->exit_status_port
, &j
->exit_status_dest
);
1671 if (job_assumes_zero(j
, kr
) != KERN_SUCCESS
) {
1672 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->exit_status_port
));
1673 j
->exit_status_port
= MACH_PORT_NULL
;
1680 job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
)
1682 const char **argv
= (const char **)mach_cmd2argv(cmd
);
1689 jr
= job_new(j
->mgr
, AUTO_PICK_LEGACY_LABEL
, NULL
, argv
);
1692 // Job creation can be denied during shutdown.
1693 if (unlikely(jr
== NULL
)) {
1699 jr
->legacy_mach_job
= true;
1700 jr
->abandon_pg
= true;
1701 jr
->priv_port_has_senders
= true; // the IPC that called us will make-send on this port
1703 if (!job_setup_machport(jr
)) {
1707 job_log(jr
, LOG_INFO
, "Legacy%s server created", ond
? " on-demand" : "");
1719 job_new_anonymous(jobmgr_t jm
, pid_t anonpid
)
1721 struct proc_bsdshortinfo proc
;
1722 bool shutdown_state
;
1723 job_t jp
= NULL
, jr
= NULL
;
1724 uid_t kp_euid
, kp_uid
, kp_svuid
;
1725 gid_t kp_egid
, kp_gid
, kp_svgid
;
1732 if (anonpid
>= 100000) {
1733 /* The kernel current defines PID_MAX to be 99999, but that define isn't
1736 launchd_syslog(LOG_WARNING
, "Did PID_MAX change? Got request from PID: %d", anonpid
);
1741 /* libproc returns the number of bytes written into the buffer upon success,
1742 * zero on failure. I'd much rather it return -1 on failure, like sysctl(3).
1744 if (proc_pidinfo(anonpid
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
1745 if (errno
!= ESRCH
) {
1746 (void)jobmgr_assumes_zero(jm
, errno
);
1751 if (proc
.pbsi_comm
[0] == '\0') {
1752 launchd_syslog(LOG_WARNING
, "Blank command for PID: %d", anonpid
);
1757 if (unlikely(proc
.pbsi_status
== SZOMB
)) {
1758 jobmgr_log(jm
, LOG_DEBUG
, "Tried to create an anonymous job for zombie PID %u: %s", anonpid
, proc
.pbsi_comm
);
1761 if (unlikely(proc
.pbsi_flags
& P_SUGID
)) {
1762 jobmgr_log(jm
, LOG_DEBUG
, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid
, proc
.pbsi_comm
);
1765 kp_euid
= proc
.pbsi_uid
;
1766 kp_uid
= proc
.pbsi_ruid
;
1767 kp_svuid
= proc
.pbsi_svuid
;
1768 kp_egid
= proc
.pbsi_gid
;
1769 kp_gid
= proc
.pbsi_rgid
;
1770 kp_svgid
= proc
.pbsi_svgid
;
1772 if (unlikely(kp_euid
!= kp_uid
|| kp_euid
!= kp_svuid
|| kp_uid
!= kp_svuid
|| kp_egid
!= kp_gid
|| kp_egid
!= kp_svgid
|| kp_gid
!= kp_svgid
)) {
1773 jobmgr_log(jm
, LOG_DEBUG
, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
1774 kp_euid
, kp_uid
, kp_svuid
, kp_egid
, kp_gid
, kp_svgid
, anonpid
, proc
.pbsi_comm
);
1777 /* "Fix" for when the kernel turns the process tree into a weird, cyclic
1780 * See <rdar://problem/7264615> for the symptom and <rdar://problem/5020256>
1781 * as to why this can happen.
1783 if ((pid_t
)proc
.pbsi_ppid
== anonpid
) {
1784 jobmgr_log(jm
, LOG_WARNING
, "Process has become its own parent through ptrace(3). Ignoring: %s", proc
.pbsi_comm
);
1789 /* HACK: Normally, job_new() returns an error during shutdown, but anonymous
1790 * jobs can pop up during shutdown and need to talk to us.
1792 if (unlikely(shutdown_state
= jm
->shutting_down
)) {
1793 jm
->shutting_down
= false;
1796 // We only set requestor_pid for XPC domains.
1797 const char *whichlabel
= (jm
->req_pid
== anonpid
) ? AUTO_PICK_XPC_LABEL
: AUTO_PICK_ANONYMOUS_LABEL
;
1798 if ((jr
= job_new(jm
, whichlabel
, proc
.pbsi_comm
, NULL
))) {
1799 u_int proc_fflags
= NOTE_EXEC
|NOTE_FORK
|NOTE_EXIT
;
1801 total_anon_children
++;
1802 jr
->anonymous
= true;
1805 // Anonymous process reaping is messy.
1806 LIST_INSERT_HEAD(&jm
->active_jobs
[ACTIVE_JOB_HASH(jr
->p
)], jr
, pid_hash_sle
);
1808 if (unlikely(kevent_mod(jr
->p
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
) == -1)) {
1809 if (errno
!= ESRCH
) {
1810 (void)job_assumes_zero(jr
, errno
);
1813 // Zombies interact weirdly with kevent(3).
1814 job_log(jr
, LOG_ERR
, "Failed to add kevent for PID %u. Will unload at MIG return", jr
->p
);
1815 jr
->unload_at_mig_return
= true;
1818 if (unlikely(shutdown_state
)) {
1819 job_log(jr
, LOG_APPLEONLY
, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
1822 job_log(jr
, LOG_DEBUG
, "Created PID %u anonymously by PPID %u%s%s", anonpid
, proc
.pbsi_ppid
, jp
? ": " : "", jp
? jp
->label
: "");
1824 (void)os_assumes_zero(errno
);
1827 // Undo our hack from above.
1828 if (unlikely(shutdown_state
)) {
1829 jm
->shutting_down
= true;
1832 /* This is down here to prevent infinite recursion due to a process
1833 * attaching to its parent through ptrace(3) -- causing a cycle in the
1834 * process tree and thereby not making it a tree anymore. We need to make
1835 * sure that the anonymous job has been added to the process list so that
1836 * we'll find the tracing parent PID of the parent process, which is the
1837 * child, when we go looking for it in jobmgr_find_by_pid().
1839 * <rdar://problem/7264615>
1841 switch (proc
.pbsi_ppid
) {
1851 jp
= jobmgr_find_by_pid(jm
, proc
.pbsi_ppid
, true);
1852 if (jobmgr_assumes(jm
, jp
!= NULL
)) {
1853 if (jp
&& !jp
->anonymous
&& unlikely(!(proc
.pbsi_flags
& P_EXEC
))) {
1854 job_log(jp
, LOG_DEBUG
, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc
.pbsi_pid
);
job_t
job_new_subjob(job_t j, uuid_t identifier)
{
	/* Clone `j` into a dedicated per-instance sub-job whose label is
	 * "<parent-label>.<uuid>". Copies scalar settings, argv, plist-declared
	 * MachServices, local environment, paths, and the binary-preference
	 * list. Returns the new job (linked into the manager, label hash, and
	 * the parent's subjob list) or NULL on allocation failure.
	 *
	 * Fixes relative to the previous revision:
	 *  - stdoutpath was duplicated from j->stdinpath (copy/paste bug).
	 *  - j_binpref was copied with memcpy(&nj->j_binpref, &j->j_binpref, sz),
	 *    overwriting the pointer variables themselves (corrupting the job
	 *    structs whenever sz > sizeof(void *)) instead of the buffers.
	 */
	uuid_string_t idstr;
	uuid_unparse(identifier, idstr);
	// snprintf(NULL, 0, ...) is the standard way to measure formatted length.
	size_t label_sz = snprintf(NULL, 0, "%s.%s", j->label, idstr);

	// Label storage lives in the flexible tail of struct job_s.
	job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
	if (nj != NULL) {
		nj->kqjob_callback = job_callback;
		nj->original = j;
		nj->mgr = j->mgr;
		nj->min_run_time = j->min_run_time;
		nj->timeout = j->timeout;
		nj->exit_timeout = j->exit_timeout;

		snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);

		// Set all our simple Booleans that are applicable.
		nj->debug = j->debug;
		nj->ondemand = j->ondemand;
		nj->checkedin = true;
		nj->low_pri_io = j->low_pri_io;
		nj->setmask = j->setmask;
		nj->wait4debugger = j->wait4debugger;
		nj->internal_exc_handler = j->internal_exc_handler;
		nj->setnice = j->setnice;
		nj->abandon_pg = j->abandon_pg;
		nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
		nj->deny_job_creation = j->deny_job_creation;
		nj->enable_transactions = j->enable_transactions;
		nj->needs_kickoff = j->needs_kickoff;
		nj->currently_ignored = true;
		nj->dedicated_instance = true;
		nj->xpc_service = j->xpc_service;
		nj->xpc_bootstrapper = j->xpc_bootstrapper;
		nj->jetsam_priority = j->jetsam_priority;
		nj->jetsam_memlimit = j->jetsam_memlimit;
		nj->psproctype = j->psproctype;

		nj->mask = j->mask;
		uuid_copy(nj->instance_id, identifier);

		// These jobs are purely on-demand Mach jobs.
		// {Hard | Soft}ResourceLimits are not supported.
		// JetsamPriority is not supported.

		if (j->prog) {
			nj->prog = strdup(j->prog);
		}

		if (j->argv) {
			size_t sz = malloc_size(j->argv);
			nj->argv = (char **)malloc(sz);
			if (nj->argv != NULL) {
				// This is the start of our strings.
				char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));

				size_t i = 0;
				for (i = 0; i < j->argc; i++) {
					(void)strcpy(p, j->argv[i]);
					nj->argv[i] = p;
					p += (strlen(j->argv[i]) + 1);
				}
				nj->argv[i] = NULL;
			} else {
				(void)job_assumes_zero(nj, errno);
			}

			nj->argc = j->argc;
		}

		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			/* Only copy MachServices that were actually declared in the plist.
			 * So skip over per-PID ones and ones that were created via
			 * bootstrap_register().
			 */
			if (msi->upfront) {
				mach_port_t mp = MACH_PORT_NULL;
				struct machservice *msj = machservice_new(nj, msi->name, &mp, false);
				if (msj != NULL) {
					msj->reset = msi->reset;
					msj->delete_on_destruction = msi->delete_on_destruction;
					msj->drain_one_on_crash = msi->drain_one_on_crash;
					msj->drain_all_on_crash = msi->drain_all_on_crash;

					kern_return_t kr = mach_port_set_attributes(mach_task_self(), msj->port, MACH_PORT_TEMPOWNER, NULL, 0);
					(void)job_assumes_zero(j, kr);
				} else {
					(void)job_assumes_zero(nj, errno);
				}
			}
		}

		// We ignore global environment variables.
		struct envitem *ei = NULL;
		SLIST_FOREACH(ei, &j->env, sle) {
			if (envitem_new(nj, ei->key, ei->value, false)) {
				(void)job_assumes_zero(nj, errno);
			}
		}
		uuid_string_t val;
		uuid_unparse(identifier, val);
		if (envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false)) {
			(void)job_assumes_zero(nj, errno);
		}

		if (j->rootdir) {
			nj->rootdir = strdup(j->rootdir);
		}
		if (j->workingdir) {
			nj->workingdir = strdup(j->workingdir);
		}
		if (j->username) {
			nj->username = strdup(j->username);
		}
		if (j->groupname) {
			nj->groupname = strdup(j->groupname);
		}

		/* FIXME: We shouldn't redirect all the output from these jobs to the
		 * same file. We should uniquify the file names. But this hasn't shown
		 * to be a problem in practice.
		 */
		if (j->stdinpath) {
			nj->stdinpath = strdup(j->stdinpath);
		}
		if (j->stdoutpath) {
			// Bug fix: previously duplicated j->stdinpath here.
			nj->stdoutpath = strdup(j->stdoutpath);
		}
		if (j->stderrpath) {
			nj->stderrpath = strdup(j->stderrpath);
		}
		if (j->alt_exc_handler) {
			nj->alt_exc_handler = strdup(j->alt_exc_handler);
		}
		if (j->cfbundleidentifier) {
			nj->cfbundleidentifier = strdup(j->cfbundleidentifier);
		}
#if HAVE_SANDBOX
		if (j->seatbelt_profile) {
			nj->seatbelt_profile = strdup(j->seatbelt_profile);
		}
		if (j->container_identifier) {
			nj->container_identifier = strdup(j->container_identifier);
		}
#endif

#if HAVE_QUARANTINE
		if (j->quarantine_data) {
			nj->quarantine_data = strdup(j->quarantine_data);
		}
		nj->quarantine_data_sz = j->quarantine_data_sz;
#endif

		if (j->j_binpref) {
			size_t sz = malloc_size(j->j_binpref);
			nj->j_binpref = (cpu_type_t *)malloc(sz);
			if (nj->j_binpref) {
				// Bug fix: copy the buffer contents, not the pointer storage.
				memcpy(nj->j_binpref, j->j_binpref, sz);
			} else {
				(void)job_assumes_zero(nj, errno);
			}
		}

		if (j->asport != MACH_PORT_NULL) {
			(void)job_assumes_zero(nj, launchd_mport_copy_send(j->asport));
			nj->asport = j->asport;
		}

		LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);

		jobmgr_t where2put = root_jobmgr;
		if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
			where2put = j->mgr;
		}
		LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
		LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
	} else {
		(void)os_assumes_zero(errno);
	}

	return nj;
}
2049 job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
)
2051 const char *const *argv_tmp
= argv
;
2052 char tmp_path
[PATH_MAX
];
2053 char auto_label
[1000];
2054 const char *bn
= NULL
;
2056 size_t minlabel_len
;
2060 __OS_COMPILETIME_ASSERT__(offsetof(struct job_s
, kqjob_callback
) == 0);
2062 if (unlikely(jm
->shutting_down
)) {
2067 if (unlikely(prog
== NULL
&& argv
== NULL
)) {
2072 /* I'd really like to redo this someday. Anonymous jobs carry all the
2073 * baggage of managed jobs with them, even though most of it is unused.
2074 * Maybe when we have Objective-C objects in libSystem, there can be a base
2075 * job type that anonymous and managed jobs inherit from...
2077 char *anon_or_legacy
= (label
== AUTO_PICK_ANONYMOUS_LABEL
) ? "anonymous" : "mach_init";
2078 if (unlikely(label
== AUTO_PICK_LEGACY_LABEL
|| label
== AUTO_PICK_ANONYMOUS_LABEL
)) {
2082 strlcpy(tmp_path
, argv
[0], sizeof(tmp_path
));
2083 // prog for auto labels is kp.kp_kproc.p_comm.
2084 bn
= basename(tmp_path
);
2087 (void)snprintf(auto_label
, sizeof(auto_label
), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy
, bn
);
2089 /* This is so we can do gross things later. See NOTE_EXEC for anonymous
2092 minlabel_len
= strlen(label
) + MAXCOMLEN
;
2094 if (label
== AUTO_PICK_XPC_LABEL
) {
2095 minlabel_len
= snprintf(auto_label
, sizeof(auto_label
), "com.apple.xpc.domain-owner.%s", jm
->owner
);
2097 minlabel_len
= strlen(label
);
2101 j
= calloc(1, sizeof(struct job_s
) + minlabel_len
+ 1);
2104 (void)os_assumes_zero(errno
);
2108 if (unlikely(label
== auto_label
)) {
2109 (void)snprintf((char *)j
->label
, strlen(label
) + 1, "%p.%s.%s", j
, anon_or_legacy
, bn
);
2111 (void)strcpy((char *)j
->label
, (label
== AUTO_PICK_XPC_LABEL
) ? auto_label
: label
);
2114 j
->kqjob_callback
= job_callback
;
2116 j
->min_run_time
= LAUNCHD_MIN_JOB_RUN_TIME
;
2117 j
->timeout
= RUNTIME_ADVISABLE_IDLE_TIMEOUT
;
2118 j
->exit_timeout
= LAUNCHD_DEFAULT_EXIT_TIMEOUT
;
2119 j
->currently_ignored
= true;
2121 j
->checkedin
= true;
2122 j
->jetsam_priority
= DEFAULT_JETSAM_PRIORITY
;
2123 j
->jetsam_memlimit
= -1;
2124 uuid_clear(j
->expected_audit_uuid
);
2125 #if TARGET_OS_EMBEDDED
2126 /* Run embedded daemons as background by default. SpringBoard jobs are
2127 * Interactive by default. Unfortunately, so many daemons have opted into
2128 * this priority band that its usefulness is highly questionable.
2130 * See <rdar://problem/9539873>.
2132 * Also ensure that daemons have a default memory highwatermark unless
2133 * otherwise specified, as per <rdar://problem/10307814>.
2135 if (launchd_embedded_handofgod
) {
2136 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_APP_DEFAULT
;
2139 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND
;
2140 j
->jetsam_memlimit
= DEFAULT_JETSAM_DAEMON_HIGHWATERMARK
;
2143 /* Jobs on OS X that just come from disk are "standard" by default so that
2144 * third-party daemons/agents don't encounter unexpected throttling.
2146 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD
;
2150 j
->prog
= strdup(prog
);
2152 (void)os_assumes_zero(errno
);
2158 while (*argv_tmp
++) {
2162 for (i
= 0; i
< j
->argc
; i
++) {
2163 cc
+= strlen(argv
[i
]) + 1;
2166 j
->argv
= malloc((j
->argc
+ 1) * sizeof(char *) + cc
);
2168 (void)job_assumes_zero(j
, errno
);
2172 co
= ((char *)j
->argv
) + ((j
->argc
+ 1) * sizeof(char *));
2174 for (i
= 0; i
< j
->argc
; i
++) {
2176 (void)strcpy(co
, argv
[i
]);
2177 co
+= strlen(argv
[i
]) + 1;
2182 // Sssshhh... don't tell anyone.
2183 if (strcmp(j
->label
, "com.apple.WindowServer") == 0) {
2184 j
->has_console
= true;
2187 LIST_INSERT_HEAD(&jm
->jobs
, j
, sle
);
2189 jobmgr_t where2put_label
= root_jobmgr
;
2190 if (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) {
2191 where2put_label
= j
->mgr
;
2193 LIST_INSERT_HEAD(&where2put_label
->label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
2194 uuid_clear(j
->expected_audit_uuid
);
2196 job_log(j
, LOG_DEBUG
, "Conceived");
2210 job_new_alias(jobmgr_t jm
, job_t src
)
2212 if (job_find(jm
, src
->label
)) {
2217 job_t j
= calloc(1, sizeof(struct job_s
) + strlen(src
->label
) + 1);
2219 (void)os_assumes_zero(errno
);
2223 (void)strcpy((char *)j
->label
, src
->label
);
2224 LIST_INSERT_HEAD(&jm
->jobs
, j
, sle
);
2225 LIST_INSERT_HEAD(&jm
->label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
2226 /* Bad jump address. The kqueue callback for aliases should never be
2229 j
->kqjob_callback
= (kq_callback
)0xfa1afe1;
2233 struct machservice
*msi
= NULL
;
2234 SLIST_FOREACH(msi
, &src
->machservices
, sle
) {
2235 if (!machservice_new_alias(j
, msi
)) {
2236 jobmgr_log(jm
, LOG_ERR
, "Failed to alias job: %s", src
->label
);
2245 job_log(j
, LOG_DEBUG
, "Aliased service into domain: %s", jm
->name
);
2252 job_import(launch_data_t pload
)
2254 #if TARGET_OS_EMBEDDED
2255 /* If this is the special payload of default values, handle it here */
2256 if (unlikely(launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_DEFAULTS
))) {
2257 job_import_defaults(pload
);
2262 job_t j
= jobmgr_import2(root_jobmgr
, pload
);
2264 if (unlikely(j
== NULL
)) {
2268 /* Since jobs are effectively stalled until they get security sessions
2269 * assigned to them, we may wish to reconsider this behavior of calling the
2270 * job "enabled" as far as other jobs with the OtherJobEnabled KeepAlive
2273 job_dispatch_curious_jobs(j
);
2274 return job_dispatch(j
, false);
2277 #if TARGET_OS_EMBEDDED
2280 job_import_defaults(launch_data_t pload
)
2282 bool result
= false;
2283 xpc_object_t xd
= NULL
, defaults
;
2285 if (_launchd_defaults_cache
) {
2286 xpc_release(_launchd_defaults_cache
);
2287 _launchd_defaults_cache
= NULL
;
2291 if (!xd
|| xpc_get_type(xd
) != XPC_TYPE_DICTIONARY
) {
2295 defaults
= xpc_dictionary_get_value(xd
, LAUNCHD_JOB_DEFAULTS
);
2296 if (!defaults
|| xpc_get_type(defaults
) != XPC_TYPE_DICTIONARY
) {
2300 _launchd_defaults_cache
= xpc_copy(defaults
);
2311 job_apply_defaults(job_t j
) {
2312 const char *test_prefix
= "com.apple.test.";
2314 char *sb_prefix_end
, *sb_suffix_start
;
2315 char true_job_label
[strlen(j
->label
)];
2318 if (((sb_prefix_end
= strchr(j
->label
, ':')) != NULL
) &&
2319 ((sb_suffix_start
= strchr(sb_prefix_end
+ 1, '[')) != NULL
)) {
2321 * Workaround 'UIKitApplication:com.apple.foo[bar]' convention for the processes
2322 * we're interested in. To be removed when <rdar://problem/13066361> is addressed.
2324 snprintf(true_job_label
, sb_suffix_start
- sb_prefix_end
, "%s", sb_prefix_end
+ 1);
2325 label
= true_job_label
;
2327 /* Just test the standard label */
2331 /* Test for cache presence and apply if found */
2332 if (_launchd_defaults_cache
) {
2333 xpc_object_t props
= xpc_dictionary_get_value(_launchd_defaults_cache
, label
);
2334 if (props
&& xpc_get_type(props
) == XPC_TYPE_DICTIONARY
) {
2335 launch_data_t lv
= xpc2ld(props
);
2336 launch_data_dict_iterate(lv
, job_import_keys
, j
);
2337 launch_data_free(lv
);
2342 /* Limit free? Disable the memory limit if this is a test job; see <rdar://problem/13180697> */
2343 if (!strncmp(label
, test_prefix
, strlen(test_prefix
))) {
2344 j
->jetsam_memlimit
= -1;
2354 job_import_bulk(launch_data_t pload
)
2356 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
2358 size_t i
, c
= launch_data_array_get_count(pload
);
2360 ja
= alloca(c
* sizeof(job_t
));
2362 for (i
= 0; i
< c
; i
++) {
2363 if ((likely(ja
[i
] = jobmgr_import2(root_jobmgr
, launch_data_array_get_index(pload
, i
)))) && errno
!= ENEEDAUTH
) {
2366 launch_data_array_set_index(resp
, launch_data_new_errno(errno
), i
);
2369 for (i
= 0; i
< c
; i
++) {
2370 if (likely(ja
[i
])) {
2371 job_dispatch_curious_jobs(ja
[i
]);
2372 job_dispatch(ja
[i
], false);
2380 job_import_bool(job_t j
, const char *key
, bool value
)
2382 bool found_key
= false;
2387 if (strcasecmp(key
, LAUNCH_JOBKEY_ABANDONPROCESSGROUP
) == 0) {
2388 j
->abandon_pg
= value
;
2394 if (strcasecmp(key
, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN
) == 0) {
2395 j
->dirty_at_shutdown
= value
;
2401 if (strcasecmp(key
, LAUNCH_JOBKEY_JOINGUISESSION
) == 0) {
2402 j
->joins_gui_session
= value
;
2408 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
2409 j
->ondemand
= !value
;
2415 if (strcasecmp(key
, LAUNCH_JOBKEY_ONDEMAND
) == 0) {
2416 j
->ondemand
= value
;
2422 if (strcasecmp(key
, LAUNCH_JOBKEY_DEBUG
) == 0) {
2425 } else if (strcasecmp(key
, LAUNCH_JOBKEY_DISABLED
) == 0) {
2426 (void)job_assumes(j
, !value
);
2428 } else if (strcasecmp(key
, LAUNCH_JOBKEY_DISABLEASLR
) == 0) {
2429 j
->disable_aslr
= value
;
2435 if (strcasecmp(key
, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST
) == 0) {
2436 job_log(j
, LOG_PERF
, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST
, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN
);
2437 j
->dirty_at_shutdown
= value
;
2443 if (strcasecmp(key
, LAUNCH_JOBKEY_SESSIONCREATE
) == 0) {
2444 j
->session_create
= value
;
2446 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTONMOUNT
) == 0) {
2447 j
->start_on_mount
= value
;
2449 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SERVICEIPC
) == 0) {
2450 // this only does something on Mac OS X 10.4 "Tiger"
2452 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SHUTDOWNMONITOR
) == 0) {
2453 if (_launchd_shutdown_monitor
) {
2454 job_log(j
, LOG_ERR
, "Only one job may monitor shutdown.");
2456 j
->shutdown_monitor
= true;
2457 _launchd_shutdown_monitor
= j
;
2464 if (strcasecmp(key
, LAUNCH_JOBKEY_LOWPRIORITYIO
) == 0) {
2465 j
->low_pri_io
= value
;
2467 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LAUNCHONLYONCE
) == 0) {
2468 j
->only_once
= value
;
2470 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LOWPRIORITYBACKGROUNDIO
) == 0) {
2471 j
->low_priority_background_io
= true;
2473 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LEGACYTIMERS
) == 0) {
2474 #if !TARGET_OS_EMBEDDED
2475 j
->legacy_timers
= value
;
2476 #else // !TARGET_OS_EMBEDDED
2477 job_log(j
, LOG_ERR
, "This key is not supported on this platform: %s", key
);
2478 #endif // !TARGET_OS_EMBEDDED
2484 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
2485 j
->internal_exc_handler
= value
;
2487 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MULTIPLEINSTANCES
) == 0) {
2488 j
->multiple_instances
= value
;
2494 if (strcasecmp(key
, LAUNCH_JOBKEY_INITGROUPS
) == 0) {
2495 if (getuid() != 0) {
2496 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
2499 j
->no_init_groups
= !value
;
2501 } else if (strcasecmp(key
, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN
) == 0) {
2502 j
->ignore_pg_at_shutdown
= value
;
2508 if (strcasecmp(key
, LAUNCH_JOBKEY_RUNATLOAD
) == 0) {
2510 // We don't want value == false to change j->start_pending
2511 j
->start_pending
= true;
2518 if (strcasecmp(key
, LAUNCH_JOBKEY_ENABLEGLOBBING
) == 0) {
2519 j
->globargv
= value
;
2521 } else if (strcasecmp(key
, LAUNCH_JOBKEY_ENABLETRANSACTIONS
) == 0) {
2522 j
->enable_transactions
= value
;
2524 } else if (strcasecmp(key
, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL
) == 0) {
2525 j
->debug_before_kill
= value
;
2527 } else if (strcasecmp(key
, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION
) == 0) {
2528 #if TARGET_OS_EMBEDDED
2529 if (!_launchd_embedded_god
) {
2530 if ((j
->embedded_god
= value
)) {
2531 _launchd_embedded_god
= j
;
2534 job_log(j
, LOG_ERR
, "Job tried to claim %s after it has already been claimed.", key
);
2537 job_log(j
, LOG_ERR
, "This key is not supported on this platform: %s", key
);
2540 } else if (strcasecmp(key
, LAUNCH_JOBKEY_EMBEDDEDHOMESCREEN
) == 0) {
2541 #if TARGET_OS_EMBEDDED
2542 if (!_launchd_embedded_home
) {
2543 if ((j
->embedded_home
= value
)) {
2544 _launchd_embedded_home
= j
;
2547 job_log(j
, LOG_ERR
, "Job tried to claim %s after it has already been claimed.", key
);
2550 job_log(j
, LOG_ERR
, "This key is not supported on this platform: %s", key
);
2552 } else if (strcasecmp(key
, LAUNCH_JOBKEY_EVENTMONITOR
) == 0) {
2553 if (!_launchd_event_monitor
) {
2554 j
->event_monitor
= value
;
2556 _launchd_event_monitor
= j
;
2559 job_log(j
, LOG_NOTICE
, "Job tried to steal event monitoring responsibility from: %s", _launchd_event_monitor
->label
);
2566 if (strcasecmp(key
, LAUNCH_JOBKEY_WAITFORDEBUGGER
) == 0) {
2567 j
->wait4debugger
= value
;
2573 if (strcasecmp(key
, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER
) == 0) {
2575 if (_launchd_xpc_bootstrapper
) {
2576 job_log(j
, LOG_ERR
, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _launchd_xpc_bootstrapper
->label
);
2578 _launchd_xpc_bootstrapper
= j
;
2579 j
->xpc_bootstrapper
= value
;
2582 job_log(j
, LOG_ERR
, "Non-daemon tried to claim XPC bootstrapper property.");
2591 if (unlikely(!found_key
)) {
2592 job_log(j
, LOG_WARNING
, "Unknown key for boolean: %s", key
);
2597 job_import_string(job_t j
, const char *key
, const char *value
)
2599 char **where2put
= NULL
;
2604 if (strcasecmp(key
, LAUNCH_JOBKEY_CFBUNDLEIDENTIFIER
) == 0) {
2605 where2put
= &j
->cfbundleidentifier
;
2610 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
2611 where2put
= &j
->alt_exc_handler
;
2616 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAM
) == 0) {
2618 } else if (strcasecmp(key
, LAUNCH_JOBKEY_POSIXSPAWNTYPE
) == 0
2619 || strcasecmp(key
, LAUNCH_JOBKEY_PROCESSTYPE
) == 0) {
2620 if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_INTERACTIVE
) == 0) {
2621 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE
;
2622 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_ADAPTIVE
) == 0) {
2623 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE
;
2624 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_STANDARD
) == 0) {
2625 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD
;
2626 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_BACKGROUND
) == 0) {
2627 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND
;
2628 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP
) == 0) {
2629 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_APP_TAL
;
2630 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_SYSTEMAPP
) == 0) {
2631 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_APP_DEFAULT
;
2632 j
->system_app
= true;
2633 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_APP
) == 0) {
2634 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_APP_DEFAULT
;
2637 job_log(j
, LOG_ERR
, "Unknown value for key %s: %s", key
, value
);
2644 if (strcasecmp(key
, LAUNCH_JOBKEY_LABEL
) == 0) {
2646 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
2648 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
2650 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
2656 if (strcasecmp(key
, LAUNCH_JOBKEY_ROOTDIRECTORY
) == 0) {
2657 if (getuid() != 0) {
2658 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
2661 where2put
= &j
->rootdir
;
2666 if (strcasecmp(key
, LAUNCH_JOBKEY_WORKINGDIRECTORY
) == 0) {
2667 where2put
= &j
->workingdir
;
2672 if (strcasecmp(key
, LAUNCH_JOBKEY_USERNAME
) == 0) {
2673 if (getuid() != 0) {
2674 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
2676 } else if (strcmp(value
, "root") == 0) {
2679 where2put
= &j
->username
;
2684 if (strcasecmp(key
, LAUNCH_JOBKEY_GROUPNAME
) == 0) {
2685 if (getuid() != 0) {
2686 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
2688 } else if (strcmp(value
, "wheel") == 0) {
2691 where2put
= &j
->groupname
;
2696 if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDOUTPATH
) == 0) {
2697 where2put
= &j
->stdoutpath
;
2698 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDERRORPATH
) == 0) {
2699 where2put
= &j
->stderrpath
;
2700 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDINPATH
) == 0) {
2701 where2put
= &j
->stdinpath
;
2702 j
->stdin_fd
= _fd(open(value
, O_RDONLY
|O_CREAT
|O_NOCTTY
|O_NONBLOCK
, DEFFILEMODE
));
2703 if (job_assumes_zero_p(j
, j
->stdin_fd
) != -1) {
2704 // open() should not block, but regular IO by the job should
2705 (void)job_assumes_zero_p(j
, fcntl(j
->stdin_fd
, F_SETFL
, 0));
2706 // XXX -- EV_CLEAR should make named pipes happy?
2707 (void)job_assumes_zero_p(j
, kevent_mod(j
->stdin_fd
, EVFILT_READ
, EV_ADD
|EV_CLEAR
, 0, 0, j
));
2712 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXPROFILE
) == 0) {
2713 where2put
= &j
->seatbelt_profile
;
2714 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXCONTAINER
) == 0) {
2715 where2put
= &j
->container_identifier
;
2721 if (strcasecmp(key
, LAUNCH_JOBKEY_XPCDOMAIN
) == 0) {
2726 job_log(j
, LOG_WARNING
, "Unknown key for string: %s", key
);
2730 if (likely(where2put
)) {
2731 if (!(*where2put
= strdup(value
))) {
2732 (void)job_assumes_zero(j
, errno
);
2735 // See rdar://problem/5496612. These two are okay.
2736 if (strncmp(key
, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) == 0
2737 || strncmp(key
, "ServiceDescription", sizeof("ServiceDescription")) == 0) {
2738 job_log(j
, LOG_APPLEONLY
, "This key is no longer relevant and should be removed: %s", key
);
2740 job_log(j
, LOG_WARNING
, "Unknown key: %s", key
);
2746 job_import_integer(job_t j
, const char *key
, long long value
)
2751 #if TARGET_OS_EMBEDDED
2752 if (strcasecmp(key
, LAUNCH_JOBKEY_ASID
) == 0) {
2753 if (launchd_embedded_handofgod
) {
2754 if (audit_session_port((au_asid_t
)value
, &j
->asport
) == -1 && errno
!= ENOSYS
) {
2755 (void)job_assumes_zero(j
, errno
);
2762 if (strcasecmp(key
, LAUNCH_JOBKEY_EXITTIMEOUT
) == 0) {
2763 if (unlikely(value
< 0)) {
2764 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
2765 } else if (unlikely(value
> UINT32_MAX
)) {
2766 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
2768 j
->exit_timeout
= (typeof(j
->exit_timeout
)) value
;
2770 } else if (strcasecmp(key
, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY
) == 0) {
2771 j
->main_thread_priority
= value
;
2776 if (strcasecmp(key
, LAUNCH_JOBKEY_JETSAMPRIORITY
) == 0) {
2777 job_log(j
, LOG_WARNING
| LOG_CONSOLE
, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2779 launch_data_t pri
= launch_data_new_integer(value
);
2780 if (job_assumes(j
, pri
!= NULL
)) {
2781 jetsam_property_setup(pri
, LAUNCH_JOBKEY_JETSAMPRIORITY
, j
);
2782 launch_data_free(pri
);
2787 if (strcasecmp(key
, LAUNCH_JOBKEY_NICE
) == 0) {
2788 if (unlikely(value
< PRIO_MIN
)) {
2789 job_log(j
, LOG_WARNING
, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE
, PRIO_MIN
);
2790 } else if (unlikely(value
> PRIO_MAX
)) {
2791 job_log(j
, LOG_WARNING
, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE
, PRIO_MAX
);
2793 j
->nice
= (typeof(j
->nice
)) value
;
2800 if (strcasecmp(key
, LAUNCH_JOBKEY_TIMEOUT
) == 0) {
2801 if (unlikely(value
< 0)) {
2802 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
2803 } else if (unlikely(value
> UINT32_MAX
)) {
2804 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
2806 j
->timeout
= (typeof(j
->timeout
)) value
;
2808 } else if (strcasecmp(key
, LAUNCH_JOBKEY_THROTTLEINTERVAL
) == 0) {
2810 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
2811 } else if (value
> UINT32_MAX
) {
2812 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
2814 j
->min_run_time
= (typeof(j
->min_run_time
)) value
;
2820 if (strcasecmp(key
, LAUNCH_JOBKEY_UMASK
) == 0) {
2827 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTINTERVAL
) == 0) {
2828 if (unlikely(value
<= 0)) {
2829 job_log(j
, LOG_WARNING
, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
2830 } else if (unlikely(value
> UINT32_MAX
)) {
2831 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
2833 runtime_add_weak_ref();
2834 j
->start_interval
= (typeof(j
->start_interval
)) value
;
2836 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, j
->start_interval
, j
));
2839 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
2840 j
->seatbelt_flags
= value
;
2846 job_log(j
, LOG_WARNING
, "Unknown key for integer: %s", key
);
2852 job_import_opaque(job_t j
__attribute__((unused
)), const char *key
, launch_data_t value
__attribute__((unused
)))
2858 if (strcasecmp(key
, LAUNCH_JOBKEY_QUARANTINEDATA
) == 0) {
2859 size_t tmpsz
= launch_data_get_opaque_size(value
);
2861 if (job_assumes(j
, j
->quarantine_data
= malloc(tmpsz
))) {
2862 memcpy(j
->quarantine_data
, launch_data_get_opaque(value
), tmpsz
);
2863 j
->quarantine_data_sz
= tmpsz
;
2869 if (strcasecmp(key
, LAUNCH_JOBKEY_SECURITYSESSIONUUID
) == 0) {
2870 size_t tmpsz
= launch_data_get_opaque_size(value
);
2871 if (job_assumes(j
, tmpsz
== sizeof(uuid_t
))) {
2872 memcpy(j
->expected_audit_uuid
, launch_data_get_opaque(value
), sizeof(uuid_t
));
2882 policy_setup(launch_data_t obj
, const char *key
, void *context
)
2885 bool found_key
= false;
2890 if (strcasecmp(key
, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS
) == 0) {
2891 j
->deny_job_creation
= launch_data_get_bool(obj
);
2899 if (unlikely(!found_key
)) {
2900 job_log(j
, LOG_WARNING
, "Unknown policy: %s", key
);
2905 job_import_dictionary(job_t j
, const char *key
, launch_data_t value
)
2912 if (strcasecmp(key
, LAUNCH_JOBKEY_POLICIES
) == 0) {
2913 launch_data_dict_iterate(value
, policy_setup
, j
);
2918 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
2919 launch_data_dict_iterate(value
, semaphoreitem_setup
, j
);
2924 if (strcasecmp(key
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
) == 0) {
2925 j
->inetcompat
= true;
2926 j
->abandon_pg
= true;
2927 if ((tmp
= launch_data_dict_lookup(value
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
))) {
2928 j
->inetcompat_wait
= launch_data_get_bool(tmp
);
2934 if (strcasecmp(key
, LAUNCH_JOBKEY_JETSAMPROPERTIES
) == 0) {
2935 launch_data_dict_iterate(value
, (void (*)(launch_data_t
, const char *, void *))jetsam_property_setup
, j
);
2939 if (strcasecmp(key
, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES
) == 0) {
2940 launch_data_dict_iterate(value
, envitem_setup
, j
);
2945 if (strcasecmp(key
, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES
) == 0) {
2946 j
->importing_global_env
= true;
2947 launch_data_dict_iterate(value
, envitem_setup
, j
);
2948 j
->importing_global_env
= false;
2953 if (strcasecmp(key
, LAUNCH_JOBKEY_SOCKETS
) == 0) {
2954 launch_data_dict_iterate(value
, socketgroup_setup
, j
);
2955 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
2956 calendarinterval_new_from_obj(j
, value
);
2957 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SOFTRESOURCELIMITS
) == 0) {
2958 launch_data_dict_iterate(value
, limititem_setup
, j
);
2960 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
2961 launch_data_dict_iterate(value
, seatbelt_setup_flags
, j
);
2967 if (strcasecmp(key
, LAUNCH_JOBKEY_HARDRESOURCELIMITS
) == 0) {
2968 j
->importing_hard_limits
= true;
2969 launch_data_dict_iterate(value
, limititem_setup
, j
);
2970 j
->importing_hard_limits
= false;
2975 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHSERVICES
) == 0) {
2976 launch_data_dict_iterate(value
, machservice_setup
, j
);
2981 if (strcasecmp(key
, LAUNCH_JOBKEY_LAUNCHEVENTS
) == 0) {
2982 launch_data_dict_iterate(value
, eventsystem_setup
, j
);
2984 if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE
) == 0) {
2987 if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE
) == 0) {
2993 job_log(j
, LOG_WARNING
, "Unknown key for dictionary: %s", key
);
2999 job_import_array(job_t j
, const char *key
, launch_data_t value
)
3001 size_t i
, value_cnt
= launch_data_array_get_count(value
);
3006 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
) == 0) {
3012 if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
3014 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
3016 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
3017 job_log(j
, LOG_NOTICE
, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
3023 if (strcasecmp(key
, LAUNCH_JOBKEY_BINARYORDERPREFERENCE
) == 0) {
3024 if (job_assumes(j
, j
->j_binpref
= malloc(value_cnt
* sizeof(*j
->j_binpref
)))) {
3025 j
->j_binpref_cnt
= value_cnt
;
3026 for (i
= 0; i
< value_cnt
; i
++) {
3027 j
->j_binpref
[i
] = (cpu_type_t
) launch_data_get_integer(launch_data_array_get_index(value
, i
));
3034 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
3035 for (i
= 0; i
< value_cnt
; i
++) {
3036 calendarinterval_new_from_obj(j
, launch_data_array_get_index(value
, i
));
3041 job_log(j
, LOG_WARNING
, "Unknown key for array: %s", key
);
3047 job_import_keys(launch_data_t obj
, const char *key
, void *context
)
3050 launch_data_type_t kind
;
3053 launchd_syslog(LOG_ERR
, "NULL object given to job_import_keys().");
3057 kind
= launch_data_get_type(obj
);
3060 case LAUNCH_DATA_BOOL
:
3061 job_import_bool(j
, key
, launch_data_get_bool(obj
));
3063 case LAUNCH_DATA_STRING
:
3064 job_import_string(j
, key
, launch_data_get_string(obj
));
3066 case LAUNCH_DATA_INTEGER
:
3067 job_import_integer(j
, key
, launch_data_get_integer(obj
));
3069 case LAUNCH_DATA_DICTIONARY
:
3070 job_import_dictionary(j
, key
, obj
);
3072 case LAUNCH_DATA_ARRAY
:
3073 job_import_array(j
, key
, obj
);
3075 case LAUNCH_DATA_OPAQUE
:
3076 job_import_opaque(j
, key
, obj
);
3079 job_log(j
, LOG_WARNING
, "Unknown value type '%d' for key: %s", kind
, key
);
3085 jobmgr_import2(jobmgr_t jm
, launch_data_t pload
)
3087 launch_data_t tmp
, ldpa
;
3088 const char *label
= NULL
, *prog
= NULL
;
3089 const char **argv
= NULL
;
3092 if (!jobmgr_assumes(jm
, pload
!= NULL
)) {
3097 if (unlikely(launch_data_get_type(pload
) != LAUNCH_DATA_DICTIONARY
)) {
3102 if (unlikely(!(tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LABEL
)))) {
3107 if (unlikely(launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
)) {
3112 if (unlikely(!(label
= launch_data_get_string(tmp
)))) {
3117 #if TARGET_OS_EMBEDDED
3118 if (unlikely(launchd_embedded_handofgod
&& _launchd_embedded_god
)) {
3119 if (unlikely(!(tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_USERNAME
)))) {
3124 const char *username
= NULL
;
3125 if (likely(tmp
&& launch_data_get_type(tmp
) == LAUNCH_DATA_STRING
)) {
3126 username
= launch_data_get_string(tmp
);
3132 if (!jobmgr_assumes(jm
, _launchd_embedded_god
->username
!= NULL
&& username
!= NULL
)) {
3137 if (unlikely(strcmp(_launchd_embedded_god
->username
, username
) != 0)) {
3141 } else if (launchd_embedded_handofgod
) {
3147 if ((tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAM
))
3148 && (launch_data_get_type(tmp
) == LAUNCH_DATA_STRING
)) {
3149 prog
= launch_data_get_string(tmp
);
3153 if ((ldpa
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
))) {
3156 if (launch_data_get_type(ldpa
) != LAUNCH_DATA_ARRAY
) {
3161 c
= launch_data_array_get_count(ldpa
);
3163 argv
= alloca((c
+ 1) * sizeof(char *));
3165 for (i
= 0; i
< c
; i
++) {
3166 tmp
= launch_data_array_get_index(ldpa
, i
);
3168 if (launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
) {
3173 argv
[i
] = launch_data_get_string(tmp
);
3180 if (!prog
&& argc
== 0) {
3181 jobmgr_log(jm
, LOG_ERR
, "Job specifies neither Program nor ProgramArguments: %s", label
);
3186 /* Find the requested session. You cannot load services into XPC domains in
3189 launch_data_t session
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
3191 jobmgr_t jmt
= NULL
;
3192 if (launch_data_get_type(session
) == LAUNCH_DATA_STRING
) {
3193 jmt
= jobmgr_find_by_name(jm
, launch_data_get_string(session
));
3195 jobmgr_log(jm
, LOG_ERR
, "Could not find requested session: %s", launch_data_get_string(session
));
3200 jobmgr_log(jm
, LOG_ERR
, "Session type is not a string.");
3209 /* For legacy reasons, we have a global hash of all labels in all job
3210 * managers. So rather than make it a global, we store it in the root job
3211 * manager. But for an XPC domain, we store a local hash of all services in
3214 jobmgr_t where2look
= (jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) ? jm
: root_jobmgr
;
3215 if (unlikely((j
= job_find(where2look
, label
)) != NULL
)) {
3216 if (jm
->xpc_singleton
) {
3217 /* There can (and probably will be) multiple attemtps to import the
3218 * same XPC service from the same framework. This is okay. It's
3219 * treated as a singleton, so just return the existing one so that
3220 * it may be aliased into the requesting process' XPC domain.
3225 /* If we're not a global XPC domain, then it's an error to try
3226 * importing the same job/service multiple times.
3231 } else if (unlikely(!jobmgr_label_test(where2look
, label
))) {
3235 jobmgr_log(jm
, LOG_DEBUG
, "Importing %s.", label
);
3237 if (likely(j
= job_new(jm
, label
, prog
, argv
))) {
3238 #if TARGET_OS_EMBEDDED
3239 job_apply_defaults(j
);
3241 launch_data_dict_iterate(pload
, job_import_keys
, j
);
3242 if (!uuid_is_null(j
->expected_audit_uuid
)) {
3243 uuid_string_t uuid_str
;
3244 uuid_unparse(j
->expected_audit_uuid
, uuid_str
);
3245 job_log(j
, LOG_DEBUG
, "Imported job. Waiting for session for UUID %s.", uuid_str
);
3246 LIST_INSERT_HEAD(&s_needing_sessions
, j
, needing_session_sle
);
3249 job_log(j
, LOG_DEBUG
, "No security session specified.");
3250 j
->asport
= MACH_PORT_NULL
;
3253 if (pid1_magic
&& !jm
->parentmgr
) {
3254 /* Workaround reentrancy in CF. We don't make this a global variable
3255 * because we don't want per-user launchd's to inherit it. So we
3256 * just set it for every job that we import into the System session.
3258 * See <rdar://problem/9468837>.
3260 envitem_new(j
, "__CF_USER_TEXT_ENCODING", "0x0:0:0", false);
3263 if (j
->event_monitor
) {
3267 #if TARGET_OS_EMBEDDED
3268 /* SpringBoard and backboardd must run at elevated priority.
3270 * See <rdar://problem/9539873> and <rdar://problem/10984383>.
3272 if (j
->embedded_god
|| j
->embedded_home
) {
3273 j
->psproctype
= POSIX_SPAWN_PROC_TYPE_APP_DEFAULT
;
3282 jobmgr_label_test(jobmgr_t jm
, const char *str
)
3286 if (str
[0] == '\0') {
3287 jobmgr_log(jm
, LOG_ERR
, "Empty job labels are not allowed");
3291 for (ptr
= str
; *ptr
; ptr
++) {
3292 if (iscntrl(*ptr
)) {
3293 jobmgr_log(jm
, LOG_ERR
, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr
- str
, *ptr
);
3298 if ((strncasecmp(str
, "com.apple.launchd", strlen("com.apple.launchd")) == 0)
3299 || (strncasecmp(str
, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
3300 jobmgr_log(jm
, LOG_ERR
, "Job labels are not allowed to use a reserved prefix: %s", str
);
3308 job_find(jobmgr_t jm
, const char *label
)
3316 LIST_FOREACH(ji
, &jm
->label_hash
[hash_label(label
)], label_hash_sle
) {
3317 if (unlikely(ji
->removal_pending
|| ji
->mgr
->shutting_down
)) {
3318 // 5351245 and 5488633 respectively
3322 if (strcmp(ji
->label
, label
) == 0) {
3331 // Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid().
3333 jobmgr_find_by_pid_deep(jobmgr_t jm
, pid_t p
, bool anon_okay
)
3336 LIST_FOREACH(ji
, &jm
->active_jobs
[ACTIVE_JOB_HASH(p
)], pid_hash_sle
) {
3337 if (ji
->p
== p
&& (!ji
->anonymous
|| (ji
->anonymous
&& anon_okay
))) {
3342 jobmgr_t jmi
= NULL
;
3343 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
3344 if ((ji
= jobmgr_find_by_pid_deep(jmi
, p
, anon_okay
))) {
3353 jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
)
3357 LIST_FOREACH(ji
, &jm
->active_jobs
[ACTIVE_JOB_HASH(p
)], pid_hash_sle
) {
3363 return create_anon
? job_new_anonymous(jm
, p
) : NULL
;
3367 managed_job(pid_t p
)
3371 LIST_FOREACH(ji
, &managed_actives
[ACTIVE_JOB_HASH(p
)], pid_hash_sle
) {
/* Translate a MIG request port to the job it belongs to. A manager's own port
 * maps to the caller's job (creating an anonymous one if needed); otherwise
 * submanagers are searched recursively, then per-job ports in this manager.
 */
job_t
job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
{
	jobmgr_t jmi;
	job_t ji;

	if (jm->jm_port == mport) {
		return jobmgr_find_by_pid(jm, upid, true);
	}

	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		job_t jr;

		if ((jr = job_mig_intran2(jmi, mport, upid))) {
			return jr;
		}
	}

	LIST_FOREACH(ji, &jm->jobs, sle) {
		if (ji->j_port == mport) {
			return ji;
		}
	}

	return NULL;
}
/* MIG intran hook: map an incoming request port to a job using the caller's
 * audit credentials. On a miss, distinguish "caller died" (ESRCH) from an
 * unexpected proc_pidinfo failure.
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	jr = job_mig_intran2(root_jobmgr, p, ldc->pid);

	if (!jr) {
		struct proc_bsdshortinfo proc;
		/* proc_pidinfo() returns 0 on failure. */
		if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)jobmgr_assumes_zero(root_jobmgr, errno);
			} else {
				/* NOTE(review): branch structure reconstructed from elided
				 * fragment — verify against upstream launchd job.c. */
				jobmgr_log(root_jobmgr, LOG_ERR, "%s[%i] disappeared out from under us (UID: %u EUID: %u)", proc.pbsi_comm, ldc->pid, ldc->uid, ldc->euid);
			}
		}
	}

	return jr;
}
/* Find the job owning a receive-right Mach service port via the global port
 * hash. Returns NULL when no receiving service matches.
 * NOTE(review): return statements elided in extraction; reconstructed —
 * verify against upstream launchd job.c.
 */
job_t
job_find_by_service_port(mach_port_t p)
{
	struct machservice *ms;

	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->recv && (ms->port == p)) {
			return ms->job;
		}
	}

	return NULL;
}
3444 job_mig_destructor(job_t j
)
3446 /* The job can go invalid before this point.
3448 * <rdar://problem/5477111>
3450 if (unlikely(j
&& (j
!= workaround_5477111
) && j
->unload_at_mig_return
)) {
3451 job_log(j
, LOG_NOTICE
, "Unloading PID %u at MIG return.", j
->p
);
3455 workaround_5477111
= NULL
;
3457 calendarinterval_sanity_check();
3461 job_export_all2(jobmgr_t jm
, launch_data_t where
)
3466 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
3467 job_export_all2(jmi
, where
);
3470 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
3473 if (jobmgr_assumes(jm
, (tmp
= job_export(ji
)) != NULL
)) {
3474 launch_data_dict_insert(where
, tmp
, ji
->label
);
3480 job_export_all(void)
3482 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
3485 job_export_all2(root_jobmgr
, resp
);
3487 (void)os_assumes_zero(errno
);
/* Log (Apple-internal builds only) every surviving process whose PGID still
 * matches a dead job — these are strays the job abandoned.
 */
void
job_log_stray_pg(job_t j)
{
	pid_t *pids = NULL;
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	if (job_assumes_zero_p(j, (kp_cnt = proc_listpgrppids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = pids[i];
		if (p_i == j->p) {
			/* The dead job itself is not a stray. */
			continue;
		} else if (p_i == 0 || p_i == 1) {
			/* Never flag the kernel or launchd. */
			continue;
		}

		struct proc_bsdshortinfo proc;
		/* proc_pidinfo() returns 0 on failure. */
		if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		pid_t pp_i = proc.pbsi_ppid;
		const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
		const char *n = proc.pbsi_comm;

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
	}

out:
	free(pids);
}
#if HAVE_SYSTEMSTATS
/* Timer callback: flush per-job performance statistics for the whole tree. */
static void
systemstats_timer_callback(void)
{
	jobmgr_log_perf_statistics(root_jobmgr, true);
}

/* Lazily initialize the systemstats writer and, in PID-1 launchd, arm a
 * periodic kevent timer to log statistics at the writer's interval.
 * NOTE(review): some interior lines were elided in extraction and have been
 * reconstructed — verify against upstream launchd job.c.
 */
static bool
systemstats_is_enabled(void)
{
	static bool systemstats_enabled;

	if (!systemstats_enabled) {
		char *store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_LOGS, NULL);
		systemstats_enabled = systemstats_init(SYSTEMSTATS_WRITER_launchd, store);
		free(store);

		uint64_t interval;
		interval = systemstats_get_log_interval(SYSTEMSTATS_WRITER_launchd);

		if (pid1_magic && systemstats_enabled && interval) {
			jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)systemstats_timer_callback, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, interval, root_jobmgr));
		}
	}

	return systemstats_enabled;
}
#endif // HAVE_SYSTEMSTATS
3572 bool is_system_bootstrapper
= ((j
->is_bootstrapper
&& pid1_magic
) && !j
->mgr
->parentmgr
);
3574 job_log(j
, LOG_DEBUG
, "Reaping");
3576 if (unlikely(j
->weird_bootstrap
)) {
3578 job_mig_swap_integer(j
, VPROC_GSK_WEIRD_BOOTSTRAP
, 0, 0, &junk
);
3582 (void)job_assumes_zero_p(j
, runtime_close(j
->fork_fd
));
3586 bool was_dirty
= false;
3587 if (!(j
->anonymous
|| j
->implicit_reap
)) {
3589 (void)job_assumes_zero(j
, proc_get_dirty(j
->p
, &flags
));
3591 j
->idle_exit
= (flags
& PROC_DIRTY_ALLOWS_IDLE_EXIT
);
3592 was_dirty
= (flags
& PROC_DIRTY_IS_DIRTY
);
3594 job_log(j
, LOG_DEBUG
, "%sob exited %s.", j
->idle_exit
? "Idle-exit j" : "J", was_dirty
? "while dirty" : "cleanly");
3597 if (j
->idle_exit
&& was_dirty
) {
3598 if (j
->jettisoned
) {
3599 job_log(j
, LOG_NOTICE
, "Idle-exit job was jettisoned while dirty. Will respawn immediately.");
3600 j
->unthrottle
= true;
3601 j
->start_pending
= true;
3603 job_log(j
, LOG_INFO
, "Idle-exit job exited while dirty.");
3605 } else if (j
->idle_exit
&& j
->jettisoned
) {
3606 /* If an idle-exit job is jettisoned, then we shouldn't throttle its
3607 * next respawn because it could not help when it exited. If it ran for
3608 * the minimum runtime, then this doesn't really matter. If it ran for
3609 * less than the minimum runtime, it will not be throttled.
3611 * <rdar://problem/12098667>
3613 job_log(j
, LOG_NOTICE
, "Idle-exit job was jettisoned. Will bypass throttle interval for next on-demand launch.");
3614 j
->unthrottle
= true;
3618 j
->last_exit_status
= 0;
3620 uint64_t rt
= runtime_get_nanoseconds_since(j
->start_time
);
3623 job_log(j
, LOG_PERF
, "Last instance wall time: %06f", (double)rt
/ (double)NSEC_PER_SEC
);
3626 /* The job is dead. While the PID/PGID is still known to be valid, try
3627 * to kill abandoned descendant processes.
3629 job_log_stray_pg(j
);
3630 if (!j
->abandon_pg
) {
3631 if (unlikely(killpg2(j
->p
, SIGTERM
) == -1 && errno
!= ESRCH
)) {
3632 job_log(j
, LOG_APPLEONLY
, "Bug: 5487498");
3637 if (!j
->implicit_reap
) {
3638 /* If the shutdown monitor has suspended a task and not resumed it
3639 * resumed it before exiting, the kernel will not clean up after the
3640 * shutdown monitor. It will, instead, leave the task suspended and
3641 * not process any pending signals on the event loop for the task.
3643 * There are a variety of other kernel bugs that could prevent a
3644 * process from exiting, usually having to do with faulty hardware
3645 * or talking to misbehaving drivers that mark a thread as
3646 * uninterruptible and deadlock/hang before unmarking it as such. So
3647 * we have to work around that too.
3649 * See <rdar://problem/9284889&9359725>.
3651 if (j
->workaround9359725
) {
3652 job_log(j
, LOG_NOTICE
, "Simulated exit: <rdar://problem/9359725>");
3653 j
->last_exit_status
= W_EXITCODE(-1, SIGSEGV
);
3655 #if HAVE_SYSTEMSTATS
3657 struct rusage_info_v1 ri
;
3658 r2
= job_assumes_zero(j
, proc_pid_rusage(j
->p
, RUSAGE_INFO_V1
, (rusage_info_t
)&ri
));
3660 if ((r
= wait4(j
->p
, &j
->last_exit_status
, 0, NULL
)) == -1) {
3661 job_log(j
, LOG_ERR
, "Reap failed. Assuming job exited: %d: %s", errno
, strerror(errno
));
3662 j
->last_exit_status
= W_EXITCODE(-1, SIGSEGV
);
3665 if (j
->idle_exit
&& j
->jettisoned
) {
3666 // Treat idle-exit jettisons as successful exit.
3668 // <rdar://problem/13338973>
3669 (void)job_assumes_zero(j
, WTERMSIG(j
->last_exit_status
));
3670 j
->last_exit_status
= W_EXITCODE(0, 0);
3672 #if HAVE_SYSTEMSTATS
3674 job_log_perf_statistics(j
, &ri
, j
->last_exit_status
);
3679 job_log(j
, LOG_INFO
, "Job was implicitly reaped by the kernel.");
3683 if (j
->exit_timeout
) {
3684 (void)kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
3687 LIST_REMOVE(j
, pid_hash_sle
);
3688 if (!j
->anonymous
) {
3689 LIST_REMOVE(j
, global_pid_hash_sle
);
3692 if (j
->sent_signal_time
) {
3693 uint64_t td_sec
, td_usec
, td
= runtime_get_nanoseconds_since(j
->sent_signal_time
);
3695 td_sec
= td
/ NSEC_PER_SEC
;
3696 td_usec
= (td
% NSEC_PER_SEC
) / NSEC_PER_USEC
;
3698 job_log(j
, LOG_DEBUG
, "Exited %llu.%06llu seconds after the first signal was sent", td_sec
, td_usec
);
3701 int exit_status
= WEXITSTATUS(j
->last_exit_status
);
3702 if (WIFEXITED(j
->last_exit_status
) && exit_status
!= 0) {
3703 if (!j
->did_exec
&& _launchd_support_system
) {
3704 xpc_object_t event
= NULL
;
3705 switch (exit_status
) {
3709 job_log(j
, LOG_NOTICE
, "Job failed to exec(3). Setting up event to tell us when to try again: %d: %s", exit_status
, strerror(exit_status
));
3710 event
= xpc_dictionary_create(NULL
, NULL
, 0);
3711 xpc_dictionary_set_string(event
, "Executable", j
->prog
? j
->prog
: j
->argv
[0]);
3713 xpc_dictionary_set_uint64(event
, "UID", j
->mach_uid
);
3714 } else if (j
->username
) {
3715 xpc_dictionary_set_string(event
, "UserName", j
->username
);
3719 xpc_dictionary_set_string(event
, "GroupName", j
->groupname
);
3722 (void)externalevent_new(j
, _launchd_support_system
, j
->label
, event
, 0);
3725 j
->waiting4ok
= true;
3727 job_log(j
, LOG_NOTICE
, "Job failed to exec(3) for weird reason: %d", exit_status
);
3730 int level
= LOG_INFO
;
3731 if (exit_status
!= 0) {
3735 job_log(j
, level
, "Exited with code: %d", exit_status
);
3739 if (WIFSIGNALED(j
->last_exit_status
)) {
3740 int s
= WTERMSIG(j
->last_exit_status
);
3741 if ((SIGKILL
== s
|| SIGTERM
== s
) && !j
->stopped
) {
3742 job_log(j
, LOG_NOTICE
, "Exited: %s", strsignal(s
));
3743 } else if (!(j
->stopped
|| j
->clean_kill
|| j
->jettisoned
)) {
3745 // Signals which indicate a crash.
3752 /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
3753 * SIGTRAP, assume that it's a crash.
3757 job_log(j
, LOG_WARNING
, "Job appears to have crashed: %s", strsignal(s
));
3760 job_log(j
, LOG_WARNING
, "Exited abnormally: %s", strsignal(s
));
3764 if (is_system_bootstrapper
&& j
->crashed
) {
3765 job_log(j
, LOG_ERR
| LOG_CONSOLE
, "The %s bootstrapper has crashed: %s", j
->mgr
->name
, strsignal(s
));
3772 struct machservice
*msi
= NULL
;
3773 if (j
->crashed
|| !(j
->did_exec
|| j
->anonymous
)) {
3774 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
3775 if (j
->crashed
&& !msi
->isActive
&& (msi
->drain_one_on_crash
|| msi
->drain_all_on_crash
)) {
3776 machservice_drain_port(msi
);
3779 if (!j
->did_exec
&& msi
->reset
&& job_assumes(j
, !msi
->isActive
)) {
3780 machservice_resetport(j
, msi
);
3785 /* HACK: Essentially duplicating the logic directly above. But this has
3786 * gotten really hairy, and I don't want to try consolidating it right now.
3788 if (j
->xpc_service
&& !j
->xpcproxy_did_exec
) {
3789 job_log(j
, LOG_ERR
, "XPC Service could not exec(3). Resetting port.");
3790 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
3791 /* Drain the messages but do not reset the port. If xpcproxy could
3792 * not exec(3), then we don't want to continue trying, since there
3793 * is very likely a serious configuration error with the service.
3795 * The above comment is weird. I originally said we should drain
3796 * messages but not reset the port, but that's exactly what we do
3797 * below, and I'm not sure which is the mistake, the comment or the
3800 * Since it's always been this way, I'll assume that the comment is
3801 * incorrect, but I'll leave it in place just to remind myself to
3802 * actually look into it at some point.
3804 * <rdar://problem/8986802>
3806 if (msi
->upfront
&& job_assumes(j
, !msi
->isActive
)) {
3807 machservice_resetport(j
, msi
);
3812 struct suspended_peruser
*spi
= NULL
;
3813 while ((spi
= LIST_FIRST(&j
->suspended_perusers
))) {
3814 job_log(j
, LOG_ERR
, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi
->j
->mach_uid
);
3815 spi
->j
->peruser_suspend_count
--;
3816 if (spi
->j
->peruser_suspend_count
== 0) {
3817 job_dispatch(spi
->j
, false);
3819 LIST_REMOVE(spi
, sle
);
3823 if (j
->exit_status_dest
) {
3824 errno
= helper_downcall_wait(j
->exit_status_dest
, j
->last_exit_status
);
3825 if (errno
&& errno
!= MACH_SEND_INVALID_DEST
) {
3826 (void)job_assumes_zero(j
, errno
);
3829 j
->exit_status_dest
= MACH_PORT_NULL
;
3832 if (j
->spawn_reply_port
) {
3833 /* If the child never called exec(3), we must send a spawn() reply so
3834 * that the requestor can get exit status from it. If we fail to send
3835 * the reply for some reason, we have to deallocate the exit status port
3838 kern_return_t kr
= job_mig_spawn2_reply(j
->spawn_reply_port
, BOOTSTRAP_SUCCESS
, j
->p
, j
->exit_status_port
);
3840 if (kr
!= MACH_SEND_INVALID_DEST
) {
3841 (void)job_assumes_zero(j
, kr
);
3844 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->exit_status_port
));
3847 j
->exit_status_port
= MACH_PORT_NULL
;
3848 j
->spawn_reply_port
= MACH_PORT_NULL
;
3852 total_anon_children
--;
3854 job_log(j
, LOG_PERF
, "Anonymous job exited holding reference.");
3858 job_log(j
, LOG_PERF
, "Job exited.");
3863 if (j
->has_console
) {
3867 if (j
->shutdown_monitor
) {
3868 job_log(j
, LOG_NOTICE
| LOG_CONSOLE
, "Shutdown monitor has exited.");
3869 _launchd_shutdown_monitor
= NULL
;
3870 j
->shutdown_monitor
= false;
3873 if (!j
->anonymous
) {
3874 j
->mgr
->normal_active_cnt
--;
3876 j
->sent_signal_time
= 0;
3877 j
->sent_sigkill
= false;
3878 j
->clean_kill
= false;
3879 j
->event_monitor_ready2signal
= false;
3885 jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
)
3890 if (jm
->shutting_down
) {
3894 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
3895 jobmgr_dispatch_all(jmi
, newmounthack
);
3898 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
3899 if (newmounthack
&& ji
->start_on_mount
) {
3900 ji
->start_pending
= true;
3903 job_dispatch(ji
, false);
3908 job_dispatch_curious_jobs(job_t j
)
3910 job_t ji
= NULL
, jt
= NULL
;
3911 SLIST_FOREACH_SAFE(ji
, &s_curious_jobs
, curious_jobs_sle
, jt
) {
3912 struct semaphoreitem
*si
= NULL
;
3913 SLIST_FOREACH(si
, &ji
->semaphores
, sle
) {
3914 if (!(si
->why
== OTHER_JOB_ENABLED
|| si
->why
== OTHER_JOB_DISABLED
)) {
3918 if (strcmp(si
->what
, j
->label
) == 0) {
3919 job_log(ji
, LOG_DEBUG
, "Dispatching out of interest in \"%s\".", j
->label
);
3921 if (!ji
->removing
) {
3922 job_dispatch(ji
, false);
3924 job_log(ji
, LOG_NOTICE
, "The following job is circularly dependent upon this one: %s", j
->label
);
3927 /* ji could be removed here, so don't do anything with it or its semaphores
3937 job_dispatch(job_t j
, bool kickstart
)
3939 // Don't dispatch a job if it has no audit session set.
3940 if (!uuid_is_null(j
->expected_audit_uuid
)) {
3941 job_log(j
, LOG_DEBUG
, "Job is still awaiting its audit session UUID. Not dispatching.");
3945 job_log(j
, LOG_DEBUG
, "Job is an alias. Not dispatching.");
3949 if (j
->waiting4ok
) {
3950 job_log(j
, LOG_DEBUG
, "Job cannot exec(3). Not dispatching.");
3954 #if TARGET_OS_EMBEDDED
3955 if (launchd_embedded_handofgod
&& _launchd_embedded_god
) {
3956 if (!job_assumes(j
, _launchd_embedded_god
->username
!= NULL
&& j
->username
!= NULL
)) {
3961 if (strcmp(j
->username
, _launchd_embedded_god
->username
) != 0) {
3965 } else if (launchd_embedded_handofgod
) {
3972 * The whole job removal logic needs to be consolidated. The fact that
3973 * a job can be removed from just about anywhere makes it easy to have
3974 * stale pointers left behind somewhere on the stack that might get
3975 * used after the deallocation. In particular, during job iteration.
3977 * This is a classic example. The act of dispatching a job may delete it.
3979 if (!job_active(j
)) {
3980 if (job_useless(j
)) {
3981 job_log(j
, LOG_DEBUG
, "Job is useless. Removing.");
3985 if (unlikely(j
->per_user
&& j
->peruser_suspend_count
> 0)) {
3986 job_log(j
, LOG_DEBUG
, "Per-user launchd is suspended. Not dispatching.");
3990 if (kickstart
|| job_keepalive(j
)) {
3991 job_log(j
, LOG_DEBUG
, "%starting job", kickstart
? "Kicks" : "S");
3994 job_log(j
, LOG_DEBUG
, "Watching job.");
3998 job_log(j
, LOG_DEBUG
, "Tried to dispatch an already active job: %s.", job_active(j
));
4007 if (unlikely(!j
->p
|| j
->anonymous
)) {
4011 (void)job_assumes_zero_p(j
, kill2(j
->p
, SIGKILL
));
4013 j
->sent_sigkill
= true;
4014 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, LAUNCHD_SIGKILL_TIMER
, j
));
4016 job_log(j
, LOG_DEBUG
, "Sent SIGKILL signal");
4020 job_open_shutdown_transaction(job_t j
)
4022 int rv
= proc_set_dirty(j
->p
, true);
4024 job_log(j
, LOG_DEBUG
, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
4025 j
->dirty_at_shutdown
= false;
4030 job_close_shutdown_transaction(job_t j
)
4032 if (j
->dirty_at_shutdown
) {
4033 job_log(j
, LOG_DEBUG
, "Closing shutdown transaction for job.");
4034 (void)job_assumes_zero(j
, proc_set_dirty(j
->p
, false));
4035 j
->dirty_at_shutdown
= false;
4040 job_log_children_without_exec(job_t j
)
4043 size_t len
= sizeof(pid_t
) * get_kern_max_proc();
4044 int i
= 0, kp_cnt
= 0;
4046 if (!launchd_apple_internal
|| j
->anonymous
|| j
->per_user
) {
4050 if (!job_assumes(j
, (pids
= malloc(len
)) != NULL
)) {
4053 if (job_assumes_zero_p(j
, (kp_cnt
= proc_listchildpids(j
->p
, pids
, len
))) == -1) {
4057 for (i
= 0; i
< kp_cnt
; i
++) {
4058 struct proc_bsdshortinfo proc
;
4059 if (proc_pidinfo(pids
[i
], PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
4060 if (errno
!= ESRCH
) {
4061 (void)job_assumes_zero(j
, errno
);
4065 if (proc
.pbsi_flags
& P_EXEC
) {
4069 job_log(j
, LOG_DEBUG
, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids
[i
]);
4077 job_callback_proc(job_t j
, struct kevent
*kev
)
4079 bool program_changed
= false;
4080 int fflags
= kev
->fflags
;
4082 job_log(j
, LOG_DEBUG
, "EVFILT_PROC event for job.");
4083 log_kevent_struct(LOG_DEBUG
, kev
, 0);
4085 if (fflags
& NOTE_EXEC
) {
4086 program_changed
= true;
4089 struct proc_bsdshortinfo proc
;
4090 if (proc_pidinfo(j
->p
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) > 0) {
4091 char newlabel
[1000];
4093 snprintf(newlabel
, sizeof(newlabel
), "%p.anonymous.%s", j
, proc
.pbsi_comm
);
4095 job_log(j
, LOG_INFO
, "Program changed. Updating the label to: %s", newlabel
);
4097 LIST_REMOVE(j
, label_hash_sle
);
4098 strcpy((char *)j
->label
, newlabel
);
4100 jobmgr_t where2put
= root_jobmgr
;
4101 if (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) {
4104 LIST_INSERT_HEAD(&where2put
->label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
4105 } else if (errno
!= ESRCH
) {
4106 (void)job_assumes_zero(j
, errno
);
4109 if (j
->spawn_reply_port
) {
4110 errno
= job_mig_spawn2_reply(j
->spawn_reply_port
, BOOTSTRAP_SUCCESS
, j
->p
, j
->exit_status_port
);
4112 if (errno
!= MACH_SEND_INVALID_DEST
) {
4113 (void)job_assumes_zero(j
, errno
);
4115 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->exit_status_port
));
4118 j
->spawn_reply_port
= MACH_PORT_NULL
;
4119 j
->exit_status_port
= MACH_PORT_NULL
;
4122 if (j
->xpc_service
&& j
->did_exec
) {
4123 j
->xpcproxy_did_exec
= true;
4127 job_log(j
, LOG_DEBUG
, "Program changed");
4131 if (fflags
& NOTE_FORK
) {
4132 job_log(j
, LOG_DEBUG
, "fork()ed%s", program_changed
? ". For this message only: We don't know whether this event happened before or after execve()." : "");
4133 job_log_children_without_exec(j
);
4136 if (fflags
& NOTE_EXIT
) {
4137 if (kev
->data
& NOTE_EXIT_DECRYPTFAIL
) {
4139 job_log(j
, LOG_WARNING
, "FairPlay decryption failed on binary for job.");
4140 } else if (kev
->data
& NOTE_EXIT_MEMORY
) {
4141 j
->jettisoned
= true;
4142 job_log(j
, LOG_INFO
, "Job was killed due to memory pressure.");
4151 struct waiting4attach
*w4ai
= NULL
;
4152 struct waiting4attach
*w4ait
= NULL
;
4153 LIST_FOREACH_SAFE(w4ai
, &_launchd_domain_waiters
, le
, w4ait
) {
4154 if (w4ai
->dest
== (pid_t
)kev
->ident
) {
4155 waiting4attach_delete(j
->mgr
, w4ai
);
4159 (void)job_dispatch(j
, false);
4165 job_callback_timer(job_t j
, void *ident
)
4168 job_log(j
, LOG_DEBUG
, "j == ident (%p)", ident
);
4169 job_dispatch(j
, true);
4170 } else if (&j
->semaphores
== ident
) {
4171 job_log(j
, LOG_DEBUG
, "&j->semaphores == ident (%p)", ident
);
4172 job_dispatch(j
, false);
4173 } else if (&j
->start_interval
== ident
) {
4174 job_log(j
, LOG_DEBUG
, "&j->start_interval == ident (%p)", ident
);
4175 j
->start_pending
= true;
4176 job_dispatch(j
, false);
4177 } else if (&j
->exit_timeout
== ident
) {
4178 if (!job_assumes(j
, j
->p
!= 0)) {
4182 if (j
->sent_sigkill
) {
4183 uint64_t td
= runtime_get_nanoseconds_since(j
->sent_signal_time
);
4186 td
-= j
->clean_kill
? 0 : j
->exit_timeout
;
4188 job_log(j
, LOG_WARNING
| LOG_CONSOLE
, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j
->clean_kill
? "cleanly " : "", td
);
4189 j
->workaround9359725
= true;
4191 // This basically has to be done off the main thread. We have no
4192 // mechanism for draining the main queue in our run loop (like CF
4193 // does), and the kevent mechanism wants an object to be associated
4194 // as the callback. So we just create a dispatch source and reap the
4195 // errant PID whenever we can. Note that it is not safe for us to do
4196 // any logging in this block, since logging requires exclusive
4197 // access to global data structures that is only protected by the
4199 dispatch_source_t hack_13570156
= dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC
, j
->p
, DISPATCH_PROC_EXIT
, dispatch_get_global_queue(0, 0));
4200 dispatch_source_set_event_handler(hack_13570156
, ^{
4201 pid_t pid
= (pid_t
)dispatch_source_get_handle(hack_13570156
);
4204 (void)waitpid(pid
, &status
, 0);
4205 dispatch_release(hack_13570156
);
4208 dispatch_resume(hack_13570156
);
4210 if (launchd_trap_sigkill_bugs
) {
4211 job_log(j
, LOG_NOTICE
| LOG_CONSOLE
, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
4212 (void)job_assumes_zero(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
));
4215 struct kevent bogus_exit
;
4216 EV_SET(&bogus_exit
, j
->p
, EVFILT_PROC
, 0, NOTE_EXIT
, 0, 0);
4217 jobmgr_callback(j
->mgr
, &bogus_exit
);
4219 if (unlikely(j
->debug_before_kill
)) {
4220 job_log(j
, LOG_NOTICE
, "Exit timeout elapsed. Entering the kernel debugger");
4221 (void)job_assumes_zero(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
));
4224 job_log(j
, LOG_WARNING
| LOG_CONSOLE
, "Exit timeout elapsed (%u seconds). Killing", j
->exit_timeout
);
4228 job_log(j
, LOG_ERR
, "Unrecognized job timer callback: %p", ident
);
4233 job_callback_read(job_t j
, int ident
)
4235 if (ident
== j
->stdin_fd
) {
4236 job_dispatch(j
, true);
4238 socketgroup_callback(j
);
4243 jobmgr_reap_bulk(jobmgr_t jm
, struct kevent
*kev
)
4248 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
4249 jobmgr_reap_bulk(jmi
, kev
);
4252 if ((j
= jobmgr_find_by_pid(jm
, (pid_t
)kev
->ident
, false))) {
4254 job_callback(j
, kev
);
4259 jobmgr_callback(void *obj
, struct kevent
*kev
)
4263 #if TARGET_OS_EMBEDDED
4264 int flag2check
= VQ_MOUNT
;
4266 int flag2check
= VQ_UPDATE
;
4269 switch (kev
->filter
) {
4271 jobmgr_reap_bulk(jm
, kev
);
4272 root_jobmgr
= jobmgr_do_garbage_collection(root_jobmgr
);
4275 switch (kev
->ident
) {
4277 jobmgr_log(jm
, LOG_DEBUG
, "Got SIGTERM. Shutting down.");
4278 return launchd_shutdown();
4280 return calendarinterval_callback();
4282 // Turn on all logging.
4283 launchd_log_perf
= true;
4284 launchd_log_debug
= true;
4285 launchd_log_shutdown
= true;
4286 /* Hopefully /var is available by this point. If not, uh, oh well.
4287 * It's just a debugging facility.
4289 return jobmgr_log_perf_statistics(jm
, false);
4291 return jobmgr_log_perf_statistics(jm
, true);
4293 jobmgr_log(jm
, LOG_ERR
, "Unrecognized signal: %lu: %s", kev
->ident
, strsignal(kev
->ident
));
4297 if (kev
->fflags
& flag2check
) {
4298 if (!launchd_var_available
) {
4300 if (stat("/var/log", &sb
) == 0 && (sb
.st_mode
& S_IWUSR
)) {
4301 launchd_var_available
= true;
4304 } else if (kev
->fflags
& VQ_MOUNT
) {
4305 jobmgr_dispatch_all(jm
, true);
4307 jobmgr_dispatch_all_semaphores(jm
);
4310 if (kev
->ident
== (uintptr_t)&sorted_calendar_events
) {
4311 calendarinterval_callback();
4312 } else if (kev
->ident
== (uintptr_t)jm
) {
4313 jobmgr_log(jm
, LOG_DEBUG
, "Shutdown timer firing.");
4314 jobmgr_still_alive_with_check(jm
);
4315 } else if (kev
->ident
== (uintptr_t)&jm
->reboot_flags
) {
4316 jobmgr_do_garbage_collection(jm
);
4317 } else if (kev
->ident
== (uintptr_t)&launchd_runtime_busy_time
) {
4318 jobmgr_log(jm
, LOG_DEBUG
, "Idle exit timer fired. Shutting down.");
4319 if (jobmgr_assumes_zero(jm
, runtime_busy_cnt
) == 0) {
4320 return launchd_shutdown();
4322 #if HAVE_SYSTEMSTATS
4323 } else if (kev
->ident
== (uintptr_t)systemstats_timer_callback
) {
4324 systemstats_timer_callback();
4329 if (kev
->ident
== (uintptr_t)s_no_hang_fd
) {
4330 int _no_hang_fd
= open("/dev/autofs_nowait", O_EVTONLY
| O_NONBLOCK
);
4331 if (unlikely(_no_hang_fd
!= -1)) {
4332 jobmgr_log(root_jobmgr
, LOG_DEBUG
, "/dev/autofs_nowait has appeared!");
4333 (void)jobmgr_assumes_zero_p(root_jobmgr
, kevent_mod((uintptr_t)s_no_hang_fd
, EVFILT_VNODE
, EV_DELETE
, 0, 0, NULL
));
4334 (void)jobmgr_assumes_zero_p(root_jobmgr
, runtime_close(s_no_hang_fd
));
4335 s_no_hang_fd
= _fd(_no_hang_fd
);
4337 } else if (pid1_magic
&& launchd_console
&& kev
->ident
== (uintptr_t)fileno(launchd_console
)) {
4339 if (jobmgr_assumes_zero_p(jm
, cfd
= open(_PATH_CONSOLE
, O_WRONLY
| O_NOCTTY
)) != -1) {
4341 if (!(launchd_console
= fdopen(cfd
, "w"))) {
4342 (void)jobmgr_assumes_zero(jm
, errno
);
4349 jobmgr_log(jm
, LOG_ERR
, "Unrecognized kevent filter: %hd", kev
->filter
);
4354 job_callback(void *obj
, struct kevent
*kev
)
4358 job_log(j
, LOG_DEBUG
, "Dispatching kevent callback.");
4360 switch (kev
->filter
) {
4362 return job_callback_proc(j
, kev
);
4364 return job_callback_timer(j
, (void *) kev
->ident
);
4366 return job_callback_read(j
, (int) kev
->ident
);
4367 case EVFILT_MACHPORT
:
4368 return (void)job_dispatch(j
, true);
4370 job_log(j
, LOG_ERR
, "Unrecognized job callback filter: %hd", kev
->filter
);
4383 u_int proc_fflags
= NOTE_EXIT
|NOTE_FORK
|NOTE_EXEC
|NOTE_EXIT_DETAIL
|NOTE_EXITSTATUS
;
4385 if (!job_assumes(j
, j
->mgr
!= NULL
)) {
4389 if (unlikely(job_active(j
))) {
4390 job_log(j
, LOG_DEBUG
, "Already started");
4394 if (!LIST_EMPTY(&j
->mgr
->attaches
)) {
4395 job_log(j
, LOG_DEBUG
, "Looking for attachments for job: %s", j
->label
);
4396 (void)waiting4attach_find(j
->mgr
, j
);
4400 * Some users adjust the wall-clock and then expect software to not notice.
4401 * Therefore, launchd must use an absolute clock instead of the wall clock
4402 * wherever possible.
4404 td
= runtime_get_nanoseconds_since(j
->start_time
);
4407 if (j
->start_time
&& (td
< j
->min_run_time
) && !j
->legacy_mach_job
&& !j
->inetcompat
&& !j
->unthrottle
) {
4408 time_t respawn_delta
= j
->min_run_time
- (uint32_t)td
;
4409 /* We technically should ref-count throttled jobs to prevent idle exit,
4410 * but we're not directly tracking the 'throttled' state at the moment.
4412 job_log(j
, LOG_NOTICE
, "Throttling respawn: Will start in %ld seconds", respawn_delta
);
4413 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, respawn_delta
, j
));
4418 if (likely(!j
->legacy_mach_job
)) {
4419 sipc
= ((!SLIST_EMPTY(&j
->sockets
) || !SLIST_EMPTY(&j
->machservices
)) && !j
->deny_job_creation
) || j
->embedded_god
;
4423 (void)job_assumes_zero_p(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, spair
));
4426 (void)job_assumes_zero_p(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, execspair
));
4428 switch (c
= runtime_fork(j
->weird_bootstrap
? j
->j_port
: j
->mgr
->jm_port
)) {
4430 job_log_error(j
, LOG_ERR
, "fork() failed, will try again in one second");
4431 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, 1, j
));
4434 (void)job_assumes_zero(j
, runtime_close(execspair
[0]));
4435 (void)job_assumes_zero(j
, runtime_close(execspair
[1]));
4437 (void)job_assumes_zero(j
, runtime_close(spair
[0]));
4438 (void)job_assumes_zero(j
, runtime_close(spair
[1]));
4442 if (unlikely(_vproc_post_fork_ping())) {
4443 _exit(EXIT_FAILURE
);
4446 (void)job_assumes_zero(j
, runtime_close(execspair
[0]));
4447 // wait for our parent to say they've attached a kevent to us
4448 read(_fd(execspair
[1]), &c
, sizeof(c
));
4451 (void)job_assumes_zero(j
, runtime_close(spair
[0]));
4452 snprintf(nbuf
, sizeof(nbuf
), "%d", spair
[1]);
4453 setenv(LAUNCHD_TRUSTED_FD_ENV
, nbuf
, 1);
4458 j
->start_time
= runtime_get_opaque_time();
4460 job_log(j
, LOG_DEBUG
, "Started as PID: %u", c
);
4462 j
->did_exec
= false;
4464 j
->jettisoned
= false;
4465 j
->xpcproxy_did_exec
= false;
4466 j
->checkedin
= false;
4467 j
->start_pending
= false;
4471 j
->workaround9359725
= false;
4472 j
->implicit_reap
= false;
4473 j
->unthrottle
= false;
4474 if (j
->needs_kickoff
) {
4475 j
->needs_kickoff
= false;
4477 if (SLIST_EMPTY(&j
->semaphores
)) {
4478 j
->ondemand
= false;
4482 if (j
->has_console
) {
4486 job_log(j
, LOG_PERF
, "Job started.");
4489 LIST_INSERT_HEAD(&j
->mgr
->active_jobs
[ACTIVE_JOB_HASH(c
)], j
, pid_hash_sle
);
4490 LIST_INSERT_HEAD(&managed_actives
[ACTIVE_JOB_HASH(c
)], j
, global_pid_hash_sle
);
4493 struct proc_uniqidentifierinfo info
;
4494 if (proc_pidinfo(c
, PROC_PIDUNIQIDENTIFIERINFO
, 0, &info
, PROC_PIDUNIQIDENTIFIERINFO_SIZE
) != 0) {
4495 // ignore errors here, kevent_mod below will catch them and clean up
4496 j
->uniqueid
= info
.p_uniqueid
;
4499 j
->mgr
->normal_active_cnt
++;
4500 j
->fork_fd
= _fd(execspair
[0]);
4501 (void)job_assumes_zero(j
, runtime_close(execspair
[1]));
4503 (void)job_assumes_zero(j
, runtime_close(spair
[1]));
4504 ipc_open(_fd(spair
[0]), j
);
4506 if (kevent_mod(c
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
? root_jobmgr
: j
->mgr
) != -1) {
4509 if (errno
== ESRCH
) {
4510 job_log(j
, LOG_ERR
, "Child was killed before we could attach a kevent.");
4512 (void)job_assumes(j
, errno
== ESRCH
);
4516 /* If we have reaped this job within this same run loop pass, then
4517 * it will be currently ignored. So if there's a failure to attach a
4518 * kevent, we need to make sure that we watch the job so that we can
4521 * See <rdar://problem/10140809>.
4526 #if HAVE_SYSTEMSTATS
4527 if (systemstats_is_enabled()) {
4528 /* We don't really *need* to make the full rusage call -- it
4529 * will be mostly 0s and very small numbers. We only need
4530 * ri_proc_start_abstime, because that's how we disambiguiate
4531 * PIDs when they wrap around; and the UUID.
4532 * In the future we should use the 64-bit process unique ID,
4533 * so there's nothing to disambiguiate, and skip the full
4536 * Well, the future is now.
4538 if (_systemstats_get_property(SYSTEMSTATS_API_VERSION
, SYSTEMSTATS_WRITER_launchd
, SYSTEMSTATS_PROPERTY_LAUNCHD_SHOULD_LOG_JOB_START
)) {
4539 job_log_perf_statistics(j
, NULL
, -3);
4543 j
->wait4debugger_oneshot
= false;
4544 if (likely(!j
->stall_before_exec
)) {
4552 job_start_child(job_t j
)
4554 typeof(posix_spawn
) *psf
;
4555 const char *file2exec
= "/usr/libexec/launchproxy";
4557 posix_spawnattr_t spattr
;
4558 int gflags
= GLOB_NOSORT
|GLOB_NOCHECK
|GLOB_TILDE
|GLOB_DOOFFS
;
4560 short spflags
= POSIX_SPAWN_SETEXEC
;
4561 int psproctype
= POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND
;
4562 size_t binpref_out_cnt
= 0;
4565 (void)job_assumes_zero(j
, posix_spawnattr_init(&spattr
));
4567 job_setup_attributes(j
);
4569 bool use_xpcproxy
= false;
4570 struct waiting4attach
*w4a
= waiting4attach_find(j
->mgr
, j
);
4572 (void)setenv(XPC_SERVICE_ENV_ATTACHED
, "1", 1);
4573 if (!j
->xpc_service
) {
4574 use_xpcproxy
= true;
4579 argv
= alloca(3 * sizeof(char *));
4580 argv
[0] = "/usr/libexec/xpcproxy";
4584 file2exec
= argv
[0];
4585 } else if (unlikely(j
->argv
&& j
->globargv
)) {
4587 for (i
= 0; i
< j
->argc
; i
++) {
4589 gflags
|= GLOB_APPEND
;
4591 if (glob(j
->argv
[i
], gflags
, NULL
, &g
) != 0) {
4592 job_log_error(j
, LOG_ERR
, "glob(\"%s\")", j
->argv
[i
]);
4596 g
.gl_pathv
[0] = (char *)file2exec
;
4597 argv
= (const char **)g
.gl_pathv
;
4598 } else if (likely(j
->argv
)) {
4599 argv
= alloca((j
->argc
+ 2) * sizeof(char *));
4600 argv
[0] = file2exec
;
4601 for (i
= 0; i
< j
->argc
; i
++) {
4602 argv
[i
+ 1] = j
->argv
[i
];
4606 argv
= alloca(3 * sizeof(char *));
4607 argv
[0] = file2exec
;
4612 if (likely(!(j
->inetcompat
|| use_xpcproxy
))) {
4616 if (unlikely(j
->wait4debugger
|| j
->wait4debugger_oneshot
)) {
4618 job_log(j
, LOG_WARNING
, "Spawned and waiting for the debugger to attach before continuing...");
4620 spflags
|= POSIX_SPAWN_START_SUSPENDED
;
4623 #if !TARGET_OS_EMBEDDED
4624 if (unlikely(j
->disable_aslr
)) {
4625 spflags
|= _POSIX_SPAWN_DISABLE_ASLR
;
4628 spflags
|= j
->pstype
;
4630 (void)job_assumes_zero(j
, posix_spawnattr_setflags(&spattr
, spflags
));
4631 if (unlikely(j
->j_binpref_cnt
)) {
4632 (void)job_assumes_zero(j
, posix_spawnattr_setbinpref_np(&spattr
, j
->j_binpref_cnt
, j
->j_binpref
, &binpref_out_cnt
));
4633 (void)job_assumes(j
, binpref_out_cnt
== j
->j_binpref_cnt
);
4636 psproctype
= j
->psproctype
;
4637 (void)job_assumes_zero(j
, posix_spawnattr_setprocesstype_np(&spattr
, psproctype
));
4639 #if TARGET_OS_EMBEDDED
4640 /* Set jetsam attributes. POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY guards
4641 * against a race which arises if, during spawn, an initial jetsam property
4642 * update occurs before the values below are applied. In this case, the flag
4643 * ensures that the subsequent change is ignored; the explicit update should
4644 * be given priority.
4646 (void)job_assumes_zero(j
, posix_spawnattr_setjetsam(&spattr
,
4647 POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY
| (j
->jetsam_memory_limit_background
? POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND
: 0),
4648 j
->jetsam_priority
, j
->jetsam_memlimit
));
4651 mach_port_array_t sports
= NULL
;
4652 mach_msg_type_number_t sports_cnt
= 0;
4653 kern_return_t kr
= vproc_mig_get_listener_port_rights(bootstrap_port
, &sports
, &sports_cnt
);
4654 if (kr
== 0 && sports_cnt
) {
4655 /* For some reason, this SPI takes a count as a signed quantity. */
4656 (void)posix_spawnattr_set_importancewatch_port_np(&spattr
, (int)sports_cnt
, sports
);
4658 /* All "count" parameters in MIG are counts of the array. So an array of
4659 * mach_port_t containing 10 elements will have a count of ten, but it
4660 * will occupy 40 bytes. So we must do the multiplication here to pass
4663 * Note that we do NOT release the send rights. We need them to be valid
4664 * at the time they are passed to posix_spawn(2). When we exec(3) using
4665 * posix_spawn(2), they'll be cleaned up anyway.
4667 mig_deallocate((vm_address_t
)sports
, sports_cnt
* sizeof(sports
[0]));
4668 } else if (kr
!= BOOTSTRAP_UNKNOWN_SERVICE
) {
4669 (void)job_assumes_zero(j
, kr
);
4672 #if TARGET_OS_EMBEDDED
4673 if (!j
->app
|| j
->system_app
) {
4674 (void)job_assumes_zero(j
, posix_spawnattr_setcpumonitor_default(&spattr
));
4677 (void)job_assumes_zero(j
, posix_spawnattr_setcpumonitor_default(&spattr
));
4680 #if !TARGET_OS_EMBEDDED
4681 struct task_qos_policy qosinfo
= {
4682 .task_latency_qos_tier
= LATENCY_QOS_LAUNCH_DEFAULT_TIER
,
4683 .task_throughput_qos_tier
= THROUGHPUT_QOS_LAUNCH_DEFAULT_TIER
,
4686 if (!j
->legacy_timers
) {
4687 kr
= task_policy_set(mach_task_self(), TASK_BASE_QOS_POLICY
, (task_policy_t
)&qosinfo
, TASK_QOS_POLICY_COUNT
);
4688 (void)job_assumes_zero_p(j
, kr
);
4692 #if HAVE_RESPONSIBILITY
4693 /* Specify which process is responsible for the new job. Per-app XPC
4694 * services are the responsibility of the app. Other processes are
4695 * responsible for themselves. This decision is final and also applies
4696 * to the process's children, so don't initialize responsibility when
4697 * starting a per-user launchd.
4699 if (j
->mgr
->req_pid
) {
4700 responsibility_init2(j
->mgr
->req_pid
, NULL
);
4701 } else if (!j
->per_user
) {
4702 responsibility_init2(getpid(), j
->prog
? j
->prog
: j
->argv
[0]);
4707 if (j
->quarantine_data
) {
4710 if (job_assumes(j
, qp
= qtn_proc_alloc())) {
4711 if (job_assumes_zero(j
, qtn_proc_init_with_data(qp
, j
->quarantine_data
, j
->quarantine_data_sz
) == 0)) {
4712 (void)job_assumes_zero(j
, qtn_proc_apply_to_self(qp
));
4719 #if TARGET_OS_EMBEDDED
4720 struct sandbox_spawnattrs sbattrs
;
4721 if (j
->seatbelt_profile
|| j
->container_identifier
) {
4722 sandbox_spawnattrs_init(&sbattrs
);
4723 if (j
->seatbelt_profile
) {
4724 sandbox_spawnattrs_setprofilename(&sbattrs
, j
->seatbelt_profile
);
4726 if (j
->container_identifier
) {
4727 sandbox_spawnattrs_setcontainer(&sbattrs
, j
->container_identifier
);
4729 (void)job_assumes_zero(j
, posix_spawnattr_setmacpolicyinfo_np(&spattr
, "Sandbox", &sbattrs
, sizeof(sbattrs
)));
4732 if (j
->seatbelt_profile
) {
4733 char *seatbelt_err_buf
= NULL
;
4735 if (job_assumes_zero_p(j
, sandbox_init(j
->seatbelt_profile
, j
->seatbelt_flags
, &seatbelt_err_buf
)) == -1) {
4736 if (seatbelt_err_buf
) {
4737 job_log(j
, LOG_ERR
, "Sandbox failed to init: %s", seatbelt_err_buf
);
4745 psf
= j
->prog
? posix_spawn
: posix_spawnp
;
4747 if (likely(!(j
->inetcompat
|| use_xpcproxy
))) {
4748 file2exec
= j
->prog
? j
->prog
: argv
[0];
4751 errno
= psf(NULL
, file2exec
, NULL
, &spattr
, (char *const *)argv
, environ
);
4753 #if HAVE_SANDBOX && !TARGET_OS_EMBEDDED
4760 jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
)
4766 if (jm
->parentmgr
) {
4767 jobmgr_export_env_from_other_jobs(jm
->parentmgr
, dict
);
4769 char **tmpenviron
= environ
;
4770 for (; *tmpenviron
; tmpenviron
++) {
4772 launch_data_t s
= launch_data_alloc(LAUNCH_DATA_STRING
);
4773 launch_data_set_string(s
, strchr(*tmpenviron
, '=') + 1);
4774 strncpy(envkey
, *tmpenviron
, sizeof(envkey
));
4775 *(strchr(envkey
, '=')) = '\0';
4776 launch_data_dict_insert(dict
, s
, envkey
);
4780 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
4781 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
4782 if ((tmp
= launch_data_new_string(ei
->value
))) {
4783 launch_data_dict_insert(dict
, tmp
, ei
->key
);
4790 jobmgr_setup_env_from_other_jobs(jobmgr_t jm
)
4795 if (jm
->parentmgr
) {
4796 jobmgr_setup_env_from_other_jobs(jm
->parentmgr
);
4799 LIST_FOREACH(ji
, &jm
->global_env_jobs
, global_env_sle
) {
4800 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
4801 setenv(ei
->key
, ei
->value
, 1);
4807 job_log_pids_with_weird_uids(job_t j
)
4809 size_t len
= sizeof(pid_t
) * get_kern_max_proc();
4811 uid_t u
= j
->mach_uid
;
4812 int i
= 0, kp_cnt
= 0;
4814 if (!launchd_apple_internal
) {
4819 if (!job_assumes(j
, pids
!= NULL
)) {
4823 runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS
, j
->p
, u
, 0);
4825 /* libproc actually has some serious performance drawbacks when used over sysctl(3) in
4826 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
4827 * one kernel call, libproc requires that we get a list of PIDs we're interested in
4828 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
4829 * struct back in a single call for each one.
4831 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
4832 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
4833 * libproc could go stale before we call proc_pidinfo().
4835 * Note that proc_list*() APIs return the number of PIDs given back, not the number
4836 * of bytes written to the buffer.
4838 if (job_assumes_zero_p(j
, (kp_cnt
= proc_listallpids(pids
, len
))) == -1) {
4842 for (i
= 0; i
< kp_cnt
; i
++) {
4843 struct proc_bsdshortinfo proc
;
4844 /* We perhaps should not log a bug here if we get ESRCH back, due to the race
4847 if (proc_pidinfo(pids
[i
], PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
4848 if (errno
!= ESRCH
) {
4849 (void)job_assumes_zero(j
, errno
);
4854 uid_t i_euid
= proc
.pbsi_uid
;
4855 uid_t i_uid
= proc
.pbsi_ruid
;
4856 uid_t i_svuid
= proc
.pbsi_svuid
;
4857 pid_t i_pid
= pids
[i
];
4859 if (i_euid
!= u
&& i_uid
!= u
&& i_svuid
!= u
) {
4863 job_log(j
, LOG_ERR
, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid
, proc
.pbsi_comm
, i_uid
, i_euid
, i_svuid
);
4865 // Temporarily disabled due to 5423935 and 4946119.
4867 // Ask the accountless process to exit.
4868 (void)job_assumes_zero_p(j
, kill2(i_pid
, SIGTERM
));
4876 static struct passwd
*
4877 job_getpwnam(job_t j
, const char *name
)
4880 * methodology for system daemons
4882 * first lookup user record without any opendirectoryd interaction,
4883 * we don't know what interprocess dependencies might be in flight.
4884 * if that fails, we re-enable opendirectoryd interaction and
4885 * re-issue the lookup. We have to disable the libinfo L1 cache
4886 * otherwise libinfo will return the negative cache entry on the retry
4888 #if !TARGET_OS_EMBEDDED
4889 struct passwd
*pw
= NULL
;
4891 if (pid1_magic
&& j
->mgr
== root_jobmgr
) {
4892 // 1 == SEARCH_MODULE_FLAG_DISABLED
4893 si_search_module_set_flags("ds", 1);
4894 gL1CacheEnabled
= false;
4896 pw
= getpwnam(name
);
4897 si_search_module_set_flags("ds", 0);
4901 pw
= getpwnam(name
);
4907 return getpwnam(name
);
4911 static struct group
*
4912 job_getgrnam(job_t j
, const char *name
)
4914 #if !TARGET_OS_EMBEDDED
4915 struct group
*gr
= NULL
;
4917 if (pid1_magic
&& j
->mgr
== root_jobmgr
) {
4918 si_search_module_set_flags("ds", 1);
4919 gL1CacheEnabled
= false;
4921 gr
= getgrnam(name
);
4923 si_search_module_set_flags("ds", 0);
4927 gr
= getgrnam(name
);
4933 return getgrnam(name
);
4938 job_postfork_test_user(job_t j
)
4940 // This function is all about 5201578
4942 const char *home_env_var
= getenv("HOME");
4943 const char *user_env_var
= getenv("USER");
4944 const char *logname_env_var
= getenv("LOGNAME");
4945 uid_t tmp_uid
, local_uid
= getuid();
4946 gid_t tmp_gid
, local_gid
= getgid();
4947 char shellpath
[PATH_MAX
];
4948 char homedir
[PATH_MAX
];
4949 char loginname
[2000];
4953 if (!job_assumes(j
, home_env_var
&& user_env_var
&& logname_env_var
4954 && strcmp(user_env_var
, logname_env_var
) == 0)) {
4958 if ((pwe
= job_getpwnam(j
, user_env_var
)) == NULL
) {
4959 job_log(j
, LOG_ERR
, "The account \"%s\" has been deleted out from under us!", user_env_var
);
4964 * We must copy the results of getpw*().
4966 * Why? Because subsequent API calls may call getpw*() as a part of
4967 * their implementation. Since getpw*() returns a [now thread scoped]
4968 * global, we must therefore cache the results before continuing.
4971 tmp_uid
= pwe
->pw_uid
;
4972 tmp_gid
= pwe
->pw_gid
;
4974 strlcpy(shellpath
, pwe
->pw_shell
, sizeof(shellpath
));
4975 strlcpy(loginname
, pwe
->pw_name
, sizeof(loginname
));
4976 strlcpy(homedir
, pwe
->pw_dir
, sizeof(homedir
));
4978 if (strcmp(loginname
, logname_env_var
) != 0) {
4979 job_log(j
, LOG_ERR
, "The %s environmental variable changed out from under us!", "USER");
4982 if (strcmp(homedir
, home_env_var
) != 0) {
4983 job_log(j
, LOG_ERR
, "The %s environmental variable changed out from under us!", "HOME");
4986 if (local_uid
!= tmp_uid
) {
4987 job_log(j
, LOG_ERR
, "The %cID of the account (%u) changed out from under us (%u)!",
4988 'U', tmp_uid
, local_uid
);
4991 if (local_gid
!= tmp_gid
) {
4992 job_log(j
, LOG_ERR
, "The %cID of the account (%u) changed out from under us (%u)!",
4993 'G', tmp_gid
, local_gid
);
5000 (void)job_assumes_zero_p(j
, kill2(getppid(), SIGTERM
));
5001 _exit(EXIT_FAILURE
);
5003 job_log(j
, LOG_WARNING
, "In a future build of the OS, this error will be fatal.");
5008 job_postfork_become_user(job_t j
)
5010 char loginname
[2000];
5011 char tmpdirpath
[PATH_MAX
];
5012 char shellpath
[PATH_MAX
];
5013 char homedir
[PATH_MAX
];
5016 gid_t desired_gid
= -1;
5017 uid_t desired_uid
= -1;
5019 if (getuid() != 0) {
5020 return job_postfork_test_user(j
);
5024 * I contend that having UID == 0 and GID != 0 is of dubious value.
5025 * Nevertheless, this used to work in Tiger. See: 5425348
5027 if (j
->groupname
&& !j
->username
) {
5028 j
->username
= "root";
5032 if ((pwe
= job_getpwnam(j
, j
->username
)) == NULL
) {
5033 job_log(j
, LOG_ERR
, "getpwnam(\"%s\") failed", j
->username
);
5036 } else if (j
->mach_uid
) {
5037 if ((pwe
= getpwuid(j
->mach_uid
)) == NULL
) {
5038 job_log(j
, LOG_ERR
, "getpwuid(\"%u\") failed", j
->mach_uid
);
5039 job_log_pids_with_weird_uids(j
);
5047 * We must copy the results of getpw*().
5049 * Why? Because subsequent API calls may call getpw*() as a part of
5050 * their implementation. Since getpw*() returns a [now thread scoped]
5051 * global, we must therefore cache the results before continuing.
5054 desired_uid
= pwe
->pw_uid
;
5055 desired_gid
= pwe
->pw_gid
;
5057 strlcpy(shellpath
, pwe
->pw_shell
, sizeof(shellpath
));
5058 strlcpy(loginname
, pwe
->pw_name
, sizeof(loginname
));
5059 strlcpy(homedir
, pwe
->pw_dir
, sizeof(homedir
));
5061 if (unlikely(pwe
->pw_expire
&& time(NULL
) >= pwe
->pw_expire
)) {
5062 job_log(j
, LOG_ERR
, "Expired account");
5063 _exit(EXIT_FAILURE
);
5067 if (unlikely(j
->username
&& strcmp(j
->username
, loginname
) != 0)) {
5068 job_log(j
, LOG_WARNING
, "Suspicious setup: User \"%s\" maps to user: %s", j
->username
, loginname
);
5069 } else if (unlikely(j
->mach_uid
&& (j
->mach_uid
!= desired_uid
))) {
5070 job_log(j
, LOG_WARNING
, "Suspicious setup: UID %u maps to UID %u", j
->mach_uid
, desired_uid
);
5076 if (unlikely((gre
= job_getgrnam(j
, j
->groupname
)) == NULL
)) {
5077 job_log(j
, LOG_ERR
, "getgrnam(\"%s\") failed", j
->groupname
);
5081 desired_gid
= gre
->gr_gid
;
5084 if (job_assumes_zero_p(j
, setlogin(loginname
)) == -1) {
5085 _exit(EXIT_FAILURE
);
5088 if (job_assumes_zero_p(j
, setgid(desired_gid
)) == -1) {
5089 _exit(EXIT_FAILURE
);
5093 * The kernel team and the DirectoryServices team want initgroups()
5094 * called after setgid(). See 4616864 for more information.
5097 if (likely(!j
->no_init_groups
)) {
5099 if (job_assumes_zero_p(j
, initgroups(loginname
, desired_gid
)) == -1) {
5100 _exit(EXIT_FAILURE
);
5103 /* Do our own little initgroups(). We do this to guarantee that we're
5104 * always opted into dynamic group resolution in the kernel. initgroups(3)
5105 * does not make this guarantee.
5107 int groups
[NGROUPS
], ngroups
;
5109 // A failure here isn't fatal, and we'll still get data we can use.
5110 (void)job_assumes_zero_p(j
, getgrouplist(j
->username
, desired_gid
, groups
, &ngroups
));
5112 if (job_assumes_zero_p(j
, syscall(SYS_initgroups
, ngroups
, groups
, desired_uid
)) == -1) {
5113 _exit(EXIT_FAILURE
);
5118 if (job_assumes_zero_p(j
, setuid(desired_uid
)) == -1) {
5119 _exit(EXIT_FAILURE
);
5122 r
= confstr(_CS_DARWIN_USER_TEMP_DIR
, tmpdirpath
, sizeof(tmpdirpath
));
5124 if (likely(r
> 0 && r
< sizeof(tmpdirpath
))) {
5125 setenv("TMPDIR", tmpdirpath
, 0);
5128 setenv("SHELL", shellpath
, 0);
5129 setenv("HOME", homedir
, 0);
5130 setenv("USER", loginname
, 0);
5131 setenv("LOGNAME", loginname
, 0);
5135 job_setup_attributes(job_t j
)
5137 struct limititem
*li
;
5140 if (unlikely(j
->setnice
)) {
5141 (void)job_assumes_zero_p(j
, setpriority(PRIO_PROCESS
, 0, j
->nice
));
5144 SLIST_FOREACH(li
, &j
->limits
, sle
) {
5147 if (job_assumes_zero_p(j
, getrlimit(li
->which
, &rl
) == -1)) {
5152 rl
.rlim_max
= li
->lim
.rlim_max
;
5155 rl
.rlim_cur
= li
->lim
.rlim_cur
;
5158 if (setrlimit(li
->which
, &rl
) == -1) {
5159 job_log_error(j
, LOG_WARNING
, "setrlimit()");
5163 if (unlikely(!j
->inetcompat
&& j
->session_create
)) {
5164 launchd_SessionCreate();
5167 if (unlikely(j
->low_pri_io
)) {
5168 (void)job_assumes_zero_p(j
, setiopolicy_np(IOPOL_TYPE_DISK
, IOPOL_SCOPE_PROCESS
, IOPOL_THROTTLE
));
5170 if (j
->low_priority_background_io
) {
5171 (void)job_assumes_zero_p(j
, setiopolicy_np(IOPOL_TYPE_DISK
, IOPOL_SCOPE_DARWIN_BG
, IOPOL_THROTTLE
));
5173 if (unlikely(j
->rootdir
)) {
5174 (void)job_assumes_zero_p(j
, chroot(j
->rootdir
));
5175 (void)job_assumes_zero_p(j
, chdir("."));
5178 job_postfork_become_user(j
);
5180 if (unlikely(j
->workingdir
)) {
5181 if (chdir(j
->workingdir
) == -1) {
5182 if (errno
== ENOENT
|| errno
== ENOTDIR
) {
5183 job_log(j
, LOG_ERR
, "Job specified non-existent working directory: %s", j
->workingdir
);
5185 (void)job_assumes_zero(j
, errno
);
5190 if (unlikely(j
->setmask
)) {
5195 (void)job_assumes_zero_p(j
, dup2(j
->stdin_fd
, STDIN_FILENO
));
5197 job_setup_fd(j
, STDIN_FILENO
, j
->stdinpath
, O_RDONLY
|O_CREAT
);
5199 job_setup_fd(j
, STDOUT_FILENO
, j
->stdoutpath
, O_WRONLY
|O_CREAT
|O_APPEND
);
5200 job_setup_fd(j
, STDERR_FILENO
, j
->stderrpath
, O_WRONLY
|O_CREAT
|O_APPEND
);
5202 jobmgr_setup_env_from_other_jobs(j
->mgr
);
5204 SLIST_FOREACH(ei
, &j
->env
, sle
) {
5205 setenv(ei
->key
, ei
->value
, 1);
5208 #if !TARGET_OS_EMBEDDED
5209 if (j
->jetsam_properties
) {
5210 (void)job_assumes_zero(j
, proc_setpcontrol(PROC_SETPC_TERMINATE
));
5214 #if TARGET_OS_EMBEDDED
5215 if (j
->main_thread_priority
!= 0) {
5216 struct sched_param params
;
5217 bzero(¶ms
, sizeof(params
));
5218 params
.sched_priority
= j
->main_thread_priority
;
5219 (void)job_assumes_zero_p(j
, pthread_setschedparam(pthread_self(), SCHED_OTHER
, ¶ms
));
5224 * We'd like to call setsid() unconditionally, but we have reason to
5225 * believe that prevents launchd from being able to send signals to
5226 * setuid children. We'll settle for process-groups.
5228 if (getppid() != 1) {
5229 (void)job_assumes_zero_p(j
, setpgid(0, 0));
5231 (void)job_assumes_zero_p(j
, setsid());
5236 job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
)
5244 if ((fd
= open(path
, flags
|O_NOCTTY
, DEFFILEMODE
)) == -1) {
5245 job_log_error(j
, LOG_WARNING
, "open(\"%s\", ...)", path
);
5249 (void)job_assumes_zero_p(j
, dup2(fd
, target_fd
));
5250 (void)job_assumes_zero(j
, runtime_close(fd
));
5254 calendarinterval_setalarm(job_t j
, struct calendarinterval
*ci
)
5256 struct calendarinterval
*ci_iter
, *ci_prev
= NULL
;
5257 time_t later
, head_later
;
5259 later
= cronemu(ci
->when
.tm_mon
, ci
->when
.tm_mday
, ci
->when
.tm_hour
, ci
->when
.tm_min
);
5261 if (ci
->when
.tm_wday
!= -1) {
5262 time_t otherlater
= cronemu_wday(ci
->when
.tm_wday
, ci
->when
.tm_hour
, ci
->when
.tm_min
);
5264 if (ci
->when
.tm_mday
== -1) {
5267 later
= later
< otherlater
? later
: otherlater
;
5271 ci
->when_next
= later
;
5273 LIST_FOREACH(ci_iter
, &sorted_calendar_events
, global_sle
) {
5274 if (ci
->when_next
< ci_iter
->when_next
) {
5275 LIST_INSERT_BEFORE(ci_iter
, ci
, global_sle
);
5282 if (ci_iter
== NULL
) {
5283 // ci must want to fire after every other timer, or there are no timers
5285 if (LIST_EMPTY(&sorted_calendar_events
)) {
5286 LIST_INSERT_HEAD(&sorted_calendar_events
, ci
, global_sle
);
5288 LIST_INSERT_AFTER(ci_prev
, ci
, global_sle
);
5292 head_later
= LIST_FIRST(&sorted_calendar_events
)->when_next
;
5294 if (job_assumes_zero_p(j
, kevent_mod((uintptr_t)&sorted_calendar_events
, EVFILT_TIMER
, EV_ADD
, NOTE_ABSOLUTE
|NOTE_SECONDS
, head_later
, root_jobmgr
)) != -1) {
5295 char time_string
[100];
5296 size_t time_string_len
;
5298 ctime_r(&later
, time_string
);
5299 time_string_len
= strlen(time_string
);
5301 if (likely(time_string_len
&& time_string
[time_string_len
- 1] == '\n')) {
5302 time_string
[time_string_len
- 1] = '\0';
5305 job_log(j
, LOG_INFO
, "Scheduled to run again at %s", time_string
);
5310 jobmgr_log_bug(_SIMPLE_STRING asl_message
__attribute__((unused
)), void *ctx
, const char *message
)
5313 jobmgr_log(jm
, LOG_ERR
, "%s", message
);
5319 job_log_bug(_SIMPLE_STRING asl_message
__attribute__((unused
)), void *ctx
, const char *message
)
5322 job_log(j
, LOG_ERR
, "%s", message
);
5327 // ri: NULL = please sample j->p; non-NULL = use this sample
5329 job_log_perf_statistics(job_t j
, struct rusage_info_v1
*ri
, int64_t exit_status
)
5331 #if HAVE_SYSTEMSTATS
5332 if (j
->anonymous
|| !j
->p
) {
5335 if (!systemstats_is_enabled()) {
5339 if (j
->cfbundleidentifier
) {
5340 name
= j
->cfbundleidentifier
;
5345 struct rusage_info_v1 ris
;
5348 r
= proc_pid_rusage(j
->p
, RUSAGE_INFO_V1
, (rusage_info_t
)ri
);
5353 job_log_systemstats(j
->p
, j
->uniqueid
, runtime_get_uniqueid(), j
->mgr
->req_pid
, j
->mgr
->req_uniqueid
, name
, ri
, exit_status
);
5355 #pragma unused (j, ri, exit_status)
5359 #if HAVE_SYSTEMSTATS
5360 // ri: NULL = don't write fields from ri; non-NULL = use this sample
5363 job_log_systemstats(pid_t pid
, uint64_t uniqueid
, uint64_t parent_uniqueid
, pid_t req_pid
, uint64_t req_uniqueid
, const char *name
, struct rusage_info_v1
*ri
, int64_t exit_status
)
5365 if (!systemstats_is_enabled()) {
5369 struct systemstats_process_usage_s info
;
5370 bzero(&info
, sizeof(info
));
5373 info
.exit_status
= exit_status
;
5374 info
.uid
= getuid();
5375 info
.ppid
= getpid();
5376 info
.responsible_pid
= req_pid
;
5379 info
.macho_uuid
= (const uint8_t *)&ri
->ri_uuid
;
5380 info
.user_time
= ri
->ri_user_time
;
5381 info
.system_time
= ri
->ri_system_time
;
5382 info
.pkg_idle_wkups
= ri
->ri_pkg_idle_wkups
;
5383 info
.interrupt_wkups
= ri
->ri_interrupt_wkups
;
5384 info
.proc_start_abstime
= ri
->ri_proc_start_abstime
;
5385 info
.proc_exit_abstime
= ri
->ri_proc_exit_abstime
;
5386 #if SYSTEMSTATS_API_VERSION >= 20130319
5387 info
.pageins
= ri
->ri_pageins
;
5388 info
.wired_size
= ri
->ri_wired_size
;
5389 info
.resident_size
= ri
->ri_resident_size
;
5390 info
.phys_footprint
= ri
->ri_phys_footprint
;
5391 // info.purgeablesize = ???
5393 #if SYSTEMSTATS_API_VERSION >= 20130328
5394 info
.child_user_time
= ri
->ri_child_user_time
;
5395 info
.child_system_time
= ri
->ri_child_system_time
;
5396 info
.child_pkg_idle_wkups
= ri
->ri_child_pkg_idle_wkups
;
5397 info
.child_interrupt_wkups
= ri
->ri_child_interrupt_wkups
;
5398 info
.child_pageins
= ri
->ri_child_pageins
;
5399 info
.child_elapsed_abstime
= ri
->ri_child_elapsed_abstime
;
5402 #if SYSTEMSTATS_API_VERSION >= 20130410
5403 info
.uniqueid
= uniqueid
;
5404 info
.parent_uniqueid
= parent_uniqueid
;
5405 info
.responsible_uniqueid
= req_uniqueid
;
5407 systemstats_write_process_usage(&info
);
5409 #endif /* HAVE_SYSTEMSTATS */
5411 struct waiting4attach
*
5412 waiting4attach_new(jobmgr_t jm
, const char *name
, mach_port_t port
, pid_t dest
, xpc_service_type_t type
)
5414 size_t xtra
= strlen(name
) + 1;
5416 struct waiting4attach
*w4a
= malloc(sizeof(*w4a
) + xtra
);
5424 (void)strcpy(w4a
->name
, name
);
5427 LIST_INSERT_HEAD(&_launchd_domain_waiters
, w4a
, le
);
5429 LIST_INSERT_HEAD(&jm
->attaches
, w4a
, le
);
5433 (void)jobmgr_assumes_zero(jm
, launchd_mport_notify_req(port
, MACH_NOTIFY_DEAD_NAME
));
5438 waiting4attach_delete(jobmgr_t jm
, struct waiting4attach
*w4a
)
5440 jobmgr_log(jm
, LOG_DEBUG
, "Canceling dead-name notification for waiter port: 0x%x", w4a
->port
);
5442 LIST_REMOVE(w4a
, le
);
5444 mach_port_t previous
= MACH_PORT_NULL
;
5445 (void)jobmgr_assumes_zero(jm
, mach_port_request_notification(mach_task_self(), w4a
->port
, MACH_NOTIFY_DEAD_NAME
, 0, MACH_PORT_NULL
, MACH_MSG_TYPE_MOVE_SEND_ONCE
, &previous
));
5447 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(previous
));
5450 jobmgr_assumes_zero(jm
, launchd_mport_deallocate(w4a
->port
));
5454 struct waiting4attach
*
5455 waiting4attach_find(jobmgr_t jm
, job_t j
)
5457 char *name2use
= (char *)j
->label
;
5459 struct envitem
*ei
= NULL
;
5460 SLIST_FOREACH(ei
, &j
->env
, sle
) {
5461 if (strcmp(ei
->key
, XPC_SERVICE_RENDEZVOUS_TOKEN
) == 0) {
5462 name2use
= ei
->value
;
5468 struct waiting4attach
*w4ai
= NULL
;
5469 LIST_FOREACH(w4ai
, &jm
->attaches
, le
) {
5470 if (strcmp(name2use
, w4ai
->name
) == 0) {
5471 job_log(j
, LOG_DEBUG
, "Found attachment: %s", name2use
);
5480 job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
)
5482 const char *label2use
= j
? j
->label
: "com.apple.launchd.job-unknown";
5483 const char *mgr2use
= j
? j
->mgr
->name
: "com.apple.launchd.jobmanager-unknown";
5488 struct launchd_syslog_attr attr
= {
5489 .from_name
= launchd_label
,
5490 .about_name
= label2use
,
5491 .session_name
= mgr2use
,
5493 .from_uid
= getuid(),
5494 .from_pid
= getpid(),
5495 .about_pid
= j
? j
->p
: 0,
5498 /* Hack: If bootstrap_port is set, we must be on the child side of a
5499 * fork(2), but before the exec*(3). Let's route the log message back to
5502 if (bootstrap_port
) {
5503 return _vproc_logv(pri
, err
, msg
, ap
);
5506 newmsgsz
= strlen(msg
) + 200;
5507 newmsg
= alloca(newmsgsz
);
5510 #if !TARGET_OS_EMBEDDED
5511 snprintf(newmsg
, newmsgsz
, "%s: %d: %s", msg
, err
, strerror(err
));
5513 snprintf(newmsg
, newmsgsz
, "(%s) %s: %d: %s", label2use
, msg
, err
, strerror(err
));
5516 #if !TARGET_OS_EMBEDDED
5517 snprintf(newmsg
, newmsgsz
, "%s", msg
);
5519 snprintf(newmsg
, newmsgsz
, "(%s) %s", label2use
, msg
);
5523 if (j
&& unlikely(j
->debug
)) {
5524 oldmask
= setlogmask(LOG_UPTO(LOG_DEBUG
));
5527 launchd_vsyslog(&attr
, newmsg
, ap
);
5529 if (j
&& unlikely(j
->debug
)) {
5530 setlogmask(oldmask
);
5535 job_log_error(job_t j
, int pri
, const char *msg
, ...)
5540 job_logv(j
, pri
, errno
, msg
, ap
);
5545 job_log(job_t j
, int pri
, const char *msg
, ...)
5550 job_logv(j
, pri
, 0, msg
, ap
);
5556 jobmgr_log_error(jobmgr_t jm
, int pri
, const char *msg
, ...)
5561 jobmgr_logv(jm
, pri
, errno
, msg
, ap
);
5567 jobmgr_log_perf_statistics(jobmgr_t jm
, bool signal_children
)
5569 #if HAVE_SYSTEMSTATS
5570 // Log information for kernel_task and pid 1 launchd.
5571 if (systemstats_is_enabled() && pid1_magic
&& jm
== root_jobmgr
) {
5572 #if SYSTEMSTATS_API_VERSION >= 20130328
5573 if (_systemstats_get_property(SYSTEMSTATS_API_VERSION
, SYSTEMSTATS_WRITER_launchd
, SYSTEMSTATS_PROPERTY_SHOULD_LOG_ENERGY_STATISTICS
)) {
5574 systemstats_write_intel_energy_statistics(NULL
);
5577 systemstats_write_intel_energy_statistics(NULL
);
5579 job_log_systemstats(0, 0, 0, 0, 0, "com.apple.kernel", NULL
, -1);
5580 job_log_systemstats(1, 1, 0, 1, 1, "com.apple.launchd", NULL
, -1);
5583 jobmgr_t jmi
= NULL
;
5584 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
5585 jobmgr_log_perf_statistics(jmi
, signal_children
);
5588 if (jm
->xpc_singleton
) {
5589 jobmgr_log(jm
, LOG_PERF
, "XPC Singleton Domain: %s", jm
->shortdesc
);
5590 } else if (jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) {
5591 jobmgr_log(jm
, LOG_PERF
, "XPC Private Domain: %s", jm
->owner
);
5592 } else if (jm
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) {
5593 jobmgr_log(jm
, LOG_PERF
, "Created via bootstrap_subset()");
5596 jobmgr_log(jm
, LOG_PERF
, "Jobs in job manager:");
5599 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
5600 job_log_perf_statistics(ji
, NULL
, -1);
5601 if (unlikely(signal_children
) && unlikely(strstr(ji
->label
, "com.apple.launchd.peruser.") == ji
->label
)) {
5602 jobmgr_log(jm
, LOG_PERF
, "Sending SIGINFO to peruser launchd %d", ji
->p
);
5603 kill(ji
->p
, SIGINFO
);
5607 jobmgr_log(jm
, LOG_PERF
, "End of job list.");
5611 jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...)
5616 jobmgr_logv(jm
, pri
, 0, msg
, ap
);
5621 jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
)
5629 size_t i
, o
, jmname_len
= strlen(jm
->name
), newmsgsz
;
5631 newname
= alloca((jmname_len
+ 1) * 2);
5632 newmsgsz
= (jmname_len
+ 1) * 2 + strlen(msg
) + 100;
5633 newmsg
= alloca(newmsgsz
);
5635 for (i
= 0, o
= 0; i
< jmname_len
; i
++, o
++) {
5636 if (jm
->name
[i
] == '%') {
5640 newname
[o
] = jm
->name
[i
];
5645 snprintf(newmsg
, newmsgsz
, "%s: %s: %s", newname
, msg
, strerror(err
));
5647 snprintf(newmsg
, newmsgsz
, "%s: %s", newname
, msg
);
5650 if (jm
->parentmgr
) {
5651 jobmgr_logv(jm
->parentmgr
, pri
, 0, newmsg
, ap
);
5653 struct launchd_syslog_attr attr
= {
5654 .from_name
= launchd_label
,
5655 .about_name
= launchd_label
,
5656 .session_name
= jm
->name
,
5658 .from_uid
= getuid(),
5659 .from_pid
= getpid(),
5660 .about_pid
= getpid(),
5663 launchd_vsyslog(&attr
, newmsg
, ap
);
5667 struct cal_dict_walk
{
5673 calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
)
5675 struct cal_dict_walk
*cdw
= context
;
5676 struct tm
*tmptm
= &cdw
->tmptm
;
5680 if (unlikely(LAUNCH_DATA_INTEGER
!= launch_data_get_type(obj
))) {
5681 // hack to let caller know something went wrong
5686 val
= launch_data_get_integer(obj
);
5689 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is less than zero.", key
);
5690 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MINUTE
) == 0) {
5692 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key
);
5695 tmptm
->tm_min
= (typeof(tmptm
->tm_min
)) val
;
5697 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_HOUR
) == 0) {
5699 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key
);
5702 tmptm
->tm_hour
= (typeof(tmptm
->tm_hour
)) val
;
5704 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_DAY
) == 0) {
5705 if (val
< 1 || val
> 31) {
5706 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key
);
5709 tmptm
->tm_mday
= (typeof(tmptm
->tm_mday
)) val
;
5711 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_WEEKDAY
) == 0) {
5713 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key
);
5716 tmptm
->tm_wday
= (typeof(tmptm
->tm_wday
)) val
;
5718 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MONTH
) == 0) {
5720 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key
);
5723 tmptm
->tm_mon
= (typeof(tmptm
->tm_mon
)) val
;
5724 tmptm
->tm_mon
-= 1; // 4798263 cron compatibility
5730 calendarinterval_new_from_obj(job_t j
, launch_data_t obj
)
5732 struct cal_dict_walk cdw
;
5735 memset(&cdw
.tmptm
, 0, sizeof(0));
5737 cdw
.tmptm
.tm_min
= -1;
5738 cdw
.tmptm
.tm_hour
= -1;
5739 cdw
.tmptm
.tm_mday
= -1;
5740 cdw
.tmptm
.tm_wday
= -1;
5741 cdw
.tmptm
.tm_mon
= -1;
5743 if (!job_assumes(j
, obj
!= NULL
)) {
5747 if (unlikely(LAUNCH_DATA_DICTIONARY
!= launch_data_get_type(obj
))) {
5751 launch_data_dict_iterate(obj
, calendarinterval_new_from_obj_dict_walk
, &cdw
);
5753 if (unlikely(cdw
.tmptm
.tm_sec
== -1)) {
5757 return calendarinterval_new(j
, &cdw
.tmptm
);
5761 calendarinterval_new(job_t j
, struct tm
*w
)
5763 struct calendarinterval
*ci
= calloc(1, sizeof(struct calendarinterval
));
5765 if (!job_assumes(j
, ci
!= NULL
)) {
5772 SLIST_INSERT_HEAD(&j
->cal_intervals
, ci
, sle
);
5774 calendarinterval_setalarm(j
, ci
);
5776 runtime_add_weak_ref();
5782 calendarinterval_delete(job_t j
, struct calendarinterval
*ci
)
5784 SLIST_REMOVE(&j
->cal_intervals
, ci
, calendarinterval
, sle
);
5785 LIST_REMOVE(ci
, global_sle
);
5789 runtime_del_weak_ref();
5793 calendarinterval_sanity_check(void)
5795 struct calendarinterval
*ci
= LIST_FIRST(&sorted_calendar_events
);
5796 time_t now
= time(NULL
);
5798 if (unlikely(ci
&& (ci
->when_next
< now
))) {
5799 (void)jobmgr_assumes_zero_p(root_jobmgr
, raise(SIGUSR1
));
5804 calendarinterval_callback(void)
5806 struct calendarinterval
*ci
, *ci_next
;
5807 time_t now
= time(NULL
);
5809 LIST_FOREACH_SAFE(ci
, &sorted_calendar_events
, global_sle
, ci_next
) {
5812 if (ci
->when_next
> now
) {
5816 LIST_REMOVE(ci
, global_sle
);
5817 calendarinterval_setalarm(j
, ci
);
5819 j
->start_pending
= true;
5820 job_dispatch(j
, false);
5825 socketgroup_new(job_t j
, const char *name
, int *fds
, size_t fd_cnt
)
5827 struct socketgroup
*sg
= calloc(1, sizeof(struct socketgroup
) + strlen(name
) + 1);
5829 if (!job_assumes(j
, sg
!= NULL
)) {
5833 sg
->fds
= calloc(1, fd_cnt
* sizeof(int));
5834 sg
->fd_cnt
= fd_cnt
;
5836 if (!job_assumes(j
, sg
->fds
!= NULL
)) {
5841 memcpy(sg
->fds
, fds
, fd_cnt
* sizeof(int));
5842 strcpy(sg
->name_init
, name
);
5844 SLIST_INSERT_HEAD(&j
->sockets
, sg
, sle
);
5846 runtime_add_weak_ref();
5852 socketgroup_delete(job_t j
, struct socketgroup
*sg
)
5856 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
5858 struct sockaddr_storage ss
;
5859 struct sockaddr_un
*sun
= (struct sockaddr_un
*)&ss
;
5860 socklen_t ss_len
= sizeof(ss
);
5863 if (job_assumes_zero(j
, getsockname(sg
->fds
[i
], (struct sockaddr
*)&ss
, &ss_len
) != -1)
5864 && job_assumes(j
, ss_len
> 0) && (ss
.ss_family
== AF_UNIX
)) {
5865 (void)job_assumes(j
, unlink(sun
->sun_path
) != -1);
5866 // We might conditionally need to delete a directory here
5869 (void)job_assumes_zero_p(j
, runtime_close(sg
->fds
[i
]));
5872 SLIST_REMOVE(&j
->sockets
, sg
, socketgroup
, sle
);
5877 runtime_del_weak_ref();
5881 socketgroup_kevent_mod(job_t j
, struct socketgroup
*sg
, bool do_add
)
5883 struct kevent kev
[sg
->fd_cnt
];
5885 unsigned int i
, buf_off
= 0;
5887 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
5888 EV_SET(&kev
[i
], sg
->fds
[i
], EVFILT_READ
, do_add
? EV_ADD
: EV_DELETE
, 0, 0, j
);
5889 buf_off
+= snprintf(buf
+ buf_off
, sizeof(buf
) - buf_off
, " %d", sg
->fds
[i
]);
5892 job_log(j
, LOG_DEBUG
, "%s Sockets:%s", do_add
? "Watching" : "Ignoring", buf
);
5894 (void)job_assumes_zero_p(j
, kevent_bulk_mod(kev
, sg
->fd_cnt
));
5896 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
5897 (void)job_assumes(j
, kev
[i
].flags
& EV_ERROR
);
5898 errno
= (typeof(errno
)) kev
[i
].data
;
5899 (void)job_assumes_zero(j
, kev
[i
].data
);
5904 socketgroup_ignore(job_t j
, struct socketgroup
*sg
)
5906 socketgroup_kevent_mod(j
, sg
, false);
5910 socketgroup_watch(job_t j
, struct socketgroup
*sg
)
5912 socketgroup_kevent_mod(j
, sg
, true);
5916 socketgroup_callback(job_t j
)
5918 job_dispatch(j
, true);
5922 envitem_new(job_t j
, const char *k
, const char *v
, bool global
)
5924 if (global
&& !launchd_allow_global_dyld_envvars
) {
5925 if (strncmp("DYLD_", k
, sizeof("DYLD_") - 1) == 0) {
5926 job_log(j
, LOG_ERR
, "Ignoring global environment variable submitted by job (variable=value): %s=%s", k
, v
);
5931 struct envitem
*ei
= calloc(1, sizeof(struct envitem
) + strlen(k
) + 1 + strlen(v
) + 1);
5933 if (!job_assumes(j
, ei
!= NULL
)) {
5937 strcpy(ei
->key_init
, k
);
5938 ei
->value
= ei
->key_init
+ strlen(k
) + 1;
5939 strcpy(ei
->value
, v
);
5942 if (SLIST_EMPTY(&j
->global_env
)) {
5943 LIST_INSERT_HEAD(&j
->mgr
->global_env_jobs
, j
, global_env_sle
);
5945 SLIST_INSERT_HEAD(&j
->global_env
, ei
, sle
);
5947 SLIST_INSERT_HEAD(&j
->env
, ei
, sle
);
5950 job_log(j
, LOG_DEBUG
, "Added environmental variable: %s=%s", k
, v
);
5956 envitem_delete(job_t j
, struct envitem
*ei
, bool global
)
5959 SLIST_REMOVE(&j
->global_env
, ei
, envitem
, sle
);
5960 if (SLIST_EMPTY(&j
->global_env
)) {
5961 LIST_REMOVE(j
, global_env_sle
);
5964 SLIST_REMOVE(&j
->env
, ei
, envitem
, sle
);
5971 envitem_setup(launch_data_t obj
, const char *key
, void *context
)
5975 if (launch_data_get_type(obj
) != LAUNCH_DATA_STRING
) {
5979 if (strncmp(LAUNCHD_TRUSTED_FD_ENV
, key
, sizeof(LAUNCHD_TRUSTED_FD_ENV
) - 1) != 0) {
5980 envitem_new(j
, key
, launch_data_get_string(obj
), j
->importing_global_env
);
5982 job_log(j
, LOG_DEBUG
, "Ignoring reserved environmental variable: %s", key
);
5987 limititem_update(job_t j
, int w
, rlim_t r
)
5989 struct limititem
*li
;
5991 SLIST_FOREACH(li
, &j
->limits
, sle
) {
5992 if (li
->which
== w
) {
5998 li
= calloc(1, sizeof(struct limititem
));
6000 if (!job_assumes(j
, li
!= NULL
)) {
6004 SLIST_INSERT_HEAD(&j
->limits
, li
, sle
);
6009 if (j
->importing_hard_limits
) {
6010 li
->lim
.rlim_max
= r
;
6013 li
->lim
.rlim_cur
= r
;
6021 limititem_delete(job_t j
, struct limititem
*li
)
6023 SLIST_REMOVE(&j
->limits
, li
, limititem
, sle
);
6030 seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
)
6034 if (launch_data_get_type(obj
) != LAUNCH_DATA_BOOL
) {
6035 job_log(j
, LOG_WARNING
, "Sandbox flag value must be boolean: %s", key
);
6039 if (launch_data_get_bool(obj
) == false) {
6043 if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOX_NAMED
) == 0) {
6044 j
->seatbelt_flags
|= SANDBOX_NAMED
;
6050 limititem_setup(launch_data_t obj
, const char *key
, void *context
)
6053 size_t i
, limits_cnt
= (sizeof(launchd_keys2limits
) / sizeof(launchd_keys2limits
[0]));
6056 if (launch_data_get_type(obj
) != LAUNCH_DATA_INTEGER
) {
6060 rl
= launch_data_get_integer(obj
);
6062 for (i
= 0; i
< limits_cnt
; i
++) {
6063 if (strcasecmp(launchd_keys2limits
[i
].key
, key
) == 0) {
6068 if (i
== limits_cnt
) {
6072 limititem_update(j
, launchd_keys2limits
[i
].val
, rl
);
6076 job_useless(job_t j
)
6078 if ((j
->legacy_LS_job
|| j
->only_once
) && j
->start_time
!= 0) {
6079 if (j
->legacy_LS_job
&& j
->j_port
) {
6082 job_log(j
, LOG_INFO
, "Exited. Was only configured to run once.");
6084 } else if (j
->removal_pending
) {
6085 job_log(j
, LOG_DEBUG
, "Exited while removal was pending.");
6087 } else if (j
->shutdown_monitor
) {
6089 } else if (j
->mgr
->shutting_down
&& !j
->mgr
->parentmgr
) {
6090 job_log(j
, LOG_DEBUG
, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children
, total_anon_children
);
6091 if (total_children
== 0 && !j
->anonymous
) {
6092 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Job was last to exit during shutdown of: %s.", j
->mgr
->name
);
6095 } else if (j
->legacy_mach_job
) {
6096 if (SLIST_EMPTY(&j
->machservices
)) {
6097 job_log(j
, LOG_INFO
, "Garbage collecting");
6099 } else if (!j
->checkedin
) {
6100 job_log(j
, LOG_WARNING
, "Failed to check-in!");
6104 /* If the job's executable does not have any valid architectures (for
6105 * example, if it's a PowerPC-only job), then we don't even bother
6106 * trying to relaunch it, as we have no reasonable expectation that
6107 * the situation will change.
6109 * <rdar://problem/9106979>
6111 if (!j
->did_exec
&& WEXITSTATUS(j
->last_exit_status
) == EBADARCH
) {
6112 job_log(j
, LOG_ERR
, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
6121 job_keepalive(job_t j
)
6123 mach_msg_type_number_t statusCnt
;
6124 mach_port_status_t status
;
6125 struct semaphoreitem
*si
;
6126 struct machservice
*ms
;
6127 bool good_exit
= (WIFEXITED(j
->last_exit_status
) && WEXITSTATUS(j
->last_exit_status
) == 0);
6128 bool is_not_kextd
= (launchd_apple_internal
|| (strcmp(j
->label
, "com.apple.kextd") != 0));
6130 if (unlikely(j
->mgr
->shutting_down
)) {
6137 * We definitely need to revisit this after Leopard ships. Please see
6138 * launchctl.c for the other half of this hack.
6140 if (unlikely((j
->mgr
->global_on_demand_cnt
> 0) && is_not_kextd
)) {
6144 if (unlikely(j
->needs_kickoff
)) {
6145 job_log(j
, LOG_DEBUG
, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
6149 if (j
->start_pending
) {
6150 job_log(j
, LOG_DEBUG
, "KeepAlive check: Pent-up non-IPC launch criteria.");
6155 job_log(j
, LOG_DEBUG
, "KeepAlive check: job configured to run continuously.");
6159 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
6160 statusCnt
= MACH_PORT_RECEIVE_STATUS_COUNT
;
6161 if (mach_port_get_attributes(mach_task_self(), ms
->port
, MACH_PORT_RECEIVE_STATUS
,
6162 (mach_port_info_t
)&status
, &statusCnt
) != KERN_SUCCESS
) {
6165 if (status
.mps_msgcount
) {
6166 job_log(j
, LOG_DEBUG
, "KeepAlive check: %d queued Mach messages on service: %s",
6167 status
.mps_msgcount
, ms
->name
);
6172 /* TODO: Coalesce external events and semaphore items, since they're basically
6175 struct externalevent
*ei
= NULL
;
6176 LIST_FOREACH(ei
, &j
->events
, job_le
) {
6177 if (ei
->state
== ei
->wanted_state
) {
6182 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
6183 bool wanted_state
= false;
6188 wanted_state
= true;
6190 if (network_up
== wanted_state
) {
6191 job_log(j
, LOG_DEBUG
, "KeepAlive: The network is %s.", wanted_state
? "up" : "down");
6195 case SUCCESSFUL_EXIT
:
6196 wanted_state
= true;
6198 if (good_exit
== wanted_state
) {
6199 job_log(j
, LOG_DEBUG
, "KeepAlive: The exit state was %s.", wanted_state
? "successful" : "failure");
6204 wanted_state
= true;
6206 if (j
->crashed
== wanted_state
) {
6210 case OTHER_JOB_ENABLED
:
6211 wanted_state
= true;
6212 case OTHER_JOB_DISABLED
:
6213 if ((bool)job_find(NULL
, si
->what
) == wanted_state
) {
6214 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "enabled" : "disabled", si
->what
);
6218 case OTHER_JOB_ACTIVE
:
6219 wanted_state
= true;
6220 case OTHER_JOB_INACTIVE
:
6221 if ((other_j
= job_find(NULL
, si
->what
))) {
6222 if ((bool)other_j
->p
== wanted_state
) {
6223 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "active" : "inactive", si
->what
);
6237 if (j
->p
&& j
->shutdown_monitor
) {
6238 return "Monitoring shutdown";
6241 return "PID is still valid";
6244 if (j
->priv_port_has_senders
) {
6245 return "Privileged Port still has outstanding senders";
6248 struct machservice
*ms
;
6249 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
6250 /* If we've simulated an exit, we mark the job as non-active, even
6251 * though doing so will leave it in an unsafe state. We do this so that
6252 * shutdown can proceed. See <rdar://problem/11126530>.
6254 if (!j
->workaround9359725
&& ms
->recv
&& machservice_active(ms
)) {
6255 job_log(j
, LOG_INFO
, "Mach service is still active: %s", ms
->name
);
6256 return "Mach service is still active";
6264 machservice_watch(job_t j
, struct machservice
*ms
)
6267 if (job_assumes_zero(j
, runtime_add_mport(ms
->port
, NULL
)) == KERN_INVALID_RIGHT
) {
6268 ms
->recv_race_hack
= true;
6274 machservice_ignore(job_t j
, struct machservice
*ms
)
6276 /* We only add ports whose receive rights we control into the port set, so
6277 * don't attempt to remove te service from the port set if we didn't put it
6278 * there in the first place. Otherwise, we could wind up trying to access a
6279 * bogus index (like MACH_PORT_DEAD) or zeroing a valid one out.
6281 * <rdar://problem/10898014>
6284 (void)job_assumes_zero(j
, runtime_remove_mport(ms
->port
));
6289 machservice_resetport(job_t j
, struct machservice
*ms
)
6291 LIST_REMOVE(ms
, port_hash_sle
);
6292 (void)job_assumes_zero(j
, launchd_mport_close_recv(ms
->port
));
6293 (void)job_assumes_zero(j
, launchd_mport_deallocate(ms
->port
));
6296 (void)job_assumes_zero(j
, launchd_mport_create_recv(&ms
->port
));
6297 (void)job_assumes_zero(j
, launchd_mport_make_send(ms
->port
));
6298 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
6302 machservice_stamp_port(job_t j
, struct machservice
*ms
)
6304 mach_port_context_t ctx
= 0;
6305 char *where2get
= j
->prog
? j
->prog
: j
->argv
[0];
6308 if ((prog
= strrchr(where2get
, '/'))) {
6314 (void)strncpy((char *)&ctx
, prog
, sizeof(ctx
));
6315 #if __LITTLE_ENDIAN__
6317 ctx
= OSSwapBigToHostInt64(ctx
);
6319 ctx
= OSSwapBigToHostInt32(ctx
);
6323 (void)job_assumes_zero(j
, mach_port_set_context(mach_task_self(), ms
->port
, ctx
));
6326 struct machservice
*
6327 machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
)
6329 /* Don't create new MachServices for dead ports. This is primarily for
6330 * clients who use bootstrap_register2(). They can pass in a send right, but
6331 * then that port can immediately go dead. Hilarity ensues.
6333 * <rdar://problem/10898014>
6335 if (*serviceport
== MACH_PORT_DEAD
) {
6339 struct machservice
*ms
= calloc(1, sizeof(struct machservice
) + strlen(name
) + 1);
6340 if (!job_assumes(j
, ms
!= NULL
)) {
6344 strcpy((char *)ms
->name
, name
);
6347 ms
->per_pid
= pid_local
;
6349 if (likely(*serviceport
== MACH_PORT_NULL
)) {
6350 if (job_assumes_zero(j
, launchd_mport_create_recv(&ms
->port
)) != KERN_SUCCESS
) {
6354 if (job_assumes_zero(j
, launchd_mport_make_send(ms
->port
)) != KERN_SUCCESS
) {
6357 *serviceport
= ms
->port
;
6360 ms
->port
= *serviceport
;
6361 ms
->isActive
= true;
6364 SLIST_INSERT_HEAD(&j
->machservices
, ms
, sle
);
6366 jobmgr_t where2put
= j
->mgr
;
6367 // XPC domains are separate from Mach bootstraps.
6368 if (!(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
6369 if (launchd_flat_mach_namespace
&& !(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
)) {
6370 where2put
= root_jobmgr
;
6374 /* Don't allow MachServices added by multiple-instance jobs to be looked up
6375 * by others. We could just do this with a simple bit, but then we'd have to
6376 * uniquify the names ourselves to avoid collisions. This is just easier.
6378 if (!j
->dedicated_instance
) {
6379 LIST_INSERT_HEAD(&where2put
->ms_hash
[hash_ms(ms
->name
)], ms
, name_hash_sle
);
6381 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
6384 machservice_stamp_port(j
, ms
);
6387 job_log(j
, LOG_DEBUG
, "Mach service added%s: %s", (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) ? " to private namespace" : "", name
);
6391 (void)job_assumes_zero(j
, launchd_mport_close_recv(ms
->port
));
6397 struct machservice
*
6398 machservice_new_alias(job_t j
, struct machservice
*orig
)
6400 struct machservice
*ms
= calloc(1, sizeof(struct machservice
) + strlen(orig
->name
) + 1);
6401 if (job_assumes(j
, ms
!= NULL
)) {
6402 strcpy((char *)ms
->name
, orig
->name
);
6406 LIST_INSERT_HEAD(&j
->mgr
->ms_hash
[hash_ms(ms
->name
)], ms
, name_hash_sle
);
6407 SLIST_INSERT_HEAD(&j
->machservices
, ms
, sle
);
6408 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Service aliased into job manager: %s", orig
->name
);
6415 machservice_status(struct machservice
*ms
)
6417 ms
= ms
->alias
? ms
->alias
: ms
;
6419 return BOOTSTRAP_STATUS_ACTIVE
;
6420 } else if (ms
->job
->ondemand
) {
6421 return BOOTSTRAP_STATUS_ON_DEMAND
;
6423 return BOOTSTRAP_STATUS_INACTIVE
;
6428 job_setup_exception_port(job_t j
, task_t target_task
)
6430 struct machservice
*ms
;
6431 thread_state_flavor_t f
= 0;
6432 mach_port_t exc_port
= the_exception_server
;
6434 if (unlikely(j
->alt_exc_handler
)) {
6435 ms
= jobmgr_lookup_service(j
->mgr
, j
->alt_exc_handler
, true, 0);
6437 exc_port
= machservice_port(ms
);
6439 job_log(j
, LOG_WARNING
, "Falling back to default Mach exception handler. Could not find: %s", j
->alt_exc_handler
);
6441 } else if (unlikely(j
->internal_exc_handler
)) {
6442 exc_port
= runtime_get_kernel_port();
6443 } else if (unlikely(!exc_port
)) {
6447 #if defined (__ppc__) || defined(__ppc64__)
6448 f
= PPC_THREAD_STATE64
;
6449 #elif defined(__i386__) || defined(__x86_64__)
6450 f
= x86_THREAD_STATE
;
6451 #elif defined(__arm__)
6452 f
= ARM_THREAD_STATE
;
6454 #error "unknown architecture"
6457 if (likely(target_task
)) {
6458 kern_return_t kr
= task_set_exception_ports(target_task
, EXC_MASK_CRASH
| EXC_MASK_GUARD
| EXC_MASK_RESOURCE
, exc_port
, EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
);
6460 if (kr
!= MACH_SEND_INVALID_DEST
) {
6461 (void)job_assumes_zero(j
, kr
);
6463 job_log(j
, LOG_WARNING
, "Task died before exception port could be set.");
6466 } else if (pid1_magic
&& the_exception_server
) {
6467 mach_port_t mhp
= mach_host_self();
6468 (void)job_assumes_zero(j
, host_set_exception_ports(mhp
, EXC_MASK_CRASH
| EXC_MASK_GUARD
| EXC_MASK_RESOURCE
, the_exception_server
, EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
));
6469 (void)job_assumes_zero(j
, launchd_mport_deallocate(mhp
));
6474 job_set_exception_port(job_t j
, mach_port_t port
)
6476 if (unlikely(!the_exception_server
)) {
6477 the_exception_server
= port
;
6478 job_setup_exception_port(j
, 0);
6480 job_log(j
, LOG_WARNING
, "The exception server is already claimed!");
6485 machservice_setup_options(launch_data_t obj
, const char *key
, void *context
)
6487 struct machservice
*ms
= context
;
6488 mach_port_t mhp
= mach_host_self();
6492 if (!job_assumes(ms
->job
, mhp
!= MACH_PORT_NULL
)) {
6496 switch (launch_data_get_type(obj
)) {
6497 case LAUNCH_DATA_INTEGER
:
6498 which_port
= (int)launch_data_get_integer(obj
); // XXX we should bound check this...
6499 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT
) == 0) {
6500 switch (which_port
) {
6501 case TASK_KERNEL_PORT
:
6502 case TASK_HOST_PORT
:
6503 case TASK_NAME_PORT
:
6504 case TASK_BOOTSTRAP_PORT
:
6505 /* I find it a little odd that zero isn't reserved in the header.
6506 * Normally Mach is fairly good about this convention...
6509 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved task special port: %d", which_port
);
6512 ms
->special_port_num
= which_port
;
6513 SLIST_INSERT_HEAD(&special_ports
, ms
, special_port_sle
);
6516 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT
) == 0 && pid1_magic
) {
6517 if (which_port
> HOST_MAX_SPECIAL_KERNEL_PORT
) {
6518 (void)job_assumes_zero(ms
->job
, (errno
= host_set_special_port(mhp
, which_port
, ms
->port
)));
6520 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved host special port: %d", which_port
);
6523 case LAUNCH_DATA_BOOL
:
6524 b
= launch_data_get_bool(obj
);
6525 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE
) == 0) {
6526 ms
->debug_on_close
= b
;
6527 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_RESETATCLOSE
) == 0) {
6529 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN
) == 0) {
6531 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER
) == 0) {
6532 job_set_exception_port(ms
->job
, ms
->port
);
6533 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_KUNCSERVER
) == 0) {
6535 (void)job_assumes_zero(ms
->job
, host_set_UNDServer(mhp
, ms
->port
));
6538 case LAUNCH_DATA_STRING
:
6539 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH
) == 0) {
6540 const char *option
= launch_data_get_string(obj
);
6541 if (strcasecmp(option
, "One") == 0) {
6542 ms
->drain_one_on_crash
= true;
6543 } else if (strcasecmp(option
, "All") == 0) {
6544 ms
->drain_all_on_crash
= true;
6548 case LAUNCH_DATA_DICTIONARY
:
6549 if (launch_data_dict_get_count(obj
) == 0) {
6550 job_set_exception_port(ms
->job
, ms
->port
);
6557 (void)job_assumes_zero(ms
->job
, launchd_mport_deallocate(mhp
));
6561 machservice_setup(launch_data_t obj
, const char *key
, void *context
)
6564 struct machservice
*ms
;
6565 mach_port_t p
= MACH_PORT_NULL
;
6567 if (unlikely(ms
= jobmgr_lookup_service(j
->mgr
, key
, false, 0))) {
6568 job_log(j
, LOG_WARNING
, "Conflict with job: %s over Mach service: %s", ms
->job
->label
, key
);
6572 if (!job_assumes(j
, (ms
= machservice_new(j
, key
, &p
, false)) != NULL
)) {
6576 ms
->isActive
= false;
6579 if (launch_data_get_type(obj
) == LAUNCH_DATA_DICTIONARY
) {
6580 launch_data_dict_iterate(obj
, machservice_setup_options
, ms
);
6583 kern_return_t kr
= mach_port_set_attributes(mach_task_self(), ms
->port
, MACH_PORT_TEMPOWNER
, NULL
, 0);
6584 (void)job_assumes_zero(j
, kr
);
6588 jobmgr_do_garbage_collection(jobmgr_t jm
)
6590 jobmgr_t jmi
= NULL
, jmn
= NULL
;
6591 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
6592 jobmgr_do_garbage_collection(jmi
);
6595 if (!jm
->shutting_down
) {
6599 if (SLIST_EMPTY(&jm
->submgrs
)) {
6600 jobmgr_log(jm
, LOG_DEBUG
, "No submanagers left.");
6602 jobmgr_log(jm
, LOG_DEBUG
, "Still have submanagers.");
6603 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
6604 jobmgr_log(jm
, LOG_DEBUG
, "Submanager: %s", jmi
->name
);
6609 job_t ji
= NULL
, jn
= NULL
;
6610 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
6611 if (ji
->anonymous
) {
6615 // Let the shutdown monitor be up until the very end.
6616 if (ji
->shutdown_monitor
) {
6620 /* On our first pass through, open a transaction for all the jobs that
6621 * need to be dirty at shutdown. We'll close these transactions once the
6622 * jobs that do not need to be dirty at shutdown have all exited.
6624 if (ji
->dirty_at_shutdown
&& !jm
->shutdown_jobs_dirtied
) {
6625 job_open_shutdown_transaction(ji
);
6628 const char *active
= job_active(ji
);
6632 job_log(ji
, LOG_DEBUG
, "Job is active: %s", active
);
6635 if (!ji
->dirty_at_shutdown
) {
6639 if (ji
->clean_kill
) {
6640 job_log(ji
, LOG_DEBUG
, "Job was killed cleanly.");
6642 job_log(ji
, LOG_DEBUG
, "Job was sent SIGTERM%s.", ji
->sent_sigkill
? " and SIGKILL" : "");
6647 jm
->shutdown_jobs_dirtied
= true;
6649 if (!jm
->shutdown_jobs_cleaned
) {
6650 /* Once all normal jobs have exited, we clean the dirty-at-shutdown
6651 * jobs and make them into normal jobs so that the above loop will
6652 * handle them appropriately.
6654 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
6655 if (ji
->anonymous
) {
6659 if (!job_active(ji
)) {
6663 if (ji
->shutdown_monitor
) {
6667 job_close_shutdown_transaction(ji
);
6671 jm
->shutdown_jobs_cleaned
= true;
6674 if (SLIST_EMPTY(&jm
->submgrs
) && actives
== 0) {
6675 /* We may be in a situation where the shutdown monitor is all that's
6676 * left, in which case we want to stop it. Like dirty-at-shutdown
6677 * jobs, we turn it back into a normal job so that the main loop
6678 * treats it appropriately.
6681 * <rdar://problem/10756306>
6682 * <rdar://problem/11034971>
6683 * <rdar://problem/11549541>
6685 if (jm
->monitor_shutdown
&& _launchd_shutdown_monitor
) {
6686 /* The rest of shutdown has completed, so we can kill the shutdown
6687 * monitor now like it was any other job.
6689 _launchd_shutdown_monitor
->shutdown_monitor
= false;
6691 job_log(_launchd_shutdown_monitor
, LOG_NOTICE
| LOG_CONSOLE
, "Stopping shutdown monitor.");
6692 job_stop(_launchd_shutdown_monitor
);
6693 _launchd_shutdown_monitor
= NULL
;
6695 jobmgr_log(jm
, LOG_DEBUG
, "Removing.");
6706 jobmgr_kill_stray_children(jobmgr_t jm
, pid_t
*p
, size_t np
)
6708 /* I maintain that stray processes should be at the mercy of launchd during
6709 * shutdown, but nevertheless, things like diskimages-helper can stick
6710 * around, and SIGKILLing them can result in data loss. So we send SIGTERM
6711 * to all the strays and don't wait for them to exit before moving on.
6713 * See rdar://problem/6562592
6716 for (i
= 0; i
< np
; i
++) {
6718 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Sending SIGTERM to PID %u and continuing...", p
[i
]);
6719 (void)jobmgr_assumes_zero_p(jm
, kill2(p
[i
], SIGTERM
));
6725 jobmgr_log_stray_children(jobmgr_t jm
, bool kill_strays
)
6727 size_t kp_skipped
= 0, len
= sizeof(pid_t
) * get_kern_max_proc();
6729 int i
= 0, kp_cnt
= 0;
6731 if (likely(jm
->parentmgr
|| !pid1_magic
)) {
6735 if (!jobmgr_assumes(jm
, (pids
= malloc(len
)) != NULL
)) {
6739 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS
);
6741 if (jobmgr_assumes_zero_p(jm
, (kp_cnt
= proc_listallpids(pids
, len
))) == -1) {
6745 pid_t
*ps
= (pid_t
*)calloc(sizeof(pid_t
), kp_cnt
);
6746 for (i
= 0; i
< kp_cnt
; i
++) {
6747 struct proc_bsdshortinfo proc
;
6748 if (proc_pidinfo(pids
[i
], PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
6749 if (errno
!= ESRCH
) {
6750 (void)jobmgr_assumes_zero(jm
, errno
);
6757 pid_t p_i
= pids
[i
];
6758 pid_t pp_i
= proc
.pbsi_ppid
;
6759 pid_t pg_i
= proc
.pbsi_pgid
;
6760 const char *z
= (proc
.pbsi_status
== SZOMB
) ? "zombie " : "";
6761 const char *n
= proc
.pbsi_comm
;
6763 if (unlikely(p_i
== 0 || p_i
== 1)) {
6768 if (_launchd_shutdown_monitor
&& pp_i
== _launchd_shutdown_monitor
->p
) {
6773 // We might have some jobs hanging around that we've decided to shut down in spite of.
6774 job_t j
= jobmgr_find_by_pid(jm
, p_i
, false);
6775 if (!j
|| (j
&& j
->anonymous
)) {
6776 jobmgr_log(jm
, LOG_INFO
| LOG_CONSOLE
, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z
, j
? "anonymous job" : "process", p_i
, pp_i
, pg_i
, n
);
6779 if (pp_i
== getpid() && !jobmgr_assumes(jm
, proc
.pbsi_status
!= SZOMB
)) {
6780 if (jobmgr_assumes_zero(jm
, waitpid(p_i
, &status
, WNOHANG
)) == 0) {
6781 jobmgr_log(jm
, LOG_INFO
| LOG_CONSOLE
, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status
));
6785 job_t leader
= jobmgr_find_by_pid(jm
, pg_i
, false);
6786 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6787 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6788 * hints to the kernel along the way, so that it could shutdown certain subsystems when
6789 * their userspace emissaries go away, before the call to reboot(2).
6791 if (leader
&& leader
->ignore_pg_at_shutdown
) {
6802 if ((kp_cnt
- kp_skipped
> 0) && kill_strays
) {
6803 jobmgr_kill_stray_children(jm
, ps
, kp_cnt
- kp_skipped
);
6812 jobmgr_parent(jobmgr_t jm
)
6814 return jm
->parentmgr
;
6818 job_uncork_fork(job_t j
)
6822 job_log(j
, LOG_DEBUG
, "Uncorking the fork().");
6823 /* this unblocks the child and avoids a race
6824 * between the above fork() and the kevent_mod() */
6825 (void)job_assumes(j
, write(j
->fork_fd
, &c
, sizeof(c
)) == sizeof(c
));
6826 (void)job_assumes_zero_p(j
, runtime_close(j
->fork_fd
));
6831 jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
, bool skip_init
, mach_port_t asport
)
6833 job_t bootstrapper
= NULL
;
6836 __OS_COMPILETIME_ASSERT__(offsetof(struct jobmgr_s
, kqjobmgr_callback
) == 0);
6838 if (unlikely(jm
&& requestorport
== MACH_PORT_NULL
)) {
6839 jobmgr_log(jm
, LOG_ERR
, "Mach sub-bootstrap create request requires a requester port");
6843 jmr
= calloc(1, sizeof(struct jobmgr_s
) + (name
? (strlen(name
) + 1) : NAME_MAX
+ 1));
6845 if (!jobmgr_assumes(jm
, jmr
!= NULL
)) {
6853 jmr
->kqjobmgr_callback
= jobmgr_callback
;
6854 strcpy(jmr
->name_init
, name
? name
: "Under construction");
6856 jmr
->req_port
= requestorport
;
6858 if ((jmr
->parentmgr
= jm
)) {
6859 SLIST_INSERT_HEAD(&jm
->submgrs
, jmr
, sle
);
6862 if (jm
&& jobmgr_assumes_zero(jmr
, launchd_mport_notify_req(jmr
->req_port
, MACH_NOTIFY_DEAD_NAME
)) != KERN_SUCCESS
) {
6866 if (transfer_port
!= MACH_PORT_NULL
) {
6867 (void)jobmgr_assumes(jmr
, jm
!= NULL
);
6868 jmr
->jm_port
= transfer_port
;
6869 } else if (!jm
&& !pid1_magic
) {
6870 char *trusted_fd
= getenv(LAUNCHD_TRUSTED_FD_ENV
);
6873 snprintf(service_buf
, sizeof(service_buf
), "com.apple.launchd.peruser.%u", getuid());
6875 if (jobmgr_assumes_zero(jmr
, bootstrap_check_in(bootstrap_port
, service_buf
, &jmr
->jm_port
)) != 0) {
6880 int dfd
, lfd
= (int) strtol(trusted_fd
, NULL
, 10);
6882 if ((dfd
= dup(lfd
)) >= 0) {
6883 (void)jobmgr_assumes_zero_p(jmr
, runtime_close(dfd
));
6884 (void)jobmgr_assumes_zero_p(jmr
, runtime_close(lfd
));
6887 unsetenv(LAUNCHD_TRUSTED_FD_ENV
);
6890 // cut off the Libc cache, we don't want to deadlock against ourself
6891 inherited_bootstrap_port
= bootstrap_port
;
6892 bootstrap_port
= MACH_PORT_NULL
;
6893 os_assert_zero(launchd_mport_notify_req(inherited_bootstrap_port
, MACH_NOTIFY_DEAD_NAME
));
6895 // We set this explicitly as we start each child
6896 os_assert_zero(launchd_set_bport(MACH_PORT_NULL
));
6897 } else if (jobmgr_assumes_zero(jmr
, launchd_mport_create_recv(&jmr
->jm_port
)) != KERN_SUCCESS
) {
6902 sprintf(jmr
->name_init
, "%u", MACH_PORT_INDEX(jmr
->jm_port
));
6906 (void)jobmgr_assumes_zero_p(jmr
, kevent_mod(SIGTERM
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
));
6907 (void)jobmgr_assumes_zero_p(jmr
, kevent_mod(SIGUSR1
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
));
6908 (void)jobmgr_assumes_zero_p(jmr
, kevent_mod(SIGUSR2
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
));
6909 (void)jobmgr_assumes_zero_p(jmr
, kevent_mod(SIGINFO
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
));
6910 (void)jobmgr_assumes_zero_p(jmr
, kevent_mod(0, EVFILT_FS
, EV_ADD
, VQ_MOUNT
|VQ_UNMOUNT
|VQ_UPDATE
, 0, jmr
));
6913 if (name
&& !skip_init
) {
6914 bootstrapper
= jobmgr_init_session(jmr
, name
, sflag
);
6917 if (!bootstrapper
|| !bootstrapper
->weird_bootstrap
) {
6918 if (jobmgr_assumes_zero(jmr
, runtime_add_mport(jmr
->jm_port
, job_server
)) != KERN_SUCCESS
) {
6923 jobmgr_log(jmr
, LOG_DEBUG
, "Created job manager%s%s", jm
? " with parent: " : ".", jm
? jm
->name
: "");
6926 bootstrapper
->asport
= asport
;
6928 jobmgr_log(jmr
, LOG_DEBUG
, "Bootstrapping new job manager with audit session %u", asport
);
6929 (void)jobmgr_assumes(jmr
, job_dispatch(bootstrapper
, true) != NULL
);
6931 jmr
->req_asport
= asport
;
6934 if (asport
!= MACH_PORT_NULL
) {
6935 (void)jobmgr_assumes_zero(jmr
, launchd_mport_copy_send(asport
));
6938 if (jmr
->parentmgr
) {
6939 runtime_add_weak_ref();
6955 jobmgr_new_xpc_singleton_domain(jobmgr_t jm
, name_t name
)
6957 jobmgr_t
new = NULL
;
6959 /* These job managers are basically singletons, so we use the root Mach
6960 * bootstrap port as their requestor ports so they'll never go away.
6962 mach_port_t req_port
= root_jobmgr
->jm_port
;
6963 if (jobmgr_assumes_zero(jm
, launchd_mport_make_send(req_port
)) == KERN_SUCCESS
) {
6964 new = jobmgr_new(root_jobmgr
, req_port
, MACH_PORT_NULL
, false, name
, true, MACH_PORT_NULL
);
6966 new->properties
|= BOOTSTRAP_PROPERTY_XPC_SINGLETON
;
6967 new->properties
|= BOOTSTRAP_PROPERTY_XPC_DOMAIN
;
6968 new->xpc_singleton
= true;
6976 jobmgr_find_xpc_per_user_domain(jobmgr_t jm
, uid_t uid
)
6978 jobmgr_t jmi
= NULL
;
6979 LIST_FOREACH(jmi
, &_s_xpc_user_domains
, xpc_le
) {
6980 if (jmi
->req_euid
== uid
) {
6986 (void)snprintf(name
, sizeof(name
), "com.apple.xpc.domain.peruser.%u", uid
);
6987 jmi
= jobmgr_new_xpc_singleton_domain(jm
, name
);
6988 if (jobmgr_assumes(jm
, jmi
!= NULL
)) {
6989 /* We need to create a per-user launchd for this UID if there isn't one
6990 * already so we can grab the bootstrap port.
6992 job_t puj
= jobmgr_lookup_per_user_context_internal(NULL
, uid
, &jmi
->req_bsport
);
6993 if (jobmgr_assumes(jmi
, puj
!= NULL
)) {
6994 (void)jobmgr_assumes_zero(jmi
, launchd_mport_copy_send(puj
->asport
));
6995 (void)jobmgr_assumes_zero(jmi
, launchd_mport_copy_send(jmi
->req_bsport
));
6996 jmi
->shortdesc
= "per-user";
6997 jmi
->req_asport
= puj
->asport
;
6998 jmi
->req_asid
= puj
->asid
;
6999 jmi
->req_euid
= uid
;
7002 LIST_INSERT_HEAD(&_s_xpc_user_domains
, jmi
, xpc_le
);
7012 jobmgr_find_xpc_per_session_domain(jobmgr_t jm
, au_asid_t asid
)
7014 jobmgr_t jmi
= NULL
;
7015 LIST_FOREACH(jmi
, &_s_xpc_session_domains
, xpc_le
) {
7016 if (jmi
->req_asid
== asid
) {
7022 (void)snprintf(name
, sizeof(name
), "com.apple.xpc.domain.persession.%i", asid
);
7023 jmi
= jobmgr_new_xpc_singleton_domain(jm
, name
);
7024 if (jobmgr_assumes(jm
, jmi
!= NULL
)) {
7025 (void)jobmgr_assumes_zero(jmi
, launchd_mport_make_send(root_jobmgr
->jm_port
));
7026 jmi
->shortdesc
= "per-session";
7027 jmi
->req_bsport
= root_jobmgr
->jm_port
;
7028 (void)jobmgr_assumes_zero(jmi
, audit_session_port(asid
, &jmi
->req_asport
));
7029 jmi
->req_asid
= asid
;
7033 LIST_INSERT_HEAD(&_s_xpc_session_domains
, jmi
, xpc_le
);
7042 jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
)
7044 const char *bootstrap_tool
[] = { "/bin/launchctl", "bootstrap", "-S", session_type
, sflag
? "-s" : NULL
, NULL
};
7045 char thelabel
[1000];
7048 snprintf(thelabel
, sizeof(thelabel
), "com.apple.launchctl.%s", session_type
);
7049 bootstrapper
= job_new(jm
, thelabel
, NULL
, bootstrap_tool
);
7051 if (jobmgr_assumes(jm
, bootstrapper
!= NULL
) && (jm
->parentmgr
|| !pid1_magic
)) {
7052 bootstrapper
->is_bootstrapper
= true;
7055 // <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs)
7056 snprintf(buf
, sizeof(buf
), "0x%X:0:0", getuid());
7057 envitem_new(bootstrapper
, "__CF_USER_TEXT_ENCODING", buf
, false);
7058 bootstrapper
->weird_bootstrap
= true;
7059 (void)jobmgr_assumes(jm
, job_setup_machport(bootstrapper
));
7060 } else if (bootstrapper
&& strncmp(session_type
, VPROCMGR_SESSION_SYSTEM
, sizeof(VPROCMGR_SESSION_SYSTEM
)) == 0) {
7061 #if TARGET_OS_EMBEDDED
7062 bootstrapper
->psproctype
= POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE
;
7064 bootstrapper
->is_bootstrapper
= true;
7065 if (jobmgr_assumes(jm
, pid1_magic
)) {
7066 // Have our system bootstrapper print out to the console.
7067 bootstrapper
->stdoutpath
= strdup(_PATH_CONSOLE
);
7068 bootstrapper
->stderrpath
= strdup(_PATH_CONSOLE
);
7070 if (launchd_console
) {
7071 (void)jobmgr_assumes_zero_p(jm
, kevent_mod((uintptr_t)fileno(launchd_console
), EVFILT_VNODE
, EV_ADD
| EV_ONESHOT
, NOTE_REVOKE
, 0, jm
));
7076 jm
->session_initialized
= true;
7077 return bootstrapper
;
7081 jobmgr_delete_anything_with_port(jobmgr_t jm
, mach_port_t port
)
7083 struct machservice
*ms
, *next_ms
;
7086 /* Mach ports, unlike Unix descriptors, are reference counted. In other
7087 * words, when some program hands us a second or subsequent send right to a
7088 * port we already have open, the Mach kernel gives us the same port number
7089 back and increments a reference count associated with the port. This
7090 forces us, when discovering that a receive right at the other end
7091 * has been deleted, to wander all of our objects to see what weird places
7092 * clients might have handed us the same send right to use.
7095 if (jm
== root_jobmgr
) {
7096 if (port
== inherited_bootstrap_port
) {
7097 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(port
));
7098 inherited_bootstrap_port
= MACH_PORT_NULL
;
7100 return jobmgr_shutdown(jm
);
7103 LIST_FOREACH_SAFE(ms
, &port_hash
[HASH_PORT(port
)], port_hash_sle
, next_ms
) {
7104 if (ms
->port
== port
&& !ms
->recv
) {
7105 machservice_delete(ms
->job
, ms
, true);
7110 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
7111 jobmgr_delete_anything_with_port(jmi
, port
);
7114 if (jm
->req_port
== port
) {
7115 jobmgr_log(jm
, LOG_DEBUG
, "Request port died: %i", MACH_PORT_INDEX(port
));
7116 return jobmgr_shutdown(jm
);
7119 struct waiting4attach
*w4ai
= NULL
;
7120 struct waiting4attach
*w4ait
= NULL
;
7121 LIST_FOREACH_SAFE(w4ai
, &jm
->attaches
, le
, w4ait
) {
7122 if (port
== w4ai
->port
) {
7123 waiting4attach_delete(jm
, w4ai
);
7131 struct machservice
*
7132 jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
)
7134 struct machservice
*ms
;
7137 jobmgr_log(jm
, LOG_DEBUG
, "Looking up %sservice %s", target_pid
? "per-PID " : "", name
);
7140 /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
7141 * bootstrap in other bootstraps.
7144 // Start in the given bootstrap.
7145 if (unlikely((target_j
= jobmgr_find_by_pid(jm
, target_pid
, false)) == NULL
)) {
7146 // If we fail, do a deep traversal.
7147 if (unlikely((target_j
= jobmgr_find_by_pid_deep(root_jobmgr
, target_pid
, true)) == NULL
)) {
7148 jobmgr_log(jm
, LOG_DEBUG
, "Didn't find PID %i", target_pid
);
7153 SLIST_FOREACH(ms
, &target_j
->machservices
, sle
) {
7154 if (ms
->per_pid
&& strcmp(name
, ms
->name
) == 0) {
7159 job_log(target_j
, LOG_DEBUG
, "Didn't find per-PID Mach service: %s", name
);
7163 jobmgr_t where2look
= jm
;
7164 // XPC domains are separate from Mach bootstraps.
7165 if (!(jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
7166 if (launchd_flat_mach_namespace
&& !(jm
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
)) {
7167 where2look
= root_jobmgr
;
7171 LIST_FOREACH(ms
, &where2look
->ms_hash
[hash_ms(name
)], name_hash_sle
) {
7172 if (!ms
->per_pid
&& strcmp(name
, ms
->name
) == 0) {
7177 if (jm
->parentmgr
== NULL
|| !check_parent
) {
7181 return jobmgr_lookup_service(jm
->parentmgr
, name
, true, 0);
7185 machservice_port(struct machservice
*ms
)
7191 machservice_job(struct machservice
*ms
)
7197 machservice_hidden(struct machservice
*ms
)
7203 machservice_active(struct machservice
*ms
)
7205 return ms
->isActive
;
7209 machservice_name(struct machservice
*ms
)
7215 machservice_drain_port(struct machservice
*ms
)
7217 bool drain_one
= ms
->drain_one_on_crash
;
7218 bool drain_all
= ms
->drain_all_on_crash
;
7220 if (!job_assumes(ms
->job
, (drain_one
|| drain_all
) == true)) {
7224 job_log(ms
->job
, LOG_INFO
, "Draining %s...", ms
->name
);
7226 char req_buff
[sizeof(union __RequestUnion__catch_mach_exc_subsystem
) * 2];
7227 char rep_buff
[sizeof(union __ReplyUnion__catch_mach_exc_subsystem
)];
7228 mig_reply_error_t
*req_hdr
= (mig_reply_error_t
*)&req_buff
;
7229 mig_reply_error_t
*rep_hdr
= (mig_reply_error_t
*)&rep_buff
;
7231 mach_msg_return_t mr
= ~MACH_MSG_SUCCESS
;
7234 /* This should be a direct check on the Mach service to see if it's an exception-handling
7235 * port, and it will break things if ReportCrash or SafetyNet start advertising other
7236 * Mach services. But for now, it should be okay.
7238 if (ms
->job
->alt_exc_handler
|| ms
->job
->internal_exc_handler
) {
7239 mr
= launchd_exc_runtime_once(ms
->port
, sizeof(req_buff
), sizeof(rep_buff
), req_hdr
, rep_hdr
, 0);
7241 mach_msg_options_t options
= MACH_RCV_MSG
|
7244 mr
= mach_msg((mach_msg_header_t
*)req_hdr
, options
, 0, sizeof(req_buff
), ms
->port
, 0, MACH_PORT_NULL
);
7246 case MACH_MSG_SUCCESS
:
7247 mach_msg_destroy((mach_msg_header_t
*)req_hdr
);
7249 case MACH_RCV_TIMED_OUT
:
7251 case MACH_RCV_TOO_LARGE
:
7252 launchd_syslog(LOG_WARNING
, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff
));
7258 } while (drain_all
&& mr
!= MACH_RCV_TIMED_OUT
);
7262 machservice_delete(job_t j
, struct machservice
*ms
, bool port_died
)
7265 /* HACK: Egregious code duplication. But dealing with aliases is a
7266 * pretty simple affair since they can't and shouldn't have any complex
7267 * behaviors associated with them.
7269 LIST_REMOVE(ms
, name_hash_sle
);
7270 SLIST_REMOVE(&j
->machservices
, ms
, machservice
, sle
);
7275 if (unlikely(ms
->debug_on_close
)) {
7276 job_log(j
, LOG_NOTICE
, "About to enter kernel debugger because of Mach port: 0x%x", ms
->port
);
7277 (void)job_assumes_zero(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
));
7280 if (ms
->recv
&& job_assumes(j
, !machservice_active(ms
))) {
7281 job_log(j
, LOG_DEBUG
, "Closing receive right for %s", ms
->name
);
7282 (void)job_assumes_zero(j
, launchd_mport_close_recv(ms
->port
));
7285 (void)job_assumes_zero(j
, launchd_mport_deallocate(ms
->port
));
7287 if (unlikely(ms
->port
== the_exception_server
)) {
7288 the_exception_server
= 0;
7291 job_log(j
, LOG_DEBUG
, "Mach service deleted%s: %s", port_died
? " (port died)" : "", ms
->name
);
7293 if (ms
->special_port_num
) {
7294 SLIST_REMOVE(&special_ports
, ms
, machservice
, special_port_sle
);
7296 SLIST_REMOVE(&j
->machservices
, ms
, machservice
, sle
);
7298 if (!(j
->dedicated_instance
|| ms
->event_channel
)) {
7299 LIST_REMOVE(ms
, name_hash_sle
);
7301 LIST_REMOVE(ms
, port_hash_sle
);
7307 machservice_request_notifications(struct machservice
*ms
)
7309 mach_msg_id_t which
= MACH_NOTIFY_DEAD_NAME
;
7311 ms
->isActive
= true;
7314 which
= MACH_NOTIFY_PORT_DESTROYED
;
7315 job_checkin(ms
->job
);
7318 (void)job_assumes_zero(ms
->job
, launchd_mport_notify_req(ms
->port
, which
));
7321 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
7322 #define END_OF(x) (&(x)[NELEM(x)])
7325 mach_cmd2argv(const char *string
)
7327 char *argv
[100], args
[1000];
7329 char *argp
= args
, term
, **argv_ret
, *co
;
7330 unsigned int nargs
= 0, i
;
7332 for (cp
= string
; *cp
;) {
7333 while (isspace(*cp
))
7335 term
= (*cp
== '"') ? *cp
++ : '\0';
7336 if (nargs
< NELEM(argv
)) {
7337 argv
[nargs
++] = argp
;
7339 while (*cp
&& (term
? *cp
!= term
: !isspace(*cp
)) && argp
< END_OF(args
)) {
7356 argv_ret
= malloc((nargs
+ 1) * sizeof(char *) + strlen(string
) + 1);
7359 (void)os_assumes_zero(errno
);
7363 co
= (char *)argv_ret
+ (nargs
+ 1) * sizeof(char *);
7365 for (i
= 0; i
< nargs
; i
++) {
7366 strcpy(co
, argv
[i
]);
7368 co
+= strlen(argv
[i
]) + 1;
7376 job_checkin(job_t j
)
7378 j
->checkedin
= true;
7381 bool job_is_god(job_t j
)
7383 return j
->embedded_god
;
7387 job_ack_port_destruction(mach_port_t p
)
7389 struct machservice
*ms
;
7392 LIST_FOREACH(ms
, &port_hash
[HASH_PORT(p
)], port_hash_sle
) {
7393 if (ms
->recv
&& (ms
->port
== p
)) {
7399 launchd_syslog(LOG_WARNING
, "Could not find MachService to match receive right: 0x%x", p
);
7405 jobmgr_log(root_jobmgr
, LOG_DEBUG
, "Receive right returned to us: %s", ms
->name
);
7407 /* Without being the exception handler, NOTE_EXIT is our only way to tell if
7408 * the job crashed, and we can't rely on NOTE_EXIT always being processed
7409 * after all the job's receive rights have been returned.
7411 * So when we get receive rights back, check to see if the job has been
7412 * reaped yet. If not, then we add this service to a list of services to be
7413 * drained on crash if it's requested that behavior. So, for a job with N
7414 * receive rights all requesting that they be drained on crash, we can
7415 * safely handle the following sequence of events.
7417 * ReceiveRight0Returned
7418 * ReceiveRight1Returned
7419 * ReceiveRight2Returned
7420 * NOTE_EXIT (reap, get exit status)
7421 * ReceiveRight3Returned
7425 * ReceiveRight(N - 1)Returned
7427 if (ms
->drain_one_on_crash
|| ms
->drain_all_on_crash
) {
7428 if (j
->crashed
&& j
->reaped
) {
7429 job_log(j
, LOG_DEBUG
, "Job has crashed. Draining port...");
7430 machservice_drain_port(ms
);
7431 } else if (!(j
->crashed
|| j
->reaped
)) {
7432 job_log(j
, LOG_DEBUG
, "Job's exit status is still unknown. Deferring drain.");
7436 ms
->isActive
= false;
7437 if (ms
->delete_on_destruction
) {
7438 machservice_delete(j
, ms
, false);
7439 } else if (ms
->reset
) {
7440 machservice_resetport(j
, ms
);
7443 kern_return_t kr
= mach_port_set_attributes(mach_task_self(), ms
->port
, MACH_PORT_TEMPOWNER
, NULL
, 0);
7444 (void)job_assumes_zero(j
, kr
);
7445 machservice_stamp_port(j
, ms
);
7446 job_dispatch(j
, false);
7448 if (ms
->recv_race_hack
) {
7449 ms
->recv_race_hack
= false;
7450 machservice_watch(ms
->job
, ms
);
7453 root_jobmgr
= jobmgr_do_garbage_collection(root_jobmgr
);
7459 job_ack_no_senders(job_t j
)
7461 j
->priv_port_has_senders
= false;
7463 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->j_port
));
7466 job_log(j
, LOG_DEBUG
, "No more senders on privileged Mach bootstrap port");
7468 job_dispatch(j
, false);
7472 semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
)
7474 struct semaphoreitem
*si
;
7475 size_t alloc_sz
= sizeof(struct semaphoreitem
);
7478 alloc_sz
+= strlen(what
) + 1;
7481 if (job_assumes(j
, si
= calloc(1, alloc_sz
)) == NULL
) {
7488 strcpy(si
->what_init
, what
);
7491 SLIST_INSERT_HEAD(&j
->semaphores
, si
, sle
);
7493 if ((why
== OTHER_JOB_ENABLED
|| why
== OTHER_JOB_DISABLED
) && !j
->nosy
) {
7494 job_log(j
, LOG_DEBUG
, "Job is interested in \"%s\".", what
);
7495 SLIST_INSERT_HEAD(&s_curious_jobs
, j
, curious_jobs_sle
);
7499 semaphoreitem_runtime_mod_ref(si
, true);
7505 semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
)
7508 * External events need to be tracked.
7509 * Internal events do NOT need to be tracked.
7513 case SUCCESSFUL_EXIT
:
7515 case OTHER_JOB_ENABLED
:
7516 case OTHER_JOB_DISABLED
:
7517 case OTHER_JOB_ACTIVE
:
7518 case OTHER_JOB_INACTIVE
:
7525 runtime_add_weak_ref();
7527 runtime_del_weak_ref();
7532 semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
)
7534 semaphoreitem_runtime_mod_ref(si
, false);
7536 SLIST_REMOVE(&j
->semaphores
, si
, semaphoreitem
, sle
);
7538 // We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores.
7539 if ((si
->why
== OTHER_JOB_ENABLED
|| si
->why
== OTHER_JOB_DISABLED
) && j
->nosy
) {
7541 SLIST_REMOVE(&s_curious_jobs
, j
, job_s
, curious_jobs_sle
);
7548 semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
)
7550 struct semaphoreitem_dict_iter_context
*sdic
= context
;
7551 semaphore_reason_t why
;
7553 why
= launch_data_get_bool(obj
) ? sdic
->why_true
: sdic
->why_false
;
7555 semaphoreitem_new(sdic
->j
, why
, key
);
7559 semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
)
7561 struct semaphoreitem_dict_iter_context sdic
= { context
, 0, 0 };
7563 semaphore_reason_t why
;
7565 switch (launch_data_get_type(obj
)) {
7566 case LAUNCH_DATA_BOOL
:
7567 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE
) == 0) {
7568 why
= launch_data_get_bool(obj
) ? NETWORK_UP
: NETWORK_DOWN
;
7569 semaphoreitem_new(j
, why
, NULL
);
7570 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT
) == 0) {
7571 why
= launch_data_get_bool(obj
) ? SUCCESSFUL_EXIT
: FAILED_EXIT
;
7572 semaphoreitem_new(j
, why
, NULL
);
7573 j
->start_pending
= true;
7574 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND
) == 0) {
7575 j
->needs_kickoff
= launch_data_get_bool(obj
);
7576 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_CRASHED
) == 0) {
7577 why
= launch_data_get_bool(obj
) ? CRASHED
: DID_NOT_CRASH
;
7578 semaphoreitem_new(j
, why
, NULL
);
7579 j
->start_pending
= true;
7581 job_log(j
, LOG_ERR
, "Unrecognized KeepAlive attribute: %s", key
);
7584 case LAUNCH_DATA_DICTIONARY
:
7585 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE
) == 0) {
7586 sdic
.why_true
= OTHER_JOB_ACTIVE
;
7587 sdic
.why_false
= OTHER_JOB_INACTIVE
;
7588 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED
) == 0) {
7589 sdic
.why_true
= OTHER_JOB_ENABLED
;
7590 sdic
.why_false
= OTHER_JOB_DISABLED
;
7592 job_log(j
, LOG_ERR
, "Unrecognized KeepAlive attribute: %s", key
);
7596 launch_data_dict_iterate(obj
, semaphoreitem_setup_dict_iter
, &sdic
);
7599 job_log(j
, LOG_ERR
, "Unrecognized KeepAlive type: %u", launch_data_get_type(obj
));
7605 externalevent_new(job_t j
, struct eventsystem
*sys
, const char *evname
, xpc_object_t event
, uint64_t flags
)
7607 if (j
->event_monitor
) {
7608 job_log(j
, LOG_ERR
, "The event monitor job cannot use LaunchEvents or XPC Events.");
7612 struct externalevent
*ee
= (struct externalevent
*)calloc(1, sizeof(struct externalevent
) + strlen(evname
) + 1);
7617 ee
->event
= xpc_retain(event
);
7618 (void)strcpy(ee
->name
, evname
);
7620 ee
->id
= sys
->curid
;
7623 ee
->wanted_state
= true;
7626 if (flags
& XPC_EVENT_FLAG_ENTITLEMENTS
) {
7627 struct ldcred
*ldc
= runtime_get_caller_creds();
7629 ee
->entitlements
= xpc_copy_entitlements_for_pid(ldc
->pid
);
7633 if (sys
== _launchd_support_system
) {
7634 ee
->internal
= true;
7637 LIST_INSERT_HEAD(&j
->events
, ee
, job_le
);
7638 LIST_INSERT_HEAD(&sys
->events
, ee
, sys_le
);
7640 job_log(j
, LOG_DEBUG
, "New event: %s/%s", sys
->name
, evname
);
7647 externalevent_delete(struct externalevent
*ee
)
7649 xpc_release(ee
->event
);
7650 if (ee
->entitlements
) {
7651 xpc_release(ee
->entitlements
);
7653 LIST_REMOVE(ee
, job_le
);
7654 LIST_REMOVE(ee
, sys_le
);
7662 externalevent_setup(launch_data_t obj
, const char *key
, void *context
)
7664 /* This method can ONLY be called on the job_import() path, as it assumes
7665 * the input is a launch_data_t.
7667 struct externalevent_iter_ctx
*ctx
= (struct externalevent_iter_ctx
*)context
;
7669 xpc_object_t xobj
= ld2xpc(obj
);
7671 job_log(ctx
->j
, LOG_DEBUG
, "Importing stream/event: %s/%s", ctx
->sys
->name
, key
);
7672 externalevent_new(ctx
->j
, ctx
->sys
, key
, xobj
, 0);
7675 job_log(ctx
->j
, LOG_ERR
, "Could not import event for job: %s", key
);
7679 struct externalevent
*
7680 externalevent_find(const char *sysname
, uint64_t id
)
7682 struct externalevent
*ei
= NULL
;
7684 struct eventsystem
*es
= eventsystem_find(sysname
);
7686 LIST_FOREACH(ei
, &es
->events
, sys_le
) {
7692 launchd_syslog(LOG_ERR
, "Could not find event system: %s", sysname
);
7698 struct eventsystem
*
7699 eventsystem_new(const char *name
)
7701 struct eventsystem
*es
= (struct eventsystem
*)calloc(1, sizeof(struct eventsystem
) + strlen(name
) + 1);
7704 (void)strcpy(es
->name
, name
);
7705 LIST_INSERT_HEAD(&_s_event_systems
, es
, global_le
);
7707 (void)os_assumes_zero(errno
);
7714 eventsystem_delete(struct eventsystem
*es
)
7716 struct externalevent
*ei
= NULL
;
7717 while ((ei
= LIST_FIRST(&es
->events
))) {
7718 externalevent_delete(ei
);
7721 LIST_REMOVE(es
, global_le
);
7727 eventsystem_setup(launch_data_t obj
, const char *key
, void *context
)
7729 job_t j
= (job_t
)context
;
7730 if (!job_assumes(j
, launch_data_get_type(obj
) == LAUNCH_DATA_DICTIONARY
)) {
7734 struct eventsystem
*sys
= eventsystem_find(key
);
7735 if (unlikely(sys
== NULL
)) {
7736 sys
= eventsystem_new(key
);
7737 job_log(j
, LOG_DEBUG
, "New event system: %s", key
);
7740 if (job_assumes(j
, sys
!= NULL
)) {
7741 struct externalevent_iter_ctx ctx
= {
7746 job_log(j
, LOG_DEBUG
, "Importing events for stream: %s", key
);
7747 launch_data_dict_iterate(obj
, externalevent_setup
, &ctx
);
7751 struct eventsystem
*
7752 eventsystem_find(const char *name
)
7754 struct eventsystem
*esi
= NULL
;
7755 LIST_FOREACH(esi
, &_s_event_systems
, global_le
) {
7756 if (strcmp(name
, esi
->name
) == 0) {
7765 eventsystem_ping(void)
7767 if (!_launchd_event_monitor
) {
7771 if (!_launchd_event_monitor
->p
) {
7772 (void)job_dispatch(_launchd_event_monitor
, true);
7774 if (_launchd_event_monitor
->event_monitor_ready2signal
) {
7775 (void)job_assumes_zero_p(_launchd_event_monitor
, kill(_launchd_event_monitor
->p
, SIGUSR1
));
7781 jobmgr_dispatch_all_semaphores(jobmgr_t jm
)
7787 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
7788 jobmgr_dispatch_all_semaphores(jmi
);
7791 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
7792 if (!SLIST_EMPTY(&ji
->semaphores
)) {
7793 job_dispatch(ji
, false);
7799 cronemu(int mon
, int mday
, int hour
, int min
)
7801 struct tm workingtm
;
7805 workingtm
= *localtime(&now
);
7807 workingtm
.tm_isdst
= -1;
7808 workingtm
.tm_sec
= 0;
7811 while (!cronemu_mon(&workingtm
, mon
, mday
, hour
, min
)) {
7812 workingtm
.tm_year
++;
7813 workingtm
.tm_mon
= 0;
7814 workingtm
.tm_mday
= 1;
7815 workingtm
.tm_hour
= 0;
7816 workingtm
.tm_min
= 0;
7820 return mktime(&workingtm
);
7824 cronemu_wday(int wday
, int hour
, int min
)
7826 struct tm workingtm
;
7830 workingtm
= *localtime(&now
);
7832 workingtm
.tm_isdst
= -1;
7833 workingtm
.tm_sec
= 0;
7840 while (!(workingtm
.tm_wday
== wday
&& cronemu_hour(&workingtm
, hour
, min
))) {
7841 workingtm
.tm_mday
++;
7842 workingtm
.tm_hour
= 0;
7843 workingtm
.tm_min
= 0;
7847 return mktime(&workingtm
);
7851 cronemu_mon(struct tm
*wtm
, int mon
, int mday
, int hour
, int min
)
7854 struct tm workingtm
= *wtm
;
7857 while (!cronemu_mday(&workingtm
, mday
, hour
, min
)) {
7859 workingtm
.tm_mday
= 1;
7860 workingtm
.tm_hour
= 0;
7861 workingtm
.tm_min
= 0;
7862 carrytest
= workingtm
.tm_mon
;
7864 if (carrytest
!= workingtm
.tm_mon
) {
7872 if (mon
< wtm
->tm_mon
) {
7876 if (mon
> wtm
->tm_mon
) {
7883 return cronemu_mday(wtm
, mday
, hour
, min
);
7887 cronemu_mday(struct tm
*wtm
, int mday
, int hour
, int min
)
7890 struct tm workingtm
= *wtm
;
7893 while (!cronemu_hour(&workingtm
, hour
, min
)) {
7894 workingtm
.tm_mday
++;
7895 workingtm
.tm_hour
= 0;
7896 workingtm
.tm_min
= 0;
7897 carrytest
= workingtm
.tm_mday
;
7899 if (carrytest
!= workingtm
.tm_mday
) {
7907 if (mday
< wtm
->tm_mday
) {
7911 if (mday
> wtm
->tm_mday
) {
7912 wtm
->tm_mday
= mday
;
7917 return cronemu_hour(wtm
, hour
, min
);
7921 cronemu_hour(struct tm
*wtm
, int hour
, int min
)
7924 struct tm workingtm
= *wtm
;
7927 while (!cronemu_min(&workingtm
, min
)) {
7928 workingtm
.tm_hour
++;
7929 workingtm
.tm_min
= 0;
7930 carrytest
= workingtm
.tm_hour
;
7932 if (carrytest
!= workingtm
.tm_hour
) {
7940 if (hour
< wtm
->tm_hour
) {
7944 if (hour
> wtm
->tm_hour
) {
7945 wtm
->tm_hour
= hour
;
7949 return cronemu_min(wtm
, min
);
7953 cronemu_min(struct tm
*wtm
, int min
)
7959 if (min
< wtm
->tm_min
) {
7963 if (min
> wtm
->tm_min
) {
7971 job_mig_create_server(job_t j
, cmd_t server_cmd
, uid_t server_uid
, boolean_t on_demand
, mach_port_t
*server_portp
)
7973 struct ldcred
*ldc
= runtime_get_caller_creds();
7977 return BOOTSTRAP_NO_MEMORY
;
7980 if (unlikely(j
->deny_job_creation
)) {
7981 return BOOTSTRAP_NOT_PRIVILEGED
;
7985 const char **argv
= (const char **)mach_cmd2argv(server_cmd
);
7986 if (unlikely(argv
== NULL
)) {
7987 return BOOTSTRAP_NO_MEMORY
;
7989 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_PATH
, argv
[0]) > 0)) {
7991 return BOOTSTRAP_NOT_PRIVILEGED
;
7996 job_log(j
, LOG_DEBUG
, "Server create attempt: %s", server_cmd
);
7999 if (ldc
->euid
|| ldc
->uid
) {
8000 job_log(j
, LOG_WARNING
, "Server create attempt moved to per-user launchd: %s", server_cmd
);
8001 return VPROC_ERR_TRY_PER_USER
;
8004 if (unlikely(server_uid
!= getuid())) {
8005 job_log(j
, LOG_WARNING
, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
8006 server_cmd
, getuid(), server_uid
);
8008 server_uid
= 0; // zero means "do nothing"
8011 js
= job_new_via_mach_init(j
, server_cmd
, server_uid
, on_demand
);
8013 if (unlikely(js
== NULL
)) {
8014 return BOOTSTRAP_NO_MEMORY
;
8017 *server_portp
= js
->j_port
;
8018 return BOOTSTRAP_SUCCESS
;
8022 job_mig_send_signal(job_t j
, mach_port_t srp
, name_t targetlabel
, int sig
)
8024 struct ldcred
*ldc
= runtime_get_caller_creds();
8028 return BOOTSTRAP_NO_MEMORY
;
8031 if (unlikely(ldc
->euid
!= 0 && ldc
->euid
!= getuid()) || j
->deny_job_creation
) {
8032 #if TARGET_OS_EMBEDDED
8033 if (!j
->embedded_god
) {
8034 return BOOTSTRAP_NOT_PRIVILEGED
;
8037 return BOOTSTRAP_NOT_PRIVILEGED
;
8042 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
8043 return BOOTSTRAP_NOT_PRIVILEGED
;
8047 if (unlikely(!(otherj
= job_find(NULL
, targetlabel
)))) {
8048 return BOOTSTRAP_UNKNOWN_SERVICE
;
8051 #if TARGET_OS_EMBEDDED
8052 if (j
->embedded_god
) {
8053 if (j
->username
&& otherj
->username
) {
8054 if (strcmp(j
->username
, otherj
->username
) != 0) {
8055 return BOOTSTRAP_NOT_PRIVILEGED
;
8058 return BOOTSTRAP_NOT_PRIVILEGED
;
8063 if (sig
== VPROC_MAGIC_UNLOAD_SIGNAL
) {
8064 bool do_block
= otherj
->p
;
8066 if (otherj
->anonymous
) {
8067 return BOOTSTRAP_NOT_PRIVILEGED
;
8073 job_log(j
, LOG_DEBUG
, "Blocking MIG return of job_remove(): %s", otherj
->label
);
8074 // this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first...
8075 (void)job_assumes(otherj
, waiting4removal_new(otherj
, srp
));
8076 return MIG_NO_REPLY
;
8080 } else if (otherj
->p
) {
8081 (void)job_assumes_zero_p(j
, kill2(otherj
->p
, sig
));
8088 job_mig_log_forward(job_t j
, vm_offset_t inval
, mach_msg_type_number_t invalCnt
)
8090 struct ldcred
*ldc
= runtime_get_caller_creds();
8093 return BOOTSTRAP_NO_MEMORY
;
8096 if (!job_assumes(j
, j
->per_user
)) {
8097 return BOOTSTRAP_NOT_PRIVILEGED
;
8100 return launchd_log_forward(ldc
->euid
, ldc
->egid
, inval
, invalCnt
);
8104 job_mig_log_drain(job_t j
, mach_port_t srp
, vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
8106 struct ldcred
*ldc
= runtime_get_caller_creds();
8109 return BOOTSTRAP_NO_MEMORY
;
8112 if (unlikely(ldc
->euid
)) {
8113 return BOOTSTRAP_NOT_PRIVILEGED
;
8116 return launchd_log_drain(srp
, outval
, outvalCnt
);
8120 job_mig_swap_complex(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
,
8121 vm_offset_t inval
, mach_msg_type_number_t invalCnt
, vm_offset_t
*outval
,
8122 mach_msg_type_number_t
*outvalCnt
)
8125 launch_data_t input_obj
= NULL
, output_obj
= NULL
;
8126 size_t data_offset
= 0;
8128 struct ldcred
*ldc
= runtime_get_caller_creds();
8131 return BOOTSTRAP_NO_MEMORY
;
8134 if (inkey
&& ldc
->pid
!= j
->p
) {
8135 if (ldc
->euid
&& ldc
->euid
!= getuid()) {
8136 return BOOTSTRAP_NOT_PRIVILEGED
;
8140 if (unlikely(inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
))) {
8144 if (inkey
&& outkey
) {
8145 action
= "Swapping";
8152 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
8154 *outvalCnt
= 20 * 1024 * 1024;
8155 mig_allocate(outval
, *outvalCnt
);
8156 if (!job_assumes(j
, *outval
!= 0)) {
8160 /* Note to future maintainers: launch_data_unpack() does NOT return a heap
8161 * object. The data is decoded in-place. So do not call launch_data_free()
8164 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK
);
8165 if (unlikely(invalCnt
&& !job_assumes(j
, (input_obj
= launch_data_unpack((void *)inval
, invalCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
))) {
8171 case VPROC_GSK_ENVIRONMENT
:
8172 if (!job_assumes(j
, (output_obj
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
8175 jobmgr_export_env_from_other_jobs(j
->mgr
, output_obj
);
8176 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK
);
8177 if (!job_assumes(j
, launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
) != 0)) {
8180 launch_data_free(output_obj
);
8182 case VPROC_GSK_ALLJOBS
:
8183 if (!job_assumes(j
, (output_obj
= job_export_all()) != NULL
)) {
8186 ipc_revoke_fds(output_obj
);
8187 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK
);
8188 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
8189 if (!job_assumes(j
, packed_size
!= 0)) {
8192 launch_data_free(output_obj
);
8194 case VPROC_GSK_MGR_NAME
:
8195 if (!job_assumes(j
, (output_obj
= launch_data_new_string(j
->mgr
->name
)) != NULL
)) {
8198 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
8199 if (!job_assumes(j
, packed_size
!= 0)) {
8203 launch_data_free(output_obj
);
8205 case VPROC_GSK_JOB_OVERRIDES_DB
:
8206 store
= launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_DB
, "overrides.plist");
8207 if (!store
|| !job_assumes(j
, (output_obj
= launch_data_new_string(store
)) != NULL
)) {
8213 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
8214 if (!job_assumes(j
, packed_size
!= 0)) {
8218 launch_data_free(output_obj
);
8220 case VPROC_GSK_ZERO
:
8221 mig_deallocate(*outval
, *outvalCnt
);
8229 mig_deallocate(inval
, invalCnt
);
8233 mig_deallocate(inval
, invalCnt
);
8235 mig_deallocate(*outval
, *outvalCnt
);
8238 launch_data_free(output_obj
);
8245 job_mig_swap_integer(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
, int64_t inval
, int64_t *outval
)
8248 kern_return_t kr
= 0;
8249 struct ldcred
*ldc
= runtime_get_caller_creds();
8253 return BOOTSTRAP_NO_MEMORY
;
8256 if (inkey
&& ldc
->pid
!= j
->p
) {
8257 if (ldc
->euid
&& ldc
->euid
!= getuid()) {
8258 return BOOTSTRAP_NOT_PRIVILEGED
;
8262 if (unlikely(inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
))) {
8266 if (inkey
&& outkey
) {
8267 action
= "Swapping";
8274 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
8277 case VPROC_GSK_ABANDON_PROCESS_GROUP
:
8278 *outval
= j
->abandon_pg
;
8280 case VPROC_GSK_LAST_EXIT_STATUS
:
8281 *outval
= j
->last_exit_status
;
8283 case VPROC_GSK_MGR_UID
:
8286 case VPROC_GSK_MGR_PID
:
8289 case VPROC_GSK_IS_MANAGED
:
8290 *outval
= j
->anonymous
? 0 : 1;
8292 case VPROC_GSK_BASIC_KEEPALIVE
:
8293 *outval
= !j
->ondemand
;
8295 case VPROC_GSK_START_INTERVAL
:
8296 *outval
= j
->start_interval
;
8298 case VPROC_GSK_IDLE_TIMEOUT
:
8299 *outval
= j
->timeout
;
8301 case VPROC_GSK_EXIT_TIMEOUT
:
8302 *outval
= j
->exit_timeout
;
8304 case VPROC_GSK_GLOBAL_LOG_MASK
:
8305 oldmask
= runtime_setlogmask(LOG_UPTO(LOG_DEBUG
));
8307 runtime_setlogmask(oldmask
);
8309 case VPROC_GSK_GLOBAL_UMASK
:
8314 case VPROC_GSK_TRANSACTIONS_ENABLED
:
8315 job_log(j
, LOG_DEBUG
, "Reading EnableTransactions value.");
8316 *outval
= j
->enable_transactions
;
8318 case VPROC_GSK_WAITFORDEBUGGER
:
8319 *outval
= j
->wait4debugger
;
8321 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT
:
8322 *outval
= j
->embedded_god
;
8324 case VPROC_GSK_ZERO
:
8333 case VPROC_GSK_ABANDON_PROCESS_GROUP
:
8334 j
->abandon_pg
= (bool)inval
;
8336 case VPROC_GSK_GLOBAL_ON_DEMAND
:
8337 job_log(j
, LOG_DEBUG
, "Job has set global on-demand mode to: %s", inval
? "true" : "false");
8338 kr
= job_set_global_on_demand(j
, inval
);
8340 case VPROC_GSK_BASIC_KEEPALIVE
:
8341 j
->ondemand
= !inval
;
8343 case VPROC_GSK_START_INTERVAL
:
8344 if (inval
> UINT32_MAX
|| inval
< 0) {
8347 if (j
->start_interval
== 0) {
8348 runtime_add_weak_ref();
8350 j
->start_interval
= (typeof(j
->start_interval
)) inval
;
8351 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, j
->start_interval
, j
));
8352 } else if (j
->start_interval
) {
8353 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
));
8354 if (j
->start_interval
!= 0) {
8355 runtime_del_weak_ref();
8357 j
->start_interval
= 0;
8360 case VPROC_GSK_IDLE_TIMEOUT
:
8361 if (inval
< 0 || inval
> UINT32_MAX
) {
8364 j
->timeout
= (typeof(j
->timeout
)) inval
;
8367 case VPROC_GSK_EXIT_TIMEOUT
:
8368 if (inval
< 0 || inval
> UINT32_MAX
) {
8371 j
->exit_timeout
= (typeof(j
->exit_timeout
)) inval
;
8374 case VPROC_GSK_GLOBAL_LOG_MASK
:
8375 if (inval
< 0 || inval
> UINT32_MAX
) {
8378 runtime_setlogmask((int) inval
);
8381 case VPROC_GSK_GLOBAL_UMASK
:
8382 __OS_COMPILETIME_ASSERT__(sizeof (mode_t
) == 2);
8383 if (inval
< 0 || inval
> UINT16_MAX
) {
8387 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
8390 umask((mode_t
) inval
);
8395 case VPROC_GSK_TRANSACTIONS_ENABLED
:
8398 case VPROC_GSK_WEIRD_BOOTSTRAP
:
8399 if (job_assumes(j
, j
->weird_bootstrap
)) {
8400 job_log(j
, LOG_DEBUG
, "Unsetting weird bootstrap.");
8402 mach_msg_size_t mxmsgsz
= (typeof(mxmsgsz
)) sizeof(union __RequestUnion__job_mig_job_subsystem
);
8404 if (job_mig_job_subsystem
.maxsize
> mxmsgsz
) {
8405 mxmsgsz
= job_mig_job_subsystem
.maxsize
;
8408 (void)job_assumes_zero(j
, runtime_add_mport(j
->mgr
->jm_port
, job_server
));
8409 j
->weird_bootstrap
= false;
8412 case VPROC_GSK_WAITFORDEBUGGER
:
8413 j
->wait4debugger_oneshot
= inval
;
8415 case VPROC_GSK_PERUSER_SUSPEND
:
8416 if (job_assumes(j
, pid1_magic
&& ldc
->euid
== 0)) {
8417 mach_port_t junk
= MACH_PORT_NULL
;
8418 job_t jpu
= jobmgr_lookup_per_user_context_internal(j
, (uid_t
)inval
, &junk
);
8419 if (job_assumes(j
, jpu
!= NULL
)) {
8420 struct suspended_peruser
*spi
= NULL
;
8421 LIST_FOREACH(spi
, &j
->suspended_perusers
, sle
) {
8422 if ((int64_t)(spi
->j
->mach_uid
) == inval
) {
8423 job_log(j
, LOG_WARNING
, "Job tried to suspend per-user launchd for UID %lli twice.", inval
);
8429 job_log(j
, LOG_INFO
, "Job is suspending the per-user launchd for UID %lli.", inval
);
8430 spi
= (struct suspended_peruser
*)calloc(sizeof(struct suspended_peruser
), 1);
8431 if (job_assumes(j
, spi
!= NULL
)) {
8432 /* Stop listening for events.
8434 * See <rdar://problem/9014146>.
8436 if (jpu
->peruser_suspend_count
== 0) {
8441 spi
->j
->peruser_suspend_count
++;
8442 LIST_INSERT_HEAD(&j
->suspended_perusers
, spi
, sle
);
8446 kr
= BOOTSTRAP_NO_MEMORY
;
8454 case VPROC_GSK_PERUSER_RESUME
:
8455 if (job_assumes(j
, pid1_magic
== true)) {
8456 struct suspended_peruser
*spi
= NULL
, *spt
= NULL
;
8457 LIST_FOREACH_SAFE(spi
, &j
->suspended_perusers
, sle
, spt
) {
8458 if ((int64_t)(spi
->j
->mach_uid
) == inval
) {
8459 spi
->j
->peruser_suspend_count
--;
8460 LIST_REMOVE(spi
, sle
);
8461 job_log(j
, LOG_INFO
, "Job is resuming the per-user launchd for UID %lli.", inval
);
8466 if (!job_assumes(j
, spi
!= NULL
)) {
8467 job_log(j
, LOG_WARNING
, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval
);
8468 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
8469 } else if (spi
->j
->peruser_suspend_count
== 0) {
8471 job_dispatch(spi
->j
, false);
8478 case VPROC_GSK_ZERO
:
8489 job_mig_post_fork_ping(job_t j
, task_t child_task
, mach_port_t
*asport
)
8492 return BOOTSTRAP_NO_MEMORY
;
8495 job_log(j
, LOG_DEBUG
, "Post fork ping.");
8497 struct machservice
*ms
;
8498 job_setup_exception_port(j
, child_task
);
8499 SLIST_FOREACH(ms
, &special_ports
, special_port_sle
) {
8500 if (j
->per_user
&& (ms
->special_port_num
!= TASK_ACCESS_PORT
)) {
8501 // The TASK_ACCESS_PORT funny business is to workaround 5325399.
8505 errno
= task_set_special_port(child_task
, ms
->special_port_num
, ms
->port
);
8507 if (errno
== MACH_SEND_INVALID_DEST
) {
8508 job_log(j
, LOG_WARNING
, "Task died before special ports could be set.");
8512 int desired_log_level
= LOG_ERR
;
8516 desired_log_level
= LOG_WARNING
;
8518 if (ms
->special_port_num
== TASK_SEATBELT_PORT
) {
8519 desired_log_level
= LOG_DEBUG
;
8523 job_log(j
, desired_log_level
, "Could not setup Mach task special port %u: %s", ms
->special_port_num
, mach_error_string(errno
));
8527 /* MIG will not zero-initialize this pointer, so we must always do so.
8529 * <rdar://problem/8562593>.
8531 *asport
= MACH_PORT_NULL
;
8532 #if !TARGET_OS_EMBEDDED
8533 if (!j
->anonymous
) {
8534 /* XPC services will spawn into the root security session by default.
8535 * xpcproxy will switch them away if needed.
8537 if (!(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
8538 job_log(j
, LOG_DEBUG
, "Returning session port: 0x%x", j
->asport
);
8539 *asport
= j
->asport
;
8543 (void)job_assumes_zero(j
, launchd_mport_deallocate(child_task
));
8549 job_mig_get_listener_port_rights(job_t j
, mach_port_array_t
*sports
, mach_msg_type_number_t
*sports_cnt
)
8552 return BOOTSTRAP_NO_MEMORY
;
8556 struct machservice
*msi
= NULL
;
8557 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
8558 if (msi
->upfront
&& job_assumes(j
, msi
->recv
)) {
8564 return BOOTSTRAP_UNKNOWN_SERVICE
;
8567 mach_port_array_t sports2
= NULL
;
8568 mig_allocate((vm_address_t
*)&sports2
, cnt
* sizeof(sports2
[0]));
8570 return BOOTSTRAP_NO_MEMORY
;
8574 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
8575 if (msi
->upfront
&& msi
->recv
) {
8576 sports2
[i
] = msi
->port
;
8584 return KERN_SUCCESS
;
8588 job_mig_register_gui_session(job_t j
, mach_port_t asport
)
8591 return BOOTSTRAP_NOT_PRIVILEGED
;
8594 jobmgr_t jm
= jobmgr_find_xpc_per_user_domain(root_jobmgr
, j
->mach_uid
);
8596 return BOOTSTRAP_UNKNOWN_SERVICE
;
8599 if (jm
->req_gui_asport
) {
8600 // This job manager persists, so we need to allow the per-user launchd
8601 // to update the GUI session as it comes and goes.
8602 jobmgr_assumes_zero(jm
, launchd_mport_deallocate(jm
->req_gui_asport
));
8605 jm
->req_gui_asport
= asport
;
8606 return KERN_SUCCESS
;
8610 job_mig_reboot2(job_t j
, uint64_t flags
)
8612 char who_started_the_reboot
[2048] = "";
8613 struct proc_bsdshortinfo proc
;
8614 struct ldcred
*ldc
= runtime_get_caller_creds();
8618 return BOOTSTRAP_NO_MEMORY
;
8621 if (unlikely(!pid1_magic
)) {
8622 return BOOTSTRAP_NOT_PRIVILEGED
;
8625 #if !TARGET_OS_EMBEDDED
8626 if (unlikely(ldc
->euid
)) {
8628 if (unlikely(ldc
->euid
) && !j
->embedded_god
) {
8630 return BOOTSTRAP_NOT_PRIVILEGED
;
8633 for (pid_to_log
= ldc
->pid
; pid_to_log
; pid_to_log
= proc
.pbsi_ppid
) {
8635 if (proc_pidinfo(pid_to_log
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
8636 if (errno
!= ESRCH
) {
8637 (void)job_assumes_zero(j
, errno
);
8642 if (!job_assumes(j
, pid_to_log
!= (pid_t
)proc
.pbsi_ppid
)) {
8643 job_log(j
, LOG_WARNING
, "Job which is its own parent started reboot.");
8644 snprintf(who_started_the_reboot
, sizeof(who_started_the_reboot
), "%s[%u]->%s[%u]->%s[%u]->...", proc
.pbsi_comm
, pid_to_log
, proc
.pbsi_comm
, pid_to_log
, proc
.pbsi_comm
, pid_to_log
);
8648 who_offset
= strlen(who_started_the_reboot
);
8649 snprintf(who_started_the_reboot
+ who_offset
, sizeof(who_started_the_reboot
) - who_offset
,
8650 " %s[%u]%s", proc
.pbsi_comm
, pid_to_log
, proc
.pbsi_ppid
? " ->" : "");
8653 root_jobmgr
->reboot_flags
= (int)flags
;
8654 job_log(j
, LOG_DEBUG
, "reboot2() initiated by:%s", who_started_the_reboot
);
8661 job_mig_getsocket(job_t j
, name_t spr
)
8664 return BOOTSTRAP_NO_MEMORY
;
8667 if (j
->deny_job_creation
) {
8668 return BOOTSTRAP_NOT_PRIVILEGED
;
8672 struct ldcred
*ldc
= runtime_get_caller_creds();
8673 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
8674 return BOOTSTRAP_NOT_PRIVILEGED
;
8680 if (unlikely(!sockpath
)) {
8681 return BOOTSTRAP_NO_MEMORY
;
8684 strncpy(spr
, sockpath
, sizeof(name_t
));
8686 return BOOTSTRAP_SUCCESS
;
8690 job_mig_log(job_t j
, int pri
, int err
, logmsg_t msg
)
8693 return BOOTSTRAP_NO_MEMORY
;
8696 if ((errno
= err
)) {
8697 job_log_error(j
, pri
, "%s", msg
);
8699 job_log(j
, pri
, "%s", msg
);
8706 job_setup_per_user_directory(job_t j
, uid_t uid
, const char *path
)
8710 bool created
= false;
8711 int r
= stat(path
, &sb
);
8712 if ((r
== -1 && errno
== ENOENT
) || (r
== 0 && !S_ISDIR(sb
.st_mode
))) {
8714 job_log(j
, LOG_NOTICE
, "File at location of per-user launchd directory is not a directory. Moving aside: %s", path
);
8717 snprintf(old
, sizeof(old
), "%s.movedaside", path
);
8718 (void)job_assumes_zero_p(j
, rename(path
, old
));
8721 (void)job_assumes_zero_p(j
, mkdir(path
, S_IRWXU
));
8722 (void)job_assumes_zero_p(j
, chown(path
, uid
, 0));
8727 if (sb
.st_uid
!= uid
) {
8728 job_log(j
, LOG_NOTICE
, "Per-user launchd directory has improper user ownership. Repairing: %s", path
);
8729 (void)job_assumes_zero_p(j
, chown(path
, uid
, 0));
8731 if (sb
.st_gid
!= 0) {
8732 job_log(j
, LOG_NOTICE
, "Per-user launchd directory has improper group ownership. Repairing: %s", path
);
8733 (void)job_assumes_zero_p(j
, chown(path
, uid
, 0));
8735 if (sb
.st_mode
!= (S_IRWXU
| S_IFDIR
)) {
8736 job_log(j
, LOG_NOTICE
, "Per-user launchd directory has improper mode. Repairing: %s", path
);
8737 (void)job_assumes_zero_p(j
, chmod(path
, S_IRWXU
));
8743 job_setup_per_user_directories(job_t j
, uid_t uid
, const char *label
)
8745 char path
[PATH_MAX
];
8747 (void)snprintf(path
, sizeof(path
), LAUNCHD_DB_PREFIX
"/%s", label
);
8748 job_setup_per_user_directory(j
, uid
, path
);
8750 (void)snprintf(path
, sizeof(path
), LAUNCHD_LOG_PREFIX
"/%s", label
);
8751 job_setup_per_user_directory(j
, uid
, path
);
8755 jobmgr_lookup_per_user_context_internal(job_t j
, uid_t which_user
, mach_port_t
*mp
)
8758 LIST_FOREACH(ji
, &root_jobmgr
->jobs
, sle
) {
8759 if (!ji
->per_user
) {
8762 if (ji
->mach_uid
!= which_user
) {
8765 if (SLIST_EMPTY(&ji
->machservices
)) {
8768 if (!SLIST_FIRST(&ji
->machservices
)->per_user_hack
) {
8774 if (unlikely(ji
== NULL
)) {
8775 struct machservice
*ms
;
8778 job_log(j
, LOG_DEBUG
, "Creating per user launchd job for UID: %u", which_user
);
8780 sprintf(lbuf
, "com.apple.launchd.peruser.%u", which_user
);
8782 ji
= job_new(root_jobmgr
, lbuf
, "/sbin/launchd", NULL
);
8785 auditinfo_addr_t auinfo
= {
8789 .ai_auid
= which_user
,
8790 .ai_asid
= AU_ASSIGN_ASID
,
8793 if (setaudit_addr(&auinfo
, sizeof(auinfo
)) == 0) {
8794 job_log(ji
, LOG_DEBUG
, "Created new security session for per-user launchd: %u", auinfo
.ai_asid
);
8795 (void)job_assumes(ji
, (ji
->asport
= audit_session_self()) != MACH_PORT_NULL
);
8797 /* Kinda lame that we have to do this, but we can't create an
8798 * audit session without joining it.
8800 (void)job_assumes(ji
, audit_session_join(launchd_audit_port
));
8801 ji
->asid
= auinfo
.ai_asid
;
8803 job_log(ji
, LOG_WARNING
, "Could not set audit session!");
8808 ji
->mach_uid
= which_user
;
8809 ji
->per_user
= true;
8810 ji
->enable_transactions
= true;
8811 job_setup_per_user_directories(ji
, which_user
, lbuf
);
8813 if ((ms
= machservice_new(ji
, lbuf
, mp
, false)) == NULL
) {
8818 ms
->per_user_hack
= true;
8821 ji
= job_dispatch(ji
, false);
8825 *mp
= machservice_port(SLIST_FIRST(&ji
->machservices
));
8826 job_log(j
, LOG_DEBUG
, "Per user launchd job found for UID: %u", which_user
);
/* MIG handler: resolve the bootstrap port of the per-user launchd for
 * `which_user`, returning it through *up_cont.
 * Refuses during OS install, on embedded platforms, when sandboxed callers
 * lack "mach-per-user-lookup", and when we are not PID 1. Non-root callers
 * are redirected to their own UID regardless of the requested one.
 */
8833 job_mig_lookup_per_user_context(job_t j
, uid_t which_user
, mach_port_t
*up_cont
)
8835 struct ldcred
*ldc
= runtime_get_caller_creds();
8839 return BOOTSTRAP_NO_MEMORY
;
8842 if (launchd_osinstaller
) {
8843 return BOOTSTRAP_UNKNOWN_SERVICE
;
8846 #if TARGET_OS_EMBEDDED
8847 // There is no need for per-user launchd's on embedded.
8848 job_log(j
, LOG_ERR
, "Per-user launchds are not supported on this platform.");
8849 return BOOTSTRAP_UNKNOWN_SERVICE
;
/* Sandbox gate: the caller must be allowed "mach-per-user-lookup". */
8853 if (unlikely(sandbox_check(ldc
->pid
, "mach-per-user-lookup", SANDBOX_FILTER_NONE
) > 0)) {
8854 return BOOTSTRAP_NOT_PRIVILEGED
;
8858 job_log(j
, LOG_INFO
, "Looking up per user launchd for UID: %u", which_user
);
8860 if (unlikely(!pid1_magic
)) {
8861 job_log(j
, LOG_ERR
, "Only PID 1 supports per user launchd lookups.");
8862 return BOOTSTRAP_NOT_PRIVILEGED
;
/* Non-root callers can only look up their own per-user context:
 * override which_user with the caller's euid (or uid as fallback). */
8865 if (ldc
->euid
|| ldc
->uid
) {
8866 which_user
= ldc
->euid
?: ldc
->uid
;
8869 *up_cont
= MACH_PORT_NULL
;
8871 jpu
= jobmgr_lookup_per_user_context_internal(j
, which_user
, up_cont
);
/* MIG handler for bootstrap_check_in(): hand the receive right for
 * `servicename` back to the job that declared it, via *serviceportp.
 * Honors BOOTSTRAP_PER_PID_SERVICE and BOOTSTRAP_STRICT_CHECKIN flag bits;
 * for dedicated-instance jobs the matching instance UUID is copied out
 * through instance_id. Ownership and liveness are enforced so one job
 * cannot check in (hijack) a service registered by another.
 * NOTE: several inner conditions/braces are elided in this view.
 */
8877 job_mig_check_in2(job_t j
, name_t servicename
, mach_port_t
*serviceportp
, uuid_t instance_id
, uint64_t flags
)
8879 bool per_pid_service
= flags
& BOOTSTRAP_PER_PID_SERVICE
;
8880 bool strict
= flags
& BOOTSTRAP_STRICT_CHECKIN
;
8881 struct ldcred
*ldc
= runtime_get_caller_creds();
8882 struct machservice
*ms
= NULL
;
8886 return BOOTSTRAP_NO_MEMORY
;
/* Dedicated instances report their own instance UUID for the service. */
8889 if (j
->dedicated_instance
) {
8890 struct machservice
*msi
= NULL
;
8891 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
8892 if (strncmp(servicename
, msi
->name
, sizeof(name_t
) - 1) == 0) {
8893 uuid_copy(instance_id
, j
->instance_id
);
8899 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, per_pid_service
? ldc
->pid
: 0);
/* Strict check-in path: the service must already exist, belong to this
 * job, and be inactive (elided ownership check sits between these arms). */
8903 if (likely(ms
!= NULL
)) {
8905 return BOOTSTRAP_NOT_PRIVILEGED
;
8906 } else if (ms
->isActive
) {
8907 return BOOTSTRAP_SERVICE_ACTIVE
;
8910 return BOOTSTRAP_UNKNOWN_SERVICE
;
/* Non-strict and the name is unknown: implicitly register it (legacy
 * bootstrap_check_in behavior), subject to the sandbox "mach-register" gate. */
8912 } else if (ms
== NULL
) {
8913 if (job_assumes(j
, !j
->dedicated_instance
)) {
8914 *serviceportp
= MACH_PORT_NULL
;
8917 if (unlikely(sandbox_check(ldc
->pid
, "mach-register", per_pid_service
? SANDBOX_FILTER_LOCAL_NAME
: SANDBOX_FILTER_GLOBAL_NAME
, servicename
) > 0)) {
8918 return BOOTSTRAP_NOT_PRIVILEGED
;
8921 if (unlikely((ms
= machservice_new(j
, servicename
, serviceportp
, per_pid_service
)) == NULL
)) {
8922 return BOOTSTRAP_NO_MEMORY
;
8925 // Treat this like a legacy job.
8926 if (!j
->legacy_mach_job
) {
8927 ms
->isActive
= true;
/* Nag properly-declared jobs to list the service in their plist. */
8931 if (!(j
->anonymous
|| j
->legacy_LS_job
|| j
->legacy_mach_job
)) {
8932 job_log(j
, LOG_APPLEONLY
, "Please add the following service to the configuration file for this job: %s", servicename
);
8935 return BOOTSTRAP_UNKNOWN_SERVICE
;
/* Existing service owned by a different job: refuse, and rate-limit the
 * hijack warning to one message per offending PID. */
8938 if (unlikely((jo
= machservice_job(ms
)) != j
)) {
8939 static pid_t last_warned_pid
;
8941 if (last_warned_pid
!= ldc
->pid
) {
8942 job_log(jo
, LOG_WARNING
, "The following job tried to hijack the service \"%s\" from this job: %s", servicename
, j
->label
);
8943 last_warned_pid
= ldc
->pid
;
8946 return BOOTSTRAP_NOT_PRIVILEGED
;
8948 if (unlikely(machservice_active(ms
))) {
8949 job_log(j
, LOG_WARNING
, "Check-in of Mach service failed. Already active: %s", servicename
);
8950 return BOOTSTRAP_SERVICE_ACTIVE
;
/* Success: watch the receive right and hand it to the caller. */
8955 machservice_request_notifications(ms
);
8957 job_log(j
, LOG_INFO
, "Check-in of service: %s", servicename
);
8959 *serviceportp
= machservice_port(ms
);
8960 return BOOTSTRAP_SUCCESS
;
/* MIG handler for bootstrap_register(): bind a caller-supplied send right
 * (`serviceport`) to `servicename` in this job's bootstrap, replacing any
 * inactive registration owned by the same job. A NULL serviceport acts as
 * deregistration. Deprecated in favor of bootstrap_check_in(); gated by
 * the sandbox "mach-register" check and (on non-embedded) refused for rogue
 * non-root callers in the root session, except SecurityAgent (UID 92).
 */
8964 job_mig_register2(job_t j
, name_t servicename
, mach_port_t serviceport
, uint64_t flags
)
8966 struct machservice
*ms
;
8967 struct ldcred
*ldc
= runtime_get_caller_creds();
8968 bool per_pid_service
= flags
& BOOTSTRAP_PER_PID_SERVICE
;
8971 return BOOTSTRAP_NO_MEMORY
;
8974 if (!per_pid_service
&& !j
->legacy_LS_job
) {
8975 job_log(j
, LOG_APPLEONLY
, "Performance: bootstrap_register() is deprecated. Service: %s", servicename
);
8978 job_log(j
, LOG_DEBUG
, "%sMach service registration attempt: %s", flags
& BOOTSTRAP_PER_PID_SERVICE
? "Per PID " : "", servicename
);
8981 if (unlikely(sandbox_check(ldc
->pid
, "mach-register", per_pid_service
? SANDBOX_FILTER_LOCAL_NAME
: SANDBOX_FILTER_GLOBAL_NAME
, servicename
) > 0)) {
8982 return BOOTSTRAP_NOT_PRIVILEGED
;
8986 // 5641783 for the embedded hack
8987 #if !TARGET_OS_EMBEDDED
8989 * From a per-user/session launchd's perspective, SecurityAgent (UID
8990 * 92) is a rogue application (not our UID, not root and not a child of
8991 * us). We'll have to reconcile this design friction at a later date.
/* Anonymous caller in the root bootstrap that is neither root, us, nor
 * SecurityAgent: bounce to the per-user context (or refuse). */
8993 if (unlikely(j
->anonymous
&& j
->mgr
->parentmgr
== NULL
&& ldc
->uid
!= 0 && ldc
->uid
!= getuid() && ldc
->uid
!= 92)) {
8995 return VPROC_ERR_TRY_PER_USER
;
8997 return BOOTSTRAP_NOT_PRIVILEGED
;
9002 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, flags
& BOOTSTRAP_PER_PID_SERVICE
? ldc
->pid
: 0);
/* An existing registration may only be replaced by its owning job, must
 * not be active, and must not hold a launchd-owned receive right. */
9005 if (machservice_job(ms
) != j
) {
9006 return BOOTSTRAP_NOT_PRIVILEGED
;
9008 if (machservice_active(ms
)) {
9009 job_log(j
, LOG_DEBUG
, "Mach service registration failed. Already active: %s", servicename
);
9010 return BOOTSTRAP_SERVICE_ACTIVE
;
9012 if (ms
->recv
&& (serviceport
!= MACH_PORT_NULL
)) {
9013 job_log(j
, LOG_ERR
, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename
);
9014 return BOOTSTRAP_NOT_PRIVILEGED
;
9017 machservice_delete(j
, ms
, false);
/* Re-create the service around the caller's send right (NULL port means
 * the old registration stays deleted). */
9020 if (likely(serviceport
!= MACH_PORT_NULL
)) {
9021 if (likely(ms
= machservice_new(j
, servicename
, &serviceport
, flags
& BOOTSTRAP_PER_PID_SERVICE
? true : false))) {
9022 machservice_request_notifications(ms
);
9024 return BOOTSTRAP_NO_MEMORY
;
9029 return BOOTSTRAP_SUCCESS
;
/* MIG handler for bootstrap_look_up(): resolve `servicename` in the caller's
 * bootstrap and return a send right through *serviceportp.
 * Flag bits control the search: BOOTSTRAP_PER_PID_SERVICE (scope to
 * target_pid), BOOTSTRAP_SPECIFIC_INSTANCE (instance_id selects/creates a
 * sub-job of a multi-instance job), BOOTSTRAP_STRICT_LOOKUP and
 * BOOTSTRAP_PRIVILEGED_SERVER (forwarding policy). Misses may be forwarded
 * up through inherited_bootstrap_port (returning MIG_NO_REPLY since the
 * reply right moves with the forward) or redirected to the per-user
 * context. XPC-domain requests skip the sandbox check and never forward.
 */
9033 job_mig_look_up2(job_t j
, mach_port_t srp
, name_t servicename
, mach_port_t
*serviceportp
, pid_t target_pid
, uuid_t instance_id
, uint64_t flags
)
9035 struct machservice
*ms
= NULL
;
9036 struct ldcred
*ldc
= runtime_get_caller_creds();
9038 bool per_pid_lookup
= flags
& BOOTSTRAP_PER_PID_SERVICE
;
9039 bool specific_instance
= flags
& BOOTSTRAP_SPECIFIC_INSTANCE
;
9040 bool strict_lookup
= flags
& BOOTSTRAP_STRICT_LOOKUP
;
9041 bool privileged
= flags
& BOOTSTRAP_PRIVILEGED_SERVER
;
9044 return BOOTSTRAP_NO_MEMORY
;
9047 bool xpc_req
= (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
);
9049 // 5641783 for the embedded hack
9050 #if !TARGET_OS_EMBEDDED
/* Anonymous non-root caller talking to PID 1's root bootstrap: send it to
 * its per-user launchd instead of answering here. */
9051 if (unlikely(pid1_magic
&& j
->anonymous
&& j
->mgr
->parentmgr
== NULL
&& ldc
->uid
!= 0 && ldc
->euid
!= 0)) {
9052 return VPROC_ERR_TRY_PER_USER
;
9057 /* We don't do sandbox checking for XPC domains because, by definition, all
9058 * the services within your domain should be accessible to you.
9060 if (!xpc_req
&& unlikely(sandbox_check(ldc
->pid
, "mach-lookup", per_pid_lookup
? SANDBOX_FILTER_LOCAL_NAME
: SANDBOX_FILTER_GLOBAL_NAME
, servicename
) > 0)) {
9061 return BOOTSTRAP_NOT_PRIVILEGED
;
9065 if (per_pid_lookup
) {
9066 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, target_pid
);
9069 // Requests from XPC domains stay local.
9070 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, 0);
9072 /* A strict lookup which is privileged won't even bother trying to
9073 * find a service if we're not hosting the root Mach bootstrap.
9075 if (strict_lookup
&& privileged
) {
9076 if (inherited_bootstrap_port
== MACH_PORT_NULL
) {
9077 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, true, 0);
9080 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, true, 0);
/* Chase aliases to the real service before any instance handling. */
9086 ms
= ms
->alias
? ms
->alias
: ms
;
9087 if (unlikely(specific_instance
&& ms
->job
->multiple_instances
)) {
9089 job_t instance
= NULL
;
9090 LIST_FOREACH(ji
, &ms
->job
->subjobs
, subjob_sle
) {
9091 if (uuid_compare(instance_id
, ji
->instance_id
) == 0) {
/* No existing sub-job with that UUID: create one on demand. */
9097 if (unlikely(instance
== NULL
)) {
9098 job_log(ms
->job
, LOG_DEBUG
, "Creating new instance of job based on lookup of service %s", ms
->name
);
9099 instance
= job_new_subjob(ms
->job
, instance_id
);
9100 if (job_assumes(j
, instance
!= NULL
)) {
9101 /* Disable this support for now. We only support having
9102 * multi-instance jobs within private XPC domains.
9105 /* If the job is multi-instance, in a singleton XPC domain
9106 * and the request is not coming from within that singleton
9107 * domain, we need to alias the new job into the requesting
9110 if (!j
->mgr
->xpc_singleton
&& xpc_req
) {
9111 (void)job_assumes(instance
, job_new_alias(j
->mgr
, instance
));
9114 job_dispatch(instance
, false);
/* Re-resolve the service on the chosen instance by name. */
9119 if (job_assumes(j
, instance
!= NULL
)) {
9120 struct machservice
*msi
= NULL
;
9121 SLIST_FOREACH(msi
, &instance
->machservices
, sle
) {
9122 /* sizeof(servicename) will return the size of a pointer,
9123 * even though it's an array type, because when passing
9124 * arrays as parameters in C, they implicitly degrade to
9127 if (strncmp(servicename
, msi
->name
, sizeof(name_t
) - 1) == 0) {
9134 if (machservice_hidden(ms
) && !machservice_active(ms
)) {
9136 } else if (unlikely(ms
->per_user_hack
)) {
/* Hit: hand out the service port. */
9143 (void)job_assumes(j
, machservice_port(ms
) != MACH_PORT_NULL
);
9144 job_log(j
, LOG_DEBUG
, "%sMach service lookup: %s", per_pid_lookup
? "Per PID " : "", servicename
);
9145 *serviceportp
= machservice_port(ms
);
9147 kr
= BOOTSTRAP_SUCCESS
;
9148 } else if (strict_lookup
&& !privileged
) {
9149 /* Hack: We need to simulate XPC's desire not to establish a hierarchy.
9150 * So if XPC is doing the lookup, and it's not a privileged lookup, we
9151 * won't forward. But if it is a privileged lookup, then we must
9154 return BOOTSTRAP_UNKNOWN_SERVICE
;
9155 } else if (inherited_bootstrap_port
!= MACH_PORT_NULL
) {
9156 // Requests from within an XPC domain don't get forwarded.
9157 job_log(j
, LOG_DEBUG
, "Mach service lookup forwarded: %s", servicename
);
9158 /* Clients potentially check the audit token of the reply to verify that
9159 * the returned send right is trustworthy.
9161 (void)job_assumes_zero(j
, vproc_mig_look_up2_forward(inherited_bootstrap_port
, srp
, servicename
, target_pid
, instance_id
, flags
));
9162 return MIG_NO_REPLY
;
9163 } else if (pid1_magic
&& j
->anonymous
&& ldc
->euid
>= 500 && strcasecmp(j
->mgr
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
9164 /* 5240036 Should start background session when a lookup of CCacheServer
9167 * This is a total hack. We sniff out loginwindow session, and attempt
9168 * to guess what it is up to. If we find a EUID that isn't root, we
9169 * force it over to the per-user context.
9171 return VPROC_ERR_TRY_PER_USER
;
9173 job_log(j
, LOG_DEBUG
, "%sMach service lookup failed: %s", per_pid_lookup
? "Per PID " : "", servicename
);
9174 kr
= BOOTSTRAP_UNKNOWN_SERVICE
;
/* MIG handler for bootstrap_parent(): return the parent bootstrap port of
 * the caller's job manager through *parentport.
 * Falls back to our own port when we are the root and have no inherited
 * bootstrap; otherwise forwards the request upward, which consumes the
 * reply right (srp), forcing a MIG_NO_REPLY return.
 */
9181 job_mig_parent(job_t j
, mach_port_t srp
, mach_port_t
*parentport
)
9184 return BOOTSTRAP_NO_MEMORY
;
9187 job_log(j
, LOG_DEBUG
, "Requested parent bootstrap port");
9188 jobmgr_t jm
= j
->mgr
;
9190 if (jobmgr_parent(jm
)) {
9191 *parentport
= jobmgr_parent(jm
)->jm_port
;
9192 } else if (MACH_PORT_NULL
== inherited_bootstrap_port
) {
/* Root of the hierarchy: the parent of the root is the root itself. */
9193 *parentport
= jm
->jm_port
;
9195 (void)job_assumes_zero(j
, vproc_mig_parent_forward(inherited_bootstrap_port
, srp
));
9196 // The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now
9197 return MIG_NO_REPLY
;
9199 return BOOTSTRAP_SUCCESS
;
/* MIG handler: return the root bootstrap port through *rootbsp.
 * If we are the root launchd (no inherited bootstrap), hand out a new send
 * right to root_jobmgr's port; otherwise copy a send right to the bootstrap
 * we inherited from our parent.
 */
9203 job_mig_get_root_bootstrap(job_t j
, mach_port_t
*rootbsp
)
9206 return BOOTSTRAP_NO_MEMORY
;
9209 if (inherited_bootstrap_port
== MACH_PORT_NULL
) {
9210 *rootbsp
= root_jobmgr
->jm_port
;
9211 (void)job_assumes_zero(j
, launchd_mport_make_send(root_jobmgr
->jm_port
));
9213 *rootbsp
= inherited_bootstrap_port
;
9214 (void)job_assumes_zero(j
, launchd_mport_copy_send(inherited_bootstrap_port
));
9217 return BOOTSTRAP_SUCCESS
;
/* MIG handler for bootstrap_info(): enumerate every non-per-PID machservice
 * visible from the caller's bootstrap into three parallel MIG-allocated
 * arrays (names, owning-job labels, active status), all sized `cnt`.
 * Two passes over the ms_hash buckets: first to count, then to fill.
 * On any allocation failure, every array allocated so far is deallocated
 * and BOOTSTRAP_NO_MEMORY is returned; on success the caller (MIG) owns
 * the out-of-line memory.
 */
9221 job_mig_info(job_t j
, name_array_t
*servicenamesp
,
9222 unsigned int *servicenames_cnt
, name_array_t
*servicejobsp
,
9223 unsigned int *servicejobs_cnt
, bootstrap_status_array_t
*serviceactivesp
,
9224 unsigned int *serviceactives_cnt
, uint64_t flags
)
9226 name_array_t service_names
= NULL
;
9227 name_array_t service_jobs
= NULL
;
9228 bootstrap_status_array_t service_actives
= NULL
;
9229 unsigned int cnt
= 0, cnt2
= 0;
9233 return BOOTSTRAP_NO_MEMORY
;
9236 #if TARGET_OS_EMBEDDED
9237 struct ldcred
*ldc
= runtime_get_caller_creds();
9241 #endif // TARGET_OS_EMBEDDED
/* Choose which job manager to enumerate: in a flat namespace requests go
 * to the root manager unless the caller's manager is an explicit subset
 * or BOOTSTRAP_FORCE_LOCAL was passed (selection logic partially elided). */
9243 if (launchd_flat_mach_namespace
) {
9244 if ((j
->mgr
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) || (flags
& BOOTSTRAP_FORCE_LOCAL
)) {
/* Pass 1: count the non-per-PID services. */
9254 struct machservice
*msi
= NULL
;
9255 for (i
= 0; i
< MACHSERVICE_HASH_SIZE
; i
++) {
9256 LIST_FOREACH(msi
, &jm
->ms_hash
[i
], name_hash_sle
) {
9257 cnt
+= !msi
->per_pid
? 1 : 0;
9265 mig_allocate((vm_address_t
*)&service_names
, cnt
* sizeof(service_names
[0]));
9266 if (!job_assumes(j
, service_names
!= NULL
)) {
9270 mig_allocate((vm_address_t
*)&service_jobs
, cnt
* sizeof(service_jobs
[0]));
9271 if (!job_assumes(j
, service_jobs
!= NULL
)) {
9275 mig_allocate((vm_address_t
*)&service_actives
, cnt
* sizeof(service_actives
[0]));
9276 if (!job_assumes(j
, service_actives
!= NULL
)) {
/* Pass 2: fill the three parallel arrays. Aliased services report the
 * target's job/manager, but the name recorded is the alias's own. */
9280 for (i
= 0; i
< MACHSERVICE_HASH_SIZE
; i
++) {
9281 LIST_FOREACH(msi
, &jm
->ms_hash
[i
], name_hash_sle
) {
9282 if (!msi
->per_pid
) {
9283 strlcpy(service_names
[cnt2
], machservice_name(msi
), sizeof(service_names
[0]));
9284 msi
= msi
->alias
? msi
->alias
: msi
;
9285 if (msi
->job
->mgr
->shortdesc
) {
9286 strlcpy(service_jobs
[cnt2
], msi
->job
->mgr
->shortdesc
, sizeof(service_jobs
[0]));
9288 strlcpy(service_jobs
[cnt2
], msi
->job
->label
, sizeof(service_jobs
[0]));
9290 service_actives
[cnt2
] = machservice_status(msi
);
9296 (void)job_assumes(j
, cnt
== cnt2
);
9299 *servicenamesp
= service_names
;
9300 *servicejobsp
= service_jobs
;
9301 *serviceactivesp
= service_actives
;
9302 *servicenames_cnt
= *servicejobs_cnt
= *serviceactives_cnt
= cnt
;
9304 return BOOTSTRAP_SUCCESS
;
/* Error path (reached via elided goto): free whatever was allocated. */
9307 if (service_names
) {
9308 mig_deallocate((vm_address_t
)service_names
, cnt
* sizeof(service_names
[0]));
9311 mig_deallocate((vm_address_t
)service_jobs
, cnt
* sizeof(service_jobs
[0]));
9313 if (service_actives
) {
9314 mig_deallocate((vm_address_t
)service_actives
, cnt
* sizeof(service_actives
[0]));
9317 return BOOTSTRAP_NO_MEMORY
;
/* MIG handler: enumerate the child bootstraps of the caller's job manager
 * — its sub-managers plus, when we are PID 1, the per-user launchds — into
 * three parallel MIG-allocated arrays (ports, names, property bitmasks).
 * Root-only (euid 0): in a non-flat namespace this could otherwise be used
 * to cross sessions. Returns BOOTSTRAP_NO_CHILDREN when there is nothing
 * to report; on allocation failure everything allocated so far is freed
 * (error path via elided goto labels).
 */
9321 job_mig_lookup_children(job_t j
, mach_port_array_t
*child_ports
,
9322 mach_msg_type_number_t
*child_ports_cnt
, name_array_t
*child_names
,
9323 mach_msg_type_number_t
*child_names_cnt
,
9324 bootstrap_property_array_t
*child_properties
,
9325 mach_msg_type_number_t
*child_properties_cnt
)
9327 kern_return_t kr
= BOOTSTRAP_NO_MEMORY
;
9329 return BOOTSTRAP_NO_MEMORY
;
9332 struct ldcred
*ldc
= runtime_get_caller_creds();
9334 /* Only allow root processes to look up children, even if we're in the per-user launchd.
9335 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
9336 * in a non-flat namespace.
9338 if (ldc
->euid
!= 0) {
9339 job_log(j
, LOG_WARNING
, "Attempt to look up children of bootstrap by unprivileged job.");
9340 return BOOTSTRAP_NOT_PRIVILEGED
;
/* Pass 1: count sub-managers and (for PID 1) per-user launchd jobs. */
9343 unsigned int cnt
= 0;
9345 jobmgr_t jmr
= j
->mgr
;
9346 jobmgr_t jmi
= NULL
;
9347 SLIST_FOREACH(jmi
, &jmr
->submgrs
, sle
) {
9351 // Find our per-user launchds if we're PID 1.
9354 LIST_FOREACH(ji
, &jmr
->jobs
, sle
) {
9355 cnt
+= ji
->per_user
? 1 : 0;
9360 return BOOTSTRAP_NO_CHILDREN
;
9363 mach_port_array_t _child_ports
= NULL
;
9364 name_array_t _child_names
= NULL
;
9365 bootstrap_property_array_t _child_properties
= NULL
;
9367 mig_allocate((vm_address_t
*)&_child_ports
, cnt
* sizeof(_child_ports
[0]));
9368 if (!job_assumes(j
, _child_ports
!= NULL
)) {
9369 kr
= BOOTSTRAP_NO_MEMORY
;
9373 mig_allocate((vm_address_t
*)&_child_names
, cnt
* sizeof(_child_names
[0]));
9374 if (!job_assumes(j
, _child_names
!= NULL
)) {
9375 kr
= BOOTSTRAP_NO_MEMORY
;
9379 mig_allocate((vm_address_t
*)&_child_properties
, cnt
* sizeof(_child_properties
[0]));
9380 if (!job_assumes(j
, _child_properties
!= NULL
)) {
9381 kr
= BOOTSTRAP_NO_MEMORY
;
/* Pass 2a: record each sub-manager. A failed make-send yields
 * MACH_PORT_NULL in the slot rather than aborting the enumeration. */
9385 unsigned int cnt2
= 0;
9386 SLIST_FOREACH(jmi
, &jmr
->submgrs
, sle
) {
9387 if (jobmgr_assumes_zero(jmi
, launchd_mport_make_send(jmi
->jm_port
)) == KERN_SUCCESS
) {
9388 _child_ports
[cnt2
] = jmi
->jm_port
;
9390 _child_ports
[cnt2
] = MACH_PORT_NULL
;
9393 strlcpy(_child_names
[cnt2
], jmi
->name
, sizeof(_child_names
[0]));
9394 _child_properties
[cnt2
] = jmi
->properties
;
/* Pass 2b (PID 1 only): record each per-user launchd, copying the send
 * right for its per-user-hack machservice port. */
9399 if (pid1_magic
) LIST_FOREACH(ji
, &jmr
->jobs
, sle
) {
9401 if (job_assumes(ji
, SLIST_FIRST(&ji
->machservices
)->per_user_hack
== true)) {
9402 mach_port_t port
= machservice_port(SLIST_FIRST(&ji
->machservices
));
9404 if (job_assumes_zero(ji
, launchd_mport_copy_send(port
)) == KERN_SUCCESS
) {
9405 _child_ports
[cnt2
] = port
;
9407 _child_ports
[cnt2
] = MACH_PORT_NULL
;
9410 _child_ports
[cnt2
] = MACH_PORT_NULL
;
9413 strlcpy(_child_names
[cnt2
], ji
->label
, sizeof(_child_names
[0]));
9414 _child_properties
[cnt2
] |= BOOTSTRAP_PROPERTY_PERUSER
;
9420 *child_names_cnt
= cnt
;
9421 *child_ports_cnt
= cnt
;
9422 *child_properties_cnt
= cnt
;
9424 *child_names
= _child_names
;
9425 *child_ports
= _child_ports
;
9426 *child_properties
= _child_properties
;
9429 for (i
= 0; i
< cnt
; i
++) {
9430 job_log(j
, LOG_DEBUG
, "child_names[%u] = %s", i
, (char *)_child_names
[i
]);
9433 return BOOTSTRAP_SUCCESS
;
/* Error unwind: release the arrays in reverse order of allocation. */
9436 mig_deallocate((vm_address_t
)_child_ports
, cnt
* sizeof(_child_ports
[0]));
9440 mig_deallocate((vm_address_t
)_child_names
, cnt
* sizeof(_child_names
[0]));
9443 if (_child_properties
) {
9444 mig_deallocate((vm_address_t
)_child_properties
, cnt
* sizeof(_child_properties
[0]));
/* MIG handler: report through *managed whether PID `p` is a launchd-managed
 * job (not anonymous, not a legacy LaunchServices job). Callers must share
 * our euid or be root. Exists so loginwindow does not try to quit GUI apps
 * that launchd itself launched as agents.
 */
9451 job_mig_pid_is_managed(job_t j
__attribute__((unused
)), pid_t p
, boolean_t
*managed
)
9453 struct ldcred
*ldc
= runtime_get_caller_creds();
9454 if ((ldc
->euid
!= geteuid()) && (ldc
->euid
!= 0)) {
9455 return BOOTSTRAP_NOT_PRIVILEGED
;
9458 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
9459 * directly by launchd as agents.
/* Deep search across all job managers; anonymous jobs don't count. */
9461 job_t j_for_pid
= jobmgr_find_by_pid_deep(root_jobmgr
, p
, false);
9462 if (j_for_pid
&& !j_for_pid
->anonymous
&& !j_for_pid
->legacy_LS_job
) {
9466 return BOOTSTRAP_SUCCESS
;
/* MIG handler: return, via *mp, the job port (j_port) of the job whose
 * label matches `label`, creating the job's machport lazily if needed.
 * Gated by the sandbox "job-creation" check; the caller must be root or
 * share our euid, and the requesting job must not have deny_job_creation
 * set. kr defaults to BOOTSTRAP_NOT_PRIVILEGED (return path elided).
 */
9470 job_mig_port_for_label(job_t j
__attribute__((unused
)), name_t label
, mach_port_t
*mp
)
9473 return BOOTSTRAP_NO_MEMORY
;
9476 struct ldcred
*ldc
= runtime_get_caller_creds();
9477 kern_return_t kr
= BOOTSTRAP_NOT_PRIVILEGED
;
9480 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
9481 return BOOTSTRAP_NOT_PRIVILEGED
;
9485 mach_port_t _mp
= MACH_PORT_NULL
;
9486 if (!j
->deny_job_creation
&& (ldc
->euid
== 0 || ldc
->euid
== geteuid())) {
9487 job_t target_j
= job_find(NULL
, label
);
9488 if (jobmgr_assumes(root_jobmgr
, target_j
!= NULL
)) {
/* Lazily create the target's job port on first request. */
9489 if (target_j
->j_port
== MACH_PORT_NULL
) {
9490 (void)job_assumes(target_j
, job_setup_machport(target_j
) == true);
9493 _mp
= target_j
->j_port
;
9494 kr
= _mp
!= MACH_PORT_NULL
? BOOTSTRAP_SUCCESS
: BOOTSTRAP_NO_MEMORY
;
9496 kr
= BOOTSTRAP_NO_MEMORY
;
/* MIG handler: attach the audit-session port `asport` to every job on the
 * s_needing_sessions list whose expected_audit_uuid matches `uuid`, then
 * dispatch those jobs. Each matched job gets its own copied send right;
 * the MiG protocol's original reference is dropped at the end so the
 * session disappears once all referencing jobs unload. No-op (KERN_SUCCESS)
 * on embedded platforms.
 */
9505 job_mig_set_security_session(job_t j
, uuid_t uuid
, mach_port_t asport
)
9507 #if TARGET_OS_EMBEDDED
9508 return KERN_SUCCESS
;
9512 return BOOTSTRAP_NO_MEMORY
;
9515 uuid_string_t uuid_str
;
9516 uuid_unparse(uuid
, uuid_str
);
9517 job_log(j
, LOG_DEBUG
, "Setting session %u for UUID %s...", asport
, uuid_str
);
/* Safe iteration: matched jobs are removed from the list inside the loop. */
9519 job_t ji
= NULL
, jt
= NULL
;
9520 LIST_FOREACH_SAFE(ji
, &s_needing_sessions
, sle
, jt
) {
9521 uuid_string_t uuid_str2
;
9522 uuid_unparse(ji
->expected_audit_uuid
, uuid_str2
);
9524 if (uuid_compare(uuid
, ji
->expected_audit_uuid
) == 0) {
9525 uuid_clear(ji
->expected_audit_uuid
);
9526 if (asport
!= MACH_PORT_NULL
) {
9527 job_log(ji
, LOG_DEBUG
, "Job should join session with port 0x%x", asport
);
/* One copied send right per job that adopts the session. */
9528 (void)job_assumes_zero(j
, launchd_mport_copy_send(asport
));
9530 job_log(ji
, LOG_DEBUG
, "No session to set for job. Using our session.");
9533 ji
->asport
= asport
;
9534 LIST_REMOVE(ji
, needing_session_sle
);
9536 if (ji
->event_monitor
) {
9539 job_dispatch(ji
, false);
9544 /* Each job that the session port was set for holds a reference. At the end of
9545 * the loop, there will be one extra reference belonging to this MiG protocol.
9546 * We need to release it so that the session goes away when all the jobs
9547 * referencing it are unloaded.
9549 (void)job_assumes_zero(j
, launchd_mport_deallocate(asport
));
9551 return KERN_SUCCESS
;
/* Locate a job manager (session) by name, case-insensitively, searching
 * root_jobmgr's sub-managers. A NULL `where` is the LaunchServices custom
 * API: it maps to LoginWindow if the caller is already in LoginWindow,
 * otherwise Aqua. Skips managers that are shutting down or are XPC
 * domains; under PID 1 the Background session's own sub-managers are also
 * searched one level deeper. (Return statements are elided in this view.)
 */
9555 jobmgr_find_by_name(jobmgr_t jm
, const char *where
)
9559 // NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic.
9560 if (where
== NULL
) {
9561 if (strcasecmp(jm
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
9562 where
= VPROCMGR_SESSION_LOGINWINDOW
;
9564 where
= VPROCMGR_SESSION_AQUA
;
/* Fast path: the caller is already in the requested session. */
9568 if (strcasecmp(jm
->name
, where
) == 0) {
9572 if (strcasecmp(where
, VPROCMGR_SESSION_BACKGROUND
) == 0 && !pid1_magic
) {
9577 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
9578 if (unlikely(jmi
->shutting_down
)) {
9580 } else if (jmi
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) {
9582 } else if (strcasecmp(jmi
->name
, where
) == 0) {
/* Under PID 1, sessions may nest under Background: search one level down. */
9584 } else if (strcasecmp(jmi
->name
, VPROCMGR_SESSION_BACKGROUND
) == 0 && pid1_magic
) {
9585 SLIST_FOREACH(jmi2
, &jmi
->submgrs
, sle
) {
9586 if (strcasecmp(jmi2
->name
, where
) == 0) {
/* MIG handler: absorb a Mach sub-bootstrap from another launchd. Grabs the
 * remote subset's request/receive rights and its exported service list via
 * _vproc_grab_subset(), builds a new job manager (jmr) of `session_type`
 * around them, and re-registers each transferred service on the job that
 * owns its PID locally. An Aqua session is additionally registered as the
 * GUI session with PID 1. LAUNCH_GLOBAL_ON_DEMAND moves the calling job
 * itself into the new manager. All temporary resources (launch_data array,
 * out-of-line port array, target_subset and asport rights) are released on
 * every path; on failure the partially-built manager is shut down.
 */
9599 job_mig_move_subset(job_t j
, mach_port_t target_subset
, name_t session_type
, mach_port_t asport
, uint64_t flags
)
9601 mach_msg_type_number_t l2l_i
, l2l_port_cnt
= 0;
9602 mach_port_array_t l2l_ports
= NULL
;
9603 mach_port_t reqport
, rcvright
;
9604 kern_return_t kr
= 1;
9605 launch_data_t out_obj_array
= NULL
;
9606 struct ldcred
*ldc
= runtime_get_caller_creds();
9607 jobmgr_t jmr
= NULL
;
9610 return BOOTSTRAP_NO_MEMORY
;
/* If target_subset translates to one of our own managers, the caller is
 * trying to move a session into itself — refuse. */
9613 if (job_mig_intran2(root_jobmgr
, target_subset
, ldc
->pid
)) {
9614 job_log(j
, LOG_ERR
, "Moving a session to ourself is bogus.");
9616 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
9620 job_log(j
, LOG_DEBUG
, "Move subset attempt: 0x%x", target_subset
);
9622 kr
= _vproc_grab_subset(target_subset
, &reqport
, &rcvright
, &out_obj_array
, &l2l_ports
, &l2l_port_cnt
);
9623 if (job_assumes_zero(j
, kr
) != 0) {
/* Sanity: the metadata array and the port array must be parallel. */
9627 if (launch_data_array_get_count(out_obj_array
) != l2l_port_cnt
) {
9628 os_assert_zero(l2l_port_cnt
);
9631 if (!job_assumes(j
, (jmr
= jobmgr_new(j
->mgr
, reqport
, rcvright
, false, session_type
, false, asport
)) != NULL
)) {
9632 kr
= BOOTSTRAP_NO_MEMORY
;
9636 if (strcmp(session_type
, VPROCMGR_SESSION_AQUA
) == 0) {
9637 jobmgr_log(jmr
, LOG_NOTICE
, "Registering new GUI session.");
9638 kr
= vproc_mig_register_gui_session(inherited_bootstrap_port
, asport
);
9640 jobmgr_log(jmr
, LOG_ERR
, "Failed to register GUI session with PID 1: 0x%x/0x%x", inherited_bootstrap_port
, kr
);
9644 jmr
->properties
|= BOOTSTRAP_PROPERTY_MOVEDSUBSET
;
9646 /* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
9647 * processing an IPC request, we'll do this action before the new job manager can get any IPC
9648 * requests. This serialization is guaranteed since we are single-threaded in that respect.
9650 if (flags
& LAUNCH_GLOBAL_ON_DEMAND
) {
9651 // This is so awful.
9652 // Remove the job from its current job manager.
9653 LIST_REMOVE(j
, sle
);
9654 LIST_REMOVE(j
, pid_hash_sle
);
9656 // Put the job into the target job manager.
9657 LIST_INSERT_HEAD(&jmr
->jobs
, j
, sle
);
9658 LIST_INSERT_HEAD(&jmr
->active_jobs
[ACTIVE_JOB_HASH(j
->p
)], j
, pid_hash_sle
);
9661 job_set_global_on_demand(j
, true);
9663 if (!j
->holds_ref
) {
9664 job_log(j
, LOG_PERF
, "Job moved subset into: %s", j
->mgr
->name
);
9665 j
->holds_ref
= true;
/* Re-import each transferred service: resolve its PID to a local job in
 * the new manager and recreate the machservice around the moved port. */
9670 for (l2l_i
= 0; l2l_i
< l2l_port_cnt
; l2l_i
++) {
9671 launch_data_t tmp
, obj_at_idx
;
9672 struct machservice
*ms
;
9673 job_t j_for_service
;
9674 const char *serv_name
;
9678 (void)job_assumes(j
, obj_at_idx
= launch_data_array_get_index(out_obj_array
, l2l_i
));
9679 (void)job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PID
));
9680 target_pid
= (pid_t
)launch_data_get_integer(tmp
);
9681 (void)job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PERPID
));
9682 serv_perpid
= launch_data_get_bool(tmp
);
9683 (void)job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_NAME
));
9684 serv_name
= launch_data_get_string(tmp
);
9686 j_for_service
= jobmgr_find_by_pid(jmr
, target_pid
, true);
9688 if (unlikely(!j_for_service
)) {
9689 // The PID probably exited
9690 (void)job_assumes_zero(j
, launchd_mport_deallocate(l2l_ports
[l2l_i
]));
9694 if (likely(ms
= machservice_new(j_for_service
, serv_name
, &l2l_ports
[l2l_i
], serv_perpid
))) {
9695 job_log(j
, LOG_DEBUG
, "Importing %s into new bootstrap.", serv_name
);
9696 machservice_request_notifications(ms
);
/* Common cleanup (entered via elided labels): release the metadata
 * array, the out-of-line port array, and the caller-supplied rights. */
9703 if (out_obj_array
) {
9704 launch_data_free(out_obj_array
);
9708 mig_deallocate((vm_address_t
)l2l_ports
, l2l_port_cnt
* sizeof(l2l_ports
[0]));
9712 if (target_subset
) {
9713 (void)job_assumes_zero(j
, launchd_mport_deallocate(target_subset
));
9716 (void)job_assumes_zero(j
, launchd_mport_deallocate(asport
));
9719 jobmgr_shutdown(jmr
);
/* MIG handler: initialize the caller's job manager as a named session of
 * `session_type`. Fails with BOOTSTRAP_NOT_PRIVILEGED if the session was
 * already initialized. A new LoginWindow session triggers shutdown of any
 * previous LoginWindow manager (WindowServer respawn workaround); an Aqua
 * session drops the manager's port from the runtime port set. On success
 * the session is (re)initialized via jobmgr_init_session() and the
 * resulting job is dispatched with its audit-session port attached.
 */
9726 job_mig_init_session(job_t j
, name_t session_type
, mach_port_t asport
)
9729 return BOOTSTRAP_NO_MEMORY
;
9734 kern_return_t kr
= BOOTSTRAP_NO_MEMORY
;
9735 if (j
->mgr
->session_initialized
) {
9736 job_log(j
, LOG_ERR
, "Tried to initialize an already setup session!");
9737 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
9738 } else if (strcmp(session_type
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
9744 * We're working around LoginWindow and the WindowServer.
9746 * In practice, there is only one LoginWindow session. Unfortunately, for certain
9747 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
9748 * spawns a replacement loginwindow session before cleaning up the previous one.
9750 * We're going to use the creation of a new LoginWindow context as a clue that the
9751 * previous LoginWindow context is on the way out and therefore we should just
9752 * kick-start the shutdown of it.
/* Shut down any existing (not already dying) LoginWindow manager. */
9755 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
9756 if (unlikely(jmi
->shutting_down
)) {
9758 } else if (strcasecmp(jmi
->name
, session_type
) == 0) {
9759 jobmgr_shutdown(jmi
);
9763 } else if (strcmp(session_type
, VPROCMGR_SESSION_AQUA
) == 0) {
9764 (void)job_assumes_zero(j
, runtime_remove_mport(j
->mgr
->jm_port
));
9767 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Initializing as %s", session_type
);
9768 strcpy(j
->mgr
->name_init
, session_type
);
9770 if (job_assumes(j
, (j2
= jobmgr_init_session(j
->mgr
, session_type
, false)))) {
9771 j2
->asport
= asport
;
9772 (void)job_assumes(j
, job_dispatch(j2
, true));
9773 kr
= BOOTSTRAP_SUCCESS
;
/* MIG handler: move the calling (anonymous-only) job into the session named
 * `session_name`, returning the destination manager's bootstrap port via
 * *new_bsport. Disallowed in the system (PID 1) bootstrap and for
 * non-anonymous jobs (they should use LimitLoadToSessionType). If the job
 * is already in the target session this is a no-op that just returns the
 * port. Otherwise the job is unlinked from its current manager's lists and
 * relinked into the target (found by name, or a fresh implicit-subset
 * manager created around requestor_port), its global-env membership and —
 * in a non-flat namespace — its machservice hash entries move with it, and
 * launchd takes a reference on the job (e.g. login's PAM module case).
 */
9780 job_mig_switch_to_session(job_t j
, mach_port_t requestor_port
, name_t session_name
, mach_port_t asport
, mach_port_t
*new_bsport
)
9782 struct ldcred
*ldc
= runtime_get_caller_creds();
9783 if (!jobmgr_assumes(root_jobmgr
, j
!= NULL
)) {
9784 jobmgr_log(root_jobmgr
, LOG_ERR
, "%s() called with NULL job: PID %d", __func__
, ldc
->pid
);
9785 return BOOTSTRAP_NO_MEMORY
;
9788 if (j
->mgr
->shutting_down
) {
9789 return BOOTSTRAP_UNKNOWN_SERVICE
;
9792 job_log(j
, LOG_DEBUG
, "Job wants to move to %s session.", session_name
);
9794 if (!job_assumes(j
, pid1_magic
== false)) {
9795 job_log(j
, LOG_WARNING
, "Switching sessions is not allowed in the system Mach bootstrap.");
9796 return BOOTSTRAP_NOT_PRIVILEGED
;
9799 if (!j
->anonymous
) {
9800 job_log(j
, LOG_NOTICE
, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
9801 return BOOTSTRAP_NOT_PRIVILEGED
;
9804 jobmgr_t target_jm
= jobmgr_find_by_name(root_jobmgr
, session_name
);
/* Already there: release the caller's rights and hand back the port. */
9805 if (target_jm
== j
->mgr
) {
9806 job_log(j
, LOG_DEBUG
, "Job is already in its desired session (%s).", session_name
);
9807 (void)job_assumes_zero(j
, launchd_mport_deallocate(asport
));
9808 (void)job_assumes_zero(j
, launchd_mport_deallocate(requestor_port
));
9809 *new_bsport
= target_jm
->jm_port
;
9810 return BOOTSTRAP_SUCCESS
;
/* Session not found: create it as an implicit subset of our manager. */
9814 target_jm
= jobmgr_new(j
->mgr
, requestor_port
, MACH_PORT_NULL
, false, session_name
, false, asport
);
9816 target_jm
->properties
|= BOOTSTRAP_PROPERTY_IMPLICITSUBSET
;
9817 (void)job_assumes_zero(j
, launchd_mport_deallocate(asport
));
9821 if (!job_assumes(j
, target_jm
!= NULL
)) {
9822 job_log(j
, LOG_WARNING
, "Could not find %s session!", session_name
);
9823 return BOOTSTRAP_NO_MEMORY
;
9826 // Remove the job from it's current job manager.
9827 LIST_REMOVE(j
, sle
);
9828 LIST_REMOVE(j
, pid_hash_sle
);
9830 job_t ji
= NULL
, jit
= NULL
;
9831 LIST_FOREACH_SAFE(ji
, &j
->mgr
->global_env_jobs
, global_env_sle
, jit
) {
9833 LIST_REMOVE(ji
, global_env_sle
);
9838 // Put the job into the target job manager.
9839 LIST_INSERT_HEAD(&target_jm
->jobs
, j
, sle
);
9840 LIST_INSERT_HEAD(&target_jm
->active_jobs
[ACTIVE_JOB_HASH(j
->p
)], j
, pid_hash_sle
);
9843 LIST_INSERT_HEAD(&target_jm
->global_env_jobs
, j
, global_env_sle
);
9846 // Move our Mach services over if we're not in a flat namespace.
9847 if (!launchd_flat_mach_namespace
&& !SLIST_EMPTY(&j
->machservices
)) {
9848 struct machservice
*msi
= NULL
, *msit
= NULL
;
9849 SLIST_FOREACH_SAFE(msi
, &j
->machservices
, sle
, msit
) {
9850 LIST_REMOVE(msi
, name_hash_sle
);
9851 LIST_INSERT_HEAD(&target_jm
->ms_hash
[hash_ms(msi
->name
)], msi
, name_hash_sle
);
9857 if (!j
->holds_ref
) {
9858 /* Anonymous jobs which move around are particularly interesting to us, so we want to
9859 * stick around while they're still around.
9860 * For example, login calls into the PAM launchd module, which moves the process into
9861 * the StandardIO session by default. So we'll hold a reference on that job to prevent
9862 * ourselves from going away.
9864 j
->holds_ref
= true;
9865 job_log(j
, LOG_PERF
, "Job switched into manager: %s", j
->mgr
->name
);
9869 *new_bsport
= target_jm
->jm_port
;
9871 return KERN_SUCCESS
;
/* MIG handler: transfer this sub-bootstrap to a per-session launchd — the
 * remote half of job_mig_move_subset(). Only the system (PID 1) launchd
 * may transfer, only non-root non-Aqua managers can be transferred, and
 * only the session's anonymous job may request it. Serializes every
 * non-anonymous job's machservices (name/PID/per-pid flag) into a packed
 * launch_data array in a MIG-allocated 20 MiB buffer (*outdata/*outdataCnt),
 * exports their ports in a parallel array, hands over the manager's
 * request and receive rights, and shuts the local manager down (the
 * workaround_5477111 global keeps the caller alive through shutdown).
 * On failure all allocations are released and BOOTSTRAP_NO_MEMORY returned.
 */
9875 job_mig_take_subset(job_t j
, mach_port_t
*reqport
, mach_port_t
*rcvright
,
9876 vm_offset_t
*outdata
, mach_msg_type_number_t
*outdataCnt
,
9877 mach_port_array_t
*portsp
, unsigned int *ports_cnt
)
9879 launch_data_t tmp_obj
, tmp_dict
, outdata_obj_array
= NULL
;
9880 mach_port_array_t ports
= NULL
;
9881 unsigned int cnt
= 0, cnt2
= 0;
9883 struct machservice
*ms
;
9888 return BOOTSTRAP_NO_MEMORY
;
9893 if (unlikely(!pid1_magic
)) {
9894 job_log(j
, LOG_ERR
, "Only the system launchd will transfer Mach sub-bootstraps.");
9895 return BOOTSTRAP_NOT_PRIVILEGED
;
9897 if (unlikely(jobmgr_parent(jm
) == NULL
)) {
9898 job_log(j
, LOG_ERR
, "Root Mach bootstrap cannot be transferred.");
9899 return BOOTSTRAP_NOT_PRIVILEGED
;
9901 if (unlikely(strcasecmp(jm
->name
, VPROCMGR_SESSION_AQUA
) == 0)) {
9902 job_log(j
, LOG_ERR
, "Cannot transfer a setup GUI session.");
9903 return BOOTSTRAP_NOT_PRIVILEGED
;
9905 if (unlikely(!j
->anonymous
)) {
9906 job_log(j
, LOG_ERR
, "Only the anonymous job can transfer Mach sub-bootstraps.");
9907 return BOOTSTRAP_NOT_PRIVILEGED
;
9910 job_log(j
, LOG_DEBUG
, "Transferring sub-bootstrap to the per session launchd.");
9912 outdata_obj_array
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
9913 if (!job_assumes(j
, outdata_obj_array
)) {
/* Fixed-size 20 MiB out-of-line buffer for the packed service metadata. */
9917 *outdataCnt
= 20 * 1024 * 1024;
9918 mig_allocate(outdata
, *outdataCnt
);
9919 if (!job_assumes(j
, *outdata
!= 0)) {
/* Pass 1: count the machservices of all non-anonymous jobs. */
9923 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
9924 if (!ji
->anonymous
) {
9927 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
9932 mig_allocate((vm_address_t
*)&ports
, cnt
* sizeof(ports
[0]));
9933 if (!job_assumes(j
, ports
!= NULL
)) {
/* Pass 2: serialize each service as {name, pid, per-pid} and export its
 * port, copying an extra send right so local shutdown can't kill it. */
9937 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
9938 if (!ji
->anonymous
) {
9942 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
9943 if (job_assumes(j
, (tmp_dict
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
9944 (void)job_assumes(j
, launch_data_array_set_index(outdata_obj_array
, tmp_dict
, cnt2
));
9949 if (job_assumes(j
, (tmp_obj
= launch_data_new_string(machservice_name(ms
))))) {
9950 (void)job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_NAME
));
9955 if (job_assumes(j
, (tmp_obj
= launch_data_new_integer((ms
->job
->p
))))) {
9956 (void)job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PID
));
9961 if (job_assumes(j
, (tmp_obj
= launch_data_new_bool((ms
->per_pid
))))) {
9962 (void)job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PERPID
));
9967 ports
[cnt2
] = machservice_port(ms
);
9969 // Increment the send right by one so we can shutdown the jobmgr cleanly
9970 (void)jobmgr_assumes_zero(jm
, launchd_mport_copy_send(ports
[cnt2
]));
9975 (void)job_assumes(j
, cnt
== cnt2
);
9977 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK
);
9978 packed_size
= launch_data_pack(outdata_obj_array
, (void *)*outdata
, *outdataCnt
, NULL
, NULL
);
9979 if (!job_assumes(j
, packed_size
!= 0)) {
9983 launch_data_free(outdata_obj_array
);
/* Hand over the manager's request and receive rights to the new owner. */
9988 *reqport
= jm
->req_port
;
9989 *rcvright
= jm
->jm_port
;
9994 workaround_5477111
= j
;
9996 jobmgr_shutdown(jm
);
9998 return BOOTSTRAP_SUCCESS
;
/* Error unwind (via elided labels): free metadata, buffer, and ports. */
10001 if (outdata_obj_array
) {
10002 launch_data_free(outdata_obj_array
);
10005 mig_deallocate(*outdata
, *outdataCnt
);
10008 mig_deallocate((vm_address_t
)ports
, cnt
* sizeof(ports
[0]));
10011 return BOOTSTRAP_NO_MEMORY
;
/* MIG routine: create a Mach bootstrap "subset" (a child job manager) under
 * j's manager, using requestorport as the new subset's request port. The new
 * subset's bootstrap receive right is handed back through subsetportp.
 * NOTE(review): this extraction is missing several original lines here
 * (guards, closing braces, and the loop body that accumulates bsdepth);
 * the comments below describe only what is visible.
 */
10015 job_mig_subset(job_t j
, mach_port_t requestorport
, mach_port_t
*subsetportp
)
10021 return BOOTSTRAP_NO_MEMORY
;
// Refuse new subsets while this job manager is being torn down.
10023 if (j
->mgr
->shutting_down
) {
10024 return BOOTSTRAP_UNKNOWN_SERVICE
;
// Walk up the manager chain; presumably this counts nesting depth into
// bsdepth (the loop body is not visible in this extraction) — TODO confirm.
10029 while ((jmr
= jobmgr_parent(jmr
)) != NULL
) {
10033 // Since we use recursion, we need an artificial depth for subsets
10034 if (unlikely(bsdepth
> 100)) {
10035 job_log(j
, LOG_ERR
, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth
);
10036 return BOOTSTRAP_NO_MEMORY
;
// Build a human-readable name for the subset from the requesting job.
10039 char name
[NAME_MAX
];
10040 snprintf(name
, sizeof(name
), "%s[%i].subset.%i", j
->anonymous
? j
->prog
: j
->label
, j
->p
, MACH_PORT_INDEX(requestorport
));
10042 if (!job_assumes(j
, (jmr
= jobmgr_new(j
->mgr
, requestorport
, MACH_PORT_NULL
, false, name
, true, j
->asport
)) != NULL
)) {
// A NULL request port is a caller error, not an allocation failure.
10043 if (unlikely(requestorport
== MACH_PORT_NULL
)) {
10044 return BOOTSTRAP_NOT_PRIVILEGED
;
10046 return BOOTSTRAP_NO_MEMORY
;
10049 *subsetportp
= jmr
->jm_port
;
10050 jmr
->properties
|= BOOTSTRAP_PROPERTY_EXPLICITSUBSET
;
10052 /* A job could create multiple subsets, so only add a reference the first time
10053 * it does; that way we don't have to keep a count.
 */
10055 if (j
->anonymous
&& !j
->holds_ref
) {
10056 job_log(j
, LOG_PERF
, "Job created subset: %s", jmr
->name
);
10057 j
->holds_ref
= true;
10061 job_log(j
, LOG_DEBUG
, "Job created a subset named \"%s\"", jmr
->name
);
10062 return BOOTSTRAP_SUCCESS
;
10066 _xpc_domain_import_service(jobmgr_t jm
, launch_data_t pload
)
10068 jobmgr_t where2put
= NULL
;
10070 if (launch_data_get_type(pload
) != LAUNCH_DATA_DICTIONARY
) {
10075 launch_data_t ldlabel
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LABEL
);
10076 if (!ldlabel
|| launch_data_get_type(ldlabel
) != LAUNCH_DATA_STRING
) {
10081 const char *label
= launch_data_get_string(ldlabel
);
10082 jobmgr_log(jm
, LOG_DEBUG
, "Importing service: %s", label
);
10084 launch_data_t destname
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_XPCDOMAIN
);
10086 bool supported_domain
= false;
10088 if (launch_data_get_type(destname
) == LAUNCH_DATA_STRING
) {
10089 const char *str
= launch_data_get_string(destname
);
10090 if (strcmp(str
, XPC_DOMAIN_TYPE_SYSTEM
) == 0) {
10091 where2put
= _s_xpc_system_domain
;
10092 } else if (strcmp(str
, XPC_DOMAIN_TYPE_PERUSER
) == 0) {
10093 where2put
= jobmgr_find_xpc_per_user_domain(jm
, jm
->req_euid
);
10094 supported_domain
= true;
10095 } else if (strcmp(str
, XPC_DOMAIN_TYPE_PERSESSION
) == 0) {
10096 where2put
= jobmgr_find_xpc_per_session_domain(jm
, jm
->req_asid
);
10098 jobmgr_log(jm
, LOG_ERR
, "Invalid XPC domain type: %s", str
);
10102 jobmgr_log(jm
, LOG_ERR
, "XPC domain type is not a string.");
10106 if (where2put
&& !supported_domain
) {
10107 launch_data_t mi
= NULL
;
10108 if ((mi
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_MULTIPLEINSTANCES
))) {
10109 if (launch_data_get_type(mi
) == LAUNCH_DATA_BOOL
&& launch_data_get_bool(mi
)) {
10110 jobmgr_log(where2put
, LOG_ERR
, "Multiple-instance services are not supported in this domain.");
10122 /* Gross. If the service already exists in a singleton domain, then
10123 * jobmgr_import2() will return the existing job. But if we fail to alias
10124 * this job, we will normally want to remove it. But if we did not create
10125 * it in the first place, then we need to avoid removing it. So check
10126 * errno against EEXIST in the success case and if it's EEXIST, then do
10127 * not remove the original job in the event of a failed alias.
10129 * This really needs to be re-thought, but I think it'll require a larger
10130 * evaluation of launchd's data structures. Right now, once a job is
10131 * imported into a singleton domain, it won't be removed until the system
10132 * shuts down, but that may not always be true. If it ever changes, we'll
10133 * have a problem because we'll have to account for all existing aliases
10134 * and clean them up somehow. Or just start ref-counting. I knew this
10135 * aliasing stuff would be trouble...
10137 * <rdar://problem/10646503>
10139 jobmgr_log(where2put
, LOG_DEBUG
, "Importing service...");
10142 if ((j
= jobmgr_import2(where2put
, pload
))) {
10143 bool created
= (errno
!= EEXIST
);
10144 j
->xpc_service
= true;
10146 if (where2put
->xpc_singleton
) {
10147 /* If the service was destined for one of the global domains,
10148 * then we have to alias it into our local domain to reserve the
10152 if (!(ja
= job_new_alias(jm
, j
))) {
10153 /* If we failed to alias the job because of a conflict over
10154 * the label, then we remove it from the global domain. We
10155 * don't want to risk having imported a malicious job into
10156 * one of the global domains.
10158 if (errno
!= EEXIST
) {
10159 job_log(j
, LOG_ERR
, "Failed to alias job into: %s: %d: %s", where2put
->name
, errno
, strerror(errno
));
10165 jobmgr_log(jm
, LOG_WARNING
, "Singleton service already existed in job-local namespace. Removing: %s", j
->label
);
10171 jobmgr_log(jm
, LOG_DEBUG
, "Aliased service into local domain: %s", j
->label
);
10172 (void)job_dispatch(j
, false);
10173 ja
->xpc_service
= true;
10177 (void)job_dispatch(j
, false);
10181 jobmgr_log(jm
, LOG_DEBUG
, "Could not find destination for service: %s", label
);
/* Import an array of service property lists into j's XPC domain manager,
 * one entry at a time via _xpc_domain_import_service(). Returns EINVAL if
 * `services` is not a LAUNCH_DATA_ARRAY.
 * NOTE(review): this extraction dropped some original lines here (the
 * declarations of `i` and `nj`, several closing braces); comments describe
 * only the visible logic.
 */
10188 _xpc_domain_import_services(job_t j
, launch_data_t services
)
10190 int error
= EINVAL
;
10191 if (launch_data_get_type(services
) != LAUNCH_DATA_ARRAY
) {
10196 size_t c
= launch_data_array_get_count(services
);
10197 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Importing new services: %lu", c
);
10199 for (i
= 0; i
< c
; i
++) {
10200 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Importing service at index: %lu", i
);
10203 launch_data_t ploadi
= launch_data_array_get_index(services
, i
);
10204 if (!(nj
= _xpc_domain_import_service(j
->mgr
, ploadi
))) {
10205 if (!j
->mgr
->session_initialized
&& errno
) {
10206 /* Service import failures are only fatal if the domain is being
10207 * initialized. If we're extending the domain, we can run into
10208 * errors with services already existing, so we just ignore them.
10209 * In the case of a domain extension, we don't want to halt the
10210 * operation if we run into an error with one service.
10212 * <rdar://problem/10842779>
 */
10214 jobmgr_log(j
->mgr
, LOG_ERR
, "Failed to import service at index: %lu: %d: %s", i
, errno
, strerror(errno
));
10219 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Imported service: %s", nj
->label
);
/* Create a new XPC domain (a skeleton child of the root job manager).
 * Only PID 1's launchd may host XPC domains, and only a job marked as the
 * xpc_bootstrapper may request one. reqport becomes the new domain's
 * request port; dport its demand port.
 * NOTE(review): extraction dropped some lines (closing braces, the final
 * return of kr); comments describe only visible logic.
 */
10231 xpc_domain_import2(job_t j
, mach_port_t reqport
, mach_port_t dport
)
10233 if (unlikely(!pid1_magic
)) {
10234 job_log(j
, LOG_ERR
, "XPC domains may only reside in PID 1.");
10235 return BOOTSTRAP_NOT_PRIVILEGED
;
// Reject callers we cannot identify or ports we cannot use.
10237 if (!j
|| !MACH_PORT_VALID(reqport
)) {
10238 return BOOTSTRAP_UNKNOWN_SERVICE
;
10240 if (root_jobmgr
->shutting_down
) {
10241 jobmgr_log(root_jobmgr
, LOG_ERR
, "Attempt to create new domain while shutting down.");
10242 return BOOTSTRAP_NOT_PRIVILEGED
;
10244 if (!j
->xpc_bootstrapper
) {
10245 job_log(j
, LOG_ERR
, "Attempt to create new XPC domain by unprivileged job.");
10246 return BOOTSTRAP_NOT_PRIVILEGED
;
10249 kern_return_t kr
= BOOTSTRAP_NO_MEMORY
;
10250 /* All XPC domains are children of the root job manager. What we're creating
10251 * here is really just a skeleton. By creating it, we're adding reqp to our
10252 * port set. It will have two messages on it. The first specifies the
10253 * environment of the originator. This is so we can cache it and hand it to
10254 * xpcproxy to bootstrap our services. The second is the set of jobs that is
10255 * to be bootstrapped in.
 */
10257 jobmgr_t jm
= jobmgr_new(root_jobmgr
, reqport
, dport
, false, NULL
, true, MACH_PORT_NULL
);
10258 if (job_assumes(j
, jm
!= NULL
)) {
10259 jm
->properties
|= BOOTSTRAP_PROPERTY_XPC_DOMAIN
;
10260 jm
->shortdesc
= "private";
10261 kr
= BOOTSTRAP_SUCCESS
;
10268 xpc_domain_set_environment(job_t j
, mach_port_t rp
, mach_port_t bsport
, mach_port_t excport
, vm_offset_t ctx
, mach_msg_type_number_t ctx_sz
)
10271 /* Due to the whacky nature of XPC service bootstrapping, we can end up
10272 * getting this message long after the requesting process has gone away.
10273 * See <rdar://problem/8593143>.
10275 return BOOTSTRAP_UNKNOWN_SERVICE
;
10278 jobmgr_t jm
= j
->mgr
;
10279 if (!(jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
10280 return BOOTSTRAP_NOT_PRIVILEGED
;
10283 if (jm
->req_asport
!= MACH_PORT_NULL
) {
10284 return BOOTSTRAP_NOT_PRIVILEGED
;
10287 struct ldcred
*ldc
= runtime_get_caller_creds();
10288 struct proc_bsdinfowithuniqid proc
;
10289 if (proc_pidinfo(ldc
->pid
, PROC_PIDT_BSDINFOWITHUNIQID
, 1, &proc
, PROC_PIDT_BSDINFOWITHUNIQID_SIZE
) == 0) {
10290 if (errno
!= ESRCH
) {
10291 (void)jobmgr_assumes_zero(jm
, errno
);
10296 return BOOTSTRAP_NO_MEMORY
;
10299 #if !TARGET_OS_EMBEDDED
10300 if (jobmgr_assumes_zero(jm
, audit_session_port(ldc
->asid
, &jm
->req_asport
)) != 0) {
10303 job_log(j
, LOG_ERR
, "Failed to get port for ASID: %u", ldc
->asid
);
10304 return BOOTSTRAP_NOT_PRIVILEGED
;
10307 jm
->req_asport
= MACH_PORT_DEAD
;
10310 struct waiting4attach
*w4ai
= NULL
;
10311 struct waiting4attach
*w4ait
= NULL
;
10312 LIST_FOREACH_SAFE(w4ai
, &_launchd_domain_waiters
, le
, w4ait
) {
10313 if (w4ai
->dest
== ldc
->pid
) {
10314 jobmgr_log(jm
, LOG_DEBUG
, "Migrating attach for: %s", w4ai
->name
);
10315 LIST_REMOVE(w4ai
, le
);
10316 LIST_INSERT_HEAD(&jm
->attaches
, w4ai
, le
);
10321 (void)snprintf(jm
->name_init
, NAME_MAX
, "com.apple.xpc.domain.%s.%d", proc
.pbsd
.pbi_comm
, ldc
->pid
);
10322 strlcpy(jm
->owner
, proc
.pbsd
.pbi_comm
, sizeof(jm
->owner
));
10323 jm
->req_bsport
= bsport
;
10324 jm
->req_excport
= excport
;
10325 jm
->req_rport
= rp
;
10327 jm
->req_ctx_sz
= ctx_sz
;
10328 jm
->req_pid
= ldc
->pid
;
10329 jm
->req_euid
= ldc
->euid
;
10330 jm
->req_egid
= ldc
->egid
;
10331 jm
->req_asid
= ldc
->asid
;
10332 jm
->req_uniqueid
= proc
.p_uniqidentifier
.p_uniqueid
;
10334 return KERN_SUCCESS
;
10338 xpc_domain_load_services(job_t j
, vm_offset_t services_buff
, mach_msg_type_number_t services_sz
)
10341 return BOOTSTRAP_UNKNOWN_SERVICE
;
10344 job_t rootj
= jobmgr_find_by_pid(root_jobmgr
, j
->p
, false);
10345 if (!(rootj
&& rootj
->xpc_bootstrapper
)) {
10346 job_log(j
, LOG_ERR
, "Attempt to load services into XPC domain by unprivileged job.");
10347 return BOOTSTRAP_NOT_PRIVILEGED
;
10350 // This is just for XPC domains (for now).
10351 if (!(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
10352 return BOOTSTRAP_NOT_PRIVILEGED
;
10354 if (j
->mgr
->session_initialized
) {
10355 jobmgr_log(j
->mgr
, LOG_ERR
, "Attempt to initialize an already-initialized XPC domain.");
10356 return BOOTSTRAP_NOT_PRIVILEGED
;
10360 launch_data_t services
= launch_data_unpack((void *)services_buff
, services_sz
, NULL
, 0, &offset
, NULL
);
10362 return BOOTSTRAP_NO_MEMORY
;
10365 int error
= _xpc_domain_import_services(j
, services
);
10367 j
->mgr
->error
= error
;
10368 jobmgr_log(j
->mgr
, LOG_ERR
, "Obliterating domain.");
10369 jobmgr_remove(j
->mgr
);
10371 j
->mgr
->session_initialized
= true;
10372 (void)jobmgr_assumes_zero(j
->mgr
, xpc_call_wakeup(j
->mgr
->req_rport
, BOOTSTRAP_SUCCESS
));
10373 j
->mgr
->req_rport
= MACH_PORT_NULL
;
10375 /* Returning a failure code will destroy the message, whereas returning
10376 * success will not, so we need to clean up here.
10378 mig_deallocate(services_buff
, services_sz
);
10379 error
= BOOTSTRAP_SUCCESS
;
10386 xpc_domain_check_in(job_t j
, mach_port_t
*bsport
, mach_port_t
*sbsport
,
10387 mach_port_t
*excport
, mach_port_t
*asport
, uint32_t *uid
, uint32_t *gid
,
10388 int32_t *asid
, vm_offset_t
*ctx
, mach_msg_type_number_t
*ctx_sz
)
10390 if (!jobmgr_assumes(root_jobmgr
, j
!= NULL
)) {
10391 return BOOTSTRAP_UNKNOWN_SERVICE
;
10393 jobmgr_t jm
= j
->mgr
;
10394 if (!(jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
10395 return BOOTSTRAP_NOT_PRIVILEGED
;
10398 if (jm
->req_asport
== MACH_PORT_NULL
) {
10399 return BOOTSTRAP_NOT_PRIVILEGED
;
10402 *bsport
= jm
->req_bsport
;
10403 *sbsport
= root_jobmgr
->jm_port
;
10404 *excport
= jm
->req_excport
;
10405 if (j
->joins_gui_session
) {
10406 if (jm
->req_gui_asport
) {
10407 *asport
= jm
->req_gui_asport
;
10409 job_log(j
, LOG_NOTICE
, "No GUI session set for UID of user service. This service may not act properly.");
10410 *asport
= jm
->req_asport
;
10413 *asport
= jm
->req_asport
;
10416 *uid
= jm
->req_euid
;
10417 *gid
= jm
->req_egid
;
10418 *asid
= jm
->req_asid
;
10420 *ctx
= jm
->req_ctx
;
10421 *ctx_sz
= jm
->req_ctx_sz
;
10423 return KERN_SUCCESS
;
/* Look up the MachService name registered by XPC service job j and copy it
 * into `name`. Only jobs flagged xpc_service may ask; dedicated instances
 * are matched against their original job's label.
 * NOTE(review): extraction dropped the initial NULL-job guard and several
 * closing braces here; comments describe only the visible logic.
 */
10427 xpc_domain_get_service_name(job_t j
, event_name_t name
)
10430 return BOOTSTRAP_NO_MEMORY
;
10433 if (!j
->xpc_service
) {
10434 jobmgr_log(j
->mgr
, LOG_ERR
, "Attempt to get service name by non-XPC service: %s", j
->label
);
10435 return BOOTSTRAP_NOT_PRIVILEGED
;
// Dedicated instances carry a generated label; search by the original's.
10438 const char *what2find
= j
->label
;
10439 if (j
->dedicated_instance
) {
10440 what2find
= j
->original
->label
;
// Scan the job's registered MachServices for one matching the label.
10443 struct machservice
*msi
= NULL
;
10444 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
10445 if (strcmp(msi
->name
, what2find
) == 0) {
10451 jobmgr_log(j
->mgr
, LOG_ERR
, "Attempt to get service name that does not exist: %s", j
->label
);
10452 return BOOTSTRAP_UNKNOWN_SERVICE
;
10455 (void)strlcpy(name
, msi
->name
, sizeof(event_name_t
));
10456 return BOOTSTRAP_SUCCESS
;
10459 #if XPC_LPI_VERSION >= 20111216
10461 xpc_domain_add_services(job_t j
, vm_offset_t services_buff
, mach_msg_type_number_t services_sz
)
10464 return BOOTSTRAP_UNKNOWN_SERVICE
;
10467 job_t rootj
= jobmgr_find_by_pid(root_jobmgr
, j
->p
, false);
10468 if (!(rootj
&& rootj
->xpc_bootstrapper
)) {
10469 job_log(j
, LOG_ERR
, "Attempt to add service to XPC domain by unprivileged job.");
10470 return BOOTSTRAP_NOT_PRIVILEGED
;
10473 if (!(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
10474 return BOOTSTRAP_NOT_PRIVILEGED
;
10478 launch_data_t services
= launch_data_unpack((void *)services_buff
, services_sz
, NULL
, 0, &offset
, NULL
);
10480 return BOOTSTRAP_NO_MEMORY
;
10483 int error
= _xpc_domain_import_services(j
, services
);
10485 mig_deallocate(services_buff
, services_sz
);
10492 #pragma mark XPC Events
10494 xpc_event_find_channel(job_t j
, const char *stream
, struct machservice
**ms
)
10496 int error
= EXNOMEM
;
10497 struct machservice
*msi
= NULL
;
10498 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
10499 if (strcmp(stream
, msi
->name
) == 0) {
10505 mach_port_t sp
= MACH_PORT_NULL
;
10506 msi
= machservice_new(j
, stream
, &sp
, false);
10511 job_log(j
, LOG_DEBUG
, "Creating new MachService for stream: %s", stream
);
10512 /* Hack to keep this from being publicly accessible through
10513 * bootstrap_look_up().
10515 if (!j
->dedicated_instance
) {
10516 LIST_REMOVE(msi
, name_hash_sle
);
10518 msi
->event_channel
= true;
10520 /* If we call job_dispatch() here before the audit session for the job
10521 * has been set, we'll end up not watching this service. But we also have
10522 * to take care not to watch the port if the job is active.
10524 * See <rdar://problem/10357855>.
10526 if (!j
->currently_ignored
) {
10527 machservice_watch(j
, msi
);
10532 } else if (!msi
->event_channel
) {
10533 job_log(j
, LOG_ERR
, "This job registered a MachService name identical to the requested event channel name: %s", stream
);
/* XPC Events: resolve a (stream, token) pair to the event's registered name
 * and return it in a reply dictionary. Only the event-monitor job is allowed
 * to perform this lookup (result stays ESRCH otherwise).
 * NOTE(review): extraction dropped the parameter-validation early returns
 * and the trailing reply/return lines; comments cover only visible logic.
 */
10544 xpc_event_get_event_name(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10546 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10551 uint64_t token
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_TOKEN
);
10556 job_log(j
, LOG_DEBUG
, "Getting event name for stream/token: %s/0x%llu", stream
, token
);
10558 int result
= ESRCH
;
10559 struct externalevent
*event
= externalevent_find(stream
, token
);
// Only the designated event-monitor job may read event names.
10560 if (event
&& j
->event_monitor
) {
10561 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10562 xpc_dictionary_set_string(reply2
, XPC_EVENT_ROUTINE_KEY_NAME
, event
->name
);
10565 job_log(j
, LOG_DEBUG
, "Found: %s", event
->name
);
/* XPC Events: resolve a (stream, token) pair and return the owning job's
 * cached entitlements in a reply dictionary. Mirrors
 * xpc_event_get_event_name(); only the event-monitor job may call it
 * (result stays ESRCH otherwise).
 * NOTE(review): extraction dropped the parameter-validation early returns
 * and the trailing reply/return lines; comments cover only visible logic.
 */
10573 xpc_event_copy_entitlements(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10575 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10580 uint64_t token
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_TOKEN
);
10585 job_log(j
, LOG_DEBUG
, "Getting entitlements for stream/token: %s/0x%llu", stream
, token
);
10587 int result
= ESRCH
;
10588 struct externalevent
*event
= externalevent_find(stream
, token
);
// Only the designated event-monitor job may read entitlements.
10589 if (event
&& j
->event_monitor
) {
10590 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10591 xpc_dictionary_set_value(reply2
, XPC_EVENT_ROUTINE_KEY_ENTITLEMENTS
, event
->entitlements
);
10594 job_log(j
, LOG_DEBUG
, "Found: %s", event
->name
);
10601 // TODO - can be removed with rdar://problem/12666150
10602 #ifndef XPC_EVENT_FLAG_ALLOW_UNMANAGED
10603 #define XPC_EVENT_FLAG_ALLOW_UNMANAGED (1 << 1)
10607 xpc_event_set_event(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10609 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10614 const char *key
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_NAME
);
10619 xpc_object_t event
= xpc_dictionary_get_value(request
, XPC_EVENT_ROUTINE_KEY_EVENT
);
10620 if (event
&& xpc_get_type(event
) != XPC_TYPE_DICTIONARY
) {
10624 uint64_t flags
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_FLAGS
);
10626 /* Don't allow events to be set for anonymous jobs unless specifically
10627 * requested in the flags. Only permit this for internal development.
10629 if (j
->anonymous
&& ((flags
& XPC_EVENT_FLAG_ALLOW_UNMANAGED
) == 0 || !launchd_apple_internal
)) {
10630 job_log(j
, LOG_ERR
, "Unmanaged jobs may not make XPC Events requests.");
10634 job_log(j
, LOG_DEBUG
, "%s event for stream/key: %s/%s", event
? "Setting" : "Removing", stream
, key
);
10636 struct externalevent
*eei
= NULL
;
10637 LIST_FOREACH(eei
, &j
->events
, job_le
) {
10638 /* If the event for the given key already exists for the job, we need to
10639 * remove the old one first.
10641 if (strcmp(eei
->name
, key
) == 0 && strcmp(eei
->sys
->name
, stream
) == 0) {
10642 job_log(j
, LOG_DEBUG
, "Event exists. Removing.");
10643 externalevent_delete(eei
);
10648 int result
= EXNOMEM
;
10650 struct eventsystem
*es
= eventsystem_find(stream
);
10652 job_log(j
, LOG_DEBUG
, "Creating stream.");
10653 es
= eventsystem_new(stream
);
10657 job_log(j
, LOG_DEBUG
, "Adding event.");
10658 if (externalevent_new(j
, es
, key
, event
, flags
)) {
10659 job_log(j
, LOG_DEBUG
, "Added new event for key: %s", key
);
10662 job_log(j
, LOG_ERR
, "Could not create event for key: %s", key
);
10665 job_log(j
, LOG_ERR
, "Event stream could not be created: %s", stream
);
10668 /* If the event was NULL, then we just remove it and return. */
10673 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10681 xpc_event_copy_event(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10683 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10684 const char *key
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_NAME
);
10686 bool all_streams
= (stream
== NULL
);
10687 bool all_events
= (key
== NULL
|| strcmp(key
, "") == 0); // strcmp for libxpc compatibility
10688 xpc_object_t events
= NULL
;
10690 if (all_streams
&& !all_events
) {
10694 if (all_streams
|| all_events
) {
10695 job_log(j
, LOG_DEBUG
, "Fetching all events%s%s", stream
? " for stream: " : "", stream
? stream
: "");
10696 events
= xpc_dictionary_create(NULL
, NULL
, 0);
10698 job_log(j
, LOG_DEBUG
, "Fetching stream/key: %s/%s", stream
, key
);
10701 int result
= ESRCH
;
10702 struct externalevent
*eei
= NULL
;
10703 LIST_FOREACH(eei
, &j
->events
, job_le
) {
10705 xpc_object_t sub
= xpc_dictionary_get_value(events
, eei
->sys
->name
);
10707 sub
= xpc_dictionary_create(NULL
, NULL
, 0);
10708 xpc_dictionary_set_value(events
, eei
->sys
->name
, sub
);
10711 xpc_dictionary_set_value(sub
, eei
->name
, eei
->event
);
10712 } else if (strcmp(eei
->sys
->name
, stream
) == 0) {
10714 xpc_dictionary_set_value(events
, eei
->name
, eei
->event
);
10715 } else if (strcmp(eei
->name
, key
) == 0) {
10716 job_log(j
, LOG_DEBUG
, "Found event.");
10717 events
= xpc_retain(eei
->event
);
10724 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10725 xpc_dictionary_set_value(reply2
, XPC_EVENT_ROUTINE_KEY_EVENT
, events
);
10726 xpc_release(events
);
10736 xpc_event_channel_check_in(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10738 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10743 job_log(j
, LOG_DEBUG
, "Checking in stream: %s", stream
);
10745 struct machservice
*ms
= NULL
;
10746 int error
= xpc_event_find_channel(j
, stream
, &ms
);
10748 job_log(j
, LOG_ERR
, "Failed to check in: 0x%x: %s", error
, xpc_strerror(error
));
10749 } else if (ms
->isActive
) {
10750 job_log(j
, LOG_ERR
, "Attempt to check in on event channel multiple times: %s", stream
);
10753 machservice_request_notifications(ms
);
10755 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10756 xpc_dictionary_set_mach_recv(reply2
, XPC_EVENT_ROUTINE_KEY_PORT
, ms
->port
);
10765 xpc_event_channel_look_up(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10767 if (!j
->event_monitor
) {
10771 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10776 uint64_t token
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_TOKEN
);
10781 job_log(j
, LOG_DEBUG
, "Looking up channel for stream/token: %s/%llu", stream
, token
);
10783 struct externalevent
*ee
= externalevent_find(stream
, token
);
10788 struct machservice
*ms
= NULL
;
10789 int error
= xpc_event_find_channel(ee
->job
, stream
, &ms
);
10791 job_log(j
, LOG_DEBUG
, "Found event channel port: 0x%x", ms
->port
);
10792 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10793 xpc_dictionary_set_mach_send(reply2
, XPC_EVENT_ROUTINE_KEY_PORT
, ms
->port
);
10797 job_log(j
, LOG_ERR
, "Could not find event channel for stream/token: %s/%llu: 0x%x: %s", stream
, token
, error
, xpc_strerror(error
));
10804 xpc_event_provider_check_in(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10806 if (!j
->event_monitor
) {
10810 /* This indicates that the event monitor is now safe to signal. This state
10811 * is independent of whether this operation actually succeeds; we just need
10812 * it to ignore SIGUSR1.
10814 j
->event_monitor_ready2signal
= true;
10816 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10821 job_log(j
, LOG_DEBUG
, "Provider checking in for stream: %s", stream
);
10823 xpc_object_t events
= xpc_array_create(NULL
, 0);
10824 struct eventsystem
*es
= eventsystem_find(stream
);
10826 /* If we had to create the event stream, there were no events, so just
10827 * give back the empty array.
10829 job_log(j
, LOG_DEBUG
, "Creating event stream.");
10830 es
= eventsystem_new(stream
);
10831 if (!job_assumes(j
, es
)) {
10832 xpc_release(events
);
10836 if (strcmp(stream
, "com.apple.launchd.helper") == 0) {
10837 _launchd_support_system
= es
;
10840 job_log(j
, LOG_DEBUG
, "Filling event array.");
10842 struct externalevent
*ei
= NULL
;
10843 LIST_FOREACH(ei
, &es
->events
, sys_le
) {
10844 xpc_array_set_uint64(events
, XPC_ARRAY_APPEND
, ei
->id
);
10845 xpc_array_append_value(events
, ei
->event
);
10849 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10850 xpc_dictionary_set_value(reply2
, XPC_EVENT_ROUTINE_KEY_EVENTS
, events
);
10851 xpc_release(events
);
10858 xpc_event_provider_set_state(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10860 job_t other_j
= NULL
;
10862 if (!j
->event_monitor
) {
10866 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10871 uint64_t token
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_TOKEN
);
10876 bool state
= false;
10877 xpc_object_t xstate
= xpc_dictionary_get_value(request
, XPC_EVENT_ROUTINE_KEY_STATE
);
10878 if (!xstate
|| xpc_get_type(xstate
) != XPC_TYPE_BOOL
) {
10881 state
= xpc_bool_get_value(xstate
);
10884 job_log(j
, LOG_DEBUG
, "Setting event state to %s for stream/token: %s/%llu", state
? "true" : "false", stream
, token
);
10886 struct externalevent
*ei
= externalevent_find(stream
, token
);
10888 job_log(j
, LOG_ERR
, "Could not find stream/token: %s/%llu", stream
, token
);
10895 if (ei
->internal
) {
10896 job_log(ei
->job
, LOG_NOTICE
, "Job should be able to exec(3) now.");
10897 ei
->job
->waiting4ok
= false;
10898 externalevent_delete(ei
);
10901 (void)job_dispatch(other_j
, false);
10903 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10910 xpc_event_demux(mach_port_t p
, xpc_object_t request
, xpc_object_t
*reply
)
10912 uint64_t op
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_OP
);
10917 audit_token_t token
;
10918 xpc_dictionary_get_audit_token(request
, &token
);
10919 runtime_record_caller_creds(&token
);
10921 struct ldcred
*ldc
= runtime_get_caller_creds();
10922 job_t j
= managed_job(ldc
->pid
);
10924 j
= job_mig_intran(p
);
10930 job_log(j
, LOG_DEBUG
, "Incoming XPC event request: %llu", op
);
10934 case XPC_EVENT_GET_NAME
:
10935 error
= xpc_event_get_event_name(j
, request
, reply
);
10937 case XPC_EVENT_SET
:
10938 error
= xpc_event_set_event(j
, request
, reply
);
10940 case XPC_EVENT_COPY
:
10941 error
= xpc_event_copy_event(j
, request
, reply
);
10943 case XPC_EVENT_CHECK_IN
:
10944 error
= xpc_event_channel_check_in(j
, request
, reply
);
10946 case XPC_EVENT_LOOK_UP
:
10947 error
= xpc_event_channel_look_up(j
, request
, reply
);
10949 case XPC_EVENT_PROVIDER_CHECK_IN
:
10950 error
= xpc_event_provider_check_in(j
, request
, reply
);
10952 case XPC_EVENT_PROVIDER_SET_STATE
:
10953 error
= xpc_event_provider_set_state(j
, request
, reply
);
10955 case XPC_EVENT_COPY_ENTITLEMENTS
:
10956 error
= xpc_event_copy_entitlements(j
, request
, reply
);
10962 job_log(j
, LOG_ERR
, "Bogus opcode.");
10967 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10968 xpc_dictionary_set_uint64(reply2
, XPC_EVENT_ROUTINE_KEY_ERROR
, error
);
/* Read a uint64-valued entitlement (`key`) from the calling process's audit
 * token. Returns 0 when the entitlement is absent or not a uint64.
 * NOTE(review): extraction appears to have dropped a guard line between the
 * copy and the type check — presumably `if (value) {` — TODO confirm against
 * the original source before assuming value is always non-NULL here.
 */
10976 xpc_get_jetsam_entitlement(const char *key
)
10978 uint64_t entitlement
= 0;
10980 audit_token_t
*token
= runtime_get_caller_token();
10981 xpc_object_t value
= xpc_copy_entitlement_for_token(key
, token
);
10983 if (xpc_get_type(value
) == XPC_TYPE_UINT64
) {
10984 entitlement
= xpc_uint64_get_value(value
);
// Balance the copy above; xpc_copy_entitlement_for_token() transfers
// ownership of `value` to us.
10987 xpc_release(value
);
10990 return entitlement
;
10994 xpc_process_set_jetsam_band(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
11000 const char *label
= xpc_dictionary_get_string(request
, XPC_PROCESS_ROUTINE_KEY_LABEL
);
11005 xpc_jetsam_band_t entitled_band
= -1;
11006 xpc_jetsam_band_t requested_band
= (xpc_jetsam_band_t
)xpc_dictionary_get_uint64(request
, XPC_PROCESS_ROUTINE_KEY_PRIORITY_BAND
);
11007 if (!requested_band
) {
11011 if (!(requested_band
>= XPC_JETSAM_BAND_SUSPENDED
&& requested_band
< XPC_JETSAM_BAND_LAST
)) {
11015 uint64_t rcdata
= xpc_dictionary_get_uint64(request
, XPC_PROCESS_ROUTINE_KEY_RCDATA
);
11017 job_t tj
= job_find(root_jobmgr
, label
);
11022 boolean_t allow
= false;
11023 if (j
->embedded_god
) {
11026 entitled_band
= xpc_get_jetsam_entitlement("com.apple.private.jetsam.modify-priority");
11027 if (entitled_band
>= requested_band
) {
11033 if (launchd_no_jetsam_perm_check
) {
11034 job_log(j
, LOG_NOTICE
, "Jetsam priority checks disabled; allowing job to set priority: %d", requested_band
);
11036 job_log(j
, LOG_ERR
, "Job cannot decrease Jetsam priority band (requested/maximum): %d/%d", requested_band
, entitled_band
);
11041 job_log(j
, LOG_INFO
, "Setting Jetsam band: %d.", requested_band
);
11042 job_update_jetsam_properties(tj
, requested_band
, rcdata
);
11044 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
11051 xpc_process_set_jetsam_memory_limit(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
11057 const char *label
= xpc_dictionary_get_string(request
, XPC_PROCESS_ROUTINE_KEY_LABEL
);
11062 int32_t entitlement_limit
= 0;
11063 int32_t requested_limit
= (int32_t)xpc_dictionary_get_uint64(request
, XPC_PROCESS_ROUTINE_KEY_MEMORY_LIMIT
);
11065 job_t tj
= job_find(root_jobmgr
, label
);
11070 boolean_t allow
= false;
11071 if (j
->embedded_god
) {
11074 entitlement_limit
= (int32_t)xpc_get_jetsam_entitlement("com.apple.private.jetsam.memory_limit");
11075 if (entitlement_limit
>= requested_limit
) {
11081 if (launchd_no_jetsam_perm_check
) {
11082 job_log(j
, LOG_NOTICE
, "Jetsam priority checks disabled; allowing job to set memory limit: %d", requested_limit
);
11084 job_log(j
, LOG_ERR
, "Job cannot set Jetsam memory limit (requested/maximum): %d/%d", requested_limit
, entitlement_limit
);
11089 job_log(j
, LOG_INFO
, "Setting Jetsam memory limit: %d.", requested_limit
);
11090 job_update_jetsam_memory_limit(tj
, requested_limit
);
11092 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
/* Find the job manager that should receive an attachment for a service of
 * the given type. For bundled services, scan the root manager's sub-managers
 * for the XPC domain whose requesting PID matches `pid`.
 * NOTE(review): extraction dropped the assignment to `target`, the
 * LAUNCHD/APP branch body, and the return; comments cover only visible
 * logic — presumably the matching jmi becomes `target`, TODO confirm.
 */
11099 _xpc_process_find_target_manager(job_t j
, xpc_service_type_t type
, pid_t pid
)
11101 jobmgr_t target
= NULL
;
11102 if (type
== XPC_SERVICE_TYPE_BUNDLED
) {
11103 job_log(j
, LOG_DEBUG
, "Bundled service. Searching for XPC domains for PID: %d", pid
);
// Each XPC domain records the PID that requested it (req_pid); match on it.
11105 jobmgr_t jmi
= NULL
;
11106 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
11107 if (jmi
->req_pid
&& jmi
->req_pid
== pid
) {
11108 jobmgr_log(jmi
, LOG_DEBUG
, "Found job manager for PID.");
11113 } else if (type
== XPC_SERVICE_TYPE_LAUNCHD
|| type
== XPC_SERVICE_TYPE_APP
) {
/* Handle an XPC_PROCESS_SERVICE_ATTACH request: verify the caller holds the
 * attach entitlement, extract (name, type, pid, new-instance port) from the
 * request dictionary, register a waiting4attach record on the target job
 * manager, and reply with either the PID of an existing running instance or
 * an error code.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; error-return lines between numbered fragments are missing.
 * Tokens preserved byte-identically; comments only.
 */
11121 xpc_process_attach(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
// Entitlement gate: caller must carry XPC_SERVICE_ENTITLEMENT_ATTACH == true.
11127 audit_token_t
*token
= runtime_get_caller_token();
11128 xpc_object_t entitlement
= xpc_copy_entitlement_for_token(XPC_SERVICE_ENTITLEMENT_ATTACH
, token
);
11129 if (!entitlement
) {
11130 job_log(j
, LOG_ERR
, "Job does not have entitlement: %s", XPC_SERVICE_ENTITLEMENT_ATTACH
);
11134 if (entitlement
!= XPC_BOOL_TRUE
) {
11135 char *desc
= xpc_copy_description(entitlement
);
11136 job_log(j
, LOG_ERR
, "Job has bad value for entitlement: %s:\n%s", XPC_SERVICE_ENTITLEMENT_ATTACH
, desc
);
11139 xpc_release(entitlement
);
// Pull the request parameters out of the XPC dictionary.
11143 const char *name
= xpc_dictionary_get_string(request
, XPC_PROCESS_ROUTINE_KEY_NAME
);
11148 xpc_service_type_t type
= xpc_dictionary_get_int64(request
, XPC_PROCESS_ROUTINE_KEY_TYPE
);
11153 mach_port_t port
= xpc_dictionary_copy_mach_send(request
, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT
);
11154 if (!MACH_PORT_VALID(port
)) {
11158 pid_t pid
= xpc_dictionary_get_int64(request
, XPC_PROCESS_ROUTINE_KEY_HANDLE
);
11160 job_log(j
, LOG_DEBUG
, "Attaching to service: %s", name
);
11162 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
11163 jobmgr_t target
= _xpc_process_find_target_manager(j
, type
, pid
);
11165 jobmgr_log(target
, LOG_DEBUG
, "Found target job manager for service: %s", name
);
11166 (void)jobmgr_assumes(target
, waiting4attach_new(target
, name
, port
, 0, type
));
11168 /* HACK: This is awful. For legacy reasons, launchd job labels are all
11169 * stored in a global namespace, which is stored in the root job
11170 * manager. But XPC domains have a per-domain namespace. So if we're
11171 * looking for a legacy launchd job, we have to redirect any attachment
11172 * attempts to the root job manager to find existing instances.
11174 * But because we store attachments on a per-job manager basis, we have
11175 * to create the new attachment in the actual target job manager, hence
11176 * why we change the target only after we've created the attachment.
11178 if (strcmp(target
->name
, VPROCMGR_SESSION_AQUA
) == 0) {
11179 target
= root_jobmgr
;
11182 job_t existing
= job_find(target
, name
);
11183 if (existing
&& existing
->p
) {
// Service is already running: hand back its PID directly.
11184 job_log(existing
, LOG_DEBUG
, "Found existing instance of service.");
11185 xpc_dictionary_set_int64(reply2
, XPC_PROCESS_ROUTINE_KEY_PID
, existing
->p
);
11187 xpc_dictionary_set_uint64(reply2
, XPC_PROCESS_ROUTINE_KEY_ERROR
, ESRCH
);
11189 } else if (type
== XPC_SERVICE_TYPE_BUNDLED
) {
// Bundled service not yet running: park a second attachment keyed by the
// requestor's pid so the attach completes when the instance spawns.
11190 (void)job_assumes(j
, waiting4attach_new(target
, name
, port
, pid
, type
));
11191 xpc_dictionary_set_uint64(reply2
, XPC_PROCESS_ROUTINE_KEY_ERROR
, ESRCH
);
// NOTE(review): EXSRCH is not a standard errno -- presumably a
// launchd/libxpc-private "extended ESRCH" code; confirm its definition.
11193 xpc_dictionary_set_uint64(reply2
, XPC_PROCESS_ROUTINE_KEY_ERROR
, EXSRCH
);
/* Handle an XPC_PROCESS_SERVICE_DETACH request: resolve the target job
 * manager for the (type, pid) in the request, then delete every pending
 * waiting4attach record whose service name matches. The reply out-param is
 * unused; this routine only tears down state.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; some original lines are missing between fragments. Apart
 * from the log-message typo fix noted below, all tokens are preserved.
 */
11201 xpc_process_detach(job_t j
, xpc_object_t request
, xpc_object_t
*reply __unused
)
11207 const char *name
= xpc_dictionary_get_string(request
, XPC_PROCESS_ROUTINE_KEY_NAME
);
11212 xpc_service_type_t type
= xpc_dictionary_get_int64(request
, XPC_PROCESS_ROUTINE_KEY_TYPE
);
// FIX: log message typo corrected ("Deatching" -> "Detaching").
11217 job_log(j
, LOG_DEBUG
, "Detaching from service: %s", name
);
11219 pid_t pid
= xpc_dictionary_get_int64(request
, XPC_PROCESS_ROUTINE_KEY_PID
);
11220 jobmgr_t target
= _xpc_process_find_target_manager(j
, type
, pid
);
11222 jobmgr_log(target
, LOG_DEBUG
, "Found target job manager for service: %s", name
);
// SAFE iteration: waiting4attach_delete() unlinks the current element, so
// the _SAFE variant's lookahead (w4ait) is required here.
11224 struct waiting4attach
*w4ai
= NULL
;
11225 struct waiting4attach
*w4ait
= NULL
;
11226 LIST_FOREACH_SAFE(w4ai
, &target
->attaches
, le
, w4ait
) {
11227 if (strcmp(name
, w4ai
->name
) == 0) {
11228 jobmgr_log(target
, LOG_DEBUG
, "Found attachment. Deleting.");
11229 waiting4attach_delete(target
, w4ai
);
/* Handle an XPC_PROCESS_SERVICE_GET_PROPERTIES request: re-resolve anonymous
 * requestors to their real job via the per-PID XPC domains, find the pending
 * attachment for the job, and reply with the service type, new-instance
 * port, executable path, and argv array.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; guard/brace lines between fragments are missing. Tokens
 * preserved byte-identically; comments only.
 */
11239 xpc_process_get_properties(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
11241 if (j
->anonymous
) {
11242 /* Total hack. libxpc will send requests to the pipe created out of the
11243 * process' bootstrap port, so when job_mig_intran() tries to resolve
11244 * the process into a job, it'll wind up creating an anonymous job if
11245 * the requestor was an XPC service, whose job manager is an XPC domain.
11248 jobmgr_t jmi
= NULL
;
// NOTE(review): `pid` here is declared in a line missing from this
// extraction -- presumably the anonymous job's j->p captured earlier.
11249 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
11250 if ((j
= jobmgr_find_by_pid(jmi
, pid
, false))) {
11256 if (!j
|| j
->anonymous
) {
11260 struct waiting4attach
*w4a
= waiting4attach_find(j
->mgr
, j
);
11265 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
11266 xpc_dictionary_set_uint64(reply2
, XPC_PROCESS_ROUTINE_KEY_TYPE
, w4a
->type
);
11267 xpc_dictionary_set_mach_send(reply2
, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT
, w4a
->port
);
// NOTE(review): PATH is set from j->prog OR j->argv[0]; the branch choosing
// between them (present in upstream) is in lines missing here.
11269 xpc_dictionary_set_string(reply2
, XPC_PROCESS_ROUTINE_KEY_PATH
, j
->prog
);
11271 xpc_dictionary_set_string(reply2
, XPC_PROCESS_ROUTINE_KEY_PATH
, j
->argv
[0]);
// Mirror the job's argv into an XPC array for the reply.
11275 xpc_object_t xargv
= xpc_array_create(NULL
, 0);
11278 for (i
= 0; i
< j
->argc
; i
++) {
11280 xpc_array_set_string(xargv
, XPC_ARRAY_APPEND
, j
->argv
[i
]);
11284 xpc_dictionary_set_value(reply2
, XPC_PROCESS_ROUTINE_KEY_ARGV
, xargv
);
11285 xpc_release(xargv
);
/* Handle an XPC_PROCESS_SERVICE_KILL request: find the named private
 * instance in the requestor's XPC domain, verify ownership (non-root may
 * only signal processes running under the caller's own EUID), then deliver
 * the requested signal and report the result.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; error-return and brace lines between fragments are missing.
 * Tokens preserved byte-identically; comments only.
 */
11293 xpc_process_service_kill(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
11295 #if XPC_LPI_VERSION >= 20130426
11300 jobmgr_t jm
= _xpc_process_find_target_manager(j
, XPC_SERVICE_TYPE_BUNDLED
, j
->p
);
11305 const char *name
= xpc_dictionary_get_string(request
, XPC_PROCESS_ROUTINE_KEY_NAME
);
11310 int64_t whichsig
= xpc_dictionary_get_int64(request
, XPC_PROCESS_ROUTINE_KEY_SIGNAL
);
11315 job_t j2kill
= job_find(jm
, name
);
11320 if (j2kill
->alias
) {
11321 // Only allow for private instances to be killed.
// Look up the target's short BSD info so we can compare its UID against
// the caller's effective UID before signalling.
11325 struct proc_bsdshortinfo proc
;
11326 if (proc_pidinfo(j2kill
->p
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
// proc_pidinfo() failing with ESRCH just means the process already died;
// anything else is unexpected and gets logged via the assumes machinery.
11327 if (errno
!= ESRCH
) {
11328 (void)jobmgr_assumes_zero(root_jobmgr
, errno
);
11334 struct ldcred
*ldc
= runtime_get_caller_creds();
11335 if (proc
.pbsi_uid
!= ldc
->euid
) {
11336 // Do not allow non-root to kill RoleAccount services running as a
11345 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
11351 int ret
= kill(j2kill
->p
, whichsig
);
// NOTE(review): `error` is presumably derived from `ret`/errno in lines
// missing from this extraction -- confirm against upstream.
11356 xpc_dictionary_set_int64(reply2
, XPC_PROCESS_ROUTINE_KEY_ERROR
, error
);
/* Demultiplex an incoming XPC process-routine request: record the caller's
 * audit credentials, resolve the receive port to a job, dispatch on the
 * operation code, and (on failure paths) reply with the resulting error.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; the `switch` statement line, `break` statements, and the
 * `default:` label are among the lines missing between fragments. Tokens
 * preserved byte-identically; comments only.
 */
11365 xpc_process_demux(mach_port_t p
, xpc_object_t request
, xpc_object_t
*reply
)
11367 uint64_t op
= xpc_dictionary_get_uint64(request
, XPC_PROCESS_ROUTINE_KEY_OP
);
// Capture who sent this request before doing anything on their behalf.
11372 audit_token_t token
;
11373 xpc_dictionary_get_audit_token(request
, &token
);
11374 runtime_record_caller_creds(&token
);
11376 job_t j
= job_mig_intran(p
);
11377 job_log(j
, LOG_DEBUG
, "Incoming XPC process request: %llu", op
);
// Opcode dispatch table: each handler fills *reply or returns an error.
11381 case XPC_PROCESS_JETSAM_SET_BAND
:
11382 error
= xpc_process_set_jetsam_band(j
, request
, reply
);
11384 case XPC_PROCESS_JETSAM_SET_MEMORY_LIMIT
:
11385 error
= xpc_process_set_jetsam_memory_limit(j
, request
, reply
);
11387 case XPC_PROCESS_SERVICE_ATTACH
:
11388 error
= xpc_process_attach(j
, request
, reply
);
11390 case XPC_PROCESS_SERVICE_DETACH
:
11391 error
= xpc_process_detach(j
, request
, reply
);
11393 case XPC_PROCESS_SERVICE_GET_PROPERTIES
:
11394 error
= xpc_process_get_properties(j
, request
, reply
);
11396 case XPC_PROCESS_SERVICE_KILL
:
11397 error
= xpc_process_service_kill(j
, request
, reply
);
// Unknown opcode (default case; label line missing from extraction).
11400 job_log(j
, LOG_ERR
, "Bogus process opcode.");
11405 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
11407 xpc_dictionary_set_uint64(reply2
, XPC_PROCESS_ROUTINE_KEY_ERROR
, error
);
/* MIG handler: kick-start the job named `targetlabel` on behalf of the
 * caller, returning its PID in *out_pid. Privilege rules: root, the same
 * EUID as launchd, or (embedded only) a caller whose username matches the
 * target's may kick-start. VPROCFLAG_STALL_JOB_EXEC requests that the child
 * be stalled before exec(2); it is rejected if the job is already running.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; brace/return lines between fragments are missing. Besides
 * the NULL-check fix noted below, tokens are preserved.
 */
11417 job_mig_kickstart(job_t j
, name_t targetlabel
, pid_t
*out_pid
, unsigned int flags
)
11419 struct ldcred
*ldc
= runtime_get_caller_creds();
11423 return BOOTSTRAP_NO_MEMORY
;
11426 if (unlikely(!(otherj
= job_find(NULL
, targetlabel
)))) {
11427 return BOOTSTRAP_UNKNOWN_SERVICE
;
11430 #if TARGET_OS_EMBEDDED
// Embedded: allow a non-root caller to kick-start a job running under the
// same username as itself.
11431 bool allow_non_root_kickstart
= j
->username
&& otherj
->username
&& (strcmp(j
->username
, otherj
->username
) == 0);
11433 bool allow_non_root_kickstart
= false;
11436 if (ldc
->euid
!= 0 && ldc
->euid
!= geteuid() && !allow_non_root_kickstart
) {
11437 return BOOTSTRAP_NOT_PRIVILEGED
;
11441 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
11442 return BOOTSTRAP_NOT_PRIVILEGED
;
11446 if (otherj
->p
&& (flags
& VPROCFLAG_STALL_JOB_EXEC
)) {
11447 return BOOTSTRAP_SERVICE_ACTIVE
;
11450 otherj
->stall_before_exec
= (flags
& VPROCFLAG_STALL_JOB_EXEC
);
11451 otherj
= job_dispatch(otherj
, true);
11453 if (!job_assumes(j
, otherj
&& otherj
->p
)) {
11454 // <rdar://problem/6787083> Clear this flag if we failed to start the job.
// FIX: job_dispatch() above may return NULL, and this branch is reached
// exactly when (otherj && otherj->p) is false -- so the original
// unconditional `otherj->stall_before_exec = false;` could dereference
// NULL. Guard the write.
11455 if (otherj) {
otherj
->stall_before_exec
= false;
}
11456 return BOOTSTRAP_NO_MEMORY
;
11459 *out_pid
= otherj
->p
;
/* MIG spawn backend: unpack a serialized launchd job dictionary from the
 * caller, import it into the caller's (Aqua) job manager, configure legacy
 * LaunchServices spawn semantics, dispatch it, and return the new job in
 * *outj. Returns VPROC_ERR_TRY_PER_USER when PID 1 should punt the request
 * to the per-user launchd, and BOOTSTRAP_NAME_IN_USE when a job with the
 * same label already exists.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; many lines (declarations of `jr`, braces, returns) are
 * missing between fragments. Tokens preserved byte-identically; comments
 * only.
 */
11465 job_mig_spawn_internal(job_t j
, vm_offset_t indata
, mach_msg_type_number_t indataCnt
, mach_port_t asport
, job_t
*outj
)
11467 launch_data_t jobdata
= NULL
;
11468 size_t data_offset
= 0;
11469 struct ldcred
*ldc
= runtime_get_caller_creds();
11473 return BOOTSTRAP_NO_MEMORY
;
11476 if (unlikely(j
->deny_job_creation
)) {
11477 return BOOTSTRAP_NOT_PRIVILEGED
;
11481 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
11482 return BOOTSTRAP_NOT_PRIVILEGED
;
// PID-1 launchd does not spawn for ordinary users directly; hand the
// request to the caller's per-user launchd instance.
11486 if (unlikely(pid1_magic
&& ldc
->euid
&& ldc
->uid
)) {
11487 job_log(j
, LOG_DEBUG
, "Punting spawn to per-user-context");
11488 return VPROC_ERR_TRY_PER_USER
;
11491 if (!job_assumes(j
, indataCnt
!= 0)) {
11495 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK
);
11496 if (!job_assumes(j
, (jobdata
= launch_data_unpack((void *)indata
, indataCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
)) {
11500 jobmgr_t target_jm
= jobmgr_find_by_name(j
->mgr
, NULL
);
11501 if (!jobmgr_assumes(j
->mgr
, target_jm
!= NULL
)) {
11502 jobmgr_log(j
->mgr
, LOG_ERR
, "This API can only be used by a process running within an Aqua session.");
11506 jr
= jobmgr_import2(target_jm
?: j
->mgr
, jobdata
);
11508 launch_data_t label
= NULL
;
11509 launch_data_t wait4debugger
= NULL
;
11513 /* If EEXIST was returned, we know that there is a label string in
11514 * the dictionary. So we don't need to check the types here; that
11515 * has already been done.
11517 label
= launch_data_dict_lookup(jobdata
, LAUNCH_JOBKEY_LABEL
);
11518 jr
= job_find(NULL
, launch_data_get_string(label
));
11519 if (job_assumes(j
, jr
!= NULL
) && !jr
->p
) {
11520 wait4debugger
= launch_data_dict_lookup(jobdata
, LAUNCH_JOBKEY_WAITFORDEBUGGER
);
11521 if (wait4debugger
&& launch_data_get_type(wait4debugger
) == LAUNCH_DATA_BOOL
) {
11522 if (launch_data_get_bool(wait4debugger
)) {
11523 /* If the job exists, we're going to kick-start it, but
11524 * we need to give the caller the opportunity to start
11525 * it suspended if it so desires. But this will only
11526 * take effect if the job isn't running.
11528 jr
->wait4debugger_oneshot
= true;
11534 return BOOTSTRAP_NAME_IN_USE
;
11536 return BOOTSTRAP_NO_MEMORY
;
// Newly imported job: bind it to the caller's UID and mark it with the
// legacy LaunchServices spawn semantics.
11541 jr
->mach_uid
= ldc
->uid
;
11544 // TODO: Consolidate the app and legacy_LS_job bits.
11545 jr
->legacy_LS_job
= true;
11546 jr
->abandon_pg
= true;
11547 jr
->asport
= asport
;
11549 uuid_clear(jr
->expected_audit_uuid
);
11550 jr
= job_dispatch(jr
, true);
11552 if (!job_assumes(j
, jr
!= NULL
)) {
11554 return BOOTSTRAP_NO_MEMORY
;
11557 if (!job_assumes(jr
, jr
->p
)) {
11559 return BOOTSTRAP_NO_MEMORY
;
11562 job_log(jr
, LOG_DEBUG
, "Spawned by PID %u: %s", j
->p
, j
->label
);
11565 return BOOTSTRAP_SUCCESS
;
/* MIG entry point for spawn: delegates to job_mig_spawn_internal(), then
 * arranges for the reply to be held (spawn_reply_port = rp) until the child
 * has exec(3)'d, so the caller can safely SIGCONT it. If the name was
 * already in use, re-dispatches the existing job and replies immediately
 * with its PID when it was already running.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; declaration of `nj`, braces, and returns between fragments
 * are missing. Tokens preserved byte-identically; comments only.
 */
11569 job_mig_spawn2(job_t j
, mach_port_t rp
, vm_offset_t indata
, mach_msg_type_number_t indataCnt
, mach_port_t asport
, pid_t
*child_pid
, mach_port_t
*obsvr_port
)
11572 kern_return_t kr
= job_mig_spawn_internal(j
, indata
, indataCnt
, asport
, &nj
);
11573 if (likely(kr
== KERN_SUCCESS
)) {
11574 if (job_setup_exit_port(nj
) != KERN_SUCCESS
) {
11576 kr
= BOOTSTRAP_NO_MEMORY
;
11578 /* Do not return until the job has called exec(3), thereby making it
11579 * safe for the caller to send it SIGCONT.
11581 * <rdar://problem/9042798>
11583 nj
->spawn_reply_port
= rp
;
11586 } else if (kr
== BOOTSTRAP_NAME_IN_USE
) {
11587 bool was_running
= nj
->p
;
11588 if (job_dispatch(nj
, true)) {
11589 if (!was_running
) {
11590 job_log(nj
, LOG_DEBUG
, "Job exists but is not running. Kick-starting.");
// Job had to be started: defer the reply until its exec, as above.
11592 if (job_setup_exit_port(nj
) == KERN_SUCCESS
) {
11593 nj
->spawn_reply_port
= rp
;
11596 kr
= BOOTSTRAP_NO_MEMORY
;
// Job was already running: reply immediately with its PID.
11599 *obsvr_port
= MACH_PORT_NULL
;
11600 *child_pid
= nj
->p
;
11604 job_log(nj
, LOG_ERR
, "Failed to dispatch job, requestor: %s", j
->label
);
11605 kr
= BOOTSTRAP_UNKNOWN_SERVICE
;
// MIG ownership convention: consume the in-line data on success paths.
11609 mig_deallocate(indata
, indataCnt
);
/* Service one legacy launch(3) IPC request for a job. Currently only the
 * string request LAUNCH_KEY_CHECKIN is handled, which replies with the
 * job's exported dictionary; the asport argument is accepted but unused.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; closing braces / the final return are missing between
 * fragments. Tokens preserved byte-identically; comments only.
 */
11614 job_do_legacy_ipc_request(job_t j
, launch_data_t request
, mach_port_t asport
__attribute__((unused
)))
11616 launch_data_t reply
= NULL
;
11619 if (launch_data_get_type(request
) == LAUNCH_DATA_STRING
) {
11620 if (strcmp(launch_data_get_string(request
), LAUNCH_KEY_CHECKIN
) == 0) {
11621 reply
= job_export(j
);
11629 #define LAUNCHD_MAX_LEGACY_FDS 128
11630 #define countof(x) (sizeof((x)) / sizeof((x[0])))
/* MIG handler for the legacy launch(3) IPC protocol: convert incoming
 * fileports to fds, unpack the serialized request, service it via
 * job_do_legacy_ipc_request(), then pack the reply (data + fds converted
 * back to fileports) into MIG-allocated out-of-line buffers. On any
 * failure, all fds, ports, and MIG allocations acquired so far are released
 * via the goto-style cleanup at the bottom.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; labels, braces, and some statements between fragments are
 * missing. Besides the two fixes noted inline, tokens are preserved.
 */
11633 job_mig_legacy_ipc_request(job_t j
, vm_offset_t request
,
11634 mach_msg_type_number_t requestCnt
, mach_port_array_t request_fds
,
11635 mach_msg_type_number_t request_fdsCnt
, vm_offset_t
*reply
,
11636 mach_msg_type_number_t
*replyCnt
, mach_port_array_t
*reply_fdps
,
11637 mach_msg_type_number_t
*reply_fdsCnt
, mach_port_t asport
)
11640 return BOOTSTRAP_NO_MEMORY
;
11643 /* TODO: Once we support actions other than checking in, we must check the
11644 * sandbox capabilities and EUID of the requestor.
11646 size_t nout_fdps
= 0;
11647 size_t nfds
= request_fdsCnt
/ sizeof(request_fds
[0]);
11648 if (nfds
> LAUNCHD_MAX_LEGACY_FDS
) {
11649 job_log(j
, LOG_ERR
, "Too many incoming descriptors: %lu", nfds
);
11650 return BOOTSTRAP_NO_MEMORY
;
// Convert each incoming fileport into a local descriptor.
11653 int in_fds
[LAUNCHD_MAX_LEGACY_FDS
];
11655 for (i
= 0; i
< nfds
; i
++) {
11656 in_fds
[i
] = fileport_makefd(request_fds
[i
]);
11657 if (in_fds
[i
] == -1) {
11658 job_log(j
, LOG_ERR
, "Bad descriptor passed in legacy IPC request at index: %lu", i
);
11662 // DON'T goto outbad before this point.
11664 *reply_fdps
= NULL
;
11665 launch_data_t ldreply
= NULL
;
11667 size_t dataoff
= 0;
11669 launch_data_t ldrequest
= launch_data_unpack((void *)request
, requestCnt
, in_fds
, nfds
, &dataoff
, &fdoff
);
11671 job_log(j
, LOG_ERR
, "Invalid legacy IPC request passed.");
11675 ldreply
= job_do_legacy_ipc_request(j
, ldrequest
, asport
);
11677 ldreply
= launch_data_new_errno(errno
);
// Fixed-size 10 MiB reply buffer -- legacy protocol has no size
// negotiation, so over-allocate and let launch_data_pack() bound-check.
11683 *replyCnt
= 10 * 1024 * 1024;
11684 mig_allocate(reply
, *replyCnt
);
11689 int out_fds
[LAUNCHD_MAX_LEGACY_FDS
];
11690 size_t nout_fds
= 0;
11691 size_t sz
= launch_data_pack(ldreply
, (void *)*reply
, *replyCnt
, out_fds
, &nout_fds
);
11693 job_log(j
, LOG_ERR
, "Could not pack legacy IPC reply.");
// FIX: use the named limit instead of the magic literal 128 (same value);
// keeps this check in sync with out_fds[] above.
11698 if (nout_fds
> LAUNCHD_MAX_LEGACY_FDS) {
11699 job_log(j
, LOG_ERR
, "Too many outgoing descriptors: %lu", nout_fds
);
11703 *reply_fdsCnt
= nout_fds
* sizeof((*reply_fdps
)[0]);
11704 mig_allocate((vm_address_t
*)reply_fdps
, *reply_fdsCnt
);
11705 if (!*reply_fdps
) {
11709 for (i
= 0; i
< nout_fds
; i
++) {
11710 mach_port_t fp
= MACH_PORT_NULL
;
11711 /* Whatever. Worst case is that we insert MACH_PORT_NULL. Not a big
11712 * deal. Note, these get stuffed into an array whose disposition is
11713 * mach_port_move_send_t, so we don't have to worry about them after
11716 if (fileport_makeport(out_fds
[i
], &fp
) != 0) {
11717 job_log(j
, LOG_ERR
, "Could not pack response descriptor at index: %lu: %d: %s", i
, errno
, strerror(errno
));
11719 (*reply_fdps
)[i
] = fp
;
11722 nout_fdps
= nout_fds
;
// Success path: the request buffer and the intermediate launch_data reply
// are no longer needed; the caller's asport send-right is consumed.
11727 mig_deallocate(request
, requestCnt
);
11728 launch_data_free(ldreply
);
11732 (void)launchd_mport_deallocate(asport
);
11734 return BOOTSTRAP_SUCCESS
;
// ---- error cleanup (labels missing from extraction) ----
11737 for (i
= 0; i
< nfds
; i
++) {
11738 (void)close(in_fds
[i
]);
11741 for (i
= 0; i
< nout_fds
; i
++) {
11742 (void)launchd_mport_deallocate((*reply_fdps
)[i
]);
11746 mig_deallocate(*reply
, *replyCnt
);
11749 /* We should never hit this since the last goto out is in the case that
11750 * allocating this fails.
11753 mig_deallocate((vm_address_t
)*reply_fdps
, *reply_fdsCnt
);
11757 launch_data_free(ldreply
);
11760 return BOOTSTRAP_NO_MEMORY
;
/* One-time job-manager bootstrap: create the root job manager (SYSTEM
 * session under PID 1, BACKGROUND otherwise), the singleton XPC system
 * domain, and the /dev watch descriptor used to keep autofs from hanging
 * launchd. Aborts (os_assert) if either manager cannot be created.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; braces between fragments are missing. Tokens preserved
 * byte-identically; comments only.
 */
11764 jobmgr_init(bool sflag
)
11766 const char *root_session_type
= pid1_magic
? VPROCMGR_SESSION_SYSTEM
: VPROCMGR_SESSION_BACKGROUND
;
11767 SLIST_INIT(&s_curious_jobs
);
11768 LIST_INIT(&s_needing_sessions
);
11770 os_assert((root_jobmgr
= jobmgr_new(NULL
, MACH_PORT_NULL
, MACH_PORT_NULL
, sflag
, root_session_type
, false, MACH_PORT_NULL
)) != NULL
);
11771 os_assert((_s_xpc_system_domain
= jobmgr_new_xpc_singleton_domain(root_jobmgr
, "com.apple.xpc.system")) != NULL
);
11772 _s_xpc_system_domain
->req_asid
= launchd_audit_session
;
11773 _s_xpc_system_domain
->req_asport
= launchd_audit_port
;
11774 _s_xpc_system_domain
->shortdesc
= "system";
11776 root_jobmgr
->monitor_shutdown
= true;
// Watch /dev (or /dev/autofs_nowait when available) for vnode changes.
11779 uint32_t fflags
= NOTE_ATTRIB
| NOTE_LINK
| NOTE_REVOKE
| NOTE_EXTEND
| NOTE_WRITE
;
11780 s_no_hang_fd
= open("/dev/autofs_nowait", O_EVTONLY
| O_NONBLOCK
);
// NOTE(review): `likely(s_no_hang_fd == -1)` reads oddly -- it predicts the
// open *fails* and then falls back to watching /dev; confirm against
// upstream that the branch sense and the `!= -1` comparison below are
// intended.
11781 if (likely(s_no_hang_fd
== -1)) {
11782 if (jobmgr_assumes_zero_p(root_jobmgr
, (s_no_hang_fd
= open("/dev", O_EVTONLY
| O_NONBLOCK
))) != -1) {
11783 (void)jobmgr_assumes_zero_p(root_jobmgr
, kevent_mod((uintptr_t)s_no_hang_fd
, EVFILT_VNODE
, EV_ADD
, fflags
, 0, root_jobmgr
));
11786 s_no_hang_fd
= _fd(s_no_hang_fd
);
/* djb2 string hash (hash = hash * 33 + c, seed 5381).
 * NOTE(review): garbled extraction -- the `return r;` line present in the
 * original (and the opening/closing braces) is missing here; tokens are
 * preserved byte-identically, comments only.
 */
our_strhash(const char *s
)
11792 size_t c
, r
= 5381;
11795 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
11798 while ((c
= *s
++)) {
11799 r
= ((r
<< 5) + r
) + c
; // hash*33 + c
/* Map a job label to a bucket index in the label hash table. */
hash_label(const char *label
)
11808 return our_strhash(label
) % LABEL_HASH_SIZE
;
/* Map a Mach service name to a bucket index in the machservice hash table. */
hash_ms(const char *msstr
)
11814 return our_strhash(msstr
) % MACHSERVICE_HASH_SIZE
;
/* Register a MIG reply port to be signalled when job j is removed: allocate
 * a waiting_for_removal record holding `rp` and push it onto the job's
 * removal_watchers list. On allocation failure, job_assumes logs and the
 * (missing-from-extraction) failure return is taken.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; braces/returns between fragments are missing. Tokens
 * preserved byte-identically; comments only.
 */
waiting4removal_new(job_t j
, mach_port_t rp
)
11820 struct waiting_for_removal
*w4r
;
11822 if (!job_assumes(j
, (w4r
= malloc(sizeof(struct waiting_for_removal
))) != NULL
)) {
11826 w4r
->reply_port
= rp
;
11828 SLIST_INSERT_HEAD(&j
->removal_watchers
, w4r
, sle
);
/* Complete a pending removal wait: send the deferred send_signal MIG reply
 * (status 0) to the stored reply port, then unlink the record from the
 * job's removal_watchers list. (The free of w4r is in a line missing from
 * this extraction.)
 */
waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
)
11836 (void)job_assumes_zero(j
, job_mig_send_signal_reply(w4r
->reply_port
, 0));
11838 SLIST_REMOVE(&j
->removal_watchers
, w4r
, waiting_for_removal
, sle
);
/* Query the kernel's process-count limit via sysctl(KERN_MAXPROC).
 * NOTE(review): the declaration of `max` and the final `return max;` are in
 * lines missing from this extraction; tokens preserved byte-identically.
 */
get_kern_max_proc(void)
11846 int mib
[] = { CTL_KERN
, KERN_MAXPROC
};
11848 size_t max_sz
= sizeof(max
);
11850 (void)posix_assumes_zero(sysctl(mib
, 2, &max
, &max_sz
, NULL
, 0));
// See rdar://problem/6271234
/* PID-1-only: if /etc/rc.deferred_install exists, run it synchronously via
 * the shell, log the outcome, and unlink the script so it cannot run again
 * on the next boot (hence "eliminate double reboot").
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; declarations of `sb`, `p`, `result`, `wstatus` and several
 * braces/returns are in lines missing between fragments. Tokens preserved
 * byte-identically; comments only.
 */
11857 eliminate_double_reboot(void)
11859 if (unlikely(!pid1_magic
)) {
11864 const char *argv
[] = { _PATH_BSHELL
, "/etc/rc.deferred_install", NULL
};
11867 if (unlikely(stat(argv
[1], &sb
) != -1)) {
11868 jobmgr_log(root_jobmgr
, LOG_DEBUG
| LOG_CONSOLE
, "Going to run deferred install script.");
11871 result
= posix_spawnp(&p
, argv
[0], NULL
, NULL
, (char **)argv
, environ
);
// NOTE(review): posix_spawnp() returns an errno value (0 on success, >0 on
// failure), not -1/errno -- so `result == -1` may never be true. The
// strerror(result) below is consistent with the errno-return convention;
// confirm the comparison against upstream.
11872 if (result
== -1) {
11873 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Couldn't run deferred install script: %d: %s", result
, strerror(result
));
11878 result
= waitpid(p
, &wstatus
, 0);
11879 if (result
== -1) {
11880 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Failed to reap deferred install script: %d: %s", errno
, strerror(errno
));
11884 if (WIFEXITED(wstatus
)) {
11885 if ((result
= WEXITSTATUS(wstatus
)) == 0) {
11886 jobmgr_log(root_jobmgr
, LOG_DEBUG
| LOG_CONSOLE
, "Deferred install script completed successfully.");
11888 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Deferred install script failed with status: %d", WEXITSTATUS(wstatus
));
11891 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Weirdness with install script: %d", wstatus
);
11896 /* If the unlink(2) was to fail, it would be most likely fail with
11897 * EBUSY. All the other failure cases for unlink(2) don't apply when
11898 * we're running under PID 1 and have verified that the file exists.
11899 * Outside of someone deliberately messing with us (like if
11900 * /etc/rc.deferredinstall is actually a looping sym-link or a mount
11901 * point for a filesystem) and I/O errors, we should be good.
11903 if (unlink(argv
[1]) == -1) {
11904 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Failed to remove deferred install script: %d: %s", errno
, strerror(errno
));
/* launch_data_dict_iterate() callback: apply one Jetsam key/value pair from
 * a job's plist (or SpringBoard's runtime request) to job j. Recognizes
 * priority, memory limit, background-only memory limit, and several
 * SpringBoard-only keys that are accepted but ignored; anything else is
 * logged as unknown. Marks the job as having jetsam properties at the end.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; `else`/brace lines between fragments are missing. Tokens
 * preserved byte-identically; comments only.
 */
jetsam_property_setup(launch_data_t obj
, const char *key
, job_t j
)
11912 job_log(j
, LOG_DEBUG
, "Setting Jetsam properties for job...");
11913 if (strcasecmp(key
, LAUNCH_JOBKEY_JETSAMPRIORITY
) == 0 && launch_data_get_type(obj
) == LAUNCH_DATA_INTEGER
) {
11914 j
->jetsam_priority
= (typeof(j
->jetsam_priority
))launch_data_get_integer(obj
);
// Values inside the reserved XPC band range are translated through the
// launchd band->priority map (band index is 1-based, hence the -1).
11916 #if XPC_LPI_VERSION >= 20120810
11917 if (j
->jetsam_priority
> XPC_JETSAM_PRIORITY_RESERVED
&& j
->jetsam_priority
< XPC_JETSAM_PRIORITY_RESERVED
+ XPC_JETSAM_BAND_LAST
) {
11918 size_t band
= j
->jetsam_priority
- XPC_JETSAM_PRIORITY_RESERVED
;
11919 j
->jetsam_priority
= _launchd_priority_map
[band
- 1].priority
;
11922 job_log(j
, LOG_DEBUG
, "Priority: %d", j
->jetsam_priority
);
11923 } else if (strcasecmp(key
, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT
) == 0 && launch_data_get_type(obj
) == LAUNCH_DATA_INTEGER
) {
11924 j
->jetsam_memlimit
= (typeof(j
->jetsam_memlimit
))launch_data_get_integer(obj
);
11925 job_log(j
, LOG_DEBUG
, "Memory limit: %d", j
->jetsam_memlimit
);
11926 } else if (strcasecmp(key
, LAUNCH_JOBKEY_JETSAMMEMORYLIMITBACKGROUND
) == 0) {
11927 j
->jetsam_memory_limit_background
= true;
11928 job_log(j
, LOG_DEBUG
, "Memory limit is for background state only");
11929 } else if (strcasecmp(key
, LAUNCH_KEY_JETSAMFRONTMOST
) == 0) {
11930 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
11931 * You can't set this in a plist.
11933 } else if (strcasecmp(key
, LAUNCH_KEY_JETSAMACTIVE
) == 0) {
// Ignore (SpringBoard-only key; missing comment line in extraction).
11935 } else if (strcasecmp(key
, LAUNCH_KEY_JETSAMLABEL
) == 0) {
11936 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
11937 * complain about it.
11940 job_log(j
, LOG_ERR
, "Unknown Jetsam key: %s", key
);
11943 if (unlikely(!j
->jetsam_properties
)) {
11944 j
->jetsam_properties
= true;
/* Apply a new Jetsam band/user-data to job j. Embedded-only: translates
 * the band through the launchd priority map, then pushes the resulting
 * priority and user_data into the kernel via
 * memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES) for the
 * job's PID. ESRCH (process already gone) is tolerated; other errors are
 * logged. On non-embedded builds the arguments are unused.
 *
 * NOTE(review): garbled extraction -- leading "NNNNN" tokens are original
 * line numbers; `#else`/braces between fragments are missing. Tokens
 * preserved byte-identically; comments only.
 */
job_update_jetsam_properties(job_t j
, xpc_jetsam_band_t band
, uint64_t user_data
)
11951 #if TARGET_OS_EMBEDDED
11952 j
->jetsam_priority
= _launchd_priority_map
[band
- 1].priority
;
11953 j
->jetsam_properties
= true;
11955 memorystatus_priority_properties_t mjp
;
11956 mjp
.priority
= j
->jetsam_priority
;
11957 mjp
.user_data
= user_data
;
11959 size_t size
= sizeof(mjp
);
11960 int r
= memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES
, j
->p
, 0, &mjp
, size
);
// A dead process (ESRCH) is not an error worth asserting about.
11961 if (r
== -1 && errno
!= ESRCH
) {
11962 (void)job_assumes_zero(j
, errno
);
11965 #pragma unused(j, band, user_data)
11970 job_update_jetsam_memory_limit(job_t j
, int32_t limit
)
11972 #if TARGET_OS_EMBEDDED
11973 j
->jetsam_memlimit
= limit
;
11974 j
->jetsam_properties
= true;
11976 int r
= memorystatus_control(MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK
, j
->p
, limit
, NULL
, 0);
11977 if (r
== -1 && errno
!= ESRCH
) {
11978 (void)job_assumes_zero(j
, errno
);
11981 #pragma unused(j, limit)