2 * @APPLE_APACHE_LICENSE_HEADER_START@
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
16 * @APPLE_APACHE_LICENSE_HEADER_END@
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/boolean.h>
28 #include <mach/message.h>
29 #include <mach/notify.h>
30 #include <mach/mig_errors.h>
31 #include <mach/mach_traps.h>
32 #include <mach/mach_interface.h>
33 #include <mach/host_info.h>
34 #include <mach/mach_host.h>
35 #include <mach/exception.h>
36 #include <mach/host_reboot.h>
37 #include <sys/types.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
41 #include <sys/ucred.h>
42 #include <sys/fcntl.h>
44 #include <sys/reboot.h>
46 #include <sys/sysctl.h>
47 #include <sys/sockio.h>
49 #include <sys/resource.h>
50 #include <sys/ioctl.h>
51 #include <sys/mount.h>
54 #include <sys/socket.h>
55 #include <sys/syscall.h>
56 #include <sys/kern_memorystatus.h>
58 #include <netinet/in.h>
59 #include <netinet/in_var.h>
60 #include <netinet6/nd6.h>
61 #include <bsm/libbsm.h>
79 #include <System/sys/spawn.h>
80 #include <System/sys/spawn_internal.h>
82 #include <spawn_private.h>
86 #include <xpc/launchd.h>
89 #include <System/sys/proc_info.h>
90 #include <malloc/malloc.h>
94 #define __APPLE_API_PRIVATE
98 #include <quarantine.h>
100 #if !TARGET_OS_EMBEDDED
101 extern int gL1CacheEnabled
;
105 #include "launch_priv.h"
106 #include "launch_internal.h"
107 #include "bootstrap.h"
108 #include "bootstrap_priv.h"
110 #include "vproc_internal.h"
118 #include "jobServer.h"
119 #include "job_reply.h"
120 #include "job_forward.h"
121 #include "mach_excServer.h"
123 #define POSIX_SPAWN_IOS_INTERACTIVE 0
125 /* LAUNCHD_DEFAULT_EXIT_TIMEOUT
126 * If the job hasn't exited in the given number of seconds after sending
127 * it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
129 #define LAUNCHD_MIN_JOB_RUN_TIME 10
130 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
131 #define LAUNCHD_SIGKILL_TIMER 4
132 #define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
134 #define SHUTDOWN_LOG_DIR "/var/log/shutdown"
136 #define TAKE_SUBSET_NAME "TakeSubsetName"
137 #define TAKE_SUBSET_PID "TakeSubsetPID"
138 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
/* True iff v is a (nonzero) power of two. NOTE: evaluates v multiple times,
 * so callers must not pass expressions with side effects. Every use of the
 * parameter is parenthesized (CERT PRE01-C) so operator arguments such as
 * IS_POWER_OF_TWO(a | b) expand correctly. */
#define IS_POWER_OF_TWO(v) (!((v) & ((v) - 1)) && (v))
142 extern char **environ
;
144 struct waiting_for_removal
{
145 SLIST_ENTRY(waiting_for_removal
) sle
;
146 mach_port_t reply_port
;
149 static bool waiting4removal_new(job_t j
, mach_port_t rp
);
150 static void waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
);
153 SLIST_ENTRY(machservice
) sle
;
154 SLIST_ENTRY(machservice
) special_port_sle
;
155 LIST_ENTRY(machservice
) name_hash_sle
;
156 LIST_ENTRY(machservice
) port_hash_sle
;
157 struct machservice
*alias
;
159 unsigned int gen_num
;
160 mach_port_name_t port
;
170 delete_on_destruction
:1,
171 drain_one_on_crash
:1,
172 drain_all_on_crash
:1,
175 /* Don't let the size of this field to get too small. It has to be large
176 * enough to represent the reasonable range of special port numbers.
182 // HACK: This should be per jobmgr_t
183 static SLIST_HEAD(, machservice
) special_ports
;
185 #define PORT_HASH_SIZE 32
186 #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
188 static LIST_HEAD(, machservice
) port_hash
[PORT_HASH_SIZE
];
190 static void machservice_setup(launch_data_t obj
, const char *key
, void *context
);
191 static void machservice_setup_options(launch_data_t obj
, const char *key
, void *context
);
192 static void machservice_resetport(job_t j
, struct machservice
*ms
);
193 static void machservice_stamp_port(job_t j
, struct machservice
*ms
);
194 static struct machservice
*machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
);
195 static struct machservice
*machservice_new_alias(job_t aj
, struct machservice
*orig
);
196 static void machservice_ignore(job_t j
, struct machservice
*ms
);
197 static void machservice_watch(job_t j
, struct machservice
*ms
);
198 static void machservice_delete(job_t j
, struct machservice
*, bool port_died
);
199 static void machservice_request_notifications(struct machservice
*);
200 static mach_port_t
machservice_port(struct machservice
*);
201 static job_t
machservice_job(struct machservice
*);
202 static bool machservice_hidden(struct machservice
*);
203 static bool machservice_active(struct machservice
*);
204 static const char *machservice_name(struct machservice
*);
205 static bootstrap_status_t
machservice_status(struct machservice
*);
206 void machservice_drain_port(struct machservice
*);
209 SLIST_ENTRY(socketgroup
) sle
;
218 static bool socketgroup_new(job_t j
, const char *name
, int *fds
, size_t fd_cnt
);
219 static void socketgroup_delete(job_t j
, struct socketgroup
*sg
);
220 static void socketgroup_watch(job_t j
, struct socketgroup
*sg
);
221 static void socketgroup_ignore(job_t j
, struct socketgroup
*sg
);
222 static void socketgroup_callback(job_t j
);
223 static void socketgroup_setup(launch_data_t obj
, const char *key
, void *context
);
224 static void socketgroup_kevent_mod(job_t j
, struct socketgroup
*sg
, bool do_add
);
226 struct calendarinterval
{
227 LIST_ENTRY(calendarinterval
) global_sle
;
228 SLIST_ENTRY(calendarinterval
) sle
;
234 static LIST_HEAD(, calendarinterval
) sorted_calendar_events
;
236 static bool calendarinterval_new(job_t j
, struct tm
*w
);
237 static bool calendarinterval_new_from_obj(job_t j
, launch_data_t obj
);
238 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
);
239 static void calendarinterval_delete(job_t j
, struct calendarinterval
*ci
);
240 static void calendarinterval_setalarm(job_t j
, struct calendarinterval
*ci
);
241 static void calendarinterval_callback(void);
242 static void calendarinterval_sanity_check(void);
245 SLIST_ENTRY(envitem
) sle
;
253 static bool envitem_new(job_t j
, const char *k
, const char *v
, bool global
);
254 static void envitem_delete(job_t j
, struct envitem
*ei
, bool global
);
255 static void envitem_setup(launch_data_t obj
, const char *key
, void *context
);
258 SLIST_ENTRY(limititem
) sle
;
260 unsigned int setsoft
:1, sethard
:1, which
:30;
263 static bool limititem_update(job_t j
, int w
, rlim_t r
);
264 static void limititem_delete(job_t j
, struct limititem
*li
);
265 static void limititem_setup(launch_data_t obj
, const char *key
, void *context
);
267 static void seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
);
270 static void jetsam_property_setup(launch_data_t obj
, const char *key
, job_t j
);
283 } semaphore_reason_t
;
285 struct semaphoreitem
{
286 SLIST_ENTRY(semaphoreitem
) sle
;
287 semaphore_reason_t why
;
295 struct semaphoreitem_dict_iter_context
{
297 semaphore_reason_t why_true
;
298 semaphore_reason_t why_false
;
301 static bool semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
);
302 static void semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
);
303 static void semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
);
304 static void semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
);
305 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
);
307 struct externalevent
{
308 LIST_ENTRY(externalevent
) sys_le
;
309 LIST_ENTRY(externalevent
) job_le
;
310 struct eventsystem
*sys
;
322 struct externalevent_iter_ctx
{
324 struct eventsystem
*sys
;
327 static bool externalevent_new(job_t j
, struct eventsystem
*sys
, const char *evname
, xpc_object_t event
);
328 static void externalevent_delete(struct externalevent
*ee
);
329 static void externalevent_setup(launch_data_t obj
, const char *key
, void *context
);
330 static struct externalevent
*externalevent_find(const char *sysname
, uint64_t id
);
333 LIST_ENTRY(eventsystem
) global_le
;
334 LIST_HEAD(, externalevent
) events
;
339 static struct eventsystem
*eventsystem_new(const char *name
);
340 static void eventsystem_delete(struct eventsystem
*sys
) __attribute__((unused
));
341 static void eventsystem_setup(launch_data_t obj
, const char *key
, void *context
);
342 static struct eventsystem
*eventsystem_find(const char *name
);
343 static void eventsystem_ping(void);
#define ACTIVE_JOB_HASH_SIZE 32
/* Map a pid (or similar integer key) into the active-jobs hash table.
 * The power-of-two test folds to a constant, so the compiler keeps only the
 * cheap mask branch. The parameter is parenthesized at every use
 * (CERT PRE01-C) so arguments containing operators hash correctly;
 * previously `x % ACTIVE_JOB_HASH_SIZE` misparsed for e.g. `a + b`. */
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? ((x) & (ACTIVE_JOB_HASH_SIZE - 1)) : ((x) % ACTIVE_JOB_HASH_SIZE))
348 #define MACHSERVICE_HASH_SIZE 37
350 #define LABEL_HASH_SIZE 53
352 kq_callback kqjobmgr_callback
;
353 LIST_ENTRY(jobmgr_s
) xpc_le
;
354 SLIST_ENTRY(jobmgr_s
) sle
;
355 SLIST_HEAD(, jobmgr_s
) submgrs
;
356 LIST_HEAD(, job_s
) jobs
;
358 /* For legacy reasons, we keep all job labels that are imported in the root
359 * job manager's label hash. If a job manager is an XPC domain, then it gets
360 * its own label hash that is separate from the "global" one stored in the
363 LIST_HEAD(, job_s
) label_hash
[LABEL_HASH_SIZE
];
364 LIST_HEAD(, job_s
) active_jobs
[ACTIVE_JOB_HASH_SIZE
];
365 LIST_HEAD(, machservice
) ms_hash
[MACHSERVICE_HASH_SIZE
];
366 LIST_HEAD(, job_s
) global_env_jobs
;
368 mach_port_t req_port
;
371 time_t shutdown_time
;
372 unsigned int global_on_demand_cnt
;
373 unsigned int normal_active_cnt
;
376 session_initialized
:1,
379 shutdown_jobs_dirtied
:1,
380 shutdown_jobs_cleaned
:1,
383 // XPC-specific properties.
384 char owner
[MAXCOMLEN
];
386 mach_port_t req_bsport
;
387 mach_port_t req_excport
;
388 mach_port_t req_asport
;
394 mach_msg_type_number_t req_ctx_sz
;
395 mach_port_t req_rport
;
403 // Global XPC domains.
404 static jobmgr_t _s_xpc_system_domain
;
405 static LIST_HEAD(, jobmgr_s
) _s_xpc_user_domains
;
406 static LIST_HEAD(, jobmgr_s
) _s_xpc_session_domains
;
408 #define jobmgr_assumes(jm, e) osx_assumes_ctx(jobmgr_log_bug, jm, (e))
409 #define jobmgr_assumes_zero(jm, e) osx_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
410 #define jobmgr_assumes_zero_p(jm, e) posix_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
412 static jobmgr_t
jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
, bool no_init
, mach_port_t asport
);
413 static jobmgr_t
jobmgr_new_xpc_singleton_domain(jobmgr_t jm
, name_t name
);
414 static jobmgr_t
jobmgr_find_xpc_per_user_domain(jobmgr_t jm
, uid_t uid
);
415 static jobmgr_t
jobmgr_find_xpc_per_session_domain(jobmgr_t jm
, au_asid_t asid
);
416 static job_t
jobmgr_import2(jobmgr_t jm
, launch_data_t pload
);
417 static jobmgr_t
jobmgr_parent(jobmgr_t jm
);
418 static jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm
);
419 static bool jobmgr_label_test(jobmgr_t jm
, const char *str
);
420 static void jobmgr_reap_bulk(jobmgr_t jm
, struct kevent
*kev
);
421 static void jobmgr_log_stray_children(jobmgr_t jm
, bool kill_strays
);
422 static void jobmgr_kill_stray_children(jobmgr_t jm
, pid_t
*p
, size_t np
);
423 static void jobmgr_remove(jobmgr_t jm
);
424 static void jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
);
425 static job_t
jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
);
426 static job_t
jobmgr_find_by_pid_deep(jobmgr_t jm
, pid_t p
, bool anon_okay
);
427 static job_t
jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
);
428 static jobmgr_t
jobmgr_find_by_name(jobmgr_t jm
, const char *where
);
429 static job_t
job_mig_intran2(jobmgr_t jm
, mach_port_t mport
, pid_t upid
);
430 static job_t
jobmgr_lookup_per_user_context_internal(job_t j
, uid_t which_user
, mach_port_t
*mp
);
431 static void job_export_all2(jobmgr_t jm
, launch_data_t where
);
432 static void jobmgr_callback(void *obj
, struct kevent
*kev
);
433 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm
);
434 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
);
435 static struct machservice
*jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
);
436 static void jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
437 static void jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
438 static void jobmgr_log_perf_statistics(jobmgr_t jm
);
439 // static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
440 static bool jobmgr_log_bug(aslmsg asl_message
, void *ctx
, const char *message
);
442 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
443 #define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
444 #define AUTO_PICK_XPC_LABEL (const char *)(~2)
446 struct suspended_peruser
{
447 LIST_ENTRY(suspended_peruser
) sle
;
452 // MUST be first element of this structure.
453 kq_callback kqjob_callback
;
454 LIST_ENTRY(job_s
) sle
;
455 LIST_ENTRY(job_s
) subjob_sle
;
456 LIST_ENTRY(job_s
) needing_session_sle
;
457 LIST_ENTRY(job_s
) jetsam_sle
;
458 LIST_ENTRY(job_s
) pid_hash_sle
;
459 LIST_ENTRY(job_s
) label_hash_sle
;
460 LIST_ENTRY(job_s
) global_env_sle
;
461 SLIST_ENTRY(job_s
) curious_jobs_sle
;
462 LIST_HEAD(, suspended_peruser
) suspended_perusers
;
463 LIST_HEAD(, waiting_for_exit
) exit_watchers
;
464 LIST_HEAD(, job_s
) subjobs
;
465 LIST_HEAD(, externalevent
) events
;
466 SLIST_HEAD(, socketgroup
) sockets
;
467 SLIST_HEAD(, calendarinterval
) cal_intervals
;
468 SLIST_HEAD(, envitem
) global_env
;
469 SLIST_HEAD(, envitem
) env
;
470 SLIST_HEAD(, limititem
) limits
;
471 SLIST_HEAD(, machservice
) machservices
;
472 SLIST_HEAD(, semaphoreitem
) semaphores
;
473 SLIST_HEAD(, waiting_for_removal
) removal_watchers
;
476 cpu_type_t
*j_binpref
;
477 size_t j_binpref_cnt
;
479 mach_port_t exit_status_dest
;
480 mach_port_t exit_status_port
;
481 mach_port_t spawn_reply_port
;
494 char *alt_exc_handler
;
498 char *seatbelt_profile
;
499 uint64_t seatbelt_flags
;
502 void *quarantine_data
;
503 size_t quarantine_data_sz
;
506 int last_exit_status
;
511 int32_t jetsam_priority
;
512 int32_t jetsam_memlimit
;
513 int32_t main_thread_priority
;
515 uint32_t exit_timeout
;
516 uint64_t sent_signal_time
;
518 uint32_t min_run_time
;
519 uint32_t start_interval
;
520 uint32_t peruser_suspend_count
;
525 // Only set for per-user launchd's.
527 uuid_t expected_audit_uuid
;
529 // man launchd.plist --> Debug
531 // man launchd.plist --> KeepAlive == false
533 // man launchd.plist --> SessionCreate
535 // man launchd.plist --> LowPriorityIO
537 // man launchd.plist --> InitGroups
539 /* A legacy mach_init concept to make bootstrap_create_server/service()
542 priv_port_has_senders
:1,
543 // A hack during job importing
544 importing_global_env
:1,
545 // A hack during job importing
546 importing_hard_limits
:1,
547 // man launchd.plist --> Umask
549 // A process that launchd knows about but doesn't manage.
551 // A legacy mach_init concept to detect sick jobs
553 // A job created via bootstrap_create_server()
555 // A job created via spawn_via_launchd()
557 // A legacy job that wants inetd compatible semantics
559 // A twist on inetd compatibility
561 /* An event fired and the job should start, but not necessarily right
565 // man launchd.plist --> EnableGlobbing
567 // man launchd.plist --> WaitForDebugger
569 // One-shot WaitForDebugger.
570 wait4debugger_oneshot
:1,
571 // MachExceptionHandler == true
572 internal_exc_handler
:1,
573 // A hack to support an option of spawn_via_launchd()
575 /* man launchd.plist --> LaunchOnlyOnce.
577 * Note: <rdar://problem/5465184> Rename this to "HopefullyNeverExits".
580 /* Make job_ignore() / job_watch() work. If these calls were balanced,
581 * then this wouldn't be necessary.
584 /* A job that forced all other jobs to be temporarily launch-on-
587 forced_peers_to_demand_mode
:1,
588 // man launchd.plist --> Nice
590 /* A job was asked to be unloaded/removed while running, we'll remove it
594 // job_kill() was called.
596 // Enter the kernel debugger before killing a job.
598 // A hack that launchd+launchctl use during jobmgr_t creation.
600 // man launchd.plist --> StartOnMount
602 // This job is a per-user launchd managed by the PID 1 launchd.
604 // A job thoroughly confused launchd. We need to unload it ASAP.
605 unload_at_mig_return
:1,
606 // man launchd.plist --> AbandonProcessGroup
608 /* During shutdown, do not send SIGTERM to stray processes in the
609 * process group of this job.
611 ignore_pg_at_shutdown
:1,
612 /* Don't let this job create new 'job_t' objects in launchd. Has been
613 * seriously overloaded for the purposes of sandboxing.
616 // man launchd.plist --> EnableTransactions
617 enable_transactions
:1,
618 // The job was sent SIGKILL because it was clean.
620 /* The job has a tracing PID (probably a debugger) and exited before the
621 * tracer did. So we must defer our reap attempt until after the tracer
622 * has exited. This works around our busted ptrace(3) implementation.
625 // The job has an OtherJobEnabled KeepAlive criterion.
627 // The job exited due to a crash.
629 // We've received NOTE_EXIT for the job and reaped it.
631 // job_stop() was called.
633 // The job is considered "frontmost" by Jetsam.
635 /* The job is not frontmost, but it is considered "active" (i.e.
636 * backgrounded) by Jetsam.
639 /* The job is to be kept alive continuously, but it must first get an
643 // The job is a bootstrapper.
645 // The job owns the console.
647 /* The job runs as a non-root user on embedded but has select privileges
648 * of the root user. This is SpringBoard.
651 // We got NOTE_EXEC for the job.
653 // The job is an XPC service, and XPC proxy successfully exec(3)ed.
655 // The (anonymous) job called vprocmgr_switch_to_session().
657 // The job has Jetsam limits in place.
659 /* This job was created as the result of a look up of a service provided
660 * by a MultipleInstance job.
662 dedicated_instance
:1,
663 // The job supports creating additional instances of itself.
664 multiple_instances
:1,
665 /* The sub-job was already removed from the parent's list of
669 /* The job is responsible for monitoring external events for this
673 // The event monitor job has retrieved the initial list of events.
674 event_monitor_ready2signal
:1,
677 // Disable ASLR when launching this job.
679 // The job is an XPC Service.
681 // The job is the Performance team's shutdown monitor.
683 // We should open a transaction for the job when shutdown begins.
685 /* The job was sent SIGKILL but did not exit in a timely fashion,
686 * indicating a kernel bug.
689 // The job is the XPC domain bootstrapper.
691 // The job is an app (on either iOS or OS X) and has different resource
694 // The job failed to exec(3) for reasons that may be transient, so we're
695 // waiting for UserEventAgent to tell us when it's okay to try spawning
696 // again (i.e. when the executable path appears, when the UID appears,
703 static size_t hash_label(const char *label
) __attribute__((pure
));
704 static size_t hash_ms(const char *msstr
) __attribute__((pure
));
705 static SLIST_HEAD(, job_s
) s_curious_jobs
;
707 #define job_assumes(j, e) osx_assumes_ctx(job_log_bug, j, (e))
708 #define job_assumes_zero(j, e) osx_assumes_zero_ctx(job_log_bug, j, (e))
709 #define job_assumes_zero_p(j, e) posix_assumes_zero_ctx(job_log_bug, j, (e))
711 static void job_import_keys(launch_data_t obj
, const char *key
, void *context
);
712 static void job_import_bool(job_t j
, const char *key
, bool value
);
713 static void job_import_string(job_t j
, const char *key
, const char *value
);
714 static void job_import_integer(job_t j
, const char *key
, long long value
);
715 static void job_import_dictionary(job_t j
, const char *key
, launch_data_t value
);
716 static void job_import_array(job_t j
, const char *key
, launch_data_t value
);
717 static void job_import_opaque(job_t j
, const char *key
, launch_data_t value
);
718 static bool job_set_global_on_demand(job_t j
, bool val
);
719 static const char *job_active(job_t j
);
720 static void job_watch(job_t j
);
721 static void job_ignore(job_t j
);
722 static void job_cleanup_after_tracer(job_t j
);
723 static void job_reap(job_t j
);
724 static bool job_useless(job_t j
);
725 static bool job_keepalive(job_t j
);
726 static void job_dispatch_curious_jobs(job_t j
);
727 static void job_start(job_t j
);
728 static void job_start_child(job_t j
) __attribute__((noreturn
));
729 static void job_setup_attributes(job_t j
);
730 static bool job_setup_machport(job_t j
);
731 static kern_return_t
job_setup_exit_port(job_t j
);
732 static void job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
);
733 static void job_postfork_become_user(job_t j
);
734 static void job_postfork_test_user(job_t j
);
735 static void job_log_pids_with_weird_uids(job_t j
);
736 static void job_setup_exception_port(job_t j
, task_t target_task
);
737 static void job_callback(void *obj
, struct kevent
*kev
);
738 static void job_callback_proc(job_t j
, struct kevent
*kev
);
739 static void job_callback_timer(job_t j
, void *ident
);
740 static void job_callback_read(job_t j
, int ident
);
741 static void job_log_stray_pg(job_t j
);
742 static void job_log_children_without_exec(job_t j
);
743 static job_t
job_new_anonymous(jobmgr_t jm
, pid_t anonpid
) __attribute__((malloc
, nonnull
, warn_unused_result
));
744 static job_t
job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
) __attribute__((malloc
, nonnull(1,2), warn_unused_result
));
745 static job_t
job_new_alias(jobmgr_t jm
, job_t src
);
746 static job_t
job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
) __attribute__((malloc
, nonnull
, warn_unused_result
));
747 static job_t
job_new_subjob(job_t j
, uuid_t identifier
);
748 static void job_kill(job_t j
);
749 static void job_uncork_fork(job_t j
);
750 static void job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
751 static void job_log_error(job_t j
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
752 static bool job_log_bug(aslmsg asl_message
, void *ctx
, const char *message
);
753 static void job_log_perf_statistics(job_t j
);
754 static void job_set_exception_port(job_t j
, mach_port_t port
);
755 static kern_return_t
job_mig_spawn_internal(job_t j
, vm_offset_t indata
, mach_msg_type_number_t indataCnt
, mach_port_t asport
, job_t
*outj
);
756 static void job_open_shutdown_transaction(job_t ji
);
757 static void job_close_shutdown_transaction(job_t ji
);
758 static launch_data_t
job_do_legacy_ipc_request(job_t j
, launch_data_t request
, mach_port_t asport
);
759 static void job_setup_per_user_directory(job_t j
, uid_t uid
, const char *path
);
760 static void job_setup_per_user_directories(job_t j
, uid_t uid
, const char *label
);
762 static const struct {
765 } launchd_keys2limits
[] = {
766 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE
, RLIMIT_CORE
},
767 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU
, RLIMIT_CPU
},
768 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA
, RLIMIT_DATA
},
769 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE
, RLIMIT_FSIZE
},
770 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK
, RLIMIT_MEMLOCK
},
771 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE
, RLIMIT_NOFILE
},
772 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC
, RLIMIT_NPROC
},
773 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS
, RLIMIT_RSS
},
774 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK
, RLIMIT_STACK
},
777 static time_t cronemu(int mon
, int mday
, int hour
, int min
);
778 static time_t cronemu_wday(int wday
, int hour
, int min
);
779 static bool cronemu_mon(struct tm
*wtm
, int mon
, int mday
, int hour
, int min
);
780 static bool cronemu_mday(struct tm
*wtm
, int mday
, int hour
, int min
);
781 static bool cronemu_hour(struct tm
*wtm
, int hour
, int min
);
782 static bool cronemu_min(struct tm
*wtm
, int min
);
784 // miscellaneous file local functions
785 static size_t get_kern_max_proc(void);
786 static char **mach_cmd2argv(const char *string
);
787 static size_t our_strhash(const char *s
) __attribute__((pure
));
789 void eliminate_double_reboot(void);
791 #pragma mark XPC Domain Forward Declarations
792 static job_t
_xpc_domain_import_service(jobmgr_t jm
, launch_data_t pload
);
793 static int _xpc_domain_import_services(job_t j
, launch_data_t services
);
795 #pragma mark XPC Event Forward Declarations
796 static int xpc_event_find_channel(job_t j
, const char *stream
, struct machservice
**ms
);
797 static int xpc_event_get_event_name(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
798 static int xpc_event_set_event(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
799 static int xpc_event_copy_event(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
800 static int xpc_event_channel_check_in(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
801 static int xpc_event_channel_look_up(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
802 static int xpc_event_provider_check_in(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
803 static int xpc_event_provider_set_state(job_t j
, xpc_object_t request
, xpc_object_t
*reply
);
805 // file local globals
806 static job_t _launchd_embedded_god
= NULL
;
807 static size_t total_children
;
808 static size_t total_anon_children
;
809 static mach_port_t the_exception_server
;
810 static job_t workaround_5477111
;
811 static LIST_HEAD(, job_s
) s_needing_sessions
;
812 static LIST_HEAD(, eventsystem
) _s_event_systems
;
813 static struct eventsystem
*_launchd_support_system
;
814 static job_t _launchd_event_monitor
;
815 static job_t _launchd_xpc_bootstrapper
;
816 static job_t _launchd_shutdown_monitor
;
818 mach_port_t launchd_audit_port
= MACH_PORT_NULL
;
819 #if !TARGET_OS_EMBEDDED
820 au_asid_t launchd_audit_session
= AU_DEFAUDITSID
;
822 pid_t launchd_audit_session
= 0;
825 static int s_no_hang_fd
= -1;
827 // process wide globals
828 mach_port_t inherited_bootstrap_port
;
829 jobmgr_t root_jobmgr
;
830 bool launchd_shutdown_debugging
= false;
831 bool launchd_verbose_boot
= false;
832 bool launchd_embedded_handofgod
= false;
833 bool launchd_runtime_busy_time
= false;
838 struct socketgroup
*sg
;
839 struct machservice
*ms
;
841 if (j
->currently_ignored
) {
845 job_log(j
, LOG_DEBUG
, "Ignoring...");
847 j
->currently_ignored
= true;
849 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
850 socketgroup_ignore(j
, sg
);
853 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
854 machservice_ignore(j
, ms
);
861 struct socketgroup
*sg
;
862 struct machservice
*ms
;
864 if (!j
->currently_ignored
) {
868 job_log(j
, LOG_DEBUG
, "Watching...");
870 j
->currently_ignored
= false;
872 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
873 socketgroup_watch(j
, sg
);
876 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
877 machservice_watch(j
, ms
);
886 if (unlikely(!j
->p
|| j
->stopped
|| j
->anonymous
)) {
890 #if TARGET_OS_EMBEDDED
891 if (launchd_embedded_handofgod
&& _launchd_embedded_god
) {
892 if (!_launchd_embedded_god
->username
|| !j
->username
) {
897 if (strcmp(j
->username
, _launchd_embedded_god
->username
) != 0) {
901 } else if (launchd_embedded_handofgod
) {
907 j
->sent_signal_time
= runtime_get_opaque_time();
909 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Stopping job...");
912 error
= proc_terminate(j
->p
, &sig
);
914 job_log(j
, LOG_ERR
| LOG_CONSOLE
, "Could not terminate job: %d: %s", error
, strerror(error
));
915 job_log(j
, LOG_NOTICE
| LOG_CONSOLE
, "Using fallback option to terminate job...");
916 error
= kill2(j
->p
, SIGTERM
);
918 job_log(j
, LOG_ERR
, "Could not signal job: %d: %s", error
, strerror(error
));
927 j
->sent_sigkill
= true;
928 j
->clean_kill
= true;
929 error
= kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, LAUNCHD_SIGKILL_TIMER
, j
);
930 (void)job_assumes_zero_p(j
, error
);
932 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Sent job SIGKILL.");
935 if (j
->exit_timeout
) {
936 error
= kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, j
->exit_timeout
, j
);
937 (void)job_assumes_zero_p(j
, error
);
939 job_log(j
, LOG_NOTICE
, "This job has an infinite exit timeout");
941 job_log(j
, LOG_DEBUG
, "Sent job SIGTERM.");
944 job_log(j
, LOG_ERR
| LOG_CONSOLE
, "Job was sent unexpected signal: %d: %s", sig
, strsignal(sig
));
955 launch_data_t tmp
, tmp2
, tmp3
, r
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
961 if ((tmp
= launch_data_new_string(j
->label
))) {
962 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LABEL
);
964 if ((tmp
= launch_data_new_string(j
->mgr
->name
))) {
965 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
967 if ((tmp
= launch_data_new_bool(j
->ondemand
))) {
968 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_ONDEMAND
);
970 if ((tmp
= launch_data_new_integer(j
->last_exit_status
))) {
971 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LASTEXITSTATUS
);
973 if (j
->p
&& (tmp
= launch_data_new_integer(j
->p
))) {
974 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PID
);
976 if ((tmp
= launch_data_new_integer(j
->timeout
))) {
977 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_TIMEOUT
);
979 if (j
->prog
&& (tmp
= launch_data_new_string(j
->prog
))) {
980 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAM
);
982 if (j
->stdinpath
&& (tmp
= launch_data_new_string(j
->stdinpath
))) {
983 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDINPATH
);
985 if (j
->stdoutpath
&& (tmp
= launch_data_new_string(j
->stdoutpath
))) {
986 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDOUTPATH
);
988 if (j
->stderrpath
&& (tmp
= launch_data_new_string(j
->stderrpath
))) {
989 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDERRORPATH
);
991 if (likely(j
->argv
) && (tmp
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
994 for (i
= 0; i
< j
->argc
; i
++) {
995 if ((tmp2
= launch_data_new_string(j
->argv
[i
]))) {
996 launch_data_array_set_index(tmp
, tmp2
, i
);
1000 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
);
1003 if (j
->enable_transactions
&& (tmp
= launch_data_new_bool(true))) {
1004 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_ENABLETRANSACTIONS
);
1007 if (j
->session_create
&& (tmp
= launch_data_new_bool(true))) {
1008 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SESSIONCREATE
);
1011 if (j
->inetcompat
&& (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
1012 if ((tmp2
= launch_data_new_bool(j
->inetcompat_wait
))) {
1013 launch_data_dict_insert(tmp
, tmp2
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
);
1015 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
);
1018 if (!SLIST_EMPTY(&j
->sockets
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
1019 struct socketgroup
*sg
;
1022 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
1023 if ((tmp2
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
1024 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
1025 if ((tmp3
= launch_data_new_fd(sg
->fds
[i
]))) {
1026 launch_data_array_set_index(tmp2
, tmp3
, i
);
1029 launch_data_dict_insert(tmp
, tmp2
, sg
->name
);
1033 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SOCKETS
);
1036 if (!SLIST_EMPTY(&j
->machservices
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
1037 struct machservice
*ms
;
1041 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
1044 tmp3
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
1047 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
1048 launch_data_dict_insert(tmp3
, tmp2
, ms
->name
);
1051 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
1052 launch_data_dict_insert(tmp
, tmp2
, ms
->name
);
1056 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_MACHSERVICES
);
1059 launch_data_dict_insert(r
, tmp3
, LAUNCH_JOBKEY_PERJOBMACHSERVICES
);
1067 jobmgr_log_active_jobs(jobmgr_t jm
)
1069 const char *why_active
;
1073 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
1074 jobmgr_log_active_jobs(jmi
);
1077 int level
= LOG_DEBUG
;
1079 level
|= LOG_CONSOLE
;
1082 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
1083 if ((why_active
= job_active(ji
))) {
1085 job_log(ji
, level
, "%s", why_active
);
1088 (void)proc_get_dirty(ji
->p
, &flags
);
1089 if (!(flags
& PROC_DIRTY_TRACKED
)) {
1093 char *dirty
= "clean";
1094 if (flags
& PROC_DIRTY_IS_DIRTY
) {
1098 char *idle_exit
= "idle-exit unsupported";
1099 if (flags
& PROC_DIRTY_ALLOWS_IDLE_EXIT
) {
1100 idle_exit
= "idle-exit supported";
1103 job_log(ji
, level
, "Killability: %s/%s", dirty
, idle_exit
);
// Periodic liveness report for a job manager: logs how many normal and
// anonymous children are still alive, then dumps why each remaining job is
// still considered active (via jobmgr_log_active_jobs).
// NOTE(review): this chunk is a lossy extraction -- statements are split
// across lines and the function's braces, plus the guard line (original
// line 1113-ish) around the LOG_CONSOLE escalation, were dropped from this
// view. Only comments were added here; no code was changed.
1110 jobmgr_still_alive_with_check(jobmgr_t jm
)
1112 int level
= LOG_DEBUG
;
// An elided conditional precedes this escalation to the console
// (contents not visible in this extraction -- confirm against upstream).
1114 level
|= LOG_CONSOLE
;
1117 jobmgr_log(jm
, level
, "Still alive with %lu/%lu (normal/anonymous) children.", total_children
, total_anon_children
);
1118 jobmgr_log_active_jobs(jm
);
1123 jobmgr_shutdown(jobmgr_t jm
)
1126 jobmgr_log(jm
, LOG_DEBUG
, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm
->reboot_flags
));
1128 jm
->shutdown_time
= runtime_get_wall_time() / USEC_PER_SEC
;
1131 (void)localtime_r(&jm
->shutdown_time
, &curtime
);
1134 (void)asctime_r(&curtime
, date
);
1135 // Trim the new line that asctime_r(3) puts there for some reason.
1138 if (jm
== root_jobmgr
&& pid1_magic
) {
1139 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Userspace shutdown begun at: %s", date
);
1141 jobmgr_log(jm
, LOG_DEBUG
, "Job manager shutdown begun at: %s", date
);
1144 jm
->shutting_down
= true;
1146 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
1147 jobmgr_shutdown(jmi
);
1150 if (!jm
->parentmgr
) {
1152 // Spawn the shutdown monitor.
1153 if (_launchd_shutdown_monitor
&& !_launchd_shutdown_monitor
->p
) {
1154 job_log(_launchd_shutdown_monitor
, LOG_NOTICE
| LOG_CONSOLE
, "Starting shutdown monitor.");
1155 job_dispatch(_launchd_shutdown_monitor
, true);
1159 (void)jobmgr_assumes_zero_p(jm
, kevent_mod((uintptr_t)jm
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, 5, jm
));
1162 return jobmgr_do_garbage_collection(jm
);
1166 jobmgr_remove(jobmgr_t jm
)
1171 jobmgr_log(jm
, LOG_DEBUG
, "Removing job manager.");
1172 if (!SLIST_EMPTY(&jm
->submgrs
)) {
1174 while ((jmi
= SLIST_FIRST(&jm
->submgrs
))) {
1179 (void)jobmgr_assumes_zero(jm
, cnt
);
1182 while ((ji
= LIST_FIRST(&jm
->jobs
))) {
1183 if (!ji
->anonymous
&& ji
->p
!= 0) {
1184 job_log(ji
, LOG_ERR
, "Job is still active at job manager teardown.");
1191 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(jm
->req_port
));
1194 (void)jobmgr_assumes_zero(jm
, launchd_mport_close_recv(jm
->jm_port
));
1197 if (jm
->req_bsport
) {
1198 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(jm
->req_bsport
));
1200 if (jm
->req_excport
) {
1201 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(jm
->req_excport
));
1203 if (MACH_PORT_VALID(jm
->req_asport
)) {
1204 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(jm
->req_asport
));
1206 if (jm
->req_rport
) {
1207 kern_return_t kr
= xpc_call_wakeup(jm
->req_rport
, jm
->error
);
1208 if (!(kr
== KERN_SUCCESS
|| kr
== MACH_SEND_INVALID_DEST
)) {
1209 /* If the originator went away, the reply port will be a dead name,
1210 * and we expect this to fail.
1212 (void)jobmgr_assumes_zero(jm
, kr
);
1216 (void)jobmgr_assumes_zero(jm
, vm_deallocate(mach_task_self(), jm
->req_ctx
, jm
->req_ctx_sz
));
1219 time_t ts
= runtime_get_wall_time() / USEC_PER_SEC
;
1221 (void)localtime_r(&ts
, &curtime
);
1224 (void)asctime_r(&curtime
, date
);
1227 time_t delta
= ts
- jm
->shutdown_time
;
1228 if (jm
== root_jobmgr
&& pid1_magic
) {
1229 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Userspace shutdown finished at: %s", date
);
1230 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Userspace shutdown took approximately %ld second%s.", delta
, (delta
!= 1) ? "s" : "");
1232 jobmgr_log(jm
, LOG_DEBUG
, "Job manager shutdown finished at: %s", date
);
1233 jobmgr_log(jm
, LOG_DEBUG
, "Job manager shutdown took approximately %ld second%s.", delta
, (delta
!= 1) ? "s" : "");
1236 if (jm
->parentmgr
) {
1237 runtime_del_weak_ref();
1238 SLIST_REMOVE(&jm
->parentmgr
->submgrs
, jm
, jobmgr_s
, sle
);
1239 } else if (pid1_magic
) {
1240 eliminate_double_reboot();
1241 launchd_log_vm_stats();
1242 jobmgr_log_stray_children(jm
, true);
1243 jobmgr_log(root_jobmgr
, LOG_NOTICE
| LOG_CONSOLE
, "About to call: reboot(%s).", reboot_flags_to_C_names(jm
->reboot_flags
));
1245 (void)jobmgr_assumes_zero_p(jm
, reboot(jm
->reboot_flags
));
1247 jobmgr_log(jm
, LOG_DEBUG
, "About to exit");
1258 struct waiting_for_removal
*w4r
;
1259 struct calendarinterval
*ci
;
1260 struct semaphoreitem
*si
;
1261 struct socketgroup
*sg
;
1262 struct machservice
*ms
;
1263 struct limititem
*li
;
1267 /* HACK: Egregious code duplication. But as with machservice_delete(),
1268 * job aliases can't (and shouldn't) have any complex behaviors
1269 * associated with them.
1271 while ((ms
= SLIST_FIRST(&j
->machservices
))) {
1272 machservice_delete(j
, ms
, false);
1275 LIST_REMOVE(j
, sle
);
1276 LIST_REMOVE(j
, label_hash_sle
);
1281 #if TARGET_OS_EMBEDDED
1282 if (launchd_embedded_handofgod
&& _launchd_embedded_god
) {
1283 if (!(_launchd_embedded_god
->username
&& j
->username
)) {
1288 if (strcmp(j
->username
, _launchd_embedded_god
->username
) != 0) {
1292 } else if (launchd_embedded_handofgod
) {
1298 /* Do this BEFORE we check and see whether the job is still active. If we're
1299 * a sub-job, we're being removed due to the parent job removing us.
1300 * Therefore, the parent job will free itself after this call completes. So
1301 * if we defer removing ourselves from the parent's list, we'll crash when
1302 * we finally get around to it.
1304 if (j
->dedicated_instance
&& !j
->former_subjob
) {
1305 LIST_REMOVE(j
, subjob_sle
);
1306 j
->former_subjob
= true;
1309 if (unlikely(j
->p
)) {
1313 job_log(j
, LOG_DEBUG
, "Removal pended until the job exits");
1315 if (!j
->removal_pending
) {
1316 j
->removal_pending
= true;
1326 job_dispatch_curious_jobs(j
);
1329 ipc_close_all_with_job(j
);
1331 if (j
->forced_peers_to_demand_mode
) {
1332 job_set_global_on_demand(j
, false);
1335 if (job_assumes_zero(j
, j
->fork_fd
)) {
1336 (void)posix_assumes_zero(runtime_close(j
->fork_fd
));
1340 (void)posix_assumes_zero(runtime_close(j
->stdin_fd
));
1344 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->j_port
));
1347 while ((sg
= SLIST_FIRST(&j
->sockets
))) {
1348 socketgroup_delete(j
, sg
);
1350 while ((ci
= SLIST_FIRST(&j
->cal_intervals
))) {
1351 calendarinterval_delete(j
, ci
);
1353 while ((ei
= SLIST_FIRST(&j
->env
))) {
1354 envitem_delete(j
, ei
, false);
1356 while ((ei
= SLIST_FIRST(&j
->global_env
))) {
1357 envitem_delete(j
, ei
, true);
1359 while ((li
= SLIST_FIRST(&j
->limits
))) {
1360 limititem_delete(j
, li
);
1362 while ((ms
= SLIST_FIRST(&j
->machservices
))) {
1363 machservice_delete(j
, ms
, false);
1365 while ((si
= SLIST_FIRST(&j
->semaphores
))) {
1366 semaphoreitem_delete(j
, si
);
1368 while ((w4r
= SLIST_FIRST(&j
->removal_watchers
))) {
1369 waiting4removal_delete(j
, w4r
);
1372 struct externalevent
*eei
= NULL
;
1373 while ((eei
= LIST_FIRST(&j
->events
))) {
1374 externalevent_delete(eei
);
1377 if (j
->event_monitor
) {
1378 _launchd_event_monitor
= NULL
;
1380 if (j
->xpc_bootstrapper
) {
1381 _launchd_xpc_bootstrapper
= NULL
;
1393 if (j
->workingdir
) {
1394 free(j
->workingdir
);
1405 if (j
->stdoutpath
) {
1406 free(j
->stdoutpath
);
1408 if (j
->stderrpath
) {
1409 free(j
->stderrpath
);
1411 if (j
->alt_exc_handler
) {
1412 free(j
->alt_exc_handler
);
1415 if (j
->seatbelt_profile
) {
1416 free(j
->seatbelt_profile
);
1420 if (j
->quarantine_data
) {
1421 free(j
->quarantine_data
);
1427 if (j
->start_interval
) {
1428 runtime_del_weak_ref();
1429 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
));
1431 if (j
->exit_timeout
) {
1432 /* If this fails, it just means the timer's already fired, so no need to
1433 * wrap it in an assumes() macro.
1435 (void)kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
1437 if (j
->asport
!= MACH_PORT_NULL
) {
1438 (void)job_assumes_zero(j
, launchd_mport_deallocate(j
->asport
));
1440 if (!uuid_is_null(j
->expected_audit_uuid
)) {
1441 LIST_REMOVE(j
, needing_session_sle
);
1443 if (j
->embedded_god
) {
1444 _launchd_embedded_god
= NULL
;
1446 if (j
->shutdown_monitor
) {
1447 _launchd_shutdown_monitor
= NULL
;
1450 (void)kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
1452 LIST_REMOVE(j
, sle
);
1453 LIST_REMOVE(j
, label_hash_sle
);
1457 LIST_FOREACH_SAFE(ji
, &j
->subjobs
, subjob_sle
, jit
) {
1461 job_log(j
, LOG_DEBUG
, "Removed");
1463 j
->kqjob_callback
= (kq_callback
)0x8badf00d;
1468 socketgroup_setup(launch_data_t obj
, const char *key
, void *context
)
1470 launch_data_t tmp_oai
;
1472 size_t i
, fd_cnt
= 1;
1475 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
1476 fd_cnt
= launch_data_array_get_count(obj
);
1479 fds
= alloca(fd_cnt
* sizeof(int));
1481 for (i
= 0; i
< fd_cnt
; i
++) {
1482 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
1483 tmp_oai
= launch_data_array_get_index(obj
, i
);
1488 fds
[i
] = launch_data_get_fd(tmp_oai
);
1491 socketgroup_new(j
, key
, fds
, fd_cnt
);
1493 ipc_revoke_fds(obj
);
// Toggles the job manager's "global on-demand" mode on behalf of job `j`,
// maintaining a reference count (global_on_demand_cnt) of how many jobs
// have forced the mode. When the count drops back to zero, all jobs in the
// manager are re-dispatched.
// NOTE(review): lossy extraction -- braces, early returns and the `else`
// separating the ++/-- paths (original lines 1498, 1500, 1502-1504, 1507,
// 1509-1510) are missing from this view. Only comments were added; no code
// was changed.
1497 job_set_global_on_demand(job_t j
, bool val
)
// Early-out branches: the caller is requesting a state this job has
// already forced (or already released) -- bodies elided in this view.
1499 if (j
->forced_peers_to_demand_mode
&& val
) {
1501 } else if (!j
->forced_peers_to_demand_mode
&& !val
) {
// Record the new state; bump or drop the manager-wide forcing count.
1505 if ((j
->forced_peers_to_demand_mode
= val
)) {
1506 j
->mgr
->global_on_demand_cnt
++;
// An elided `else` presumably separates this decrement from the
// increment above -- confirm against the upstream source.
1508 j
->mgr
->global_on_demand_cnt
--;
// Last forcing job released the mode: kick every job in the manager.
1511 if (j
->mgr
->global_on_demand_cnt
== 0) {
1512 jobmgr_dispatch_all(j
->mgr
, false);
// Creates the job's Mach receive port (j->j_port), adds it to the runtime
// port set so job_server services messages on it, and requests a
// no-senders notification so launchd learns when all clients are gone.
// NOTE(review): lossy extraction -- the function braces, `goto`/cleanup
// labels and return statements (original lines 1520, 1522-1524, 1526-1528,
// 1531-1535, 1537+) are not visible here. Only comments were added; no
// code was changed.
1519 job_setup_machport(job_t j
)
1521 if (job_assumes_zero(j
, launchd_mport_create_recv(&j
->j_port
)) != KERN_SUCCESS
) {
1525 if (job_assumes_zero(j
, runtime_add_mport(j
->j_port
, job_server
)) != KERN_SUCCESS
) {
1529 if (job_assumes_zero(j
, launchd_mport_notify_req(j
->j_port
, MACH_NOTIFY_NO_SENDERS
)) != KERN_SUCCESS
) {
// Notification request failed: tear the receive right back down.
1530 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->j_port
));
// Error path reached from an earlier failure (elided label) -- close the
// receive right before bailing out.
1536 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->j_port
));
// Allocates the Mach port pair used to report the job's exit status: a
// receive right kept in j->exit_status_port plus a send-once right
// (j->exit_status_dest) minted for delivery of the status message.
// NOTE(review): lossy extraction -- braces, the initializer contents of
// `limits` (original lines 1550-1551) and the trailing return are not
// visible in this view. Only comments were added; no code was changed.
1542 job_setup_exit_port(job_t j
)
1544 kern_return_t kr
= launchd_mport_create_recv(&j
->exit_status_port
);
1545 if (job_assumes_zero(j
, kr
) != KERN_SUCCESS
) {
1546 return MACH_PORT_NULL
;
// Constrain the port via mach_port_set_attributes; the designated
// initializer lines for `limits` were elided from this extraction.
1549 struct mach_port_limits limits
= {
1552 kr
= mach_port_set_attributes(mach_task_self(), j
->exit_status_port
, MACH_PORT_LIMITS_INFO
, (mach_port_info_t
)&limits
, sizeof(limits
));
1553 (void)job_assumes_zero(j
, kr
);
1555 kr
= launchd_mport_make_send_once(j
->exit_status_port
, &j
->exit_status_dest
);
// Could not mint the send-once right: undo the receive right and clear
// the field so callers see MACH_PORT_NULL.
1556 if (job_assumes_zero(j
, kr
) != KERN_SUCCESS
) {
1557 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->exit_status_port
));
1558 j
->exit_status_port
= MACH_PORT_NULL
;
1565 job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
)
1567 const char **argv
= (const char **)mach_cmd2argv(cmd
);
1574 jr
= job_new(j
->mgr
, AUTO_PICK_LEGACY_LABEL
, NULL
, argv
);
1577 // Job creation can be denied during shutdown.
1578 if (unlikely(jr
== NULL
)) {
1584 jr
->legacy_mach_job
= true;
1585 jr
->abandon_pg
= true;
1586 jr
->priv_port_has_senders
= true; // the IPC that called us will make-send on this port
1588 if (!job_setup_machport(jr
)) {
1592 job_log(jr
, LOG_INFO
, "Legacy%s server created", ond
? " on-demand" : "");
1604 job_new_anonymous(jobmgr_t jm
, pid_t anonpid
)
1606 struct proc_bsdshortinfo proc
;
1607 bool shutdown_state
;
1608 job_t jp
= NULL
, jr
= NULL
;
1609 uid_t kp_euid
, kp_uid
, kp_svuid
;
1610 gid_t kp_egid
, kp_gid
, kp_svgid
;
1617 if (anonpid
>= 100000) {
1618 /* The kernel current defines PID_MAX to be 99999, but that define isn't
1621 launchd_syslog(LOG_WARNING
, "Did PID_MAX change? Got request from PID: %d", anonpid
);
1626 /* libproc returns the number of bytes written into the buffer upon success,
1627 * zero on failure. I'd much rather it return -1 on failure, like sysctl(3).
1629 if (proc_pidinfo(anonpid
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
1630 if (errno
!= ESRCH
) {
1631 (void)jobmgr_assumes_zero(jm
, errno
);
1636 if (proc
.pbsi_comm
[0] == '\0') {
1637 launchd_syslog(LOG_WARNING
, "Blank command for PID: %d", anonpid
);
1642 if (unlikely(proc
.pbsi_status
== SZOMB
)) {
1643 jobmgr_log(jm
, LOG_DEBUG
, "Tried to create an anonymous job for zombie PID %u: %s", anonpid
, proc
.pbsi_comm
);
1646 if (unlikely(proc
.pbsi_flags
& P_SUGID
)) {
1647 jobmgr_log(jm
, LOG_DEBUG
, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid
, proc
.pbsi_comm
);
1650 kp_euid
= proc
.pbsi_uid
;
1651 kp_uid
= proc
.pbsi_ruid
;
1652 kp_svuid
= proc
.pbsi_svuid
;
1653 kp_egid
= proc
.pbsi_gid
;
1654 kp_gid
= proc
.pbsi_rgid
;
1655 kp_svgid
= proc
.pbsi_svgid
;
1657 if (unlikely(kp_euid
!= kp_uid
|| kp_euid
!= kp_svuid
|| kp_uid
!= kp_svuid
|| kp_egid
!= kp_gid
|| kp_egid
!= kp_svgid
|| kp_gid
!= kp_svgid
)) {
1658 jobmgr_log(jm
, LOG_DEBUG
, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
1659 kp_euid
, kp_uid
, kp_svuid
, kp_egid
, kp_gid
, kp_svgid
, anonpid
, proc
.pbsi_comm
);
1662 /* "Fix" for when the kernel turns the process tree into a weird, cyclic
1665 * See <rdar://problem/7264615> for the symptom and <rdar://problem/5020256>
1666 * as to why this can happen.
1668 if ((pid_t
)proc
.pbsi_ppid
== anonpid
) {
1669 jobmgr_log(jm
, LOG_WARNING
, "Process has become its own parent through ptrace(3). Ignoring: %s", proc
.pbsi_comm
);
1674 /* HACK: Normally, job_new() returns an error during shutdown, but anonymous
1675 * jobs can pop up during shutdown and need to talk to us.
1677 if (unlikely(shutdown_state
= jm
->shutting_down
)) {
1678 jm
->shutting_down
= false;
1681 // We only set requestor_pid for XPC domains.
1682 const char *whichlabel
= (jm
->req_pid
== anonpid
) ? AUTO_PICK_XPC_LABEL
: AUTO_PICK_ANONYMOUS_LABEL
;
1683 if ((jr
= job_new(jm
, whichlabel
, proc
.pbsi_comm
, NULL
))) {
1684 u_int proc_fflags
= NOTE_EXEC
|NOTE_FORK
|NOTE_EXIT
;
1686 total_anon_children
++;
1687 jr
->anonymous
= true;
1690 // Anonymous process reaping is messy.
1691 LIST_INSERT_HEAD(&jm
->active_jobs
[ACTIVE_JOB_HASH(jr
->p
)], jr
, pid_hash_sle
);
1693 if (unlikely(kevent_mod(jr
->p
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
) == -1)) {
1694 if (errno
!= ESRCH
) {
1695 (void)job_assumes_zero(jr
, errno
);
1698 // Zombies interact weirdly with kevent(3).
1699 job_log(jr
, LOG_ERR
, "Failed to add kevent for PID %u. Will unload at MIG return", jr
->p
);
1700 jr
->unload_at_mig_return
= true;
1703 if (unlikely(shutdown_state
)) {
1704 job_log(jr
, LOG_APPLEONLY
, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
1707 job_log(jr
, LOG_DEBUG
, "Created PID %u anonymously by PPID %u%s%s", anonpid
, proc
.pbsi_ppid
, jp
? ": " : "", jp
? jp
->label
: "");
1709 (void)osx_assumes_zero(errno
);
1712 // Undo our hack from above.
1713 if (unlikely(shutdown_state
)) {
1714 jm
->shutting_down
= true;
1717 /* This is down here to prevent infinite recursion due to a process
1718 * attaching to its parent through ptrace(3) -- causing a cycle in the
1719 * process tree and thereby not making it a tree anymore. We need to make
1720 * sure that the anonymous job has been added to the process list so that
1721 * we'll find the tracing parent PID of the parent process, which is the
1722 * child, when we go looking for it in jobmgr_find_by_pid().
1724 * <rdar://problem/7264615>
1726 switch (proc
.pbsi_ppid
) {
1736 jp
= jobmgr_find_by_pid(jm
, proc
.pbsi_ppid
, true);
1737 if (jobmgr_assumes(jm
, jp
!= NULL
)) {
1738 if (jp
&& !jp
->anonymous
&& unlikely(!(proc
.pbsi_flags
& P_EXEC
))) {
1739 job_log(jp
, LOG_DEBUG
, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc
.pbsi_pid
);
1749 job_new_subjob(job_t j
, uuid_t identifier
)
1752 uuid_string_t idstr
;
1753 uuid_unparse(identifier
, idstr
);
1754 size_t label_sz
= snprintf(label
, 0, "%s.%s", j
->label
, idstr
);
1756 job_t nj
= (struct job_s
*)calloc(1, sizeof(struct job_s
) + label_sz
+ 1);
1758 nj
->kqjob_callback
= job_callback
;
1760 nj
->min_run_time
= j
->min_run_time
;
1761 nj
->timeout
= j
->timeout
;
1762 nj
->exit_timeout
= j
->exit_timeout
;
1764 snprintf((char *)nj
->label
, label_sz
+ 1, "%s.%s", j
->label
, idstr
);
1766 // Set all our simple Booleans that are applicable.
1767 nj
->debug
= j
->debug
;
1768 nj
->ondemand
= j
->ondemand
;
1769 nj
->checkedin
= true;
1770 nj
->low_pri_io
= j
->low_pri_io
;
1771 nj
->setmask
= j
->setmask
;
1772 nj
->wait4debugger
= j
->wait4debugger
;
1773 nj
->internal_exc_handler
= j
->internal_exc_handler
;
1774 nj
->setnice
= j
->setnice
;
1775 nj
->abandon_pg
= j
->abandon_pg
;
1776 nj
->ignore_pg_at_shutdown
= j
->ignore_pg_at_shutdown
;
1777 nj
->deny_job_creation
= j
->deny_job_creation
;
1778 nj
->enable_transactions
= j
->enable_transactions
;
1779 nj
->needs_kickoff
= j
->needs_kickoff
;
1780 nj
->currently_ignored
= true;
1781 nj
->dedicated_instance
= true;
1782 nj
->xpc_service
= j
->xpc_service
;
1783 nj
->xpc_bootstrapper
= j
->xpc_bootstrapper
;
1786 uuid_copy(nj
->instance_id
, identifier
);
1788 // These jobs are purely on-demand Mach jobs.
1789 // {Hard | Soft}ResourceLimits are not supported.
1790 // JetsamPriority is not supported.
1793 nj
->prog
= strdup(j
->prog
);
1796 size_t sz
= malloc_size(j
->argv
);
1797 nj
->argv
= (char **)malloc(sz
);
1798 if (nj
->argv
!= NULL
) {
1799 // This is the start of our strings.
1800 char *p
= ((char *)nj
->argv
) + ((j
->argc
+ 1) * sizeof(char *));
1803 for (i
= 0; i
< j
->argc
; i
++) {
1804 (void)strcpy(p
, j
->argv
[i
]);
1806 p
+= (strlen(j
->argv
[i
]) + 1);
1810 (void)job_assumes_zero(nj
, errno
);
1816 struct machservice
*msi
= NULL
;
1817 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
1818 /* Only copy MachServices that were actually declared in the plist.
1819 * So skip over per-PID ones and ones that were created via
1820 * bootstrap_register().
1823 mach_port_t mp
= MACH_PORT_NULL
;
1824 struct machservice
*msj
= machservice_new(nj
, msi
->name
, &mp
, msi
->per_pid
);
1826 msj
->reset
= msi
->reset
;
1827 msj
->delete_on_destruction
= msi
->delete_on_destruction
;
1828 msj
->drain_one_on_crash
= msi
->drain_one_on_crash
;
1829 msj
->drain_all_on_crash
= msi
->drain_all_on_crash
;
1831 (void)job_assumes_zero(nj
, errno
);
1836 // We ignore global environment variables.
1837 struct envitem
*ei
= NULL
;
1838 SLIST_FOREACH(ei
, &j
->env
, sle
) {
1839 if (envitem_new(nj
, ei
->key
, ei
->value
, false)) {
1840 (void)job_assumes_zero(nj
, errno
);
1844 uuid_unparse(identifier
, val
);
1845 if (envitem_new(nj
, LAUNCH_ENV_INSTANCEID
, val
, false)) {
1846 (void)job_assumes_zero(nj
, errno
);
1850 nj
->rootdir
= strdup(j
->rootdir
);
1852 if (j
->workingdir
) {
1853 nj
->workingdir
= strdup(j
->workingdir
);
1856 nj
->username
= strdup(j
->username
);
1859 nj
->groupname
= strdup(j
->groupname
);
1862 /* FIXME: We shouldn't redirect all the output from these jobs to the
1863 * same file. We should uniquify the file names. But this hasn't shown
1864 * to be a problem in practice.
1867 nj
->stdinpath
= strdup(j
->stdinpath
);
1869 if (j
->stdoutpath
) {
1870 nj
->stdoutpath
= strdup(j
->stdinpath
);
1872 if (j
->stderrpath
) {
1873 nj
->stderrpath
= strdup(j
->stderrpath
);
1875 if (j
->alt_exc_handler
) {
1876 nj
->alt_exc_handler
= strdup(j
->alt_exc_handler
);
1879 if (j
->seatbelt_profile
) {
1880 nj
->seatbelt_profile
= strdup(j
->seatbelt_profile
);
1885 if (j
->quarantine_data
) {
1886 nj
->quarantine_data
= strdup(j
->quarantine_data
);
1888 nj
->quarantine_data_sz
= j
->quarantine_data_sz
;
1891 size_t sz
= malloc_size(j
->j_binpref
);
1892 nj
->j_binpref
= (cpu_type_t
*)malloc(sz
);
1893 if (nj
->j_binpref
) {
1894 memcpy(&nj
->j_binpref
, &j
->j_binpref
, sz
);
1896 (void)job_assumes_zero(nj
, errno
);
1900 if (j
->asport
!= MACH_PORT_NULL
) {
1901 (void)job_assumes_zero(nj
, launchd_mport_copy_send(j
->asport
));
1902 nj
->asport
= j
->asport
;
1905 LIST_INSERT_HEAD(&nj
->mgr
->jobs
, nj
, sle
);
1907 jobmgr_t where2put
= root_jobmgr
;
1908 if (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) {
1911 LIST_INSERT_HEAD(&where2put
->label_hash
[hash_label(nj
->label
)], nj
, label_hash_sle
);
1912 LIST_INSERT_HEAD(&j
->subjobs
, nj
, subjob_sle
);
1914 (void)osx_assumes_zero(errno
);
1921 job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
)
1923 const char *const *argv_tmp
= argv
;
1924 char tmp_path
[PATH_MAX
];
1925 char auto_label
[1000];
1926 const char *bn
= NULL
;
1928 size_t minlabel_len
;
1932 __OSX_COMPILETIME_ASSERT__(offsetof(struct job_s
, kqjob_callback
) == 0);
1934 if (unlikely(jm
->shutting_down
)) {
1939 if (unlikely(prog
== NULL
&& argv
== NULL
)) {
1944 /* I'd really like to redo this someday. Anonymous jobs carry all the
1945 * baggage of managed jobs with them, even though most of it is unused.
1946 * Maybe when we have Objective-C objects in libSystem, there can be a base
1947 * job type that anonymous and managed jobs inherit from...
1949 char *anon_or_legacy
= (label
== AUTO_PICK_ANONYMOUS_LABEL
) ? "anonymous" : "mach_init";
1950 if (unlikely(label
== AUTO_PICK_LEGACY_LABEL
|| label
== AUTO_PICK_ANONYMOUS_LABEL
)) {
1954 strlcpy(tmp_path
, argv
[0], sizeof(tmp_path
));
1955 // prog for auto labels is kp.kp_kproc.p_comm.
1956 bn
= basename(tmp_path
);
1959 (void)snprintf(auto_label
, sizeof(auto_label
), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy
, bn
);
1961 /* This is so we can do gross things later. See NOTE_EXEC for anonymous
1964 minlabel_len
= strlen(label
) + MAXCOMLEN
;
1966 if (label
== AUTO_PICK_XPC_LABEL
) {
1967 minlabel_len
= snprintf(auto_label
, sizeof(auto_label
), "com.apple.xpc.domain-owner.%s", jm
->owner
);
1969 minlabel_len
= strlen(label
);
1973 j
= calloc(1, sizeof(struct job_s
) + minlabel_len
+ 1);
1976 (void)osx_assumes_zero(errno
);
1980 if (unlikely(label
== auto_label
)) {
1981 (void)snprintf((char *)j
->label
, strlen(label
) + 1, "%p.%s.%s", j
, anon_or_legacy
, bn
);
1983 (void)strcpy((char *)j
->label
, (label
== AUTO_PICK_XPC_LABEL
) ? auto_label
: label
);
1986 j
->kqjob_callback
= job_callback
;
1988 j
->min_run_time
= LAUNCHD_MIN_JOB_RUN_TIME
;
1989 j
->timeout
= RUNTIME_ADVISABLE_IDLE_TIMEOUT
;
1990 j
->exit_timeout
= LAUNCHD_DEFAULT_EXIT_TIMEOUT
;
1991 j
->currently_ignored
= true;
1993 j
->checkedin
= true;
1994 j
->jetsam_priority
= DEFAULT_JETSAM_PRIORITY
;
1995 j
->jetsam_memlimit
= -1;
1996 uuid_clear(j
->expected_audit_uuid
);
1997 #if TARGET_OS_EMBEDDED
1998 /* Run embedded daemons as background by default. SpringBoard jobs are
1999 * Interactive by default. Unfortunately, so many daemons have opted into
2000 * this priority band that its usefulness is highly questionable.
2002 * See <rdar://problem/9539873>.
2004 if (launchd_embedded_handofgod
) {
2005 j
->pstype
= POSIX_SPAWN_IOS_INTERACTIVE
;
2008 j
->pstype
= POSIX_SPAWN_IOS_APPLE_DAEMON_START
;
2013 j
->prog
= strdup(prog
);
2015 (void)osx_assumes_zero(errno
);
2021 while (*argv_tmp
++) {
2025 for (i
= 0; i
< j
->argc
; i
++) {
2026 cc
+= strlen(argv
[i
]) + 1;
2029 j
->argv
= malloc((j
->argc
+ 1) * sizeof(char *) + cc
);
2031 (void)job_assumes_zero(j
, errno
);
2035 co
= ((char *)j
->argv
) + ((j
->argc
+ 1) * sizeof(char *));
2037 for (i
= 0; i
< j
->argc
; i
++) {
2039 (void)strcpy(co
, argv
[i
]);
2040 co
+= strlen(argv
[i
]) + 1;
2045 // Sssshhh... don't tell anyone.
2046 if (strcmp(j
->label
, "com.apple.WindowServer") == 0) {
2047 j
->has_console
= true;
2050 LIST_INSERT_HEAD(&jm
->jobs
, j
, sle
);
2052 jobmgr_t where2put_label
= root_jobmgr
;
2053 if (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) {
2054 where2put_label
= j
->mgr
;
2056 LIST_INSERT_HEAD(&where2put_label
->label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
2057 uuid_clear(j
->expected_audit_uuid
);
2059 job_log(j
, LOG_DEBUG
, "Conceived");
2073 job_new_alias(jobmgr_t jm
, job_t src
)
2075 if (job_find(jm
, src
->label
)) {
2080 job_t j
= calloc(1, sizeof(struct job_s
) + strlen(src
->label
) + 1);
2082 (void)osx_assumes_zero(errno
);
2086 (void)strcpy((char *)j
->label
, src
->label
);
2087 LIST_INSERT_HEAD(&jm
->jobs
, j
, sle
);
2088 LIST_INSERT_HEAD(&jm
->label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
2089 /* Bad jump address. The kqueue callback for aliases should never be
2092 j
->kqjob_callback
= (kq_callback
)0xfa1afe1;
2096 struct machservice
*msi
= NULL
;
2097 SLIST_FOREACH(msi
, &src
->machservices
, sle
) {
2098 if (!machservice_new_alias(j
, msi
)) {
2099 jobmgr_log(jm
, LOG_ERR
, "Failed to alias job: %s", src
->label
);
2108 job_log(j
, LOG_DEBUG
, "Aliased service into domain: %s", jm
->name
);
2115 job_import(launch_data_t pload
)
2117 job_t j
= jobmgr_import2(root_jobmgr
, pload
);
2119 if (unlikely(j
== NULL
)) {
2123 /* Since jobs are effectively stalled until they get security sessions
2124 * assigned to them, we may wish to reconsider this behavior of calling the
2125 * job "enabled" as far as other jobs with the OtherJobEnabled KeepAlive
2128 job_dispatch_curious_jobs(j
);
2129 return job_dispatch(j
, false);
2133 job_import_bulk(launch_data_t pload
)
2135 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
2137 size_t i
, c
= launch_data_array_get_count(pload
);
2139 ja
= alloca(c
* sizeof(job_t
));
2141 for (i
= 0; i
< c
; i
++) {
2142 if ((likely(ja
[i
] = jobmgr_import2(root_jobmgr
, launch_data_array_get_index(pload
, i
)))) && errno
!= ENEEDAUTH
) {
2145 launch_data_array_set_index(resp
, launch_data_new_errno(errno
), i
);
2148 for (i
= 0; i
< c
; i
++) {
2149 if (likely(ja
[i
])) {
2150 job_dispatch_curious_jobs(ja
[i
]);
2151 job_dispatch(ja
[i
], false);
2159 job_import_bool(job_t j
, const char *key
, bool value
)
2161 bool found_key
= false;
2166 if (strcasecmp(key
, LAUNCH_JOBKEY_ABANDONPROCESSGROUP
) == 0) {
2167 j
->abandon_pg
= value
;
2173 if (strcasecmp(key
, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN
) == 0) {
2174 j
->dirty_at_shutdown
= value
;
2180 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
2181 j
->ondemand
= !value
;
2187 if (strcasecmp(key
, LAUNCH_JOBKEY_ONDEMAND
) == 0) {
2188 j
->ondemand
= value
;
2194 if (strcasecmp(key
, LAUNCH_JOBKEY_DEBUG
) == 0) {
2197 } else if (strcasecmp(key
, LAUNCH_JOBKEY_DISABLED
) == 0) {
2198 (void)job_assumes(j
, !value
);
2200 } else if (strcasecmp(key
, LAUNCH_JOBKEY_DISABLEASLR
) == 0) {
2201 j
->disable_aslr
= value
;
2207 if (strcasecmp(key
, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST
) == 0) {
2208 job_log(j
, LOG_PERF
, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST
, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN
);
2209 j
->dirty_at_shutdown
= value
;
2215 if (strcasecmp(key
, LAUNCH_JOBKEY_SESSIONCREATE
) == 0) {
2216 j
->session_create
= value
;
2218 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTONMOUNT
) == 0) {
2219 j
->start_on_mount
= value
;
2221 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SERVICEIPC
) == 0) {
2222 // this only does something on Mac OS X 10.4 "Tiger"
2224 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SHUTDOWNMONITOR
) == 0) {
2225 if (_launchd_shutdown_monitor
) {
2226 job_log(j
, LOG_ERR
, "Only one job may monitor shutdown.");
2228 j
->shutdown_monitor
= true;
2229 _launchd_shutdown_monitor
= j
;
2236 if (strcasecmp(key
, LAUNCH_JOBKEY_LOWPRIORITYIO
) == 0) {
2237 j
->low_pri_io
= value
;
2239 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LAUNCHONLYONCE
) == 0) {
2240 j
->only_once
= value
;
2246 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
2247 j
->internal_exc_handler
= value
;
2249 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MULTIPLEINSTANCES
) == 0) {
2250 j
->multiple_instances
= value
;
2256 if (strcasecmp(key
, LAUNCH_JOBKEY_INITGROUPS
) == 0) {
2257 if (getuid() != 0) {
2258 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
2261 j
->no_init_groups
= !value
;
2263 } else if (strcasecmp(key
, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN
) == 0) {
2264 j
->ignore_pg_at_shutdown
= value
;
2270 if (strcasecmp(key
, LAUNCH_JOBKEY_RUNATLOAD
) == 0) {
2272 // We don't want value == false to change j->start_pending
2273 j
->start_pending
= true;
2280 if (strcasecmp(key
, LAUNCH_JOBKEY_ENABLEGLOBBING
) == 0) {
2281 j
->globargv
= value
;
2283 } else if (strcasecmp(key
, LAUNCH_JOBKEY_ENABLETRANSACTIONS
) == 0) {
2284 j
->enable_transactions
= value
;
2286 } else if (strcasecmp(key
, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL
) == 0) {
2287 j
->debug_before_kill
= value
;
2289 } else if (strcasecmp(key
, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION
) == 0) {
2290 #if TARGET_OS_EMBEDDED
2291 if (!_launchd_embedded_god
) {
2292 if ((j
->embedded_god
= value
)) {
2293 _launchd_embedded_god
= j
;
2296 job_log(j
, LOG_ERR
, "Job tried to claim %s after it has already been claimed.", key
);
2299 job_log(j
, LOG_ERR
, "This key is not supported on this platform: %s", key
);
2302 } else if (strcasecmp(key
, LAUNCH_JOBKEY_EVENTMONITOR
) == 0) {
2303 if (!_launchd_event_monitor
) {
2304 j
->event_monitor
= value
;
2306 _launchd_event_monitor
= j
;
2309 job_log(j
, LOG_NOTICE
, "Job tried to steal event monitoring responsibility from: %s", _launchd_event_monitor
->label
);
2316 if (strcasecmp(key
, LAUNCH_JOBKEY_WAITFORDEBUGGER
) == 0) {
2317 j
->wait4debugger
= value
;
2323 if (strcasecmp(key
, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER
) == 0) {
2325 if (_launchd_xpc_bootstrapper
) {
2326 job_log(j
, LOG_ERR
, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _launchd_xpc_bootstrapper
->label
);
2328 _launchd_xpc_bootstrapper
= j
;
2329 j
->xpc_bootstrapper
= value
;
2332 job_log(j
, LOG_ERR
, "Non-daemon tried to claim XPC bootstrapper property.");
2341 if (unlikely(!found_key
)) {
2342 job_log(j
, LOG_WARNING
, "Unknown key for boolean: %s", key
);
2347 job_import_string(job_t j
, const char *key
, const char *value
)
2349 char **where2put
= NULL
;
2354 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
2355 where2put
= &j
->alt_exc_handler
;
2360 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAM
) == 0) {
2362 } else if (strcasecmp(key
, LAUNCH_JOBKEY_POSIXSPAWNTYPE
) == 0) {
2363 if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP
) == 0) {
2364 #if !TARGET_OS_EMBEDDED
2365 j
->pstype
= POSIX_SPAWN_OSX_TALAPP_START
;
2367 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_WIDGET
) == 0) {
2368 #if !TARGET_OS_EMBEDDED
2369 j
->pstype
= POSIX_SPAWN_OSX_DBCLIENT_START
;
2371 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_IOSAPP
) == 0) {
2372 #if TARGET_OS_EMBEDDED
2373 j
->pstype
= POSIX_SPAWN_IOS_APP_START
;
2375 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_INTERACTIVE
) == 0) {
2376 #if TARGET_OS_EMBEDDED
2377 j
->pstype
= POSIX_SPAWN_IOS_INTERACTIVE
;
2379 } else if (strcasecmp(value
, LAUNCH_KEY_POSIXSPAWNTYPE_BACKGROUND
) == 0) {
2380 #if TARGET_OS_EMBEDDED
2381 j
->pstype
= POSIX_SPAWN_IOS_APPLE_DAEMON_START
;
2383 } else if (strcasecmp(value
, "Adaptive") == 0) {
2386 job_log(j
, LOG_ERR
, "Unknown value for key %s: %s", key
, value
);
2393 if (strcasecmp(key
, LAUNCH_JOBKEY_LABEL
) == 0) {
2395 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
2397 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
2399 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
2405 if (strcasecmp(key
, LAUNCH_JOBKEY_ROOTDIRECTORY
) == 0) {
2406 if (getuid() != 0) {
2407 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
2410 where2put
= &j
->rootdir
;
2415 if (strcasecmp(key
, LAUNCH_JOBKEY_WORKINGDIRECTORY
) == 0) {
2416 where2put
= &j
->workingdir
;
2421 if (strcasecmp(key
, LAUNCH_JOBKEY_USERNAME
) == 0) {
2422 if (getuid() != 0) {
2423 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
2425 } else if (strcmp(value
, "root") == 0) {
2428 where2put
= &j
->username
;
2433 if (strcasecmp(key
, LAUNCH_JOBKEY_GROUPNAME
) == 0) {
2434 if (getuid() != 0) {
2435 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
2437 } else if (strcmp(value
, "wheel") == 0) {
2440 where2put
= &j
->groupname
;
2445 if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDOUTPATH
) == 0) {
2446 where2put
= &j
->stdoutpath
;
2447 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDERRORPATH
) == 0) {
2448 where2put
= &j
->stderrpath
;
2449 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDINPATH
) == 0) {
2450 where2put
= &j
->stdinpath
;
2451 j
->stdin_fd
= _fd(open(value
, O_RDONLY
|O_CREAT
|O_NOCTTY
|O_NONBLOCK
, DEFFILEMODE
));
2452 if (job_assumes_zero_p(j
, j
->stdin_fd
) != -1) {
2453 // open() should not block, but regular IO by the job should
2454 (void)job_assumes_zero_p(j
, fcntl(j
->stdin_fd
, F_SETFL
, 0));
2455 // XXX -- EV_CLEAR should make named pipes happy?
2456 (void)job_assumes_zero_p(j
, kevent_mod(j
->stdin_fd
, EVFILT_READ
, EV_ADD
|EV_CLEAR
, 0, 0, j
));
2461 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXPROFILE
) == 0) {
2462 where2put
= &j
->seatbelt_profile
;
2468 if (strcasecmp(key
, LAUNCH_JOBKEY_XPCDOMAIN
) == 0) {
2473 job_log(j
, LOG_WARNING
, "Unknown key for string: %s", key
);
2477 if (likely(where2put
)) {
2478 if (!(*where2put
= strdup(value
))) {
2479 (void)job_assumes_zero(j
, errno
);
2482 // See rdar://problem/5496612. These two are okay.
2483 if (strncmp(key
, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) == 0
2484 || strncmp(key
, "ServiceDescription", sizeof("ServiceDescription")) == 0) {
2485 job_log(j
, LOG_APPLEONLY
, "This key is no longer relevant and should be removed: %s", key
);
2487 job_log(j
, LOG_WARNING
, "Unknown key: %s", key
);
2493 job_import_integer(job_t j
, const char *key
, long long value
)
2498 if (strcasecmp(key
, LAUNCH_JOBKEY_EXITTIMEOUT
) == 0) {
2499 if (unlikely(value
< 0)) {
2500 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
2501 } else if (unlikely(value
> UINT32_MAX
)) {
2502 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
2504 j
->exit_timeout
= (typeof(j
->exit_timeout
)) value
;
2506 } else if (strcasecmp(key
, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY
) == 0) {
2507 j
->main_thread_priority
= value
;
2512 if (strcasecmp(key
, LAUNCH_JOBKEY_JETSAMPRIORITY
) == 0) {
2513 job_log(j
, LOG_WARNING
| LOG_CONSOLE
, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2515 launch_data_t pri
= launch_data_new_integer(value
);
2516 if (job_assumes(j
, pri
!= NULL
)) {
2517 jetsam_property_setup(pri
, LAUNCH_JOBKEY_JETSAMPRIORITY
, j
);
2518 launch_data_free(pri
);
2523 if (strcasecmp(key
, LAUNCH_JOBKEY_NICE
) == 0) {
2524 if (unlikely(value
< PRIO_MIN
)) {
2525 job_log(j
, LOG_WARNING
, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE
, PRIO_MIN
);
2526 } else if (unlikely(value
> PRIO_MAX
)) {
2527 job_log(j
, LOG_WARNING
, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE
, PRIO_MAX
);
2529 j
->nice
= (typeof(j
->nice
)) value
;
2536 if (strcasecmp(key
, LAUNCH_JOBKEY_TIMEOUT
) == 0) {
2537 if (unlikely(value
< 0)) {
2538 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
2539 } else if (unlikely(value
> UINT32_MAX
)) {
2540 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
2542 j
->timeout
= (typeof(j
->timeout
)) value
;
2544 } else if (strcasecmp(key
, LAUNCH_JOBKEY_THROTTLEINTERVAL
) == 0) {
2546 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
2547 } else if (value
> UINT32_MAX
) {
2548 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
2550 j
->min_run_time
= (typeof(j
->min_run_time
)) value
;
2556 if (strcasecmp(key
, LAUNCH_JOBKEY_UMASK
) == 0) {
2563 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTINTERVAL
) == 0) {
2564 if (unlikely(value
<= 0)) {
2565 job_log(j
, LOG_WARNING
, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
2566 } else if (unlikely(value
> UINT32_MAX
)) {
2567 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
2569 runtime_add_weak_ref();
2570 j
->start_interval
= (typeof(j
->start_interval
)) value
;
2572 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, j
->start_interval
, j
));
2575 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
2576 j
->seatbelt_flags
= value
;
2582 job_log(j
, LOG_WARNING
, "Unknown key for integer: %s", key
);
2588 job_import_opaque(job_t j
__attribute__((unused
)), const char *key
, launch_data_t value
__attribute__((unused
)))
2594 if (strcasecmp(key
, LAUNCH_JOBKEY_QUARANTINEDATA
) == 0) {
2595 size_t tmpsz
= launch_data_get_opaque_size(value
);
2597 if (job_assumes(j
, j
->quarantine_data
= malloc(tmpsz
))) {
2598 memcpy(j
->quarantine_data
, launch_data_get_opaque(value
), tmpsz
);
2599 j
->quarantine_data_sz
= tmpsz
;
2605 if (strcasecmp(key
, LAUNCH_JOBKEY_SECURITYSESSIONUUID
) == 0) {
2606 size_t tmpsz
= launch_data_get_opaque_size(value
);
2607 if (job_assumes(j
, tmpsz
== sizeof(uuid_t
))) {
2608 memcpy(j
->expected_audit_uuid
, launch_data_get_opaque(value
), sizeof(uuid_t
));
2618 policy_setup(launch_data_t obj
, const char *key
, void *context
)
2621 bool found_key
= false;
2626 if (strcasecmp(key
, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS
) == 0) {
2627 j
->deny_job_creation
= launch_data_get_bool(obj
);
2635 if (unlikely(!found_key
)) {
2636 job_log(j
, LOG_WARNING
, "Unknown policy: %s", key
);
2641 job_import_dictionary(job_t j
, const char *key
, launch_data_t value
)
2648 if (strcasecmp(key
, LAUNCH_JOBKEY_POLICIES
) == 0) {
2649 launch_data_dict_iterate(value
, policy_setup
, j
);
2654 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
2655 launch_data_dict_iterate(value
, semaphoreitem_setup
, j
);
2660 if (strcasecmp(key
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
) == 0) {
2661 j
->inetcompat
= true;
2662 j
->abandon_pg
= true;
2663 if ((tmp
= launch_data_dict_lookup(value
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
))) {
2664 j
->inetcompat_wait
= launch_data_get_bool(tmp
);
2670 if (strcasecmp(key
, LAUNCH_JOBKEY_JETSAMPROPERTIES
) == 0) {
2671 launch_data_dict_iterate(value
, (void (*)(launch_data_t
, const char *, void *))jetsam_property_setup
, j
);
2675 if (strcasecmp(key
, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES
) == 0) {
2676 launch_data_dict_iterate(value
, envitem_setup
, j
);
2681 if (strcasecmp(key
, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES
) == 0) {
2682 j
->importing_global_env
= true;
2683 launch_data_dict_iterate(value
, envitem_setup
, j
);
2684 j
->importing_global_env
= false;
2689 if (strcasecmp(key
, LAUNCH_JOBKEY_SOCKETS
) == 0) {
2690 launch_data_dict_iterate(value
, socketgroup_setup
, j
);
2691 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
2692 calendarinterval_new_from_obj(j
, value
);
2693 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SOFTRESOURCELIMITS
) == 0) {
2694 launch_data_dict_iterate(value
, limititem_setup
, j
);
2696 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
2697 launch_data_dict_iterate(value
, seatbelt_setup_flags
, j
);
2703 if (strcasecmp(key
, LAUNCH_JOBKEY_HARDRESOURCELIMITS
) == 0) {
2704 j
->importing_hard_limits
= true;
2705 launch_data_dict_iterate(value
, limititem_setup
, j
);
2706 j
->importing_hard_limits
= false;
2711 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHSERVICES
) == 0) {
2712 launch_data_dict_iterate(value
, machservice_setup
, j
);
2717 if (strcasecmp(key
, LAUNCH_JOBKEY_LAUNCHEVENTS
) == 0) {
2718 launch_data_dict_iterate(value
, eventsystem_setup
, j
);
2720 if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE
) == 0) {
2723 if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE
) == 0) {
2729 job_log(j
, LOG_WARNING
, "Unknown key for dictionary: %s", key
);
2735 job_import_array(job_t j
, const char *key
, launch_data_t value
)
2737 size_t i
, value_cnt
= launch_data_array_get_count(value
);
2742 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
) == 0) {
2748 if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
2750 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
2752 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
2753 job_log(j
, LOG_NOTICE
, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
2759 if (strcasecmp(key
, LAUNCH_JOBKEY_BINARYORDERPREFERENCE
) == 0) {
2760 if (job_assumes(j
, j
->j_binpref
= malloc(value_cnt
* sizeof(*j
->j_binpref
)))) {
2761 j
->j_binpref_cnt
= value_cnt
;
2762 for (i
= 0; i
< value_cnt
; i
++) {
2763 j
->j_binpref
[i
] = (cpu_type_t
) launch_data_get_integer(launch_data_array_get_index(value
, i
));
2770 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
2771 for (i
= 0; i
< value_cnt
; i
++) {
2772 calendarinterval_new_from_obj(j
, launch_data_array_get_index(value
, i
));
2777 job_log(j
, LOG_WARNING
, "Unknown key for array: %s", key
);
2783 job_import_keys(launch_data_t obj
, const char *key
, void *context
)
2786 launch_data_type_t kind
;
2789 launchd_syslog(LOG_ERR
, "NULL object given to job_import_keys().");
2793 kind
= launch_data_get_type(obj
);
2796 case LAUNCH_DATA_BOOL
:
2797 job_import_bool(j
, key
, launch_data_get_bool(obj
));
2799 case LAUNCH_DATA_STRING
:
2800 job_import_string(j
, key
, launch_data_get_string(obj
));
2802 case LAUNCH_DATA_INTEGER
:
2803 job_import_integer(j
, key
, launch_data_get_integer(obj
));
2805 case LAUNCH_DATA_DICTIONARY
:
2806 job_import_dictionary(j
, key
, obj
);
2808 case LAUNCH_DATA_ARRAY
:
2809 job_import_array(j
, key
, obj
);
2811 case LAUNCH_DATA_OPAQUE
:
2812 job_import_opaque(j
, key
, obj
);
2815 job_log(j
, LOG_WARNING
, "Unknown value type '%d' for key: %s", kind
, key
);
2821 jobmgr_import2(jobmgr_t jm
, launch_data_t pload
)
2823 launch_data_t tmp
, ldpa
;
2824 const char *label
= NULL
, *prog
= NULL
;
2825 const char **argv
= NULL
;
2828 if (!jobmgr_assumes(jm
, pload
!= NULL
)) {
2833 if (unlikely(launch_data_get_type(pload
) != LAUNCH_DATA_DICTIONARY
)) {
2838 if (unlikely(!(tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LABEL
)))) {
2843 if (unlikely(launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
)) {
2848 if (unlikely(!(label
= launch_data_get_string(tmp
)))) {
2853 #if TARGET_OS_EMBEDDED
2854 if (unlikely(launchd_embedded_handofgod
&& _launchd_embedded_god
)) {
2855 if (unlikely(!(tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_USERNAME
)))) {
2860 const char *username
= NULL
;
2861 if (likely(tmp
&& launch_data_get_type(tmp
) == LAUNCH_DATA_STRING
)) {
2862 username
= launch_data_get_string(tmp
);
2868 if (!jobmgr_assumes(jm
, _launchd_embedded_god
->username
!= NULL
&& username
!= NULL
)) {
2873 if (unlikely(strcmp(_launchd_embedded_god
->username
, username
) != 0)) {
2877 } else if (launchd_embedded_handofgod
) {
2883 if ((tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAM
))
2884 && (launch_data_get_type(tmp
) == LAUNCH_DATA_STRING
)) {
2885 prog
= launch_data_get_string(tmp
);
2889 if ((ldpa
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
))) {
2892 if (launch_data_get_type(ldpa
) != LAUNCH_DATA_ARRAY
) {
2897 c
= launch_data_array_get_count(ldpa
);
2899 argv
= alloca((c
+ 1) * sizeof(char *));
2901 for (i
= 0; i
< c
; i
++) {
2902 tmp
= launch_data_array_get_index(ldpa
, i
);
2904 if (launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
) {
2909 argv
[i
] = launch_data_get_string(tmp
);
2916 if (!prog
&& argc
== 0) {
2917 jobmgr_log(jm
, LOG_ERR
, "Job specifies neither Program nor ProgramArguments: %s", label
);
2922 /* Find the requested session. You cannot load services into XPC domains in
2925 launch_data_t session
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
2927 jobmgr_t jmt
= NULL
;
2928 if (launch_data_get_type(session
) == LAUNCH_DATA_STRING
) {
2929 jmt
= jobmgr_find_by_name(jm
, launch_data_get_string(session
));
2931 jobmgr_log(jm
, LOG_ERR
, "Could not find requested session: %s", launch_data_get_string(session
));
2936 jobmgr_log(jm
, LOG_ERR
, "Session type is not a string.");
2945 /* For legacy reasons, we have a global hash of all labels in all job
2946 * managers. So rather than make it a global, we store it in the root job
2947 * manager. But for an XPC domain, we store a local hash of all services in
2950 jobmgr_t where2look
= (jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) ? jm
: root_jobmgr
;
2951 if (unlikely((j
= job_find(where2look
, label
)) != NULL
)) {
2952 if (jm
->xpc_singleton
) {
2953 /* There can (and probably will be) multiple attempts to import the
2954 * same XPC service from the same framework. This is okay. It's
2955 * treated as a singleton, so just return the existing one so that
2956 * it may be aliased into the requesting process' XPC domain.
2961 /* If we're not a global XPC domain, then it's an error to try
2962 * importing the same job/service multiple times.
2967 } else if (unlikely(!jobmgr_label_test(where2look
, label
))) {
2971 jobmgr_log(jm
, LOG_DEBUG
, "Importing %s.", label
);
2973 if (likely(j
= job_new(jm
, label
, prog
, argv
))) {
2974 launch_data_dict_iterate(pload
, job_import_keys
, j
);
2975 if (!uuid_is_null(j
->expected_audit_uuid
)) {
2976 uuid_string_t uuid_str
;
2977 uuid_unparse(j
->expected_audit_uuid
, uuid_str
);
2978 job_log(j
, LOG_DEBUG
, "Imported job. Waiting for session for UUID %s.", uuid_str
);
2979 LIST_INSERT_HEAD(&s_needing_sessions
, j
, needing_session_sle
);
2982 job_log(j
, LOG_DEBUG
, "No security session specified.");
2983 j
->asport
= MACH_PORT_NULL
;
2986 if (pid1_magic
&& !jm
->parentmgr
) {
2987 /* Workaround reentrancy in CF. We don't make this a global variable
2988 * because we don't want per-user launchd's to inherit it. So we
2989 * just set it for every job that we import into the System session.
2991 * See <rdar://problem/9468837>.
2993 envitem_new(j
, "__CF_USER_TEXT_ENCODING", "0x0:0:0", false);
2996 if (j
->event_monitor
) {
3000 #if TARGET_OS_EMBEDDED
3001 /* SpringBoard runs at Interactive priority.
3003 * See <rdar://problem/9539873>.
3005 if (j
->embedded_god
) {
3006 j
->pstype
= POSIX_SPAWN_IOS_INTERACTIVE
;
3015 jobmgr_label_test(jobmgr_t jm
, const char *str
)
3017 char *endstr
= NULL
;
3020 if (str
[0] == '\0') {
3021 jobmgr_log(jm
, LOG_ERR
, "Empty job labels are not allowed");
3025 for (ptr
= str
; *ptr
; ptr
++) {
3026 if (iscntrl(*ptr
)) {
3027 jobmgr_log(jm
, LOG_ERR
, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr
- str
, *ptr
);
3032 strtoll(str
, &endstr
, 0);
3034 if (str
!= endstr
) {
3035 jobmgr_log(jm
, LOG_ERR
, "Job labels are not allowed to begin with numbers: %s", str
);
3039 if ((strncasecmp(str
, "com.apple.launchd", strlen("com.apple.launchd")) == 0)
3040 || (strncasecmp(str
, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
3041 jobmgr_log(jm
, LOG_ERR
, "Job labels are not allowed to use a reserved prefix: %s", str
);
3049 job_find(jobmgr_t jm
, const char *label
)
3057 LIST_FOREACH(ji
, &jm
->label_hash
[hash_label(label
)], label_hash_sle
) {
3058 if (unlikely(ji
->removal_pending
|| ji
->mgr
->shutting_down
)) {
3059 // 5351245 and 5488633 respectively
3063 if (strcmp(ji
->label
, label
) == 0) {
3072 // Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid().
3074 jobmgr_find_by_pid_deep(jobmgr_t jm
, pid_t p
, bool anon_okay
)
3077 LIST_FOREACH(ji
, &jm
->active_jobs
[ACTIVE_JOB_HASH(p
)], pid_hash_sle
) {
3078 if (ji
->p
== p
&& (!ji
->anonymous
|| (ji
->anonymous
&& anon_okay
))) {
3083 jobmgr_t jmi
= NULL
;
3084 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
3085 if ((ji
= jobmgr_find_by_pid_deep(jmi
, p
, anon_okay
))) {
3094 jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
)
3098 LIST_FOREACH(ji
, &jm
->active_jobs
[ACTIVE_JOB_HASH(p
)], pid_hash_sle
) {
3104 return create_anon
? job_new_anonymous(jm
, p
) : NULL
;
3108 job_mig_intran2(jobmgr_t jm
, mach_port_t mport
, pid_t upid
)
3113 if (jm
->jm_port
== mport
) {
3114 return jobmgr_find_by_pid(jm
, upid
, true);
3117 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
3120 if ((jr
= job_mig_intran2(jmi
, mport
, upid
))) {
3125 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
3126 if (ji
->j_port
== mport
) {
3135 job_mig_intran(mach_port_t p
)
3137 struct ldcred
*ldc
= runtime_get_caller_creds();
3140 jr
= job_mig_intran2(root_jobmgr
, p
, ldc
->pid
);
3143 struct proc_bsdshortinfo proc
;
3144 if (proc_pidinfo(ldc
->pid
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
3145 if (errno
!= ESRCH
) {
3146 (void)jobmgr_assumes_zero(root_jobmgr
, errno
);
3148 jobmgr_log(root_jobmgr
, LOG_ERR
, "%s[%i] disappeared out from under us (UID: %u EUID: %u)", proc
.pbsi_comm
, ldc
->pid
, ldc
->uid
, ldc
->euid
);
3157 job_find_by_service_port(mach_port_t p
)
3159 struct machservice
*ms
;
3161 LIST_FOREACH(ms
, &port_hash
[HASH_PORT(p
)], port_hash_sle
) {
3162 if (ms
->recv
&& (ms
->port
== p
)) {
3171 job_mig_destructor(job_t j
)
3173 /* The job can go invalid before this point.
3175 * <rdar://problem/5477111>
3177 if (unlikely(j
&& (j
!= workaround_5477111
) && j
->unload_at_mig_return
)) {
3178 job_log(j
, LOG_NOTICE
, "Unloading PID %u at MIG return.", j
->p
);
3182 workaround_5477111
= NULL
;
3184 calendarinterval_sanity_check();
3188 job_export_all2(jobmgr_t jm
, launch_data_t where
)
3193 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
3194 job_export_all2(jmi
, where
);
3197 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
3200 if (jobmgr_assumes(jm
, (tmp
= job_export(ji
)) != NULL
)) {
3201 launch_data_dict_insert(where
, tmp
, ji
->label
);
3207 job_export_all(void)
3209 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
3212 job_export_all2(root_jobmgr
, resp
);
3214 (void)osx_assumes_zero(errno
);
3221 job_log_stray_pg(job_t j
)
3224 size_t len
= sizeof(pid_t
) * get_kern_max_proc();
3225 int i
= 0, kp_cnt
= 0;
3227 if (!launchd_apple_internal
) {
3231 runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG
, j
->p
, 0, 0);
3233 if (!job_assumes(j
, (pids
= malloc(len
)) != NULL
)) {
3236 if (job_assumes_zero_p(j
, (kp_cnt
= proc_listpgrppids(j
->p
, pids
, len
))) == -1) {
3240 for (i
= 0; i
< kp_cnt
; i
++) {
3241 pid_t p_i
= pids
[i
];
3244 } else if (p_i
== 0 || p_i
== 1) {
3248 struct proc_bsdshortinfo proc
;
3249 if (proc_pidinfo(p_i
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
3250 if (errno
!= ESRCH
) {
3251 (void)job_assumes_zero(j
, errno
);
3256 pid_t pp_i
= proc
.pbsi_ppid
;
3257 const char *z
= (proc
.pbsi_status
== SZOMB
) ? "zombie " : "";
3258 const char *n
= proc
.pbsi_comm
;
3260 job_log(j
, LOG_WARNING
, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z
, p_i
, pp_i
, proc
.pbsi_pgid
, n
);
3273 bool is_system_bootstrapper
= ((j
->is_bootstrapper
&& pid1_magic
) && !j
->mgr
->parentmgr
);
3275 job_log(j
, LOG_DEBUG
, "Reaping");
3277 if (unlikely(j
->weird_bootstrap
)) {
3279 job_mig_swap_integer(j
, VPROC_GSK_WEIRD_BOOTSTRAP
, 0, 0, &junk
);
3283 (void)job_assumes_zero_p(j
, runtime_close(j
->fork_fd
));
3289 memset(&ru
, 0, sizeof(ru
));
3291 uint64_t rt
= runtime_get_nanoseconds_since(j
->start_time
);
3294 job_log(j
, LOG_PERF
, "Last instance wall time: %06f", (double)rt
/ (double)NSEC_PER_SEC
);
3297 /* The job is dead. While the PID/PGID is still known to be valid, try
3298 * to kill abandoned descendant processes.
3300 job_log_stray_pg(j
);
3301 if (!j
->abandon_pg
) {
3302 if (unlikely(killpg2(j
->p
, SIGTERM
) == -1 && errno
!= ESRCH
)) {
3303 job_log(j
, LOG_APPLEONLY
, "Bug: 5487498");
3307 /* We have to work around one of two kernel bugs here. ptrace(3) may
3308 * have abducted the child away from us and reparented it to the tracing
3309 * process. If the process then exits, we still get NOTE_EXIT, but we
3310 * cannot reap it because the kernel may not have restored the true
3311 * parent/child relationship in time.
3313 * See <rdar://problem/5020256>.
3315 * The other bug is if the shutdown monitor has suspended a task and not
3316 * resumed it before exiting. In this case, the kernel will not clean up
3317 * after the shutdown monitor. It will, instead, leave the task
3318 * suspended and not process any pending signals on the event loop
3321 * There are a variety of other kernel bugs that could prevent a process
3322 * from exiting, usually having to do with faulty hardware or talking to
3323 * misbehaving drivers that mark a thread as uninterruptible and
3324 * deadlock/hang before unmarking it as such. So we have to work around
3327 * See <rdar://problem/9284889&9359725>.
3330 if (j
->workaround9359725
) {
3331 job_log(j
, LOG_NOTICE
, "Simulated exit: <rdar://problem/9359725>");
3332 status
= W_EXITCODE(-1, SIGSEGV
);
3333 memset(&ru
, 0, sizeof(ru
));
3334 } else if ((r
= wait4(j
->p
, &status
, 0, &ru
)) == -1) {
3335 job_log(j
, LOG_NOTICE
, "Assuming job exited: <rdar://problem/5020256>: %d: %s", errno
, strerror(errno
));
3336 status
= W_EXITCODE(-1, SIGSEGV
);
3337 memset(&ru
, 0, sizeof(ru
));
3340 if (launchd_log_perf
&& r
!= -1) {
3341 job_log(j
, LOG_PERF
, "Last instance user time: %ld.%06u", ru
.ru_utime
.tv_sec
, ru
.ru_utime
.tv_usec
);
3342 job_log(j
, LOG_PERF
, "Last instance system time: %ld.%06u", ru
.ru_stime
.tv_sec
, ru
.ru_stime
.tv_usec
);
3343 job_log(j
, LOG_PERF
, "Last instance maximum resident size: %lu", ru
.ru_maxrss
);
3344 job_log(j
, LOG_PERF
, "Last instance integral shared memory size: %lu", ru
.ru_ixrss
);
3345 job_log(j
, LOG_PERF
, "Last instance integral unshared data size: %lu", ru
.ru_idrss
);
3346 job_log(j
, LOG_PERF
, "Last instance integral unshared stack size: %lu", ru
.ru_isrss
);
3347 job_log(j
, LOG_PERF
, "Last instance page reclaims: %lu", ru
.ru_minflt
);
3348 job_log(j
, LOG_PERF
, "Last instance page faults: %lu", ru
.ru_majflt
);
3349 job_log(j
, LOG_PERF
, "Last instance swaps: %lu", ru
.ru_nswap
);
3350 job_log(j
, LOG_PERF
, "Last instance input ops: %lu", ru
.ru_inblock
);
3351 job_log(j
, LOG_PERF
, "Last instance output ops: %lu", ru
.ru_oublock
);
3352 job_log(j
, LOG_PERF
, "Last instance messages sent: %lu", ru
.ru_msgsnd
);
3353 job_log(j
, LOG_PERF
, "Last instance messages received: %lu", ru
.ru_msgrcv
);
3354 job_log(j
, LOG_PERF
, "Last instance signals received: %lu", ru
.ru_nsignals
);
3355 job_log(j
, LOG_PERF
, "Last instance voluntary context switches: %lu", ru
.ru_nvcsw
);
3356 job_log(j
, LOG_PERF
, "Last instance involuntary context switches: %lu", ru
.ru_nivcsw
);
3360 if (j
->exit_timeout
) {
3361 (void)kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
3364 LIST_REMOVE(j
, pid_hash_sle
);
3366 if (j
->sent_signal_time
) {
3367 uint64_t td_sec
, td_usec
, td
= runtime_get_nanoseconds_since(j
->sent_signal_time
);
3369 td_sec
= td
/ NSEC_PER_SEC
;
3370 td_usec
= (td
% NSEC_PER_SEC
) / NSEC_PER_USEC
;
3372 job_log(j
, LOG_DEBUG
, "Exited %llu.%06llu seconds after the first signal was sent", td_sec
, td_usec
);
3375 timeradd(&ru
.ru_utime
, &j
->ru
.ru_utime
, &j
->ru
.ru_utime
);
3376 timeradd(&ru
.ru_stime
, &j
->ru
.ru_stime
, &j
->ru
.ru_stime
);
3377 if (j
->ru
.ru_maxrss
< ru
.ru_maxrss
) {
3378 j
->ru
.ru_maxrss
= ru
.ru_maxrss
;
3381 j
->ru
.ru_ixrss
+= ru
.ru_ixrss
;
3382 j
->ru
.ru_idrss
+= ru
.ru_idrss
;
3383 j
->ru
.ru_isrss
+= ru
.ru_isrss
;
3384 j
->ru
.ru_minflt
+= ru
.ru_minflt
;
3385 j
->ru
.ru_majflt
+= ru
.ru_majflt
;
3386 j
->ru
.ru_nswap
+= ru
.ru_nswap
;
3387 j
->ru
.ru_inblock
+= ru
.ru_inblock
;
3388 j
->ru
.ru_oublock
+= ru
.ru_oublock
;
3389 j
->ru
.ru_msgsnd
+= ru
.ru_msgsnd
;
3390 j
->ru
.ru_msgrcv
+= ru
.ru_msgrcv
;
3391 j
->ru
.ru_nsignals
+= ru
.ru_nsignals
;
3392 j
->ru
.ru_nvcsw
+= ru
.ru_nvcsw
;
3393 j
->ru
.ru_nivcsw
+= ru
.ru_nivcsw
;
3394 job_log_perf_statistics(j
);
3396 int exit_status
= WEXITSTATUS(status
);
3397 if (WIFEXITED(status
) && exit_status
!= 0) {
3398 if (!j
->did_exec
&& _launchd_support_system
) {
3399 xpc_object_t event
= NULL
;
3400 switch (exit_status
) {
3404 job_log(j
, LOG_NOTICE
, "Job failed to exec(3). Setting up event to tell us when to try again: %d: %s", exit_status
, strerror(exit_status
));
3405 event
= xpc_dictionary_create(NULL
, NULL
, 0);
3406 xpc_dictionary_set_string(event
, "Executable", j
->prog
? j
->prog
: j
->argv
[0]);
3408 xpc_dictionary_set_uint64(event
, "UID", j
->mach_uid
);
3409 } else if (j
->username
) {
3410 xpc_dictionary_set_string(event
, "UserName", j
->username
);
3414 xpc_dictionary_set_string(event
, "GroupName", j
->groupname
);
3417 (void)externalevent_new(j
, _launchd_support_system
, j
->label
, event
);
3420 j
->waiting4ok
= true;
3422 job_log(j
, LOG_NOTICE
, "Job failed to exec(3) for weird reason: %d", exit_status
);
3425 int level
= LOG_INFO
;
3426 if (exit_status
!= 0) {
3430 job_log(j
, level
, "Exited with code: %d", exit_status
);
3434 if (WIFSIGNALED(status
)) {
3435 int s
= WTERMSIG(status
);
3436 if ((SIGKILL
== s
|| SIGTERM
== s
) && !j
->stopped
) {
3437 job_log(j
, LOG_NOTICE
, "Exited: %s", strsignal(s
));
3438 } else if (!j
->stopped
&& !j
->clean_kill
) {
3440 // Signals which indicate a crash.
3447 /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
3448 * SIGTRAP, assume that it's a crash.
3452 job_log(j
, LOG_WARNING
, "Job appears to have crashed: %s", strsignal(s
));
3455 job_log(j
, LOG_WARNING
, "Exited abnormally: %s", strsignal(s
));
3459 if (is_system_bootstrapper
&& j
->crashed
) {
3460 job_log(j
, LOG_ERR
| LOG_CONSOLE
, "The %s bootstrapper has crashed: %s", j
->mgr
->name
, strsignal(s
));
3467 struct machservice
*msi
= NULL
;
3468 if (j
->crashed
|| !(j
->did_exec
|| j
->anonymous
)) {
3469 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
3470 if (j
->crashed
&& !msi
->isActive
&& (msi
->drain_one_on_crash
|| msi
->drain_all_on_crash
)) {
3471 machservice_drain_port(msi
);
3474 if (!j
->did_exec
&& msi
->reset
&& job_assumes(j
, !msi
->isActive
)) {
3475 machservice_resetport(j
, msi
);
3480 /* HACK: Essentially duplicating the logic directly above. But this has
3481 * gotten really hairy, and I don't want to try consolidating it right now.
3483 if (j
->xpc_service
&& !j
->xpcproxy_did_exec
) {
3484 job_log(j
, LOG_ERR
, "XPC Service could not exec(3). Resetting port.");
3485 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
3486 /* Drain the messages but do not reset the port. If xpcproxy could
3487 * not exec(3), then we don't want to continue trying, since there
3488 * is very likely a serious configuration error with the service.
3490 * The above comment is weird. I originally said we should drain
3491 * messages but not reset the port, but that's exactly what we do
3492 * below, and I'm not sure which is the mistake, the comment or the
3495 * Since it's always been this way, I'll assume that the comment is
3496 * incorrect, but I'll leave it in place just to remind myself to
3497 * actually look into it at some point.
3499 * <rdar://problem/8986802>
3501 if (msi
->upfront
&& job_assumes(j
, !msi
->isActive
)) {
3502 machservice_resetport(j
, msi
);
3507 struct suspended_peruser
*spi
= NULL
;
3508 while ((spi
= LIST_FIRST(&j
->suspended_perusers
))) {
3509 job_log(j
, LOG_ERR
, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi
->j
->mach_uid
);
3510 spi
->j
->peruser_suspend_count
--;
3511 if (spi
->j
->peruser_suspend_count
== 0) {
3512 job_dispatch(spi
->j
, false);
3514 LIST_REMOVE(spi
, sle
);
3518 j
->last_exit_status
= status
;
3520 if (j
->exit_status_dest
) {
3521 errno
= helper_downcall_wait(j
->exit_status_dest
, j
->last_exit_status
);
3522 if (errno
&& errno
!= MACH_SEND_INVALID_DEST
) {
3523 (void)job_assumes_zero(j
, errno
);
3526 j
->exit_status_dest
= MACH_PORT_NULL
;
3529 if (j
->spawn_reply_port
) {
3530 /* If the child never called exec(3), we must send a spawn() reply so
3531 * that the requestor can get exit status from it. If we fail to send
3532 * the reply for some reason, we have to deallocate the exit status port
3535 kern_return_t kr
= job_mig_spawn2_reply(j
->spawn_reply_port
, BOOTSTRAP_SUCCESS
, j
->p
, j
->exit_status_port
);
3537 if (kr
!= MACH_SEND_INVALID_DEST
) {
3538 (void)job_assumes_zero(j
, kr
);
3541 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->exit_status_port
));
3544 j
->exit_status_port
= MACH_PORT_NULL
;
3545 j
->spawn_reply_port
= MACH_PORT_NULL
;
3549 total_anon_children
--;
3551 job_log(j
, LOG_PERF
, "Anonymous job exited holding reference.");
3555 job_log(j
, LOG_PERF
, "Job exited.");
3560 if (j
->has_console
) {
3564 if (j
->shutdown_monitor
) {
3565 job_log(j
, LOG_NOTICE
| LOG_CONSOLE
, "Shutdown monitor has exited.");
3566 _launchd_shutdown_monitor
= NULL
;
3567 j
->shutdown_monitor
= false;
3570 if (!j
->anonymous
) {
3571 j
->mgr
->normal_active_cnt
--;
3573 j
->sent_signal_time
= 0;
3574 j
->sent_sigkill
= false;
3575 j
->clean_kill
= false;
3576 j
->event_monitor_ready2signal
= false;
3581 jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
)
3586 if (jm
->shutting_down
) {
3590 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
3591 jobmgr_dispatch_all(jmi
, newmounthack
);
3594 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
3595 if (newmounthack
&& ji
->start_on_mount
) {
3596 ji
->start_pending
= true;
3599 job_dispatch(ji
, false);
3604 job_dispatch_curious_jobs(job_t j
)
3606 job_t ji
= NULL
, jt
= NULL
;
3607 SLIST_FOREACH_SAFE(ji
, &s_curious_jobs
, curious_jobs_sle
, jt
) {
3608 struct semaphoreitem
*si
= NULL
;
3609 SLIST_FOREACH(si
, &ji
->semaphores
, sle
) {
3610 if (!(si
->why
== OTHER_JOB_ENABLED
|| si
->why
== OTHER_JOB_DISABLED
)) {
3614 if (strcmp(si
->what
, j
->label
) == 0) {
3615 job_log(ji
, LOG_DEBUG
, "Dispatching out of interest in \"%s\".", j
->label
);
3617 if (!ji
->removing
) {
3618 job_dispatch(ji
, false);
3620 job_log(ji
, LOG_NOTICE
, "The following job is circularly dependent upon this one: %s", j
->label
);
3623 /* ji could be removed here, so don't do anything with it or its semaphores
3633 job_dispatch(job_t j
, bool kickstart
)
3635 // Don't dispatch a job if it has no audit session set.
3636 if (!uuid_is_null(j
->expected_audit_uuid
)) {
3637 job_log(j
, LOG_DEBUG
, "Job is still awaiting its audit session UUID. Not dispatching.");
3641 job_log(j
, LOG_DEBUG
, "Job is an alias. Not dispatching.");
3645 if (j
->waiting4ok
) {
3646 job_log(j
, LOG_DEBUG
, "Job cannot exec(3). Not dispatching.");
3650 #if TARGET_OS_EMBEDDED
3651 if (launchd_embedded_handofgod
&& _launchd_embedded_god
) {
3652 if (!job_assumes(j
, _launchd_embedded_god
->username
!= NULL
&& j
->username
!= NULL
)) {
3657 if (strcmp(j
->username
, _launchd_embedded_god
->username
) != 0) {
3661 } else if (launchd_embedded_handofgod
) {
3668 * The whole job removal logic needs to be consolidated. The fact that
3669 * a job can be removed from just about anywhere makes it easy to have
3670 * stale pointers left behind somewhere on the stack that might get
3671 * used after the deallocation. In particular, during job iteration.
3673 * This is a classic example. The act of dispatching a job may delete it.
3675 if (!job_active(j
)) {
3676 if (job_useless(j
)) {
3677 job_log(j
, LOG_DEBUG
, "Job is useless. Removing.");
3681 if (unlikely(j
->per_user
&& j
->peruser_suspend_count
> 0)) {
3682 job_log(j
, LOG_DEBUG
, "Per-user launchd is suspended. Not dispatching.");
3686 if (kickstart
|| job_keepalive(j
)) {
3687 job_log(j
, LOG_DEBUG
, "%starting job", kickstart
? "Kicks" : "S");
3690 job_log(j
, LOG_DEBUG
, "Watching job.");
3696 * Path checking and monitoring is really racy right now.
3697 * We should clean this up post Leopard.
3699 if (job_keepalive(j
)) {
3704 job_log(j
, LOG_DEBUG
, "Tried to dispatch an already active job: %s.", job_active(j
));
3713 if (unlikely(!j
->p
|| j
->anonymous
)) {
3717 (void)job_assumes_zero_p(j
, kill2(j
->p
, SIGKILL
));
3719 j
->sent_sigkill
= true;
3720 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, LAUNCHD_SIGKILL_TIMER
, j
));
3722 job_log(j
, LOG_DEBUG
, "Sent SIGKILL signal");
3726 job_open_shutdown_transaction(job_t j
)
3728 int rv
= proc_set_dirty(j
->p
, true);
3730 job_log(j
, LOG_DEBUG
, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
3731 j
->dirty_at_shutdown
= false;
3736 job_close_shutdown_transaction(job_t j
)
3738 if (j
->dirty_at_shutdown
) {
3739 job_log(j
, LOG_DEBUG
, "Closing shutdown transaction for job.");
3740 (void)job_assumes_zero(j
, proc_set_dirty(j
->p
, false));
3741 j
->dirty_at_shutdown
= false;
3746 job_log_children_without_exec(job_t j
)
3749 size_t len
= sizeof(pid_t
) * get_kern_max_proc();
3750 int i
= 0, kp_cnt
= 0;
3752 if (!launchd_apple_internal
|| j
->anonymous
|| j
->per_user
) {
3756 if (!job_assumes(j
, (pids
= malloc(len
)) != NULL
)) {
3759 if (job_assumes_zero_p(j
, (kp_cnt
= proc_listchildpids(j
->p
, pids
, len
))) == -1) {
3763 for (i
= 0; i
< kp_cnt
; i
++) {
3764 struct proc_bsdshortinfo proc
;
3765 if (proc_pidinfo(pids
[i
], PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
3766 if (errno
!= ESRCH
) {
3767 (void)job_assumes_zero(j
, errno
);
3771 if (proc
.pbsi_flags
& P_EXEC
) {
3775 job_log(j
, LOG_DEBUG
, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids
[i
]);
3783 job_cleanup_after_tracer(job_t j
)
3786 if (j
->reap_after_trace
) {
3787 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Reaping job now that attached tracer is gone.");
3789 EV_SET(&kev
, j
->p
, 0, 0, NOTE_EXIT
, 0, 0);
3791 // Fake a kevent to keep our logic consistent.
3792 job_callback_proc(j
, &kev
);
3794 /* Normally, after getting a EVFILT_PROC event, we do garbage collection
3795 * on the root job manager. To make our fakery complete, we will do garbage
3796 * collection at the beginning of the next run loop cycle (after we're done
3797 * draining the current queue of kevents).
3799 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&root_jobmgr
->reboot_flags
, EVFILT_TIMER
, EV_ADD
| EV_ONESHOT
, NOTE_NSECONDS
, 1, root_jobmgr
));
3804 job_callback_proc(job_t j
, struct kevent
*kev
)
3806 bool program_changed
= false;
3807 int fflags
= kev
->fflags
;
3809 job_log(j
, LOG_DEBUG
, "EVFILT_PROC event for job.");
3810 log_kevent_struct(LOG_DEBUG
, kev
, 0);
3812 if (fflags
& NOTE_EXIT
) {
3813 if (j
->p
== (pid_t
)kev
->ident
&& !j
->anonymous
) {
3814 /* Note that the third argument to proc_pidinfo() is a magic
3815 * argument for PROC_PIDT_SHORTBSDINFO. Specifically, passing 1
3816 * means "don't fail on a zombie PID".
3818 struct proc_bsdshortinfo proc
;
3819 if (job_assumes(j
, proc_pidinfo(j
->p
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) > 0)) {
3820 if (!job_assumes(j
, (pid_t
)proc
.pbsi_ppid
== getpid())) {
3821 /* Someone has attached to the process with ptrace().
3822 * There's a race here. If we determine that we are not the
3823 * parent process and then fail to attach a kevent to the
3824 * parent PID (who is probably using ptrace()), we can take
3825 * that as an indication that the parent exited between
3826 * sysctl(3) and kevent_mod(). The reparenting of the PID
3827 * should be atomic to us, so in that case, we reap the job
3830 * Otherwise, we wait for the death of the parent tracer and
3831 * then reap, just as we would if a job died while we were
3832 * sampling it at shutdown.
3834 * Note that we foolishly assume that in the process *tree*
3835 * a node cannot be its own parent. Apparently, that is not
3836 * correct. If this is the case, we forsake the process to
3837 * its own devices. Let it reap itself.
3839 if (!job_assumes(j
, proc
.pbsi_ppid
!= kev
->ident
)) {
3840 job_log(j
, LOG_WARNING
, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
3843 if (job_assumes_zero_p(j
, kevent_mod(proc
.pbsi_ppid
, EVFILT_PROC
, EV_ADD
, NOTE_EXIT
, 0, j
)) != -1) {
3844 j
->tracing_pid
= proc
.pbsi_ppid
;
3845 j
->reap_after_trace
= true;
3850 } else if (!j
->anonymous
) {
3851 if (j
->tracing_pid
== (pid_t
)kev
->ident
) {
3852 job_cleanup_after_tracer(j
);
3855 } else if (j
->tracing_pid
&& !j
->reap_after_trace
) {
3856 // The job exited before our sample completed.
3857 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Job has exited. Will reap after tracing PID %i exits.", j
->tracing_pid
);
3858 j
->reap_after_trace
= true;
3864 if (fflags
& NOTE_EXEC
) {
3865 program_changed
= true;
3868 struct proc_bsdshortinfo proc
;
3869 if (proc_pidinfo(j
->p
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) > 0) {
3870 char newlabel
[1000];
3872 snprintf(newlabel
, sizeof(newlabel
), "%p.anonymous.%s", j
, proc
.pbsi_comm
);
3874 job_log(j
, LOG_INFO
, "Program changed. Updating the label to: %s", newlabel
);
3876 LIST_REMOVE(j
, label_hash_sle
);
3877 strcpy((char *)j
->label
, newlabel
);
3879 jobmgr_t where2put
= root_jobmgr
;
3880 if (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) {
3883 LIST_INSERT_HEAD(&where2put
->label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
3884 } else if (errno
!= ESRCH
) {
3885 (void)job_assumes_zero(j
, errno
);
3888 if (j
->spawn_reply_port
) {
3889 errno
= job_mig_spawn2_reply(j
->spawn_reply_port
, BOOTSTRAP_SUCCESS
, j
->p
, j
->exit_status_port
);
3891 if (errno
!= MACH_SEND_INVALID_DEST
) {
3892 (void)job_assumes_zero(j
, errno
);
3894 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->exit_status_port
));
3897 j
->spawn_reply_port
= MACH_PORT_NULL
;
3898 j
->exit_status_port
= MACH_PORT_NULL
;
3901 if (j
->xpc_service
&& j
->did_exec
) {
3902 j
->xpcproxy_did_exec
= true;
3906 job_log(j
, LOG_DEBUG
, "Program changed");
3910 if (fflags
& NOTE_FORK
) {
3911 job_log(j
, LOG_DEBUG
, "fork()ed%s", program_changed
? ". For this message only: We don't know whether this event happened before or after execve()." : "");
3912 job_log_children_without_exec(j
);
3915 if (fflags
& NOTE_EXIT
) {
3922 (void)job_dispatch(j
, false);
3928 job_callback_timer(job_t j
, void *ident
)
3931 job_log(j
, LOG_DEBUG
, "j == ident (%p)", ident
);
3932 job_dispatch(j
, true);
3933 } else if (&j
->semaphores
== ident
) {
3934 job_log(j
, LOG_DEBUG
, "&j->semaphores == ident (%p)", ident
);
3935 job_dispatch(j
, false);
3936 } else if (&j
->start_interval
== ident
) {
3937 job_log(j
, LOG_DEBUG
, "&j->start_interval == ident (%p)", ident
);
3938 j
->start_pending
= true;
3939 job_dispatch(j
, false);
3940 } else if (&j
->exit_timeout
== ident
) {
3941 if (!job_assumes(j
, j
->p
!= 0)) {
3945 if (j
->sent_sigkill
) {
3946 uint64_t td
= runtime_get_nanoseconds_since(j
->sent_signal_time
);
3949 td
-= j
->clean_kill
? 0 : j
->exit_timeout
;
3951 job_log(j
, LOG_WARNING
| LOG_CONSOLE
, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j
->clean_kill
? "cleanly " : "", td
);
3952 j
->workaround9359725
= true;
3954 if (launchd_trap_sigkill_bugs
) {
3955 job_log(j
, LOG_NOTICE
| LOG_CONSOLE
, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
3956 (void)job_assumes_zero(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
));
3959 /* We've simulated the exit, so we have to cancel the kevent for
3960 * this job, otherwise we may get a kevent later down the road that
3961 * has a stale context pointer (if we've removed the job). Or worse,
3962 * it'll corrupt our data structures if the job still exists or the
3963 * allocation was recycled.
3965 * If the failing process had a tracer attached to it, we need to
3966 * remove out NOTE_EXIT for that tracer too, otherwise the same
3967 * thing might happen.
3969 * Note that, if we're not shutting down, this will result in a
3970 * zombie process just hanging around forever. But if the process
3971 * didn't exit after receiving SIGKILL, odds are it would've just
3972 * stuck around forever anyway.
3974 * See <rdar://problem/9481630>.
3976 (void)kevent_mod((uintptr_t)j
->p
, EVFILT_PROC
, EV_DELETE
, 0, 0, NULL
);
3977 if (j
->tracing_pid
) {
3978 (void)kevent_mod((uintptr_t)j
->tracing_pid
, EVFILT_PROC
, EV_DELETE
, 0, 0, NULL
);
3981 struct kevent bogus_exit
;
3982 EV_SET(&bogus_exit
, j
->p
, EVFILT_PROC
, 0, NOTE_EXIT
, 0, 0);
3983 jobmgr_callback(j
->mgr
, &bogus_exit
);
3985 if (unlikely(j
->debug_before_kill
)) {
3986 job_log(j
, LOG_NOTICE
, "Exit timeout elapsed. Entering the kernel debugger");
3987 (void)job_assumes_zero(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
));
3990 job_log(j
, LOG_WARNING
| LOG_CONSOLE
, "Exit timeout elapsed (%u seconds). Killing", j
->exit_timeout
);
3994 job_log(j
, LOG_ERR
, "Unrecognized job timer callback: %p", ident
);
3999 job_callback_read(job_t j
, int ident
)
4001 if (ident
== j
->stdin_fd
) {
4002 job_dispatch(j
, true);
4004 socketgroup_callback(j
);
4009 jobmgr_reap_bulk(jobmgr_t jm
, struct kevent
*kev
)
4014 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
4015 jobmgr_reap_bulk(jmi
, kev
);
4018 if ((j
= jobmgr_find_by_pid(jm
, (pid_t
)kev
->ident
, false))) {
4020 job_callback(j
, kev
);
4025 jobmgr_callback(void *obj
, struct kevent
*kev
)
4029 #if TARGET_OS_EMBEDDED
4030 int flag2check
= VQ_MOUNT
;
4032 int flag2check
= VQ_UPDATE
;
4035 switch (kev
->filter
) {
4037 jobmgr_reap_bulk(jm
, kev
);
4038 root_jobmgr
= jobmgr_do_garbage_collection(root_jobmgr
);
4041 switch (kev
->ident
) {
4043 jobmgr_log(jm
, LOG_DEBUG
, "Got SIGTERM. Shutting down.");
4044 return launchd_shutdown();
4046 return calendarinterval_callback();
4048 // Turn on all logging.
4049 launchd_log_perf
= true;
4050 launchd_log_debug
= true;
4051 launchd_log_shutdown
= true;
4052 /* Hopefully /var is available by this point. If not, uh, oh well.
4053 * It's just a debugging facility.
4055 return jobmgr_log_perf_statistics(jm
);
4057 jobmgr_log(jm
, LOG_ERR
, "Unrecognized signal: %lu: %s", kev
->ident
, strsignal(kev
->ident
));
4061 if (kev
->fflags
& flag2check
) {
4062 if (!launchd_var_available
) {
4064 if (stat("/var/log", &sb
) == 0 && (sb
.st_mode
& S_IWUSR
)) {
4065 launchd_var_available
= true;
4068 } else if (kev
->fflags
& VQ_MOUNT
) {
4069 jobmgr_dispatch_all(jm
, true);
4071 jobmgr_dispatch_all_semaphores(jm
);
4074 if (kev
->ident
== (uintptr_t)&sorted_calendar_events
) {
4075 calendarinterval_callback();
4076 } else if (kev
->ident
== (uintptr_t)jm
) {
4077 jobmgr_log(jm
, LOG_DEBUG
, "Shutdown timer firing.");
4078 jobmgr_still_alive_with_check(jm
);
4079 } else if (kev
->ident
== (uintptr_t)&jm
->reboot_flags
) {
4080 jobmgr_do_garbage_collection(jm
);
4081 } else if (kev
->ident
== (uintptr_t)&launchd_runtime_busy_time
) {
4082 jobmgr_log(jm
, LOG_DEBUG
, "Idle exit timer fired. Shutting down.");
4083 if (jobmgr_assumes_zero(jm
, runtime_busy_cnt
) == 0) {
4084 return launchd_shutdown();
4089 if (kev
->ident
== (uintptr_t)s_no_hang_fd
) {
4090 int _no_hang_fd
= open("/dev/autofs_nowait", O_EVTONLY
| O_NONBLOCK
);
4091 if (unlikely(_no_hang_fd
!= -1)) {
4092 jobmgr_log(root_jobmgr
, LOG_DEBUG
, "/dev/autofs_nowait has appeared!");
4093 (void)jobmgr_assumes_zero_p(root_jobmgr
, kevent_mod((uintptr_t)s_no_hang_fd
, EVFILT_VNODE
, EV_DELETE
, 0, 0, NULL
));
4094 (void)jobmgr_assumes_zero_p(root_jobmgr
, runtime_close(s_no_hang_fd
));
4095 s_no_hang_fd
= _fd(_no_hang_fd
);
4097 } else if (pid1_magic
&& launchd_console
&& kev
->ident
== (uintptr_t)fileno(launchd_console
)) {
4099 if (jobmgr_assumes_zero_p(jm
, cfd
= open(_PATH_CONSOLE
, O_WRONLY
| O_NOCTTY
)) != -1) {
4101 if (!(launchd_console
= fdopen(cfd
, "w"))) {
4102 (void)jobmgr_assumes_zero(jm
, errno
);
4109 jobmgr_log(jm
, LOG_ERR
, "Unrecognized kevent filter: %hd", kev
->filter
);
4114 job_callback(void *obj
, struct kevent
*kev
)
4118 job_log(j
, LOG_DEBUG
, "Dispatching kevent callback.");
4120 switch (kev
->filter
) {
4122 return job_callback_proc(j
, kev
);
4124 return job_callback_timer(j
, (void *) kev
->ident
);
4126 return job_callback_read(j
, (int) kev
->ident
);
4127 case EVFILT_MACHPORT
:
4128 return (void)job_dispatch(j
, true);
4130 job_log(j
, LOG_ERR
, "Unrecognized job callback filter: %hd", kev
->filter
);
4143 u_int proc_fflags
= NOTE_EXIT
|NOTE_FORK
|NOTE_EXEC
;
4145 if (!job_assumes(j
, j
->mgr
!= NULL
)) {
4149 if (unlikely(job_active(j
))) {
4150 job_log(j
, LOG_DEBUG
, "Already started");
4155 * Some users adjust the wall-clock and then expect software to not notice.
4156 * Therefore, launchd must use an absolute clock instead of the wall clock
4157 * wherever possible.
4159 td
= runtime_get_nanoseconds_since(j
->start_time
);
4162 if (j
->start_time
&& (td
< j
->min_run_time
) && !j
->legacy_mach_job
&& !j
->inetcompat
) {
4163 time_t respawn_delta
= j
->min_run_time
- (uint32_t)td
;
4165 /* We technically should ref-count throttled jobs to prevent idle exit,
4166 * but we're not directly tracking the 'throttled' state at the moment.
4168 job_log(j
, LOG_NOTICE
, "Throttling respawn: Will start in %ld seconds", respawn_delta
);
4169 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, respawn_delta
, j
));
4174 if (likely(!j
->legacy_mach_job
)) {
4175 sipc
= ((!SLIST_EMPTY(&j
->sockets
) || !SLIST_EMPTY(&j
->machservices
)) && !j
->deny_job_creation
) || j
->embedded_god
;
4179 (void)job_assumes_zero_p(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, spair
));
4182 (void)job_assumes_zero_p(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, execspair
));
4184 switch (c
= runtime_fork(j
->weird_bootstrap
? j
->j_port
: j
->mgr
->jm_port
)) {
4186 job_log_error(j
, LOG_ERR
, "fork() failed, will try again in one second");
4187 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, 1, j
));
4190 (void)job_assumes_zero(j
, runtime_close(execspair
[0]));
4191 (void)job_assumes_zero(j
, runtime_close(execspair
[1]));
4193 (void)job_assumes_zero(j
, runtime_close(spair
[0]));
4194 (void)job_assumes_zero(j
, runtime_close(spair
[1]));
4198 if (unlikely(_vproc_post_fork_ping())) {
4199 _exit(EXIT_FAILURE
);
4202 (void)job_assumes_zero(j
, runtime_close(execspair
[0]));
4203 // wait for our parent to say they've attached a kevent to us
4204 read(_fd(execspair
[1]), &c
, sizeof(c
));
4207 (void)job_assumes_zero(j
, runtime_close(spair
[0]));
4208 snprintf(nbuf
, sizeof(nbuf
), "%d", spair
[1]);
4209 setenv(LAUNCHD_TRUSTED_FD_ENV
, nbuf
, 1);
4214 j
->start_time
= runtime_get_opaque_time();
4216 job_log(j
, LOG_DEBUG
, "Started as PID: %u", c
);
4218 j
->did_exec
= false;
4219 j
->xpcproxy_did_exec
= false;
4220 j
->checkedin
= false;
4221 j
->start_pending
= false;
4225 j
->workaround9359725
= false;
4226 if (j
->needs_kickoff
) {
4227 j
->needs_kickoff
= false;
4229 if (SLIST_EMPTY(&j
->semaphores
)) {
4230 j
->ondemand
= false;
4234 if (j
->has_console
) {
4238 job_log(j
, LOG_PERF
, "Job started.");
4241 LIST_INSERT_HEAD(&j
->mgr
->active_jobs
[ACTIVE_JOB_HASH(c
)], j
, pid_hash_sle
);
4244 j
->mgr
->normal_active_cnt
++;
4245 j
->fork_fd
= _fd(execspair
[0]);
4246 (void)job_assumes_zero(j
, runtime_close(execspair
[1]));
4248 (void)job_assumes_zero(j
, runtime_close(spair
[1]));
4249 ipc_open(_fd(spair
[0]), j
);
4251 if (kevent_mod(c
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
? root_jobmgr
: j
->mgr
) != -1) {
4254 if (errno
== ESRCH
) {
4255 job_log(j
, LOG_ERR
, "Child was killed before we could attach a kevent.");
4257 (void)job_assumes(j
, errno
== ESRCH
);
4261 /* If we have reaped this job within this same run loop pass, then
4262 * it will be currently ignored. So if there's a failure to attach a
4263 * kevent, we need to make sure that we watch the job so that we can
4266 * See <rdar://problem/10140809>.
4271 j
->wait4debugger_oneshot
= false;
4272 if (likely(!j
->stall_before_exec
)) {
4280 job_start_child(job_t j
)
4282 typeof(posix_spawn
) *psf
;
4283 const char *file2exec
= "/usr/libexec/launchproxy";
4285 posix_spawnattr_t spattr
;
4286 int gflags
= GLOB_NOSORT
|GLOB_NOCHECK
|GLOB_TILDE
|GLOB_DOOFFS
;
4288 short spflags
= POSIX_SPAWN_SETEXEC
;
4289 size_t binpref_out_cnt
= 0;
4292 (void)job_assumes_zero(j
, posix_spawnattr_init(&spattr
));
4294 job_setup_attributes(j
);
4296 if (unlikely(j
->argv
&& j
->globargv
)) {
4298 for (i
= 0; i
< j
->argc
; i
++) {
4300 gflags
|= GLOB_APPEND
;
4302 if (glob(j
->argv
[i
], gflags
, NULL
, &g
) != 0) {
4303 job_log_error(j
, LOG_ERR
, "glob(\"%s\")", j
->argv
[i
]);
4307 g
.gl_pathv
[0] = (char *)file2exec
;
4308 argv
= (const char **)g
.gl_pathv
;
4309 } else if (likely(j
->argv
)) {
4310 argv
= alloca((j
->argc
+ 2) * sizeof(char *));
4311 argv
[0] = file2exec
;
4312 for (i
= 0; i
< j
->argc
; i
++) {
4313 argv
[i
+ 1] = j
->argv
[i
];
4317 argv
= alloca(3 * sizeof(char *));
4318 argv
[0] = file2exec
;
4323 if (likely(!j
->inetcompat
)) {
4327 if (unlikely(j
->wait4debugger
|| j
->wait4debugger_oneshot
)) {
4328 if (!j
->legacy_LS_job
) {
4329 job_log(j
, LOG_WARNING
, "Spawned and waiting for the debugger to attach before continuing...");
4331 spflags
|= POSIX_SPAWN_START_SUSPENDED
;
4334 #if !TARGET_OS_EMBEDDED
4335 if (unlikely(j
->disable_aslr
)) {
4336 spflags
|= _POSIX_SPAWN_DISABLE_ASLR
;
4339 spflags
|= j
->pstype
;
4341 (void)job_assumes_zero(j
, posix_spawnattr_setflags(&spattr
, spflags
));
4342 if (unlikely(j
->j_binpref_cnt
)) {
4343 (void)job_assumes_zero(j
, posix_spawnattr_setbinpref_np(&spattr
, j
->j_binpref_cnt
, j
->j_binpref
, &binpref_out_cnt
));
4344 (void)job_assumes(j
, binpref_out_cnt
== j
->j_binpref_cnt
);
4347 #if TARGET_OS_EMBEDDED
4348 /* Set jetsam attributes. POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY guards
4349 * against a race which arises if, during spawn, an initial jetsam property
4350 * update occurs before the values below are applied. In this case, the flag
4351 * ensures that the subsequent change is ignored; the explicit update should
4352 * be given priority.
4355 if (j
->jetsam_properties
) {
4356 flags
= POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY
;
4359 (void)job_assumes_zero(j
, posix_spawnattr_setjetsam(&spattr
, flags
, j
->jetsam_priority
, j
->jetsam_memlimit
));
4363 (void)job_assumes_zero(j
, posix_spawnattr_setcpumonitor(&spattr
, 85, 5 * 60));
4367 if (j
->quarantine_data
) {
4370 if (job_assumes(j
, qp
= qtn_proc_alloc())) {
4371 if (job_assumes_zero(j
, qtn_proc_init_with_data(qp
, j
->quarantine_data
, j
->quarantine_data_sz
) == 0)) {
4372 (void)job_assumes_zero(j
, qtn_proc_apply_to_self(qp
));
4379 if (j
->seatbelt_profile
) {
4380 char *seatbelt_err_buf
= NULL
;
4382 if (job_assumes_zero_p(j
, sandbox_init(j
->seatbelt_profile
, j
->seatbelt_flags
, &seatbelt_err_buf
)) == -1) {
4383 if (seatbelt_err_buf
) {
4384 job_log(j
, LOG_ERR
, "Sandbox failed to init: %s", seatbelt_err_buf
);
4391 psf
= j
->prog
? posix_spawn
: posix_spawnp
;
4393 if (likely(!j
->inetcompat
)) {
4394 file2exec
= j
->prog
? j
->prog
: argv
[0];
4397 errno
= psf(NULL
, file2exec
, NULL
, &spattr
, (char *const *)argv
, environ
);
4406 jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
)
4412 if (jm
->parentmgr
) {
4413 jobmgr_export_env_from_other_jobs(jm
->parentmgr
, dict
);
4415 char **tmpenviron
= environ
;
4416 for (; *tmpenviron
; tmpenviron
++) {
4418 launch_data_t s
= launch_data_alloc(LAUNCH_DATA_STRING
);
4419 launch_data_set_string(s
, strchr(*tmpenviron
, '=') + 1);
4420 strncpy(envkey
, *tmpenviron
, sizeof(envkey
));
4421 *(strchr(envkey
, '=')) = '\0';
4422 launch_data_dict_insert(dict
, s
, envkey
);
4426 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
4427 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
4428 if ((tmp
= launch_data_new_string(ei
->value
))) {
4429 launch_data_dict_insert(dict
, tmp
, ei
->key
);
4436 jobmgr_setup_env_from_other_jobs(jobmgr_t jm
)
4441 if (jm
->parentmgr
) {
4442 jobmgr_setup_env_from_other_jobs(jm
->parentmgr
);
4445 LIST_FOREACH(ji
, &jm
->global_env_jobs
, global_env_sle
) {
4446 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
4447 setenv(ei
->key
, ei
->value
, 1);
4453 job_log_pids_with_weird_uids(job_t j
)
4455 size_t len
= sizeof(pid_t
) * get_kern_max_proc();
4457 uid_t u
= j
->mach_uid
;
4458 int i
= 0, kp_cnt
= 0;
4460 if (!launchd_apple_internal
) {
4465 if (!job_assumes(j
, pids
!= NULL
)) {
4469 runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS
, j
->p
, u
, 0);
4471 /* libproc actually has some serious performance drawbacks when used over sysctl(3) in
4472 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
4473 * one kernel call, libproc requires that we get a list of PIDs we're interested in
4474 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
4475 * struct back in a single call for each one.
4477 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
4478 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
4479 * libproc could go stale before we call proc_pidinfo().
4481 * Note that proc_list*() APIs return the number of PIDs given back, not the number
4482 * of bytes written to the buffer.
4484 if (job_assumes_zero_p(j
, (kp_cnt
= proc_listallpids(pids
, len
))) == -1) {
4488 for (i
= 0; i
< kp_cnt
; i
++) {
4489 struct proc_bsdshortinfo proc
;
4490 /* We perhaps should not log a bug here if we get ESRCH back, due to the race
4493 if (proc_pidinfo(pids
[i
], PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
4494 if (errno
!= ESRCH
) {
4495 (void)job_assumes_zero(j
, errno
);
4500 uid_t i_euid
= proc
.pbsi_uid
;
4501 uid_t i_uid
= proc
.pbsi_ruid
;
4502 uid_t i_svuid
= proc
.pbsi_svuid
;
4503 pid_t i_pid
= pids
[i
];
4505 if (i_euid
!= u
&& i_uid
!= u
&& i_svuid
!= u
) {
4509 job_log(j
, LOG_ERR
, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid
, proc
.pbsi_comm
, i_uid
, i_euid
, i_svuid
);
4511 // Temporarily disabled due to 5423935 and 4946119.
4513 // Ask the accountless process to exit.
4514 (void)job_assumes_zero_p(j
, kill2(i_pid
, SIGTERM
));
4522 static struct passwd
*
4523 job_getpwnam(job_t j
, const char *name
)
4526 * methodology for system daemons
4528 * first lookup user record without any opendirectoryd interaction,
4529 * we don't know what interprocess dependencies might be in flight.
4530 * if that fails, we re-enable opendirectoryd interaction and
4531 * re-issue the lookup. We have to disable the libinfo L1 cache
4532 * otherwise libinfo will return the negative cache entry on the retry
4534 #if !TARGET_OS_EMBEDDED
4535 struct passwd
*pw
= NULL
;
4537 if (pid1_magic
&& j
->mgr
== root_jobmgr
) {
4538 // 1 == SEARCH_MODULE_FLAG_DISABLED
4539 si_search_module_set_flags("ds", 1);
4540 gL1CacheEnabled
= false;
4542 pw
= getpwnam(name
);
4543 si_search_module_set_flags("ds", 0);
4547 pw
= getpwnam(name
);
4553 return getpwnam(name
);
4557 static struct group
*
4558 job_getgrnam(job_t j
, const char *name
)
4560 #if !TARGET_OS_EMBEDDED
4561 struct group
*gr
= NULL
;
4563 if (pid1_magic
&& j
->mgr
== root_jobmgr
) {
4564 si_search_module_set_flags("ds", 1);
4565 gL1CacheEnabled
= false;
4567 gr
= getgrnam(name
);
4569 si_search_module_set_flags("ds", 0);
4573 gr
= getgrnam(name
);
4579 return getgrnam(name
);
4584 job_postfork_test_user(job_t j
)
4586 // This function is all about 5201578
4588 const char *home_env_var
= getenv("HOME");
4589 const char *user_env_var
= getenv("USER");
4590 const char *logname_env_var
= getenv("LOGNAME");
4591 uid_t tmp_uid
, local_uid
= getuid();
4592 gid_t tmp_gid
, local_gid
= getgid();
4593 char shellpath
[PATH_MAX
];
4594 char homedir
[PATH_MAX
];
4595 char loginname
[2000];
4599 if (!job_assumes(j
, home_env_var
&& user_env_var
&& logname_env_var
4600 && strcmp(user_env_var
, logname_env_var
) == 0)) {
4604 if ((pwe
= job_getpwnam(j
, user_env_var
)) == NULL
) {
4605 job_log(j
, LOG_ERR
, "The account \"%s\" has been deleted out from under us!", user_env_var
);
4610 * We must copy the results of getpw*().
4612 * Why? Because subsequent API calls may call getpw*() as a part of
4613 * their implementation. Since getpw*() returns a [now thread scoped]
4614 * global, we must therefore cache the results before continuing.
4617 tmp_uid
= pwe
->pw_uid
;
4618 tmp_gid
= pwe
->pw_gid
;
4620 strlcpy(shellpath
, pwe
->pw_shell
, sizeof(shellpath
));
4621 strlcpy(loginname
, pwe
->pw_name
, sizeof(loginname
));
4622 strlcpy(homedir
, pwe
->pw_dir
, sizeof(homedir
));
4624 if (strcmp(loginname
, logname_env_var
) != 0) {
4625 job_log(j
, LOG_ERR
, "The %s environmental variable changed out from under us!", "USER");
4628 if (strcmp(homedir
, home_env_var
) != 0) {
4629 job_log(j
, LOG_ERR
, "The %s environmental variable changed out from under us!", "HOME");
4632 if (local_uid
!= tmp_uid
) {
4633 job_log(j
, LOG_ERR
, "The %cID of the account (%u) changed out from under us (%u)!",
4634 'U', tmp_uid
, local_uid
);
4637 if (local_gid
!= tmp_gid
) {
4638 job_log(j
, LOG_ERR
, "The %cID of the account (%u) changed out from under us (%u)!",
4639 'G', tmp_gid
, local_gid
);
4646 (void)job_assumes_zero_p(j
, kill2(getppid(), SIGTERM
));
4647 _exit(EXIT_FAILURE
);
4649 job_log(j
, LOG_WARNING
, "In a future build of the OS, this error will be fatal.");
4654 job_postfork_become_user(job_t j
)
4656 char loginname
[2000];
4657 char tmpdirpath
[PATH_MAX
];
4658 char shellpath
[PATH_MAX
];
4659 char homedir
[PATH_MAX
];
4662 gid_t desired_gid
= -1;
4663 uid_t desired_uid
= -1;
4665 if (getuid() != 0) {
4666 return job_postfork_test_user(j
);
4670 * I contend that having UID == 0 and GID != 0 is of dubious value.
4671 * Nevertheless, this used to work in Tiger. See: 5425348
4673 if (j
->groupname
&& !j
->username
) {
4674 j
->username
= "root";
4678 if ((pwe
= job_getpwnam(j
, j
->username
)) == NULL
) {
4679 job_log(j
, LOG_ERR
, "getpwnam(\"%s\") failed", j
->username
);
4682 } else if (j
->mach_uid
) {
4683 if ((pwe
= getpwuid(j
->mach_uid
)) == NULL
) {
4684 job_log(j
, LOG_ERR
, "getpwuid(\"%u\") failed", j
->mach_uid
);
4685 job_log_pids_with_weird_uids(j
);
4693 * We must copy the results of getpw*().
4695 * Why? Because subsequent API calls may call getpw*() as a part of
4696 * their implementation. Since getpw*() returns a [now thread scoped]
4697 * global, we must therefore cache the results before continuing.
4700 desired_uid
= pwe
->pw_uid
;
4701 desired_gid
= pwe
->pw_gid
;
4703 strlcpy(shellpath
, pwe
->pw_shell
, sizeof(shellpath
));
4704 strlcpy(loginname
, pwe
->pw_name
, sizeof(loginname
));
4705 strlcpy(homedir
, pwe
->pw_dir
, sizeof(homedir
));
4707 if (unlikely(pwe
->pw_expire
&& time(NULL
) >= pwe
->pw_expire
)) {
4708 job_log(j
, LOG_ERR
, "Expired account");
4709 _exit(EXIT_FAILURE
);
4713 if (unlikely(j
->username
&& strcmp(j
->username
, loginname
) != 0)) {
4714 job_log(j
, LOG_WARNING
, "Suspicious setup: User \"%s\" maps to user: %s", j
->username
, loginname
);
4715 } else if (unlikely(j
->mach_uid
&& (j
->mach_uid
!= desired_uid
))) {
4716 job_log(j
, LOG_WARNING
, "Suspicious setup: UID %u maps to UID %u", j
->mach_uid
, desired_uid
);
4722 if (unlikely((gre
= job_getgrnam(j
, j
->groupname
)) == NULL
)) {
4723 job_log(j
, LOG_ERR
, "getgrnam(\"%s\") failed", j
->groupname
);
4727 desired_gid
= gre
->gr_gid
;
4730 if (job_assumes_zero_p(j
, setlogin(loginname
)) == -1) {
4731 _exit(EXIT_FAILURE
);
4734 if (job_assumes_zero_p(j
, setgid(desired_gid
)) == -1) {
4735 _exit(EXIT_FAILURE
);
4739 * The kernel team and the DirectoryServices team want initgroups()
4740 * called after setgid(). See 4616864 for more information.
4743 if (likely(!j
->no_init_groups
)) {
4745 if (job_assumes_zero_p(j
, initgroups(loginname
, desired_gid
)) == -1) {
4746 _exit(EXIT_FAILURE
);
4749 /* Do our own little initgroups(). We do this to guarantee that we're
4750 * always opted into dynamic group resolution in the kernel. initgroups(3)
4751 * does not make this guarantee.
4753 int groups
[NGROUPS
], ngroups
;
4755 // A failure here isn't fatal, and we'll still get data we can use.
4756 (void)job_assumes_zero_p(j
, getgrouplist(j
->username
, desired_gid
, groups
, &ngroups
));
4758 if (job_assumes_zero_p(j
, syscall(SYS_initgroups
, ngroups
, groups
, desired_uid
)) == -1) {
4759 _exit(EXIT_FAILURE
);
4764 if (job_assumes_zero_p(j
, setuid(desired_uid
)) == -1) {
4765 _exit(EXIT_FAILURE
);
4768 r
= confstr(_CS_DARWIN_USER_TEMP_DIR
, tmpdirpath
, sizeof(tmpdirpath
));
4770 if (likely(r
> 0 && r
< sizeof(tmpdirpath
))) {
4771 setenv("TMPDIR", tmpdirpath
, 0);
4774 setenv("SHELL", shellpath
, 0);
4775 setenv("HOME", homedir
, 0);
4776 setenv("USER", loginname
, 0);
4777 setenv("LOGNAME", loginname
, 0);
4781 job_setup_attributes(job_t j
)
4783 struct limititem
*li
;
4786 if (unlikely(j
->setnice
)) {
4787 (void)job_assumes_zero_p(j
, setpriority(PRIO_PROCESS
, 0, j
->nice
));
4790 SLIST_FOREACH(li
, &j
->limits
, sle
) {
4793 if (job_assumes_zero_p(j
, getrlimit(li
->which
, &rl
) == -1)) {
4798 rl
.rlim_max
= li
->lim
.rlim_max
;
4801 rl
.rlim_cur
= li
->lim
.rlim_cur
;
4804 if (setrlimit(li
->which
, &rl
) == -1) {
4805 job_log_error(j
, LOG_WARNING
, "setrlimit()");
4809 if (unlikely(!j
->inetcompat
&& j
->session_create
)) {
4810 launchd_SessionCreate();
4813 if (unlikely(j
->low_pri_io
)) {
4814 (void)job_assumes_zero_p(j
, setiopolicy_np(IOPOL_TYPE_DISK
, IOPOL_SCOPE_PROCESS
, IOPOL_THROTTLE
));
4816 if (unlikely(j
->rootdir
)) {
4817 (void)job_assumes_zero_p(j
, chroot(j
->rootdir
));
4818 (void)job_assumes_zero_p(j
, chdir("."));
4821 job_postfork_become_user(j
);
4823 if (unlikely(j
->workingdir
)) {
4824 if (chdir(j
->workingdir
) == -1) {
4825 if (errno
== ENOENT
|| errno
== ENOTDIR
) {
4826 job_log(j
, LOG_ERR
, "Job specified non-existent working directory: %s", j
->workingdir
);
4828 (void)job_assumes_zero(j
, errno
);
4833 if (unlikely(j
->setmask
)) {
4838 (void)job_assumes_zero_p(j
, dup2(j
->stdin_fd
, STDIN_FILENO
));
4840 job_setup_fd(j
, STDIN_FILENO
, j
->stdinpath
, O_RDONLY
|O_CREAT
);
4842 job_setup_fd(j
, STDOUT_FILENO
, j
->stdoutpath
, O_WRONLY
|O_CREAT
|O_APPEND
);
4843 job_setup_fd(j
, STDERR_FILENO
, j
->stderrpath
, O_WRONLY
|O_CREAT
|O_APPEND
);
4845 jobmgr_setup_env_from_other_jobs(j
->mgr
);
4847 SLIST_FOREACH(ei
, &j
->env
, sle
) {
4848 setenv(ei
->key
, ei
->value
, 1);
4851 #if !TARGET_OS_EMBEDDED
4852 if (j
->jetsam_properties
) {
4853 (void)job_assumes_zero(j
, proc_setpcontrol(PROC_SETPC_TERMINATE
));
4857 #if TARGET_OS_EMBEDDED
4858 if (j
->main_thread_priority
!= 0) {
4859 struct sched_param params
;
4860 bzero(¶ms
, sizeof(params
));
4861 params
.sched_priority
= j
->main_thread_priority
;
4862 (void)job_assumes_zero_p(j
, pthread_setschedparam(pthread_self(), SCHED_OTHER
, ¶ms
));
4867 * We'd like to call setsid() unconditionally, but we have reason to
4868 * believe that prevents launchd from being able to send signals to
4869 * setuid children. We'll settle for process-groups.
4871 if (getppid() != 1) {
4872 (void)job_assumes_zero_p(j
, setpgid(0, 0));
4874 (void)job_assumes_zero_p(j
, setsid());
// job_setup_fd: open `path` with the given flags (O_NOCTTY forced, mode
// DEFFILEMODE), dup2() the result onto target_fd (used for the job's
// stdin/stdout/stderr redirection), then close the temporary fd.
// On open() failure it only logs a warning and leaves target_fd untouched.
// NOTE(review): extraction dropped some original lines (gaps in the embedded
// numbering); surviving text kept verbatim.
4879 job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
)
4887 if ((fd
= open(path
, flags
|O_NOCTTY
, DEFFILEMODE
)) == -1) {
4888 job_log_error(j
, LOG_WARNING
, "open(\"%s\", ...)", path
);
4892 (void)job_assumes_zero_p(j
, dup2(fd
, target_fd
));
4893 (void)job_assumes_zero(j
, runtime_close(fd
));
// calendarinterval_setalarm: compute the next fire time for a cron-style
// calendar interval via cronemu()/cronemu_wday(), insert `ci` into the
// globally sorted list of calendar events (ordered by when_next), and re-arm
// the single shared EVFILT_TIMER kevent for the earliest event's absolute time.
// When tm_wday is set, the earlier of the month-day and week-day computations
// wins (unless tm_mday is a wildcard). On success, logs the next run time
// with the trailing '\n' from ctime_r() stripped.
// NOTE(review): extraction dropped some original lines (e.g. ci_prev update,
// break statements); surviving text kept verbatim.
4897 calendarinterval_setalarm(job_t j
, struct calendarinterval
*ci
)
4899 struct calendarinterval
*ci_iter
, *ci_prev
= NULL
;
4900 time_t later
, head_later
;
4902 later
= cronemu(ci
->when
.tm_mon
, ci
->when
.tm_mday
, ci
->when
.tm_hour
, ci
->when
.tm_min
);
4904 if (ci
->when
.tm_wday
!= -1) {
4905 time_t otherlater
= cronemu_wday(ci
->when
.tm_wday
, ci
->when
.tm_hour
, ci
->when
.tm_min
);
4907 if (ci
->when
.tm_mday
== -1) {
4910 later
= later
< otherlater
? later
: otherlater
;
4914 ci
->when_next
= later
;
// Keep sorted_calendar_events ordered; insert before the first later event.
4916 LIST_FOREACH(ci_iter
, &sorted_calendar_events
, global_sle
) {
4917 if (ci
->when_next
< ci_iter
->when_next
) {
4918 LIST_INSERT_BEFORE(ci_iter
, ci
, global_sle
);
4925 if (ci_iter
== NULL
) {
4926 // ci must want to fire after every other timer, or there are no timers
4928 if (LIST_EMPTY(&sorted_calendar_events
)) {
4929 LIST_INSERT_HEAD(&sorted_calendar_events
, ci
, global_sle
);
4931 LIST_INSERT_AFTER(ci_prev
, ci
, global_sle
);
// Arm one absolute-seconds timer for the head of the sorted list.
4935 head_later
= LIST_FIRST(&sorted_calendar_events
)->when_next
;
4937 if (job_assumes_zero_p(j
, kevent_mod((uintptr_t)&sorted_calendar_events
, EVFILT_TIMER
, EV_ADD
, NOTE_ABSOLUTE
|NOTE_SECONDS
, head_later
, root_jobmgr
)) != -1) {
4938 char time_string
[100];
4939 size_t time_string_len
;
4941 ctime_r(&later
, time_string
);
4942 time_string_len
= strlen(time_string
);
4944 if (likely(time_string_len
&& time_string
[time_string_len
- 1] == '\n')) {
4945 time_string
[time_string_len
- 1] = '\0';
4948 job_log(j
, LOG_INFO
, "Scheduled to run again at %s", time_string
);
// jobmgr_log_bug / job_log_bug: osx_assumes-style callbacks that route an
// assertion-failure message to the jobmgr/job logger at LOG_ERR. The aslmsg
// parameter is unused; `ctx` presumably carries the jobmgr_t/job_t (the
// local jm/j bindings were dropped by extraction — verify against full source).
4953 jobmgr_log_bug(aslmsg asl_message
__attribute__((unused
)), void *ctx
, const char *message
)
4956 jobmgr_log(jm
, LOG_ERR
, "%s", message
);
4962 job_log_bug(aslmsg asl_message
__attribute__((unused
)), void *ctx
, const char *message
)
4965 job_log(j
, LOG_ERR
, "%s", message
);
// job_log_perf_statistics: emit a LOG_PERF dump for one job — run state,
// run count, accumulated getrusage() totals, current-instance wall time and
// proc_pidinfo(PROC_PIDTASKINFO) task statistics, plus each Mach service the
// job advertises (plist-declared, dynamically registered, or per-PID).
// No-op unless launchd_log_perf is enabled.
// NOTE(review): extraction dropped some original lines; text kept verbatim.
4976 if (!launchd_log_perf
) {
4980 job_log(j
, LOG_PERF
, "Job is currently %srunning.", j
->p
? "" : "not ");
4981 job_log(j
, LOG_PERF
, "Number of runs: %u", j
->nruns
);
4983 job_log(j
, LOG_PERF
, "Total runtime: %06f.", (double)j
->trt
/ (double)NSEC_PER_SEC
);
4984 job_log(j
, LOG_PERF
, "Total user time: %ld.%06u", j
->ru
.ru_utime
.tv_sec
, j
->ru
.ru_utime
.tv_usec
);
4985 job_log(j
, LOG_PERF
, "Total system time: %ld.%06u", j
->ru
.ru_stime
.tv_sec
, j
->ru
.ru_stime
.tv_usec
);
4986 job_log(j
, LOG_PERF
, "Largest maximum resident size: %lu", j
->ru
.ru_maxrss
);
4987 job_log(j
, LOG_PERF
, "Total integral shared memory size: %lu", j
->ru
.ru_ixrss
);
4988 job_log(j
, LOG_PERF
, "Total integral unshared data size: %lu", j
->ru
.ru_idrss
);
4989 job_log(j
, LOG_PERF
, "Total integral unshared stack size: %lu", j
->ru
.ru_isrss
);
4990 job_log(j
, LOG_PERF
, "Total page reclaims: %lu", j
->ru
.ru_minflt
);
4991 job_log(j
, LOG_PERF
, "Total page faults: %lu", j
->ru
.ru_majflt
);
4992 job_log(j
, LOG_PERF
, "Total swaps: %lu", j
->ru
.ru_nswap
);
4993 job_log(j
, LOG_PERF
, "Total input ops: %lu", j
->ru
.ru_inblock
);
4994 job_log(j
, LOG_PERF
, "Total output ops: %lu", j
->ru
.ru_oublock
);
4995 job_log(j
, LOG_PERF
, "Total messages sent: %lu", j
->ru
.ru_msgsnd
);
4996 job_log(j
, LOG_PERF
, "Total messages received: %lu", j
->ru
.ru_msgrcv
);
4997 job_log(j
, LOG_PERF
, "Total signals received: %lu", j
->ru
.ru_nsignals
);
4998 job_log(j
, LOG_PERF
, "Total voluntary context switches: %lu", j
->ru
.ru_nvcsw
);
4999 job_log(j
, LOG_PERF
, "Total involuntary context switches: %lu", j
->ru
.ru_nivcsw
);
// Current-instance stats: wall time since start, then live task info.
5003 uint64_t rt
= runtime_get_nanoseconds_since(j
->start_time
);
5004 job_log(j
, LOG_PERF
, "Current instance wall time: %06f", (double)rt
/ (double)NSEC_PER_SEC
);
5006 struct proc_taskinfo ti
;
5007 int r
= proc_pidinfo(j
->p
, PROC_PIDTASKINFO
, 1, &ti
, PROC_PIDTASKINFO_SIZE
);
5009 job_log(j
, LOG_PERF
, "Current instance virtual size: %llu", ti
.pti_virtual_size
);
5010 job_log(j
, LOG_PERF
, "Current instance resident size: %llu", ti
.pti_resident_size
);
5011 job_log(j
, LOG_PERF
, "Current instance user time: %06f", (double)ti
.pti_total_user
/ (double)NSEC_PER_SEC
);
5012 job_log(j
, LOG_PERF
, "Current instance system time: %06f", (double)ti
.pti_total_system
/ (double)NSEC_PER_SEC
);
5013 job_log(j
, LOG_PERF
, "Current instance number of user threads: %llu", ti
.pti_threads_user
);
5014 job_log(j
, LOG_PERF
, "Current instance number of system threads: %llu", ti
.pti_threads_system
);
5015 job_log(j
, LOG_PERF
, "Current instance default thread policy: %d", ti
.pti_policy
);
5016 job_log(j
, LOG_PERF
, "Current instance number of page faults: %d", ti
.pti_faults
);
5017 job_log(j
, LOG_PERF
, "Current instance number of page-ins: %d", ti
.pti_pageins
);
5018 job_log(j
, LOG_PERF
, "Current instance number of COW faults: %d", ti
.pti_cow_faults
);
5019 job_log(j
, LOG_PERF
, "Current instance number of Mach syscalls: %d", ti
.pti_syscalls_mach
);
5020 job_log(j
, LOG_PERF
, "Current instance number of Unix syscalls: %d", ti
.pti_syscalls_unix
);
5021 job_log(j
, LOG_PERF
, "Current instance number of threads: %d", ti
.pti_threadnum
);
5022 job_log(j
, LOG_PERF
, "Current instance number of running threads: %d", ti
.pti_numrunning
);
5023 job_log(j
, LOG_PERF
, "Current instance task priority: %d", ti
.pti_priority
);
// proc_pidinfo failure path.
5025 job_log(j
, LOG_PERF
, "proc_pidinfo(%d): %d: %s", j
->p
, errno
, strerror(errno
));
5030 job_log(j
, LOG_PERF
, "Job is configured to always run.");
// Enumerate the job's Mach services by how they were registered.
5033 struct machservice
*msi
= NULL
;
5034 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
5036 job_log(j
, LOG_PERF
, "Job advertises service in plist: %s", msi
->name
);
5037 } else if (!(msi
->event_channel
|| msi
->per_pid
)) {
5038 job_log(j
, LOG_PERF
, "Job has dynamically registered service: %s", msi
->name
);
5039 } else if (msi
->per_pid
) {
5040 job_log(j
, LOG_PERF
, "Job advertises per-PID service: %s", msi
->name
);
// job_logv: core formatter for all job_log*() calls. Builds a
// launchd_syslog_attr describing the job (falling back to
// "com.apple.launchd.job-unknown" when j is NULL), prepends the job label on
// embedded builds and appends ": errno: strerror" when err != 0, then hands
// the message to launchd_vsyslog(). If bootstrap_port is set we are in a
// forked child pre-exec, so the message is routed back via _vproc_logv().
// Temporarily opens the LOG_DEBUG mask for jobs with debug enabled.
// NOTE(review): extraction dropped some original lines (declarations of
// newmsg/newmsgsz/oldmask, err branches); text kept verbatim.
5046 job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
)
5048 const char *label2use
= j
? j
->label
: "com.apple.launchd.job-unknown";
5049 const char *mgr2use
= j
? j
->mgr
->name
: "com.apple.launchd.jobmanager-unknown";
5054 struct launchd_syslog_attr attr
= {
5055 .from_name
= launchd_label
,
5056 .about_name
= label2use
,
5057 .session_name
= mgr2use
,
5059 .from_uid
= getuid(),
5060 .from_pid
= getpid(),
5061 .about_pid
= j
? j
->p
: 0,
5064 /* Hack: If bootstrap_port is set, we must be on the child side of a
5065 * fork(2), but before the exec*(3). Let's route the log message back to
5068 if (bootstrap_port
) {
5069 return _vproc_logv(pri
, err
, msg
, ap
);
// 200 extra bytes leaves room for the label/errno decoration below.
5072 newmsgsz
= strlen(msg
) + 200;
5073 newmsg
= alloca(newmsgsz
);
5076 #if !TARGET_OS_EMBEDDED
5077 snprintf(newmsg
, newmsgsz
, "%s: %d: %s", msg
, err
, strerror(err
));
5079 snprintf(newmsg
, newmsgsz
, "(%s) %s: %d: %s", label2use
, msg
, err
, strerror(err
));
5082 #if !TARGET_OS_EMBEDDED
5083 snprintf(newmsg
, newmsgsz
, "%s", msg
);
5085 snprintf(newmsg
, newmsgsz
, "(%s) %s", label2use
, msg
);
5089 if (j
&& unlikely(j
->debug
)) {
5090 oldmask
= setlogmask(LOG_UPTO(LOG_DEBUG
));
5093 launchd_vsyslog(&attr
, newmsg
, ap
);
5095 if (j
&& unlikely(j
->debug
)) {
5096 setlogmask(oldmask
);
// Varargs logging wrappers. job_log_error()/jobmgr_log_error() forward the
// current errno to the *_logv core; job_log() forwards err == 0 (no errno
// decoration). The va_start/va_end around each call were dropped by
// extraction; text kept verbatim.
5101 job_log_error(job_t j
, int pri
, const char *msg
, ...)
5106 job_logv(j
, pri
, errno
, msg
, ap
);
5111 job_log(job_t j
, int pri
, const char *msg
, ...)
5116 job_logv(j
, pri
, 0, msg
, ap
);
5122 jobmgr_log_error(jobmgr_t jm
, int pri
, const char *msg
, ...)
5127 jobmgr_logv(jm
, pri
, errno
, msg
, ap
);
// jobmgr_log_perf_statistics: depth-first LOG_PERF dump of a job manager —
// recurse into submanagers, describe the domain flavor (XPC singleton,
// XPC private, or bootstrap_subset), then dump per-job statistics for every
// job in the manager.
5135 jobmgr_t jmi
= NULL
;
5136 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
5137 jobmgr_log_perf_statistics(jmi
);
5140 if (jm
->xpc_singleton
) {
5141 jobmgr_log(jm
, LOG_PERF
, "XPC Singleton Domain: %s", jm
->shortdesc
);
5142 } else if (jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) {
5143 jobmgr_log(jm
, LOG_PERF
, "XPC Private Domain: %s", jm
->owner
);
5144 } else if (jm
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) {
5145 jobmgr_log(jm
, LOG_PERF
, "Created via bootstrap_subset()");
5148 jobmgr_log(jm
, LOG_PERF
, "Jobs in job manager:");
5151 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
5152 job_log_perf_statistics(ji
);
5155 jobmgr_log(jm
, LOG_PERF
, "End of job list.");
// jobmgr_log: varargs wrapper forwarding to jobmgr_logv with err == 0.
5159 jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...)
5164 jobmgr_logv(jm
, pri
, 0, msg
, ap
);
// jobmgr_logv: prefix the message with the manager's name — '%' characters
// in the name are escaped (the doubling branch was dropped by extraction),
// append strerror(err) when err != 0, then either bubble the message up to
// the parent manager or emit it via launchd_vsyslog() at the root.
5169 jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
)
5177 size_t i
, o
, jmname_len
= strlen(jm
->name
), newmsgsz
;
// Worst case every char is '%' and must be doubled, hence (len+1)*2.
5179 newname
= alloca((jmname_len
+ 1) * 2);
5180 newmsgsz
= (jmname_len
+ 1) * 2 + strlen(msg
) + 100;
5181 newmsg
= alloca(newmsgsz
);
5183 for (i
= 0, o
= 0; i
< jmname_len
; i
++, o
++) {
5184 if (jm
->name
[i
] == '%') {
5188 newname
[o
] = jm
->name
[i
];
5193 snprintf(newmsg
, newmsgsz
, "%s: %s: %s", newname
, msg
, strerror(err
));
5195 snprintf(newmsg
, newmsgsz
, "%s: %s", newname
, msg
);
5198 if (jm
->parentmgr
) {
// Note: err is already baked into newmsg, so 0 is passed upward.
5199 jobmgr_logv(jm
->parentmgr
, pri
, 0, newmsg
, ap
);
5201 struct launchd_syslog_attr attr
= {
5202 .from_name
= launchd_label
,
5203 .about_name
= launchd_label
,
5204 .session_name
= jm
->name
,
5206 .from_uid
= getuid(),
5207 .from_pid
= getpid(),
5208 .about_pid
= getpid(),
5211 launchd_vsyslog(&attr
, newmsg
, ap
);
// cal_dict_walk: context carried through launch_data_dict_iterate() while
// parsing StartCalendarInterval (fields dropped by extraction; presumably a
// job_t j and a struct tm tmptm — verify against full source).
5215 struct cal_dict_walk
{
// Dictionary-walk callback: validate one StartCalendarInterval key
// (Minute/Hour/Day/Weekday/Month) and store it into the struct tm, casting
// to the field's own type. Out-of-range values log a warning; per the
// comment below, an error is signalled back to the caller through the
// struct (tm_sec = -1 in the full source). Month is converted to the
// 0-based tm_mon convention for cron compatibility.
5221 calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
)
5223 struct cal_dict_walk
*cdw
= context
;
5224 struct tm
*tmptm
= &cdw
->tmptm
;
5228 if (unlikely(LAUNCH_DATA_INTEGER
!= launch_data_get_type(obj
))) {
5229 // hack to let caller know something went wrong
5234 val
= launch_data_get_integer(obj
);
5237 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is less than zero.", key
);
5238 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MINUTE
) == 0) {
5240 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key
);
5243 tmptm
->tm_min
= (typeof(tmptm
->tm_min
)) val
;
5245 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_HOUR
) == 0) {
5247 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key
);
5250 tmptm
->tm_hour
= (typeof(tmptm
->tm_hour
)) val
;
5252 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_DAY
) == 0) {
5253 if (val
< 1 || val
> 31) {
5254 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key
);
5257 tmptm
->tm_mday
= (typeof(tmptm
->tm_mday
)) val
;
5259 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_WEEKDAY
) == 0) {
5261 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key
);
5264 tmptm
->tm_wday
= (typeof(tmptm
->tm_wday
)) val
;
5266 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MONTH
) == 0) {
5268 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key
);
5271 tmptm
->tm_mon
= (typeof(tmptm
->tm_mon
)) val
;
5272 tmptm
->tm_mon
-= 1; // 4798263 cron compatibility
5278 calendarinterval_new_from_obj(job_t j
, launch_data_t obj
)
5280 struct cal_dict_walk cdw
;
5283 memset(&cdw
.tmptm
, 0, sizeof(0));
5285 cdw
.tmptm
.tm_min
= -1;
5286 cdw
.tmptm
.tm_hour
= -1;
5287 cdw
.tmptm
.tm_mday
= -1;
5288 cdw
.tmptm
.tm_wday
= -1;
5289 cdw
.tmptm
.tm_mon
= -1;
5291 if (!job_assumes(j
, obj
!= NULL
)) {
5295 if (unlikely(LAUNCH_DATA_DICTIONARY
!= launch_data_get_type(obj
))) {
5299 launch_data_dict_iterate(obj
, calendarinterval_new_from_obj_dict_walk
, &cdw
);
5301 if (unlikely(cdw
.tmptm
.tm_sec
== -1)) {
5305 return calendarinterval_new(j
, &cdw
.tmptm
);
// calendarinterval_new: allocate a calendarinterval from the parsed struct tm,
// link it into the job's list, arm its alarm, and take a weak runtime ref so
// launchd stays alive while timers are pending.
5309 calendarinterval_new(job_t j
, struct tm
*w
)
5311 struct calendarinterval
*ci
= calloc(1, sizeof(struct calendarinterval
));
5313 if (!job_assumes(j
, ci
!= NULL
)) {
5320 SLIST_INSERT_HEAD(&j
->cal_intervals
, ci
, sle
);
5322 calendarinterval_setalarm(j
, ci
);
5324 runtime_add_weak_ref();
// calendarinterval_delete: unlink from both the job's list and the global
// sorted list, and drop the weak runtime ref (free was dropped by extraction).
5330 calendarinterval_delete(job_t j
, struct calendarinterval
*ci
)
5332 SLIST_REMOVE(&j
->cal_intervals
, ci
, calendarinterval
, sle
);
5333 LIST_REMOVE(ci
, global_sle
);
5337 runtime_del_weak_ref();
// calendarinterval_sanity_check: if the earliest calendar event is already
// overdue, raise SIGUSR1 so the main loop services timers.
5341 calendarinterval_sanity_check(void)
5343 struct calendarinterval
*ci
= LIST_FIRST(&sorted_calendar_events
);
5344 time_t now
= time(NULL
);
5346 if (unlikely(ci
&& (ci
->when_next
< now
))) {
5347 (void)jobmgr_assumes_zero_p(root_jobmgr
, raise(SIGUSR1
));
// calendarinterval_callback: timer fired — walk the sorted list, stop at the
// first event still in the future (list is sorted), re-arm each due event,
// mark its job start-pending and dispatch it.
5352 calendarinterval_callback(void)
5354 struct calendarinterval
*ci
, *ci_next
;
5355 time_t now
= time(NULL
);
5357 LIST_FOREACH_SAFE(ci
, &sorted_calendar_events
, global_sle
, ci_next
) {
5360 if (ci
->when_next
> now
) {
5364 LIST_REMOVE(ci
, global_sle
);
5365 calendarinterval_setalarm(j
, ci
);
5367 j
->start_pending
= true;
5368 job_dispatch(j
, false);
// socketgroup_new: create a named socket group for the job, copying the
// caller's fd array into a freshly allocated buffer, link it into j->sockets
// and take a weak runtime ref. The name is stored in the trailing
// name_init flexible buffer (struct allocated with strlen(name)+1 extra).
5373 socketgroup_new(job_t j
, const char *name
, int *fds
, size_t fd_cnt
)
5375 struct socketgroup
*sg
= calloc(1, sizeof(struct socketgroup
) + strlen(name
) + 1);
5377 if (!job_assumes(j
, sg
!= NULL
)) {
5381 sg
->fds
= calloc(1, fd_cnt
* sizeof(int));
5382 sg
->fd_cnt
= fd_cnt
;
5384 if (!job_assumes(j
, sg
->fds
!= NULL
)) {
5389 memcpy(sg
->fds
, fds
, fd_cnt
* sizeof(int));
5390 strcpy(sg
->name_init
, name
);
5392 SLIST_INSERT_HEAD(&j
->sockets
, sg
, sle
);
5394 runtime_add_weak_ref();
// socketgroup_delete: for each fd, unlink the filesystem path of AF_UNIX
// sockets (via getsockname) before closing, then unlink the group from the
// job and drop the weak runtime ref.
5400 socketgroup_delete(job_t j
, struct socketgroup
*sg
)
5404 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
5406 struct sockaddr_storage ss
;
5407 struct sockaddr_un
*sun
= (struct sockaddr_un
*)&ss
;
5408 socklen_t ss_len
= sizeof(ss
);
5411 if (job_assumes_zero(j
, getsockname(sg
->fds
[i
], (struct sockaddr
*)&ss
, &ss_len
) != -1)
5412 && job_assumes(j
, ss_len
> 0) && (ss
.ss_family
== AF_UNIX
)) {
5413 (void)job_assumes(j
, unlink(sun
->sun_path
) != -1);
5414 // We might conditionally need to delete a directory here
5417 (void)job_assumes_zero_p(j
, runtime_close(sg
->fds
[i
]));
5420 SLIST_REMOVE(&j
->sockets
, sg
, socketgroup
, sle
);
5425 runtime_del_weak_ref();
// socketgroup_kevent_mod: bulk EV_ADD or EV_DELETE an EVFILT_READ kevent for
// every fd in the group (do_add selects watch vs. ignore), logging the fd
// list, then check the per-entry EV_ERROR results from kevent_bulk_mod.
// NOTE(review): `buf` declaration was dropped by extraction.
5429 socketgroup_kevent_mod(job_t j
, struct socketgroup
*sg
, bool do_add
)
5431 struct kevent kev
[sg
->fd_cnt
];
5433 unsigned int i
, buf_off
= 0;
5435 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
5436 EV_SET(&kev
[i
], sg
->fds
[i
], EVFILT_READ
, do_add
? EV_ADD
: EV_DELETE
, 0, 0, j
);
5437 buf_off
+= snprintf(buf
+ buf_off
, sizeof(buf
) - buf_off
, " %d", sg
->fds
[i
]);
5440 job_log(j
, LOG_DEBUG
, "%s Sockets:%s", do_add
? "Watching" : "Ignoring", buf
);
5442 (void)job_assumes_zero_p(j
, kevent_bulk_mod(kev
, sg
->fd_cnt
));
// kevent_bulk_mod reports per-entry status via EV_ERROR + data.
5444 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
5445 (void)job_assumes(j
, kev
[i
].flags
& EV_ERROR
);
5446 errno
= (typeof(errno
)) kev
[i
].data
;
5447 (void)job_assumes_zero(j
, kev
[i
].data
);
// Thin wrappers selecting ignore (false) vs. watch (true).
5452 socketgroup_ignore(job_t j
, struct socketgroup
*sg
)
5454 socketgroup_kevent_mod(j
, sg
, false);
5458 socketgroup_watch(job_t j
, struct socketgroup
*sg
)
5460 socketgroup_kevent_mod(j
, sg
, true);
// socketgroup_callback: socket became readable — dispatch (start) the job.
5464 socketgroup_callback(job_t j
)
5466 job_dispatch(j
, true);
// envitem_new: record a key=value environment variable for the job. Global
// DYLD_* variables are rejected unless explicitly allowed (security: keeps
// jobs from injecting dyld behavior system-wide). Key and value share one
// allocation: key at key_init, value right after the key's NUL. Global items
// go on j->global_env (and register the job in the manager's
// global_env_jobs list on first insert); others go on j->env.
5470 envitem_new(job_t j
, const char *k
, const char *v
, bool global
)
5472 if (global
&& !launchd_allow_global_dyld_envvars
) {
5473 if (strncmp("DYLD_", k
, sizeof("DYLD_") - 1) == 0) {
5474 job_log(j
, LOG_ERR
, "Ignoring global environment variable submitted by job (variable=value): %s=%s", k
, v
);
5479 struct envitem
*ei
= calloc(1, sizeof(struct envitem
) + strlen(k
) + 1 + strlen(v
) + 1);
5481 if (!job_assumes(j
, ei
!= NULL
)) {
5485 strcpy(ei
->key_init
, k
);
5486 ei
->value
= ei
->key_init
+ strlen(k
) + 1;
5487 strcpy(ei
->value
, v
);
5490 if (SLIST_EMPTY(&j
->global_env
)) {
5491 LIST_INSERT_HEAD(&j
->mgr
->global_env_jobs
, j
, global_env_sle
);
5493 SLIST_INSERT_HEAD(&j
->global_env
, ei
, sle
);
5495 SLIST_INSERT_HEAD(&j
->env
, ei
, sle
);
5498 job_log(j
, LOG_DEBUG
, "Added environmental variable: %s=%s", k
, v
);
// envitem_delete: remove the item from the global or per-job list; when the
// last global item goes away, drop the job from global_env_jobs.
5504 envitem_delete(job_t j
, struct envitem
*ei
, bool global
)
5507 SLIST_REMOVE(&j
->global_env
, ei
, envitem
, sle
);
5508 if (SLIST_EMPTY(&j
->global_env
)) {
5509 LIST_REMOVE(j
, global_env_sle
);
5512 SLIST_REMOVE(&j
->env
, ei
, envitem
, sle
);
// envitem_setup: launch_data dictionary-iterate callback for the job's
// EnvironmentVariables; skips the reserved trusted-fd variable.
5519 envitem_setup(launch_data_t obj
, const char *key
, void *context
)
5523 if (launch_data_get_type(obj
) != LAUNCH_DATA_STRING
) {
5527 if (strncmp(LAUNCHD_TRUSTED_FD_ENV
, key
, sizeof(LAUNCHD_TRUSTED_FD_ENV
) - 1) != 0) {
5528 envitem_new(j
, key
, launch_data_get_string(obj
), j
->importing_global_env
);
5530 job_log(j
, LOG_DEBUG
, "Ignoring reserved environmental variable: %s", key
);
// limititem_update: find (or allocate and link) the limititem for resource
// `w` and set its hard limit (rlim_max) when importing HardResourceLimits,
// otherwise its soft limit (rlim_cur).
5535 limititem_update(job_t j
, int w
, rlim_t r
)
5537 struct limititem
*li
;
5539 SLIST_FOREACH(li
, &j
->limits
, sle
) {
5540 if (li
->which
== w
) {
5546 li
= calloc(1, sizeof(struct limititem
));
5548 if (!job_assumes(j
, li
!= NULL
)) {
5552 SLIST_INSERT_HEAD(&j
->limits
, li
, sle
);
5557 if (j
->importing_hard_limits
) {
5558 li
->lim
.rlim_max
= r
;
5561 li
->lim
.rlim_cur
= r
;
// limititem_delete: unlink one resource limit from the job.
5569 limititem_delete(job_t j
, struct limititem
*li
)
5571 SLIST_REMOVE(&j
->limits
, li
, limititem
, sle
);
// seatbelt_setup_flags: dictionary-iterate callback mapping boolean sandbox
// flag keys onto j->seatbelt_flags bits (only SANDBOX_NAMED visible here).
5578 seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
)
5582 if (launch_data_get_type(obj
) != LAUNCH_DATA_BOOL
) {
5583 job_log(j
, LOG_WARNING
, "Sandbox flag value must be boolean: %s", key
);
5587 if (launch_data_get_bool(obj
) == false) {
5591 if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOX_NAMED
) == 0) {
5592 j
->seatbelt_flags
|= SANDBOX_NAMED
;
// limititem_setup: dictionary-iterate callback translating a plist limit key
// into its RLIMIT_* constant via the launchd_keys2limits table, then calls
// limititem_update. Unknown keys fall out of the loop with i == limits_cnt.
5598 limititem_setup(launch_data_t obj
, const char *key
, void *context
)
5601 size_t i
, limits_cnt
= (sizeof(launchd_keys2limits
) / sizeof(launchd_keys2limits
[0]));
5604 if (launch_data_get_type(obj
) != LAUNCH_DATA_INTEGER
) {
5608 rl
= launch_data_get_integer(obj
);
5610 for (i
= 0; i
< limits_cnt
; i
++) {
5611 if (strcasecmp(launchd_keys2limits
[i
].key
, key
) == 0) {
5616 if (i
== limits_cnt
) {
5620 limititem_update(j
, launchd_keys2limits
[i
].val
, rl
);
// job_useless: decide whether an exited job should be garbage-collected
// rather than relaunched. Reasons include: run-once jobs that already ran,
// pending removal, shutdown in progress (with special logging when this was
// the last child), legacy Mach jobs with no services or that failed to
// check in, and executables with no supported architecture (EBADARCH) —
// relaunching those can never succeed. shutdown_monitor jobs are exempt.
// NOTE(review): return statements were dropped by extraction.
5624 job_useless(job_t j
)
5626 if ((j
->legacy_LS_job
|| j
->only_once
) && j
->start_time
!= 0) {
5627 if (j
->legacy_LS_job
&& j
->j_port
) {
5630 job_log(j
, LOG_INFO
, "Exited. Was only configured to run once.");
5632 } else if (j
->removal_pending
) {
5633 job_log(j
, LOG_DEBUG
, "Exited while removal was pending.");
5635 } else if (j
->shutdown_monitor
) {
5637 } else if (j
->mgr
->shutting_down
&& !j
->mgr
->parentmgr
) {
5638 job_log(j
, LOG_DEBUG
, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children
, total_anon_children
);
5639 if (total_children
== 0 && !j
->anonymous
) {
5640 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Job was last to exit during shutdown of: %s.", j
->mgr
->name
);
5643 } else if (j
->legacy_mach_job
) {
5644 if (SLIST_EMPTY(&j
->machservices
)) {
5645 job_log(j
, LOG_INFO
, "Garbage collecting");
5647 } else if (!j
->checkedin
) {
5648 job_log(j
, LOG_WARNING
, "Failed to check-in!");
5652 /* If the job's executable does not have any valid architectures (for
5653 * example, if it's a PowerPC-only job), then we don't even bother
5654 * trying to relaunch it, as we have no reasonable expectation that
5655 * the situation will change.
5657 * <rdar://problem/9106979>
5659 if (!j
->did_exec
&& WEXITSTATUS(j
->last_exit_status
) == EBADARCH
) {
5660 job_log(j
, LOG_ERR
, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
// job_keepalive: decide whether an on-demand/keep-alive job should be
// (re)started now. Checks, in order: manager shutting down (no), global
// on-demand override (no, with a kextd carve-out), needs_kickoff, pending
// start, unconditional KeepAlive, queued Mach messages on any advertised
// service, satisfied external events, and each KeepAlive semaphore condition
// (network state, successful/crashed exit, other-job enabled/disabled or
// active/inactive). NOTE(review): extraction dropped returns, some case
// labels and fallthroughs; text kept verbatim — verify control flow against
// the full source.
5669 job_keepalive(job_t j
)
5671 mach_msg_type_number_t statusCnt
;
5672 mach_port_status_t status
;
5673 struct semaphoreitem
*si
;
5674 struct machservice
*ms
;
5675 bool good_exit
= (WIFEXITED(j
->last_exit_status
) && WEXITSTATUS(j
->last_exit_status
) == 0);
5676 bool is_not_kextd
= (launchd_apple_internal
|| (strcmp(j
->label
, "com.apple.kextd") != 0));
5678 if (unlikely(j
->mgr
->shutting_down
)) {
5685 * We definitely need to revisit this after Leopard ships. Please see
5686 * launchctl.c for the other half of this hack.
5688 if (unlikely((j
->mgr
->global_on_demand_cnt
> 0) && is_not_kextd
)) {
5692 if (unlikely(j
->needs_kickoff
)) {
5693 job_log(j
, LOG_DEBUG
, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
5697 if (j
->start_pending
) {
5698 job_log(j
, LOG_DEBUG
, "KeepAlive check: Pent-up non-IPC launch criteria.");
5703 job_log(j
, LOG_DEBUG
, "KeepAlive check: job configured to run continuously.");
// Any queued message on an advertised Mach service means demand exists.
5707 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
5708 statusCnt
= MACH_PORT_RECEIVE_STATUS_COUNT
;
5709 if (mach_port_get_attributes(mach_task_self(), ms
->port
, MACH_PORT_RECEIVE_STATUS
,
5710 (mach_port_info_t
)&status
, &statusCnt
) != KERN_SUCCESS
) {
5713 if (status
.mps_msgcount
) {
5714 job_log(j
, LOG_DEBUG
, "KeepAlive check: %d queued Mach messages on service: %s",
5715 status
.mps_msgcount
, ms
->name
);
5720 /* TODO: Coalesce external events and semaphore items, since they're basically
5723 struct externalevent
*ei
= NULL
;
5724 LIST_FOREACH(ei
, &j
->events
, job_le
) {
5725 if (ei
->state
== ei
->wanted_state
) {
// Per-condition KeepAlive semaphores; each case compares current state with
// the wanted_state implied by the semaphore's kind.
5730 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
5731 bool wanted_state
= false;
5736 wanted_state
= true;
5738 if (network_up
== wanted_state
) {
5739 job_log(j
, LOG_DEBUG
, "KeepAlive: The network is %s.", wanted_state
? "up" : "down");
5743 case SUCCESSFUL_EXIT
:
5744 wanted_state
= true;
5746 if (good_exit
== wanted_state
) {
5747 job_log(j
, LOG_DEBUG
, "KeepAlive: The exit state was %s.", wanted_state
? "successful" : "failure");
5752 wanted_state
= true;
5754 if (j
->crashed
== wanted_state
) {
5758 case OTHER_JOB_ENABLED
:
5759 wanted_state
= true;
5760 case OTHER_JOB_DISABLED
:
5761 if ((bool)job_find(NULL
, si
->what
) == wanted_state
) {
5762 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "enabled" : "disabled", si
->what
);
5766 case OTHER_JOB_ACTIVE
:
5767 wanted_state
= true;
5768 case OTHER_JOB_INACTIVE
:
5769 if ((other_j
= job_find(NULL
, si
->what
))) {
5770 if ((bool)other_j
->p
== wanted_state
) {
5771 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "active" : "inactive", si
->what
);
// NOTE(review): the function header was dropped by extraction — this is
// presumably the interior of job_active(job_t j), which returns a
// human-readable reason string while the job is still active and (in the
// full source) NULL once it is safe to reap; verify against the full source.
// Visible checks: running PID under shutdown monitoring, valid PID,
// outstanding senders on the privileged port, and any still-active Mach
// service receive right (skipped when workaround9359725 simulated an exit so
// shutdown can proceed).
5785 struct machservice
*ms
;
5786 if (j
->p
&& j
->shutdown_monitor
) {
5787 return "Monitoring shutdown";
5790 return "PID is still valid";
5793 if (j
->priv_port_has_senders
) {
5794 return "Privileged Port still has outstanding senders";
5797 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
5798 /* If we've simulated an exit, we mark the job as non-active, even
5799 * though doing so will leave it in an unsafe state. We do this so that
5800 * shutdown can proceed. See <rdar://problem/11126530>.
5802 * For a more sustainable solution, see <rdar://problem/11131336>.
5804 if (!j
->workaround9359725
&& ms
->recv
&& machservice_active(ms
)) {
5805 job_log(j
, LOG_INFO
, "Mach service is still active: %s", ms
->name
);
5806 return "Mach service is still active";
// machservice_watch: add the service's receive right to launchd's port set
// so incoming messages generate demand for the job.
5814 machservice_watch(job_t j
, struct machservice
*ms
)
5817 (void)job_assumes_zero(j
, runtime_add_mport(ms
->port
, NULL
));
// machservice_ignore: remove the service's port from the port set.
5822 machservice_ignore(job_t j
, struct machservice
*ms
)
5824 /* We only add ports whose receive rights we control into the port set, so
5825 * don't attempt to remove te service from the port set if we didn't put it
5826 * there in the first place. Otherwise, we could wind up trying to access a
5827 * bogus index (like MACH_PORT_DEAD) or zeroing a valid one out.
5829 * <rdar://problem/10898014>
5832 (void)job_assumes_zero(j
, runtime_remove_mport(ms
->port
));
// machservice_resetport: tear down the service's receive right and send
// right, create a fresh port pair, and re-hash the service by its new port.
5837 machservice_resetport(job_t j
, struct machservice
*ms
)
5839 LIST_REMOVE(ms
, port_hash_sle
);
5840 (void)job_assumes_zero(j
, launchd_mport_close_recv(ms
->port
));
5841 (void)job_assumes_zero(j
, launchd_mport_deallocate(ms
->port
));
5844 (void)job_assumes_zero(j
, launchd_mport_create_recv(&ms
->port
));
5845 (void)job_assumes_zero(j
, launchd_mport_make_send(ms
->port
));
5846 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
// machservice_stamp_port: stamp the port's kernel context with the first
// bytes of the job's program basename (prog, or argv[0]) for debugging.
// The bytes are byte-swapped on little-endian so the name reads naturally
// in a big-endian hex dump of the context value.
5850 machservice_stamp_port(job_t j
, struct machservice
*ms
)
5852 mach_port_context_t ctx
= 0;
5853 char *where2get
= j
->prog
? j
->prog
: j
->argv
[0];
5856 if ((prog
= strrchr(where2get
, '/'))) {
// NOTE(review): strncpy into &ctx copies at most sizeof(ctx) bytes and may
// not NUL-terminate — intentional here, since ctx is an integer tag.
5862 (void)strncpy((char *)&ctx
, prog
, sizeof(ctx
));
5863 #if __LITTLE_ENDIAN__
5865 ctx
= OSSwapBigToHostInt64(ctx
);
5867 ctx
= OSSwapBigToHostInt32(ctx
);
5871 (void)job_assumes_zero(j
, mach_port_set_context(mach_task_self(), ms
->port
, ctx
));
// machservice_new: create a machservice record named `name` for job j.
// If *serviceport is MACH_PORT_NULL, allocates a receive right plus a send
// right and returns the port through *serviceport; otherwise adopts the
// caller's port and marks the service active. Refuses MACH_PORT_DEAD (see
// comment below). Registers the service in the job's list, in the name hash
// of the appropriate bootstrap (root when the flat namespace is in effect
// and the manager is not an explicit subset or XPC domain; dedicated
// per-instance jobs are not name-hashed), and in the global port hash.
5874 struct machservice
*
5875 machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
)
5877 /* Don't create new MachServices for dead ports. This is primarily for
5878 * clients who use bootstrap_register2(). They can pass in a send right, but
5879 * then that port can immediately go dead. Hilarity ensues.
5881 * <rdar://problem/10898014>
5883 if (*serviceport
== MACH_PORT_DEAD
) {
5887 struct machservice
*ms
= calloc(1, sizeof(struct machservice
) + strlen(name
) + 1);
5888 if (!job_assumes(j
, ms
!= NULL
)) {
5892 strcpy((char *)ms
->name
, name
);
5895 ms
->per_pid
= pid_local
;
5897 if (likely(*serviceport
== MACH_PORT_NULL
)) {
5898 if (job_assumes_zero(j
, launchd_mport_create_recv(&ms
->port
)) != KERN_SUCCESS
) {
5902 if (job_assumes_zero(j
, launchd_mport_make_send(ms
->port
)) != KERN_SUCCESS
) {
5905 *serviceport
= ms
->port
;
5908 ms
->port
= *serviceport
;
5909 ms
->isActive
= true;
5912 SLIST_INSERT_HEAD(&j
->machservices
, ms
, sle
);
5914 jobmgr_t where2put
= j
->mgr
;
5915 // XPC domains are separate from Mach bootstraps.
5916 if (!(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
5917 if (launchd_flat_mach_namespace
&& !(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
)) {
5918 where2put
= root_jobmgr
;
5922 /* Don't allow MachServices added by multiple-instance jobs to be looked up
5923 * by others. We could just do this with a simple bit, but then we'd have to
5924 * uniquify the names ourselves to avoid collisions. This is just easier.
5926 if (!j
->dedicated_instance
) {
5927 LIST_INSERT_HEAD(&where2put
->ms_hash
[hash_ms(ms
->name
)], ms
, name_hash_sle
);
5929 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
5932 machservice_stamp_port(j
, ms
);
5935 job_log(j
, LOG_DEBUG
, "Mach service added%s: %s", (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) ? " to private namespace" : "", name
);
// Error path: undo the receive right created above (free dropped by extraction).
5939 (void)job_assumes_zero(j
, launchd_mport_close_recv(ms
->port
));
// machservice_new_alias: create an alias record for an existing service so
// it can be looked up from job j's manager; the alias shares the original's
// name and is inserted into the manager's name hash and the job's list.
5945 struct machservice
*
5946 machservice_new_alias(job_t j
, struct machservice
*orig
)
5948 struct machservice
*ms
= calloc(1, sizeof(struct machservice
) + strlen(orig
->name
) + 1);
5949 if (job_assumes(j
, ms
!= NULL
)) {
5950 strcpy((char *)ms
->name
, orig
->name
);
5954 LIST_INSERT_HEAD(&j
->mgr
->ms_hash
[hash_ms(ms
->name
)], ms
, name_hash_sle
);
5955 SLIST_INSERT_HEAD(&j
->machservices
, ms
, sle
);
5956 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Service aliased into job manager: %s", orig
->name
);
// machservice_status: report bootstrap status for a service, dereferencing
// aliases first: ACTIVE while checked in, ON_DEMAND if the owning job is
// on-demand, otherwise INACTIVE.
5963 machservice_status(struct machservice
*ms
)
5965 ms
= ms
->alias
? ms
->alias
: ms
;
5967 return BOOTSTRAP_STATUS_ACTIVE
;
5968 } else if (ms
->job
->ondemand
) {
5969 return BOOTSTRAP_STATUS_ON_DEMAND
;
5971 return BOOTSTRAP_STATUS_INACTIVE
;
// job_setup_exception_port: install the crash/resource exception handler for
// a job. The port is, in order of preference: the job's alternate handler
// service (looked up by name, falling back to the default with a warning),
// launchd's internal kernel port, or the global exception server. The thread
// state flavor is architecture dependent. With a target task, sets the
// task-level ports (tolerating MACH_SEND_INVALID_DEST if the task already
// died); as PID 1 with no task, sets host-level exception ports instead.
5976 job_setup_exception_port(job_t j
, task_t target_task
)
5978 struct machservice
*ms
;
5979 thread_state_flavor_t f
= 0;
5980 mach_port_t exc_port
= the_exception_server
;
5982 if (unlikely(j
->alt_exc_handler
)) {
5983 ms
= jobmgr_lookup_service(j
->mgr
, j
->alt_exc_handler
, true, 0);
5985 exc_port
= machservice_port(ms
);
5987 job_log(j
, LOG_WARNING
, "Falling back to default Mach exception handler. Could not find: %s", j
->alt_exc_handler
);
5989 } else if (unlikely(j
->internal_exc_handler
)) {
5990 exc_port
= runtime_get_kernel_port();
5991 } else if (unlikely(!exc_port
)) {
5995 #if defined (__ppc__) || defined(__ppc64__)
5996 f
= PPC_THREAD_STATE64
;
5997 #elif defined(__i386__) || defined(__x86_64__)
5998 f
= x86_THREAD_STATE
;
5999 #elif defined(__arm__)
6000 f
= ARM_THREAD_STATE
;
6002 #error "unknown architecture"
6005 if (likely(target_task
)) {
6006 kern_return_t kr
= task_set_exception_ports(target_task
, EXC_MASK_CRASH
| EXC_MASK_RESOURCE
, exc_port
, EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
);
6008 if (kr
!= MACH_SEND_INVALID_DEST
) {
6009 (void)job_assumes_zero(j
, kr
);
6011 job_log(j
, LOG_WARNING
, "Task died before exception port could be set.");
6014 } else if (pid1_magic
&& the_exception_server
) {
6015 mach_port_t mhp
= mach_host_self();
6016 (void)job_assumes_zero(j
, host_set_exception_ports(mhp
, EXC_MASK_CRASH
| EXC_MASK_RESOURCE
, the_exception_server
, EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
));
6017 (void)job_assumes_zero(j
, launchd_mport_deallocate(mhp
));
// job_set_exception_port: claim `port` as the global exception server (first
// claimant wins) and install it immediately; later claims only warn.
6022 job_set_exception_port(job_t j
, mach_port_t port
)
6024 if (unlikely(!the_exception_server
)) {
6025 the_exception_server
= port
;
6026 job_setup_exception_port(j
, 0);
6028 job_log(j
, LOG_WARNING
, "The exception server is already claimed!");
// machservice_setup_options: dictionary-iterate callback applying per-service
// plist options. Integer values bind the port as a task special port
// (rejecting the reserved kernel/host/name/bootstrap slots) or, for PID 1,
// a host special port above HOST_MAX_SPECIAL_KERNEL_PORT. Booleans toggle
// debug-on-close / reset-at-close / hide-until-checkin, claim the global
// exception server, or register as the KUNC (UND) server. Strings select
// crash drain policy ("One"/"All"); a dictionary value also claims the
// exception server. The host port obtained up front is deallocated at the
// end. NOTE(review): switch breaks and some branch bodies were dropped by
// extraction; text kept verbatim.
6033 machservice_setup_options(launch_data_t obj
, const char *key
, void *context
)
6035 struct machservice
*ms
= context
;
6036 mach_port_t mhp
= mach_host_self();
6040 if (!job_assumes(ms
->job
, mhp
!= MACH_PORT_NULL
)) {
6044 switch (launch_data_get_type(obj
)) {
6045 case LAUNCH_DATA_INTEGER
:
6046 which_port
= (int)launch_data_get_integer(obj
); // XXX we should bound check this...
6047 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT
) == 0) {
6048 switch (which_port
) {
6049 case TASK_KERNEL_PORT
:
6050 case TASK_HOST_PORT
:
6051 case TASK_NAME_PORT
:
6052 case TASK_BOOTSTRAP_PORT
:
6053 /* I find it a little odd that zero isn't reserved in the header.
6054 * Normally Mach is fairly good about this convention...
6057 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved task special port: %d", which_port
);
6060 ms
->special_port_num
= which_port
;
6061 SLIST_INSERT_HEAD(&special_ports
, ms
, special_port_sle
);
6064 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT
) == 0 && pid1_magic
) {
6065 if (which_port
> HOST_MAX_SPECIAL_KERNEL_PORT
) {
6066 (void)job_assumes_zero(ms
->job
, (errno
= host_set_special_port(mhp
, which_port
, ms
->port
)));
6068 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved host special port: %d", which_port
);
6071 case LAUNCH_DATA_BOOL
:
6072 b
= launch_data_get_bool(obj
);
6073 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE
) == 0) {
6074 ms
->debug_on_close
= b
;
6075 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_RESETATCLOSE
) == 0) {
6077 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN
) == 0) {
6079 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER
) == 0) {
6080 job_set_exception_port(ms
->job
, ms
->port
);
6081 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_KUNCSERVER
) == 0) {
6083 (void)job_assumes_zero(ms
->job
, host_set_UNDServer(mhp
, ms
->port
));
6086 case LAUNCH_DATA_STRING
:
6087 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH
) == 0) {
6088 const char *option
= launch_data_get_string(obj
);
6089 if (strcasecmp(option
, "One") == 0) {
6090 ms
->drain_one_on_crash
= true;
6091 } else if (strcasecmp(option
, "All") == 0) {
6092 ms
->drain_all_on_crash
= true;
6096 case LAUNCH_DATA_DICTIONARY
:
6097 job_set_exception_port(ms
->job
, ms
->port
);
6103 (void)job_assumes_zero(ms
->job
, launchd_mport_deallocate(mhp
));
// machservice_setup: dictionary-iterate callback for the job's MachServices
// plist key. Rejects names already owned by another job in the same
// bootstrap, creates the service (launchd-allocated port, not per-PID),
// marks it inactive until checkin, and applies per-service options when the
// value is a dictionary.
6107 machservice_setup(launch_data_t obj
, const char *key
, void *context
)
6110 struct machservice
*ms
;
6111 mach_port_t p
= MACH_PORT_NULL
;
6113 if (unlikely(ms
= jobmgr_lookup_service(j
->mgr
, key
, false, 0))) {
6114 job_log(j
, LOG_WARNING
, "Conflict with job: %s over Mach service: %s", ms
->job
->label
, key
);
6118 if (!job_assumes(j
, (ms
= machservice_new(j
, key
, &p
, false)) != NULL
)) {
6122 ms
->isActive
= false;
6125 if (launch_data_get_type(obj
) == LAUNCH_DATA_DICTIONARY
) {
6126 launch_data_dict_iterate(obj
, machservice_setup_options
, ms
);
6131 jobmgr_do_garbage_collection(jobmgr_t jm
)
6133 jobmgr_t jmi
= NULL
, jmn
= NULL
;
6134 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
6135 jobmgr_do_garbage_collection(jmi
);
6138 if (!jm
->shutting_down
) {
6142 if (SLIST_EMPTY(&jm
->submgrs
)) {
6143 jobmgr_log(jm
, LOG_DEBUG
, "No submanagers left.");
6145 jobmgr_log(jm
, LOG_DEBUG
, "Still have submanagers.");
6146 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
6147 jobmgr_log(jm
, LOG_DEBUG
, "Submanager: %s", jmi
->name
);
6152 job_t ji
= NULL
, jn
= NULL
;
6153 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
6154 if (ji
->anonymous
) {
6158 // Let the shutdown monitor be up until the very end.
6159 if (ji
->shutdown_monitor
) {
6163 /* On our first pass through, open a transaction for all the jobs that
6164 * need to be dirty at shutdown. We'll close these transactions once the
6165 * jobs that do not need to be dirty at shutdown have all exited.
6167 if (ji
->dirty_at_shutdown
&& !jm
->shutdown_jobs_dirtied
) {
6168 job_open_shutdown_transaction(ji
);
6171 const char *active
= job_active(ji
);
6175 job_log(ji
, LOG_DEBUG
, "Job is active: %s", active
);
6178 if (!ji
->dirty_at_shutdown
) {
6182 if (ji
->clean_kill
) {
6183 job_log(ji
, LOG_DEBUG
, "Job was killed cleanly.");
6185 job_log(ji
, LOG_DEBUG
, "Job was sent SIGTERM%s.", ji
->sent_sigkill
? " and SIGKILL" : "");
6190 jm
->shutdown_jobs_dirtied
= true;
6192 if (!jm
->shutdown_jobs_cleaned
) {
6193 /* Once all normal jobs have exited, we clean the dirty-at-shutdown
6194 * jobs and make them into normal jobs so that the above loop will
6195 * handle them appropriately.
6197 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
6198 if (ji
->anonymous
) {
6202 if (!job_active(ji
)) {
6206 if (ji
->shutdown_monitor
) {
6210 job_close_shutdown_transaction(ji
);
6214 jm
->shutdown_jobs_cleaned
= true;
6217 if (SLIST_EMPTY(&jm
->submgrs
) && actives
== 0) {
6218 /* We may be in a situation where the shutdown monitor is all that's
6219 * left, in which case we want to stop it. Like dirty-at-shutdown
6220 * jobs, we turn it back into a normal job so that the main loop
6221 * treats it appropriately.
6224 * <rdar://problem/10756306>
6225 * <rdar://problem/11034971>
6226 * <rdar://problem/11549541>
6228 if (jm
->monitor_shutdown
&& _launchd_shutdown_monitor
) {
6229 /* The rest of shutdown has completed, so we can kill the shutdown
6230 * monitor now like it was any other job.
6232 _launchd_shutdown_monitor
->shutdown_monitor
= false;
6234 job_log(_launchd_shutdown_monitor
, LOG_NOTICE
| LOG_CONSOLE
, "Stopping shutdown monitor.");
6235 job_stop(_launchd_shutdown_monitor
);
6236 _launchd_shutdown_monitor
= NULL
;
6238 jobmgr_log(jm
, LOG_DEBUG
, "Removing.");
6249 jobmgr_kill_stray_children(jobmgr_t jm
, pid_t
*p
, size_t np
)
6251 /* I maintain that stray processes should be at the mercy of launchd during
6252 * shutdown, but nevertheless, things like diskimages-helper can stick
6253 * around, and SIGKILLing them can result in data loss. So we send SIGTERM
6254 * to all the strays and don't wait for them to exit before moving on.
6256 * See rdar://problem/6562592
6259 for (i
= 0; i
< np
; i
++) {
6261 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Sending SIGTERM to PID %u and continuing...", p
[i
]);
6262 (void)jobmgr_assumes_zero_p(jm
, kill2(p
[i
], SIGTERM
));
6268 jobmgr_log_stray_children(jobmgr_t jm
, bool kill_strays
)
6270 size_t kp_skipped
= 0, len
= sizeof(pid_t
) * get_kern_max_proc();
6272 int i
= 0, kp_cnt
= 0;
6274 if (likely(jm
->parentmgr
|| !pid1_magic
)) {
6278 if (!jobmgr_assumes(jm
, (pids
= malloc(len
)) != NULL
)) {
6282 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS
);
6284 if (jobmgr_assumes_zero_p(jm
, (kp_cnt
= proc_listallpids(pids
, len
))) == -1) {
6288 pid_t
*ps
= (pid_t
*)calloc(sizeof(pid_t
), kp_cnt
);
6289 for (i
= 0; i
< kp_cnt
; i
++) {
6290 struct proc_bsdshortinfo proc
;
6291 if (proc_pidinfo(pids
[i
], PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
6292 if (errno
!= ESRCH
) {
6293 (void)jobmgr_assumes_zero(jm
, errno
);
6300 pid_t p_i
= pids
[i
];
6301 pid_t pp_i
= proc
.pbsi_ppid
;
6302 pid_t pg_i
= proc
.pbsi_pgid
;
6303 const char *z
= (proc
.pbsi_status
== SZOMB
) ? "zombie " : "";
6304 const char *n
= proc
.pbsi_comm
;
6306 if (unlikely(p_i
== 0 || p_i
== 1)) {
6311 if (_launchd_shutdown_monitor
&& pp_i
== _launchd_shutdown_monitor
->p
) {
6316 // We might have some jobs hanging around that we've decided to shut down in spite of.
6317 job_t j
= jobmgr_find_by_pid(jm
, p_i
, false);
6318 if (!j
|| (j
&& j
->anonymous
)) {
6319 jobmgr_log(jm
, LOG_INFO
| LOG_CONSOLE
, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z
, j
? "anonymous job" : "process", p_i
, pp_i
, pg_i
, n
);
6322 if (pp_i
== getpid() && !jobmgr_assumes(jm
, proc
.pbsi_status
!= SZOMB
)) {
6323 if (jobmgr_assumes_zero(jm
, waitpid(p_i
, &status
, WNOHANG
)) == 0) {
6324 jobmgr_log(jm
, LOG_INFO
| LOG_CONSOLE
, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status
));
6328 job_t leader
= jobmgr_find_by_pid(jm
, pg_i
, false);
6329 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6330 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6331 * hints to the kernel along the way, so that it could shutdown certain subsystems when
6332 * their userspace emissaries go away, before the call to reboot(2).
6334 if (leader
&& leader
->ignore_pg_at_shutdown
) {
6345 if ((kp_cnt
- kp_skipped
> 0) && kill_strays
) {
6346 jobmgr_kill_stray_children(jm
, ps
, kp_cnt
- kp_skipped
);
6355 jobmgr_parent(jobmgr_t jm
)
6357 return jm
->parentmgr
;
6361 job_uncork_fork(job_t j
)
6365 job_log(j
, LOG_DEBUG
, "Uncorking the fork().");
6366 /* this unblocks the child and avoids a race
6367 * between the above fork() and the kevent_mod() */
6368 (void)job_assumes(j
, write(j
->fork_fd
, &c
, sizeof(c
)) == sizeof(c
));
6369 (void)job_assumes_zero_p(j
, runtime_close(j
->fork_fd
));
6374 jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
, bool skip_init
, mach_port_t asport
)
6376 job_t bootstrapper
= NULL
;
6379 __OSX_COMPILETIME_ASSERT__(offsetof(struct jobmgr_s
, kqjobmgr_callback
) == 0);
6381 if (unlikely(jm
&& requestorport
== MACH_PORT_NULL
)) {
6382 jobmgr_log(jm
, LOG_ERR
, "Mach sub-bootstrap create request requires a requester port");
6386 jmr
= calloc(1, sizeof(struct jobmgr_s
) + (name
? (strlen(name
) + 1) : NAME_MAX
+ 1));
6388 if (!jobmgr_assumes(jm
, jmr
!= NULL
)) {
6396 jmr
->kqjobmgr_callback
= jobmgr_callback
;
6397 strcpy(jmr
->name_init
, name
? name
: "Under construction");
6399 jmr
->req_port
= requestorport
;
6401 if ((jmr
->parentmgr
= jm
)) {
6402 SLIST_INSERT_HEAD(&jm
->submgrs
, jmr
, sle
);
6405 if (jm
&& jobmgr_assumes_zero(jmr
, launchd_mport_notify_req(jmr
->req_port
, MACH_NOTIFY_DEAD_NAME
)) != KERN_SUCCESS
) {
6409 if (transfer_port
!= MACH_PORT_NULL
) {
6410 (void)jobmgr_assumes(jmr
, jm
!= NULL
);
6411 jmr
->jm_port
= transfer_port
;
6412 } else if (!jm
&& !pid1_magic
) {
6413 char *trusted_fd
= getenv(LAUNCHD_TRUSTED_FD_ENV
);
6416 snprintf(service_buf
, sizeof(service_buf
), "com.apple.launchd.peruser.%u", getuid());
6418 if (jobmgr_assumes_zero(jmr
, bootstrap_check_in(bootstrap_port
, service_buf
, &jmr
->jm_port
)) != 0) {
6423 int dfd
, lfd
= (int) strtol(trusted_fd
, NULL
, 10);
6425 if ((dfd
= dup(lfd
)) >= 0) {
6426 (void)jobmgr_assumes_zero_p(jmr
, runtime_close(dfd
));
6427 (void)jobmgr_assumes_zero_p(jmr
, runtime_close(lfd
));
6430 unsetenv(LAUNCHD_TRUSTED_FD_ENV
);
6433 // cut off the Libc cache, we don't want to deadlock against ourself
6434 inherited_bootstrap_port
= bootstrap_port
;
6435 bootstrap_port
= MACH_PORT_NULL
;
6436 osx_assert_zero(launchd_mport_notify_req(inherited_bootstrap_port
, MACH_NOTIFY_DEAD_NAME
));
6438 // We set this explicitly as we start each child
6439 osx_assert_zero(launchd_set_bport(MACH_PORT_NULL
));
6440 } else if (jobmgr_assumes_zero(jmr
, launchd_mport_create_recv(&jmr
->jm_port
)) != KERN_SUCCESS
) {
6445 sprintf(jmr
->name_init
, "%u", MACH_PORT_INDEX(jmr
->jm_port
));
6449 (void)jobmgr_assumes_zero_p(jmr
, kevent_mod(SIGTERM
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
));
6450 (void)jobmgr_assumes_zero_p(jmr
, kevent_mod(SIGUSR1
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
));
6451 (void)jobmgr_assumes_zero_p(jmr
, kevent_mod(SIGUSR2
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
));
6452 (void)jobmgr_assumes_zero_p(jmr
, kevent_mod(0, EVFILT_FS
, EV_ADD
, VQ_MOUNT
|VQ_UNMOUNT
|VQ_UPDATE
, 0, jmr
));
6455 if (name
&& !skip_init
) {
6456 bootstrapper
= jobmgr_init_session(jmr
, name
, sflag
);
6459 if (!bootstrapper
|| !bootstrapper
->weird_bootstrap
) {
6460 if (jobmgr_assumes_zero(jmr
, runtime_add_mport(jmr
->jm_port
, job_server
)) != KERN_SUCCESS
) {
6465 jobmgr_log(jmr
, LOG_DEBUG
, "Created job manager%s%s", jm
? " with parent: " : ".", jm
? jm
->name
: "");
6468 bootstrapper
->asport
= asport
;
6470 jobmgr_log(jmr
, LOG_DEBUG
, "Bootstrapping new job manager with audit session %u", asport
);
6471 (void)jobmgr_assumes(jmr
, job_dispatch(bootstrapper
, true) != NULL
);
6473 jmr
->req_asport
= asport
;
6476 if (asport
!= MACH_PORT_NULL
) {
6477 (void)jobmgr_assumes_zero(jmr
, launchd_mport_copy_send(asport
));
6480 if (jmr
->parentmgr
) {
6481 runtime_add_weak_ref();
6497 jobmgr_new_xpc_singleton_domain(jobmgr_t jm
, name_t name
)
6499 jobmgr_t
new = NULL
;
6501 /* These job managers are basically singletons, so we use the root Mach
6502 * bootstrap port as their requestor ports so they'll never go away.
6504 mach_port_t req_port
= root_jobmgr
->jm_port
;
6505 if (jobmgr_assumes_zero(jm
, launchd_mport_make_send(req_port
)) == KERN_SUCCESS
) {
6506 new = jobmgr_new(root_jobmgr
, req_port
, MACH_PORT_NULL
, false, name
, true, MACH_PORT_NULL
);
6508 new->properties
|= BOOTSTRAP_PROPERTY_XPC_SINGLETON
;
6509 new->properties
|= BOOTSTRAP_PROPERTY_XPC_DOMAIN
;
6510 new->xpc_singleton
= true;
6518 jobmgr_find_xpc_per_user_domain(jobmgr_t jm
, uid_t uid
)
6520 jobmgr_t jmi
= NULL
;
6521 LIST_FOREACH(jmi
, &_s_xpc_user_domains
, xpc_le
) {
6522 if (jmi
->req_euid
== uid
) {
6528 (void)snprintf(name
, sizeof(name
), "com.apple.xpc.domain.peruser.%u", uid
);
6529 jmi
= jobmgr_new_xpc_singleton_domain(jm
, name
);
6530 if (jobmgr_assumes(jm
, jmi
!= NULL
)) {
6531 /* We need to create a per-user launchd for this UID if there isn't one
6532 * already so we can grab the bootstrap port.
6534 job_t puj
= jobmgr_lookup_per_user_context_internal(NULL
, uid
, &jmi
->req_bsport
);
6535 if (jobmgr_assumes(jmi
, puj
!= NULL
)) {
6536 (void)jobmgr_assumes_zero(jmi
, launchd_mport_copy_send(puj
->asport
));
6537 (void)jobmgr_assumes_zero(jmi
, launchd_mport_copy_send(jmi
->req_bsport
));
6538 jmi
->shortdesc
= "per-user";
6539 jmi
->req_asport
= puj
->asport
;
6540 jmi
->req_asid
= puj
->asid
;
6541 jmi
->req_euid
= uid
;
6544 LIST_INSERT_HEAD(&_s_xpc_user_domains
, jmi
, xpc_le
);
6554 jobmgr_find_xpc_per_session_domain(jobmgr_t jm
, au_asid_t asid
)
6556 jobmgr_t jmi
= NULL
;
6557 LIST_FOREACH(jmi
, &_s_xpc_session_domains
, xpc_le
) {
6558 if (jmi
->req_asid
== asid
) {
6564 (void)snprintf(name
, sizeof(name
), "com.apple.xpc.domain.persession.%i", asid
);
6565 jmi
= jobmgr_new_xpc_singleton_domain(jm
, name
);
6566 if (jobmgr_assumes(jm
, jmi
!= NULL
)) {
6567 (void)jobmgr_assumes_zero(jmi
, launchd_mport_make_send(root_jobmgr
->jm_port
));
6568 jmi
->shortdesc
= "per-session";
6569 jmi
->req_bsport
= root_jobmgr
->jm_port
;
6570 (void)jobmgr_assumes_zero(jmi
, audit_session_port(asid
, &jmi
->req_asport
));
6571 jmi
->req_asid
= asid
;
6575 LIST_INSERT_HEAD(&_s_xpc_session_domains
, jmi
, xpc_le
);
6584 jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
)
6586 const char *bootstrap_tool
[] = { "/bin/launchctl", "bootstrap", "-S", session_type
, sflag
? "-s" : NULL
, NULL
};
6587 char thelabel
[1000];
6590 snprintf(thelabel
, sizeof(thelabel
), "com.apple.launchctl.%s", session_type
);
6591 bootstrapper
= job_new(jm
, thelabel
, NULL
, bootstrap_tool
);
6593 if (jobmgr_assumes(jm
, bootstrapper
!= NULL
) && (jm
->parentmgr
|| !pid1_magic
)) {
6594 bootstrapper
->is_bootstrapper
= true;
6597 // <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs)
6598 snprintf(buf
, sizeof(buf
), "0x%X:0:0", getuid());
6599 envitem_new(bootstrapper
, "__CF_USER_TEXT_ENCODING", buf
, false);
6600 bootstrapper
->weird_bootstrap
= true;
6601 (void)jobmgr_assumes(jm
, job_setup_machport(bootstrapper
));
6602 } else if (bootstrapper
&& strncmp(session_type
, VPROCMGR_SESSION_SYSTEM
, sizeof(VPROCMGR_SESSION_SYSTEM
)) == 0) {
6603 #if TARGET_OS_EMBEDDED
6604 bootstrapper
->pstype
= POSIX_SPAWN_IOS_INTERACTIVE
;
6606 bootstrapper
->is_bootstrapper
= true;
6607 if (jobmgr_assumes(jm
, pid1_magic
)) {
6608 // Have our system bootstrapper print out to the console.
6609 bootstrapper
->stdoutpath
= strdup(_PATH_CONSOLE
);
6610 bootstrapper
->stderrpath
= strdup(_PATH_CONSOLE
);
6612 if (launchd_console
) {
6613 (void)jobmgr_assumes_zero_p(jm
, kevent_mod((uintptr_t)fileno(launchd_console
), EVFILT_VNODE
, EV_ADD
| EV_ONESHOT
, NOTE_REVOKE
, 0, jm
));
6618 jm
->session_initialized
= true;
6619 return bootstrapper
;
6623 jobmgr_delete_anything_with_port(jobmgr_t jm
, mach_port_t port
)
6625 struct machservice
*ms
, *next_ms
;
6628 /* Mach ports, unlike Unix descriptors, are reference counted. In other
6629 * words, when some program hands us a second or subsequent send right to a
6630 * port we already have open, the Mach kernel gives us the same port number
6631 * back and increments an reference count associated with the port. This
6632 * This forces us, when discovering that a receive right at the other end
6633 * has been deleted, to wander all of our objects to see what weird places
6634 * clients might have handed us the same send right to use.
6637 if (jm
== root_jobmgr
) {
6638 if (port
== inherited_bootstrap_port
) {
6639 (void)jobmgr_assumes_zero(jm
, launchd_mport_deallocate(port
));
6640 inherited_bootstrap_port
= MACH_PORT_NULL
;
6642 return jobmgr_shutdown(jm
);
6645 LIST_FOREACH_SAFE(ms
, &port_hash
[HASH_PORT(port
)], port_hash_sle
, next_ms
) {
6646 if (ms
->port
== port
&& !ms
->recv
) {
6647 machservice_delete(ms
->job
, ms
, true);
6652 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
6653 jobmgr_delete_anything_with_port(jmi
, port
);
6656 if (jm
->req_port
== port
) {
6657 jobmgr_log(jm
, LOG_DEBUG
, "Request port died: %i", MACH_PORT_INDEX(port
));
6658 return jobmgr_shutdown(jm
);
6664 struct machservice
*
6665 jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
)
6667 struct machservice
*ms
;
6670 jobmgr_log(jm
, LOG_DEBUG
, "Looking up %sservice %s", target_pid
? "per-PID " : "", name
);
6673 /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
6674 * bootstrap in other bootstraps.
6677 // Start in the given bootstrap.
6678 if (unlikely((target_j
= jobmgr_find_by_pid(jm
, target_pid
, false)) == NULL
)) {
6679 // If we fail, do a deep traversal.
6680 if (unlikely((target_j
= jobmgr_find_by_pid_deep(root_jobmgr
, target_pid
, true)) == NULL
)) {
6681 jobmgr_log(jm
, LOG_DEBUG
, "Didn't find PID %i", target_pid
);
6686 SLIST_FOREACH(ms
, &target_j
->machservices
, sle
) {
6687 if (ms
->per_pid
&& strcmp(name
, ms
->name
) == 0) {
6692 job_log(target_j
, LOG_DEBUG
, "Didn't find per-PID Mach service: %s", name
);
6696 jobmgr_t where2look
= jm
;
6697 // XPC domains are separate from Mach bootstraps.
6698 if (!(jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
6699 if (launchd_flat_mach_namespace
&& !(jm
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
)) {
6700 where2look
= root_jobmgr
;
6704 LIST_FOREACH(ms
, &where2look
->ms_hash
[hash_ms(name
)], name_hash_sle
) {
6705 if (!ms
->per_pid
&& strcmp(name
, ms
->name
) == 0) {
6710 if (jm
->parentmgr
== NULL
|| !check_parent
) {
6714 return jobmgr_lookup_service(jm
->parentmgr
, name
, true, 0);
6718 machservice_port(struct machservice
*ms
)
6724 machservice_job(struct machservice
*ms
)
6730 machservice_hidden(struct machservice
*ms
)
6736 machservice_active(struct machservice
*ms
)
6738 return ms
->isActive
;
6742 machservice_name(struct machservice
*ms
)
6748 machservice_drain_port(struct machservice
*ms
)
6750 bool drain_one
= ms
->drain_one_on_crash
;
6751 bool drain_all
= ms
->drain_all_on_crash
;
6753 if (!job_assumes(ms
->job
, (drain_one
|| drain_all
) == true)) {
6757 job_log(ms
->job
, LOG_INFO
, "Draining %s...", ms
->name
);
6759 char req_buff
[sizeof(union __RequestUnion__catch_mach_exc_subsystem
) * 2];
6760 char rep_buff
[sizeof(union __ReplyUnion__catch_mach_exc_subsystem
)];
6761 mig_reply_error_t
*req_hdr
= (mig_reply_error_t
*)&req_buff
;
6762 mig_reply_error_t
*rep_hdr
= (mig_reply_error_t
*)&rep_buff
;
6764 mach_msg_return_t mr
= ~MACH_MSG_SUCCESS
;
6767 /* This should be a direct check on the Mach service to see if it's an exception-handling
6768 * port, and it will break things if ReportCrash or SafetyNet start advertising other
6769 * Mach services. But for now, it should be okay.
6771 if (ms
->job
->alt_exc_handler
|| ms
->job
->internal_exc_handler
) {
6772 mr
= launchd_exc_runtime_once(ms
->port
, sizeof(req_buff
), sizeof(rep_buff
), req_hdr
, rep_hdr
, 0);
6774 mach_msg_options_t options
= MACH_RCV_MSG
|
6777 mr
= mach_msg((mach_msg_header_t
*)req_hdr
, options
, 0, sizeof(req_buff
), ms
->port
, 0, MACH_PORT_NULL
);
6779 case MACH_MSG_SUCCESS
:
6780 mach_msg_destroy((mach_msg_header_t
*)req_hdr
);
6782 case MACH_RCV_TIMED_OUT
:
6784 case MACH_RCV_TOO_LARGE
:
6785 launchd_syslog(LOG_WARNING
, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff
));
6791 } while (drain_all
&& mr
!= MACH_RCV_TIMED_OUT
);
6795 machservice_delete(job_t j
, struct machservice
*ms
, bool port_died
)
6798 /* HACK: Egregious code duplication. But dealing with aliases is a
6799 * pretty simple affair since they can't and shouldn't have any complex
6800 * behaviors associated with them.
6802 LIST_REMOVE(ms
, name_hash_sle
);
6803 SLIST_REMOVE(&j
->machservices
, ms
, machservice
, sle
);
6808 if (unlikely(ms
->debug_on_close
)) {
6809 job_log(j
, LOG_NOTICE
, "About to enter kernel debugger because of Mach port: 0x%x", ms
->port
);
6810 (void)job_assumes_zero(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
));
6813 if (ms
->recv
&& job_assumes(j
, !machservice_active(ms
))) {
6814 job_log(j
, LOG_DEBUG
, "Closing receive right for %s", ms
->name
);
6815 (void)job_assumes_zero(j
, launchd_mport_close_recv(ms
->port
));
6818 (void)job_assumes_zero(j
, launchd_mport_deallocate(ms
->port
));
6820 if (unlikely(ms
->port
== the_exception_server
)) {
6821 the_exception_server
= 0;
6824 job_log(j
, LOG_DEBUG
, "Mach service deleted%s: %s", port_died
? " (port died)" : "", ms
->name
);
6826 if (ms
->special_port_num
) {
6827 SLIST_REMOVE(&special_ports
, ms
, machservice
, special_port_sle
);
6829 SLIST_REMOVE(&j
->machservices
, ms
, machservice
, sle
);
6831 if (!(j
->dedicated_instance
|| ms
->event_channel
)) {
6832 LIST_REMOVE(ms
, name_hash_sle
);
6834 LIST_REMOVE(ms
, port_hash_sle
);
6840 machservice_request_notifications(struct machservice
*ms
)
6842 mach_msg_id_t which
= MACH_NOTIFY_DEAD_NAME
;
6844 ms
->isActive
= true;
6847 which
= MACH_NOTIFY_PORT_DESTROYED
;
6848 job_checkin(ms
->job
);
6851 (void)job_assumes_zero(ms
->job
, launchd_mport_notify_req(ms
->port
, which
));
6854 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
6855 #define END_OF(x) (&(x)[NELEM(x)])
6858 mach_cmd2argv(const char *string
)
6860 char *argv
[100], args
[1000];
6862 char *argp
= args
, term
, **argv_ret
, *co
;
6863 unsigned int nargs
= 0, i
;
6865 for (cp
= string
; *cp
;) {
6866 while (isspace(*cp
))
6868 term
= (*cp
== '"') ? *cp
++ : '\0';
6869 if (nargs
< NELEM(argv
)) {
6870 argv
[nargs
++] = argp
;
6872 while (*cp
&& (term
? *cp
!= term
: !isspace(*cp
)) && argp
< END_OF(args
)) {
6889 argv_ret
= malloc((nargs
+ 1) * sizeof(char *) + strlen(string
) + 1);
6892 (void)osx_assumes_zero(errno
);
6896 co
= (char *)argv_ret
+ (nargs
+ 1) * sizeof(char *);
6898 for (i
= 0; i
< nargs
; i
++) {
6899 strcpy(co
, argv
[i
]);
6901 co
+= strlen(argv
[i
]) + 1;
6909 job_checkin(job_t j
)
6911 j
->checkedin
= true;
6914 bool job_is_god(job_t j
)
6916 return j
->embedded_god
;
6920 job_ack_port_destruction(mach_port_t p
)
6922 struct machservice
*ms
;
6925 LIST_FOREACH(ms
, &port_hash
[HASH_PORT(p
)], port_hash_sle
) {
6926 if (ms
->recv
&& (ms
->port
== p
)) {
6932 launchd_syslog(LOG_WARNING
, "Could not find MachService to match receive right: 0x%x", p
);
6938 jobmgr_log(root_jobmgr
, LOG_DEBUG
, "Receive right returned to us: %s", ms
->name
);
6940 /* Without being the exception handler, NOTE_EXIT is our only way to tell if
6941 * the job crashed, and we can't rely on NOTE_EXIT always being processed
6942 * after all the job's receive rights have been returned.
6944 * So when we get receive rights back, check to see if the job has been
6945 * reaped yet. If not, then we add this service to a list of services to be
6946 * drained on crash if it's requested that behavior. So, for a job with N
6947 * receive rights all requesting that they be drained on crash, we can
6948 * safely handle the following sequence of events.
6950 * ReceiveRight0Returned
6951 * ReceiveRight1Returned
6952 * ReceiveRight2Returned
6953 * NOTE_EXIT (reap, get exit status)
6954 * ReceiveRight3Returned
6958 * ReceiveRight(N - 1)Returned
6960 if (ms
->drain_one_on_crash
|| ms
->drain_all_on_crash
) {
6961 if (j
->crashed
&& j
->reaped
) {
6962 job_log(j
, LOG_DEBUG
, "Job has crashed. Draining port...");
6963 machservice_drain_port(ms
);
6964 } else if (!(j
->crashed
|| j
->reaped
)) {
6965 job_log(j
, LOG_DEBUG
, "Job's exit status is still unknown. Deferring drain.");
6969 ms
->isActive
= false;
6970 if (ms
->delete_on_destruction
) {
6971 machservice_delete(j
, ms
, false);
6972 } else if (ms
->reset
) {
6973 machservice_resetport(j
, ms
);
6976 machservice_stamp_port(j
, ms
);
6977 job_dispatch(j
, false);
6979 root_jobmgr
= jobmgr_do_garbage_collection(root_jobmgr
);
6985 job_ack_no_senders(job_t j
)
6987 j
->priv_port_has_senders
= false;
6989 (void)job_assumes_zero(j
, launchd_mport_close_recv(j
->j_port
));
6992 job_log(j
, LOG_DEBUG
, "No more senders on privileged Mach bootstrap port");
6994 job_dispatch(j
, false);
6998 semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
)
7000 struct semaphoreitem
*si
;
7001 size_t alloc_sz
= sizeof(struct semaphoreitem
);
7004 alloc_sz
+= strlen(what
) + 1;
7007 if (job_assumes(j
, si
= calloc(1, alloc_sz
)) == NULL
) {
7014 strcpy(si
->what_init
, what
);
7017 SLIST_INSERT_HEAD(&j
->semaphores
, si
, sle
);
7019 if ((why
== OTHER_JOB_ENABLED
|| why
== OTHER_JOB_DISABLED
) && !j
->nosy
) {
7020 job_log(j
, LOG_DEBUG
, "Job is interested in \"%s\".", what
);
7021 SLIST_INSERT_HEAD(&s_curious_jobs
, j
, curious_jobs_sle
);
7025 semaphoreitem_runtime_mod_ref(si
, true);
7031 semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
)
7034 * External events need to be tracked.
7035 * Internal events do NOT need to be tracked.
7039 case SUCCESSFUL_EXIT
:
7041 case OTHER_JOB_ENABLED
:
7042 case OTHER_JOB_DISABLED
:
7043 case OTHER_JOB_ACTIVE
:
7044 case OTHER_JOB_INACTIVE
:
7051 runtime_add_weak_ref();
7053 runtime_del_weak_ref();
7058 semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
)
7060 semaphoreitem_runtime_mod_ref(si
, false);
7062 SLIST_REMOVE(&j
->semaphores
, si
, semaphoreitem
, sle
);
7064 // We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores.
7065 if ((si
->why
== OTHER_JOB_ENABLED
|| si
->why
== OTHER_JOB_DISABLED
) && j
->nosy
) {
7067 SLIST_REMOVE(&s_curious_jobs
, j
, job_s
, curious_jobs_sle
);
7074 semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
)
7076 struct semaphoreitem_dict_iter_context
*sdic
= context
;
7077 semaphore_reason_t why
;
7079 why
= launch_data_get_bool(obj
) ? sdic
->why_true
: sdic
->why_false
;
7081 semaphoreitem_new(sdic
->j
, why
, key
);
7085 semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
)
7087 struct semaphoreitem_dict_iter_context sdic
= { context
, 0, 0 };
7089 semaphore_reason_t why
;
7091 switch (launch_data_get_type(obj
)) {
7092 case LAUNCH_DATA_BOOL
:
7093 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE
) == 0) {
7094 why
= launch_data_get_bool(obj
) ? NETWORK_UP
: NETWORK_DOWN
;
7095 semaphoreitem_new(j
, why
, NULL
);
7096 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT
) == 0) {
7097 why
= launch_data_get_bool(obj
) ? SUCCESSFUL_EXIT
: FAILED_EXIT
;
7098 semaphoreitem_new(j
, why
, NULL
);
7099 j
->start_pending
= true;
7100 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND
) == 0) {
7101 j
->needs_kickoff
= launch_data_get_bool(obj
);
7102 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_CRASHED
) == 0) {
7103 why
= launch_data_get_bool(obj
) ? CRASHED
: DID_NOT_CRASH
;
7104 semaphoreitem_new(j
, why
, NULL
);
7105 j
->start_pending
= true;
7107 job_log(j
, LOG_ERR
, "Unrecognized KeepAlive attribute: %s", key
);
7110 case LAUNCH_DATA_DICTIONARY
:
7111 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE
) == 0) {
7112 sdic
.why_true
= OTHER_JOB_ACTIVE
;
7113 sdic
.why_false
= OTHER_JOB_INACTIVE
;
7114 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED
) == 0) {
7115 sdic
.why_true
= OTHER_JOB_ENABLED
;
7116 sdic
.why_false
= OTHER_JOB_DISABLED
;
7118 job_log(j
, LOG_ERR
, "Unrecognized KeepAlive attribute: %s", key
);
7122 launch_data_dict_iterate(obj
, semaphoreitem_setup_dict_iter
, &sdic
);
7125 job_log(j
, LOG_ERR
, "Unrecognized KeepAlive type: %u", launch_data_get_type(obj
));
7131 externalevent_new(job_t j
, struct eventsystem
*sys
, const char *evname
, xpc_object_t event
)
7133 if (j
->event_monitor
) {
7134 job_log(j
, LOG_ERR
, "The event monitor job cannot use LaunchEvents or XPC Events.");
7138 struct externalevent
*ee
= (struct externalevent
*)calloc(1, sizeof(struct externalevent
) + strlen(evname
) + 1);
7143 ee
->event
= xpc_retain(event
);
7144 (void)strcpy(ee
->name
, evname
);
7146 ee
->id
= sys
->curid
;
7149 ee
->wanted_state
= true;
7152 if (sys
== _launchd_support_system
) {
7153 ee
->internal
= true;
7156 LIST_INSERT_HEAD(&j
->events
, ee
, job_le
);
7157 LIST_INSERT_HEAD(&sys
->events
, ee
, sys_le
);
7159 job_log(j
, LOG_DEBUG
, "New event: %s/%s", sys
->name
, evname
);
7166 externalevent_delete(struct externalevent
*ee
)
7168 xpc_release(ee
->event
);
7169 LIST_REMOVE(ee
, job_le
);
7170 LIST_REMOVE(ee
, sys_le
);
7178 externalevent_setup(launch_data_t obj
, const char *key
, void *context
)
7180 /* This method can ONLY be called on the job_import() path, as it assumes
7181 * the input is a launch_data_t.
7183 struct externalevent_iter_ctx
*ctx
= (struct externalevent_iter_ctx
*)context
;
7185 xpc_object_t xobj
= ld2xpc(obj
);
7187 job_log(ctx
->j
, LOG_DEBUG
, "Importing stream/event: %s/%s", ctx
->sys
->name
, key
);
7188 externalevent_new(ctx
->j
, ctx
->sys
, key
, xobj
);
7191 job_log(ctx
->j
, LOG_ERR
, "Could not import event for job: %s", key
);
7195 struct externalevent
*
7196 externalevent_find(const char *sysname
, uint64_t id
)
7198 struct externalevent
*ei
= NULL
;
7200 struct eventsystem
*es
= eventsystem_find(sysname
);
7202 LIST_FOREACH(ei
, &es
->events
, sys_le
) {
7208 launchd_syslog(LOG_ERR
, "Could not find event system: %s", sysname
);
7214 struct eventsystem
*
7215 eventsystem_new(const char *name
)
7217 struct eventsystem
*es
= (struct eventsystem
*)calloc(1, sizeof(struct eventsystem
) + strlen(name
) + 1);
7220 (void)strcpy(es
->name
, name
);
7221 LIST_INSERT_HEAD(&_s_event_systems
, es
, global_le
);
7223 (void)osx_assumes_zero(errno
);
7230 eventsystem_delete(struct eventsystem
*es
)
7232 struct externalevent
*ei
= NULL
;
7233 while ((ei
= LIST_FIRST(&es
->events
))) {
7234 externalevent_delete(ei
);
7237 LIST_REMOVE(es
, global_le
);
7243 eventsystem_setup(launch_data_t obj
, const char *key
, void *context
)
7245 job_t j
= (job_t
)context
;
7246 if (!job_assumes(j
, launch_data_get_type(obj
) == LAUNCH_DATA_DICTIONARY
)) {
7250 struct eventsystem
*sys
= eventsystem_find(key
);
7251 if (unlikely(sys
== NULL
)) {
7252 sys
= eventsystem_new(key
);
7253 job_log(j
, LOG_DEBUG
, "New event system: %s", key
);
7256 if (job_assumes(j
, sys
!= NULL
)) {
7257 struct externalevent_iter_ctx ctx
= {
7262 job_log(j
, LOG_DEBUG
, "Importing events for stream: %s", key
);
7263 launch_data_dict_iterate(obj
, externalevent_setup
, &ctx
);
7267 struct eventsystem
*
7268 eventsystem_find(const char *name
)
7270 struct eventsystem
*esi
= NULL
;
7271 LIST_FOREACH(esi
, &_s_event_systems
, global_le
) {
7272 if (strcmp(name
, esi
->name
) == 0) {
7281 eventsystem_ping(void)
7283 if (!_launchd_event_monitor
) {
7287 if (!_launchd_event_monitor
->p
) {
7288 (void)job_dispatch(_launchd_event_monitor
, true);
7290 if (_launchd_event_monitor
->event_monitor_ready2signal
) {
7291 (void)job_assumes_zero_p(_launchd_event_monitor
, kill(_launchd_event_monitor
->p
, SIGUSR1
));
7297 jobmgr_dispatch_all_semaphores(jobmgr_t jm
)
7303 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
7304 jobmgr_dispatch_all_semaphores(jmi
);
7307 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
7308 if (!SLIST_EMPTY(&ji
->semaphores
)) {
7309 job_dispatch(ji
, false);
7315 cronemu(int mon
, int mday
, int hour
, int min
)
7317 struct tm workingtm
;
7321 workingtm
= *localtime(&now
);
7323 workingtm
.tm_isdst
= -1;
7324 workingtm
.tm_sec
= 0;
7327 while (!cronemu_mon(&workingtm
, mon
, mday
, hour
, min
)) {
7328 workingtm
.tm_year
++;
7329 workingtm
.tm_mon
= 0;
7330 workingtm
.tm_mday
= 1;
7331 workingtm
.tm_hour
= 0;
7332 workingtm
.tm_min
= 0;
7336 return mktime(&workingtm
);
7340 cronemu_wday(int wday
, int hour
, int min
)
7342 struct tm workingtm
;
7346 workingtm
= *localtime(&now
);
7348 workingtm
.tm_isdst
= -1;
7349 workingtm
.tm_sec
= 0;
7356 while (!(workingtm
.tm_wday
== wday
&& cronemu_hour(&workingtm
, hour
, min
))) {
7357 workingtm
.tm_mday
++;
7358 workingtm
.tm_hour
= 0;
7359 workingtm
.tm_min
= 0;
7363 return mktime(&workingtm
);
7367 cronemu_mon(struct tm
*wtm
, int mon
, int mday
, int hour
, int min
)
7370 struct tm workingtm
= *wtm
;
7373 while (!cronemu_mday(&workingtm
, mday
, hour
, min
)) {
7375 workingtm
.tm_mday
= 1;
7376 workingtm
.tm_hour
= 0;
7377 workingtm
.tm_min
= 0;
7378 carrytest
= workingtm
.tm_mon
;
7380 if (carrytest
!= workingtm
.tm_mon
) {
7388 if (mon
< wtm
->tm_mon
) {
7392 if (mon
> wtm
->tm_mon
) {
7399 return cronemu_mday(wtm
, mday
, hour
, min
);
7403 cronemu_mday(struct tm
*wtm
, int mday
, int hour
, int min
)
7406 struct tm workingtm
= *wtm
;
7409 while (!cronemu_hour(&workingtm
, hour
, min
)) {
7410 workingtm
.tm_mday
++;
7411 workingtm
.tm_hour
= 0;
7412 workingtm
.tm_min
= 0;
7413 carrytest
= workingtm
.tm_mday
;
7415 if (carrytest
!= workingtm
.tm_mday
) {
7423 if (mday
< wtm
->tm_mday
) {
7427 if (mday
> wtm
->tm_mday
) {
7428 wtm
->tm_mday
= mday
;
7433 return cronemu_hour(wtm
, hour
, min
);
7437 cronemu_hour(struct tm
*wtm
, int hour
, int min
)
7440 struct tm workingtm
= *wtm
;
7443 while (!cronemu_min(&workingtm
, min
)) {
7444 workingtm
.tm_hour
++;
7445 workingtm
.tm_min
= 0;
7446 carrytest
= workingtm
.tm_hour
;
7448 if (carrytest
!= workingtm
.tm_hour
) {
7456 if (hour
< wtm
->tm_hour
) {
7460 if (hour
> wtm
->tm_hour
) {
7461 wtm
->tm_hour
= hour
;
7465 return cronemu_min(wtm
, min
);
7469 cronemu_min(struct tm
*wtm
, int min
)
7475 if (min
< wtm
->tm_min
) {
7479 if (min
> wtm
->tm_min
) {
7487 job_mig_create_server(job_t j
, cmd_t server_cmd
, uid_t server_uid
, boolean_t on_demand
, mach_port_t
*server_portp
)
7489 struct ldcred
*ldc
= runtime_get_caller_creds();
7493 return BOOTSTRAP_NO_MEMORY
;
7496 if (unlikely(j
->deny_job_creation
)) {
7497 return BOOTSTRAP_NOT_PRIVILEGED
;
7501 const char **argv
= (const char **)mach_cmd2argv(server_cmd
);
7502 if (unlikely(argv
== NULL
)) {
7503 return BOOTSTRAP_NO_MEMORY
;
7505 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_PATH
, argv
[0]) > 0)) {
7507 return BOOTSTRAP_NOT_PRIVILEGED
;
7512 job_log(j
, LOG_DEBUG
, "Server create attempt: %s", server_cmd
);
7515 if (ldc
->euid
|| ldc
->uid
) {
7516 job_log(j
, LOG_WARNING
, "Server create attempt moved to per-user launchd: %s", server_cmd
);
7517 return VPROC_ERR_TRY_PER_USER
;
7520 if (unlikely(server_uid
!= getuid())) {
7521 job_log(j
, LOG_WARNING
, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
7522 server_cmd
, getuid(), server_uid
);
7524 server_uid
= 0; // zero means "do nothing"
7527 js
= job_new_via_mach_init(j
, server_cmd
, server_uid
, on_demand
);
7529 if (unlikely(js
== NULL
)) {
7530 return BOOTSTRAP_NO_MEMORY
;
7533 *server_portp
= js
->j_port
;
7534 return BOOTSTRAP_SUCCESS
;
7538 job_mig_send_signal(job_t j
, mach_port_t srp
, name_t targetlabel
, int sig
)
7540 struct ldcred
*ldc
= runtime_get_caller_creds();
7544 return BOOTSTRAP_NO_MEMORY
;
7547 if (unlikely(ldc
->euid
!= 0 && ldc
->euid
!= getuid()) || j
->deny_job_creation
) {
7548 #if TARGET_OS_EMBEDDED
7549 if (!j
->embedded_god
) {
7550 return BOOTSTRAP_NOT_PRIVILEGED
;
7553 return BOOTSTRAP_NOT_PRIVILEGED
;
7558 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
7559 return BOOTSTRAP_NOT_PRIVILEGED
;
7563 if (unlikely(!(otherj
= job_find(NULL
, targetlabel
)))) {
7564 return BOOTSTRAP_UNKNOWN_SERVICE
;
7567 #if TARGET_OS_EMBEDDED
7568 if (j
->embedded_god
) {
7569 if (j
->username
&& otherj
->username
) {
7570 if (strcmp(j
->username
, otherj
->username
) != 0) {
7571 return BOOTSTRAP_NOT_PRIVILEGED
;
7574 return BOOTSTRAP_NOT_PRIVILEGED
;
7579 if (sig
== VPROC_MAGIC_UNLOAD_SIGNAL
) {
7580 bool do_block
= otherj
->p
;
7582 if (otherj
->anonymous
) {
7583 return BOOTSTRAP_NOT_PRIVILEGED
;
7589 job_log(j
, LOG_DEBUG
, "Blocking MIG return of job_remove(): %s", otherj
->label
);
7590 // this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first...
7591 (void)job_assumes(otherj
, waiting4removal_new(otherj
, srp
));
7592 return MIG_NO_REPLY
;
7596 } else if (otherj
->p
) {
7597 (void)job_assumes_zero_p(j
, kill2(otherj
->p
, sig
));
7604 job_mig_log_forward(job_t j
, vm_offset_t inval
, mach_msg_type_number_t invalCnt
)
7606 struct ldcred
*ldc
= runtime_get_caller_creds();
7609 return BOOTSTRAP_NO_MEMORY
;
7612 if (!job_assumes(j
, j
->per_user
)) {
7613 return BOOTSTRAP_NOT_PRIVILEGED
;
7616 return launchd_log_forward(ldc
->euid
, ldc
->egid
, inval
, invalCnt
);
7620 job_mig_log_drain(job_t j
, mach_port_t srp
, vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
7622 struct ldcred
*ldc
= runtime_get_caller_creds();
7625 return BOOTSTRAP_NO_MEMORY
;
7628 if (unlikely(ldc
->euid
)) {
7629 return BOOTSTRAP_NOT_PRIVILEGED
;
7632 return launchd_log_drain(srp
, outval
, outvalCnt
);
7636 job_mig_swap_complex(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
,
7637 vm_offset_t inval
, mach_msg_type_number_t invalCnt
, vm_offset_t
*outval
,
7638 mach_msg_type_number_t
*outvalCnt
)
7641 launch_data_t input_obj
= NULL
, output_obj
= NULL
;
7642 size_t data_offset
= 0;
7644 struct ldcred
*ldc
= runtime_get_caller_creds();
7647 return BOOTSTRAP_NO_MEMORY
;
7650 if (inkey
&& ldc
->pid
!= j
->p
) {
7651 if (ldc
->euid
&& ldc
->euid
!= getuid()) {
7652 return BOOTSTRAP_NOT_PRIVILEGED
;
7656 if (unlikely(inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
))) {
7660 if (inkey
&& outkey
) {
7661 action
= "Swapping";
7668 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
7670 *outvalCnt
= 20 * 1024 * 1024;
7671 mig_allocate(outval
, *outvalCnt
);
7672 if (!job_assumes(j
, *outval
!= 0)) {
7676 /* Note to future maintainers: launch_data_unpack() does NOT return a heap
7677 * object. The data is decoded in-place. So do not call launch_data_free()
7680 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK
);
7681 if (unlikely(invalCnt
&& !job_assumes(j
, (input_obj
= launch_data_unpack((void *)inval
, invalCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
))) {
7687 case VPROC_GSK_ENVIRONMENT
:
7688 if (!job_assumes(j
, (output_obj
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
7691 jobmgr_export_env_from_other_jobs(j
->mgr
, output_obj
);
7692 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK
);
7693 if (!job_assumes(j
, launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
) != 0)) {
7696 launch_data_free(output_obj
);
7698 case VPROC_GSK_ALLJOBS
:
7699 if (!job_assumes(j
, (output_obj
= job_export_all()) != NULL
)) {
7702 ipc_revoke_fds(output_obj
);
7703 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK
);
7704 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
7705 if (!job_assumes(j
, packed_size
!= 0)) {
7708 launch_data_free(output_obj
);
7710 case VPROC_GSK_MGR_NAME
:
7711 if (!job_assumes(j
, (output_obj
= launch_data_new_string(j
->mgr
->name
)) != NULL
)) {
7714 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
7715 if (!job_assumes(j
, packed_size
!= 0)) {
7719 launch_data_free(output_obj
);
7721 case VPROC_GSK_JOB_OVERRIDES_DB
:
7722 store
= launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_DB
, "overrides.plist");
7723 if (!store
|| !job_assumes(j
, (output_obj
= launch_data_new_string(store
)) != NULL
)) {
7729 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
7730 if (!job_assumes(j
, packed_size
!= 0)) {
7734 launch_data_free(output_obj
);
7736 case VPROC_GSK_ZERO
:
7737 mig_deallocate(*outval
, *outvalCnt
);
7745 mig_deallocate(inval
, invalCnt
);
7749 mig_deallocate(inval
, invalCnt
);
7751 mig_deallocate(*outval
, *outvalCnt
);
7754 launch_data_free(output_obj
);
7761 job_mig_swap_integer(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
, int64_t inval
, int64_t *outval
)
7764 kern_return_t kr
= 0;
7765 struct ldcred
*ldc
= runtime_get_caller_creds();
7769 return BOOTSTRAP_NO_MEMORY
;
7772 if (inkey
&& ldc
->pid
!= j
->p
) {
7773 if (ldc
->euid
&& ldc
->euid
!= getuid()) {
7774 return BOOTSTRAP_NOT_PRIVILEGED
;
7778 if (unlikely(inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
))) {
7782 if (inkey
&& outkey
) {
7783 action
= "Swapping";
7790 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
7793 case VPROC_GSK_ABANDON_PROCESS_GROUP
:
7794 *outval
= j
->abandon_pg
;
7796 case VPROC_GSK_LAST_EXIT_STATUS
:
7797 *outval
= j
->last_exit_status
;
7799 case VPROC_GSK_MGR_UID
:
7802 case VPROC_GSK_MGR_PID
:
7805 case VPROC_GSK_IS_MANAGED
:
7806 *outval
= j
->anonymous
? 0 : 1;
7808 case VPROC_GSK_BASIC_KEEPALIVE
:
7809 *outval
= !j
->ondemand
;
7811 case VPROC_GSK_START_INTERVAL
:
7812 *outval
= j
->start_interval
;
7814 case VPROC_GSK_IDLE_TIMEOUT
:
7815 *outval
= j
->timeout
;
7817 case VPROC_GSK_EXIT_TIMEOUT
:
7818 *outval
= j
->exit_timeout
;
7820 case VPROC_GSK_GLOBAL_LOG_MASK
:
7821 oldmask
= runtime_setlogmask(LOG_UPTO(LOG_DEBUG
));
7823 runtime_setlogmask(oldmask
);
7825 case VPROC_GSK_GLOBAL_UMASK
:
7830 case VPROC_GSK_TRANSACTIONS_ENABLED
:
7831 job_log(j
, LOG_DEBUG
, "Reading EnableTransactions value.");
7832 *outval
= j
->enable_transactions
;
7834 case VPROC_GSK_WAITFORDEBUGGER
:
7835 *outval
= j
->wait4debugger
;
7837 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT
:
7838 *outval
= j
->embedded_god
;
7840 case VPROC_GSK_ZERO
:
7849 case VPROC_GSK_ABANDON_PROCESS_GROUP
:
7850 j
->abandon_pg
= (bool)inval
;
7852 case VPROC_GSK_GLOBAL_ON_DEMAND
:
7853 job_log(j
, LOG_DEBUG
, "Job has set global on-demand mode to: %s", inval
? "true" : "false");
7854 kr
= job_set_global_on_demand(j
, inval
);
7856 case VPROC_GSK_BASIC_KEEPALIVE
:
7857 j
->ondemand
= !inval
;
7859 case VPROC_GSK_START_INTERVAL
:
7860 if (inval
> UINT32_MAX
|| inval
< 0) {
7863 if (j
->start_interval
== 0) {
7864 runtime_add_weak_ref();
7866 j
->start_interval
= (typeof(j
->start_interval
)) inval
;
7867 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, j
->start_interval
, j
));
7868 } else if (j
->start_interval
) {
7869 (void)job_assumes_zero_p(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
));
7870 if (j
->start_interval
!= 0) {
7871 runtime_del_weak_ref();
7873 j
->start_interval
= 0;
7876 case VPROC_GSK_IDLE_TIMEOUT
:
7877 if (inval
< 0 || inval
> UINT32_MAX
) {
7880 j
->timeout
= (typeof(j
->timeout
)) inval
;
7883 case VPROC_GSK_EXIT_TIMEOUT
:
7884 if (inval
< 0 || inval
> UINT32_MAX
) {
7887 j
->exit_timeout
= (typeof(j
->exit_timeout
)) inval
;
7890 case VPROC_GSK_GLOBAL_LOG_MASK
:
7891 if (inval
< 0 || inval
> UINT32_MAX
) {
7894 runtime_setlogmask((int) inval
);
7897 case VPROC_GSK_GLOBAL_UMASK
:
7898 __OSX_COMPILETIME_ASSERT__(sizeof (mode_t
) == 2);
7899 if (inval
< 0 || inval
> UINT16_MAX
) {
7903 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
7906 umask((mode_t
) inval
);
7911 case VPROC_GSK_TRANSACTIONS_ENABLED
:
7914 case VPROC_GSK_WEIRD_BOOTSTRAP
:
7915 if (job_assumes(j
, j
->weird_bootstrap
)) {
7916 job_log(j
, LOG_DEBUG
, "Unsetting weird bootstrap.");
7918 mach_msg_size_t mxmsgsz
= (typeof(mxmsgsz
)) sizeof(union __RequestUnion__job_mig_job_subsystem
);
7920 if (job_mig_job_subsystem
.maxsize
> mxmsgsz
) {
7921 mxmsgsz
= job_mig_job_subsystem
.maxsize
;
7924 (void)job_assumes_zero(j
, runtime_add_mport(j
->mgr
->jm_port
, job_server
));
7925 j
->weird_bootstrap
= false;
7928 case VPROC_GSK_WAITFORDEBUGGER
:
7929 j
->wait4debugger_oneshot
= inval
;
7931 case VPROC_GSK_PERUSER_SUSPEND
:
7932 if (job_assumes(j
, pid1_magic
&& ldc
->euid
== 0)) {
7933 mach_port_t junk
= MACH_PORT_NULL
;
7934 job_t jpu
= jobmgr_lookup_per_user_context_internal(j
, (uid_t
)inval
, &junk
);
7935 if (job_assumes(j
, jpu
!= NULL
)) {
7936 struct suspended_peruser
*spi
= NULL
;
7937 LIST_FOREACH(spi
, &j
->suspended_perusers
, sle
) {
7938 if ((int64_t)(spi
->j
->mach_uid
) == inval
) {
7939 job_log(j
, LOG_WARNING
, "Job tried to suspend per-user launchd for UID %lli twice.", inval
);
7945 job_log(j
, LOG_INFO
, "Job is suspending the per-user launchd for UID %lli.", inval
);
7946 spi
= (struct suspended_peruser
*)calloc(sizeof(struct suspended_peruser
), 1);
7947 if (job_assumes(j
, spi
!= NULL
)) {
7948 /* Stop listening for events.
7950 * See <rdar://problem/9014146>.
7952 if (jpu
->peruser_suspend_count
== 0) {
7957 spi
->j
->peruser_suspend_count
++;
7958 LIST_INSERT_HEAD(&j
->suspended_perusers
, spi
, sle
);
7962 kr
= BOOTSTRAP_NO_MEMORY
;
7970 case VPROC_GSK_PERUSER_RESUME
:
7971 if (job_assumes(j
, pid1_magic
== true)) {
7972 struct suspended_peruser
*spi
= NULL
, *spt
= NULL
;
7973 LIST_FOREACH_SAFE(spi
, &j
->suspended_perusers
, sle
, spt
) {
7974 if ((int64_t)(spi
->j
->mach_uid
) == inval
) {
7975 spi
->j
->peruser_suspend_count
--;
7976 LIST_REMOVE(spi
, sle
);
7977 job_log(j
, LOG_INFO
, "Job is resuming the per-user launchd for UID %lli.", inval
);
7982 if (!job_assumes(j
, spi
!= NULL
)) {
7983 job_log(j
, LOG_WARNING
, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval
);
7984 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
7985 } else if (spi
->j
->peruser_suspend_count
== 0) {
7987 job_dispatch(spi
->j
, false);
7994 case VPROC_GSK_ZERO
:
8005 job_mig_post_fork_ping(job_t j
, task_t child_task
, mach_port_t
*asport
)
8007 struct machservice
*ms
;
8010 return BOOTSTRAP_NO_MEMORY
;
8013 job_log(j
, LOG_DEBUG
, "Post fork ping.");
8015 job_setup_exception_port(j
, child_task
);
8017 SLIST_FOREACH(ms
, &special_ports
, special_port_sle
) {
8018 if (j
->per_user
&& (ms
->special_port_num
!= TASK_ACCESS_PORT
)) {
8019 // The TASK_ACCESS_PORT funny business is to workaround 5325399.
8023 errno
= task_set_special_port(child_task
, ms
->special_port_num
, ms
->port
);
8025 if (errno
== MACH_SEND_INVALID_DEST
) {
8026 job_log(j
, LOG_WARNING
, "Task died before special ports could be set.");
8030 int desired_log_level
= LOG_ERR
;
8034 desired_log_level
= LOG_WARNING
;
8036 if (ms
->special_port_num
== TASK_SEATBELT_PORT
) {
8037 desired_log_level
= LOG_DEBUG
;
8041 job_log(j
, desired_log_level
, "Could not setup Mach task special port %u: %s", ms
->special_port_num
, mach_error_string(errno
));
8045 /* MIG will not zero-initialize this pointer, so we must always do so. See
8046 * <rdar://problem/8562593>.
8048 *asport
= MACH_PORT_NULL
;
8049 #if !TARGET_OS_EMBEDDED
8050 if (!j
->anonymous
) {
8051 /* XPC services will spawn into the root security session by default.
8052 * xpcproxy will switch them away if needed.
8054 if (!(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
8055 job_log(j
, LOG_DEBUG
, "Returning session port: 0x%x", j
->asport
);
8056 *asport
= j
->asport
;
8060 (void)job_assumes_zero(j
, launchd_mport_deallocate(child_task
));
8066 job_mig_reboot2(job_t j
, uint64_t flags
)
8068 char who_started_the_reboot
[2048] = "";
8069 struct proc_bsdshortinfo proc
;
8070 struct ldcred
*ldc
= runtime_get_caller_creds();
8074 return BOOTSTRAP_NO_MEMORY
;
8077 if (unlikely(!pid1_magic
)) {
8078 return BOOTSTRAP_NOT_PRIVILEGED
;
8081 #if !TARGET_OS_EMBEDDED
8082 if (unlikely(ldc
->euid
)) {
8084 if (unlikely(ldc
->euid
) && !j
->embedded_god
) {
8086 return BOOTSTRAP_NOT_PRIVILEGED
;
8089 for (pid_to_log
= ldc
->pid
; pid_to_log
; pid_to_log
= proc
.pbsi_ppid
) {
8091 if (proc_pidinfo(pid_to_log
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
8092 if (errno
!= ESRCH
) {
8093 (void)job_assumes_zero(j
, errno
);
8098 if (!job_assumes(j
, pid_to_log
!= (pid_t
)proc
.pbsi_ppid
)) {
8099 job_log(j
, LOG_WARNING
, "Job which is its own parent started reboot.");
8100 snprintf(who_started_the_reboot
, sizeof(who_started_the_reboot
), "%s[%u]->%s[%u]->%s[%u]->...", proc
.pbsi_comm
, pid_to_log
, proc
.pbsi_comm
, pid_to_log
, proc
.pbsi_comm
, pid_to_log
);
8104 who_offset
= strlen(who_started_the_reboot
);
8105 snprintf(who_started_the_reboot
+ who_offset
, sizeof(who_started_the_reboot
) - who_offset
,
8106 " %s[%u]%s", proc
.pbsi_comm
, pid_to_log
, proc
.pbsi_ppid
? " ->" : "");
8109 root_jobmgr
->reboot_flags
= (int)flags
;
8110 job_log(j
, LOG_DEBUG
, "reboot2() initiated by:%s", who_started_the_reboot
);
8117 job_mig_getsocket(job_t j
, name_t spr
)
8120 return BOOTSTRAP_NO_MEMORY
;
8123 if (j
->deny_job_creation
) {
8124 return BOOTSTRAP_NOT_PRIVILEGED
;
8128 struct ldcred
*ldc
= runtime_get_caller_creds();
8129 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
8130 return BOOTSTRAP_NOT_PRIVILEGED
;
8136 if (unlikely(!sockpath
)) {
8137 return BOOTSTRAP_NO_MEMORY
;
8140 strncpy(spr
, sockpath
, sizeof(name_t
));
8142 return BOOTSTRAP_SUCCESS
;
8146 job_mig_log(job_t j
, int pri
, int err
, logmsg_t msg
)
8149 return BOOTSTRAP_NO_MEMORY
;
8152 if ((errno
= err
)) {
8153 job_log_error(j
, pri
, "%s", msg
);
8155 job_log(j
, pri
, "%s", msg
);
8162 job_setup_per_user_directory(job_t j
, uid_t uid
, const char *path
)
8166 bool created
= false;
8167 int r
= stat(path
, &sb
);
8168 if ((r
== -1 && errno
== ENOENT
) || (r
== 0 && !S_ISDIR(sb
.st_mode
))) {
8170 job_log(j
, LOG_NOTICE
, "File at location of per-user launchd directory is not a directory. Moving aside: %s", path
);
8173 snprintf(old
, sizeof(old
), "%s.movedaside", path
);
8174 (void)job_assumes_zero_p(j
, rename(path
, old
));
8177 (void)job_assumes_zero_p(j
, mkdir(path
, S_IRWXU
));
8178 (void)job_assumes_zero_p(j
, chown(path
, uid
, 0));
8183 if (sb
.st_uid
!= uid
) {
8184 job_log(j
, LOG_NOTICE
, "Per-user launchd directory has improper user ownership. Repairing: %s", path
);
8185 (void)job_assumes_zero_p(j
, chown(path
, uid
, 0));
8187 if (sb
.st_gid
!= 0) {
8188 job_log(j
, LOG_NOTICE
, "Per-user launchd directory has improper group ownership. Repairing: %s", path
);
8189 (void)job_assumes_zero_p(j
, chown(path
, uid
, 0));
8191 if (sb
.st_mode
!= (S_IRWXU
| S_IFDIR
)) {
8192 job_log(j
, LOG_NOTICE
, "Per-user launchd directory has improper mode. Repairing: %s", path
);
8193 (void)job_assumes_zero_p(j
, chmod(path
, S_IRWXU
));
8199 job_setup_per_user_directories(job_t j
, uid_t uid
, const char *label
)
8201 char path
[PATH_MAX
];
8203 (void)snprintf(path
, sizeof(path
), LAUNCHD_DB_PREFIX
"/%s", label
);
8204 job_setup_per_user_directory(j
, uid
, path
);
8206 (void)snprintf(path
, sizeof(path
), LAUNCHD_LOG_PREFIX
"/%s", label
);
8207 job_setup_per_user_directory(j
, uid
, path
);
8211 jobmgr_lookup_per_user_context_internal(job_t j
, uid_t which_user
, mach_port_t
*mp
)
8214 LIST_FOREACH(ji
, &root_jobmgr
->jobs
, sle
) {
8215 if (!ji
->per_user
) {
8218 if (ji
->mach_uid
!= which_user
) {
8221 if (SLIST_EMPTY(&ji
->machservices
)) {
8224 if (!SLIST_FIRST(&ji
->machservices
)->per_user_hack
) {
8230 if (unlikely(ji
== NULL
)) {
8231 struct machservice
*ms
;
8234 job_log(j
, LOG_DEBUG
, "Creating per user launchd job for UID: %u", which_user
);
8236 sprintf(lbuf
, "com.apple.launchd.peruser.%u", which_user
);
8238 ji
= job_new(root_jobmgr
, lbuf
, "/sbin/launchd", NULL
);
8241 auditinfo_addr_t auinfo
= {
8245 .ai_auid
= which_user
,
8246 .ai_asid
= AU_ASSIGN_ASID
,
8249 if (setaudit_addr(&auinfo
, sizeof(auinfo
)) == 0) {
8250 job_log(ji
, LOG_DEBUG
, "Created new security session for per-user launchd: %u", auinfo
.ai_asid
);
8251 (void)job_assumes(ji
, (ji
->asport
= audit_session_self()) != MACH_PORT_NULL
);
8253 /* Kinda lame that we have to do this, but we can't create an
8254 * audit session without joining it.
8256 (void)job_assumes(ji
, audit_session_join(launchd_audit_port
));
8257 ji
->asid
= auinfo
.ai_asid
;
8259 job_log(ji
, LOG_WARNING
, "Could not set audit session!");
8264 ji
->mach_uid
= which_user
;
8265 ji
->per_user
= true;
8266 ji
->enable_transactions
= true;
8267 job_setup_per_user_directories(ji
, which_user
, lbuf
);
8269 if ((ms
= machservice_new(ji
, lbuf
, mp
, false)) == NULL
) {
8274 ms
->per_user_hack
= true;
8277 ji
= job_dispatch(ji
, false);
8281 *mp
= machservice_port(SLIST_FIRST(&ji
->machservices
));
8282 job_log(j
, LOG_DEBUG
, "Per user launchd job found for UID: %u", which_user
);
8289 job_mig_lookup_per_user_context(job_t j
, uid_t which_user
, mach_port_t
*up_cont
)
8291 struct ldcred
*ldc
= runtime_get_caller_creds();
8295 return BOOTSTRAP_NO_MEMORY
;
8298 if (launchd_osinstaller
) {
8299 return BOOTSTRAP_UNKNOWN_SERVICE
;
8302 #if TARGET_OS_EMBEDDED
8303 // There is no need for per-user launchd's on embedded.
8304 job_log(j
, LOG_ERR
, "Per-user launchds are not supported on this platform.");
8305 return BOOTSTRAP_UNKNOWN_SERVICE
;
8309 if (unlikely(sandbox_check(ldc
->pid
, "mach-per-user-lookup", SANDBOX_FILTER_NONE
) > 0)) {
8310 return BOOTSTRAP_NOT_PRIVILEGED
;
8314 job_log(j
, LOG_INFO
, "Looking up per user launchd for UID: %u", which_user
);
8316 if (unlikely(!pid1_magic
)) {
8317 job_log(j
, LOG_ERR
, "Only PID 1 supports per user launchd lookups.");
8318 return BOOTSTRAP_NOT_PRIVILEGED
;
8321 if (ldc
->euid
|| ldc
->uid
) {
8322 which_user
= ldc
->euid
?: ldc
->uid
;
8325 *up_cont
= MACH_PORT_NULL
;
8327 jpu
= jobmgr_lookup_per_user_context_internal(j
, which_user
, up_cont
);
8333 job_mig_check_in2(job_t j
, name_t servicename
, mach_port_t
*serviceportp
, uuid_t instance_id
, uint64_t flags
)
8335 bool per_pid_service
= flags
& BOOTSTRAP_PER_PID_SERVICE
;
8336 bool strict
= flags
& BOOTSTRAP_STRICT_CHECKIN
;
8337 struct ldcred
*ldc
= runtime_get_caller_creds();
8338 struct machservice
*ms
= NULL
;
8342 return BOOTSTRAP_NO_MEMORY
;
8345 if (j
->dedicated_instance
) {
8346 struct machservice
*msi
= NULL
;
8347 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
8348 if (strncmp(servicename
, msi
->name
, sizeof(name_t
) - 1) == 0) {
8349 uuid_copy(instance_id
, j
->instance_id
);
8355 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, per_pid_service
? ldc
->pid
: 0);
8359 if (likely(ms
!= NULL
)) {
8361 return BOOTSTRAP_NOT_PRIVILEGED
;
8362 } else if (ms
->isActive
) {
8363 return BOOTSTRAP_SERVICE_ACTIVE
;
8366 return BOOTSTRAP_UNKNOWN_SERVICE
;
8368 } else if (ms
== NULL
) {
8369 if (job_assumes(j
, !j
->dedicated_instance
)) {
8370 *serviceportp
= MACH_PORT_NULL
;
8372 if (unlikely((ms
= machservice_new(j
, servicename
, serviceportp
, per_pid_service
)) == NULL
)) {
8373 return BOOTSTRAP_NO_MEMORY
;
8376 // Treat this like a legacy job.
8377 if (!j
->legacy_mach_job
) {
8378 ms
->isActive
= true;
8382 if (!(j
->anonymous
|| j
->legacy_LS_job
|| j
->legacy_mach_job
)) {
8383 job_log(j
, LOG_APPLEONLY
, "Please add the following service to the configuration file for this job: %s", servicename
);
8386 return BOOTSTRAP_UNKNOWN_SERVICE
;
8389 if (unlikely((jo
= machservice_job(ms
)) != j
)) {
8390 static pid_t last_warned_pid
;
8392 if (last_warned_pid
!= ldc
->pid
) {
8393 job_log(jo
, LOG_WARNING
, "The following job tried to hijack the service \"%s\" from this job: %s", servicename
, j
->label
);
8394 last_warned_pid
= ldc
->pid
;
8397 return BOOTSTRAP_NOT_PRIVILEGED
;
8399 if (unlikely(machservice_active(ms
))) {
8400 job_log(j
, LOG_WARNING
, "Check-in of Mach service failed. Already active: %s", servicename
);
8401 return BOOTSTRAP_SERVICE_ACTIVE
;
8406 machservice_request_notifications(ms
);
8408 job_log(j
, LOG_INFO
, "Check-in of service: %s", servicename
);
8410 *serviceportp
= machservice_port(ms
);
8411 return BOOTSTRAP_SUCCESS
;
8415 job_mig_register2(job_t j
, name_t servicename
, mach_port_t serviceport
, uint64_t flags
)
8417 struct machservice
*ms
;
8418 struct ldcred
*ldc
= runtime_get_caller_creds();
8421 return BOOTSTRAP_NO_MEMORY
;
8424 if (!(flags
& BOOTSTRAP_PER_PID_SERVICE
) && !j
->legacy_LS_job
) {
8425 job_log(j
, LOG_APPLEONLY
, "Performance: bootstrap_register() is deprecated. Service: %s", servicename
);
8428 job_log(j
, LOG_DEBUG
, "%sMach service registration attempt: %s", flags
& BOOTSTRAP_PER_PID_SERVICE
? "Per PID " : "", servicename
);
8430 // 5641783 for the embedded hack
8431 #if !TARGET_OS_EMBEDDED
8433 * From a per-user/session launchd's perspective, SecurityAgent (UID
8434 * 92) is a rogue application (not our UID, not root and not a child of
8435 * us). We'll have to reconcile this design friction at a later date.
8437 if (unlikely(j
->anonymous
&& j
->mgr
->parentmgr
== NULL
&& ldc
->uid
!= 0 && ldc
->uid
!= getuid() && ldc
->uid
!= 92)) {
8439 return VPROC_ERR_TRY_PER_USER
;
8441 return BOOTSTRAP_NOT_PRIVILEGED
;
8446 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, flags
& BOOTSTRAP_PER_PID_SERVICE
? ldc
->pid
: 0);
8449 if (machservice_job(ms
) != j
) {
8450 return BOOTSTRAP_NOT_PRIVILEGED
;
8452 if (machservice_active(ms
)) {
8453 job_log(j
, LOG_DEBUG
, "Mach service registration failed. Already active: %s", servicename
);
8454 return BOOTSTRAP_SERVICE_ACTIVE
;
8456 if (ms
->recv
&& (serviceport
!= MACH_PORT_NULL
)) {
8457 job_log(j
, LOG_ERR
, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename
);
8458 return BOOTSTRAP_NOT_PRIVILEGED
;
8461 machservice_delete(j
, ms
, false);
8464 if (likely(serviceport
!= MACH_PORT_NULL
)) {
8465 if (likely(ms
= machservice_new(j
, servicename
, &serviceport
, flags
& BOOTSTRAP_PER_PID_SERVICE
? true : false))) {
8466 machservice_request_notifications(ms
);
8468 return BOOTSTRAP_NO_MEMORY
;
8473 return BOOTSTRAP_SUCCESS
;
8477 job_mig_look_up2(job_t j
, mach_port_t srp
, name_t servicename
, mach_port_t
*serviceportp
, pid_t target_pid
, uuid_t instance_id
, uint64_t flags
)
8479 struct machservice
*ms
= NULL
;
8480 struct ldcred
*ldc
= runtime_get_caller_creds();
8482 bool per_pid_lookup
= flags
& BOOTSTRAP_PER_PID_SERVICE
;
8483 bool specific_instance
= flags
& BOOTSTRAP_SPECIFIC_INSTANCE
;
8484 bool strict_lookup
= flags
& BOOTSTRAP_STRICT_LOOKUP
;
8485 bool privileged
= flags
& BOOTSTRAP_PRIVILEGED_SERVER
;
8488 return BOOTSTRAP_NO_MEMORY
;
8491 bool xpc_req
= (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
);
8493 // 5641783 for the embedded hack
8494 #if !TARGET_OS_EMBEDDED
8495 if (unlikely(pid1_magic
&& j
->anonymous
&& j
->mgr
->parentmgr
== NULL
&& ldc
->uid
!= 0 && ldc
->euid
!= 0)) {
8496 return VPROC_ERR_TRY_PER_USER
;
8501 /* We don't do sandbox checking for XPC domains because, by definition, all
8502 * the services within your domain should be accessible to you.
8504 if (!xpc_req
&& unlikely(sandbox_check(ldc
->pid
, "mach-lookup", per_pid_lookup
? SANDBOX_FILTER_LOCAL_NAME
: SANDBOX_FILTER_GLOBAL_NAME
, servicename
) > 0)) {
8505 return BOOTSTRAP_NOT_PRIVILEGED
;
8509 if (per_pid_lookup
) {
8510 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, target_pid
);
8513 // Requests from XPC domains stay local.
8514 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, 0);
8516 /* A strict lookup which is privileged won't even bother trying to
8517 * find a service if we're not hosting the root Mach bootstrap.
8519 if (strict_lookup
&& privileged
) {
8520 if (inherited_bootstrap_port
== MACH_PORT_NULL
) {
8521 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, true, 0);
8524 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, true, 0);
8530 ms
= ms
->alias
? ms
->alias
: ms
;
8531 if (unlikely(specific_instance
&& ms
->job
->multiple_instances
)) {
8533 job_t instance
= NULL
;
8534 LIST_FOREACH(ji
, &ms
->job
->subjobs
, subjob_sle
) {
8535 if (uuid_compare(instance_id
, ji
->instance_id
) == 0) {
8541 if (unlikely(instance
== NULL
)) {
8542 job_log(ms
->job
, LOG_DEBUG
, "Creating new instance of job based on lookup of service %s", ms
->name
);
8543 instance
= job_new_subjob(ms
->job
, instance_id
);
8544 if (job_assumes(j
, instance
!= NULL
)) {
8545 /* Disable this support for now. We only support having
8546 * multi-instance jobs within private XPC domains.
8549 /* If the job is multi-instance, in a singleton XPC domain
8550 * and the request is not coming from within that singleton
8551 * domain, we need to alias the new job into the requesting
8554 if (!j
->mgr
->xpc_singleton
&& xpc_req
) {
8555 (void)job_assumes(instance
, job_new_alias(j
->mgr
, instance
));
8558 job_dispatch(instance
, false);
8563 if (job_assumes(j
, instance
!= NULL
)) {
8564 struct machservice
*msi
= NULL
;
8565 SLIST_FOREACH(msi
, &instance
->machservices
, sle
) {
8566 /* sizeof(servicename) will return the size of a pointer,
8567 * even though it's an array type, because when passing
8568 * arrays as parameters in C, they implicitly degrade to
8571 if (strncmp(servicename
, msi
->name
, sizeof(name_t
) - 1) == 0) {
8578 if (machservice_hidden(ms
) && !machservice_active(ms
)) {
8580 } else if (unlikely(ms
->per_user_hack
)) {
8587 (void)job_assumes(j
, machservice_port(ms
) != MACH_PORT_NULL
);
8588 job_log(j
, LOG_DEBUG
, "%sMach service lookup: %s", per_pid_lookup
? "Per PID " : "", servicename
);
8589 *serviceportp
= machservice_port(ms
);
8591 kr
= BOOTSTRAP_SUCCESS
;
8592 } else if (strict_lookup
&& !privileged
) {
8593 /* Hack: We need to simulate XPC's desire not to establish a hierarchy.
8594 * So if XPC is doing the lookup, and it's not a privileged lookup, we
8595 * won't forward. But if it is a privileged lookup, then we must
8598 return BOOTSTRAP_UNKNOWN_SERVICE
;
8599 } else if (inherited_bootstrap_port
!= MACH_PORT_NULL
) {
8600 // Requests from within an XPC domain don't get forwarded.
8601 job_log(j
, LOG_DEBUG
, "Mach service lookup forwarded: %s", servicename
);
8602 /* Clients potentially check the audit token of the reply to verify that
8603 * the returned send right is trustworthy.
8605 (void)job_assumes_zero(j
, vproc_mig_look_up2_forward(inherited_bootstrap_port
, srp
, servicename
, target_pid
, instance_id
, flags
));
8606 return MIG_NO_REPLY
;
8607 } else if (pid1_magic
&& j
->anonymous
&& ldc
->euid
>= 500 && strcasecmp(j
->mgr
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
8608 /* 5240036 Should start background session when a lookup of CCacheServer
8611 * This is a total hack. We sniff out loginwindow session, and attempt
8612 * to guess what it is up to. If we find a EUID that isn't root, we
8613 * force it over to the per-user context.
8615 return VPROC_ERR_TRY_PER_USER
;
8617 job_log(j
, LOG_DEBUG
, "%sMach service lookup failed: %s", per_pid_lookup
? "Per PID " : "", servicename
);
8618 kr
= BOOTSTRAP_UNKNOWN_SERVICE
;
8625 job_mig_parent(job_t j
, mach_port_t srp
, mach_port_t
*parentport
)
8628 return BOOTSTRAP_NO_MEMORY
;
8631 job_log(j
, LOG_DEBUG
, "Requested parent bootstrap port");
8632 jobmgr_t jm
= j
->mgr
;
8634 if (jobmgr_parent(jm
)) {
8635 *parentport
= jobmgr_parent(jm
)->jm_port
;
8636 } else if (MACH_PORT_NULL
== inherited_bootstrap_port
) {
8637 *parentport
= jm
->jm_port
;
8639 (void)job_assumes_zero(j
, vproc_mig_parent_forward(inherited_bootstrap_port
, srp
));
8640 // The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now
8641 return MIG_NO_REPLY
;
8643 return BOOTSTRAP_SUCCESS
;
8647 job_mig_get_root_bootstrap(job_t j
, mach_port_t
*rootbsp
)
8650 return BOOTSTRAP_NO_MEMORY
;
8653 if (inherited_bootstrap_port
== MACH_PORT_NULL
) {
8654 *rootbsp
= root_jobmgr
->jm_port
;
8655 (void)job_assumes_zero(j
, launchd_mport_make_send(root_jobmgr
->jm_port
));
8657 *rootbsp
= inherited_bootstrap_port
;
8658 (void)job_assumes_zero(j
, launchd_mport_copy_send(inherited_bootstrap_port
));
8661 return BOOTSTRAP_SUCCESS
;
8665 job_mig_info(job_t j
, name_array_t
*servicenamesp
,
8666 unsigned int *servicenames_cnt
, name_array_t
*servicejobsp
,
8667 unsigned int *servicejobs_cnt
, bootstrap_status_array_t
*serviceactivesp
,
8668 unsigned int *serviceactives_cnt
, uint64_t flags
)
8670 name_array_t service_names
= NULL
;
8671 name_array_t service_jobs
= NULL
;
8672 bootstrap_status_array_t service_actives
= NULL
;
8673 unsigned int cnt
= 0, cnt2
= 0;
8677 return BOOTSTRAP_NO_MEMORY
;
8680 if (launchd_flat_mach_namespace
) {
8681 if ((j
->mgr
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) || (flags
& BOOTSTRAP_FORCE_LOCAL
)) {
8691 struct machservice
*msi
= NULL
;
8692 for (i
= 0; i
< MACHSERVICE_HASH_SIZE
; i
++) {
8693 LIST_FOREACH(msi
, &jm
->ms_hash
[i
], name_hash_sle
) {
8694 cnt
+= !msi
->per_pid
? 1 : 0;
8702 mig_allocate((vm_address_t
*)&service_names
, cnt
* sizeof(service_names
[0]));
8703 if (!job_assumes(j
, service_names
!= NULL
)) {
8707 mig_allocate((vm_address_t
*)&service_jobs
, cnt
* sizeof(service_jobs
[0]));
8708 if (!job_assumes(j
, service_jobs
!= NULL
)) {
8712 mig_allocate((vm_address_t
*)&service_actives
, cnt
* sizeof(service_actives
[0]));
8713 if (!job_assumes(j
, service_actives
!= NULL
)) {
8717 for (i
= 0; i
< MACHSERVICE_HASH_SIZE
; i
++) {
8718 LIST_FOREACH(msi
, &jm
->ms_hash
[i
], name_hash_sle
) {
8719 if (!msi
->per_pid
) {
8720 strlcpy(service_names
[cnt2
], machservice_name(msi
), sizeof(service_names
[0]));
8721 msi
= msi
->alias
? msi
->alias
: msi
;
8722 if (msi
->job
->mgr
->shortdesc
) {
8723 strlcpy(service_jobs
[cnt2
], msi
->job
->mgr
->shortdesc
, sizeof(service_jobs
[0]));
8725 strlcpy(service_jobs
[cnt2
], msi
->job
->label
, sizeof(service_jobs
[0]));
8727 service_actives
[cnt2
] = machservice_status(msi
);
8733 (void)job_assumes(j
, cnt
== cnt2
);
8736 *servicenamesp
= service_names
;
8737 *servicejobsp
= service_jobs
;
8738 *serviceactivesp
= service_actives
;
8739 *servicenames_cnt
= *servicejobs_cnt
= *serviceactives_cnt
= cnt
;
8741 return BOOTSTRAP_SUCCESS
;
8744 if (service_names
) {
8745 mig_deallocate((vm_address_t
)service_names
, cnt
* sizeof(service_names
[0]));
8748 mig_deallocate((vm_address_t
)service_jobs
, cnt
* sizeof(service_jobs
[0]));
8750 if (service_actives
) {
8751 mig_deallocate((vm_address_t
)service_actives
, cnt
* sizeof(service_actives
[0]));
8754 return BOOTSTRAP_NO_MEMORY
;
8758 job_mig_lookup_children(job_t j
, mach_port_array_t
*child_ports
,
8759 mach_msg_type_number_t
*child_ports_cnt
, name_array_t
*child_names
,
8760 mach_msg_type_number_t
*child_names_cnt
,
8761 bootstrap_property_array_t
*child_properties
,
8762 mach_msg_type_number_t
*child_properties_cnt
)
8764 kern_return_t kr
= BOOTSTRAP_NO_MEMORY
;
8766 return BOOTSTRAP_NO_MEMORY
;
8769 struct ldcred
*ldc
= runtime_get_caller_creds();
8771 /* Only allow root processes to look up children, even if we're in the per-user launchd.
8772 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
8773 * in a non-flat namespace.
8775 if (ldc
->euid
!= 0) {
8776 job_log(j
, LOG_WARNING
, "Attempt to look up children of bootstrap by unprivileged job.");
8777 return BOOTSTRAP_NOT_PRIVILEGED
;
8780 unsigned int cnt
= 0;
8782 jobmgr_t jmr
= j
->mgr
;
8783 jobmgr_t jmi
= NULL
;
8784 SLIST_FOREACH(jmi
, &jmr
->submgrs
, sle
) {
8788 // Find our per-user launchds if we're PID 1.
8791 LIST_FOREACH(ji
, &jmr
->jobs
, sle
) {
8792 cnt
+= ji
->per_user
? 1 : 0;
8797 return BOOTSTRAP_NO_CHILDREN
;
8800 mach_port_array_t _child_ports
= NULL
;
8801 mig_allocate((vm_address_t
*)&_child_ports
, cnt
* sizeof(_child_ports
[0]));
8802 if (!job_assumes(j
, _child_ports
!= NULL
)) {
8803 kr
= BOOTSTRAP_NO_MEMORY
;
8807 name_array_t _child_names
= NULL
;
8808 mig_allocate((vm_address_t
*)&_child_names
, cnt
* sizeof(_child_names
[0]));
8809 if (!job_assumes(j
, _child_names
!= NULL
)) {
8810 kr
= BOOTSTRAP_NO_MEMORY
;
8814 bootstrap_property_array_t _child_properties
= NULL
;
8815 mig_allocate((vm_address_t
*)&_child_properties
, cnt
* sizeof(_child_properties
[0]));
8816 if (!job_assumes(j
, _child_properties
!= NULL
)) {
8817 kr
= BOOTSTRAP_NO_MEMORY
;
8821 unsigned int cnt2
= 0;
8822 SLIST_FOREACH(jmi
, &jmr
->submgrs
, sle
) {
8823 if (jobmgr_assumes_zero(jmi
, launchd_mport_make_send(jmi
->jm_port
)) == KERN_SUCCESS
) {
8824 _child_ports
[cnt2
] = jmi
->jm_port
;
8826 _child_ports
[cnt2
] = MACH_PORT_NULL
;
8829 strlcpy(_child_names
[cnt2
], jmi
->name
, sizeof(_child_names
[0]));
8830 _child_properties
[cnt2
] = jmi
->properties
;
8835 if (pid1_magic
) LIST_FOREACH(ji
, &jmr
->jobs
, sle
) {
8837 if (job_assumes(ji
, SLIST_FIRST(&ji
->machservices
)->per_user_hack
== true)) {
8838 mach_port_t port
= machservice_port(SLIST_FIRST(&ji
->machservices
));
8840 if (job_assumes_zero(ji
, launchd_mport_copy_send(port
)) == KERN_SUCCESS
) {
8841 _child_ports
[cnt2
] = port
;
8843 _child_ports
[cnt2
] = MACH_PORT_NULL
;
8846 _child_ports
[cnt2
] = MACH_PORT_NULL
;
8849 strlcpy(_child_names
[cnt2
], ji
->label
, sizeof(_child_names
[0]));
8850 _child_properties
[cnt2
] |= BOOTSTRAP_PROPERTY_PERUSER
;
8856 *child_names_cnt
= cnt
;
8857 *child_ports_cnt
= cnt
;
8858 *child_properties_cnt
= cnt
;
8860 *child_names
= _child_names
;
8861 *child_ports
= _child_ports
;
8862 *child_properties
= _child_properties
;
8865 for (i
= 0; i
< cnt
; i
++) {
8866 job_log(j
, LOG_DEBUG
, "child_names[%u] = %s", i
, (char *)_child_names
[i
]);
8869 return BOOTSTRAP_SUCCESS
;
8872 mig_deallocate((vm_address_t
)_child_ports
, cnt
* sizeof(_child_ports
[0]));
8876 mig_deallocate((vm_address_t
)_child_names
, cnt
* sizeof(_child_ports
[0]));
8879 if (_child_properties
) {
8880 mig_deallocate((vm_address_t
)_child_properties
, cnt
* sizeof(_child_properties
[0]));
8887 job_mig_pid_is_managed(job_t j
__attribute__((unused
)), pid_t p
, boolean_t
*managed
)
8889 struct ldcred
*ldc
= runtime_get_caller_creds();
8890 if ((ldc
->euid
!= geteuid()) && (ldc
->euid
!= 0)) {
8891 return BOOTSTRAP_NOT_PRIVILEGED
;
8894 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
8895 * directly by launchd as agents.
8897 job_t j_for_pid
= jobmgr_find_by_pid_deep(root_jobmgr
, p
, false);
8898 if (j_for_pid
&& !j_for_pid
->anonymous
&& !j_for_pid
->legacy_LS_job
) {
8902 return BOOTSTRAP_SUCCESS
;
8906 job_mig_port_for_label(job_t j
__attribute__((unused
)), name_t label
, mach_port_t
*mp
)
8909 return BOOTSTRAP_NO_MEMORY
;
8912 struct ldcred
*ldc
= runtime_get_caller_creds();
8913 kern_return_t kr
= BOOTSTRAP_NOT_PRIVILEGED
;
8916 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
8917 return BOOTSTRAP_NOT_PRIVILEGED
;
8921 mach_port_t _mp
= MACH_PORT_NULL
;
8922 if (!j
->deny_job_creation
&& (ldc
->euid
== 0 || ldc
->euid
== geteuid())) {
8923 job_t target_j
= job_find(NULL
, label
);
8924 if (jobmgr_assumes(root_jobmgr
, target_j
!= NULL
)) {
8925 if (target_j
->j_port
== MACH_PORT_NULL
) {
8926 (void)job_assumes(target_j
, job_setup_machport(target_j
) == true);
8929 _mp
= target_j
->j_port
;
8930 kr
= _mp
!= MACH_PORT_NULL
? BOOTSTRAP_SUCCESS
: BOOTSTRAP_NO_MEMORY
;
8932 kr
= BOOTSTRAP_NO_MEMORY
;
8941 job_mig_set_security_session(job_t j
, uuid_t uuid
, mach_port_t asport
)
8943 #if TARGET_OS_EMBEDDED
8944 return KERN_SUCCESS
;
8948 return BOOTSTRAP_NO_MEMORY
;
8951 uuid_string_t uuid_str
;
8952 uuid_unparse(uuid
, uuid_str
);
8953 job_log(j
, LOG_DEBUG
, "Setting session %u for UUID %s...", asport
, uuid_str
);
8955 job_t ji
= NULL
, jt
= NULL
;
8956 LIST_FOREACH_SAFE(ji
, &s_needing_sessions
, sle
, jt
) {
8957 uuid_string_t uuid_str2
;
8958 uuid_unparse(ji
->expected_audit_uuid
, uuid_str2
);
8960 if (uuid_compare(uuid
, ji
->expected_audit_uuid
) == 0) {
8961 uuid_clear(ji
->expected_audit_uuid
);
8962 if (asport
!= MACH_PORT_NULL
) {
8963 job_log(ji
, LOG_DEBUG
, "Job should join session with port 0x%x", asport
);
8964 (void)job_assumes_zero(j
, launchd_mport_copy_send(asport
));
8966 job_log(ji
, LOG_DEBUG
, "No session to set for job. Using our session.");
8969 ji
->asport
= asport
;
8970 LIST_REMOVE(ji
, needing_session_sle
);
8971 job_dispatch(ji
, false);
8975 /* Each job that the session port was set for holds a reference. At the end of
8976 * the loop, there will be one extra reference belonging to this MiG protocol.
8977 * We need to release it so that the session goes away when all the jobs
8978 * referencing it are unloaded.
8980 (void)job_assumes_zero(j
, launchd_mport_deallocate(asport
));
8982 return KERN_SUCCESS
;
8986 jobmgr_find_by_name(jobmgr_t jm
, const char *where
)
8990 // NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic.
8991 if (where
== NULL
) {
8992 if (strcasecmp(jm
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
8993 where
= VPROCMGR_SESSION_LOGINWINDOW
;
8995 where
= VPROCMGR_SESSION_AQUA
;
8999 if (strcasecmp(jm
->name
, where
) == 0) {
9003 if (strcasecmp(where
, VPROCMGR_SESSION_BACKGROUND
) == 0 && !pid1_magic
) {
9008 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
9009 if (unlikely(jmi
->shutting_down
)) {
9011 } else if (jmi
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
) {
9013 } else if (strcasecmp(jmi
->name
, where
) == 0) {
9015 } else if (strcasecmp(jmi
->name
, VPROCMGR_SESSION_BACKGROUND
) == 0 && pid1_magic
) {
9016 SLIST_FOREACH(jmi2
, &jmi
->submgrs
, sle
) {
9017 if (strcasecmp(jmi2
->name
, where
) == 0) {
9030 job_mig_move_subset(job_t j
, mach_port_t target_subset
, name_t session_type
, mach_port_t asport
, uint64_t flags
)
9032 mach_msg_type_number_t l2l_i
, l2l_port_cnt
= 0;
9033 mach_port_array_t l2l_ports
= NULL
;
9034 mach_port_t reqport
, rcvright
;
9035 kern_return_t kr
= 1;
9036 launch_data_t out_obj_array
= NULL
;
9037 struct ldcred
*ldc
= runtime_get_caller_creds();
9038 jobmgr_t jmr
= NULL
;
9041 return BOOTSTRAP_NO_MEMORY
;
9044 if (job_mig_intran2(root_jobmgr
, target_subset
, ldc
->pid
)) {
9045 job_log(j
, LOG_ERR
, "Moving a session to ourself is bogus.");
9047 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
9051 job_log(j
, LOG_DEBUG
, "Move subset attempt: 0x%x", target_subset
);
9053 kr
= _vproc_grab_subset(target_subset
, &reqport
, &rcvright
, &out_obj_array
, &l2l_ports
, &l2l_port_cnt
);
9054 if (job_assumes_zero(j
, kr
) != 0) {
9058 if (launch_data_array_get_count(out_obj_array
) != l2l_port_cnt
) {
9059 osx_assert_zero(l2l_port_cnt
);
9062 if (!job_assumes(j
, (jmr
= jobmgr_new(j
->mgr
, reqport
, rcvright
, false, session_type
, false, asport
)) != NULL
)) {
9063 kr
= BOOTSTRAP_NO_MEMORY
;
9067 jmr
->properties
|= BOOTSTRAP_PROPERTY_MOVEDSUBSET
;
9069 /* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
9070 * processing an IPC request, we'll do this action before the new job manager can get any IPC
9071 * requests. This serialization is guaranteed since we are single-threaded in that respect.
9073 if (flags
& LAUNCH_GLOBAL_ON_DEMAND
) {
9074 // This is so awful.
9075 // Remove the job from its current job manager.
9076 LIST_REMOVE(j
, sle
);
9077 LIST_REMOVE(j
, pid_hash_sle
);
9079 // Put the job into the target job manager.
9080 LIST_INSERT_HEAD(&jmr
->jobs
, j
, sle
);
9081 LIST_INSERT_HEAD(&jmr
->active_jobs
[ACTIVE_JOB_HASH(j
->p
)], j
, pid_hash_sle
);
9084 job_set_global_on_demand(j
, true);
9086 if (!j
->holds_ref
) {
9087 job_log(j
, LOG_PERF
, "Job moved subset into: %s", j
->mgr
->name
);
9088 j
->holds_ref
= true;
9093 for (l2l_i
= 0; l2l_i
< l2l_port_cnt
; l2l_i
++) {
9094 launch_data_t tmp
, obj_at_idx
;
9095 struct machservice
*ms
;
9096 job_t j_for_service
;
9097 const char *serv_name
;
9101 (void)job_assumes(j
, obj_at_idx
= launch_data_array_get_index(out_obj_array
, l2l_i
));
9102 (void)job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PID
));
9103 target_pid
= (pid_t
)launch_data_get_integer(tmp
);
9104 (void)job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PERPID
));
9105 serv_perpid
= launch_data_get_bool(tmp
);
9106 (void)job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_NAME
));
9107 serv_name
= launch_data_get_string(tmp
);
9109 j_for_service
= jobmgr_find_by_pid(jmr
, target_pid
, true);
9111 if (unlikely(!j_for_service
)) {
9112 // The PID probably exited
9113 (void)job_assumes_zero(j
, launchd_mport_deallocate(l2l_ports
[l2l_i
]));
9117 if (likely(ms
= machservice_new(j_for_service
, serv_name
, &l2l_ports
[l2l_i
], serv_perpid
))) {
9118 job_log(j
, LOG_DEBUG
, "Importing %s into new bootstrap.", serv_name
);
9119 machservice_request_notifications(ms
);
9126 if (out_obj_array
) {
9127 launch_data_free(out_obj_array
);
9131 mig_deallocate((vm_address_t
)l2l_ports
, l2l_port_cnt
* sizeof(l2l_ports
[0]));
9135 if (target_subset
) {
9136 (void)job_assumes_zero(j
, launchd_mport_deallocate(target_subset
));
9139 (void)job_assumes_zero(j
, launchd_mport_deallocate(asport
));
9142 jobmgr_shutdown(jmr
);
9149 job_mig_init_session(job_t j
, name_t session_type
, mach_port_t asport
)
9152 return BOOTSTRAP_NO_MEMORY
;
9157 kern_return_t kr
= BOOTSTRAP_NO_MEMORY
;
9158 if (j
->mgr
->session_initialized
) {
9159 job_log(j
, LOG_ERR
, "Tried to initialize an already setup session!");
9160 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
9161 } else if (strcmp(session_type
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
9167 * We're working around LoginWindow and the WindowServer.
9169 * In practice, there is only one LoginWindow session. Unfortunately, for certain
9170 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
9171 * spawns a replacement loginwindow session before cleaning up the previous one.
9173 * We're going to use the creation of a new LoginWindow context as a clue that the
9174 * previous LoginWindow context is on the way out and therefore we should just
9175 * kick-start the shutdown of it.
9178 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
9179 if (unlikely(jmi
->shutting_down
)) {
9181 } else if (strcasecmp(jmi
->name
, session_type
) == 0) {
9182 jobmgr_shutdown(jmi
);
9188 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Initializing as %s", session_type
);
9189 strcpy(j
->mgr
->name_init
, session_type
);
9191 if (job_assumes(j
, (j2
= jobmgr_init_session(j
->mgr
, session_type
, false)))) {
9192 j2
->asport
= asport
;
9193 (void)job_assumes(j
, job_dispatch(j2
, true));
9194 kr
= BOOTSTRAP_SUCCESS
;
9201 job_mig_switch_to_session(job_t j
, mach_port_t requestor_port
, name_t session_name
, mach_port_t asport
, mach_port_t
*new_bsport
)
9203 struct ldcred
*ldc
= runtime_get_caller_creds();
9204 if (!jobmgr_assumes(root_jobmgr
, j
!= NULL
)) {
9205 jobmgr_log(root_jobmgr
, LOG_ERR
, "%s() called with NULL job: PID %d", __func__
, ldc
->pid
);
9206 return BOOTSTRAP_NO_MEMORY
;
9209 if (j
->mgr
->shutting_down
) {
9210 return BOOTSTRAP_UNKNOWN_SERVICE
;
9213 job_log(j
, LOG_DEBUG
, "Job wants to move to %s session.", session_name
);
9215 if (!job_assumes(j
, pid1_magic
== false)) {
9216 job_log(j
, LOG_WARNING
, "Switching sessions is not allowed in the system Mach bootstrap.");
9217 return BOOTSTRAP_NOT_PRIVILEGED
;
9220 if (!j
->anonymous
) {
9221 job_log(j
, LOG_NOTICE
, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
9222 return BOOTSTRAP_NOT_PRIVILEGED
;
9225 jobmgr_t target_jm
= jobmgr_find_by_name(root_jobmgr
, session_name
);
9226 if (target_jm
== j
->mgr
) {
9227 job_log(j
, LOG_DEBUG
, "Job is already in its desired session (%s).", session_name
);
9228 (void)job_assumes_zero(j
, launchd_mport_deallocate(asport
));
9229 (void)job_assumes_zero(j
, launchd_mport_deallocate(requestor_port
));
9230 *new_bsport
= target_jm
->jm_port
;
9231 return BOOTSTRAP_SUCCESS
;
9235 target_jm
= jobmgr_new(j
->mgr
, requestor_port
, MACH_PORT_NULL
, false, session_name
, false, asport
);
9237 target_jm
->properties
|= BOOTSTRAP_PROPERTY_IMPLICITSUBSET
;
9238 (void)job_assumes_zero(j
, launchd_mport_deallocate(asport
));
9242 if (!job_assumes(j
, target_jm
!= NULL
)) {
9243 job_log(j
, LOG_WARNING
, "Could not find %s session!", session_name
);
9244 return BOOTSTRAP_NO_MEMORY
;
9247 // Remove the job from it's current job manager.
9248 LIST_REMOVE(j
, sle
);
9249 LIST_REMOVE(j
, pid_hash_sle
);
9251 job_t ji
= NULL
, jit
= NULL
;
9252 LIST_FOREACH_SAFE(ji
, &j
->mgr
->global_env_jobs
, global_env_sle
, jit
) {
9254 LIST_REMOVE(ji
, global_env_sle
);
9259 // Put the job into the target job manager.
9260 LIST_INSERT_HEAD(&target_jm
->jobs
, j
, sle
);
9261 LIST_INSERT_HEAD(&target_jm
->active_jobs
[ACTIVE_JOB_HASH(j
->p
)], j
, pid_hash_sle
);
9264 LIST_INSERT_HEAD(&target_jm
->global_env_jobs
, j
, global_env_sle
);
9267 // Move our Mach services over if we're not in a flat namespace.
9268 if (!launchd_flat_mach_namespace
&& !SLIST_EMPTY(&j
->machservices
)) {
9269 struct machservice
*msi
= NULL
, *msit
= NULL
;
9270 SLIST_FOREACH_SAFE(msi
, &j
->machservices
, sle
, msit
) {
9271 LIST_REMOVE(msi
, name_hash_sle
);
9272 LIST_INSERT_HEAD(&target_jm
->ms_hash
[hash_ms(msi
->name
)], msi
, name_hash_sle
);
9278 if (!j
->holds_ref
) {
9279 /* Anonymous jobs which move around are particularly interesting to us, so we want to
9280 * stick around while they're still around.
9281 * For example, login calls into the PAM launchd module, which moves the process into
9282 * the StandardIO session by default. So we'll hold a reference on that job to prevent
9283 * ourselves from going away.
9285 j
->holds_ref
= true;
9286 job_log(j
, LOG_PERF
, "Job switched into manager: %s", j
->mgr
->name
);
9290 *new_bsport
= target_jm
->jm_port
;
9292 return KERN_SUCCESS
;
9296 job_mig_take_subset(job_t j
, mach_port_t
*reqport
, mach_port_t
*rcvright
,
9297 vm_offset_t
*outdata
, mach_msg_type_number_t
*outdataCnt
,
9298 mach_port_array_t
*portsp
, unsigned int *ports_cnt
)
9300 launch_data_t tmp_obj
, tmp_dict
, outdata_obj_array
= NULL
;
9301 mach_port_array_t ports
= NULL
;
9302 unsigned int cnt
= 0, cnt2
= 0;
9304 struct machservice
*ms
;
9309 return BOOTSTRAP_NO_MEMORY
;
9314 if (unlikely(!pid1_magic
)) {
9315 job_log(j
, LOG_ERR
, "Only the system launchd will transfer Mach sub-bootstraps.");
9316 return BOOTSTRAP_NOT_PRIVILEGED
;
9318 if (unlikely(jobmgr_parent(jm
) == NULL
)) {
9319 job_log(j
, LOG_ERR
, "Root Mach bootstrap cannot be transferred.");
9320 return BOOTSTRAP_NOT_PRIVILEGED
;
9322 if (unlikely(strcasecmp(jm
->name
, VPROCMGR_SESSION_AQUA
) == 0)) {
9323 job_log(j
, LOG_ERR
, "Cannot transfer a setup GUI session.");
9324 return BOOTSTRAP_NOT_PRIVILEGED
;
9326 if (unlikely(!j
->anonymous
)) {
9327 job_log(j
, LOG_ERR
, "Only the anonymous job can transfer Mach sub-bootstraps.");
9328 return BOOTSTRAP_NOT_PRIVILEGED
;
9331 job_log(j
, LOG_DEBUG
, "Transferring sub-bootstrap to the per session launchd.");
9333 outdata_obj_array
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
9334 if (!job_assumes(j
, outdata_obj_array
)) {
9338 *outdataCnt
= 20 * 1024 * 1024;
9339 mig_allocate(outdata
, *outdataCnt
);
9340 if (!job_assumes(j
, *outdata
!= 0)) {
9344 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
9345 if (!ji
->anonymous
) {
9348 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
9353 mig_allocate((vm_address_t
*)&ports
, cnt
* sizeof(ports
[0]));
9354 if (!job_assumes(j
, ports
!= NULL
)) {
9358 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
9359 if (!ji
->anonymous
) {
9363 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
9364 if (job_assumes(j
, (tmp_dict
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
9365 (void)job_assumes(j
, launch_data_array_set_index(outdata_obj_array
, tmp_dict
, cnt2
));
9370 if (job_assumes(j
, (tmp_obj
= launch_data_new_string(machservice_name(ms
))))) {
9371 (void)job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_NAME
));
9376 if (job_assumes(j
, (tmp_obj
= launch_data_new_integer((ms
->job
->p
))))) {
9377 (void)job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PID
));
9382 if (job_assumes(j
, (tmp_obj
= launch_data_new_bool((ms
->per_pid
))))) {
9383 (void)job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PERPID
));
9388 ports
[cnt2
] = machservice_port(ms
);
9390 // Increment the send right by one so we can shutdown the jobmgr cleanly
9391 (void)jobmgr_assumes_zero(jm
, launchd_mport_copy_send(ports
[cnt2
]));
9396 (void)job_assumes(j
, cnt
== cnt2
);
9398 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK
);
9399 packed_size
= launch_data_pack(outdata_obj_array
, (void *)*outdata
, *outdataCnt
, NULL
, NULL
);
9400 if (!job_assumes(j
, packed_size
!= 0)) {
9404 launch_data_free(outdata_obj_array
);
9409 *reqport
= jm
->req_port
;
9410 *rcvright
= jm
->jm_port
;
9415 workaround_5477111
= j
;
9417 jobmgr_shutdown(jm
);
9419 return BOOTSTRAP_SUCCESS
;
9422 if (outdata_obj_array
) {
9423 launch_data_free(outdata_obj_array
);
9426 mig_deallocate(*outdata
, *outdataCnt
);
9429 mig_deallocate((vm_address_t
)ports
, cnt
* sizeof(ports
[0]));
9432 return BOOTSTRAP_NO_MEMORY
;
9436 job_mig_subset(job_t j
, mach_port_t requestorport
, mach_port_t
*subsetportp
)
9442 return BOOTSTRAP_NO_MEMORY
;
9444 if (j
->mgr
->shutting_down
) {
9445 return BOOTSTRAP_UNKNOWN_SERVICE
;
9450 while ((jmr
= jobmgr_parent(jmr
)) != NULL
) {
9454 // Since we use recursion, we need an artificial depth for subsets
9455 if (unlikely(bsdepth
> 100)) {
9456 job_log(j
, LOG_ERR
, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth
);
9457 return BOOTSTRAP_NO_MEMORY
;
9460 char name
[NAME_MAX
];
9461 snprintf(name
, sizeof(name
), "%s[%i].subset.%i", j
->anonymous
? j
->prog
: j
->label
, j
->p
, MACH_PORT_INDEX(requestorport
));
9463 if (!job_assumes(j
, (jmr
= jobmgr_new(j
->mgr
, requestorport
, MACH_PORT_NULL
, false, name
, true, j
->asport
)) != NULL
)) {
9464 if (unlikely(requestorport
== MACH_PORT_NULL
)) {
9465 return BOOTSTRAP_NOT_PRIVILEGED
;
9467 return BOOTSTRAP_NO_MEMORY
;
9470 *subsetportp
= jmr
->jm_port
;
9471 jmr
->properties
|= BOOTSTRAP_PROPERTY_EXPLICITSUBSET
;
9473 /* A job could create multiple subsets, so only add a reference the first time
9474 * it does so we don't have to keep a count.
9476 if (j
->anonymous
&& !j
->holds_ref
) {
9477 job_log(j
, LOG_PERF
, "Job created subset: %s", jmr
->name
);
9478 j
->holds_ref
= true;
9482 job_log(j
, LOG_DEBUG
, "Job created a subset named \"%s\"", jmr
->name
);
9483 return BOOTSTRAP_SUCCESS
;
9487 _xpc_domain_import_service(jobmgr_t jm
, launch_data_t pload
)
9489 jobmgr_t where2put
= NULL
;
9491 if (launch_data_get_type(pload
) != LAUNCH_DATA_DICTIONARY
) {
9496 launch_data_t ldlabel
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LABEL
);
9497 if (!ldlabel
|| launch_data_get_type(ldlabel
) != LAUNCH_DATA_STRING
) {
9502 const char *label
= launch_data_get_string(ldlabel
);
9503 jobmgr_log(jm
, LOG_DEBUG
, "Importing service: %s", label
);
9505 launch_data_t destname
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_XPCDOMAIN
);
9507 bool supported_domain
= false;
9509 if (launch_data_get_type(destname
) == LAUNCH_DATA_STRING
) {
9510 const char *str
= launch_data_get_string(destname
);
9511 if (strcmp(str
, XPC_DOMAIN_TYPE_SYSTEM
) == 0) {
9512 where2put
= _s_xpc_system_domain
;
9513 } else if (strcmp(str
, XPC_DOMAIN_TYPE_PERUSER
) == 0) {
9514 where2put
= jobmgr_find_xpc_per_user_domain(jm
, jm
->req_euid
);
9515 supported_domain
= true;
9516 } else if (strcmp(str
, XPC_DOMAIN_TYPE_PERSESSION
) == 0) {
9517 where2put
= jobmgr_find_xpc_per_session_domain(jm
, jm
->req_asid
);
9519 jobmgr_log(jm
, LOG_ERR
, "Invalid XPC domain type: %s", str
);
9523 jobmgr_log(jm
, LOG_ERR
, "XPC domain type is not a string.");
9527 if (where2put
&& !supported_domain
) {
9528 launch_data_t mi
= NULL
;
9529 if ((mi
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_MULTIPLEINSTANCES
))) {
9530 if (launch_data_get_type(mi
) == LAUNCH_DATA_BOOL
&& launch_data_get_bool(mi
)) {
9531 jobmgr_log(where2put
, LOG_ERR
, "Multiple-instance services are not supported in this domain.");
9543 /* Gross. If the service already exists in a singleton domain, then
9544 * jobmgr_import2() will return the existing job. But if we fail to alias
9545 * this job, we will normally want to remove it. But if we did not create
9546 * it in the first place, then we need to avoid removing it. So check
9547 * errno against EEXIST in the success case and if it's EEXIST, then do
9548 * not remove the original job in the event of a failed alias.
9550 * This really needs to be re-thought, but I think it'll require a larger
9551 * evaluation of launchd's data structures. Right now, once a job is
9552 * imported into a singleton domain, it won't be removed until the system
9553 * shuts down, but that may not always be true. If it ever changes, we'll
9554 * have a problem because we'll have to account for all existing aliases
9555 * and clean them up somehow. Or just start ref-counting. I knew this
9556 * aliasing stuff would be trouble...
9558 * <rdar://problem/10646503>
9560 jobmgr_log(where2put
, LOG_DEBUG
, "Importing service...");
9563 if ((j
= jobmgr_import2(where2put
, pload
))) {
9564 bool created
= (errno
!= EEXIST
);
9565 j
->xpc_service
= true;
9567 if (where2put
->xpc_singleton
) {
9568 /* If the service was destined for one of the global domains,
9569 * then we have to alias it into our local domain to reserve the
9573 if (!(ja
= job_new_alias(jm
, j
))) {
9574 /* If we failed to alias the job because of a conflict over
9575 * the label, then we remove it from the global domain. We
9576 * don't want to risk having imported a malicious job into
9577 * one of the global domains.
9579 if (errno
!= EEXIST
) {
9580 job_log(j
, LOG_ERR
, "Failed to alias job into: %s: %d: %s", where2put
->name
, errno
, strerror(errno
));
9586 jobmgr_log(jm
, LOG_WARNING
, "Singleton service already existed in job-local namespace. Removing: %s", j
->label
);
9592 jobmgr_log(jm
, LOG_DEBUG
, "Aliased service into local domain: %s", j
->label
);
9593 (void)job_dispatch(j
, false);
9594 ja
->xpc_service
= true;
9598 (void)job_dispatch(j
, false);
9602 jobmgr_log(jm
, LOG_DEBUG
, "Could not find destination for service: %s", label
);
9609 _xpc_domain_import_services(job_t j
, launch_data_t services
)
9612 if (launch_data_get_type(services
) != LAUNCH_DATA_ARRAY
) {
9617 size_t c
= launch_data_array_get_count(services
);
9618 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Importing new services: %lu", c
);
9620 for (i
= 0; i
< c
; i
++) {
9621 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Importing service at index: %lu", i
);
9624 launch_data_t ploadi
= launch_data_array_get_index(services
, i
);
9625 if (!(nj
= _xpc_domain_import_service(j
->mgr
, ploadi
))) {
9626 if (!j
->mgr
->session_initialized
&& errno
) {
9627 /* Service import failures are only fatal if the domain is being
9628 * initialized. If we're extending the domain, we can run into
9629 * errors with services already existing, so we just ignore them.
9630 * In the case of a domain extension, we don't want to halt the
9631 * operation if we run into an error with one service.
9633 * <rdar://problem/10842779>
9635 jobmgr_log(j
->mgr
, LOG_ERR
, "Failed to import service at index: %lu: %d: %s", i
, errno
, strerror(errno
));
9640 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Imported service: %s", nj
->label
);
9652 xpc_domain_import2(job_t j
, mach_port_t reqport
, mach_port_t dport
)
9654 if (unlikely(!pid1_magic
)) {
9655 job_log(j
, LOG_ERR
, "XPC domains may only reside in PID 1.");
9656 return BOOTSTRAP_NOT_PRIVILEGED
;
9658 if (!j
|| !MACH_PORT_VALID(reqport
)) {
9659 return BOOTSTRAP_UNKNOWN_SERVICE
;
9661 if (root_jobmgr
->shutting_down
) {
9662 jobmgr_log(root_jobmgr
, LOG_ERR
, "Attempt to create new domain while shutting down.");
9663 return BOOTSTRAP_NOT_PRIVILEGED
;
9665 if (!j
->xpc_bootstrapper
) {
9666 job_log(j
, LOG_ERR
, "Attempt to create new XPC domain by unprivileged job.");
9667 return BOOTSTRAP_NOT_PRIVILEGED
;
9670 kern_return_t kr
= BOOTSTRAP_NO_MEMORY
;
9671 /* All XPC domains are children of the root job manager. What we're creating
9672 * here is really just a skeleton. By creating it, we're adding reqp to our
9673 * port set. It will have two messages on it. The first specifies the
9674 * environment of the originator. This is so we can cache it and hand it to
9675 * xpcproxy to bootstrap our services. The second is the set of jobs that is
9676 * to be bootstrapped in.
9678 jobmgr_t jm
= jobmgr_new(root_jobmgr
, reqport
, dport
, false, NULL
, true, MACH_PORT_NULL
);
9679 if (job_assumes(j
, jm
!= NULL
)) {
9680 jm
->properties
|= BOOTSTRAP_PROPERTY_XPC_DOMAIN
;
9681 jm
->shortdesc
= "private";
9682 kr
= BOOTSTRAP_SUCCESS
;
9689 xpc_domain_set_environment(job_t j
, mach_port_t rp
, mach_port_t bsport
, mach_port_t excport
, vm_offset_t ctx
, mach_msg_type_number_t ctx_sz
)
9692 /* Due to the whacky nature of XPC service bootstrapping, we can end up
9693 * getting this message long after the requesting process has gone away.
9694 * See <rdar://problem/8593143>.
9696 return BOOTSTRAP_UNKNOWN_SERVICE
;
9699 jobmgr_t jm
= j
->mgr
;
9700 if (!(jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
9701 return BOOTSTRAP_NOT_PRIVILEGED
;
9704 if (jm
->req_asport
!= MACH_PORT_NULL
) {
9705 return BOOTSTRAP_NOT_PRIVILEGED
;
9708 struct ldcred
*ldc
= runtime_get_caller_creds();
9709 struct proc_bsdshortinfo proc
;
9710 if (proc_pidinfo(ldc
->pid
, PROC_PIDT_SHORTBSDINFO
, 1, &proc
, PROC_PIDT_SHORTBSDINFO_SIZE
) == 0) {
9711 if (errno
!= ESRCH
) {
9712 (void)jobmgr_assumes_zero(jm
, errno
);
9717 return BOOTSTRAP_NO_MEMORY
;
9720 #if !TARGET_OS_EMBEDDED
9721 if (jobmgr_assumes_zero(jm
, audit_session_port(ldc
->asid
, &jm
->req_asport
)) != 0) {
9724 job_log(j
, LOG_ERR
, "Failed to get port for ASID: %u", ldc
->asid
);
9725 return BOOTSTRAP_NOT_PRIVILEGED
;
9728 jm
->req_asport
= MACH_PORT_DEAD
;
9731 (void)snprintf(jm
->name_init
, NAME_MAX
, "com.apple.xpc.domain.%s[%i]", proc
.pbsi_comm
, ldc
->pid
);
9732 strlcpy(jm
->owner
, proc
.pbsi_comm
, sizeof(jm
->owner
));
9733 jm
->req_bsport
= bsport
;
9734 jm
->req_excport
= excport
;
9737 jm
->req_ctx_sz
= ctx_sz
;
9738 jm
->req_pid
= ldc
->pid
;
9739 jm
->req_euid
= ldc
->euid
;
9740 jm
->req_egid
= ldc
->egid
;
9741 jm
->req_asid
= ldc
->asid
;
9743 return KERN_SUCCESS
;
9747 xpc_domain_load_services(job_t j
, vm_offset_t services_buff
, mach_msg_type_number_t services_sz
)
9750 return BOOTSTRAP_UNKNOWN_SERVICE
;
9753 job_t rootj
= jobmgr_find_by_pid(root_jobmgr
, j
->p
, false);
9754 if (!(rootj
&& rootj
->xpc_bootstrapper
)) {
9755 job_log(j
, LOG_ERR
, "Attempt to load services into XPC domain by unprivileged job.");
9756 return BOOTSTRAP_NOT_PRIVILEGED
;
9759 // This is just for XPC domains (for now).
9760 if (!(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
9761 return BOOTSTRAP_NOT_PRIVILEGED
;
9763 if (j
->mgr
->session_initialized
) {
9764 jobmgr_log(j
->mgr
, LOG_ERR
, "Attempt to initialize an already-initialized XPC domain.");
9765 return BOOTSTRAP_NOT_PRIVILEGED
;
9769 launch_data_t services
= launch_data_unpack((void *)services_buff
, services_sz
, NULL
, 0, &offset
, NULL
);
9771 return BOOTSTRAP_NO_MEMORY
;
9774 int error
= _xpc_domain_import_services(j
, services
);
9776 j
->mgr
->error
= error
;
9777 jobmgr_log(j
->mgr
, LOG_ERR
, "Obliterating domain.");
9778 jobmgr_remove(j
->mgr
);
9780 j
->mgr
->session_initialized
= true;
9781 (void)jobmgr_assumes_zero(j
->mgr
, xpc_call_wakeup(j
->mgr
->req_rport
, BOOTSTRAP_SUCCESS
));
9782 j
->mgr
->req_rport
= MACH_PORT_NULL
;
9784 /* Returning a failure code will destroy the message, whereas returning
9785 * success will not, so we need to clean up here.
9787 mig_deallocate(services_buff
, services_sz
);
9788 error
= BOOTSTRAP_SUCCESS
;
9795 xpc_domain_check_in(job_t j
, mach_port_t
*bsport
, mach_port_t
*sbsport
,
9796 mach_port_t
*excport
, mach_port_t
*asport
, uint32_t *uid
, uint32_t *gid
,
9797 int32_t *asid
, vm_offset_t
*ctx
, mach_msg_type_number_t
*ctx_sz
)
9799 if (!jobmgr_assumes(root_jobmgr
, j
!= NULL
)) {
9800 return BOOTSTRAP_UNKNOWN_SERVICE
;
9802 jobmgr_t jm
= j
->mgr
;
9803 if (!(jm
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
9804 return BOOTSTRAP_NOT_PRIVILEGED
;
9807 if (jm
->req_asport
== MACH_PORT_NULL
) {
9808 return BOOTSTRAP_NOT_PRIVILEGED
;
9811 *bsport
= jm
->req_bsport
;
9812 *sbsport
= root_jobmgr
->jm_port
;
9813 *excport
= jm
->req_excport
;
9814 *asport
= jm
->req_asport
;
9815 *uid
= jm
->req_euid
;
9816 *gid
= jm
->req_egid
;
9817 *asid
= jm
->req_asid
;
9820 *ctx_sz
= jm
->req_ctx_sz
;
9822 return KERN_SUCCESS
;
9826 xpc_domain_get_service_name(job_t j
, event_name_t name
)
9829 return BOOTSTRAP_NO_MEMORY
;
9831 if (!j
->xpc_service
) {
9832 jobmgr_log(j
->mgr
, LOG_ERR
, "Attempt to get service name by non-XPC service: %s", j
->label
);
9833 return BOOTSTRAP_NOT_PRIVILEGED
;
9836 struct machservice
* ms
= SLIST_FIRST(&j
->machservices
);
9838 jobmgr_log(j
->mgr
, LOG_ERR
, "Attempt to get service name of job with no MachServices: %s", j
->label
);
9839 return BOOTSTRAP_UNKNOWN_SERVICE
;
9842 (void)strlcpy(name
, ms
->name
, sizeof(event_name_t
));
9843 return BOOTSTRAP_SUCCESS
;
9846 #if XPC_LPI_VERSION >= 20111216
9848 xpc_domain_add_services(job_t j
, vm_offset_t services_buff
, mach_msg_type_number_t services_sz
)
9851 return BOOTSTRAP_UNKNOWN_SERVICE
;
9854 job_t rootj
= jobmgr_find_by_pid(root_jobmgr
, j
->p
, false);
9855 if (!(rootj
&& rootj
->xpc_bootstrapper
)) {
9856 job_log(j
, LOG_ERR
, "Attempt to add service to XPC domain by unprivileged job.");
9857 return BOOTSTRAP_NOT_PRIVILEGED
;
9860 if (!(j
->mgr
->properties
& BOOTSTRAP_PROPERTY_XPC_DOMAIN
)) {
9861 return BOOTSTRAP_NOT_PRIVILEGED
;
9865 launch_data_t services
= launch_data_unpack((void *)services_buff
, services_sz
, NULL
, 0, &offset
, NULL
);
9867 return BOOTSTRAP_NO_MEMORY
;
9870 int error
= _xpc_domain_import_services(j
, services
);
9872 mig_deallocate(services_buff
, services_sz
);
9879 #pragma mark XPC Events
9881 xpc_event_find_channel(job_t j
, const char *stream
, struct machservice
**ms
)
9883 int error
= EXNOMEM
;
9884 struct machservice
*msi
= NULL
;
9885 SLIST_FOREACH(msi
, &j
->machservices
, sle
) {
9886 if (strcmp(stream
, msi
->name
) == 0) {
9892 mach_port_t sp
= MACH_PORT_NULL
;
9893 msi
= machservice_new(j
, stream
, &sp
, false);
9898 job_log(j
, LOG_DEBUG
, "Creating new MachService for stream: %s", stream
);
9899 /* Hack to keep this from being publicly accessible through
9900 * bootstrap_look_up().
9902 if (!j
->dedicated_instance
) {
9903 LIST_REMOVE(msi
, name_hash_sle
);
9905 msi
->event_channel
= true;
9907 /* If we call job_dispatch() here before the audit session for the job
9908 * has been set, we'll end up not watching this service. But we also have
9909 * to take care not to watch the port if the job is active.
9911 * See <rdar://problem/10357855>.
9913 if (!j
->currently_ignored
) {
9914 machservice_watch(j
, msi
);
9919 } else if (!msi
->event_channel
) {
9920 job_log(j
, LOG_ERR
, "This job registered a MachService name identical to the requested event channel name: %s", stream
);
9931 xpc_event_get_event_name(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
9933 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
9938 uint64_t token
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_TOKEN
);
9943 job_log(j
, LOG_DEBUG
, "Getting event name for stream/token: %s/0x%llu", stream
, token
);
9946 struct externalevent
*event
= externalevent_find(stream
, token
);
9947 if (event
&& j
->event_monitor
) {
9948 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
9949 xpc_dictionary_set_string(reply2
, XPC_EVENT_ROUTINE_KEY_NAME
, event
->name
);
9952 job_log(j
, LOG_DEBUG
, "Found: %s", event
->name
);
9960 xpc_event_set_event(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
9962 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
9967 const char *key
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_NAME
);
9972 xpc_object_t event
= xpc_dictionary_get_value(request
, XPC_EVENT_ROUTINE_KEY_EVENT
);
9973 if (event
&& xpc_get_type(event
) != XPC_TYPE_DICTIONARY
) {
9977 job_log(j
, LOG_DEBUG
, "%s event for stream/key: %s/%s", event
? "Setting" : "Removing", stream
, key
);
9979 struct externalevent
*eei
= NULL
;
9980 LIST_FOREACH(eei
, &j
->events
, job_le
) {
9981 /* If the event for the given key already exists for the job, we need to
9982 * remove the old one first.
9984 if (strcmp(eei
->name
, key
) == 0 && strcmp(eei
->sys
->name
, stream
) == 0) {
9985 job_log(j
, LOG_DEBUG
, "Event exists. Removing.");
9986 externalevent_delete(eei
);
9991 int result
= EXNOMEM
;
9993 struct eventsystem
*es
= eventsystem_find(stream
);
9995 job_log(j
, LOG_DEBUG
, "Creating stream.");
9996 es
= eventsystem_new(stream
);
10000 job_log(j
, LOG_DEBUG
, "Adding event.");
10001 if (externalevent_new(j
, es
, key
, event
)) {
10002 job_log(j
, LOG_DEBUG
, "Added new event for key: %s", key
);
10005 job_log(j
, LOG_ERR
, "Could not create event for key: %s", key
);
10008 job_log(j
, LOG_ERR
, "Event stream could not be created: %s", stream
);
10011 /* If the event was NULL, then we just remove it and return. */
10016 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10024 xpc_event_copy_event(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10026 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10027 const char *key
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_NAME
);
10029 bool all_streams
= (stream
== NULL
);
10030 bool all_events
= (key
== NULL
|| strcmp(key
, "") == 0); // strcmp for libxpc compatibility
10031 xpc_object_t events
= NULL
;
10033 if (all_streams
&& !all_events
) {
10037 if (all_streams
|| all_events
) {
10038 job_log(j
, LOG_DEBUG
, "Fetching all events%s%s", stream
? " for stream: " : "", stream
? stream
: "");
10039 events
= xpc_dictionary_create(NULL
, NULL
, 0);
10041 job_log(j
, LOG_DEBUG
, "Fetching stream/key: %s/%s", stream
, key
);
10044 int result
= ESRCH
;
10045 struct externalevent
*eei
= NULL
;
10046 LIST_FOREACH(eei
, &j
->events
, job_le
) {
10048 xpc_object_t sub
= xpc_dictionary_get_value(events
, eei
->sys
->name
);
10050 sub
= xpc_dictionary_create(NULL
, NULL
, 0);
10051 xpc_dictionary_set_value(events
, eei
->sys
->name
, sub
);
10054 xpc_dictionary_set_value(sub
, eei
->name
, eei
->event
);
10055 } else if (strcmp(eei
->sys
->name
, stream
) == 0) {
10057 xpc_dictionary_set_value(events
, eei
->name
, eei
->event
);
10058 } else if (strcmp(eei
->name
, key
) == 0) {
10059 job_log(j
, LOG_DEBUG
, "Found event.");
10060 events
= xpc_retain(eei
->event
);
10067 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10068 xpc_dictionary_set_value(reply2
, XPC_EVENT_ROUTINE_KEY_EVENT
, events
);
10069 xpc_release(events
);
10079 xpc_event_channel_check_in(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10081 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10086 job_log(j
, LOG_DEBUG
, "Checking in stream: %s", stream
);
10088 struct machservice
*ms
= NULL
;
10089 int error
= xpc_event_find_channel(j
, stream
, &ms
);
10091 job_log(j
, LOG_ERR
, "Failed to check in: 0x%x: %s", error
, xpc_strerror(error
));
10092 } else if (ms
->isActive
) {
10093 job_log(j
, LOG_ERR
, "Attempt to check in on event channel multiple times: %s", stream
);
10096 machservice_request_notifications(ms
);
10098 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10099 xpc_dictionary_set_mach_recv(reply2
, XPC_EVENT_ROUTINE_KEY_PORT
, ms
->port
);
10108 xpc_event_channel_look_up(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10110 if (!j
->event_monitor
) {
10114 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10119 uint64_t token
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_TOKEN
);
10124 job_log(j
, LOG_DEBUG
, "Looking up channel for stream/token: %s/%llu", stream
, token
);
10126 struct externalevent
*ee
= externalevent_find(stream
, token
);
10131 struct machservice
*ms
= NULL
;
10132 int error
= xpc_event_find_channel(ee
->job
, stream
, &ms
);
10134 job_log(j
, LOG_DEBUG
, "Found event channel port: 0x%x", ms
->port
);
10135 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10136 xpc_dictionary_set_mach_send(reply2
, XPC_EVENT_ROUTINE_KEY_PORT
, ms
->port
);
10140 job_log(j
, LOG_ERR
, "Could not find event channel for stream/token: %s/%llu: 0x%x: %s", stream
, token
, error
, xpc_strerror(error
));
10147 xpc_event_provider_check_in(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10149 if (!j
->event_monitor
) {
10153 /* This indicates that the event monitor is now safe to signal. This state is
10154 * independent of whether this operation actually succeeds; we just need it
10155 * to ignore SIGUSR1.
10157 j
->event_monitor_ready2signal
= true;
10159 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10164 job_log(j
, LOG_DEBUG
, "Provider checking in for stream: %s", stream
);
10166 xpc_object_t events
= xpc_array_create(NULL
, 0);
10167 struct eventsystem
*es
= eventsystem_find(stream
);
10169 /* If we had to create the event stream, there were no events, so just
10170 * give back the empty array.
10172 job_log(j
, LOG_DEBUG
, "Creating event stream.");
10173 es
= eventsystem_new(stream
);
10174 if (!job_assumes(j
, es
)) {
10175 xpc_release(events
);
10179 if (strcmp(stream
, "com.apple.launchd.helper") == 0) {
10180 _launchd_support_system
= es
;
10183 job_log(j
, LOG_DEBUG
, "Filling event array.");
10185 struct externalevent
*ei
= NULL
;
10186 LIST_FOREACH(ei
, &es
->events
, sys_le
) {
10187 xpc_array_set_uint64(events
, XPC_ARRAY_APPEND
, ei
->id
);
10188 xpc_array_append_value(events
, ei
->event
);
10192 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10193 xpc_dictionary_set_value(reply2
, XPC_EVENT_ROUTINE_KEY_EVENTS
, events
);
10194 xpc_release(events
);
10201 xpc_event_provider_set_state(job_t j
, xpc_object_t request
, xpc_object_t
*reply
)
10203 job_t other_j
= NULL
;
10205 if (!j
->event_monitor
) {
10209 const char *stream
= xpc_dictionary_get_string(request
, XPC_EVENT_ROUTINE_KEY_STREAM
);
10214 uint64_t token
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_TOKEN
);
10219 bool state
= false;
10220 xpc_object_t xstate
= xpc_dictionary_get_value(request
, XPC_EVENT_ROUTINE_KEY_STATE
);
10221 if (!xstate
|| xpc_get_type(xstate
) != XPC_TYPE_BOOL
) {
10224 state
= xpc_bool_get_value(xstate
);
10227 job_log(j
, LOG_DEBUG
, "Setting event state to %s for stream/token: %s/%llu", state
? "true" : "false", stream
, token
);
10229 struct externalevent
*ei
= externalevent_find(stream
, token
);
10231 job_log(j
, LOG_ERR
, "Could not find stream/token: %s/%llu", stream
, token
);
10238 if (ei
->internal
) {
10239 job_log(ei
->job
, LOG_NOTICE
, "Job should be able to exec(3) now.");
10240 ei
->job
->waiting4ok
= false;
10241 externalevent_delete(ei
);
10244 (void)job_dispatch(other_j
, false);
10246 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10253 xpc_event_demux(mach_port_t p
, xpc_object_t request
, xpc_object_t
*reply
)
10255 uint64_t op
= xpc_dictionary_get_uint64(request
, XPC_EVENT_ROUTINE_KEY_OP
);
10260 audit_token_t token
;
10261 xpc_dictionary_get_audit_token(request
, &token
);
10262 runtime_record_caller_creds(&token
);
10264 job_t j
= job_mig_intran(p
);
10265 if (!j
|| j
->anonymous
) {
10269 job_log(j
, LOG_DEBUG
, "Incoming XPC event request: %llu", op
);
10273 case XPC_EVENT_GET_NAME
:
10274 error
= xpc_event_get_event_name(j
, request
, reply
);
10276 case XPC_EVENT_SET
:
10277 error
= xpc_event_set_event(j
, request
, reply
);
10279 case XPC_EVENT_COPY
:
10280 error
= xpc_event_copy_event(j
, request
, reply
);
10282 case XPC_EVENT_CHECK_IN
:
10283 error
= xpc_event_channel_check_in(j
, request
, reply
);
10285 case XPC_EVENT_LOOK_UP
:
10286 error
= xpc_event_channel_look_up(j
, request
, reply
);
10288 case XPC_EVENT_PROVIDER_CHECK_IN
:
10289 error
= xpc_event_provider_check_in(j
, request
, reply
);
10291 case XPC_EVENT_PROVIDER_SET_STATE
:
10292 error
= xpc_event_provider_set_state(j
, request
, reply
);
10296 job_log(j
, LOG_ERR
, "Unmanaged jobs may not make XPC Events requests.");
10301 job_log(j
, LOG_ERR
, "Bogus opcode.");
10306 xpc_object_t reply2
= xpc_dictionary_create_reply(request
);
10307 xpc_dictionary_set_uint64(reply2
, XPC_EVENT_ROUTINE_KEY_ERROR
, error
);
10315 job_mig_kickstart(job_t j
, name_t targetlabel
, pid_t
*out_pid
, unsigned int flags
)
10317 struct ldcred
*ldc
= runtime_get_caller_creds();
10321 return BOOTSTRAP_NO_MEMORY
;
10324 if (unlikely(!(otherj
= job_find(NULL
, targetlabel
)))) {
10325 return BOOTSTRAP_UNKNOWN_SERVICE
;
10328 #if TARGET_OS_EMBEDDED
10329 bool allow_non_root_kickstart
= j
->username
&& otherj
->username
&& (strcmp(j
->username
, otherj
->username
) == 0);
10331 bool allow_non_root_kickstart
= false;
10334 if (ldc
->euid
!= 0 && ldc
->euid
!= geteuid() && !allow_non_root_kickstart
) {
10335 return BOOTSTRAP_NOT_PRIVILEGED
;
10339 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
10340 return BOOTSTRAP_NOT_PRIVILEGED
;
10344 if (otherj
->p
&& (flags
& VPROCFLAG_STALL_JOB_EXEC
)) {
10345 return BOOTSTRAP_SERVICE_ACTIVE
;
10348 otherj
->stall_before_exec
= (flags
& VPROCFLAG_STALL_JOB_EXEC
);
10349 otherj
= job_dispatch(otherj
, true);
10351 if (!job_assumes(j
, otherj
&& otherj
->p
)) {
10352 // <rdar://problem/6787083> Clear this flag if we failed to start the job.
10353 otherj
->stall_before_exec
= false;
10354 return BOOTSTRAP_NO_MEMORY
;
10357 *out_pid
= otherj
->p
;
10363 job_mig_spawn_internal(job_t j
, vm_offset_t indata
, mach_msg_type_number_t indataCnt
, mach_port_t asport
, job_t
*outj
)
10365 launch_data_t jobdata
= NULL
;
10366 size_t data_offset
= 0;
10367 struct ldcred
*ldc
= runtime_get_caller_creds();
10371 return BOOTSTRAP_NO_MEMORY
;
10374 if (unlikely(j
->deny_job_creation
)) {
10375 return BOOTSTRAP_NOT_PRIVILEGED
;
10379 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
10380 return BOOTSTRAP_NOT_PRIVILEGED
;
10384 if (unlikely(pid1_magic
&& ldc
->euid
&& ldc
->uid
)) {
10385 job_log(j
, LOG_DEBUG
, "Punting spawn to per-user-context");
10386 return VPROC_ERR_TRY_PER_USER
;
10389 if (!job_assumes(j
, indataCnt
!= 0)) {
10393 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK
);
10394 if (!job_assumes(j
, (jobdata
= launch_data_unpack((void *)indata
, indataCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
)) {
10398 jobmgr_t target_jm
= jobmgr_find_by_name(j
->mgr
, NULL
);
10399 if (!jobmgr_assumes(j
->mgr
, target_jm
!= NULL
)) {
10400 jobmgr_log(j
->mgr
, LOG_ERR
, "This API can only be used by a process running within an Aqua session.");
10404 jr
= jobmgr_import2(target_jm
?: j
->mgr
, jobdata
);
10406 launch_data_t label
= NULL
;
10407 launch_data_t wait4debugger
= NULL
;
10411 /* If EEXIST was returned, we know that there is a label string in
10412 * the dictionary. So we don't need to check the types here; that
10413 * has already been done.
10415 label
= launch_data_dict_lookup(jobdata
, LAUNCH_JOBKEY_LABEL
);
10416 jr
= job_find(NULL
, launch_data_get_string(label
));
10417 if (job_assumes(j
, jr
!= NULL
) && !jr
->p
) {
10418 wait4debugger
= launch_data_dict_lookup(jobdata
, LAUNCH_JOBKEY_WAITFORDEBUGGER
);
10419 if (wait4debugger
&& launch_data_get_type(wait4debugger
) == LAUNCH_DATA_BOOL
) {
10420 if (launch_data_get_bool(wait4debugger
)) {
10421 /* If the job exists, we're going to kick-start it, but
10422 * we need to give the caller the opportunity to start
10423 * it suspended if it so desires. But this will only
10424 * take effect if the job isn't running.
10426 jr
->wait4debugger_oneshot
= true;
10432 return BOOTSTRAP_NAME_IN_USE
;
10434 return BOOTSTRAP_NO_MEMORY
;
10439 jr
->mach_uid
= ldc
->uid
;
10442 // TODO: Consolidate the app and legacy_LS_job bits.
10443 jr
->legacy_LS_job
= true;
10444 jr
->abandon_pg
= true;
10445 jr
->asport
= asport
;
10447 uuid_clear(jr
->expected_audit_uuid
);
10448 jr
= job_dispatch(jr
, true);
10450 if (!job_assumes(j
, jr
!= NULL
)) {
10452 return BOOTSTRAP_NO_MEMORY
;
10455 if (!job_assumes(jr
, jr
->p
)) {
10457 return BOOTSTRAP_NO_MEMORY
;
10460 job_log(jr
, LOG_DEBUG
, "Spawned by PID %u: %s", j
->p
, j
->label
);
10463 return BOOTSTRAP_SUCCESS
;
10467 job_mig_spawn2(job_t j
, mach_port_t rp
, vm_offset_t indata
, mach_msg_type_number_t indataCnt
, mach_port_t asport
, pid_t
*child_pid
, mach_port_t
*obsvr_port
)
10470 kern_return_t kr
= job_mig_spawn_internal(j
, indata
, indataCnt
, asport
, &nj
);
10471 if (likely(kr
== KERN_SUCCESS
)) {
10472 if (job_setup_exit_port(nj
) != KERN_SUCCESS
) {
10474 kr
= BOOTSTRAP_NO_MEMORY
;
10476 /* Do not return until the job has called exec(3), thereby making it
10477 * safe for the caller to send it SIGCONT.
10479 * <rdar://problem/9042798>
10481 nj
->spawn_reply_port
= rp
;
10484 } else if (kr
== BOOTSTRAP_NAME_IN_USE
) {
10485 bool was_running
= nj
->p
;
10486 if (job_dispatch(nj
, true)) {
10487 if (!was_running
) {
10488 job_log(nj
, LOG_DEBUG
, "Job exists but is not running. Kick-starting.");
10490 if (job_setup_exit_port(nj
) == KERN_SUCCESS
) {
10491 nj
->spawn_reply_port
= rp
;
10494 kr
= BOOTSTRAP_NO_MEMORY
;
10497 *obsvr_port
= MACH_PORT_NULL
;
10498 *child_pid
= nj
->p
;
10502 job_log(nj
, LOG_ERR
, "Failed to dispatch job, requestor: %s", j
->label
);
10503 kr
= BOOTSTRAP_UNKNOWN_SERVICE
;
10507 mig_deallocate(indata
, indataCnt
);
10512 job_do_legacy_ipc_request(job_t j
, launch_data_t request
, mach_port_t asport
__attribute__((unused
)))
10514 launch_data_t reply
= NULL
;
10517 if (launch_data_get_type(request
) == LAUNCH_DATA_STRING
) {
10518 if (strcmp(launch_data_get_string(request
), LAUNCH_KEY_CHECKIN
) == 0) {
10519 reply
= job_export(j
);
10527 #define LAUNCHD_MAX_LEGACY_FDS 128
10528 #define countof(x) (sizeof((x)) / sizeof((x[0])))
10531 job_mig_legacy_ipc_request(job_t j
, vm_offset_t request
,
10532 mach_msg_type_number_t requestCnt
, mach_port_array_t request_fds
,
10533 mach_msg_type_number_t request_fdsCnt
, vm_offset_t
*reply
,
10534 mach_msg_type_number_t
*replyCnt
, mach_port_array_t
*reply_fdps
,
10535 mach_msg_type_number_t
*reply_fdsCnt
, mach_port_t asport
)
10538 return BOOTSTRAP_NO_MEMORY
;
10541 /* TODO: Once we support actions other than checking in, we must check the
10542 * sandbox capabilities and EUID of the requestort.
10544 size_t nout_fdps
= 0;
10545 size_t nfds
= request_fdsCnt
/ sizeof(request_fds
[0]);
10546 if (nfds
> LAUNCHD_MAX_LEGACY_FDS
) {
10547 job_log(j
, LOG_ERR
, "Too many incoming descriptors: %lu", nfds
);
10548 return BOOTSTRAP_NO_MEMORY
;
10551 int in_fds
[LAUNCHD_MAX_LEGACY_FDS
];
10553 for (i
= 0; i
< nfds
; i
++) {
10554 in_fds
[i
] = fileport_makefd(request_fds
[i
]);
10555 if (in_fds
[i
] == -1) {
10556 job_log(j
, LOG_ERR
, "Bad descriptor passed in legacy IPC request at index: %lu", i
);
10560 // DON'T goto outbad before this point.
10562 *reply_fdps
= NULL
;
10563 launch_data_t ldreply
= NULL
;
10565 size_t dataoff
= 0;
10567 launch_data_t ldrequest
= launch_data_unpack((void *)request
, requestCnt
, in_fds
, nfds
, &dataoff
, &fdoff
);
10569 job_log(j
, LOG_ERR
, "Invalid legacy IPC request passed.");
10573 ldreply
= job_do_legacy_ipc_request(j
, ldrequest
, asport
);
10575 ldreply
= launch_data_new_errno(errno
);
10581 *replyCnt
= 10 * 1024 * 1024;
10582 mig_allocate(reply
, *replyCnt
);
10587 int out_fds
[LAUNCHD_MAX_LEGACY_FDS
];
10588 size_t nout_fds
= 0;
10589 size_t sz
= launch_data_pack(ldreply
, (void *)*reply
, *replyCnt
, out_fds
, &nout_fds
);
10591 job_log(j
, LOG_ERR
, "Could not pack legacy IPC reply.");
10596 if (nout_fds
> 128) {
10597 job_log(j
, LOG_ERR
, "Too many outgoing descriptors: %lu", nout_fds
);
10601 *reply_fdsCnt
= nout_fds
* sizeof((*reply_fdps
)[0]);
10602 mig_allocate((vm_address_t
*)reply_fdps
, *reply_fdsCnt
);
10603 if (!*reply_fdps
) {
10607 for (i
= 0; i
< nout_fds
; i
++) {
10608 mach_port_t fp
= MACH_PORT_NULL
;
10609 /* Whatever. Worst case is that we insert MACH_PORT_NULL. Not a big
10610 * deal. Note, these get stuffed into an array whose disposition is
10611 * mach_port_move_send_t, so we don't have to worry about them after
10614 if (fileport_makeport(out_fds
[i
], &fp
) != 0) {
10615 job_log(j
, LOG_ERR
, "Could not pack response descriptor at index: %lu: %d: %s", i
, errno
, strerror(errno
));
10617 (*reply_fdps
)[i
] = fp
;
10620 nout_fdps
= nout_fds
;
10625 mig_deallocate(request
, requestCnt
);
10626 launch_data_free(ldreply
);
10630 (void)launchd_mport_deallocate(asport
);
10632 return BOOTSTRAP_SUCCESS
;
10635 for (i
= 0; i
< nfds
; i
++) {
10636 (void)close(in_fds
[i
]);
10639 for (i
= 0; i
< nout_fds
; i
++) {
10640 (void)launchd_mport_deallocate((*reply_fdps
)[i
]);
10644 mig_deallocate(*reply
, *replyCnt
);
10647 /* We should never hit this since the last goto out is in the case that
10648 * allocating this fails.
10651 mig_deallocate((vm_address_t
)*reply_fdps
, *reply_fdsCnt
);
10655 launch_data_free(ldreply
);
10658 return BOOTSTRAP_NO_MEMORY
;
10662 jobmgr_init(bool sflag
)
10664 const char *root_session_type
= pid1_magic
? VPROCMGR_SESSION_SYSTEM
: VPROCMGR_SESSION_BACKGROUND
;
10665 SLIST_INIT(&s_curious_jobs
);
10666 LIST_INIT(&s_needing_sessions
);
10668 osx_assert((root_jobmgr
= jobmgr_new(NULL
, MACH_PORT_NULL
, MACH_PORT_NULL
, sflag
, root_session_type
, false, MACH_PORT_NULL
)) != NULL
);
10669 osx_assert((_s_xpc_system_domain
= jobmgr_new_xpc_singleton_domain(root_jobmgr
, "com.apple.xpc.system")) != NULL
);
10670 _s_xpc_system_domain
->req_asid
= launchd_audit_session
;
10671 _s_xpc_system_domain
->req_asport
= launchd_audit_port
;
10672 _s_xpc_system_domain
->shortdesc
= "system";
10674 root_jobmgr
->monitor_shutdown
= true;
10677 uint32_t fflags
= NOTE_ATTRIB
| NOTE_LINK
| NOTE_REVOKE
| NOTE_EXTEND
| NOTE_WRITE
;
10678 s_no_hang_fd
= open("/dev/autofs_nowait", O_EVTONLY
| O_NONBLOCK
);
10679 if (likely(s_no_hang_fd
== -1)) {
10680 if (jobmgr_assumes_zero_p(root_jobmgr
, (s_no_hang_fd
= open("/dev", O_EVTONLY
| O_NONBLOCK
))) != -1) {
10681 (void)jobmgr_assumes_zero_p(root_jobmgr
, kevent_mod((uintptr_t)s_no_hang_fd
, EVFILT_VNODE
, EV_ADD
, fflags
, 0, root_jobmgr
));
10684 s_no_hang_fd
= _fd(s_no_hang_fd
);
size_t
our_strhash(const char *s)
{
	size_t hash = 5381;

	/* djb2 string hash, first reported by Dan Bernstein many years ago
	 * in comp.lang.c: hash = hash * 33 + c for each byte of the string.
	 */
	for (size_t c; (c = *s++) != 0; ) {
		hash = (hash << 5) + hash + c; // hash * 33 + c
	}

	return hash;
}
10704 hash_label(const char *label
)
10706 return our_strhash(label
) % LABEL_HASH_SIZE
;
10710 hash_ms(const char *msstr
)
10712 return our_strhash(msstr
) % MACHSERVICE_HASH_SIZE
;
10716 waiting4removal_new(job_t j
, mach_port_t rp
)
10718 struct waiting_for_removal
*w4r
;
10720 if (!job_assumes(j
, (w4r
= malloc(sizeof(struct waiting_for_removal
))) != NULL
)) {
10724 w4r
->reply_port
= rp
;
10726 SLIST_INSERT_HEAD(&j
->removal_watchers
, w4r
, sle
);
10732 waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
)
10734 (void)job_assumes_zero(j
, job_mig_send_signal_reply(w4r
->reply_port
, 0));
10736 SLIST_REMOVE(&j
->removal_watchers
, w4r
, waiting_for_removal
, sle
);
int
get_kern_max_proc(void)
{
	/* Query the kernel's process-count limit (kern.maxproc). */
	int mib[] = { CTL_KERN, KERN_MAXPROC };
	// Initialize so a failed sysctl(2) cannot leak an uninitialized
	// (undefined-behavior) value to the caller; the assumes wrapper
	// below logs the failure but does not abort.
	int max = 0;
	size_t max_sz = sizeof(max);

	(void)posix_assumes_zero(sysctl(mib, 2, &max, &max_sz, NULL, 0));

	return max;
}
10753 // See rdar://problem/6271234
10755 eliminate_double_reboot(void)
10757 if (unlikely(!pid1_magic
)) {
10762 const char *argv
[] = { _PATH_BSHELL
, "/etc/rc.deferred_install", NULL
};
10765 if (unlikely(stat(argv
[1], &sb
) != -1)) {
10766 jobmgr_log(root_jobmgr
, LOG_DEBUG
| LOG_CONSOLE
, "Going to run deferred install script.");
10769 result
= posix_spawnp(&p
, argv
[0], NULL
, NULL
, (char **)argv
, environ
);
10770 if (result
== -1) {
10771 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Couldn't run deferred install script: %d: %s", result
, strerror(result
));
10776 result
= waitpid(p
, &wstatus
, 0);
10777 if (result
== -1) {
10778 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Failed to reap deferred install script: %d: %s", errno
, strerror(errno
));
10782 if (WIFEXITED(wstatus
)) {
10783 if ((result
= WEXITSTATUS(wstatus
)) == 0) {
10784 jobmgr_log(root_jobmgr
, LOG_DEBUG
| LOG_CONSOLE
, "Deferred install script completed successfully.");
10786 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Deferred install script failed with status: %d", WEXITSTATUS(wstatus
));
10789 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Weirdness with install script: %d", wstatus
);
10794 /* If the unlink(2) was to fail, it would be most likely fail with
10795 * EBUSY. All the other failure cases for unlink(2) don't apply when
10796 * we're running under PID 1 and have verified that the file exists.
10797 * Outside of someone deliberately messing with us (like if
10798 * /etc/rc.deferredinstall is actually a looping sym-link or a mount
10799 * point for a filesystem) and I/O errors, we should be good.
10801 if (unlink(argv
[1]) == -1) {
10802 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Failed to remove deferred install script: %d: %s", errno
, strerror(errno
));
10808 jetsam_property_setup(launch_data_t obj
, const char *key
, job_t j
)
10810 job_log(j
, LOG_DEBUG
, "Setting Jetsam properties for job...");
10811 if (strcasecmp(key
, LAUNCH_JOBKEY_JETSAMPRIORITY
) == 0 && launch_data_get_type(obj
) == LAUNCH_DATA_INTEGER
) {
10812 j
->jetsam_priority
= (typeof(j
->jetsam_priority
))launch_data_get_integer(obj
);
10813 job_log(j
, LOG_DEBUG
, "Priority: %d", j
->jetsam_priority
);
10814 } else if (strcasecmp(key
, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT
) == 0 && launch_data_get_type(obj
) == LAUNCH_DATA_INTEGER
) {
10815 j
->jetsam_memlimit
= (typeof(j
->jetsam_memlimit
))launch_data_get_integer(obj
);
10816 job_log(j
, LOG_DEBUG
, "Memory limit: %d", j
->jetsam_memlimit
);
10817 } else if (strcasecmp(key
, LAUNCH_KEY_JETSAMFRONTMOST
) == 0) {
10818 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
10819 * You can't set this in a plist.
10821 } else if (strcasecmp(key
, LAUNCH_KEY_JETSAMACTIVE
) == 0) {
10823 } else if (strcasecmp(key
, LAUNCH_KEY_JETSAMLABEL
) == 0) {
10824 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
10825 * complain about it.
10828 job_log(j
, LOG_ERR
, "Unknown Jetsam key: %s", key
);
10831 if (unlikely(!j
->jetsam_properties
)) {
10832 j
->jetsam_properties
= true;
10836 #if TARGET_OS_EMBEDDED
10838 launchd_set_jetsam_priorities(launch_data_t priorities
)
10840 kern_return_t result
= 0;
10842 if (launch_data_get_type(priorities
) != LAUNCH_DATA_ARRAY
) {
10845 if (!launchd_embedded_handofgod
) {
10849 size_t npris
= launch_data_array_get_count(priorities
);
10853 for (i
= 0; i
< npris
; i
++) {
10854 launch_data_t ldi
= launch_data_array_get_index(priorities
, i
);
10855 if (launch_data_get_type(ldi
) != LAUNCH_DATA_DICTIONARY
) {
10859 launch_data_t ldlabel
= NULL
;
10860 if (!(ldlabel
= launch_data_dict_lookup(ldi
, LAUNCH_KEY_JETSAMLABEL
))) {
10863 const char *label
= launch_data_get_string(ldlabel
);
10865 ji
= job_find(root_jobmgr
, label
);
10870 launch_data_dict_iterate(ldi
, (void (*)(launch_data_t
, const char *, void *))jetsam_property_setup
, ji
);
10872 launch_data_t frontmost
= NULL
;
10873 if ((frontmost
= launch_data_dict_lookup(ldi
, LAUNCH_KEY_JETSAMFRONTMOST
)) && launch_data_get_type(frontmost
) == LAUNCH_DATA_BOOL
) {
10874 ji
->jetsam_frontmost
= launch_data_get_bool(frontmost
);
10877 launch_data_t active
= NULL
;
10878 if ((active
= launch_data_dict_lookup(ldi
, LAUNCH_KEY_JETSAMACTIVE
)) && launch_data_get_type(active
) == LAUNCH_DATA_BOOL
) {
10879 ji
->jetsam_active
= launch_data_get_bool(active
);
10882 launchd_update_jetsam_list(ji
);
10884 result
= result
!= 0 ? errno
: 0;
10891 launchd_update_jetsam_list(job_t j
)
10893 memorystatus_priority_entry_t mpe
;
10894 kern_return_t result
;
10897 mpe
.priority
= j
->jetsam_priority
;
10899 mpe
.flags
|= j
->jetsam_frontmost
? kMemorystatusFlagsFrontmost
: 0;
10900 mpe
.flags
|= j
->jetsam_active
? kMemorystatusFlagsActive
: 0;
10902 // ToDo - cache MIB if we keep this interface
10903 (void)posix_assumes_zero(result
= sysctlbyname("kern.memorystatus_jetsam_change", NULL
, NULL
, &mpe
, sizeof(memorystatus_priority_entry_t
)));