2 * @APPLE_APACHE_LICENSE_HEADER_START@
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
16 * @APPLE_APACHE_LICENSE_HEADER_END@
/* RCS revision keyword for this file; kept as a string so the revision can be
 * read out of the binary. (NOTE(review): presumably consumed by
 * extract_rcsid_substr(), declared below — confirm at its call site.) */
static const char *const __rcs_file_version__ = "$Revision: 24003 $";
22 #include "launchd_core_logic.h"
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/boolean.h>
28 #include <mach/message.h>
29 #include <mach/notify.h>
30 #include <mach/mig_errors.h>
31 #include <mach/mach_traps.h>
32 #include <mach/mach_interface.h>
33 #include <mach/host_info.h>
34 #include <mach/mach_host.h>
35 #include <mach/exception.h>
36 #include <mach/host_reboot.h>
37 #include <sys/types.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
41 #include <sys/ucred.h>
42 #include <sys/fcntl.h>
44 #include <sys/reboot.h>
46 #include <sys/sysctl.h>
47 #include <sys/sockio.h>
49 #include <sys/resource.h>
50 #include <sys/ioctl.h>
51 #include <sys/mount.h>
54 #include <sys/socket.h>
55 #include <sys/syscall.h>
57 #include <netinet/in.h>
58 #include <netinet/in_var.h>
59 #include <netinet6/nd6.h>
60 #include <bsm/libbsm.h>
80 #include <malloc/malloc.h>
83 #define __APPLE_API_PRIVATE
87 #include <quarantine.h>
89 #if TARGET_OS_EMBEDDED
90 #include <sys/kern_memorystatus.h>
92 /* To make my life easier. */
93 typedef struct jetsam_priority_entry
{
97 int32_t hiwat_reserved1
;
98 int32_t hiwat_reserved2
;
99 int32_t hiwat_reserved3
;
100 } jetsam_priority_entry_t
;
103 kJetsamFlagsFrontmost
= (1 << 0),
104 kJetsamFlagsKilled
= (1 << 1)
109 #include "launch_priv.h"
110 #include "launch_internal.h"
111 #include "bootstrap.h"
112 #include "bootstrap_priv.h"
114 #include "vproc_internal.h"
119 #include "launchd_runtime.h"
120 #include "launchd_unix_ipc.h"
121 #include "protocol_vproc.h"
122 #include "protocol_vprocServer.h"
123 #include "protocol_job_reply.h"
124 #include "protocol_job_forward.h"
125 #include "mach_excServer.h"
128 * LAUNCHD_SAMPLE_TIMEOUT
129 * If the job hasn't exited in the given number of seconds after sending
130 * it a SIGTERM, start sampling it.
131 * LAUNCHD_DEFAULT_EXIT_TIMEOUT
132 * If the job hasn't exited in the given number of seconds after sending
133 * it a SIGTERM, SIGKILL it. Can be overriden in the job plist.
/* Minimum run time for a job, in seconds. (NOTE(review): presumably used to
 * throttle rapidly-respawning jobs — confirm at the call sites.) */
#define LAUNCHD_MIN_JOB_RUN_TIME 10
/* Seconds after SIGTERM before we start sampling a job that has not exited
 * (see the LAUNCHD_SAMPLE_TIMEOUT note in the comment block above). */
#define LAUNCHD_SAMPLE_TIMEOUT 2
/* Seconds after SIGTERM before the job is SIGKILLed; the comment block above
 * notes this can be overridden in the job plist. */
#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
/* Timer interval, in seconds, armed around SIGKILL delivery. (NOTE(review):
 * exact use not visible in this chunk — confirm where it is referenced.) */
#define LAUNCHD_SIGKILL_TIMER 5
/* Seconds a "clean" job gets to exit after SIGKILL; see the
 * clean_exit_timer_expired flag in struct job_s below. */
#define LAUNCHD_CLEAN_KILL_TIMER 1
/* Directory used for shutdown-time logging (see sample_log_file in
 * struct jobmgr_s below). */
#define SHUTDOWN_LOG_DIR "/var/log/shutdown"

/* Dictionary keys for the "take subset" operation. (NOTE(review): presumably
 * part of the bootstrap subset-transfer IPC — confirm against the handler.) */
#define TAKE_SUBSET_NAME "TakeSubsetName"
#define TAKE_SUBSET_PID "TakeSubsetPID"
#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
147 #define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
/* The process environment array, provided by the C runtime. */
extern char **environ;
151 struct waiting_for_removal
{
152 SLIST_ENTRY(waiting_for_removal
) sle
;
153 mach_port_t reply_port
;
156 static bool waiting4removal_new(job_t j
, mach_port_t rp
);
157 static void waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
);
159 struct waiting_for_exit
{
160 LIST_ENTRY(waiting_for_exit
) sle
;
165 static bool waiting4exit_new(job_t j
, mach_port_t rp
, bool legacy
);
166 static void waiting4exit_delete(job_t j
, struct waiting_for_exit
*w4e
);
169 SLIST_ENTRY(machservice
) sle
;
170 SLIST_ENTRY(machservice
) special_port_sle
;
171 LIST_ENTRY(machservice
) name_hash_sle
;
172 LIST_ENTRY(machservice
) port_hash_sle
;
174 unsigned int gen_num
;
175 mach_port_name_t port
;
176 unsigned int isActive
:1,
184 delete_on_destruction
:1,
185 drain_one_on_crash
:1,
186 drain_all_on_crash
:1,
187 /* Don't let the size of this field to get too small. It has to be large enough
188 * to represent the reasonable range of special port numbers.
190 special_port_num
:20;
195 static SLIST_HEAD(, machservice
) special_ports
; /* hack, this should be per jobmgr_t */
#define PORT_HASH_SIZE 32
/* Map a Mach port name to a bucket index in port_hash: a cheap bit-mask when
 * the table size is a power of two (it is, 32), modulo otherwise. The
 * power-of-two test is on a compile-time constant, so the branch folds away. */
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))

/* Hash table of machservices keyed by port (see port_hash_sle in
 * struct machservice above). */
static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
202 static void machservice_setup(launch_data_t obj
, const char *key
, void *context
);
203 static void machservice_setup_options(launch_data_t obj
, const char *key
, void *context
);
204 static void machservice_resetport(job_t j
, struct machservice
*ms
);
205 static struct machservice
*machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
);
206 static void machservice_ignore(job_t j
, struct machservice
*ms
);
207 static void machservice_watch(job_t j
, struct machservice
*ms
);
208 static void machservice_delete(job_t j
, struct machservice
*, bool port_died
);
209 static void machservice_request_notifications(struct machservice
*);
210 static mach_port_t
machservice_port(struct machservice
*);
211 static job_t
machservice_job(struct machservice
*);
212 static bool machservice_hidden(struct machservice
*);
213 static bool machservice_active(struct machservice
*);
214 static const char *machservice_name(struct machservice
*);
215 static bootstrap_status_t
machservice_status(struct machservice
*);
216 void machservice_drain_port(struct machservice
*);
219 SLIST_ENTRY(socketgroup
) sle
;
221 unsigned int junkfds
:1, fd_cnt
:31;
228 static bool socketgroup_new(job_t j
, const char *name
, int *fds
, size_t fd_cnt
, bool junkfds
);
229 static void socketgroup_delete(job_t j
, struct socketgroup
*sg
);
230 static void socketgroup_watch(job_t j
, struct socketgroup
*sg
);
231 static void socketgroup_ignore(job_t j
, struct socketgroup
*sg
);
232 static void socketgroup_callback(job_t j
);
233 static void socketgroup_setup(launch_data_t obj
, const char *key
, void *context
);
234 static void socketgroup_kevent_mod(job_t j
, struct socketgroup
*sg
, bool do_add
);
236 struct calendarinterval
{
237 LIST_ENTRY(calendarinterval
) global_sle
;
238 SLIST_ENTRY(calendarinterval
) sle
;
244 static LIST_HEAD(, calendarinterval
) sorted_calendar_events
;
246 static bool calendarinterval_new(job_t j
, struct tm
*w
);
247 static bool calendarinterval_new_from_obj(job_t j
, launch_data_t obj
);
248 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
);
249 static void calendarinterval_delete(job_t j
, struct calendarinterval
*ci
);
250 static void calendarinterval_setalarm(job_t j
, struct calendarinterval
*ci
);
251 static void calendarinterval_callback(void);
252 static void calendarinterval_sanity_check(void);
255 SLIST_ENTRY(envitem
) sle
;
264 static bool envitem_new(job_t j
, const char *k
, const char *v
, bool global
, bool one_shot
);
265 static void envitem_delete(job_t j
, struct envitem
*ei
, bool global
);
266 static void envitem_setup(launch_data_t obj
, const char *key
, void *context
);
267 static void envitem_setup_one_shot(launch_data_t obj
, const char *key
, void *context
);
270 SLIST_ENTRY(limititem
) sle
;
272 unsigned int setsoft
:1, sethard
:1, which
:30;
275 static bool limititem_update(job_t j
, int w
, rlim_t r
);
276 static void limititem_delete(job_t j
, struct limititem
*li
);
277 static void limititem_setup(launch_data_t obj
, const char *key
, void *context
);
279 static void seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
);
282 static void jetsam_property_setup(launch_data_t obj
, const char *key
, job_t j
);
297 // FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
298 } semaphore_reason_t
;
300 struct semaphoreitem
{
301 SLIST_ENTRY(semaphoreitem
) sle
;
302 semaphore_reason_t why
;
303 bool watching_parent
;
311 struct semaphoreitem_dict_iter_context
{
313 semaphore_reason_t why_true
;
314 semaphore_reason_t why_false
;
317 static bool semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
);
318 static void semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
);
319 static void semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
);
320 static void semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
);
321 static void semaphoreitem_callback(job_t j
, struct kevent
*kev
);
322 static void semaphoreitem_watch(job_t j
, struct semaphoreitem
*si
);
323 static void semaphoreitem_ignore(job_t j
, struct semaphoreitem
*si
);
324 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
);
#define ACTIVE_JOB_HASH_SIZE 32
/*
 * Map an integer key to a bucket in the active_jobs table: a bit-mask when
 * the table size is a power of two (it is, 32), modulo otherwise; the
 * power-of-two test is on a compile-time constant, so the branch folds away.
 * The argument is parenthesized so compound expressions hash correctly (the
 * previous expansion applied & / % to the raw token stream). Only one branch
 * evaluates x, but side-effecting arguments should still be avoided.
 * (NOTE(review): key is presumably a pid — confirm at the call sites.)
 */
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? ((x) & (ACTIVE_JOB_HASH_SIZE - 1)) : ((x) % ACTIVE_JOB_HASH_SIZE))

#define MACHSERVICE_HASH_SIZE 37
332 JOBMGR_PHASE_HOPEFULLY_EXITS_FIRST
,
334 JOBMGR_PHASE_HOPEFULLY_EXITS_LAST
,
338 static char *s_phases
[JOBMGR_PHASE_LAST
+ 1] = {
339 "HopefullyExitsFirst",
341 "HopefullyExitsLast",
346 kq_callback kqjobmgr_callback
;
347 SLIST_ENTRY(jobmgr_s
) sle
;
348 SLIST_HEAD(, jobmgr_s
) submgrs
;
349 LIST_HEAD(, job_s
) jobs
;
350 LIST_HEAD(, job_s
) jetsam_jobs
;
351 LIST_HEAD(, job_s
) active_jobs
[ACTIVE_JOB_HASH_SIZE
];
352 LIST_HEAD(, machservice
) ms_hash
[MACHSERVICE_HASH_SIZE
];
353 LIST_HEAD(, job_s
) global_env_jobs
;
354 STAILQ_HEAD(, job_s
) pending_samples
;
356 mach_port_t req_port
;
357 mach_port_t init_audit_session
;
361 unsigned int global_on_demand_cnt
;
362 unsigned int hopefully_first_cnt
;
363 unsigned int normal_active_cnt
;
364 unsigned int jetsam_jobs_cnt
;
365 unsigned int shutting_down
:1,
366 session_initialized
:1,
367 killed_hopefully_first_jobs
:1,
368 killed_normal_jobs
:1,
369 killed_hopefully_last_jobs
:1,
370 killed_stray_jobs
:1;
371 char sample_log_file
[PATH_MAX
];
/*
 * Evaluate expression e; if it is unexpectedly false, report the failure via
 * jobmgr_log_bug() with the current line number and yield false, otherwise
 * yield true. The comma operator sequences the log call before the false
 * result so the whole macro remains a single boolean expression usable
 * inside an if (). Note e is evaluated exactly once.
 */
#define jobmgr_assumes(jm, e) \
	(unlikely(!(e)) ? jobmgr_log_bug(jm, __LINE__), false : true)
382 static jobmgr_t
jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
, bool no_init
, mach_port_t session_port
);
383 static job_t
jobmgr_import2(jobmgr_t jm
, launch_data_t pload
);
384 static jobmgr_t
jobmgr_parent(jobmgr_t jm
);
385 static jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm
);
386 static bool jobmgr_label_test(jobmgr_t jm
, const char *str
);
387 static void jobmgr_reap_bulk(jobmgr_t jm
, struct kevent
*kev
);
388 static void jobmgr_log_stray_children(jobmgr_t jm
, bool kill_strays
);
389 static void jobmgr_kill_stray_children(jobmgr_t jm
, pid_t
*p
, size_t np
);
390 static void jobmgr_remove(jobmgr_t jm
);
391 static void jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
);
392 static void jobmgr_dequeue_next_sample(jobmgr_t jm
);
393 static job_t
jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
);
394 static job_t
jobmgr_find_by_pid_deep(jobmgr_t jm
, pid_t p
, bool anon_okay
);
395 static job_t
jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
);
396 static jobmgr_t
jobmgr_find_by_name(jobmgr_t jm
, const char *where
);
397 static job_t
job_mig_intran2(jobmgr_t jm
, mach_port_t mport
, pid_t upid
);
398 static job_t
jobmgr_lookup_per_user_context_internal(job_t j
, uid_t which_user
, bool dispatch
, mach_port_t
*mp
);
399 static void job_export_all2(jobmgr_t jm
, launch_data_t where
);
400 static void jobmgr_callback(void *obj
, struct kevent
*kev
);
401 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm
);
402 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
);
403 static struct machservice
*jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
);
404 static void jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
405 static void jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
406 /* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
407 static void jobmgr_log_bug(jobmgr_t jm
, unsigned int line
);
/* Sentinel pointer values standing in for a real label string; they are
 * compared by address, never dereferenced. (NOTE(review): presumably passed
 * where a label is expected to request an auto-generated legacy/anonymous
 * label — confirm at job_new()'s call sites.) */
#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
412 struct suspended_peruser
{
413 LIST_ENTRY(suspended_peruser
) sle
;
418 kq_callback kqjob_callback
; /* MUST be first element of this structure for benefit of launchd's run loop. */
419 LIST_ENTRY(job_s
) sle
;
420 LIST_ENTRY(job_s
) needing_session_sle
;
421 LIST_ENTRY(job_s
) jetsam_sle
;
422 LIST_ENTRY(job_s
) pid_hash_sle
;
423 LIST_ENTRY(job_s
) label_hash_sle
;
424 LIST_ENTRY(job_s
) global_env_sle
;
425 STAILQ_ENTRY(job_s
) pending_samples_sle
;
426 SLIST_ENTRY(job_s
) curious_jobs_sle
;
427 LIST_HEAD(, suspended_peruser
) suspended_perusers
;
428 LIST_HEAD(, waiting_for_exit
) exit_watchers
;
429 SLIST_HEAD(, socketgroup
) sockets
;
430 SLIST_HEAD(, calendarinterval
) cal_intervals
;
431 SLIST_HEAD(, envitem
) global_env
;
432 SLIST_HEAD(, envitem
) env
;
433 SLIST_HEAD(, limititem
) limits
;
434 SLIST_HEAD(, machservice
) machservices
;
435 SLIST_HEAD(, semaphoreitem
) semaphores
;
436 SLIST_HEAD(, waiting_for_removal
) removal_watchers
;
438 cpu_type_t
*j_binpref
;
439 size_t j_binpref_cnt
;
441 mach_port_t wait_reply_port
; /* we probably should switch to a list of waiters */
454 char *alt_exc_handler
;
455 struct vproc_shmem_s
*shmem
;
456 struct machservice
*lastlookup
;
457 unsigned int lastlookup_gennum
;
459 char *seatbelt_profile
;
460 uint64_t seatbelt_flags
;
463 void *quarantine_data
;
464 size_t quarantine_data_sz
;
467 int last_exit_status
;
473 int32_t jetsam_priority
;
474 int32_t jetsam_memlimit
;
475 int32_t main_thread_priority
;
477 uint32_t exit_timeout
;
478 uint64_t sent_signal_time
;
480 uint32_t min_run_time
;
481 uint32_t start_interval
;
482 uint32_t peruser_suspend_count
; /* The number of jobs that have disabled this per-user launchd. */
486 J_TYPE_ANONYMOUS
= 1,
487 J_TYPE_LANCHSERVICES
,
492 bool debug
:1, /* man launchd.plist --> Debug */
493 ondemand
:1, /* man launchd.plist --> KeepAlive == false */
494 session_create
:1, /* man launchd.plist --> SessionCreate */
495 low_pri_io
:1, /* man launchd.plist --> LowPriorityIO */
496 no_init_groups
:1, /* man launchd.plist --> InitGroups */
497 priv_port_has_senders
:1, /* a legacy mach_init concept to make bootstrap_create_server/service() work */
498 importing_global_env
:1, /* a hack during job importing */
499 importing_hard_limits
:1, /* a hack during job importing */
500 setmask
:1, /* man launchd.plist --> Umask */
501 anonymous
:1, /* a process that launchd knows about, but isn't managed by launchd */
502 checkedin
:1, /* a legacy mach_init concept to detect sick jobs */
503 legacy_mach_job
:1, /* a job created via bootstrap_create_server() */
504 legacy_LS_job
:1, /* a job created via spawn_via_launchd() */
505 inetcompat
:1, /* a legacy job that wants inetd compatible semantics */
506 inetcompat_wait
:1, /* a twist on inetd compatibility */
507 start_pending
:1, /* an event fired and the job should start, but not necessarily right away */
508 globargv
:1, /* man launchd.plist --> EnableGlobbing */
509 wait4debugger
:1, /* man launchd.plist --> WaitForDebugger */
510 wait4debugger_oneshot
:1, /* One-shot WaitForDebugger. */
511 internal_exc_handler
:1, /* MachExceptionHandler == true */
512 stall_before_exec
:1, /* a hack to support an option of spawn_via_launchd() */
513 only_once
:1, /* man launchd.plist --> LaunchOnlyOnce. Note: 5465184 Rename this to "HopefullyNeverExits" */
514 currently_ignored
:1, /* Make job_ignore() / job_watch() work. If these calls were balanced, then this wouldn't be necessarily. */
515 forced_peers_to_demand_mode
:1, /* A job that forced all other jobs to be temporarily launch-on-demand */
516 setnice
:1, /* man launchd.plist --> Nice */
517 hopefully_exits_last
:1, /* man launchd.plist --> HopefullyExitsLast */
518 removal_pending
:1, /* a job was asked to be unloaded/removed while running, we'll remove it after it exits */
519 sent_sigkill
:1, /* job_kill() was called */
520 sampling_complete
:1, /* job_force_sampletool() was called (or is disabled) */
521 debug_before_kill
:1, /* enter the kernel debugger before killing a job */
522 weird_bootstrap
:1, /* a hack that launchd+launchctl use during jobmgr_t creation */
523 start_on_mount
:1, /* man launchd.plist --> StartOnMount */
524 per_user
:1, /* This job is a per-user launchd managed by the PID 1 launchd */
525 hopefully_exits_first
:1, /* man launchd.plist --> HopefullyExitsFirst */
526 deny_unknown_mslookups
:1, /* A flag for changing the behavior of bootstrap_look_up() */
527 unload_at_mig_return
:1, /* A job thoroughly confused launchd. We need to unload it ASAP */
528 abandon_pg
:1, /* man launchd.plist --> AbandonProcessGroup */
529 ignore_pg_at_shutdown
:1, /* During shutdown, do not send SIGTERM to stray processes in the process group of this job. */
530 poll_for_vfs_changes
:1, /* a hack to work around the fact that kqueues don't work on all filesystems */
531 deny_job_creation
:1, /* Don't let this job create new 'job_t' objects in launchd */
532 kill_via_shmem
:1, /* man launchd.plist --> EnableTransactions */
533 sent_kill_via_shmem
:1, /* We need to 'kill_via_shmem' once-and-only-once */
534 clean_kill
:1, /* The job was sent SIGKILL because it was clean. */
535 pending_sample
:1, /* This job needs to be sampled for some reason. */
536 kill_after_sample
:1, /* The job is to be killed after sampling. */
537 is_being_sampled
:1, /* We've spawned a sample tool to sample the job. */
538 reap_after_trace
:1, /* The job exited before sample did, so we should reap it after sample is done. */
539 nosy
:1, /* The job has an OtherJobEnabled KeepAlive criterion. */
540 crashed
:1, /* The job is the default Mach exception handler, and it crashed. */
541 reaped
:1, /* We've received NOTE_EXIT for the job. */
542 stopped
:1, /* job_stop() was called. */
543 jetsam_frontmost
:1, /* The job is considered "frontmost" by Jetsam. */
544 needs_kickoff
:1, /* The job is to be kept alive continuously, but it must be initially kicked off. */
545 is_bootstrapper
:1, /* The job is a bootstrapper. */
546 has_console
:1, /* The job owns the console. */
547 clean_exit_timer_expired
:1, /* The job was clean, received SIGKILL and failed to exit after LAUNCHD_CLEAN_KILL_TIMER seconds. */
548 embedded_special_privileges
:1, /* The job runs as a non-root user on embedded but has select privileges of the root user. */
549 did_exec
:1, /* The job exec(2)ed successfully. */
550 holds_ref
:1, /* The (anonymous) job called vprocmgr_switch_to_session(). */
551 jetsam_properties
:1; /* The job has Jetsam limits in place. */
554 mach_port_t audit_session
;
555 uuid_t expected_audit_uuid
;
#define LABEL_HASH_SIZE 53

/* Hash table of jobs keyed by label (see label_hash_sle in struct job_s
 * above). */
static LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
/* Pure string hashes for the label and machservice tables. */
static size_t hash_label(const char *label) __attribute__((pure));
static size_t hash_ms(const char *msstr) __attribute__((pure));
/* Jobs linked through curious_jobs_sle. (NOTE(review): presumably the "nosy"
 * jobs with an OtherJobEnabled KeepAlive criterion, dispatched by
 * job_dispatch_curious_jobs() — confirm in that function.) */
static SLIST_HEAD(, job_s) s_curious_jobs;
/*
 * Job-scoped twin of jobmgr_assumes(): evaluate e once; if it is
 * unexpectedly false, report via job_log_bug() with the current line number
 * and yield false, otherwise yield true. The comma operator keeps the whole
 * macro a single boolean expression usable inside an if ().
 */
#define job_assumes(j, e) \
	(unlikely(!(e)) ? job_log_bug(j, __LINE__), false : true)
569 static void job_import_keys(launch_data_t obj
, const char *key
, void *context
);
570 static void job_import_bool(job_t j
, const char *key
, bool value
);
571 static void job_import_string(job_t j
, const char *key
, const char *value
);
572 static void job_import_integer(job_t j
, const char *key
, long long value
);
573 static void job_import_dictionary(job_t j
, const char *key
, launch_data_t value
);
574 static void job_import_array(job_t j
, const char *key
, launch_data_t value
);
575 static void job_import_opaque(job_t j
, const char *key
, launch_data_t value
);
576 static bool job_set_global_on_demand(job_t j
, bool val
);
577 static const char *job_active(job_t j
);
578 static void job_watch(job_t j
);
579 static void job_ignore(job_t j
);
580 static void job_cleanup_after_tracer(job_t j
);
581 static void job_reap(job_t j
);
582 static bool job_useless(job_t j
);
583 static bool job_keepalive(job_t j
);
584 static void job_dispatch_curious_jobs(job_t j
);
585 static void job_start(job_t j
);
586 static void job_start_child(job_t j
) __attribute__((noreturn
));
587 static void job_setup_attributes(job_t j
);
588 static bool job_setup_machport(job_t j
);
589 static void job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
);
590 static void job_postfork_become_user(job_t j
);
591 static void job_postfork_test_user(job_t j
);
592 static void job_log_pids_with_weird_uids(job_t j
);
593 static void job_setup_exception_port(job_t j
, task_t target_task
);
594 static void job_callback(void *obj
, struct kevent
*kev
);
595 static void job_callback_proc(job_t j
, struct kevent
*kev
);
596 static void job_callback_timer(job_t j
, void *ident
);
597 static void job_callback_read(job_t j
, int ident
);
598 static void job_log_stray_pg(job_t j
);
599 static void job_log_children_without_exec(job_t j
);
600 static job_t
job_new_anonymous(jobmgr_t jm
, pid_t anonpid
) __attribute__((malloc
, nonnull
, warn_unused_result
));
601 static job_t
job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
) __attribute__((malloc
, nonnull(1,2), warn_unused_result
));
602 static job_t
job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
) __attribute__((malloc
, nonnull
, warn_unused_result
));
603 static void job_kill(job_t j
);
604 static void job_uncork_fork(job_t j
);
605 static void job_log_stdouterr(job_t j
);
606 static void job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
) __attribute__((format(printf
, 4, 0)));
607 static void job_log_error(job_t j
, int pri
, const char *msg
, ...) __attribute__((format(printf
, 3, 4)));
608 static void job_log_bug(job_t j
, unsigned int line
);
609 static void job_log_stdouterr2(job_t j
, const char *msg
, ...);
610 static void job_set_exception_port(job_t j
, mach_port_t port
);
611 static kern_return_t
job_handle_mpm_wait(job_t j
, mach_port_t srp
, int *waitstatus
);
613 static const struct {
616 } launchd_keys2limits
[] = {
617 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE
, RLIMIT_CORE
},
618 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU
, RLIMIT_CPU
},
619 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA
, RLIMIT_DATA
},
620 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE
, RLIMIT_FSIZE
},
621 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK
, RLIMIT_MEMLOCK
},
622 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE
, RLIMIT_NOFILE
},
623 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC
, RLIMIT_NPROC
},
624 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS
, RLIMIT_RSS
},
625 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK
, RLIMIT_STACK
},
628 static time_t cronemu(int mon
, int mday
, int hour
, int min
);
629 static time_t cronemu_wday(int wday
, int hour
, int min
);
630 static bool cronemu_mon(struct tm
*wtm
, int mon
, int mday
, int hour
, int min
);
631 static bool cronemu_mday(struct tm
*wtm
, int mday
, int hour
, int min
);
632 static bool cronemu_hour(struct tm
*wtm
, int hour
, int min
);
633 static bool cronemu_min(struct tm
*wtm
, int min
);
635 /* miscellaneous file local functions */
636 static size_t get_kern_max_proc(void);
637 static int dir_has_files(job_t j
, const char *path
);
638 static char **mach_cmd2argv(const char *string
);
639 static size_t our_strhash(const char *s
) __attribute__((pure
));
640 static void extract_rcsid_substr(const char *i
, char *o
, size_t osz
);
641 static void simulate_pid1_crash(void);
642 static pid_t
basic_spawn(job_t j
, void (*what_to_do
)(job_t
));
643 static void take_sample(job_t j
);
645 void eliminate_double_reboot(void);
647 /* file local globals */
648 static size_t total_children
;
649 static size_t total_anon_children
;
650 static mach_port_t the_exception_server
;
651 static job_t workaround_5477111
;
652 static LIST_HEAD(, job_s
) s_needing_sessions
;
653 mach_port_t g_audit_session_port
= MACH_PORT_NULL
;
655 #if !TARGET_OS_EMBEDDED
656 static job_t s_embedded_privileged_job
= (job_t
)&root_jobmgr
;
657 au_asid_t g_audit_session
= AU_DEFAUDITSID
;
659 static job_t s_embedded_privileged_job
= NULL
;
660 pid_t g_audit_session
= 0;
663 static int s_no_hang_fd
= -1;
665 /* process wide globals */
666 mach_port_t inherited_bootstrap_port
;
667 jobmgr_t root_jobmgr
;
668 bool g_shutdown_debugging
= false;
669 bool g_verbose_boot
= false;
670 bool g_embedded_privileged_action
= false;
671 bool g_runtime_busy_time
= false;
676 struct semaphoreitem
*si
;
677 struct socketgroup
*sg
;
678 struct machservice
*ms
;
680 if (j
->currently_ignored
) {
684 job_log(j
, LOG_DEBUG
, "Ignoring...");
686 j
->currently_ignored
= true;
688 if (j
->poll_for_vfs_changes
) {
689 j
->poll_for_vfs_changes
= false;
690 job_assumes(j
, kevent_mod((uintptr_t)&j
->semaphores
, EVFILT_TIMER
, EV_DELETE
, 0, 0, j
) != -1);
693 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
694 socketgroup_ignore(j
, sg
);
697 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
698 machservice_ignore(j
, ms
);
701 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
702 semaphoreitem_ignore(j
, si
);
709 struct semaphoreitem
*si
;
710 struct socketgroup
*sg
;
711 struct machservice
*ms
;
713 if (!j
->currently_ignored
) {
717 job_log(j
, LOG_DEBUG
, "Watching...");
719 j
->currently_ignored
= false;
721 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
722 socketgroup_watch(j
, sg
);
725 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
726 machservice_watch(j
, ms
);
729 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
730 semaphoreitem_watch(j
, si
);
740 if (unlikely(!j
->p
|| j
->anonymous
)) {
744 #if !TARGET_OS_EMBEDDED
745 if (j
->kill_via_shmem
&& !g_force_old_kill_path
) {
747 if (!j
->sent_kill_via_shmem
) {
748 j
->shmem
->vp_shmem_flags
|= VPROC_SHMEM_EXITING
;
749 newval
= __sync_sub_and_fetch(&j
->shmem
->vp_shmem_transaction_cnt
, 1);
750 j
->sent_kill_via_shmem
= true;
752 newval
= j
->shmem
->vp_shmem_transaction_cnt
;
757 } else if( j
->kill_via_shmem
) {
758 job_log(j
, LOG_DEBUG
, "Stopping transactional job the old-fashioned way.");
762 #if TARGET_OS_EMBEDDED
763 if( g_embedded_privileged_action
&& s_embedded_privileged_job
) {
764 if( !job_assumes(j
, s_embedded_privileged_job
->username
!= NULL
&& j
->username
!= NULL
) ) {
769 if( strcmp(j
->username
, s_embedded_privileged_job
->username
) != 0 ) {
773 } else if( g_embedded_privileged_action
) {
779 j
->sent_signal_time
= runtime_get_opaque_time();
782 j
->clean_kill
= true;
786 * If sampling is enabled and SAMPLE_TIMEOUT is earlier than the job exit_timeout,
787 * then set a timer for SAMPLE_TIMEOUT seconds after killing
789 unsigned int exit_timeout
= j
->exit_timeout
;
790 bool do_sample
= do_apple_internal_logging
;
791 unsigned int timeout
= exit_timeout
;
793 if (do_sample
&& (!exit_timeout
|| (LAUNCHD_SAMPLE_TIMEOUT
< exit_timeout
))) {
794 timeout
= LAUNCHD_SAMPLE_TIMEOUT
;
797 job_assumes(j
, runtime_kill(j
->p
, SIGTERM
) != -1);
800 j
->sampling_complete
= !do_sample
;
801 job_assumes(j
, kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
,
802 EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, timeout
, j
) != -1);
806 job_log(j
, LOG_DEBUG
, "This job has an infinite exit timeout");
809 if (j
->kill_via_shmem
) {
810 snprintf(extralog
, sizeof(extralog
), ": %d remaining transactions", newval
+ 1);
815 job_log(j
, LOG_DEBUG
, "Sent SIGTERM signal%s", extralog
);
824 launch_data_t tmp
, tmp2
, tmp3
, r
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
830 if ((tmp
= launch_data_new_string(j
->label
))) {
831 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LABEL
);
833 if ((tmp
= launch_data_new_string(j
->mgr
->name
))) {
834 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
836 if ((tmp
= launch_data_new_bool(j
->ondemand
))) {
837 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_ONDEMAND
);
839 if ((tmp
= launch_data_new_integer(j
->last_exit_status
))) {
840 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_LASTEXITSTATUS
);
842 if (j
->p
&& (tmp
= launch_data_new_integer(j
->p
))) {
843 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PID
);
845 if ((tmp
= launch_data_new_integer(j
->timeout
))) {
846 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_TIMEOUT
);
848 if (j
->prog
&& (tmp
= launch_data_new_string(j
->prog
))) {
849 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAM
);
851 if (j
->stdinpath
&& (tmp
= launch_data_new_string(j
->stdinpath
))) {
852 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDINPATH
);
854 if (j
->stdoutpath
&& (tmp
= launch_data_new_string(j
->stdoutpath
))) {
855 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDOUTPATH
);
857 if (j
->stderrpath
&& (tmp
= launch_data_new_string(j
->stderrpath
))) {
858 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_STANDARDERRORPATH
);
860 if (likely(j
->argv
) && (tmp
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
863 for (i
= 0; i
< j
->argc
; i
++) {
864 if ((tmp2
= launch_data_new_string(j
->argv
[i
]))) {
865 launch_data_array_set_index(tmp
, tmp2
, i
);
869 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
);
872 if (j
->kill_via_shmem
&& (tmp
= launch_data_new_bool(true))) {
873 int32_t tmp_cnt
= -1;
875 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_ENABLETRANSACTIONS
);
878 tmp_cnt
= j
->shmem
->vp_shmem_transaction_cnt
;
881 if (j
->sent_kill_via_shmem
) {
885 if ((tmp
= launch_data_new_integer(tmp_cnt
))) {
886 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_TRANSACTIONCOUNT
);
890 if (j
->session_create
&& (tmp
= launch_data_new_bool(true))) {
891 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SESSIONCREATE
);
894 if (j
->inetcompat
&& (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
895 if ((tmp2
= launch_data_new_bool(j
->inetcompat_wait
))) {
896 launch_data_dict_insert(tmp
, tmp2
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
);
898 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
);
901 if (!SLIST_EMPTY(&j
->sockets
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
902 struct socketgroup
*sg
;
905 SLIST_FOREACH(sg
, &j
->sockets
, sle
) {
909 if ((tmp2
= launch_data_alloc(LAUNCH_DATA_ARRAY
))) {
910 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
911 if ((tmp3
= launch_data_new_fd(sg
->fds
[i
]))) {
912 launch_data_array_set_index(tmp2
, tmp3
, i
);
915 launch_data_dict_insert(tmp
, tmp2
, sg
->name
);
919 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_SOCKETS
);
922 if (!SLIST_EMPTY(&j
->machservices
) && (tmp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
))) {
923 struct machservice
*ms
;
927 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
930 tmp3
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
933 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
934 launch_data_dict_insert(tmp3
, tmp2
, ms
->name
);
937 tmp2
= launch_data_new_machport(MACH_PORT_NULL
);
938 launch_data_dict_insert(tmp
, tmp2
, ms
->name
);
942 launch_data_dict_insert(r
, tmp
, LAUNCH_JOBKEY_MACHSERVICES
);
945 launch_data_dict_insert(r
, tmp3
, LAUNCH_JOBKEY_PERJOBMACHSERVICES
);
953 jobmgr_log_active_jobs(jobmgr_t jm
)
955 const char *why_active
;
959 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
960 jobmgr_log_active_jobs(jmi
);
963 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
964 if( (why_active
= job_active(ji
)) ) {
965 job_log(ji
, LOG_DEBUG
| LOG_CONSOLE
, "%s", why_active
);
971 jobmgr_still_alive_with_check(jobmgr_t jm
)
973 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Still alive with %lu/%lu (normal/anonymous) children. In %s phase of shutdown.", total_children
, total_anon_children
, s_phases
[jm
->shutdown_phase
]);
974 jobmgr_log_active_jobs(jm
);
978 jobmgr_shutdown(jobmgr_t jm
)
981 jobmgr_log(jm
, LOG_DEBUG
, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm
->reboot_flags
));
983 jm
->shutting_down
= true;
985 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
986 jobmgr_shutdown(jmi
);
989 if (jm
->parentmgr
== NULL
&& pid1_magic
) {
990 jobmgr_assumes(jm
, kevent_mod((uintptr_t)jm
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, 5, jm
));
991 #if !TARGET_OS_EMBEDDED
992 /* Kill the update thread. */
993 jobmgr_assumes(jm
, __sync_sub_and_fetch(&g_sync_frequency
, 30) == 0);
997 return jobmgr_do_garbage_collection(jm
);
1001 jobmgr_remove(jobmgr_t jm
)
1006 jobmgr_log(jm
, LOG_DEBUG
, "Removing job manager.");
1007 if (!jobmgr_assumes(jm
, SLIST_EMPTY(&jm
->submgrs
))) {
1008 while ((jmi
= SLIST_FIRST(&jm
->submgrs
))) {
1013 while( (ji
= LIST_FIRST(&jm
->jobs
)) ) {
1014 if( !ji
->anonymous
&& ji
->p
) {
1015 job_log(ji
, LOG_WARNING
| LOG_CONSOLE
, "Job has overstayed its welcome. Forcing removal.");
1022 jobmgr_assumes(jm
, launchd_mport_deallocate(jm
->req_port
) == KERN_SUCCESS
);
1026 jobmgr_assumes(jm
, launchd_mport_close_recv(jm
->jm_port
) == KERN_SUCCESS
);
1029 if (jm
->parentmgr
) {
1030 runtime_del_weak_ref();
1031 SLIST_REMOVE(&jm
->parentmgr
->submgrs
, jm
, jobmgr_s
, sle
);
1032 } else if (pid1_magic
) {
1033 eliminate_double_reboot();
1034 launchd_log_vm_stats();
1035 jobmgr_log(root_jobmgr
, LOG_NOTICE
| LOG_CONSOLE
, "About to call: reboot(%s).", reboot_flags_to_C_names(jm
->reboot_flags
));
1037 jobmgr_assumes(jm
, reboot(jm
->reboot_flags
) != -1);
1039 jobmgr_log(jm
, LOG_DEBUG
, "About to exit");
1050 struct waiting_for_removal
*w4r
;
1051 struct calendarinterval
*ci
;
1052 struct semaphoreitem
*si
;
1053 struct socketgroup
*sg
;
1054 struct machservice
*ms
;
1055 struct limititem
*li
;
1058 #if TARGET_OS_EMBEDDED
1059 if( g_embedded_privileged_action
&& s_embedded_privileged_job
) {
1060 if( !job_assumes(j
, s_embedded_privileged_job
->username
!= NULL
&& j
->username
!= NULL
) ) {
1065 if( strcmp(j
->username
, s_embedded_privileged_job
->username
) != 0 ) {
1069 } else if( g_embedded_privileged_action
) {
1075 if (unlikely(j
->p
)) {
1079 job_log(j
, LOG_DEBUG
, "Removal pended until the job exits");
1081 if (!j
->removal_pending
) {
1082 j
->removal_pending
= true;
1089 job_dispatch_curious_jobs(j
);
1091 ipc_close_all_with_job(j
);
1093 job_log(j
, LOG_INFO
, "Total rusage: utime %ld.%06u stime %ld.%06u maxrss %lu ixrss %lu idrss %lu isrss %lu minflt %lu majflt %lu nswap %lu inblock %lu oublock %lu msgsnd %lu msgrcv %lu nsignals %lu nvcsw %lu nivcsw %lu",
1094 j
->ru
.ru_utime
.tv_sec
, j
->ru
.ru_utime
.tv_usec
,
1095 j
->ru
.ru_stime
.tv_sec
, j
->ru
.ru_stime
.tv_usec
,
1096 j
->ru
.ru_maxrss
, j
->ru
.ru_ixrss
, j
->ru
.ru_idrss
, j
->ru
.ru_isrss
,
1097 j
->ru
.ru_minflt
, j
->ru
.ru_majflt
,
1098 j
->ru
.ru_nswap
, j
->ru
.ru_inblock
, j
->ru
.ru_oublock
,
1099 j
->ru
.ru_msgsnd
, j
->ru
.ru_msgrcv
,
1100 j
->ru
.ru_nsignals
, j
->ru
.ru_nvcsw
, j
->ru
.ru_nivcsw
);
1102 if (j
->forced_peers_to_demand_mode
) {
1103 job_set_global_on_demand(j
, false);
1106 if (!job_assumes(j
, j
->fork_fd
== 0)) {
1107 job_assumes(j
, runtime_close(j
->fork_fd
) != -1);
1111 job_assumes(j
, runtime_close(j
->stdin_fd
) != -1);
1114 if (!job_assumes(j
, j
->log_redirect_fd
== 0)) {
1115 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
1119 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
1122 if (!job_assumes(j
, j
->wait_reply_port
== MACH_PORT_NULL
)) {
1123 job_assumes(j
, launchd_mport_deallocate(j
->wait_reply_port
) == KERN_SUCCESS
);
1126 while ((sg
= SLIST_FIRST(&j
->sockets
))) {
1127 socketgroup_delete(j
, sg
);
1129 while ((ci
= SLIST_FIRST(&j
->cal_intervals
))) {
1130 calendarinterval_delete(j
, ci
);
1132 while ((ei
= SLIST_FIRST(&j
->env
))) {
1133 envitem_delete(j
, ei
, false);
1135 while ((ei
= SLIST_FIRST(&j
->global_env
))) {
1136 envitem_delete(j
, ei
, true);
1138 while ((li
= SLIST_FIRST(&j
->limits
))) {
1139 limititem_delete(j
, li
);
1141 while ((ms
= SLIST_FIRST(&j
->machservices
))) {
1142 machservice_delete(j
, ms
, false);
1144 while ((si
= SLIST_FIRST(&j
->semaphores
))) {
1145 semaphoreitem_delete(j
, si
);
1147 while ((w4r
= SLIST_FIRST(&j
->removal_watchers
))) {
1148 waiting4removal_delete(j
, w4r
);
1160 if (j
->workingdir
) {
1161 free(j
->workingdir
);
1172 if (j
->stdoutpath
) {
1173 free(j
->stdoutpath
);
1175 if (j
->stderrpath
) {
1176 free(j
->stderrpath
);
1178 if (j
->alt_exc_handler
) {
1179 free(j
->alt_exc_handler
);
1182 if (j
->seatbelt_profile
) {
1183 free(j
->seatbelt_profile
);
1187 if (j
->quarantine_data
) {
1188 free(j
->quarantine_data
);
1194 if (j
->start_interval
) {
1195 runtime_del_weak_ref();
1196 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
) != -1);
1198 if (j
->poll_for_vfs_changes
) {
1199 job_assumes(j
, kevent_mod((uintptr_t)&j
->semaphores
, EVFILT_TIMER
, EV_DELETE
, 0, 0, j
) != -1);
1201 if( j
->exit_timeout
) {
1202 /* Not a big deal if this fails. It means that the timer's already been freed. */
1203 kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
1205 if( j
->jetsam_properties
) {
1206 LIST_REMOVE(j
, jetsam_sle
);
1207 j
->mgr
->jetsam_jobs_cnt
--;
1209 if( j
->audit_session
!= MACH_PORT_NULL
) {
1210 job_assumes(j
, mach_port_deallocate(mach_task_self(), j
->audit_session
) == KERN_SUCCESS
);
1212 if( !uuid_is_null(j
->expected_audit_uuid
) ) {
1213 LIST_REMOVE(j
, needing_session_sle
);
1215 if( j
->embedded_special_privileges
) {
1216 s_embedded_privileged_job
= NULL
;
1219 kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
1221 LIST_REMOVE(j
, sle
);
1222 LIST_REMOVE(j
, label_hash_sle
);
1224 job_log(j
, LOG_DEBUG
, "Removed");
1230 socketgroup_setup(launch_data_t obj
, const char *key
, void *context
)
1232 launch_data_t tmp_oai
;
1234 size_t i
, fd_cnt
= 1;
1237 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
1238 fd_cnt
= launch_data_array_get_count(obj
);
1241 fds
= alloca(fd_cnt
* sizeof(int));
1243 for (i
= 0; i
< fd_cnt
; i
++) {
1244 if (launch_data_get_type(obj
) == LAUNCH_DATA_ARRAY
) {
1245 tmp_oai
= launch_data_array_get_index(obj
, i
);
1250 fds
[i
] = launch_data_get_fd(tmp_oai
);
1253 socketgroup_new(j
, key
, fds
, fd_cnt
, strcmp(key
, LAUNCH_JOBKEY_BONJOURFDS
) == 0);
1255 ipc_revoke_fds(obj
);
1259 job_set_global_on_demand(job_t j
, bool val
)
1261 if (j
->forced_peers_to_demand_mode
&& val
) {
1263 } else if (!j
->forced_peers_to_demand_mode
&& !val
) {
1267 if ((j
->forced_peers_to_demand_mode
= val
)) {
1268 j
->mgr
->global_on_demand_cnt
++;
1270 j
->mgr
->global_on_demand_cnt
--;
1273 if (j
->mgr
->global_on_demand_cnt
== 0) {
1274 jobmgr_dispatch_all(j
->mgr
, false);
1281 job_setup_machport(job_t j
)
1283 mach_msg_size_t mxmsgsz
;
1285 if (!job_assumes(j
, launchd_mport_create_recv(&j
->j_port
) == KERN_SUCCESS
)) {
1289 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
1290 mxmsgsz
= (typeof(mxmsgsz
)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem
);
1291 if (job_mig_protocol_vproc_subsystem
.maxsize
> mxmsgsz
) {
1292 mxmsgsz
= job_mig_protocol_vproc_subsystem
.maxsize
;
1295 if (!job_assumes(j
, runtime_add_mport(j
->j_port
, protocol_vproc_server
, mxmsgsz
) == KERN_SUCCESS
)) {
1299 if (!job_assumes(j
, launchd_mport_notify_req(j
->j_port
, MACH_NOTIFY_NO_SENDERS
) == KERN_SUCCESS
)) {
1300 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
1306 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
1312 job_new_via_mach_init(job_t j
, const char *cmd
, uid_t uid
, bool ond
)
1314 const char **argv
= (const char **)mach_cmd2argv(cmd
);
1317 if (!job_assumes(j
, argv
!= NULL
)) {
1321 jr
= job_new(j
->mgr
, AUTO_PICK_LEGACY_LABEL
, NULL
, argv
);
1325 /* jobs can easily be denied creation during shutdown */
1326 if (unlikely(jr
== NULL
)) {
1332 jr
->legacy_mach_job
= true;
1333 jr
->abandon_pg
= true;
1334 jr
->priv_port_has_senders
= true; /* the IPC that called us will make-send on this port */
1336 if (!job_setup_machport(jr
)) {
1340 job_log(jr
, LOG_INFO
, "Legacy%s server created", ond
? " on-demand" : "");
1352 job_handle_mpm_wait(job_t j
, mach_port_t srp
, int *waitstatus
)
1355 j
->wait_reply_port
= srp
;
1356 return MIG_NO_REPLY
;
1359 *waitstatus
= j
->last_exit_status
;
1365 job_new_anonymous(jobmgr_t jm
, pid_t anonpid
)
1367 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, anonpid
};
1368 struct kinfo_proc kp
;
1369 size_t len
= sizeof(kp
);
1370 bool shutdown_state
;
1371 job_t jp
= NULL
, jr
= NULL
;
1372 uid_t kp_euid
, kp_uid
, kp_svuid
;
1373 gid_t kp_egid
, kp_gid
, kp_svgid
;
1375 if (!jobmgr_assumes(jm
, anonpid
!= 0)) {
1380 if (!jobmgr_assumes(jm
, anonpid
< 100000)) {
1381 /* The kernel current defines PID_MAX to be 99999, but that define isn't exported */
1386 if (!jobmgr_assumes(jm
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1)) {
1390 if (unlikely(len
!= sizeof(kp
))) {
1391 jobmgr_log(jm
, LOG_DEBUG
, "Tried to create an anonymous job for nonexistent PID: %u", anonpid
);
1396 if (!jobmgr_assumes(jm
, kp
.kp_proc
.p_comm
[0] != '\0')) {
1401 if (unlikely(kp
.kp_proc
.p_stat
== SZOMB
)) {
1402 jobmgr_log(jm
, LOG_DEBUG
, "Tried to create an anonymous job for zombie PID %u: %s", anonpid
, kp
.kp_proc
.p_comm
);
1405 if (unlikely(kp
.kp_proc
.p_flag
& P_SUGID
)) {
1406 jobmgr_log(jm
, LOG_DEBUG
, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid
, kp
.kp_proc
.p_comm
);
1409 kp_euid
= kp
.kp_eproc
.e_ucred
.cr_uid
;
1410 kp_uid
= kp
.kp_eproc
.e_pcred
.p_ruid
;
1411 kp_svuid
= kp
.kp_eproc
.e_pcred
.p_svuid
;
1412 kp_egid
= kp
.kp_eproc
.e_ucred
.cr_gid
;
1413 kp_gid
= kp
.kp_eproc
.e_pcred
.p_rgid
;
1414 kp_svgid
= kp
.kp_eproc
.e_pcred
.p_svgid
;
1416 if (unlikely(kp_euid
!= kp_uid
|| kp_euid
!= kp_svuid
|| kp_uid
!= kp_svuid
|| kp_egid
!= kp_gid
|| kp_egid
!= kp_svgid
|| kp_gid
!= kp_svgid
)) {
1417 jobmgr_log(jm
, LOG_DEBUG
, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
1418 kp_euid
, kp_uid
, kp_svuid
, kp_egid
, kp_gid
, kp_svgid
, anonpid
, kp
.kp_proc
.p_comm
);
1421 /* "Fix" for a problem that shouldn't even exist.
1422 * See rdar://problem/7264615 for the symptom and rdar://problem/5020256
1423 * as to why this can happen.
1425 if( !jobmgr_assumes(jm
, kp
.kp_eproc
.e_ppid
!= anonpid
) ) {
1426 jobmgr_log(jm
, LOG_WARNING
, "Process has become its own parent through ptrace(3). It should find a different way to do whatever it's doing. Setting PPID to 0: %s", kp
.kp_proc
.p_comm
);
1431 if (jp
&& !jp
->anonymous
&& unlikely(!(kp
.kp_proc
.p_flag
& P_EXEC
))) {
1432 job_log(jp
, LOG_DEBUG
, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u",
1436 /* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
1437 if (unlikely(shutdown_state
= jm
->shutting_down
)) {
1438 jm
->shutting_down
= false;
1441 if (jobmgr_assumes(jm
, (jr
= job_new(jm
, AUTO_PICK_ANONYMOUS_LABEL
, kp
.kp_proc
.p_comm
, NULL
)) != NULL
)) {
1442 u_int proc_fflags
= NOTE_EXEC
|NOTE_FORK
|NOTE_EXIT
|NOTE_REAP
;
1444 total_anon_children
++;
1445 jr
->anonymous
= true;
1448 /* anonymous process reaping is messy */
1449 LIST_INSERT_HEAD(&jm
->active_jobs
[ACTIVE_JOB_HASH(jr
->p
)], jr
, pid_hash_sle
);
1451 if (unlikely(kevent_mod(jr
->p
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
) == -1) && job_assumes(jr
, errno
== ESRCH
)) {
1452 /* zombies are weird */
1453 job_log(jr
, LOG_ERR
, "Failed to add kevent for PID %u. Will unload at MIG return", jr
->p
);
1454 jr
->unload_at_mig_return
= true;
1457 if (unlikely(shutdown_state
&& jm
->hopefully_first_cnt
== 0)) {
1458 job_log(jr
, LOG_SCOLDING
, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
1461 job_log(jr
, LOG_DEBUG
, "Created PID %u anonymously by PPID %u%s%s", anonpid
, kp
.kp_eproc
.e_ppid
, jp
? ": " : "", jp
? jp
->label
: "");
1464 if (unlikely(shutdown_state
)) {
1465 jm
->shutting_down
= true;
1468 /* This is down here to mitigate the effects of rdar://problem/7264615, in which a process
1469 * attaches to its own parent. We need to make sure that the anonymous job has been added
1470 * to the process list so that, if it's used ptrace(3) to cause a cycle in the process
1471 * tree (thereby making it not a tree anymore), we'll find the tracing parent PID of the
1472 * parent process, which is the child, when we go looking for it in jobmgr_find_by_pid().
1474 switch (kp
.kp_eproc
.e_ppid
) {
1480 /* we cannot possibly find a parent job_t that is useful in this function */
1485 jp
= jobmgr_find_by_pid(jm
, kp
.kp_eproc
.e_ppid
, true);
1486 jobmgr_assumes(jm
, jp
!= NULL
);
1494 job_new(jobmgr_t jm
, const char *label
, const char *prog
, const char *const *argv
)
1496 const char *const *argv_tmp
= argv
;
1497 char tmp_path
[PATH_MAX
];
1498 char auto_label
[1000];
1499 const char *bn
= NULL
;
1501 size_t minlabel_len
;
1505 launchd_assert(offsetof(struct job_s
, kqjob_callback
) == 0);
1507 if (unlikely(jm
->shutting_down
)) {
1512 if (unlikely(prog
== NULL
&& argv
== NULL
)) {
1517 char *anon_or_legacy
= ( label
== AUTO_PICK_ANONYMOUS_LABEL
) ? "anonymous" : "mach_init";
1518 if (unlikely(label
== AUTO_PICK_LEGACY_LABEL
|| label
== AUTO_PICK_ANONYMOUS_LABEL
)) {
1522 strlcpy(tmp_path
, argv
[0], sizeof(tmp_path
));
1523 bn
= basename(tmp_path
); /* prog for auto labels is kp.kp_kproc.p_comm */
1525 snprintf(auto_label
, sizeof(auto_label
), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy
, bn
);
1527 /* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
1528 minlabel_len
= strlen(label
) + MAXCOMLEN
;
1530 minlabel_len
= strlen(label
);
1533 j
= calloc(1, sizeof(struct job_s
) + minlabel_len
+ 1);
1535 if (!jobmgr_assumes(jm
, j
!= NULL
)) {
1539 if (unlikely(label
== auto_label
)) {
1540 snprintf((char *)j
->label
, strlen(label
) + 1, "%p.%s.%s", j
, anon_or_legacy
, bn
);
1542 strcpy((char *)j
->label
, label
);
1544 j
->kqjob_callback
= job_callback
;
1546 j
->min_run_time
= LAUNCHD_MIN_JOB_RUN_TIME
;
1547 j
->timeout
= RUNTIME_ADVISABLE_IDLE_TIMEOUT
;
1548 j
->exit_timeout
= LAUNCHD_DEFAULT_EXIT_TIMEOUT
;
1549 j
->currently_ignored
= true;
1551 j
->checkedin
= true;
1552 j
->jetsam_priority
= -1;
1553 j
->jetsam_memlimit
= -1;
1554 uuid_clear(j
->expected_audit_uuid
);
1557 j
->prog
= strdup(prog
);
1558 if (!job_assumes(j
, j
->prog
!= NULL
)) {
1564 while (*argv_tmp
++) {
1568 for (i
= 0; i
< j
->argc
; i
++) {
1569 cc
+= strlen(argv
[i
]) + 1;
1572 j
->argv
= malloc((j
->argc
+ 1) * sizeof(char *) + cc
);
1574 if (!job_assumes(j
, j
->argv
!= NULL
)) {
1578 co
= ((char *)j
->argv
) + ((j
->argc
+ 1) * sizeof(char *));
1580 for (i
= 0; i
< j
->argc
; i
++) {
1582 strcpy(co
, argv
[i
]);
1583 co
+= strlen(argv
[i
]) + 1;
1588 if( strcmp(j
->label
, "com.apple.WindowServer") == 0 ) {
1589 j
->has_console
= true;
1592 LIST_INSERT_HEAD(&jm
->jobs
, j
, sle
);
1593 LIST_INSERT_HEAD(&label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
1594 uuid_clear(j
->expected_audit_uuid
);
1596 job_log(j
, LOG_DEBUG
, "Conceived");
1610 job_import(launch_data_t pload
)
1612 job_t j
= jobmgr_import2(root_jobmgr
, pload
);
1614 if (unlikely(j
== NULL
)) {
1618 /* Since jobs are effectively stalled until they get security sessions assigned
1619 * to them, we may wish to reconsider this behavior of calling the job "enabled"
1620 * as far as other jobs with the OtherJobEnabled KeepAlive criterion set.
1622 job_dispatch_curious_jobs(j
);
1623 return job_dispatch(j
, false);
1627 job_import_bulk(launch_data_t pload
)
1629 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
1631 size_t i
, c
= launch_data_array_get_count(pload
);
1633 ja
= alloca(c
* sizeof(job_t
));
1635 for (i
= 0; i
< c
; i
++) {
1636 if( (likely(ja
[i
] = jobmgr_import2(root_jobmgr
, launch_data_array_get_index(pload
, i
)))) && errno
!= ENEEDAUTH
) {
1639 launch_data_array_set_index(resp
, launch_data_new_errno(errno
), i
);
1642 for (i
= 0; i
< c
; i
++) {
1643 if (likely(ja
[i
])) {
1644 job_dispatch_curious_jobs(ja
[i
]);
1645 job_dispatch(ja
[i
], false);
1653 job_import_bool(job_t j
, const char *key
, bool value
)
1655 bool found_key
= false;
1660 if (strcasecmp(key
, LAUNCH_JOBKEY_ABANDONPROCESSGROUP
) == 0) {
1661 j
->abandon_pg
= value
;
1667 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
1668 j
->ondemand
= !value
;
1674 if (strcasecmp(key
, LAUNCH_JOBKEY_ONDEMAND
) == 0) {
1675 j
->ondemand
= value
;
1681 if (strcasecmp(key
, LAUNCH_JOBKEY_DEBUG
) == 0) {
1684 } else if (strcasecmp(key
, LAUNCH_JOBKEY_DISABLED
) == 0) {
1685 job_assumes(j
, !value
);
1691 if (strcasecmp(key
, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST
) == 0) {
1692 j
->hopefully_exits_last
= value
;
1694 } else if (strcasecmp(key
, LAUNCH_JOBKEY_HOPEFULLYEXITSFIRST
) == 0) {
1695 j
->hopefully_exits_first
= value
;
1701 if (strcasecmp(key
, LAUNCH_JOBKEY_SESSIONCREATE
) == 0) {
1702 j
->session_create
= value
;
1704 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTONMOUNT
) == 0) {
1705 j
->start_on_mount
= value
;
1707 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SERVICEIPC
) == 0) {
1708 /* this only does something on Mac OS X 10.4 "Tiger" */
1714 if (strcasecmp(key
, LAUNCH_JOBKEY_LOWPRIORITYIO
) == 0) {
1715 j
->low_pri_io
= value
;
1717 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LAUNCHONLYONCE
) == 0) {
1718 j
->only_once
= value
;
1724 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
1725 j
->internal_exc_handler
= value
;
1731 if (strcasecmp(key
, LAUNCH_JOBKEY_INITGROUPS
) == 0) {
1732 if (getuid() != 0) {
1733 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1736 j
->no_init_groups
= !value
;
1738 } else if( strcasecmp(key
, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN
) == 0 ) {
1739 j
->ignore_pg_at_shutdown
= value
;
1745 if (strcasecmp(key
, LAUNCH_JOBKEY_RUNATLOAD
) == 0) {
1747 /* We don't want value == false to change j->start_pending */
1748 j
->start_pending
= true;
1755 if (strcasecmp(key
, LAUNCH_JOBKEY_ENABLEGLOBBING
) == 0) {
1756 j
->globargv
= value
;
1758 } else if (strcasecmp(key
, LAUNCH_JOBKEY_ENABLETRANSACTIONS
) == 0) {
1759 j
->kill_via_shmem
= value
;
1761 } else if (strcasecmp(key
, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL
) == 0) {
1762 j
->debug_before_kill
= value
;
1764 } else if( strcasecmp(key
, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION
) == 0 ) {
1765 if( !s_embedded_privileged_job
) {
1766 j
->embedded_special_privileges
= value
;
1767 s_embedded_privileged_job
= j
;
1769 job_log(j
, LOG_ERR
, "Job tried to claim %s after it has already been claimed.", key
);
1776 if (strcasecmp(key
, LAUNCH_JOBKEY_WAITFORDEBUGGER
) == 0) {
1777 j
->wait4debugger
= value
;
1785 if (unlikely(!found_key
)) {
1786 job_log(j
, LOG_WARNING
, "Unknown key for boolean: %s", key
);
1791 job_import_string(job_t j
, const char *key
, const char *value
)
1793 char **where2put
= NULL
;
1798 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER
) == 0) {
1799 where2put
= &j
->alt_exc_handler
;
1804 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAM
) == 0) {
1810 if (strcasecmp(key
, LAUNCH_JOBKEY_LABEL
) == 0) {
1812 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
1814 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
1816 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
1822 if (strcasecmp(key
, LAUNCH_JOBKEY_ROOTDIRECTORY
) == 0) {
1823 if (getuid() != 0) {
1824 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1827 where2put
= &j
->rootdir
;
1832 if (strcasecmp(key
, LAUNCH_JOBKEY_WORKINGDIRECTORY
) == 0) {
1833 where2put
= &j
->workingdir
;
1838 if (strcasecmp(key
, LAUNCH_JOBKEY_USERNAME
) == 0) {
1839 if (getuid() != 0) {
1840 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1842 } else if (strcmp(value
, "root") == 0) {
1845 where2put
= &j
->username
;
1850 if (strcasecmp(key
, LAUNCH_JOBKEY_GROUPNAME
) == 0) {
1851 if (getuid() != 0) {
1852 job_log(j
, LOG_WARNING
, "Ignored this key: %s", key
);
1854 } else if (strcmp(value
, "wheel") == 0) {
1857 where2put
= &j
->groupname
;
1862 if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDOUTPATH
) == 0) {
1863 where2put
= &j
->stdoutpath
;
1864 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDERRORPATH
) == 0) {
1865 where2put
= &j
->stderrpath
;
1866 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STANDARDINPATH
) == 0) {
1867 where2put
= &j
->stdinpath
;
1868 j
->stdin_fd
= _fd(open(value
, O_RDONLY
|O_CREAT
|O_NOCTTY
|O_NONBLOCK
, DEFFILEMODE
));
1869 if (job_assumes(j
, j
->stdin_fd
!= -1)) {
1870 /* open() should not block, but regular IO by the job should */
1871 job_assumes(j
, fcntl(j
->stdin_fd
, F_SETFL
, 0) != -1);
1872 /* XXX -- EV_CLEAR should make named pipes happy? */
1873 job_assumes(j
, kevent_mod(j
->stdin_fd
, EVFILT_READ
, EV_ADD
|EV_CLEAR
, 0, 0, j
) != -1);
1878 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXPROFILE
) == 0) {
1879 where2put
= &j
->seatbelt_profile
;
1884 job_log(j
, LOG_WARNING
, "Unknown key for string: %s", key
);
1888 if (likely(where2put
)) {
1889 job_assumes(j
, (*where2put
= strdup(value
)) != NULL
);
1891 /* See rdar://problem/5496612. These two are okay. */
1892 if( strncmp(key
, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) != 0 && strncmp(key
, "ServiceDescription", sizeof("ServiceDescription")) != 0 ) {
1893 job_log(j
, LOG_WARNING
, "Unknown key: %s", key
);
1899 job_import_integer(job_t j
, const char *key
, long long value
)
1904 if (strcasecmp(key
, LAUNCH_JOBKEY_EXITTIMEOUT
) == 0) {
1905 if (unlikely(value
< 0)) {
1906 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
1907 } else if (unlikely(value
> UINT32_MAX
)) {
1908 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT
);
1910 j
->exit_timeout
= (typeof(j
->exit_timeout
)) value
;
1912 } else if( strcasecmp(key
, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY
) == 0 ) {
1913 j
->main_thread_priority
= value
;
1918 if( strcasecmp(key
, LAUNCH_JOBKEY_JETSAMPRIORITY
) == 0 ) {
1919 job_log(j
, LOG_WARNING
| LOG_CONSOLE
, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
1921 launch_data_t pri
= launch_data_new_integer(value
);
1922 if( job_assumes(j
, pri
!= NULL
) ) {
1923 jetsam_property_setup(pri
, LAUNCH_JOBKEY_JETSAMPRIORITY
, j
);
1924 launch_data_free(pri
);
1929 if (strcasecmp(key
, LAUNCH_JOBKEY_NICE
) == 0) {
1930 if (unlikely(value
< PRIO_MIN
)) {
1931 job_log(j
, LOG_WARNING
, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE
, PRIO_MIN
);
1932 } else if (unlikely(value
> PRIO_MAX
)) {
1933 job_log(j
, LOG_WARNING
, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE
, PRIO_MAX
);
1935 j
->nice
= (typeof(j
->nice
)) value
;
1942 if (strcasecmp(key
, LAUNCH_JOBKEY_TIMEOUT
) == 0) {
1943 if (unlikely(value
< 0)) {
1944 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
1945 } else if (unlikely(value
> UINT32_MAX
)) {
1946 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT
);
1948 j
->timeout
= (typeof(j
->timeout
)) value
;
1950 } else if (strcasecmp(key
, LAUNCH_JOBKEY_THROTTLEINTERVAL
) == 0) {
1952 job_log(j
, LOG_WARNING
, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
1953 } else if (value
> UINT32_MAX
) {
1954 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL
);
1956 j
->min_run_time
= (typeof(j
->min_run_time
)) value
;
1962 if (strcasecmp(key
, LAUNCH_JOBKEY_UMASK
) == 0) {
1969 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTINTERVAL
) == 0) {
1970 if (unlikely(value
<= 0)) {
1971 job_log(j
, LOG_WARNING
, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
1972 } else if (unlikely(value
> UINT32_MAX
)) {
1973 job_log(j
, LOG_WARNING
, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL
);
1975 runtime_add_weak_ref();
1976 j
->start_interval
= (typeof(j
->start_interval
)) value
;
1978 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, j
->start_interval
, j
) != -1);
1981 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
1982 j
->seatbelt_flags
= value
;
1988 job_log(j
, LOG_WARNING
, "Unknown key for integer: %s", key
);
1994 job_import_opaque(job_t j
__attribute__((unused
)),
1995 const char *key
, launch_data_t value
__attribute__((unused
)))
2001 if (strcasecmp(key
, LAUNCH_JOBKEY_QUARANTINEDATA
) == 0) {
2002 size_t tmpsz
= launch_data_get_opaque_size(value
);
2004 if (job_assumes(j
, j
->quarantine_data
= malloc(tmpsz
))) {
2005 memcpy(j
->quarantine_data
, launch_data_get_opaque(value
), tmpsz
);
2006 j
->quarantine_data_sz
= tmpsz
;
2012 if( strcasecmp(key
, LAUNCH_JOBKEY_SECURITYSESSIONUUID
) == 0 ) {
2013 size_t tmpsz
= launch_data_get_opaque_size(value
);
2014 if( job_assumes(j
, tmpsz
== sizeof(uuid_t
)) ) {
2015 memcpy(j
->expected_audit_uuid
, launch_data_get_opaque(value
), sizeof(uuid_t
));
2025 policy_setup(launch_data_t obj
, const char *key
, void *context
)
2028 bool found_key
= false;
2033 if (strcasecmp(key
, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS
) == 0) {
2034 j
->deny_job_creation
= launch_data_get_bool(obj
);
2042 if (unlikely(!found_key
)) {
2043 job_log(j
, LOG_WARNING
, "Unknown policy: %s", key
);
2048 job_import_dictionary(job_t j
, const char *key
, launch_data_t value
)
2055 if (strcasecmp(key
, LAUNCH_JOBKEY_POLICIES
) == 0) {
2056 launch_data_dict_iterate(value
, policy_setup
, j
);
2061 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE
) == 0) {
2062 launch_data_dict_iterate(value
, semaphoreitem_setup
, j
);
2067 if (strcasecmp(key
, LAUNCH_JOBKEY_INETDCOMPATIBILITY
) == 0) {
2068 j
->inetcompat
= true;
2069 j
->abandon_pg
= true;
2070 if ((tmp
= launch_data_dict_lookup(value
, LAUNCH_JOBINETDCOMPATIBILITY_WAIT
))) {
2071 j
->inetcompat_wait
= launch_data_get_bool(tmp
);
2077 if( strcasecmp(key
, LAUNCH_JOBKEY_JETSAMPROPERTIES
) == 0 ) {
2078 launch_data_dict_iterate(value
, (void (*)(launch_data_t
, const char *, void *))jetsam_property_setup
, j
);
2082 if (strcasecmp(key
, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES
) == 0) {
2083 launch_data_dict_iterate(value
, envitem_setup
, j
);
2088 if (strcasecmp(key
, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES
) == 0) {
2089 j
->importing_global_env
= true;
2090 launch_data_dict_iterate(value
, envitem_setup
, j
);
2091 j
->importing_global_env
= false;
2096 if (strcasecmp(key
, LAUNCH_JOBKEY_SOCKETS
) == 0) {
2097 launch_data_dict_iterate(value
, socketgroup_setup
, j
);
2098 } else if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
2099 calendarinterval_new_from_obj(j
, value
);
2100 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SOFTRESOURCELIMITS
) == 0) {
2101 launch_data_dict_iterate(value
, limititem_setup
, j
);
2103 } else if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOXFLAGS
) == 0) {
2104 launch_data_dict_iterate(value
, seatbelt_setup_flags
, j
);
2110 if (strcasecmp(key
, LAUNCH_JOBKEY_HARDRESOURCELIMITS
) == 0) {
2111 j
->importing_hard_limits
= true;
2112 launch_data_dict_iterate(value
, limititem_setup
, j
);
2113 j
->importing_hard_limits
= false;
2118 if (strcasecmp(key
, LAUNCH_JOBKEY_MACHSERVICES
) == 0) {
2119 launch_data_dict_iterate(value
, machservice_setup
, j
);
2123 job_log(j
, LOG_WARNING
, "Unknown key for dictionary: %s", key
);
2129 job_import_array(job_t j
, const char *key
, launch_data_t value
)
2131 size_t i
, value_cnt
= launch_data_array_get_count(value
);
2137 if (strcasecmp(key
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
) == 0) {
2143 if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOHOSTS
) == 0) {
2145 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS
) == 0) {
2147 } else if (strcasecmp(key
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
) == 0) {
2148 job_log(j
, LOG_NOTICE
, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
2154 if (strcasecmp(key
, LAUNCH_JOBKEY_QUEUEDIRECTORIES
) == 0) {
2155 for (i
= 0; i
< value_cnt
; i
++) {
2156 str
= launch_data_get_string(launch_data_array_get_index(value
, i
));
2157 if (job_assumes(j
, str
!= NULL
)) {
2158 semaphoreitem_new(j
, DIR_NOT_EMPTY
, str
);
2166 if (strcasecmp(key
, LAUNCH_JOBKEY_WATCHPATHS
) == 0) {
2167 for (i
= 0; i
< value_cnt
; i
++) {
2168 str
= launch_data_get_string(launch_data_array_get_index(value
, i
));
2169 if (job_assumes(j
, str
!= NULL
)) {
2170 semaphoreitem_new(j
, PATH_CHANGES
, str
);
2177 if (strcasecmp(key
, LAUNCH_JOBKEY_BONJOURFDS
) == 0) {
2178 socketgroup_setup(value
, LAUNCH_JOBKEY_BONJOURFDS
, j
);
2179 } else if (strcasecmp(key
, LAUNCH_JOBKEY_BINARYORDERPREFERENCE
) == 0) {
2180 if (job_assumes(j
, j
->j_binpref
= malloc(value_cnt
* sizeof(*j
->j_binpref
)))) {
2181 j
->j_binpref_cnt
= value_cnt
;
2182 for (i
= 0; i
< value_cnt
; i
++) {
2183 j
->j_binpref
[i
] = (cpu_type_t
) launch_data_get_integer(launch_data_array_get_index(value
, i
));
2190 if (strcasecmp(key
, LAUNCH_JOBKEY_STARTCALENDARINTERVAL
) == 0) {
2191 for (i
= 0; i
< value_cnt
; i
++) {
2192 calendarinterval_new_from_obj(j
, launch_data_array_get_index(value
, i
));
2197 job_log(j
, LOG_WARNING
, "Unknown key for array: %s", key
);
2203 job_import_keys(launch_data_t obj
, const char *key
, void *context
)
2206 launch_data_type_t kind
;
2208 if (!launchd_assumes(obj
!= NULL
)) {
2212 kind
= launch_data_get_type(obj
);
2215 case LAUNCH_DATA_BOOL
:
2216 job_import_bool(j
, key
, launch_data_get_bool(obj
));
2218 case LAUNCH_DATA_STRING
:
2219 job_import_string(j
, key
, launch_data_get_string(obj
));
2221 case LAUNCH_DATA_INTEGER
:
2222 job_import_integer(j
, key
, launch_data_get_integer(obj
));
2224 case LAUNCH_DATA_DICTIONARY
:
2225 job_import_dictionary(j
, key
, obj
);
2227 case LAUNCH_DATA_ARRAY
:
2228 job_import_array(j
, key
, obj
);
2230 case LAUNCH_DATA_OPAQUE
:
2231 job_import_opaque(j
, key
, obj
);
2234 job_log(j
, LOG_WARNING
, "Unknown value type '%d' for key: %s", kind
, key
);
2240 jobmgr_import2(jobmgr_t jm
, launch_data_t pload
)
2242 launch_data_t tmp
, ldpa
;
2243 const char *label
= NULL
, *prog
= NULL
;
2244 const char **argv
= NULL
;
2247 if (!jobmgr_assumes(jm
, pload
!= NULL
)) {
2252 if (unlikely(launch_data_get_type(pload
) != LAUNCH_DATA_DICTIONARY
)) {
2257 if (unlikely(!(tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LABEL
)))) {
2262 if (unlikely(launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
)) {
2267 if (unlikely(!(label
= launch_data_get_string(tmp
)))) {
2272 #if TARGET_OS_EMBEDDED
2273 if( unlikely(g_embedded_privileged_action
&& s_embedded_privileged_job
) ) {
2274 if( unlikely(!(tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_USERNAME
))) ) {
2279 const char *username
= NULL
;
2280 if( likely(tmp
&& launch_data_get_type(tmp
) == LAUNCH_DATA_STRING
) ) {
2281 username
= launch_data_get_string(tmp
);
2287 if( !jobmgr_assumes(jm
, s_embedded_privileged_job
->username
!= NULL
&& username
!= NULL
) ) {
2292 if( unlikely(strcmp(s_embedded_privileged_job
->username
, username
) != 0) ) {
2296 } else if( g_embedded_privileged_action
) {
2302 if ((tmp
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAM
)) &&
2303 (launch_data_get_type(tmp
) == LAUNCH_DATA_STRING
)) {
2304 prog
= launch_data_get_string(tmp
);
2307 if ((ldpa
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_PROGRAMARGUMENTS
))) {
2310 if (launch_data_get_type(ldpa
) != LAUNCH_DATA_ARRAY
) {
2315 c
= launch_data_array_get_count(ldpa
);
2317 argv
= alloca((c
+ 1) * sizeof(char *));
2319 for (i
= 0; i
< c
; i
++) {
2320 tmp
= launch_data_array_get_index(ldpa
, i
);
2322 if (launch_data_get_type(tmp
) != LAUNCH_DATA_STRING
) {
2327 argv
[i
] = launch_data_get_string(tmp
);
2333 /* Hack to make sure the proper job manager is set the whole way through. */
2334 launch_data_t session
= launch_data_dict_lookup(pload
, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE
);
2336 jm
= jobmgr_find_by_name(jm
, launch_data_get_string(session
)) ?: jm
;
2339 jobmgr_log(jm
, LOG_DEBUG
, "Importing %s.", label
);
2341 if (unlikely((j
= job_find(label
)) != NULL
)) {
2344 } else if (unlikely(!jobmgr_label_test(jm
, label
))) {
2349 if (likely(j
= job_new(jm
, label
, prog
, argv
))) {
2350 launch_data_dict_iterate(pload
, job_import_keys
, j
);
2351 if( !uuid_is_null(j
->expected_audit_uuid
) ) {
2352 uuid_string_t uuid_str
;
2353 uuid_unparse(j
->expected_audit_uuid
, uuid_str
);
2354 job_log(j
, LOG_DEBUG
, "Imported job. Waiting for session for UUID %s.", uuid_str
);
2355 LIST_INSERT_HEAD(&s_needing_sessions
, j
, needing_session_sle
);
2358 job_log(j
, LOG_DEBUG
, "No security session specified.");
2359 j
->audit_session
= MACH_PORT_NULL
;
/* Validate a proposed job label: it must be non-empty, contain no ASCII
 * control characters, not begin with a number, and not use a reserved
 * "com.apple.launchd"/"com.apple.launchctl" prefix.
 * NOTE(review): the excerpt elides the return statements and the declaration
 * of 'ptr'; presumably the function returns false on each rejection path —
 * confirm against the full source. */
2367 jobmgr_label_test(jobmgr_t jm
, const char *str
)
2369 char *endstr
= NULL
;
2372 if (str
[0] == '\0') {
2373 jobmgr_log(jm
, LOG_ERR
, "Empty job labels are not allowed");
/* Reject any control character anywhere in the label. */
2377 for (ptr
= str
; *ptr
; ptr
++) {
2378 if (iscntrl(*ptr
)) {
2379 jobmgr_log(jm
, LOG_ERR
, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr
- str
, *ptr
);
/* strtoll() consumed characters iff the label begins with a number. */
2384 strtoll(str
, &endstr
, 0);
2386 if (str
!= endstr
) {
2387 jobmgr_log(jm
, LOG_ERR
, "Job labels are not allowed to begin with numbers: %s", str
);
2391 if ((strncasecmp(str
, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
2392 (strncasecmp(str
, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
2393 jobmgr_log(jm
, LOG_ERR
, "Job labels are not allowed to use a reserved prefix: %s", str
);
/* Look up a job by label in the global label hash table, skipping jobs that
 * are pending removal or whose manager is shutting down.
 * NOTE(review): the excerpt elides the return statements and the 'ji'
 * declaration. */
2401 job_find(const char *label
)
2405 LIST_FOREACH(ji
, &label_hash
[hash_label(label
)], label_hash_sle
) {
2406 if (unlikely(ji
->removal_pending
|| ji
->mgr
->shutting_down
)) {
2407 continue; /* 5351245 and 5488633 respectively */
2410 if (strcmp(ji
->label
, label
) == 0) {
/* Recursively search this job manager and all of its sub-managers for a job
 * with PID p. Anonymous jobs are only matched when anon_okay is set. */
2419 /* Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid(). */
2421 jobmgr_find_by_pid_deep(jobmgr_t jm
, pid_t p
, bool anon_okay
)
/* First check this manager's own PID hash bucket... */
2424 LIST_FOREACH( ji
, &jm
->active_jobs
[ACTIVE_JOB_HASH(p
)], pid_hash_sle
) {
2425 if (ji
->p
== p
&& (!ji
->anonymous
|| (ji
->anonymous
&& anon_okay
)) ) {
/* ...then recurse into each sub-manager. */
2430 jobmgr_t jmi
= NULL
;
2431 SLIST_FOREACH( jmi
, &jm
->submgrs
, sle
) {
2432 if( (ji
= jobmgr_find_by_pid_deep(jmi
, p
, anon_okay
)) ) {
/* Look up a job by PID in this manager only (no recursion). If not found and
 * create_anon is set, synthesize an anonymous job for the PID. */
2441 jobmgr_find_by_pid(jobmgr_t jm
, pid_t p
, bool create_anon
)
2445 LIST_FOREACH(ji
, &jm
->active_jobs
[ACTIVE_JOB_HASH(p
)], pid_hash_sle
) {
2451 return create_anon
? job_new_anonymous(jm
, p
) : NULL
;
/* MIG intran helper: translate a Mach port + caller PID into the job_t it
 * belongs to, searching this manager, its sub-managers, and per-job ports. */
2455 job_mig_intran2(jobmgr_t jm
, mach_port_t mport
, pid_t upid
)
/* Request arrived on the manager's own port: resolve by caller PID,
 * creating an anonymous job if needed. */
2460 if (jm
->jm_port
== mport
) {
2461 return jobmgr_find_by_pid(jm
, upid
, true);
/* Otherwise recurse into sub-managers... */
2464 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
2467 if ((jr
= job_mig_intran2(jmi
, mport
, upid
))) {
/* ...and finally check each job's dedicated port. */
2472 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
2473 if (ji
->j_port
== mport
) {
/* MIG intran entry point: map an incoming request port to a job_t using the
 * caller's audited credentials. Logs diagnostic detail when translation
 * unexpectedly fails. */
2482 job_mig_intran(mach_port_t p
)
2484 struct ldcred
*ldc
= runtime_get_caller_creds();
2487 jr
= job_mig_intran2(root_jobmgr
, p
, ldc
->pid
);
/* Translation failure should not happen; gather kinfo_proc for the caller
 * so the log line identifies who confused us. */
2489 if (!jobmgr_assumes(root_jobmgr
, jr
!= NULL
)) {
2490 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, 0 };
2491 struct kinfo_proc kp
;
2492 size_t len
= sizeof(kp
);
2496 if (jobmgr_assumes(root_jobmgr
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1)
2497 && jobmgr_assumes(root_jobmgr
, len
== sizeof(kp
))) {
2498 jobmgr_log(root_jobmgr
, LOG_ERR
, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__
, ldc
->pid
, ldc
->uid
, ldc
->euid
, p
, kp
.kp_proc
.p_comm
);
/* Find the job owning a Mach service receive right, by scanning the port
 * hash bucket for a machservice whose receive port matches p. */
2506 job_find_by_service_port(mach_port_t p
)
2508 struct machservice
*ms
;
2510 LIST_FOREACH(ms
, &port_hash
[HASH_PORT(p
)], port_hash_sle
) {
/* Only services for which we hold the receive right are candidates. */
2511 if (ms
->recv
&& (ms
->port
== p
)) {
/* MIG destructor: runs after a MIG request completes. Unloads the job if it
 * was flagged for unload-at-return, and clears the 5477111 workaround. */
2520 job_mig_destructor(job_t j
)
2525 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
/* workaround_5477111 marks the one job for which 'j' is known stale. */
2528 if (unlikely(j
&& (j
!= workaround_5477111
) && j
->unload_at_mig_return
)) {
2529 job_log(j
, LOG_NOTICE
, "Unloading PID %u at MIG return.", j
->p
);
2533 workaround_5477111
= NULL
;
2535 calendarinterval_sanity_check();
/* Recursively export every job under this manager into the 'where'
 * dictionary, keyed by job label. */
2539 job_export_all2(jobmgr_t jm
, launch_data_t where
)
/* Depth-first: sub-managers first, then this manager's own jobs. */
2544 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
2545 job_export_all2(jmi
, where
);
2548 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
2551 if (jobmgr_assumes(jm
, (tmp
= job_export(ji
)) != NULL
)) {
2552 launch_data_dict_insert(where
, tmp
, ji
->label
);
/* Export all known jobs (from the root job manager down) as one launchd
 * dictionary. Caller owns the returned launch_data_t. */
2558 job_export_all(void)
2560 launch_data_t resp
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
);
2562 if (launchd_assumes(resp
!= NULL
)) {
2563 job_export_all2(root_jobmgr
, resp
);
/* Apple-internal diagnostic: after a job dies, enumerate processes still in
 * its process group (via sysctl KERN_PROC_PGRP) and log each stray.
 * NOTE(review): the excerpt elides the early returns and the free() of 'kp'
 * — confirm the buffer is released on all paths in the full source. */
2570 job_log_stray_pg(job_t j
)
2572 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PGRP
, j
->p
};
/* Size the buffer for the kernel's maximum process count. */
2573 size_t i
, kp_cnt
, len
= sizeof(struct kinfo_proc
) * get_kern_max_proc();
2574 struct kinfo_proc
*kp
;
2576 if (!do_apple_internal_logging
) {
2580 runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG
, j
->p
, 0, 0);
2582 if (!job_assumes(j
, (kp
= malloc(len
)) != NULL
)) {
2585 if (!job_assumes(j
, sysctl(mib
, 4, kp
, &len
, NULL
, 0) != -1)) {
/* sysctl() updated 'len' to the bytes actually returned. */
2589 kp_cnt
= len
/ sizeof(struct kinfo_proc
);
2591 for (i
= 0; i
< kp_cnt
; i
++) {
2592 pid_t p_i
= kp
[i
].kp_proc
.p_pid
;
2593 pid_t pp_i
= kp
[i
].kp_eproc
.e_ppid
;
2594 const char *z
= (kp
[i
].kp_proc
.p_stat
== SZOMB
) ? "zombie " : "";
2595 const char *n
= kp
[i
].kp_proc
.p_comm
;
/* PIDs 0 and 1 in the group would indicate kernel/launchd confusion. */
2599 } else if (!job_assumes(j
, p_i
!= 0 && p_i
!= 1)) {
2603 job_log(j
, LOG_WARNING
, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u %s", z
, p_i
, pp_i
, n
);
2616 bool is_system_bootstrapper
= j
->is_bootstrapper
&& pid1_magic
&& !j
->mgr
->parentmgr
;
2618 job_log(j
, LOG_DEBUG
, "Reaping");
2621 job_assumes(j
, vm_deallocate(mach_task_self(), (vm_address_t
)j
->shmem
, getpagesize()) == 0);
2625 if (unlikely(j
->weird_bootstrap
)) {
2627 job_mig_swap_integer(j
, VPROC_GSK_WEIRD_BOOTSTRAP
, 0, 0, &junk
);
2630 if (j
->log_redirect_fd
&& !j
->legacy_LS_job
) {
2631 job_log_stdouterr(j
); /* one last chance */
2633 if (j
->log_redirect_fd
) {
2634 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
2635 j
->log_redirect_fd
= 0;
2640 job_assumes(j
, runtime_close(j
->fork_fd
) != -1);
2646 memset(&ru
, 0, sizeof(ru
));
2649 * The job is dead. While the PID/PGID is still known to be
2650 * valid, try to kill abandoned descendant processes.
2652 job_log_stray_pg(j
);
2653 if (!j
->abandon_pg
) {
2654 if (unlikely(runtime_killpg(j
->p
, SIGTERM
) == -1 && errno
!= ESRCH
)) {
2656 job_log(j
, LOG_APPLEONLY
, "Bug: 5487498");
2658 job_assumes(j
, false);
2666 * The current implementation of ptrace() causes the traced process to
2667 * be abducted away from the true parent and adopted by the tracer.
2669 * Once the tracing process relinquishes control, the kernel then
2670 * restores the true parent/child relationship.
2672 * Unfortunately, the wait*() family of APIs is unaware of the temporarily
2673 * data structures changes, and they return an error if reality hasn't
2674 * been restored by the time they are called.
2676 if (!job_assumes(j
, wait4(j
->p
, &status
, 0, &ru
) != -1)) {
2677 job_log(j
, LOG_NOTICE
, "Working around 5020256. Assuming the job crashed.");
2679 status
= W_EXITCODE(0, SIGSEGV
);
2680 memset(&ru
, 0, sizeof(ru
));
2684 if (j
->exit_timeout
) {
2685 kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
);
2688 LIST_REMOVE(j
, pid_hash_sle
);
2690 if (j
->wait_reply_port
) {
2691 job_log(j
, LOG_DEBUG
, "MPM wait reply being sent");
2692 job_assumes(j
, job_mig_wait_reply(j
->wait_reply_port
, 0, status
) == 0);
2693 j
->wait_reply_port
= MACH_PORT_NULL
;
2696 if( j
->pending_sample
) {
2697 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Job exited before we could sample it.");
2698 STAILQ_REMOVE(&j
->mgr
->pending_samples
, j
, job_s
, pending_samples_sle
);
2699 j
->pending_sample
= false;
2702 if (j
->sent_signal_time
) {
2703 uint64_t td_sec
, td_usec
, td
= runtime_get_nanoseconds_since(j
->sent_signal_time
);
2705 td_sec
= td
/ NSEC_PER_SEC
;
2706 td_usec
= (td
% NSEC_PER_SEC
) / NSEC_PER_USEC
;
2708 job_log(j
, LOG_DEBUG
, "Exited %llu.%06llu seconds after the first signal was sent", td_sec
, td_usec
);
2711 timeradd(&ru
.ru_utime
, &j
->ru
.ru_utime
, &j
->ru
.ru_utime
);
2712 timeradd(&ru
.ru_stime
, &j
->ru
.ru_stime
, &j
->ru
.ru_stime
);
2713 j
->ru
.ru_maxrss
+= ru
.ru_maxrss
;
2714 j
->ru
.ru_ixrss
+= ru
.ru_ixrss
;
2715 j
->ru
.ru_idrss
+= ru
.ru_idrss
;
2716 j
->ru
.ru_isrss
+= ru
.ru_isrss
;
2717 j
->ru
.ru_minflt
+= ru
.ru_minflt
;
2718 j
->ru
.ru_majflt
+= ru
.ru_majflt
;
2719 j
->ru
.ru_nswap
+= ru
.ru_nswap
;
2720 j
->ru
.ru_inblock
+= ru
.ru_inblock
;
2721 j
->ru
.ru_oublock
+= ru
.ru_oublock
;
2722 j
->ru
.ru_msgsnd
+= ru
.ru_msgsnd
;
2723 j
->ru
.ru_msgrcv
+= ru
.ru_msgrcv
;
2724 j
->ru
.ru_nsignals
+= ru
.ru_nsignals
;
2725 j
->ru
.ru_nvcsw
+= ru
.ru_nvcsw
;
2726 j
->ru
.ru_nivcsw
+= ru
.ru_nivcsw
;
2728 if (WIFEXITED(status
) && WEXITSTATUS(status
) != 0) {
2729 job_log(j
, LOG_WARNING
, "Exited with exit code: %d", WEXITSTATUS(status
));
2732 if (WIFSIGNALED(status
)) {
2733 int s
= WTERMSIG(status
);
2734 if ((SIGKILL
== s
|| SIGTERM
== s
) && !j
->stopped
) {
2735 job_log(j
, LOG_NOTICE
, "Exited: %s", strsignal(s
));
2736 } else if( !j
->stopped
&& !j
->clean_kill
) {
2738 /* Signals which indicate a crash. */
2745 /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
2746 * SIGTRAP, assume that it's a crash.
2750 job_log(j
, LOG_WARNING
, "Job appears to have crashed: %s", strsignal(s
));
2753 job_log(j
, LOG_WARNING
, "Exited abnormally: %s", strsignal(s
));
2757 if( is_system_bootstrapper
&& j
->crashed
) {
2758 job_log(j
, LOG_ERR
| LOG_CONSOLE
, "The %s bootstrapper has crashed: %s", j
->mgr
->name
, strsignal(s
));
2765 struct machservice
*msi
= NULL
;
2766 if( j
->crashed
|| !(j
->did_exec
|| j
->anonymous
) ) {
2767 SLIST_FOREACH( msi
, &j
->machservices
, sle
) {
2768 if( j
->crashed
&& !msi
->isActive
&& (msi
->drain_one_on_crash
|| msi
->drain_all_on_crash
) ) {
2769 machservice_drain_port(msi
);
2772 if( !j
->did_exec
&& msi
->reset
&& job_assumes(j
, !msi
->isActive
) ) {
2773 machservice_resetport(j
, msi
);
2778 struct suspended_peruser
*spi
= NULL
;
2779 while( (spi
= LIST_FIRST(&j
->suspended_perusers
)) ) {
2780 job_log(j
, LOG_ERR
, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi
->j
->mach_uid
);
2781 spi
->j
->peruser_suspend_count
--;
2782 if( spi
->j
->peruser_suspend_count
== 0 ) {
2783 job_dispatch(spi
->j
, false);
2785 LIST_REMOVE(spi
, sle
);
2789 struct waiting_for_exit
*w4e
= NULL
;
2790 while( (w4e
= LIST_FIRST(&j
->exit_watchers
)) ) {
2791 waiting4exit_delete(j
, w4e
);
2795 total_anon_children
--;
2796 if( j
->holds_ref
) {
2804 if( j
->has_console
) {
2808 if (j
->hopefully_exits_first
) {
2809 j
->mgr
->hopefully_first_cnt
--;
2810 } else if (!j
->anonymous
&& !j
->hopefully_exits_last
) {
2811 j
->mgr
->normal_active_cnt
--;
2813 j
->last_exit_status
= status
;
2814 j
->sent_signal_time
= 0;
2815 j
->sent_sigkill
= false;
2816 j
->clean_kill
= false;
2817 j
->sampling_complete
= false;
2818 j
->sent_kill_via_shmem
= false;
2819 j
->lastlookup
= NULL
;
2820 j
->lastlookup_gennum
= 0;
2824 * We need to someday evaluate other jobs and find those who wish to track the
2825 * active/inactive state of this job. The current job_dispatch() logic makes
2826 * this messy, given that jobs can be deleted at dispatch.
/* Dispatch every job in this manager and its sub-managers. When
 * newmounthack is set, jobs with StartOnMount get a pending start first
 * (used when a new volume appears). No-op while shutting down. */
2831 jobmgr_dispatch_all(jobmgr_t jm
, bool newmounthack
)
2836 if (jm
->shutting_down
) {
/* _SAFE iteration: dispatch/recursion may remove entries. */
2840 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
2841 jobmgr_dispatch_all(jmi
, newmounthack
);
2844 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
2845 if (newmounthack
&& ji
->start_on_mount
) {
2846 ji
->start_pending
= true;
2849 job_dispatch(ji
, false);
/* Fork a helper child that runs what_to_do(j) (e.g. take_sample), with a
 * socketpair handshake so the child waits until the parent has attached an
 * EVFILT_PROC/NOTE_EXIT kevent before proceeding.
 * NOTE(review): this excerpt elides some original lines (case labels,
 * returns, closing braces); only visible code is documented here.
 * BUG FIX: the final cleanup guard tested execpair[1] but closed
 * execpair[0] (already closed by the previous guard), leaking the
 * descriptor execpair[1]; it now closes execpair[1]. */
2854 basic_spawn(job_t j
, void (*what_to_do
)(job_t
))
2857 thread_state_flavor_t f
= 0;
2858 #if defined (__ppc__) || defined(__ppc64__)
2859 f
= PPC_THREAD_STATE64
;
2860 #elif defined(__i386__) || defined(__x86_64__)
2861 f
= x86_THREAD_STATE
;
2862 #elif defined(__arm__)
2863 f
= ARM_THREAD_STATE
;
2865 #error "unknown architecture"
2868 int execpair
[2] = { 0, 0 };
2869 job_assumes(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, execpair
) != -1);
2871 switch( (p
= fork()) ) {
/* Child: close the parent's end, block until the parent writes (kevent
 * attached), then run the payload; _exit if it returns. */
2873 job_assumes(j
, runtime_close(execpair
[0]) != -1);
2874 /* Wait for the parent to attach a kevent. */
2875 read(_fd(execpair
[1]), &p
, sizeof(p
));
2877 _exit(EXIT_FAILURE
);
/* fork() failed: close both ends and report. */
2879 job_assumes(j
, runtime_close(execpair
[0]) != -1);
2880 job_assumes(j
, runtime_close(execpair
[1]) != -1);
2883 job_log(j
, LOG_NOTICE
| LOG_CONSOLE
, "fork(2) failed: %d", errno
);
/* Parent: close the child's end of the pair. */
2886 job_assumes(j
, runtime_close(execpair
[1]) != -1);
2893 /* Let us know when sample is done. ONESHOT is implicit if we're just interested in NOTE_EXIT. */
2894 if( job_assumes(j
, (r
= kevent_mod(p
, EVFILT_PROC
, EV_ADD
, NOTE_EXIT
, 0, j
)) != -1) ) {
/* Release the child; if the wakeup write fails, tear the kevent down
 * and kill the child rather than leave it blocked forever. */
2895 if( !job_assumes(j
, write(execpair
[0], &p
, sizeof(p
)) == sizeof(p
)) ) {
2896 job_assumes(j
, kevent_mod(p
, EVFILT_PROC
, EV_DELETE
, 0, 0, NULL
) != -1);
2897 job_assumes(j
, runtime_kill(p
, SIGKILL
) != -1);
2902 job_assumes(j
, runtime_kill(p
, SIGKILL
) != -1);
2907 job_assumes(j
, waitpid(p
, &status
, WNOHANG
) != -1);
/* Final cleanup: close whichever ends of the pair are still open. */
2911 if( execpair
[0] != -1 ) {
2912 job_assumes(j
, runtime_close(execpair
[0]) != -1);
2915 if( execpair
[1] != -1 ) {
2916 job_assumes(j
, runtime_close(execpair
[1]) != -1);
/* Child-side payload for basic_spawn(): exec /usr/bin/sample against the
 * job's PID, writing the report to the manager's sample_log_file. Never
 * returns on success; _exit()s if execve() fails. */
2923 take_sample(job_t j
)
2926 snprintf(pidstr
, sizeof(pidstr
), "%u", j
->p
);
2927 #if !TARGET_OS_EMBEDDED
2928 /* -nodsyms so sample doesn't try to use Spotlight to find dsym files after mds has gone away. */
2929 char *sample_args
[] = { "/usr/bin/sample", pidstr
, "1", "-unsupportedShowArch", "-mayDie", "-nodsyms", "-file", j
->mgr
->sample_log_file
, NULL
};
2931 char *sample_args
[] = { "/usr/bin/sample", pidstr
, "1", "-unsupportedShowArch", "-mayDie", "-file", j
->mgr
->sample_log_file
, NULL
};
2934 execve(sample_args
[0], sample_args
, environ
);
2935 _exit(EXIT_FAILURE
);
/* Pop the next job off the manager's pending-samples queue and start
 * sampling it via basic_spawn(take_sample). Only one sample runs at a time;
 * failures mark the job sampling_complete and recurse to try the next one.
 * NOTE(review): the excerpt elides some early returns and braces. */
2939 jobmgr_dequeue_next_sample(jobmgr_t jm
)
2941 if( STAILQ_EMPTY(&jm
->pending_samples
) ) {
2942 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Sample queue is empty.");
2946 /* Dequeue the next in line. */
2947 job_t j
= STAILQ_FIRST(&jm
->pending_samples
);
2948 if( j
->is_being_sampled
) {
2949 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Sampling is in progress. Not dequeuing next job.");
2953 if( !job_assumes(j
, !j
->sampling_complete
) ) {
2957 if (!job_assumes(j
, do_apple_internal_logging
)) {
/* Sample reports live under SHUTDOWN_LOG_DIR; EEXIST is fine. */
2961 if (!job_assumes(j
, mkdir(SHUTDOWN_LOG_DIR
, S_IRWXU
) != -1 || errno
== EEXIST
)) {
2966 snprintf(pidstr
, sizeof(pidstr
), "%u", j
->p
);
2967 snprintf(j
->mgr
->sample_log_file
, sizeof(j
->mgr
->sample_log_file
), SHUTDOWN_LOG_DIR
"/%s-%u.sample.txt", j
->label
, j
->p
);
/* Remove a stale report from a previous run before spawning sample(1). */
2969 if (job_assumes(j
, unlink(jm
->sample_log_file
) != -1 || errno
== ENOENT
)) {
2970 pid_t sp
= basic_spawn(j
, take_sample
);
/* Spawn failed: give up on this job and move on to the next. */
2973 job_log(j
, LOG_ERR
| LOG_CONSOLE
, "Sampling for job failed!");
2974 STAILQ_REMOVE(&jm
->pending_samples
, j
, job_s
, pending_samples_sle
);
2975 j
->sampling_complete
= true;
2976 jobmgr_dequeue_next_sample(jm
);
/* Spawn succeeded: record the tracer PID; completion is detected via the
 * NOTE_EXIT kevent attached by basic_spawn(). */
2978 j
->tracing_pid
= sp
;
2979 j
->is_being_sampled
= true;
2980 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Sampling job (sample PID: %i, file: %s).", sp
, j
->mgr
->sample_log_file
);
2983 STAILQ_REMOVE(&jm
->pending_samples
, j
, job_s
, pending_samples_sle
);
2984 j
->sampling_complete
= true;
2987 j
->pending_sample
= false;
/* Dispatch jobs with OtherJobEnabled/OtherJobDisabled semaphores that watch
 * job j (matched by label prefix), since j's state just changed. */
2991 job_dispatch_curious_jobs(job_t j
)
2993 job_t ji
= NULL
, jt
= NULL
;
/* _SAFE: dispatching may remove 'ji' from the curious-jobs list. */
2994 SLIST_FOREACH_SAFE( ji
, &s_curious_jobs
, curious_jobs_sle
, jt
) {
2995 struct semaphoreitem
*si
= NULL
;
2996 SLIST_FOREACH( si
, &ji
->semaphores
, sle
) {
2997 if( !(si
->why
== OTHER_JOB_ENABLED
|| si
->why
== OTHER_JOB_DISABLED
) ) {
/* NOTE(review): strncmp with strlen(j->label) is a prefix match, not an
 * exact label match — a label that is a prefix of another would also
 * trigger; confirm this is intended. */
3001 if( strncmp(si
->what
, j
->label
, strlen(j
->label
)) == 0 ) {
3002 job_log(ji
, LOG_DEBUG
, "Dispatching out of interest in \"%s\".", j
->label
);
3004 job_dispatch(ji
, false);
3005 /* ji could be removed here, so don't do anything with it or its semaphores
/* Decide what to do with a job right now: start it (if kickstarted or its
 * keepalive criteria are met), watch it, remove it if useless, or leave it
 * alone if active. May delete j; callers must use the return value.
 * NOTE(review): the excerpt elides returns and the start/watch calls hidden
 * between the log lines. */
3015 job_dispatch(job_t j
, bool kickstart
)
3017 /* Don't dispatch a job if it has no audit session set. */
3018 if( !uuid_is_null(j
->expected_audit_uuid
) ) {
/* Embedded: privileged actions only apply to jobs owned by the same user
 * as the designated privileged job. */
3022 #if TARGET_OS_EMBEDDED
3023 if( g_embedded_privileged_action
&& s_embedded_privileged_job
) {
3024 if( !job_assumes(j
, s_embedded_privileged_job
->username
!= NULL
&& j
->username
!= NULL
) ) {
3029 if( strcmp(j
->username
, s_embedded_privileged_job
->username
) != 0 ) {
3033 } else if( g_embedded_privileged_action
) {
3040 * The whole job removal logic needs to be consolidated. The fact that
3041 * a job can be removed from just about anywhere makes it easy to have
3042 * stale pointers left behind somewhere on the stack that might get
3043 * used after the deallocation. In particular, during job iteration.
3045 * This is a classic example. The act of dispatching a job may delete it.
3047 if (!job_active(j
)) {
3048 if (job_useless(j
)) {
/* A suspended per-user launchd must not be (re)started. */
3052 if( unlikely(j
->per_user
&& j
->peruser_suspend_count
> 0) ) {
3056 if (kickstart
|| job_keepalive(j
)) {
3057 job_log(j
, LOG_DEBUG
, "Starting job (kickstart = %s)", kickstart
? "true" : "false");
3060 job_log(j
, LOG_DEBUG
, "Watching job (kickstart = %s)", kickstart
? "true" : "false");
3066 * Path checking and monitoring is really racy right now.
3067 * We should clean this up post Leopard.
3069 if (job_keepalive(j
)) {
3074 job_log(j
, LOG_DEBUG
, "Tried to dispatch an already active job (%s).", job_active(j
));
/* varargs helper for job_log_stdouterr(): forward one captured stdout/stderr
 * line to syslog, attributed to the job (label, manager name, PID). */
3081 job_log_stdouterr2(job_t j
, const char *msg
, ...)
3083 struct runtime_syslog_attr attr
= { j
->label
, j
->label
, j
->mgr
->name
, LOG_NOTICE
, getuid(), j
->p
, j
->p
};
3087 runtime_vsyslog(&attr
, msg
, ap
);
/* Drain the job's stdout/stderr redirection pipe and log each line. Closes
 * the pipe and re-dispatches the job when the peer end has closed.
 * NOTE(review): the excerpt elides the lines that NUL-terminate 'buf' and
 * initialize 'bufindex', plus the free(buf) — confirm in the full source. */
3092 job_log_stdouterr(job_t j
)
3094 char *msg
, *bufindex
, *buf
= malloc(BIG_PIPE_SIZE
+ 1);
3095 bool close_log_redir
= false;
3098 if (!job_assumes(j
, buf
!= NULL
)) {
3104 rsz
= read(j
->log_redirect_fd
, buf
, BIG_PIPE_SIZE
);
/* read() == 0 means EOF: the job's side of the pipe is gone. */
3106 if (unlikely(rsz
== 0)) {
3107 job_log(j
, LOG_DEBUG
, "Standard out/error pipe closed");
3108 close_log_redir
= true;
3109 } else if (rsz
== -1) {
/* The fd is O_NONBLOCK; anything other than EAGAIN is fatal here. */
3110 if( !job_assumes(j
, errno
== EAGAIN
) ) {
3111 close_log_redir
= true;
/* Split the captured buffer on newlines and log each line. */
3116 while ((msg
= strsep(&bufindex
, "\n\r"))) {
3118 job_log_stdouterr2(j
, "%s", msg
);
3125 if (unlikely(close_log_redir
)) {
3126 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
3127 j
->log_redirect_fd
= 0;
3128 job_dispatch(j
, false);
/* NOTE(review): the function header for this block was lost in extraction;
 * from the body this appears to be the SIGKILL helper: it kills the job's
 * process and arms an exit-timeout timer. Confirm against the full source. */
3135 if (unlikely(!j
->p
|| j
->anonymous
)) {
3139 job_assumes(j
, runtime_kill(j
->p
, SIGKILL
) != -1);
3141 j
->sent_sigkill
= true;
/* Clean kills get a shorter deadline before we escalate/complain. */
3143 intptr_t timer
= j
->clean_kill
? LAUNCHD_CLEAN_KILL_TIMER
: LAUNCHD_SIGKILL_TIMER
;
3144 job_assumes(j
, kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, timer
, j
) != -1);
3146 job_log(j
, LOG_DEBUG
, "Sent SIGKILL signal");
/* Apple-internal diagnostic: after a NOTE_FORK, find children of this job
 * that have not exec()ed (missing P_EXEC) and log them — they should be
 * using posix_spawn/pthreads instead of bare fork(). */
3150 job_log_children_without_exec(job_t j
)
3152 /* <rdar://problem/5701343> ER: Add a KERN_PROC_PPID sysctl */
/* Prefer the direct by-parent sysctl when available; otherwise fetch all
 * processes and filter by e_ppid below. */
3153 #ifdef KERN_PROC_PPID
3154 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PPID
, j
->p
};
3156 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_ALL
};
3158 size_t mib_sz
= sizeof(mib
) / sizeof(mib
[0]);
3159 size_t i
, kp_cnt
, len
= sizeof(struct kinfo_proc
) * get_kern_max_proc();
3160 struct kinfo_proc
*kp
;
3162 if (!do_apple_internal_logging
|| j
->anonymous
|| j
->per_user
) {
3166 if (!job_assumes(j
, (kp
= malloc(len
)) != NULL
)) {
3169 if (!job_assumes(j
, sysctl(mib
, (u_int
) mib_sz
, kp
, &len
, NULL
, 0) != -1)) {
3173 kp_cnt
= len
/ sizeof(struct kinfo_proc
);
3175 for (i
= 0; i
< kp_cnt
; i
++) {
/* Without KERN_PROC_PPID we fetched everything: skip non-children. */
3176 #ifndef KERN_PROC_PPID
3177 if (kp
[i
].kp_eproc
.e_ppid
!= j
->p
) {
/* P_EXEC set means the child has already exec()ed — not interesting. */
3181 if (kp
[i
].kp_proc
.p_flag
& P_EXEC
) {
3185 job_log(j
, LOG_DEBUG
, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u",
3186 kp
[i
].kp_proc
.p_pid
);
/* Run after a tracer (sample(1) or a ptrace() attacher) of this job exits:
 * reap the tracer, finish/kill as requested, and synthesize the job's own
 * NOTE_EXIT handling if the job died while it was being traced. */
3194 job_cleanup_after_tracer(job_t j
)
3197 if( j
->is_being_sampled
) {
3199 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "sample[%i] finished with job.", j
->tracing_pid
);
/* Reap the sample(1) child and sanity-check its exit status. */
3200 if( job_assumes(j
, waitpid(j
->tracing_pid
, &wstatus
, 0) != -1) ) {
3201 job_assumes(j
, WIFEXITED(wstatus
) && WEXITSTATUS(wstatus
) == 0);
3203 STAILQ_REMOVE(&j
->mgr
->pending_samples
, j
, job_s
, pending_samples_sle
);
/* The exit timeout fired while sampling: now that the sample is done,
 * optionally drop into the debugger, then kill the job. */
3205 if( j
->kill_after_sample
) {
3206 if (unlikely(j
->debug_before_kill
)) {
3207 job_log(j
, LOG_NOTICE
, "Exit timeout elapsed. Entering the kernel debugger");
3208 job_assumes(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
) == KERN_SUCCESS
);
3211 job_log(j
, LOG_NOTICE
, "Killing...");
3214 j
->sampling_complete
= true;
3215 j
->is_being_sampled
= false;
/* The job itself exited while traced: fake its NOTE_EXIT kevent now. */
3220 if( j
->reap_after_trace
) {
3221 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Reaping job now that attached tracer is gone.");
3223 EV_SET(&kev
, j
->p
, 0, 0, NOTE_EXIT
, 0, 0);
3225 /* Fake a kevent to keep our logic consistent. */
3226 job_callback_proc(j
, &kev
);
3228 /* Normally, after getting a EVFILT_PROC event, we do garbage collection
3229 * on the root job manager. To make our fakery complete, we will do garbage
3230 * collection at the beginning of the next run loop cycle (after we're done
3231 * draining the current queue of kevents).
3233 job_assumes(j
, kevent_mod((uintptr_t)&root_jobmgr
->reboot_flags
, EVFILT_TIMER
, EV_ADD
| EV_ONESHOT
, NOTE_NSECONDS
, 1, root_jobmgr
) != -1);
3237 jobmgr_dequeue_next_sample(jm
);
/* EVFILT_PROC kevent handler for a job: handles NOTE_EXIT (including the
 * traced/ptraced cases), NOTE_EXEC (anonymous jobs get relabeled after
 * exec), NOTE_FORK (log non-exec children), and NOTE_REAP.
 * NOTE(review): the excerpt elides returns and several closing braces. */
3242 job_callback_proc(job_t j
, struct kevent
*kev
)
3244 bool program_changed
= false;
3245 int fflags
= kev
->fflags
;
3247 job_log(j
, LOG_DEBUG
, "EVFILT_PROC event for job:");
3248 log_kevent_struct(LOG_DEBUG
, kev
, 0);
3250 if( fflags
& NOTE_EXIT
) {
/* Normal (untraced, non-anonymous) exit of the job's own PID. */
3251 if( j
->p
== (pid_t
)kev
->ident
&& !j
->anonymous
&& !j
->is_being_sampled
) {
3252 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, j
->p
};
3253 struct kinfo_proc kp
;
3254 size_t len
= sizeof(kp
);
3256 /* Sometimes, the kernel says it succeeded but really didn't. */
3257 if( job_assumes(j
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1) && len
== sizeof(kp
) ) {
/* If we're no longer the parent, a tracer has adopted the child. */
3258 if( !job_assumes(j
, kp
.kp_eproc
.e_ppid
== getpid()) ) {
3259 /* Someone has attached to the process with ptrace(). There's a race here.
3260 * If we determine that we are not the parent process and then fail to attach
3261 * a kevent to the parent PID (who is probably using ptrace()), we can take that as an
3262 * indication that the parent exited between sysctl(3) and kevent_mod(). The
3263 * reparenting of the PID should be atomic to us, so in that case, we reap the
3266 * Otherwise, we wait for the death of the parent tracer and then reap, just as we
3267 * would if a job died while we were sampling it at shutdown.
3269 * Note that we foolishly assume that in the process *tree* a node cannot be its
3270 * own parent. Apparently, that is not correct. If this is the case, we forsake
3271 * the process to its own devices. Let it reap itself.
3273 if( !job_assumes(j
, kp
.kp_eproc
.e_ppid
!= (pid_t
)kev
->ident
) ) {
3274 job_log(j
, LOG_WARNING
, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
3277 if( job_assumes(j
, kevent_mod(kp
.kp_eproc
.e_ppid
, EVFILT_PROC
, EV_ADD
, NOTE_EXIT
, 0, j
) != -1) ) {
3278 j
->tracing_pid
= kp
.kp_eproc
.e_ppid
;
3279 j
->reap_after_trace
= true;
/* The exiting PID is the tracer, not the job: clean up after it. */
3284 } else if( !j
->anonymous
) {
3285 if( j
->tracing_pid
== (pid_t
)kev
->ident
) {
3286 job_cleanup_after_tracer(j
);
3289 } else if( j
->tracing_pid
&& !j
->reap_after_trace
) {
3290 /* The job exited before our sample completed. */
3291 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Job has exited. Will reap after tracing PID %i exits.", j
->tracing_pid
);
3292 j
->reap_after_trace
= true;
3298 if (fflags
& NOTE_EXEC
) {
3299 program_changed
= true;
/* Anonymous job exec()ed: rename it after the new program image. */
3302 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, j
->p
};
3303 struct kinfo_proc kp
;
3304 size_t len
= sizeof(kp
);
3306 /* Sometimes, the kernel says it succeeded but really didn't. */
3307 if (job_assumes(j
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1) && len
== sizeof(kp
)) {
3308 char newlabel
[1000];
3310 snprintf(newlabel
, sizeof(newlabel
), "%p.anonymous.%s", j
, kp
.kp_proc
.p_comm
);
3312 job_log(j
, LOG_INFO
, "Program changed. Updating the label to: %s", newlabel
);
3313 j
->lastlookup
= NULL
;
3314 j
->lastlookup_gennum
= 0;
/* Re-hash under the new label. NOTE(review): strcpy() into j->label
 * assumes the label allocation can hold up to 1000 bytes — the buffer
 * size is not visible here; confirm against job_new_anonymous(). */
3316 LIST_REMOVE(j
, label_hash_sle
);
3317 strcpy((char *)j
->label
, newlabel
);
3318 LIST_INSERT_HEAD(&label_hash
[hash_label(j
->label
)], j
, label_hash_sle
);
3322 job_log(j
, LOG_DEBUG
, "Program changed");
3326 if (fflags
& NOTE_FORK
) {
3327 job_log(j
, LOG_DEBUG
, "fork()ed%s", program_changed
? ". For this message only: We don't know whether this event happened before or after execve()." : "");
3328 job_log_children_without_exec(j
);
3331 if (fflags
& NOTE_EXIT
) {
/* Re-dispatch after the exit is processed; job_dispatch() may free j. */
3334 if( !j
->anonymous
) {
3335 j
= job_dispatch(j
, false);
3342 if (j
&& (fflags
& NOTE_REAP
)) {
3343 job_assumes(j
, j
->p
== 0);
/* EVFILT_TIMER kevent handler for a job. The timer identity (ident) selects
 * the behavior: the job itself (respawn throttle), its semaphores, its
 * StartInterval, or its exit_timeout (sample/SIGKILL escalation).
 * NOTE(review): the excerpt elides returns and several closing braces. */
3348 job_callback_timer(job_t j
, void *ident
)
3351 job_log(j
, LOG_DEBUG
, "j == ident (%p)", ident
);
3352 job_dispatch(j
, true);
3353 } else if (&j
->semaphores
== ident
) {
3354 job_log(j
, LOG_DEBUG
, "&j->semaphores == ident (%p)", ident
);
3355 job_dispatch(j
, false);
3356 } else if (&j
->start_interval
== ident
) {
3357 job_log(j
, LOG_DEBUG
, "&j->start_interval == ident (%p)", ident
);
3358 j
->start_pending
= true;
3359 job_dispatch(j
, false);
3360 } else if (&j
->exit_timeout
== ident
) {
3361 if( !job_assumes(j
, j
->p
!= 0) ) {
/* A clean-killed job that survived SIGKILL: stop the timer and flag it. */
3365 if( j
->clean_kill
) {
3366 job_log(j
, LOG_ERR
| LOG_CONSOLE
, "Clean job failed to exit %u second after receiving SIGKILL.", LAUNCHD_CLEAN_KILL_TIMER
);
3367 job_assumes(j
, kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
));
3368 j
->clean_exit_timer_expired
= true;
3370 jobmgr_do_garbage_collection(j
->mgr
);
3375 * This block might be executed up to 3 times for a given (slow) job
3376 * - once for the SAMPLE_TIMEOUT timer, at which point sampling is triggered
3377 * - once for the exit_timeout timer, at which point:
3378 * - sampling is performed if not triggered previously
3379 * - SIGKILL is being sent to the job
3380 * - once for the SIGKILL_TIMER timer, at which point we log an issue
3381 * with the long SIGKILL
3385 /* Don't sample per-user launchd's. */
3386 j
->sampling_complete
= true;
3388 bool was_is_or_will_be_sampled
= ( j
->sampling_complete
|| j
->is_being_sampled
|| j
->pending_sample
);
3389 bool should_enqueue
= ( !was_is_or_will_be_sampled
&& do_apple_internal_logging
);
3391 if (j
->sent_sigkill
) {
/* Report how long past the SIGKILL deadline the job has lingered. */
3392 uint64_t td
= runtime_get_nanoseconds_since(j
->sent_signal_time
);
3395 td
-= j
->clean_kill
? 0 : j
->exit_timeout
;
3397 job_log(j
, LOG_WARNING
| LOG_CONSOLE
, "Did not die after sending SIGKILL %llu seconds ago...", td
);
/* Sample-timeout path: schedule a sample now and re-arm the exit timer
 * for the remaining portion of the exit timeout. */
3398 } else if( should_enqueue
&& (!j
->exit_timeout
|| (LAUNCHD_SAMPLE_TIMEOUT
< j
->exit_timeout
)) ) {
3399 /* This should work even if the job changes its exit_timeout midstream */
3400 job_log(j
, LOG_NOTICE
| LOG_CONSOLE
, "Sampling timeout elapsed (%u seconds). Scheduling a sample...", LAUNCHD_SAMPLE_TIMEOUT
);
3401 if (j
->exit_timeout
) {
3402 unsigned int ttk
= (j
->exit_timeout
- LAUNCHD_SAMPLE_TIMEOUT
);
3403 job_assumes(j
, kevent_mod((uintptr_t)&j
->exit_timeout
, EVFILT_TIMER
,
3404 EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, ttk
, j
) != -1);
3405 job_log(j
, LOG_NOTICE
| LOG_CONSOLE
, "Scheduled new exit timeout for %u seconds later", ttk
);
3408 STAILQ_INSERT_TAIL(&j
->mgr
->pending_samples
, j
, pending_samples_sle
);
3409 j
->pending_sample
= true;
3410 jobmgr_dequeue_next_sample(j
->mgr
);
/* Exit-timeout path: if a sample is still outstanding, defer the kill
 * until it completes; otherwise enqueue a sample and then kill. */
3412 if( do_apple_internal_logging
&& !j
->sampling_complete
) {
3413 if( j
->is_being_sampled
|| j
->pending_sample
) {
3414 char pidstr
[24] = { 0 };
3415 snprintf(pidstr
, sizeof(pidstr
), "[%i] ", j
->tracing_pid
);
3417 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Exit timeout elapsed (%u seconds). Will kill after sample%shas completed.", j
->exit_timeout
, j
->tracing_pid
? pidstr
: " ");
3418 j
->kill_after_sample
= true;
3420 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Exit timeout elapsed (%u seconds). Will sample and then kill.", j
->exit_timeout
);
3422 STAILQ_INSERT_TAIL(&j
->mgr
->pending_samples
, j
, pending_samples_sle
);
3423 j
->pending_sample
= true;
3426 jobmgr_dequeue_next_sample(j
->mgr
);
3428 if (unlikely(j
->debug_before_kill
)) {
3429 job_log(j
, LOG_NOTICE
, "Exit timeout elapsed. Entering the kernel debugger");
3430 job_assumes(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
) == KERN_SUCCESS
);
3432 job_log(j
, LOG_WARNING
| LOG_CONSOLE
, "Exit timeout elapsed (%u seconds). Killing", j
->exit_timeout
);
3434 jobmgr_do_garbage_collection(j
->mgr
);
/* Unknown timer identity: programming error. */
3438 job_assumes(j
, false);
/* EVFILT_READ kevent handler for a job: stdout/stderr pipe, stdin fd, or —
 * by default — one of the job's listening sockets. */
3443 job_callback_read(job_t j
, int ident
)
3445 if (ident
== j
->log_redirect_fd
) {
3446 job_log_stdouterr(j
);
3447 } else if (ident
== j
->stdin_fd
) {
3448 job_dispatch(j
, true);
3450 socketgroup_callback(j
);
/* Deliver an EVFILT_PROC kevent to whichever job (in this manager tree)
 * owns the PID in kev->ident. */
3455 jobmgr_reap_bulk(jobmgr_t jm
, struct kevent
*kev
)
/* Recurse into sub-managers first. */
3460 SLIST_FOREACH(jmi
, &jm
->submgrs
, sle
) {
3461 jobmgr_reap_bulk(jmi
, kev
);
3464 if ((j
= jobmgr_find_by_pid(jm
, (pid_t
)kev
->ident
, false))) {
3466 job_callback(j
, kev
);
/* Top-level kevent handler for a job manager: process exits, signals
 * (SIGTERM shutdown, anticipatory-shutdown hack), filesystem mount events,
 * periodic timers, and vnode events for the console and autofs.
 * NOTE(review): the excerpt elides case labels, returns and braces; the
 * dispatch structure shown here is partial. */
3471 jobmgr_callback(void *obj
, struct kevent
*kev
)
3476 switch (kev
->filter
) {
/* EVFILT_PROC: reap the exited PID, then GC the whole manager tree. */
3478 jobmgr_reap_bulk(jm
, kev
);
3479 root_jobmgr
= jobmgr_do_garbage_collection(root_jobmgr
);
3482 switch (kev
->ident
) {
3484 jobmgr_log(jm
, LOG_DEBUG
, "Got SIGTERM. Shutting down.");
3485 return launchd_shutdown();
3487 return calendarinterval_callback();
/* Fake-shutdown mode: pretend to shut down for logging/diagnostics. */
3489 fake_shutdown_in_progress
= true;
3490 runtime_setlogmask(LOG_UPTO(LOG_DEBUG
));
3492 runtime_closelog(); /* HACK -- force 'start' time to be set */
3495 int64_t now
= runtime_get_wall_time();
3497 jobmgr_log(jm
, LOG_NOTICE
, "Anticipatory shutdown began at: %lld.%06llu", now
/ USEC_PER_SEC
, now
% USEC_PER_SEC
);
/* Tell every running per-user launchd to begin anticipatory shutdown. */
3499 LIST_FOREACH(ji
, &root_jobmgr
->jobs
, sle
) {
3500 if (ji
->per_user
&& ji
->p
) {
3501 job_assumes(ji
, runtime_kill(ji
->p
, SIGUSR2
) != -1);
3505 jobmgr_log(jm
, LOG_NOTICE
, "Anticipatory per-user launchd shutdown");
3510 return (void)jobmgr_assumes(jm
, false);
/* EVFILT_FS: a mount may satisfy StartOnMount jobs or path semaphores. */
3514 if (kev
->fflags
& VQ_MOUNT
) {
3515 jobmgr_dispatch_all(jm
, true);
3517 jobmgr_dispatch_all_semaphores(jm
);
/* EVFILT_TIMER: distinguish by which address was used as the ident. */
3520 if( kev
->ident
== (uintptr_t)&sorted_calendar_events
) {
3521 calendarinterval_callback();
3522 } else if( kev
->ident
== (uintptr_t)jm
) {
3523 jobmgr_log(jm
, LOG_DEBUG
, "Shutdown timer firing.");
3524 jobmgr_still_alive_with_check(jm
);
3525 } else if( kev
->ident
== (uintptr_t)&jm
->reboot_flags
) {
3526 jobmgr_do_garbage_collection(jm
);
3527 } else if( kev
->ident
== (uintptr_t)&g_runtime_busy_time
) {
3528 jobmgr_log(jm
, LOG_DEBUG
, "Idle exit timer fired. Shutting down.");
3529 if( jobmgr_assumes(jm
, runtime_busy_cnt
== 0) ) {
3530 return launchd_shutdown();
/* EVFILT_VNODE: /dev/autofs_nowait appearing, or console re-open. */
3535 if( kev
->ident
== (uintptr_t)s_no_hang_fd
) {
3536 int _no_hang_fd
= open("/dev/autofs_nowait", O_EVTONLY
| O_NONBLOCK
);
3537 if( unlikely(_no_hang_fd
!= -1) ) {
3538 jobmgr_log(root_jobmgr
, LOG_DEBUG
, "/dev/autofs_nowait has appeared!");
3539 jobmgr_assumes(root_jobmgr
, kevent_mod((uintptr_t)s_no_hang_fd
, EVFILT_VNODE
, EV_DELETE
, 0, 0, NULL
) != -1);
3540 jobmgr_assumes(root_jobmgr
, runtime_close(s_no_hang_fd
) != -1);
3541 s_no_hang_fd
= _fd(_no_hang_fd
);
3543 } else if( pid1_magic
&& g_console
&& kev
->ident
== (uintptr_t)fileno(g_console
) ) {
3545 if( launchd_assumes((cfd
= open(_PATH_CONSOLE
, O_WRONLY
| O_NOCTTY
)) != -1) ) {
3547 if( !launchd_assumes((g_console
= fdopen(cfd
, "w")) != NULL
) ) {
3554 return (void)jobmgr_assumes(jm
, false);
/* Per-job kevent dispatcher: route the event to the proc/timer/vnode/read/
 * machport handler based on its filter. */
3559 job_callback(void *obj
, struct kevent
*kev
)
3563 job_log(j
, LOG_DEBUG
, "Dispatching kevent callback.");
3565 switch (kev
->filter
) {
3567 return job_callback_proc(j
, kev
);
3569 return job_callback_timer(j
, (void *) kev
->ident
);
/* EVFILT_VNODE events map to path-watch semaphores. */
3571 return semaphoreitem_callback(j
, kev
);
3573 return job_callback_read(j
, (int) kev
->ident
);
3574 case EVFILT_MACHPORT
:
3575 return (void)job_dispatch(j
, true);
/* Unknown filter: programming error. */
3577 return (void)job_assumes(j
, false);
3591 u_int proc_fflags
= NOTE_EXIT
|NOTE_FORK
|NOTE_EXEC
|NOTE_REAP
;
3593 if (!job_assumes(j
, j
->mgr
!= NULL
)) {
3597 if (unlikely(job_active(j
))) {
3598 job_log(j
, LOG_DEBUG
, "Already started");
3603 * Some users adjust the wall-clock and then expect software to not notice.
3604 * Therefore, launchd must use an absolute clock instead of the wall clock
3605 * wherever possible.
3607 td
= runtime_get_nanoseconds_since(j
->start_time
);
3610 if (j
->start_time
&& (td
< j
->min_run_time
) && !j
->legacy_mach_job
&& !j
->inetcompat
) {
3611 time_t respawn_delta
= j
->min_run_time
- (uint32_t)td
;
3614 * We technically should ref-count throttled jobs to prevent idle exit,
3615 * but we're not directly tracking the 'throttled' state at the moment.
3618 job_log(j
, LOG_WARNING
, "Throttling respawn: Will start in %ld seconds", respawn_delta
);
3619 job_assumes(j
, kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, respawn_delta
, j
) != -1);
3624 if (likely(!j
->legacy_mach_job
)) {
3625 sipc
= ((!SLIST_EMPTY(&j
->sockets
) || !SLIST_EMPTY(&j
->machservices
)) && !j
->deny_job_creation
) || j
->embedded_special_privileges
;
3629 job_assumes(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, spair
) != -1);
3632 job_assumes(j
, socketpair(AF_UNIX
, SOCK_STREAM
, 0, execspair
) != -1);
3634 if (likely(!j
->legacy_mach_job
) && job_assumes(j
, pipe(oepair
) != -1)) {
3635 j
->log_redirect_fd
= _fd(oepair
[0]);
3636 job_assumes(j
, fcntl(j
->log_redirect_fd
, F_SETFL
, O_NONBLOCK
) != -1);
3637 job_assumes(j
, kevent_mod(j
->log_redirect_fd
, EVFILT_READ
, EV_ADD
, 0, 0, j
) != -1);
3640 switch (c
= runtime_fork(j
->weird_bootstrap
? j
->j_port
: j
->mgr
->jm_port
)) {
3642 job_log_error(j
, LOG_ERR
, "fork() failed, will try again in one second");
3643 job_assumes(j
, kevent_mod((uintptr_t)j
, EVFILT_TIMER
, EV_ADD
|EV_ONESHOT
, NOTE_SECONDS
, 1, j
) != -1);
3646 job_assumes(j
, runtime_close(execspair
[0]) == 0);
3647 job_assumes(j
, runtime_close(execspair
[1]) == 0);
3649 job_assumes(j
, runtime_close(spair
[0]) == 0);
3650 job_assumes(j
, runtime_close(spair
[1]) == 0);
3652 if (likely(!j
->legacy_mach_job
)) {
3653 job_assumes(j
, runtime_close(oepair
[0]) != -1);
3654 job_assumes(j
, runtime_close(oepair
[1]) != -1);
3655 j
->log_redirect_fd
= 0;
3659 if (unlikely(_vproc_post_fork_ping())) {
3660 _exit(EXIT_FAILURE
);
3662 if (!j
->legacy_mach_job
) {
3663 job_assumes(j
, dup2(oepair
[1], STDOUT_FILENO
) != -1);
3664 job_assumes(j
, dup2(oepair
[1], STDERR_FILENO
) != -1);
3665 job_assumes(j
, runtime_close(oepair
[1]) != -1);
3667 job_assumes(j
, runtime_close(execspair
[0]) == 0);
3668 /* wait for our parent to say they've attached a kevent to us */
3669 read(_fd(execspair
[1]), &c
, sizeof(c
));
3672 job_assumes(j
, runtime_close(spair
[0]) == 0);
3673 snprintf(nbuf
, sizeof(nbuf
), "%d", spair
[1]);
3674 setenv(LAUNCHD_TRUSTED_FD_ENV
, nbuf
, 1);
3679 j
->start_time
= runtime_get_opaque_time();
3681 job_log(j
, LOG_DEBUG
, "Started as PID: %u", c
);
3683 j
->did_exec
= false;
3684 j
->checkedin
= false;
3685 j
->start_pending
= false;
3689 if( j
->needs_kickoff
) {
3690 j
->needs_kickoff
= false;
3692 if( SLIST_EMPTY(&j
->semaphores
) ) {
3693 j
->ondemand
= false;
3697 if( j
->has_console
) {
3703 LIST_INSERT_HEAD(&j
->mgr
->active_jobs
[ACTIVE_JOB_HASH(c
)], j
, pid_hash_sle
);
3705 if (likely(!j
->legacy_mach_job
)) {
3706 job_assumes(j
, runtime_close(oepair
[1]) != -1);
3709 if (unlikely(j
->hopefully_exits_first
)) {
3710 j
->mgr
->hopefully_first_cnt
++;
3711 } else if (likely(!j
->hopefully_exits_last
)) {
3712 j
->mgr
->normal_active_cnt
++;
3714 j
->fork_fd
= _fd(execspair
[0]);
3715 job_assumes(j
, runtime_close(execspair
[1]) == 0);
3717 job_assumes(j
, runtime_close(spair
[1]) == 0);
3718 ipc_open(_fd(spair
[0]), j
);
3720 if (job_assumes(j
, kevent_mod(c
, EVFILT_PROC
, EV_ADD
, proc_fflags
, 0, root_jobmgr
? root_jobmgr
: j
->mgr
) != -1)) {
3726 j
->wait4debugger_oneshot
= false;
3728 struct envitem
*ei
= NULL
, *et
= NULL
;
3729 SLIST_FOREACH_SAFE( ei
, &j
->env
, sle
, et
) {
3730 if( ei
->one_shot
) {
3731 SLIST_REMOVE(&j
->env
, ei
, envitem
, sle
);
3735 if (likely(!j
->stall_before_exec
)) {
3743 job_start_child(job_t j
)
3745 typeof(posix_spawn
) *psf
;
3746 const char *file2exec
= "/usr/libexec/launchproxy";
3748 posix_spawnattr_t spattr
;
3749 int gflags
= GLOB_NOSORT
|GLOB_NOCHECK
|GLOB_TILDE
|GLOB_DOOFFS
;
3751 short spflags
= POSIX_SPAWN_SETEXEC
;
3752 size_t binpref_out_cnt
= 0;
3755 job_assumes(j
, posix_spawnattr_init(&spattr
) == 0);
3757 job_setup_attributes(j
);
3759 if (unlikely(j
->argv
&& j
->globargv
)) {
3761 for (i
= 0; i
< j
->argc
; i
++) {
3763 gflags
|= GLOB_APPEND
;
3765 if (glob(j
->argv
[i
], gflags
, NULL
, &g
) != 0) {
3766 job_log_error(j
, LOG_ERR
, "glob(\"%s\")", j
->argv
[i
]);
3770 g
.gl_pathv
[0] = (char *)file2exec
;
3771 argv
= (const char **)g
.gl_pathv
;
3772 } else if (likely(j
->argv
)) {
3773 argv
= alloca((j
->argc
+ 2) * sizeof(char *));
3774 argv
[0] = file2exec
;
3775 for (i
= 0; i
< j
->argc
; i
++) {
3776 argv
[i
+ 1] = j
->argv
[i
];
3780 argv
= alloca(3 * sizeof(char *));
3781 argv
[0] = file2exec
;
3786 if (likely(!j
->inetcompat
)) {
3790 if (unlikely(j
->wait4debugger
|| j
->wait4debugger_oneshot
)) {
3791 job_log(j
, LOG_WARNING
, "Spawned and waiting for the debugger to attach before continuing...");
3792 spflags
|= POSIX_SPAWN_START_SUSPENDED
;
3795 job_assumes(j
, posix_spawnattr_setflags(&spattr
, spflags
) == 0);
3797 if (unlikely(j
->j_binpref_cnt
)) {
3798 job_assumes(j
, posix_spawnattr_setbinpref_np(&spattr
, j
->j_binpref_cnt
, j
->j_binpref
, &binpref_out_cnt
) == 0);
3799 job_assumes(j
, binpref_out_cnt
== j
->j_binpref_cnt
);
3803 if (j
->quarantine_data
) {
3806 if (job_assumes(j
, qp
= qtn_proc_alloc())) {
3807 if (job_assumes(j
, qtn_proc_init_with_data(qp
, j
->quarantine_data
, j
->quarantine_data_sz
) == 0)) {
3808 job_assumes(j
, qtn_proc_apply_to_self(qp
) == 0);
3815 if (j
->seatbelt_profile
) {
3816 char *seatbelt_err_buf
= NULL
;
3818 if (!job_assumes(j
, sandbox_init(j
->seatbelt_profile
, j
->seatbelt_flags
, &seatbelt_err_buf
) != -1)) {
3819 if (seatbelt_err_buf
) {
3820 job_log(j
, LOG_ERR
, "Sandbox failed to init: %s", seatbelt_err_buf
);
3827 psf
= j
->prog
? posix_spawn
: posix_spawnp
;
3829 if (likely(!j
->inetcompat
)) {
3830 file2exec
= j
->prog
? j
->prog
: argv
[0];
3833 errno
= psf(NULL
, file2exec
, NULL
, &spattr
, (char *const*)argv
, environ
);
3834 job_log_error(j
, LOG_ERR
, "posix_spawn(\"%s\", ...)", file2exec
);
3839 _exit(EXIT_FAILURE
);
3843 jobmgr_export_env_from_other_jobs(jobmgr_t jm
, launch_data_t dict
)
3849 if (jm
->parentmgr
) {
3850 jobmgr_export_env_from_other_jobs(jm
->parentmgr
, dict
);
3852 char **tmpenviron
= environ
;
3853 for (; *tmpenviron
; tmpenviron
++) {
3855 launch_data_t s
= launch_data_alloc(LAUNCH_DATA_STRING
);
3856 launch_data_set_string(s
, strchr(*tmpenviron
, '=') + 1);
3857 strncpy(envkey
, *tmpenviron
, sizeof(envkey
));
3858 *(strchr(envkey
, '=')) = '\0';
3859 launch_data_dict_insert(dict
, s
, envkey
);
3863 LIST_FOREACH(ji
, &jm
->jobs
, sle
) {
3864 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
3865 if ((tmp
= launch_data_new_string(ei
->value
))) {
3866 launch_data_dict_insert(dict
, tmp
, ei
->key
);
3873 jobmgr_setup_env_from_other_jobs(jobmgr_t jm
)
3878 if (jm
->parentmgr
) {
3879 jobmgr_setup_env_from_other_jobs(jm
->parentmgr
);
3882 LIST_FOREACH(ji
, &jm
->global_env_jobs
, global_env_sle
) {
3883 SLIST_FOREACH(ei
, &ji
->global_env
, sle
) {
3884 setenv(ei
->key
, ei
->value
, 1);
3890 job_log_pids_with_weird_uids(job_t j
)
3892 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_ALL
};
3893 size_t i
, kp_cnt
, len
= sizeof(struct kinfo_proc
) * get_kern_max_proc();
3894 struct kinfo_proc
*kp
;
3895 uid_t u
= j
->mach_uid
;
3897 if (!do_apple_internal_logging
) {
3903 if (!job_assumes(j
, kp
!= NULL
)) {
3907 runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS
, j
->p
, u
, 0);
3909 if (!job_assumes(j
, sysctl(mib
, 3, kp
, &len
, NULL
, 0) != -1)) {
3913 kp_cnt
= len
/ sizeof(struct kinfo_proc
);
3915 for (i
= 0; i
< kp_cnt
; i
++) {
3916 uid_t i_euid
= kp
[i
].kp_eproc
.e_ucred
.cr_uid
;
3917 uid_t i_uid
= kp
[i
].kp_eproc
.e_pcred
.p_ruid
;
3918 uid_t i_svuid
= kp
[i
].kp_eproc
.e_pcred
.p_svuid
;
3919 pid_t i_pid
= kp
[i
].kp_proc
.p_pid
;
3921 if (i_euid
!= u
&& i_uid
!= u
&& i_svuid
!= u
) {
3925 job_log(j
, LOG_ERR
, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u",
3926 i_pid
, kp
[i
].kp_proc
.p_comm
, i_uid
, i_euid
, i_svuid
);
3928 /* Temporarily disabled due to 5423935 and 4946119. */
3930 /* Ask the accountless process to exit. */
3931 job_assumes(j
, runtime_kill(i_pid
, SIGTERM
) != -1);
3940 job_postfork_test_user(job_t j
)
3942 /* This function is all about 5201578 */
3944 const char *home_env_var
= getenv("HOME");
3945 const char *user_env_var
= getenv("USER");
3946 const char *logname_env_var
= getenv("LOGNAME");
3947 uid_t tmp_uid
, local_uid
= getuid();
3948 gid_t tmp_gid
, local_gid
= getgid();
3949 char shellpath
[PATH_MAX
];
3950 char homedir
[PATH_MAX
];
3951 char loginname
[2000];
3955 if (!job_assumes(j
, home_env_var
&& user_env_var
&& logname_env_var
3956 && strcmp(user_env_var
, logname_env_var
) == 0)) {
3960 if ((pwe
= getpwnam(user_env_var
)) == NULL
) {
3961 job_log(j
, LOG_ERR
, "The account \"%s\" has been deleted out from under us!", user_env_var
);
3966 * We must copy the results of getpw*().
3968 * Why? Because subsequent API calls may call getpw*() as a part of
3969 * their implementation. Since getpw*() returns a [now thread scoped]
3970 * global, we must therefore cache the results before continuing.
3973 tmp_uid
= pwe
->pw_uid
;
3974 tmp_gid
= pwe
->pw_gid
;
3976 strlcpy(shellpath
, pwe
->pw_shell
, sizeof(shellpath
));
3977 strlcpy(loginname
, pwe
->pw_name
, sizeof(loginname
));
3978 strlcpy(homedir
, pwe
->pw_dir
, sizeof(homedir
));
3980 if (strcmp(loginname
, logname_env_var
) != 0) {
3981 job_log(j
, LOG_ERR
, "The %s environmental variable changed out from under us!", "USER");
3984 if (strcmp(homedir
, home_env_var
) != 0) {
3985 job_log(j
, LOG_ERR
, "The %s environmental variable changed out from under us!", "HOME");
3988 if (local_uid
!= tmp_uid
) {
3989 job_log(j
, LOG_ERR
, "The %cID of the account (%u) changed out from under us (%u)!",
3990 'U', tmp_uid
, local_uid
);
3993 if (local_gid
!= tmp_gid
) {
3994 job_log(j
, LOG_ERR
, "The %cID of the account (%u) changed out from under us (%u)!",
3995 'G', tmp_gid
, local_gid
);
4002 job_assumes(j
, runtime_kill(getppid(), SIGTERM
) != -1);
4003 _exit(EXIT_FAILURE
);
4005 job_log(j
, LOG_WARNING
, "In a future build of the OS, this error will be fatal.");
4010 job_postfork_become_user(job_t j
)
4012 char loginname
[2000];
4013 char tmpdirpath
[PATH_MAX
];
4014 char shellpath
[PATH_MAX
];
4015 char homedir
[PATH_MAX
];
4018 gid_t desired_gid
= -1;
4019 uid_t desired_uid
= -1;
4021 if (getuid() != 0) {
4022 return job_postfork_test_user(j
);
4026 * I contend that having UID == 0 and GID != 0 is of dubious value.
4027 * Nevertheless, this used to work in Tiger. See: 5425348
4029 if (j
->groupname
&& !j
->username
) {
4030 j
->username
= "root";
4034 if ((pwe
= getpwnam(j
->username
)) == NULL
) {
4035 job_log(j
, LOG_ERR
, "getpwnam(\"%s\") failed", j
->username
);
4036 _exit(EXIT_FAILURE
);
4038 } else if (j
->mach_uid
) {
4039 if ((pwe
= getpwuid(j
->mach_uid
)) == NULL
) {
4040 job_log(j
, LOG_ERR
, "getpwuid(\"%u\") failed", j
->mach_uid
);
4041 job_log_pids_with_weird_uids(j
);
4042 _exit(EXIT_FAILURE
);
4049 * We must copy the results of getpw*().
4051 * Why? Because subsequent API calls may call getpw*() as a part of
4052 * their implementation. Since getpw*() returns a [now thread scoped]
4053 * global, we must therefore cache the results before continuing.
4056 desired_uid
= pwe
->pw_uid
;
4057 desired_gid
= pwe
->pw_gid
;
4059 strlcpy(shellpath
, pwe
->pw_shell
, sizeof(shellpath
));
4060 strlcpy(loginname
, pwe
->pw_name
, sizeof(loginname
));
4061 strlcpy(homedir
, pwe
->pw_dir
, sizeof(homedir
));
4063 if (unlikely(pwe
->pw_expire
&& time(NULL
) >= pwe
->pw_expire
)) {
4064 job_log(j
, LOG_ERR
, "Expired account");
4065 _exit(EXIT_FAILURE
);
4069 if (unlikely(j
->username
&& strcmp(j
->username
, loginname
) != 0)) {
4070 job_log(j
, LOG_WARNING
, "Suspicious setup: User \"%s\" maps to user: %s", j
->username
, loginname
);
4071 } else if (unlikely(j
->mach_uid
&& (j
->mach_uid
!= desired_uid
))) {
4072 job_log(j
, LOG_WARNING
, "Suspicious setup: UID %u maps to UID %u", j
->mach_uid
, desired_uid
);
4078 if (unlikely((gre
= getgrnam(j
->groupname
)) == NULL
)) {
4079 job_log(j
, LOG_ERR
, "getgrnam(\"%s\") failed", j
->groupname
);
4080 _exit(EXIT_FAILURE
);
4083 desired_gid
= gre
->gr_gid
;
4086 if (!job_assumes(j
, setlogin(loginname
) != -1)) {
4087 _exit(EXIT_FAILURE
);
4090 if (!job_assumes(j
, setgid(desired_gid
) != -1)) {
4091 _exit(EXIT_FAILURE
);
4095 * The kernel team and the DirectoryServices team want initgroups()
4096 * called after setgid(). See 4616864 for more information.
4099 if (likely(!j
->no_init_groups
)) {
4101 if (!job_assumes(j
, initgroups(loginname
, desired_gid
) != -1)) {
4102 _exit(EXIT_FAILURE
);
4105 /* Do our own little initgroups(). We do this to guarantee that we're
4106 * always opted into dynamic group resolution in the kernel. initgroups(3)
4107 * does not make this guarantee.
4109 int groups
[NGROUPS
], ngroups
;
4111 /* A failure here isn't fatal, and we'll still get data we can use. */
4112 job_assumes(j
, getgrouplist(j
->username
, desired_gid
, groups
, &ngroups
) != -1);
4114 if( !job_assumes(j
, syscall(SYS_initgroups
, ngroups
, groups
, desired_uid
) != -1) ) {
4115 _exit(EXIT_FAILURE
);
4120 if (!job_assumes(j
, setuid(desired_uid
) != -1)) {
4121 _exit(EXIT_FAILURE
);
4124 r
= confstr(_CS_DARWIN_USER_TEMP_DIR
, tmpdirpath
, sizeof(tmpdirpath
));
4126 if (likely(r
> 0 && r
< sizeof(tmpdirpath
))) {
4127 setenv("TMPDIR", tmpdirpath
, 0);
4130 setenv("SHELL", shellpath
, 0);
4131 setenv("HOME", homedir
, 0);
4132 setenv("USER", loginname
, 0);
4133 setenv("LOGNAME", loginname
, 0);
4137 job_setup_attributes(job_t j
)
4139 struct limititem
*li
;
4142 if (unlikely(j
->setnice
)) {
4143 job_assumes(j
, setpriority(PRIO_PROCESS
, 0, j
->nice
) != -1);
4146 SLIST_FOREACH(li
, &j
->limits
, sle
) {
4149 if (!job_assumes(j
, getrlimit(li
->which
, &rl
) != -1)) {
4154 rl
.rlim_max
= li
->lim
.rlim_max
;
4157 rl
.rlim_cur
= li
->lim
.rlim_cur
;
4160 if (setrlimit(li
->which
, &rl
) == -1) {
4161 job_log_error(j
, LOG_WARNING
, "setrlimit()");
4165 #if !TARGET_OS_EMBEDDED
4166 if( unlikely(j
->per_user
) ) {
4167 auditinfo_addr_t auinfo
= {
4168 .ai_termid
= { .at_type
= AU_IPv4
},
4169 .ai_auid
= j
->mach_uid
,
4170 .ai_asid
= AU_ASSIGN_ASID
,
4172 (void)au_user_mask(j
->username
, &auinfo
.ai_mask
);
4174 if( !launchd_assumes(setaudit_addr(&auinfo
, sizeof(auinfo
)) != -1) ) {
4175 runtime_syslog(LOG_WARNING
, "Could not set audit session! (errno = %d)", errno
);
4176 _exit(EXIT_FAILURE
);
4178 job_log(j
, LOG_DEBUG
, "Created new security session for per-user launchd.");
4183 if (unlikely(!j
->inetcompat
&& j
->session_create
)) {
4184 launchd_SessionCreate();
4187 if (unlikely(j
->low_pri_io
)) {
4188 job_assumes(j
, setiopolicy_np(IOPOL_TYPE_DISK
, IOPOL_SCOPE_PROCESS
, IOPOL_THROTTLE
) != -1);
4190 if (unlikely(j
->rootdir
)) {
4191 job_assumes(j
, chroot(j
->rootdir
) != -1);
4192 job_assumes(j
, chdir(".") != -1);
4195 job_postfork_become_user(j
);
4197 if (unlikely(j
->workingdir
)) {
4198 job_assumes(j
, chdir(j
->workingdir
) != -1);
4201 if (unlikely(j
->setmask
)) {
4206 job_assumes(j
, dup2(j
->stdin_fd
, STDIN_FILENO
) != -1);
4208 job_setup_fd(j
, STDIN_FILENO
, j
->stdinpath
, O_RDONLY
|O_CREAT
);
4210 job_setup_fd(j
, STDOUT_FILENO
, j
->stdoutpath
, O_WRONLY
|O_CREAT
|O_APPEND
);
4211 job_setup_fd(j
, STDERR_FILENO
, j
->stderrpath
, O_WRONLY
|O_CREAT
|O_APPEND
);
4213 jobmgr_setup_env_from_other_jobs(j
->mgr
);
4215 SLIST_FOREACH(ei
, &j
->env
, sle
) {
4216 setenv(ei
->key
, ei
->value
, 1);
4219 if( do_apple_internal_logging
) {
4220 setenv(LAUNCHD_DO_APPLE_INTERNAL_LOGGING
, "true", 1);
4223 #if !TARGET_OS_EMBEDDED
4224 if( j
->jetsam_properties
) {
4225 job_assumes(j
, proc_setpcontrol(PROC_SETPC_TERMINATE
) == 0);
4229 #if TARGET_OS_EMBEDDED
4230 if( j
->main_thread_priority
!= 0 ) {
4231 struct sched_param params
;
4232 bzero(¶ms
, sizeof(params
));
4233 params
.sched_priority
= j
->main_thread_priority
;
4234 job_assumes(j
, pthread_setschedparam(pthread_self(), SCHED_OTHER
, ¶ms
) != -1);
4239 * We'd like to call setsid() unconditionally, but we have reason to
4240 * believe that prevents launchd from being able to send signals to
4241 * setuid children. We'll settle for process-groups.
4243 if (getppid() != 1) {
4244 job_assumes(j
, setpgid(0, 0) != -1);
4246 job_assumes(j
, setsid() != -1);
4251 job_setup_fd(job_t j
, int target_fd
, const char *path
, int flags
)
4259 if ((fd
= open(path
, flags
|O_NOCTTY
, DEFFILEMODE
)) == -1) {
4260 job_log_error(j
, LOG_WARNING
, "open(\"%s\", ...)", path
);
4264 job_assumes(j
, dup2(fd
, target_fd
) != -1);
4265 job_assumes(j
, runtime_close(fd
) == 0);
4269 dir_has_files(job_t j
, const char *path
)
4271 DIR *dd
= opendir(path
);
4275 if (unlikely(!dd
)) {
4279 while ((de
= readdir(dd
))) {
4280 if (strcmp(de
->d_name
, ".") && strcmp(de
->d_name
, "..")) {
4286 job_assumes(j
, closedir(dd
) == 0);
4291 calendarinterval_setalarm(job_t j
, struct calendarinterval
*ci
)
4293 struct calendarinterval
*ci_iter
, *ci_prev
= NULL
;
4294 time_t later
, head_later
;
4296 later
= cronemu(ci
->when
.tm_mon
, ci
->when
.tm_mday
, ci
->when
.tm_hour
, ci
->when
.tm_min
);
4298 if (ci
->when
.tm_wday
!= -1) {
4299 time_t otherlater
= cronemu_wday(ci
->when
.tm_wday
, ci
->when
.tm_hour
, ci
->when
.tm_min
);
4301 if (ci
->when
.tm_mday
== -1) {
4304 later
= later
< otherlater
? later
: otherlater
;
4308 ci
->when_next
= later
;
4310 LIST_FOREACH(ci_iter
, &sorted_calendar_events
, global_sle
) {
4311 if (ci
->when_next
< ci_iter
->when_next
) {
4312 LIST_INSERT_BEFORE(ci_iter
, ci
, global_sle
);
4319 if (ci_iter
== NULL
) {
4320 /* ci must want to fire after every other timer, or there are no timers */
4322 if (LIST_EMPTY(&sorted_calendar_events
)) {
4323 LIST_INSERT_HEAD(&sorted_calendar_events
, ci
, global_sle
);
4325 LIST_INSERT_AFTER(ci_prev
, ci
, global_sle
);
4329 head_later
= LIST_FIRST(&sorted_calendar_events
)->when_next
;
4331 if (job_assumes(j
, kevent_mod((uintptr_t)&sorted_calendar_events
, EVFILT_TIMER
, EV_ADD
, NOTE_ABSOLUTE
|NOTE_SECONDS
, head_later
, root_jobmgr
) != -1)) {
4332 char time_string
[100];
4333 size_t time_string_len
;
4335 ctime_r(&later
, time_string
);
4336 time_string_len
= strlen(time_string
);
4338 if (likely(time_string_len
&& time_string
[time_string_len
- 1] == '\n')) {
4339 time_string
[time_string_len
- 1] = '\0';
4342 job_log(j
, LOG_INFO
, "Scheduled to run again at %s", time_string
);
4347 extract_rcsid_substr(const char *i
, char *o
, size_t osz
)
4349 char *rcs_rev_tmp
= strchr(i
, ' ');
4354 strlcpy(o
, rcs_rev_tmp
+ 1, osz
);
4355 rcs_rev_tmp
= strchr(o
, ' ');
4357 *rcs_rev_tmp
= '\0';
4363 jobmgr_log_bug(jobmgr_t jm
, unsigned int line
)
4365 static const char *file
;
4366 int saved_errno
= errno
;
4369 runtime_ktrace1(RTKT_LAUNCHD_BUG
);
4371 extract_rcsid_substr(__rcs_file_version__
, buf
, sizeof(buf
));
4374 file
= strrchr(__FILE__
, '/');
4382 /* the only time 'jm' should not be set is if setting up the first bootstrap fails for some reason */
4384 jobmgr_log(jm
, LOG_NOTICE
, "Bug: %s:%u (%s):%u", file
, line
, buf
, saved_errno
);
4386 runtime_syslog(LOG_NOTICE
, "Bug: %s:%u (%s):%u", file
, line
, buf
, saved_errno
);
4391 job_log_bug(job_t j
, unsigned int line
)
4393 static const char *file
;
4394 int saved_errno
= errno
;
4397 runtime_ktrace1(RTKT_LAUNCHD_BUG
);
4399 extract_rcsid_substr(__rcs_file_version__
, buf
, sizeof(buf
));
4402 file
= strrchr(__FILE__
, '/');
4410 /* I cannot think of any reason why 'j' should ever be NULL, nor have I ever seen the case in the wild */
4412 job_log(j
, LOG_NOTICE
, "Bug: %s:%u (%s):%u", file
, line
, buf
, saved_errno
);
4414 runtime_syslog(LOG_NOTICE
, "Bug: %s:%u (%s):%u", file
, line
, buf
, saved_errno
);
4419 job_logv(job_t j
, int pri
, int err
, const char *msg
, va_list ap
)
4421 const char *label2use
= j
? j
->label
: "com.apple.launchd.NULL";
4422 const char *mgr2use
= j
? j
->mgr
->name
: "NULL";
4423 struct runtime_syslog_attr attr
= { g_my_label
, label2use
, mgr2use
, pri
, getuid(), getpid(), j
? j
->p
: 0 };
4429 * Hack: If bootstrap_port is set, we must be on the child side of a
4430 * fork(), but before the exec*(). Let's route the log message back to
4433 if (bootstrap_port
) {
4434 return _vproc_logv(pri
, err
, msg
, ap
);
4437 newmsgsz
= strlen(msg
) + 200;
4438 newmsg
= alloca(newmsgsz
);
4441 #if !TARGET_OS_EMBEDDED
4442 snprintf(newmsg
, newmsgsz
, "%s: %s", msg
, strerror(err
));
4444 snprintf(newmsg
, newmsgsz
, "(%s) %s: %s", label2use
, msg
, strerror(err
));
4447 #if !TARGET_OS_EMBEDDED
4448 snprintf(newmsg
, newmsgsz
, "%s", msg
);
4450 snprintf(newmsg
, newmsgsz
, "(%s) %s", label2use
, msg
);
4454 if( j
&& unlikely(j
->debug
) ) {
4455 oldmask
= setlogmask(LOG_UPTO(LOG_DEBUG
));
4458 runtime_vsyslog(&attr
, newmsg
, ap
);
4460 if( j
&& unlikely(j
->debug
) ) {
4461 setlogmask(oldmask
);
4466 job_log_error(job_t j
, int pri
, const char *msg
, ...)
4471 job_logv(j
, pri
, errno
, msg
, ap
);
4476 job_log(job_t j
, int pri
, const char *msg
, ...)
4481 job_logv(j
, pri
, 0, msg
, ap
);
4487 jobmgr_log_error(jobmgr_t jm
, int pri
, const char *msg
, ...)
4492 jobmgr_logv(jm
, pri
, errno
, msg
, ap
);
4498 jobmgr_log(jobmgr_t jm
, int pri
, const char *msg
, ...)
4503 jobmgr_logv(jm
, pri
, 0, msg
, ap
);
4508 jobmgr_logv(jobmgr_t jm
, int pri
, int err
, const char *msg
, va_list ap
)
4512 size_t i
, o
, jmname_len
= strlen(jm
->name
), newmsgsz
;
4514 newname
= alloca((jmname_len
+ 1) * 2);
4515 newmsgsz
= (jmname_len
+ 1) * 2 + strlen(msg
) + 100;
4516 newmsg
= alloca(newmsgsz
);
4518 for (i
= 0, o
= 0; i
< jmname_len
; i
++, o
++) {
4519 if (jm
->name
[i
] == '%') {
4523 newname
[o
] = jm
->name
[i
];
4528 snprintf(newmsg
, newmsgsz
, "%s: %s: %s", newname
, msg
, strerror(err
));
4530 snprintf(newmsg
, newmsgsz
, "%s: %s", newname
, msg
);
4533 if (jm
->parentmgr
) {
4534 jobmgr_logv(jm
->parentmgr
, pri
, 0, newmsg
, ap
);
4536 struct runtime_syslog_attr attr
= { g_my_label
, g_my_label
, jm
->name
, pri
, getuid(), getpid(), getpid() };
4538 runtime_vsyslog(&attr
, newmsg
, ap
);
4543 semaphoreitem_ignore(job_t j
, struct semaphoreitem
*si
)
4546 job_log(j
, LOG_DEBUG
, "Ignoring Vnode: %d", si
->fd
);
4547 job_assumes(j
, kevent_mod(si
->fd
, EVFILT_VNODE
, EV_DELETE
, 0, 0, NULL
) != -1);
4552 semaphoreitem_watch(job_t j
, struct semaphoreitem
*si
)
4554 char *parentdir
, tmp_path
[PATH_MAX
];
4555 int saved_errno
= 0;
4556 int fflags
= NOTE_DELETE
|NOTE_RENAME
;
4561 fflags
|= NOTE_ATTRIB
|NOTE_LINK
;
4564 fflags
|= NOTE_REVOKE
|NOTE_EXTEND
|NOTE_WRITE
;
4572 /* dirname() may modify tmp_path */
4573 strlcpy(tmp_path
, si
->what
, sizeof(tmp_path
));
4575 if (!job_assumes(j
, (parentdir
= dirname(tmp_path
)))) {
4579 /* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
4583 if( stat(si
->what
, &sb
) == 0 ) {
4584 /* If we're watching a character or block device, only watch the parent directory.
4585 * See rdar://problem/6489900 for the gory details. Basically, holding an open file
4586 * descriptor to a devnode could end up (a) blocking us on open(2) until someone else
4587 * open(2)s the file (like a character device that waits for a carrier signal) or
4588 * (b) preventing other processes from obtaining an exclusive lock on the file, even
4589 * though we're opening it with O_EVTONLY.
4591 * The main point of contention is that O_EVTONLY doesn't actually mean "event only".
4592 * It means "Don't prevent unmounts of this descriptor's volume". We work around this
4593 * for dev nodes by only watching the parent directory and stat(2)ing our desired file
4594 * each time the parent changes to see if it appeared or disappeared.
4596 if( S_ISREG(sb
.st_mode
) || S_ISDIR(sb
.st_mode
) ) {
4597 si
->fd
= _fd(open(si
->what
, O_EVTONLY
| O_NOCTTY
| O_NONBLOCK
));
4601 if( si
->fd
== -1 ) {
4602 si
->watching_parent
= job_assumes(j
, (si
->fd
= _fd(open(parentdir
, O_EVTONLY
| O_NOCTTY
| O_NONBLOCK
))) != -1);
4604 si
->watching_parent
= false;
4609 return job_log_error(j
, LOG_ERR
, "Path monitoring failed on \"%s\"", si
->what
);
4612 job_log(j
, LOG_DEBUG
, "Watching %svnode (%s): %d", si
->watching_parent
? "parent ": "", si
->what
, si
->fd
);
4614 if (kevent_mod(si
->fd
, EVFILT_VNODE
, EV_ADD
, fflags
, 0, j
) == -1) {
4615 saved_errno
= errno
;
4617 * The FD can be revoked between the open() and kevent().
4618 * This is similar to the inability for kevents to be
4619 * attached to short lived zombie processes after fork()
4620 * but before kevent().
4622 job_assumes(j
, runtime_close(si
->fd
) == 0);
4625 } while (unlikely((si
->fd
== -1) && (saved_errno
== ENOENT
)));
4627 if (saved_errno
== ENOTSUP
) {
4629 * 3524219 NFS needs kqueue support
4630 * 4124079 VFS needs generic kqueue support
4631 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
4633 job_log(j
, LOG_DEBUG
, "Falling back to polling for path: %s", si
->what
);
4635 if (!j
->poll_for_vfs_changes
) {
4636 j
->poll_for_vfs_changes
= true;
4637 job_assumes(j
, kevent_mod((uintptr_t)&j
->semaphores
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, 3, j
) != -1);
4643 semaphoreitem_callback(job_t j
, struct kevent
*kev
)
4645 char invalidation_reason
[100] = "";
4646 struct semaphoreitem
*si
;
4648 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
4654 job_log(j
, LOG_DEBUG
, "P%s changed (%u): %s", si
->watching_parent
? "arent path" : "ath", si
->why
, si
->what
);
4660 if (si
->fd
== (int)kev
->ident
) {
4665 if (!job_assumes(j
, si
!= NULL
)) {
4669 if (NOTE_DELETE
& kev
->fflags
) {
4670 strcat(invalidation_reason
, "deleted");
4673 if (NOTE_RENAME
& kev
->fflags
) {
4674 if (invalidation_reason
[0]) {
4675 strcat(invalidation_reason
, "/renamed");
4677 strcat(invalidation_reason
, "renamed");
4681 if (NOTE_REVOKE
& kev
->fflags
) {
4682 if (invalidation_reason
[0]) {
4683 strcat(invalidation_reason
, "/revoked");
4685 strcat(invalidation_reason
, "revoked");
4689 if (invalidation_reason
[0]) {
4690 job_log(j
, LOG_DEBUG
, "Path %s: %s", invalidation_reason
, si
->what
);
4691 job_assumes(j
, runtime_close(si
->fd
) == 0);
4692 si
->fd
= -1; /* this will get fixed in semaphoreitem_watch() */
4695 if( !si
->watching_parent
) {
4696 if (si
->why
== PATH_CHANGES
) {
4697 j
->start_pending
= true;
4699 semaphoreitem_watch(j
, si
);
4701 } else { /* Something happened to the parent directory. See if our target file appeared. */
4702 if( !invalidation_reason
[0] ) {
4703 job_assumes(j
, runtime_close(si
->fd
) == 0);
4704 si
->fd
= -1; /* this will get fixed in semaphoreitem_watch() */
4705 semaphoreitem_watch(j
, si
);
4707 /* Need to think about what should happen if the parent directory goes invalid. */
4710 job_dispatch(j
, false);
4713 struct cal_dict_walk
{
4719 calendarinterval_new_from_obj_dict_walk(launch_data_t obj
, const char *key
, void *context
)
4721 struct cal_dict_walk
*cdw
= context
;
4722 struct tm
*tmptm
= &cdw
->tmptm
;
4726 if (unlikely(LAUNCH_DATA_INTEGER
!= launch_data_get_type(obj
))) {
4727 /* hack to let caller know something went wrong */
4732 val
= launch_data_get_integer(obj
);
4735 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is less than zero.", key
);
4736 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MINUTE
) == 0) {
4738 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key
);
4741 tmptm
->tm_min
= (typeof(tmptm
->tm_min
)) val
;
4743 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_HOUR
) == 0) {
4745 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key
);
4748 tmptm
->tm_hour
= (typeof(tmptm
->tm_hour
)) val
;
4750 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_DAY
) == 0) {
4751 if( val
< 1 || val
> 31 ) {
4752 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key
);
4755 tmptm
->tm_mday
= (typeof(tmptm
->tm_mday
)) val
;
4757 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_WEEKDAY
) == 0) {
4759 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key
);
4762 tmptm
->tm_wday
= (typeof(tmptm
->tm_wday
)) val
;
4764 } else if (strcasecmp(key
, LAUNCH_JOBKEY_CAL_MONTH
) == 0) {
4766 job_log(j
, LOG_WARNING
, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key
);
4769 tmptm
->tm_mon
= (typeof(tmptm
->tm_mon
)) val
;
4770 tmptm
->tm_mon
-= 1; /* 4798263 cron compatibility */
4776 calendarinterval_new_from_obj(job_t j
, launch_data_t obj
)
4778 struct cal_dict_walk cdw
;
4781 memset(&cdw
.tmptm
, 0, sizeof(0));
4783 cdw
.tmptm
.tm_min
= -1;
4784 cdw
.tmptm
.tm_hour
= -1;
4785 cdw
.tmptm
.tm_mday
= -1;
4786 cdw
.tmptm
.tm_wday
= -1;
4787 cdw
.tmptm
.tm_mon
= -1;
4789 if (!job_assumes(j
, obj
!= NULL
)) {
4793 if (unlikely(LAUNCH_DATA_DICTIONARY
!= launch_data_get_type(obj
))) {
4797 launch_data_dict_iterate(obj
, calendarinterval_new_from_obj_dict_walk
, &cdw
);
4799 if (unlikely(cdw
.tmptm
.tm_sec
== -1)) {
4803 return calendarinterval_new(j
, &cdw
.tmptm
);
4807 calendarinterval_new(job_t j
, struct tm
*w
)
4809 struct calendarinterval
*ci
= calloc(1, sizeof(struct calendarinterval
));
4811 if (!job_assumes(j
, ci
!= NULL
)) {
4818 SLIST_INSERT_HEAD(&j
->cal_intervals
, ci
, sle
);
4820 calendarinterval_setalarm(j
, ci
);
4822 runtime_add_weak_ref();
4828 calendarinterval_delete(job_t j
, struct calendarinterval
*ci
)
4830 SLIST_REMOVE(&j
->cal_intervals
, ci
, calendarinterval
, sle
);
4831 LIST_REMOVE(ci
, global_sle
);
4835 runtime_del_weak_ref();
4839 calendarinterval_sanity_check(void)
4841 struct calendarinterval
*ci
= LIST_FIRST(&sorted_calendar_events
);
4842 time_t now
= time(NULL
);
4844 if (unlikely(ci
&& (ci
->when_next
< now
))) {
4845 jobmgr_assumes(root_jobmgr
, raise(SIGUSR1
) != -1);
4850 calendarinterval_callback(void)
4852 struct calendarinterval
*ci
, *ci_next
;
4853 time_t now
= time(NULL
);
4855 LIST_FOREACH_SAFE(ci
, &sorted_calendar_events
, global_sle
, ci_next
) {
4858 if (ci
->when_next
> now
) {
4862 LIST_REMOVE(ci
, global_sle
);
4863 calendarinterval_setalarm(j
, ci
);
4865 j
->start_pending
= true;
4866 job_dispatch(j
, false);
4871 socketgroup_new(job_t j
, const char *name
, int *fds
, size_t fd_cnt
, bool junkfds
)
4873 struct socketgroup
*sg
= calloc(1, sizeof(struct socketgroup
) + strlen(name
) + 1);
4875 if (!job_assumes(j
, sg
!= NULL
)) {
4879 sg
->fds
= calloc(1, fd_cnt
* sizeof(int));
4880 sg
->fd_cnt
= fd_cnt
;
4881 sg
->junkfds
= junkfds
;
4883 if (!job_assumes(j
, sg
->fds
!= NULL
)) {
4888 memcpy(sg
->fds
, fds
, fd_cnt
* sizeof(int));
4889 strcpy(sg
->name_init
, name
);
4891 SLIST_INSERT_HEAD(&j
->sockets
, sg
, sle
);
4893 runtime_add_weak_ref();
4899 socketgroup_delete(job_t j
, struct socketgroup
*sg
)
4903 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
4905 struct sockaddr_storage ss
;
4906 struct sockaddr_un
*sun
= (struct sockaddr_un
*)&ss
;
4907 socklen_t ss_len
= sizeof(ss
);
4910 if (job_assumes(j
, getsockname(sg
->fds
[i
], (struct sockaddr
*)&ss
, &ss_len
) != -1)
4911 && job_assumes(j
, ss_len
> 0) && (ss
.ss_family
== AF_UNIX
)) {
4912 job_assumes(j
, unlink(sun
->sun_path
) != -1);
4913 /* We might conditionally need to delete a directory here */
4916 job_assumes(j
, runtime_close(sg
->fds
[i
]) != -1);
4919 SLIST_REMOVE(&j
->sockets
, sg
, socketgroup
, sle
);
4924 runtime_del_weak_ref();
4928 socketgroup_kevent_mod(job_t j
, struct socketgroup
*sg
, bool do_add
)
4930 struct kevent kev
[sg
->fd_cnt
];
4932 unsigned int i
, buf_off
= 0;
4934 if (unlikely(sg
->junkfds
)) {
4938 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
4939 EV_SET(&kev
[i
], sg
->fds
[i
], EVFILT_READ
, do_add
? EV_ADD
: EV_DELETE
, 0, 0, j
);
4940 buf_off
+= snprintf(buf
+ buf_off
, sizeof(buf
) - buf_off
, " %d", sg
->fds
[i
]);
4943 job_log(j
, LOG_DEBUG
, "%s Sockets:%s", do_add
? "Watching" : "Ignoring", buf
);
4945 job_assumes(j
, kevent_bulk_mod(kev
, sg
->fd_cnt
) != -1);
4947 for (i
= 0; i
< sg
->fd_cnt
; i
++) {
4948 job_assumes(j
, kev
[i
].flags
& EV_ERROR
);
4949 errno
= (typeof(errno
)) kev
[i
].data
;
4950 job_assumes(j
, kev
[i
].data
== 0);
4955 socketgroup_ignore(job_t j
, struct socketgroup
*sg
)
4957 socketgroup_kevent_mod(j
, sg
, false);
4961 socketgroup_watch(job_t j
, struct socketgroup
*sg
)
4963 socketgroup_kevent_mod(j
, sg
, true);
4967 socketgroup_callback(job_t j
)
4969 job_dispatch(j
, true);
4973 envitem_new(job_t j
, const char *k
, const char *v
, bool global
, bool one_shot
)
4975 struct envitem
*ei
= calloc(1, sizeof(struct envitem
) + strlen(k
) + 1 + strlen(v
) + 1);
4977 if (!job_assumes(j
, ei
!= NULL
)) {
4981 strcpy(ei
->key_init
, k
);
4982 ei
->value
= ei
->key_init
+ strlen(k
) + 1;
4983 strcpy(ei
->value
, v
);
4984 ei
->one_shot
= one_shot
;
4987 if (SLIST_EMPTY(&j
->global_env
)) {
4988 LIST_INSERT_HEAD(&j
->mgr
->global_env_jobs
, j
, global_env_sle
);
4990 SLIST_INSERT_HEAD(&j
->global_env
, ei
, sle
);
4992 SLIST_INSERT_HEAD(&j
->env
, ei
, sle
);
4995 job_log(j
, LOG_DEBUG
, "Added environmental variable: %s=%s", k
, v
);
5001 envitem_delete(job_t j
, struct envitem
*ei
, bool global
)
5004 SLIST_REMOVE(&j
->global_env
, ei
, envitem
, sle
);
5005 if (SLIST_EMPTY(&j
->global_env
)) {
5006 LIST_REMOVE(j
, global_env_sle
);
5009 SLIST_REMOVE(&j
->env
, ei
, envitem
, sle
);
5016 envitem_setup(launch_data_t obj
, const char *key
, void *context
)
5020 if (launch_data_get_type(obj
) != LAUNCH_DATA_STRING
) {
5024 if( strncmp(LAUNCHD_TRUSTED_FD_ENV
, key
, sizeof(LAUNCHD_TRUSTED_FD_ENV
) - 1) != 0 ) {
5025 envitem_new(j
, key
, launch_data_get_string(obj
), j
->importing_global_env
, false);
5027 job_log(j
, LOG_DEBUG
, "Ignoring reserved environmental variable: %s", key
);
5032 envitem_setup_one_shot(launch_data_t obj
, const char *key
, void *context
)
5036 if (launch_data_get_type(obj
) != LAUNCH_DATA_STRING
) {
5040 if( strncmp(LAUNCHD_TRUSTED_FD_ENV
, key
, sizeof(LAUNCHD_TRUSTED_FD_ENV
) - 1) != 0 ) {
5041 envitem_new(j
, key
, launch_data_get_string(obj
), j
->importing_global_env
, true);
5043 job_log(j
, LOG_DEBUG
, "Ignoring reserved environmental variable: %s", key
);
5048 limititem_update(job_t j
, int w
, rlim_t r
)
5050 struct limititem
*li
;
5052 SLIST_FOREACH(li
, &j
->limits
, sle
) {
5053 if (li
->which
== w
) {
5059 li
= calloc(1, sizeof(struct limititem
));
5061 if (!job_assumes(j
, li
!= NULL
)) {
5065 SLIST_INSERT_HEAD(&j
->limits
, li
, sle
);
5070 if (j
->importing_hard_limits
) {
5071 li
->lim
.rlim_max
= r
;
5074 li
->lim
.rlim_cur
= r
;
5082 limititem_delete(job_t j
, struct limititem
*li
)
5084 SLIST_REMOVE(&j
->limits
, li
, limititem
, sle
);
5091 seatbelt_setup_flags(launch_data_t obj
, const char *key
, void *context
)
5095 if (launch_data_get_type(obj
) != LAUNCH_DATA_BOOL
) {
5096 job_log(j
, LOG_WARNING
, "Sandbox flag value must be boolean: %s", key
);
5100 if (launch_data_get_bool(obj
) == false) {
5104 if (strcasecmp(key
, LAUNCH_JOBKEY_SANDBOX_NAMED
) == 0) {
5105 j
->seatbelt_flags
|= SANDBOX_NAMED
;
5111 limititem_setup(launch_data_t obj
, const char *key
, void *context
)
5114 size_t i
, limits_cnt
= (sizeof(launchd_keys2limits
) / sizeof(launchd_keys2limits
[0]));
5117 if (launch_data_get_type(obj
) != LAUNCH_DATA_INTEGER
) {
5121 rl
= launch_data_get_integer(obj
);
5123 for (i
= 0; i
< limits_cnt
; i
++) {
5124 if (strcasecmp(launchd_keys2limits
[i
].key
, key
) == 0) {
5129 if (i
== limits_cnt
) {
5133 limititem_update(j
, launchd_keys2limits
[i
].val
, rl
);
5137 job_useless(job_t j
)
5139 if ((j
->legacy_LS_job
|| j
->only_once
) && j
->start_time
!= 0) {
5140 if (j
->legacy_LS_job
&& j
->j_port
) {
5143 job_log(j
, LOG_INFO
, "Exited. Was only configured to run once.");
5145 } else if (j
->removal_pending
) {
5146 job_log(j
, LOG_DEBUG
, "Exited while removal was pending.");
5148 } else if (j
->mgr
->shutting_down
&& (j
->hopefully_exits_first
|| j
->mgr
->hopefully_first_cnt
== 0)) {
5149 job_log(j
, LOG_DEBUG
, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children
, total_anon_children
);
5150 if( total_children
== 0 && !j
->anonymous
) {
5151 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Job was last (non-anonymous) to exit during %s shutdown.", (pid1_magic
&& j
->mgr
== root_jobmgr
) ? "system" : "job manager");
5152 } else if( total_anon_children
== 0 && j
->anonymous
) {
5153 job_log(j
, LOG_DEBUG
| LOG_CONSOLE
, "Job was last (anonymous) to exit during %s shutdown.", (pid1_magic
&& j
->mgr
== root_jobmgr
) ? "system" : "job manager");
5156 } else if (j
->legacy_mach_job
) {
5157 if (SLIST_EMPTY(&j
->machservices
)) {
5158 job_log(j
, LOG_INFO
, "Garbage collecting");
5160 } else if (!j
->checkedin
) {
5161 job_log(j
, LOG_WARNING
, "Failed to check-in!");
5170 job_keepalive(job_t j
)
5172 mach_msg_type_number_t statusCnt
;
5173 mach_port_status_t status
;
5174 struct semaphoreitem
*si
;
5175 struct machservice
*ms
;
5177 bool good_exit
= (WIFEXITED(j
->last_exit_status
) && WEXITSTATUS(j
->last_exit_status
) == 0);
5178 bool is_not_kextd
= (do_apple_internal_logging
|| (strcmp(j
->label
, "com.apple.kextd") != 0));
5180 if (unlikely(j
->mgr
->shutting_down
)) {
5187 * We definitely need to revisit this after Leopard ships. Please see
5188 * launchctl.c for the other half of this hack.
5190 if (unlikely((j
->mgr
->global_on_demand_cnt
> 0) && is_not_kextd
)) {
5194 if( unlikely(j
->needs_kickoff
) ) {
5195 job_log(j
, LOG_DEBUG
, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
5199 if (j
->start_pending
) {
5200 job_log(j
, LOG_DEBUG
, "KeepAlive check: Pent-up non-IPC launch criteria.");
5205 job_log(j
, LOG_DEBUG
, "KeepAlive check: job configured to run continuously.");
5209 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
5210 statusCnt
= MACH_PORT_RECEIVE_STATUS_COUNT
;
5211 if (mach_port_get_attributes(mach_task_self(), ms
->port
, MACH_PORT_RECEIVE_STATUS
,
5212 (mach_port_info_t
)&status
, &statusCnt
) != KERN_SUCCESS
) {
5215 if (status
.mps_msgcount
) {
5216 job_log(j
, LOG_DEBUG
, "KeepAlive check: %d queued Mach messages on service: %s",
5217 status
.mps_msgcount
, ms
->name
);
5223 SLIST_FOREACH(si
, &j
->semaphores
, sle
) {
5224 bool wanted_state
= false;
5230 wanted_state
= true;
5232 if (network_up
== wanted_state
) {
5233 job_log(j
, LOG_DEBUG
, "KeepAlive: The network is %s.", wanted_state
? "up" : "down");
5237 case SUCCESSFUL_EXIT
:
5238 wanted_state
= true;
5240 if (good_exit
== wanted_state
) {
5241 job_log(j
, LOG_DEBUG
, "KeepAlive: The exit state was %s.", wanted_state
? "successful" : "failure");
5245 case OTHER_JOB_ENABLED
:
5246 wanted_state
= true;
5247 case OTHER_JOB_DISABLED
:
5248 if ((bool)job_find(si
->what
) == wanted_state
) {
5249 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "enabled" : "disabled", si
->what
);
5253 case OTHER_JOB_ACTIVE
:
5254 wanted_state
= true;
5255 case OTHER_JOB_INACTIVE
:
5256 if ((other_j
= job_find(si
->what
))) {
5257 if ((bool)other_j
->p
== wanted_state
) {
5258 job_log(j
, LOG_DEBUG
, "KeepAlive: The following job is %s: %s", wanted_state
? "active" : "inactive", si
->what
);
5264 wanted_state
= true;
5266 if ((bool)(stat(si
->what
, &sb
) == 0) == wanted_state
) {
5267 job_log(j
, LOG_DEBUG
, "KeepAlive: The following path %s: %s", wanted_state
? "exists" : "is missing", si
->what
);
5270 if( wanted_state
) { /* File is not there but we wish it was. */
5271 if( si
->fd
!= -1 && !si
->watching_parent
) { /* Need to be watching the parent now. */
5272 job_assumes(j
, runtime_close(si
->fd
) == 0);
5274 semaphoreitem_watch(j
, si
);
5276 } else { /* File is there but we wish it wasn't. */
5277 if( si
->fd
!= -1 && si
->watching_parent
) { /* Need to watch the file now. */
5278 job_assumes(j
, runtime_close(si
->fd
) == 0);
5280 semaphoreitem_watch(j
, si
);
5288 if (-1 == (qdir_file_cnt
= dir_has_files(j
, si
->what
))) {
5289 job_log_error(j
, LOG_ERR
, "Failed to count the number of files in \"%s\"", si
->what
);
5290 } else if (qdir_file_cnt
> 0) {
5291 job_log(j
, LOG_DEBUG
, "KeepAlive: Directory is not empty: %s", si
->what
);
5304 struct machservice
*ms
;
5306 return "PID is still valid";
5309 if (j
->mgr
->shutting_down
&& j
->log_redirect_fd
) {
5310 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
5311 j
->log_redirect_fd
= 0;
5314 if (j
->log_redirect_fd
) {
5315 if (job_assumes(j
, j
->legacy_LS_job
)) {
5316 return "Standard out/error is still valid";
5318 job_assumes(j
, runtime_close(j
->log_redirect_fd
) != -1);
5319 j
->log_redirect_fd
= 0;
5323 if (j
->priv_port_has_senders
) {
5324 return "Privileged Port still has outstanding senders";
5327 SLIST_FOREACH(ms
, &j
->machservices
, sle
) {
5328 if (ms
->recv
&& machservice_active(ms
)) {
5329 return "Mach service is still active";
5337 machservice_watch(job_t j
, struct machservice
*ms
)
5340 job_assumes(j
, runtime_add_mport(ms
->port
, NULL
, 0) == KERN_SUCCESS
);
5345 machservice_ignore(job_t j
, struct machservice
*ms
)
5347 job_assumes(j
, runtime_remove_mport(ms
->port
) == KERN_SUCCESS
);
5351 machservice_resetport(job_t j
, struct machservice
*ms
)
5353 LIST_REMOVE(ms
, port_hash_sle
);
5354 job_assumes(j
, launchd_mport_close_recv(ms
->port
) == KERN_SUCCESS
);
5355 job_assumes(j
, launchd_mport_deallocate(ms
->port
) == KERN_SUCCESS
);
5357 job_assumes(j
, launchd_mport_create_recv(&ms
->port
) == KERN_SUCCESS
);
5358 job_assumes(j
, launchd_mport_make_send(ms
->port
) == KERN_SUCCESS
);
5359 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
5362 struct machservice
*
5363 machservice_new(job_t j
, const char *name
, mach_port_t
*serviceport
, bool pid_local
)
5365 struct machservice
*ms
= calloc(1, sizeof(struct machservice
) + strlen(name
) + 1);
5367 if (!job_assumes(j
, ms
!= NULL
)) {
5371 strcpy((char *)ms
->name
, name
);
5374 ms
->per_pid
= pid_local
;
5376 if (likely(*serviceport
== MACH_PORT_NULL
)) {
5377 if (!job_assumes(j
, launchd_mport_create_recv(&ms
->port
) == KERN_SUCCESS
)) {
5381 if (!job_assumes(j
, launchd_mport_make_send(ms
->port
) == KERN_SUCCESS
)) {
5384 *serviceport
= ms
->port
;
5387 ms
->port
= *serviceport
;
5388 ms
->isActive
= true;
5391 SLIST_INSERT_HEAD(&j
->machservices
, ms
, sle
);
5393 jobmgr_t jm_to_insert
= j
->mgr
;
5394 if( g_flat_mach_namespace
) {
5395 jm_to_insert
= (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) ? j
->mgr
: root_jobmgr
;
5398 LIST_INSERT_HEAD(&jm_to_insert
->ms_hash
[hash_ms(ms
->name
)], ms
, name_hash_sle
);
5399 LIST_INSERT_HEAD(&port_hash
[HASH_PORT(ms
->port
)], ms
, port_hash_sle
);
5401 job_log(j
, LOG_DEBUG
, "Mach service added%s: %s", (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) ? " to private namespace" : "", name
);
5405 job_assumes(j
, launchd_mport_close_recv(ms
->port
) == KERN_SUCCESS
);
5412 machservice_status(struct machservice
*ms
)
5415 return BOOTSTRAP_STATUS_ACTIVE
;
5416 } else if (ms
->job
->ondemand
) {
5417 return BOOTSTRAP_STATUS_ON_DEMAND
;
5419 return BOOTSTRAP_STATUS_INACTIVE
;
5424 job_setup_exception_port(job_t j
, task_t target_task
)
5426 struct machservice
*ms
;
5427 thread_state_flavor_t f
= 0;
5428 mach_port_t exc_port
= the_exception_server
;
5430 if (unlikely(j
->alt_exc_handler
)) {
5431 ms
= jobmgr_lookup_service(j
->mgr
, j
->alt_exc_handler
, true, 0);
5433 exc_port
= machservice_port(ms
);
5435 job_log(j
, LOG_WARNING
, "Falling back to default Mach exception handler. Could not find: %s", j
->alt_exc_handler
);
5437 } else if (unlikely(j
->internal_exc_handler
)) {
5438 exc_port
= runtime_get_kernel_port();
5439 } else if (unlikely(!exc_port
)) {
5443 #if defined (__ppc__) || defined(__ppc64__)
5444 f
= PPC_THREAD_STATE64
;
5445 #elif defined(__i386__) || defined(__x86_64__)
5446 f
= x86_THREAD_STATE
;
5447 #elif defined(__arm__)
5448 f
= ARM_THREAD_STATE
;
5450 #error "unknown architecture"
5453 if (likely(target_task
)) {
5454 job_assumes(j
, task_set_exception_ports(target_task
, EXC_MASK_CRASH
, exc_port
, EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
) == KERN_SUCCESS
);
5455 } else if (pid1_magic
&& the_exception_server
) {
5456 mach_port_t mhp
= mach_host_self();
5457 job_assumes(j
, host_set_exception_ports(mhp
, EXC_MASK_CRASH
, the_exception_server
, EXCEPTION_STATE_IDENTITY
| MACH_EXCEPTION_CODES
, f
) == KERN_SUCCESS
);
5458 job_assumes(j
, launchd_mport_deallocate(mhp
) == KERN_SUCCESS
);
5463 job_set_exception_port(job_t j
, mach_port_t port
)
5465 if (unlikely(!the_exception_server
)) {
5466 the_exception_server
= port
;
5467 job_setup_exception_port(j
, 0);
5469 job_log(j
, LOG_WARNING
, "The exception server is already claimed!");
5474 machservice_setup_options(launch_data_t obj
, const char *key
, void *context
)
5476 struct machservice
*ms
= context
;
5477 mach_port_t mhp
= mach_host_self();
5481 if (!job_assumes(ms
->job
, mhp
!= MACH_PORT_NULL
)) {
5485 switch (launch_data_get_type(obj
)) {
5486 case LAUNCH_DATA_INTEGER
:
5487 which_port
= (int)launch_data_get_integer(obj
); /* XXX we should bound check this... */
5488 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT
) == 0) {
5489 switch (which_port
) {
5490 case TASK_KERNEL_PORT
:
5491 case TASK_HOST_PORT
:
5492 case TASK_NAME_PORT
:
5493 case TASK_BOOTSTRAP_PORT
:
5494 /* I find it a little odd that zero isn't reserved in the header.
5495 * Normally Mach is fairly good about this convention... */
5497 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved task special port: %d", which_port
);
5500 ms
->special_port_num
= which_port
;
5501 SLIST_INSERT_HEAD(&special_ports
, ms
, special_port_sle
);
5504 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT
) == 0 && pid1_magic
) {
5505 if (which_port
> HOST_MAX_SPECIAL_KERNEL_PORT
) {
5506 job_assumes(ms
->job
, (errno
= host_set_special_port(mhp
, which_port
, ms
->port
)) == KERN_SUCCESS
);
5508 job_log(ms
->job
, LOG_WARNING
, "Tried to set a reserved host special port: %d", which_port
);
5511 case LAUNCH_DATA_BOOL
:
5512 b
= launch_data_get_bool(obj
);
5513 if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE
) == 0) {
5514 ms
->debug_on_close
= b
;
5515 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_RESETATCLOSE
) == 0) {
5517 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN
) == 0) {
5519 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER
) == 0) {
5520 job_set_exception_port(ms
->job
, ms
->port
);
5521 } else if (strcasecmp(key
, LAUNCH_JOBKEY_MACH_KUNCSERVER
) == 0) {
5523 job_assumes(ms
->job
, host_set_UNDServer(mhp
, ms
->port
) == KERN_SUCCESS
);
5526 case LAUNCH_DATA_STRING
:
5527 if( strcasecmp(key
, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH
) == 0 ) {
5528 const char *option
= launch_data_get_string(obj
);
5529 if( strcasecmp(option
, "One") == 0 ) {
5530 ms
->drain_one_on_crash
= true;
5531 } else if( strcasecmp(option
, "All") == 0 ) {
5532 ms
->drain_all_on_crash
= true;
5536 case LAUNCH_DATA_DICTIONARY
:
5537 job_set_exception_port(ms
->job
, ms
->port
);
5543 job_assumes(ms
->job
, launchd_mport_deallocate(mhp
) == KERN_SUCCESS
);
5547 machservice_setup(launch_data_t obj
, const char *key
, void *context
)
5550 struct machservice
*ms
;
5551 mach_port_t p
= MACH_PORT_NULL
;
5553 if (unlikely(ms
= jobmgr_lookup_service(j
->mgr
, key
, false, 0))) {
5554 job_log(j
, LOG_WARNING
, "Conflict with job: %s over Mach service: %s", ms
->job
->label
, key
);
5558 if (!job_assumes(j
, (ms
= machservice_new(j
, key
, &p
, false)) != NULL
)) {
5562 ms
->isActive
= false;
5564 if (launch_data_get_type(obj
) == LAUNCH_DATA_DICTIONARY
) {
5565 launch_data_dict_iterate(obj
, machservice_setup_options
, ms
);
5570 jobmgr_do_garbage_collection(jobmgr_t jm
)
5572 jobmgr_t jmi
= NULL
, jmn
= NULL
;
5573 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
5574 jobmgr_do_garbage_collection(jmi
);
5577 if( !jm
->shutting_down
) {
5581 if( SLIST_EMPTY(&jm
->submgrs
) ) {
5582 jobmgr_log(jm
, LOG_DEBUG
, "No submanagers left.");
5584 jobmgr_log(jm
, LOG_DEBUG
, "Still have submanagers.");
5588 for( phase
= jm
->shutdown_phase
; phase
< JOBMGR_PHASE_LAST
; phase
++ ) {
5589 if( phase
== JOBMGR_PHASE_HOPEFULLY_EXITS_LAST
) {
5590 if( jm
== root_jobmgr
) {
5591 simulate_pid1_crash();
5594 if( jm
== root_jobmgr
&& pid1_magic
&& !jm
->killed_stray_jobs
) {
5595 jobmgr_log_stray_children(jm
, true);
5596 jm
->killed_stray_jobs
= true;
5600 uint32_t unkilled_cnt
= 0;
5601 job_t ji
= NULL
, jn
= NULL
;
5602 LIST_FOREACH_SAFE( ji
, &jm
->jobs
, sle
, jn
) {
5603 if( phase
== JOBMGR_PHASE_HOPEFULLY_EXITS_FIRST
&& !ji
->hopefully_exits_first
) {
5605 } else if( phase
== JOBMGR_PHASE_NORMAL
) {
5606 if( ji
->holds_ref
) {
5607 /* If we're shutting down, release the hold holds_ref jobs
5613 if( ji
->hopefully_exits_first
|| ji
->hopefully_exits_last
) {
5616 } else if( phase
== JOBMGR_PHASE_HOPEFULLY_EXITS_LAST
&& !ji
->hopefully_exits_last
) {
5620 if( ji
->anonymous
) {
5624 const char *active
= job_active(ji
);
5626 job_log(ji
, LOG_DEBUG
, "Job is inactive. Removing.");
5630 if( !ji
->stopped
) {
5631 job_log(ji
, LOG_DEBUG
, "Stopping job.");
5635 if( ji
->clean_kill
) {
5636 job_log(ji
, LOG_DEBUG
, "Job was clean and sent SIGKILL.");
5637 if( !ji
->clean_exit_timer_expired
) {
5638 /* Give jobs that were clean and sent SIGKILL 1 second to exit after receipt. */
5641 job_log(ji
, LOG_ERR
, "Job was clean, killed and has not exited after 1 second. Moving on.");
5644 job_log(ji
, LOG_DEBUG
, "Job was sent SIGTERM%s.", ji
->sent_sigkill
? " and SIGKILL" : "");
5645 unkilled_cnt
+= !ji
->sent_sigkill
;
5649 job_log(ji
, LOG_DEBUG
, "Job is active: %s", active
);
5652 } /* LIST_FOREACH_SAFE */
5654 if( unkilled_cnt
== 0 ) {
5655 jobmgr_log(jm
, LOG_DEBUG
, "Done with the %s bucket, advancing.", s_phases
[jm
->shutdown_phase
]);
5656 jm
->shutdown_phase
++;
5658 jobmgr_log(jm
, LOG_DEBUG
, "Still %u unkilled job%s in %s bucket.", unkilled_cnt
, unkilled_cnt
> 1 ? "s" : "", s_phases
[jm
->shutdown_phase
]);
5659 phase
= JOBMGR_PHASE_LAST
;
5664 if( jm
->shutdown_phase
> JOBMGR_PHASE_HOPEFULLY_EXITS_LAST
&& SLIST_EMPTY(&jm
->submgrs
) ) {
5665 jobmgr_log(jm
, LOG_DEBUG
, "Removing.");
5666 jobmgr_log_stray_children(jm
, false);
5675 jobmgr_kill_stray_children(jobmgr_t jm
, pid_t
*p
, size_t np
)
5678 /* I maintain that stray processes should be at the mercy of launchd during shutdown,
5679 * but nevertheless, things like diskimages-helper can stick around, and SIGKILLing
5680 * them can result in data loss. So we send SIGTERM to all the strays and don't wait
5681 * for them to exit before moving on.
5683 * See rdar://problem/6562592
5686 for( i
= 0; i
< np
; i
++ ) {
5688 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Sending SIGTERM to PID %u and continuing...", p
[i
]);
5689 jobmgr_assumes(jm
, runtime_kill(p
[i
], SIGTERM
) != -1);
5693 struct timespec tts
= { 2, 0 }; /* Wait 2 seconds for stray children to die after being SIGTERM'ed. */
5694 struct timespec kts
= { 1, 0 }; /* Wait 1 second for stray children to die after being SIGKILL'ed. */
5695 uint64_t start
, end
, nanosec
;
5697 int r
, kq
= kqueue();
5699 if (!jobmgr_assumes(jm
, kq
!= -1)) {
5703 start
= runtime_get_opaque_time();
5704 size_t i
= 0, n2t
= 0;
5705 for( i
= 0; i
< np
; i
++ ) {
5707 EV_SET(&kev
, p
[i
], EVFILT_PROC
, EV_ADD
, NOTE_EXIT
, 0, 0);
5709 if( jobmgr_assumes(jm
, kevent(kq
, &kev
, 1, NULL
, 0, NULL
) != -1) ) {
5710 jobmgr_assumes(jm
, runtime_kill(p
[i
], SIGTERM
) != -1);
5713 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "Disregarding PID %u and continuing.", p
[i
]);
5719 while( n2t
> 0 && (r
= kevent(kq
, NULL
, 0, &kev
, 1, &tts
)) ) {
5721 waitpid((pid_t
)kev
.ident
, &status
, WNOHANG
);
5723 end
= runtime_get_opaque_time();
5724 nanosec
= runtime_opaque_time_to_nano(end
- start
);
5725 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "PID %u died after %llu nanoseconds.", (pid_t
)kev
.ident
, nanosec
);
5727 for( i
= 0; i
< np
; i
++ ) {
5728 p
[i
] = ( p
[i
] == (pid_t
)kev
.ident
) ? 0 : p
[i
];
5733 for( i
= 0; i
< np
; i
++ ) {
5735 jobmgr_assumes(jm
, runtime_kill(p
[i
], SIGKILL
) != -1);
5740 while( n2k
> 0 && (r
= kevent(kq
, NULL
, 0, &kev
, 1, &kts
)) ) {
5742 waitpid((pid_t
)kev
.ident
, &status
, WNOHANG
);
5744 end
= runtime_get_opaque_time();
5745 nanosec
= runtime_opaque_time_to_nano(end
- start
);
5746 jobmgr_log(jm
, LOG_DEBUG
| LOG_CONSOLE
, "PID %u was killed and died after %llu nanoseconds.", (pid_t
)kev
.ident
, nanosec
);
5748 for( i
= 0; i
< np
; i
++ ) {
5749 p
[i
] = ( p
[i
] == (pid_t
)kev
.ident
) ? 0 : p
[i
];
5753 for( i
= 0; i
< np
; i
++ ) {
5755 jobmgr_log(jm
, LOG_NOTICE
| LOG_CONSOLE
, "PID %u did not die after being SIGKILL'ed 1 second ago.", p
[i
]);
5762 jobmgr_log_stray_children(jobmgr_t jm
, bool kill_strays
)
5764 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_ALL
};
5765 size_t i
, kp_cnt
= 0, kp_skipped
= 0, len
= sizeof(struct kinfo_proc
) * get_kern_max_proc();
5766 struct kinfo_proc
*kp
;
5768 if (likely(jm
->parentmgr
|| !pid1_magic
)) {
5772 if (!jobmgr_assumes(jm
, (kp
= malloc(len
)) != NULL
)) {
5776 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS
);
5778 if (!jobmgr_assumes(jm
, sysctl(mib
, 3, kp
, &len
, NULL
, 0) != -1)) {
5782 kp_cnt
= len
/ sizeof(struct kinfo_proc
);
5783 pid_t
*ps
= (pid_t
*)calloc(sizeof(pid_t
), kp_cnt
);
5785 for (i
= 0; i
< kp_cnt
; i
++) {
5786 pid_t p_i
= kp
[i
].kp_proc
.p_pid
;
5787 pid_t pp_i
= kp
[i
].kp_eproc
.e_ppid
;
5788 pid_t pg_i
= kp
[i
].kp_eproc
.e_pgid
;
5789 const char *z
= (kp
[i
].kp_proc
.p_stat
== SZOMB
) ? "zombie " : "";
5790 const char *n
= kp
[i
].kp_proc
.p_comm
;
5792 if (unlikely(p_i
== 0 || p_i
== 1)) {
5797 /* We might have some jobs hanging around that we've decided to shut down in spite of. */
5798 job_t j
= jobmgr_find_by_pid(jm
, p_i
, false);
5799 if( !j
|| (j
&& j
->anonymous
) ) {
5800 jobmgr_log(jm
, LOG_INFO
| LOG_CONSOLE
, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z
, j
? "anonymous job" : "process", p_i
, pp_i
, pg_i
, n
);
5803 if( pp_i
== getpid() && !jobmgr_assumes(jm
, kp
[i
].kp_proc
.p_stat
!= SZOMB
) ) {
5804 if( jobmgr_assumes(jm
, waitpid(p_i
, &status
, WNOHANG
) == 0) ) {
5805 jobmgr_log(jm
, LOG_INFO
| LOG_CONSOLE
, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status
));
5809 job_t leader
= jobmgr_find_by_pid(jm
, pg_i
, false);
5810 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
5811 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
5812 * hints to the kernel along the way, so that it could shutdown certain subsystems when
5813 * their userspace emissaries go away, before the call to reboot(2).
5815 if( leader
&& leader
->ignore_pg_at_shutdown
) {
5826 if( (kp_cnt
- kp_skipped
> 0) && kill_strays
) {
5827 jobmgr_kill_stray_children(jm
, ps
, kp_cnt
);
5836 jobmgr_parent(jobmgr_t jm
)
5838 return jm
->parentmgr
;
5842 job_uncork_fork(job_t j
)
5846 job_log(j
, LOG_DEBUG
, "Uncorking the fork().");
5847 /* this unblocks the child and avoids a race
5848 * between the above fork() and the kevent_mod() */
5849 job_assumes(j
, write(j
->fork_fd
, &c
, sizeof(c
)) == sizeof(c
));
5850 job_assumes(j
, runtime_close(j
->fork_fd
) != -1);
5855 jobmgr_new(jobmgr_t jm
, mach_port_t requestorport
, mach_port_t transfer_port
, bool sflag
, const char *name
, bool no_init
, mach_port_t session_port
)
5857 mach_msg_size_t mxmsgsz
;
5858 job_t bootstrapper
= NULL
;
5861 launchd_assert(offsetof(struct jobmgr_s
, kqjobmgr_callback
) == 0);
5863 if (unlikely(jm
&& requestorport
== MACH_PORT_NULL
)) {
5864 jobmgr_log(jm
, LOG_ERR
, "Mach sub-bootstrap create request requires a requester port");
5868 jmr
= calloc(1, sizeof(struct jobmgr_s
) + (name
? (strlen(name
) + 1) : NAME_MAX
+ 1));
5870 if (!jobmgr_assumes(jm
, jmr
!= NULL
)) {
5874 jmr
->kqjobmgr_callback
= jobmgr_callback
;
5875 strcpy(jmr
->name_init
, name
? name
: "Under construction");
5877 jmr
->req_port
= requestorport
;
5879 if ((jmr
->parentmgr
= jm
)) {
5880 SLIST_INSERT_HEAD(&jm
->submgrs
, jmr
, sle
);
5883 if (jm
&& !jobmgr_assumes(jmr
, launchd_mport_notify_req(jmr
->req_port
, MACH_NOTIFY_DEAD_NAME
) == KERN_SUCCESS
)) {
5887 if (transfer_port
!= MACH_PORT_NULL
) {
5888 jobmgr_assumes(jmr
, jm
!= NULL
);
5889 jmr
->jm_port
= transfer_port
;
5890 } else if (!jm
&& !pid1_magic
) {
5891 char *trusted_fd
= getenv(LAUNCHD_TRUSTED_FD_ENV
);
5894 snprintf(service_buf
, sizeof(service_buf
), "com.apple.launchd.peruser.%u", getuid());
5896 if (!jobmgr_assumes(jmr
, bootstrap_check_in(bootstrap_port
, service_buf
, &jmr
->jm_port
) == 0)) {
5901 int dfd
, lfd
= (int) strtol(trusted_fd
, NULL
, 10);
5903 if ((dfd
= dup(lfd
)) >= 0) {
5904 jobmgr_assumes(jmr
, runtime_close(dfd
) != -1);
5905 jobmgr_assumes(jmr
, runtime_close(lfd
) != -1);
5908 unsetenv(LAUNCHD_TRUSTED_FD_ENV
);
5911 /* cut off the Libc cache, we don't want to deadlock against ourself */
5912 inherited_bootstrap_port
= bootstrap_port
;
5913 bootstrap_port
= MACH_PORT_NULL
;
5914 launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port
, MACH_NOTIFY_DEAD_NAME
) == KERN_SUCCESS
);
5916 /* We set this explicitly as we start each child */
5917 launchd_assert(launchd_set_bport(MACH_PORT_NULL
) == KERN_SUCCESS
);
5918 } else if (!jobmgr_assumes(jmr
, launchd_mport_create_recv(&jmr
->jm_port
) == KERN_SUCCESS
)) {
5923 sprintf(jmr
->name_init
, "%u", MACH_PORT_INDEX(jmr
->jm_port
));
5926 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
5927 mxmsgsz
= (typeof(mxmsgsz
)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem
);
5928 if (job_mig_protocol_vproc_subsystem
.maxsize
> mxmsgsz
) {
5929 mxmsgsz
= job_mig_protocol_vproc_subsystem
.maxsize
;
5933 jobmgr_assumes(jmr
, kevent_mod(SIGTERM
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
) != -1);
5934 jobmgr_assumes(jmr
, kevent_mod(SIGUSR1
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
) != -1);
5935 jobmgr_assumes(jmr
, kevent_mod(SIGUSR2
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, jmr
) != -1);
5936 jobmgr_assumes(jmr
, kevent_mod(0, EVFILT_FS
, EV_ADD
, VQ_MOUNT
|VQ_UNMOUNT
|VQ_UPDATE
, 0, jmr
) != -1);
5939 if (name
&& !no_init
) {
5940 bootstrapper
= jobmgr_init_session(jmr
, name
, sflag
);
5943 if (!bootstrapper
|| !bootstrapper
->weird_bootstrap
) {
5944 if (!jobmgr_assumes(jmr
, runtime_add_mport(jmr
->jm_port
, protocol_vproc_server
, mxmsgsz
) == KERN_SUCCESS
)) {
5949 STAILQ_INIT(&jmr
->pending_samples
);
5951 jobmgr_log(jmr
, LOG_DEBUG
, "Created job manager%s%s", jm
? " with parent: " : ".", jm
? jm
->name
: "");
5954 bootstrapper
->audit_session
= session_port
;
5955 if( session_port
!= MACH_PORT_NULL
) {
5956 mach_port_mod_refs(mach_task_self(), session_port
, MACH_PORT_RIGHT_SEND
, 1);
5959 jobmgr_log(jmr
, LOG_DEBUG
, "Bootstrapping new job manager with audit session %u", session_port
);
5960 jobmgr_assumes(jmr
, job_dispatch(bootstrapper
, true) != NULL
);
5963 if (jmr
->parentmgr
) {
5964 runtime_add_weak_ref();
5977 jobmgr_init_session(jobmgr_t jm
, const char *session_type
, bool sflag
)
5979 const char *bootstrap_tool
[] = { "/bin/launchctl", "bootstrap", "-S", session_type
, sflag
? "-s" : NULL
, NULL
};
5980 char thelabel
[1000];
5983 snprintf(thelabel
, sizeof(thelabel
), "com.apple.launchctl.%s", session_type
);
5984 bootstrapper
= job_new(jm
, thelabel
, NULL
, bootstrap_tool
);
5986 if( jobmgr_assumes(jm
, bootstrapper
!= NULL
) && (jm
->parentmgr
|| !pid1_magic
) ) {
5987 bootstrapper
->is_bootstrapper
= true;
5990 /* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
5991 snprintf(buf
, sizeof(buf
), "0x%X:0:0", getuid());
5992 envitem_new(bootstrapper
, "__CF_USER_TEXT_ENCODING", buf
, false, false);
5993 bootstrapper
->weird_bootstrap
= true;
5994 jobmgr_assumes(jm
, job_setup_machport(bootstrapper
));
5995 } else if( bootstrapper
&& strncmp(session_type
, VPROCMGR_SESSION_SYSTEM
, sizeof(VPROCMGR_SESSION_SYSTEM
)) == 0 ) {
5996 bootstrapper
->is_bootstrapper
= true;
5997 if( jobmgr_assumes(jm
, pid1_magic
) ) {
5998 /* Have our system bootstrapper print out to the console. */
5999 bootstrapper
->stdoutpath
= strdup(_PATH_CONSOLE
);
6000 bootstrapper
->stderrpath
= strdup(_PATH_CONSOLE
);
6003 jobmgr_assumes(jm
, kevent_mod((uintptr_t)fileno(g_console
), EVFILT_VNODE
, EV_ADD
| EV_ONESHOT
, NOTE_REVOKE
, 0, jm
) != -1);
6008 jm
->session_initialized
= true;
6010 return bootstrapper
;
6014 jobmgr_delete_anything_with_port(jobmgr_t jm
, mach_port_t port
)
6016 struct machservice
*ms
, *next_ms
;
6019 /* Mach ports, unlike Unix descriptors, are reference counted. In other
6020 * words, when some program hands us a second or subsequent send right
6021 * to a port we already have open, the Mach kernel gives us the same
6022 * port number back and increments an reference count associated with
6023 * the port. This forces us, when discovering that a receive right at
6024 * the other end has been deleted, to wander all of our objects to see
6025 * what weird places clients might have handed us the same send right
6029 if (jm
== root_jobmgr
) {
6030 if (port
== inherited_bootstrap_port
) {
6031 jobmgr_assumes(jm
, launchd_mport_deallocate(port
) == KERN_SUCCESS
);
6032 inherited_bootstrap_port
= MACH_PORT_NULL
;
6034 return jobmgr_shutdown(jm
);
6037 LIST_FOREACH_SAFE(ms
, &port_hash
[HASH_PORT(port
)], port_hash_sle
, next_ms
) {
6038 if (ms
->port
== port
&& !ms
->recv
) {
6039 machservice_delete(ms
->job
, ms
, true);
6044 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
6045 jobmgr_delete_anything_with_port(jmi
, port
);
6048 if (jm
->req_port
== port
) {
6049 jobmgr_log(jm
, LOG_DEBUG
, "Request port died: %i", MACH_PORT_INDEX(port
));
6050 return jobmgr_shutdown(jm
);
6056 struct machservice
*
6057 jobmgr_lookup_service(jobmgr_t jm
, const char *name
, bool check_parent
, pid_t target_pid
)
6059 struct machservice
*ms
;
6062 jobmgr_log(jm
, LOG_DEBUG
, "Looking up %sservice %s", target_pid
? "per-PID " : "", name
);
6065 /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
6066 * bootstrap in other bootstraps.
6069 /* Start in the given bootstrap. */
6070 if( unlikely((target_j
= jobmgr_find_by_pid(jm
, target_pid
, false)) == NULL
) ) {
6071 /* If we fail, do a deep traversal. */
6072 if (unlikely((target_j
= jobmgr_find_by_pid_deep(root_jobmgr
, target_pid
, true)) == NULL
)) {
6073 jobmgr_log(jm
, LOG_DEBUG
, "Didn't find PID %i", target_pid
);
6078 SLIST_FOREACH(ms
, &target_j
->machservices
, sle
) {
6079 if (ms
->per_pid
&& strcmp(name
, ms
->name
) == 0) {
6084 job_log(target_j
, LOG_DEBUG
, "Didn't find per-PID Mach service: %s", name
);
6088 jobmgr_t jm_to_search
= ( g_flat_mach_namespace
&& !(jm
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) ) ? root_jobmgr
: jm
;
6089 LIST_FOREACH(ms
, &jm_to_search
->ms_hash
[hash_ms(name
)], name_hash_sle
) {
6090 if (!ms
->per_pid
&& strcmp(name
, ms
->name
) == 0) {
6095 if (jm
->parentmgr
== NULL
|| !check_parent
) {
6099 return jobmgr_lookup_service(jm
->parentmgr
, name
, true, 0);
6103 machservice_port(struct machservice
*ms
)
6109 machservice_job(struct machservice
*ms
)
6115 machservice_hidden(struct machservice
*ms
)
6121 machservice_active(struct machservice
*ms
)
6123 return ms
->isActive
;
6127 machservice_name(struct machservice
*ms
)
/*
 * machservice_drain_port: discard message(s) queued on a service port after
 * its job crashed, honoring the per-service drain_one_on_crash /
 * drain_all_on_crash flags.  Exception-handler ports get one pass through
 * launchd's exception runtime; ordinary ports are drained with a timed
 * mach_msg() receive whose messages are destroyed.
 * NOTE(review): the enclosing do/switch structure is partially elided here;
 * only the visible statements are documented.
 */
6133 machservice_drain_port(struct machservice
*ms
)
6135 bool drain_one
= ms
->drain_one_on_crash
;
6136 bool drain_all
= ms
->drain_all_on_crash
;
/* Caller must only invoke this when at least one drain flag is set. */
6138 if( !job_assumes(ms
->job
, (drain_one
|| drain_all
) == true) ) {
6142 job_log(ms
->job
, LOG_INFO
, "Draining %s...", ms
->name
);
/* Buffers sized from the MIG-generated exception subsystem unions. */
6144 char req_buff
[sizeof(union __RequestUnion__catch_mach_exc_subsystem
) * 2];
6145 char rep_buff
[sizeof(union __ReplyUnion__catch_mach_exc_subsystem
)];
6146 mig_reply_error_t
*req_hdr
= (mig_reply_error_t
*)&req_buff
;
6147 mig_reply_error_t
*rep_hdr
= (mig_reply_error_t
*)&rep_buff
;
/* Seeded to a value that is guaranteed not to equal MACH_MSG_SUCCESS. */
6149 mach_msg_return_t mr
= ~MACH_MSG_SUCCESS
;
6152 /* This should be a direct check on the Mach service to see if it's an exception-handling
6153 * port, and it will break things if ReportCrash or SafetyNet start advertising other
6154 * Mach services. But for now, it should be okay.
6156 if( ms
->job
->alt_exc_handler
|| ms
->job
->internal_exc_handler
) {
6157 mr
= launchd_exc_runtime_once(ms
->port
, sizeof(req_buff
), sizeof(rep_buff
), req_hdr
, rep_hdr
, 0);
/* Non-exception port: plain receive; options continue on an elided line
 * (presumably a timeout flag — TODO confirm). */
6159 mach_msg_options_t options
= MACH_RCV_MSG
|
6162 mr
= mach_msg((mach_msg_header_t
*)req_hdr
, options
, 0, sizeof(req_buff
), ms
->port
, 0, MACH_PORT_NULL
);
/* Received a queued message: destroy it (releases any carried rights). */
6164 case MACH_MSG_SUCCESS
:
6165 mach_msg_destroy((mach_msg_header_t
*)req_hdr
);
6167 case MACH_RCV_TIMED_OUT
:
6169 case MACH_RCV_TOO_LARGE
:
6170 runtime_syslog(LOG_WARNING
, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff
));
/* Keep draining until the queue is empty (timeout) when drain_all is set. */
6176 } while( drain_all
&& mr
!= MACH_RCV_TIMED_OUT
);
/*
 * machservice_delete: tear down a Mach service registration belonging to job
 * j.  Optionally drops into the kernel debugger (debug_on_close), closes the
 * receive right if launchd owns it and the service is inactive, releases the
 * send right, clears the global exception-server reference if it matched,
 * and unlinks the service from the special-port list, the job's service
 * list, and the name/port hash tables.
 * NOTE(review): the final free() of ms (if any) is on an elided line.
 */
6180 machservice_delete(job_t j
, struct machservice
*ms
, bool port_died
)
/* Debug aid: halt into the kernel debugger when this port is torn down. */
6182 if (unlikely(ms
->debug_on_close
)) {
6183 job_log(j
, LOG_NOTICE
, "About to enter kernel debugger because of Mach port: 0x%x", ms
->port
);
6184 job_assumes(j
, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER
) == KERN_SUCCESS
);
/* Only close the receive right when we hold it and no one has checked in. */
6187 if (ms
->recv
&& job_assumes(j
, !machservice_active(ms
))) {
6188 job_log(j
, LOG_DEBUG
, "Closing receive right for %s", ms
->name
);
6189 job_assumes(j
, launchd_mport_close_recv(ms
->port
) == KERN_SUCCESS
);
/* Always drop our send-right reference. */
6192 job_assumes(j
, launchd_mport_deallocate(ms
->port
) == KERN_SUCCESS
);
/* If this port was the global exception server, forget it. */
6194 if (unlikely(ms
->port
== the_exception_server
)) {
6195 the_exception_server
= 0;
6198 job_log(j
, LOG_DEBUG
, "Mach service deleted%s: %s", port_died
? " (port died)" : "", ms
->name
);
/* Unlink from the host special-port list when it was registered as one. */
6200 if (ms
->special_port_num
) {
6201 SLIST_REMOVE(&special_ports
, ms
, machservice
, special_port_sle
);
/* Unlink from the owning job and both lookup hashes. */
6204 SLIST_REMOVE(&j
->machservices
, ms
, machservice
, sle
);
6205 LIST_REMOVE(ms
, name_hash_sle
);
6206 LIST_REMOVE(ms
, port_hash_sle
);
/*
 * machservice_request_notifications: arm a port notification for ms->port
 * and mark the service active.  Default is a dead-name notification; a
 * branch (condition elided in this excerpt — presumably when launchd owns
 * the receive right) switches to port-destroyed notification and records the
 * owning job as checked in.
 */
6212 machservice_request_notifications(struct machservice
*ms
)
6214 mach_msg_id_t which
= MACH_NOTIFY_DEAD_NAME
;
6216 ms
->isActive
= true;
6219 which
= MACH_NOTIFY_PORT_DESTROYED
;
6220 job_checkin(ms
->job
);
6223 job_assumes(ms
->job
, launchd_mport_notify_req(ms
->port
, which
) == KERN_SUCCESS
);
/* Element-count and one-past-the-end helpers for fixed-size arrays. */
6226 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
6227 #define END_OF(x) (&(x)[NELEM(x)])
/*
 * mach_cmd2argv: split a command string into an argv-style vector.  Tokens
 * are whitespace-separated; a leading '"' starts a double-quoted token.  The
 * result is a single malloc() block: (nargs + 1) char* slots followed by the
 * copied strings, so the caller frees it with one free().
 * NOTE(review): the token-copy loop body and the NULL-terminator store are on
 * elided lines; at most 100 arguments / 1000 bytes of argument text fit the
 * fixed scratch arrays.
 */
6230 mach_cmd2argv(const char *string
)
6232 char *argv
[100], args
[1000];
6234 char *argp
= args
, term
, **argv_ret
, *co
;
6235 unsigned int nargs
= 0, i
;
/* Tokenize into the scratch arrays. */
6237 for (cp
= string
; *cp
;) {
6238 while (isspace(*cp
))
/* A '"' begins a quoted token; term records the expected closing char. */
6240 term
= (*cp
== '"') ? *cp
++ : '\0';
6241 if (nargs
< NELEM(argv
)) {
6242 argv
[nargs
++] = argp
;
/* Copy until terminator/whitespace or the scratch buffer is exhausted. */
6244 while (*cp
&& (term
? *cp
!= term
: !isspace(*cp
)) && argp
< END_OF(args
)) {
/* One allocation holds the pointer table plus all string storage. */
6261 argv_ret
= malloc((nargs
+ 1) * sizeof(char *) + strlen(string
) + 1);
6263 if (!launchd_assumes(argv_ret
!= NULL
)) {
/* String storage begins right after the (nargs + 1) pointer slots. */
6267 co
= (char *)argv_ret
+ (nargs
+ 1) * sizeof(char *);
6269 for (i
= 0; i
< nargs
; i
++) {
6270 strcpy(co
, argv
[i
]);
6272 co
+= strlen(argv
[i
]) + 1;
/* job_checkin: mark the job as having checked in with launchd.
 * NOTE(review): any additional bookkeeping lines are elided in this excerpt. */
6280 job_checkin(job_t j
)
6282 j
->checkedin
= true;
/* job_is_god: true when the job carries embedded-platform special privileges. */
6285 bool job_is_god(job_t j
)
6287 return j
->embedded_special_privileges
;
/*
 * job_ack_port_destruction: handle a port-destroyed notification — a receive
 * right for an advertised service has come back to launchd.  Looks the port
 * up in the port hash, handles drain-on-crash policy, deactivates the
 * service, deletes or resets it per its flags, redispatches the owning job,
 * and runs job-manager garbage collection.
 * NOTE(review): j's assignment from ms (presumably ms->job) is on an elided
 * line, as is the deferred-drain bookkeeping.
 */
6291 job_ack_port_destruction(mach_port_t p
)
6293 struct machservice
*ms
;
/* Find the service owning receive right p. */
6296 LIST_FOREACH(ms
, &port_hash
[HASH_PORT(p
)], port_hash_sle
) {
6297 if (ms
->recv
&& (ms
->port
== p
)) {
6302 if (!jobmgr_assumes(root_jobmgr
, ms
!= NULL
)) {
6308 jobmgr_log(root_jobmgr
, LOG_DEBUG
, "Receive right returned to us: %s", ms
->name
);
6310 /* Without being the exception handler, NOTE_EXIT is our only way to tell if the job
6311 * crashed, and we can't rely on NOTE_EXIT always being processed after all the job's
6312 * receive rights have been returned.
6314 * So when we get receive rights back, check to see if the job has been reaped yet. If
6315 * not, then we add this service to a list of services to be drained on crash if it's
6316 * requested that behavior. So, for a job with N receive rights all requesting that they
6317 * be drained on crash, we can safely handle the following sequence of events.
6319 * ReceiveRight0Returned
6320 * ReceiveRight1Returned
6321 * ReceiveRight2Returned
6322 * NOTE_EXIT (reap, get exit status)
6323 * ReceiveRight3Returned
6327 * ReceiveRight(N - 1)Returned
/* Drain immediately if the crash is already known; defer while the exit
 * status is still pending. */
6330 if( ms
->drain_one_on_crash
|| ms
->drain_all_on_crash
) {
6331 if( j
->crashed
&& j
->reaped
) {
6332 job_log(j
, LOG_DEBUG
, "Job has crashed. Draining port...");
6333 machservice_drain_port(ms
);
6334 } else if( !(j
->crashed
|| j
->reaped
) ) {
6335 job_log(j
, LOG_DEBUG
, "Job's exit status is still unknown. Deferring drain.");
/* The service is no longer checked in; apply its teardown policy. */
6339 ms
->isActive
= false;
6340 if (ms
->delete_on_destruction
) {
6341 machservice_delete(j
, ms
, false);
6342 } else if (ms
->reset
) {
6343 machservice_resetport(j
, ms
);
/* Give the job a chance to restart, then collect dead managers. */
6346 job_dispatch(j
, false);
6348 root_jobmgr
= jobmgr_do_garbage_collection(root_jobmgr
);
/*
 * job_ack_no_senders: handle a no-senders notification on the job's
 * privileged bootstrap port — clear the senders flag, close the receive
 * right, and redispatch the job.
 */
6354 job_ack_no_senders(job_t j
)
6356 j
->priv_port_has_senders
= false;
6358 job_assumes(j
, launchd_mport_close_recv(j
->j_port
) == KERN_SUCCESS
);
6361 job_log(j
, LOG_DEBUG
, "No more senders on privileged Mach bootstrap port");
6363 job_dispatch(j
, false);
/*
 * semaphoreitem_new: allocate a KeepAlive condition ("semaphore") for job j.
 * The optional `what` string is stored inline after the struct (allocation
 * is sized to include it).  Other-job conditions also register j on the
 * global "curious jobs" list, and externally-tracked reasons take a runtime
 * weak reference.
 * NOTE(review): the `what != NULL` guards around the strlen/strcpy and the
 * why/fd field stores are on elided lines.
 */
6367 semaphoreitem_new(job_t j
, semaphore_reason_t why
, const char *what
)
6369 struct semaphoreitem
*si
;
6370 size_t alloc_sz
= sizeof(struct semaphoreitem
);
/* Reserve room for the inline copy of `what`. */
6373 alloc_sz
+= strlen(what
) + 1;
6376 if (!job_assumes(j
, si
= calloc(1, alloc_sz
))) {
6384 strcpy(si
->what_init
, what
);
6387 SLIST_INSERT_HEAD(&j
->semaphores
, si
, sle
);
/* First other-job condition makes this job "curious" about its peers. */
6389 if( (why
== OTHER_JOB_ENABLED
|| why
== OTHER_JOB_DISABLED
) && !j
->nosy
) {
6390 job_log(j
, LOG_DEBUG
, "Job is interested in \"%s\".", what
);
6391 SLIST_INSERT_HEAD(&s_curious_jobs
, j
, curious_jobs_sle
);
6395 semaphoreitem_runtime_mod_ref(si
, true);
/*
 * semaphoreitem_runtime_mod_ref: add or drop a runtime weak reference for
 * this semaphore's reason.  The visible case labels are the internal reasons
 * that fall through without tracking; external reasons reach the
 * add/del weak-ref calls.  (The switch skeleton around the cases is elided
 * in this excerpt.)
 */
6401 semaphoreitem_runtime_mod_ref(struct semaphoreitem
*si
, bool add
)
6404 * External events need to be tracked.
6405 * Internal events do NOT need to be tracked.
6409 case SUCCESSFUL_EXIT
:
6411 case OTHER_JOB_ENABLED
:
6412 case OTHER_JOB_DISABLED
:
6413 case OTHER_JOB_ACTIVE
:
6414 case OTHER_JOB_INACTIVE
:
6421 runtime_add_weak_ref();
6423 runtime_del_weak_ref();
/*
 * semaphoreitem_delete: undo semaphoreitem_new — drop the weak reference,
 * unlink from the job, close any watch fd, and de-register the job from the
 * curious list when its last other-job condition goes away.
 * NOTE(review): the fd >= 0 guard and the final free(si) are on elided lines.
 */
6428 semaphoreitem_delete(job_t j
, struct semaphoreitem
*si
)
6430 semaphoreitem_runtime_mod_ref(si
, false);
6432 SLIST_REMOVE(&j
->semaphores
, si
, semaphoreitem
, sle
);
/* Close the path-watch descriptor if one was opened for this condition. */
6435 job_assumes(j
, runtime_close(si
->fd
) != -1);
6438 /* We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores. */
6439 if( (si
->why
== OTHER_JOB_ENABLED
|| si
->why
== OTHER_JOB_DISABLED
) && j
->nosy
) {
6441 SLIST_REMOVE(&s_curious_jobs
, j
, job_s
, curious_jobs_sle
);
/*
 * semaphoreitem_setup_dict_iter: launch_data dictionary iterator callback.
 * Maps each boolean value to the context's why_true/why_false reason and
 * creates a semaphore on the context's job keyed by the dictionary key.
 */
6448 semaphoreitem_setup_dict_iter(launch_data_t obj
, const char *key
, void *context
)
6450 struct semaphoreitem_dict_iter_context
*sdic
= context
;
6451 semaphore_reason_t why
;
6453 why
= launch_data_get_bool(obj
) ? sdic
->why_true
: sdic
->why_false
;
6455 semaphoreitem_new(sdic
->j
, why
, key
);
/*
 * semaphoreitem_setup: parse one entry of a job's KeepAlive dictionary.
 * Boolean keys (NetworkState, SuccessfulExit, AfterInitialDemand) create a
 * semaphore or set job flags directly; dictionary keys (PathState,
 * OtherJobActive, OtherJobEnabled) select a why_true/why_false pair and
 * iterate the sub-dictionary via semaphoreitem_setup_dict_iter.  Unknown
 * keys/types trip job_assumes(j, false).
 * NOTE(review): `j` is presumably taken from `context` on an elided line —
 * confirm against the full source; break statements are also elided.
 */
6459 semaphoreitem_setup(launch_data_t obj
, const char *key
, void *context
)
6461 struct semaphoreitem_dict_iter_context sdic
= { context
, 0, 0 };
6463 semaphore_reason_t why
;
6465 switch (launch_data_get_type(obj
)) {
6466 case LAUNCH_DATA_BOOL
:
6467 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE
) == 0) {
6468 why
= launch_data_get_bool(obj
) ? NETWORK_UP
: NETWORK_DOWN
;
6469 semaphoreitem_new(j
, why
, NULL
);
6470 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT
) == 0) {
6471 why
= launch_data_get_bool(obj
) ? SUCCESSFUL_EXIT
: FAILED_EXIT
;
6472 semaphoreitem_new(j
, why
, NULL
);
/* SuccessfulExit implies the job must run once to produce an exit status. */
6473 j
->start_pending
= true;
6474 } else if( strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND
) == 0 ) {
6475 j
->needs_kickoff
= launch_data_get_bool(obj
);
6477 job_assumes(j
, false);
6480 case LAUNCH_DATA_DICTIONARY
:
/* Each dictionary key picks the reason pair used by the sub-iterator. */
6481 if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE
) == 0) {
6482 sdic
.why_true
= PATH_EXISTS
;
6483 sdic
.why_false
= PATH_MISSING
;
6484 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE
) == 0) {
6485 sdic
.why_true
= OTHER_JOB_ACTIVE
;
6486 sdic
.why_false
= OTHER_JOB_INACTIVE
;
6487 } else if (strcasecmp(key
, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED
) == 0) {
6488 sdic
.why_true
= OTHER_JOB_ENABLED
;
6489 sdic
.why_false
= OTHER_JOB_DISABLED
;
6491 job_assumes(j
, false);
6495 launch_data_dict_iterate(obj
, semaphoreitem_setup_dict_iter
, &sdic
);
6498 job_assumes(j
, false);
/*
 * jobmgr_dispatch_all_semaphores: recurse through all sub-managers, then
 * redispatch every job in this manager that has at least one KeepAlive
 * semaphore (SAFE iteration — dispatch may remove entries).
 */
6504 jobmgr_dispatch_all_semaphores(jobmgr_t jm
)
6510 SLIST_FOREACH_SAFE(jmi
, &jm
->submgrs
, sle
, jmn
) {
6511 jobmgr_dispatch_all_semaphores(jmi
);
6514 LIST_FOREACH_SAFE(ji
, &jm
->jobs
, sle
, jn
) {
6515 if (!SLIST_EMPTY(&ji
->semaphores
)) {
6516 job_dispatch(ji
, false);
/*
 * cronemu: compute the next fire time for a cron-style StartCalendarInterval
 * spec (month/mday/hour/min; presumably -1 means wildcard — confirm against
 * the callers).  Starts from the current local time and, while the
 * constraints cannot be satisfied within the working year, advances to the
 * start of the next year and retries.  Returns the normalized time_t.
 */
6522 cronemu(int mon
, int mday
, int hour
, int min
)
6524 struct tm workingtm
;
6528 workingtm
= *localtime(&now
);
/* Let mktime determine DST; seconds are always zeroed for cron semantics. */
6530 workingtm
.tm_isdst
= -1;
6531 workingtm
.tm_sec
= 0;
6534 while (!cronemu_mon(&workingtm
, mon
, mday
, hour
, min
)) {
/* No match this year: roll to 00:00 Jan 1 of the next year and retry. */
6535 workingtm
.tm_year
++;
6536 workingtm
.tm_mon
= 0;
6537 workingtm
.tm_mday
= 1;
6538 workingtm
.tm_hour
= 0;
6539 workingtm
.tm_min
= 0;
6543 return mktime(&workingtm
);
/*
 * cronemu_wday: next fire time for a weekday-constrained calendar interval.
 * Advances day by day from now (resetting hour/min each step) until the
 * weekday matches and cronemu_hour can satisfy the hour/min constraints.
 * NOTE(review): wday normalization (e.g. 7 -> 0) is on an elided line —
 * confirm against the full source.
 */
6547 cronemu_wday(int wday
, int hour
, int min
)
6549 struct tm workingtm
;
6553 workingtm
= *localtime(&now
);
6555 workingtm
.tm_isdst
= -1;
6556 workingtm
.tm_sec
= 0;
6563 while (!(workingtm
.tm_wday
== wday
&& cronemu_hour(&workingtm
, hour
, min
))) {
6564 workingtm
.tm_mday
++;
6565 workingtm
.tm_hour
= 0;
6566 workingtm
.tm_min
= 0;
6570 return mktime(&workingtm
);
/*
 * cronemu_mon: satisfy the month constraint on *wtm, then delegate to
 * cronemu_mday.  Works on a copy, advancing one month at a time; `carrytest`
 * detects when a (presumably elided) mktime normalization carries past the
 * end of the year, which fails the attempt.  The wildcard branch and the
 * tm_mon assignment are on elided lines.
 */
6574 cronemu_mon(struct tm
*wtm
, int mon
, int mday
, int hour
, int min
)
6577 struct tm workingtm
= *wtm
;
6580 while (!cronemu_mday(&workingtm
, mday
, hour
, min
)) {
/* Day/hour/min could not match this month: try from the 1st of the next. */
6582 workingtm
.tm_mday
= 1;
6583 workingtm
.tm_hour
= 0;
6584 workingtm
.tm_min
= 0;
6585 carrytest
= workingtm
.tm_mon
;
/* If normalization changed the month we expected, the year rolled over. */
6587 if (carrytest
!= workingtm
.tm_mon
) {
/* Requested month already passed this year. */
6595 if (mon
< wtm
->tm_mon
) {
6599 if (mon
> wtm
->tm_mon
) {
6606 return cronemu_mday(wtm
, mday
, hour
, min
);
/*
 * cronemu_mday: satisfy the day-of-month constraint on *wtm, then delegate
 * to cronemu_hour.  Mirrors cronemu_mon one level down: advance a copy day
 * by day, using `carrytest` to detect a month rollover during (presumably
 * elided) normalization.  The wildcard branch is on elided lines.
 */
6610 cronemu_mday(struct tm
*wtm
, int mday
, int hour
, int min
)
6613 struct tm workingtm
= *wtm
;
6616 while (!cronemu_hour(&workingtm
, hour
, min
)) {
6617 workingtm
.tm_mday
++;
6618 workingtm
.tm_hour
= 0;
6619 workingtm
.tm_min
= 0;
6620 carrytest
= workingtm
.tm_mday
;
/* Rolled into the next month: give up at this level. */
6622 if (carrytest
!= workingtm
.tm_mday
) {
/* Requested day already passed this month. */
6630 if (mday
< wtm
->tm_mday
) {
6634 if (mday
> wtm
->tm_mday
) {
6635 wtm
->tm_mday
= mday
;
6640 return cronemu_hour(wtm
, hour
, min
);
/*
 * cronemu_hour: satisfy the hour constraint on *wtm, then delegate to
 * cronemu_min.  Advances a copy hour by hour; `carrytest` detects a day
 * rollover during (presumably elided) normalization.  The wildcard branch is
 * on elided lines.
 */
6644 cronemu_hour(struct tm
*wtm
, int hour
, int min
)
6647 struct tm workingtm
= *wtm
;
6650 while (!cronemu_min(&workingtm
, min
)) {
6651 workingtm
.tm_hour
++;
6652 workingtm
.tm_min
= 0;
6653 carrytest
= workingtm
.tm_hour
;
/* Rolled into the next day: give up at this level. */
6655 if (carrytest
!= workingtm
.tm_hour
) {
/* Requested hour already passed today. */
6663 if (hour
< wtm
->tm_hour
) {
6667 if (hour
> wtm
->tm_hour
) {
6668 wtm
->tm_hour
= hour
;
6672 return cronemu_min(wtm
, min
);
/*
 * cronemu_min: base case of the cron emulation — satisfy the minute
 * constraint on *wtm.  Fails when the requested minute has already passed in
 * this hour; otherwise advances tm_min to it.  The wildcard branch, the
 * return statements, and the tm_min assignment are on elided lines.
 */
6676 cronemu_min(struct tm
*wtm
, int min
)
6682 if (min
< wtm
->tm_min
) {
6686 if (min
> wtm
->tm_min
) {
6694 job_mig_setup_shmem(job_t j
, mach_port_t
*shmem_port
)
6696 memory_object_size_t size_of_page
, size_of_page_orig
;
6697 vm_address_t vm_addr
;
6700 if (!launchd_assumes(j
!= NULL
)) {
6701 return BOOTSTRAP_NO_MEMORY
;
6704 if (unlikely(j
->anonymous
)) {
6705 job_log(j
, LOG_DEBUG
, "Anonymous job tried to setup shared memory");
6706 return BOOTSTRAP_NOT_PRIVILEGED
;
6709 if (unlikely(j
->shmem
)) {
6710 job_log(j
, LOG_ERR
, "Tried to setup shared memory more than once");
6711 return BOOTSTRAP_NOT_PRIVILEGED
;
6714 size_of_page_orig
= size_of_page
= getpagesize();
6716 kr
= vm_allocate(mach_task_self(), &vm_addr
, size_of_page
, true);
6718 if (!job_assumes(j
, kr
== 0)) {
6722 j
->shmem
= (typeof(j
->shmem
))vm_addr
;
6723 j
->shmem
->vp_shmem_standby_timeout
= j
->timeout
;
6725 kr
= mach_make_memory_entry_64(mach_task_self(), &size_of_page
,
6726 (memory_object_offset_t
)vm_addr
, VM_PROT_READ
|VM_PROT_WRITE
, shmem_port
, 0);
6728 if (job_assumes(j
, kr
== 0)) {
6729 job_assumes(j
, size_of_page
== size_of_page_orig
);
6732 /* no need to inherit this in child processes */
6733 job_assumes(j
, vm_inherit(mach_task_self(), (vm_address_t
)j
->shmem
, size_of_page_orig
, VM_INHERIT_NONE
) == 0);
6739 job_mig_create_server(job_t j
, cmd_t server_cmd
, uid_t server_uid
, boolean_t on_demand
, mach_port_t
*server_portp
)
6741 struct ldcred
*ldc
= runtime_get_caller_creds();
6744 if (!launchd_assumes(j
!= NULL
)) {
6745 return BOOTSTRAP_NO_MEMORY
;
6748 if (unlikely(j
->deny_job_creation
)) {
6749 return BOOTSTRAP_NOT_PRIVILEGED
;
6753 const char **argv
= (const char **)mach_cmd2argv(server_cmd
);
6754 if (unlikely(argv
== NULL
)) {
6755 return BOOTSTRAP_NO_MEMORY
;
6757 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_PATH
, argv
[0]) > 0)) {
6759 return BOOTSTRAP_NOT_PRIVILEGED
;
6764 job_log(j
, LOG_DEBUG
, "Server create attempt: %s", server_cmd
);
6767 if (ldc
->euid
|| ldc
->uid
) {
6768 job_log(j
, LOG_WARNING
, "Server create attempt moved to per-user launchd: %s", server_cmd
);
6769 return VPROC_ERR_TRY_PER_USER
;
6772 if (unlikely(server_uid
!= getuid())) {
6773 job_log(j
, LOG_WARNING
, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
6774 server_cmd
, getuid(), server_uid
);
6776 server_uid
= 0; /* zero means "do nothing" */
6779 js
= job_new_via_mach_init(j
, server_cmd
, server_uid
, on_demand
);
6781 if (unlikely(js
== NULL
)) {
6782 return BOOTSTRAP_NO_MEMORY
;
6785 *server_portp
= js
->j_port
;
6786 return BOOTSTRAP_SUCCESS
;
6790 job_mig_send_signal(job_t j
, mach_port_t srp
, name_t targetlabel
, int sig
)
6792 struct ldcred
*ldc
= runtime_get_caller_creds();
6795 if (!launchd_assumes(j
!= NULL
)) {
6796 return BOOTSTRAP_NO_MEMORY
;
6799 if( unlikely(ldc
->euid
!= 0 && ldc
->euid
!= getuid()) || j
->deny_job_creation
) {
6803 if( unlikely(ldc
->euid
!= 0 && ldc
->euid
!= getuid()) || j
->deny_job_creation
) {
6804 #if TARGET_OS_EMBEDDED
6805 if( !j
->embedded_special_privileges
) {
6806 return BOOTSTRAP_NOT_PRIVILEGED
;
6809 return BOOTSTRAP_NOT_PRIVILEGED
;
6814 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
6815 return BOOTSTRAP_NOT_PRIVILEGED
;
6819 if (unlikely(!(otherj
= job_find(targetlabel
)))) {
6820 return BOOTSTRAP_UNKNOWN_SERVICE
;
6823 #if TARGET_OS_EMBEDDED
6824 if( j
->embedded_special_privileges
&& strcmp(j
->username
, otherj
->username
) != 0 ) {
6825 return BOOTSTRAP_NOT_PRIVILEGED
;
6829 if (sig
== VPROC_MAGIC_UNLOAD_SIGNAL
) {
6830 bool do_block
= otherj
->p
;
6832 if (otherj
->anonymous
) {
6833 return BOOTSTRAP_NOT_PRIVILEGED
;
6839 job_log(j
, LOG_DEBUG
, "Blocking MIG return of job_remove(): %s", otherj
->label
);
6840 /* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
6841 job_assumes(otherj
, waiting4removal_new(otherj
, srp
));
6842 return MIG_NO_REPLY
;
6846 } else if (sig
== VPROC_MAGIC_TRYKILL_SIGNAL
) {
6847 if (!j
->kill_via_shmem
) {
6848 return BOOTSTRAP_NOT_PRIVILEGED
;
6852 j
->sent_kill_via_shmem
= true;
6853 job_assumes(j
, runtime_kill(otherj
->p
, SIGKILL
) != -1);
6856 #if !TARGET_OS_EMBEDDED
6857 if (__sync_bool_compare_and_swap(&j
->shmem
->vp_shmem_transaction_cnt
, 0, -1)) {
6858 j
->shmem
->vp_shmem_flags
|= VPROC_SHMEM_EXITING
;
6859 j
->sent_kill_via_shmem
= true;
6860 job_assumes(j
, runtime_kill(otherj
->p
, SIGKILL
) != -1);
6864 return BOOTSTRAP_NOT_PRIVILEGED
;
6865 } else if (otherj
->p
) {
6866 job_assumes(j
, runtime_kill(otherj
->p
, sig
) != -1);
/*
 * job_mig_log_forward (MIG routine): accept a batch of log records from a
 * per-user launchd and forward them into this launchd's log runtime under
 * the caller's audited euid/egid.  Only per-user launchd jobs may call this.
 */
6873 job_mig_log_forward(job_t j
, vm_offset_t inval
, mach_msg_type_number_t invalCnt
)
6875 struct ldcred
*ldc
= runtime_get_caller_creds();
6877 if (!launchd_assumes(j
!= NULL
)) {
6878 return BOOTSTRAP_NO_MEMORY
;
6881 if (!job_assumes(j
, j
->per_user
)) {
6882 return BOOTSTRAP_NOT_PRIVILEGED
;
6885 return runtime_log_forward(ldc
->euid
, ldc
->egid
, inval
, invalCnt
);
/*
 * job_mig_log_drain (MIG routine): hand launchd's buffered log records to
 * the caller via out-of-line memory.  Root only (any non-zero effective UID
 * is refused); the reply may be deferred via the srp reply port inside
 * runtime_log_drain.
 */
6889 job_mig_log_drain(job_t j
, mach_port_t srp
, vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
6891 struct ldcred
*ldc
= runtime_get_caller_creds();
6893 if (!launchd_assumes(j
!= NULL
)) {
6894 return BOOTSTRAP_NO_MEMORY
;
6897 if (unlikely(ldc
->euid
)) {
6898 return BOOTSTRAP_NOT_PRIVILEGED
;
6901 return runtime_log_drain(srp
, outval
, outvalCnt
);
6905 job_mig_swap_complex(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
,
6906 vm_offset_t inval
, mach_msg_type_number_t invalCnt
,
6907 vm_offset_t
*outval
, mach_msg_type_number_t
*outvalCnt
)
6910 launch_data_t input_obj
= NULL
, output_obj
= NULL
;
6911 size_t data_offset
= 0;
6913 struct ldcred
*ldc
= runtime_get_caller_creds();
6915 if (!launchd_assumes(j
!= NULL
)) {
6916 return BOOTSTRAP_NO_MEMORY
;
6919 if (unlikely(inkey
&& ldc
->euid
&& ldc
->euid
!= getuid())) {
6920 return BOOTSTRAP_NOT_PRIVILEGED
;
6923 if (unlikely(inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
))) {
6927 if (inkey
&& outkey
) {
6928 action
= "Swapping";
6935 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
6937 *outvalCnt
= 20 * 1024 * 1024;
6938 mig_allocate(outval
, *outvalCnt
);
6939 if (!job_assumes(j
, *outval
!= 0)) {
6943 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK
);
6944 if (unlikely(invalCnt
&& !job_assumes(j
, (input_obj
= launch_data_unpack((void *)inval
, invalCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
))) {
6949 case VPROC_GSK_ENVIRONMENT
:
6950 if (!job_assumes(j
, (output_obj
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
6953 jobmgr_export_env_from_other_jobs(j
->mgr
, output_obj
);
6954 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK
);
6955 if (!job_assumes(j
, launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
) != 0)) {
6958 launch_data_free(output_obj
);
6960 case VPROC_GSK_ALLJOBS
:
6961 if (!job_assumes(j
, (output_obj
= job_export_all()) != NULL
)) {
6964 ipc_revoke_fds(output_obj
);
6965 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK
);
6966 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
6967 if (!job_assumes(j
, packed_size
!= 0)) {
6970 launch_data_free(output_obj
);
6972 case VPROC_GSK_MGR_NAME
:
6973 if( !job_assumes(j
, (output_obj
= launch_data_new_string(j
->mgr
->name
)) != NULL
) ) {
6976 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
6977 if (!job_assumes(j
, packed_size
!= 0)) {
6981 launch_data_free(output_obj
);
6983 case VPROC_GSK_JOB_OVERRIDES_DB
:
6984 if( !job_assumes(j
, (output_obj
= launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_OVERRIDES
))) != NULL
) ) {
6987 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
6988 if (!job_assumes(j
, packed_size
!= 0)) {
6992 launch_data_free(output_obj
);
6994 case VPROC_GSK_JOB_CACHE_DB
:
6995 if( !job_assumes(j
, (output_obj
= launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_JOBCACHE
))) != NULL
) ) {
6998 packed_size
= launch_data_pack(output_obj
, (void *)*outval
, *outvalCnt
, NULL
, NULL
);
6999 if (!job_assumes(j
, packed_size
!= 0)) {
7003 job_log(j
, LOG_DEBUG
, "Location of job cache database: %s", launch_data_get_string(output_obj
));
7005 launch_data_free(output_obj
);
7008 mig_deallocate(*outval
, *outvalCnt
);
7016 if (invalCnt
) switch (inkey
) {
7017 case VPROC_GSK_ENVIRONMENT
:
7018 if( launch_data_get_type(input_obj
) == LAUNCH_DATA_DICTIONARY
) {
7020 job_log(j
, LOG_INFO
, "Setting environment for a currently active job. This environment will take effect on the next invocation of the job.");
7022 launch_data_dict_iterate(input_obj
, envitem_setup_one_shot
, j
);
7031 mig_deallocate(inval
, invalCnt
);
7037 mig_deallocate(*outval
, *outvalCnt
);
7043 job_mig_swap_integer(job_t j
, vproc_gsk_t inkey
, vproc_gsk_t outkey
, int64_t inval
, int64_t *outval
)
7046 kern_return_t kr
= 0;
7047 struct ldcred
*ldc
= runtime_get_caller_creds();
7050 if (!launchd_assumes(j
!= NULL
)) {
7051 return BOOTSTRAP_NO_MEMORY
;
7054 if (unlikely(inkey
&& ldc
->euid
&& ldc
->euid
!= getuid())) {
7055 return BOOTSTRAP_NOT_PRIVILEGED
;
7058 if (unlikely(inkey
&& outkey
&& !job_assumes(j
, inkey
== outkey
))) {
7062 if (inkey
&& outkey
) {
7063 action
= "Swapping";
7070 job_log(j
, LOG_DEBUG
, "%s key: %u", action
, inkey
? inkey
: outkey
);
7073 case VPROC_GSK_ABANDON_PROCESS_GROUP
:
7074 *outval
= j
->abandon_pg
;
7076 case VPROC_GSK_LAST_EXIT_STATUS
:
7077 *outval
= j
->last_exit_status
;
7079 case VPROC_GSK_MGR_UID
:
7082 case VPROC_GSK_MGR_PID
:
7085 case VPROC_GSK_IS_MANAGED
:
7086 *outval
= j
->anonymous
? 0 : 1;
7088 case VPROC_GSK_BASIC_KEEPALIVE
:
7089 *outval
= !j
->ondemand
;
7091 case VPROC_GSK_START_INTERVAL
:
7092 *outval
= j
->start_interval
;
7094 case VPROC_GSK_IDLE_TIMEOUT
:
7095 *outval
= j
->timeout
;
7097 case VPROC_GSK_EXIT_TIMEOUT
:
7098 *outval
= j
->exit_timeout
;
7100 case VPROC_GSK_GLOBAL_LOG_MASK
:
7101 oldmask
= runtime_setlogmask(LOG_UPTO(LOG_DEBUG
));
7103 runtime_setlogmask(oldmask
);
7105 case VPROC_GSK_GLOBAL_UMASK
:
7110 case VPROC_GSK_TRANSACTIONS_ENABLED
:
7111 job_log(j
, LOG_DEBUG
, "Reading transaction model status.");
7112 *outval
= j
->kill_via_shmem
;
7114 case VPROC_GSK_WAITFORDEBUGGER
:
7115 *outval
= j
->wait4debugger
;
7117 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT
:
7118 *outval
= j
->embedded_special_privileges
;
7129 case VPROC_GSK_ABANDON_PROCESS_GROUP
:
7130 j
->abandon_pg
= (bool)inval
;
7132 case VPROC_GSK_GLOBAL_ON_DEMAND
:
7133 job_log(j
, LOG_DEBUG
, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval
? "true" : "false", j
->forced_peers_to_demand_mode
? "true" : "false");
7134 kr
= job_set_global_on_demand(j
, (bool)inval
) ? 0 : 1;
7136 case VPROC_GSK_BASIC_KEEPALIVE
:
7137 j
->ondemand
= !inval
;
7139 case VPROC_GSK_START_INTERVAL
:
7140 if (inval
> UINT32_MAX
|| inval
< 0) {
7143 if (j
->start_interval
== 0) {
7144 runtime_add_weak_ref();
7146 j
->start_interval
= (typeof(j
->start_interval
)) inval
;
7147 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_ADD
, NOTE_SECONDS
, j
->start_interval
, j
) != -1);
7148 } else if (j
->start_interval
) {
7149 job_assumes(j
, kevent_mod((uintptr_t)&j
->start_interval
, EVFILT_TIMER
, EV_DELETE
, 0, 0, NULL
) != -1);
7150 if (j
->start_interval
!= 0) {
7151 runtime_del_weak_ref();
7153 j
->start_interval
= 0;
7156 case VPROC_GSK_IDLE_TIMEOUT
:
7157 if (inval
< 0 || inval
> UINT32_MAX
) {
7160 j
->timeout
= (typeof(j
->timeout
)) inval
;
7163 case VPROC_GSK_EXIT_TIMEOUT
:
7164 if (inval
< 0 || inval
> UINT32_MAX
) {
7167 j
->exit_timeout
= (typeof(j
->exit_timeout
)) inval
;
7170 case VPROC_GSK_GLOBAL_LOG_MASK
:
7171 if (inval
< 0 || inval
> UINT32_MAX
) {
7174 runtime_setlogmask((int) inval
);
7177 case VPROC_GSK_GLOBAL_UMASK
:
7178 launchd_assert(sizeof (mode_t
) == 2);
7179 if (inval
< 0 || inval
> UINT16_MAX
) {
7182 umask((mode_t
) inval
);
7185 case VPROC_GSK_TRANSACTIONS_ENABLED
:
7186 if( !job_assumes(j
, inval
!= 0) ) {
7187 job_log(j
, LOG_WARNING
, "Attempt to unregister from transaction model. This is not supported.");
7190 job_log(j
, LOG_DEBUG
, "Now participating in transaction model.");
7191 j
->kill_via_shmem
= (bool)inval
;
7192 job_log(j
, LOG_DEBUG
, "j->kill_via_shmem = %s", j
->kill_via_shmem
? "true" : "false");
7195 case VPROC_GSK_WEIRD_BOOTSTRAP
:
7196 if( job_assumes(j
, j
->weird_bootstrap
) ) {
7197 job_log(j
, LOG_DEBUG
, "Unsetting weird bootstrap.");
7199 mach_msg_size_t mxmsgsz
= (typeof(mxmsgsz
)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem
);
7201 if (job_mig_protocol_vproc_subsystem
.maxsize
> mxmsgsz
) {
7202 mxmsgsz
= job_mig_protocol_vproc_subsystem
.maxsize
;
7205 job_assumes(j
, runtime_add_mport(j
->mgr
->jm_port
, protocol_vproc_server
, mxmsgsz
) == KERN_SUCCESS
);
7206 j
->weird_bootstrap
= false;
7209 case VPROC_GSK_WAITFORDEBUGGER
:
7210 j
->wait4debugger_oneshot
= inval
;
7212 case VPROC_GSK_PERUSER_SUSPEND
:
7213 if( job_assumes(j
, pid1_magic
&& ldc
->euid
== 0) ) {
7214 mach_port_t junk
= MACH_PORT_NULL
;
7215 job_t jpu
= jobmgr_lookup_per_user_context_internal(j
, (uid_t
)inval
, false, &junk
);
7216 if( job_assumes(j
, jpu
!= NULL
) ) {
7217 struct suspended_peruser
*spi
= NULL
;
7218 LIST_FOREACH( spi
, &j
->suspended_perusers
, sle
) {
7219 if( (int64_t)(spi
->j
->mach_uid
) == inval
) {
7220 job_log(j
, LOG_WARNING
, "Job tried to suspend per-user launchd for UID %lli twice.", inval
);
7226 job_log(j
, LOG_INFO
, "Job is suspending the per-user launchd for UID %lli.", inval
);
7227 spi
= (struct suspended_peruser
*)calloc(sizeof(struct suspended_peruser
), 1);
7228 if( job_assumes(j
, spi
!= NULL
) ) {
7230 spi
->j
->peruser_suspend_count
++;
7231 LIST_INSERT_HEAD(&j
->suspended_perusers
, spi
, sle
);
7234 kr
= BOOTSTRAP_NO_MEMORY
;
7242 case VPROC_GSK_PERUSER_RESUME
:
7243 if( job_assumes(j
, pid1_magic
== true) ) {
7244 struct suspended_peruser
*spi
= NULL
, *spt
= NULL
;
7245 LIST_FOREACH_SAFE( spi
, &j
->suspended_perusers
, sle
, spt
) {
7246 if( (int64_t)(spi
->j
->mach_uid
) == inval
) {
7247 spi
->j
->peruser_suspend_count
--;
7248 LIST_REMOVE(spi
, sle
);
7249 job_log(j
, LOG_INFO
, "Job is resuming the per-user launchd for UID %lli.", inval
);
7254 if( !job_assumes(j
, spi
!= NULL
) ) {
7255 job_log(j
, LOG_WARNING
, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval
);
7256 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
7257 } else if( spi
->j
->peruser_suspend_count
== 0 ) {
7258 job_dispatch(spi
->j
, false);
7276 job_mig_post_fork_ping(job_t j
, task_t child_task
, mach_port_t
*audit_session
)
7278 struct machservice
*ms
;
7280 if (!launchd_assumes(j
!= NULL
)) {
7281 return BOOTSTRAP_NO_MEMORY
;
7284 job_log(j
, LOG_DEBUG
, "Post fork ping.");
7286 job_setup_exception_port(j
, child_task
);
7288 SLIST_FOREACH(ms
, &special_ports
, special_port_sle
) {
7289 if (j
->per_user
&& (ms
->special_port_num
!= TASK_ACCESS_PORT
)) {
7290 /* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
7294 errno
= task_set_special_port(child_task
, ms
->special_port_num
, ms
->port
);
7296 if (unlikely(errno
)) {
7297 int desired_log_level
= LOG_ERR
;
7302 desired_log_level
= LOG_WARNING
;
7304 if (ms
->special_port_num
== TASK_SEATBELT_PORT
) {
7305 desired_log_level
= LOG_DEBUG
;
7309 job_log(j
, desired_log_level
, "Could not setup Mach task special port %u: %s", ms
->special_port_num
, mach_error_string(errno
));
7313 mach_port_t _session
= MACH_PORT_NULL
;
7314 #if !TARGET_OS_EMBEDDED
7315 if( !j
->anonymous
&& !j
->per_user
) {
7316 job_log(j
, LOG_DEBUG
, "Returning session port %u", j
->audit_session
);
7317 _session
= j
->audit_session
;
7320 *audit_session
= _session
;
7321 job_assumes(j
, launchd_mport_deallocate(child_task
) == KERN_SUCCESS
);
7327 job_mig_reboot2(job_t j
, uint64_t flags
)
7329 char who_started_the_reboot
[2048] = "";
7330 struct kinfo_proc kp
;
7331 struct ldcred
*ldc
= runtime_get_caller_creds();
7334 if (!launchd_assumes(j
!= NULL
)) {
7335 return BOOTSTRAP_NO_MEMORY
;
7338 if (unlikely(!pid1_magic
)) {
7339 return BOOTSTRAP_NOT_PRIVILEGED
;
7342 #if !TARGET_OS_EMBEDDED
7343 if (unlikely(ldc
->euid
)) {
7345 if( unlikely(ldc
->euid
) && !j
->embedded_special_privileges
) {
7347 return BOOTSTRAP_NOT_PRIVILEGED
;
7350 for (pid_to_log
= ldc
->pid
; pid_to_log
; pid_to_log
= kp
.kp_eproc
.e_ppid
) {
7351 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, pid_to_log
};
7352 size_t who_offset
, len
= sizeof(kp
);
7354 if (!job_assumes(j
, sysctl(mib
, 4, &kp
, &len
, NULL
, 0) != -1)) {
7358 if( !job_assumes(j
, pid_to_log
!= kp
.kp_eproc
.e_ppid
) ) {
7359 job_log(j
, LOG_WARNING
, "Job which is its own parent started reboot.");
7360 snprintf(who_started_the_reboot
, sizeof(who_started_the_reboot
), "%s[%u]->%s[%u]->%s[%u]->...", kp
.kp_proc
.p_comm
, pid_to_log
, kp
.kp_proc
.p_comm
, pid_to_log
, kp
.kp_proc
.p_comm
, pid_to_log
);
7364 who_offset
= strlen(who_started_the_reboot
);
7365 snprintf(who_started_the_reboot
+ who_offset
, sizeof(who_started_the_reboot
) - who_offset
,
7366 " %s[%u]%s", kp
.kp_proc
.p_comm
, pid_to_log
, kp
.kp_eproc
.e_ppid
? " ->" : "");
7369 root_jobmgr
->reboot_flags
= (int)flags
;
7373 job_log(j
, LOG_DEBUG
, "reboot2() initiated by:%s", who_started_the_reboot
);
/*
 * job_mig_getsocket (MIG routine): copy the launchd IPC socket path into the
 * caller-supplied name_t buffer.  Jobs denied job creation are refused;
 * sockpath is presumably established by an elided ipc-server setup call just
 * above the NULL check — confirm against the full source.
 */
7379 job_mig_getsocket(job_t j
, name_t spr
)
7381 if (!launchd_assumes(j
!= NULL
)) {
7382 return BOOTSTRAP_NO_MEMORY
;
7385 if( j
->deny_job_creation
) {
7386 return BOOTSTRAP_NOT_PRIVILEGED
;
7391 if (unlikely(!sockpath
)) {
7392 return BOOTSTRAP_NO_MEMORY
;
/* NOTE(review): strncpy does not NUL-terminate when sockpath fills the
 * name_t buffer exactly; callers must tolerate an unterminated name, or this
 * should bound the copy at sizeof(name_t) - 1. */
7395 strncpy(spr
, sockpath
, sizeof(name_t
));
7397 return BOOTSTRAP_SUCCESS
;
/*
 * job_mig_log (MIG routine): log a message on behalf of the calling job at
 * priority `pri`.  A non-zero `err` is stored into errno so job_log_error
 * can append the strerror text; otherwise the message is logged verbatim.
 */
7401 job_mig_log(job_t j
, int pri
, int err
, logmsg_t msg
)
7403 if (!launchd_assumes(j
!= NULL
)) {
7404 return BOOTSTRAP_NO_MEMORY
;
7407 if ((errno
= err
)) {
7408 job_log_error(j
, pri
, "%s", msg
);
7410 job_log(j
, pri
, "%s", msg
);
7417 jobmgr_lookup_per_user_context_internal(job_t j
, uid_t which_user
, bool dispatch
, mach_port_t
*mp
)
7420 LIST_FOREACH(ji
, &root_jobmgr
->jobs
, sle
) {
7421 if (!ji
->per_user
) {
7424 if (ji
->mach_uid
!= which_user
) {
7427 if (SLIST_EMPTY(&ji
->machservices
)) {
7430 if (!SLIST_FIRST(&ji
->machservices
)->per_user_hack
) {
7436 if( unlikely(ji
== NULL
) ) {
7437 struct machservice
*ms
;
7440 job_log(j
, LOG_DEBUG
, "Creating per user launchd job for UID: %u", which_user
);
7442 sprintf(lbuf
, "com.apple.launchd.peruser.%u", which_user
);
7444 ji
= job_new(root_jobmgr
, lbuf
, "/sbin/launchd", NULL
);
7447 ji
->mach_uid
= which_user
;
7448 ji
->per_user
= true;
7449 ji
->kill_via_shmem
= true;
7452 char pu_db
[PATH_MAX
];
7453 snprintf(pu_db
, sizeof(pu_db
), LAUNCHD_DB_PREFIX
"/%s", lbuf
);
7455 bool created
= false;
7456 int err
= stat(pu_db
, &sb
);
7457 if( (err
== -1 && errno
== ENOENT
) || (err
== 0 && !S_ISDIR(sb
.st_mode
)) ) {
7459 char move_aside
[PATH_MAX
];
7460 snprintf(move_aside
, sizeof(move_aside
), LAUNCHD_DB_PREFIX
"/%s.movedaside", lbuf
);
7462 job_assumes(ji
, rename(pu_db
, move_aside
) != -1);
7465 job_assumes(ji
, mkdir(pu_db
, S_IRWXU
) != -1);
7466 job_assumes(ji
, chown(pu_db
, which_user
, 0) != -1);
7471 if( !job_assumes(ji
, sb
.st_uid
== which_user
) ) {
7472 job_assumes(ji
, chown(pu_db
, which_user
, 0) != -1);
7474 if( !job_assumes(ji
, sb
.st_gid
== 0) ) {
7475 job_assumes(ji
, chown(pu_db
, which_user
, 0) != -1);
7477 if( !job_assumes(ji
, sb
.st_mode
== (S_IRWXU
| S_IFDIR
)) ) {
7478 job_assumes(ji
, chmod(pu_db
, S_IRWXU
) != -1);
7482 if ((ms
= machservice_new(ji
, lbuf
, mp
, false)) == NULL
) {
7486 ms
->per_user_hack
= true;
7489 ji
= dispatch
? job_dispatch(ji
, false) : ji
;
7493 *mp
= machservice_port(SLIST_FIRST(&ji
->machservices
));
7494 job_log(j
, LOG_DEBUG
, "Per user launchd job found for UID: %u", which_user
);
7501 job_mig_lookup_per_user_context(job_t j
, uid_t which_user
, mach_port_t
*up_cont
)
7503 struct ldcred
*ldc
= runtime_get_caller_creds();
7506 #if TARGET_OS_EMBEDDED
7507 /* There is no need for per-user launchd's on embedded. */
7508 job_log(j
, LOG_ERR
, "Per-user launchds are not supported on this platform.");
7509 return BOOTSTRAP_NOT_PRIVILEGED
;
7513 if (unlikely(sandbox_check(ldc
->pid
, "mach-per-user-lookup", SANDBOX_FILTER_NONE
) > 0)) {
7514 return BOOTSTRAP_NOT_PRIVILEGED
;
7518 if (!launchd_assumes(j
!= NULL
)) {
7519 return BOOTSTRAP_NO_MEMORY
;
7522 job_log(j
, LOG_INFO
, "Looking up per user launchd for UID: %u", which_user
);
7524 if (unlikely(!pid1_magic
)) {
7525 job_log(j
, LOG_ERR
, "Only PID 1 supports per user launchd lookups.");
7526 return BOOTSTRAP_NOT_PRIVILEGED
;
7529 if (ldc
->euid
|| ldc
->uid
) {
7530 which_user
= ldc
->euid
?: ldc
->uid
;
7533 *up_cont
= MACH_PORT_NULL
;
7535 jpu
= jobmgr_lookup_per_user_context_internal(j
, which_user
, true, up_cont
);
7541 job_mig_check_in2(job_t j
, name_t servicename
, mach_port_t
*serviceportp
, uint64_t flags
)
7543 bool per_pid_service
= flags
& BOOTSTRAP_PER_PID_SERVICE
;
7544 struct ldcred
*ldc
= runtime_get_caller_creds();
7545 struct machservice
*ms
;
7548 if (!launchd_assumes(j
!= NULL
)) {
7549 return BOOTSTRAP_NO_MEMORY
;
7552 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, per_pid_service
? ldc
->pid
: 0);
7555 *serviceportp
= MACH_PORT_NULL
;
7557 if (unlikely((ms
= machservice_new(j
, servicename
, serviceportp
, per_pid_service
)) == NULL
)) {
7558 return BOOTSTRAP_NO_MEMORY
;
7561 /* Treat this like a legacy job. */
7562 if( !j
->legacy_mach_job
) {
7563 ms
->isActive
= true;
7567 if (!(j
->anonymous
|| j
->legacy_LS_job
|| j
->legacy_mach_job
)) {
7568 job_log(j
, LOG_SCOLDING
, "Please add the following service to the configuration file for this job: %s", servicename
);
7571 if (unlikely((jo
= machservice_job(ms
)) != j
)) {
7572 static pid_t last_warned_pid
;
7574 if (last_warned_pid
!= ldc
->pid
) {
7575 job_log(jo
, LOG_WARNING
, "The following job tried to hijack the service \"%s\" from this job: %s", servicename
, j
->label
);
7576 last_warned_pid
= ldc
->pid
;
7579 return BOOTSTRAP_NOT_PRIVILEGED
;
7581 if (unlikely(machservice_active(ms
))) {
7582 job_log(j
, LOG_WARNING
, "Check-in of Mach service failed. Already active: %s", servicename
);
7583 return BOOTSTRAP_SERVICE_ACTIVE
;
7588 machservice_request_notifications(ms
);
7590 job_log(j
, LOG_INFO
, "Check-in of service: %s", servicename
);
7592 *serviceportp
= machservice_port(ms
);
7593 return BOOTSTRAP_SUCCESS
;
7597 job_mig_register2(job_t j
, name_t servicename
, mach_port_t serviceport
, uint64_t flags
)
7599 struct machservice
*ms
;
7600 struct ldcred
*ldc
= runtime_get_caller_creds();
7602 if (!launchd_assumes(j
!= NULL
)) {
7603 return BOOTSTRAP_NO_MEMORY
;
7606 if (!(flags
& BOOTSTRAP_PER_PID_SERVICE
) && !j
->legacy_LS_job
) {
7607 job_log(j
, LOG_SCOLDING
, "Performance: bootstrap_register() is deprecated. Service: %s", servicename
);
7610 job_log(j
, LOG_DEBUG
, "%sMach service registration attempt: %s", flags
& BOOTSTRAP_PER_PID_SERVICE
? "Per PID " : "", servicename
);
7612 /* 5641783 for the embedded hack */
7613 #if !TARGET_OS_EMBEDDED
7615 * From a per-user/session launchd's perspective, SecurityAgent (UID
7616 * 92) is a rogue application (not our UID, not root and not a child of
7617 * us). We'll have to reconcile this design friction at a later date.
7619 if (unlikely(j
->anonymous
&& j
->mgr
->parentmgr
== NULL
&& ldc
->uid
!= 0 && ldc
->uid
!= getuid() && ldc
->uid
!= 92)) {
7621 return VPROC_ERR_TRY_PER_USER
;
7623 return BOOTSTRAP_NOT_PRIVILEGED
;
7628 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, flags
& BOOTSTRAP_PER_PID_SERVICE
? ldc
->pid
: 0);
7631 if (machservice_job(ms
) != j
) {
7632 return BOOTSTRAP_NOT_PRIVILEGED
;
7634 if (machservice_active(ms
)) {
7635 job_log(j
, LOG_DEBUG
, "Mach service registration failed. Already active: %s", servicename
);
7636 return BOOTSTRAP_SERVICE_ACTIVE
;
7638 if (ms
->recv
&& (serviceport
!= MACH_PORT_NULL
)) {
7639 job_log(j
, LOG_ERR
, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename
);
7640 return BOOTSTRAP_NOT_PRIVILEGED
;
7643 machservice_delete(j
, ms
, false);
7646 if (likely(serviceport
!= MACH_PORT_NULL
)) {
7647 if (likely(ms
= machservice_new(j
, servicename
, &serviceport
, flags
& BOOTSTRAP_PER_PID_SERVICE
? true : false))) {
7648 machservice_request_notifications(ms
);
7650 return BOOTSTRAP_NO_MEMORY
;
7655 return BOOTSTRAP_SUCCESS
;
7659 job_mig_look_up2(job_t j
, mach_port_t srp
, name_t servicename
, mach_port_t
*serviceportp
, pid_t target_pid
, uint64_t flags
)
7661 struct machservice
*ms
;
7662 struct ldcred
*ldc
= runtime_get_caller_creds();
7664 bool per_pid_lookup
= flags
& BOOTSTRAP_PER_PID_SERVICE
;
7666 if (!launchd_assumes(j
!= NULL
)) {
7667 return BOOTSTRAP_NO_MEMORY
;
7670 /* 5641783 for the embedded hack */
7671 #if !TARGET_OS_EMBEDDED
7672 if (unlikely(pid1_magic
&& j
->anonymous
&& j
->mgr
->parentmgr
== NULL
&& ldc
->uid
!= 0 && ldc
->euid
!= 0)) {
7673 return VPROC_ERR_TRY_PER_USER
;
7678 if (unlikely(sandbox_check(ldc
->pid
, "mach-lookup", per_pid_lookup
? SANDBOX_FILTER_LOCAL_NAME
: SANDBOX_FILTER_GLOBAL_NAME
, servicename
) > 0)) {
7679 return BOOTSTRAP_NOT_PRIVILEGED
;
7683 if (per_pid_lookup
) {
7684 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, false, target_pid
);
7686 ms
= jobmgr_lookup_service(j
->mgr
, servicename
, true, 0);
7690 if (machservice_hidden(ms
) && !machservice_active(ms
)) {
7692 } else if (unlikely(ms
->per_user_hack
)) {
7698 job_assumes(j
, machservice_port(ms
) != MACH_PORT_NULL
);
7699 job_log(j
, LOG_DEBUG
, "%sMach service lookup: %s", per_pid_lookup
? "Per PID " : "", servicename
);
7701 if (unlikely(!per_pid_lookup
&& j
->lastlookup
== ms
&& j
->lastlookup_gennum
== ms
->gen_num
&& !j
->per_user
)) {
7702 /* we need to think more about the per_pid_lookup logic before logging about repeated lookups */
7703 job_log(j
, LOG_DEBUG
, "Performance: Please fix the framework that talks to \"%s\" to cache the Mach port for service: %s", ms
->job
->label
, servicename
);
7707 j
->lastlookup_gennum
= ms
->gen_num
;
7709 *serviceportp
= machservice_port(ms
);
7711 kr
= BOOTSTRAP_SUCCESS
;
7712 } else if (!per_pid_lookup
&& (inherited_bootstrap_port
!= MACH_PORT_NULL
)) {
7713 job_log(j
, LOG_DEBUG
, "Mach service lookup forwarded: %s", servicename
);
7714 /* Clients potentially check the audit token of the reply to verify that the returned send right is trustworthy. */
7715 job_assumes(j
, vproc_mig_look_up2_forward(inherited_bootstrap_port
, srp
, servicename
, 0, 0) == 0);
7716 /* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
7717 return MIG_NO_REPLY
;
7718 } else if (pid1_magic
&& j
->anonymous
&& ldc
->euid
>= 500 && strcasecmp(j
->mgr
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
7720 * 5240036 Should start background session when a lookup of CCacheServer occurs
7722 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
7723 * If we find a EUID that isn't root, we force it over to the per-user context.
7725 return VPROC_ERR_TRY_PER_USER
;
7727 job_log(j
, LOG_DEBUG
, "%sMach service lookup failed: %s", per_pid_lookup
? "Per PID " : "", servicename
);
7728 kr
= BOOTSTRAP_UNKNOWN_SERVICE
;
7735 job_mig_parent(job_t j
, mach_port_t srp
, mach_port_t
*parentport
)
7737 if (!launchd_assumes(j
!= NULL
)) {
7738 return BOOTSTRAP_NO_MEMORY
;
7741 job_log(j
, LOG_DEBUG
, "Requested parent bootstrap port");
7742 jobmgr_t jm
= j
->mgr
;
7744 if (jobmgr_parent(jm
)) {
7745 *parentport
= jobmgr_parent(jm
)->jm_port
;
7746 } else if (MACH_PORT_NULL
== inherited_bootstrap_port
) {
7747 *parentport
= jm
->jm_port
;
7749 job_assumes(j
, vproc_mig_parent_forward(inherited_bootstrap_port
, srp
) == 0);
7750 /* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
7751 return MIG_NO_REPLY
;
7753 return BOOTSTRAP_SUCCESS
;
7757 job_mig_info(job_t j
, name_array_t
*servicenamesp
, unsigned int *servicenames_cnt
,
7758 name_array_t
*servicejobsp
, unsigned int *servicejobs_cnt
,
7759 bootstrap_status_array_t
*serviceactivesp
, unsigned int *serviceactives_cnt
,
7762 name_array_t service_names
= NULL
;
7763 name_array_t service_jobs
= NULL
;
7764 bootstrap_status_array_t service_actives
= NULL
;
7765 unsigned int cnt
= 0, cnt2
= 0;
7768 if (!launchd_assumes(j
!= NULL
)) {
7769 return BOOTSTRAP_NO_MEMORY
;
7772 if( g_flat_mach_namespace
) {
7773 if( (j
->mgr
->properties
& BOOTSTRAP_PROPERTY_EXPLICITSUBSET
) || (flags
& BOOTSTRAP_FORCE_LOCAL
) ) {
7783 struct machservice
*msi
= NULL
;
7784 for( i
= 0; i
< MACHSERVICE_HASH_SIZE
; i
++ ) {
7785 LIST_FOREACH( msi
, &jm
->ms_hash
[i
], name_hash_sle
) {
7786 cnt
+= !msi
->per_pid
? 1 : 0;
7794 mig_allocate((vm_address_t
*)&service_names
, cnt
* sizeof(service_names
[0]));
7795 if (!job_assumes(j
, service_names
!= NULL
)) {
7799 mig_allocate((vm_address_t
*)&service_jobs
, cnt
* sizeof(service_jobs
[0]));
7800 if (!job_assumes(j
, service_jobs
!= NULL
)) {
7804 mig_allocate((vm_address_t
*)&service_actives
, cnt
* sizeof(service_actives
[0]));
7805 if (!job_assumes(j
, service_actives
!= NULL
)) {
7809 for( i
= 0; i
< MACHSERVICE_HASH_SIZE
; i
++ ) {
7810 LIST_FOREACH( msi
, &jm
->ms_hash
[i
], name_hash_sle
) {
7811 if( !msi
->per_pid
) {
7812 strlcpy(service_names
[cnt2
], machservice_name(msi
), sizeof(service_names
[0]));
7813 strlcpy(service_jobs
[cnt2
], msi
->job
->label
, sizeof(service_jobs
[0]));
7814 service_actives
[cnt2
] = machservice_status(msi
);
7820 job_assumes(j
, cnt
== cnt2
);
7823 *servicenamesp
= service_names
;
7824 *servicejobsp
= service_jobs
;
7825 *serviceactivesp
= service_actives
;
7826 *servicenames_cnt
= *servicejobs_cnt
= *serviceactives_cnt
= cnt
;
7828 return BOOTSTRAP_SUCCESS
;
7831 if (service_names
) {
7832 mig_deallocate((vm_address_t
)service_names
, cnt
* sizeof(service_names
[0]));
7835 mig_deallocate((vm_address_t
)service_jobs
, cnt
* sizeof(service_jobs
[0]));
7837 if (service_actives
) {
7838 mig_deallocate((vm_address_t
)service_actives
, cnt
* sizeof(service_actives
[0]));
7841 return BOOTSTRAP_NO_MEMORY
;
7845 job_mig_lookup_children(job_t j
, mach_port_array_t
*child_ports
, mach_msg_type_number_t
*child_ports_cnt
,
7846 name_array_t
*child_names
, mach_msg_type_number_t
*child_names_cnt
,
7847 bootstrap_property_array_t
*child_properties
, mach_msg_type_number_t
*child_properties_cnt
)
7849 kern_return_t kr
= BOOTSTRAP_NO_MEMORY
;
7850 if( !launchd_assumes(j
!= NULL
) ) {
7851 return BOOTSTRAP_NO_MEMORY
;
7854 struct ldcred
*ldc
= runtime_get_caller_creds();
7856 /* Only allow root processes to look up children, even if we're in the per-user launchd.
7857 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
7858 * in a non-flat namespace.
7860 if( ldc
->euid
!= 0 ) {
7861 job_log(j
, LOG_WARNING
, "Attempt to look up children of bootstrap by unprivileged job.");
7862 return BOOTSTRAP_NOT_PRIVILEGED
;
7865 unsigned int cnt
= 0;
7867 jobmgr_t jmr
= j
->mgr
;
7868 jobmgr_t jmi
= NULL
;
7869 SLIST_FOREACH( jmi
, &jmr
->submgrs
, sle
) {
7873 /* Find our per-user launchds if we're PID 1. */
7876 LIST_FOREACH( ji
, &jmr
->jobs
, sle
) {
7877 cnt
+= ji
->per_user
? 1 : 0;
7882 return BOOTSTRAP_NO_CHILDREN
;
7885 mach_port_array_t _child_ports
= NULL
;
7886 mig_allocate((vm_address_t
*)&_child_ports
, cnt
* sizeof(_child_ports
[0]));
7887 if( !job_assumes(j
, _child_ports
!= NULL
) ) {
7888 kr
= BOOTSTRAP_NO_MEMORY
;
7892 name_array_t _child_names
= NULL
;
7893 mig_allocate((vm_address_t
*)&_child_names
, cnt
* sizeof(_child_names
[0]));
7894 if( !job_assumes(j
, _child_names
!= NULL
) ) {
7895 kr
= BOOTSTRAP_NO_MEMORY
;
7899 bootstrap_property_array_t _child_properties
= NULL
;
7900 mig_allocate((vm_address_t
*)&_child_properties
, cnt
* sizeof(_child_properties
[0]));
7901 if( !job_assumes(j
, _child_properties
!= NULL
) ) {
7902 kr
= BOOTSTRAP_NO_MEMORY
;
7906 unsigned int cnt2
= 0;
7907 SLIST_FOREACH( jmi
, &jmr
->submgrs
, sle
) {
7908 if( jobmgr_assumes(jmi
, launchd_mport_make_send(jmi
->jm_port
) == KERN_SUCCESS
) ) {
7909 _child_ports
[cnt2
] = jmi
->jm_port
;
7911 _child_ports
[cnt2
] = MACH_PORT_NULL
;
7914 strlcpy(_child_names
[cnt2
], jmi
->name
, sizeof(_child_names
[0]));
7915 _child_properties
[cnt2
] = jmi
->properties
;
7920 if( pid1_magic
) LIST_FOREACH( ji
, &jmr
->jobs
, sle
) {
7921 if( ji
->per_user
) {
7922 if( job_assumes(ji
, SLIST_FIRST(&ji
->machservices
)->per_user_hack
== true) ) {
7923 mach_port_t port
= machservice_port(SLIST_FIRST(&ji
->machservices
));
7925 if( job_assumes(ji
, launchd_mport_copy_send(port
) == KERN_SUCCESS
) ) {
7926 _child_ports
[cnt2
] = port
;
7928 _child_ports
[cnt2
] = MACH_PORT_NULL
;
7931 _child_ports
[cnt2
] = MACH_PORT_NULL
;
7934 strlcpy(_child_names
[cnt2
], ji
->label
, sizeof(_child_names
[0]));
7935 _child_properties
[cnt2
] |= BOOTSTRAP_PROPERTY_PERUSER
;
7941 *child_names_cnt
= cnt
;
7942 *child_ports_cnt
= cnt
;
7943 *child_properties_cnt
= cnt
;
7945 *child_names
= _child_names
;
7946 *child_ports
= _child_ports
;
7947 *child_properties
= _child_properties
;
7950 for( i
= 0; i
< cnt
; i
++ ) {
7951 job_log(j
, LOG_DEBUG
, "child_names[%u] = %s", i
, (char *)_child_names
[i
]);
7954 return BOOTSTRAP_SUCCESS
;
7956 if( _child_ports
) {
7957 mig_deallocate((vm_address_t
)_child_ports
, cnt
* sizeof(_child_ports
[0]));
7960 if( _child_names
) {
7961 mig_deallocate((vm_address_t
)_child_names
, cnt
* sizeof(_child_ports
[0]));
7964 if( _child_properties
) {
7965 mig_deallocate((vm_address_t
)_child_properties
, cnt
* sizeof(_child_properties
[0]));
7972 job_mig_transaction_count_for_pid(job_t j
, pid_t p
, int32_t *cnt
, boolean_t
*condemned
)
7974 kern_return_t kr
= KERN_FAILURE
;
7975 struct ldcred
*ldc
= runtime_get_caller_creds();
7976 if( (ldc
->euid
!= geteuid()) && (ldc
->euid
!= 0) ) {
7977 return BOOTSTRAP_NOT_PRIVILEGED
;
7980 job_t j_for_pid
= jobmgr_find_by_pid_deep(j
->mgr
, p
, false);
7982 if( j_for_pid
->kill_via_shmem
) {
7983 if( j_for_pid
->shmem
) {
7984 *cnt
= j_for_pid
->shmem
->vp_shmem_transaction_cnt
;
7985 *condemned
= j_for_pid
->shmem
->vp_shmem_flags
& VPROC_SHMEM_EXITING
;
7986 *cnt
+= *condemned
? 1 : 0;
7992 kr
= BOOTSTRAP_SUCCESS
;
7994 kr
= BOOTSTRAP_NO_MEMORY
;
7997 kr
= BOOTSTRAP_UNKNOWN_SERVICE
;
8004 job_mig_pid_is_managed(job_t j
__attribute__((unused
)), pid_t p
, boolean_t
*managed
)
8006 struct ldcred
*ldc
= runtime_get_caller_creds();
8007 if( (ldc
->euid
!= geteuid()) && (ldc
->euid
!= 0) ) {
8008 return BOOTSTRAP_NOT_PRIVILEGED
;
8011 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
8012 * directly by launchd as agents.
8014 job_t j_for_pid
= jobmgr_find_by_pid_deep(root_jobmgr
, p
, false);
8015 if( j_for_pid
&& !j_for_pid
->anonymous
&& !j_for_pid
->legacy_LS_job
) {
8019 return BOOTSTRAP_SUCCESS
;
8023 job_mig_port_for_label(job_t j
__attribute__((unused
)), name_t label
, mach_port_t
*mp
)
8025 struct ldcred
*ldc
= runtime_get_caller_creds();
8026 kern_return_t kr
= BOOTSTRAP_NOT_PRIVILEGED
;
8028 mach_port_t _mp
= MACH_PORT_NULL
;
8029 if( !j
->deny_job_creation
&& (ldc
->euid
== 0 || ldc
->euid
== geteuid()) ) {
8030 job_t target_j
= job_find(label
);
8031 if( jobmgr_assumes(root_jobmgr
, target_j
!= NULL
) ) {
8032 if( target_j
->j_port
== MACH_PORT_NULL
) {
8033 job_assumes(target_j
, job_setup_machport(target_j
) == true);
8036 _mp
= target_j
->j_port
;
8037 kr
= _mp
!= MACH_PORT_NULL
? BOOTSTRAP_SUCCESS
: BOOTSTRAP_NO_MEMORY
;
8039 kr
= BOOTSTRAP_NO_MEMORY
;
8047 #if !TARGET_OS_EMBEDDED
8049 job_mig_set_security_session(job_t j
, uuid_t uuid
, mach_port_t session
)
8051 uuid_string_t uuid_str
;
8052 uuid_unparse(uuid
, uuid_str
);
8053 job_log(j
, LOG_DEBUG
, "Setting session %u for UUID %s...", session
, uuid_str
);
8055 job_t ji
= NULL
, jt
= NULL
;
8056 LIST_FOREACH_SAFE( ji
, &s_needing_sessions
, sle
, jt
) {
8057 uuid_string_t uuid_str2
;
8058 uuid_unparse(ji
->expected_audit_uuid
, uuid_str2
);
8060 if( uuid_compare(uuid
, ji
->expected_audit_uuid
) == 0 ) {
8061 uuid_clear(ji
->expected_audit_uuid
);
8062 if( session
!= MACH_PORT_NULL
) {
8063 job_log(ji
, LOG_DEBUG
, "Job should join session with port %u", session
);
8064 mach_port_mod_refs(mach_task_self(), session
, MACH_PORT_RIGHT_SEND
, 1);
8066 job_log(ji
, LOG_DEBUG
, "No session to set for job. Using our session.");
8069 ji
->audit_session
= session
;
8070 LIST_REMOVE(ji
, needing_session_sle
);
8071 job_dispatch(ji
, false);
8075 /* Each job that the session port was set for holds a reference. At the end of
8076 * the loop, there will be one extra reference belonging to this MiG protocol.
8077 * We need to release it so that the session goes away when all the jobs
8078 * referencing it are unloaded.
8080 mach_port_deallocate(mach_task_self(), session
);
8082 return KERN_SUCCESS
;
8086 job_mig_set_security_session(job_t j
__attribute__((unused
)), uuid_t uuid
__attribute__((unused
)), mach_port_t session
__attribute__((unused
)))
8088 return KERN_SUCCESS
;
8093 jobmgr_find_by_name(jobmgr_t jm
, const char *where
)
8097 /* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
8098 if (where
== NULL
) {
8099 if (strcasecmp(jm
->name
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
8100 where
= VPROCMGR_SESSION_LOGINWINDOW
;
8102 where
= VPROCMGR_SESSION_AQUA
;
8106 if (strcasecmp(jm
->name
, where
) == 0) {
8110 if( strcasecmp(where
, VPROCMGR_SESSION_BACKGROUND
) == 0 && !pid1_magic
) {
8115 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
8116 if (unlikely(jmi
->shutting_down
)) {
8118 } else if (strcasecmp(jmi
->name
, where
) == 0) {
8120 } else if (strcasecmp(jmi
->name
, VPROCMGR_SESSION_BACKGROUND
) == 0 && pid1_magic
) {
8121 SLIST_FOREACH(jmi2
, &jmi
->submgrs
, sle
) {
8122 if (strcasecmp(jmi2
->name
, where
) == 0) {
8135 job_mig_move_subset(job_t j
, mach_port_t target_subset
, name_t session_type
, mach_port_t audit_session
, uint64_t flags
)
8137 mach_msg_type_number_t l2l_i
, l2l_port_cnt
= 0;
8138 mach_port_array_t l2l_ports
= NULL
;
8139 mach_port_t reqport
, rcvright
;
8140 kern_return_t kr
= 1;
8141 launch_data_t out_obj_array
= NULL
;
8142 struct ldcred
*ldc
= runtime_get_caller_creds();
8143 jobmgr_t jmr
= NULL
;
8145 if (!launchd_assumes(j
!= NULL
)) {
8146 return BOOTSTRAP_NO_MEMORY
;
8149 if (job_mig_intran2(root_jobmgr
, target_subset
, ldc
->pid
)) {
8150 job_log(j
, LOG_ERR
, "Moving a session to ourself is bogus.");
8152 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
8156 job_log(j
, LOG_DEBUG
, "Move subset attempt: 0x%x", target_subset
);
8158 kr
= _vproc_grab_subset(target_subset
, &reqport
, &rcvright
, &out_obj_array
, &l2l_ports
, &l2l_port_cnt
);
8160 if (!job_assumes(j
, kr
== 0)) {
8164 launchd_assert(launch_data_array_get_count(out_obj_array
) == l2l_port_cnt
);
8166 if (!job_assumes(j
, (jmr
= jobmgr_new(j
->mgr
, reqport
, rcvright
, false, session_type
, false, audit_session
)) != NULL
)) {
8167 kr
= BOOTSTRAP_NO_MEMORY
;
8171 jmr
->properties
|= BOOTSTRAP_PROPERTY_MOVEDSUBSET
;
8173 /* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
8174 * processing an IPC request, we'll do this action before the new job manager can get any IPC
8175 * requests. This serialization is guaranteed since we are single-threaded in that respect.
8177 if( flags
& LAUNCH_GLOBAL_ON_DEMAND
) {
8178 /* This is so awful. */
8179 /* Remove the job from its current job manager. */
8180 LIST_REMOVE(j
, sle
);
8181 LIST_REMOVE(j
, pid_hash_sle
);
8183 /* Put the job into the target job manager. */
8184 LIST_INSERT_HEAD(&jmr
->jobs
, j
, sle
);
8185 LIST_INSERT_HEAD(&jmr
->active_jobs
[ACTIVE_JOB_HASH(j
->p
)], j
, pid_hash_sle
);
8188 job_set_global_on_demand(j
, true);
8190 if( !j
->holds_ref
) {
8191 j
->holds_ref
= true;
8196 for (l2l_i
= 0; l2l_i
< l2l_port_cnt
; l2l_i
++) {
8197 launch_data_t tmp
, obj_at_idx
;
8198 struct machservice
*ms
;
8199 job_t j_for_service
;
8200 const char *serv_name
;
8204 job_assumes(j
, obj_at_idx
= launch_data_array_get_index(out_obj_array
, l2l_i
));
8205 job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PID
));
8206 target_pid
= (pid_t
)launch_data_get_integer(tmp
);
8207 job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_PERPID
));
8208 serv_perpid
= launch_data_get_bool(tmp
);
8209 job_assumes(j
, tmp
= launch_data_dict_lookup(obj_at_idx
, TAKE_SUBSET_NAME
));
8210 serv_name
= launch_data_get_string(tmp
);
8212 j_for_service
= jobmgr_find_by_pid(jmr
, target_pid
, true);
8214 if (unlikely(!j_for_service
)) {
8215 /* The PID probably exited */
8216 job_assumes(j
, launchd_mport_deallocate(l2l_ports
[l2l_i
]) == KERN_SUCCESS
);
8220 if (likely(ms
= machservice_new(j_for_service
, serv_name
, &l2l_ports
[l2l_i
], serv_perpid
))) {
8221 job_log(j
, LOG_DEBUG
, "Importing %s into new bootstrap.", serv_name
);
8222 machservice_request_notifications(ms
);
8229 if (out_obj_array
) {
8230 launch_data_free(out_obj_array
);
8234 mig_deallocate((vm_address_t
)l2l_ports
, l2l_port_cnt
* sizeof(l2l_ports
[0]));
8238 if (target_subset
) {
8239 job_assumes(j
, launchd_mport_deallocate(target_subset
) == KERN_SUCCESS
);
8242 jobmgr_shutdown(jmr
);
8249 job_mig_init_session(job_t j
, name_t session_type
, mach_port_t audit_session
)
8253 kern_return_t kr
= BOOTSTRAP_NO_MEMORY
;
8254 if (j
->mgr
->session_initialized
) {
8255 job_log(j
, LOG_ERR
, "Tried to initialize an already setup session!");
8256 kr
= BOOTSTRAP_NOT_PRIVILEGED
;
8257 } else if (strcmp(session_type
, VPROCMGR_SESSION_LOGINWINDOW
) == 0) {
8263 * We're working around LoginWindow and the WindowServer.
8265 * In practice, there is only one LoginWindow session. Unfortunately, for certain
8266 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
8267 * spawns a replacement loginwindow session before cleaning up the previous one.
8269 * We're going to use the creation of a new LoginWindow context as a clue that the
8270 * previous LoginWindow context is on the way out and therefore we should just
8271 * kick-start the shutdown of it.
8274 SLIST_FOREACH(jmi
, &root_jobmgr
->submgrs
, sle
) {
8275 if (unlikely(jmi
->shutting_down
)) {
8277 } else if (strcasecmp(jmi
->name
, session_type
) == 0) {
8278 jobmgr_shutdown(jmi
);
8284 jobmgr_log(j
->mgr
, LOG_DEBUG
, "Initializing as %s", session_type
);
8285 strcpy(j
->mgr
->name_init
, session_type
);
8287 if (job_assumes(j
, (j2
= jobmgr_init_session(j
->mgr
, session_type
, false)))) {
8288 j2
->audit_session
= audit_session
;
8289 job_assumes(j
, job_dispatch(j2
, true));
8290 kr
= BOOTSTRAP_SUCCESS
;
8297 job_mig_switch_to_session(job_t j
, mach_port_t requestor_port
, name_t session_name
, mach_port_t audit_session
, mach_port_t
*new_bsport
)
8299 job_log(j
, LOG_DEBUG
, "Job wants to move to %s session.", session_name
);
8301 if( !job_assumes(j
, pid1_magic
== false) ) {
8302 job_log(j
, LOG_WARNING
, "Switching sessions is not allowed in the system Mach bootstrap.");
8303 return BOOTSTRAP_NOT_PRIVILEGED
;
8306 if( !j
->anonymous
) {
8307 job_log(j
, LOG_NOTICE
, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
8308 return BOOTSTRAP_NOT_PRIVILEGED
;
8311 jobmgr_t target_jm
= jobmgr_find_by_name(root_jobmgr
, session_name
);
8312 if( target_jm
== j
->mgr
) {
8313 job_log(j
, LOG_DEBUG
, "Job is already in its desired session (%s).", session_name
);
8314 *new_bsport
= target_jm
->jm_port
;
8315 return BOOTSTRAP_SUCCESS
;
8319 target_jm
= jobmgr_new(j
->mgr
, requestor_port
, MACH_PORT_NULL
, false, session_name
, false, audit_session
);
8321 mach_port_deallocate(mach_task_self(), audit_session
);
8323 target_jm
->properties
|= BOOTSTRAP_PROPERTY_IMPLICITSUBSET
;
8327 if( !job_assumes(j
, target_jm
!= NULL
) ) {
8328 job_log(j
, LOG_WARNING
, "Could not find %s session!", session_name
);
8329 return BOOTSTRAP_NO_MEMORY
;
8332 /* Remove the job from it's current job manager. */
8333 LIST_REMOVE(j
, sle
);
8334 LIST_REMOVE(j
, pid_hash_sle
);
8336 job_t ji
= NULL
, jit
= NULL
;
8337 LIST_FOREACH_SAFE( ji
, &j
->mgr
->global_env_jobs
, global_env_sle
, jit
) {
8339 LIST_REMOVE(ji
, global_env_sle
);
8344 /* Put the job into the target job manager. */
8345 LIST_INSERT_HEAD(&target_jm
->jobs
, j
, sle
);
8346 LIST_INSERT_HEAD(&target_jm
->active_jobs
[ACTIVE_JOB_HASH(j
->p
)], j
, pid_hash_sle
);
8349 LIST_INSERT_HEAD(&target_jm
->global_env_jobs
, j
, global_env_sle
);
8352 /* Move our Mach services over if we're not in a flat namespace. */
8353 if( !g_flat_mach_namespace
&& !SLIST_EMPTY(&j
->machservices
) ) {
8354 struct machservice
*msi
= NULL
, *msit
= NULL
;
8355 SLIST_FOREACH_SAFE( msi
, &j
->machservices
, sle
, msit
) {
8356 LIST_REMOVE(msi
, name_hash_sle
);
8357 LIST_INSERT_HEAD(&target_jm
->ms_hash
[hash_ms(msi
->name
)], msi
, name_hash_sle
);
8363 if( !j
->holds_ref
) {
8364 /* Anonymous jobs which move around are particularly interesting to us, so we want to
8365 * stick around while they're still around.
8366 * For example, login calls into the PAM launchd module, which moves the process into
8367 * the StandardIO session by default. So we'll hold a reference on that job to prevent
8368 * ourselves from going away.
8370 j
->holds_ref
= true;
8374 *new_bsport
= target_jm
->jm_port
;
8376 return KERN_SUCCESS
;
8380 job_mig_take_subset(job_t j
, mach_port_t
*reqport
, mach_port_t
*rcvright
,
8381 vm_offset_t
*outdata
, mach_msg_type_number_t
*outdataCnt
,
8382 mach_port_array_t
*portsp
, unsigned int *ports_cnt
)
8384 launch_data_t tmp_obj
, tmp_dict
, outdata_obj_array
= NULL
;
8385 mach_port_array_t ports
= NULL
;
8386 unsigned int cnt
= 0, cnt2
= 0;
8388 struct machservice
*ms
;
8392 if (!launchd_assumes(j
!= NULL
)) {
8393 return BOOTSTRAP_NO_MEMORY
;
8398 if (unlikely(!pid1_magic
)) {
8399 job_log(j
, LOG_ERR
, "Only the system launchd will transfer Mach sub-bootstraps.");
8400 return BOOTSTRAP_NOT_PRIVILEGED
;
8402 if (unlikely(jobmgr_parent(jm
) == NULL
)) {
8403 job_log(j
, LOG_ERR
, "Root Mach bootstrap cannot be transferred.");
8404 return BOOTSTRAP_NOT_PRIVILEGED
;
8406 if (unlikely(strcasecmp(jm
->name
, VPROCMGR_SESSION_AQUA
) == 0)) {
8407 job_log(j
, LOG_ERR
, "Cannot transfer a setup GUI session.");
8408 return BOOTSTRAP_NOT_PRIVILEGED
;
8410 if (unlikely(!j
->anonymous
)) {
8411 job_log(j
, LOG_ERR
, "Only the anonymous job can transfer Mach sub-bootstraps.");
8412 return BOOTSTRAP_NOT_PRIVILEGED
;
8415 job_log(j
, LOG_DEBUG
, "Transferring sub-bootstrap to the per session launchd.");
8417 outdata_obj_array
= launch_data_alloc(LAUNCH_DATA_ARRAY
);
8418 if (!job_assumes(j
, outdata_obj_array
)) {
8422 *outdataCnt
= 20 * 1024 * 1024;
8423 mig_allocate(outdata
, *outdataCnt
);
8424 if (!job_assumes(j
, *outdata
!= 0)) {
8428 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
8429 if (!ji
->anonymous
) {
8432 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
8437 mig_allocate((vm_address_t
*)&ports
, cnt
* sizeof(ports
[0]));
8438 if (!job_assumes(j
, ports
!= NULL
)) {
8442 LIST_FOREACH(ji
, &j
->mgr
->jobs
, sle
) {
8443 if (!ji
->anonymous
) {
8447 SLIST_FOREACH(ms
, &ji
->machservices
, sle
) {
8448 if (job_assumes(j
, (tmp_dict
= launch_data_alloc(LAUNCH_DATA_DICTIONARY
)))) {
8449 job_assumes(j
, launch_data_array_set_index(outdata_obj_array
, tmp_dict
, cnt2
));
8454 if (job_assumes(j
, (tmp_obj
= launch_data_new_string(machservice_name(ms
))))) {
8455 job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_NAME
));
8460 if (job_assumes(j
, (tmp_obj
= launch_data_new_integer((ms
->job
->p
))))) {
8461 job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PID
));
8466 if (job_assumes(j
, (tmp_obj
= launch_data_new_bool((ms
->per_pid
))))) {
8467 job_assumes(j
, launch_data_dict_insert(tmp_dict
, tmp_obj
, TAKE_SUBSET_PERPID
));
8472 ports
[cnt2
] = machservice_port(ms
);
8474 /* Increment the send right by one so we can shutdown the jobmgr cleanly */
8475 jobmgr_assumes(jm
, (errno
= mach_port_mod_refs(mach_task_self(), ports
[cnt2
], MACH_PORT_RIGHT_SEND
, 1)) == 0);
8480 job_assumes(j
, cnt
== cnt2
);
8482 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK
);
8483 packed_size
= launch_data_pack(outdata_obj_array
, (void *)*outdata
, *outdataCnt
, NULL
, NULL
);
8484 if (!job_assumes(j
, packed_size
!= 0)) {
8488 launch_data_free(outdata_obj_array
);
8493 *reqport
= jm
->req_port
;
8494 *rcvright
= jm
->jm_port
;
8499 workaround_5477111
= j
;
8501 jobmgr_shutdown(jm
);
8503 return BOOTSTRAP_SUCCESS
;
8506 if (outdata_obj_array
) {
8507 launch_data_free(outdata_obj_array
);
8510 mig_deallocate(*outdata
, *outdataCnt
);
8513 mig_deallocate((vm_address_t
)ports
, cnt
* sizeof(ports
[0]));
8516 return BOOTSTRAP_NO_MEMORY
;
8520 job_mig_subset(job_t j
, mach_port_t requestorport
, mach_port_t
*subsetportp
)
8525 if (!launchd_assumes(j
!= NULL
)) {
8526 return BOOTSTRAP_NO_MEMORY
;
8531 while ((jmr
= jobmgr_parent(jmr
)) != NULL
) {
8535 /* Since we use recursion, we need an artificial depth for subsets */
8536 if (unlikely(bsdepth
> 100)) {
8537 job_log(j
, LOG_ERR
, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth
);
8538 return BOOTSTRAP_NO_MEMORY
;
8541 char name
[NAME_MAX
];
8542 snprintf(name
, sizeof(name
), "%s[%i].subset.%i", j
->anonymous
? j
->prog
: j
->label
, j
->p
, MACH_PORT_INDEX(requestorport
));
8544 if (!job_assumes(j
, (jmr
= jobmgr_new(j
->mgr
, requestorport
, MACH_PORT_NULL
, false, name
, true, j
->audit_session
)) != NULL
)) {
8545 if (unlikely(requestorport
== MACH_PORT_NULL
)) {
8546 return BOOTSTRAP_NOT_PRIVILEGED
;
8548 return BOOTSTRAP_NO_MEMORY
;
8551 *subsetportp
= jmr
->jm_port
;
8552 jmr
->properties
|= BOOTSTRAP_PROPERTY_EXPLICITSUBSET
;
8554 /* A job could create multiple subsets, so only add a reference the first time
8555 * it does so we don't have to keep a count.
8557 if( j
->anonymous
&& !j
->holds_ref
) {
8558 j
->holds_ref
= true;
8562 job_log(j
, LOG_DEBUG
, "Job created a subset named \"%s\"", jmr
->name
);
8563 return BOOTSTRAP_SUCCESS
;
8567 job_mig_embedded_wait(job_t j
, name_t targetlabel
, integer_t
*waitstatus
)
8571 if (!launchd_assumes(j
!= NULL
)) {
8572 return BOOTSTRAP_NO_MEMORY
;
8575 if (unlikely(!(otherj
= job_find(targetlabel
)))) {
8576 return BOOTSTRAP_UNKNOWN_SERVICE
;
8579 *waitstatus
= j
->last_exit_status
;
8585 job_mig_kickstart(job_t j
, name_t targetlabel
, pid_t
*out_pid
, mach_port_t
*out_name_port
, mach_port_t
*obsrvr_port
, unsigned int flags
)
8587 struct ldcred
*ldc
= runtime_get_caller_creds();
8590 if (!launchd_assumes(j
!= NULL
)) {
8591 return BOOTSTRAP_NO_MEMORY
;
8594 if (unlikely(!(otherj
= job_find(targetlabel
)))) {
8595 return BOOTSTRAP_UNKNOWN_SERVICE
;
8598 #if TARGET_OS_EMBEDDED
8599 bool allow_non_root_kickstart
= j
->username
&& otherj
->username
&& ( strcmp(j
->username
, otherj
->username
) == 0 );
8601 bool allow_non_root_kickstart
= false;
8604 if( ldc
->euid
!= 0 && ldc
->euid
!= geteuid() && !allow_non_root_kickstart
) {
8605 return BOOTSTRAP_NOT_PRIVILEGED
;
8608 if( otherj
->p
&& (flags
& VPROCFLAG_STALL_JOB_EXEC
) ) {
8609 return BOOTSTRAP_SERVICE_ACTIVE
;
8612 otherj
->stall_before_exec
= ( flags
& VPROCFLAG_STALL_JOB_EXEC
);
8613 otherj
= job_dispatch(otherj
, true);
8615 if (!job_assumes(j
, otherj
&& otherj
->p
)) {
8616 /* <rdar://problem/6787083> Clear this flag if we failed to start the job. */
8617 otherj
->stall_before_exec
= false;
8618 return BOOTSTRAP_NO_MEMORY
;
8621 /* If any of these proceeding steps fail, we return an error to the client.
8622 * the problem is that, if the client has requested the job be stalled before
8623 * exec(2), the client won't be able to uncork the fork(2), leaving the job
8624 * forever stalled until the client tries again and we successfully start
8627 * See <rdar://problem/6787083> for more about the implications.
8629 * Fortunately, these next actions should pretty much never fail. In the
8630 * future, we should look at cleaning up after these failures if the job
8631 * was started in a stalled state.
8634 kern_return_t kr
= task_name_for_pid(mach_task_self(), otherj
->p
, out_name_port
);
8635 if (!job_assumes(j
, kr
== 0)) {
8639 if (!job_setup_machport(otherj
)) {
8640 return BOOTSTRAP_NO_MEMORY
;
8643 *obsrvr_port
= otherj
->j_port
;
8644 *out_pid
= otherj
->p
;
/* MIG handler for the legacy mpm_wait() interface: reports job j's exit
 * status through *waitstatus.
 * NOTE(review): mangled extraction — the return-type line, braces, and
 * (apparently) the preprocessor lines selecting between the two code paths
 * below are missing; the statement order shown here is not executable as-is.
 */
8650 job_mig_wait(job_t j
, mach_port_t srp
, integer_t
*waitstatus
)
/* Guard: a NULL job means the MIG intran failed. */
8653 if (!launchd_assumes(j
!= NULL
)) {
8654 return BOOTSTRAP_NO_MEMORY
;
/* Primary path: delegate directly to job_handle_mpm_wait(). */
8656 return job_handle_mpm_wait(j
, srp
, waitstatus
);
/* Alternate path (appears compiled-out in the original; the guarding
 * preprocessor lines were lost in extraction — TODO confirm): resolve the
 * calling job from the caller's credentials and route through
 * job_mig_wait2() with legacy == true. */
8659 /* To make the compiler happy. */
8660 job_handle_mpm_wait(NULL
, MACH_PORT_NULL
, NULL
);
8662 struct ldcred
*ldc
= runtime_get_caller_creds();
8663 job_t calling_j
= jobmgr_find_by_pid(j
->mgr
, ldc
->pid
, true);
8665 return job_mig_wait2(calling_j
, j
, srp
, waitstatus
, true);
8670 job_mig_wait2(job_t j
, job_t target_j
, mach_port_t srp
, integer_t
*status
, boolean_t legacy
)
8672 if( !launchd_assumes(j
!= NULL
) ) {
8673 return BOOTSTRAP_NO_MEMORY
;
8675 if( !launchd_assumes(target_j
!= NULL
) ) {
8676 return BOOTSTRAP_NO_MEMORY
;
8678 if( !launchd_assumes(status
!= NULL
) ) {
8679 return BOOTSTRAP_NO_MEMORY
;
8682 /* See rdar://problem/7084138 for why we do the second part of this check.
8683 * Basically, since Finder, Dock and SystemUIServer are now real launchd
8684 * jobs, they don't get removed after exiting, like legacy LaunchServices
8685 * jobs do. So there's a race. coreservicesd came in asking for the exit
8686 * status after we'd relaunched Finder, so Finder's PID isn't 0.
8688 * So we check to make sure the target job isn't a LaunchServices job and
8689 * that the request is coming through the legacy path (mpm_wait()). If so,
8690 * we return the last exit status, regardless of the current PID value.
8692 if( target_j
->p
== 0 || (!target_j
->legacy_LS_job
&& legacy
) ) {
8693 *status
= target_j
->last_exit_status
;
8694 return BOOTSTRAP_SUCCESS
;
8697 if( !job_assumes(j
, waiting4exit_new(target_j
, srp
, legacy
) == true) ) {
8698 return BOOTSTRAP_NO_MEMORY
;
8701 return MIG_NO_REPLY
;
/* MIG handler: releases a job that was started with VPROCFLAG_STALL_JOB_EXEC,
 * letting its stalled fork(2) proceed to exec(2).
 * NOTE(review): mangled extraction — the return-type line, the early return in
 * the "not stalled" branch, and the statement(s) that actually uncork the job
 * (between original lines 8712 and 8717) are missing from this view.
 */
8705 job_mig_uncork_fork(job_t j
)
/* Guard: a NULL job means the MIG intran failed. */
8707 if (!launchd_assumes(j
!= NULL
)) {
8708 return BOOTSTRAP_NO_MEMORY
;
/* Uncorking only makes sense for a job stalled between fork and exec. */
8711 if (unlikely(!j
->stall_before_exec
)) {
8712 job_log(j
, LOG_WARNING
, "Attempt to uncork a job that isn't in the middle of a fork().");
/* Clear the stall flag once the job has been released. */
8717 j
->stall_before_exec
= false;
/* MIG handler: spawn a new anonymous/legacy job from a packed launch_data
 * dictionary (indata), returning the child PID and an observer port.
 * NOTE(review): mangled extraction — the return-type line, the declaration of
 * `jr`, the bodies of several error branches, the switch/case structure around
 * the EEXIST check, the pid1_magic guard around the mach_uid assignment, and
 * the `*child_pid` store are missing from this view.
 */
8722 job_mig_spawn(job_t j
, vm_offset_t indata
, mach_msg_type_number_t indataCnt
, mach_port_t audit_session
, pid_t
*child_pid
, mach_port_t
*obsvr_port
)
8724 launch_data_t input_obj
= NULL
;
8725 size_t data_offset
= 0;
8726 struct ldcred
*ldc
= runtime_get_caller_creds();
/* Guard: a NULL job means the MIG intran failed. */
8729 if (!launchd_assumes(j
!= NULL
)) {
8730 return BOOTSTRAP_NO_MEMORY
;
/* Jobs marked deny_job_creation may not create other jobs. */
8733 if (unlikely(j
->deny_job_creation
)) {
8734 return BOOTSTRAP_NOT_PRIVILEGED
;
/* Sandbox policy check on the caller (guarding #if/#endif lines for the
 * sandbox build appear elided in this view). */
8738 if (unlikely(sandbox_check(ldc
->pid
, "job-creation", SANDBOX_FILTER_NONE
) > 0)) {
8739 return BOOTSTRAP_NOT_PRIVILEGED
;
/* As PID 1, punt non-root callers to their per-user launchd. */
8743 if (unlikely(pid1_magic
&& ldc
->euid
&& ldc
->uid
)) {
8744 job_log(j
, LOG_DEBUG
, "Punting spawn to per-user-context");
8745 return VPROC_ERR_TRY_PER_USER
;
/* Reject an empty payload (error-branch body elided in this view). */
8748 if (!job_assumes(j
, indataCnt
!= 0)) {
/* Unpack the serialized job dictionary sent by the client. */
8752 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK
);
8753 if (!job_assumes(j
, (input_obj
= launch_data_unpack((void *)indata
, indataCnt
, NULL
, 0, &data_offset
, NULL
)) != NULL
)) {
/* Import the new job into the caller's session (falling back to j->mgr). */
8757 jobmgr_t target_jm
= jobmgr_find_by_name(j
->mgr
, NULL
);
8758 if( !jobmgr_assumes(j
->mgr
, target_jm
!= NULL
) ) {
8759 jobmgr_log(j
->mgr
, LOG_NOTICE
, "%s() can't find its session!", __func__
);
8763 jr
= jobmgr_import2(target_jm
?: j
->mgr
, input_obj
);
/* Import failure: distinguish name-collision from allocation failure
 * (the switch-on-errno structure is elided in this view). */
8765 if (!job_assumes(j
, jr
!= NULL
)) {
8768 return BOOTSTRAP_NAME_IN_USE
;
8770 return BOOTSTRAP_NO_MEMORY
;
/* Record the requesting UID (pid1_magic guard appears elided here). */
8775 jr
->mach_uid
= ldc
->uid
;
/* Mark as a legacy LaunchServices-style spawn and transfer spawn options. */
8778 jr
->legacy_LS_job
= true;
8779 jr
->abandon_pg
= true;
/* Stall-before-exec implements wait4debugger for spawned jobs. */
8780 jr
->stall_before_exec
= jr
->wait4debugger
;
8781 jr
->wait4debugger
= false;
8782 jr
->audit_session
= audit_session
;
8783 uuid_clear(jr
->expected_audit_uuid
);
/* Actually start the job. */
8785 jr
= job_dispatch(jr
, true);
8787 if (!job_assumes(j
, jr
!= NULL
)) {
8788 return BOOTSTRAP_NO_MEMORY
;
/* Dispatch succeeded but no PID: cleanup body elided in this view. */
8791 if (!job_assumes(jr
, jr
->p
)) {
8793 return BOOTSTRAP_NO_MEMORY
;
8796 if (!job_setup_machport(jr
)) {
8798 return BOOTSTRAP_NO_MEMORY
;
8801 job_log(jr
, LOG_DEBUG
, "Spawned by PID %u: %s", j
->p
, j
->label
);
/* Hand the observer port back to the caller (the *child_pid store appears
 * elided in this view). */
8804 *obsvr_port
= jr
->j_port
;
/* Release the MIG out-of-line payload. */
8806 mig_deallocate(indata
, indataCnt
);
8808 return BOOTSTRAP_SUCCESS
;
8812 jobmgr_init(bool sflag
)
8814 const char *root_session_type
= pid1_magic
? VPROCMGR_SESSION_SYSTEM
: VPROCMGR_SESSION_BACKGROUND
;
8815 SLIST_INIT(&s_curious_jobs
);
8816 LIST_INIT(&s_needing_sessions
);
8818 launchd_assert((root_jobmgr
= jobmgr_new(NULL
, MACH_PORT_NULL
, MACH_PORT_NULL
, sflag
, root_session_type
, false, MACH_PORT_NULL
)) != NULL
);
8820 uint32_t fflags
= NOTE_ATTRIB
| NOTE_LINK
| NOTE_REVOKE
| NOTE_EXTEND
| NOTE_WRITE
;
8821 s_no_hang_fd
= open("/dev/autofs_nowait", O_EVTONLY
| O_NONBLOCK
);
8822 if( likely(s_no_hang_fd
== -1) ) {
8823 if( jobmgr_assumes(root_jobmgr
, (s_no_hang_fd
= open("/dev", O_EVTONLY
| O_NONBLOCK
)) != -1) ) {
8824 jobmgr_assumes(root_jobmgr
, kevent_mod((uintptr_t)s_no_hang_fd
, EVFILT_VNODE
, EV_ADD
, fflags
, 0, root_jobmgr
) != -1);
8827 s_no_hang_fd
= _fd(s_no_hang_fd
);
/* our_strhash - hashes a NUL-terminated string with the djb2 algorithm
 * (seed 5381, h = h*33 + c), as identified by the surviving in-source
 * comment.  Returns the raw hash; callers reduce it modulo their table size.
 *
 * NOTE(review): the extraction dropped the accumulator declaration and the
 * return statement; they are restored here per the canonical djb2 form the
 * remaining comment and `hash*33 + c` line establish — confirm against
 * upstream.
 */
size_t
our_strhash(const char *s)
{
	size_t c, r = 5381;

	/* djb2
	 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
	 */
	while ((c = *s++)) {
		r = ((r << 5) + r) + c; /* hash*33 + c */
	}

	return r;
}
8847 hash_label(const char *label
)
8849 return our_strhash(label
) % LABEL_HASH_SIZE
;
8853 hash_ms(const char *msstr
)
8855 return our_strhash(msstr
) % MACHSERVICE_HASH_SIZE
;
8859 waiting4removal_new(job_t j
, mach_port_t rp
)
8861 struct waiting_for_removal
*w4r
;
8863 if (!job_assumes(j
, (w4r
= malloc(sizeof(struct waiting_for_removal
))) != NULL
)) {
8867 w4r
->reply_port
= rp
;
8869 SLIST_INSERT_HEAD(&j
->removal_watchers
, w4r
, sle
);
8875 waiting4removal_delete(job_t j
, struct waiting_for_removal
*w4r
)
8877 job_assumes(j
, job_mig_send_signal_reply(w4r
->reply_port
, 0) == 0);
8879 SLIST_REMOVE(&j
->removal_watchers
, w4r
, waiting_for_removal
, sle
);
8885 waiting4exit_new(job_t j
, mach_port_t rp
, bool legacy
)
8887 struct waiting_for_exit
*w4e
= NULL
;
8888 if( !job_assumes(j
, (w4e
= malloc(sizeof(struct waiting_for_exit
))) != NULL
) ) {
8893 w4e
->legacy
= legacy
;
8894 LIST_INSERT_HEAD(&j
->exit_watchers
, w4e
, sle
);
8900 waiting4exit_delete(job_t j
, struct waiting_for_exit
*w4e
)
8902 if( !w4e
->legacy
) {
8903 job_assumes(j
, job_mig_wait2_reply(w4e
->rp
, KERN_SUCCESS
, j
->last_exit_status
, false) == KERN_SUCCESS
);
8905 job_assumes(j
, job_mig_wait_reply(w4e
->rp
, KERN_SUCCESS
, j
->last_exit_status
) == KERN_SUCCESS
);
8908 LIST_REMOVE(w4e
, sle
);
/* Returns the kernel's maximum process count (kern.maxproc).
 *
 * NOTE(review): the extraction dropped the return-type line, the declaration
 * of `max`, and the return statement.  `max` must exist (the surviving
 * `sizeof(max)` and `&max` lines read it); it is restored with a fallback
 * initializer of 100 used if the sysctl fails — confirm the original default
 * against upstream.
 */
int
get_kern_max_proc(void)
{
	int mib[] = { CTL_KERN, KERN_MAXPROC };
	int max = 100; /* fallback if the sysctl fails */
	size_t max_sz = sizeof(max);

	launchd_assumes(sysctl(mib, 2, &max, &max_sz, NULL, 0) != -1);

	return max;
}
/* Runs /etc/rc.deferred_install (if present) once at boot as PID 1, so a
 * software update that deferred work does not force a second reboot.
 * NOTE(review): mangled extraction — the return-type line, the local
 * declarations (`sb`, `p`, `wstatus`, `result` appear undeclared in this
 * view), the early return for the !pid1_magic case, and the `else` keywords
 * joining the log branches are missing from this view.
 */
8925 /* See rdar://problem/6271234 */
8927 eliminate_double_reboot(void)
/* Only PID 1 may run the deferred install script. */
8929 if( unlikely(!pid1_magic
) ) {
8934 const char *argv
[] = { _PATH_BSHELL
, "/etc/rc.deferred_install", NULL
};
8935 char *try_again
= "Will try again at next boot.";
/* stat(2) succeeding (!= -1) means the script exists and should be run. */
8938 if( unlikely(stat(argv
[1], &sb
) != -1) ) {
8939 jobmgr_log(root_jobmgr
, LOG_DEBUG
| LOG_CONSOLE
, "Going to run deferred install script.");
/* Spawn the shell on the script; posix_spawnp() returns an errno value. */
8944 jobmgr_assumes(root_jobmgr
, (errno
= posix_spawnp(&p
, argv
[0], NULL
, NULL
, (char **)argv
, environ
)) == 0);
8947 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Couldn't run deferred install script! %s", try_again
);
/* Reap the script and inspect how it terminated. */
8951 if( !jobmgr_assumes(root_jobmgr
, waitpid(p
, &wstatus
, 0) != -1) ) {
8952 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Couldn't confirm that deferred install script exited successfully! %s", try_again
);
8956 if( jobmgr_assumes(root_jobmgr
, WIFEXITED(wstatus
) != 0) ) {
8957 if( jobmgr_assumes(root_jobmgr
, (result
= WEXITSTATUS(wstatus
)) == EXIT_SUCCESS
) ) {
8958 jobmgr_log(root_jobmgr
, LOG_DEBUG
| LOG_CONSOLE
, "Deferred install script completed successfully.");
8960 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Deferred install script exited with status %d. %s", WEXITSTATUS(wstatus
), try_again
);
8963 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Confirmed that deferred install script exited, but couldn't confirm that it was successful. %s", try_again
);
8968 /* If the unlink(2) was to fail, it would be most likely fail with EBUSY. All the other
8969 * failure cases for unlink(2) don't apply when we're running under PID 1 and have verified
8970 * that the file exists. Outside of someone deliberately messing with us (like if /etc/rc.deferredinstall
8971 * is actually a looping sym-link or a mount point for a filesystem) and I/O errors, we should be good.
/* Remove the script only on a confirmed successful run, so a failure retries
 * at the next boot. */
8973 if( !jobmgr_assumes(root_jobmgr
, unlink(argv
[1]) != -1) ) {
8974 jobmgr_log(root_jobmgr
, LOG_WARNING
| LOG_CONSOLE
, "Deferred install script couldn't be removed!");
/* Deliberately crashes launchd when crash simulation was requested
 * (g_simulate_pid1_crash) and we are PID 1 — used to exercise PID 1 crash
 * recovery.
 * NOTE(review): mangled extraction — the return-type line and the statement
 * that actually triggers the crash (after original line 8983) are missing
 * from this view.
 */
8980 simulate_pid1_crash(void)
8982 if( pid1_magic
&& g_simulate_pid1_crash
) {
8983 runtime_syslog(LOG_EMERG
| LOG_CONSOLE
, "About to simulate a crash.");
8989 jetsam_property_setup(launch_data_t obj
, const char *key
, job_t j
)
8991 job_log(j
, LOG_DEBUG
, "Setting Jetsam properties for job...");
8992 if( strcasecmp(key
, LAUNCH_JOBKEY_JETSAMPRIORITY
) == 0 && launch_data_get_type(obj
) == LAUNCH_DATA_INTEGER
) {
8993 j
->jetsam_priority
= (typeof(j
->jetsam_priority
))launch_data_get_integer(obj
);
8994 job_log(j
, LOG_DEBUG
, "Priority: %d", j
->jetsam_priority
);
8995 } else if( strcasecmp(key
, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT
) == 0 && launch_data_get_type(obj
) == LAUNCH_DATA_INTEGER
) {
8996 j
->jetsam_memlimit
= (typeof(j
->jetsam_memlimit
))launch_data_get_integer(obj
);
8997 job_log(j
, LOG_DEBUG
, "Memory limit: %d", j
->jetsam_memlimit
);
8998 } else if( strcasecmp(key
, LAUNCH_KEY_JETSAMFRONTMOST
) == 0 ) {
8999 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
9000 * You can't set this in a plist.
9002 } else if( strcasecmp(key
, LAUNCH_KEY_JETSAMLABEL
) == 0 ) {
9003 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
9004 * complain about it.
9007 job_log(j
, LOG_ERR
, "Unknown Jetsam key: %s", key
);
9010 if( unlikely(!j
->jetsam_properties
) ) {
9011 j
->jetsam_properties
= true;
9012 LIST_INSERT_HEAD(&j
->mgr
->jetsam_jobs
, j
, jetsam_sle
);
9013 j
->mgr
->jetsam_jobs_cnt
++;
9018 launchd_set_jetsam_priorities(launch_data_t priorities
)
9020 if( !launchd_assumes(launch_data_get_type(priorities
) == LAUNCH_DATA_ARRAY
) ) {
9025 #if !TARGET_OS_EMBEDDED
9027 jm
= jobmgr_find_by_name(root_jobmgr
, VPROCMGR_SESSION_AQUA
);
9028 if( !launchd_assumes(jm
!= NULL
) ) {
9032 /* Since this is for embedded, we can assume that the root job manager holds the Jetsam jobs. */
9035 if( !g_embedded_privileged_action
) {
9040 size_t npris
= launch_data_array_get_count(priorities
);
9044 for( i
= 0; i
< npris
; i
++ ) {
9045 launch_data_t ldi
= launch_data_array_get_index(priorities
, i
);
9046 if( !launchd_assumes(launch_data_get_type(ldi
) == LAUNCH_DATA_DICTIONARY
) ) {
9050 launch_data_t label
= NULL
;
9051 if( !launchd_assumes(label
= launch_data_dict_lookup(ldi
, LAUNCH_KEY_JETSAMLABEL
)) ) {
9054 const char *_label
= launch_data_get_string(label
);
9056 ji
= job_find(_label
);
9057 if( !launchd_assumes(ji
!= NULL
) ) {
9061 launch_data_dict_iterate(ldi
, (void (*)(launch_data_t
, const char *, void *))jetsam_property_setup
, ji
);
9063 launch_data_t frontmost
= NULL
;
9064 if( (frontmost
= launch_data_dict_lookup(ldi
, LAUNCH_KEY_JETSAMFRONTMOST
)) && launch_data_get_type(frontmost
) == LAUNCH_DATA_BOOL
) {
9065 ji
->jetsam_frontmost
= launch_data_get_bool(frontmost
);
9070 job_t
*jobs
= (job_t
*)calloc(jm
->jetsam_jobs_cnt
, sizeof(job_t
));
9071 if( launchd_assumes(jobs
!= NULL
) ) {
9072 LIST_FOREACH( ji
, &jm
->jetsam_jobs
, jetsam_sle
) {
9080 size_t totalpris
= i
;
9082 int result
= EINVAL
;
9084 /* It is conceivable that there could be no Jetsam jobs running. */
9085 if( totalpris
> 0 ) {
9087 qsort_b((void *)jobs
, totalpris
, sizeof(job_t
), ^ int (const void *lhs
, const void *rhs
) {
9088 job_t _lhs
= *(job_t
*)lhs
;
9089 job_t _rhs
= *(job_t
*)rhs
;
9090 /* Sort in descending order. (Priority correlates to the soonishness with which you will be killed.) */
9091 if( _lhs
->jetsam_priority
> _rhs
->jetsam_priority
) {
9093 } else if( _lhs
->jetsam_priority
< _rhs
->jetsam_priority
) {
9100 jetsam_priority_entry_t
*jpris
= (jetsam_priority_entry_t
*)calloc(totalpris
, sizeof(jetsam_priority_entry_t
));
9101 if( !launchd_assumes(jpris
!= NULL
) ) {
9104 for( i
= 0; i
< totalpris
; i
++ ) {
9105 jpris
[i
].pid
= jobs
[i
]->p
; /* Subject to time-of-use vs. time-of-check, obviously. */
9106 jpris
[i
].flags
|= jobs
[i
]->jetsam_frontmost
? kJetsamFlagsFrontmost
: 0;
9107 jpris
[i
].hiwat_pages
= jobs
[i
]->jetsam_memlimit
;
9110 launchd_assumes((result
= sysctlbyname("kern.memorystatus_priority_list", NULL
, NULL
, &jpris
[0], totalpris
* sizeof(jetsam_priority_entry_t
))) != -1);
9111 result
= result
!= 0 ? errno
: 0;