]> git.saurik.com Git - apple/launchd.git/blame - src/core.c
launchd-842.92.1.tar.gz
[apple/launchd.git] / src / core.c
CommitLineData
/*
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
18
5b0a4722 19#include "config.h"
eabd1701
A
20#include "core.h"
21#include "internal.h"
22#include "helper.h"
ed34e3c3 23
f36da725 24#include <TargetConditionals.h>
ed34e3c3
A
25#include <mach/mach.h>
26#include <mach/mach_error.h>
27#include <mach/boolean.h>
28#include <mach/message.h>
29#include <mach/notify.h>
30#include <mach/mig_errors.h>
31#include <mach/mach_traps.h>
32#include <mach/mach_interface.h>
33#include <mach/host_info.h>
34#include <mach/mach_host.h>
35#include <mach/exception.h>
5b0a4722 36#include <mach/host_reboot.h>
ed34e3c3
A
37#include <sys/types.h>
38#include <sys/queue.h>
39#include <sys/event.h>
ed34e3c3
A
40#include <sys/stat.h>
41#include <sys/ucred.h>
42#include <sys/fcntl.h>
43#include <sys/un.h>
5b0a4722 44#include <sys/reboot.h>
ed34e3c3
A
45#include <sys/wait.h>
46#include <sys/sysctl.h>
47#include <sys/sockio.h>
48#include <sys/time.h>
49#include <sys/resource.h>
50#include <sys/ioctl.h>
51#include <sys/mount.h>
5b0a4722 52#include <sys/pipe.h>
ddbbfbc1
A
53#include <sys/mman.h>
54#include <sys/socket.h>
55#include <sys/syscall.h>
eabd1701 56#include <sys/kern_memorystatus.h>
ed34e3c3
A
57#include <net/if.h>
58#include <netinet/in.h>
59#include <netinet/in_var.h>
60#include <netinet6/nd6.h>
5b0a4722 61#include <bsm/libbsm.h>
ed34e3c3
A
62#include <unistd.h>
63#include <signal.h>
64#include <errno.h>
ed34e3c3
A
65#include <libgen.h>
66#include <stdio.h>
67#include <stdlib.h>
68#include <stdarg.h>
69#include <stdbool.h>
70#include <paths.h>
71#include <pwd.h>
72#include <grp.h>
73#include <ttyent.h>
74#include <dlfcn.h>
75#include <dirent.h>
76#include <string.h>
77#include <ctype.h>
78#include <glob.h>
dcace88f 79#include <System/sys/spawn.h>
eabd1701 80#include <System/sys/spawn_internal.h>
5b0a4722 81#include <spawn.h>
eabd1701 82#include <spawn_private.h>
dcace88f 83#include <time.h>
5c88273d 84#include <libinfo.h>
95379394 85#include <os/assumes.h>
eabd1701 86#include <xpc/launchd.h>
95379394
A
87#include <asl.h>
88#include <_simple.h>
dcace88f 89
ddbbfbc1 90#include <libproc.h>
95379394 91#include <libproc_internal.h>
eabd1701 92#include <System/sys/proc_info.h>
ddbbfbc1
A
93#include <malloc/malloc.h>
94#include <pthread.h>
f36da725 95#if HAVE_SANDBOX
ddbbfbc1 96#define __APPLE_API_PRIVATE
5b0a4722 97#include <sandbox.h>
f36da725
A
98#endif
99#if HAVE_QUARANTINE
100#include <quarantine.h>
101#endif
95379394
A
102#if HAVE_RESPONSIBILITY
103#include <responsibility.h>
104#endif
eabd1701 105#if !TARGET_OS_EMBEDDED
5c88273d 106extern int gL1CacheEnabled;
ddbbfbc1 107#endif
95379394
A
108#if HAVE_SYSTEMSTATS
109#include <systemstats/systemstats.h>
110#endif
5b0a4722 111
ef398931
A
112#include "launch.h"
113#include "launch_priv.h"
114#include "launch_internal.h"
115#include "bootstrap.h"
116#include "bootstrap_priv.h"
117#include "vproc.h"
118#include "vproc_internal.h"
5b0a4722
A
119
120#include "reboot2.h"
ed34e3c3 121
ed34e3c3 122#include "launchd.h"
eabd1701
A
123#include "runtime.h"
124#include "ipc.h"
125#include "job.h"
126#include "jobServer.h"
127#include "job_reply.h"
128#include "job_forward.h"
ddbbfbc1 129#include "mach_excServer.h"
dcace88f 130
eabd1701 131#define POSIX_SPAWN_IOS_INTERACTIVE 0
dcace88f 132
95379394
A
133#if TARGET_OS_EMBEDDED
134/* Default memory highwatermark for daemons as set out in <rdar://problem/10307788>. */
135#define DEFAULT_JETSAM_DAEMON_HIGHWATERMARK 5
136#endif
137
dcace88f 138/* LAUNCHD_DEFAULT_EXIT_TIMEOUT
ddbbfbc1
A
139 * If the job hasn't exited in the given number of seconds after sending
140 * it a SIGTERM, SIGKILL it. Can be overriden in the job plist.
141 */
eabd1701
A
142#define LAUNCHD_MIN_JOB_RUN_TIME 10
143#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
144#define LAUNCHD_SIGKILL_TIMER 4
145#define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
5b0a4722 146
ddbbfbc1 147#define SHUTDOWN_LOG_DIR "/var/log/shutdown"
5b0a4722 148
eabd1701
A
149#define TAKE_SUBSET_NAME "TakeSubsetName"
150#define TAKE_SUBSET_PID "TakeSubsetPID"
151#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
5b0a4722 152
eabd1701 153#define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
5b0a4722
A
154
155extern char **environ;
156
/* A client blocked waiting for a job to be removed. The reply is sent on
 * reply_port once removal completes (see waiting4removal_new/_delete).
 */
struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle; // linkage on the job's removal_watchers list
	mach_port_t reply_port; // MiG reply port to answer when the job is gone
};
161
162static bool waiting4removal_new(job_t j, mach_port_t rp);
163static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
164
ed34e3c3
A
/* A Mach service advertised by a job. An instance may be linked onto the
 * owning job's machservices list, the job manager's ms_hash name hash, the
 * global port_hash table, and (optionally) the global special_ports list.
 * The service name is stored inline past the end of the struct.
 */
struct machservice {
	SLIST_ENTRY(machservice) sle; // linkage on the owning job's machservices list
	SLIST_ENTRY(machservice) special_port_sle; // linkage on the global special_ports list
	LIST_ENTRY(machservice) name_hash_sle; // linkage in the job manager's ms_hash
	LIST_ENTRY(machservice) port_hash_sle; // linkage in the global port_hash table
	struct machservice *alias; // original service this one aliases (see machservice_new_alias), or NULL
	job_t job; // job that owns/advertises this service
	unsigned int gen_num; // generation number for the port (see machservice_resetport)
	mach_port_name_t port; // the service's Mach port name in launchd
	unsigned int
		isActive:1,
		reset:1,
		recv:1,
		hide:1,
		kUNCServer:1,
		per_user_hack:1,
		debug_on_close:1,
		per_pid:1,
		delete_on_destruction:1,
		drain_one_on_crash:1,
		drain_all_on_crash:1,
		upfront:1,
		event_channel:1,
		recv_race_hack :1,
		/* Don't let the size of this field get too small. It has to be large
		 * enough to represent the reasonable range of special port numbers.
		 */
		special_port_num:17;
	const char name[0]; // service name, allocated inline past the struct
};
195
eabd1701
A
196// HACK: This should be per jobmgr_t
197static SLIST_HEAD(, machservice) special_ports;
5b0a4722
A
198
199#define PORT_HASH_SIZE 32
eabd1701 200#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
5b0a4722
A
201
202static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
203
ed34e3c3
A
204static void machservice_setup(launch_data_t obj, const char *key, void *context);
205static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
5b0a4722 206static void machservice_resetport(job_t j, struct machservice *ms);
eabd1701 207static void machservice_stamp_port(job_t j, struct machservice *ms);
5b0a4722 208static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
dcace88f 209static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
5b0a4722
A
210static void machservice_ignore(job_t j, struct machservice *ms);
211static void machservice_watch(job_t j, struct machservice *ms);
212static void machservice_delete(job_t j, struct machservice *, bool port_died);
213static void machservice_request_notifications(struct machservice *);
214static mach_port_t machservice_port(struct machservice *);
215static job_t machservice_job(struct machservice *);
216static bool machservice_hidden(struct machservice *);
217static bool machservice_active(struct machservice *);
218static const char *machservice_name(struct machservice *);
219static bootstrap_status_t machservice_status(struct machservice *);
ddbbfbc1 220void machservice_drain_port(struct machservice *);
ed34e3c3
A
221
/* A group of sockets belonging to a job (one Sockets plist entry). The
 * group name is stored inline past the struct; the union provides a
 * writable view (name_init) used only during initialization.
 */
struct socketgroup {
	SLIST_ENTRY(socketgroup) sle; // linkage on the owning job's sockets list
	int *fds; // array of descriptors in this group
	unsigned int fd_cnt; // number of entries in fds
	union {
		const char name[0];
		char name_init[0];
	};
};
231
eabd1701 232static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt);
5b0a4722
A
233static void socketgroup_delete(job_t j, struct socketgroup *sg);
234static void socketgroup_watch(job_t j, struct socketgroup *sg);
235static void socketgroup_ignore(job_t j, struct socketgroup *sg);
236static void socketgroup_callback(job_t j);
ed34e3c3 237static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
5b0a4722 238static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
ed34e3c3
A
239
/* One StartCalendarInterval entry. Each instance lives both on its job's
 * cal_intervals list and on the globally sorted sorted_calendar_events
 * list.
 */
struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle; // linkage on sorted_calendar_events
	SLIST_ENTRY(calendarinterval) sle; // linkage on the owning job's cal_intervals list
	job_t job; // job to run when the interval fires
	struct tm when; // calendar specification from the plist
	time_t when_next; // next absolute fire time (see calendarinterval_setalarm)
};
247
5b0a4722
A
248static LIST_HEAD(, calendarinterval) sorted_calendar_events;
249
250static bool calendarinterval_new(job_t j, struct tm *w);
251static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
252static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
253static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
254static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
255static void calendarinterval_callback(void);
256static void calendarinterval_sanity_check(void);
ed34e3c3
A
257
/* One environment variable from a job's plist. The key is stored inline
 * past the struct; key_init is the writable view used only during
 * initialization.
 */
struct envitem {
	SLIST_ENTRY(envitem) sle; // linkage on a job's env or global_env list
	char *value; // the variable's value string
	union {
		const char key[0];
		char key_init[0];
	};
};
266
eabd1701 267static bool envitem_new(job_t j, const char *k, const char *v, bool global);
5b0a4722 268static void envitem_delete(job_t j, struct envitem *ei, bool global);
ed34e3c3
A
269static void envitem_setup(launch_data_t obj, const char *key, void *context);
270
// One resource-limit entry imported from a job's plist (see launchd_keys2limits).
struct limititem {
	SLIST_ENTRY(limititem) sle; // linkage on the owning job's limits list
	struct rlimit lim; // limit values to apply
	unsigned int setsoft:1, sethard:1, which:30; // which: the RLIMIT_* selector
};
276
5b0a4722
A
277static bool limititem_update(job_t j, int w, rlim_t r);
278static void limititem_delete(job_t j, struct limititem *li);
ed34e3c3 279static void limititem_setup(launch_data_t obj, const char *key, void *context);
f36da725 280#if HAVE_SANDBOX
5b0a4722 281static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
f36da725 282#endif
ed34e3c3 283
587e987e
A
284static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
285
ed34e3c3
A
// Conditions a KeepAlive semaphore item can test (see struct semaphoreitem).
typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	CRASHED,
	DID_NOT_CRASH,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
} semaphore_reason_t;
298
/* One KeepAlive criterion. `what` (stored inline past the struct) names
 * whatever the criterion refers to (e.g. another job's label for the
 * OTHER_JOB_* reasons); what_init is the writable view used only during
 * initialization.
 */
struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle; // linkage on the owning job's semaphores list
	semaphore_reason_t why; // which condition this item tracks
	union {
		const char what[0];
		char what_init[0];
	};
};
308
5b0a4722
A
/* Context for semaphoreitem_setup_dict_iter: boolean dictionary values map
 * to the why_true/why_false reasons respectively.
 */
struct semaphoreitem_dict_iter_context {
	job_t j; // job being imported
	semaphore_reason_t why_true; // reason to record for a true value
	semaphore_reason_t why_false; // reason to record for a false value
};
ed34e3c3 314
5b0a4722
A
315static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
316static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
317static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
318static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
5b0a4722
A
319static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
320
dcace88f
A
/* One external (LaunchEvents) event a job has registered interest in.
 * Linked into both the owning event system's list and the owning job's
 * events list. The event name is stored inline past the struct.
 */
struct externalevent {
	LIST_ENTRY(externalevent) sys_le; // linkage on sys->events
	LIST_ENTRY(externalevent) job_le; // linkage on job->events
	struct eventsystem *sys; // event system this event belongs to
	uint64_t id; // identifier for lookups (see externalevent_find)
	job_t job; // job that registered this event
	bool state; // current state of the event
	bool wanted_state; // state the job wants -- NOTE(review): confirm exact semantics
	bool internal; // launchd-internal event rather than monitor-delivered -- TODO confirm
	xpc_object_t event; // the event payload
	xpc_object_t entitlements; // entitlement requirements attached to the event -- TODO confirm
	char name[0]; // event name, allocated inline
};
5b0a4722 336
dcace88f
A
// Context used while iterating a job's LaunchEvents entries during import.
struct externalevent_iter_ctx {
	job_t j; // job whose events are being set up
	struct eventsystem *sys; // event system the entries belong to
};
341
95379394 342static bool externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags);
dcace88f
A
343static void externalevent_delete(struct externalevent *ee);
344static void externalevent_setup(launch_data_t obj, const char *key, void *context);
345static struct externalevent *externalevent_find(const char *sysname, uint64_t id);
346
/* A named event subsystem (one key under LaunchEvents). Owns its events
 * and the counter used to assign their IDs. The system name is stored
 * inline past the struct.
 */
struct eventsystem {
	LIST_ENTRY(eventsystem) global_le; // linkage on the global list of event systems
	LIST_HEAD(, externalevent) events; // events registered with this system
	uint64_t curid; // counter for assigning event IDs
	char name[0]; // system name, allocated inline
};
353
dcace88f 354static struct eventsystem *eventsystem_new(const char *name);
eabd1701 355static void eventsystem_delete(struct eventsystem *sys) __attribute__((unused));
dcace88f
A
356static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
357static struct eventsystem *eventsystem_find(const char *name);
358static void eventsystem_ping(void);
359
95379394
A
/* A waiter blocked on attaching to a process/service in a domain. Linked
 * on a job manager's attaches list or on _launchd_domain_waiters. The
 * awaited name is stored inline past the struct.
 */
struct waiting4attach {
	LIST_ENTRY(waiting4attach) le; // list linkage
	mach_port_t port; // port used to notify the waiter -- TODO confirm direction
	pid_t dest; // pid the waiter wants to attach to
	xpc_service_type_t type; // XPC service type associated with the wait
	char name[0]; // awaited name, allocated inline
};
367
368static LIST_HEAD(, waiting4attach) _launchd_domain_waiters;
369
370static struct waiting4attach *waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type);
371static void waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a);
372static struct waiting4attach *waiting4attach_find(jobmgr_t jm, job_t j);
373
eabd1701
A
374#define ACTIVE_JOB_HASH_SIZE 32
375#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))
dcace88f
A
376
377#define MACHSERVICE_HASH_SIZE 37
378
379#define LABEL_HASH_SIZE 53
5b0a4722
A
/* A job manager (bootstrap namespace) or XPC domain: one node in the
 * hierarchy rooted at the PID-1 launchd's manager. It owns the jobs
 * registered in it, hash tables for labels/active jobs/Mach services, and
 * bookkeeping about the requestor that created it.
 */
struct jobmgr_s {
	kq_callback kqjobmgr_callback; // callback invoked for kevents targeting this manager
	LIST_ENTRY(jobmgr_s) xpc_le; // linkage on the global XPC-domain lists
	SLIST_ENTRY(jobmgr_s) sle; // linkage on the parent's submgrs list
	SLIST_HEAD(, jobmgr_s) submgrs; // child job managers
	LIST_HEAD(, job_s) jobs; // jobs registered in this manager
	LIST_HEAD(, waiting4attach) attaches; // attach waiters scoped to this manager

	/* For legacy reasons, we keep all job labels that are imported in the root
	 * job manager's label hash. If a job manager is an XPC domain, then it gets
	 * its own label hash that is separate from the "global" one stored in the
	 * root job manager.
	 */
	LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE]; // jobs hashed by pid
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE]; // services hashed by name
	LIST_HEAD(, job_s) global_env_jobs; // jobs contributing to the global environment
	mach_port_t jm_port; // this manager's bootstrap port
	mach_port_t req_port; // port of the requestor that created this manager
	jobmgr_t parentmgr; // parent manager; NULL for the root
	int reboot_flags; // flags passed through to reboot at shutdown
	time_t shutdown_time; // when shutdown of this manager began
	unsigned int global_on_demand_cnt; // jobs forcing peers to on-demand mode
	unsigned int normal_active_cnt; // count of active non-anonymous jobs
	unsigned int
		shutting_down:1,
		session_initialized:1,
		killed_stray_jobs:1,
		monitor_shutdown:1,
		shutdown_jobs_dirtied:1,
		shutdown_jobs_cleaned:1,
		xpc_singleton:1;
	uint32_t properties;
	// XPC-specific properties.
	char owner[MAXCOMLEN];
	char *shortdesc;
	mach_port_t req_bsport;
	mach_port_t req_excport;
	mach_port_t req_asport;
	mach_port_t req_gui_asport;
	pid_t req_pid;
	uid_t req_euid;
	gid_t req_egid;
	au_asid_t req_asid;
	vm_offset_t req_ctx;
	mach_msg_type_number_t req_ctx_sz;
	mach_port_t req_rport;
	uint64_t req_uniqueid;
	kern_return_t error;
	// Manager name, stored inline; name_init is the writable view for setup.
	union {
		const char name[0];
		char name_init[0];
	};
};
ed34e3c3 434
eabd1701 435// Global XPC domains.
dcace88f
A
436static jobmgr_t _s_xpc_system_domain;
437static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
438static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
dcace88f 439
95379394
A
440#define jobmgr_assumes(jm, e) os_assumes_ctx(jobmgr_log_bug, jm, (e))
441#define jobmgr_assumes_zero(jm, e) os_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
eabd1701 442#define jobmgr_assumes_zero_p(jm, e) posix_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
5b0a4722 443
dcace88f 444static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
dcace88f
A
445static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
446static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
447static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
5b0a4722
A
448static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
449static jobmgr_t jobmgr_parent(jobmgr_t jm);
450static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
f36da725 451static bool jobmgr_label_test(jobmgr_t jm, const char *str);
5b0a4722 452static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
ddbbfbc1
A
453static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
454static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
5b0a4722
A
455static void jobmgr_remove(jobmgr_t jm);
456static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
457static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
ddbbfbc1 458static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
5b0a4722 459static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
95379394 460static job_t managed_job(pid_t p);
ddbbfbc1 461static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
5b0a4722 462static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
dcace88f 463static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
5b0a4722
A
464static void job_export_all2(jobmgr_t jm, launch_data_t where);
465static void jobmgr_callback(void *obj, struct kevent *kev);
466static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
467static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
468static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
469static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
470static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
95379394 471static void jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children);
eabd1701 472// static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
95379394 473static bool jobmgr_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);
5b0a4722
A
474
475#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
ddbbfbc1 476#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
dcace88f 477#define AUTO_PICK_XPC_LABEL (const char *)(~2)
ddbbfbc1
A
478
// Records a per-user launchd job whose execution has been suspended.
struct suspended_peruser {
	LIST_ENTRY(suspended_peruser) sle; // linkage on a job's suspended_perusers list
	job_t j; // the suspended per-user launchd job
};
5b0a4722
A
483
/* The central per-job record: everything launchd knows about one managed
 * (or anonymous) process — list linkages, Mach ports, import-time plist
 * settings, runtime state, and a large set of single-bit flags. The job's
 * label is stored inline past the end of the struct.
 */
struct job_s {
	// MUST be first element of this structure.
	kq_callback kqjob_callback;
	LIST_ENTRY(job_s) sle;
	LIST_ENTRY(job_s) subjob_sle;
	LIST_ENTRY(job_s) needing_session_sle;
	LIST_ENTRY(job_s) jetsam_sle;
	LIST_ENTRY(job_s) pid_hash_sle;
	LIST_ENTRY(job_s) global_pid_hash_sle;
	LIST_ENTRY(job_s) label_hash_sle;
	LIST_ENTRY(job_s) global_env_sle;
	SLIST_ENTRY(job_s) curious_jobs_sle;
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;
	LIST_HEAD(, job_s) subjobs;
	LIST_HEAD(, externalevent) events;
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
	struct waiting4attach *w4a; // pending attach waiter for this job, if any
	job_t original; // for a dedicated instance, the job it was instantiated from
	job_t alias; // alias job in another manager, if any (see job_new_alias)
	cpu_type_t *j_binpref; // preferred binary architectures
	size_t j_binpref_cnt;
	mach_port_t j_port;
	mach_port_t exit_status_dest;
	mach_port_t exit_status_port;
	mach_port_t spawn_reply_port;
	uid_t mach_uid;
	jobmgr_t mgr; // owning job manager
	size_t argc;
	char **argv;
	char *prog;
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdinpath;
	char *stdoutpath;
	char *stderrpath;
	char *alt_exc_handler;
	char *cfbundleidentifier;
	unsigned int nruns; // number of times the job has been run
	uint64_t trt; // total run time accumulated -- NOTE(review): confirm units
#if HAVE_SANDBOX
	char *seatbelt_profile;
	uint64_t seatbelt_flags;
	char *container_identifier;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p; // current pid, or 0 when not running
	uint64_t uniqueid;
	int last_exit_status;
	int stdin_fd;
	int fork_fd;
	int nice;
	uint32_t pstype;
	uint32_t psproctype;
	int32_t jetsam_priority;
	int32_t jetsam_memlimit;
	int32_t main_thread_priority;
	uint32_t timeout;
	uint32_t exit_timeout;
	uint64_t sent_signal_time;
	uint64_t start_time;
	uint32_t min_run_time;
	bool unthrottle;
	uint32_t start_interval;
	uint32_t peruser_suspend_count;
	uuid_t instance_id;
	mode_t mask;
	mach_port_t asport;
	au_asid_t asid;
	uuid_t expected_audit_uuid;
	bool
	// man launchd.plist --> Debug
	debug:1,
	// man launchd.plist --> KeepAlive == false
	ondemand:1,
	// man launchd.plist --> SessionCreate
	session_create:1,
	// man launchd.plist --> LowPriorityIO
	low_pri_io:1,
	// man launchd.plist --> InitGroups
	no_init_groups:1,
	/* A legacy mach_init concept to make bootstrap_create_server/service()
	 * work
	 */
	priv_port_has_senders:1,
	// A hack during job importing
	importing_global_env:1,
	// A hack during job importing
	importing_hard_limits:1,
	// man launchd.plist --> Umask
	setmask:1,
	// A process that launchd knows about but doesn't manage.
	anonymous:1,
	// A legacy mach_init concept to detect sick jobs
	checkedin:1,
	// A job created via bootstrap_create_server()
	legacy_mach_job:1,
	// A job created via spawn_via_launchd()
	legacy_LS_job:1,
	// A legacy job that wants inetd compatible semantics
	inetcompat:1,
	// A twist on inetd compatibility
	inetcompat_wait:1,
	/* An event fired and the job should start, but not necessarily right
	 * away.
	 */
	start_pending:1,
	// man launchd.plist --> EnableGlobbing
	globargv:1,
	// man launchd.plist --> WaitForDebugger
	wait4debugger:1,
	// One-shot WaitForDebugger.
	wait4debugger_oneshot:1,
	// MachExceptionHandler == true
	internal_exc_handler:1,
	// A hack to support an option of spawn_via_launchd()
	stall_before_exec:1,
	/* man launchd.plist --> LaunchOnlyOnce.
	 *
	 * Note: <rdar://problem/5465184> Rename this to "HopefullyNeverExits".
	 */
	only_once:1,
	/* Make job_ignore() / job_watch() work. If these calls were balanced,
	 * then this wouldn't be necessary.
	 */
	currently_ignored:1,
	/* A job that forced all other jobs to be temporarily launch-on-
	 * demand
	 */
	forced_peers_to_demand_mode:1,
	// man launchd.plist --> Nice
	setnice:1,
	/* A job was asked to be unloaded/removed while running, we'll remove it
	 * after it exits.
	 */
	removal_pending:1,
	// job_kill() was called.
	sent_sigkill:1,
	// Enter the kernel debugger before killing a job.
	debug_before_kill:1,
	// A hack that launchd+launchctl use during jobmgr_t creation.
	weird_bootstrap:1,
	// man launchd.plist --> StartOnMount
	start_on_mount:1,
	// This job is a per-user launchd managed by the PID 1 launchd.
	per_user:1,
	// A job thoroughly confused launchd. We need to unload it ASAP.
	unload_at_mig_return:1,
	// man launchd.plist --> AbandonProcessGroup
	abandon_pg:1,
	/* During shutdown, do not send SIGTERM to stray processes in the
	 * process group of this job.
	 */
	ignore_pg_at_shutdown:1,
	/* Don't let this job create new 'job_t' objects in launchd. Has been
	 * seriously overloaded for the purposes of sandboxing.
	 */
	deny_job_creation:1,
	// man launchd.plist --> EnableTransactions
	enable_transactions:1,
	// The job was sent SIGKILL because it was clean.
	clean_kill:1,
	// The job has an OtherJobEnabled KeepAlive criterion.
	nosy:1,
	// The job exited due to a crash.
	crashed:1,
	// We've received NOTE_EXIT for the job and reaped it.
	reaped:1,
	// job_stop() was called.
	stopped:1,
	/* The job is to be kept alive continuously, but it must first get an
	 * initial kick off.
	 */
	needs_kickoff:1,
	// The job is a bootstrapper.
	is_bootstrapper:1,
	// The job owns the console.
	has_console:1,
	/* The job runs as a non-root user on embedded but has select privileges
	 * of the root user. This is SpringBoard.
	 */
	embedded_god:1,
	// The job is responsible for drawing the home screen on embedded.
	embedded_home:1,
	// We got NOTE_EXEC for the job.
	did_exec:1,
	// The job is an XPC service, and XPC proxy successfully exec(3)ed.
	xpcproxy_did_exec:1,
	// The (anonymous) job called vprocmgr_switch_to_session().
	holds_ref:1,
	// The job has Jetsam limits in place.
	jetsam_properties:1,
	// The job's Jetsam memory limits should only be applied in the background
	jetsam_memory_limit_background:1,
	/* This job was created as the result of a look up of a service provided
	 * by a MultipleInstance job.
	 */
	dedicated_instance:1,
	// The job supports creating additional instances of itself.
	multiple_instances:1,
	/* The sub-job was already removed from the parent's list of
	 * sub-jobs.
	 */
	former_subjob:1,
	/* The job is responsible for monitoring external events for this
	 * launchd.
	 */
	event_monitor:1,
	// The event monitor job has retrieved the initial list of events.
	event_monitor_ready2signal:1,
	// A lame hack.
	removing:1,
	// Disable ASLR when launching this job.
	disable_aslr:1,
	// The job is an XPC Service.
	xpc_service:1,
	// The job is the Performance team's shutdown monitor.
	shutdown_monitor:1,
	// We should open a transaction for the job when shutdown begins.
	dirty_at_shutdown:1,
	/* The job was sent SIGKILL but did not exit in a timely fashion,
	 * indicating a kernel bug.
	 */
	workaround9359725:1,
	// The job is the XPC domain bootstrapper.
	xpc_bootstrapper:1,
	// The job is an app (on either iOS or OS X) and has different resource
	// limitations.
	app:1,
	// FairPlay decryption failed on the job. This should only ever happen
	// to apps.
	fpfail:1,
	// The job failed to exec(3) for reasons that may be transient, so we're
	// waiting for UserEventAgent to tell us when it's okay to try spawning
	// again (i.e. when the executable path appears, when the UID appears,
	// etc.).
	waiting4ok:1,
	// The job exited due to memory pressure.
	jettisoned:1,
	// The job supports idle-exit.
	idle_exit:1,
	// The job was implicitly reaped by the kernel.
	implicit_reap:1,
	// The job is a system app -- NOTE(review): confirm exact semantics.
	system_app :1,
	// The job joins the GUI session -- NOTE(review): confirm exact semantics.
	joins_gui_session :1,
	// The job's background I/O is run at low priority -- TODO confirm.
	low_priority_background_io :1,
	// The job opts into legacy timer behavior -- TODO confirm.
	legacy_timers :1;

	// Job label, allocated inline past the end of the struct.
	const char label[0];
};
746
5b0a4722
A
747static size_t hash_label(const char *label) __attribute__((pure));
748static size_t hash_ms(const char *msstr) __attribute__((pure));
ddbbfbc1 749static SLIST_HEAD(, job_s) s_curious_jobs;
95379394 750static LIST_HEAD(, job_s) managed_actives[ACTIVE_JOB_HASH_SIZE];
5b0a4722 751
95379394
A
752#define job_assumes(j, e) os_assumes_ctx(job_log_bug, j, (e))
753#define job_assumes_zero(j, e) os_assumes_zero_ctx(job_log_bug, j, (e))
eabd1701 754#define job_assumes_zero_p(j, e) posix_assumes_zero_ctx(job_log_bug, j, (e))
5b0a4722 755
ed34e3c3 756static void job_import_keys(launch_data_t obj, const char *key, void *context);
5b0a4722
A
757static void job_import_bool(job_t j, const char *key, bool value);
758static void job_import_string(job_t j, const char *key, const char *value);
759static void job_import_integer(job_t j, const char *key, long long value);
760static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
761static void job_import_array(job_t j, const char *key, launch_data_t value);
762static void job_import_opaque(job_t j, const char *key, launch_data_t value);
763static bool job_set_global_on_demand(job_t j, bool val);
764static const char *job_active(job_t j);
765static void job_watch(job_t j);
766static void job_ignore(job_t j);
767static void job_reap(job_t j);
768static bool job_useless(job_t j);
769static bool job_keepalive(job_t j);
ddbbfbc1 770static void job_dispatch_curious_jobs(job_t j);
5b0a4722
A
771static void job_start(job_t j);
772static void job_start_child(job_t j) __attribute__((noreturn));
773static void job_setup_attributes(job_t j);
774static bool job_setup_machport(job_t j);
dcace88f 775static kern_return_t job_setup_exit_port(job_t j);
5b0a4722
A
776static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
777static void job_postfork_become_user(job_t j);
ddbbfbc1
A
778static void job_postfork_test_user(job_t j);
779static void job_log_pids_with_weird_uids(job_t j);
5b0a4722 780static void job_setup_exception_port(job_t j, task_t target_task);
ed34e3c3 781static void job_callback(void *obj, struct kevent *kev);
ddbbfbc1 782static void job_callback_proc(job_t j, struct kevent *kev);
5b0a4722
A
783static void job_callback_timer(job_t j, void *ident);
784static void job_callback_read(job_t j, int ident);
785static void job_log_stray_pg(job_t j);
ddbbfbc1
A
786static void job_log_children_without_exec(job_t j);
787static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
788static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
dcace88f 789static job_t job_new_alias(jobmgr_t jm, job_t src);
ddbbfbc1 790static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
dcace88f 791static job_t job_new_subjob(job_t j, uuid_t identifier);
5b0a4722
A
792static void job_kill(job_t j);
793static void job_uncork_fork(job_t j);
5b0a4722
A
794static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
795static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
95379394
A
796static bool job_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);
797static void job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status);
798#if HAVE_SYSTEMSTATS
799static void job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status);
800#endif
ddbbfbc1 801static void job_set_exception_port(job_t j, mach_port_t port);
dcace88f
A
802static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
803static void job_open_shutdown_transaction(job_t ji);
804static void job_close_shutdown_transaction(job_t ji);
eabd1701
A
805static launch_data_t job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport);
806static void job_setup_per_user_directory(job_t j, uid_t uid, const char *path);
807static void job_setup_per_user_directories(job_t j, uid_t uid, const char *label);
95379394
A
808static void job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data);
809static void job_update_jetsam_memory_limit(job_t j, int32_t limit);
810
811#if TARGET_OS_EMBEDDED
812static bool job_import_defaults(launch_data_t pload);
813#endif
814
/* Maps XPC jetsam band constants onto the kernel's jetsam priority levels.
 * Note that the AUDIO and ACCESSORY bands intentionally share a single kernel
 * priority (JETSAM_PRIORITY_AUDIO_AND_ACCESSORY).
 */
static struct priority_properties_t {
	long long band; // XPC_JETSAM_BAND_* constant
	int priority; // corresponding JETSAM_PRIORITY_* kernel value
} _launchd_priority_map[] = {
	{ XPC_JETSAM_BAND_SUSPENDED, JETSAM_PRIORITY_IDLE },
	{ XPC_JETSAM_BAND_BACKGROUND_OPPORTUNISTIC, JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC },
	{ XPC_JETSAM_BAND_BACKGROUND, JETSAM_PRIORITY_BACKGROUND },
	{ XPC_JETSAM_BAND_MAIL, JETSAM_PRIORITY_MAIL },
	{ XPC_JETSAM_BAND_PHONE, JETSAM_PRIORITY_PHONE },
	{ XPC_JETSAM_BAND_UI_SUPPORT, JETSAM_PRIORITY_UI_SUPPORT },
	{ XPC_JETSAM_BAND_FOREGROUND_SUPPORT, JETSAM_PRIORITY_FOREGROUND_SUPPORT },
	{ XPC_JETSAM_BAND_FOREGROUND, JETSAM_PRIORITY_FOREGROUND },
	{ XPC_JETSAM_BAND_AUDIO, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
	{ XPC_JETSAM_BAND_ACCESSORY, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
	{ XPC_JETSAM_BAND_CRITICAL, JETSAM_PRIORITY_CRITICAL },
	{ XPC_JETSAM_BAND_TELEPHONY, JETSAM_PRIORITY_TELEPHONY },
};
5b0a4722 832
ed34e3c3
A
/* Maps LAUNCH_JOBKEY_RESOURCELIMIT_* property-list keys onto the RLIMIT_*
 * resource identifiers used with getrlimit(2)/setrlimit(2).
 */
static const struct {
	const char *key; // launchd job property-list key
	int val; // RLIMIT_* resource identifier
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
847
848static time_t cronemu(int mon, int mday, int hour, int min);
849static time_t cronemu_wday(int wday, int hour, int min);
850static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
851static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
852static bool cronemu_hour(struct tm *wtm, int hour, int min);
853static bool cronemu_min(struct tm *wtm, int min);
854
eabd1701 855// miscellaneous file local functions
ddbbfbc1 856static size_t get_kern_max_proc(void);
ed34e3c3 857static char **mach_cmd2argv(const char *string);
5b0a4722 858static size_t our_strhash(const char *s) __attribute__((pure));
ddbbfbc1
A
859
860void eliminate_double_reboot(void);
861
eabd1701
A
862#pragma mark XPC Domain Forward Declarations
863static job_t _xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
864static int _xpc_domain_import_services(job_t j, launch_data_t services);
865
866#pragma mark XPC Event Forward Declarations
867static int xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms);
868static int xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply);
869static int xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply);
870static int xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply);
871static int xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
872static int xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply);
873static int xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
874static int xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply);
875
95379394
A
876#pragma mark XPC Process Forward Declarations
877static int xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply);
878static int xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply);
879
eabd1701
A
// file local globals
// Consulted by the embedded "hand of God" permission checks in job_stop() and
// job_remove(); cleared in job_remove() when the owning job goes away.
static job_t _launchd_embedded_god = NULL;
static job_t _launchd_embedded_home = NULL;
static size_t total_children; // managed (non-anonymous) children; reported by jobmgr_still_alive_with_check()
static size_t total_anon_children; // anonymous children; reported alongside total_children
static mach_port_t the_exception_server;
static job_t workaround_5477111; // see <rdar://problem/5477111>
static LIST_HEAD(, job_s) s_needing_sessions; // jobs waiting for an audit session (see job_remove())
static LIST_HEAD(, eventsystem) _s_event_systems;
static struct eventsystem *_launchd_support_system;
static job_t _launchd_event_monitor; // cleared in job_remove() when the event monitor job is removed
static job_t _launchd_xpc_bootstrapper; // cleared in job_remove()
static job_t _launchd_shutdown_monitor; // dispatched by jobmgr_shutdown(); cleared in job_remove()

#if TARGET_OS_EMBEDDED
static xpc_object_t _launchd_defaults_cache;

mach_port_t launchd_audit_port = MACH_PORT_DEAD;
pid_t launchd_audit_session = 0;
#else
mach_port_t launchd_audit_port = MACH_PORT_NULL;
au_asid_t launchd_audit_session = AU_DEFAUDITSID;
#endif

static int s_no_hang_fd = -1;

// process wide globals
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool launchd_shutdown_debugging = false;
bool launchd_verbose_boot = false;
bool launchd_embedded_handofgod = false; // enables the embedded permission checks in job_stop()/job_remove()
bool launchd_runtime_busy_time = false;
ed34e3c3
A
913
914void
5b0a4722 915job_ignore(job_t j)
ed34e3c3
A
916{
917 struct socketgroup *sg;
918 struct machservice *ms;
ed34e3c3 919
5b0a4722
A
920 if (j->currently_ignored) {
921 return;
922 }
923
924 job_log(j, LOG_DEBUG, "Ignoring...");
925
926 j->currently_ignored = true;
927
5b0a4722 928 SLIST_FOREACH(sg, &j->sockets, sle) {
ed34e3c3 929 socketgroup_ignore(j, sg);
5b0a4722 930 }
ed34e3c3 931
5b0a4722
A
932 SLIST_FOREACH(ms, &j->machservices, sle) {
933 machservice_ignore(j, ms);
934 }
ed34e3c3
A
935}
936
937void
5b0a4722 938job_watch(job_t j)
ed34e3c3
A
939{
940 struct socketgroup *sg;
941 struct machservice *ms;
ed34e3c3 942
5b0a4722
A
943 if (!j->currently_ignored) {
944 return;
945 }
946
947 job_log(j, LOG_DEBUG, "Watching...");
948
949 j->currently_ignored = false;
950
951 SLIST_FOREACH(sg, &j->sockets, sle) {
ed34e3c3 952 socketgroup_watch(j, sg);
5b0a4722 953 }
ed34e3c3 954
5b0a4722
A
955 SLIST_FOREACH(ms, &j->machservices, sle) {
956 machservice_watch(j, ms);
957 }
ed34e3c3
A
958}
959
/* Ask a running job to exit. Prefers proc_terminate(), which reports (via
 * `sig`) which signal it actually delivered; falls back to a plain SIGTERM
 * via kill2() if proc_terminate() fails. Marks the job `stopped` so the
 * request is not repeated. No-op for jobs that are not running, already
 * stopped, or anonymous.
 */
void
job_stop(job_t j)
{
	int sig;

	if (unlikely(!j->p || j->stopped || j->anonymous)) {
		return;
	}

#if TARGET_OS_EMBEDDED
	/* Embedded "hand of God" mode: a privileged job may only stop jobs that
	 * belong to the same user.
	 */
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!_launchd_embedded_god->username || !j->username) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	j->sent_signal_time = runtime_get_opaque_time();

	job_log(j, LOG_DEBUG | LOG_CONSOLE, "Stopping job...");

	int error = -1;
	error = proc_terminate(j->p, &sig);
	if (error) {
		job_log(j, LOG_ERR | LOG_CONSOLE, "Could not terminate job: %d: %s", error, strerror(error));
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Using fallback option to terminate job...");
		// Fallback: deliver SIGTERM directly.
		error = kill2(j->p, SIGTERM);
		if (error) {
			job_log(j, LOG_ERR, "Could not signal job: %d: %s", error, strerror(error));
		} else {
			sig = SIGTERM;
		}
	}

	if (!error) {
		switch (sig) {
		case SIGKILL:
			j->sent_sigkill = true;
			j->clean_kill = true;

			/* We cannot effectively simulate an exit for jobs during the course
			 * of a normal run. Even if we pretend that the job exited, we will
			 * still not have gotten the receive rights associated with the
			 * job's MachServices back, so we cannot safely respawn it.
			 */
			if (j->mgr->shutting_down) {
				// Arm a one-shot timer so shutdown does not hang on this job.
				error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j);
				(void)job_assumes_zero_p(j, error);
			}

			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sent job SIGKILL.");
			break;
		case SIGTERM:
			if (j->exit_timeout) {
				// Escalation timer: if the job has not exited within
				// exit_timeout seconds, the timer callback will deal with it.
				error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j);
				(void)job_assumes_zero_p(j, error);
			} else {
				job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
			}
			job_log(j, LOG_DEBUG, "Sent job SIGTERM.");
			break;
		default:
			job_log(j, LOG_ERR | LOG_CONSOLE, "Job was sent unexpected signal: %d: %s", sig, strsignal(sig));
			break;
		}
	}

	j->stopped = true;
}
1038
/* Serialize the externally visible state of a job into a newly allocated
 * launch_data dictionary (LAUNCH_JOBKEY_* keys). Allocation failures for
 * individual values are tolerated by simply omitting that key; only failure
 * to allocate the top-level dictionary returns NULL. The caller owns the
 * returned object.
 */
launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}

	// FairPlay failures are reported with a sentinel exit status.
	long long status = j->last_exit_status;
	if (j->fpfail) {
		status = LAUNCH_EXITSTATUS_FAIRPLAY_FAIL;
	}
	if ((tmp = launch_data_new_integer(status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}

	// Only include the PID while the job is actually running.
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->enable_transactions && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	// Export each socket group as an array of fds keyed by group name.
	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		unsigned int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	/* Export Mach service names. Only the names matter here: the port values
	 * are MACH_PORT_NULL placeholders. Per-PID services are collected into a
	 * separate dictionary (tmp3), inserted only if any exist.
	 */
	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
1158
1159static void
1160jobmgr_log_active_jobs(jobmgr_t jm)
1161{
1162 const char *why_active;
1163 jobmgr_t jmi;
1164 job_t ji;
1165
1166 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
1167 jobmgr_log_active_jobs(jmi);
1168 }
1169
eabd1701
A
1170 int level = LOG_DEBUG;
1171 if (pid1_magic) {
1172 level |= LOG_CONSOLE;
1173 }
1174
5b0a4722 1175 LIST_FOREACH(ji, &jm->jobs, sle) {
dcace88f
A
1176 if ((why_active = job_active(ji))) {
1177 if (ji->p != 1) {
eabd1701
A
1178 job_log(ji, level, "%s", why_active);
1179
1180 uint32_t flags = 0;
1181 (void)proc_get_dirty(ji->p, &flags);
1182 if (!(flags & PROC_DIRTY_TRACKED)) {
1183 continue;
1184 }
1185
1186 char *dirty = "clean";
1187 if (flags & PROC_DIRTY_IS_DIRTY) {
1188 dirty = "dirty";
1189 }
1190
1191 char *idle_exit = "idle-exit unsupported";
1192 if (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT) {
1193 idle_exit = "idle-exit supported";
1194 }
1195
1196 job_log(ji, level, "Killability: %s/%s", dirty, idle_exit);
dcace88f 1197 }
ddbbfbc1 1198 }
ed34e3c3 1199 }
5b0a4722
A
1200}
1201
1202static void
ddbbfbc1 1203jobmgr_still_alive_with_check(jobmgr_t jm)
5b0a4722 1204{
eabd1701
A
1205 int level = LOG_DEBUG;
1206 if (pid1_magic) {
1207 level |= LOG_CONSOLE;
1208 }
1209
1210 jobmgr_log(jm, level, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
ddbbfbc1 1211 jobmgr_log_active_jobs(jm);
eabd1701 1212 launchd_log_push();
5b0a4722
A
1213}
1214
/* Begin shutting down a job manager: record the shutdown time, mark the
 * manager (and, recursively, all submanagers) as shutting down, start the
 * shutdown monitor for the root manager when running as PID 1, and arm a
 * recurring 5-second timer used to track shutdown progress. Returns the
 * result of an immediate garbage-collection pass.
 */
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim the new line that asctime_r(3) puts there for some reason.
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	jm->shutting_down = true;

	// Shut down children first; SAFE variant because submgrs may self-remove.
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (!jm->parentmgr) {
		if (pid1_magic) {
			// Spawn the shutdown monitor.
			if (_launchd_shutdown_monitor && !_launchd_shutdown_monitor->p) {
				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
				job_dispatch(_launchd_shutdown_monitor, true);
			}
		}

		// Recurring 5-second timer keyed on the root manager itself.
		(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));
	}

	return jobmgr_do_garbage_collection(jm);
}
1257
/* Tear down a job manager: recursively remove submanagers, remove all jobs,
 * release every Mach port the manager holds, and then either unlink from the
 * parent manager, reboot(2) (root manager as PID 1), or exit(3) (per-session
 * launchd). Frees `jm` on the non-reboot/exit paths.
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!SLIST_EMPTY(&jm->submgrs)) {
		size_t cnt = 0;
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
			cnt++;
		}

		// All submanagers should have unlinked themselves by now.
		(void)jobmgr_assumes_zero(jm, cnt);
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && ji->p != 0) {
			// Forcibly forget the PID so job_remove() does not defer removal.
			job_log(ji, LOG_ERR, "Job is still active at job manager teardown.");
			ji->p = 0;
		}

		job_remove(ji);
	}

	struct waiting4attach *w4ai = NULL;
	while ((w4ai = LIST_FIRST(&jm->attaches))) {
		waiting4attach_delete(jm, w4ai);
	}

	// Release the manager's Mach ports.
	if (jm->req_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_port));
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_close_recv(jm->jm_port));
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_bsport));
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_excport));
	}
	if (MACH_PORT_VALID(jm->req_asport)) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_asport));
	}
	if (jm->req_rport) {
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			(void)jobmgr_assumes_zero(jm, kr);
		}
	}
	if (jm->req_ctx) {
		(void)jobmgr_assumes_zero(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz));
	}

	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim asctime_r(3)'s trailing newline.
	date[24] = 0;

	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);

		// Hack for the guest user so that its stuff doesn't persist.
		//
		// <rdar://problem/14527875>
		if (strcmp(jm->name, VPROCMGR_SESSION_AQUA) == 0 && getuid() == 201) {
			raise(SIGTERM);
		}
	} else if (pid1_magic) {
		// Root manager as PID 1: this is the end of userspace shutdown.
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		launchd_closelog();
		(void)jobmgr_assumes_zero_p(jm, reboot(jm->reboot_flags));
	} else {
		// Per-session launchd: simply exit.
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		launchd_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
1360
/* Remove a job and free all of its resources. If the job is still running
 * and not anonymous, removal is deferred until the job exits (job_stop() is
 * issued and `removal_pending` is set). Aliases take a short-circuit path
 * that only deletes Mach services and unlinks the job. Subjobs are removed
 * recursively at the end.
 */
void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

	if (j->alias) {
		/* HACK: Egregious code duplication. But as with machservice_delete(),
		 * job aliases can't (and shouldn't) have any complex behaviors
		 * associated with them.
		 */
		while ((ms = SLIST_FIRST(&j->machservices))) {
			machservice_delete(j, ms, false);
		}

		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, label_hash_sle);
		free(j);
		return;
	}

#if TARGET_OS_EMBEDDED
	/* Embedded "hand of God" mode: a privileged job may only remove jobs
	 * that belong to the same user.
	 */
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!(_launchd_embedded_god->username && j->username)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	/* Do this BEFORE we check and see whether the job is still active. If we're
	 * a sub-job, we're being removed due to the parent job removing us.
	 * Therefore, the parent job will free itself after this call completes. So
	 * if we defer removing ourselves from the parent's list, we'll crash when
	 * we finally get around to it.
	 */
	if (j->dedicated_instance && !j->former_subjob) {
		LIST_REMOVE(j, subjob_sle);
		j->former_subjob = true;
	}

	if (unlikely(j->p)) {
		if (j->anonymous) {
			job_reap(j);
		} else {
			// Defer removal: job_callback_proc() will finish it after exit.
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}

			return;
		}
	}

	if (!j->removing) {
		j->removing = true;
		job_dispatch_curious_jobs(j);
	}

	ipc_close_all_with_job(j);

	if (j->forced_peers_to_demand_mode) {
		// Release this job's hold on global on-demand mode.
		job_set_global_on_demand(j, false);
	}

	// fork_fd is expected to be 0 here; if not, complain and close it.
	if (job_assumes_zero(j, j->fork_fd)) {
		(void)posix_assumes_zero(runtime_close(j->fork_fd));
	}

	if (j->stdin_fd) {
		(void)posix_assumes_zero(runtime_close(j->stdin_fd));
	}

	if (j->j_port) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	}

	// Delete every per-job data structure.
	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	struct externalevent *eei = NULL;
	while ((eei = LIST_FIRST(&j->events))) {
		externalevent_delete(eei);
	}

	// Clear any file-local global references to this job.
	if (j->event_monitor) {
		_launchd_event_monitor = NULL;
	}
	if (j->xpc_bootstrapper) {
		_launchd_xpc_bootstrapper = NULL;
	}

	// Free owned strings and buffers.
	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
	if (j->cfbundleidentifier) {
		free(j->cfbundleidentifier);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
	if (j->container_identifier) {
		free(j->container_identifier);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	if (j->start_interval) {
		runtime_del_weak_ref();
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
	}
	if (j->exit_timeout) {
		/* If this fails, it just means the timer's already fired, so no need to
		 * wrap it in an assumes() macro.
		 */
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->asport != MACH_PORT_NULL) {
		(void)job_assumes_zero(j, launchd_mport_deallocate(j->asport));
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if (j->embedded_god) {
		_launchd_embedded_god = NULL;
	}
	if (j->embedded_home) {
		_launchd_embedded_home = NULL;
	}
	if (j->shutdown_monitor) {
		_launchd_shutdown_monitor = NULL;
	}

	(void)kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	// Recursively remove sub-jobs; SAFE because job_remove() unlinks them.
	job_t ji = NULL;
	job_t jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
		job_remove(ji);
	}

	job_log(j, LOG_DEBUG, "Removed");

	// Poison the callback pointer to catch use-after-free.
	j->kqjob_callback = (kq_callback)0x8badf00d;
	free(j);
}
1581
1582void
1583socketgroup_setup(launch_data_t obj, const char *key, void *context)
1584{
1585 launch_data_t tmp_oai;
5b0a4722 1586 job_t j = context;
ddbbfbc1 1587 size_t i, fd_cnt = 1;
ed34e3c3
A
1588 int *fds;
1589
5b0a4722 1590 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
ed34e3c3 1591 fd_cnt = launch_data_array_get_count(obj);
5b0a4722 1592 }
ed34e3c3
A
1593
1594 fds = alloca(fd_cnt * sizeof(int));
1595
1596 for (i = 0; i < fd_cnt; i++) {
5b0a4722 1597 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
ed34e3c3 1598 tmp_oai = launch_data_array_get_index(obj, i);
5b0a4722 1599 } else {
ed34e3c3 1600 tmp_oai = obj;
5b0a4722 1601 }
ed34e3c3
A
1602
1603 fds[i] = launch_data_get_fd(tmp_oai);
1604 }
1605
eabd1701 1606 socketgroup_new(j, key, fds, fd_cnt);
ed34e3c3
A
1607
1608 ipc_revoke_fds(obj);
1609}
1610
1611bool
5b0a4722
A
1612job_set_global_on_demand(job_t j, bool val)
1613{
1614 if (j->forced_peers_to_demand_mode && val) {
1615 return false;
1616 } else if (!j->forced_peers_to_demand_mode && !val) {
1617 return false;
1618 }
1619
1620 if ((j->forced_peers_to_demand_mode = val)) {
1621 j->mgr->global_on_demand_cnt++;
1622 } else {
1623 j->mgr->global_on_demand_cnt--;
1624 }
1625
1626 if (j->mgr->global_on_demand_cnt == 0) {
1627 jobmgr_dispatch_all(j->mgr, false);
1628 }
1629
1630 return true;
1631}
1632
/* Create and wire up the job's private Mach port: allocate a receive right,
 * register it with the runtime so job_server handles its messages, and
 * request a no-senders notification. On any failure the receive right is
 * closed and false is returned.
 */
bool
job_setup_machport(job_t j)
{
	if (job_assumes_zero(j, launchd_mport_create_recv(&j->j_port)) != KERN_SUCCESS) {
		return false;
	}

	if (job_assumes_zero(j, runtime_add_mport(j->j_port, job_server)) != KERN_SUCCESS) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
		return false;
	}

	if (job_assumes_zero(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS)) != KERN_SUCCESS) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
		return false;
	}

	return true;
}
1655
/* Create the port pair used to deliver this job's exit status: a receive
 * right (queue-limited to a single message) plus a send-once right for the
 * sender. On failure the receive right is cleaned up, exit_status_port is
 * reset to MACH_PORT_NULL, and the failing kern_return_t is returned.
 */
kern_return_t
job_setup_exit_port(job_t j)
{
	kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
	if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
		/* Bug fix: this path previously returned MACH_PORT_NULL, which is
		 * numerically identical to KERN_SUCCESS and therefore silently masked
		 * the allocation failure from callers of this kern_return_t function.
		 * Propagate the real error code instead.
		 */
		return kr;
	}

	// Only one exit-status message will ever be queued on this port.
	struct mach_port_limits limits = {
		.mpl_qlimit = 1,
	};
	kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
	(void)job_assumes_zero(j, kr);

	kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
	if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
		// Don't leak the receive right if we cannot mint the send-once right.
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
		j->exit_status_port = MACH_PORT_NULL;
	}

	return kr;
}
1678
/* Create a legacy mach_init-style job in the same manager as `j`, from a
 * command line string. `uid` becomes the job's mach_uid and `ond` selects
 * on-demand behavior. Returns the new job, or NULL on failure (argv parse
 * failure, job_new() denial, or Mach port setup failure); any partially
 * constructed job is removed before returning.
 */
job_t
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
{
	const char **argv = (const char **)mach_cmd2argv(cmd);
	job_t jr = NULL;

	if (!argv) {
		goto out_bad;
	}

	jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
	free(argv);

	// Job creation can be denied during shutdown.
	if (unlikely(jr == NULL)) {
		goto out_bad;
	}

	jr->mach_uid = uid;
	jr->ondemand = ond;
	jr->legacy_mach_job = true;
	jr->abandon_pg = true;
	jr->priv_port_has_senders = true; // the IPC that called us will make-send on this port

	if (!job_setup_machport(jr)) {
		goto out_bad;
	}

	job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");

	return jr;

out_bad:
	// Tear down any partially constructed job before reporting failure.
	if (jr) {
		job_remove(jr);
	}
	return NULL;
}
1717
5b0a4722
A
1718job_t
1719job_new_anonymous(jobmgr_t jm, pid_t anonpid)
ed34e3c3 1720{
dcace88f 1721 struct proc_bsdshortinfo proc;
5b0a4722
A
1722 bool shutdown_state;
1723 job_t jp = NULL, jr = NULL;
ddbbfbc1
A
1724 uid_t kp_euid, kp_uid, kp_svuid;
1725 gid_t kp_egid, kp_gid, kp_svgid;
5b0a4722 1726
eabd1701 1727 if (anonpid == 0) {
ddbbfbc1 1728 errno = EINVAL;
ed34e3c3
A
1729 return NULL;
1730 }
eabd1701
A
1731
1732 if (anonpid >= 100000) {
1733 /* The kernel current defines PID_MAX to be 99999, but that define isn't
1734 * exported.
1735 */
1736 launchd_syslog(LOG_WARNING, "Did PID_MAX change? Got request from PID: %d", anonpid);
ddbbfbc1 1737 errno = EINVAL;
ed34e3c3 1738 return NULL;
5b0a4722 1739 }
ed34e3c3 1740
dcace88f 1741 /* libproc returns the number of bytes written into the buffer upon success,
eabd1701 1742 * zero on failure. I'd much rather it return -1 on failure, like sysctl(3).
dcace88f
A
1743 */
1744 if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
1745 if (errno != ESRCH) {
eabd1701 1746 (void)jobmgr_assumes_zero(jm, errno);
dcace88f 1747 }
ed34e3c3
A
1748 return NULL;
1749 }
1750
eabd1701
A
1751 if (proc.pbsi_comm[0] == '\0') {
1752 launchd_syslog(LOG_WARNING, "Blank command for PID: %d", anonpid);
ddbbfbc1 1753 errno = EINVAL;
5b0a4722
A
1754 return NULL;
1755 }
ed34e3c3 1756
dcace88f
A
1757 if (unlikely(proc.pbsi_status == SZOMB)) {
1758 jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
ddbbfbc1
A
1759 }
1760
dcace88f
A
1761 if (unlikely(proc.pbsi_flags & P_SUGID)) {
1762 jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
ddbbfbc1
A
1763 }
1764
dcace88f
A
1765 kp_euid = proc.pbsi_uid;
1766 kp_uid = proc.pbsi_ruid;
1767 kp_svuid = proc.pbsi_svuid;
1768 kp_egid = proc.pbsi_gid;
1769 kp_gid = proc.pbsi_rgid;
1770 kp_svgid = proc.pbsi_svgid;
ddbbfbc1
A
1771
1772 if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
1773 jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
dcace88f 1774 kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
ed34e3c3
A
1775 }
1776
eabd1701
A
1777 /* "Fix" for when the kernel turns the process tree into a weird, cyclic
1778 * graph.
1779 *
1780 * See <rdar://problem/7264615> for the symptom and <rdar://problem/5020256>
587e987e
A
1781 * as to why this can happen.
1782 */
eabd1701
A
1783 if ((pid_t)proc.pbsi_ppid == anonpid) {
1784 jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). Ignoring: %s", proc.pbsi_comm);
587e987e
A
1785 errno = EINVAL;
1786 return NULL;
5b0a4722
A
1787 }
1788
eabd1701
A
1789 /* HACK: Normally, job_new() returns an error during shutdown, but anonymous
1790 * jobs can pop up during shutdown and need to talk to us.
1791 */
ddbbfbc1 1792 if (unlikely(shutdown_state = jm->shutting_down)) {
5b0a4722
A
1793 jm->shutting_down = false;
1794 }
1795
eabd1701 1796 // We only set requestor_pid for XPC domains.
dcace88f 1797 const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
eabd1701 1798 if ((jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL))) {
f271391c 1799 u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;
5b0a4722
A
1800
1801 total_anon_children++;
1802 jr->anonymous = true;
1803 jr->p = anonpid;
1804
eabd1701 1805 // Anonymous process reaping is messy.
5b0a4722
A
1806 LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);
1807
eabd1701
A
1808 if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1)) {
1809 if (errno != ESRCH) {
1810 (void)job_assumes_zero(jr, errno);
1811 }
1812
1813 // Zombies interact weirdly with kevent(3).
5b0a4722
A
1814 job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
1815 jr->unload_at_mig_return = true;
1816 }
1817
dcace88f 1818 if (unlikely(shutdown_state)) {
eabd1701 1819 job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
ed34e3c3 1820 }
5b0a4722 1821
dcace88f 1822 job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
eabd1701 1823 } else {
95379394 1824 (void)os_assumes_zero(errno);
ed34e3c3
A
1825 }
1826
eabd1701 1827 // Undo our hack from above.
ddbbfbc1 1828 if (unlikely(shutdown_state)) {
5b0a4722
A
1829 jm->shutting_down = true;
1830 }
ed34e3c3 1831
eabd1701
A
1832 /* This is down here to prevent infinite recursion due to a process
1833 * attaching to its parent through ptrace(3) -- causing a cycle in the
1834 * process tree and thereby not making it a tree anymore. We need to make
1835 * sure that the anonymous job has been added to the process list so that
1836 * we'll find the tracing parent PID of the parent process, which is the
1837 * child, when we go looking for it in jobmgr_find_by_pid().
1838 *
1839 * <rdar://problem/7264615>
587e987e 1840 */
dcace88f 1841 switch (proc.pbsi_ppid) {
eabd1701
A
1842 case 0:
1843 // The kernel.
1844 break;
1845 case 1:
1846 if (!pid1_magic) {
587e987e 1847 break;
eabd1701
A
1848 }
1849 // Fall through.
1850 default:
1851 jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
1852 if (jobmgr_assumes(jm, jp != NULL)) {
1853 if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
1854 job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
dcace88f 1855 }
eabd1701
A
1856 }
1857 break;
587e987e
A
1858 }
1859
ed34e3c3
A
1860 return jr;
1861}
1862
dcace88f
A
1863job_t
1864job_new_subjob(job_t j, uuid_t identifier)
1865{
1866 char label[0];
1867 uuid_string_t idstr;
1868 uuid_unparse(identifier, idstr);
1869 size_t label_sz = snprintf(label, 0, "%s.%s", j->label, idstr);
1870
1871 job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
eabd1701 1872 if (nj != NULL) {
dcace88f 1873 nj->kqjob_callback = job_callback;
95379394 1874 nj->original = j;
dcace88f
A
1875 nj->mgr = j->mgr;
1876 nj->min_run_time = j->min_run_time;
1877 nj->timeout = j->timeout;
1878 nj->exit_timeout = j->exit_timeout;
eabd1701 1879
dcace88f 1880 snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
eabd1701
A
1881
1882 // Set all our simple Booleans that are applicable.
dcace88f
A
1883 nj->debug = j->debug;
1884 nj->ondemand = j->ondemand;
1885 nj->checkedin = true;
1886 nj->low_pri_io = j->low_pri_io;
1887 nj->setmask = j->setmask;
1888 nj->wait4debugger = j->wait4debugger;
1889 nj->internal_exc_handler = j->internal_exc_handler;
1890 nj->setnice = j->setnice;
1891 nj->abandon_pg = j->abandon_pg;
1892 nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
1893 nj->deny_job_creation = j->deny_job_creation;
eabd1701 1894 nj->enable_transactions = j->enable_transactions;
dcace88f
A
1895 nj->needs_kickoff = j->needs_kickoff;
1896 nj->currently_ignored = true;
1897 nj->dedicated_instance = true;
1898 nj->xpc_service = j->xpc_service;
a6e7a709 1899 nj->xpc_bootstrapper = j->xpc_bootstrapper;
95379394
A
1900 nj->jetsam_priority = j->jetsam_priority;
1901 nj->jetsam_memlimit = j->jetsam_memlimit;
1902 nj->psproctype = j->psproctype;
eabd1701 1903
dcace88f
A
1904 nj->mask = j->mask;
1905 uuid_copy(nj->instance_id, identifier);
eabd1701
A
1906
1907 // These jobs are purely on-demand Mach jobs.
1908 // {Hard | Soft}ResourceLimits are not supported.
1909 // JetsamPriority is not supported.
1910
dcace88f
A
1911 if (j->prog) {
1912 nj->prog = strdup(j->prog);
1913 }
1914 if (j->argv) {
1915 size_t sz = malloc_size(j->argv);
1916 nj->argv = (char **)malloc(sz);
eabd1701
A
1917 if (nj->argv != NULL) {
1918 // This is the start of our strings.
dcace88f 1919 char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
eabd1701 1920
dcace88f
A
1921 size_t i = 0;
1922 for (i = 0; i < j->argc; i++) {
1923 (void)strcpy(p, j->argv[i]);
1924 nj->argv[i] = p;
1925 p += (strlen(j->argv[i]) + 1);
1926 }
1927 nj->argv[i] = NULL;
eabd1701
A
1928 } else {
1929 (void)job_assumes_zero(nj, errno);
dcace88f 1930 }
eabd1701 1931
dcace88f
A
1932 nj->argc = j->argc;
1933 }
eabd1701
A
1934
1935 struct machservice *msi = NULL;
1936 SLIST_FOREACH(msi, &j->machservices, sle) {
1937 /* Only copy MachServices that were actually declared in the plist.
1938 * So skip over per-PID ones and ones that were created via
1939 * bootstrap_register().
1940 */
1941 if (msi->upfront) {
1942 mach_port_t mp = MACH_PORT_NULL;
95379394 1943 struct machservice *msj = machservice_new(nj, msi->name, &mp, false);
eabd1701
A
1944 if (msj != NULL) {
1945 msj->reset = msi->reset;
1946 msj->delete_on_destruction = msi->delete_on_destruction;
1947 msj->drain_one_on_crash = msi->drain_one_on_crash;
1948 msj->drain_all_on_crash = msi->drain_all_on_crash;
95379394
A
1949
1950 kern_return_t kr = mach_port_set_attributes(mach_task_self(), msj->port, MACH_PORT_TEMPOWNER, NULL, 0);
1951 (void)job_assumes_zero(j, kr);
eabd1701
A
1952 } else {
1953 (void)job_assumes_zero(nj, errno);
1954 }
1955 }
1956 }
1957
1958 // We ignore global environment variables.
dcace88f
A
1959 struct envitem *ei = NULL;
1960 SLIST_FOREACH(ei, &j->env, sle) {
eabd1701
A
1961 if (envitem_new(nj, ei->key, ei->value, false)) {
1962 (void)job_assumes_zero(nj, errno);
1963 }
dcace88f
A
1964 }
1965 uuid_string_t val;
1966 uuid_unparse(identifier, val);
eabd1701
A
1967 if (envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false)) {
1968 (void)job_assumes_zero(nj, errno);
1969 }
1970
dcace88f
A
1971 if (j->rootdir) {
1972 nj->rootdir = strdup(j->rootdir);
1973 }
1974 if (j->workingdir) {
1975 nj->workingdir = strdup(j->workingdir);
1976 }
1977 if (j->username) {
1978 nj->username = strdup(j->username);
1979 }
1980 if (j->groupname) {
1981 nj->groupname = strdup(j->groupname);
1982 }
eabd1701
A
1983
1984 /* FIXME: We shouldn't redirect all the output from these jobs to the
1985 * same file. We should uniquify the file names. But this hasn't shown
1986 * to be a problem in practice.
dcace88f
A
1987 */
1988 if (j->stdinpath) {
1989 nj->stdinpath = strdup(j->stdinpath);
1990 }
1991 if (j->stdoutpath) {
1992 nj->stdoutpath = strdup(j->stdinpath);
1993 }
1994 if (j->stderrpath) {
1995 nj->stderrpath = strdup(j->stderrpath);
1996 }
1997 if (j->alt_exc_handler) {
1998 nj->alt_exc_handler = strdup(j->alt_exc_handler);
1999 }
95379394
A
2000 if (j->cfbundleidentifier) {
2001 nj->cfbundleidentifier = strdup(j->cfbundleidentifier);
2002 }
eabd1701 2003#if HAVE_SANDBOX
dcace88f
A
2004 if (j->seatbelt_profile) {
2005 nj->seatbelt_profile = strdup(j->seatbelt_profile);
2006 }
95379394
A
2007 if (j->container_identifier) {
2008 nj->container_identifier = strdup(j->container_identifier);
2009 }
eabd1701
A
2010#endif
2011
2012#if HAVE_QUARANTINE
dcace88f
A
2013 if (j->quarantine_data) {
2014 nj->quarantine_data = strdup(j->quarantine_data);
2015 }
2016 nj->quarantine_data_sz = j->quarantine_data_sz;
eabd1701 2017#endif
dcace88f
A
2018 if (j->j_binpref) {
2019 size_t sz = malloc_size(j->j_binpref);
2020 nj->j_binpref = (cpu_type_t *)malloc(sz);
eabd1701 2021 if (nj->j_binpref) {
dcace88f 2022 memcpy(&nj->j_binpref, &j->j_binpref, sz);
eabd1701
A
2023 } else {
2024 (void)job_assumes_zero(nj, errno);
dcace88f
A
2025 }
2026 }
eabd1701 2027
dcace88f 2028 if (j->asport != MACH_PORT_NULL) {
eabd1701 2029 (void)job_assumes_zero(nj, launchd_mport_copy_send(j->asport));
dcace88f
A
2030 nj->asport = j->asport;
2031 }
eabd1701 2032
dcace88f 2033 LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
eabd1701 2034
dcace88f
A
2035 jobmgr_t where2put = root_jobmgr;
2036 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
2037 where2put = j->mgr;
2038 }
2039 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
2040 LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
eabd1701 2041 } else {
95379394 2042 (void)os_assumes_zero(errno);
dcace88f 2043 }
eabd1701 2044
dcace88f
A
2045 return nj;
2046}
2047
5b0a4722
A
2048job_t
2049job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
ed34e3c3
A
2050{
2051 const char *const *argv_tmp = argv;
ddbbfbc1 2052 char tmp_path[PATH_MAX];
5b0a4722
A
2053 char auto_label[1000];
2054 const char *bn = NULL;
ed34e3c3 2055 char *co;
5b0a4722 2056 size_t minlabel_len;
ddbbfbc1 2057 size_t i, cc = 0;
5b0a4722
A
2058 job_t j;
2059
95379394 2060 __OS_COMPILETIME_ASSERT__(offsetof(struct job_s, kqjob_callback) == 0);
5b0a4722 2061
ddbbfbc1 2062 if (unlikely(jm->shutting_down)) {
5b0a4722
A
2063 errno = EINVAL;
2064 return NULL;
2065 }
ed34e3c3 2066
ddbbfbc1 2067 if (unlikely(prog == NULL && argv == NULL)) {
ed34e3c3
A
2068 errno = EINVAL;
2069 return NULL;
2070 }
2071
eabd1701
A
2072 /* I'd really like to redo this someday. Anonymous jobs carry all the
2073 * baggage of managed jobs with them, even though most of it is unused.
2074 * Maybe when we have Objective-C objects in libSystem, there can be a base
2075 * job type that anonymous and managed jobs inherit from...
2076 */
dcace88f 2077 char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
ddbbfbc1
A
2078 if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
2079 if (prog) {
2080 bn = prog;
2081 } else {
2082 strlcpy(tmp_path, argv[0], sizeof(tmp_path));
eabd1701
A
2083 // prog for auto labels is kp.kp_kproc.p_comm.
2084 bn = basename(tmp_path);
ddbbfbc1 2085 }
eabd1701
A
2086
2087 (void)snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
5b0a4722 2088 label = auto_label;
eabd1701
A
2089 /* This is so we can do gross things later. See NOTE_EXEC for anonymous
2090 * jobs.
2091 */
5b0a4722
A
2092 minlabel_len = strlen(label) + MAXCOMLEN;
2093 } else {
dcace88f
A
2094 if (label == AUTO_PICK_XPC_LABEL) {
2095 minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
2096 } else {
2097 minlabel_len = strlen(label);
2098 }
5b0a4722
A
2099 }
2100
2101 j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);
ed34e3c3 2102
eabd1701 2103 if (!j) {
95379394 2104 (void)os_assumes_zero(errno);
5b0a4722
A
2105 return NULL;
2106 }
ed34e3c3 2107
ddbbfbc1 2108 if (unlikely(label == auto_label)) {
eabd1701 2109 (void)snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
5b0a4722 2110 } else {
eabd1701 2111 (void)strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
5b0a4722 2112 }
eabd1701 2113
ed34e3c3 2114 j->kqjob_callback = job_callback;
5b0a4722
A
2115 j->mgr = jm;
2116 j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
2117 j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
2118 j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
2119 j->currently_ignored = true;
ed34e3c3
A
2120 j->ondemand = true;
2121 j->checkedin = true;
eabd1701 2122 j->jetsam_priority = DEFAULT_JETSAM_PRIORITY;
587e987e 2123 j->jetsam_memlimit = -1;
ddbbfbc1 2124 uuid_clear(j->expected_audit_uuid);
eabd1701
A
2125#if TARGET_OS_EMBEDDED
2126 /* Run embedded daemons as background by default. SpringBoard jobs are
2127 * Interactive by default. Unfortunately, so many daemons have opted into
2128 * this priority band that its usefulness is highly questionable.
2129 *
2130 * See <rdar://problem/9539873>.
95379394
A
2131 *
2132 * Also ensure that daemons have a default memory highwatermark unless
2133 * otherwise specified, as per <rdar://problem/10307814>.
eabd1701
A
2134 */
2135 if (launchd_embedded_handofgod) {
95379394 2136 j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
eabd1701
A
2137 j->app = true;
2138 } else {
95379394
A
2139 j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
2140 j->jetsam_memlimit = DEFAULT_JETSAM_DAEMON_HIGHWATERMARK;
eabd1701 2141 }
95379394
A
2142#else
2143 /* Jobs on OS X that just come from disk are "standard" by default so that
2144 * third-party daemons/agents don't encounter unexpected throttling.
2145 */
2146 j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
eabd1701
A
2147#endif
2148
ed34e3c3
A
2149 if (prog) {
2150 j->prog = strdup(prog);
eabd1701 2151 if (!j->prog) {
95379394 2152 (void)os_assumes_zero(errno);
ed34e3c3 2153 goto out_bad;
5b0a4722 2154 }
ed34e3c3
A
2155 }
2156
ddbbfbc1
A
2157 if (likely(argv)) {
2158 while (*argv_tmp++) {
ed34e3c3 2159 j->argc++;
ddbbfbc1 2160 }
ed34e3c3 2161
5b0a4722 2162 for (i = 0; i < j->argc; i++) {
ed34e3c3 2163 cc += strlen(argv[i]) + 1;
5b0a4722 2164 }
ed34e3c3
A
2165
2166 j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
eabd1701
A
2167 if (!j->argv) {
2168 (void)job_assumes_zero(j, errno);
ed34e3c3 2169 goto out_bad;
5b0a4722 2170 }
ed34e3c3
A
2171
2172 co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));
2173
2174 for (i = 0; i < j->argc; i++) {
2175 j->argv[i] = co;
eabd1701 2176 (void)strcpy(co, argv[i]);
ed34e3c3
A
2177 co += strlen(argv[i]) + 1;
2178 }
2179 j->argv[i] = NULL;
2180 }
2181
eabd1701 2182 // Sssshhh... don't tell anyone.
dcace88f 2183 if (strcmp(j->label, "com.apple.WindowServer") == 0) {
ddbbfbc1
A
2184 j->has_console = true;
2185 }
2186
5b0a4722 2187 LIST_INSERT_HEAD(&jm->jobs, j, sle);
eabd1701 2188
dcace88f
A
2189 jobmgr_t where2put_label = root_jobmgr;
2190 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
2191 where2put_label = j->mgr;
2192 }
2193 LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
ddbbfbc1 2194 uuid_clear(j->expected_audit_uuid);
5b0a4722
A
2195
2196 job_log(j, LOG_DEBUG, "Conceived");
ed34e3c3
A
2197
2198 return j;
2199
2200out_bad:
5b0a4722
A
2201 if (j->prog) {
2202 free(j->prog);
ed34e3c3 2203 }
5b0a4722
A
2204 free(j);
2205
ed34e3c3
A
2206 return NULL;
2207}
2208
dcace88f
A
2209job_t
2210job_new_alias(jobmgr_t jm, job_t src)
2211{
dcace88f
A
2212 if (job_find(jm, src->label)) {
2213 errno = EEXIST;
eabd1701
A
2214 return NULL;
2215 }
dcace88f 2216
eabd1701
A
2217 job_t j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2218 if (!j) {
95379394 2219 (void)os_assumes_zero(errno);
eabd1701
A
2220 return NULL;
2221 }
2222
2223 (void)strcpy((char *)j->label, src->label);
2224 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2225 LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2226 /* Bad jump address. The kqueue callback for aliases should never be
2227 * invoked.
2228 */
2229 j->kqjob_callback = (kq_callback)0xfa1afe1;
2230 j->alias = src;
2231 j->mgr = jm;
2232
2233 struct machservice *msi = NULL;
2234 SLIST_FOREACH(msi, &src->machservices, sle) {
2235 if (!machservice_new_alias(j, msi)) {
2236 jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2237 errno = EINVAL;
2238 job_remove(j);
2239 j = NULL;
2240 break;
dcace88f
A
2241 }
2242 }
2243
eabd1701
A
2244 if (j) {
2245 job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2246 }
2247
dcace88f
A
2248 return j;
2249}
dcace88f 2250
5b0a4722 2251job_t
ed34e3c3
A
2252job_import(launch_data_t pload)
2253{
95379394
A
2254#if TARGET_OS_EMBEDDED
2255 /* If this is the special payload of default values, handle it here */
2256 if (unlikely(launch_data_dict_lookup(pload, LAUNCH_JOBKEY_DEFAULTS))) {
2257 job_import_defaults(pload);
2258 return NULL;
2259 }
2260#endif
2261
5b0a4722 2262 job_t j = jobmgr_import2(root_jobmgr, pload);
ed34e3c3 2263
ddbbfbc1 2264 if (unlikely(j == NULL)) {
ed34e3c3 2265 return NULL;
5b0a4722 2266 }
ed34e3c3 2267
eabd1701
A
2268 /* Since jobs are effectively stalled until they get security sessions
2269 * assigned to them, we may wish to reconsider this behavior of calling the
2270 * job "enabled" as far as other jobs with the OtherJobEnabled KeepAlive
2271 * criterion set.
ddbbfbc1
A
2272 */
2273 job_dispatch_curious_jobs(j);
5b0a4722 2274 return job_dispatch(j, false);
ed34e3c3
A
2275}
2276
95379394
A
2277#if TARGET_OS_EMBEDDED
2278
2279bool
2280job_import_defaults(launch_data_t pload)
2281{
2282 bool result = false;
2283 xpc_object_t xd = NULL, defaults;
2284
2285 if (_launchd_defaults_cache) {
2286 xpc_release(_launchd_defaults_cache);
2287 _launchd_defaults_cache = NULL;
2288 }
2289
2290 xd = ld2xpc(pload);
2291 if (!xd || xpc_get_type(xd) != XPC_TYPE_DICTIONARY) {
2292 goto out;
2293 }
2294
2295 defaults = xpc_dictionary_get_value(xd, LAUNCHD_JOB_DEFAULTS);
2296 if (!defaults || xpc_get_type(defaults) != XPC_TYPE_DICTIONARY) {
2297 goto out;
2298 }
2299
2300 _launchd_defaults_cache = xpc_copy(defaults);
2301 result = true;
2302out:
2303 if (xd) {
2304 xpc_release(xd);
2305 }
2306
2307 return result;
2308}
2309
2310bool
2311job_apply_defaults(job_t j) {
2312 const char *test_prefix = "com.apple.test.";
2313
2314 char *sb_prefix_end, *sb_suffix_start;
2315 char true_job_label[strlen(j->label)];
2316 const char *label;
2317
2318 if (((sb_prefix_end = strchr(j->label, ':')) != NULL) &&
2319 ((sb_suffix_start = strchr(sb_prefix_end + 1, '[')) != NULL)) {
2320 /*
2321 * Workaround 'UIKitApplication:com.apple.foo[bar]' convention for the processes
2322 * we're interested in. To be removed when <rdar://problem/13066361> is addressed.
2323 */
2324 snprintf(true_job_label, sb_suffix_start - sb_prefix_end, "%s", sb_prefix_end + 1);
2325 label = true_job_label;
2326 } else {
2327 /* Just test the standard label */
2328 label = j->label;
2329 }
2330
2331 /* Test for cache presence and apply if found */
2332 if (_launchd_defaults_cache) {
2333 xpc_object_t props = xpc_dictionary_get_value(_launchd_defaults_cache, label);
2334 if (props && xpc_get_type(props) == XPC_TYPE_DICTIONARY) {
2335 launch_data_t lv = xpc2ld(props);
2336 launch_data_dict_iterate(lv, job_import_keys, j);
2337 launch_data_free(lv);
2338 return true;
2339 }
2340 }
2341
2342 /* Limit free? Disable the memory limit if this is a test job; see <rdar://problem/13180697> */
2343 if (!strncmp(label, test_prefix, strlen(test_prefix))) {
2344 j->jetsam_memlimit = -1;
2345 return true;
2346 }
2347
2348 return false;
2349}
2350
2351#endif
2352
ed34e3c3
A
2353launch_data_t
2354job_import_bulk(launch_data_t pload)
2355{
2356 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
5b0a4722 2357 job_t *ja;
ed34e3c3
A
2358 size_t i, c = launch_data_array_get_count(pload);
2359
ddbbfbc1 2360 ja = alloca(c * sizeof(job_t));
ed34e3c3
A
2361
2362 for (i = 0; i < c; i++) {
dcace88f 2363 if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
ed34e3c3 2364 errno = 0;
5b0a4722 2365 }
ed34e3c3
A
2366 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
2367 }
2368
2369 for (i = 0; i < c; i++) {
ddbbfbc1
A
2370 if (likely(ja[i])) {
2371 job_dispatch_curious_jobs(ja[i]);
2372 job_dispatch(ja[i], false);
5b0a4722 2373 }
ed34e3c3
A
2374 }
2375
2376 return resp;
2377}
2378
2379void
5b0a4722 2380job_import_bool(job_t j, const char *key, bool value)
ed34e3c3 2381{
5b0a4722
A
2382 bool found_key = false;
2383
ed34e3c3 2384 switch (key[0]) {
5b0a4722
A
2385 case 'a':
2386 case 'A':
2387 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2388 j->abandon_pg = value;
2389 found_key = true;
2390 }
ed34e3c3 2391 break;
dcace88f
A
2392 case 'b':
2393 case 'B':
2394 if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2395 j->dirty_at_shutdown = value;
2396 found_key = true;
2397 }
2398 break;
95379394
A
2399 case 'j':
2400 case 'J':
2401 if (strcasecmp(key, LAUNCH_JOBKEY_JOINGUISESSION) == 0) {
2402 j->joins_gui_session = value;
2403 found_key = true;
2404 }
2405 break;
ed34e3c3
A
2406 case 'k':
2407 case 'K':
5b0a4722 2408 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
ed34e3c3 2409 j->ondemand = !value;
5b0a4722
A
2410 found_key = true;
2411 }
ed34e3c3
A
2412 break;
2413 case 'o':
2414 case 'O':
5b0a4722 2415 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
ed34e3c3 2416 j->ondemand = value;
5b0a4722
A
2417 found_key = true;
2418 }
ed34e3c3
A
2419 break;
2420 case 'd':
2421 case 'D':
5b0a4722 2422 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
ed34e3c3 2423 j->debug = value;
5b0a4722
A
2424 found_key = true;
2425 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
dcace88f
A
2426 (void)job_assumes(j, !value);
2427 found_key = true;
2428 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2429 j->disable_aslr = value;
5b0a4722
A
2430 found_key = true;
2431 }
2432 break;
2433 case 'h':
2434 case 'H':
2435 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
eabd1701 2436 job_log(j, LOG_PERF, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
dcace88f 2437 j->dirty_at_shutdown = value;
5b0a4722
A
2438 found_key = true;
2439 }
ed34e3c3
A
2440 break;
2441 case 's':
2442 case 'S':
5b0a4722 2443 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
ed34e3c3 2444 j->session_create = value;
5b0a4722
A
2445 found_key = true;
2446 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2447 j->start_on_mount = value;
2448 found_key = true;
2449 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
eabd1701 2450 // this only does something on Mac OS X 10.4 "Tiger"
5b0a4722 2451 found_key = true;
dcace88f 2452 } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
eabd1701 2453 if (_launchd_shutdown_monitor) {
dcace88f
A
2454 job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2455 } else {
2456 j->shutdown_monitor = true;
eabd1701 2457 _launchd_shutdown_monitor = j;
dcace88f
A
2458 }
2459 found_key = true;
5b0a4722 2460 }
ed34e3c3
A
2461 break;
2462 case 'l':
2463 case 'L':
5b0a4722 2464 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
ed34e3c3 2465 j->low_pri_io = value;
5b0a4722
A
2466 found_key = true;
2467 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2468 j->only_once = value;
2469 found_key = true;
95379394
A
2470 } else if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYBACKGROUNDIO) == 0) {
2471 j->low_priority_background_io = true;
2472 found_key = true;
f9823965
A
2473 } else if (strcasecmp(key, LAUNCH_JOBKEY_LEGACYTIMERS) == 0) {
2474#if !TARGET_OS_EMBEDDED
2475 j->legacy_timers = value;
2476#else // !TARGET_OS_EMBEDDED
2477 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2478#endif // !TARGET_OS_EMBEDDED
2479 found_key = true;
5b0a4722 2480 }
ed34e3c3 2481 break;
fe044cc9
A
2482 case 'm':
2483 case 'M':
2484 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2485 j->internal_exc_handler = value;
2486 found_key = true;
dcace88f
A
2487 } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2488 j->multiple_instances = value;
2489 found_key = true;
fe044cc9
A
2490 }
2491 break;
ed34e3c3
A
2492 case 'i':
2493 case 'I':
5b0a4722
A
2494 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2495 if (getuid() != 0) {
2496 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2497 return;
2498 }
2499 j->no_init_groups = !value;
2500 found_key = true;
dcace88f 2501 } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
ddbbfbc1
A
2502 j->ignore_pg_at_shutdown = value;
2503 found_key = true;
5b0a4722 2504 }
ed34e3c3
A
2505 break;
2506 case 'r':
2507 case 'R':
5b0a4722
A
2508 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2509 if (value) {
eabd1701 2510 // We don't want value == false to change j->start_pending
5b0a4722
A
2511 j->start_pending = true;
2512 }
2513 found_key = true;
2514 }
ed34e3c3
A
2515 break;
2516 case 'e':
2517 case 'E':
5b0a4722 2518 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
ed34e3c3 2519 j->globargv = value;
5b0a4722 2520 found_key = true;
ddbbfbc1 2521 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
eabd1701 2522 j->enable_transactions = value;
ddbbfbc1 2523 found_key = true;
5b0a4722
A
2524 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2525 j->debug_before_kill = value;
2526 found_key = true;
dcace88f 2527 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
eabd1701
A
2528#if TARGET_OS_EMBEDDED
2529 if (!_launchd_embedded_god) {
2530 if ((j->embedded_god = value)) {
2531 _launchd_embedded_god = j;
2532 }
ddbbfbc1
A
2533 } else {
2534 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2535 }
eabd1701
A
2536#else
2537 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2538#endif
ddbbfbc1 2539 found_key = true;
95379394
A
2540 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDHOMESCREEN) == 0) {
2541#if TARGET_OS_EMBEDDED
2542 if (!_launchd_embedded_home) {
2543 if ((j->embedded_home = value)) {
2544 _launchd_embedded_home = j;
2545 }
2546 } else {
2547 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2548 }
2549#else
2550 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2551#endif
dcace88f 2552 } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
eabd1701 2553 if (!_launchd_event_monitor) {
dcace88f
A
2554 j->event_monitor = value;
2555 if (value) {
eabd1701 2556 _launchd_event_monitor = j;
dcace88f
A
2557 }
2558 } else {
eabd1701 2559 job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility from: %s", _launchd_event_monitor->label);
dcace88f
A
2560 }
2561 found_key = true;
5b0a4722 2562 }
ed34e3c3
A
2563 break;
2564 case 'w':
2565 case 'W':
5b0a4722 2566 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
ed34e3c3 2567 j->wait4debugger = value;
5b0a4722
A
2568 found_key = true;
2569 }
ed34e3c3 2570 break;
a6e7a709
A
2571 case 'x':
2572 case 'X':
2573 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
2574 if (pid1_magic) {
eabd1701
A
2575 if (_launchd_xpc_bootstrapper) {
2576 job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _launchd_xpc_bootstrapper->label);
a6e7a709 2577 } else {
eabd1701 2578 _launchd_xpc_bootstrapper = j;
a6e7a709
A
2579 j->xpc_bootstrapper = value;
2580 }
2581 } else {
2582 job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
2583 }
2584 }
2585 found_key = true;
2586 break;
ed34e3c3
A
2587 default:
2588 break;
2589 }
5b0a4722 2590
ddbbfbc1 2591 if (unlikely(!found_key)) {
5b0a4722
A
2592 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2593 }
ed34e3c3
A
2594}
2595
2596void
5b0a4722 2597job_import_string(job_t j, const char *key, const char *value)
ed34e3c3
A
2598{
2599 char **where2put = NULL;
ed34e3c3
A
2600
2601 switch (key[0]) {
95379394
A
2602 case 'c':
2603 case 'C':
2604 if (strcasecmp(key, LAUNCH_JOBKEY_CFBUNDLEIDENTIFIER) == 0) {
2605 where2put = &j->cfbundleidentifier;
2606 }
2607 break;
fe044cc9
A
2608 case 'm':
2609 case 'M':
2610 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2611 where2put = &j->alt_exc_handler;
2612 }
2613 break;
ed34e3c3
A
2614 case 'p':
2615 case 'P':
5b0a4722
A
2616 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
2617 return;
95379394
A
2618 } else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0
2619 || strcasecmp(key, LAUNCH_JOBKEY_PROCESSTYPE) == 0) {
2620 if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_INTERACTIVE) == 0) {
2621 j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
2622 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_ADAPTIVE) == 0) {
2623 j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE;
2624 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_STANDARD) == 0) {
2625 j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
eabd1701 2626 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_BACKGROUND) == 0) {
95379394
A
2627 j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
2628 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
2629 j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_TAL;
2630 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_SYSTEMAPP) == 0) {
2631 j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
2632 j->system_app = true;
2633 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_APP) == 0) {
2634 j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
2635 j->app = true;
eabd1701 2636 } else {
dcace88f
A
2637 job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
2638 }
2639 return;
5b0a4722 2640 }
ed34e3c3
A
2641 break;
2642 case 'l':
2643 case 'L':
5b0a4722
A
2644 if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
2645 return;
2646 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2647 return;
2648 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2649 return;
2650 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
5b0a4722
A
2651 return;
2652 }
ed34e3c3
A
2653 break;
2654 case 'r':
2655 case 'R':
5b0a4722
A
2656 if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
2657 if (getuid() != 0) {
2658 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2659 return;
2660 }
ed34e3c3 2661 where2put = &j->rootdir;
5b0a4722 2662 }
ed34e3c3
A
2663 break;
2664 case 'w':
2665 case 'W':
5b0a4722 2666 if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
ed34e3c3 2667 where2put = &j->workingdir;
5b0a4722 2668 }
ed34e3c3
A
2669 break;
2670 case 'u':
2671 case 'U':
5b0a4722
A
2672 if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
2673 if (getuid() != 0) {
2674 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2675 return;
2676 } else if (strcmp(value, "root") == 0) {
2677 return;
2678 }
ed34e3c3 2679 where2put = &j->username;
5b0a4722 2680 }
ed34e3c3
A
2681 break;
2682 case 'g':
2683 case 'G':
5b0a4722
A
2684 if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
2685 if (getuid() != 0) {
2686 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2687 return;
2688 } else if (strcmp(value, "wheel") == 0) {
2689 return;
2690 }
ed34e3c3 2691 where2put = &j->groupname;
5b0a4722 2692 }
ed34e3c3
A
2693 break;
2694 case 's':
2695 case 'S':
2696 if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
2697 where2put = &j->stdoutpath;
2698 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
2699 where2put = &j->stderrpath;
ddbbfbc1
A
2700 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
2701 where2put = &j->stdinpath;
2702 j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
eabd1701
A
2703 if (job_assumes_zero_p(j, j->stdin_fd) != -1) {
2704 // open() should not block, but regular IO by the job should
2705 (void)job_assumes_zero_p(j, fcntl(j->stdin_fd, F_SETFL, 0));
2706 // XXX -- EV_CLEAR should make named pipes happy?
2707 (void)job_assumes_zero_p(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j));
ddbbfbc1
A
2708 } else {
2709 j->stdin_fd = 0;
2710 }
f36da725 2711#if HAVE_SANDBOX
5b0a4722
A
2712 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
2713 where2put = &j->seatbelt_profile;
95379394
A
2714 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXCONTAINER) == 0) {
2715 where2put = &j->container_identifier;
f36da725 2716#endif
ed34e3c3
A
2717 }
2718 break;
dcace88f
A
2719 case 'X':
2720 case 'x':
2721 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
2722 return;
2723 }
2724 break;
ed34e3c3 2725 default:
5b0a4722 2726 job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
ed34e3c3
A
2727 break;
2728 }
2729
ddbbfbc1 2730 if (likely(where2put)) {
eabd1701
A
2731 if (!(*where2put = strdup(value))) {
2732 (void)job_assumes_zero(j, errno);
2733 }
ed34e3c3 2734 } else {
eabd1701
A
2735 // See rdar://problem/5496612. These two are okay.
2736 if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) == 0
2737 || strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) == 0) {
2738 job_log(j, LOG_APPLEONLY, "This key is no longer relevant and should be removed: %s", key);
2739 } else {
ddbbfbc1
A
2740 job_log(j, LOG_WARNING, "Unknown key: %s", key);
2741 }
ed34e3c3
A
2742 }
2743}
2744
2745void
5b0a4722 2746job_import_integer(job_t j, const char *key, long long value)
ed34e3c3
A
2747{
2748 switch (key[0]) {
95379394
A
2749 case 'a':
2750 case 'A':
2751#if TARGET_OS_EMBEDDED
2752 if (strcasecmp(key, LAUNCH_JOBKEY_ASID) == 0) {
2753 if (launchd_embedded_handofgod) {
2754 if (audit_session_port((au_asid_t)value, &j->asport) == -1 && errno != ENOSYS) {
2755 (void)job_assumes_zero(j, errno);
2756 }
2757 }
2758 }
2759#endif
5b0a4722
A
2760 case 'e':
2761 case 'E':
2762 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
ddbbfbc1 2763 if (unlikely(value < 0)) {
5b0a4722 2764 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
ddbbfbc1 2765 } else if (unlikely(value > UINT32_MAX)) {
5b0a4722
A
2766 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2767 } else {
ddbbfbc1 2768 j->exit_timeout = (typeof(j->exit_timeout)) value;
5b0a4722 2769 }
dcace88f 2770 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
ddbbfbc1
A
2771 j->main_thread_priority = value;
2772 }
2773 break;
2774 case 'j':
2775 case 'J':
dcace88f 2776 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
587e987e 2777 job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
eabd1701 2778
587e987e 2779 launch_data_t pri = launch_data_new_integer(value);
dcace88f 2780 if (job_assumes(j, pri != NULL)) {
587e987e
A
2781 jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2782 launch_data_free(pri);
2783 }
5b0a4722 2784 }
ed34e3c3
A
2785 case 'n':
2786 case 'N':
5b0a4722 2787 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
ddbbfbc1
A
2788 if (unlikely(value < PRIO_MIN)) {
2789 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2790 } else if (unlikely(value > PRIO_MAX)) {
2791 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2792 } else {
2793 j->nice = (typeof(j->nice)) value;
2794 j->setnice = true;
2795 }
5b0a4722 2796 }
ed34e3c3
A
2797 break;
2798 case 't':
2799 case 'T':
2800 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
ddbbfbc1 2801 if (unlikely(value < 0)) {
5b0a4722 2802 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
ddbbfbc1 2803 } else if (unlikely(value > UINT32_MAX)) {
5b0a4722
A
2804 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2805 } else {
ddbbfbc1 2806 j->timeout = (typeof(j->timeout)) value;
5b0a4722
A
2807 }
2808 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2809 if (value < 0) {
2810 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2811 } else if (value > UINT32_MAX) {
2812 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2813 } else {
ddbbfbc1 2814 j->min_run_time = (typeof(j->min_run_time)) value;
5b0a4722 2815 }
ed34e3c3
A
2816 }
2817 break;
2818 case 'u':
2819 case 'U':
2820 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2821 j->mask = value;
2822 j->setmask = true;
2823 }
2824 break;
2825 case 's':
2826 case 'S':
2827 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
ddbbfbc1 2828 if (unlikely(value <= 0)) {
5b0a4722 2829 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
ddbbfbc1 2830 } else if (unlikely(value > UINT32_MAX)) {
5b0a4722
A
2831 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2832 } else {
ddbbfbc1
A
2833 runtime_add_weak_ref();
2834 j->start_interval = (typeof(j->start_interval)) value;
5b0a4722 2835
eabd1701 2836 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
5b0a4722 2837 }
f36da725 2838#if HAVE_SANDBOX
5b0a4722
A
2839 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2840 j->seatbelt_flags = value;
f36da725 2841#endif
5b0a4722
A
2842 }
2843
2844 break;
2845 default:
2846 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2847 break;
2848 }
2849}
2850
void
job_import_opaque(job_t j __attribute__((unused)), const char *key, launch_data_t value __attribute__((unused)))
{
	/* Import an opaque (binary blob) key from a job's property list:
	 * quarantine data (when built with HAVE_QUARANTINE) and the expected
	 * audit-session UUID. Unknown opaque keys are silently ignored.
	 */
	switch (key[0]) {
	case 'q':
	case 'Q':
#if HAVE_QUARANTINE
		if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
			size_t tmpsz = launch_data_get_opaque_size(value);

			if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
				memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
				j->quarantine_data_sz = tmpsz;
			}
		}
#endif
		/* Fixed: previously fell through into the 's' arm. Harmless (a
		 * 'q'-prefixed key can never match SecuritySessionUUID), but the
		 * fallthrough was unintentional.
		 */
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
			size_t tmpsz = launch_data_get_opaque_size(value);
			// The payload must be exactly one uuid_t; anything else is a caller bug.
			if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
				memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
			}
		}
		break;
	default:
		break;
	}
}
2880
f36da725
A
2881static void
2882policy_setup(launch_data_t obj, const char *key, void *context)
2883{
2884 job_t j = context;
2885 bool found_key = false;
2886
2887 switch (key[0]) {
2888 case 'd':
2889 case 'D':
2890 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2891 j->deny_job_creation = launch_data_get_bool(obj);
2892 found_key = true;
2893 }
2894 break;
2895 default:
2896 break;
2897 }
2898
2899 if (unlikely(!found_key)) {
2900 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2901 }
2902}
2903
ed34e3c3 2904void
5b0a4722 2905job_import_dictionary(job_t j, const char *key, launch_data_t value)
ed34e3c3
A
2906{
2907 launch_data_t tmp;
2908
2909 switch (key[0]) {
f36da725
A
2910 case 'p':
2911 case 'P':
2912 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2913 launch_data_dict_iterate(value, policy_setup, j);
2914 }
2915 break;
ed34e3c3
A
2916 case 'k':
2917 case 'K':
5b0a4722 2918 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
ed34e3c3 2919 launch_data_dict_iterate(value, semaphoreitem_setup, j);
5b0a4722 2920 }
ed34e3c3
A
2921 break;
2922 case 'i':
2923 case 'I':
2924 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2925 j->inetcompat = true;
5b0a4722
A
2926 j->abandon_pg = true;
2927 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
ed34e3c3 2928 j->inetcompat_wait = launch_data_get_bool(tmp);
5b0a4722 2929 }
ed34e3c3
A
2930 }
2931 break;
587e987e
A
2932 case 'j':
2933 case 'J':
dcace88f 2934 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
587e987e
A
2935 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2936 }
ed34e3c3
A
2937 case 'e':
2938 case 'E':
5b0a4722 2939 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
ed34e3c3 2940 launch_data_dict_iterate(value, envitem_setup, j);
ddbbfbc1 2941 }
ed34e3c3
A
2942 break;
2943 case 'u':
2944 case 'U':
2945 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2946 j->importing_global_env = true;
2947 launch_data_dict_iterate(value, envitem_setup, j);
2948 j->importing_global_env = false;
2949 }
2950 break;
2951 case 's':
2952 case 'S':
2953 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2954 launch_data_dict_iterate(value, socketgroup_setup, j);
2955 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2956 calendarinterval_new_from_obj(j, value);
2957 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2958 launch_data_dict_iterate(value, limititem_setup, j);
f36da725 2959#if HAVE_SANDBOX
5b0a4722
A
2960 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2961 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
f36da725 2962#endif
ed34e3c3
A
2963 }
2964 break;
2965 case 'h':
2966 case 'H':
2967 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2968 j->importing_hard_limits = true;
2969 launch_data_dict_iterate(value, limititem_setup, j);
2970 j->importing_hard_limits = false;
2971 }
2972 break;
2973 case 'm':
2974 case 'M':
2975 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2976 launch_data_dict_iterate(value, machservice_setup, j);
ed34e3c3
A
2977 }
2978 break;
dcace88f
A
2979 case 'l':
2980 case 'L':
2981 if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2982 launch_data_dict_iterate(value, eventsystem_setup, j);
2983 } else {
2984 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2985 return;
2986 }
2987 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2988 return;
2989 }
2990 }
2991 break;
ed34e3c3 2992 default:
5b0a4722 2993 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
ed34e3c3
A
2994 break;
2995 }
2996}
2997
2998void
5b0a4722 2999job_import_array(job_t j, const char *key, launch_data_t value)
ed34e3c3 3000{
5b0a4722 3001 size_t i, value_cnt = launch_data_array_get_count(value);
ed34e3c3
A
3002
3003 switch (key[0]) {
5b0a4722
A
3004 case 'p':
3005 case 'P':
3006 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
3007 return;
3008 }
3009 break;
3010 case 'l':
3011 case 'L':
3012 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
3013 return;
3014 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
3015 return;
3016 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
3017 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
3018 return;
3019 }
3020 break;
ed34e3c3
A
3021 case 'b':
3022 case 'B':
eabd1701 3023 if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
5b0a4722
A
3024 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
3025 j->j_binpref_cnt = value_cnt;
3026 for (i = 0; i < value_cnt; i++) {
ddbbfbc1 3027 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
5b0a4722
A
3028 }
3029 }
3030 }
ed34e3c3
A
3031 break;
3032 case 's':
3033 case 'S':
3034 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
5b0a4722 3035 for (i = 0; i < value_cnt; i++) {
ed34e3c3 3036 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
5b0a4722 3037 }
ed34e3c3
A
3038 }
3039 break;
3040 default:
5b0a4722 3041 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
ed34e3c3
A
3042 break;
3043 }
ed34e3c3
A
3044}
3045
3046void
3047job_import_keys(launch_data_t obj, const char *key, void *context)
3048{
5b0a4722 3049 job_t j = context;
ed34e3c3
A
3050 launch_data_type_t kind;
3051
eabd1701
A
3052 if (!obj) {
3053 launchd_syslog(LOG_ERR, "NULL object given to job_import_keys().");
ed34e3c3 3054 return;
5b0a4722 3055 }
ed34e3c3
A
3056
3057 kind = launch_data_get_type(obj);
3058
3059 switch (kind) {
3060 case LAUNCH_DATA_BOOL:
3061 job_import_bool(j, key, launch_data_get_bool(obj));
3062 break;
3063 case LAUNCH_DATA_STRING:
3064 job_import_string(j, key, launch_data_get_string(obj));
3065 break;
3066 case LAUNCH_DATA_INTEGER:
3067 job_import_integer(j, key, launch_data_get_integer(obj));
3068 break;
3069 case LAUNCH_DATA_DICTIONARY:
3070 job_import_dictionary(j, key, obj);
3071 break;
3072 case LAUNCH_DATA_ARRAY:
3073 job_import_array(j, key, obj);
3074 break;
5b0a4722
A
3075 case LAUNCH_DATA_OPAQUE:
3076 job_import_opaque(j, key, obj);
3077 break;
ed34e3c3
A
3078 default:
3079 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
3080 break;
3081 }
3082}
3083
/* Import one job property-list dictionary into the given job manager.
 *
 * Validates the payload, resolves the target session, rejects duplicate or
 * malformed labels, creates the job, and imports every remaining key. On
 * failure returns NULL with errno set (EINVAL/EPERM/EEXIST); on success
 * returns the new job (or, for XPC singletons, the existing one with
 * errno = EEXIST).
 */
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	if (!jobmgr_assumes(jm, pload != NULL)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
		errno = EINVAL;
		return NULL;
	}

	// The Label key is mandatory and must be a non-NULL string.
	if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(label = launch_data_get_string(tmp)))) {
		errno = EINVAL;
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* Embedded "hand of god" mode: imports are only allowed when the payload's
	 * UserName matches the privileged client's username; otherwise refuse.
	 */
	if (unlikely(launchd_embedded_handofgod && _launchd_embedded_god)) {
		if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
			errno = EPERM;
			return NULL;
		}

		const char *username = NULL;
		if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
			username = launch_data_get_string(tmp);
		} else {
			errno = EPERM;
			return NULL;
		}

		if (!jobmgr_assumes(jm, _launchd_embedded_god->username != NULL && username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (unlikely(strcmp(_launchd_embedded_god->username, username) != 0)) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM))
		&& (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	int argc = 0;
	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		// Stack-allocated argv; valid for the rest of this call only.
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		argv[i] = NULL;
		argc = i;
	}

	// A job with neither Program nor ProgramArguments has nothing to run.
	if (!prog && argc == 0) {
		jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
		errno = EINVAL;
		return NULL;
	}

	/* Find the requested session. You cannot load services into XPC domains in
	 * this manner.
	 */
	launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	if (session) {
		jobmgr_t jmt = NULL;
		if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
			jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
			if (!jmt) {
				jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
			} else {
				jm = jmt;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
		}

		if (!jmt) {
			errno = EINVAL;
			return NULL;
		}
	}

	/* For legacy reasons, we have a global hash of all labels in all job
	 * managers. So rather than make it a global, we store it in the root job
	 * manager. But for an XPC domain, we store a local hash of all services in
	 * the domain.
	 */
	jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
	if (unlikely((j = job_find(where2look, label)) != NULL)) {
		if (jm->xpc_singleton) {
			/* There can (and probably will be) multiple attemtps to import the
			 * same XPC service from the same framework. This is okay. It's
			 * treated as a singleton, so just return the existing one so that
			 * it may be aliased into the requesting process' XPC domain.
			 */
			errno = EEXIST;
			return j;
		} else {
			/* If we're not a global XPC domain, then it's an error to try
			 * importing the same job/service multiple times.
			 */
			errno = EEXIST;
			return NULL;
		}
	} else if (unlikely(!jobmgr_label_test(where2look, label))) {
		errno = EINVAL;
		return NULL;
	}
	jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);

	if (likely(j = job_new(jm, label, prog, argv))) {
#if TARGET_OS_EMBEDDED
		job_apply_defaults(j);
#endif
		// Walk every remaining key into the type-specific importers.
		launch_data_dict_iterate(pload, job_import_keys, j);
		if (!uuid_is_null(j->expected_audit_uuid)) {
			uuid_string_t uuid_str;
			uuid_unparse(j->expected_audit_uuid, uuid_str);
			job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
			LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
			errno = ENEEDAUTH;
		} else {
			job_log(j, LOG_DEBUG, "No security session specified.");
			j->asport = MACH_PORT_NULL;
		}

		if (pid1_magic && !jm->parentmgr) {
			/* Workaround reentrancy in CF. We don't make this a global variable
			 * because we don't want per-user launchd's to inherit it. So we
			 * just set it for every job that we import into the System session.
			 *
			 * See <rdar://problem/9468837>.
			 */
			envitem_new(j, "__CF_USER_TEXT_ENCODING", "0x0:0:0", false);
		}

		if (j->event_monitor) {
			eventsystem_ping();
		}

#if TARGET_OS_EMBEDDED
		/* SpringBoard and backboardd must run at elevated priority.
		 *
		 * See <rdar://problem/9539873> and <rdar://problem/10984383>.
		 */
		if (j->embedded_god || j->embedded_home) {
			j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
		}
#endif
	}

	return j;
}
ed34e3c3 3280
f36da725
A
3281bool
3282jobmgr_label_test(jobmgr_t jm, const char *str)
3283{
f36da725
A
3284 const char *ptr;
3285
3286 if (str[0] == '\0') {
3287 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
3288 return false;
3289 }
ddbbfbc1 3290
f36da725
A
3291 for (ptr = str; *ptr; ptr++) {
3292 if (iscntrl(*ptr)) {
3293 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
3294 return false;
3295 }
3296 }
ddbbfbc1 3297
eabd1701
A
3298 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0)
3299 || (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
f36da725
A
3300 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
3301 return false;
3302 }
3303
3304 return true;
3305}
3306
5b0a4722 3307job_t
dcace88f 3308job_find(jobmgr_t jm, const char *label)
5b0a4722
A
3309{
3310 job_t ji;
eabd1701 3311
dcace88f
A
3312 if (!jm) {
3313 jm = root_jobmgr;
3314 }
eabd1701 3315
dcace88f 3316 LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
ddbbfbc1 3317 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
eabd1701
A
3318 // 5351245 and 5488633 respectively
3319 continue;
5b0a4722
A
3320 }
3321
3322 if (strcmp(ji->label, label) == 0) {
3323 return ji;
3324 }
ed34e3c3
A
3325 }
3326
5b0a4722
A
3327 errno = ESRCH;
3328 return NULL;
3329}
ed34e3c3 3330
eabd1701 3331// Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid().
5b0a4722 3332job_t
ddbbfbc1 3333jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
5b0a4722
A
3334{
3335 job_t ji = NULL;
dcace88f 3336 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
eabd1701 3337 if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay))) {
ddbbfbc1
A
3338 return ji;
3339 }
3340 }
5b0a4722 3341
ddbbfbc1 3342 jobmgr_t jmi = NULL;
dcace88f
A
3343 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3344 if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
5b0a4722
A
3345 break;
3346 }
3347 }
3348
ddbbfbc1
A
3349 return ji;
3350}
3351
3352job_t
3353jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3354{
3355 job_t ji;
3356
3357 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3358 if (ji->p == p) {
3359 return ji;
3360 }
5b0a4722 3361 }
ddbbfbc1
A
3362
3363 return create_anon ? job_new_anonymous(jm, p) : NULL;
ed34e3c3
A
3364}
3365
95379394
A
3366job_t
3367managed_job(pid_t p)
3368{
3369 job_t ji;
3370
3371 LIST_FOREACH(ji, &managed_actives[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3372 if (ji->p == p) {
3373 return ji;
3374 }
3375 }
3376
3377 return NULL;
3378}
3379
5b0a4722
A
/* Resolve a MIG request port to a job. If the port is this manager's own
 * bootstrap port, resolve by the caller's PID (creating an anonymous job if
 * needed); otherwise recurse through submanagers, then check each job's
 * per-job port. Returns NULL on no match.
 */
job_t
job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
{
	if (jm->jm_port == mport) {
		return jobmgr_find_by_pid(jm, upid, true);
	}

	jobmgr_t sub;
	SLIST_FOREACH(sub, &jm->submgrs, sle) {
		job_t found = job_mig_intran2(sub, mport, upid);
		if (found) {
			return found;
		}
	}

	job_t it;
	LIST_FOREACH(it, &jm->jobs, sle) {
		if (it->j_port == mport) {
			return it;
		}
	}

	return NULL;
}
3406
5b0a4722
A
/* MIG intran hook: map the caller's port to a job using the caller's audit
 * credentials. On failure, log whether the calling process vanished or some
 * other proc_pidinfo() error occurred, then return NULL.
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr = job_mig_intran2(root_jobmgr, p, ldc->pid);

	if (jr) {
		return jr;
	}

	struct proc_bsdshortinfo proc;
	if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno == ESRCH) {
			jobmgr_log(root_jobmgr, LOG_ERR, "%s[%i] disappeared out from under us (UID: %u EUID: %u)", proc.pbsi_comm, ldc->pid, ldc->uid, ldc->euid);
		} else {
			(void)jobmgr_assumes_zero(root_jobmgr, errno);
		}
	}

	return jr;
}
3428
/* Map a Mach service receive-right port back to the job that registered it
 * by scanning the global port hash. Returns NULL if no service owns p.
 */
job_t
job_find_by_service_port(mach_port_t p)
{
	job_t owner = NULL;
	struct machservice *svc;

	LIST_FOREACH(svc, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (svc->recv && (svc->port == p)) {
			owner = svc->job;
			break;
		}
	}

	return owner;
}
3442
/* MIG destructor hook: runs after each MIG routine returns. Unloads jobs
 * flagged for unload-at-MIG-return, clears the 5477111 workaround marker,
 * and runs the calendar-interval sanity check.
 */
void
job_mig_destructor(job_t j)
{
	/* The job can go invalid before this point.
	 *
	 * <rdar://problem/5477111>
	 */
	if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
		job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
		job_remove(j);
	}

	workaround_5477111 = NULL;

	calendarinterval_sanity_check();
}
3459
3460void
3461job_export_all2(jobmgr_t jm, launch_data_t where)
3462{
3463 jobmgr_t jmi;
3464 job_t ji;
3465
3466 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3467 job_export_all2(jmi, where);
3468 }
3469
3470 LIST_FOREACH(ji, &jm->jobs, sle) {
3471 launch_data_t tmp;
3472
3473 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3474 launch_data_dict_insert(where, tmp, ji->label);
3475 }
3476 }
ed34e3c3
A
3477}
3478
3479launch_data_t
3480job_export_all(void)
3481{
3482 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3483
eabd1701 3484 if (resp != NULL) {
5b0a4722 3485 job_export_all2(root_jobmgr, resp);
eabd1701 3486 } else {
95379394 3487 (void)os_assumes_zero(errno);
5b0a4722 3488 }
ed34e3c3
A
3489
3490 return resp;
3491}
3492
/* Diagnostic: after a job dies, log any processes still sharing its process
 * group (Apple-internal builds only). Allocates a PID buffer sized for the
 * kernel's max proc count, lists the group members, and warns about every
 * survivor other than the job itself, PID 0, and PID 1.
 */
void
job_log_stray_pg(job_t j)
{
	pid_t *pids = NULL;
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	// kp_cnt receives the number of PIDs in the job's process group.
	if (job_assumes_zero_p(j, (kp_cnt = proc_listpgrppids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = pids[i];
		if (p_i == j->p) {
			continue;
		} else if (p_i == 0 || p_i == 1) {
			// Never report the kernel or launchd itself.
			continue;
		}

		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			// ESRCH just means the process exited between listing and query.
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		pid_t pp_i = proc.pbsi_ppid;
		const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
		const char *n = proc.pbsi_comm;

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
	}

out:
	free(pids);
}
3539
95379394
A
#if HAVE_SYSTEMSTATS
// Timer callback: log performance statistics for the whole job tree.
static void
systemstats_timer_callback(void)
{
	jobmgr_log_perf_statistics(root_jobmgr, true);
}

/* Lazily initialize the systemstats writer (persistent-store logs path) and,
 * in the PID-1 instance, arm a periodic logging timer at the writer's
 * configured interval. Caches the enabled state once initialization succeeds;
 * a failed init will be retried on the next call.
 */
static bool
systemstats_is_enabled(void)
{
	static bool systemstats_enabled;

	if (!systemstats_enabled) {
		char *store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_LOGS, NULL);
		systemstats_enabled = systemstats_init(SYSTEMSTATS_WRITER_launchd, store);
		free(store);

		uint64_t interval;
		interval = systemstats_get_log_interval(SYSTEMSTATS_WRITER_launchd);

		if (pid1_magic && systemstats_enabled && interval) {
			// Timer identity is the callback's address; fires every `interval` seconds.
			jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)systemstats_timer_callback, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, interval, root_jobmgr));
		}
	}

	return systemstats_enabled;
}
#endif // HAVE_SYSTEMSTATS
3568
5b0a4722
A
3569void
3570job_reap(job_t j)
3571{
eabd1701 3572 bool is_system_bootstrapper = ((j->is_bootstrapper && pid1_magic) && !j->mgr->parentmgr);
5b0a4722 3573
eabd1701 3574 job_log(j, LOG_DEBUG, "Reaping");
5b0a4722 3575
ddbbfbc1
A
3576 if (unlikely(j->weird_bootstrap)) {
3577 int64_t junk = 0;
3578 job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
5b0a4722
A
3579 }
3580
ddbbfbc1 3581 if (j->fork_fd) {
eabd1701 3582 (void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
ddbbfbc1 3583 j->fork_fd = 0;
5b0a4722
A
3584 }
3585
95379394
A
3586 bool was_dirty = false;
3587 if (!(j->anonymous || j->implicit_reap)) {
3588 uint32_t flags = 0;
3589 (void)job_assumes_zero(j, proc_get_dirty(j->p, &flags));
3590
3591 j->idle_exit = (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT);
3592 was_dirty = (flags & PROC_DIRTY_IS_DIRTY);
3593
3594 job_log(j, LOG_DEBUG, "%sob exited %s.", j->idle_exit ? "Idle-exit j" : "J", was_dirty ? "while dirty" : "cleanly");
3595 }
3596
3597 if (j->idle_exit && was_dirty) {
3598 if (j->jettisoned) {
3599 job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned while dirty. Will respawn immediately.");
3600 j->unthrottle = true;
3601 j->start_pending = true;
3602 } else {
3603 job_log(j, LOG_INFO, "Idle-exit job exited while dirty.");
3604 }
3605 } else if (j->idle_exit && j->jettisoned) {
3606 /* If an idle-exit job is jettisoned, then we shouldn't throttle its
3607 * next respawn because it could not help when it exited. If it ran for
3608 * the minimum runtime, then this doesn't really matter. If it ran for
3609 * less than the minimum runtime, it will not be throttled.
3610 *
3611 * <rdar://problem/12098667>
3612 */
3613 job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned. Will bypass throttle interval for next on-demand launch.");
3614 j->unthrottle = true;
3615 }
3616
5b0a4722 3617 if (j->anonymous) {
b97faa4c 3618 j->last_exit_status = 0;
5b0a4722 3619 } else {
eabd1701
A
3620 uint64_t rt = runtime_get_nanoseconds_since(j->start_time);
3621 j->trt += rt;
3622
3623 job_log(j, LOG_PERF, "Last instance wall time: %06f", (double)rt / (double)NSEC_PER_SEC);
3624 j->nruns++;
3625
3626 /* The job is dead. While the PID/PGID is still known to be valid, try
3627 * to kill abandoned descendant processes.
5b0a4722
A
3628 */
3629 job_log_stray_pg(j);
3630 if (!j->abandon_pg) {
eabd1701 3631 if (unlikely(killpg2(j->p, SIGTERM) == -1 && errno != ESRCH)) {
ddbbfbc1 3632 job_log(j, LOG_APPLEONLY, "Bug: 5487498");
ddbbfbc1 3633 }
5b0a4722 3634 }
dcace88f 3635
eabd1701 3636 int r = -1;
b97faa4c
A
3637 if (!j->implicit_reap) {
3638 /* If the shutdown monitor has suspended a task and not resumed it
3639 * resumed it before exiting, the kernel will not clean up after the
3640 * shutdown monitor. It will, instead, leave the task suspended and
3641 * not process any pending signals on the event loop for the task.
3642 *
3643 * There are a variety of other kernel bugs that could prevent a
3644 * process from exiting, usually having to do with faulty hardware
3645 * or talking to misbehaving drivers that mark a thread as
3646 * uninterruptible and deadlock/hang before unmarking it as such. So
3647 * we have to work around that too.
3648 *
3649 * See <rdar://problem/9284889&9359725>.
3650 */
3651 if (j->workaround9359725) {
3652 job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
3653 j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
95379394
A
3654 } else {
3655#if HAVE_SYSTEMSTATS
3656 int r2;
3657 struct rusage_info_v1 ri;
3658 r2 = job_assumes_zero(j, proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)&ri));
3659#endif
3660 if ((r = wait4(j->p, &j->last_exit_status, 0, NULL)) == -1) {
3661 job_log(j, LOG_ERR, "Reap failed. Assuming job exited: %d: %s", errno, strerror(errno));
3662 j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
3663 }
eabd1701 3664
95379394
A
3665 if (j->idle_exit && j->jettisoned) {
3666 // Treat idle-exit jettisons as successful exit.
3667 //
3668 // <rdar://problem/13338973>
3669 (void)job_assumes_zero(j, WTERMSIG(j->last_exit_status));
3670 j->last_exit_status = W_EXITCODE(0, 0);
3671 }
3672#if HAVE_SYSTEMSTATS
3673 if (r2 == 0) {
3674 job_log_perf_statistics(j, &ri, j->last_exit_status);
3675 }
3676#endif
3677 }
3678 } else {
3679 job_log(j, LOG_INFO, "Job was implicitly reaped by the kernel.");
eabd1701 3680 }
ed34e3c3
A
3681 }
3682
5b0a4722 3683 if (j->exit_timeout) {
eabd1701 3684 (void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
5b0a4722 3685 }
eabd1701 3686
5b0a4722 3687 LIST_REMOVE(j, pid_hash_sle);
95379394
A
3688 if (!j->anonymous) {
3689 LIST_REMOVE(j, global_pid_hash_sle);
3690 }
5b0a4722 3691
ddbbfbc1
A
3692 if (j->sent_signal_time) {
3693 uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);
5b0a4722
A
3694
3695 td_sec = td / NSEC_PER_SEC;
3696 td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;
3697
ddbbfbc1 3698 job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
5b0a4722
A
3699 }
3700
b97faa4c
A
3701 int exit_status = WEXITSTATUS(j->last_exit_status);
3702 if (WIFEXITED(j->last_exit_status) && exit_status != 0) {
eabd1701
A
3703 if (!j->did_exec && _launchd_support_system) {
3704 xpc_object_t event = NULL;
3705 switch (exit_status) {
3706 case ENOENT:
3707 case ENOTDIR:
3708 case ESRCH:
3709 job_log(j, LOG_NOTICE, "Job failed to exec(3). Setting up event to tell us when to try again: %d: %s", exit_status, strerror(exit_status));
3710 event = xpc_dictionary_create(NULL, NULL, 0);
3711 xpc_dictionary_set_string(event, "Executable", j->prog ? j->prog : j->argv[0]);
3712 if (j->mach_uid) {
3713 xpc_dictionary_set_uint64(event, "UID", j->mach_uid);
3714 } else if (j->username) {
3715 xpc_dictionary_set_string(event, "UserName", j->username);
3716 }
ed34e3c3 3717
eabd1701
A
3718 if (j->groupname) {
3719 xpc_dictionary_set_string(event, "GroupName", j->groupname);
3720 }
dcace88f 3721
95379394 3722 (void)externalevent_new(j, _launchd_support_system, j->label, event, 0);
eabd1701
A
3723 xpc_release(event);
3724
3725 j->waiting4ok = true;
3726 default:
3727 job_log(j, LOG_NOTICE, "Job failed to exec(3) for weird reason: %d", exit_status);
3728 }
3729 } else {
3730 int level = LOG_INFO;
3731 if (exit_status != 0) {
3732 level = LOG_ERR;
3733 }
3734
3735 job_log(j, level, "Exited with code: %d", exit_status);
3736 }
ed34e3c3
A
3737 }
3738
b97faa4c
A
3739 if (WIFSIGNALED(j->last_exit_status)) {
3740 int s = WTERMSIG(j->last_exit_status);
ddbbfbc1 3741 if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
ed34e3c3 3742 job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
95379394 3743 } else if (!(j->stopped || j->clean_kill || j->jettisoned)) {
dcace88f 3744 switch (s) {
eabd1701 3745 // Signals which indicate a crash.
dcace88f
A
3746 case SIGILL:
3747 case SIGABRT:
3748 case SIGFPE:
3749 case SIGBUS:
3750 case SIGSEGV:
3751 case SIGSYS:
3752 /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
3753 * SIGTRAP, assume that it's a crash.
3754 */
3755 case SIGTRAP:
3756 j->crashed = true;
3757 job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
3758 break;
3759 default:
3760 job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
3761 break;
ddbbfbc1 3762 }
eabd1701 3763
dcace88f 3764 if (is_system_bootstrapper && j->crashed) {
ddbbfbc1
A
3765 job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
3766 }
ed34e3c3
A
3767 }
3768 }
3769
ddbbfbc1 3770 j->reaped = true;
eabd1701 3771
ddbbfbc1 3772 struct machservice *msi = NULL;
dcace88f
A
3773 if (j->crashed || !(j->did_exec || j->anonymous)) {
3774 SLIST_FOREACH(msi, &j->machservices, sle) {
3775 if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
ddbbfbc1
A
3776 machservice_drain_port(msi);
3777 }
eabd1701 3778
dcace88f 3779 if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
ddbbfbc1
A
3780 machservice_resetport(j, msi);
3781 }
3782 }
3783 }
dcace88f
A
3784
3785 /* HACK: Essentially duplicating the logic directly above. But this has
3786 * gotten really hairy, and I don't want to try consolidating it right now.
3787 */
3788 if (j->xpc_service && !j->xpcproxy_did_exec) {
3789 job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
3790 SLIST_FOREACH(msi, &j->machservices, sle) {
3791 /* Drain the messages but do not reset the port. If xpcproxy could
3792 * not exec(3), then we don't want to continue trying, since there
3793 * is very likely a serious configuration error with the service.
3794 *
eabd1701
A
3795 * The above comment is weird. I originally said we should drain
3796 * messages but not reset the port, but that's exactly what we do
3797 * below, and I'm not sure which is the mistake, the comment or the
3798 * actual behavior.
3799 *
3800 * Since it's always been this way, I'll assume that the comment is
3801 * incorrect, but I'll leave it in place just to remind myself to
3802 * actually look into it at some point.
3803 *
dcace88f
A
3804 * <rdar://problem/8986802>
3805 */
eabd1701
A
3806 if (msi->upfront && job_assumes(j, !msi->isActive)) {
3807 machservice_resetport(j, msi);
3808 }
dcace88f
A
3809 }
3810 }
3811
ddbbfbc1 3812 struct suspended_peruser *spi = NULL;
dcace88f 3813 while ((spi = LIST_FIRST(&j->suspended_perusers))) {
ddbbfbc1
A
3814 job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
3815 spi->j->peruser_suspend_count--;
dcace88f 3816 if (spi->j->peruser_suspend_count == 0) {
ddbbfbc1
A
3817 job_dispatch(spi->j, false);
3818 }
3819 LIST_REMOVE(spi, sle);
3820 free(spi);
3821 }
f70a210c 3822
dcace88f
A
3823 if (j->exit_status_dest) {
3824 errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
3825 if (errno && errno != MACH_SEND_INVALID_DEST) {
eabd1701 3826 (void)job_assumes_zero(j, errno);
dcace88f
A
3827 }
3828
3829 j->exit_status_dest = MACH_PORT_NULL;
ddbbfbc1 3830 }
dcace88f
A
3831
3832 if (j->spawn_reply_port) {
3833 /* If the child never called exec(3), we must send a spawn() reply so
3834 * that the requestor can get exit status from it. If we fail to send
3835 * the reply for some reason, we have to deallocate the exit status port
3836 * ourselves.
3837 */
3838 kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3839 if (kr) {
3840 if (kr != MACH_SEND_INVALID_DEST) {
eabd1701 3841 (void)job_assumes_zero(j, kr);
dcace88f
A
3842 }
3843
eabd1701 3844 (void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
dcace88f
A
3845 }
3846
3847 j->exit_status_port = MACH_PORT_NULL;
3848 j->spawn_reply_port = MACH_PORT_NULL;
3849 }
3850
ddbbfbc1
A
3851 if (j->anonymous) {
3852 total_anon_children--;
dcace88f 3853 if (j->holds_ref) {
eabd1701 3854 job_log(j, LOG_PERF, "Anonymous job exited holding reference.");
ddbbfbc1
A
3855 runtime_del_ref();
3856 }
3857 } else {
eabd1701 3858 job_log(j, LOG_PERF, "Job exited.");
ddbbfbc1
A
3859 runtime_del_ref();
3860 total_children--;
3861 }
eabd1701 3862
dcace88f 3863 if (j->has_console) {
eabd1701 3864 launchd_wsp = 0;
ddbbfbc1 3865 }
dcace88f
A
3866
3867 if (j->shutdown_monitor) {
3868 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
eabd1701 3869 _launchd_shutdown_monitor = NULL;
dcace88f
A
3870 j->shutdown_monitor = false;
3871 }
3872
dcace88f 3873 if (!j->anonymous) {
5b0a4722
A
3874 j->mgr->normal_active_cnt--;
3875 }
ddbbfbc1 3876 j->sent_signal_time = 0;
5b0a4722 3877 j->sent_sigkill = false;
ddbbfbc1 3878 j->clean_kill = false;
eabd1701 3879 j->event_monitor_ready2signal = false;
5b0a4722 3880 j->p = 0;
95379394 3881 j->uniqueid = 0;
5b0a4722
A
3882}
3883
3884void
3885jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3886{
3887 jobmgr_t jmi, jmn;
3888 job_t ji, jn;
3889
3890 if (jm->shutting_down) {
3891 return;
3892 }
3893
3894 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3895 jobmgr_dispatch_all(jmi, newmounthack);
ed34e3c3
A
3896 }
3897
5b0a4722
A
3898 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3899 if (newmounthack && ji->start_on_mount) {
3900 ji->start_pending = true;
3901 }
ed34e3c3 3902
5b0a4722 3903 job_dispatch(ji, false);
ed34e3c3 3904 }
5b0a4722 3905}
ed34e3c3 3906
ddbbfbc1
A
3907void
3908job_dispatch_curious_jobs(job_t j)
3909{
3910 job_t ji = NULL, jt = NULL;
dcace88f 3911 SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
ddbbfbc1 3912 struct semaphoreitem *si = NULL;
dcace88f
A
3913 SLIST_FOREACH(si, &ji->semaphores, sle) {
3914 if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
ddbbfbc1
A
3915 continue;
3916 }
eabd1701 3917
dcace88f 3918 if (strcmp(si->what, j->label) == 0) {
ddbbfbc1 3919 job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);
eabd1701 3920
dcace88f
A
3921 if (!ji->removing) {
3922 job_dispatch(ji, false);
3923 } else {
3924 job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
3925 }
eabd1701 3926
ddbbfbc1
A
3927 /* ji could be removed here, so don't do anything with it or its semaphores
3928 * after this point.
3929 */
3930 break;
3931 }
3932 }
3933 }
3934}
3935
5b0a4722
A
/* Decide what to do with a job right now: remove it, start it, or watch it.
 *
 * Returns j when the job survives the call, or NULL when the job was not
 * dispatched (awaiting audit session, alias, cannot exec, permission denied
 * on embedded) or was removed. Callers must treat a NULL return as "do not
 * touch j again" — job_remove() below may have freed it.
 */
job_t
job_dispatch(job_t j, bool kickstart)
{
	// Don't dispatch a job if it has no audit session set.
	if (!uuid_is_null(j->expected_audit_uuid)) {
		job_log(j, LOG_DEBUG, "Job is still awaiting its audit session UUID. Not dispatching.");
		return NULL;
	}
	if (j->alias) {
		job_log(j, LOG_DEBUG, "Job is an alias. Not dispatching.");
		return NULL;
	}

	// waiting4ok is set when a prior exec(3) failed; an external event will retry.
	if (j->waiting4ok) {
		job_log(j, LOG_DEBUG, "Job cannot exec(3). Not dispatching.");
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* "Hand of god" mode: only jobs belonging to the same user as the
	 * designated god job may be dispatched; everything else gets EPERM/EINVAL.
	 */
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!job_assumes(j, _launchd_embedded_god->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	/*
	 * The whole job removal logic needs to be consolidated. The fact that
	 * a job can be removed from just about anywhere makes it easy to have
	 * stale pointers left behind somewhere on the stack that might get
	 * used after the deallocation. In particular, during job iteration.
	 *
	 * This is a classic example. The act of dispatching a job may delete it.
	 */
	if (!job_active(j)) {
		if (job_useless(j)) {
			job_log(j, LOG_DEBUG, "Job is useless. Removing.");
			// NOTE: j is freed here; NULL return tells the caller not to reuse it.
			job_remove(j);
			return NULL;
		}
		if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
			job_log(j, LOG_DEBUG, "Per-user launchd is suspended. Not dispatching.");
			return NULL;
		}

		// kickstart forces a launch; otherwise the keepalive criteria decide.
		if (kickstart || job_keepalive(j)) {
			job_log(j, LOG_DEBUG, "%starting job", kickstart ? "Kicks" : "S");
			job_start(j);
		} else {
			// Not ready to run: arm the job's event sources and wait.
			job_log(j, LOG_DEBUG, "Watching job.");
			job_watch(j);
		}
	} else {
		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job: %s.", job_active(j));
	}

	return j;
}
4003
ed34e3c3 4004void
5b0a4722 4005job_kill(job_t j)
ed34e3c3 4006{
ddbbfbc1 4007 if (unlikely(!j->p || j->anonymous)) {
5b0a4722
A
4008 return;
4009 }
ed34e3c3 4010
eabd1701 4011 (void)job_assumes_zero_p(j, kill2(j->p, SIGKILL));
5b0a4722
A
4012
4013 j->sent_sigkill = true;
eabd1701 4014 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j));
5b0a4722 4015
ddbbfbc1 4016 job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
5b0a4722
A
4017}
4018
4019void
dcace88f 4020job_open_shutdown_transaction(job_t j)
5b0a4722 4021{
eabd1701
A
4022 int rv = proc_set_dirty(j->p, true);
4023 if (rv != 0) {
dcace88f
A
4024 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
4025 j->dirty_at_shutdown = false;
4026 }
4027}
4028
4029void
4030job_close_shutdown_transaction(job_t j)
4031{
4032 if (j->dirty_at_shutdown) {
4033 job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
eabd1701 4034 (void)job_assumes_zero(j, proc_set_dirty(j->p, false));
dcace88f
A
4035 j->dirty_at_shutdown = false;
4036 }
4037}
ddbbfbc1 4038
dcace88f
A
4039void
4040job_log_children_without_exec(job_t j)
4041{
4042 pid_t *pids = NULL;
4043 size_t len = sizeof(pid_t) * get_kern_max_proc();
4044 int i = 0, kp_cnt = 0;
eabd1701
A
4045
4046 if (!launchd_apple_internal || j->anonymous || j->per_user) {
ddbbfbc1
A
4047 return;
4048 }
4049
dcace88f 4050 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
ddbbfbc1
A
4051 return;
4052 }
eabd1701 4053 if (job_assumes_zero_p(j, (kp_cnt = proc_listchildpids(j->p, pids, len))) == -1) {
ddbbfbc1
A
4054 goto out;
4055 }
4056
ddbbfbc1 4057 for (i = 0; i < kp_cnt; i++) {
dcace88f
A
4058 struct proc_bsdshortinfo proc;
4059 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
4060 if (errno != ESRCH) {
eabd1701 4061 (void)job_assumes_zero(j, errno);
dcace88f 4062 }
ddbbfbc1
A
4063 continue;
4064 }
dcace88f 4065 if (proc.pbsi_flags & P_EXEC) {
ddbbfbc1
A
4066 continue;
4067 }
4068
dcace88f 4069 job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
ddbbfbc1
A
4070 }
4071
4072out:
dcace88f 4073 free(pids);
ddbbfbc1
A
4074}
4075
ddbbfbc1
A
/* Handle an EVFILT_PROC kevent for a job: NOTE_EXEC (program changed),
 * NOTE_FORK (child forked) and NOTE_EXIT (process died) may all be set on
 * the same event. On NOTE_EXIT for an anonymous job, j is removed and must
 * not be used afterwards.
 */
void
job_callback_proc(job_t j, struct kevent *kev)
{
	bool program_changed = false;
	int fflags = kev->fflags;

	job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
	log_kevent_struct(LOG_DEBUG, kev, 0);

	if (fflags & NOTE_EXEC) {
		program_changed = true;

		if (j->anonymous) {
			/* An anonymous (un-managed) process exec(3)ed something new:
			 * rename the job after the new image and re-hash it.
			 */
			struct proc_bsdshortinfo proc;
			if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
				char newlabel[1000];

				snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);

				job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);

				LIST_REMOVE(j, label_hash_sle);
				/* NOTE(review): unbounded strcpy into the existing label
				 * storage — presumably the anonymous-job label allocation is
				 * sized to accommodate this, but that invariant is not
				 * visible here. TODO confirm against job_new_anonymous().
				 */
				strcpy((char *)j->label, newlabel);

				/* Re-insert under the new label's hash bucket; XPC domains
				 * keep their jobs in their own manager's label hash.
				 */
				jobmgr_t where2put = root_jobmgr;
				if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
					where2put = j->mgr;
				}
				LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
			} else if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
		} else {
			/* A managed job exec(3)ed. If a spawner is waiting for a reply,
			 * send it now; on send failure, the receive right for the exit
			 * status port must be torn down here.
			 */
			if (j->spawn_reply_port) {
				errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
				if (errno) {
					if (errno != MACH_SEND_INVALID_DEST) {
						(void)job_assumes_zero(j, errno);
					}
					(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
				}

				j->spawn_reply_port = MACH_PORT_NULL;
				j->exit_status_port = MACH_PORT_NULL;
			}

			/* For XPC services, the first exec is xpcproxy itself; the
			 * second NOTE_EXEC (did_exec already true) is the real service.
			 */
			if (j->xpc_service && j->did_exec) {
				j->xpcproxy_did_exec = true;
			}

			j->did_exec = true;
			job_log(j, LOG_DEBUG, "Program changed");
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
		job_log_children_without_exec(j);
	}

	if (fflags & NOTE_EXIT) {
		/* kev->data carries exit-detail flags when NOTE_EXIT_DETAIL was requested. */
		if (kev->data & NOTE_EXIT_DECRYPTFAIL) {
			j->fpfail = true;
			job_log(j, LOG_WARNING, "FairPlay decryption failed on binary for job.");
		} else if (kev->data & NOTE_EXIT_MEMORY) {
			j->jettisoned = true;
			job_log(j, LOG_INFO, "Job was killed due to memory pressure.");
		}

		job_reap(j);

		if (j->anonymous) {
			// Anonymous jobs exist only while their process does.
			job_remove(j);
			j = NULL;
		} else {
			/* Drop any attach-waiters that were waiting on the dead PID,
			 * then let dispatch decide whether to respawn.
			 */
			struct waiting4attach *w4ai = NULL;
			struct waiting4attach *w4ait = NULL;
			LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
				if (w4ai->dest == (pid_t)kev->ident) {
					waiting4attach_delete(j->mgr, w4ai);
				}
			}

			(void)job_dispatch(j, false);
		}
	}
}
4163
/* Demultiplex an EVFILT_TIMER callback for a job. The timer's ident encodes
 * which timer fired: the job itself (respawn throttle), its semaphores, its
 * start interval, or the exit timeout used during kill escalation.
 */
void
job_callback_timer(job_t j, void *ident)
{
	if (j == ident) {
		// Respawn-throttle timer (armed in job_start) expired: force a start.
		job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		// StartInterval fired: mark a pending start and re-evaluate.
		job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (!job_assumes(j, j->p != 0)) {
			return;
		}

		if (j->sent_sigkill) {
			/* SIGKILL was already sent and the process is still alive —
			 * the kernel is wedged (see below). Fake the exit ourselves.
			 */
			uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);

			td /= NSEC_PER_SEC;
			td -= j->clean_kill ? 0 : j->exit_timeout;

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
			j->workaround9359725 = true;

			// This basically has to be done off the main thread. We have no
			// mechanism for draining the main queue in our run loop (like CF
			// does), and the kevent mechanism wants an object to be associated
			// as the callback. So we just create a dispatch source and reap the
			// errant PID whenever we can. Note that it is not safe for us to do
			// any logging in this block, since logging requires exclusive
			// access to global data structures that is only protected by the
			// main thread.
			dispatch_source_t hack_13570156 = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, j->p, DISPATCH_PROC_EXIT, dispatch_get_global_queue(0, 0));
			dispatch_source_set_event_handler(hack_13570156, ^{
				pid_t pid = (pid_t)dispatch_source_get_handle(hack_13570156);

				int status = 0;
				(void)waitpid(pid, &status, 0);
				dispatch_release(hack_13570156);
			});

			dispatch_resume(hack_13570156);

			if (launchd_trap_sigkill_bugs) {
				job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			/* Inject a synthetic NOTE_EXIT so the normal reap path runs even
			 * though the kernel never delivered the real exit event.
			 */
			struct kevent bogus_exit;
			EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
			jobmgr_callback(j->mgr, &bogus_exit);
		} else {
			// ExitTimeOut elapsed after SIGTERM: escalate to SIGKILL.
			if (unlikely(j->debug_before_kill)) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
			job_kill(j);
		}
	} else {
		job_log(j, LOG_ERR, "Unrecognized job timer callback: %p", ident);
	}
}
4231
4232void
4233job_callback_read(job_t j, int ident)
4234{
eabd1701 4235 if (ident == j->stdin_fd) {
ddbbfbc1 4236 job_dispatch(j, true);
5b0a4722
A
4237 } else {
4238 socketgroup_callback(j);
4239 }
4240}
4241
4242void
4243jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
4244{
4245 jobmgr_t jmi;
4246 job_t j;
4247
4248 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
4249 jobmgr_reap_bulk(jmi, kev);
4250 }
4251
ddbbfbc1 4252 if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
5b0a4722
A
4253 kev->udata = j;
4254 job_callback(j, kev);
4255 }
4256}
4257
/* Kevent callback for a job manager. Demultiplexes process exits, signals
 * sent to launchd, filesystem (mount) events, manager-level timers, and
 * vnode events for /dev/autofs_nowait and the console.
 */
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	jobmgr_t jm = obj;

#if TARGET_OS_EMBEDDED
	int flag2check = VQ_MOUNT;
#else
	int flag2check = VQ_UPDATE;
#endif

	switch (kev->filter) {
	case EVFILT_PROC:
		/* A process died: find its job and reap it, then garbage-collect
		 * managers that may have become empty. Note that GC can replace
		 * root_jobmgr.
		 */
		jobmgr_reap_bulk(jm, kev);
		root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:
			jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		case SIGUSR2:
			// Turn on all logging.
			launchd_log_perf = true;
			launchd_log_debug = true;
			launchd_log_shutdown = true;
			/* Hopefully /var is available by this point. If not, uh, oh well.
			 * It's just a debugging facility.
			 */
			return jobmgr_log_perf_statistics(jm, false);
		case SIGINFO:
			return jobmgr_log_perf_statistics(jm, true);
		default:
			jobmgr_log(jm, LOG_ERR, "Unrecognized signal: %lu: %s", kev->ident, strsignal(kev->ident));
		}
		break;
	case EVFILT_FS:
		/* flag2check (VQ_UPDATE on desktop, VQ_MOUNT on embedded) is used
		 * to re-probe /var/log writability once; other mount events kick
		 * the start-on-mount machinery.
		 */
		if (kev->fflags & flag2check) {
			if (!launchd_var_available) {
				struct stat sb;
				if (stat("/var/log", &sb) == 0 && (sb.st_mode & S_IWUSR)) {
					launchd_var_available = true;
				}
			}
		} else if (kev->fflags & VQ_MOUNT) {
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		/* Timer identity is encoded in the ident pointer value. */
		if (kev->ident == (uintptr_t)&sorted_calendar_events) {
			calendarinterval_callback();
		} else if (kev->ident == (uintptr_t)jm) {
			jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
			jobmgr_still_alive_with_check(jm);
		} else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
			jobmgr_do_garbage_collection(jm);
		} else if (kev->ident == (uintptr_t)&launchd_runtime_busy_time) {
			// Idle-exit: only shut down if nothing is holding a busy ref.
			jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
			if (jobmgr_assumes_zero(jm, runtime_busy_cnt) == 0) {
				return launchd_shutdown();
			}
#if HAVE_SYSTEMSTATS
		} else if (kev->ident == (uintptr_t)systemstats_timer_callback) {
			systemstats_timer_callback();
#endif
		}
		break;
	case EVFILT_VNODE:
		if (kev->ident == (uintptr_t)s_no_hang_fd) {
			/* autofs became available: swap the watched fd for the real
			 * non-blocking autofs device.
			 */
			int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
			if (unlikely(_no_hang_fd != -1)) {
				jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
				(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL));
				(void)jobmgr_assumes_zero_p(root_jobmgr, runtime_close(s_no_hang_fd));
				s_no_hang_fd = _fd(_no_hang_fd);
			}
		} else if (pid1_magic && launchd_console && kev->ident == (uintptr_t)fileno(launchd_console)) {
			// The console went away (e.g. revoked); try to reopen it.
			int cfd = -1;
			if (jobmgr_assumes_zero_p(jm, cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1) {
				_fd(cfd);
				if (!(launchd_console = fdopen(cfd, "w"))) {
					(void)jobmgr_assumes_zero(jm, errno);
					(void)close(cfd);
				}
			}
		}
		break;
	default:
		jobmgr_log(jm, LOG_ERR, "Unrecognized kevent filter: %hd", kev->filter);
	}
}
ed34e3c3 4352
5b0a4722
A
4353void
4354job_callback(void *obj, struct kevent *kev)
4355{
4356 job_t j = obj;
4357
4358 job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
4359
4360 switch (kev->filter) {
4361 case EVFILT_PROC:
ddbbfbc1 4362 return job_callback_proc(j, kev);
5b0a4722 4363 case EVFILT_TIMER:
ddbbfbc1 4364 return job_callback_timer(j, (void *) kev->ident);
5b0a4722 4365 case EVFILT_READ:
ddbbfbc1 4366 return job_callback_read(j, (int) kev->ident);
5b0a4722
A
4367 case EVFILT_MACHPORT:
4368 return (void)job_dispatch(j, true);
4369 default:
eabd1701 4370 job_log(j, LOG_ERR, "Unrecognized job callback filter: %hd", kev->filter);
ed34e3c3
A
4371 }
4372}
4373
/* Launch a job: enforce the respawn throttle, fork, and set up all parent-
 * side bookkeeping (PID hashes, kevent registration, IPC sockets). The
 * child half ends in job_start_child(), which execs the program.
 */
void
job_start(job_t j)
{
	uint64_t td;
	int spair[2];      // IPC socketpair handed to the child when sipc is set
	int execspair[2];  // fork/exec synchronization socketpair
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXIT_DETAIL|NOTE_EXITSTATUS;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (unlikely(job_active(j))) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	// Pair up any pending debugger/attach requests with this job before launch.
	if (!LIST_EMPTY(&j->mgr->attaches)) {
		job_log(j, LOG_DEBUG, "Looking for attachments for job: %s", j->label);
		(void)waiting4attach_find(j->mgr, j);
	}

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of the wall clock
	 * wherever possible.
	 */
	td = runtime_get_nanoseconds_since(j->start_time);
	td /= NSEC_PER_SEC;

	/* Respawn throttle: if the previous run lasted less than min_run_time,
	 * defer the relaunch with a one-shot timer instead of starting now.
	 * unthrottle (set for jettisoned idle-exit jobs) bypasses this.
	 */
	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat && !j->unthrottle) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;
		/* We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */
		job_log(j, LOG_NOTICE, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j));
		job_ignore(j);
		return;
	}

	// sipc: does this job need the in-band check-in IPC socket?
	if (likely(!j->legacy_mach_job)) {
		sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_god;
	}

	if (sipc) {
		(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair));
	}

	(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair));

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		// fork failed: retry in one second and release both socketpairs.
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j));
		job_ignore(j);

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			(void)job_assumes_zero(j, runtime_close(spair[1]));
		}
		break;
	case 0:
		// Child side.
		if (unlikely(_vproc_post_fork_ping())) {
			_exit(EXIT_FAILURE);
		}

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		// wait for our parent to say they've attached a kevent to us
		/* NOTE(review): read(2) result is intentionally unchecked — either a
		 * byte or parent-side close (EOF) unblocks us here.
		 */
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			// Advertise the trusted IPC fd to the child via the environment.
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		// Parent side: c is the child's PID.
		j->start_time = runtime_get_opaque_time();

		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		// Reset all per-run state for the new instance.
		j->did_exec = false;
		j->fpfail = false;
		j->jettisoned = false;
		j->xpcproxy_did_exec = false;
		j->checkedin = false;
		j->start_pending = false;
		j->reaped = false;
		j->crashed = false;
		j->stopped = false;
		j->workaround9359725 = false;
		j->implicit_reap = false;
		j->unthrottle = false;
		if (j->needs_kickoff) {
			j->needs_kickoff = false;

			// With no semaphores, a kickoff-needing job becomes always-on.
			if (SLIST_EMPTY(&j->semaphores)) {
				j->ondemand = false;
			}
		}

		if (j->has_console) {
			launchd_wsp = c;
		}

		job_log(j, LOG_PERF, "Job started.");
		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
		LIST_INSERT_HEAD(&managed_actives[ACTIVE_JOB_HASH(c)], j, global_pid_hash_sle);
		j->p = c;

		struct proc_uniqidentifierinfo info;
		if (proc_pidinfo(c, PROC_PIDUNIQIDENTIFIERINFO, 0, &info, PROC_PIDUNIQIDENTIFIERINFO_SIZE) != 0) {
			// ignore errors here, kevent_mod below will catch them and clean up
			j->uniqueid = info.p_uniqueid;
		}

		j->mgr->normal_active_cnt++;
		j->fork_fd = _fd(execspair[0]);
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[1]));
			ipc_open(_fd(spair[0]), j);
		}
		if (kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1) {
			job_ignore(j);
		} else {
			/* The kevent attach failed — almost certainly because the child
			 * already died (ESRCH); anything else trips the assumption log.
			 */
			if (errno == ESRCH) {
				job_log(j, LOG_ERR, "Child was killed before we could attach a kevent.");
			} else {
				(void)job_assumes(j, errno == ESRCH);
			}
			job_reap(j);

			/* If we have reaped this job within this same run loop pass, then
			 * it will be currently ignored. So if there's a failure to attach a
			 * kevent, we need to make sure that we watch the job so that we can
			 * respawn it.
			 *
			 * See <rdar://problem/10140809>.
			 */
			job_watch(j);
		}

#if HAVE_SYSTEMSTATS
		if (systemstats_is_enabled()) {
			/* We don't really *need* to make the full rusage call -- it
			 * will be mostly 0s and very small numbers. We only need
			 * ri_proc_start_abstime, because that's how we disambiguiate
			 * PIDs when they wrap around; and the UUID.
			 * In the future we should use the 64-bit process unique ID,
			 * so there's nothing to disambiguiate, and skip the full
			 * rusage call here.
			 *
			 * Well, the future is now.
			 */
			if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_LAUNCHD_SHOULD_LOG_JOB_START)) {
				job_log_perf_statistics(j, NULL, -3);
			}
		}
#endif
		j->wait4debugger_oneshot = false;
		// Release the child from its read() barrier unless it should stall.
		if (likely(!j->stall_before_exec)) {
			job_uncork_fork(j);
		}
		break;
	}
}
4550
5b0a4722
A
4551void
4552job_start_child(job_t j)
ed34e3c3 4553{
ddbbfbc1 4554 typeof(posix_spawn) *psf;
ed34e3c3
A
4555 const char *file2exec = "/usr/libexec/launchproxy";
4556 const char **argv;
5b0a4722 4557 posix_spawnattr_t spattr;
ed34e3c3
A
4558 int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
4559 glob_t g;
5b0a4722 4560 short spflags = POSIX_SPAWN_SETEXEC;
95379394 4561 int psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
5b0a4722 4562 size_t binpref_out_cnt = 0;
ddbbfbc1 4563 size_t i;
5b0a4722 4564
eabd1701 4565 (void)job_assumes_zero(j, posix_spawnattr_init(&spattr));
5b0a4722 4566
ed34e3c3
A
4567 job_setup_attributes(j);
4568
95379394
A
4569 bool use_xpcproxy = false;
4570 struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
4571 if (w4a) {
4572 (void)setenv(XPC_SERVICE_ENV_ATTACHED, "1", 1);
4573 if (!j->xpc_service) {
4574 use_xpcproxy = true;
4575 }
4576 }
4577
4578 if (use_xpcproxy) {
4579 argv = alloca(3 * sizeof(char *));
4580 argv[0] = "/usr/libexec/xpcproxy";
4581 argv[1] = "-debug";
4582 argv[2] = NULL;
4583
4584 file2exec = argv[0];
4585 } else if (unlikely(j->argv && j->globargv)) {
ed34e3c3
A
4586 g.gl_offs = 1;
4587 for (i = 0; i < j->argc; i++) {
5b0a4722 4588 if (i > 0) {
ed34e3c3 4589 gflags |= GLOB_APPEND;
5b0a4722 4590 }
ed34e3c3
A
4591 if (glob(j->argv[i], gflags, NULL, &g) != 0) {
4592 job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
4593 exit(EXIT_FAILURE);
4594 }
4595 }
4596 g.gl_pathv[0] = (char *)file2exec;
4597 argv = (const char **)g.gl_pathv;
ddbbfbc1 4598 } else if (likely(j->argv)) {
ed34e3c3
A
4599 argv = alloca((j->argc + 2) * sizeof(char *));
4600 argv[0] = file2exec;
5b0a4722 4601 for (i = 0; i < j->argc; i++) {
ed34e3c3 4602 argv[i + 1] = j->argv[i];
5b0a4722 4603 }
ed34e3c3
A
4604 argv[i + 1] = NULL;
4605 } else {
4606 argv = alloca(3 * sizeof(char *));
4607 argv[0] = file2exec;
4608 argv[1] = j->prog;
4609 argv[2] = NULL;
4610 }
4611
95379394 4612 if (likely(!(j->inetcompat || use_xpcproxy))) {
ed34e3c3 4613 argv++;
5b0a4722
A
4614 }
4615
ddbbfbc1 4616 if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
95379394 4617 if (!j->app) {
dcace88f
A
4618 job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
4619 }
5b0a4722
A
4620 spflags |= POSIX_SPAWN_START_SUSPENDED;
4621 }
4622
eabd1701 4623#if !TARGET_OS_EMBEDDED
dcace88f
A
4624 if (unlikely(j->disable_aslr)) {
4625 spflags |= _POSIX_SPAWN_DISABLE_ASLR;
4626 }
eabd1701 4627#endif
dcace88f
A
4628 spflags |= j->pstype;
4629
eabd1701 4630 (void)job_assumes_zero(j, posix_spawnattr_setflags(&spattr, spflags));
ddbbfbc1 4631 if (unlikely(j->j_binpref_cnt)) {
eabd1701 4632 (void)job_assumes_zero(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt));
dcace88f 4633 (void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
5b0a4722
A
4634 }
4635
95379394
A
4636 psproctype = j->psproctype;
4637 (void)job_assumes_zero(j, posix_spawnattr_setprocesstype_np(&spattr, psproctype));
4638
eabd1701
A
4639#if TARGET_OS_EMBEDDED
4640 /* Set jetsam attributes. POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY guards
4641 * against a race which arises if, during spawn, an initial jetsam property
4642 * update occurs before the values below are applied. In this case, the flag
4643 * ensures that the subsequent change is ignored; the explicit update should
4644 * be given priority.
4645 */
95379394
A
4646 (void)job_assumes_zero(j, posix_spawnattr_setjetsam(&spattr,
4647 POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY | (j->jetsam_memory_limit_background ? POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND : 0),
4648 j->jetsam_priority, j->jetsam_memlimit));
4649#endif
4650
4651 mach_port_array_t sports = NULL;
4652 mach_msg_type_number_t sports_cnt = 0;
4653 kern_return_t kr = vproc_mig_get_listener_port_rights(bootstrap_port, &sports, &sports_cnt);
4654 if (kr == 0 && sports_cnt) {
4655 /* For some reason, this SPI takes a count as a signed quantity. */
4656 (void)posix_spawnattr_set_importancewatch_port_np(&spattr, (int)sports_cnt, sports);
4657
4658 /* All "count" parameters in MIG are counts of the array. So an array of
4659 * mach_port_t containing 10 elements will have a count of ten, but it
4660 * will occupy 40 bytes. So we must do the multiplication here to pass
4661 * the correct size.
4662 *
4663 * Note that we do NOT release the send rights. We need them to be valid
4664 * at the time they are passed to posix_spawn(2). When we exec(3) using
4665 * posix_spawn(2), they'll be cleaned up anyway.
4666 */
4667 mig_deallocate((vm_address_t)sports, sports_cnt * sizeof(sports[0]));
4668 } else if (kr != BOOTSTRAP_UNKNOWN_SERVICE) {
4669 (void)job_assumes_zero(j, kr);
4670 }
4671
4672#if TARGET_OS_EMBEDDED
4673 if (!j->app || j->system_app) {
4674 (void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
eabd1701 4675 }
95379394
A
4676#else
4677 (void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
4678#endif
eabd1701 4679
95379394
A
4680#if !TARGET_OS_EMBEDDED
4681 struct task_qos_policy qosinfo = {
4682 .task_latency_qos_tier = LATENCY_QOS_LAUNCH_DEFAULT_TIER,
4683 .task_throughput_qos_tier = THROUGHPUT_QOS_LAUNCH_DEFAULT_TIER,
4684 };
4685
f9823965
A
4686 if (!j->legacy_timers) {
4687 kr = task_policy_set(mach_task_self(), TASK_BASE_QOS_POLICY, (task_policy_t)&qosinfo, TASK_QOS_POLICY_COUNT);
4688 (void)job_assumes_zero_p(j, kr);
4689 }
eabd1701
A
4690#endif
4691
95379394
A
4692#if HAVE_RESPONSIBILITY
4693 /* Specify which process is responsible for the new job. Per-app XPC
4694 * services are the responsibility of the app. Other processes are
4695 * responsible for themselves. This decision is final and also applies
4696 * to the process's children, so don't initialize responsibility when
4697 * starting a per-user launchd.
4698 */
4699 if (j->mgr->req_pid) {
4700 responsibility_init2(j->mgr->req_pid, NULL);
4701 } else if (!j->per_user) {
4702 responsibility_init2(getpid(), j->prog ? j->prog : j->argv[0]);
eabd1701 4703 }
95379394 4704#endif
eabd1701 4705
f36da725 4706#if HAVE_QUARANTINE
5b0a4722
A
4707 if (j->quarantine_data) {
4708 qtn_proc_t qp;
ed34e3c3 4709
5b0a4722 4710 if (job_assumes(j, qp = qtn_proc_alloc())) {
eabd1701
A
4711 if (job_assumes_zero(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
4712 (void)job_assumes_zero(j, qtn_proc_apply_to_self(qp));
5b0a4722
A
4713 }
4714 }
4715 }
f36da725 4716#endif
ed34e3c3 4717
f36da725 4718#if HAVE_SANDBOX
95379394
A
4719#if TARGET_OS_EMBEDDED
4720 struct sandbox_spawnattrs sbattrs;
4721 if (j->seatbelt_profile || j->container_identifier) {
4722 sandbox_spawnattrs_init(&sbattrs);
4723 if (j->seatbelt_profile) {
4724 sandbox_spawnattrs_setprofilename(&sbattrs, j->seatbelt_profile);
4725 }
4726 if (j->container_identifier) {
4727 sandbox_spawnattrs_setcontainer(&sbattrs, j->container_identifier);
4728 }
4729 (void)job_assumes_zero(j, posix_spawnattr_setmacpolicyinfo_np(&spattr, "Sandbox", &sbattrs, sizeof(sbattrs)));
4730 }
4731#else
5b0a4722
A
4732 if (j->seatbelt_profile) {
4733 char *seatbelt_err_buf = NULL;
ed34e3c3 4734
eabd1701 4735 if (job_assumes_zero_p(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf)) == -1) {
5b0a4722
A
4736 if (seatbelt_err_buf) {
4737 job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
4738 }
4739 goto out_bad;
4740 }
ed34e3c3 4741 }
95379394 4742#endif
f36da725 4743#endif
ed34e3c3 4744
ddbbfbc1
A
4745 psf = j->prog ? posix_spawn : posix_spawnp;
4746
95379394 4747 if (likely(!(j->inetcompat || use_xpcproxy))) {
ddbbfbc1 4748 file2exec = j->prog ? j->prog : argv[0];
ed34e3c3
A
4749 }
4750
dcace88f 4751 errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);
dcace88f 4752
95379394 4753#if HAVE_SANDBOX && !TARGET_OS_EMBEDDED
5b0a4722 4754out_bad:
ef398931 4755#endif
dcace88f 4756 _exit(errno);
ed34e3c3
A
4757}
4758
5b0a4722
A
4759void
4760jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
ed34e3c3 4761{
5b0a4722 4762 launch_data_t tmp;
ed34e3c3 4763 struct envitem *ei;
5b0a4722 4764 job_t ji;
ed34e3c3 4765
5b0a4722
A
4766 if (jm->parentmgr) {
4767 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4768 } else {
4769 char **tmpenviron = environ;
4770 for (; *tmpenviron; tmpenviron++) {
4771 char envkey[1024];
4772 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4773 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
4774 strncpy(envkey, *tmpenviron, sizeof(envkey));
4775 *(strchr(envkey, '=')) = '\0';
4776 launch_data_dict_insert(dict, s, envkey);
4777 }
4778 }
ed34e3c3 4779
5b0a4722
A
4780 LIST_FOREACH(ji, &jm->jobs, sle) {
4781 SLIST_FOREACH(ei, &ji->global_env, sle) {
4782 if ((tmp = launch_data_new_string(ei->value))) {
4783 launch_data_dict_insert(dict, tmp, ei->key);
4784 }
4785 }
4786 }
ed34e3c3
A
4787}
4788
4789void
5b0a4722 4790jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
ed34e3c3 4791{
ed34e3c3 4792 struct envitem *ei;
5b0a4722 4793 job_t ji;
ed34e3c3 4794
5b0a4722
A
4795 if (jm->parentmgr) {
4796 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4797 }
ed34e3c3 4798
ddbbfbc1 4799 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
5b0a4722
A
4800 SLIST_FOREACH(ei, &ji->global_env, sle) {
4801 setenv(ei->key, ei->value, 1);
ed34e3c3 4802 }
5b0a4722
A
4803 }
4804}
ed34e3c3 4805
/* Diagnostic (AppleInternal only): scan every PID on the system and log any
 * process whose real, effective, or saved UID matches this job's mach_uid —
 * used when getpwuid() finds no account backing that UID.
 */
void
job_log_pids_with_weird_uids(job_t j)
{
	// Buffer sized for the kernel's max proc count; len is in bytes.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	pid_t *pids = NULL;
	uid_t u = j->mach_uid;
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	pids = malloc(len);
	if (!job_assumes(j, pids != NULL)) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);

	/* libproc actually has some serious performance drawbacks when used over sysctl(3) in
	 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
	 * one kernel call, libproc requires that we get a list of PIDs we're interested in
	 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
	 * struct back in a single call for each one.
	 *
	 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
	 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
	 * libproc could go stale before we call proc_pidinfo().
	 *
	 * Note that proc_list*() APIs return the number of PIDs given back, not the number
	 * of bytes written to the buffer.
	 */
	if (job_assumes_zero_p(j, (kp_cnt = proc_listallpids(pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		/* We perhaps should not log a bug here if we get ESRCH back, due to the race
		 * detailed above.
		 */
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			// proc_pidinfo returns bytes copied; 0 means the lookup failed.
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		uid_t i_euid = proc.pbsi_uid;
		uid_t i_uid = proc.pbsi_ruid;
		uid_t i_svuid = proc.pbsi_svuid;
		pid_t i_pid = pids[i];

		// Only interested in processes running under the accountless UID.
		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);

// Temporarily disabled due to 5423935 and 4946119.
#if 0
		// Ask the accountless process to exit.
		(void)job_assumes_zero_p(j, kill2(i_pid, SIGTERM));
#endif
	}

out:
	free(pids);
}
4875
5c88273d
A
/* getpwnam(3) wrapper that avoids deadlocking PID 1 on opendirectoryd.
 * Falls back to a normal lookup if the local-only pass finds nothing.
 */
static struct passwd *
job_getpwnam(job_t j, const char *name)
{
	/*
	 * methodology for system daemons
	 *
	 * first lookup user record without any opendirectoryd interaction,
	 * we don't know what interprocess dependencies might be in flight.
	 * if that fails, we re-enable opendirectoryd interaction and
	 * re-issue the lookup. We have to disable the libinfo L1 cache
	 * otherwise libinfo will return the negative cache entry on the retry
	 */
#if !TARGET_OS_EMBEDDED
	struct passwd *pw = NULL;

	if (pid1_magic && j->mgr == root_jobmgr) {
		// 1 == SEARCH_MODULE_FLAG_DISABLED
		si_search_module_set_flags("ds", 1);
		gL1CacheEnabled = false;

		pw = getpwnam(name);
		// Re-enable the directory-services module for subsequent lookups.
		si_search_module_set_flags("ds", 0);
	}

	if (pw == NULL) {
		pw = getpwnam(name);
	}

	return pw;
#else
#pragma unused (j)
	return getpwnam(name);
#endif
}
4910
4911static struct group *
4912job_getgrnam(job_t j, const char *name)
4913{
4914#if !TARGET_OS_EMBEDDED
4915 struct group *gr = NULL;
4916
4917 if (pid1_magic && j->mgr == root_jobmgr) {
eabd1701 4918 si_search_module_set_flags("ds", 1);
5c88273d
A
4919 gL1CacheEnabled = false;
4920
4921 gr = getgrnam(name);
4922
4923 si_search_module_set_flags("ds", 0);
4924 }
4925
4926 if (gr == NULL) {
4927 gr = getgrnam(name);
4928 }
4929
4930 return gr;
4931#else
4932#pragma unused (j)
4933 return getgrnam(name);
4934#endif
4935}
4936
cf0bacfd 4937void
ddbbfbc1 4938job_postfork_test_user(job_t j)
cf0bacfd 4939{
eabd1701 4940 // This function is all about 5201578
cf0bacfd 4941
ddbbfbc1
A
4942 const char *home_env_var = getenv("HOME");
4943 const char *user_env_var = getenv("USER");
4944 const char *logname_env_var = getenv("LOGNAME");
4945 uid_t tmp_uid, local_uid = getuid();
4946 gid_t tmp_gid, local_gid = getgid();
4947 char shellpath[PATH_MAX];
4948 char homedir[PATH_MAX];
4949 char loginname[2000];
4950 struct passwd *pwe;
4951
4952
4953 if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
4954 && strcmp(user_env_var, logname_env_var) == 0)) {
4955 goto out_bad;
cf0bacfd
A
4956 }
4957
5c88273d 4958 if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
ddbbfbc1
A
4959 job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
4960 goto out_bad;
cf0bacfd 4961 }
ddbbfbc1
A
4962
4963 /*
4964 * We must copy the results of getpw*().
4965 *
4966 * Why? Because subsequent API calls may call getpw*() as a part of
4967 * their implementation. Since getpw*() returns a [now thread scoped]
4968 * global, we must therefore cache the results before continuing.
4969 */
4970
4971 tmp_uid = pwe->pw_uid;
4972 tmp_gid = pwe->pw_gid;
4973
4974 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4975 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4976 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4977
4978 if (strcmp(loginname, logname_env_var) != 0) {
4979 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
4980 goto out_bad;
4981 }
4982 if (strcmp(homedir, home_env_var) != 0) {
4983 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
4984 goto out_bad;
4985 }
4986 if (local_uid != tmp_uid) {
4987 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4988 'U', tmp_uid, local_uid);
4989 goto out_bad;
4990 }
4991 if (local_gid != tmp_gid) {
4992 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4993 'G', tmp_gid, local_gid);
4994 goto out_bad;
4995 }
4996
4997 return;
4998out_bad:
4999#if 0
eabd1701 5000 (void)job_assumes_zero_p(j, kill2(getppid(), SIGTERM));
ddbbfbc1
A
5001 _exit(EXIT_FAILURE);
5002#else
5003 job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
ef398931 5004#endif
ddbbfbc1 5005}
cf0bacfd 5006
5b0a4722
A
/* Drop privileges in the freshly-forked child to the job's configured
 * user/group, then seed SHELL/HOME/USER/LOGNAME/TMPDIR from the account.
 * Exits the child on any failure. If we are not running as root, this
 * degrades to job_postfork_test_user() sanity checks.
 *
 * The ordering setlogin -> setgid -> initgroups -> setuid is deliberate;
 * see the inline comments below.
 */
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	if (getuid() != 0) {
		return job_postfork_test_user(j);
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = job_getpwnam(j, j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(ESRCH);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			// Diagnostic: report processes running under this orphaned UID.
			job_log_pids_with_weird_uids(j);
			_exit(ESRCH);
		}
	} else {
		// No user/group requested; stay root.
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	if (j->groupname) {
		struct group *gre;

		if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(ESRCH);
		}

		// An explicit GroupName overrides the account's primary group.
		desired_gid = gre->gr_gid;
	}

	if (job_assumes_zero_p(j, setlogin(loginname)) == -1) {
		_exit(EXIT_FAILURE);
	}

	if (job_assumes_zero_p(j, setgid(desired_gid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (likely(!j->no_init_groups)) {
#if 1
		if (job_assumes_zero_p(j, initgroups(loginname, desired_gid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#else
		/* Do our own little initgroups(). We do this to guarantee that we're
		 * always opted into dynamic group resolution in the kernel. initgroups(3)
		 * does not make this guarantee.
		 */
		int groups[NGROUPS], ngroups;

		// A failure here isn't fatal, and we'll still get data we can use.
		(void)job_assumes_zero_p(j, getgrouplist(j->username, desired_gid, groups, &ngroups));

		if (job_assumes_zero_p(j, syscall(SYS_initgroups, ngroups, groups, desired_uid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#endif
	}

	// setuid() last: after this we can no longer change IDs.
	if (job_assumes_zero_p(j, setuid(desired_uid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (likely(r > 0 && r < sizeof(tmpdirpath))) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	// overwrite == 0: never clobber values the job's plist already set.
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
5133
5134void
5135job_setup_attributes(job_t j)
5136{
5137 struct limititem *li;
5138 struct envitem *ei;
5139
ddbbfbc1 5140 if (unlikely(j->setnice)) {
eabd1701 5141 (void)job_assumes_zero_p(j, setpriority(PRIO_PROCESS, 0, j->nice));
5b0a4722
A
5142 }
5143
5144 SLIST_FOREACH(li, &j->limits, sle) {
5145 struct rlimit rl;
5146
eabd1701 5147 if (job_assumes_zero_p(j, getrlimit(li->which, &rl) == -1)) {
5b0a4722
A
5148 continue;
5149 }
5150
5151 if (li->sethard) {
5152 rl.rlim_max = li->lim.rlim_max;
5153 }
5154 if (li->setsoft) {
5155 rl.rlim_cur = li->lim.rlim_cur;
5156 }
5157
5158 if (setrlimit(li->which, &rl) == -1) {
5159 job_log_error(j, LOG_WARNING, "setrlimit()");
ed34e3c3
A
5160 }
5161 }
5162
ddbbfbc1 5163 if (unlikely(!j->inetcompat && j->session_create)) {
5b0a4722
A
5164 launchd_SessionCreate();
5165 }
5166
ddbbfbc1 5167 if (unlikely(j->low_pri_io)) {
eabd1701 5168 (void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE));
5b0a4722 5169 }
95379394
A
5170 if (j->low_priority_background_io) {
5171 (void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_DARWIN_BG, IOPOL_THROTTLE));
5172 }
ddbbfbc1 5173 if (unlikely(j->rootdir)) {
eabd1701
A
5174 (void)job_assumes_zero_p(j, chroot(j->rootdir));
5175 (void)job_assumes_zero_p(j, chdir("."));
5b0a4722
A
5176 }
5177
5178 job_postfork_become_user(j);
5179
ddbbfbc1 5180 if (unlikely(j->workingdir)) {
eabd1701
A
5181 if (chdir(j->workingdir) == -1) {
5182 if (errno == ENOENT || errno == ENOTDIR) {
5183 job_log(j, LOG_ERR, "Job specified non-existent working directory: %s", j->workingdir);
5184 } else {
5185 (void)job_assumes_zero(j, errno);
5186 }
5187 }
5b0a4722
A
5188 }
5189
ddbbfbc1 5190 if (unlikely(j->setmask)) {
5b0a4722
A
5191 umask(j->mask);
5192 }
5193
ddbbfbc1 5194 if (j->stdin_fd) {
eabd1701 5195 (void)job_assumes_zero_p(j, dup2(j->stdin_fd, STDIN_FILENO));
ddbbfbc1
A
5196 } else {
5197 job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
5198 }
5199 job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
5200 job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
ed34e3c3 5201
5b0a4722
A
5202 jobmgr_setup_env_from_other_jobs(j->mgr);
5203
5204 SLIST_FOREACH(ei, &j->env, sle) {
ed34e3c3 5205 setenv(ei->key, ei->value, 1);
5b0a4722
A
5206 }
5207
ddbbfbc1 5208#if !TARGET_OS_EMBEDDED
dcace88f 5209 if (j->jetsam_properties) {
eabd1701 5210 (void)job_assumes_zero(j, proc_setpcontrol(PROC_SETPC_TERMINATE));
ddbbfbc1
A
5211 }
5212#endif
5213
5214#if TARGET_OS_EMBEDDED
dcace88f 5215 if (j->main_thread_priority != 0) {
ddbbfbc1
A
5216 struct sched_param params;
5217 bzero(&params, sizeof(params));
5218 params.sched_priority = j->main_thread_priority;
eabd1701 5219 (void)job_assumes_zero_p(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params));
ddbbfbc1
A
5220 }
5221#endif
5222
5b0a4722
A
5223 /*
5224 * We'd like to call setsid() unconditionally, but we have reason to
5225 * believe that prevents launchd from being able to send signals to
5226 * setuid children. We'll settle for process-groups.
5227 */
5228 if (getppid() != 1) {
eabd1701 5229 (void)job_assumes_zero_p(j, setpgid(0, 0));
5b0a4722 5230 } else {
eabd1701 5231 (void)job_assumes_zero_p(j, setsid());
5b0a4722
A
5232 }
5233}
5234
5235void
5236job_setup_fd(job_t j, int target_fd, const char *path, int flags)
5237{
5238 int fd;
5239
5240 if (!path) {
5241 return;
5242 }
5243
5244 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
5245 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
5246 return;
5247 }
ed34e3c3 5248
eabd1701
A
5249 (void)job_assumes_zero_p(j, dup2(fd, target_fd));
5250 (void)job_assumes_zero(j, runtime_close(fd));
ed34e3c3
A
5251}
5252
/* Compute the next fire time for `ci` from its cron-style specification,
 * insert it into the globally sorted list of calendar events, and re-arm the
 * single shared EVFILT_TIMER kevent for whichever event fires first.
 */
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		// With both weekday and month-day constraints, whichever comes
		// first wins; with only a weekday, it is authoritative.
		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	// Insertion sort into the ascending-by-when_next global list.
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		// ci must want to fire after every other timer, or there are no timers

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	// The shared timer always tracks the earliest pending event.
	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	if (job_assumes_zero_p(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr)) != -1) {
		char time_string[100];
		size_t time_string_len;

		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		// Strip ctime_r's trailing newline before logging.
		if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
5308
eabd1701 5309bool
95379394 5310jobmgr_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
ed34e3c3 5311{
eabd1701
A
5312 jobmgr_t jm = ctx;
5313 jobmgr_log(jm, LOG_ERR, "%s", message);
ed34e3c3 5314
eabd1701 5315 return true;
ed34e3c3
A
5316}
5317
eabd1701 5318bool
95379394 5319job_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
ed34e3c3 5320{
eabd1701
A
5321 job_t j = ctx;
5322 job_log(j, LOG_ERR, "%s", message);
ed34e3c3 5323
eabd1701 5324 return true;
5b0a4722 5325}
ed34e3c3 5326
95379394 5327// ri: NULL = please sample j->p; non-NULL = use this sample
ed34e3c3 5328void
95379394 5329job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status)
ed34e3c3 5330{
95379394
A
5331#if HAVE_SYSTEMSTATS
5332 if (j->anonymous || !j->p) {
eabd1701
A
5333 return;
5334 }
95379394 5335 if (!systemstats_is_enabled()) {
eabd1701
A
5336 return;
5337 }
95379394
A
5338 const char *name;
5339 if (j->cfbundleidentifier) {
5340 name = j->cfbundleidentifier;
5341 } else {
5342 name = j->label;
eabd1701 5343 }
95379394
A
5344 int r = 0;
5345 struct rusage_info_v1 ris;
5346 if (ri == NULL) {
5347 ri = &ris;
5348 r = proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)ri);
5349 }
5350 if (r == -1) {
5351 return;
ed34e3c3 5352 }
95379394
A
5353 job_log_systemstats(j->p, j->uniqueid, runtime_get_uniqueid(), j->mgr->req_pid, j->mgr->req_uniqueid, name, ri, exit_status);
5354#else
5355#pragma unused (j, ri, exit_status)
5356#endif
5357}
5b0a4722 5358
95379394
A
#if HAVE_SYSTEMSTATS
// ri: NULL = don't write fields from ri; non-NULL = use this sample
/* Translate a rusage_info_v1 sample (plus job identity) into a
 * systemstats_process_usage_s record and hand it to systemstats.
 * Field groups are gated on the SYSTEMSTATS_API_VERSION available at
 * build time.
 */
static
void
job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status)
{
	if (!systemstats_is_enabled()) {
		return;
	}

	struct systemstats_process_usage_s info;
	bzero(&info, sizeof(info));
	info.name = name;
	info.pid = pid;
	info.exit_status = exit_status;
	info.uid = getuid();
	info.ppid = getpid();
	info.responsible_pid = req_pid;

	if (likely(ri)) {
		info.macho_uuid = (const uint8_t *)&ri->ri_uuid;
		info.user_time = ri->ri_user_time;
		info.system_time = ri->ri_system_time;
		info.pkg_idle_wkups = ri->ri_pkg_idle_wkups;
		info.interrupt_wkups = ri->ri_interrupt_wkups;
		info.proc_start_abstime = ri->ri_proc_start_abstime;
		info.proc_exit_abstime = ri->ri_proc_exit_abstime;
#if SYSTEMSTATS_API_VERSION >= 20130319
		info.pageins = ri->ri_pageins;
		info.wired_size = ri->ri_wired_size;
		info.resident_size = ri->ri_resident_size;
		info.phys_footprint = ri->ri_phys_footprint;
		// info.purgeablesize = ???
#endif
#if SYSTEMSTATS_API_VERSION >= 20130328
		info.child_user_time = ri->ri_child_user_time;
		info.child_system_time = ri->ri_child_system_time;
		info.child_pkg_idle_wkups = ri->ri_child_pkg_idle_wkups;
		info.child_interrupt_wkups = ri->ri_child_interrupt_wkups;
		info.child_pageins = ri->ri_child_pageins;
		info.child_elapsed_abstime = ri->ri_child_elapsed_abstime;
#endif
	}
#if SYSTEMSTATS_API_VERSION >= 20130410
	info.uniqueid = uniqueid;
	info.parent_uniqueid = parent_uniqueid;
	info.responsible_uniqueid = req_uniqueid;
#endif
	systemstats_write_process_usage(&info);
}
#endif /* HAVE_SYSTEMSTATS */
ed34e3c3 5410
95379394
A
5411struct waiting4attach *
5412waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type)
ed34e3c3 5413{
95379394
A
5414 size_t xtra = strlen(name) + 1;
5415
5416 struct waiting4attach *w4a = malloc(sizeof(*w4a) + xtra);
5417 if (!w4a) {
5418 return NULL;
5419 }
5420
5421 w4a->port = port;
5422 w4a->dest = dest;
5423 w4a->type = type;
5424 (void)strcpy(w4a->name, name);
5425
5426 if (dest) {
5427 LIST_INSERT_HEAD(&_launchd_domain_waiters, w4a, le);
5428 } else {
5429 LIST_INSERT_HEAD(&jm->attaches, w4a, le);
5430 }
5431
5432
5433 (void)jobmgr_assumes_zero(jm, launchd_mport_notify_req(port, MACH_NOTIFY_DEAD_NAME));
5434 return w4a;
5435}
5436
/* Tears down a waiter: unlinks it, cancels the dead-name notification armed in
 * waiting4attach_new(), and releases the port reference.
 */
void
waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a)
{
	jobmgr_log(jm, LOG_DEBUG, "Canceling dead-name notification for waiter port: 0x%x", w4a->port);

	LIST_REMOVE(w4a, le);

	/* Re-registering with MACH_PORT_NULL cancels the outstanding dead-name
	 * notification; the previously-registered send-once right comes back in
	 * `previous` and must be deallocated to avoid leaking it.
	 */
	mach_port_t previous = MACH_PORT_NULL;
	(void)jobmgr_assumes_zero(jm, mach_port_request_notification(mach_task_self(), w4a->port, MACH_NOTIFY_DEAD_NAME, 0, MACH_PORT_NULL, MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous));
	if (previous) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(previous));
	}

	// Drop our reference on the waiter's port, then free the record.
	jobmgr_assumes_zero(jm, launchd_mport_deallocate(w4a->port));
	free(w4a);
}
5453
5454struct waiting4attach *
5455waiting4attach_find(jobmgr_t jm, job_t j)
5456{
5457 char *name2use = (char *)j->label;
5458 if (j->app) {
5459 struct envitem *ei = NULL;
5460 SLIST_FOREACH(ei, &j->env, sle) {
5461 if (strcmp(ei->key, XPC_SERVICE_RENDEZVOUS_TOKEN) == 0) {
5462 name2use = ei->value;
5463 break;
5464 }
5465 }
5466 }
5467
5468 struct waiting4attach *w4ai = NULL;
5469 LIST_FOREACH(w4ai, &jm->attaches, le) {
5470 if (strcmp(name2use, w4ai->name) == 0) {
5471 job_log(j, LOG_DEBUG, "Found attachment: %s", name2use);
5472 break;
5473 }
5474 }
5475
5476 return w4ai;
5477}
5478
/* Core formatter for all job-scoped logging. Prepends context, honors the
 * job's per-job debug flag, and hands the result to launchd_vsyslog().
 * `err`, when non-zero, is an errno value appended as text.
 */
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	// Fall back to placeholder identities when no job is supplied.
	const char *label2use = j ? j->label : "com.apple.launchd.job-unknown";
	const char *mgr2use = j ? j->mgr->name : "com.apple.launchd.jobmanager-unknown";
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	struct launchd_syslog_attr attr = {
		.from_name = launchd_label,
		.about_name = label2use,
		.session_name = mgr2use,
		.priority = pri,
		.from_uid = getuid(),
		.from_pid = getpid(),
		.about_pid = j ? j->p : 0,
	};

	/* Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(2), but before the exec*(3). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	// 200 bytes of slack covers the errno decoration added below.
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s: %d: %s", msg, err, strerror(err));
#else
		// Embedded builds prefix the job label since there is no ASL metadata.
		snprintf(newmsg, newmsgsz, "(%s) %s: %d: %s", label2use, msg, err, strerror(err));
#endif
	} else {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s", msg);
#else
		snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
#endif
	}

	// Temporarily widen the log mask for jobs that requested debug logging.
	if (j && unlikely(j->debug)) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	launchd_vsyslog(&attr, newmsg, ap);

	if (j && unlikely(j->debug)) {
		setlogmask(oldmask);
	}
}
5533
5534void
5b0a4722 5535job_log_error(job_t j, int pri, const char *msg, ...)
ed34e3c3 5536{
5b0a4722 5537 va_list ap;
ed34e3c3 5538
5b0a4722
A
5539 va_start(ap, msg);
5540 job_logv(j, pri, errno, msg, ap);
5541 va_end(ap);
ed34e3c3
A
5542}
5543
5b0a4722
A
5544void
5545job_log(job_t j, int pri, const char *msg, ...)
ed34e3c3 5546{
5b0a4722 5547 va_list ap;
ed34e3c3 5548
5b0a4722
A
5549 va_start(ap, msg);
5550 job_logv(j, pri, 0, msg, ap);
5551 va_end(ap);
5552}
ed34e3c3 5553
5b0a4722
A
#if 0
// Currently compiled out: errno-appending variant of jobmgr_log(). Kept for
// reference alongside job_log_error(), which is the job-scoped equivalent.
void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}
#endif
ed34e3c3 5565
/* Recursively dumps performance statistics for this job manager, all of its
 * sub-managers, and every job they contain, at LOG_PERF priority. When
 * signal_children is true, per-user launchd instances are sent SIGINFO so
 * they dump their own statistics too.
 */
void
jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children)
{
#if HAVE_SYSTEMSTATS
	// Log information for kernel_task and pid 1 launchd.
	if (systemstats_is_enabled() && pid1_magic && jm == root_jobmgr) {
#if SYSTEMSTATS_API_VERSION >= 20130328
		// Newer API: energy statistics are gated by a systemstats property.
		if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_SHOULD_LOG_ENERGY_STATISTICS)) {
			systemstats_write_intel_energy_statistics(NULL);
		}
#else
		systemstats_write_intel_energy_statistics(NULL);
#endif
		job_log_systemstats(0, 0, 0, 0, 0, "com.apple.kernel", NULL, -1);
		job_log_systemstats(1, 1, 0, 1, 1, "com.apple.launchd", NULL, -1);
	}
#endif
	// Depth-first recursion into sub-managers before logging our own jobs.
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_perf_statistics(jmi, signal_children);
	}

	// Identify what kind of domain this manager represents.
	if (jm->xpc_singleton) {
		jobmgr_log(jm, LOG_PERF, "XPC Singleton Domain: %s", jm->shortdesc);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
		jobmgr_log(jm, LOG_PERF, "XPC Private Domain: %s", jm->owner);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) {
		jobmgr_log(jm, LOG_PERF, "Created via bootstrap_subset()");
	}

	jobmgr_log(jm, LOG_PERF, "Jobs in job manager:");

	job_t ji = NULL;
	LIST_FOREACH(ji, &jm->jobs, sle) {
		job_log_perf_statistics(ji, NULL, -1);
		// Prefix match: per-user launchds are labeled com.apple.launchd.peruser.<uid>.
		if (unlikely(signal_children) && unlikely(strstr(ji->label, "com.apple.launchd.peruser.") == ji->label)) {
			jobmgr_log(jm, LOG_PERF, "Sending SIGINFO to peruser launchd %d", ji->p);
			kill(ji->p, SIGINFO);
		}
	}

	jobmgr_log(jm, LOG_PERF, "End of job list.");
}
5609
5b0a4722
A
5610void
5611jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
5612{
5613 va_list ap;
ed34e3c3 5614
5b0a4722
A
5615 va_start(ap, msg);
5616 jobmgr_logv(jm, pri, 0, msg, ap);
5617 va_end(ap);
ed34e3c3
A
5618}
5619
5b0a4722
A
/* Core formatter for job-manager-scoped logging. Prefixes the manager's name
 * and recurses up the manager hierarchy so the final message carries the full
 * ancestry; only the root manager actually emits to syslog.
 */
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	if (!jm) {
		jm = root_jobmgr;
	}

	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	// Worst case: every character is '%' and gets doubled, plus NUL.
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	/* Escape '%' in the manager name: the combined string is passed up the
	 * chain as a *format* string, so stray conversions must be neutralized.
	 */
	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		// errno was already folded into newmsg; pass 0 upward.
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct launchd_syslog_attr attr = {
			.from_name = launchd_label,
			.about_name = launchd_label,
			.session_name = jm->name,
			.priority = pri,
			.from_uid = getuid(),
			.from_pid = getpid(),
			.about_pid = getpid(),
		};

		launchd_vsyslog(&attr, newmsg, ap);
	}
}
5666
ddbbfbc1
A
/* Context for calendarinterval_new_from_obj_dict_walk(): carries the job (for
 * logging) and the struct tm being populated from the plist dictionary. The
 * walker signals a parse error by setting tmptm.tm_sec to -1.
 */
struct cal_dict_walk {
	job_t j;
	struct tm tmptm;
};
5671
5b0a4722
A
/* launch_data_dict_iterate() callback: validates one StartCalendarInterval
 * key/value pair and stores it into the struct tm in the walk context.
 * Out-of-range values set tm_sec = -1, which the caller checks afterwards.
 */
void
calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
{
	struct cal_dict_walk *cdw = context;
	struct tm *tmptm = &cdw->tmptm;
	job_t j = cdw->j;
	int64_t val;

	if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
		// hack to let caller know something went wrong
		tmptm->tm_sec = -1;
		return;
	}

	val = launch_data_get_integer(obj);

	// Note: a negative value is only warned about, not treated as fatal.
	if (val < 0) {
		job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
		if (val > 59) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_min = (typeof(tmptm->tm_min)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
		if (val > 23) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
		// Day of month is 1-based, unlike the other fields.
		if (val < 1 || val > 31) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
		// 7 is accepted as a synonym for Sunday (cron convention).
		if (val > 7) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
		if (val > 12) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
			tmptm->tm_mon -= 1; // 4798263 cron compatibility
		}
	}
}
5728
5729bool
5730calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5731{
ddbbfbc1 5732 struct cal_dict_walk cdw;
5b0a4722 5733
ddbbfbc1
A
5734 cdw.j = j;
5735 memset(&cdw.tmptm, 0, sizeof(0));
5b0a4722 5736
ddbbfbc1
A
5737 cdw.tmptm.tm_min = -1;
5738 cdw.tmptm.tm_hour = -1;
5739 cdw.tmptm.tm_mday = -1;
5740 cdw.tmptm.tm_wday = -1;
5741 cdw.tmptm.tm_mon = -1;
5b0a4722
A
5742
5743 if (!job_assumes(j, obj != NULL)) {
5744 return false;
5745 }
5746
ddbbfbc1 5747 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5b0a4722
A
5748 return false;
5749 }
5750
ddbbfbc1 5751 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5b0a4722 5752
ddbbfbc1 5753 if (unlikely(cdw.tmptm.tm_sec == -1)) {
5b0a4722
A
5754 return false;
5755 }
5756
ddbbfbc1 5757 return calendarinterval_new(j, &cdw.tmptm);
5b0a4722
A
5758}
5759
5760bool
5761calendarinterval_new(job_t j, struct tm *w)
5762{
5763 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5764
5765 if (!job_assumes(j, ci != NULL)) {
5766 return false;
5767 }
5768
5769 ci->when = *w;
5770 ci->job = j;
5771
5772 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
eabd1701 5773
5b0a4722
A
5774 calendarinterval_setalarm(j, ci);
5775
ddbbfbc1 5776 runtime_add_weak_ref();
5b0a4722
A
5777
5778 return true;
5779}
5780
5781void
5782calendarinterval_delete(job_t j, struct calendarinterval *ci)
5783{
5784 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
5785 LIST_REMOVE(ci, global_sle);
5786
5787 free(ci);
5788
ddbbfbc1 5789 runtime_del_weak_ref();
5b0a4722
A
5790}
5791
5792void
5793calendarinterval_sanity_check(void)
5794{
5795 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5796 time_t now = time(NULL);
5797
ddbbfbc1 5798 if (unlikely(ci && (ci->when_next < now))) {
eabd1701 5799 (void)jobmgr_assumes_zero_p(root_jobmgr, raise(SIGUSR1));
5b0a4722
A
5800 }
5801}
5802
/* Fired when the calendar alarm expires: starts every job whose next fire
 * time has arrived, and re-arms each interval for its subsequent occurrence.
 */
void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		// The list is kept sorted by fire time, so we can stop at the
		// first entry that is still in the future.
		if (ci->when_next > now) {
			break;
		}

		// Remove before re-arming; setalarm re-inserts at the new position.
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
5823
5824bool
eabd1701 5825socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt)
5b0a4722
A
5826{
5827 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5828
5829 if (!job_assumes(j, sg != NULL)) {
5830 return false;
5831 }
ed34e3c3
A
5832
5833 sg->fds = calloc(1, fd_cnt * sizeof(int));
5834 sg->fd_cnt = fd_cnt;
ed34e3c3 5835
5b0a4722 5836 if (!job_assumes(j, sg->fds != NULL)) {
ed34e3c3
A
5837 free(sg);
5838 return false;
5839 }
5840
5841 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
f36da725 5842 strcpy(sg->name_init, name);
ed34e3c3
A
5843
5844 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5845
ddbbfbc1 5846 runtime_add_weak_ref();
5b0a4722 5847
ed34e3c3
A
5848 return true;
5849}
5850
/* Closes every fd in the group, unlinks the group from the job, and frees it.
 */
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		/* Compiled out: historical attempt to unlink AF_UNIX socket files on
		 * teardown (see radar 5480306 below).
		 */
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		// 5480306
		if (job_assumes_zero(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			(void)job_assumes(j, unlink(sun->sun_path) != -1);
			// We might conditionally need to delete a directory here
		}
#endif
		(void)job_assumes_zero_p(j, runtime_close(sg->fds[i]));
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	// Balances the runtime_add_weak_ref() taken in socketgroup_new().
	runtime_del_weak_ref();
}
5879
/* Adds or removes EVFILT_READ kevents for every fd in the group, logging the
 * fd list. `do_add` selects EV_ADD versus EV_DELETE.
 */
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	struct kevent kev[sg->fd_cnt];
	char buf[10000];
	unsigned int i, buf_off = 0;

	// Fix: buf was previously uninitialized when fd_cnt == 0, yet still
	// printed with %s below.
	buf[0] = '\0';

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		/* Fix: snprintf returns the would-be length, so the old unchecked
		 * `buf_off += snprintf(...)` could push buf_off past sizeof(buf),
		 * making the next `sizeof(buf) - buf_off` underflow. Stop appending
		 * once the log buffer is full; the kevent changes still happen.
		 */
		if (buf_off < sizeof(buf)) {
			int printed = snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
			if (printed > 0) {
				buf_off += (unsigned int)printed;
			}
		}
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	(void)job_assumes_zero_p(j, kevent_bulk_mod(kev, sg->fd_cnt));

	// kevent_bulk_mod uses EV_RECEIPT semantics: each entry reports its own
	// status in kev[i].data.
	for (i = 0; i < sg->fd_cnt; i++) {
		(void)job_assumes(j, kev[i].flags & EV_ERROR);
		errno = (typeof(errno)) kev[i].data;
		(void)job_assumes_zero(j, kev[i].data);
	}
}
5902
// Stops watching the group's fds (e.g. while the job is running and owns them).
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, false);
}
ed34e3c3 5908
5b0a4722
A
// Resumes watching the group's fds for readability so the job can be launched
// on demand.
void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, true);
}
5914
// A watched socket became readable: dispatch the job so it can be started.
void
socketgroup_callback(job_t j)
{
	job_dispatch(j, true);
}
5920
5921bool
eabd1701 5922envitem_new(job_t j, const char *k, const char *v, bool global)
ed34e3c3 5923{
eabd1701
A
5924 if (global && !launchd_allow_global_dyld_envvars) {
5925 if (strncmp("DYLD_", k, sizeof("DYLD_") - 1) == 0) {
5926 job_log(j, LOG_ERR, "Ignoring global environment variable submitted by job (variable=value): %s=%s", k, v);
5927 return false;
5928 }
5929 }
5930
ed34e3c3
A
5931 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
5932
5b0a4722 5933 if (!job_assumes(j, ei != NULL)) {
ed34e3c3 5934 return false;
5b0a4722 5935 }
ed34e3c3 5936
f36da725
A
5937 strcpy(ei->key_init, k);
5938 ei->value = ei->key_init + strlen(k) + 1;
ed34e3c3
A
5939 strcpy(ei->value, v);
5940
5941 if (global) {
ddbbfbc1
A
5942 if (SLIST_EMPTY(&j->global_env)) {
5943 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5944 }
ed34e3c3
A
5945 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5946 } else {
5947 SLIST_INSERT_HEAD(&j->env, ei, sle);
5948 }
5949
5b0a4722
A
5950 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
5951
ed34e3c3
A
5952 return true;
5953}
5954
5955void
5b0a4722 5956envitem_delete(job_t j, struct envitem *ei, bool global)
ed34e3c3
A
5957{
5958 if (global) {
5959 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
ddbbfbc1
A
5960 if (SLIST_EMPTY(&j->global_env)) {
5961 LIST_REMOVE(j, global_env_sle);
5962 }
ed34e3c3
A
5963 } else {
5964 SLIST_REMOVE(&j->env, ei, envitem, sle);
5965 }
5966
5967 free(ei);
5968}
5969
5970void
5971envitem_setup(launch_data_t obj, const char *key, void *context)
5972{
5b0a4722 5973 job_t j = context;
ed34e3c3 5974
5b0a4722 5975 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
ed34e3c3 5976 return;
5b0a4722 5977 }
ed34e3c3 5978
dcace88f 5979 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
eabd1701 5980 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
ddbbfbc1
A
5981 } else {
5982 job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5983 }
5984}
5985
ed34e3c3 5986bool
5b0a4722 5987limititem_update(job_t j, int w, rlim_t r)
ed34e3c3
A
5988{
5989 struct limititem *li;
5990
5991 SLIST_FOREACH(li, &j->limits, sle) {
5b0a4722 5992 if (li->which == w) {
ed34e3c3 5993 break;
5b0a4722 5994 }
ed34e3c3
A
5995 }
5996
5997 if (li == NULL) {
5998 li = calloc(1, sizeof(struct limititem));
5999
5b0a4722 6000 if (!job_assumes(j, li != NULL)) {
ed34e3c3 6001 return false;
5b0a4722
A
6002 }
6003
6004 SLIST_INSERT_HEAD(&j->limits, li, sle);
ed34e3c3
A
6005
6006 li->which = w;
6007 }
6008
6009 if (j->importing_hard_limits) {
6010 li->lim.rlim_max = r;
6011 li->sethard = true;
6012 } else {
6013 li->lim.rlim_cur = r;
6014 li->setsoft = true;
6015 }
6016
6017 return true;
6018}
6019
6020void
5b0a4722 6021limititem_delete(job_t j, struct limititem *li)
ed34e3c3
A
6022{
6023 SLIST_REMOVE(&j->limits, li, limititem, sle);
6024
6025 free(li);
6026}
6027
#if HAVE_SANDBOX
/* launch_data_dict_iterate() callback: imports one SandboxFlags entry. Only
 * boolean true values are honored; currently just the "Named" flag maps onto
 * SANDBOX_NAMED in the job's seatbelt flags.
 */
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	// A false flag is simply absent — nothing to record.
	if (launch_data_get_bool(obj) == false) {
		return;
	}

	if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
5b0a4722 6048
ed34e3c3
A
6049void
6050limititem_setup(launch_data_t obj, const char *key, void *context)
6051{
5b0a4722 6052 job_t j = context;
ddbbfbc1 6053 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
ed34e3c3
A
6054 rlim_t rl;
6055
5b0a4722 6056 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
ed34e3c3 6057 return;
5b0a4722 6058 }
ed34e3c3
A
6059
6060 rl = launch_data_get_integer(obj);
6061
6062 for (i = 0; i < limits_cnt; i++) {
5b0a4722 6063 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
ed34e3c3 6064 break;
5b0a4722 6065 }
ed34e3c3
A
6066 }
6067
5b0a4722 6068 if (i == limits_cnt) {
ed34e3c3 6069 return;
5b0a4722 6070 }
ed34e3c3
A
6071
6072 limititem_update(j, launchd_keys2limits[i].val, rl);
6073}
6074
/* Decides whether an exited job should be removed rather than kept around for
 * relaunch. Returns true when the job has no further reason to exist.
 */
bool
job_useless(job_t j)
{
	// One-shot jobs that have already run are done — except legacy LaunchServices
	// jobs that still hold their port.
	if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
		if (j->legacy_LS_job && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->shutdown_monitor) {
		// The shutdown monitor is kept alive regardless.
		return false;
	} else if (j->mgr->shutting_down && !j->mgr->parentmgr) {
		// Root manager is shutting down: exiting jobs are not restarted.
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		if (total_children == 0 && !j->anonymous) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
		}
		return true;
	} else if (j->legacy_mach_job) {
		// Legacy Mach jobs only persist while they expose services and checked in.
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	} else {
		/* If the job's executable does not have any valid architectures (for
		 * example, if it's a PowerPC-only job), then we don't even bother
		 * trying to relaunch it, as we have no reasonable expectation that
		 * the situation will change.
		 *
		 * <rdar://problem/9106979>
		 */
		if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
			job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
			return true;
		}
	}

	return false;
}
6119
/* Evaluates all of the job's KeepAlive conditions and returns true if any of
 * them currently demands that the job (re)start.
 */
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
	// kextd is exempted from the global on-demand override on internal builds.
	bool is_not_kextd = (launchd_apple_internal || (strcmp(j->label, "com.apple.kextd") != 0));

	if (unlikely(j->mgr->shutting_down)) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
		return false;
	}

	if (unlikely(j->needs_kickoff)) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	// Any queued message on one of the job's Mach services warrants a start.
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
				(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}

	/* TODO: Coalesce external events and semaphore items, since they're basically
	 * the same thing.
	 */
	struct externalevent *ei = NULL;
	LIST_FOREACH(ei, &j->events, job_le) {
		if (ei->state == ei->wanted_state) {
			return true;
		}
	}

	/* Each pair of cases below shares a body: the "positive" case sets
	 * wanted_state = true and deliberately falls through to the comparison.
	 */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			// fall through
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			// fall through
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case CRASHED:
			wanted_state = true;
			// fall through
		case DID_NOT_CRASH:
			if (j->crashed == wanted_state) {
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			// fall through
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(NULL, si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			// fall through
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(NULL, si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		}
	}

	return false;
}
6233
5b0a4722
A
6234const char *
6235job_active(job_t j)
ed34e3c3 6236{
dcace88f
A
6237 if (j->p && j->shutdown_monitor) {
6238 return "Monitoring shutdown";
6239 }
5b0a4722
A
6240 if (j->p) {
6241 return "PID is still valid";
6242 }
ed34e3c3 6243
5b0a4722
A
6244 if (j->priv_port_has_senders) {
6245 return "Privileged Port still has outstanding senders";
ed34e3c3
A
6246 }
6247
b97faa4c 6248 struct machservice *ms;
ed34e3c3 6249 SLIST_FOREACH(ms, &j->machservices, sle) {
eabd1701
A
6250 /* If we've simulated an exit, we mark the job as non-active, even
6251 * though doing so will leave it in an unsafe state. We do this so that
6252 * shutdown can proceed. See <rdar://problem/11126530>.
eabd1701
A
6253 */
6254 if (!j->workaround9359725 && ms->recv && machservice_active(ms)) {
6255 job_log(j, LOG_INFO, "Mach service is still active: %s", ms->name);
5b0a4722
A
6256 return "Mach service is still active";
6257 }
ed34e3c3
A
6258 }
6259
5b0a4722 6260 return NULL;
ed34e3c3
A
6261}
6262
5b0a4722
A
/* Puts a receive-right service port back into launchd's port set so demand
 * messages wake us. KERN_INVALID_RIGHT here indicates the job still holds the
 * right momentarily; the race is flagged for later retry via recv_race_hack.
 */
void
machservice_watch(job_t j, struct machservice *ms)
{
	if (ms->recv) {
		if (job_assumes_zero(j, runtime_add_mport(ms->port, NULL)) == KERN_INVALID_RIGHT) {
			ms->recv_race_hack = true;
		}
	}
}
6272
5b0a4722
A
// Removes a service port from launchd's port set while the job owns it.
void
machservice_ignore(job_t j, struct machservice *ms)
{
	/* We only add ports whose receive rights we control into the port set, so
	 * don't attempt to remove te service from the port set if we didn't put it
	 * there in the first place. Otherwise, we could wind up trying to access a
	 * bogus index (like MACH_PORT_DEAD) or zeroing a valid one out.
	 *
	 * <rdar://problem/10898014>
	 */
	if (ms->recv) {
		(void)job_assumes_zero(j, runtime_remove_mport(ms->port));
	}
}
6287
/* Destroys and recreates the service's port, bumping the generation counter.
 * The port-hash entry is re-keyed because the port name changes.
 */
void
machservice_resetport(job_t j, struct machservice *ms)
{
	// Unhash first: the old port name is about to become invalid.
	LIST_REMOVE(ms, port_hash_sle);
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	// New generation, fresh receive right + send right, then re-hash.
	ms->gen_num++;
	(void)job_assumes_zero(j, launchd_mport_create_recv(&ms->port));
	(void)job_assumes_zero(j, launchd_mport_make_send(ms->port));
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
6300
eabd1701
A
/* Stamps the port's kernel context with the first bytes of the owning
 * program's basename, as a debugging breadcrumb visible in port dumps.
 */
void
machservice_stamp_port(job_t j, struct machservice *ms)
{
	mach_port_context_t ctx = 0;
	char *where2get = j->prog ? j->prog : j->argv[0];

	// Use only the basename of the program path.
	char *prog = NULL;
	if ((prog = strrchr(where2get, '/'))) {
		prog++;
	} else {
		prog = where2get;
	}

	/* Pack up to sizeof(ctx) characters into the integer context; strncpy
	 * deliberately may not NUL-terminate here — ctx is a fixed-size tag, not
	 * a C string.
	 */
	(void)strncpy((char *)&ctx, prog, sizeof(ctx));
#if __LITTLE_ENDIAN__
	// Byte-swap so the tag reads in string order when displayed as hex.
#if __LP64__
	ctx = OSSwapBigToHostInt64(ctx);
#else
	ctx = OSSwapBigToHostInt32(ctx);
#endif
#endif

	(void)job_assumes_zero(j, mach_port_set_context(mach_task_self(), ms->port, ctx));
}
6325
/* Creates a Mach service record for job `j`. If *serviceport is MACH_PORT_NULL,
 * launchd allocates the receive right (on-demand service) and returns the new
 * port through *serviceport; otherwise the caller-supplied send right is
 * adopted and the service is marked already active. Returns NULL on failure.
 */
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	/* Don't create new MachServices for dead ports. This is primarily for
	 * clients who use bootstrap_register2(). They can pass in a send right, but
	 * then that port can immediately go dead. Hilarity ensues.
	 *
	 * <rdar://problem/10898014>
	 */
	if (*serviceport == MACH_PORT_DEAD) {
		return NULL;
	}

	// Name is stored inline after the struct.
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
	if (!job_assumes(j, ms != NULL)) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->gen_num = 1;
	ms->per_pid = pid_local;

	if (likely(*serviceport == MACH_PORT_NULL)) {
		// launchd owns the receive right: classic on-demand service.
		if (job_assumes_zero(j, launchd_mport_create_recv(&ms->port)) != KERN_SUCCESS) {
			goto out_bad;
		}

		if (job_assumes_zero(j, launchd_mport_make_send(ms->port)) != KERN_SUCCESS) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		// Caller supplied the port; launchd only tracks it.
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);

	jobmgr_t where2put = j->mgr;
	// XPC domains are separate from Mach bootstraps.
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		// In a flat namespace, non-subset services register at the root.
		if (launchd_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2put = root_jobmgr;
		}
	}

	/* Don't allow MachServices added by multiple-instance jobs to be looked up
	 * by others. We could just do this with a simple bit, but then we'd have to
	 * uniquify the names ourselves to avoid collisions. This is just easier.
	 */
	if (!j->dedicated_instance) {
		LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	}
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	if (ms->recv) {
		machservice_stamp_port(j, ms);
	}

	job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);

	return ms;
out_bad2:
	// Undo the receive right allocated above.
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
out_bad:
	free(ms);
	return NULL;
}
6396
dcace88f
A
6397struct machservice *
6398machservice_new_alias(job_t j, struct machservice *orig)
6399{
6400 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
6401 if (job_assumes(j, ms != NULL)) {
6402 strcpy((char *)ms->name, orig->name);
6403 ms->alias = orig;
6404 ms->job = j;
6405
6406 LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
6407 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
6408 jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
6409 }
6410
6411 return ms;
6412}
dcace88f 6413
ed34e3c3
A
6414bootstrap_status_t
6415machservice_status(struct machservice *ms)
6416{
dcace88f 6417 ms = ms->alias ? ms->alias : ms;
ed34e3c3
A
6418 if (ms->isActive) {
6419 return BOOTSTRAP_STATUS_ACTIVE;
6420 } else if (ms->job->ondemand) {
6421 return BOOTSTRAP_STATUS_ON_DEMAND;
6422 } else {
6423 return BOOTSTRAP_STATUS_INACTIVE;
6424 }
6425}
6426
/* Installs the crash/guard/resource exception handler for a task. The handler
 * is, in order of preference: the job's named alternate handler, launchd's
 * internal handler, or the global exception server. With target_task == 0 and
 * pid-1 privileges, the host-level ports are set instead.
 */
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (unlikely(j->alt_exc_handler)) {
		// Job names another service as its exception handler.
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (likely(ms)) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (unlikely(j->internal_exc_handler)) {
		exc_port = runtime_get_kernel_port();
	} else if (unlikely(!exc_port)) {
		// No handler registered anywhere: nothing to install.
		return;
	}

	// Pick the thread-state flavor matching the build architecture.
#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (likely(target_task)) {
		kern_return_t kr = task_set_exception_ports(target_task, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f);
		if (kr) {
			// A task can exit between spawn and this call; that's benign.
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			} else {
				job_log(j, LOG_WARNING, "Task died before exception port could be set.");
			}
		}
	} else if (pid1_magic && the_exception_server) {
		// No specific task: install at host scope (pid 1 only).
		mach_port_t mhp = mach_host_self();
		(void)job_assumes_zero(j, host_set_exception_ports(mhp, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f));
		(void)job_assumes_zero(j, launchd_mport_deallocate(mhp));
	}
}
6472
6473void
ddbbfbc1 6474job_set_exception_port(job_t j, mach_port_t port)
5b0a4722 6475{
ddbbfbc1 6476 if (unlikely(!the_exception_server)) {
5b0a4722
A
6477 the_exception_server = port;
6478 job_setup_exception_port(j, 0);
6479 } else {
6480 job_log(j, LOG_WARNING, "The exception server is already claimed!");
6481 }
6482}
6483
6484void
6485machservice_setup_options(launch_data_t obj, const char *key, void *context)
6486{
6487 struct machservice *ms = context;
6488 mach_port_t mhp = mach_host_self();
6489 int which_port;
6490 bool b;
6491
6492 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
ed34e3c3
A
6493 return;
6494 }
6495
6496 switch (launch_data_get_type(obj)) {
6497 case LAUNCH_DATA_INTEGER:
eabd1701 6498 which_port = (int)launch_data_get_integer(obj); // XXX we should bound check this...
ed34e3c3 6499 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
5b0a4722
A
6500 switch (which_port) {
6501 case TASK_KERNEL_PORT:
6502 case TASK_HOST_PORT:
6503 case TASK_NAME_PORT:
6504 case TASK_BOOTSTRAP_PORT:
ddbbfbc1 6505 /* I find it a little odd that zero isn't reserved in the header.
eabd1701
A
6506 * Normally Mach is fairly good about this convention...
6507 */
5b0a4722
A
6508 case 0:
6509 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
6510 break;
6511 default:
6512 ms->special_port_num = which_port;
6513 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
6514 break;
6515 }
ddbbfbc1 6516 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
5b0a4722 6517 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
eabd1701 6518 (void)job_assumes_zero(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)));
5b0a4722
A
6519 } else {
6520 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
6521 }
ed34e3c3
A
6522 }
6523 case LAUNCH_DATA_BOOL:
6524 b = launch_data_get_bool(obj);
5b0a4722
A
6525 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
6526 ms->debug_on_close = b;
6527 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
ed34e3c3
A
6528 ms->reset = b;
6529 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
6530 ms->hide = b;
6531 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
ddbbfbc1 6532 job_set_exception_port(ms->job, ms->port);
ed34e3c3
A
6533 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
6534 ms->kUNCServer = b;
eabd1701 6535 (void)job_assumes_zero(ms->job, host_set_UNDServer(mhp, ms->port));
ed34e3c3
A
6536 }
6537 break;
ddbbfbc1 6538 case LAUNCH_DATA_STRING:
dcace88f 6539 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
ddbbfbc1 6540 const char *option = launch_data_get_string(obj);
dcace88f 6541 if (strcasecmp(option, "One") == 0) {
ddbbfbc1 6542 ms->drain_one_on_crash = true;
dcace88f 6543 } else if (strcasecmp(option, "All") == 0) {
ddbbfbc1
A
6544 ms->drain_all_on_crash = true;
6545 }
6546 }
6547 break;
5b0a4722 6548 case LAUNCH_DATA_DICTIONARY:
95379394
A
6549 if (launch_data_dict_get_count(obj) == 0) {
6550 job_set_exception_port(ms->job, ms->port);
6551 }
5b0a4722 6552 break;
ed34e3c3
A
6553 default:
6554 break;
6555 }
6556
eabd1701 6557 (void)job_assumes_zero(ms->job, launchd_mport_deallocate(mhp));
ed34e3c3
A
6558}
6559
6560void
6561machservice_setup(launch_data_t obj, const char *key, void *context)
6562{
5b0a4722 6563 job_t j = context;
ed34e3c3
A
6564 struct machservice *ms;
6565 mach_port_t p = MACH_PORT_NULL;
6566
ddbbfbc1 6567 if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
ed34e3c3
A
6568 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
6569 return;
6570 }
6571
ddbbfbc1 6572 if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
ed34e3c3
A
6573 return;
6574 }
6575
6576 ms->isActive = false;
dcace88f 6577 ms->upfront = true;
eabd1701 6578
ed34e3c3
A
6579 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
6580 launch_data_dict_iterate(obj, machservice_setup_options, ms);
6581 }
95379394
A
6582
6583 kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
6584 (void)job_assumes_zero(j, kr);
ed34e3c3
A
6585}
6586
5b0a4722
A
6587jobmgr_t
6588jobmgr_do_garbage_collection(jobmgr_t jm)
ed34e3c3 6589{
ddbbfbc1 6590 jobmgr_t jmi = NULL, jmn = NULL;
5b0a4722
A
6591 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6592 jobmgr_do_garbage_collection(jmi);
6593 }
ed34e3c3 6594
dcace88f 6595 if (!jm->shutting_down) {
5b0a4722
A
6596 return jm;
6597 }
eabd1701 6598
dcace88f 6599 if (SLIST_EMPTY(&jm->submgrs)) {
ddbbfbc1
A
6600 jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
6601 } else {
6602 jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
5c88273d
A
6603 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
6604 jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
6605 }
5b0a4722 6606 }
dcace88f
A
6607
6608 size_t actives = 0;
6609 job_t ji = NULL, jn = NULL;
6610 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
6611 if (ji->anonymous) {
6612 continue;
6613 }
eabd1701
A
6614
6615 // Let the shutdown monitor be up until the very end.
dcace88f
A
6616 if (ji->shutdown_monitor) {
6617 continue;
5b0a4722
A
6618 }
6619
dcace88f
A
6620 /* On our first pass through, open a transaction for all the jobs that
6621 * need to be dirty at shutdown. We'll close these transactions once the
6622 * jobs that do not need to be dirty at shutdown have all exited.
6623 */
6624 if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
6625 job_open_shutdown_transaction(ji);
6626 }
6627
6628 const char *active = job_active(ji);
6629 if (!active) {
6630 job_remove(ji);
6631 } else {
6632 job_log(ji, LOG_DEBUG, "Job is active: %s", active);
6633 job_stop(ji);
6634
eabd1701 6635 if (!ji->dirty_at_shutdown) {
dcace88f 6636 actives++;
ddbbfbc1 6637 }
dcace88f
A
6638
6639 if (ji->clean_kill) {
6640 job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
ddbbfbc1 6641 } else {
dcace88f
A
6642 job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
6643 }
6644 }
6645 }
6646
6647 jm->shutdown_jobs_dirtied = true;
6648 if (actives == 0) {
6649 if (!jm->shutdown_jobs_cleaned) {
eabd1701
A
6650 /* Once all normal jobs have exited, we clean the dirty-at-shutdown
6651 * jobs and make them into normal jobs so that the above loop will
6652 * handle them appropriately.
6653 */
dcace88f 6654 LIST_FOREACH(ji, &jm->jobs, sle) {
eabd1701
A
6655 if (ji->anonymous) {
6656 continue;
6657 }
6658
6659 if (!job_active(ji)) {
6660 continue;
ddbbfbc1 6661 }
eabd1701
A
6662
6663 if (ji->shutdown_monitor) {
6664 continue;
6665 }
6666
6667 job_close_shutdown_transaction(ji);
6668 actives++;
ddbbfbc1 6669 }
dcace88f
A
6670
6671 jm->shutdown_jobs_cleaned = true;
eabd1701 6672 }
dcace88f 6673
eabd1701
A
6674 if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
6675 /* We may be in a situation where the shutdown monitor is all that's
6676 * left, in which case we want to stop it. Like dirty-at-shutdown
6677 * jobs, we turn it back into a normal job so that the main loop
6678 * treats it appropriately.
6679 *
6680 * See:
6681 * <rdar://problem/10756306>
6682 * <rdar://problem/11034971>
6683 * <rdar://problem/11549541>
6684 */
6685 if (jm->monitor_shutdown && _launchd_shutdown_monitor) {
6686 /* The rest of shutdown has completed, so we can kill the shutdown
6687 * monitor now like it was any other job.
6688 */
6689 _launchd_shutdown_monitor->shutdown_monitor = false;
6690
6691 job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
6692 job_stop(_launchd_shutdown_monitor);
6693 _launchd_shutdown_monitor = NULL;
6694 } else {
6695 jobmgr_log(jm, LOG_DEBUG, "Removing.");
6696 jobmgr_remove(jm);
6697 return NULL;
6698 }
ddbbfbc1 6699 }
dcace88f
A
6700 }
6701
eabd1701 6702 return jm;
ddbbfbc1 6703}
5b0a4722 6704
ddbbfbc1
A
6705void
6706jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6707{
eabd1701
A
6708 /* I maintain that stray processes should be at the mercy of launchd during
6709 * shutdown, but nevertheless, things like diskimages-helper can stick
6710 * around, and SIGKILLing them can result in data loss. So we send SIGTERM
6711 * to all the strays and don't wait for them to exit before moving on.
ddbbfbc1
A
6712 *
6713 * See rdar://problem/6562592
6714 */
6715 size_t i = 0;
dcace88f
A
6716 for (i = 0; i < np; i++) {
6717 if (p[i] != 0) {
ddbbfbc1 6718 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
eabd1701 6719 (void)jobmgr_assumes_zero_p(jm, kill2(p[i], SIGTERM));
ddbbfbc1
A
6720 }
6721 }
ed34e3c3
A
6722}
6723
6724void
ddbbfbc1 6725jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
ed34e3c3 6726{
dcace88f
A
6727 size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6728 pid_t *pids = NULL;
6729 int i = 0, kp_cnt = 0;
eabd1701 6730
ddbbfbc1 6731 if (likely(jm->parentmgr || !pid1_magic)) {
5b0a4722
A
6732 return;
6733 }
ed34e3c3 6734
dcace88f 6735 if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
5b0a4722
A
6736 return;
6737 }
ddbbfbc1
A
6738
6739 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6740
eabd1701 6741 if (jobmgr_assumes_zero_p(jm, (kp_cnt = proc_listallpids(pids, len))) == -1) {
5b0a4722
A
6742 goto out;
6743 }
6744
ddbbfbc1 6745 pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
5b0a4722 6746 for (i = 0; i < kp_cnt; i++) {
dcace88f
A
6747 struct proc_bsdshortinfo proc;
6748 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6749 if (errno != ESRCH) {
eabd1701 6750 (void)jobmgr_assumes_zero(jm, errno);
dcace88f
A
6751 }
6752
6753 kp_skipped++;
6754 continue;
6755 }
eabd1701 6756
dcace88f
A
6757 pid_t p_i = pids[i];
6758 pid_t pp_i = proc.pbsi_ppid;
6759 pid_t pg_i = proc.pbsi_pgid;
6760 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6761 const char *n = proc.pbsi_comm;
5b0a4722 6762
ddbbfbc1
A
6763 if (unlikely(p_i == 0 || p_i == 1)) {
6764 kp_skipped++;
5b0a4722 6765 continue;
ed34e3c3 6766 }
dcace88f 6767
eabd1701 6768 if (_launchd_shutdown_monitor && pp_i == _launchd_shutdown_monitor->p) {
dcace88f
A
6769 kp_skipped++;
6770 continue;
6771 }
eabd1701
A
6772
6773 // We might have some jobs hanging around that we've decided to shut down in spite of.
ddbbfbc1 6774 job_t j = jobmgr_find_by_pid(jm, p_i, false);
dcace88f 6775 if (!j || (j && j->anonymous)) {
ddbbfbc1 6776 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
eabd1701 6777
ddbbfbc1 6778 int status = 0;
dcace88f 6779 if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
eabd1701 6780 if (jobmgr_assumes_zero(jm, waitpid(p_i, &status, WNOHANG)) == 0) {
ddbbfbc1
A
6781 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6782 }
6783 kp_skipped++;
6784 } else {
6785 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6786 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6787 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6788 * hints to the kernel along the way, so that it could shutdown certain subsystems when
6789 * their userspace emissaries go away, before the call to reboot(2).
6790 */
dcace88f 6791 if (leader && leader->ignore_pg_at_shutdown) {
ddbbfbc1
A
6792 kp_skipped++;
6793 } else {
6794 ps[i] = p_i;
6795 }
6796 }
6797 } else {
6798 kp_skipped++;
6799 }
6800 }
5b0a4722 6801
dcace88f
A
6802 if ((kp_cnt - kp_skipped > 0) && kill_strays) {
6803 jobmgr_kill_stray_children(jm, ps, kp_cnt - kp_skipped);
ed34e3c3
A
6804 }
6805
ddbbfbc1 6806 free(ps);
5b0a4722 6807out:
dcace88f 6808 free(pids);
ed34e3c3
A
6809}
6810
5b0a4722
A
6811jobmgr_t
6812jobmgr_parent(jobmgr_t jm)
ed34e3c3 6813{
5b0a4722
A
6814 return jm->parentmgr;
6815}
ed34e3c3 6816
5b0a4722
A
6817void
6818job_uncork_fork(job_t j)
6819{
6820 pid_t c = j->p;
6821
6822 job_log(j, LOG_DEBUG, "Uncorking the fork().");
6823 /* this unblocks the child and avoids a race
6824 * between the above fork() and the kevent_mod() */
dcace88f 6825 (void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
eabd1701 6826 (void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
ddbbfbc1 6827 j->fork_fd = 0;
5b0a4722
A
6828}
6829
6830jobmgr_t
dcace88f 6831jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
5b0a4722 6832{
5b0a4722
A
6833 job_t bootstrapper = NULL;
6834 jobmgr_t jmr;
6835
95379394 6836 __OS_COMPILETIME_ASSERT__(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);
5b0a4722 6837
ddbbfbc1 6838 if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
5b0a4722 6839 jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
ed34e3c3
A
6840 return NULL;
6841 }
6842
ddbbfbc1
A
6843 jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));
6844
6845 if (!jobmgr_assumes(jm, jmr != NULL)) {
ed34e3c3 6846 return NULL;
5b0a4722 6847 }
ed34e3c3 6848
dcace88f
A
6849 if (jm == NULL) {
6850 root_jobmgr = jmr;
6851 }
6852
5b0a4722 6853 jmr->kqjobmgr_callback = jobmgr_callback;
f36da725 6854 strcpy(jmr->name_init, name ? name : "Under construction");
5b0a4722
A
6855
6856 jmr->req_port = requestorport;
6857
6858 if ((jmr->parentmgr = jm)) {
6859 SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
6860 }
6861
eabd1701 6862 if (jm && jobmgr_assumes_zero(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME)) != KERN_SUCCESS) {
ed34e3c3
A
6863 goto out_bad;
6864 }
6865
5b0a4722 6866 if (transfer_port != MACH_PORT_NULL) {
dcace88f 6867 (void)jobmgr_assumes(jmr, jm != NULL);
5b0a4722 6868 jmr->jm_port = transfer_port;
ddbbfbc1 6869 } else if (!jm && !pid1_magic) {
5b0a4722
A
6870 char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
6871 name_t service_buf;
6872
6873 snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());
6874
eabd1701 6875 if (jobmgr_assumes_zero(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port)) != 0) {
5b0a4722
A
6876 goto out_bad;
6877 }
6878
6879 if (trusted_fd) {
ddbbfbc1 6880 int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);
5b0a4722
A
6881
6882 if ((dfd = dup(lfd)) >= 0) {
eabd1701
A
6883 (void)jobmgr_assumes_zero_p(jmr, runtime_close(dfd));
6884 (void)jobmgr_assumes_zero_p(jmr, runtime_close(lfd));
5b0a4722
A
6885 }
6886
6887 unsetenv(LAUNCHD_TRUSTED_FD_ENV);
6888 }
6889
eabd1701 6890 // cut off the Libc cache, we don't want to deadlock against ourself
5b0a4722
A
6891 inherited_bootstrap_port = bootstrap_port;
6892 bootstrap_port = MACH_PORT_NULL;
95379394 6893 os_assert_zero(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME));
ed34e3c3 6894
eabd1701 6895 // We set this explicitly as we start each child
95379394 6896 os_assert_zero(launchd_set_bport(MACH_PORT_NULL));
eabd1701 6897 } else if (jobmgr_assumes_zero(jmr, launchd_mport_create_recv(&jmr->jm_port)) != KERN_SUCCESS) {
ed34e3c3 6898 goto out_bad;
5b0a4722 6899 }
ed34e3c3 6900
5b0a4722 6901 if (!name) {
f36da725 6902 sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
ed34e3c3
A
6903 }
6904
5b0a4722 6905 if (!jm) {
eabd1701
A
6906 (void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
6907 (void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
6908 (void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
95379394 6909 (void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGINFO, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
eabd1701 6910 (void)jobmgr_assumes_zero_p(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr));
5b0a4722
A
6911 }
6912
dcace88f 6913 if (name && !skip_init) {
5b0a4722
A
6914 bootstrapper = jobmgr_init_session(jmr, name, sflag);
6915 }
6916
6917 if (!bootstrapper || !bootstrapper->weird_bootstrap) {
eabd1701 6918 if (jobmgr_assumes_zero(jmr, runtime_add_mport(jmr->jm_port, job_server)) != KERN_SUCCESS) {
5b0a4722
A
6919 goto out_bad;
6920 }
6921 }
6922
6923 jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");
6924
6925 if (bootstrapper) {
dcace88f 6926 bootstrapper->asport = asport;
eabd1701 6927
dcace88f
A
6928 jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
6929 (void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
6930 } else {
6931 jmr->req_asport = asport;
6932 }
6933
6934 if (asport != MACH_PORT_NULL) {
eabd1701 6935 (void)jobmgr_assumes_zero(jmr, launchd_mport_copy_send(asport));
5b0a4722
A
6936 }
6937
6938 if (jmr->parentmgr) {
ddbbfbc1 6939 runtime_add_weak_ref();
5b0a4722
A
6940 }
6941
6942 return jmr;
ed34e3c3
A
6943
6944out_bad:
5b0a4722
A
6945 if (jmr) {
6946 jobmgr_remove(jmr);
dcace88f
A
6947 if (jm == NULL) {
6948 root_jobmgr = NULL;
6949 }
5b0a4722 6950 }
ed34e3c3
A
6951 return NULL;
6952}
6953
dcace88f
A
6954jobmgr_t
6955jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
6956{
6957 jobmgr_t new = NULL;
6958
6959 /* These job managers are basically singletons, so we use the root Mach
6960 * bootstrap port as their requestor ports so they'll never go away.
6961 */
6962 mach_port_t req_port = root_jobmgr->jm_port;
eabd1701 6963 if (jobmgr_assumes_zero(jm, launchd_mport_make_send(req_port)) == KERN_SUCCESS) {
dcace88f
A
6964 new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
6965 if (new) {
6966 new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
6967 new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
6968 new->xpc_singleton = true;
6969 }
6970 }
6971
6972 return new;
6973}
6974
6975jobmgr_t
6976jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
6977{
6978 jobmgr_t jmi = NULL;
6979 LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
6980 if (jmi->req_euid == uid) {
6981 return jmi;
6982 }
6983 }
6984
6985 name_t name;
6986 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
6987 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6988 if (jobmgr_assumes(jm, jmi != NULL)) {
6989 /* We need to create a per-user launchd for this UID if there isn't one
6990 * already so we can grab the bootstrap port.
6991 */
6992 job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
6993 if (jobmgr_assumes(jmi, puj != NULL)) {
eabd1701
A
6994 (void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(puj->asport));
6995 (void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(jmi->req_bsport));
dcace88f
A
6996 jmi->shortdesc = "per-user";
6997 jmi->req_asport = puj->asport;
6998 jmi->req_asid = puj->asid;
6999 jmi->req_euid = uid;
7000 jmi->req_egid = -1;
7001
7002 LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
7003 } else {
7004 jobmgr_remove(jmi);
7005 }
7006 }
7007
7008 return jmi;
7009}
7010
7011jobmgr_t
7012jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
7013{
7014 jobmgr_t jmi = NULL;
7015 LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
7016 if (jmi->req_asid == asid) {
7017 return jmi;
7018 }
7019 }
7020
7021 name_t name;
7022 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
7023 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
7024 if (jobmgr_assumes(jm, jmi != NULL)) {
eabd1701 7025 (void)jobmgr_assumes_zero(jmi, launchd_mport_make_send(root_jobmgr->jm_port));
dcace88f
A
7026 jmi->shortdesc = "per-session";
7027 jmi->req_bsport = root_jobmgr->jm_port;
eabd1701 7028 (void)jobmgr_assumes_zero(jmi, audit_session_port(asid, &jmi->req_asport));
dcace88f
A
7029 jmi->req_asid = asid;
7030 jmi->req_euid = -1;
7031 jmi->req_egid = -1;
7032
7033 LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
7034 } else {
7035 jobmgr_remove(jmi);
7036 }
7037
7038 return jmi;
7039}
dcace88f 7040
5b0a4722
A
7041job_t
7042jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
7043{
7044 const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
7045 char thelabel[1000];
7046 job_t bootstrapper;
7047
7048 snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
7049 bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
eabd1701 7050
dcace88f 7051 if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
ddbbfbc1 7052 bootstrapper->is_bootstrapper = true;
5b0a4722
A
7053 char buf[100];
7054
eabd1701 7055 // <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs)
5b0a4722 7056 snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
eabd1701 7057 envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
5b0a4722 7058 bootstrapper->weird_bootstrap = true;
dcace88f
A
7059 (void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
7060 } else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
eabd1701 7061#if TARGET_OS_EMBEDDED
95379394 7062 bootstrapper->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
eabd1701 7063#endif
ddbbfbc1 7064 bootstrapper->is_bootstrapper = true;
dcace88f 7065 if (jobmgr_assumes(jm, pid1_magic)) {
eabd1701 7066 // Have our system bootstrapper print out to the console.
ddbbfbc1
A
7067 bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
7068 bootstrapper->stderrpath = strdup(_PATH_CONSOLE);
7069
eabd1701
A
7070 if (launchd_console) {
7071 (void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)fileno(launchd_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm));
ddbbfbc1
A
7072 }
7073 }
5b0a4722
A
7074 }
7075
7076 jm->session_initialized = true;
5b0a4722
A
7077 return bootstrapper;
7078}
7079
7080jobmgr_t
7081jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
ed34e3c3
A
7082{
7083 struct machservice *ms, *next_ms;
5b0a4722 7084 jobmgr_t jmi, jmn;
ed34e3c3
A
7085
7086 /* Mach ports, unlike Unix descriptors, are reference counted. In other
eabd1701
A
7087 * words, when some program hands us a second or subsequent send right to a
7088 * port we already have open, the Mach kernel gives us the same port number
7089 * back and increments an reference count associated with the port. This
7090 * This forces us, when discovering that a receive right at the other end
7091 * has been deleted, to wander all of our objects to see what weird places
7092 * clients might have handed us the same send right to use.
ed34e3c3
A
7093 */
7094
5b0a4722
A
7095 if (jm == root_jobmgr) {
7096 if (port == inherited_bootstrap_port) {
eabd1701 7097 (void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(port));
5b0a4722 7098 inherited_bootstrap_port = MACH_PORT_NULL;
ed34e3c3 7099
5b0a4722
A
7100 return jobmgr_shutdown(jm);
7101 }
7102
7103 LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
ddbbfbc1 7104 if (ms->port == port && !ms->recv) {
5b0a4722
A
7105 machservice_delete(ms->job, ms, true);
7106 }
7107 }
7108 }
7109
7110 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7111 jobmgr_delete_anything_with_port(jmi, port);
7112 }
ed34e3c3 7113
5b0a4722 7114 if (jm->req_port == port) {
ddbbfbc1 7115 jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
5b0a4722 7116 return jobmgr_shutdown(jm);
ed34e3c3 7117 }
5b0a4722 7118
95379394
A
7119 struct waiting4attach *w4ai = NULL;
7120 struct waiting4attach *w4ait = NULL;
7121 LIST_FOREACH_SAFE(w4ai, &jm->attaches, le, w4ait) {
7122 if (port == w4ai->port) {
7123 waiting4attach_delete(jm, w4ai);
7124 break;
7125 }
7126 }
7127
5b0a4722 7128 return jm;
ed34e3c3
A
7129}
7130
7131struct machservice *
5b0a4722 7132jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
ed34e3c3
A
7133{
7134 struct machservice *ms;
ddbbfbc1 7135 job_t target_j;
ed34e3c3 7136
ddbbfbc1 7137 jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);
ed34e3c3 7138
ddbbfbc1
A
7139 if (target_pid) {
7140 /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
7141 * bootstrap in other bootstraps.
7142 */
eabd1701
A
7143
7144 // Start in the given bootstrap.
dcace88f 7145 if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
eabd1701 7146 // If we fail, do a deep traversal.
ddbbfbc1
A
7147 if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
7148 jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
7149 return NULL;
7150 }
7151 }
eabd1701 7152
ddbbfbc1
A
7153 SLIST_FOREACH(ms, &target_j->machservices, sle) {
7154 if (ms->per_pid && strcmp(name, ms->name) == 0) {
ed34e3c3 7155 return ms;
5b0a4722 7156 }
ed34e3c3 7157 }
ed34e3c3 7158
ddbbfbc1 7159 job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
ed34e3c3 7160 return NULL;
5b0a4722 7161 }
eabd1701 7162
dcace88f 7163 jobmgr_t where2look = jm;
eabd1701 7164 // XPC domains are separate from Mach bootstraps.
dcace88f 7165 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
eabd1701 7166 if (launchd_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
dcace88f
A
7167 where2look = root_jobmgr;
7168 }
7169 }
eabd1701 7170
dcace88f
A
7171 LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
7172 if (!ms->per_pid && strcmp(name, ms->name) == 0) {
7173 return ms;
ddbbfbc1
A
7174 }
7175 }
ed34e3c3 7176
ddbbfbc1 7177 if (jm->parentmgr == NULL || !check_parent) {
ed34e3c3 7178 return NULL;
5b0a4722 7179 }
ed34e3c3 7180
5b0a4722 7181 return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
ed34e3c3
A
7182}
7183
7184mach_port_t
7185machservice_port(struct machservice *ms)
7186{
7187 return ms->port;
7188}
7189
5b0a4722 7190job_t
ed34e3c3
A
7191machservice_job(struct machservice *ms)
7192{
7193 return ms->job;
7194}
7195
7196bool
7197machservice_hidden(struct machservice *ms)
7198{
7199 return ms->hide;
7200}
7201
7202bool
7203machservice_active(struct machservice *ms)
7204{
7205 return ms->isActive;
7206}
7207
7208const char *
7209machservice_name(struct machservice *ms)
7210{
7211 return ms->name;
7212}
7213
ddbbfbc1
A
7214void
7215machservice_drain_port(struct machservice *ms)
7216{
7217 bool drain_one = ms->drain_one_on_crash;
7218 bool drain_all = ms->drain_all_on_crash;
eabd1701 7219
dcace88f 7220 if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
ddbbfbc1
A
7221 return;
7222 }
7223
7224 job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);
eabd1701 7225
ddbbfbc1
A
7226 char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
7227 char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
7228 mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
7229 mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;
7230
7231 mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
eabd1701 7232
ddbbfbc1
A
7233 do {
7234 /* This should be a direct check on the Mach service to see if it's an exception-handling
7235 * port, and it will break things if ReportCrash or SafetyNet start advertising other
7236 * Mach services. But for now, it should be okay.
7237 */
dcace88f 7238 if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
ddbbfbc1
A
7239 mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
7240 } else {
7241 mach_msg_options_t options = MACH_RCV_MSG |
7242 MACH_RCV_TIMEOUT ;
7243
7244 mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
dcace88f
A
7245 switch (mr) {
7246 case MACH_MSG_SUCCESS:
7247 mach_msg_destroy((mach_msg_header_t *)req_hdr);
7248 break;
7249 case MACH_RCV_TIMED_OUT:
7250 break;
7251 case MACH_RCV_TOO_LARGE:
eabd1701 7252 launchd_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
dcace88f
A
7253 break;
7254 default:
7255 break;
ddbbfbc1
A
7256 }
7257 }
dcace88f 7258 } while (drain_all && mr != MACH_RCV_TIMED_OUT);
ddbbfbc1
A
7259}
7260
ed34e3c3 7261void
5b0a4722 7262machservice_delete(job_t j, struct machservice *ms, bool port_died)
ed34e3c3 7263{
dcace88f
A
7264 if (ms->alias) {
7265 /* HACK: Egregious code duplication. But dealing with aliases is a
7266 * pretty simple affair since they can't and shouldn't have any complex
7267 * behaviors associated with them.
7268 */
7269 LIST_REMOVE(ms, name_hash_sle);
7270 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
7271 free(ms);
7272 return;
7273 }
7274
ddbbfbc1 7275 if (unlikely(ms->debug_on_close)) {
5b0a4722 7276 job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
eabd1701 7277 (void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
5b0a4722
A
7278 }
7279
ddbbfbc1
A
7280 if (ms->recv && job_assumes(j, !machservice_active(ms))) {
7281 job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
eabd1701 7282 (void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
5b0a4722
A
7283 }
7284
eabd1701 7285 (void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));
5b0a4722 7286
ddbbfbc1 7287 if (unlikely(ms->port == the_exception_server)) {
5b0a4722 7288 the_exception_server = 0;
ed34e3c3
A
7289 }
7290
ddbbfbc1 7291 job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);
ed34e3c3 7292
5b0a4722
A
7293 if (ms->special_port_num) {
7294 SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
7295 }
5b0a4722 7296 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
dcace88f
A
7297
7298 if (!(j->dedicated_instance || ms->event_channel)) {
7299 LIST_REMOVE(ms, name_hash_sle);
7300 }
5b0a4722 7301 LIST_REMOVE(ms, port_hash_sle);
ed34e3c3
A
7302
7303 free(ms);
7304}
7305
7306void
5b0a4722 7307machservice_request_notifications(struct machservice *ms)
ed34e3c3
A
7308{
7309 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
7310
7311 ms->isActive = true;
7312
5b0a4722 7313 if (ms->recv) {
ed34e3c3
A
7314 which = MACH_NOTIFY_PORT_DESTROYED;
7315 job_checkin(ms->job);
7316 }
7317
eabd1701 7318 (void)job_assumes_zero(ms->job, launchd_mport_notify_req(ms->port, which));
ed34e3c3
A
7319}
7320
eabd1701
A
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
#define END_OF(x) (&(x)[NELEM(x)])

char **
mach_cmd2argv(const char *string)
{
	/* Split a command line into an argv vector. Double quotes group words and
	 * backslash escapes the next character. The pointer table and the packed
	 * strings are returned in a single heap allocation owned by the caller;
	 * returns NULL for an empty command or on allocation failure.
	 */
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		/* Bug fix (CERT STR37-C): passing a plain char to isspace() is
		 * undefined when char is signed and the value is negative; cast
		 * through unsigned char.
		 */
		while (isspace((unsigned char)*cp)) {
			cp++;
		}
		term = (*cp == '"') ? *cp++ : '\0';
		if (nargs < NELEM(argv)) {
			argv[nargs++] = argp;
		}
		while (*cp && (term ? *cp != term : !isspace((unsigned char)*cp)) && argp < END_OF(args)) {
			if (*cp == '\\') {
				cp++;
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		*argp++ = '\0';
	}
	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	// One allocation: (nargs + 1) pointers followed by the copied strings.
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!argv_ret) {
		(void)os_assumes_zero(errno);
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
7374
void
job_checkin(job_t j)
{
	// Record that the job has checked in with launchd.
	j->checkedin = true;
}
7380
ddbbfbc1
A
bool job_is_god(job_t j)
{
	// Whether the job carries the embedded root-equivalent ("god") bit;
	// the job_mig_* handlers below consult the same flag for privilege checks.
	return j->embedded_god;
}
7385
ed34e3c3 7386bool
5b0a4722 7387job_ack_port_destruction(mach_port_t p)
ed34e3c3 7388{
ed34e3c3 7389 struct machservice *ms;
ddbbfbc1 7390 job_t j;
ed34e3c3 7391
5b0a4722
A
7392 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
7393 if (ms->recv && (ms->port == p)) {
ed34e3c3 7394 break;
5b0a4722 7395 }
ed34e3c3
A
7396 }
7397
eabd1701
A
7398 if (!ms) {
7399 launchd_syslog(LOG_WARNING, "Could not find MachService to match receive right: 0x%x", p);
ed34e3c3 7400 return false;
5b0a4722 7401 }
ed34e3c3 7402
ddbbfbc1 7403 j = ms->job;
ed34e3c3 7404
ddbbfbc1 7405 jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
eabd1701
A
7406
7407 /* Without being the exception handler, NOTE_EXIT is our only way to tell if
7408 * the job crashed, and we can't rely on NOTE_EXIT always being processed
7409 * after all the job's receive rights have been returned.
ddbbfbc1 7410 *
eabd1701
A
7411 * So when we get receive rights back, check to see if the job has been
7412 * reaped yet. If not, then we add this service to a list of services to be
7413 * drained on crash if it's requested that behavior. So, for a job with N
7414 * receive rights all requesting that they be drained on crash, we can
7415 * safely handle the following sequence of events.
ddbbfbc1
A
7416 *
7417 * ReceiveRight0Returned
7418 * ReceiveRight1Returned
7419 * ReceiveRight2Returned
7420 * NOTE_EXIT (reap, get exit status)
7421 * ReceiveRight3Returned
7422 * .
7423 * .
7424 * .
7425 * ReceiveRight(N - 1)Returned
7426 */
dcace88f
A
7427 if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
7428 if (j->crashed && j->reaped) {
ddbbfbc1
A
7429 job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
7430 machservice_drain_port(ms);
dcace88f 7431 } else if (!(j->crashed || j->reaped)) {
ddbbfbc1
A
7432 job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
7433 }
5b0a4722 7434 }
eabd1701 7435
ddbbfbc1
A
7436 ms->isActive = false;
7437 if (ms->delete_on_destruction) {
7438 machservice_delete(j, ms, false);
7439 } else if (ms->reset) {
7440 machservice_resetport(j, ms);
7441 }
eabd1701 7442
95379394
A
7443 kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
7444 (void)job_assumes_zero(j, kr);
eabd1701 7445 machservice_stamp_port(j, ms);
ddbbfbc1 7446 job_dispatch(j, false);
ed34e3c3 7447
95379394
A
7448 if (ms->recv_race_hack) {
7449 ms->recv_race_hack = false;
7450 machservice_watch(ms->job, ms);
7451 }
7452
5b0a4722 7453 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
ed34e3c3
A
7454
7455 return true;
7456}
7457
void
job_ack_no_senders(job_t j)
{
	// Handle a no-senders notification on the job's privileged bootstrap
	// port: no one holds a send right anymore, so close the receive right
	// and clear the port, then give the job a dispatch pass.
	j->priv_port_has_senders = false;

	(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	j->j_port = 0;

	job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");

	job_dispatch(j, false);
}
7470
ed34e3c3 7471bool
5b0a4722 7472semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
ed34e3c3
A
7473{
7474 struct semaphoreitem *si;
7475 size_t alloc_sz = sizeof(struct semaphoreitem);
7476
5b0a4722 7477 if (what) {
ed34e3c3 7478 alloc_sz += strlen(what) + 1;
5b0a4722 7479 }
ed34e3c3 7480
eabd1701 7481 if (job_assumes(j, si = calloc(1, alloc_sz)) == NULL) {
ed34e3c3 7482 return false;
5b0a4722 7483 }
ed34e3c3
A
7484
7485 si->why = why;
7486
5b0a4722 7487 if (what) {
f36da725 7488 strcpy(si->what_init, what);
5b0a4722 7489 }
ed34e3c3
A
7490
7491 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
eabd1701 7492
dcace88f 7493 if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
ddbbfbc1
A
7494 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
7495 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
7496 j->nosy = true;
7497 }
ed34e3c3 7498
5b0a4722
A
7499 semaphoreitem_runtime_mod_ref(si, true);
7500
ed34e3c3
A
7501 return true;
7502}
7503
7504void
5b0a4722
A
7505semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
7506{
7507 /*
7508 * External events need to be tracked.
7509 * Internal events do NOT need to be tracked.
7510 */
7511
7512 switch (si->why) {
7513 case SUCCESSFUL_EXIT:
7514 case FAILED_EXIT:
7515 case OTHER_JOB_ENABLED:
7516 case OTHER_JOB_DISABLED:
7517 case OTHER_JOB_ACTIVE:
7518 case OTHER_JOB_INACTIVE:
7519 return;
7520 default:
7521 break;
7522 }
7523
7524 if (add) {
ddbbfbc1 7525 runtime_add_weak_ref();
5b0a4722 7526 } else {
ddbbfbc1 7527 runtime_del_weak_ref();
5b0a4722
A
7528 }
7529}
7530
void
semaphoreitem_delete(job_t j, struct semaphoreitem *si)
{
	// Drop the runtime weak reference taken in semaphoreitem_new() before
	// unlinking and freeing the item.
	semaphoreitem_runtime_mod_ref(si, false);

	SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);

	// We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores.
	if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
		// Mirror of the registration in semaphoreitem_new(): leave the
		// global curious-jobs list.
		j->nosy = false;
		SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
	}

	free(si);
}
7546
7547void
5b0a4722 7548semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
ed34e3c3 7549{
5b0a4722 7550 struct semaphoreitem_dict_iter_context *sdic = context;
ed34e3c3
A
7551 semaphore_reason_t why;
7552
5b0a4722 7553 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
ed34e3c3 7554
5b0a4722 7555 semaphoreitem_new(sdic->j, why, key);
ed34e3c3
A
7556}
7557
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	// Import one KeepAlive sub-key from a job's property list. Boolean keys
	// map directly to semaphore reasons; dictionary keys (OtherJobActive /
	// OtherJobEnabled) are iterated entry-by-entry via
	// semaphoreitem_setup_dict_iter().
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			// Exit-status conditions also flag a pending start.
			j->start_pending = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
			j->needs_kickoff = launch_data_get_bool(obj);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
			why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
			semaphoreitem_new(j, why, NULL);
			j->start_pending = true;
		} else {
			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
			break;
		}

		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		job_log(j, LOG_ERR, "Unrecognized KeepAlive type: %u", launch_data_get_type(obj));
		break;
	}
}
7603
dcace88f 7604bool
95379394 7605externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags)
dcace88f 7606{
eabd1701
A
7607 if (j->event_monitor) {
7608 job_log(j, LOG_ERR, "The event monitor job cannot use LaunchEvents or XPC Events.");
7609 return false;
7610 }
7611
dcace88f 7612 struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
eabd1701
A
7613 if (!ee) {
7614 return false;
7615 }
7616
7617 ee->event = xpc_retain(event);
7618 (void)strcpy(ee->name, evname);
7619 ee->job = j;
7620 ee->id = sys->curid;
7621 ee->sys = sys;
7622 ee->state = false;
7623 ee->wanted_state = true;
7624 sys->curid++;
7625
95379394
A
7626 if (flags & XPC_EVENT_FLAG_ENTITLEMENTS) {
7627 struct ldcred *ldc = runtime_get_caller_creds();
7628 if (ldc) {
7629 ee->entitlements = xpc_copy_entitlements_for_pid(ldc->pid);
7630 }
7631 }
7632
eabd1701
A
7633 if (sys == _launchd_support_system) {
7634 ee->internal = true;
dcace88f
A
7635 }
7636
eabd1701
A
7637 LIST_INSERT_HEAD(&j->events, ee, job_le);
7638 LIST_INSERT_HEAD(&sys->events, ee, sys_le);
7639
7640 job_log(j, LOG_DEBUG, "New event: %s/%s", sys->name, evname);
7641
dcace88f 7642 eventsystem_ping();
eabd1701 7643 return true;
dcace88f
A
7644}
7645
void
externalevent_delete(struct externalevent *ee)
{
	// Release retained XPC objects, unlink from both the owning job's and
	// the stream's event lists, free, and notify the event monitor.
	xpc_release(ee->event);
	if (ee->entitlements) {
		xpc_release(ee->entitlements);
	}
	LIST_REMOVE(ee, job_le);
	LIST_REMOVE(ee, sys_le);

	free(ee);

	eventsystem_ping();
}
7660
7661void
7662externalevent_setup(launch_data_t obj, const char *key, void *context)
7663{
eabd1701
A
7664 /* This method can ONLY be called on the job_import() path, as it assumes
7665 * the input is a launch_data_t.
7666 */
dcace88f 7667 struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
eabd1701
A
7668
7669 xpc_object_t xobj = ld2xpc(obj);
7670 if (xobj) {
7671 job_log(ctx->j, LOG_DEBUG, "Importing stream/event: %s/%s", ctx->sys->name, key);
95379394 7672 externalevent_new(ctx->j, ctx->sys, key, xobj, 0);
eabd1701
A
7673 xpc_release(xobj);
7674 } else {
7675 job_log(ctx->j, LOG_ERR, "Could not import event for job: %s", key);
7676 }
dcace88f
A
7677}
7678
7679struct externalevent *
7680externalevent_find(const char *sysname, uint64_t id)
7681{
7682 struct externalevent *ei = NULL;
eabd1701 7683
dcace88f 7684 struct eventsystem *es = eventsystem_find(sysname);
eabd1701 7685 if (es != NULL) {
dcace88f
A
7686 LIST_FOREACH(ei, &es->events, sys_le) {
7687 if (ei->id == id) {
7688 break;
7689 }
7690 }
eabd1701
A
7691 } else {
7692 launchd_syslog(LOG_ERR, "Could not find event system: %s", sysname);
dcace88f 7693 }
eabd1701 7694
dcace88f
A
7695 return ei;
7696}
7697
7698struct eventsystem *
7699eventsystem_new(const char *name)
7700{
7701 struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
eabd1701
A
7702 if (es != NULL) {
7703 es->curid = 1;
7704 (void)strcpy(es->name, name);
dcace88f 7705 LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
eabd1701 7706 } else {
95379394 7707 (void)os_assumes_zero(errno);
dcace88f
A
7708 }
7709
7710 return es;
7711}
7712
7713void
7714eventsystem_delete(struct eventsystem *es)
7715{
7716 struct externalevent *ei = NULL;
7717 while ((ei = LIST_FIRST(&es->events))) {
7718 externalevent_delete(ei);
7719 }
eabd1701 7720
dcace88f 7721 LIST_REMOVE(es, global_le);
eabd1701 7722
dcace88f
A
7723 free(es);
7724}
7725
7726void
7727eventsystem_setup(launch_data_t obj, const char *key, void *context)
7728{
7729 job_t j = (job_t)context;
7730 if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
7731 return;
7732 }
eabd1701 7733
dcace88f
A
7734 struct eventsystem *sys = eventsystem_find(key);
7735 if (unlikely(sys == NULL)) {
7736 sys = eventsystem_new(key);
7737 job_log(j, LOG_DEBUG, "New event system: %s", key);
7738 }
eabd1701 7739
dcace88f
A
7740 if (job_assumes(j, sys != NULL)) {
7741 struct externalevent_iter_ctx ctx = {
7742 .j = j,
7743 .sys = sys,
7744 };
eabd1701
A
7745
7746 job_log(j, LOG_DEBUG, "Importing events for stream: %s", key);
dcace88f 7747 launch_data_dict_iterate(obj, externalevent_setup, &ctx);
dcace88f
A
7748 }
7749}
7750
7751struct eventsystem *
7752eventsystem_find(const char *name)
7753{
7754 struct eventsystem *esi = NULL;
7755 LIST_FOREACH(esi, &_s_event_systems, global_le) {
7756 if (strcmp(name, esi->name) == 0) {
7757 break;
7758 }
7759 }
eabd1701 7760
dcace88f
A
7761 return esi;
7762}
7763
7764void
7765eventsystem_ping(void)
7766{
eabd1701
A
7767 if (!_launchd_event_monitor) {
7768 return;
7769 }
7770
7771 if (!_launchd_event_monitor->p) {
7772 (void)job_dispatch(_launchd_event_monitor, true);
7773 } else {
7774 if (_launchd_event_monitor->event_monitor_ready2signal) {
7775 (void)job_assumes_zero_p(_launchd_event_monitor, kill(_launchd_event_monitor->p, SIGUSR1));
dcace88f 7776 }
dcace88f
A
7777 }
7778}
7779
ed34e3c3 7780void
5b0a4722 7781jobmgr_dispatch_all_semaphores(jobmgr_t jm)
ed34e3c3 7782{
5b0a4722
A
7783 jobmgr_t jmi, jmn;
7784 job_t ji, jn;
ed34e3c3 7785
ed34e3c3 7786
5b0a4722
A
7787 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7788 jobmgr_dispatch_all_semaphores(jmi);
7789 }
ed34e3c3 7790
5b0a4722
A
7791 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7792 if (!SLIST_EMPTY(&ji->semaphores)) {
7793 job_dispatch(ji, false);
7794 }
7795 }
ed34e3c3
A
7796}
7797
time_t
cronemu(int mon, int mday, int hour, int min)
{
	// Compute the next wall-clock time (starting at the next whole minute)
	// matching the given cron-style fields; -1 in a field means "any".
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	// Let mktime() decide DST, and begin the search at the next minute.
	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	// If no match exists this year, roll over to January 1st of the next
	// year and search again.
	while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
		workingtm.tm_year++;
		workingtm.tm_mon = 0;
		workingtm.tm_mday = 1;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		mktime(&workingtm);
	}

	return mktime(&workingtm);
}
7822
time_t
cronemu_wday(int wday, int hour, int min)
{
	// Next wall-clock time falling on weekday `wday` at a time matching
	// hour/min (-1 = any field).
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	// Let mktime() decide DST, and begin the search at the next minute.
	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	// Cron convention: both 0 and 7 mean Sunday.
	if (wday == 7) {
		wday = 0;
	}

	// Walk forward one day at a time until both the weekday and the time
	// match; mktime() normalizes the date and recomputes tm_wday.
	while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
		workingtm.tm_mday++;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		mktime(&workingtm);
	}

	return mktime(&workingtm);
}
7849
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	// Advance *wtm to the next time matching the month (and finer) fields.
	// Returns false when the match would fall outside the current year.
	if (mon == -1) {
		// Any month: scan forward month by month. The carry test detects
		// mktime() normalizing an out-of-range tm_mon into the next year.
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mon < wtm->tm_mon) {
		// The requested month has already passed this year.
		return false;
	}

	if (mon > wtm->tm_mon) {
		// Jump to the start of the requested month.
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
7885
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	// Advance *wtm to the next time matching the day-of-month (and finer)
	// fields. Returns false when no match exists within the current month.
	if (mday == -1) {
		// Any day: scan forward day by day. The carry test detects
		// mktime() rolling tm_mday into the next month.
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mday < wtm->tm_mday) {
		// The requested day has already passed this month.
		return false;
	}

	if (mday > wtm->tm_mday) {
		// Jump to the start of the requested day.
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
7919
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	// Advance *wtm to the next time matching the hour (and minute) fields.
	// Returns false when no match exists within the current day.
	if (hour == -1) {
		// Any hour: scan forward hour by hour. The carry test detects
		// mktime() rolling tm_hour into the next day.
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_min(&workingtm, min)) {
			workingtm.tm_hour++;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_hour;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_hour) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (hour < wtm->tm_hour) {
		// The requested hour has already passed today.
		return false;
	}

	if (hour > wtm->tm_hour) {
		// Jump to the start of the requested hour.
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
7951
bool
cronemu_min(struct tm *wtm, int min)
{
	// Match the minute field against *wtm: -1 is a wildcard; a minute that
	// already passed within this hour cannot match; a later minute advances
	// *wtm to it.
	if (min == -1) {
		return true;
	}
	if (min < wtm->tm_min) {
		// Already past the requested minute in this hour.
		return false;
	}
	// Jump forward to the requested minute when needed.
	if (min > wtm->tm_min) {
		wtm->tm_min = min;
	}
	return true;
}
5b0a4722
A
7969
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	// MIG handler: create an on-demand server job for `server_cmd` and
	// return its privileged bootstrap port in *server_portp.
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t js;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	// Sandbox-check the caller against the target executable path (argv[0]).
	const char **argv = (const char **)mach_cmd2argv(server_cmd);
	if (unlikely(argv == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
		free(argv);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	free(argv);
#endif

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

	if (pid1_magic) {
		// In the PID-1 instance, non-root callers are redirected to their
		// per-user launchd.
		if (ldc->euid || ldc->uid) {
			job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
			return VPROC_ERR_TRY_PER_USER;
		}
	} else {
		if (unlikely(server_uid != getuid())) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; // zero means "do nothing"
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (unlikely(js == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
8020
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	// MIG handler: deliver `sig` to the job labeled `targetlabel`. The
	// magic value VPROC_MAGIC_UNLOAD_SIGNAL instead removes the job,
	// holding the MIG reply (`srp`) until removal completes if the target
	// has a running process.
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t otherj;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Caller must be root or share our UID; on embedded platforms a
	// root-equivalent ("god") job is exempted.
	if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
#if TARGET_OS_EMBEDDED
		if (!j->embedded_god) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
#else
		return BOOTSTRAP_NOT_PRIVILEGED;
#endif
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// A god job may only act on jobs belonging to the same user.
	if (j->embedded_god) {
		if (j->username && otherj->username) {
			if (strcmp(j->username, otherj->username) != 0) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		bool do_block = otherj->p;

		// Anonymous (adopted) processes are not ours to unload.
		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			// this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first...
			(void)job_assumes(otherj, waiting4removal_new(otherj, srp));
			return MIG_NO_REPLY;
		} else {
			return 0;
		}
	} else if (otherj->p) {
		(void)job_assumes_zero_p(j, kill2(otherj->p, sig));
	}

	return 0;
}
8086
kern_return_t
job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	// MIG handler: accept a buffer of log records forwarded by a per-user
	// launchd and pass it to our log store tagged with the caller's
	// euid/egid.
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Only per-user launchd instances may forward logs.
	if (!job_assumes(j, j->per_user)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return launchd_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
}
8102
kern_return_t
job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	// MIG handler: drain launchd's internal log buffer into *outval for the
	// caller, replying via `srp`. Root-only.
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Non-root euid: refuse.
	if (unlikely(ldc->euid)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return launchd_log_drain(srp, outval, outvalCnt);
}
8118
kern_return_t
job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
	vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval,
	mach_msg_type_number_t *outvalCnt)
{
	// MIG handler for vproc's complex get/set/swap interface: `inkey`
	// selects a value being set (packed launch_data in inval/invalCnt),
	// `outkey` selects a value being fetched (packed into *outval). When
	// both are given they must be the same key. Returns 0 on success,
	// 1 on failure, or a bootstrap error.
	const char *action;
	launch_data_t input_obj = NULL, output_obj = NULL;
	size_t data_offset = 0;
	size_t packed_size;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Setting a value on a job other than the caller's own requires root
	// or our own UID.
	if (inkey && ldc->pid != j->p) {
		if (ldc->euid && ldc->euid != getuid()) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}

	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	// Preallocate a fixed 20 MiB reply buffer; unused cases (VPROC_GSK_ZERO
	// and the error path) deallocate it again.
	*outvalCnt = 20 * 1024 * 1024;
	mig_allocate(outval, *outvalCnt);
	if (!job_assumes(j, *outval != 0)) {
		return 1;
	}

	/* Note to future maintainers: launch_data_unpack() does NOT return a heap
	 * object. The data is decoded in-place. So do not call launch_data_free()
	 * on input_obj.
	 */
	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
		goto out_bad;
	}

	char *store = NULL;
	switch (outkey) {
	case VPROC_GSK_ENVIRONMENT:
		// Export the environment assembled from the manager's other jobs.
		if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
			goto out_bad;
		}
		jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ALLJOBS:
		// Export every job, with file descriptors stripped from the result.
		if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
			goto out_bad;
		}
		ipc_revoke_fds(output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_MGR_NAME:
		if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_JOB_OVERRIDES_DB:
		// Return the path of the persistent overrides database.
		store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_DB, "overrides.plist");
		if (!store || !job_assumes(j, (output_obj = launch_data_new_string(store)) != NULL)) {
			free(store);
			goto out_bad;
		}

		free(store);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ZERO:
		// Nothing requested: return an empty reply buffer.
		mig_deallocate(*outval, *outvalCnt);
		*outval = 0;
		*outvalCnt = 0;
		break;
	default:
		goto out_bad;
	}

	mig_deallocate(inval, invalCnt);
	return 0;

out_bad:
	// Common failure path: release the inbound buffer, any reply buffer,
	// and any partially-built output object.
	mig_deallocate(inval, invalCnt);
	if (*outval) {
		mig_deallocate(*outval, *outvalCnt);
	}
	if (output_obj) {
		launch_data_free(output_obj);
	}

	return 1;
}
8243
8244kern_return_t
8245job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
8246{
8247 const char *action;
8248 kern_return_t kr = 0;
ddbbfbc1 8249 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
8250 int oldmask;
8251
eabd1701 8252 if (!j) {
5b0a4722
A
8253 return BOOTSTRAP_NO_MEMORY;
8254 }
8255
eabd1701
A
8256 if (inkey && ldc->pid != j->p) {
8257 if (ldc->euid && ldc->euid != getuid()) {
8258 return BOOTSTRAP_NOT_PRIVILEGED;
8259 }
8260 }
5b0a4722 8261
ddbbfbc1 8262 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
5b0a4722
A
8263 return 1;
8264 }
8265
8266 if (inkey && outkey) {
8267 action = "Swapping";
8268 } else if (inkey) {
8269 action = "Setting";
8270 } else {
8271 action = "Getting";
8272 }
8273
8274 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
8275
8276 switch (outkey) {
ddbbfbc1
A
8277 case VPROC_GSK_ABANDON_PROCESS_GROUP:
8278 *outval = j->abandon_pg;
8279 break;
5b0a4722
A
8280 case VPROC_GSK_LAST_EXIT_STATUS:
8281 *outval = j->last_exit_status;
8282 break;
8283 case VPROC_GSK_MGR_UID:
8284 *outval = getuid();
8285 break;
8286 case VPROC_GSK_MGR_PID:
8287 *outval = getpid();
8288 break;
8289 case VPROC_GSK_IS_MANAGED:
8290 *outval = j->anonymous ? 0 : 1;
8291 break;
8292 case VPROC_GSK_BASIC_KEEPALIVE:
8293 *outval = !j->ondemand;
8294 break;
8295 case VPROC_GSK_START_INTERVAL:
8296 *outval = j->start_interval;
8297 break;
8298 case VPROC_GSK_IDLE_TIMEOUT:
8299 *outval = j->timeout;
8300 break;
8301 case VPROC_GSK_EXIT_TIMEOUT:
8302 *outval = j->exit_timeout;
8303 break;
8304 case VPROC_GSK_GLOBAL_LOG_MASK:
8305 oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
8306 *outval = oldmask;
8307 runtime_setlogmask(oldmask);
8308 break;
8309 case VPROC_GSK_GLOBAL_UMASK:
8310 oldmask = umask(0);
8311 *outval = oldmask;
8312 umask(oldmask);
8313 break;
ddbbfbc1 8314 case VPROC_GSK_TRANSACTIONS_ENABLED:
eabd1701
A
8315 job_log(j, LOG_DEBUG, "Reading EnableTransactions value.");
8316 *outval = j->enable_transactions;
ddbbfbc1
A
8317 break;
8318 case VPROC_GSK_WAITFORDEBUGGER:
8319 *outval = j->wait4debugger;
8320 break;
8321 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
eabd1701 8322 *outval = j->embedded_god;
ddbbfbc1 8323 break;
eabd1701 8324 case VPROC_GSK_ZERO:
5b0a4722
A
8325 *outval = 0;
8326 break;
8327 default:
8328 kr = 1;
8329 break;
8330 }
8331
8332 switch (inkey) {
ddbbfbc1
A
8333 case VPROC_GSK_ABANDON_PROCESS_GROUP:
8334 j->abandon_pg = (bool)inval;
8335 break;
5b0a4722 8336 case VPROC_GSK_GLOBAL_ON_DEMAND:
eabd1701
A
8337 job_log(j, LOG_DEBUG, "Job has set global on-demand mode to: %s", inval ? "true" : "false");
8338 kr = job_set_global_on_demand(j, inval);
5b0a4722
A
8339 break;
8340 case VPROC_GSK_BASIC_KEEPALIVE:
8341 j->ondemand = !inval;
8342 break;
8343 case VPROC_GSK_START_INTERVAL:
ddbbfbc1 8344 if (inval > UINT32_MAX || inval < 0) {
5b0a4722
A
8345 kr = 1;
8346 } else if (inval) {
8347 if (j->start_interval == 0) {
ddbbfbc1 8348 runtime_add_weak_ref();
5b0a4722 8349 }
ddbbfbc1 8350 j->start_interval = (typeof(j->start_interval)) inval;
eabd1701 8351 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
5b0a4722 8352 } else if (j->start_interval) {
eabd1701 8353 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
5b0a4722 8354 if (j->start_interval != 0) {
ddbbfbc1 8355 runtime_del_weak_ref();
5b0a4722
A
8356 }
8357 j->start_interval = 0;
8358 }
8359 break;
8360 case VPROC_GSK_IDLE_TIMEOUT:
ddbbfbc1
A
8361 if (inval < 0 || inval > UINT32_MAX) {
8362 kr = 1;
8363 } else {
8364 j->timeout = (typeof(j->timeout)) inval;
5b0a4722
A
8365 }
8366 break;
8367 case VPROC_GSK_EXIT_TIMEOUT:
ddbbfbc1
A
8368 if (inval < 0 || inval > UINT32_MAX) {
8369 kr = 1;
8370 } else {
8371 j->exit_timeout = (typeof(j->exit_timeout)) inval;
5b0a4722
A
8372 }
8373 break;
8374 case VPROC_GSK_GLOBAL_LOG_MASK:
ddbbfbc1
A
8375 if (inval < 0 || inval > UINT32_MAX) {
8376 kr = 1;
8377 } else {
8378 runtime_setlogmask((int) inval);
8379 }
5b0a4722
A
8380 break;
8381 case VPROC_GSK_GLOBAL_UMASK:
95379394 8382 __OS_COMPILETIME_ASSERT__(sizeof (mode_t) == 2);
ddbbfbc1
A
8383 if (inval < 0 || inval > UINT16_MAX) {
8384 kr = 1;
8385 } else {
dcace88f
A
8386#if HAVE_SANDBOX
8387 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8388 kr = 1;
8389 } else {
8390 umask((mode_t) inval);
8391 }
8392#endif
ddbbfbc1
A
8393 }
8394 break;
8395 case VPROC_GSK_TRANSACTIONS_ENABLED:
eabd1701 8396 /* No-op. */
ddbbfbc1
A
8397 break;
8398 case VPROC_GSK_WEIRD_BOOTSTRAP:
dcace88f 8399 if (job_assumes(j, j->weird_bootstrap)) {
ddbbfbc1 8400 job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
eabd1701
A
8401
8402 mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_job_subsystem);
8403
8404 if (job_mig_job_subsystem.maxsize > mxmsgsz) {
8405 mxmsgsz = job_mig_job_subsystem.maxsize;
8406 }
8407
8408 (void)job_assumes_zero(j, runtime_add_mport(j->mgr->jm_port, job_server));
ddbbfbc1
A
8409 j->weird_bootstrap = false;
8410 }
8411 break;
8412 case VPROC_GSK_WAITFORDEBUGGER:
8413 j->wait4debugger_oneshot = inval;
8414 break;
8415 case VPROC_GSK_PERUSER_SUSPEND:
dcace88f 8416 if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
ddbbfbc1 8417 mach_port_t junk = MACH_PORT_NULL;
dcace88f
A
8418 job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
8419 if (job_assumes(j, jpu != NULL)) {
ddbbfbc1 8420 struct suspended_peruser *spi = NULL;
dcace88f
A
8421 LIST_FOREACH(spi, &j->suspended_perusers, sle) {
8422 if ((int64_t)(spi->j->mach_uid) == inval) {
ddbbfbc1
A
8423 job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
8424 break;
8425 }
8426 }
8427
dcace88f 8428 if (spi == NULL) {
ddbbfbc1
A
8429 job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
8430 spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
dcace88f
A
8431 if (job_assumes(j, spi != NULL)) {
8432 /* Stop listening for events.
8433 *
8434 * See <rdar://problem/9014146>.
8435 */
8436 if (jpu->peruser_suspend_count == 0) {
8437 job_ignore(jpu);
8438 }
8439
ddbbfbc1
A
8440 spi->j = jpu;
8441 spi->j->peruser_suspend_count++;
8442 LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
8443 job_stop(spi->j);
dcace88f 8444 *outval = jpu->p;
ddbbfbc1
A
8445 } else {
8446 kr = BOOTSTRAP_NO_MEMORY;
8447 }
8448 }
8449 }
8450 } else {
8451 kr = 1;
8452 }
8453 break;
8454 case VPROC_GSK_PERUSER_RESUME:
dcace88f 8455 if (job_assumes(j, pid1_magic == true)) {
ddbbfbc1 8456 struct suspended_peruser *spi = NULL, *spt = NULL;
dcace88f
A
8457 LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
8458 if ((int64_t)(spi->j->mach_uid) == inval) {
ddbbfbc1
A
8459 spi->j->peruser_suspend_count--;
8460 LIST_REMOVE(spi, sle);
8461 job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
8462 break;
8463 }
8464 }
eabd1701 8465
dcace88f 8466 if (!job_assumes(j, spi != NULL)) {
ddbbfbc1
A
8467 job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
8468 kr = BOOTSTRAP_NOT_PRIVILEGED;
dcace88f
A
8469 } else if (spi->j->peruser_suspend_count == 0) {
8470 job_watch(spi->j);
ddbbfbc1
A
8471 job_dispatch(spi->j, false);
8472 free(spi);
8473 }
8474 } else {
8475 kr = 1;
8476 }
5b0a4722 8477 break;
eabd1701 8478 case VPROC_GSK_ZERO:
5b0a4722
A
8479 break;
8480 default:
8481 kr = 1;
8482 break;
8483 }
8484
8485 return kr;
8486}
8487
/* MIG callin: post-fork handshake from a newly created child task.
 *
 * Sets up the exception port on `child_task`, copies launchd's recorded host
 * special ports onto it, and returns the job's audit-session port (if any)
 * through `asport`. The send right on `child_task` is consumed.
 */
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	struct machservice *ms;
	job_setup_exception_port(j, child_task);
	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			// The TASK_ACCESS_PORT funny business is to workaround 5325399.
			continue;
		}

		// errno is (ab)used here to carry the kern_return_t from
		// task_set_special_port() so mach_error_string() can render it below.
		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
		if (errno) {
			if (errno == MACH_SEND_INVALID_DEST) {
				// The child is already gone; no point trying the rest.
				job_log(j, LOG_WARNING, "Task died before special ports could be set.");
				break;
			}

			int desired_log_level = LOG_ERR;
			if (j->anonymous) {
				// 5338127
				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* MIG will not zero-initialize this pointer, so we must always do so.
	 *
	 * <rdar://problem/8562593>.
	 */
	*asport = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
	if (!j->anonymous) {
		/* XPC services will spawn into the root security session by default.
		 * xpcproxy will switch them away if needed.
		 */
		if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
			job_log(j, LOG_DEBUG, "Returning session port: 0x%x", j->asport);
			*asport = j->asport;
		}
	}
#endif
	// Consume the send right MIG gave us for the child task.
	(void)job_assumes_zero(j, launchd_mport_deallocate(child_task));

	return 0;
}
8547
95379394
A
8548kern_return_t
8549job_mig_get_listener_port_rights(job_t j, mach_port_array_t *sports, mach_msg_type_number_t *sports_cnt)
8550{
8551 if (!j) {
8552 return BOOTSTRAP_NO_MEMORY;
8553 }
8554
8555 size_t cnt = 0;
8556 struct machservice *msi = NULL;
8557 SLIST_FOREACH(msi, &j->machservices, sle) {
8558 if (msi->upfront && job_assumes(j, msi->recv)) {
8559 cnt++;
8560 }
8561 }
8562
8563 if (cnt == 0) {
8564 return BOOTSTRAP_UNKNOWN_SERVICE;
8565 }
8566
8567 mach_port_array_t sports2 = NULL;
8568 mig_allocate((vm_address_t *)&sports2, cnt * sizeof(sports2[0]));
8569 if (!sports2) {
8570 return BOOTSTRAP_NO_MEMORY;
8571 }
8572
8573 size_t i = 0;
8574 SLIST_FOREACH(msi, &j->machservices, sle) {
8575 if (msi->upfront && msi->recv) {
8576 sports2[i] = msi->port;
8577 i++;
8578 }
8579 }
8580
8581 *sports = sports2;
8582 *sports_cnt = cnt;
8583
8584 return KERN_SUCCESS;
8585}
8586
8587kern_return_t
8588job_mig_register_gui_session(job_t j, mach_port_t asport)
8589{
8590 if (!j->per_user) {
8591 return BOOTSTRAP_NOT_PRIVILEGED;
8592 }
8593
8594 jobmgr_t jm = jobmgr_find_xpc_per_user_domain(root_jobmgr, j->mach_uid);
8595 if (!jm) {
8596 return BOOTSTRAP_UNKNOWN_SERVICE;
8597 }
8598
8599 if (jm->req_gui_asport) {
8600 // This job manager persists, so we need to allow the per-user launchd
8601 // to update the GUI session as it comes and goes.
8602 jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_gui_asport));
8603 }
8604
8605 jm->req_gui_asport = asport;
8606 return KERN_SUCCESS;
8607}
8608
5b0a4722
A
/* MIG callin implementing reboot2(): record who asked for the reboot (by
 * walking the caller's process ancestry), stash the reboot flags on the root
 * job manager, and kick off launchd's shutdown sequence. PID-1 only.
 */
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct proc_bsdshortinfo proc;
	struct ldcred *ldc = runtime_get_caller_creds();
	pid_t pid_to_log;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Only the system instance of launchd can reboot the machine.
	if (unlikely(!pid1_magic)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if !TARGET_OS_EMBEDDED
	if (unlikely(ldc->euid)) {
#else
	if (unlikely(ldc->euid) && !j->embedded_god) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Walk up the caller's ancestry, appending "name[pid] ->" for each hop,
	// purely for the diagnostic log message below.
	for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
		size_t who_offset;
		if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			// NOTE(review): returns the raw value 1 as a kern_return_t here,
			// unlike the BOOTSTRAP_* codes used elsewhere — presumably an
			// internal "generic failure"; confirm against callers.
			return 1;
		}

		// Guard against a ppid cycle (a process listed as its own parent),
		// which would otherwise loop forever.
		if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
			job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
			// NOTE(review): this prints the same name/pid three times; it
			// looks like it was meant to illustrate the self-cycle rather
			// than show three distinct ancestors.
			snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
			break;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
			" %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;
	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
	launchd_shutdown();

	return 0;
}
8659
8660kern_return_t
8661job_mig_getsocket(job_t j, name_t spr)
8662{
eabd1701 8663 if (!j) {
5b0a4722
A
8664 return BOOTSTRAP_NO_MEMORY;
8665 }
8666
dcace88f
A
8667 if (j->deny_job_creation) {
8668 return BOOTSTRAP_NOT_PRIVILEGED;
8669 }
8670
8671#if HAVE_SANDBOX
8672 struct ldcred *ldc = runtime_get_caller_creds();
8673 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
ddbbfbc1
A
8674 return BOOTSTRAP_NOT_PRIVILEGED;
8675 }
dcace88f 8676#endif
ddbbfbc1 8677
5b0a4722
A
8678 ipc_server_init();
8679
ddbbfbc1 8680 if (unlikely(!sockpath)) {
5b0a4722
A
8681 return BOOTSTRAP_NO_MEMORY;
8682 }
8683
8684 strncpy(spr, sockpath, sizeof(name_t));
eabd1701 8685
5b0a4722
A
8686 return BOOTSTRAP_SUCCESS;
8687}
8688
8689kern_return_t
8690job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8691{
eabd1701 8692 if (!j) {
5b0a4722
A
8693 return BOOTSTRAP_NO_MEMORY;
8694 }
8695
8696 if ((errno = err)) {
8697 job_log_error(j, pri, "%s", msg);
8698 } else {
8699 job_log(j, pri, "%s", msg);
8700 }
8701
8702 return 0;
8703}
8704
eabd1701
A
8705void
8706job_setup_per_user_directory(job_t j, uid_t uid, const char *path)
8707{
8708 struct stat sb;
8709
8710 bool created = false;
8711 int r = stat(path, &sb);
8712 if ((r == -1 && errno == ENOENT) || (r == 0 && !S_ISDIR(sb.st_mode))) {
8713 if (r == 0) {
8714 job_log(j, LOG_NOTICE, "File at location of per-user launchd directory is not a directory. Moving aside: %s", path);
8715
8716 char old[PATH_MAX];
8717 snprintf(old, sizeof(old), "%s.movedaside", path);
8718 (void)job_assumes_zero_p(j, rename(path, old));
8719 }
8720
8721 (void)job_assumes_zero_p(j, mkdir(path, S_IRWXU));
8722 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8723 created = true;
8724 }
8725
8726 if (!created) {
8727 if (sb.st_uid != uid) {
8728 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper user ownership. Repairing: %s", path);
8729 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8730 }
8731 if (sb.st_gid != 0) {
8732 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper group ownership. Repairing: %s", path);
8733 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8734 }
8735 if (sb.st_mode != (S_IRWXU | S_IFDIR)) {
8736 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper mode. Repairing: %s", path);
8737 (void)job_assumes_zero_p(j, chmod(path, S_IRWXU));
8738 }
8739 }
8740}
8741
8742void
8743job_setup_per_user_directories(job_t j, uid_t uid, const char *label)
8744{
8745 char path[PATH_MAX];
8746
8747 (void)snprintf(path, sizeof(path), LAUNCHD_DB_PREFIX "/%s", label);
8748 job_setup_per_user_directory(j, uid, path);
8749
8750 (void)snprintf(path, sizeof(path), LAUNCHD_LOG_PREFIX "/%s", label);
8751 job_setup_per_user_directory(j, uid, path);
8752}
8753
ddbbfbc1 8754job_t
dcace88f 8755jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
5b0a4722 8756{
ddbbfbc1 8757 job_t ji = NULL;
5b0a4722
A
8758 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8759 if (!ji->per_user) {
8760 continue;
8761 }
8762 if (ji->mach_uid != which_user) {
8763 continue;
8764 }
8765 if (SLIST_EMPTY(&ji->machservices)) {
8766 continue;
8767 }
8768 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8769 continue;
8770 }
8771 break;
8772 }
eabd1701 8773
dcace88f 8774 if (unlikely(ji == NULL)) {
5b0a4722
A
8775 struct machservice *ms;
8776 char lbuf[1024];
eabd1701 8777
5b0a4722 8778 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
eabd1701 8779
5b0a4722 8780 sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
eabd1701 8781
5b0a4722 8782 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
eabd1701 8783
dcace88f
A
8784 if (ji != NULL) {
8785 auditinfo_addr_t auinfo = {
eabd1701
A
8786 .ai_termid = {
8787 .at_type = AU_IPv4
8788 },
dcace88f
A
8789 .ai_auid = which_user,
8790 .ai_asid = AU_ASSIGN_ASID,
8791 };
8792
8793 if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8794 job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8795 (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8796
8797 /* Kinda lame that we have to do this, but we can't create an
8798 * audit session without joining it.
8799 */
eabd1701 8800 (void)job_assumes(ji, audit_session_join(launchd_audit_port));
dcace88f
A
8801 ji->asid = auinfo.ai_asid;
8802 } else {
8803 job_log(ji, LOG_WARNING, "Could not set audit session!");
8804 job_remove(ji);
8805 return NULL;
8806 }
8807
ddbbfbc1
A
8808 ji->mach_uid = which_user;
8809 ji->per_user = true;
eabd1701
A
8810 ji->enable_transactions = true;
8811 job_setup_per_user_directories(ji, which_user, lbuf);
5b0a4722 8812
ddbbfbc1
A
8813 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8814 job_remove(ji);
8815 ji = NULL;
8816 } else {
eabd1701 8817 ms->upfront = true;
ddbbfbc1
A
8818 ms->per_user_hack = true;
8819 ms->hide = true;
dcace88f
A
8820
8821 ji = job_dispatch(ji, false);
ddbbfbc1 8822 }
5b0a4722 8823 }
5b0a4722 8824 } else {
ddbbfbc1 8825 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
5b0a4722
A
8826 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8827 }
eabd1701 8828
ddbbfbc1
A
8829 return ji;
8830}
5b0a4722 8831
ddbbfbc1
A
8832kern_return_t
8833job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
8834{
8835 struct ldcred *ldc = runtime_get_caller_creds();
8836 job_t jpu;
eabd1701
A
8837
8838 if (!j) {
8839 return BOOTSTRAP_NO_MEMORY;
8840 }
8841
8842 if (launchd_osinstaller) {
8843 return BOOTSTRAP_UNKNOWN_SERVICE;
8844 }
8845
ddbbfbc1 8846#if TARGET_OS_EMBEDDED
eabd1701 8847 // There is no need for per-user launchd's on embedded.
ddbbfbc1 8848 job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
eabd1701 8849 return BOOTSTRAP_UNKNOWN_SERVICE;
ddbbfbc1 8850#endif
eabd1701 8851
ddbbfbc1
A
8852#if HAVE_SANDBOX
8853 if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
8854 return BOOTSTRAP_NOT_PRIVILEGED;
5b0a4722 8855 }
ddbbfbc1 8856#endif
eabd1701 8857
ddbbfbc1 8858 job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
eabd1701 8859
ddbbfbc1
A
8860 if (unlikely(!pid1_magic)) {
8861 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
8862 return BOOTSTRAP_NOT_PRIVILEGED;
8863 }
eabd1701 8864
ddbbfbc1
A
8865 if (ldc->euid || ldc->uid) {
8866 which_user = ldc->euid ?: ldc->uid;
8867 }
eabd1701 8868
ddbbfbc1 8869 *up_cont = MACH_PORT_NULL;
eabd1701 8870
dcace88f 8871 jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);
eabd1701 8872
5b0a4722
A
8873 return 0;
8874}
8875
/* MIG callin implementing bootstrap_check_in2(): hand the calling job the
 * receive right for one of its declared Mach services.
 *
 * Behavior matrix:
 *   - strict check-in: the service must already exist, belong to this job,
 *     and be inactive.
 *   - non-strict, service missing: the service is created on the fly
 *     ("legacy" dynamic check-in), subject to a sandbox mach-register check.
 *   - non-strict, service present: reject hijack attempts by other jobs and
 *     double check-ins of an active service.
 * Dedicated-instance jobs only see their own service list, and the matched
 * job's instance UUID is copied back through `instance_id`.
 */
kern_return_t
job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
{
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
	struct ldcred *ldc = runtime_get_caller_creds();
	struct machservice *ms = NULL;
	job_t jo;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->dedicated_instance) {
		// Dedicated instances check in only against their own services.
		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
				uuid_copy(instance_id, j->instance_id);
				ms = msi;
				break;
			}
		}
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
	}

	if (strict) {
		if (likely(ms != NULL)) {
			if (ms->job != j) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			} else if (ms->isActive) {
				return BOOTSTRAP_SERVICE_ACTIVE;
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else if (ms == NULL) {
		if (job_assumes(j, !j->dedicated_instance)) {
			// Legacy path: create the service at check-in time.
			*serviceportp = MACH_PORT_NULL;

#if HAVE_SANDBOX
			if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
#endif
			if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
				return BOOTSTRAP_NO_MEMORY;
			}

			// Treat this like a legacy job.
			if (!j->legacy_mach_job) {
				ms->isActive = true;
				ms->recv = false;
			}

			if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
				job_log(j, LOG_APPLEONLY, "Please add the following service to the configuration file for this job: %s", servicename);
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else {
		// Service exists: make sure it actually belongs to the caller.
		if (unlikely((jo = machservice_job(ms)) != j)) {
			static pid_t last_warned_pid;

			// Rate-limit the hijack warning to one message per offending PID.
			if (last_warned_pid != ldc->pid) {
				job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
				last_warned_pid = ldc->pid;
			}

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (unlikely(machservice_active(ms))) {
			job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
	}

	job_checkin(j);
	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
8962
/* MIG callin implementing the deprecated bootstrap_register()/register2():
 * install a caller-supplied send right under `servicename` in the caller's
 * bootstrap. An existing inactive registration owned by the caller is
 * replaced; passing MACH_PORT_NULL with an existing service effectively
 * deregisters it (the old service is deleted and no new one is created).
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!per_pid_service && !j->legacy_LS_job) {
		job_log(j, LOG_APPLEONLY, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
	}

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
		if (pid1_magic) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);

	if (unlikely(ms)) {
		// The name is taken: only the owning job may replace it, it must not
		// be active, and launchd-held receive rights can't be clobbered by a
		// register() (that's what check_in() is for).
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		if (ms->recv && (serviceport != MACH_PORT_NULL)) {
			job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		job_checkin(j);
		machservice_delete(j, ms, false);
	}

	// Register the new send right (skipped when the caller passed
	// MACH_PORT_NULL, which leaves the name unregistered).
	if (likely(serviceport != MACH_PORT_NULL)) {
		if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	return BOOTSTRAP_SUCCESS;
}
9031
/* MIG callin implementing bootstrap_look_up2(): resolve `servicename` to a
 * send right in `*serviceportp`.
 *
 * Lookup scope depends on the flags and the requesting domain: per-PID
 * lookups and XPC-domain requests stay local; otherwise the search may walk
 * up the bootstrap hierarchy, and unresolved names are forwarded to the
 * inherited (parent) bootstrap — in which case the reply port has been moved
 * and MIG_NO_REPLY is returned. BOOTSTRAP_SPECIFIC_INSTANCE requests against
 * multi-instance jobs spawn/look up a dedicated subjob keyed by instance_id.
 */
kern_return_t
job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
{
	struct machservice *ms = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr;
	bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
	bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
	bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	bool xpc_req = (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN);

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

#if HAVE_SANDBOX
	/* We don't do sandbox checking for XPC domains because, by definition, all
	 * the services within your domain should be accessible to you.
	 */
	if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (per_pid_lookup) {
		// Per-PID services are scoped to target_pid and never walk the tree.
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		if (xpc_req) {
			// Requests from XPC domains stay local.
			ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
		} else {
			/* A strict lookup which is privileged won't even bother trying to
			 * find a service if we're not hosting the root Mach bootstrap.
			 */
			if (strict_lookup && privileged) {
				if (inherited_bootstrap_port == MACH_PORT_NULL) {
					ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
				}
			} else {
				ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
			}
		}
	}

	if (likely(ms)) {
		// Resolve aliases to the real service before inspecting it.
		ms = ms->alias ? ms->alias : ms;
		if (unlikely(specific_instance && ms->job->multiple_instances)) {
			job_t ji = NULL;
			job_t instance = NULL;
			LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
				if (uuid_compare(instance_id, ji->instance_id) == 0) {
					instance = ji;
					break;
				}
			}

			if (unlikely(instance == NULL)) {
				job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
				instance = job_new_subjob(ms->job, instance_id);
				if (job_assumes(j, instance != NULL)) {
					/* Disable this support for now. We only support having
					 * multi-instance jobs within private XPC domains.
					 */
#if 0
					/* If the job is multi-instance, in a singleton XPC domain
					 * and the request is not coming from within that singleton
					 * domain, we need to alias the new job into the requesting
					 * domain.
					 */
					if (!j->mgr->xpc_singleton && xpc_req) {
						(void)job_assumes(instance, job_new_alias(j->mgr, instance));
					}
#endif
					job_dispatch(instance, false);
				}
			}

			// Re-resolve the service against the instance's own service list.
			ms = NULL;
			if (job_assumes(j, instance != NULL)) {
				struct machservice *msi = NULL;
				SLIST_FOREACH(msi, &instance->machservices, sle) {
					/* sizeof(servicename) will return the size of a pointer,
					 * even though it's an array type, because when passing
					 * arrays as parameters in C, they implicitly degrade to
					 * pointers.
					 */
					if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
						ms = msi;
						break;
					}
				}
			}
		} else {
			// Hidden-and-inactive services and the per-user bootstrap hack
			// service are invisible to ordinary lookups.
			if (machservice_hidden(ms) && !machservice_active(ms)) {
				ms = NULL;
			} else if (unlikely(ms->per_user_hack)) {
				ms = NULL;
			}
		}
	}

	if (likely(ms)) {
		(void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
		*serviceportp = machservice_port(ms);

		kr = BOOTSTRAP_SUCCESS;
	} else if (strict_lookup && !privileged) {
		/* Hack: We need to simulate XPC's desire not to establish a hierarchy.
		 * So if XPC is doing the lookup, and it's not a privileged lookup, we
		 * won't forward. But if it is a privileged lookup, then we must
		 * forward.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	} else if (inherited_bootstrap_port != MACH_PORT_NULL) {
		// Requests from within an XPC domain don't get forwarded.
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		/* Clients potentially check the audit token of the reply to verify that
		 * the returned send right is trustworthy.
		 */
		(void)job_assumes_zero(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags));
		// The forward consumed the reply port; the parent will answer.
		return MIG_NO_REPLY;
	} else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/* 5240036 Should start background session when a lookup of CCacheServer
		 * occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt
		 * to guess what it is up to. If we find a EUID that isn't root, we
		 * force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
9179
9180kern_return_t
ddbbfbc1 9181job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
5b0a4722 9182{
eabd1701 9183 if (!j) {
5b0a4722
A
9184 return BOOTSTRAP_NO_MEMORY;
9185 }
9186
9187 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
9188 jobmgr_t jm = j->mgr;
9189
5b0a4722
A
9190 if (jobmgr_parent(jm)) {
9191 *parentport = jobmgr_parent(jm)->jm_port;
9192 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
9193 *parentport = jm->jm_port;
9194 } else {
eabd1701
A
9195 (void)job_assumes_zero(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp));
9196 // The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now
ddbbfbc1 9197 return MIG_NO_REPLY;
5b0a4722
A
9198 }
9199 return BOOTSTRAP_SUCCESS;
9200}
9201
9202kern_return_t
dcace88f
A
9203job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
9204{
5c88273d
A
9205 if (!j) {
9206 return BOOTSTRAP_NO_MEMORY;
9207 }
9208
dcace88f
A
9209 if (inherited_bootstrap_port == MACH_PORT_NULL) {
9210 *rootbsp = root_jobmgr->jm_port;
eabd1701 9211 (void)job_assumes_zero(j, launchd_mport_make_send(root_jobmgr->jm_port));
dcace88f
A
9212 } else {
9213 *rootbsp = inherited_bootstrap_port;
eabd1701 9214 (void)job_assumes_zero(j, launchd_mport_copy_send(inherited_bootstrap_port));
dcace88f
A
9215 }
9216
9217 return BOOTSTRAP_SUCCESS;
9218}
9219
/* MIG callin implementing bootstrap_info(): return three parallel
 * mig_allocate()d arrays describing every non-per-PID Mach service visible to
 * the caller — service name, owning job label (or short domain description),
 * and activity status. With a flat namespace the root manager is consulted
 * unless the caller's domain is an explicit subset or BOOTSTRAP_FORCE_LOCAL
 * is passed. All three counts are always set to the same value; empty results
 * return success with NULL arrays and a zero count.
 */
kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp,
	unsigned int *servicenames_cnt, name_array_t *servicejobsp,
	unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp,
	unsigned int *serviceactives_cnt, uint64_t flags)
{
	name_array_t service_names = NULL;
	name_array_t service_jobs = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	jobmgr_t jm;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

#if TARGET_OS_EMBEDDED
	struct ldcred *ldc = runtime_get_caller_creds();
	if (ldc->euid) {
		return EPERM;
	}
#endif // TARGET_OS_EMBEDDED

	// Decide which job manager's service table to enumerate.
	if (launchd_flat_mach_namespace) {
		if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
			jm = j->mgr;
		} else {
			jm = root_jobmgr;
		}
	} else {
		jm = j->mgr;
	}

	// First pass: count the non-per-PID services so we can size the arrays.
	unsigned int i = 0;
	struct machservice *msi = NULL;
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			cnt += !msi->per_pid ? 1 : 0;
		}
	}

	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!job_assumes(j, service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
	if (!job_assumes(j, service_jobs != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!job_assumes(j, service_actives != NULL)) {
		goto out_bad;
	}

	// Second pass: populate the three parallel arrays.
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			if (!msi->per_pid) {
				strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
				/* NOTE(review): this reassigns the LIST_FOREACH iterator to
				 * the alias target, so the next iteration step follows the
				 * alias's name_hash_sle link rather than the original
				 * entry's. Presumably intended to be a separate local —
				 * verify against upstream before relying on the iteration
				 * order here.
				 */
				msi = msi->alias ? msi->alias : msi;
				if (msi->job->mgr->shortdesc) {
					strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
				} else {
					strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
				}
				service_actives[cnt2] = machservice_status(msi);
				cnt2++;
			}
		}
	}

	(void)job_assumes(j, cnt == cnt2);

out:
	*servicenamesp = service_names;
	*servicejobsp = service_jobs;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	// Release whatever we managed to allocate before the failure.
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_jobs) {
		mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
9319
ddbbfbc1 9320kern_return_t
eabd1701
A
9321job_mig_lookup_children(job_t j, mach_port_array_t *child_ports,
9322 mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names,
9323 mach_msg_type_number_t *child_names_cnt,
9324 bootstrap_property_array_t *child_properties,
9325 mach_msg_type_number_t *child_properties_cnt)
5b0a4722 9326{
ddbbfbc1 9327 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
eabd1701 9328 if (!j) {
ddbbfbc1
A
9329 return BOOTSTRAP_NO_MEMORY;
9330 }
eabd1701 9331
ddbbfbc1 9332 struct ldcred *ldc = runtime_get_caller_creds();
eabd1701 9333
ddbbfbc1
A
9334 /* Only allow root processes to look up children, even if we're in the per-user launchd.
9335 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
9336 * in a non-flat namespace.
9337 */
dcace88f 9338 if (ldc->euid != 0) {
ddbbfbc1
A
9339 job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
9340 return BOOTSTRAP_NOT_PRIVILEGED;
9341 }
eabd1701 9342
ddbbfbc1 9343 unsigned int cnt = 0;
eabd1701 9344
ddbbfbc1
A
9345 jobmgr_t jmr = j->mgr;
9346 jobmgr_t jmi = NULL;
dcace88f 9347 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
ddbbfbc1
A
9348 cnt++;
9349 }
eabd1701
A
9350
9351 // Find our per-user launchds if we're PID 1.
ddbbfbc1 9352 job_t ji = NULL;
dcace88f
A
9353 if (pid1_magic) {
9354 LIST_FOREACH(ji, &jmr->jobs, sle) {
ddbbfbc1
A
9355 cnt += ji->per_user ? 1 : 0;
9356 }
9357 }
eabd1701 9358
dcace88f 9359 if (cnt == 0) {
ddbbfbc1
A
9360 return BOOTSTRAP_NO_CHILDREN;
9361 }
eabd1701 9362
ddbbfbc1 9363 mach_port_array_t _child_ports = NULL;
95379394
A
9364 name_array_t _child_names = NULL;
9365 bootstrap_property_array_t _child_properties = NULL;
9366
ddbbfbc1 9367 mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
dcace88f 9368 if (!job_assumes(j, _child_ports != NULL)) {
ddbbfbc1
A
9369 kr = BOOTSTRAP_NO_MEMORY;
9370 goto out_bad;
9371 }
eabd1701 9372
ddbbfbc1 9373 mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
dcace88f 9374 if (!job_assumes(j, _child_names != NULL)) {
ddbbfbc1
A
9375 kr = BOOTSTRAP_NO_MEMORY;
9376 goto out_bad;
9377 }
eabd1701 9378
ddbbfbc1 9379 mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
dcace88f 9380 if (!job_assumes(j, _child_properties != NULL)) {
ddbbfbc1
A
9381 kr = BOOTSTRAP_NO_MEMORY;
9382 goto out_bad;
9383 }
eabd1701 9384
ddbbfbc1 9385 unsigned int cnt2 = 0;
dcace88f 9386 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
eabd1701 9387 if (jobmgr_assumes_zero(jmi, launchd_mport_make_send(jmi->jm_port)) == KERN_SUCCESS) {
ddbbfbc1
A
9388 _child_ports[cnt2] = jmi->jm_port;
9389 } else {
9390 _child_ports[cnt2] = MACH_PORT_NULL;
9391 }
eabd1701 9392
ddbbfbc1
A
9393 strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
9394 _child_properties[cnt2] = jmi->properties;
eabd1701 9395
ddbbfbc1
A
9396 cnt2++;
9397 }
eabd1701
A
9398
9399 if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
dcace88f
A
9400 if (ji->per_user) {
9401 if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
ddbbfbc1 9402 mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));
eabd1701
A
9403
9404 if (job_assumes_zero(ji, launchd_mport_copy_send(port)) == KERN_SUCCESS) {
ddbbfbc1
A
9405 _child_ports[cnt2] = port;
9406 } else {
9407 _child_ports[cnt2] = MACH_PORT_NULL;
9408 }
9409 } else {
9410 _child_ports[cnt2] = MACH_PORT_NULL;
9411 }
eabd1701 9412
ddbbfbc1
A
9413 strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
9414 _child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;
eabd1701 9415
ddbbfbc1
A
9416 cnt2++;
9417 }
9418 }
eabd1701 9419
ddbbfbc1
A
9420 *child_names_cnt = cnt;
9421 *child_ports_cnt = cnt;
9422 *child_properties_cnt = cnt;
eabd1701 9423
ddbbfbc1
A
9424 *child_names = _child_names;
9425 *child_ports = _child_ports;
9426 *child_properties = _child_properties;
eabd1701 9427
ddbbfbc1 9428 unsigned int i = 0;
dcace88f 9429 for (i = 0; i < cnt; i++) {
ddbbfbc1
A
9430 job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
9431 }
eabd1701 9432
ddbbfbc1
A
9433 return BOOTSTRAP_SUCCESS;
9434out_bad:
dcace88f 9435 if (_child_ports) {
ddbbfbc1
A
9436 mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
9437 }
eabd1701 9438
dcace88f 9439 if (_child_names) {
95379394 9440 mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
ddbbfbc1 9441 }
eabd1701 9442
dcace88f 9443 if (_child_properties) {
ddbbfbc1
A
9444 mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
9445 }
eabd1701 9446
ddbbfbc1
A
9447 return kr;
9448}
9449
9450kern_return_t
eabd1701 9451job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
ddbbfbc1 9452{
ddbbfbc1 9453 struct ldcred *ldc = runtime_get_caller_creds();
dcace88f 9454 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
ddbbfbc1
A
9455 return BOOTSTRAP_NOT_PRIVILEGED;
9456 }
ddbbfbc1 9457
ddbbfbc1
A
9458 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
9459 * directly by launchd as agents.
9460 */
9461 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
dcace88f 9462 if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
ddbbfbc1
A
9463 *managed = true;
9464 }
eabd1701 9465
ddbbfbc1
A
9466 return BOOTSTRAP_SUCCESS;
9467}
9468
9469kern_return_t
9470job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
9471{
5c88273d
A
9472 if (!j) {
9473 return BOOTSTRAP_NO_MEMORY;
9474 }
9475
ddbbfbc1
A
9476 struct ldcred *ldc = runtime_get_caller_creds();
9477 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
dcace88f
A
9478
9479#if HAVE_SANDBOX
9480 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
9481 return BOOTSTRAP_NOT_PRIVILEGED;
9482 }
9483#endif
9484
ddbbfbc1 9485 mach_port_t _mp = MACH_PORT_NULL;
dcace88f
A
9486 if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
9487 job_t target_j = job_find(NULL, label);
9488 if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
9489 if (target_j->j_port == MACH_PORT_NULL) {
9490 (void)job_assumes(target_j, job_setup_machport(target_j) == true);
ddbbfbc1 9491 }
eabd1701 9492
ddbbfbc1
A
9493 _mp = target_j->j_port;
9494 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
9495 } else {
9496 kr = BOOTSTRAP_NO_MEMORY;
9497 }
9498 }
9499
9500 *mp = _mp;
9501 return kr;
9502}
5b0a4722 9503
ddbbfbc1 9504kern_return_t
dcace88f 9505job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
ddbbfbc1 9506{
eabd1701
A
9507#if TARGET_OS_EMBEDDED
9508 return KERN_SUCCESS;
9509#endif
9510
5c88273d
A
9511 if (!j) {
9512 return BOOTSTRAP_NO_MEMORY;
9513 }
9514
ddbbfbc1
A
9515 uuid_string_t uuid_str;
9516 uuid_unparse(uuid, uuid_str);
dcace88f 9517 job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);
eabd1701 9518
ddbbfbc1 9519 job_t ji = NULL, jt = NULL;
dcace88f 9520 LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
ddbbfbc1
A
9521 uuid_string_t uuid_str2;
9522 uuid_unparse(ji->expected_audit_uuid, uuid_str2);
9523
dcace88f 9524 if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
ddbbfbc1 9525 uuid_clear(ji->expected_audit_uuid);
eabd1701
A
9526 if (asport != MACH_PORT_NULL) {
9527 job_log(ji, LOG_DEBUG, "Job should join session with port 0x%x", asport);
9528 (void)job_assumes_zero(j, launchd_mport_copy_send(asport));
ddbbfbc1
A
9529 } else {
9530 job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
9531 }
eabd1701 9532
dcace88f 9533 ji->asport = asport;
ddbbfbc1 9534 LIST_REMOVE(ji, needing_session_sle);
b97faa4c
A
9535
9536 if (ji->event_monitor) {
9537 eventsystem_ping();
9538 } else {
9539 job_dispatch(ji, false);
9540 }
ddbbfbc1
A
9541 }
9542 }
eabd1701 9543
ddbbfbc1
A
9544 /* Each job that the session port was set for holds a reference. At the end of
9545 * the loop, there will be one extra reference belonging to this MiG protocol.
9546 * We need to release it so that the session goes away when all the jobs
9547 * referencing it are unloaded.
9548 */
eabd1701 9549 (void)job_assumes_zero(j, launchd_mport_deallocate(asport));
ddbbfbc1
A
9550
9551 return KERN_SUCCESS;
9552}
ddbbfbc1
A
9553
9554jobmgr_t
9555jobmgr_find_by_name(jobmgr_t jm, const char *where)
9556{
9557 jobmgr_t jmi, jmi2;
5b0a4722 9558
eabd1701 9559 // NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic.
5b0a4722 9560 if (where == NULL) {
ddbbfbc1 9561 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
5b0a4722
A
9562 where = VPROCMGR_SESSION_LOGINWINDOW;
9563 } else {
9564 where = VPROCMGR_SESSION_AQUA;
9565 }
9566 }
9567
ddbbfbc1
A
9568 if (strcasecmp(jm->name, where) == 0) {
9569 return jm;
9570 }
eabd1701 9571
dcace88f 9572 if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
ddbbfbc1
A
9573 jmi = root_jobmgr;
9574 goto jm_found;
5b0a4722
A
9575 }
9576
ddbbfbc1
A
9577 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9578 if (unlikely(jmi->shutting_down)) {
5b0a4722 9579 continue;
dcace88f
A
9580 } else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
9581 continue;
5b0a4722
A
9582 } else if (strcasecmp(jmi->name, where) == 0) {
9583 goto jm_found;
ddbbfbc1 9584 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
5b0a4722
A
9585 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
9586 if (strcasecmp(jmi2->name, where) == 0) {
9587 jmi = jmi2;
9588 goto jm_found;
9589 }
9590 }
9591 }
9592 }
eabd1701 9593
5b0a4722 9594jm_found:
ddbbfbc1 9595 return jmi;
5b0a4722
A
9596}
9597
9598kern_return_t
dcace88f 9599job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
5b0a4722
A
9600{
9601 mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
9602 mach_port_array_t l2l_ports = NULL;
9603 mach_port_t reqport, rcvright;
9604 kern_return_t kr = 1;
9605 launch_data_t out_obj_array = NULL;
ddbbfbc1 9606 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
9607 jobmgr_t jmr = NULL;
9608
eabd1701 9609 if (!j) {
5b0a4722
A
9610 return BOOTSTRAP_NO_MEMORY;
9611 }
9612
ddbbfbc1 9613 if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
5b0a4722
A
9614 job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");
9615
9616 kr = BOOTSTRAP_NOT_PRIVILEGED;
9617 goto out;
9618 }
9619
9620 job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);
9621
ddbbfbc1 9622 kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
eabd1701 9623 if (job_assumes_zero(j, kr) != 0) {
5b0a4722
A
9624 goto out;
9625 }
9626
eabd1701 9627 if (launch_data_array_get_count(out_obj_array) != l2l_port_cnt) {
95379394 9628 os_assert_zero(l2l_port_cnt);
eabd1701 9629 }
5b0a4722 9630
dcace88f 9631 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
5b0a4722
A
9632 kr = BOOTSTRAP_NO_MEMORY;
9633 goto out;
9634 }
9635
95379394
A
9636 if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
9637 jobmgr_log(jmr, LOG_NOTICE, "Registering new GUI session.");
9638 kr = vproc_mig_register_gui_session(inherited_bootstrap_port, asport);
9639 if (kr) {
9640 jobmgr_log(jmr, LOG_ERR, "Failed to register GUI session with PID 1: 0x%x/0x%x", inherited_bootstrap_port, kr);
9641 }
9642 }
9643
ddbbfbc1
A
9644 jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;
9645
9646 /* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
9647 * processing an IPC request, we'll do this action before the new job manager can get any IPC
9648 * requests. This serialization is guaranteed since we are single-threaded in that respect.
9649 */
dcace88f 9650 if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
eabd1701
A
9651 // This is so awful.
9652 // Remove the job from its current job manager.
ddbbfbc1
A
9653 LIST_REMOVE(j, sle);
9654 LIST_REMOVE(j, pid_hash_sle);
9655
eabd1701 9656 // Put the job into the target job manager.
ddbbfbc1
A
9657 LIST_INSERT_HEAD(&jmr->jobs, j, sle);
9658 LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
eabd1701 9659
ddbbfbc1
A
9660 j->mgr = jmr;
9661 job_set_global_on_demand(j, true);
eabd1701 9662
dcace88f 9663 if (!j->holds_ref) {
eabd1701 9664 job_log(j, LOG_PERF, "Job moved subset into: %s", j->mgr->name);
587e987e
A
9665 j->holds_ref = true;
9666 runtime_add_ref();
9667 }
ddbbfbc1 9668 }
eabd1701 9669
5b0a4722
A
9670 for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
9671 launch_data_t tmp, obj_at_idx;
9672 struct machservice *ms;
9673 job_t j_for_service;
9674 const char *serv_name;
9675 pid_t target_pid;
9676 bool serv_perpid;
9677
dcace88f
A
9678 (void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
9679 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
5b0a4722 9680 target_pid = (pid_t)launch_data_get_integer(tmp);
dcace88f 9681 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
5b0a4722 9682 serv_perpid = launch_data_get_bool(tmp);
dcace88f 9683 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
5b0a4722
A
9684 serv_name = launch_data_get_string(tmp);
9685
9686 j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);
9687
ddbbfbc1 9688 if (unlikely(!j_for_service)) {
eabd1701
A
9689 // The PID probably exited
9690 (void)job_assumes_zero(j, launchd_mport_deallocate(l2l_ports[l2l_i]));
5b0a4722
A
9691 continue;
9692 }
9693
ddbbfbc1
A
9694 if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
9695 job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
5b0a4722
A
9696 machservice_request_notifications(ms);
9697 }
9698 }
9699
9700 kr = 0;
9701
9702out:
9703 if (out_obj_array) {
9704 launch_data_free(out_obj_array);
9705 }
9706
9707 if (l2l_ports) {
9708 mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
9709 }
9710
9711 if (kr == 0) {
9712 if (target_subset) {
eabd1701 9713 (void)job_assumes_zero(j, launchd_mport_deallocate(target_subset));
dcace88f
A
9714 }
9715 if (asport) {
eabd1701 9716 (void)job_assumes_zero(j, launchd_mport_deallocate(asport));
5b0a4722
A
9717 }
9718 } else if (jmr) {
9719 jobmgr_shutdown(jmr);
9720 }
9721
9722 return kr;
9723}
9724
ddbbfbc1 9725kern_return_t
dcace88f 9726job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
ddbbfbc1 9727{
5c88273d
A
9728 if (!j) {
9729 return BOOTSTRAP_NO_MEMORY;
9730 }
9731
ddbbfbc1 9732 job_t j2;
eabd1701 9733
ddbbfbc1
A
9734 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9735 if (j->mgr->session_initialized) {
9736 job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
9737 kr = BOOTSTRAP_NOT_PRIVILEGED;
5f168eaf 9738 return kr;
ddbbfbc1
A
9739 } else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9740 jobmgr_t jmi;
eabd1701 9741
ddbbfbc1
A
9742 /*
9743 * 5330262
9744 *
9745 * We're working around LoginWindow and the WindowServer.
9746 *
9747 * In practice, there is only one LoginWindow session. Unfortunately, for certain
9748 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
9749 * spawns a replacement loginwindow session before cleaning up the previous one.
9750 *
9751 * We're going to use the creation of a new LoginWindow context as a clue that the
9752 * previous LoginWindow context is on the way out and therefore we should just
9753 * kick-start the shutdown of it.
9754 */
eabd1701 9755
ddbbfbc1
A
9756 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9757 if (unlikely(jmi->shutting_down)) {
9758 continue;
9759 } else if (strcasecmp(jmi->name, session_type) == 0) {
9760 jobmgr_shutdown(jmi);
9761 break;
9762 }
9763 }
95379394
A
9764 } else if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
9765 (void)job_assumes_zero(j, runtime_remove_mport(j->mgr->jm_port));
9766 }
eabd1701 9767
ddbbfbc1
A
9768 jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
9769 strcpy(j->mgr->name_init, session_type);
eabd1701 9770
ddbbfbc1 9771 if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
dcace88f
A
9772 j2->asport = asport;
9773 (void)job_assumes(j, job_dispatch(j2, true));
ddbbfbc1
A
9774 kr = BOOTSTRAP_SUCCESS;
9775 }
eabd1701 9776
ddbbfbc1
A
9777 return kr;
9778}
9779
9780kern_return_t
dcace88f 9781job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
ddbbfbc1 9782{
62123c11
A
9783 struct ldcred *ldc = runtime_get_caller_creds();
9784 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9785 jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
9786 return BOOTSTRAP_NO_MEMORY;
9787 }
9788
5c88273d
A
9789 if (j->mgr->shutting_down) {
9790 return BOOTSTRAP_UNKNOWN_SERVICE;
9791 }
9792
ddbbfbc1 9793 job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);
eabd1701 9794
dcace88f 9795 if (!job_assumes(j, pid1_magic == false)) {
ddbbfbc1
A
9796 job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
9797 return BOOTSTRAP_NOT_PRIVILEGED;
9798 }
eabd1701 9799
dcace88f 9800 if (!j->anonymous) {
ddbbfbc1
A
9801 job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
9802 return BOOTSTRAP_NOT_PRIVILEGED;
9803 }
eabd1701 9804
ddbbfbc1 9805 jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
dcace88f 9806 if (target_jm == j->mgr) {
ddbbfbc1 9807 job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
eabd1701
A
9808 (void)job_assumes_zero(j, launchd_mport_deallocate(asport));
9809 (void)job_assumes_zero(j, launchd_mport_deallocate(requestor_port));
ddbbfbc1
A
9810 *new_bsport = target_jm->jm_port;
9811 return BOOTSTRAP_SUCCESS;
9812 }
eabd1701 9813
dcace88f
A
9814 if (!target_jm) {
9815 target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
9816 if (target_jm) {
ddbbfbc1 9817 target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
eabd1701 9818 (void)job_assumes_zero(j, launchd_mport_deallocate(asport));
ddbbfbc1
A
9819 }
9820 }
eabd1701 9821
dcace88f 9822 if (!job_assumes(j, target_jm != NULL)) {
ddbbfbc1
A
9823 job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
9824 return BOOTSTRAP_NO_MEMORY;
9825 }
eabd1701
A
9826
9827 // Remove the job from it's current job manager.
ddbbfbc1
A
9828 LIST_REMOVE(j, sle);
9829 LIST_REMOVE(j, pid_hash_sle);
9830
9831 job_t ji = NULL, jit = NULL;
dcace88f
A
9832 LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
9833 if (ji == j) {
ddbbfbc1
A
9834 LIST_REMOVE(ji, global_env_sle);
9835 break;
9836 }
9837 }
eabd1701
A
9838
9839 // Put the job into the target job manager.
ddbbfbc1
A
9840 LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
9841 LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
eabd1701 9842
dcace88f 9843 if (ji) {
ddbbfbc1
A
9844 LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
9845 }
eabd1701
A
9846
9847 // Move our Mach services over if we're not in a flat namespace.
9848 if (!launchd_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
ddbbfbc1 9849 struct machservice *msi = NULL, *msit = NULL;
dcace88f 9850 SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
ddbbfbc1
A
9851 LIST_REMOVE(msi, name_hash_sle);
9852 LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
9853 }
9854 }
eabd1701 9855
ddbbfbc1 9856 j->mgr = target_jm;
eabd1701 9857
dcace88f 9858 if (!j->holds_ref) {
587e987e
A
9859 /* Anonymous jobs which move around are particularly interesting to us, so we want to
9860 * stick around while they're still around.
9861 * For example, login calls into the PAM launchd module, which moves the process into
9862 * the StandardIO session by default. So we'll hold a reference on that job to prevent
9863 * ourselves from going away.
9864 */
9865 j->holds_ref = true;
eabd1701 9866 job_log(j, LOG_PERF, "Job switched into manager: %s", j->mgr->name);
587e987e
A
9867 runtime_add_ref();
9868 }
eabd1701 9869
587e987e 9870 *new_bsport = target_jm->jm_port;
eabd1701 9871
ddbbfbc1
A
9872 return KERN_SUCCESS;
9873}
9874
5b0a4722
A
9875kern_return_t
9876job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
9877 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
9878 mach_port_array_t *portsp, unsigned int *ports_cnt)
9879{
9880 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
9881 mach_port_array_t ports = NULL;
9882 unsigned int cnt = 0, cnt2 = 0;
9883 size_t packed_size;
9884 struct machservice *ms;
9885 jobmgr_t jm;
9886 job_t ji;
9887
eabd1701 9888 if (!j) {
5b0a4722
A
9889 return BOOTSTRAP_NO_MEMORY;
9890 }
9891
9892 jm = j->mgr;
9893
ddbbfbc1 9894 if (unlikely(!pid1_magic)) {
5b0a4722
A
9895 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
9896 return BOOTSTRAP_NOT_PRIVILEGED;
ddbbfbc1
A
9897 }
9898 if (unlikely(jobmgr_parent(jm) == NULL)) {
5b0a4722
A
9899 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
9900 return BOOTSTRAP_NOT_PRIVILEGED;
ddbbfbc1
A
9901 }
9902 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
5b0a4722
A
9903 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
9904 return BOOTSTRAP_NOT_PRIVILEGED;
ddbbfbc1
A
9905 }
9906 if (unlikely(!j->anonymous)) {
5b0a4722
A
9907 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
9908 return BOOTSTRAP_NOT_PRIVILEGED;
9909 }
9910
9911 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
9912
9913 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
9914 if (!job_assumes(j, outdata_obj_array)) {
9915 goto out_bad;
9916 }
9917
9918 *outdataCnt = 20 * 1024 * 1024;
9919 mig_allocate(outdata, *outdataCnt);
9920 if (!job_assumes(j, *outdata != 0)) {
9921 return 1;
9922 }
9923
9924 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9925 if (!ji->anonymous) {
9926 continue;
9927 }
9928 SLIST_FOREACH(ms, &ji->machservices, sle) {
9929 cnt++;
9930 }
9931 }
9932
9933 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
ddbbfbc1 9934 if (!job_assumes(j, ports != NULL)) {
5b0a4722
A
9935 goto out_bad;
9936 }
9937
9938 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9939 if (!ji->anonymous) {
9940 continue;
9941 }
9942
9943 SLIST_FOREACH(ms, &ji->machservices, sle) {
9944 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
dcace88f 9945 (void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
5b0a4722
A
9946 } else {
9947 goto out_bad;
9948 }
9949
9950 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
dcace88f 9951 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
5b0a4722
A
9952 } else {
9953 goto out_bad;
9954 }
9955
9956 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
dcace88f 9957 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
5b0a4722
A
9958 } else {
9959 goto out_bad;
9960 }
9961
9962 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
dcace88f 9963 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
5b0a4722
A
9964 } else {
9965 goto out_bad;
9966 }
9967
9968 ports[cnt2] = machservice_port(ms);
9969
eabd1701
A
9970 // Increment the send right by one so we can shutdown the jobmgr cleanly
9971 (void)jobmgr_assumes_zero(jm, launchd_mport_copy_send(ports[cnt2]));
5b0a4722
A
9972 cnt2++;
9973 }
9974 }
9975
dcace88f 9976 (void)job_assumes(j, cnt == cnt2);
5b0a4722 9977
ddbbfbc1 9978 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
5b0a4722
A
9979 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
9980 if (!job_assumes(j, packed_size != 0)) {
9981 goto out_bad;
9982 }
9983
9984 launch_data_free(outdata_obj_array);
9985
9986 *portsp = ports;
9987 *ports_cnt = cnt;
9988
9989 *reqport = jm->req_port;
9990 *rcvright = jm->jm_port;
9991
9992 jm->req_port = 0;
9993 jm->jm_port = 0;
9994
9995 workaround_5477111 = j;
9996
9997 jobmgr_shutdown(jm);
9998
9999 return BOOTSTRAP_SUCCESS;
10000
10001out_bad:
10002 if (outdata_obj_array) {
10003 launch_data_free(outdata_obj_array);
10004 }
10005 if (*outdata) {
10006 mig_deallocate(*outdata, *outdataCnt);
10007 }
10008 if (ports) {
10009 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
10010 }
10011
10012 return BOOTSTRAP_NO_MEMORY;
10013}
10014
10015kern_return_t
10016job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
10017{
10018 int bsdepth = 0;
10019 jobmgr_t jmr;
10020
eabd1701 10021 if (!j) {
5b0a4722
A
10022 return BOOTSTRAP_NO_MEMORY;
10023 }
5c88273d
A
10024 if (j->mgr->shutting_down) {
10025 return BOOTSTRAP_UNKNOWN_SERVICE;
10026 }
5b0a4722
A
10027
10028 jmr = j->mgr;
10029
10030 while ((jmr = jobmgr_parent(jmr)) != NULL) {
10031 bsdepth++;
10032 }
10033
eabd1701 10034 // Since we use recursion, we need an artificial depth for subsets
ddbbfbc1 10035 if (unlikely(bsdepth > 100)) {
5b0a4722
A
10036 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
10037 return BOOTSTRAP_NO_MEMORY;
10038 }
10039
ddbbfbc1
A
10040 char name[NAME_MAX];
10041 snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
10042
dcace88f 10043 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
ddbbfbc1 10044 if (unlikely(requestorport == MACH_PORT_NULL)) {
5b0a4722
A
10045 return BOOTSTRAP_NOT_PRIVILEGED;
10046 }
10047 return BOOTSTRAP_NO_MEMORY;
10048 }
10049
10050 *subsetportp = jmr->jm_port;
ddbbfbc1 10051 jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
eabd1701 10052
587e987e
A
10053 /* A job could create multiple subsets, so only add a reference the first time
10054 * it does so we don't have to keep a count.
10055 */
dcace88f 10056 if (j->anonymous && !j->holds_ref) {
eabd1701 10057 job_log(j, LOG_PERF, "Job created subset: %s", jmr->name);
587e987e
A
10058 j->holds_ref = true;
10059 runtime_add_ref();
10060 }
eabd1701 10061
ddbbfbc1 10062 job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
5b0a4722
A
10063 return BOOTSTRAP_SUCCESS;
10064}
10065
dcace88f 10066job_t
eabd1701 10067_xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
dcace88f
A
10068{
10069 jobmgr_t where2put = NULL;
10070
eabd1701
A
10071 if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
10072 errno = EINVAL;
10073 return NULL;
10074 }
10075
10076 launch_data_t ldlabel = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL);
10077 if (!ldlabel || launch_data_get_type(ldlabel) != LAUNCH_DATA_STRING) {
10078 errno = EINVAL;
10079 return NULL;
10080 }
10081
10082 const char *label = launch_data_get_string(ldlabel);
10083 jobmgr_log(jm, LOG_DEBUG, "Importing service: %s", label);
10084
dcace88f
A
10085 launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
10086 if (destname) {
eabd1701
A
10087 bool supported_domain = false;
10088
dcace88f
A
10089 if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
10090 const char *str = launch_data_get_string(destname);
10091 if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
10092 where2put = _s_xpc_system_domain;
10093 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
10094 where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
eabd1701 10095 supported_domain = true;
dcace88f
A
10096 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
10097 where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
10098 } else {
10099 jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
10100 errno = EINVAL;
10101 }
10102 } else {
10103 jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
10104 errno = EINVAL;
10105 }
f36da725 10106
eabd1701 10107 if (where2put && !supported_domain) {
dcace88f
A
10108 launch_data_t mi = NULL;
10109 if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
10110 if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
10111 jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
10112 where2put = NULL;
10113 errno = EINVAL;
10114 }
10115 }
10116 }
10117 } else {
10118 where2put = jm;
10119 }
10120
10121 job_t j = NULL;
10122 if (where2put) {
eabd1701
A
10123 /* Gross. If the service already exists in a singleton domain, then
10124 * jobmgr_import2() will return the existing job. But if we fail to alias
10125 * this job, we will normally want to remove it. But if we did not create
10126 * it in the first place, then we need to avoid removing it. So check
10127 * errno against EEXIST in the success case and if it's EEXIST, then do
10128 * not remove the original job in the event of a failed alias.
10129 *
10130 * This really needs to be re-thought, but I think it'll require a larger
10131 * evaluation of launchd's data structures. Right now, once a job is
10132 * imported into a singleton domain, it won't be removed until the system
10133 * shuts down, but that may not always be true. If it ever changes, we'll
10134 * have a problem because we'll have to account for all existing aliases
10135 * and clean them up somehow. Or just start ref-counting. I knew this
10136 * aliasing stuff would be trouble...
10137 *
10138 * <rdar://problem/10646503>
10139 */
dcace88f 10140 jobmgr_log(where2put, LOG_DEBUG, "Importing service...");
eabd1701
A
10141
10142 errno = 0;
10143 if ((j = jobmgr_import2(where2put, pload))) {
10144 bool created = (errno != EEXIST);
dcace88f 10145 j->xpc_service = true;
eabd1701 10146
dcace88f
A
10147 if (where2put->xpc_singleton) {
10148 /* If the service was destined for one of the global domains,
10149 * then we have to alias it into our local domain to reserve the
10150 * name.
10151 */
eabd1701
A
10152 job_t ja = NULL;
10153 if (!(ja = job_new_alias(jm, j))) {
dcace88f
A
10154 /* If we failed to alias the job because of a conflict over
10155 * the label, then we remove it from the global domain. We
10156 * don't want to risk having imported a malicious job into
10157 * one of the global domains.
10158 */
10159 if (errno != EEXIST) {
eabd1701 10160 job_log(j, LOG_ERR, "Failed to alias job into: %s: %d: %s", where2put->name, errno, strerror(errno));
dcace88f 10161 } else {
eabd1701 10162 errno = 0;
dcace88f
A
10163 }
10164
eabd1701
A
10165 if (created) {
10166 jobmgr_log(jm, LOG_WARNING, "Singleton service already existed in job-local namespace. Removing: %s", j->label);
10167 job_remove(j);
10168 }
10169
10170 j = NULL;
dcace88f 10171 } else {
eabd1701
A
10172 jobmgr_log(jm, LOG_DEBUG, "Aliased service into local domain: %s", j->label);
10173 (void)job_dispatch(j, false);
dcace88f
A
10174 ja->xpc_service = true;
10175 j = ja;
10176 }
eabd1701
A
10177 } else {
10178 (void)job_dispatch(j, false);
dcace88f
A
10179 }
10180 }
eabd1701
A
10181 } else {
10182 jobmgr_log(jm, LOG_DEBUG, "Could not find destination for service: %s", label);
dcace88f
A
10183 }
10184
10185 return j;
f36da725
A
10186}
10187
eabd1701
A
10188int
10189_xpc_domain_import_services(job_t j, launch_data_t services)
10190{
10191 int error = EINVAL;
10192 if (launch_data_get_type(services) != LAUNCH_DATA_ARRAY) {
10193 return error;
10194 }
10195
10196 size_t i = 0;
10197 size_t c = launch_data_array_get_count(services);
10198 jobmgr_log(j->mgr, LOG_DEBUG, "Importing new services: %lu", c);
10199
10200 for (i = 0; i < c; i++) {
10201 jobmgr_log(j->mgr, LOG_DEBUG, "Importing service at index: %lu", i);
10202
10203 job_t nj = NULL;
10204 launch_data_t ploadi = launch_data_array_get_index(services, i);
10205 if (!(nj = _xpc_domain_import_service(j->mgr, ploadi))) {
10206 if (!j->mgr->session_initialized && errno) {
10207 /* Service import failures are only fatal if the domain is being
10208 * initialized. If we're extending the domain, we can run into
10209 * errors with services already existing, so we just ignore them.
10210 * In the case of a domain extension, we don't want to halt the
10211 * operation if we run into an error with one service.
10212 *
10213 * <rdar://problem/10842779>
10214 */
10215 jobmgr_log(j->mgr, LOG_ERR, "Failed to import service at index: %lu: %d: %s", i, errno, strerror(errno));
10216 error = errno;
10217 break;
10218 }
10219 } else {
10220 jobmgr_log(j->mgr, LOG_DEBUG, "Imported service: %s", nj->label);
10221 }
10222 }
10223
10224 if (i == c) {
10225 error = 0;
10226 }
10227
10228 return error;
10229}
10230
/* Create the skeleton of a new XPC domain (a child job manager of the root)
 * on behalf of the privileged XPC bootstrapper.
 *
 * j       - caller's job; must have the xpc_bootstrapper bit set.
 * reqport - requestor port that is added to our port set.
 * dport   - port passed through to jobmgr_new().
 *
 * Returns BOOTSTRAP_SUCCESS on creation, BOOTSTRAP_NOT_PRIVILEGED for any
 * policy violation, BOOTSTRAP_UNKNOWN_SERVICE for bad arguments, and
 * BOOTSTRAP_NO_MEMORY if jobmgr_new() fails.
 */
kern_return_t
xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
{
	// XPC domains only exist in the PID-1 instance of launchd.
	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (!j || !MACH_PORT_VALID(reqport)) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}
	// Refuse to create new domains during shutdown.
	if (root_jobmgr->shutting_down) {
		jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	// Only the designated XPC bootstrapper job may create domains.
	if (!j->xpc_bootstrapper) {
		job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	/* All XPC domains are children of the root job manager. What we're creating
	 * here is really just a skeleton. By creating it, we're adding reqp to our
	 * port set. It will have two messages on it. The first specifies the
	 * environment of the originator. This is so we can cache it and hand it to
	 * xpcproxy to bootstrap our services. The second is the set of jobs that is
	 * to be bootstrapped in.
	 */
	jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
	if (job_assumes(j, jm != NULL)) {
		jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
		jm->shortdesc = "private";
		kr = BOOTSTRAP_SUCCESS;
	}

	return kr;
}
10267
/* First of the two messages expected on a newly imported XPC domain's port:
 * record the originating process's environment (ports, credentials, context
 * blob) into the domain's job manager so it can later be handed to xpcproxy.
 *
 * Fails (and tears the skeleton domain down) if the requesting process has
 * already exited or its audit-session port cannot be obtained.
 */
kern_return_t
xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
{
	if (!j) {
		/* Due to the whacky nature of XPC service bootstrapping, we can end up
		 * getting this message long after the requesting process has gone away.
		 * See <rdar://problem/8593143>.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jobmgr_t jm = j->mgr;
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// A non-NULL req_asport means the environment was already set once.
	if (jm->req_asport != MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	struct proc_bsdinfowithuniqid proc;
	// proc_pidinfo() returning 0 means the caller could not be looked up;
	// treat that as the requestor having died and remove the skeleton domain.
	if (proc_pidinfo(ldc->pid, PROC_PIDT_BSDINFOWITHUNIQID, 1, &proc, PROC_PIDT_BSDINFOWITHUNIQID_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}

		jm->error = errno;
		jobmgr_remove(jm);
		return BOOTSTRAP_NO_MEMORY;
	}

#if !TARGET_OS_EMBEDDED
	// Cache a send right to the caller's audit session.
	if (jobmgr_assumes_zero(jm, audit_session_port(ldc->asid, &jm->req_asport)) != 0) {
		jm->error = EPERM;
		jobmgr_remove(jm);
		job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#else
	// No audit sessions on embedded; use a sentinel so the "already set"
	// check above still fires on a second call.
	jm->req_asport = MACH_PORT_DEAD;
#endif

	// Migrate any attach requests that were parked waiting for this PID's
	// domain to appear.
	struct waiting4attach *w4ai = NULL;
	struct waiting4attach *w4ait = NULL;
	LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
		if (w4ai->dest == ldc->pid) {
			jobmgr_log(jm, LOG_DEBUG, "Migrating attach for: %s", w4ai->name);
			LIST_REMOVE(w4ai, le);
			LIST_INSERT_HEAD(&jm->attaches, w4ai, le);
			w4ai->dest = 0;
		}
	}

	// Name the domain after the requestor's command name and PID.
	(void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s.%d", proc.pbsd.pbi_comm, ldc->pid);
	strlcpy(jm->owner, proc.pbsd.pbi_comm, sizeof(jm->owner));
	jm->req_bsport = bsport;
	jm->req_excport = excport;
	jm->req_rport = rp;
	jm->req_ctx = ctx;
	jm->req_ctx_sz = ctx_sz;
	jm->req_pid = ldc->pid;
	jm->req_euid = ldc->euid;
	jm->req_egid = ldc->egid;
	jm->req_asid = ldc->asid;
	jm->req_uniqueid = proc.p_uniqidentifier.p_uniqueid;

	return KERN_SUCCESS;
}
10337
/* Second of the two bootstrap messages for a new XPC domain: unpack the
 * serialized service array and import it, initializing the domain.
 *
 * On failure the whole domain is removed ("obliterated"); on success the
 * requestor is woken via its cached reply port and the MIG out-of-line
 * buffer is deallocated here (see comment below).
 */
kern_return_t
xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	// Only the XPC bootstrapper job (looked up by the caller's PID in the
	// root namespace) may load services into a domain.
	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!(rootj && rootj->xpc_bootstrapper)) {
		job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// This is just for XPC domains (for now).
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	// A domain can only be initialized once; use xpc_domain_add_services()
	// to extend an existing one.
	if (j->mgr->session_initialized) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!services) {
		return BOOTSTRAP_NO_MEMORY;
	}

	int error = _xpc_domain_import_services(j, services);
	if (error) {
		j->mgr->error = error;
		jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
		jobmgr_remove(j->mgr);
	} else {
		j->mgr->session_initialized = true;
		// Wake the process that originally requested this domain, then drop
		// our reference to its reply port.
		(void)jobmgr_assumes_zero(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS));
		j->mgr->req_rport = MACH_PORT_NULL;

		/* Returning a failure code will destroy the message, whereas returning
		 * success will not, so we need to clean up here.
		 */
		mig_deallocate(services_buff, services_sz);
		error = BOOTSTRAP_SUCCESS;
	}

	return error;
}
10385
/* Hand the cached originator environment of an XPC domain back to a
 * checking-in process (xpcproxy): bootstrap/exception/audit-session ports,
 * effective uid/gid, audit session ID, and the opaque context blob.
 *
 * Fails if the caller's manager is not an XPC domain or the environment was
 * never set (req_asport still MACH_PORT_NULL).
 */
kern_return_t
xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport,
	mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid,
	int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
{
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}
	jobmgr_t jm = j->mgr;
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Environment must have been set by xpc_domain_set_environment() first.
	if (jm->req_asport == MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	*bsport = jm->req_bsport;
	*sbsport = root_jobmgr->jm_port;
	*excport = jm->req_excport;
	// Services that join the GUI session get the GUI audit-session port when
	// one has been recorded; otherwise fall back to the requestor's session.
	if (j->joins_gui_session) {
		if (jm->req_gui_asport) {
			*asport = jm->req_gui_asport;
		} else {
			job_log(j, LOG_NOTICE, "No GUI session set for UID of user service. This service may not act properly.");
			*asport = jm->req_asport;
		}
	} else {
		*asport = jm->req_asport;
	}

	*uid = jm->req_euid;
	*gid = jm->req_egid;
	*asid = jm->req_asid;

	*ctx = jm->req_ctx;
	*ctx_sz = jm->req_ctx_sz;

	return KERN_SUCCESS;
}
10426
10427kern_return_t
dcace88f 10428xpc_domain_get_service_name(job_t j, event_name_t name)
5b0a4722 10429{
dcace88f 10430 if (!j) {
5b0a4722
A
10431 return BOOTSTRAP_NO_MEMORY;
10432 }
95379394 10433
dcace88f
A
10434 if (!j->xpc_service) {
10435 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
10436 return BOOTSTRAP_NOT_PRIVILEGED;
5b0a4722 10437 }
dcace88f 10438
95379394
A
10439 const char *what2find = j->label;
10440 if (j->dedicated_instance) {
10441 what2find = j->original->label;
10442 }
10443
10444 struct machservice *msi = NULL;
10445 SLIST_FOREACH(msi, &j->machservices, sle) {
10446 if (strcmp(msi->name, what2find) == 0) {
10447 break;
10448 }
10449 }
10450
10451 if (!msi) {
10452 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name that does not exist: %s", j->label);
dcace88f 10453 return BOOTSTRAP_UNKNOWN_SERVICE;
5b0a4722 10454 }
dcace88f 10455
95379394 10456 (void)strlcpy(name, msi->name, sizeof(event_name_t));
dcace88f
A
10457 return BOOTSTRAP_SUCCESS;
10458}
dcace88f 10459
#if XPC_LPI_VERSION >= 20111216
/* Extend an already-initialized XPC domain with additional services.
 * Same privilege checks as xpc_domain_load_services(), but without the
 * "not yet initialized" requirement, and failures do not destroy the
 * domain. The MIG buffer is only deallocated on success (returning an
 * error destroys the message for us).
 */
kern_return_t
xpc_domain_add_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	// Only the XPC bootstrapper may add services.
	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!(rootj && rootj->xpc_bootstrapper)) {
		job_log(j, LOG_ERR, "Attempt to add service to XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!services) {
		return BOOTSTRAP_NO_MEMORY;
	}

	int error = _xpc_domain_import_services(j, services);
	if (!error) {
		mig_deallocate(services_buff, services_sz);
	}

	return error;
}
#endif
dcace88f 10492
eabd1701
A
10493#pragma mark XPC Events
/* Find (or lazily create) the event-channel MachService named `stream` on
 * job j, returning it through *ms.
 *
 * Returns 0 on success, EXNOMEM if a new MachService could not be created,
 * or EEXIST if the job already registered an ordinary (non-event-channel)
 * MachService under the same name.
 */
int
xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms)
{
	int error = EXNOMEM;
	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (strcmp(stream, msi->name) == 0) {
			break;
		}
	}

	if (!msi) {
		// No channel yet: create one with a fresh receive right.
		mach_port_t sp = MACH_PORT_NULL;
		msi = machservice_new(j, stream, &sp, false);
		if (!msi) {
			return EXNOMEM;
		}

		job_log(j, LOG_DEBUG, "Creating new MachService for stream: %s", stream);
		/* Hack to keep this from being publicly accessible through
		 * bootstrap_look_up().
		 */
		if (!j->dedicated_instance) {
			LIST_REMOVE(msi, name_hash_sle);
		}
		msi->event_channel = true;

		/* If we call job_dispatch() here before the audit session for the job
		 * has been set, we'll end up not watching this service. But we also have
		 * to take care not to watch the port if the job is active.
		 *
		 * See <rdar://problem/10357855>.
		 */
		if (!j->currently_ignored) {
			machservice_watch(j, msi);
		}

		error = 0;
		*ms = msi;
	} else if (!msi->event_channel) {
		// Name collision with a regular MachService registration.
		job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
		error = EEXIST;
	} else {
		// Existing event channel; reuse it.
		error = 0;
		*ms = msi;
	}

	return error;
}
10543
10544int
10545xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply)
dcace88f 10546{
eabd1701
A
10547 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10548 if (!stream) {
10549 return EXINVAL;
10550 }
10551
10552 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10553 if (!token) {
10554 return EXINVAL;
10555 }
10556
10557 job_log(j, LOG_DEBUG, "Getting event name for stream/token: %s/0x%llu", stream, token);
10558
10559 int result = ESRCH;
dcace88f
A
10560 struct externalevent *event = externalevent_find(stream, token);
10561 if (event && j->event_monitor) {
eabd1701
A
10562 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10563 xpc_dictionary_set_string(reply2, XPC_EVENT_ROUTINE_KEY_NAME, event->name);
10564 *reply = reply2;
10565
10566 job_log(j, LOG_DEBUG, "Found: %s", event->name);
10567 result = 0;
cf0bacfd 10568 }
dcace88f 10569
eabd1701 10570 return result;
dcace88f 10571}
eabd1701 10572
95379394
A
10573int
10574xpc_event_copy_entitlements(job_t j, xpc_object_t request, xpc_object_t *reply)
10575{
10576 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10577 if (!stream) {
10578 return EXINVAL;
10579 }
10580
10581 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10582 if (!token) {
10583 return EXINVAL;
10584 }
10585
10586 job_log(j, LOG_DEBUG, "Getting entitlements for stream/token: %s/0x%llu", stream, token);
10587
10588 int result = ESRCH;
10589 struct externalevent *event = externalevent_find(stream, token);
10590 if (event && j->event_monitor) {
10591 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10592 xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_ENTITLEMENTS, event->entitlements);
10593 *reply = reply2;
10594
10595 job_log(j, LOG_DEBUG, "Found: %s", event->name);
10596 result = 0;
10597 }
10598
10599 return result;
10600}
10601
// Fallback definition for SDKs that don't yet export this flag; guards the
// anonymous-job check in xpc_event_set_event().
// TODO - can be removed with rdar://problem/12666150
#ifndef XPC_EVENT_FLAG_ALLOW_UNMANAGED
#define XPC_EVENT_FLAG_ALLOW_UNMANAGED (1 << 1)
#endif
10606
eabd1701
A
/* Set or remove an external event registration for job j.
 *
 * A NULL event payload means "remove"; a dictionary payload replaces any
 * existing registration under the same (stream, key) pair. Creates the
 * event stream on demand. A reply dictionary is produced only on success.
 */
int
xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);
	if (!key) {
		return EXINVAL;
	}

	// Absent event == removal request; if present it must be a dictionary.
	xpc_object_t event = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_EVENT);
	if (event && xpc_get_type(event) != XPC_TYPE_DICTIONARY) {
		return EXINVAL;
	}

	uint64_t flags = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_FLAGS);

	/* Don't allow events to be set for anonymous jobs unless specifically
	 * requested in the flags. Only permit this for internal development.
	 */
	if (j->anonymous && ((flags & XPC_EVENT_FLAG_ALLOW_UNMANAGED) == 0 || !launchd_apple_internal)) {
		job_log(j, LOG_ERR, "Unmanaged jobs may not make XPC Events requests.");
		return EPERM;
	}

	job_log(j, LOG_DEBUG, "%s event for stream/key: %s/%s", event ? "Setting" : "Removing", stream, key);

	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		/* If the event for the given key already exists for the job, we need to
		 * remove the old one first.
		 */
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			job_log(j, LOG_DEBUG, "Event exists. Removing.");
			externalevent_delete(eei);
			break;
		}
	}

	int result = EXNOMEM;
	if (event) {
		// Find or lazily create the stream, then register the new event.
		struct eventsystem *es = eventsystem_find(stream);
		if (!es) {
			job_log(j, LOG_DEBUG, "Creating stream.");
			es = eventsystem_new(stream);
		}

		if (es) {
			job_log(j, LOG_DEBUG, "Adding event.");
			if (externalevent_new(j, es, key, event, flags)) {
				job_log(j, LOG_DEBUG, "Added new event for key: %s", key);
				result = 0;
			} else {
				job_log(j, LOG_ERR, "Could not create event for key: %s", key);
			}
		} else {
			job_log(j, LOG_ERR, "Event stream could not be created: %s", stream);
		}
	} else {
		/* If the event was NULL, then we just remove it and return. */
		result = 0;
	}

	if (result == 0) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		*reply = reply2;
	}

	return result;
}
cf0bacfd 10680
eabd1701
A
/* Copy one event, all events in a stream, or all events across all streams
 * registered by job j, depending on which of stream/key are present.
 *
 *   stream==NULL, key==NULL/""  -> nested dict of {stream: {key: event}}
 *   stream set,   key==NULL/""  -> dict of {key: event} for that stream
 *   stream set,   key set       -> the single event dictionary
 *   stream==NULL, key set       -> EXINVAL (nonsensical combination)
 *
 * Returns 0 with a reply on success, ESRCH if nothing matched.
 */
int
xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);

	bool all_streams = (stream == NULL);
	bool all_events = (key == NULL || strcmp(key, "") == 0); // strcmp for libxpc compatibility
	xpc_object_t events = NULL;

	if (all_streams && !all_events) {
		return EXINVAL;
	}

	if (all_streams || all_events) {
		job_log(j, LOG_DEBUG, "Fetching all events%s%s", stream ? " for stream: " : "", stream ? stream : "");
		events = xpc_dictionary_create(NULL, NULL, 0);
	} else {
		job_log(j, LOG_DEBUG, "Fetching stream/key: %s/%s", stream, key);
	}

	int result = ESRCH;
	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		if (all_streams) {
			// Group events under a per-stream sub-dictionary, created lazily.
			xpc_object_t sub = xpc_dictionary_get_value(events, eei->sys->name);
			if (sub == NULL) {
				sub = xpc_dictionary_create(NULL, NULL, 0);
				xpc_dictionary_set_value(events, eei->sys->name, sub);
				xpc_release(sub);
			}
			xpc_dictionary_set_value(sub, eei->name, eei->event);
		} else if (strcmp(eei->sys->name, stream) == 0) {
			if (all_events) {
				xpc_dictionary_set_value(events, eei->name, eei->event);
			} else if (strcmp(eei->name, key) == 0) {
				// Single lookup: retain the event itself as the result.
				job_log(j, LOG_DEBUG, "Found event.");
				events = xpc_retain(eei->event);
				break;
			}
		}
	}

	// `events` is non-NULL whenever we have something to return; the reply
	// takes its own reference, so drop ours afterwards.
	if (events) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENT, events);
		xpc_release(events);

		*reply = reply2;
		result = 0;
	}

	return result;
}
10735
eabd1701
A
/* Check a job in on its event channel for `stream`: find/create the channel
 * MachService and hand its receive right back in the reply. A channel may
 * only be checked in on once while active (EBUSY otherwise).
 */
int
xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	job_log(j, LOG_DEBUG, "Checking in stream: %s", stream);

	struct machservice *ms = NULL;
	int error = xpc_event_find_channel(j, stream, &ms);
	if (error) {
		job_log(j, LOG_ERR, "Failed to check in: 0x%x: %s", error, xpc_strerror(error));
	} else if (ms->isActive) {
		// The receive right is already held by a live check-in.
		job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
		error = EBUSY;
	} else {
		// Ask for port-destroyed/no-senders notifications before handing the
		// receive right to the job.
		machservice_request_notifications(ms);

		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_mach_recv(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
		*reply = reply2;
		error = 0;
	}

	return error;
}
10764
eabd1701
A
10765int
10766xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply)
10767{
10768 if (!j->event_monitor) {
10769 return EPERM;
10770 }
10771
10772 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10773 if (!stream) {
10774 return EXINVAL;
10775 }
10776
10777 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10778 if (!token) {
10779 return EXINVAL;
10780 }
10781
10782 job_log(j, LOG_DEBUG, "Looking up channel for stream/token: %s/%llu", stream, token);
10783
10784 struct externalevent *ee = externalevent_find(stream, token);
10785 if (!ee) {
10786 return ESRCH;
10787 }
10788
10789 struct machservice *ms = NULL;
10790 int error = xpc_event_find_channel(ee->job, stream, &ms);
10791 if (!error) {
10792 job_log(j, LOG_DEBUG, "Found event channel port: 0x%x", ms->port);
10793 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10794 xpc_dictionary_set_mach_send(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10795 *reply = reply2;
10796 error = 0;
10797 } else {
10798 job_log(j, LOG_ERR, "Could not find event channel for stream/token: %s/%llu: 0x%x: %s", stream, token, error, xpc_strerror(error));
10799 }
10800
10801 return error;
10802}
10803
/* Check an event provider (the event monitor) in for a stream, returning
 * the array of [id, event, id, event, ...] pairs currently registered on
 * that stream. Creates the stream on demand (in which case the array is
 * empty). Also marks the monitor as safe to signal with SIGUSR1.
 */
int
xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j->event_monitor) {
		return EPERM;
	}

	/* This indicates that the event monitor is now safe to signal. This state
	 * is independent of whether this operation actually succeeds; we just need
	 * it to ignore SIGUSR1.
	 */
	j->event_monitor_ready2signal = true;

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	job_log(j, LOG_DEBUG, "Provider checking in for stream: %s", stream);

	xpc_object_t events = xpc_array_create(NULL, 0);
	struct eventsystem *es = eventsystem_find(stream);
	if (!es) {
		/* If we had to create the event stream, there were no events, so just
		 * give back the empty array.
		 */
		job_log(j, LOG_DEBUG, "Creating event stream.");
		es = eventsystem_new(stream);
		if (!job_assumes(j, es)) {
			xpc_release(events);
			return EXNOMEM;
		}

		// Remember the helper stream for launchd's own support system.
		if (strcmp(stream, "com.apple.launchd.helper") == 0) {
			_launchd_support_system = es;
		}
	} else {
		job_log(j, LOG_DEBUG, "Filling event array.");

		// Pairs: each event's numeric id followed by its payload dictionary.
		struct externalevent *ei = NULL;
		LIST_FOREACH(ei, &es->events, sys_le) {
			xpc_array_set_uint64(events, XPC_ARRAY_APPEND, ei->id);
			xpc_array_append_value(events, ei->event);
		}
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENTS, events);
	xpc_release(events);
	*reply = reply2;

	return 0;
}
10857
eabd1701
A
/* Event-monitor-only: set the fired/unfired state of the event identified
 * by (stream, token), then dispatch the owning job so it can react.
 * Internal events (exec(3) gating) are deleted once satisfied; the owning
 * job pointer is captured first because deletion frees the event.
 */
int
xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	job_t other_j = NULL;

	if (!j->event_monitor) {
		return EPERM;
	}

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
	if (!token) {
		return EXINVAL;
	}

	// State must be an explicit XPC boolean.
	bool state = false;
	xpc_object_t xstate = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_STATE);
	if (!xstate || xpc_get_type(xstate) != XPC_TYPE_BOOL) {
		return EXINVAL;
	} else {
		state = xpc_bool_get_value(xstate);
	}

	job_log(j, LOG_DEBUG, "Setting event state to %s for stream/token: %s/%llu", state ? "true" : "false", stream, token);

	struct externalevent *ei = externalevent_find(stream, token);
	if (!ei) {
		job_log(j, LOG_ERR, "Could not find stream/token: %s/%llu", stream, token);
		return ESRCH;
	}

	// Capture the owning job before the event may be deleted below.
	other_j = ei->job;
	ei->state = state;

	if (ei->internal) {
		job_log(ei->job, LOG_NOTICE, "Job should be able to exec(3) now.");
		ei->job->waiting4ok = false;
		externalevent_delete(ei);
	}

	(void)job_dispatch(other_j, false);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
10909
/* Demultiplex an incoming XPC Events request arriving on port p: record the
 * caller's credentials, resolve the requesting job, and dispatch on the
 * opcode. Always returns true (message handled) when an opcode is present;
 * on error a reply carrying XPC_EVENT_ROUTINE_KEY_ERROR is built.
 */
bool
xpc_event_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	// Resolve the caller: first as a managed job by PID, then via the MIG
	// intran on the port. If both fail, force the error path via op = -1.
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t j = managed_job(ldc->pid);
	if (!j) {
		j = job_mig_intran(p);
		if (!j) {
			// NOTE(review): j stays NULL here and is still passed to
			// job_log() below — presumably job_log tolerates NULL; confirm.
			op = -1;
		}
	}

	job_log(j, LOG_DEBUG, "Incoming XPC event request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_EVENT_GET_NAME:
		error = xpc_event_get_event_name(j, request, reply);
		break;
	case XPC_EVENT_SET:
		error = xpc_event_set_event(j, request, reply);
		break;
	case XPC_EVENT_COPY:
		error = xpc_event_copy_event(j, request, reply);
		break;
	case XPC_EVENT_CHECK_IN:
		error = xpc_event_channel_check_in(j, request, reply);
		break;
	case XPC_EVENT_LOOK_UP:
		error = xpc_event_channel_look_up(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_CHECK_IN:
		error = xpc_event_provider_check_in(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_SET_STATE:
		error = xpc_event_provider_set_state(j, request, reply);
		break;
	case XPC_EVENT_COPY_ENTITLEMENTS:
		error = xpc_event_copy_entitlements(j, request, reply);
		break;
	case -1:
		// Sentinel set above when no job could be resolved.
		error = EINVAL;
		break;
	default:
		job_log(j, LOG_ERR, "Bogus opcode.");
		error = EDOM;
	}

	// On any failure, reply with the error code instead of a payload.
	if (error) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_uint64(reply2, XPC_EVENT_ROUTINE_KEY_ERROR, error);
		*reply = reply2;
	}

	return true;
}
10975
95379394
A
10976uint64_t
10977xpc_get_jetsam_entitlement(const char *key)
10978{
10979 uint64_t entitlement = 0;
10980
10981 audit_token_t *token = runtime_get_caller_token();
10982 xpc_object_t value = xpc_copy_entitlement_for_token(key, token);
10983 if (value) {
10984 if (xpc_get_type(value) == XPC_TYPE_UINT64) {
10985 entitlement = xpc_uint64_get_value(value);
10986 }
10987
10988 xpc_release(value);
10989 }
10990
10991 return entitlement;
10992}
10993
/* Set the Jetsam priority band of the job named by the request's label.
 * Allowed when the caller is the embedded "god" job, when its
 * com.apple.private.jetsam.modify-priority entitlement covers the requested
 * band, or when the global permission check is disabled.
 */
int
xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j) {
		return EINVAL;
	}

	const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
	if (!label) {
		return EXINVAL;
	}

	xpc_jetsam_band_t entitled_band = -1;
	xpc_jetsam_band_t requested_band = (xpc_jetsam_band_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_PRIORITY_BAND);
	if (!requested_band) {
		return EXINVAL;
	}

	// Band must be within the defined range.
	if (!(requested_band >= XPC_JETSAM_BAND_SUSPENDED && requested_band < XPC_JETSAM_BAND_LAST)) {
		return EXINVAL;
	}

	uint64_t rcdata = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_RCDATA);

	job_t tj = job_find(root_jobmgr, label);
	if (!tj) {
		return EXSRCH;
	}

	boolean_t allow = false;
	if (j->embedded_god) {
		allow = true;
	} else {
		// Otherwise the caller's entitlement caps the band it may request.
		entitled_band = xpc_get_jetsam_entitlement("com.apple.private.jetsam.modify-priority");
		if (entitled_band >= requested_band) {
			allow = true;
		}
	}

	if (!allow) {
		if (launchd_no_jetsam_perm_check) {
			job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set priority: %d", requested_band);
		} else {
			job_log(j, LOG_ERR, "Job cannot decrease Jetsam priority band (requested/maximum): %d/%d", requested_band, entitled_band);
			return EPERM;
		}
	}

	job_log(j, LOG_INFO, "Setting Jetsam band: %d.", requested_band);
	job_update_jetsam_properties(tj, requested_band, rcdata);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
11050
/* Set the Jetsam memory limit of the job named by the request's label.
 * Permission model mirrors xpc_process_set_jetsam_band(): embedded god job,
 * sufficient com.apple.private.jetsam.memory_limit entitlement, or checks
 * globally disabled.
 */
int
xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j) {
		return EINVAL;
	}

	const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
	if (!label) {
		return EXINVAL;
	}

	int32_t entitlement_limit = 0;
	int32_t requested_limit = (int32_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_MEMORY_LIMIT);

	job_t tj = job_find(root_jobmgr, label);
	if (!tj) {
		return EXSRCH;
	}

	boolean_t allow = false;
	if (j->embedded_god) {
		allow = true;
	} else {
		// The entitlement value caps the limit the caller may request.
		entitlement_limit = (int32_t)xpc_get_jetsam_entitlement("com.apple.private.jetsam.memory_limit");
		if (entitlement_limit >= requested_limit) {
			allow = true;
		}
	}

	if (!allow) {
		if (launchd_no_jetsam_perm_check) {
			job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set memory limit: %d", requested_limit);
		} else {
			job_log(j, LOG_ERR, "Job cannot set Jetsam memory limit (requested/maximum): %d/%d", requested_limit, entitlement_limit);
			return EPERM;
		}
	}

	job_log(j, LOG_INFO, "Setting Jetsam memory limit: %d.", requested_limit);
	job_update_jetsam_memory_limit(tj, requested_limit);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
11098
11099static jobmgr_t
11100_xpc_process_find_target_manager(job_t j, xpc_service_type_t type, pid_t pid)
11101{
11102 jobmgr_t target = NULL;
11103 if (type == XPC_SERVICE_TYPE_BUNDLED) {
11104 job_log(j, LOG_DEBUG, "Bundled service. Searching for XPC domains for PID: %d", pid);
11105
11106 jobmgr_t jmi = NULL;
11107 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
11108 if (jmi->req_pid && jmi->req_pid == pid) {
11109 jobmgr_log(jmi, LOG_DEBUG, "Found job manager for PID.");
11110 target = jmi;
11111 break;
11112 }
11113 }
11114 } else if (type == XPC_SERVICE_TYPE_LAUNCHD || type == XPC_SERVICE_TYPE_APP) {
11115 target = j->mgr;
11116 }
11117
11118 return target;
11119}
11120
/* Handle XPC_PROCESS_SERVICE_ATTACH: record the caller's interest in a named
 * service so it can be notified (via the supplied Mach send right) when a new
 * instance of that service appears, and report the PID of any instance that
 * is already running.
 *
 * Requires the caller to hold the boolean-true XPC attach entitlement.
 * Returns 0 with *reply set on success; EINVAL/EPERM/EXINVAL otherwise.
 */
static int
xpc_process_attach(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j) {
		return EINVAL;
	}

	// Authorize against the requesting process' audit token, not the job
	// object, so the check reflects the actual caller.
	audit_token_t *token = runtime_get_caller_token();
	xpc_object_t entitlement = xpc_copy_entitlement_for_token(XPC_SERVICE_ENTITLEMENT_ATTACH, token);
	if (!entitlement) {
		job_log(j, LOG_ERR, "Job does not have entitlement: %s", XPC_SERVICE_ENTITLEMENT_ATTACH);
		return EPERM;
	}

	// The entitlement must be boolean true; XPC booleans are singletons, so a
	// direct pointer comparison against XPC_BOOL_TRUE is sufficient.
	if (entitlement != XPC_BOOL_TRUE) {
		char *desc = xpc_copy_description(entitlement);
		job_log(j, LOG_ERR, "Job has bad value for entitlement: %s:\n%s", XPC_SERVICE_ENTITLEMENT_ATTACH, desc);
		free(desc);

		xpc_release(entitlement);
		return EPERM;
	}

	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
	if (!name) {
		return EXINVAL;
	}

	xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
	if (!type) {
		return EXINVAL;
	}

	// The send right the caller wants to be notified on when the service's
	// new instance appears.
	mach_port_t port = xpc_dictionary_copy_mach_send(request, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT);
	if (!MACH_PORT_VALID(port)) {
		return EXINVAL;
	}

	pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_HANDLE);

	job_log(j, LOG_DEBUG, "Attaching to service: %s", name);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
	if (target) {
		jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
		(void)jobmgr_assumes(target, waiting4attach_new(target, name, port, 0, type));

		/* HACK: This is awful. For legacy reasons, launchd job labels are all
		 * stored in a global namespace, which is stored in the root job
		 * manager. But XPC domains have a per-domain namespace. So if we're
		 * looking for a legacy launchd job, we have to redirect any attachment
		 * attempts to the root job manager to find existing instances.
		 *
		 * But because we store attachments on a per-job manager basis, we have
		 * to create the new attachment in the actual target job manager, hence
		 * why we change the target only after we've created the attachment.
		 */
		if (strcmp(target->name, VPROCMGR_SESSION_AQUA) == 0) {
			target = root_jobmgr;
		}

		job_t existing = job_find(target, name);
		if (existing && existing->p) {
			job_log(existing, LOG_DEBUG, "Found existing instance of service.");
			xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_PID, existing->p);
		} else {
			// No running instance yet; the caller waits on the attach port.
			xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
		}
	} else if (type == XPC_SERVICE_TYPE_BUNDLED) {
		// NOTE(review): 'target' is NULL in this branch, so the attachment is
		// created against a NULL job manager — verify waiting4attach_new()
		// tolerates that, or whether this was meant to be j->mgr.
		(void)job_assumes(j, waiting4attach_new(target, name, port, pid, type));
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
	} else {
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, EXSRCH);
	}

	*reply = reply2;
	return 0;
}
11200
11201static int
11202xpc_process_detach(job_t j, xpc_object_t request, xpc_object_t *reply __unused)
11203{
11204 if (!j) {
11205 return EINVAL;
11206 }
11207
11208 const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
11209 if (!name) {
11210 return EXINVAL;
11211 }
11212
11213 xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
11214 if (!type) {
11215 return EXINVAL;
11216 }
11217
11218 job_log(j, LOG_DEBUG, "Deatching from service: %s", name);
11219
11220 pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_PID);
11221 jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
11222 if (target) {
11223 jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
11224
11225 struct waiting4attach *w4ai = NULL;
11226 struct waiting4attach *w4ait = NULL;
11227 LIST_FOREACH_SAFE(w4ai, &target->attaches, le, w4ait) {
11228 if (strcmp(name, w4ai->name) == 0) {
11229 jobmgr_log(target, LOG_DEBUG, "Found attachment. Deleting.");
11230 waiting4attach_delete(target, w4ai);
11231 break;
11232 }
11233 }
11234 }
11235
11236 return 0;
11237}
11238
/* Handle XPC_PROCESS_SERVICE_GET_PROPERTIES: report the attachment type, the
 * new-instance notification port, the executable path, and the argument
 * vector for the job the caller resolves to.
 *
 * Returns 0 with *reply set on success; EXINVAL if the caller cannot be
 * resolved to a managed job or has no pending attachment.
 */
static int
xpc_process_get_properties(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (j->anonymous) {
		/* Total hack. libxpc will send requests to the pipe created out of the
		 * process' bootstrap port, so when job_mig_intran() tries to resolve
		 * the process into a job, it'll wind up creating an anonymous job if
		 * the requestor was an XPC service, whose job manager is an XPC domain.
		 */
		// Re-resolve the PID against each submanager of the root manager to
		// find the real (non-anonymous) job for this process.
		pid_t pid = j->p;
		jobmgr_t jmi = NULL;
		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if ((j = jobmgr_find_by_pid(jmi, pid, false))) {
				break;
			}
		}
	}

	if (!j || j->anonymous) {
		return EXINVAL;
	}

	struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
	if (!w4a) {
		return EXINVAL;
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_TYPE, w4a->type);
	xpc_dictionary_set_mach_send(reply2, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT, w4a->port);
	if (j->prog) {
		xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->prog);
	} else {
		// NOTE(review): assumes every job without 'prog' has a non-NULL argv
		// with at least one element — verify against job import invariants.
		xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->argv[0]);
	}

	if (j->argv) {
		xpc_object_t xargv = xpc_array_create(NULL, 0);

		size_t i = 0;
		for (i = 0; i < j->argc; i++) {
			if (j->argv[i]) {
				xpc_array_set_string(xargv, XPC_ARRAY_APPEND, j->argv[i]);
			}
		}

		xpc_dictionary_set_value(reply2, XPC_PROCESS_ROUTINE_KEY_ARGV, xargv);
		xpc_release(xargv);
	}

	*reply = reply2;
	return 0;
}
11292
/* Handle XPC_PROCESS_SERVICE_KILL: deliver a signal to a named service in the
 * caller's target (bundled-services) job manager.
 *
 * Policy enforced here: aliased (non-private) instances may not be killed,
 * and a non-matching EUID may not signal a service running as another user.
 * The kill(2) result is reported in the reply's ERROR key; the routine's own
 * return value reports only request/lookup failures.
 */
static int
xpc_process_service_kill(job_t j, xpc_object_t request, xpc_object_t *reply)
{
#if XPC_LPI_VERSION >= 20130426
	if (!j) {
		return ESRCH;
	}

	jobmgr_t jm = _xpc_process_find_target_manager(j, XPC_SERVICE_TYPE_BUNDLED, j->p);
	if (!jm) {
		return ENOENT;
	}

	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
	if (!name) {
		return EINVAL;
	}

	int64_t whichsig = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_SIGNAL);
	if (!whichsig) {
		return EINVAL;
	}

	job_t j2kill = job_find(jm, name);
	if (!j2kill) {
		return ESRCH;
	}

	if (j2kill->alias) {
		// Only allow for private instances to be killed.
		return EPERM;
	}

	// Look up the target's short BSD info so we can compare its UID against
	// the caller's credentials below.
	// NOTE(review): this runs before the !j2kill->p check, so j2kill->p may
	// still be 0 here — proc_pidinfo(0, ...) queries the kernel idle task;
	// verify the ordering is intentional.
	struct proc_bsdshortinfo proc;
	if (proc_pidinfo(j2kill->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(root_jobmgr, errno);
		}

		return errno;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	if (proc.pbsi_uid != ldc->euid) {
		// Do not allow non-root to kill RoleAccount services running as a
		// different user.
		return EPERM;
	}

	if (!j2kill->p) {
		return EALREADY;
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	if (!reply2) {
		return EINVAL;
	}

	// Report the kill(2) outcome in-band; 0 means the signal was delivered.
	int error = 0;
	int ret = kill(j2kill->p, whichsig);
	if (ret) {
		error = errno;
	}

	xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
	*reply = reply2;
	return 0;
#else
	return ENOTSUP;
#endif
}
11364
/* Top-level demultiplexer for the xpc_process_* pipe routines.
 *
 * Returns false only when the request carries no opcode (so the caller can
 * try other demuxers); otherwise the request is consumed and true is
 * returned, with any handler failure reported in-band via the reply's
 * ERROR key.
 */
bool
xpc_process_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	// Record the caller's audit token so handlers can authorize against the
	// requestor's credentials (see runtime_get_caller_creds()).
	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	// Resolve the request port back to the sending job (may create an
	// anonymous job for unknown processes).
	job_t j = job_mig_intran(p);
	job_log(j, LOG_DEBUG, "Incoming XPC process request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_PROCESS_JETSAM_SET_BAND:
		error = xpc_process_set_jetsam_band(j, request, reply);
		break;
	case XPC_PROCESS_JETSAM_SET_MEMORY_LIMIT:
		error = xpc_process_set_jetsam_memory_limit(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_ATTACH:
		error = xpc_process_attach(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_DETACH:
		error = xpc_process_detach(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_GET_PROPERTIES:
		error = xpc_process_get_properties(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_KILL:
		error = xpc_process_service_kill(j, request, reply);
		break;
	default:
		job_log(j, LOG_ERR, "Bogus process opcode.");
		error = EDOM;
	}

	if (error) {
		// On failure, the reply (when constructible) carries only the error.
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		if (reply2) {
			xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
		}

		*reply = reply2;
	}

	return true;
}
11416
dcace88f
A
/* MIG routine: force-start the job with the given label, optionally stalling
 * it before exec(2) (VPROCFLAG_STALL_JOB_EXEC) so the caller can attach a
 * debugger. On success, *out_pid receives the started job's PID.
 *
 * Privilege: root, launchd's own EUID, or (embedded only) a caller running as
 * the same user as the target job.
 */
kern_return_t
job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t otherj;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Labels live in a global namespace, so search from the root (NULL).
	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// On embedded, a non-root caller may kickstart jobs belonging to the
	// same user.
	bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
#else
	bool allow_non_root_kickstart = false;
#endif

	if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// Stalling only makes sense for a fresh launch; refuse if already running.
	if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
		return BOOTSTRAP_SERVICE_ACTIVE;
	}

	otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
	otherj = job_dispatch(otherj, true);

	if (!job_assumes(j, otherj && otherj->p)) {
		// <rdar://problem/6787083> Clear this flag if we failed to start the job.
		// NOTE(review): if job_dispatch() returned NULL, this dereferences a
		// NULL pointer — confirm job_dispatch() cannot return NULL on this path.
		otherj->stall_before_exec = false;
		return BOOTSTRAP_NO_MEMORY;
	}

	*out_pid = otherj->p;

	return 0;
}
11464
/* MIG-backed spawn implementation shared by job_mig_spawn2(): unpack the
 * serialized job dictionary from 'indata', import it into the caller's Aqua
 * session's job manager, and dispatch (launch) it.
 *
 * Returns BOOTSTRAP_SUCCESS with *outj set on launch; BOOTSTRAP_NAME_IN_USE
 * (with *outj pointing at the existing job, possibly NULL) when the label
 * already exists; VPROC_ERR_TRY_PER_USER when the request should be re-sent
 * to the caller's per-user launchd; other bootstrap errors on failure.
 * Ownership of 'asport' transfers to the new job on success.
 */
kern_return_t
job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
{
	launch_data_t jobdata = NULL;
	size_t data_offset = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// As PID 1, spawn requests from non-root users belong in that user's
	// per-user launchd, not here.
	if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	// This legacy spawn API is only valid from within an Aqua session.
	jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
	if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
		jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
		return 1;
	}

	jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);

	launch_data_t label = NULL;
	launch_data_t wait4debugger = NULL;
	if (!jr) {
		switch (errno) {
		case EEXIST:
			/* If EEXIST was returned, we know that there is a label string in
			 * the dictionary. So we don't need to check the types here; that
			 * has already been done.
			 */
			label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
			jr = job_find(NULL, launch_data_get_string(label));
			if (job_assumes(j, jr != NULL) && !jr->p) {
				wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
				if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
					if (launch_data_get_bool(wait4debugger)) {
						/* If the job exists, we're going to kick-start it, but
						 * we need to give the caller the opportunity to start
						 * it suspended if it so desires. But this will only
						 * take effect if the job isn't running.
						 */
						jr->wait4debugger_oneshot = true;
					}
				}
			}

			// NOTE(review): jr may still be NULL here if job_find() failed;
			// callers of this routine must tolerate *outj == NULL alongside
			// BOOTSTRAP_NAME_IN_USE — see job_mig_spawn2().
			*outj = jr;
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	// When running as PID 1, remember which user asked for this job so
	// per-user lookups resolve correctly.
	if (pid1_magic) {
		jr->mach_uid = ldc->uid;
	}

	// TODO: Consolidate the app and legacy_LS_job bits.
	jr->legacy_LS_job = true;
	jr->abandon_pg = true;
	jr->asport = asport;
	jr->app = true;
	uuid_clear(jr->expected_audit_uuid);
	jr = job_dispatch(jr, true);

	if (!job_assumes(j, jr != NULL)) {
		// NOTE(review): jr is NULL in this branch, so job_remove(jr) is a
		// job_remove(NULL) — confirm job_remove() handles NULL.
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
	*outj = jr;

	return BOOTSTRAP_SUCCESS;
}
11568
/* MIG routine: spawn a job from serialized launch data and hand the caller an
 * exit-observer port plus the child PID.
 *
 * The reply is deliberately deferred (MIG_NO_REPLY) until the new job has
 * called exec(3) — job_setup_exit_port()/spawn_reply_port arrange for the
 * reply to be sent later — so the caller can safely SIGCONT the child.
 */
kern_return_t
job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
{
	job_t nj = NULL;
	kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
	if (likely(kr == KERN_SUCCESS)) {
		if (job_setup_exit_port(nj) != KERN_SUCCESS) {
			job_remove(nj);
			kr = BOOTSTRAP_NO_MEMORY;
		} else {
			/* Do not return until the job has called exec(3), thereby making it
			 * safe for the caller to send it SIGCONT.
			 *
			 * <rdar://problem/9042798>
			 */
			nj->spawn_reply_port = rp;
			kr = MIG_NO_REPLY;
		}
	} else if (kr == BOOTSTRAP_NAME_IN_USE) {
		// NOTE(review): job_mig_spawn_internal() can return NAME_IN_USE with
		// nj == NULL (failed label lookup); nj->p would then dereference
		// NULL — confirm that path is unreachable in practice.
		bool was_running = nj->p;
		if (job_dispatch(nj, true)) {
			if (!was_running) {
				job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");

				if (job_setup_exit_port(nj) == KERN_SUCCESS) {
					nj->spawn_reply_port = rp;
					kr = MIG_NO_REPLY;
				} else {
					kr = BOOTSTRAP_NO_MEMORY;
				}
			} else {
				// Already running: reply immediately with the existing PID and
				// no observer port.
				*obsvr_port = MACH_PORT_NULL;
				*child_pid = nj->p;
				kr = KERN_SUCCESS;
			}
		} else {
			job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
			kr = BOOTSTRAP_UNKNOWN_SERVICE;
		}
	}

	// MIG ownership: the request buffer is ours to release on every path.
	mig_deallocate(indata, indataCnt);
	return kr;
}
11613
eabd1701
A
11614launch_data_t
11615job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport __attribute__((unused)))
11616{
11617 launch_data_t reply = NULL;
11618
11619 errno = ENOTSUP;
11620 if (launch_data_get_type(request) == LAUNCH_DATA_STRING) {
11621 if (strcmp(launch_data_get_string(request), LAUNCH_KEY_CHECKIN) == 0) {
11622 reply = job_export(j);
11623 job_checkin(j);
11624 }
11625 }
11626
11627 return reply;
11628}
11629
11630#define LAUNCHD_MAX_LEGACY_FDS 128
11631#define countof(x) (sizeof((x)) / sizeof((x[0])))
11632
dcace88f 11633kern_return_t
eabd1701
A
11634job_mig_legacy_ipc_request(job_t j, vm_offset_t request,
11635 mach_msg_type_number_t requestCnt, mach_port_array_t request_fds,
11636 mach_msg_type_number_t request_fdsCnt, vm_offset_t *reply,
11637 mach_msg_type_number_t *replyCnt, mach_port_array_t *reply_fdps,
11638 mach_msg_type_number_t *reply_fdsCnt, mach_port_t asport)
dcace88f 11639{
eabd1701
A
11640 if (!j) {
11641 return BOOTSTRAP_NO_MEMORY;
dcace88f
A
11642 }
11643
eabd1701
A
11644 /* TODO: Once we support actions other than checking in, we must check the
11645 * sandbox capabilities and EUID of the requestort.
dcace88f 11646 */
eabd1701
A
11647 size_t nout_fdps = 0;
11648 size_t nfds = request_fdsCnt / sizeof(request_fds[0]);
11649 if (nfds > LAUNCHD_MAX_LEGACY_FDS) {
11650 job_log(j, LOG_ERR, "Too many incoming descriptors: %lu", nfds);
11651 return BOOTSTRAP_NO_MEMORY;
11652 }
11653
11654 int in_fds[LAUNCHD_MAX_LEGACY_FDS];
11655 size_t i = 0;
11656 for (i = 0; i < nfds; i++) {
11657 in_fds[i] = fileport_makefd(request_fds[i]);
11658 if (in_fds[i] == -1) {
11659 job_log(j, LOG_ERR, "Bad descriptor passed in legacy IPC request at index: %lu", i);
dcace88f 11660 }
eabd1701 11661 }
dcace88f 11662
eabd1701
A
11663 // DON'T goto outbad before this point.
11664 *reply = 0;
11665 *reply_fdps = NULL;
11666 launch_data_t ldreply = NULL;
11667
11668 size_t dataoff = 0;
11669 size_t fdoff = 0;
11670 launch_data_t ldrequest = launch_data_unpack((void *)request, requestCnt, in_fds, nfds, &dataoff, &fdoff);
11671 if (!ldrequest) {
11672 job_log(j, LOG_ERR, "Invalid legacy IPC request passed.");
11673 goto out_bad;
dcace88f 11674 }
5b0a4722 11675
eabd1701
A
11676 ldreply = job_do_legacy_ipc_request(j, ldrequest, asport);
11677 if (!ldreply) {
11678 ldreply = launch_data_new_errno(errno);
11679 if (!ldreply) {
11680 goto out_bad;
11681 }
dcace88f 11682 }
eabd1701
A
11683
11684 *replyCnt = 10 * 1024 * 1024;
11685 mig_allocate(reply, *replyCnt);
11686 if (!*reply) {
11687 goto out_bad;
11688 }
11689
11690 int out_fds[LAUNCHD_MAX_LEGACY_FDS];
11691 size_t nout_fds = 0;
11692 size_t sz = launch_data_pack(ldreply, (void *)*reply, *replyCnt, out_fds, &nout_fds);
11693 if (!sz) {
11694 job_log(j, LOG_ERR, "Could not pack legacy IPC reply.");
11695 goto out_bad;
11696 }
11697
11698 if (nout_fds) {
11699 if (nout_fds > 128) {
11700 job_log(j, LOG_ERR, "Too many outgoing descriptors: %lu", nout_fds);
11701 goto out_bad;
11702 }
11703
11704 *reply_fdsCnt = nout_fds * sizeof((*reply_fdps)[0]);
11705 mig_allocate((vm_address_t *)reply_fdps, *reply_fdsCnt);
11706 if (!*reply_fdps) {
11707 goto out_bad;
11708 }
11709
11710 for (i = 0; i < nout_fds; i++) {
11711 mach_port_t fp = MACH_PORT_NULL;
11712 /* Whatever. Worst case is that we insert MACH_PORT_NULL. Not a big
11713 * deal. Note, these get stuffed into an array whose disposition is
11714 * mach_port_move_send_t, so we don't have to worry about them after
11715 * returning.
11716 */
11717 if (fileport_makeport(out_fds[i], &fp) != 0) {
11718 job_log(j, LOG_ERR, "Could not pack response descriptor at index: %lu: %d: %s", i, errno, strerror(errno));
dcace88f 11719 }
eabd1701 11720 (*reply_fdps)[i] = fp;
dcace88f 11721 }
eabd1701
A
11722
11723 nout_fdps = nout_fds;
dcace88f 11724 } else {
eabd1701 11725 *reply_fdsCnt = 0;
dcace88f 11726 }
eabd1701
A
11727
11728 mig_deallocate(request, requestCnt);
11729 launch_data_free(ldreply);
11730 ldreply = NULL;
11731
11732 // Unused for now.
11733 (void)launchd_mport_deallocate(asport);
11734
5b0a4722 11735 return BOOTSTRAP_SUCCESS;
eabd1701
A
11736
11737out_bad:
11738 for (i = 0; i < nfds; i++) {
11739 (void)close(in_fds[i]);
11740 }
11741
11742 for (i = 0; i < nout_fds; i++) {
11743 (void)launchd_mport_deallocate((*reply_fdps)[i]);
11744 }
11745
11746 if (*reply) {
11747 mig_deallocate(*reply, *replyCnt);
11748 }
11749
11750 /* We should never hit this since the last goto out is in the case that
11751 * allocating this fails.
11752 */
11753 if (*reply_fdps) {
11754 mig_deallocate((vm_address_t)*reply_fdps, *reply_fdsCnt);
11755 }
11756
11757 if (ldreply) {
11758 launch_data_free(ldreply);
11759 }
11760
11761 return BOOTSTRAP_NO_MEMORY;
5b0a4722
A
11762}
11763
/* One-time setup of the job-manager hierarchy: create the root job manager
 * (System session as PID 1, Background otherwise) and the singleton XPC
 * "system" domain, then open a descriptor on /dev/autofs_nowait (or /dev as a
 * fallback) to keep certain path lookups from hanging on autofs.
 *
 * 'sflag' indicates a single-user (-s) boot and is passed through to the
 * root job manager.
 */
void
jobmgr_init(bool sflag)
{
	const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
	SLIST_INIT(&s_curious_jobs);
	LIST_INIT(&s_needing_sessions);

	// Failure to create either manager is unrecoverable for launchd.
	os_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
	os_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
	_s_xpc_system_domain->req_asid = launchd_audit_session;
	_s_xpc_system_domain->req_asport = launchd_audit_port;
	_s_xpc_system_domain->shortdesc = "system";
	if (pid1_magic) {
		root_jobmgr->monitor_shutdown = true;
	}

	uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
	s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
	// NOTE(review): 'likely' on the == -1 (failure) branch looks inverted —
	// confirm whether /dev/autofs_nowait is expected to be absent here.
	if (likely(s_no_hang_fd == -1)) {
		// Fall back to watching /dev itself via kqueue.
		if (jobmgr_assumes_zero_p(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK))) != -1) {
			(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr));
		}
	}
	s_no_hang_fd = _fd(s_no_hang_fd);
}
11789
/* Hash a NUL-terminated string with djb2 (hash = hash * 33 + c, seed 5381).
 * This algorithm was first reported by Dan Bernstein many years ago in
 * comp.lang.c.
 */
size_t
our_strhash(const char *s)
{
	size_t hash = 5381;
	size_t ch;

	while ((ch = *s++)) {
		// hash * 33 + ch, with the multiply strength-reduced to shift+add.
		hash = (hash << 5) + hash + ch;
	}

	return hash;
}
11805
// Map a job label to its bucket index in the label hash table.
size_t
hash_label(const char *label)
{
	return our_strhash(label) % LABEL_HASH_SIZE;
}
11811
// Map a Mach service name to its bucket index in the Mach-service hash table.
size_t
hash_ms(const char *msstr)
{
	return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
}
11817
11818bool
ddbbfbc1 11819waiting4removal_new(job_t j, mach_port_t rp)
5b0a4722 11820{
ddbbfbc1 11821 struct waiting_for_removal *w4r;
5b0a4722 11822
ddbbfbc1 11823 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
5b0a4722
A
11824 return false;
11825 }
11826
ddbbfbc1 11827 w4r->reply_port = rp;
5b0a4722 11828
ddbbfbc1 11829 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
5b0a4722
A
11830
11831 return true;
11832}
11833
/* Complete and discard a pending removal watcher: send the deferred MIG
 * reply (status 0) on its port, unlink it from the job's watcher list, and
 * free it.
 */
void
waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
{
	(void)job_assumes_zero(j, job_mig_send_signal_reply(w4r->reply_port, 0));

	SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);

	free(w4r);
}
11843
f36da725
A
/* Query kern.maxproc via sysctl(3). Falls back to 100 if the call fails
 * (failure is logged through posix_assumes_zero but otherwise ignored).
 */
size_t
get_kern_max_proc(void)
{
	int name[] = { CTL_KERN, KERN_MAXPROC };
	int maxproc = 100;
	size_t len = sizeof(maxproc);

	(void)posix_assumes_zero(sysctl(name, 2, &maxproc, &len, NULL, 0));

	return maxproc;
}
11855
eabd1701 11856// See rdar://problem/6271234
5b0a4722 11857void
ddbbfbc1 11858eliminate_double_reboot(void)
5b0a4722 11859{
dcace88f 11860 if (unlikely(!pid1_magic)) {
ddbbfbc1
A
11861 return;
11862 }
eabd1701 11863
f36da725 11864 struct stat sb;
ddbbfbc1 11865 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
eabd1701
A
11866 int result = -1;
11867
dcace88f 11868 if (unlikely(stat(argv[1], &sb) != -1)) {
ddbbfbc1 11869 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
eabd1701
A
11870
11871 pid_t p = 0;
11872 result = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ);
11873 if (result == -1) {
11874 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script: %d: %s", result, strerror(result));
ddbbfbc1
A
11875 goto out;
11876 }
eabd1701
A
11877
11878 int wstatus = 0;
11879 result = waitpid(p, &wstatus, 0);
11880 if (result == -1) {
11881 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to reap deferred install script: %d: %s", errno, strerror(errno));
ddbbfbc1
A
11882 goto out;
11883 }
eabd1701
A
11884
11885 if (WIFEXITED(wstatus)) {
11886 if ((result = WEXITSTATUS(wstatus)) == 0) {
ddbbfbc1
A
11887 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
11888 } else {
eabd1701 11889 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script failed with status: %d", WEXITSTATUS(wstatus));
ddbbfbc1
A
11890 }
11891 } else {
eabd1701 11892 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Weirdness with install script: %d", wstatus);
ddbbfbc1
A
11893 }
11894 }
11895out:
dcace88f 11896 if (result == 0) {
eabd1701
A
11897 /* If the unlink(2) was to fail, it would be most likely fail with
11898 * EBUSY. All the other failure cases for unlink(2) don't apply when
11899 * we're running under PID 1 and have verified that the file exists.
11900 * Outside of someone deliberately messing with us (like if
11901 * /etc/rc.deferredinstall is actually a looping sym-link or a mount
11902 * point for a filesystem) and I/O errors, we should be good.
ddbbfbc1 11903 */
eabd1701
A
11904 if (unlink(argv[1]) == -1) {
11905 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to remove deferred install script: %d: %s", errno, strerror(errno));
ddbbfbc1
A
11906 }
11907 }
11908}
11909
587e987e
A
/* Dictionary-iterator callback: apply one Jetsam key/value pair from a job's
 * property dictionary to job 'j' (priority, memory limit, and the
 * background-only memory-limit flag). Several SpringBoard-only keys are
 * recognized solely to suppress the unknown-key complaint.
 * Marks the job as having Jetsam properties once any key is seen.
 */
void
jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
{
	job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
	if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
		j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);

#if XPC_LPI_VERSION >= 20120810
		// Values in the reserved XPC band range are symbolic; translate them
		// through the launchd priority map to a real Jetsam priority.
		if (j->jetsam_priority > XPC_JETSAM_PRIORITY_RESERVED && j->jetsam_priority < XPC_JETSAM_PRIORITY_RESERVED + XPC_JETSAM_BAND_LAST) {
			size_t band = j->jetsam_priority - XPC_JETSAM_PRIORITY_RESERVED;
			j->jetsam_priority = _launchd_priority_map[band - 1].priority;
		}
#endif
		job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
		j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
		job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMITBACKGROUND) == 0) {
		j->jetsam_memory_limit_background = true;
		job_log(j, LOG_DEBUG, "Memory limit is for background state only");
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
		/* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
		 * You can't set this in a plist.
		 */
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMACTIVE) == 0) {
		// Ignore.
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
		/* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
		 * complain about it.
		 */
	} else {
		job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
	}

	// Any recognized-or-not Jetsam key marks the job as Jetsam-managed.
	if (unlikely(!j->jetsam_properties)) {
		j->jetsam_properties = true;
	}
}
5b0a4722 11948
95379394
A
/* Update a running job's Jetsam priority band (translated through the
 * launchd priority map) and push the new priority plus opaque user data to
 * the kernel via memorystatus_control(2). Embedded-only; a no-op elsewhere.
 * ESRCH from the kernel (job already gone) is deliberately not treated as an
 * error.
 */
void
job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data)
{
#if TARGET_OS_EMBEDDED
	j->jetsam_priority = _launchd_priority_map[band - 1].priority;
	j->jetsam_properties = true;

	memorystatus_priority_properties_t mjp;
	mjp.priority = j->jetsam_priority;
	mjp.user_data = user_data;

	size_t size = sizeof(mjp);
	int r = memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, j->p, 0, &mjp, size);
	if (r == -1 && errno != ESRCH) {
		(void)job_assumes_zero(j, errno);
	}
#else
#pragma unused(j, band, user_data)
#endif
}
11969
95379394
A
/* Update a running job's Jetsam high-water memory limit and push it to the
 * kernel via memorystatus_control(2). Embedded-only; a no-op elsewhere.
 * ESRCH from the kernel (job already gone) is deliberately not treated as an
 * error.
 */
void
job_update_jetsam_memory_limit(job_t j, int32_t limit)
{
#if TARGET_OS_EMBEDDED
	j->jetsam_memlimit = limit;
	j->jetsam_properties = true;

	int r = memorystatus_control(MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK, j->p, limit, NULL, 0);
	if (r == -1 && errno != ESRCH) {
		(void)job_assumes_zero(j, errno);
	}
#else
#pragma unused(j, limit)
#endif
}