]> git.saurik.com Git - apple/launchd.git/blame - launchd/src/launchd_core_logic.c
launchd-392.39.tar.gz
[apple/launchd.git] / launchd / src / launchd_core_logic.c
CommitLineData
ed34e3c3 1/*
ed34e3c3
A
2 * @APPLE_APACHE_LICENSE_HEADER_START@
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * @APPLE_APACHE_LICENSE_HEADER_END@
17 */
18
4445fe9e 19static const char *const __rcs_file_version__ = "$Revision: 26200 $";
5b0a4722
A
20
21#include "config.h"
22#include "launchd_core_logic.h"
dcace88f
A
23#include "launch_internal.h"
24#include "launchd_helper.h"
ed34e3c3 25
f36da725 26#include <TargetConditionals.h>
ed34e3c3
A
27#include <mach/mach.h>
28#include <mach/mach_error.h>
29#include <mach/boolean.h>
30#include <mach/message.h>
31#include <mach/notify.h>
32#include <mach/mig_errors.h>
33#include <mach/mach_traps.h>
34#include <mach/mach_interface.h>
35#include <mach/host_info.h>
36#include <mach/mach_host.h>
37#include <mach/exception.h>
5b0a4722 38#include <mach/host_reboot.h>
ed34e3c3
A
39#include <sys/types.h>
40#include <sys/queue.h>
41#include <sys/event.h>
ed34e3c3
A
42#include <sys/stat.h>
43#include <sys/ucred.h>
44#include <sys/fcntl.h>
45#include <sys/un.h>
5b0a4722 46#include <sys/reboot.h>
ed34e3c3
A
47#include <sys/wait.h>
48#include <sys/sysctl.h>
49#include <sys/sockio.h>
50#include <sys/time.h>
51#include <sys/resource.h>
52#include <sys/ioctl.h>
53#include <sys/mount.h>
5b0a4722 54#include <sys/pipe.h>
ddbbfbc1
A
55#include <sys/mman.h>
56#include <sys/socket.h>
57#include <sys/syscall.h>
ed34e3c3
A
58#include <net/if.h>
59#include <netinet/in.h>
60#include <netinet/in_var.h>
61#include <netinet6/nd6.h>
5b0a4722 62#include <bsm/libbsm.h>
ed34e3c3
A
63#include <unistd.h>
64#include <signal.h>
65#include <errno.h>
ed34e3c3
A
66#include <libgen.h>
67#include <stdio.h>
68#include <stdlib.h>
69#include <stdarg.h>
70#include <stdbool.h>
71#include <paths.h>
72#include <pwd.h>
73#include <grp.h>
74#include <ttyent.h>
75#include <dlfcn.h>
76#include <dirent.h>
77#include <string.h>
78#include <ctype.h>
79#include <glob.h>
dcace88f 80#include <System/sys/spawn.h>
5b0a4722 81#include <spawn.h>
dcace88f 82#include <time.h>
5c88273d 83#include <libinfo.h>
dcace88f 84
ddbbfbc1
A
85#include <libproc.h>
86#include <malloc/malloc.h>
87#include <pthread.h>
dcace88f 88#include <libproc.h>
f36da725 89#if HAVE_SANDBOX
ddbbfbc1 90#define __APPLE_API_PRIVATE
5b0a4722 91#include <sandbox.h>
f36da725
A
92#endif
93#if HAVE_QUARANTINE
94#include <quarantine.h>
95#endif
ddbbfbc1
A
96#if TARGET_OS_EMBEDDED
97#include <sys/kern_memorystatus.h>
98#else
5c88273d 99extern int gL1CacheEnabled;
ddbbfbc1
A
100/* To make my life easier. */
101typedef struct jetsam_priority_entry {
587e987e 102 pid_t pid;
dcace88f 103 uint32_t priority;
587e987e
A
104 uint32_t flags;
105 int32_t hiwat_pages;
106 int32_t hiwat_reserved1;
107 int32_t hiwat_reserved2;
108 int32_t hiwat_reserved3;
ddbbfbc1
A
109} jetsam_priority_entry_t;
110
111enum {
112 kJetsamFlagsFrontmost = (1 << 0),
113 kJetsamFlagsKilled = (1 << 1)
114};
115#endif
5b0a4722 116
ef398931
A
117#include "launch.h"
118#include "launch_priv.h"
119#include "launch_internal.h"
120#include "bootstrap.h"
121#include "bootstrap_priv.h"
122#include "vproc.h"
123#include "vproc_internal.h"
5b0a4722
A
124
125#include "reboot2.h"
ed34e3c3 126
ed34e3c3 127#include "launchd.h"
5b0a4722 128#include "launchd_runtime.h"
ed34e3c3 129#include "launchd_unix_ipc.h"
5b0a4722
A
130#include "protocol_vproc.h"
131#include "protocol_vprocServer.h"
ef398931 132#include "protocol_job_reply.h"
ddbbfbc1
A
133#include "protocol_job_forward.h"
134#include "mach_excServer.h"
dcace88f
A
135#if !TARGET_OS_EMBEDDED
136#include "domainServer.h"
137#include "init.h"
5c88273d 138#endif /* !TARGET_OS_EMBEDDED */
dcace88f 139#include "eventsServer.h"
5b0a4722 140
dcace88f
A
141#ifndef POSIX_SPAWN_OSX_TALAPP_START
142#define POSIX_SPAWN_OSX_TALAPP_START 0x0400
143#endif
144
145#ifndef POSIX_SPAWN_OSX_WIDGET_START
146#define POSIX_SPAWN_OSX_WIDGET_START 0x0800
147#endif
148
149#ifndef POSIX_SPAWN_IOS_APP_START
150#define POSIX_SPAWN_IOS_APP_START 0x1000
151#endif
152
153/* LAUNCHD_DEFAULT_EXIT_TIMEOUT
ddbbfbc1
A
154 * If the job hasn't exited in the given number of seconds after sending
155 * it a SIGTERM, SIGKILL it. Can be overriden in the job plist.
156 */
157#define LAUNCHD_MIN_JOB_RUN_TIME 10
ddbbfbc1 158#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
dcace88f
A
159#define LAUNCHD_SIGKILL_TIMER 2
160#define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
5b0a4722 161
ddbbfbc1 162#define SHUTDOWN_LOG_DIR "/var/log/shutdown"
5b0a4722
A
163
164#define TAKE_SUBSET_NAME "TakeSubsetName"
165#define TAKE_SUBSET_PID "TakeSubsetPID"
166#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
167
168#define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
169
170extern char **environ;
171
172struct waiting_for_removal {
173 SLIST_ENTRY(waiting_for_removal) sle;
174 mach_port_t reply_port;
175};
176
177static bool waiting4removal_new(job_t j, mach_port_t rp);
178static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
179
ed34e3c3
A
180struct machservice {
181 SLIST_ENTRY(machservice) sle;
5b0a4722
A
182 SLIST_ENTRY(machservice) special_port_sle;
183 LIST_ENTRY(machservice) name_hash_sle;
184 LIST_ENTRY(machservice) port_hash_sle;
dcace88f 185 struct machservice *alias;
ddbbfbc1 186 job_t job;
5b0a4722 187 unsigned int gen_num;
ed34e3c3 188 mach_port_name_t port;
dcace88f
A
189 unsigned int
190 isActive :1,
191 reset :1,
192 recv :1,
193 hide :1,
194 kUNCServer :1,
195 per_user_hack :1,
196 debug_on_close :1,
197 per_pid :1,
198 delete_on_destruction :1,
199 drain_one_on_crash :1,
200 drain_all_on_crash :1,
201 event_update_port :1, /* The job which owns this port is the event monitor. */
202 upfront :1, /* This service was declared in the plist. */
203 event_channel :1, /* The job is to receive events on this channel. */
204 /* Don't let the size of this field to get too small. It has to be large enough
205 * to represent the reasonable range of special port numbers.
206 */
207 special_port_num :18;
5b0a4722 208 const char name[0];
ed34e3c3
A
209};
210
5b0a4722
A
211static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */
212
213#define PORT_HASH_SIZE 32
214#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
215
216static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
217
ed34e3c3
A
218static void machservice_setup(launch_data_t obj, const char *key, void *context);
219static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
5b0a4722
A
220static void machservice_resetport(job_t j, struct machservice *ms);
221static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
dcace88f
A
222#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
223static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
5c88273d 224#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
5b0a4722
A
225static void machservice_ignore(job_t j, struct machservice *ms);
226static void machservice_watch(job_t j, struct machservice *ms);
227static void machservice_delete(job_t j, struct machservice *, bool port_died);
228static void machservice_request_notifications(struct machservice *);
229static mach_port_t machservice_port(struct machservice *);
230static job_t machservice_job(struct machservice *);
231static bool machservice_hidden(struct machservice *);
232static bool machservice_active(struct machservice *);
233static const char *machservice_name(struct machservice *);
234static bootstrap_status_t machservice_status(struct machservice *);
ddbbfbc1 235void machservice_drain_port(struct machservice *);
dcace88f 236static struct machservice *xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p);
ed34e3c3
A
237
238struct socketgroup {
239 SLIST_ENTRY(socketgroup) sle;
240 int *fds;
241 unsigned int junkfds:1, fd_cnt:31;
f36da725
A
242 union {
243 const char name[0];
244 char name_init[0];
245 };
ed34e3c3
A
246};
247
ddbbfbc1 248static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds);
5b0a4722
A
249static void socketgroup_delete(job_t j, struct socketgroup *sg);
250static void socketgroup_watch(job_t j, struct socketgroup *sg);
251static void socketgroup_ignore(job_t j, struct socketgroup *sg);
252static void socketgroup_callback(job_t j);
ed34e3c3 253static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
5b0a4722 254static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
ed34e3c3
A
255
256struct calendarinterval {
5b0a4722 257 LIST_ENTRY(calendarinterval) global_sle;
ed34e3c3 258 SLIST_ENTRY(calendarinterval) sle;
5b0a4722 259 job_t job;
ed34e3c3 260 struct tm when;
5b0a4722 261 time_t when_next;
ed34e3c3
A
262};
263
5b0a4722
A
264static LIST_HEAD(, calendarinterval) sorted_calendar_events;
265
266static bool calendarinterval_new(job_t j, struct tm *w);
267static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
268static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
269static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
270static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
271static void calendarinterval_callback(void);
272static void calendarinterval_sanity_check(void);
ed34e3c3
A
273
274struct envitem {
275 SLIST_ENTRY(envitem) sle;
ddbbfbc1 276 bool one_shot;
ed34e3c3 277 char *value;
f36da725
A
278 union {
279 const char key[0];
280 char key_init[0];
281 };
ed34e3c3
A
282};
283
ddbbfbc1 284static bool envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot);
5b0a4722 285static void envitem_delete(job_t j, struct envitem *ei, bool global);
ed34e3c3 286static void envitem_setup(launch_data_t obj, const char *key, void *context);
ddbbfbc1 287static void envitem_setup_one_shot(launch_data_t obj, const char *key, void *context);
ed34e3c3
A
288
289struct limititem {
290 SLIST_ENTRY(limititem) sle;
291 struct rlimit lim;
292 unsigned int setsoft:1, sethard:1, which:30;
293};
294
5b0a4722
A
295static bool limititem_update(job_t j, int w, rlim_t r);
296static void limititem_delete(job_t j, struct limititem *li);
ed34e3c3 297static void limititem_setup(launch_data_t obj, const char *key, void *context);
f36da725 298#if HAVE_SANDBOX
5b0a4722 299static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
f36da725 300#endif
ed34e3c3 301
587e987e
A
302static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
303
ed34e3c3
A
304typedef enum {
305 NETWORK_UP = 1,
306 NETWORK_DOWN,
307 SUCCESSFUL_EXIT,
308 FAILED_EXIT,
dcace88f
A
309 CRASHED,
310 DID_NOT_CRASH,
ed34e3c3
A
311 PATH_EXISTS,
312 PATH_MISSING,
5b0a4722
A
313 OTHER_JOB_ENABLED,
314 OTHER_JOB_DISABLED,
315 OTHER_JOB_ACTIVE,
316 OTHER_JOB_INACTIVE,
317 PATH_CHANGES,
318 DIR_NOT_EMPTY,
ed34e3c3
A
319 // FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
320} semaphore_reason_t;
321
322struct semaphoreitem {
323 SLIST_ENTRY(semaphoreitem) sle;
324 semaphore_reason_t why;
ddbbfbc1 325 bool watching_parent;
5b0a4722 326 int fd;
dcace88f 327
f36da725
A
328 union {
329 const char what[0];
330 char what_init[0];
331 };
ed34e3c3
A
332};
333
5b0a4722
A
334struct semaphoreitem_dict_iter_context {
335 job_t j;
336 semaphore_reason_t why_true;
337 semaphore_reason_t why_false;
338};
ed34e3c3 339
5b0a4722
A
340static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
341static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
342static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
343static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
344static void semaphoreitem_callback(job_t j, struct kevent *kev);
345static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
346static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
347static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
348
dcace88f
A
349struct externalevent {
350 LIST_ENTRY(externalevent) sys_le;
351 LIST_ENTRY(externalevent) job_le;
352 struct eventsystem *sys;
353
354 uint64_t id;
355 job_t job;
356 bool state;
357 bool wanted_state;
358 launch_data_t event;
359
360 char name[0];
361};
5b0a4722 362
dcace88f
A
363struct externalevent_iter_ctx {
364 job_t j;
365 struct eventsystem *sys;
ddbbfbc1
A
366};
367
dcace88f
A
368static bool externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event);
369static void externalevent_delete(struct externalevent *ee);
370static void externalevent_setup(launch_data_t obj, const char *key, void *context);
371static struct externalevent *externalevent_find(const char *sysname, uint64_t id);
372
373struct eventsystem {
374 LIST_ENTRY(eventsystem) global_le;
375 LIST_HEAD(, externalevent) events;
376 uint64_t curid;
377 bool has_updates;
378 char name[0];
ddbbfbc1
A
379};
380
dcace88f
A
381static struct eventsystem *eventsystem_new(const char *name);
382static void eventsystem_delete(struct eventsystem *sys);
383static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
384static struct eventsystem *eventsystem_find(const char *name);
385static void eventsystem_ping(void);
386
387#define ACTIVE_JOB_HASH_SIZE 32
388#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))
389
390#define MACHSERVICE_HASH_SIZE 37
391
392#define LABEL_HASH_SIZE 53
5b0a4722
A
393struct jobmgr_s {
394 kq_callback kqjobmgr_callback;
dcace88f 395 LIST_ENTRY(jobmgr_s) xpc_le;
5b0a4722
A
396 SLIST_ENTRY(jobmgr_s) sle;
397 SLIST_HEAD(, jobmgr_s) submgrs;
398 LIST_HEAD(, job_s) jobs;
ddbbfbc1 399 LIST_HEAD(, job_s) jetsam_jobs;
dcace88f
A
400
401 /* For legacy reasons, we keep all job labels that are imported in the
402 * root job manager's label hash. If a job manager is an XPC domain, then
403 * it gets its own label hash that is separate from the "global" one
404 * stored in the root job manager.
405 */
406 LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
5b0a4722
A
407 LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
408 LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
ddbbfbc1 409 LIST_HEAD(, job_s) global_env_jobs;
5b0a4722
A
410 mach_port_t jm_port;
411 mach_port_t req_port;
412 jobmgr_t parentmgr;
413 int reboot_flags;
dcace88f 414 time_t shutdown_time;
5b0a4722 415 unsigned int global_on_demand_cnt;
5b0a4722 416 unsigned int normal_active_cnt;
ddbbfbc1 417 unsigned int jetsam_jobs_cnt;
dcace88f
A
418 unsigned int
419 shutting_down :1,
420 session_initialized :1,
421 killed_stray_jobs :1,
422 monitor_shutdown :1,
423 shutdown_jobs_dirtied :1,
424 shutdown_jobs_cleaned :1,
425 xpc_singleton :1;
ddbbfbc1 426 uint32_t properties;
dcace88f
A
427 /* XPC-specific properties. */
428 char owner[MAXCOMLEN];
429 char *shortdesc;
430 mach_port_t req_bsport;
431 mach_port_t req_excport;
432 mach_port_t req_asport;
433 pid_t req_pid;
434 uid_t req_euid;
435 gid_t req_egid;
436 au_asid_t req_asid;
437 vm_offset_t req_ctx;
438 mach_msg_type_number_t req_ctx_sz;
439 mach_port_t req_rport;
440 kern_return_t error;
f36da725
A
441 union {
442 const char name[0];
443 char name_init[0];
444 };
5b0a4722 445};
ed34e3c3 446
dcace88f
A
447/* Global XPC domains. */
448#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
449static jobmgr_t _s_xpc_system_domain;
450static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
451static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
5c88273d 452#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
dcace88f 453
5b0a4722 454#define jobmgr_assumes(jm, e) \
ddbbfbc1 455 (unlikely(!(e)) ? jobmgr_log_bug(jm, __LINE__), false : true)
5b0a4722 456
dcace88f
A
457static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
458#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
459static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
460static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
461static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
462static job_t xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
5c88273d 463#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
5b0a4722
A
464static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
465static jobmgr_t jobmgr_parent(jobmgr_t jm);
466static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
f36da725 467static bool jobmgr_label_test(jobmgr_t jm, const char *str);
5b0a4722 468static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
ddbbfbc1
A
469static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
470static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
5b0a4722
A
471static void jobmgr_remove(jobmgr_t jm);
472static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
473static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
ddbbfbc1 474static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
5b0a4722 475static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
ddbbfbc1 476static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
5b0a4722 477static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
dcace88f 478static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
5b0a4722
A
479static void job_export_all2(jobmgr_t jm, launch_data_t where);
480static void jobmgr_callback(void *obj, struct kevent *kev);
481static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
482static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
483static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
484static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
485static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
486/* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
ddbbfbc1 487static void jobmgr_log_bug(jobmgr_t jm, unsigned int line);
5b0a4722
A
488
489#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
ddbbfbc1 490#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
dcace88f 491#define AUTO_PICK_XPC_LABEL (const char *)(~2)
ddbbfbc1
A
492
493struct suspended_peruser {
494 LIST_ENTRY(suspended_peruser) sle;
495 job_t j;
496};
5b0a4722
A
497
498struct job_s {
ddbbfbc1 499 kq_callback kqjob_callback; /* MUST be first element of this structure for benefit of launchd's run loop. */
5b0a4722 500 LIST_ENTRY(job_s) sle;
dcace88f 501 LIST_ENTRY(job_s) subjob_sle;
ddbbfbc1
A
502 LIST_ENTRY(job_s) needing_session_sle;
503 LIST_ENTRY(job_s) jetsam_sle;
5b0a4722
A
504 LIST_ENTRY(job_s) pid_hash_sle;
505 LIST_ENTRY(job_s) label_hash_sle;
ddbbfbc1 506 LIST_ENTRY(job_s) global_env_sle;
ddbbfbc1
A
507 SLIST_ENTRY(job_s) curious_jobs_sle;
508 LIST_HEAD(, suspended_peruser) suspended_perusers;
509 LIST_HEAD(, waiting_for_exit) exit_watchers;
dcace88f
A
510 LIST_HEAD(, job_s) subjobs;
511 LIST_HEAD(, externalevent) events;
ed34e3c3 512 SLIST_HEAD(, socketgroup) sockets;
ed34e3c3
A
513 SLIST_HEAD(, calendarinterval) cal_intervals;
514 SLIST_HEAD(, envitem) global_env;
515 SLIST_HEAD(, envitem) env;
516 SLIST_HEAD(, limititem) limits;
517 SLIST_HEAD(, machservice) machservices;
518 SLIST_HEAD(, semaphoreitem) semaphores;
5b0a4722 519 SLIST_HEAD(, waiting_for_removal) removal_watchers;
dcace88f 520 job_t alias;
ed34e3c3 521 struct rusage ru;
5b0a4722
A
522 cpu_type_t *j_binpref;
523 size_t j_binpref_cnt;
524 mach_port_t j_port;
dcace88f
A
525 mach_port_t exit_status_dest;
526 mach_port_t exit_status_port;
527 mach_port_t spawn_reply_port;
ed34e3c3 528 uid_t mach_uid;
5b0a4722 529 jobmgr_t mgr;
ddbbfbc1 530 size_t argc;
ed34e3c3
A
531 char **argv;
532 char *prog;
533 char *rootdir;
534 char *workingdir;
535 char *username;
536 char *groupname;
ddbbfbc1 537 char *stdinpath;
ed34e3c3
A
538 char *stdoutpath;
539 char *stderrpath;
fe044cc9 540 char *alt_exc_handler;
ddbbfbc1 541 struct vproc_shmem_s *shmem;
5b0a4722
A
542 struct machservice *lastlookup;
543 unsigned int lastlookup_gennum;
f36da725 544#if HAVE_SANDBOX
5b0a4722
A
545 char *seatbelt_profile;
546 uint64_t seatbelt_flags;
f36da725
A
547#endif
548#if HAVE_QUARANTINE
5b0a4722
A
549 void *quarantine_data;
550 size_t quarantine_data_sz;
f36da725 551#endif
ed34e3c3 552 pid_t p;
ed34e3c3 553 int last_exit_status;
ddbbfbc1
A
554 int stdin_fd;
555 int fork_fd;
5b0a4722 556 int log_redirect_fd;
ed34e3c3 557 int nice;
5b0a4722 558 int stdout_err_fd;
dcace88f 559 uint32_t pstype;
587e987e
A
560 int32_t jetsam_priority;
561 int32_t jetsam_memlimit;
dcace88f 562 int32_t jetsam_seq;
587e987e 563 int32_t main_thread_priority;
ddbbfbc1
A
564 uint32_t timeout;
565 uint32_t exit_timeout;
566 uint64_t sent_signal_time;
5b0a4722
A
567 uint64_t start_time;
568 uint32_t min_run_time;
569 uint32_t start_interval;
ddbbfbc1 570 uint32_t peruser_suspend_count; /* The number of jobs that have disabled this per-user launchd. */
dcace88f
A
571 uuid_t instance_id;
572 uint32_t fail_cnt;
ddbbfbc1
A
573#if 0
574 /* someday ... */
575 enum {
576 J_TYPE_ANONYMOUS = 1,
577 J_TYPE_LANCHSERVICES,
578 J_TYPE_MACHINIT,
579 J_TYPE_INETD,
580 } j_type;
581#endif
dcace88f
A
582 bool
583 debug :1, /* man launchd.plist --> Debug */
584 ondemand :1, /* man launchd.plist --> KeepAlive == false */
585 session_create :1, /* man launchd.plist --> SessionCreate */
586 low_pri_io :1, /* man launchd.plist --> LowPriorityIO */
587 no_init_groups :1, /* man launchd.plist --> InitGroups */
588 priv_port_has_senders :1, /* a legacy mach_init concept to make bootstrap_create_server/service() work */
589 importing_global_env :1, /* a hack during job importing */
590 importing_hard_limits :1, /* a hack during job importing */
591 setmask :1, /* man launchd.plist --> Umask */
592 anonymous :1, /* a process that launchd knows about, but isn't managed by launchd */
593 checkedin :1, /* a legacy mach_init concept to detect sick jobs */
594 legacy_mach_job :1, /* a job created via bootstrap_create_server() */
595 legacy_LS_job :1, /* a job created via spawn_via_launchd() */
596 inetcompat :1, /* a legacy job that wants inetd compatible semantics */
597 inetcompat_wait :1, /* a twist on inetd compatibility */
598 start_pending :1, /* an event fired and the job should start, but not necessarily right away */
599 globargv :1, /* man launchd.plist --> EnableGlobbing */
600 wait4debugger :1, /* man launchd.plist --> WaitForDebugger */
601 wait4debugger_oneshot :1, /* One-shot WaitForDebugger. */
602 internal_exc_handler :1, /* MachExceptionHandler == true */
603 stall_before_exec :1, /* a hack to support an option of spawn_via_launchd() */
604 only_once :1, /* man launchd.plist --> LaunchOnlyOnce. Note: 5465184 Rename this to "HopefullyNeverExits" */
605 currently_ignored :1, /* Make job_ignore() / job_watch() work. If these calls were balanced, then this wouldn't be necessarily. */
606 forced_peers_to_demand_mode :1, /* A job that forced all other jobs to be temporarily launch-on-demand */
607 setnice :1, /* man launchd.plist --> Nice */
608 removal_pending :1, /* a job was asked to be unloaded/removed while running, we'll remove it after it exits */
609 sent_sigkill :1, /* job_kill() was called */
610 debug_before_kill :1, /* enter the kernel debugger before killing a job */
611 weird_bootstrap :1, /* a hack that launchd+launchctl use during jobmgr_t creation */
612 start_on_mount :1, /* man launchd.plist --> StartOnMount */
613 per_user :1, /* This job is a per-user launchd managed by the PID 1 launchd */
614 unload_at_mig_return :1, /* A job thoroughly confused launchd. We need to unload it ASAP */
615 abandon_pg :1, /* man launchd.plist --> AbandonProcessGroup */
616 ignore_pg_at_shutdown :1, /* During shutdown, do not send SIGTERM to stray processes in the process group of this job. */
617 poll_for_vfs_changes :1, /* a hack to work around the fact that kqueues don't work on all filesystems */
618 deny_job_creation :1, /* Don't let this job create new 'job_t' objects in launchd */
619 kill_via_shmem :1, /* man launchd.plist --> EnableTransactions */
620 sent_kill_via_shmem :1, /* We need to 'kill_via_shmem' once-and-only-once */
621 clean_kill :1, /* The job was sent SIGKILL because it was clean. */
622 kill_after_sample :1, /* The job is to be killed after sampling. */
623 reap_after_trace :1, /* The job exited before sample did, so we should reap it after sample is done. */
624 nosy :1, /* The job has an OtherJobEnabled KeepAlive criterion. */
625 crashed :1, /* The job is the default Mach exception handler, and it crashed. */
626 reaped :1, /* We've received NOTE_EXIT for the job. */
627 stopped :1, /* job_stop() was called. */
628 jetsam_frontmost :1, /* The job is considered "frontmost" by Jetsam. */
629 needs_kickoff :1, /* The job is to be kept alive continuously, but it must be initially kicked off. */
630 is_bootstrapper :1, /* The job is a bootstrapper. */
631 has_console :1, /* The job owns the console. */
632 embedded_special_privileges :1, /* The job runs as a non-root user on embedded but has select privileges of the root user. */
633 did_exec :1, /* The job exec(2)ed successfully. */
634 xpcproxy_did_exec :1, /* The job is an XPC service, and XPC proxy successfully exec(3)ed. */
635 holds_ref :1, /* The (anonymous) job called vprocmgr_switch_to_session(). */
636 jetsam_properties :1, /* The job has Jetsam limits in place. */
637 dedicated_instance :1, /* This job was created as the result of a look up of a service provided by a per-lookup job. */
638 multiple_instances :1, /* The job supports creating additional instances of itself. */
639 former_subjob :1, /* The sub-job was already removed from the parent's list of sub-jobs. */
640 event_monitor :1, /* The job is responsible for monitoring external events for this launchd. */
641 removing :1, /* A lame hack. */
642 disable_aslr :1, /* Disable ASLR when launching this job. */
643 xpc_service :1, /* The job is an XPC Service. */
644 shutdown_monitor :1, /* The job is the Performance team's shutdown monitor. */
645 dirty_at_shutdown :1, /* We should open a transaction for the job when shutdown begins. */
a6e7a709
A
646 workaround9359725 :1, /* The job was sent SIGKILL but did not exit in a timely fashion, indicating a kernel bug. */
647 xpc_bootstrapper :1;
dcace88f 648
ed34e3c3 649 mode_t mask;
ddbbfbc1 650 pid_t tracing_pid;
dcace88f
A
651 mach_port_t asport;
652 /* Only set for per-user launchd's. */
653 au_asid_t asid;
ddbbfbc1 654 uuid_t expected_audit_uuid;
5b0a4722 655 const char label[0];
ed34e3c3
A
656};
657
5b0a4722
A
658static size_t hash_label(const char *label) __attribute__((pure));
659static size_t hash_ms(const char *msstr) __attribute__((pure));
ddbbfbc1 660static SLIST_HEAD(, job_s) s_curious_jobs;
5b0a4722
A
661
662#define job_assumes(j, e) \
ddbbfbc1 663 (unlikely(!(e)) ? job_log_bug(j, __LINE__), false : true)
5b0a4722 664
ed34e3c3 665static void job_import_keys(launch_data_t obj, const char *key, void *context);
5b0a4722
A
666static void job_import_bool(job_t j, const char *key, bool value);
667static void job_import_string(job_t j, const char *key, const char *value);
668static void job_import_integer(job_t j, const char *key, long long value);
669static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
670static void job_import_array(job_t j, const char *key, launch_data_t value);
671static void job_import_opaque(job_t j, const char *key, launch_data_t value);
672static bool job_set_global_on_demand(job_t j, bool val);
673static const char *job_active(job_t j);
674static void job_watch(job_t j);
675static void job_ignore(job_t j);
ddbbfbc1 676static void job_cleanup_after_tracer(job_t j);
5b0a4722
A
677static void job_reap(job_t j);
678static bool job_useless(job_t j);
679static bool job_keepalive(job_t j);
ddbbfbc1 680static void job_dispatch_curious_jobs(job_t j);
5b0a4722
A
681static void job_start(job_t j);
682static void job_start_child(job_t j) __attribute__((noreturn));
683static void job_setup_attributes(job_t j);
684static bool job_setup_machport(job_t j);
dcace88f 685static kern_return_t job_setup_exit_port(job_t j);
5b0a4722
A
686static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
687static void job_postfork_become_user(job_t j);
ddbbfbc1
A
688static void job_postfork_test_user(job_t j);
689static void job_log_pids_with_weird_uids(job_t j);
5b0a4722 690static void job_setup_exception_port(job_t j, task_t target_task);
ed34e3c3 691static void job_callback(void *obj, struct kevent *kev);
ddbbfbc1 692static void job_callback_proc(job_t j, struct kevent *kev);
5b0a4722
A
693static void job_callback_timer(job_t j, void *ident);
694static void job_callback_read(job_t j, int ident);
695static void job_log_stray_pg(job_t j);
ddbbfbc1
A
696static void job_log_children_without_exec(job_t j);
697static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
698static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
dcace88f
A
699#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
700static job_t job_new_alias(jobmgr_t jm, job_t src);
5c88273d 701#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
ddbbfbc1 702static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
dcace88f 703static job_t job_new_subjob(job_t j, uuid_t identifier);
5b0a4722
A
704static void job_kill(job_t j);
705static void job_uncork_fork(job_t j);
706static void job_log_stdouterr(job_t j);
707static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
708static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
ddbbfbc1 709static void job_log_bug(job_t j, unsigned int line);
5b0a4722 710static void job_log_stdouterr2(job_t j, const char *msg, ...);
ddbbfbc1 711static void job_set_exception_port(job_t j, mach_port_t port);
dcace88f
A
712static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
713static void job_open_shutdown_transaction(job_t ji);
714static void job_close_shutdown_transaction(job_t ji);
5b0a4722 715
ed34e3c3
A
716static const struct {
717 const char *key;
718 int val;
719} launchd_keys2limits[] = {
5b0a4722
A
720 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
721 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
722 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
723 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
724 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
725 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
726 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
727 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
728 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
ed34e3c3
A
729};
730
731static time_t cronemu(int mon, int mday, int hour, int min);
732static time_t cronemu_wday(int wday, int hour, int min);
733static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
734static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
735static bool cronemu_hour(struct tm *wtm, int hour, int min);
736static bool cronemu_min(struct tm *wtm, int min);
737
dcace88f
A
738/* These functions are a total nightmare to get to through headers.
739 * See rdar://problem/8223092.
740 */
741typedef __darwin_mach_port_t fileport_t;
742#define FILEPORT_NULL ((fileport_t)0)
743extern int fileport_makeport(int, fileport_t *);
744extern int fileport_makefd(fileport_t);
745
5b0a4722 746/* miscellaneous file local functions */
ddbbfbc1 747static size_t get_kern_max_proc(void);
5b0a4722 748static int dir_has_files(job_t j, const char *path);
ed34e3c3 749static char **mach_cmd2argv(const char *string);
5b0a4722
A
750static size_t our_strhash(const char *s) __attribute__((pure));
751static void extract_rcsid_substr(const char *i, char *o, size_t osz);
ddbbfbc1
A
752
753void eliminate_double_reboot(void);
754
5b0a4722
A
755/* file local globals */
756static size_t total_children;
757static size_t total_anon_children;
758static mach_port_t the_exception_server;
5b0a4722 759static job_t workaround_5477111;
ddbbfbc1 760static LIST_HEAD(, job_s) s_needing_sessions;
dcace88f
A
761static LIST_HEAD(, eventsystem) _s_event_systems;
762static job_t _s_event_monitor;
a6e7a709 763static job_t _s_xpc_bootstrapper;
dcace88f
A
764static job_t _s_shutdown_monitor;
765static mach_port_t _s_event_update_port;
ddbbfbc1 766mach_port_t g_audit_session_port = MACH_PORT_NULL;
dcace88f 767static uint32_t s_jetsam_sequence_id;
ddbbfbc1
A
768
769#if !TARGET_OS_EMBEDDED
770static job_t s_embedded_privileged_job = (job_t)&root_jobmgr;
771au_asid_t g_audit_session = AU_DEFAUDITSID;
772#else
773static job_t s_embedded_privileged_job = NULL;
774pid_t g_audit_session = 0;
775#endif
776
777static int s_no_hang_fd = -1;
5b0a4722
A
778
779/* process wide globals */
780mach_port_t inherited_bootstrap_port;
781jobmgr_t root_jobmgr;
ddbbfbc1
A
782bool g_shutdown_debugging = false;
783bool g_verbose_boot = false;
784bool g_embedded_privileged_action = false;
587e987e 785bool g_runtime_busy_time = false;
ed34e3c3
A
786
787void
5b0a4722 788job_ignore(job_t j)
ed34e3c3 789{
5b0a4722 790 struct semaphoreitem *si;
ed34e3c3
A
791 struct socketgroup *sg;
792 struct machservice *ms;
ed34e3c3 793
5b0a4722
A
794 if (j->currently_ignored) {
795 return;
796 }
797
798 job_log(j, LOG_DEBUG, "Ignoring...");
799
800 j->currently_ignored = true;
801
802 if (j->poll_for_vfs_changes) {
803 j->poll_for_vfs_changes = false;
dcace88f 804 (void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
5b0a4722
A
805 }
806
807 SLIST_FOREACH(sg, &j->sockets, sle) {
ed34e3c3 808 socketgroup_ignore(j, sg);
5b0a4722 809 }
ed34e3c3 810
5b0a4722
A
811 SLIST_FOREACH(ms, &j->machservices, sle) {
812 machservice_ignore(j, ms);
813 }
ed34e3c3 814
5b0a4722
A
815 SLIST_FOREACH(si, &j->semaphores, sle) {
816 semaphoreitem_ignore(j, si);
817 }
ed34e3c3
A
818}
819
820void
5b0a4722 821job_watch(job_t j)
ed34e3c3 822{
5b0a4722 823 struct semaphoreitem *si;
ed34e3c3
A
824 struct socketgroup *sg;
825 struct machservice *ms;
ed34e3c3 826
5b0a4722
A
827 if (!j->currently_ignored) {
828 return;
829 }
830
831 job_log(j, LOG_DEBUG, "Watching...");
832
833 j->currently_ignored = false;
834
835 SLIST_FOREACH(sg, &j->sockets, sle) {
ed34e3c3 836 socketgroup_watch(j, sg);
5b0a4722 837 }
ed34e3c3 838
5b0a4722
A
839 SLIST_FOREACH(ms, &j->machservices, sle) {
840 machservice_watch(j, ms);
841 }
ed34e3c3 842
5b0a4722
A
843 SLIST_FOREACH(si, &j->semaphores, sle) {
844 semaphoreitem_watch(j, si);
845 }
ed34e3c3
A
846}
847
848void
5b0a4722 849job_stop(job_t j)
ed34e3c3 850{
ddbbfbc1
A
851 char extralog[100];
852 int32_t newval = 1;
853
dcace88f 854 if (unlikely(!j->p || j->stopped || j->anonymous)) {
5b0a4722
A
855 return;
856 }
ed34e3c3 857
ddbbfbc1 858#if TARGET_OS_EMBEDDED
dcace88f
A
859 if (g_embedded_privileged_action && s_embedded_privileged_job) {
860 if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
ddbbfbc1
A
861 errno = EPERM;
862 return;
863 }
864
dcace88f 865 if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
ddbbfbc1
A
866 errno = EPERM;
867 return;
868 }
dcace88f 869 } else if (g_embedded_privileged_action) {
ddbbfbc1
A
870 errno = EINVAL;
871 return;
5b0a4722 872 }
ddbbfbc1
A
873#endif
874
dcace88f
A
875 if (j->kill_via_shmem) {
876 if (j->shmem) {
877 if (!j->sent_kill_via_shmem) {
878 j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
879 newval = __sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
880 j->sent_kill_via_shmem = true;
881 } else {
882 newval = j->shmem->vp_shmem_transaction_cnt;
883 }
884 } else {
885 newval = -1;
886 }
887 }
888
ddbbfbc1
A
889 j->sent_signal_time = runtime_get_opaque_time();
890
891 if (newval < 0) {
892 j->clean_kill = true;
893 job_kill(j);
894 } else {
dcace88f 895 (void)job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);
ddbbfbc1 896
dcace88f
A
897 if (j->exit_timeout) {
898 (void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j) != -1);
899 } else {
900 job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
ddbbfbc1
A
901 }
902
903 if (j->kill_via_shmem) {
904 snprintf(extralog, sizeof(extralog), ": %d remaining transactions", newval + 1);
905 } else {
906 extralog[0] = '\0';
907 }
5b0a4722 908
ddbbfbc1
A
909 job_log(j, LOG_DEBUG, "Sent SIGTERM signal%s", extralog);
910 }
911
912 j->stopped = true;
ed34e3c3
A
913}
914
915launch_data_t
5b0a4722 916job_export(job_t j)
ed34e3c3
A
917{
918 launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
919
5b0a4722 920 if (r == NULL) {
ed34e3c3 921 return NULL;
5b0a4722 922 }
ed34e3c3 923
5b0a4722 924 if ((tmp = launch_data_new_string(j->label))) {
ed34e3c3 925 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
5b0a4722
A
926 }
927 if ((tmp = launch_data_new_string(j->mgr->name))) {
928 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
929 }
930 if ((tmp = launch_data_new_bool(j->ondemand))) {
ed34e3c3 931 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
5b0a4722
A
932 }
933 if ((tmp = launch_data_new_integer(j->last_exit_status))) {
ed34e3c3 934 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
5b0a4722
A
935 }
936 if (j->p && (tmp = launch_data_new_integer(j->p))) {
ed34e3c3 937 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
5b0a4722
A
938 }
939 if ((tmp = launch_data_new_integer(j->timeout))) {
ed34e3c3 940 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
5b0a4722
A
941 }
942 if (j->prog && (tmp = launch_data_new_string(j->prog))) {
ed34e3c3 943 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
5b0a4722 944 }
ddbbfbc1
A
945 if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
946 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
947 }
5b0a4722 948 if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
ed34e3c3 949 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
5b0a4722
A
950 }
951 if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
ed34e3c3 952 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
5b0a4722 953 }
ddbbfbc1
A
954 if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
955 size_t i;
ed34e3c3
A
956
957 for (i = 0; i < j->argc; i++) {
5b0a4722 958 if ((tmp2 = launch_data_new_string(j->argv[i]))) {
ed34e3c3 959 launch_data_array_set_index(tmp, tmp2, i);
5b0a4722 960 }
ed34e3c3
A
961 }
962
963 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
964 }
965
ddbbfbc1
A
966 if (j->kill_via_shmem && (tmp = launch_data_new_bool(true))) {
967 int32_t tmp_cnt = -1;
968
969 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);
970
971 if (j->shmem) {
972 tmp_cnt = j->shmem->vp_shmem_transaction_cnt;
973 }
974
975 if (j->sent_kill_via_shmem) {
976 tmp_cnt++;
977 }
978
979 if ((tmp = launch_data_new_integer(tmp_cnt))) {
980 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TRANSACTIONCOUNT);
981 }
982 }
983
5b0a4722
A
984 if (j->session_create && (tmp = launch_data_new_bool(true))) {
985 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
986 }
987
ed34e3c3 988 if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
5b0a4722 989 if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
ed34e3c3 990 launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
5b0a4722 991 }
ed34e3c3
A
992 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
993 }
994
995 if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
996 struct socketgroup *sg;
997 int i;
998
999 SLIST_FOREACH(sg, &j->sockets, sle) {
5b0a4722
A
1000 if (sg->junkfds) {
1001 continue;
1002 }
ed34e3c3
A
1003 if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
1004 for (i = 0; i < sg->fd_cnt; i++) {
5b0a4722 1005 if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
ed34e3c3 1006 launch_data_array_set_index(tmp2, tmp3, i);
5b0a4722 1007 }
ed34e3c3
A
1008 }
1009 launch_data_dict_insert(tmp, tmp2, sg->name);
1010 }
1011 }
1012
1013 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
1014 }
1015
1016 if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
1017 struct machservice *ms;
5b0a4722
A
1018
1019 tmp3 = NULL;
ed34e3c3
A
1020
1021 SLIST_FOREACH(ms, &j->machservices, sle) {
5b0a4722
A
1022 if (ms->per_pid) {
1023 if (tmp3 == NULL) {
1024 tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
1025 }
1026 if (tmp3) {
1027 tmp2 = launch_data_new_machport(MACH_PORT_NULL);
1028 launch_data_dict_insert(tmp3, tmp2, ms->name);
1029 }
1030 } else {
1031 tmp2 = launch_data_new_machport(MACH_PORT_NULL);
1032 launch_data_dict_insert(tmp, tmp2, ms->name);
1033 }
ed34e3c3
A
1034 }
1035
1036 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);
5b0a4722
A
1037
1038 if (tmp3) {
1039 launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
1040 }
1041 }
1042
1043 return r;
1044}
1045
1046static void
1047jobmgr_log_active_jobs(jobmgr_t jm)
1048{
1049 const char *why_active;
1050 jobmgr_t jmi;
1051 job_t ji;
1052
1053 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
1054 jobmgr_log_active_jobs(jmi);
1055 }
1056
1057 LIST_FOREACH(ji, &jm->jobs, sle) {
dcace88f
A
1058 if ((why_active = job_active(ji))) {
1059 if (ji->p != 1) {
1060 job_log(ji, LOG_DEBUG | LOG_CONSOLE, "%s", why_active);
1061 }
ddbbfbc1 1062 }
ed34e3c3 1063 }
5b0a4722
A
1064}
1065
1066static void
ddbbfbc1 1067jobmgr_still_alive_with_check(jobmgr_t jm)
5b0a4722 1068{
dcace88f 1069 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
ddbbfbc1 1070 jobmgr_log_active_jobs(jm);
5b0a4722
A
1071}
1072
1073jobmgr_t
1074jobmgr_shutdown(jobmgr_t jm)
1075{
1076 jobmgr_t jmi, jmn;
5b0a4722
A
1077 jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));
1078
dcace88f
A
1079 jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;
1080
1081 struct tm curtime;
1082 (void)localtime_r(&jm->shutdown_time, &curtime);
1083
1084 char date[26];
1085 (void)asctime_r(&curtime, date);
1086 /* Trim the new line that asctime_r(3) puts there for some reason. */
1087 date[24] = 0;
1088
1089 if (jm == root_jobmgr && pid1_magic) {
1090 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
1091 } else {
1092 jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
1093 }
1094
5b0a4722
A
1095 jm->shutting_down = true;
1096
1097 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
1098 jobmgr_shutdown(jmi);
1099 }
ddbbfbc1
A
1100
1101 if (jm->parentmgr == NULL && pid1_magic) {
dcace88f
A
1102 (void)jobmgr_assumes(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));
1103
1104 /* Spawn the shutdown monitor. */
1105 if (_s_shutdown_monitor && !_s_shutdown_monitor->p) {
1106 job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
1107 job_dispatch(_s_shutdown_monitor, true);
1108 }
ed34e3c3
A
1109 }
1110
5b0a4722 1111 return jobmgr_do_garbage_collection(jm);
ed34e3c3
A
1112}
1113
1114void
5b0a4722 1115jobmgr_remove(jobmgr_t jm)
ed34e3c3 1116{
5b0a4722
A
1117 jobmgr_t jmi;
1118 job_t ji;
ed34e3c3 1119
ddbbfbc1 1120 jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
5b0a4722
A
1121 if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
1122 while ((jmi = SLIST_FIRST(&jm->submgrs))) {
1123 jobmgr_remove(jmi);
1124 }
1125 }
1126
dcace88f
A
1127 while ((ji = LIST_FIRST(&jm->jobs))) {
1128 if (!ji->anonymous && !job_assumes(ji, ji->p == 0)) {
ddbbfbc1
A
1129 ji->p = 0;
1130 }
5b0a4722
A
1131 job_remove(ji);
1132 }
1133
1134 if (jm->req_port) {
dcace88f 1135 (void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
5b0a4722 1136 }
5b0a4722 1137 if (jm->jm_port) {
dcace88f
A
1138 (void)jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
1139 }
1140
1141 if (jm->req_bsport) {
1142 (void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_bsport) == KERN_SUCCESS);
1143 }
1144 if (jm->req_excport) {
1145 (void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_excport) == KERN_SUCCESS);
1146 }
1147 if (jm->req_asport) {
1148 (void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_asport) == KERN_SUCCESS);
1149 }
1150#if !TARGET_OS_EMBEDDED
1151 if (jm->req_rport) {
1152 kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
1153 if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
1154 /* If the originator went away, the reply port will be a dead name,
1155 * and we expect this to fail.
1156 */
1157 errno = kr;
1158 (void)jobmgr_assumes(jm, kr == KERN_SUCCESS);
1159 }
1160 }
5c88273d 1161#endif /* !TARGET_OS_EMBEDDED */
dcace88f
A
1162 if (jm->req_ctx) {
1163 (void)jobmgr_assumes(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz) == KERN_SUCCESS);
1164 }
1165
1166 time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
1167 struct tm curtime;
1168 (void)localtime_r(&ts, &curtime);
1169
1170 char date[26];
1171 (void)asctime_r(&curtime, date);
1172 date[24] = 0;
1173
1174 time_t delta = ts - jm->shutdown_time;
1175 if (jm == root_jobmgr && pid1_magic) {
1176 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
1177 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
1178 } else {
1179 jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
1180 jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
5b0a4722
A
1181 }
1182
5b0a4722 1183 if (jm->parentmgr) {
ddbbfbc1 1184 runtime_del_weak_ref();
5b0a4722 1185 SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
ddbbfbc1
A
1186 } else if (pid1_magic) {
1187 eliminate_double_reboot();
1188 launchd_log_vm_stats();
dcace88f 1189 jobmgr_log_stray_children(jm, true);
ddbbfbc1 1190 jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
5b0a4722 1191 runtime_closelog();
dcace88f 1192 (void)jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
5b0a4722 1193 } else {
5b0a4722 1194 jobmgr_log(jm, LOG_DEBUG, "About to exit");
ddbbfbc1 1195 runtime_closelog();
5b0a4722
A
1196 exit(EXIT_SUCCESS);
1197 }
1198
1199 free(jm);
ed34e3c3
A
1200}
1201
1202void
5b0a4722 1203job_remove(job_t j)
ed34e3c3 1204{
5b0a4722 1205 struct waiting_for_removal *w4r;
ed34e3c3 1206 struct calendarinterval *ci;
5b0a4722 1207 struct semaphoreitem *si;
ed34e3c3 1208 struct socketgroup *sg;
5b0a4722 1209 struct machservice *ms;
ed34e3c3
A
1210 struct limititem *li;
1211 struct envitem *ei;
dcace88f
A
1212
1213 if (j->alias) {
1214 /* HACK: Egregious code duplication. But as with machservice_delete(),
1215 * job aliases can't (and shouldn't) have any complex behaviors
1216 * associated with them.
1217 */
1218 while ((ms = SLIST_FIRST(&j->machservices))) {
1219 machservice_delete(j, ms, false);
1220 }
1221
1222 LIST_REMOVE(j, sle);
1223 LIST_REMOVE(j, label_hash_sle);
1224 free(j);
1225 return;
1226 }
1227
ddbbfbc1 1228#if TARGET_OS_EMBEDDED
dcace88f
A
1229 if (g_embedded_privileged_action && s_embedded_privileged_job) {
1230 if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
ddbbfbc1
A
1231 errno = EPERM;
1232 return;
ed34e3c3 1233 }
ddbbfbc1 1234
dcace88f 1235 if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
ddbbfbc1
A
1236 errno = EPERM;
1237 return;
1238 }
dcace88f 1239 } else if (g_embedded_privileged_action) {
ddbbfbc1 1240 errno = EINVAL;
5b0a4722 1241 return;
ed34e3c3 1242 }
ddbbfbc1
A
1243#endif
1244
dcace88f
A
1245 /* Do this BEFORE we check and see whether the job is still active. If we're a
1246 * sub-job, we're being removed due to the parent job removing us. Therefore, the
1247 * parent job will free itself after this call completes. So if we defer removing
1248 * ourselves from the parent's list, we'll crash when we finally get around to it.
1249 */
1250 if (j->dedicated_instance && !j->former_subjob) {
1251 LIST_REMOVE(j, subjob_sle);
1252 j->former_subjob = true;
1253 }
1254
ddbbfbc1
A
1255 if (unlikely(j->p)) {
1256 if (j->anonymous) {
1257 job_reap(j);
1258 } else {
1259 job_log(j, LOG_DEBUG, "Removal pended until the job exits");
1260
1261 if (!j->removal_pending) {
1262 j->removal_pending = true;
1263 job_stop(j);
1264 }
dcace88f 1265
ddbbfbc1
A
1266 return;
1267 }
1268 }
1269
dcace88f
A
1270 if (!j->removing) {
1271 j->removing = true;
1272 job_dispatch_curious_jobs(j);
1273 }
ed34e3c3 1274
5b0a4722 1275 ipc_close_all_with_job(j);
ed34e3c3 1276
ddbbfbc1
A
1277 job_log(j, LOG_INFO, "Total rusage: utime %ld.%06u stime %ld.%06u maxrss %lu ixrss %lu idrss %lu isrss %lu minflt %lu majflt %lu nswap %lu inblock %lu oublock %lu msgsnd %lu msgrcv %lu nsignals %lu nvcsw %lu nivcsw %lu",
1278 j->ru.ru_utime.tv_sec, j->ru.ru_utime.tv_usec,
1279 j->ru.ru_stime.tv_sec, j->ru.ru_stime.tv_usec,
1280 j->ru.ru_maxrss, j->ru.ru_ixrss, j->ru.ru_idrss, j->ru.ru_isrss,
1281 j->ru.ru_minflt, j->ru.ru_majflt,
1282 j->ru.ru_nswap, j->ru.ru_inblock, j->ru.ru_oublock,
1283 j->ru.ru_msgsnd, j->ru.ru_msgrcv,
1284 j->ru.ru_nsignals, j->ru.ru_nvcsw, j->ru.ru_nivcsw);
1285
5b0a4722
A
1286 if (j->forced_peers_to_demand_mode) {
1287 job_set_global_on_demand(j, false);
1288 }
ed34e3c3 1289
ddbbfbc1 1290 if (!job_assumes(j, j->fork_fd == 0)) {
dcace88f 1291 (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
ddbbfbc1
A
1292 }
1293
1294 if (j->stdin_fd) {
dcace88f 1295 (void)job_assumes(j, runtime_close(j->stdin_fd) != -1);
ed34e3c3
A
1296 }
1297
5b0a4722 1298 if (!job_assumes(j, j->log_redirect_fd == 0)) {
dcace88f 1299 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5b0a4722 1300 }
ed34e3c3 1301
5b0a4722 1302 if (j->j_port) {
dcace88f 1303 (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
5b0a4722 1304 }
ed34e3c3 1305
5b0a4722 1306 while ((sg = SLIST_FIRST(&j->sockets))) {
ed34e3c3 1307 socketgroup_delete(j, sg);
5b0a4722
A
1308 }
1309 while ((ci = SLIST_FIRST(&j->cal_intervals))) {
ed34e3c3 1310 calendarinterval_delete(j, ci);
5b0a4722
A
1311 }
1312 while ((ei = SLIST_FIRST(&j->env))) {
ed34e3c3 1313 envitem_delete(j, ei, false);
5b0a4722
A
1314 }
1315 while ((ei = SLIST_FIRST(&j->global_env))) {
ed34e3c3 1316 envitem_delete(j, ei, true);
5b0a4722
A
1317 }
1318 while ((li = SLIST_FIRST(&j->limits))) {
ed34e3c3 1319 limititem_delete(j, li);
5b0a4722
A
1320 }
1321 while ((ms = SLIST_FIRST(&j->machservices))) {
1322 machservice_delete(j, ms, false);
1323 }
1324 while ((si = SLIST_FIRST(&j->semaphores))) {
ed34e3c3 1325 semaphoreitem_delete(j, si);
5b0a4722
A
1326 }
1327 while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
1328 waiting4removal_delete(j, w4r);
1329 }
dcace88f
A
1330
1331 struct externalevent *eei = NULL;
1332 while ((eei = LIST_FIRST(&j->events))) {
1333 eventsystem_ping();
1334 externalevent_delete(eei);
1335 }
1336
1337#if 0
1338 /* Event systems exist independently of an actual monitor job. They're
1339 * created on-demand when a job has a LaunchEvents dictionary. So we
1340 * really don't need to get rid of them.
1341 */
1342 if (j->event_monitor) {
1343 struct eventsystem *esi = NULL;
1344 while ((esi = LIST_FIRST(&_s_event_systems))) {
1345 eventsystem_delete(esi);
1346 }
1347 }
1348#else
1349 if (false) {
1350 /* Make gcc happy. */
1351 eventsystem_delete(NULL);
1352 }
1353 if (j->event_monitor) {
1354 if (_s_event_update_port != MACH_PORT_NULL) {
1355 (void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
1356 _s_event_update_port = MACH_PORT_NULL;
1357 }
1358 _s_event_monitor = NULL;
1359 }
1360#endif
ed34e3c3 1361
5b0a4722 1362 if (j->prog) {
ed34e3c3 1363 free(j->prog);
5b0a4722
A
1364 }
1365 if (j->argv) {
ed34e3c3 1366 free(j->argv);
5b0a4722
A
1367 }
1368 if (j->rootdir) {
ed34e3c3 1369 free(j->rootdir);
5b0a4722
A
1370 }
1371 if (j->workingdir) {
ed34e3c3 1372 free(j->workingdir);
5b0a4722
A
1373 }
1374 if (j->username) {
ed34e3c3 1375 free(j->username);
5b0a4722
A
1376 }
1377 if (j->groupname) {
ed34e3c3 1378 free(j->groupname);
5b0a4722 1379 }
ddbbfbc1
A
1380 if (j->stdinpath) {
1381 free(j->stdinpath);
1382 }
5b0a4722 1383 if (j->stdoutpath) {
ed34e3c3 1384 free(j->stdoutpath);
5b0a4722
A
1385 }
1386 if (j->stderrpath) {
ed34e3c3 1387 free(j->stderrpath);
5b0a4722 1388 }
fe044cc9
A
1389 if (j->alt_exc_handler) {
1390 free(j->alt_exc_handler);
1391 }
f36da725 1392#if HAVE_SANDBOX
5b0a4722
A
1393 if (j->seatbelt_profile) {
1394 free(j->seatbelt_profile);
1395 }
f36da725
A
1396#endif
1397#if HAVE_QUARANTINE
5b0a4722
A
1398 if (j->quarantine_data) {
1399 free(j->quarantine_data);
1400 }
f36da725 1401#endif
5b0a4722
A
1402 if (j->j_binpref) {
1403 free(j->j_binpref);
1404 }
1405 if (j->start_interval) {
ddbbfbc1 1406 runtime_del_weak_ref();
dcace88f 1407 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
5b0a4722
A
1408 }
1409 if (j->poll_for_vfs_changes) {
dcace88f 1410 (void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
5b0a4722 1411 }
dcace88f 1412 if (j->exit_timeout) {
ddbbfbc1
A
1413 /* Not a big deal if this fails. It means that the timer's already been freed. */
1414 kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
1415 }
dcace88f 1416 if (j->jetsam_properties) {
ddbbfbc1
A
1417 LIST_REMOVE(j, jetsam_sle);
1418 j->mgr->jetsam_jobs_cnt--;
1419 }
dcace88f
A
1420 if (j->asport != MACH_PORT_NULL) {
1421 (void)job_assumes(j, launchd_mport_deallocate(j->asport) == KERN_SUCCESS);
ddbbfbc1 1422 }
dcace88f 1423 if (!uuid_is_null(j->expected_audit_uuid)) {
ddbbfbc1
A
1424 LIST_REMOVE(j, needing_session_sle);
1425 }
dcace88f 1426 if (j->embedded_special_privileges) {
ddbbfbc1
A
1427 s_embedded_privileged_job = NULL;
1428 }
dcace88f
A
1429 if (j->shutdown_monitor) {
1430 _s_shutdown_monitor = NULL;
1431 }
5c88273d 1432
ed34e3c3 1433 kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
5c88273d 1434
5b0a4722
A
1435 LIST_REMOVE(j, sle);
1436 LIST_REMOVE(j, label_hash_sle);
1437
dcace88f
A
1438 job_t ji = NULL;
1439 job_t jit = NULL;
1440 LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
1441 job_remove(ji);
1442 }
1443
5b0a4722
A
1444 job_log(j, LOG_DEBUG, "Removed");
1445
5c88273d 1446 j->kqjob_callback = (kq_callback)0x8badf00d;
ed34e3c3
A
1447 free(j);
1448}
1449
1450void
1451socketgroup_setup(launch_data_t obj, const char *key, void *context)
1452{
1453 launch_data_t tmp_oai;
5b0a4722 1454 job_t j = context;
ddbbfbc1 1455 size_t i, fd_cnt = 1;
ed34e3c3
A
1456 int *fds;
1457
5b0a4722 1458 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
ed34e3c3 1459 fd_cnt = launch_data_array_get_count(obj);
5b0a4722 1460 }
ed34e3c3
A
1461
1462 fds = alloca(fd_cnt * sizeof(int));
1463
1464 for (i = 0; i < fd_cnt; i++) {
5b0a4722 1465 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
ed34e3c3 1466 tmp_oai = launch_data_array_get_index(obj, i);
5b0a4722 1467 } else {
ed34e3c3 1468 tmp_oai = obj;
5b0a4722 1469 }
ed34e3c3
A
1470
1471 fds[i] = launch_data_get_fd(tmp_oai);
1472 }
1473
1474 socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);
1475
1476 ipc_revoke_fds(obj);
1477}
1478
1479bool
5b0a4722
A
1480job_set_global_on_demand(job_t j, bool val)
1481{
1482 if (j->forced_peers_to_demand_mode && val) {
1483 return false;
1484 } else if (!j->forced_peers_to_demand_mode && !val) {
1485 return false;
1486 }
1487
1488 if ((j->forced_peers_to_demand_mode = val)) {
1489 j->mgr->global_on_demand_cnt++;
1490 } else {
1491 j->mgr->global_on_demand_cnt--;
1492 }
1493
1494 if (j->mgr->global_on_demand_cnt == 0) {
1495 jobmgr_dispatch_all(j->mgr, false);
1496 }
1497
1498 return true;
1499}
1500
1501bool
1502job_setup_machport(job_t j)
ed34e3c3 1503{
5b0a4722
A
1504 mach_msg_size_t mxmsgsz;
1505
1506 if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
ed34e3c3 1507 goto out_bad;
5b0a4722
A
1508 }
1509
1510 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
ddbbfbc1 1511 mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
5b0a4722
A
1512 if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
1513 mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
1514 }
ed34e3c3 1515
5b0a4722 1516 if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
ed34e3c3 1517 goto out_bad2;
5b0a4722
A
1518 }
1519
1520 if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
dcace88f 1521 (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
5b0a4722
A
1522 goto out_bad;
1523 }
ed34e3c3
A
1524
1525 return true;
1526out_bad2:
dcace88f 1527 (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
ed34e3c3
A
1528out_bad:
1529 return false;
1530}
1531
dcace88f
A
1532kern_return_t
1533job_setup_exit_port(job_t j)
1534{
1535 kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
1536 if (!job_assumes(j, kr == KERN_SUCCESS)) {
1537 return MACH_PORT_NULL;
1538 }
1539
1540 struct mach_port_limits limits = {
1541 .mpl_qlimit = 1,
1542 };
1543 kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
1544 (void)job_assumes(j, kr == KERN_SUCCESS);
1545
1546 kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
1547 if (!job_assumes(j, kr == KERN_SUCCESS)) {
1548 (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
1549 j->exit_status_port = MACH_PORT_NULL;
1550 }
1551
1552 return kr;
1553}
1554
5b0a4722
A
1555job_t
1556job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
ed34e3c3
A
1557{
1558 const char **argv = (const char **)mach_cmd2argv(cmd);
5b0a4722 1559 job_t jr = NULL;
ed34e3c3 1560
5b0a4722 1561 if (!job_assumes(j, argv != NULL)) {
ed34e3c3 1562 goto out_bad;
5b0a4722 1563 }
ed34e3c3 1564
5b0a4722 1565 jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
ed34e3c3
A
1566
1567 free(argv);
1568
5b0a4722 1569 /* jobs can easily be denied creation during shutdown */
ddbbfbc1 1570 if (unlikely(jr == NULL)) {
ed34e3c3 1571 goto out_bad;
5b0a4722 1572 }
ed34e3c3 1573
5b0a4722
A
1574 jr->mach_uid = uid;
1575 jr->ondemand = ond;
1576 jr->legacy_mach_job = true;
1577 jr->abandon_pg = true;
1578 jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */
ed34e3c3 1579
5b0a4722 1580 if (!job_setup_machport(jr)) {
ed34e3c3
A
1581 goto out_bad;
1582 }
1583
5b0a4722 1584 job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");
ed34e3c3 1585
5b0a4722 1586 return jr;
ed34e3c3
A
1587
1588out_bad:
5b0a4722
A
1589 if (jr) {
1590 job_remove(jr);
1591 }
ed34e3c3
A
1592 return NULL;
1593}
1594
5b0a4722
A
1595job_t
1596job_new_anonymous(jobmgr_t jm, pid_t anonpid)
ed34e3c3 1597{
dcace88f 1598 struct proc_bsdshortinfo proc;
5b0a4722
A
1599 bool shutdown_state;
1600 job_t jp = NULL, jr = NULL;
ddbbfbc1
A
1601 uid_t kp_euid, kp_uid, kp_svuid;
1602 gid_t kp_egid, kp_gid, kp_svgid;
5b0a4722
A
1603
1604 if (!jobmgr_assumes(jm, anonpid != 0)) {
ddbbfbc1 1605 errno = EINVAL;
ed34e3c3
A
1606 return NULL;
1607 }
5b0a4722
A
1608
1609 if (!jobmgr_assumes(jm, anonpid < 100000)) {
1610 /* The kernel current defines PID_MAX to be 99999, but that define isn't exported */
ddbbfbc1 1611 errno = EINVAL;
ed34e3c3 1612 return NULL;
5b0a4722 1613 }
ed34e3c3 1614
dcace88f
A
1615 /* libproc returns the number of bytes written into the buffer upon success,
1616 * zero on failure.
1617 */
1618 if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
1619 if (errno != ESRCH) {
1620 (void)jobmgr_assumes(jm, errno == 0);
1621 }
ed34e3c3
A
1622 return NULL;
1623 }
1624
dcace88f 1625 if (!jobmgr_assumes(jm, proc.pbsi_comm[0] != '\0')) {
ddbbfbc1 1626 errno = EINVAL;
5b0a4722
A
1627 return NULL;
1628 }
ed34e3c3 1629
dcace88f
A
1630 if (unlikely(proc.pbsi_status == SZOMB)) {
1631 jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
ddbbfbc1
A
1632 }
1633
dcace88f
A
1634 if (unlikely(proc.pbsi_flags & P_SUGID)) {
1635 jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
ddbbfbc1
A
1636 }
1637
dcace88f
A
1638 kp_euid = proc.pbsi_uid;
1639 kp_uid = proc.pbsi_ruid;
1640 kp_svuid = proc.pbsi_svuid;
1641 kp_egid = proc.pbsi_gid;
1642 kp_gid = proc.pbsi_rgid;
1643 kp_svgid = proc.pbsi_svgid;
ddbbfbc1
A
1644
1645 if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
1646 jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
dcace88f 1647 kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
ed34e3c3
A
1648 }
1649
587e987e
A
1650 /* "Fix" for a problem that shouldn't even exist.
1651 * See rdar://problem/7264615 for the symptom and rdar://problem/5020256
1652 * as to why this can happen.
1653 */
dcace88f
A
1654 if (!jobmgr_assumes(jm, (pid_t)proc.pbsi_ppid != anonpid)) {
1655 jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). It should find a different way to do whatever it's doing. Setting PPID to 0: %s", proc.pbsi_comm);
587e987e
A
1656 errno = EINVAL;
1657 return NULL;
5b0a4722
A
1658 }
1659
1660 /* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
ddbbfbc1 1661 if (unlikely(shutdown_state = jm->shutting_down)) {
5b0a4722
A
1662 jm->shutting_down = false;
1663 }
1664
dcace88f
A
1665 /* We only set requestor_pid for XPC domains. */
1666 const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
1667 if (jobmgr_assumes(jm, (jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL)) != NULL)) {
f271391c 1668 u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;
5b0a4722
A
1669
1670 total_anon_children++;
1671 jr->anonymous = true;
1672 jr->p = anonpid;
1673
1674 /* anonymous process reaping is messy */
1675 LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);
1676
ddbbfbc1 1677 if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1) && job_assumes(jr, errno == ESRCH)) {
5b0a4722
A
1678 /* zombies are weird */
1679 job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
1680 jr->unload_at_mig_return = true;
1681 }
1682
dcace88f 1683 if (unlikely(shutdown_state)) {
ddbbfbc1 1684 job_log(jr, LOG_SCOLDING, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
ed34e3c3 1685 }
5b0a4722 1686
dcace88f 1687 job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
ed34e3c3
A
1688 }
1689
ddbbfbc1 1690 if (unlikely(shutdown_state)) {
5b0a4722
A
1691 jm->shutting_down = true;
1692 }
ed34e3c3 1693
587e987e
A
1694 /* This is down here to mitigate the effects of rdar://problem/7264615, in which a process
1695 * attaches to its own parent. We need to make sure that the anonymous job has been added
1696 * to the process list so that, if it's used ptrace(3) to cause a cycle in the process
1697 * tree (thereby making it not a tree anymore), we'll find the tracing parent PID of the
1698 * parent process, which is the child, when we go looking for it in jobmgr_find_by_pid().
1699 */
dcace88f 1700 switch (proc.pbsi_ppid) {
587e987e
A
1701 case 0:
1702 /* the kernel */
1703 break;
1704 case 1:
1705 if (!pid1_magic) {
1706 /* we cannot possibly find a parent job_t that is useful in this function */
1707 break;
1708 }
1709 /* fall through */
1710 default:
dcace88f
A
1711 jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
1712 if (jobmgr_assumes(jm, jp != NULL)) {
1713 if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
1714 job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
1715 }
1716 }
587e987e
A
1717 break;
1718 }
1719
ed34e3c3
A
1720 return jr;
1721}
1722
dcace88f
A
1723job_t
1724job_new_subjob(job_t j, uuid_t identifier)
1725{
1726 char label[0];
1727 uuid_string_t idstr;
1728 uuid_unparse(identifier, idstr);
1729 size_t label_sz = snprintf(label, 0, "%s.%s", j->label, idstr);
1730
1731 job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
1732 if (launchd_assumes(nj != NULL)) {
1733 nj->kqjob_callback = job_callback;
1734 nj->mgr = j->mgr;
1735 nj->min_run_time = j->min_run_time;
1736 nj->timeout = j->timeout;
1737 nj->exit_timeout = j->exit_timeout;
1738
1739 snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
1740
1741 /* Set all our simple Booleans that are applicable. */
1742 nj->debug = j->debug;
1743 nj->ondemand = j->ondemand;
1744 nj->checkedin = true;
1745 nj->low_pri_io = j->low_pri_io;
1746 nj->setmask = j->setmask;
1747 nj->wait4debugger = j->wait4debugger;
1748 nj->internal_exc_handler = j->internal_exc_handler;
1749 nj->setnice = j->setnice;
1750 nj->abandon_pg = j->abandon_pg;
1751 nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
1752 nj->deny_job_creation = j->deny_job_creation;
1753 nj->kill_via_shmem = j->kill_via_shmem;
1754 nj->needs_kickoff = j->needs_kickoff;
1755 nj->currently_ignored = true;
1756 nj->dedicated_instance = true;
1757 nj->xpc_service = j->xpc_service;
a6e7a709 1758 nj->xpc_bootstrapper = j->xpc_bootstrapper;
dcace88f
A
1759
1760 nj->mask = j->mask;
1761 uuid_copy(nj->instance_id, identifier);
1762
1763 /* These jobs are purely on-demand Mach jobs. */
1764
1765 /* {Hard | Soft}ResourceLimits are not supported. */
1766
1767 struct machservice *msi = NULL;
1768 SLIST_FOREACH(msi, &j->machservices, sle) {
1769 /* Only copy MachServices that were actually declared in the plist.
1770 * So skip over per-PID ones and ones that were created via
1771 * bootstrap_register().
1772 */
1773 if (msi->upfront) {
1774 mach_port_t mp = MACH_PORT_NULL;
1775 struct machservice *msj = machservice_new(nj, msi->name, &mp, msi->per_pid);
1776 if (job_assumes(nj, msj != NULL)) {
1777 msj->reset = msi->reset;
1778 msj->delete_on_destruction = msi->delete_on_destruction;
1779 msj->drain_one_on_crash = msi->drain_one_on_crash;
1780 msj->drain_all_on_crash = msi->drain_all_on_crash;
1781 }
1782 }
1783 }
1784
1785 if (j->prog) {
1786 nj->prog = strdup(j->prog);
1787 }
1788 if (j->argv) {
1789 size_t sz = malloc_size(j->argv);
1790 nj->argv = (char **)malloc(sz);
1791 if (job_assumes(nj, nj->argv != NULL)) {
1792 /* This is the start of our strings. */
1793 char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
1794
1795 size_t i = 0;
1796 for (i = 0; i < j->argc; i++) {
1797 (void)strcpy(p, j->argv[i]);
1798 nj->argv[i] = p;
1799 p += (strlen(j->argv[i]) + 1);
1800 }
1801 nj->argv[i] = NULL;
1802 }
1803
1804 nj->argc = j->argc;
1805 }
1806
1807 /* We ignore global environment variables. */
1808 struct envitem *ei = NULL;
1809 SLIST_FOREACH(ei, &j->env, sle) {
1810 (void)job_assumes(nj, envitem_new(nj, ei->key, ei->value, false, false));
1811 }
1812 uuid_string_t val;
1813 uuid_unparse(identifier, val);
1814 (void)job_assumes(nj, envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false, false));
1815
1816 if (j->rootdir) {
1817 nj->rootdir = strdup(j->rootdir);
1818 }
1819 if (j->workingdir) {
1820 nj->workingdir = strdup(j->workingdir);
1821 }
1822 if (j->username) {
1823 nj->username = strdup(j->username);
1824 }
1825 if (j->groupname) {
1826 nj->groupname = strdup(j->groupname);
1827 }
1828 /* FIXME: We shouldn't redirect all the output from these jobs to the same
1829 * file. We should uniquify the file names.
1830 */
1831 if (j->stdinpath) {
1832 nj->stdinpath = strdup(j->stdinpath);
1833 }
1834 if (j->stdoutpath) {
1835 nj->stdoutpath = strdup(j->stdinpath);
1836 }
1837 if (j->stderrpath) {
1838 nj->stderrpath = strdup(j->stderrpath);
1839 }
1840 if (j->alt_exc_handler) {
1841 nj->alt_exc_handler = strdup(j->alt_exc_handler);
1842 }
1843 #if HAVE_SANDBOX
1844 if (j->seatbelt_profile) {
1845 nj->seatbelt_profile = strdup(j->seatbelt_profile);
1846 }
1847 #endif
1848
1849 #if HAVE_QUARANTINE
1850 if (j->quarantine_data) {
1851 nj->quarantine_data = strdup(j->quarantine_data);
1852 }
1853 nj->quarantine_data_sz = j->quarantine_data_sz;
1854 #endif
1855 if (j->j_binpref) {
1856 size_t sz = malloc_size(j->j_binpref);
1857 nj->j_binpref = (cpu_type_t *)malloc(sz);
1858 if (job_assumes(nj, nj->j_binpref)) {
1859 memcpy(&nj->j_binpref, &j->j_binpref, sz);
1860 }
1861 }
1862
1863 /* JetsamPriority is unsupported. */
1864
1865 if (j->asport != MACH_PORT_NULL) {
1866 (void)job_assumes(nj, launchd_mport_copy_send(j->asport) == KERN_SUCCESS);
1867 nj->asport = j->asport;
1868 }
1869
1870 LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
1871
1872 jobmgr_t where2put = root_jobmgr;
1873 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
1874 where2put = j->mgr;
1875 }
1876 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
1877 LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
1878 }
1879
1880 return nj;
1881}
1882
5b0a4722
A
1883job_t
1884job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
ed34e3c3
A
1885{
1886 const char *const *argv_tmp = argv;
ddbbfbc1 1887 char tmp_path[PATH_MAX];
5b0a4722
A
1888 char auto_label[1000];
1889 const char *bn = NULL;
ed34e3c3 1890 char *co;
5b0a4722 1891 size_t minlabel_len;
ddbbfbc1 1892 size_t i, cc = 0;
5b0a4722
A
1893 job_t j;
1894
1895 launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);
1896
ddbbfbc1 1897 if (unlikely(jm->shutting_down)) {
5b0a4722
A
1898 errno = EINVAL;
1899 return NULL;
1900 }
ed34e3c3 1901
ddbbfbc1 1902 if (unlikely(prog == NULL && argv == NULL)) {
ed34e3c3
A
1903 errno = EINVAL;
1904 return NULL;
1905 }
1906
dcace88f 1907 char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
ddbbfbc1
A
1908 if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
1909 if (prog) {
1910 bn = prog;
1911 } else {
1912 strlcpy(tmp_path, argv[0], sizeof(tmp_path));
1913 bn = basename(tmp_path); /* prog for auto labels is kp.kp_kproc.p_comm */
1914 }
1915 snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
5b0a4722
A
1916 label = auto_label;
1917 /* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
1918 minlabel_len = strlen(label) + MAXCOMLEN;
1919 } else {
dcace88f
A
1920 if (label == AUTO_PICK_XPC_LABEL) {
1921 minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
1922 } else {
1923 minlabel_len = strlen(label);
1924 }
5b0a4722
A
1925 }
1926
1927 j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);
ed34e3c3 1928
5b0a4722
A
1929 if (!jobmgr_assumes(jm, j != NULL)) {
1930 return NULL;
1931 }
ed34e3c3 1932
ddbbfbc1
A
1933 if (unlikely(label == auto_label)) {
1934 snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
5b0a4722 1935 } else {
dcace88f 1936 strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
5b0a4722 1937 }
ed34e3c3 1938 j->kqjob_callback = job_callback;
5b0a4722
A
1939 j->mgr = jm;
1940 j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
1941 j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
1942 j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
1943 j->currently_ignored = true;
ed34e3c3
A
1944 j->ondemand = true;
1945 j->checkedin = true;
587e987e
A
1946 j->jetsam_priority = -1;
1947 j->jetsam_memlimit = -1;
dcace88f 1948 j->jetsam_seq = -1;
ddbbfbc1
A
1949 uuid_clear(j->expected_audit_uuid);
1950
ed34e3c3
A
1951 if (prog) {
1952 j->prog = strdup(prog);
5b0a4722 1953 if (!job_assumes(j, j->prog != NULL)) {
ed34e3c3 1954 goto out_bad;
5b0a4722 1955 }
ed34e3c3
A
1956 }
1957
ddbbfbc1
A
1958 if (likely(argv)) {
1959 while (*argv_tmp++) {
ed34e3c3 1960 j->argc++;
ddbbfbc1 1961 }
ed34e3c3 1962
5b0a4722 1963 for (i = 0; i < j->argc; i++) {
ed34e3c3 1964 cc += strlen(argv[i]) + 1;
5b0a4722 1965 }
ed34e3c3
A
1966
1967 j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
1968
5b0a4722 1969 if (!job_assumes(j, j->argv != NULL)) {
ed34e3c3 1970 goto out_bad;
5b0a4722 1971 }
ed34e3c3
A
1972
1973 co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));
1974
1975 for (i = 0; i < j->argc; i++) {
1976 j->argv[i] = co;
1977 strcpy(co, argv[i]);
1978 co += strlen(argv[i]) + 1;
1979 }
1980 j->argv[i] = NULL;
1981 }
1982
dcace88f 1983 if (strcmp(j->label, "com.apple.WindowServer") == 0) {
ddbbfbc1
A
1984 j->has_console = true;
1985 }
1986
5b0a4722 1987 LIST_INSERT_HEAD(&jm->jobs, j, sle);
dcace88f
A
1988
1989 jobmgr_t where2put_label = root_jobmgr;
1990 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
1991 where2put_label = j->mgr;
1992 }
1993 LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
ddbbfbc1 1994 uuid_clear(j->expected_audit_uuid);
5b0a4722
A
1995
1996 job_log(j, LOG_DEBUG, "Conceived");
ed34e3c3
A
1997
1998 return j;
1999
2000out_bad:
5b0a4722
A
2001 if (j->prog) {
2002 free(j->prog);
ed34e3c3 2003 }
5b0a4722
A
2004 free(j);
2005
ed34e3c3
A
2006 return NULL;
2007}
2008
dcace88f
A
2009#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
2010job_t
2011job_new_alias(jobmgr_t jm, job_t src)
2012{
2013 job_t j = NULL;
2014 if (job_find(jm, src->label)) {
2015 errno = EEXIST;
2016 } else {
2017 j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2018 if (jobmgr_assumes(jm, j != NULL)) {
2019 strcpy((char *)j->label, src->label);
2020 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2021 LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2022 /* Bad jump address. The kqueue callback for aliases should never be
2023 * invoked.
2024 */
2025 j->kqjob_callback = (kq_callback)0xfa1afe1;
2026 j->alias = src;
2027 j->mgr = jm;
2028
2029 struct machservice *msi = NULL;
2030 SLIST_FOREACH(msi, &src->machservices, sle) {
2031 if (!machservice_new_alias(j, msi)) {
2032 jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2033 errno = EINVAL;
2034 job_remove(j);
2035 j = NULL;
2036 break;
2037 }
2038 }
2039 }
2040
2041 if (j) {
2042 job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2043 }
2044 }
2045
2046 return j;
2047}
5c88273d 2048#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
dcace88f 2049
5b0a4722 2050job_t
ed34e3c3
A
2051job_import(launch_data_t pload)
2052{
5b0a4722 2053 job_t j = jobmgr_import2(root_jobmgr, pload);
ed34e3c3 2054
ddbbfbc1 2055 if (unlikely(j == NULL)) {
ed34e3c3 2056 return NULL;
5b0a4722 2057 }
ed34e3c3 2058
ddbbfbc1
A
2059 /* Since jobs are effectively stalled until they get security sessions assigned
2060 * to them, we may wish to reconsider this behavior of calling the job "enabled"
2061 * as far as other jobs with the OtherJobEnabled KeepAlive criterion set.
2062 */
2063 job_dispatch_curious_jobs(j);
5b0a4722 2064 return job_dispatch(j, false);
ed34e3c3
A
2065}
2066
2067launch_data_t
2068job_import_bulk(launch_data_t pload)
2069{
2070 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
5b0a4722 2071 job_t *ja;
ed34e3c3
A
2072 size_t i, c = launch_data_array_get_count(pload);
2073
ddbbfbc1 2074 ja = alloca(c * sizeof(job_t));
ed34e3c3
A
2075
2076 for (i = 0; i < c; i++) {
dcace88f 2077 if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
ed34e3c3 2078 errno = 0;
5b0a4722 2079 }
ed34e3c3
A
2080 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
2081 }
2082
2083 for (i = 0; i < c; i++) {
ddbbfbc1
A
2084 if (likely(ja[i])) {
2085 job_dispatch_curious_jobs(ja[i]);
2086 job_dispatch(ja[i], false);
5b0a4722 2087 }
ed34e3c3
A
2088 }
2089
2090 return resp;
2091}
2092
2093void
5b0a4722 2094job_import_bool(job_t j, const char *key, bool value)
ed34e3c3 2095{
5b0a4722
A
2096 bool found_key = false;
2097
ed34e3c3 2098 switch (key[0]) {
5b0a4722
A
2099 case 'a':
2100 case 'A':
2101 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2102 j->abandon_pg = value;
2103 found_key = true;
2104 }
ed34e3c3 2105 break;
dcace88f
A
2106 case 'b':
2107 case 'B':
2108 if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2109 j->dirty_at_shutdown = value;
2110 found_key = true;
2111 }
2112 break;
ed34e3c3
A
2113 case 'k':
2114 case 'K':
5b0a4722 2115 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
ed34e3c3 2116 j->ondemand = !value;
5b0a4722
A
2117 found_key = true;
2118 }
ed34e3c3
A
2119 break;
2120 case 'o':
2121 case 'O':
5b0a4722 2122 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
ed34e3c3 2123 j->ondemand = value;
5b0a4722
A
2124 found_key = true;
2125 }
ed34e3c3
A
2126 break;
2127 case 'd':
2128 case 'D':
5b0a4722 2129 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
ed34e3c3 2130 j->debug = value;
5b0a4722
A
2131 found_key = true;
2132 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
dcace88f
A
2133 (void)job_assumes(j, !value);
2134 found_key = true;
2135 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2136 j->disable_aslr = value;
5b0a4722
A
2137 found_key = true;
2138 }
2139 break;
2140 case 'h':
2141 case 'H':
2142 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
dcace88f
A
2143 job_log(j, LOG_INFO, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
2144 j->dirty_at_shutdown = value;
5b0a4722
A
2145 found_key = true;
2146 }
ed34e3c3
A
2147 break;
2148 case 's':
2149 case 'S':
5b0a4722 2150 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
ed34e3c3 2151 j->session_create = value;
5b0a4722
A
2152 found_key = true;
2153 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2154 j->start_on_mount = value;
2155 found_key = true;
2156 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
2157 /* this only does something on Mac OS X 10.4 "Tiger" */
2158 found_key = true;
dcace88f
A
2159 } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
2160 if (_s_shutdown_monitor) {
2161 job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2162 } else {
2163 j->shutdown_monitor = true;
2164 _s_shutdown_monitor = j;
2165 }
2166 found_key = true;
5b0a4722 2167 }
ed34e3c3
A
2168 break;
2169 case 'l':
2170 case 'L':
5b0a4722 2171 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
ed34e3c3 2172 j->low_pri_io = value;
5b0a4722
A
2173 found_key = true;
2174 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2175 j->only_once = value;
2176 found_key = true;
2177 }
ed34e3c3 2178 break;
fe044cc9
A
2179 case 'm':
2180 case 'M':
2181 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2182 j->internal_exc_handler = value;
2183 found_key = true;
dcace88f
A
2184 } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2185 j->multiple_instances = value;
2186 found_key = true;
fe044cc9
A
2187 }
2188 break;
ed34e3c3
A
2189 case 'i':
2190 case 'I':
5b0a4722
A
2191 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2192 if (getuid() != 0) {
2193 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2194 return;
2195 }
2196 j->no_init_groups = !value;
2197 found_key = true;
dcace88f 2198 } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
ddbbfbc1
A
2199 j->ignore_pg_at_shutdown = value;
2200 found_key = true;
5b0a4722 2201 }
ed34e3c3
A
2202 break;
2203 case 'r':
2204 case 'R':
5b0a4722
A
2205 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2206 if (value) {
2207 /* We don't want value == false to change j->start_pending */
2208 j->start_pending = true;
2209 }
2210 found_key = true;
2211 }
ed34e3c3
A
2212 break;
2213 case 'e':
2214 case 'E':
5b0a4722 2215 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
ed34e3c3 2216 j->globargv = value;
5b0a4722 2217 found_key = true;
ddbbfbc1
A
2218 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
2219 j->kill_via_shmem = value;
2220 found_key = true;
5b0a4722
A
2221 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2222 j->debug_before_kill = value;
2223 found_key = true;
dcace88f
A
2224 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
2225 if (!s_embedded_privileged_job) {
ddbbfbc1
A
2226 j->embedded_special_privileges = value;
2227 s_embedded_privileged_job = j;
2228 } else {
2229 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2230 }
2231 found_key = true;
dcace88f
A
2232 } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
2233 if (job_assumes(j, _s_event_monitor == NULL)) {
2234 j->event_monitor = value;
2235 if (value) {
2236 _s_event_monitor = j;
2237 }
2238 } else {
2239 job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility!");
2240 }
2241 found_key = true;
5b0a4722 2242 }
ed34e3c3
A
2243 break;
2244 case 'w':
2245 case 'W':
5b0a4722 2246 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
ed34e3c3 2247 j->wait4debugger = value;
5b0a4722
A
2248 found_key = true;
2249 }
ed34e3c3 2250 break;
a6e7a709
A
2251 case 'x':
2252 case 'X':
2253 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
2254 if (pid1_magic) {
2255 if (_s_xpc_bootstrapper) {
2256 job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _s_xpc_bootstrapper->label);
2257 } else {
2258 _s_xpc_bootstrapper = j;
2259 j->xpc_bootstrapper = value;
2260 }
2261 } else {
2262 job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
2263 }
2264 }
2265 found_key = true;
2266 break;
ed34e3c3
A
2267 default:
2268 break;
2269 }
5b0a4722 2270
ddbbfbc1 2271 if (unlikely(!found_key)) {
5b0a4722
A
2272 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2273 }
ed34e3c3
A
2274}
2275
2276void
5b0a4722 2277job_import_string(job_t j, const char *key, const char *value)
ed34e3c3
A
2278{
2279 char **where2put = NULL;
ed34e3c3
A
2280
2281 switch (key[0]) {
fe044cc9
A
2282 case 'm':
2283 case 'M':
2284 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2285 where2put = &j->alt_exc_handler;
2286 }
2287 break;
ed34e3c3
A
2288 case 'p':
2289 case 'P':
5b0a4722
A
2290 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
2291 return;
dcace88f
A
2292 } else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0) {
2293 if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
2294 j->pstype = POSIX_SPAWN_OSX_TALAPP_START;
2295 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_WIDGET) == 0) {
2296 j->pstype = POSIX_SPAWN_OSX_WIDGET_START;
2297 }
2298#if TARGET_OS_EMBEDDED
2299 else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_IOSAPP) == 0) {
2300 j->pstype = POSIX_SPAWN_IOS_APP_START;
2301 }
5c88273d 2302#endif /* TARGET_OS_EMBEDDED */
dcace88f
A
2303 else {
2304 job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
2305 }
2306 return;
5b0a4722 2307 }
ed34e3c3
A
2308 break;
2309 case 'l':
2310 case 'L':
5b0a4722
A
2311 if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
2312 return;
2313 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2314 return;
2315 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2316 return;
2317 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
5b0a4722
A
2318 return;
2319 }
ed34e3c3
A
2320 break;
2321 case 'r':
2322 case 'R':
5b0a4722
A
2323 if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
2324 if (getuid() != 0) {
2325 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2326 return;
2327 }
ed34e3c3 2328 where2put = &j->rootdir;
5b0a4722 2329 }
ed34e3c3
A
2330 break;
2331 case 'w':
2332 case 'W':
5b0a4722 2333 if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
ed34e3c3 2334 where2put = &j->workingdir;
5b0a4722 2335 }
ed34e3c3
A
2336 break;
2337 case 'u':
2338 case 'U':
5b0a4722
A
2339 if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
2340 if (getuid() != 0) {
2341 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2342 return;
2343 } else if (strcmp(value, "root") == 0) {
2344 return;
2345 }
ed34e3c3 2346 where2put = &j->username;
5b0a4722 2347 }
ed34e3c3
A
2348 break;
2349 case 'g':
2350 case 'G':
5b0a4722
A
2351 if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
2352 if (getuid() != 0) {
2353 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2354 return;
2355 } else if (strcmp(value, "wheel") == 0) {
2356 return;
2357 }
ed34e3c3 2358 where2put = &j->groupname;
5b0a4722 2359 }
ed34e3c3
A
2360 break;
2361 case 's':
2362 case 'S':
2363 if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
2364 where2put = &j->stdoutpath;
2365 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
2366 where2put = &j->stderrpath;
ddbbfbc1
A
2367 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
2368 where2put = &j->stdinpath;
2369 j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
2370 if (job_assumes(j, j->stdin_fd != -1)) {
2371 /* open() should not block, but regular IO by the job should */
dcace88f 2372 (void)job_assumes(j, fcntl(j->stdin_fd, F_SETFL, 0) != -1);
ddbbfbc1 2373 /* XXX -- EV_CLEAR should make named pipes happy? */
dcace88f 2374 (void)job_assumes(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j) != -1);
ddbbfbc1
A
2375 } else {
2376 j->stdin_fd = 0;
2377 }
f36da725 2378#if HAVE_SANDBOX
5b0a4722
A
2379 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
2380 where2put = &j->seatbelt_profile;
f36da725 2381#endif
ed34e3c3
A
2382 }
2383 break;
dcace88f
A
2384 case 'X':
2385 case 'x':
2386 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
2387 return;
2388 }
2389 break;
ed34e3c3 2390 default:
5b0a4722 2391 job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
ed34e3c3
A
2392 break;
2393 }
2394
ddbbfbc1 2395 if (likely(where2put)) {
dcace88f 2396 (void)job_assumes(j, (*where2put = strdup(value)) != NULL);
ed34e3c3 2397 } else {
ddbbfbc1 2398 /* See rdar://problem/5496612. These two are okay. */
dcace88f 2399 if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) != 0 && strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) != 0) {
ddbbfbc1
A
2400 job_log(j, LOG_WARNING, "Unknown key: %s", key);
2401 }
ed34e3c3
A
2402 }
2403}
2404
2405void
5b0a4722 2406job_import_integer(job_t j, const char *key, long long value)
ed34e3c3
A
2407{
2408 switch (key[0]) {
5b0a4722
A
2409 case 'e':
2410 case 'E':
2411 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
ddbbfbc1 2412 if (unlikely(value < 0)) {
5b0a4722 2413 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
ddbbfbc1 2414 } else if (unlikely(value > UINT32_MAX)) {
5b0a4722
A
2415 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2416 } else {
ddbbfbc1 2417 j->exit_timeout = (typeof(j->exit_timeout)) value;
5b0a4722 2418 }
dcace88f 2419 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
ddbbfbc1
A
2420 j->main_thread_priority = value;
2421 }
2422 break;
2423 case 'j':
2424 case 'J':
dcace88f 2425 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
587e987e
A
2426 job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2427
2428 launch_data_t pri = launch_data_new_integer(value);
dcace88f 2429 if (job_assumes(j, pri != NULL)) {
587e987e
A
2430 jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2431 launch_data_free(pri);
2432 }
5b0a4722 2433 }
ed34e3c3
A
2434 case 'n':
2435 case 'N':
5b0a4722 2436 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
ddbbfbc1
A
2437 if (unlikely(value < PRIO_MIN)) {
2438 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2439 } else if (unlikely(value > PRIO_MAX)) {
2440 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2441 } else {
2442 j->nice = (typeof(j->nice)) value;
2443 j->setnice = true;
2444 }
5b0a4722 2445 }
ed34e3c3
A
2446 break;
2447 case 't':
2448 case 'T':
2449 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
ddbbfbc1 2450 if (unlikely(value < 0)) {
5b0a4722 2451 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
ddbbfbc1 2452 } else if (unlikely(value > UINT32_MAX)) {
5b0a4722
A
2453 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2454 } else {
ddbbfbc1 2455 j->timeout = (typeof(j->timeout)) value;
5b0a4722
A
2456 }
2457 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2458 if (value < 0) {
2459 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2460 } else if (value > UINT32_MAX) {
2461 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2462 } else {
ddbbfbc1 2463 j->min_run_time = (typeof(j->min_run_time)) value;
5b0a4722 2464 }
ed34e3c3
A
2465 }
2466 break;
2467 case 'u':
2468 case 'U':
2469 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2470 j->mask = value;
2471 j->setmask = true;
2472 }
2473 break;
2474 case 's':
2475 case 'S':
2476 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
ddbbfbc1 2477 if (unlikely(value <= 0)) {
5b0a4722 2478 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
ddbbfbc1 2479 } else if (unlikely(value > UINT32_MAX)) {
5b0a4722
A
2480 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2481 } else {
ddbbfbc1
A
2482 runtime_add_weak_ref();
2483 j->start_interval = (typeof(j->start_interval)) value;
5b0a4722 2484
dcace88f 2485 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
5b0a4722 2486 }
f36da725 2487#if HAVE_SANDBOX
5b0a4722
A
2488 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2489 j->seatbelt_flags = value;
f36da725 2490#endif
5b0a4722
A
2491 }
2492
2493 break;
2494 default:
2495 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2496 break;
2497 }
2498}
2499
2500void
ef398931
A
2501job_import_opaque(job_t j __attribute__((unused)),
2502 const char *key, launch_data_t value __attribute__((unused)))
5b0a4722
A
2503{
2504 switch (key[0]) {
2505 case 'q':
2506 case 'Q':
f36da725 2507#if HAVE_QUARANTINE
5b0a4722
A
2508 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2509 size_t tmpsz = launch_data_get_opaque_size(value);
2510
2511 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2512 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2513 j->quarantine_data_sz = tmpsz;
2514 }
ed34e3c3 2515 }
f36da725 2516#endif
ddbbfbc1
A
2517 case 's':
2518 case 'S':
dcace88f 2519 if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
ddbbfbc1 2520 size_t tmpsz = launch_data_get_opaque_size(value);
dcace88f 2521 if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
ddbbfbc1
A
2522 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2523 }
2524 }
ed34e3c3
A
2525 break;
2526 default:
2527 break;
2528 }
2529}
2530
f36da725
A
2531static void
2532policy_setup(launch_data_t obj, const char *key, void *context)
2533{
2534 job_t j = context;
2535 bool found_key = false;
2536
2537 switch (key[0]) {
2538 case 'd':
2539 case 'D':
2540 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2541 j->deny_job_creation = launch_data_get_bool(obj);
2542 found_key = true;
2543 }
2544 break;
2545 default:
2546 break;
2547 }
2548
2549 if (unlikely(!found_key)) {
2550 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2551 }
2552}
2553
ed34e3c3 2554void
5b0a4722 2555job_import_dictionary(job_t j, const char *key, launch_data_t value)
ed34e3c3
A
2556{
2557 launch_data_t tmp;
2558
2559 switch (key[0]) {
f36da725
A
2560 case 'p':
2561 case 'P':
2562 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2563 launch_data_dict_iterate(value, policy_setup, j);
2564 }
2565 break;
ed34e3c3
A
2566 case 'k':
2567 case 'K':
5b0a4722 2568 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
ed34e3c3 2569 launch_data_dict_iterate(value, semaphoreitem_setup, j);
5b0a4722 2570 }
ed34e3c3
A
2571 break;
2572 case 'i':
2573 case 'I':
2574 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2575 j->inetcompat = true;
5b0a4722
A
2576 j->abandon_pg = true;
2577 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
ed34e3c3 2578 j->inetcompat_wait = launch_data_get_bool(tmp);
5b0a4722 2579 }
ed34e3c3
A
2580 }
2581 break;
587e987e
A
2582 case 'j':
2583 case 'J':
dcace88f 2584 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
587e987e
A
2585 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2586 }
ed34e3c3
A
2587 case 'e':
2588 case 'E':
5b0a4722 2589 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
ed34e3c3 2590 launch_data_dict_iterate(value, envitem_setup, j);
ddbbfbc1 2591 }
ed34e3c3
A
2592 break;
2593 case 'u':
2594 case 'U':
2595 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2596 j->importing_global_env = true;
2597 launch_data_dict_iterate(value, envitem_setup, j);
2598 j->importing_global_env = false;
2599 }
2600 break;
2601 case 's':
2602 case 'S':
2603 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2604 launch_data_dict_iterate(value, socketgroup_setup, j);
2605 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2606 calendarinterval_new_from_obj(j, value);
2607 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2608 launch_data_dict_iterate(value, limititem_setup, j);
f36da725 2609#if HAVE_SANDBOX
5b0a4722
A
2610 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2611 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
f36da725 2612#endif
ed34e3c3
A
2613 }
2614 break;
2615 case 'h':
2616 case 'H':
2617 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2618 j->importing_hard_limits = true;
2619 launch_data_dict_iterate(value, limititem_setup, j);
2620 j->importing_hard_limits = false;
2621 }
2622 break;
2623 case 'm':
2624 case 'M':
2625 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2626 launch_data_dict_iterate(value, machservice_setup, j);
ed34e3c3
A
2627 }
2628 break;
dcace88f
A
2629 case 'l':
2630 case 'L':
2631 if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2632 launch_data_dict_iterate(value, eventsystem_setup, j);
2633 } else {
2634 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2635 return;
2636 }
2637 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2638 return;
2639 }
2640 }
2641 break;
ed34e3c3 2642 default:
5b0a4722 2643 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
ed34e3c3
A
2644 break;
2645 }
2646}
2647
2648void
5b0a4722 2649job_import_array(job_t j, const char *key, launch_data_t value)
ed34e3c3 2650{
5b0a4722
A
2651 size_t i, value_cnt = launch_data_array_get_count(value);
2652 const char *str;
ed34e3c3
A
2653
2654 switch (key[0]) {
5b0a4722
A
2655 case 'p':
2656 case 'P':
2657 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
2658 return;
2659 }
2660 break;
2661 case 'l':
2662 case 'L':
2663 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2664 return;
2665 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2666 return;
2667 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2668 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2669 return;
2670 }
2671 break;
ed34e3c3
A
2672 case 'q':
2673 case 'Q':
2674 if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
5b0a4722
A
2675 for (i = 0; i < value_cnt; i++) {
2676 str = launch_data_get_string(launch_data_array_get_index(value, i));
2677 if (job_assumes(j, str != NULL)) {
2678 semaphoreitem_new(j, DIR_NOT_EMPTY, str);
2679 }
2680 }
2681
ed34e3c3
A
2682 }
2683 break;
2684 case 'w':
2685 case 'W':
5b0a4722
A
2686 if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
2687 for (i = 0; i < value_cnt; i++) {
2688 str = launch_data_get_string(launch_data_array_get_index(value, i));
2689 if (job_assumes(j, str != NULL)) {
2690 semaphoreitem_new(j, PATH_CHANGES, str);
2691 }
2692 }
2693 }
ed34e3c3
A
2694 break;
2695 case 'b':
2696 case 'B':
5b0a4722 2697 if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
ed34e3c3 2698 socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
5b0a4722
A
2699 } else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
2700 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
2701 j->j_binpref_cnt = value_cnt;
2702 for (i = 0; i < value_cnt; i++) {
ddbbfbc1 2703 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
5b0a4722
A
2704 }
2705 }
2706 }
ed34e3c3
A
2707 break;
2708 case 's':
2709 case 'S':
2710 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
5b0a4722 2711 for (i = 0; i < value_cnt; i++) {
ed34e3c3 2712 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
5b0a4722 2713 }
ed34e3c3
A
2714 }
2715 break;
2716 default:
5b0a4722 2717 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
ed34e3c3
A
2718 break;
2719 }
ed34e3c3
A
2720}
2721
2722void
2723job_import_keys(launch_data_t obj, const char *key, void *context)
2724{
5b0a4722 2725 job_t j = context;
ed34e3c3
A
2726 launch_data_type_t kind;
2727
ddbbfbc1 2728 if (!launchd_assumes(obj != NULL)) {
ed34e3c3 2729 return;
5b0a4722 2730 }
ed34e3c3
A
2731
2732 kind = launch_data_get_type(obj);
2733
2734 switch (kind) {
2735 case LAUNCH_DATA_BOOL:
2736 job_import_bool(j, key, launch_data_get_bool(obj));
2737 break;
2738 case LAUNCH_DATA_STRING:
2739 job_import_string(j, key, launch_data_get_string(obj));
2740 break;
2741 case LAUNCH_DATA_INTEGER:
2742 job_import_integer(j, key, launch_data_get_integer(obj));
2743 break;
2744 case LAUNCH_DATA_DICTIONARY:
2745 job_import_dictionary(j, key, obj);
2746 break;
2747 case LAUNCH_DATA_ARRAY:
2748 job_import_array(j, key, obj);
2749 break;
5b0a4722
A
2750 case LAUNCH_DATA_OPAQUE:
2751 job_import_opaque(j, key, obj);
2752 break;
ed34e3c3
A
2753 default:
2754 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
2755 break;
2756 }
2757}
2758
dcace88f 2759job_t
5b0a4722 2760jobmgr_import2(jobmgr_t jm, launch_data_t pload)
ed34e3c3
A
2761{
2762 launch_data_t tmp, ldpa;
2763 const char *label = NULL, *prog = NULL;
2764 const char **argv = NULL;
5b0a4722
A
2765 job_t j;
2766
ddbbfbc1
A
2767 if (!jobmgr_assumes(jm, pload != NULL)) {
2768 errno = EINVAL;
2769 return NULL;
2770 }
2771
2772 if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
5b0a4722
A
2773 errno = EINVAL;
2774 return NULL;
2775 }
ed34e3c3 2776
ddbbfbc1 2777 if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
5b0a4722 2778 errno = EINVAL;
ed34e3c3 2779 return NULL;
5b0a4722 2780 }
ed34e3c3 2781
ddbbfbc1 2782 if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
5b0a4722 2783 errno = EINVAL;
ed34e3c3 2784 return NULL;
5b0a4722 2785 }
ed34e3c3 2786
ddbbfbc1 2787 if (unlikely(!(label = launch_data_get_string(tmp)))) {
5b0a4722
A
2788 errno = EINVAL;
2789 return NULL;
ed34e3c3 2790 }
5b0a4722 2791
ddbbfbc1 2792#if TARGET_OS_EMBEDDED
dcace88f
A
2793 if (unlikely(g_embedded_privileged_action && s_embedded_privileged_job)) {
2794 if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
ddbbfbc1
A
2795 errno = EPERM;
2796 return NULL;
2797 }
2798
2799 const char *username = NULL;
dcace88f 2800 if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
ddbbfbc1
A
2801 username = launch_data_get_string(tmp);
2802 } else {
2803 errno = EPERM;
2804 return NULL;
2805 }
2806
dcace88f 2807 if (!jobmgr_assumes(jm, s_embedded_privileged_job->username != NULL && username != NULL)) {
ddbbfbc1
A
2808 errno = EPERM;
2809 return NULL;
2810 }
2811
dcace88f 2812 if (unlikely(strcmp(s_embedded_privileged_job->username, username) != 0)) {
ddbbfbc1
A
2813 errno = EPERM;
2814 return NULL;
2815 }
dcace88f 2816 } else if (g_embedded_privileged_action) {
5b0a4722
A
2817 errno = EINVAL;
2818 return NULL;
2819 }
ddbbfbc1 2820#endif
5b0a4722 2821
ed34e3c3
A
2822 if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
2823 (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
2824 prog = launch_data_get_string(tmp);
2825 }
ed34e3c3 2826
dcace88f 2827 int argc = 0;
5b0a4722
A
2828 if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
2829 size_t i, c;
2830
2831 if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
2832 errno = EINVAL;
2833 return NULL;
2834 }
2835
2836 c = launch_data_array_get_count(ldpa);
2837
2838 argv = alloca((c + 1) * sizeof(char *));
2839
2840 for (i = 0; i < c; i++) {
2841 tmp = launch_data_array_get_index(ldpa, i);
2842
2843 if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
2844 errno = EINVAL;
2845 return NULL;
2846 }
2847
2848 argv[i] = launch_data_get_string(tmp);
2849 }
2850
2851 argv[i] = NULL;
dcace88f 2852 argc = i;
5b0a4722
A
2853 }
2854
dcace88f
A
2855 if (!prog && argc == 0) {
2856 jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
2857 errno = EINVAL;
2858 return NULL;
2859 }
2860
2861 /* Find the requested session. You cannot load services into XPC domains in
2862 * this manner.
2863 */
ddbbfbc1 2864 launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
dcace88f
A
2865 if (session) {
2866 jobmgr_t jmt = NULL;
2867 if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
2868 jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
2869 if (!jmt) {
2870 jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
2871 } else {
2872 jm = jmt;
2873 }
2874 } else {
2875 jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
2876 }
2877
2878 if (!jmt) {
2879 errno = EINVAL;
2880 return NULL;
2881 }
ddbbfbc1 2882 }
dcace88f
A
2883
2884 /* For legacy reasons, we have a global hash of all labels in all job
2885 * managers. So rather than make it a global, we store it in the root job
2886 * manager. But for an XPC domain, we store a local hash of all services in
2887 * the domain.
2888 */
2889 jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
2890 if (unlikely((j = job_find(where2look, label)) != NULL)) {
2891 if (jm->xpc_singleton) {
2892 /* There can (and probably will be) multiple attemtps to import the
2893 * same XPC service from the same framework. This is okay. It's
2894 * treated as a singleton, so just return the existing one so that
2895 * it may be aliased into the requesting process' XPC domain.
2896 */
2897 return j;
2898 } else {
2899 /* If we're not a global XPC domain, then it's an error to try
2900 * importing the same job/service multiple times.
2901 */
2902 errno = EEXIST;
2903 return NULL;
2904 }
2905 } else if (unlikely(!jobmgr_label_test(where2look, label))) {
ed34e3c3
A
2906 errno = EINVAL;
2907 return NULL;
2908 }
dcace88f 2909 jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);
ed34e3c3 2910
ddbbfbc1 2911 if (likely(j = job_new(jm, label, prog, argv))) {
5b0a4722 2912 launch_data_dict_iterate(pload, job_import_keys, j);
dcace88f 2913 if (!uuid_is_null(j->expected_audit_uuid)) {
ddbbfbc1
A
2914 uuid_string_t uuid_str;
2915 uuid_unparse(j->expected_audit_uuid, uuid_str);
2916 job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
2917 LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
2918 errno = ENEEDAUTH;
2919 } else {
2920 job_log(j, LOG_DEBUG, "No security session specified.");
dcace88f
A
2921 j->asport = MACH_PORT_NULL;
2922 }
2923
2924 if (j->event_monitor) {
2925 if (job_assumes(j, LIST_FIRST(&j->events) == NULL)) {
2926 struct machservice *msi = NULL;
2927 SLIST_FOREACH(msi, &j->machservices, sle) {
2928 if (msi->event_update_port) {
2929 break;
2930 }
2931 }
2932
2933 if (job_assumes(j, msi != NULL)) {
2934 /* Create our send-once right so we can kick things off. */
2935 (void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
2936 if (!LIST_EMPTY(&_s_event_systems)) {
2937 eventsystem_ping();
2938 }
2939 }
2940 } else {
2941 job_log(j, LOG_ERR, "The event monitor job may not have a LaunchEvents dictionary.");
2942 job_remove(j);
2943 j = NULL;
2944 }
ddbbfbc1 2945 }
5b0a4722 2946 }
ed34e3c3 2947
5b0a4722
A
2948 return j;
2949}
ed34e3c3 2950
f36da725
A
2951bool
2952jobmgr_label_test(jobmgr_t jm, const char *str)
2953{
f36da725
A
2954 const char *ptr;
2955
2956 if (str[0] == '\0') {
2957 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
2958 return false;
2959 }
ddbbfbc1 2960
f36da725
A
2961 for (ptr = str; *ptr; ptr++) {
2962 if (iscntrl(*ptr)) {
2963 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
2964 return false;
2965 }
2966 }
ddbbfbc1 2967
f36da725
A
2968 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
2969 (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
2970 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
2971 return false;
2972 }
2973
2974 return true;
2975}
2976
5b0a4722 2977job_t
dcace88f 2978job_find(jobmgr_t jm, const char *label)
5b0a4722
A
2979{
2980 job_t ji;
dcace88f
A
2981
2982 if (!jm) {
2983 jm = root_jobmgr;
2984 }
2985
2986 LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
ddbbfbc1
A
2987 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
2988 continue; /* 5351245 and 5488633 respectively */
5b0a4722
A
2989 }
2990
2991 if (strcmp(ji->label, label) == 0) {
2992 return ji;
2993 }
ed34e3c3
A
2994 }
2995
5b0a4722
A
2996 errno = ESRCH;
2997 return NULL;
2998}
ed34e3c3 2999
ddbbfbc1 3000/* Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid(). */
5b0a4722 3001job_t
ddbbfbc1 3002jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
5b0a4722
A
3003{
3004 job_t ji = NULL;
dcace88f 3005 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
ddbbfbc1
A
3006 if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay)) ) {
3007 return ji;
3008 }
3009 }
5b0a4722 3010
ddbbfbc1 3011 jobmgr_t jmi = NULL;
dcace88f
A
3012 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3013 if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
5b0a4722
A
3014 break;
3015 }
3016 }
3017
ddbbfbc1
A
3018 return ji;
3019}
3020
3021job_t
3022jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3023{
3024 job_t ji;
3025
3026 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3027 if (ji->p == p) {
3028 return ji;
3029 }
5b0a4722 3030 }
ddbbfbc1
A
3031
3032 return create_anon ? job_new_anonymous(jm, p) : NULL;
ed34e3c3
A
3033}
3034
5b0a4722
A
3035job_t
3036job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
ed34e3c3 3037{
5b0a4722
A
3038 jobmgr_t jmi;
3039 job_t ji;
ed34e3c3 3040
5b0a4722 3041 if (jm->jm_port == mport) {
ddbbfbc1 3042 return jobmgr_find_by_pid(jm, upid, true);
5b0a4722 3043 }
ed34e3c3 3044
5b0a4722
A
3045 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3046 job_t jr;
ed34e3c3 3047
5b0a4722 3048 if ((jr = job_mig_intran2(jmi, mport, upid))) {
ed34e3c3 3049 return jr;
5b0a4722
A
3050 }
3051 }
3052
3053 LIST_FOREACH(ji, &jm->jobs, sle) {
3054 if (ji->j_port == mport) {
3055 return ji;
3056 }
ed34e3c3
A
3057 }
3058
ed34e3c3
A
3059 return NULL;
3060}
3061
5b0a4722
A
3062job_t
3063job_mig_intran(mach_port_t p)
ed34e3c3 3064{
ddbbfbc1 3065 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722 3066 job_t jr;
ed34e3c3 3067
ddbbfbc1 3068 jr = job_mig_intran2(root_jobmgr, p, ldc->pid);
5b0a4722
A
3069
3070 if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
dcace88f
A
3071 struct proc_bsdshortinfo proc;
3072 if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3073 if (errno != ESRCH) {
3074 (void)jobmgr_assumes(root_jobmgr, errno == 0);
3075 } else {
3076 jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc->pid, ldc->uid, ldc->euid, p, proc.pbsi_comm);
3077 }
5b0a4722
A
3078 }
3079 }
3080
3081 return jr;
3082}
3083
3084job_t
3085job_find_by_service_port(mach_port_t p)
3086{
3087 struct machservice *ms;
3088
3089 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
3090 if (ms->recv && (ms->port == p)) {
3091 return ms->job;
3092 }
ed34e3c3
A
3093 }
3094
ed34e3c3
A
3095 return NULL;
3096}
3097
3098void
5b0a4722 3099job_mig_destructor(job_t j)
ed34e3c3 3100{
5b0a4722
A
3101 /*
3102 * 5477111
3103 *
3104 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
3105 */
3106
ddbbfbc1 3107 if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
5b0a4722
A
3108 job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
3109 job_remove(j);
3110 }
ed34e3c3 3111
5b0a4722 3112 workaround_5477111 = NULL;
ed34e3c3 3113
5b0a4722
A
3114 calendarinterval_sanity_check();
3115}
3116
3117void
3118job_export_all2(jobmgr_t jm, launch_data_t where)
3119{
3120 jobmgr_t jmi;
3121 job_t ji;
3122
3123 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3124 job_export_all2(jmi, where);
3125 }
3126
3127 LIST_FOREACH(ji, &jm->jobs, sle) {
3128 launch_data_t tmp;
3129
3130 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3131 launch_data_dict_insert(where, tmp, ji->label);
3132 }
3133 }
ed34e3c3
A
3134}
3135
3136launch_data_t
3137job_export_all(void)
3138{
3139 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3140
5b0a4722
A
3141 if (launchd_assumes(resp != NULL)) {
3142 job_export_all2(root_jobmgr, resp);
3143 }
ed34e3c3
A
3144
3145 return resp;
3146}
3147
3148void
5b0a4722 3149job_log_stray_pg(job_t j)
ed34e3c3 3150{
dcace88f
A
3151 pid_t *pids = NULL;
3152 size_t len = sizeof(pid_t) * get_kern_max_proc();
3153 int i = 0, kp_cnt = 0;
3154
ddbbfbc1 3155 if (!do_apple_internal_logging) {
f36da725
A
3156 return;
3157 }
ddbbfbc1
A
3158
3159 runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);
f36da725 3160
dcace88f 3161 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
5b0a4722
A
3162 return;
3163 }
dcace88f 3164 if (!job_assumes(j, (kp_cnt = proc_listpgrppids(j->p, pids, len)) != -1)) {
5b0a4722 3165 goto out;
ed34e3c3 3166 }
dcace88f 3167
5b0a4722 3168 for (i = 0; i < kp_cnt; i++) {
dcace88f 3169 pid_t p_i = pids[i];
5b0a4722
A
3170 if (p_i == j->p) {
3171 continue;
3172 } else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
3173 continue;
3174 }
dcace88f
A
3175
3176 struct proc_bsdshortinfo proc;
3177 if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3178 if (errno != ESRCH) {
3179 job_assumes(j, errno == 0);
3180 }
3181 continue;
3182 }
3183
3184 pid_t pp_i = proc.pbsi_ppid;
3185 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
3186 const char *n = proc.pbsi_comm;
5b0a4722 3187
dcace88f 3188 job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
5b0a4722
A
3189 }
3190
3191out:
dcace88f 3192 free(pids);
5b0a4722
A
3193}
3194
3195void
3196job_reap(job_t j)
3197{
3198 struct rusage ru;
3199 int status;
3200
ddbbfbc1
A
3201 bool is_system_bootstrapper = j->is_bootstrapper && pid1_magic && !j->mgr->parentmgr;
3202
5b0a4722
A
3203 job_log(j, LOG_DEBUG, "Reaping");
3204
ddbbfbc1 3205 if (j->shmem) {
dcace88f 3206 (void)job_assumes(j, vm_deallocate(mach_task_self(), (vm_address_t)j->shmem, getpagesize()) == 0);
ddbbfbc1
A
3207 j->shmem = NULL;
3208 }
5b0a4722 3209
ddbbfbc1
A
3210 if (unlikely(j->weird_bootstrap)) {
3211 int64_t junk = 0;
3212 job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
5b0a4722
A
3213 }
3214
ddbbfbc1
A
3215 if (j->log_redirect_fd && !j->legacy_LS_job) {
3216 job_log_stdouterr(j); /* one last chance */
3217
3218 if (j->log_redirect_fd) {
dcace88f 3219 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
ddbbfbc1
A
3220 j->log_redirect_fd = 0;
3221 }
5b0a4722
A
3222 }
3223
ddbbfbc1 3224 if (j->fork_fd) {
dcace88f 3225 (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
ddbbfbc1 3226 j->fork_fd = 0;
5b0a4722
A
3227 }
3228
3229 if (j->anonymous) {
3230 status = 0;
3231 memset(&ru, 0, sizeof(ru));
3232 } else {
3233 /*
3234 * The job is dead. While the PID/PGID is still known to be
3235 * valid, try to kill abandoned descendant processes.
3236 */
3237 job_log_stray_pg(j);
3238 if (!j->abandon_pg) {
ddbbfbc1
A
3239 if (unlikely(runtime_killpg(j->p, SIGTERM) == -1 && errno != ESRCH)) {
3240#ifdef __LP64__
3241 job_log(j, LOG_APPLEONLY, "Bug: 5487498");
3242#else
dcace88f 3243 (void)job_assumes(j, false);
ddbbfbc1
A
3244#endif
3245 }
5b0a4722 3246 }
dcace88f
A
3247
3248 /* We have to work around one of two kernel bugs here. ptrace(3) may
3249 * have abducted the child away from us and reparented it to the tracing
3250 * process. If the process then exits, we still get NOTE_EXIT, but we
3251 * cannot reap it because the kernel may not have restored the true
3252 * parent/child relationship in time.
3253 *
3254 * See <rdar://problem/5020256>.
5b0a4722 3255 *
dcace88f
A
3256 * The other bug is if the shutdown monitor has suspended a task and not
3257 * resumed it before exiting. In this case, the kernel will not clean up
3258 * after the shutdown monitor. It will, instead, leave the task
3259 * task suspended and not process any pending signals on the event loop
3260 * for the task.
5b0a4722 3261 *
dcace88f
A
3262 * There are a variety of other kernel bugs that could prevent a process
3263 * from exiting, usually having to do with faulty hardware or talking to
3264 * misbehaving drivers that mark a thread as uninterruptible and
3265 * deadlock/hang before unmarking it as such. So we have to work around
3266 * that too.
5b0a4722 3267 *
dcace88f 3268 * See <rdar://problem/9284889&9359725>.
5b0a4722 3269 */
dcace88f
A
3270 if (j->workaround9359725) {
3271 job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
3272 status = W_EXITCODE(-1, SIGSEGV);
3273 memset(&ru, 0, sizeof(ru));
3274 } else if (wait4(j->p, &status, 0, &ru) == -1) {
3275 job_log(j, LOG_NOTICE, "Assuming job exited: <rdar://problem/5020256>: %d: %s", errno, strerror(errno));
3276 status = W_EXITCODE(-1, SIGSEGV);
5b0a4722
A
3277 memset(&ru, 0, sizeof(ru));
3278 }
ed34e3c3
A
3279 }
3280
5b0a4722
A
3281 if (j->exit_timeout) {
3282 kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
3283 }
ddbbfbc1 3284
5b0a4722
A
3285 LIST_REMOVE(j, pid_hash_sle);
3286
ddbbfbc1
A
3287 if (j->sent_signal_time) {
3288 uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);
5b0a4722
A
3289
3290 td_sec = td / NSEC_PER_SEC;
3291 td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;
3292
ddbbfbc1 3293 job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
5b0a4722
A
3294 }
3295
ed34e3c3
A
3296 timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
3297 timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
3298 j->ru.ru_maxrss += ru.ru_maxrss;
3299 j->ru.ru_ixrss += ru.ru_ixrss;
3300 j->ru.ru_idrss += ru.ru_idrss;
3301 j->ru.ru_isrss += ru.ru_isrss;
3302 j->ru.ru_minflt += ru.ru_minflt;
3303 j->ru.ru_majflt += ru.ru_majflt;
3304 j->ru.ru_nswap += ru.ru_nswap;
3305 j->ru.ru_inblock += ru.ru_inblock;
3306 j->ru.ru_oublock += ru.ru_oublock;
3307 j->ru.ru_msgsnd += ru.ru_msgsnd;
3308 j->ru.ru_msgrcv += ru.ru_msgrcv;
3309 j->ru.ru_nsignals += ru.ru_nsignals;
3310 j->ru.ru_nvcsw += ru.ru_nvcsw;
3311 j->ru.ru_nivcsw += ru.ru_nivcsw;
3312
3313 if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
dcace88f
A
3314 int level = LOG_WARNING;
3315 if (!j->did_exec && (j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
3316 level = LOG_DEBUG;
3317 }
3318
3319 job_log(j, level, "Exited with code: %d", WEXITSTATUS(status));
3320 } else {
3321 j->fail_cnt = 0;
ed34e3c3
A
3322 }
3323
3324 if (WIFSIGNALED(status)) {
3325 int s = WTERMSIG(status);
ddbbfbc1 3326 if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
ed34e3c3 3327 job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
dcace88f
A
3328 } else if (!j->stopped && !j->clean_kill) {
3329 switch (s) {
3330 /* Signals which indicate a crash. */
3331 case SIGILL:
3332 case SIGABRT:
3333 case SIGFPE:
3334 case SIGBUS:
3335 case SIGSEGV:
3336 case SIGSYS:
3337 /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
3338 * SIGTRAP, assume that it's a crash.
3339 */
3340 case SIGTRAP:
3341 j->crashed = true;
3342 job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
3343 break;
3344 default:
3345 job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
3346 break;
ddbbfbc1
A
3347 }
3348
dcace88f 3349 if (is_system_bootstrapper && j->crashed) {
ddbbfbc1
A
3350 job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
3351 }
ed34e3c3
A
3352 }
3353 }
3354
ddbbfbc1
A
3355 j->reaped = true;
3356
3357 struct machservice *msi = NULL;
dcace88f
A
3358 if (j->crashed || !(j->did_exec || j->anonymous)) {
3359 SLIST_FOREACH(msi, &j->machservices, sle) {
3360 if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
ddbbfbc1
A
3361 machservice_drain_port(msi);
3362 }
3363
dcace88f 3364 if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
ddbbfbc1
A
3365 machservice_resetport(j, msi);
3366 }
3367 }
3368 }
dcace88f
A
3369
3370 /* HACK: Essentially duplicating the logic directly above. But this has
3371 * gotten really hairy, and I don't want to try consolidating it right now.
3372 */
3373 if (j->xpc_service && !j->xpcproxy_did_exec) {
3374 job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
3375 SLIST_FOREACH(msi, &j->machservices, sle) {
3376 /* Drain the messages but do not reset the port. If xpcproxy could
3377 * not exec(3), then we don't want to continue trying, since there
3378 * is very likely a serious configuration error with the service.
3379 *
3380 * <rdar://problem/8986802>
3381 */
3382 machservice_resetport(j, msi);
3383 }
3384 }
3385
ddbbfbc1 3386 struct suspended_peruser *spi = NULL;
dcace88f 3387 while ((spi = LIST_FIRST(&j->suspended_perusers))) {
ddbbfbc1
A
3388 job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
3389 spi->j->peruser_suspend_count--;
dcace88f 3390 if (spi->j->peruser_suspend_count == 0) {
ddbbfbc1
A
3391 job_dispatch(spi->j, false);
3392 }
3393 LIST_REMOVE(spi, sle);
3394 free(spi);
3395 }
f70a210c
A
3396
3397 j->last_exit_status = status;
dcace88f
A
3398
3399 if (j->exit_status_dest) {
3400 errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
3401 if (errno && errno != MACH_SEND_INVALID_DEST) {
3402 (void)job_assumes(j, errno == 0);
3403 }
3404
3405 j->exit_status_dest = MACH_PORT_NULL;
ddbbfbc1 3406 }
dcace88f
A
3407
3408 if (j->spawn_reply_port) {
3409 /* If the child never called exec(3), we must send a spawn() reply so
3410 * that the requestor can get exit status from it. If we fail to send
3411 * the reply for some reason, we have to deallocate the exit status port
3412 * ourselves.
3413 */
3414 kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3415 if (kr) {
3416 if (kr != MACH_SEND_INVALID_DEST) {
3417 errno = kr;
3418 (void)job_assumes(j, errno == KERN_SUCCESS);
3419 }
3420
3421 (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
3422 }
3423
3424 j->exit_status_port = MACH_PORT_NULL;
3425 j->spawn_reply_port = MACH_PORT_NULL;
3426 }
3427
ddbbfbc1
A
3428 if (j->anonymous) {
3429 total_anon_children--;
dcace88f 3430 if (j->holds_ref) {
ddbbfbc1
A
3431 runtime_del_ref();
3432 }
3433 } else {
3434 runtime_del_ref();
3435 total_children--;
3436 }
3437
dcace88f 3438 if (j->has_console) {
ddbbfbc1
A
3439 g_wsp = 0;
3440 }
dcace88f
A
3441
3442 if (j->shutdown_monitor) {
3443 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
3444 _s_shutdown_monitor = NULL;
3445 j->shutdown_monitor = false;
3446 }
3447
3448 if (j->event_monitor && !j->mgr->shutting_down) {
3449 msi = NULL;
3450 SLIST_FOREACH(msi, &j->machservices, sle) {
3451 if (msi->event_update_port) {
3452 break;
3453 }
3454 }
3455 /* Only do this if we've gotten the port-destroyed notification already.
3456 * If we haven't yet, the port destruction handler will do this.
3457 */
3458 if (job_assumes(j, msi != NULL) && !msi->isActive) {
3459 if (_s_event_update_port == MACH_PORT_NULL) {
3460 (void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
3461 }
3462 eventsystem_ping();
3463 }
3464 }
ddbbfbc1 3465
dcace88f 3466 if (!j->anonymous) {
5b0a4722
A
3467 j->mgr->normal_active_cnt--;
3468 }
ddbbfbc1 3469 j->sent_signal_time = 0;
5b0a4722 3470 j->sent_sigkill = false;
ddbbfbc1 3471 j->clean_kill = false;
ddbbfbc1 3472 j->sent_kill_via_shmem = false;
cf0bacfd
A
3473 j->lastlookup = NULL;
3474 j->lastlookup_gennum = 0;
5b0a4722 3475 j->p = 0;
5b0a4722
A
3476}
3477
3478void
3479jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3480{
3481 jobmgr_t jmi, jmn;
3482 job_t ji, jn;
3483
3484 if (jm->shutting_down) {
3485 return;
3486 }
3487
3488 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3489 jobmgr_dispatch_all(jmi, newmounthack);
ed34e3c3
A
3490 }
3491
5b0a4722
A
3492 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3493 if (newmounthack && ji->start_on_mount) {
3494 ji->start_pending = true;
3495 }
ed34e3c3 3496
5b0a4722 3497 job_dispatch(ji, false);
ed34e3c3 3498 }
5b0a4722 3499}
ed34e3c3 3500
ddbbfbc1
A
3501void
3502job_dispatch_curious_jobs(job_t j)
3503{
3504 job_t ji = NULL, jt = NULL;
dcace88f 3505 SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
ddbbfbc1 3506 struct semaphoreitem *si = NULL;
dcace88f
A
3507 SLIST_FOREACH(si, &ji->semaphores, sle) {
3508 if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
ddbbfbc1
A
3509 continue;
3510 }
3511
dcace88f 3512 if (strcmp(si->what, j->label) == 0) {
ddbbfbc1
A
3513 job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);
3514
dcace88f
A
3515 if (!ji->removing) {
3516 job_dispatch(ji, false);
3517 } else {
3518 job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
3519 }
3520
ddbbfbc1
A
3521 /* ji could be removed here, so don't do anything with it or its semaphores
3522 * after this point.
3523 */
3524 break;
3525 }
3526 }
3527 }
3528}
3529
5b0a4722
A
3530job_t
3531job_dispatch(job_t j, bool kickstart)
3532{
ddbbfbc1 3533 /* Don't dispatch a job if it has no audit session set. */
dcace88f 3534 if (!uuid_is_null(j->expected_audit_uuid)) {
ddbbfbc1
A
3535 return NULL;
3536 }
dcace88f
A
3537 if (j->alias) {
3538 j = j->alias;
3539 }
ddbbfbc1
A
3540
3541#if TARGET_OS_EMBEDDED
dcace88f
A
3542 if (g_embedded_privileged_action && s_embedded_privileged_job) {
3543 if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
ddbbfbc1
A
3544 errno = EPERM;
3545 return NULL;
3546 }
3547
dcace88f 3548 if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
ddbbfbc1
A
3549 errno = EPERM;
3550 return NULL;
3551 }
dcace88f 3552 } else if (g_embedded_privileged_action) {
ddbbfbc1
A
3553 errno = EINVAL;
3554 return NULL;
3555 }
3556#endif
3557
5b0a4722
A
3558 /*
3559 * The whole job removal logic needs to be consolidated. The fact that
3560 * a job can be removed from just about anywhere makes it easy to have
3561 * stale pointers left behind somewhere on the stack that might get
3562 * used after the deallocation. In particular, during job iteration.
3563 *
3564 * This is a classic example. The act of dispatching a job may delete it.
ddbbfbc1 3565 */
5b0a4722
A
3566 if (!job_active(j)) {
3567 if (job_useless(j)) {
3568 job_remove(j);
3569 return NULL;
ddbbfbc1 3570 }
dcace88f 3571 if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
ddbbfbc1
A
3572 return NULL;
3573 }
3574
3575 if (kickstart || job_keepalive(j)) {
3576 job_log(j, LOG_DEBUG, "Starting job (kickstart = %s)", kickstart ? "true" : "false");
5b0a4722
A
3577 job_start(j);
3578 } else {
ddbbfbc1 3579 job_log(j, LOG_DEBUG, "Watching job (kickstart = %s)", kickstart ? "true" : "false");
5b0a4722
A
3580 job_watch(j);
3581
3582 /*
3583 * 5455720
3584 *
3585 * Path checking and monitoring is really racy right now.
3586 * We should clean this up post Leopard.
3587 */
3588 if (job_keepalive(j)) {
3589 job_start(j);
3590 }
3591 }
3592 } else {
ddbbfbc1 3593 job_log(j, LOG_DEBUG, "Tried to dispatch an already active job (%s).", job_active(j));
5b0a4722
A
3594 }
3595
3596 return j;
ed34e3c3
A
3597}
3598
3599void
5b0a4722 3600job_log_stdouterr2(job_t j, const char *msg, ...)
ed34e3c3 3601{
5b0a4722
A
3602 struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
3603 va_list ap;
3604
3605 va_start(ap, msg);
3606 runtime_vsyslog(&attr, msg, ap);
3607 va_end(ap);
3608}
3609
3610void
3611job_log_stdouterr(job_t j)
3612{
3613 char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
3614 bool close_log_redir = false;
3615 ssize_t rsz;
3616
3617 if (!job_assumes(j, buf != NULL)) {
ed34e3c3 3618 return;
5b0a4722
A
3619 }
3620
3621 bufindex = buf;
3622
3623 rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);
3624
ddbbfbc1 3625 if (unlikely(rsz == 0)) {
5b0a4722
A
3626 job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
3627 close_log_redir = true;
ddbbfbc1 3628 } else if (rsz == -1) {
dcace88f 3629 if (!job_assumes(j, errno == EAGAIN)) {
ddbbfbc1
A
3630 close_log_redir = true;
3631 }
ed34e3c3 3632 } else {
5b0a4722
A
3633 buf[rsz] = '\0';
3634
3635 while ((msg = strsep(&bufindex, "\n\r"))) {
3636 if (msg[0]) {
3637 job_log_stdouterr2(j, "%s", msg);
3638 }
3639 }
3640 }
3641
3642 free(buf);
3643
ddbbfbc1 3644 if (unlikely(close_log_redir)) {
dcace88f 3645 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5b0a4722
A
3646 j->log_redirect_fd = 0;
3647 job_dispatch(j, false);
ed34e3c3
A
3648 }
3649}
3650
3651void
5b0a4722 3652job_kill(job_t j)
ed34e3c3 3653{
ddbbfbc1 3654 if (unlikely(!j->p || j->anonymous)) {
5b0a4722
A
3655 return;
3656 }
ed34e3c3 3657
dcace88f 3658 (void)job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);
5b0a4722
A
3659
3660 j->sent_sigkill = true;
dcace88f 3661 (void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j) != -1);
5b0a4722 3662
ddbbfbc1 3663 job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
5b0a4722
A
3664}
3665
3666void
dcace88f 3667job_open_shutdown_transaction(job_t j)
5b0a4722 3668{
dcace88f
A
3669 if (j->kill_via_shmem) {
3670 if (j->shmem) {
3671 job_log(j, LOG_DEBUG, "Opening shutdown transaction for job.");
3672 (void)__sync_add_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
3673 } else {
3674 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it has not set up shared memory. Treating normally.");
3675 j->dirty_at_shutdown = false;
3676 }
3677 } else {
3678 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
3679 j->dirty_at_shutdown = false;
3680 }
3681}
3682
3683void
3684job_close_shutdown_transaction(job_t j)
3685{
3686 if (j->dirty_at_shutdown) {
3687 job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
3688 if (__sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1) == -1) {
3689 job_log(j, LOG_DEBUG, "Job is now clean. Killing.");
3690 job_kill(j);
3691 }
3692 j->dirty_at_shutdown = false;
3693 }
3694}
ddbbfbc1 3695
dcace88f
A
3696void
3697job_log_children_without_exec(job_t j)
3698{
3699 pid_t *pids = NULL;
3700 size_t len = sizeof(pid_t) * get_kern_max_proc();
3701 int i = 0, kp_cnt = 0;
3702
ddbbfbc1
A
3703 if (!do_apple_internal_logging || j->anonymous || j->per_user) {
3704 return;
3705 }
3706
dcace88f 3707 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
ddbbfbc1
A
3708 return;
3709 }
dcace88f 3710 if (!job_assumes(j, (kp_cnt = proc_listchildpids(j->p, pids, len)) != -1)) {
ddbbfbc1
A
3711 goto out;
3712 }
3713
ddbbfbc1 3714 for (i = 0; i < kp_cnt; i++) {
dcace88f
A
3715 struct proc_bsdshortinfo proc;
3716 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3717 if (errno != ESRCH) {
3718 job_assumes(j, errno == 0);
3719 }
ddbbfbc1
A
3720 continue;
3721 }
dcace88f 3722 if (proc.pbsi_flags & P_EXEC) {
ddbbfbc1
A
3723 continue;
3724 }
3725
dcace88f 3726 job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
ddbbfbc1
A
3727 }
3728
3729out:
dcace88f 3730 free(pids);
ddbbfbc1
A
3731}
3732
3733void
3734job_cleanup_after_tracer(job_t j)
3735{
ddbbfbc1 3736 j->tracing_pid = 0;
dcace88f 3737 if (j->reap_after_trace) {
ddbbfbc1
A
3738 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Reaping job now that attached tracer is gone.");
3739 struct kevent kev;
3740 EV_SET(&kev, j->p, 0, 0, NOTE_EXIT, 0, 0);
dcace88f 3741
ddbbfbc1
A
3742 /* Fake a kevent to keep our logic consistent. */
3743 job_callback_proc(j, &kev);
dcace88f 3744
ddbbfbc1
A
3745 /* Normally, after getting a EVFILT_PROC event, we do garbage collection
3746 * on the root job manager. To make our fakery complete, we will do garbage
3747 * collection at the beginning of the next run loop cycle (after we're done
3748 * draining the current queue of kevents).
3749 */
dcace88f 3750 (void)job_assumes(j, kevent_mod((uintptr_t)&root_jobmgr->reboot_flags, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, root_jobmgr) != -1);
ddbbfbc1
A
3751 }
3752}
3753
3754void
3755job_callback_proc(job_t j, struct kevent *kev)
3756{
3757 bool program_changed = false;
3758 int fflags = kev->fflags;
3759
dcace88f 3760 job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
ddbbfbc1
A
3761 log_kevent_struct(LOG_DEBUG, kev, 0);
3762
dcace88f
A
3763 if (fflags & NOTE_EXIT) {
3764 if (j->p == (pid_t)kev->ident && !j->anonymous) {
3765 /* Note that the third argument to proc_pidinfo() is a magic argument for
3766 * PROC_PIDT_SHORTBSDINFO. Specifically, passing 1 means "don't fail on a zombie
3767 * PID".
3768 */
3769 struct proc_bsdshortinfo proc;
3770 if (job_assumes(j, proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0)) {
3771 if (!job_assumes(j, (pid_t)proc.pbsi_ppid == getpid())) {
ddbbfbc1
A
3772 /* Someone has attached to the process with ptrace(). There's a race here.
3773 * If we determine that we are not the parent process and then fail to attach
3774 * a kevent to the parent PID (who is probably using ptrace()), we can take that as an
3775 * indication that the parent exited between sysctl(3) and kevent_mod(). The
3776 * reparenting of the PID should be atomic to us, so in that case, we reap the
3777 * job as normal.
3778 *
3779 * Otherwise, we wait for the death of the parent tracer and then reap, just as we
3780 * would if a job died while we were sampling it at shutdown.
587e987e
A
3781 *
3782 * Note that we foolishly assume that in the process *tree* a node cannot be its
3783 * own parent. Apparently, that is not correct. If this is the case, we forsake
3784 * the process to its own devices. Let it reap itself.
ddbbfbc1 3785 */
dcace88f 3786 if (!job_assumes(j, proc.pbsi_ppid != kev->ident)) {
587e987e
A
3787 job_log(j, LOG_WARNING, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
3788 return;
3789 }
dcace88f
A
3790 if (job_assumes(j, kevent_mod(proc.pbsi_ppid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j) != -1)) {
3791 j->tracing_pid = proc.pbsi_ppid;
ddbbfbc1
A
3792 j->reap_after_trace = true;
3793 return;
3794 }
3795 }
3796 }
dcace88f
A
3797 } else if (!j->anonymous) {
3798 if (j->tracing_pid == (pid_t)kev->ident) {
ddbbfbc1
A
3799 job_cleanup_after_tracer(j);
3800
3801 return;
dcace88f 3802 } else if (j->tracing_pid && !j->reap_after_trace) {
ddbbfbc1
A
3803 /* The job exited before our sample completed. */
3804 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job has exited. Will reap after tracing PID %i exits.", j->tracing_pid);
3805 j->reap_after_trace = true;
3806 return;
3807 }
3808 }
3809 }
3810
3811 if (fflags & NOTE_EXEC) {
3812 program_changed = true;
3813
3814 if (j->anonymous) {
dcace88f
A
3815 struct proc_bsdshortinfo proc;
3816 if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
ddbbfbc1 3817 char newlabel[1000];
5b0a4722 3818
dcace88f 3819 snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);
5b0a4722 3820
ddbbfbc1
A
3821 job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);
3822 j->lastlookup = NULL;
3823 j->lastlookup_gennum = 0;
5b0a4722 3824
ddbbfbc1
A
3825 LIST_REMOVE(j, label_hash_sle);
3826 strcpy((char *)j->label, newlabel);
dcace88f
A
3827
3828 jobmgr_t where2put = root_jobmgr;
3829 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
3830 where2put = j->mgr;
3831 }
3832 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
3833 } else if (errno != ESRCH) {
3834 job_assumes(j, errno == 0);
ddbbfbc1
A
3835 }
3836 } else {
dcace88f
A
3837 if (j->spawn_reply_port) {
3838 errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3839 if (errno) {
3840 if (errno != MACH_SEND_INVALID_DEST) {
3841 (void)job_assumes(j, errno == KERN_SUCCESS);
3842 }
3843 (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
3844 }
3845
3846 j->spawn_reply_port = MACH_PORT_NULL;
3847 j->exit_status_port = MACH_PORT_NULL;
3848 }
3849
3850 if (j->xpc_service && j->did_exec) {
3851 j->xpcproxy_did_exec = true;
3852 }
3853
3854 j->did_exec = true;
3855 job_log(j, LOG_DEBUG, "Program changed");
3856 }
3857 }
ed34e3c3 3858
5b0a4722 3859 if (fflags & NOTE_FORK) {
ddbbfbc1
A
3860 job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
3861 job_log_children_without_exec(j);
5b0a4722
A
3862 }
3863
3864 if (fflags & NOTE_EXIT) {
ed34e3c3
A
3865 job_reap(j);
3866
dcace88f 3867 if (j->anonymous) {
5b0a4722
A
3868 job_remove(j);
3869 j = NULL;
dcace88f
A
3870 } else {
3871 j = job_dispatch(j, false);
ed34e3c3 3872 }
5b0a4722 3873 }
5b0a4722
A
3874}
3875
3876void
3877job_callback_timer(job_t j, void *ident)
3878{
3879 if (j == ident) {
ddbbfbc1 3880 job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
5b0a4722
A
3881 job_dispatch(j, true);
3882 } else if (&j->semaphores == ident) {
ddbbfbc1 3883 job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
5b0a4722
A
3884 job_dispatch(j, false);
3885 } else if (&j->start_interval == ident) {
ddbbfbc1 3886 job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
5b0a4722
A
3887 j->start_pending = true;
3888 job_dispatch(j, false);
3889 } else if (&j->exit_timeout == ident) {
dcace88f 3890 if (!job_assumes(j, j->p != 0)) {
ddbbfbc1
A
3891 return;
3892 }
dcace88f 3893
5b0a4722 3894 if (j->sent_sigkill) {
ddbbfbc1 3895 uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);
5b0a4722
A
3896
3897 td /= NSEC_PER_SEC;
ddbbfbc1
A
3898 td -= j->clean_kill ? 0 : j->exit_timeout;
3899
dcace88f
A
3900 job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
3901 j->workaround9359725 = true;
3902
3903 if (g_trap_sigkill_bugs) {
3904 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
3905 (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
ddbbfbc1 3906 }
ddbbfbc1 3907
5c88273d
A
3908 /* We've simulated the exit, so we have to cancel the kevent for
3909 * this job, otherwise we may get a kevent later down the road that
3910 * has a stale context pointer (if we've removed the job). Or worse,
3911 * it'll corrupt our data structures if the job still exists or the
3912 * allocation was recycled.
3913 *
3914 * If the failing process had a tracer attached to it, we need to
3915 * remove out NOTE_EXIT for that tracer too, otherwise the same
3916 * thing might happen.
3917 *
3918 * Note that, if we're not shutting down, this will result in a
3919 * zombie process just hanging around forever. But if the process
3920 * didn't exit after receiving SIGKILL, odds are it would've just
3921 * stuck around forever anyway.
3922 *
3923 * See <rdar://problem/9481630>.
3924 */
3925 kevent_mod((uintptr_t)j->p, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
3926 if (j->tracing_pid) {
3927 kevent_mod((uintptr_t)j->tracing_pid, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
3928 }
3929
dcace88f
A
3930 struct kevent bogus_exit;
3931 EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
3932 jobmgr_callback(j->mgr, &bogus_exit);
3933 } else {
3934 if (unlikely(j->debug_before_kill)) {
3935 job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
3936 (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
5b0a4722 3937 }
dcace88f
A
3938
3939 job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
3940 job_kill(j);
ed34e3c3 3941 }
5b0a4722 3942 } else {
dcace88f 3943 (void)job_assumes(j, false);
5b0a4722
A
3944 }
3945}
3946
3947void
3948job_callback_read(job_t j, int ident)
3949{
3950 if (ident == j->log_redirect_fd) {
3951 job_log_stdouterr(j);
ddbbfbc1
A
3952 } else if (ident == j->stdin_fd) {
3953 job_dispatch(j, true);
5b0a4722
A
3954 } else {
3955 socketgroup_callback(j);
3956 }
3957}
3958
3959void
3960jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
3961{
3962 jobmgr_t jmi;
3963 job_t j;
3964
3965 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3966 jobmgr_reap_bulk(jmi, kev);
3967 }
3968
ddbbfbc1 3969 if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
5b0a4722
A
3970 kev->udata = j;
3971 job_callback(j, kev);
3972 }
3973}
3974
3975void
3976jobmgr_callback(void *obj, struct kevent *kev)
3977{
3978 jobmgr_t jm = obj;
ddbbfbc1 3979 job_t ji;
5b0a4722
A
3980
3981 switch (kev->filter) {
3982 case EVFILT_PROC:
3983 jobmgr_reap_bulk(jm, kev);
ddbbfbc1 3984 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
5b0a4722
A
3985 break;
3986 case EVFILT_SIGNAL:
3987 switch (kev->ident) {
ddbbfbc1 3988 case SIGTERM:
587e987e 3989 jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
5b0a4722
A
3990 return launchd_shutdown();
3991 case SIGUSR1:
3992 return calendarinterval_callback();
ddbbfbc1
A
3993 case SIGUSR2:
3994 fake_shutdown_in_progress = true;
3995 runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
3996
3997 runtime_closelog(); /* HACK -- force 'start' time to be set */
3998
3999 if (pid1_magic) {
4000 int64_t now = runtime_get_wall_time();
4001
4002 jobmgr_log(jm, LOG_NOTICE, "Anticipatory shutdown began at: %lld.%06llu", now / USEC_PER_SEC, now % USEC_PER_SEC);
4003
4004 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
4005 if (ji->per_user && ji->p) {
dcace88f 4006 (void)job_assumes(ji, runtime_kill(ji->p, SIGUSR2) != -1);
ddbbfbc1
A
4007 }
4008 }
4009 } else {
4010 jobmgr_log(jm, LOG_NOTICE, "Anticipatory per-user launchd shutdown");
4011 }
4012
4013 return;
5b0a4722
A
4014 default:
4015 return (void)jobmgr_assumes(jm, false);
ed34e3c3 4016 }
5b0a4722
A
4017 break;
4018 case EVFILT_FS:
4019 if (kev->fflags & VQ_MOUNT) {
4020 jobmgr_dispatch_all(jm, true);
ed34e3c3 4021 }
5b0a4722 4022 jobmgr_dispatch_all_semaphores(jm);
ed34e3c3 4023 break;
5b0a4722 4024 case EVFILT_TIMER:
dcace88f 4025 if (kev->ident == (uintptr_t)&sorted_calendar_events) {
5b0a4722 4026 calendarinterval_callback();
dcace88f 4027 } else if (kev->ident == (uintptr_t)jm) {
ddbbfbc1
A
4028 jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
4029 jobmgr_still_alive_with_check(jm);
dcace88f 4030 } else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
ddbbfbc1 4031 jobmgr_do_garbage_collection(jm);
dcace88f 4032 } else if (kev->ident == (uintptr_t)&g_runtime_busy_time) {
587e987e 4033 jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
dcace88f 4034 if (jobmgr_assumes(jm, runtime_busy_cnt == 0)) {
587e987e
A
4035 return launchd_shutdown();
4036 }
ddbbfbc1
A
4037 }
4038 break;
4039 case EVFILT_VNODE:
dcace88f 4040 if (kev->ident == (uintptr_t)s_no_hang_fd) {
ddbbfbc1 4041 int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
dcace88f 4042 if (unlikely(_no_hang_fd != -1)) {
ddbbfbc1 4043 jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
dcace88f
A
4044 (void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
4045 (void)jobmgr_assumes(root_jobmgr, runtime_close(s_no_hang_fd) != -1);
ddbbfbc1
A
4046 s_no_hang_fd = _fd(_no_hang_fd);
4047 }
dcace88f 4048 } else if (pid1_magic && g_console && kev->ident == (uintptr_t)fileno(g_console)) {
ddbbfbc1 4049 int cfd = -1;
dcace88f 4050 if (launchd_assumes((cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1)) {
ddbbfbc1 4051 _fd(cfd);
dcace88f 4052 if (!launchd_assumes((g_console = fdopen(cfd, "w")) != NULL)) {
ddbbfbc1
A
4053 close(cfd);
4054 }
4055 }
5b0a4722 4056 }
ed34e3c3
A
4057 break;
4058 default:
5b0a4722 4059 return (void)jobmgr_assumes(jm, false);
ed34e3c3 4060 }
5b0a4722 4061}
ed34e3c3 4062
5b0a4722
A
4063void
4064job_callback(void *obj, struct kevent *kev)
4065{
4066 job_t j = obj;
4067
4068 job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
4069
4070 switch (kev->filter) {
4071 case EVFILT_PROC:
ddbbfbc1 4072 return job_callback_proc(j, kev);
5b0a4722 4073 case EVFILT_TIMER:
ddbbfbc1 4074 return job_callback_timer(j, (void *) kev->ident);
5b0a4722
A
4075 case EVFILT_VNODE:
4076 return semaphoreitem_callback(j, kev);
4077 case EVFILT_READ:
ddbbfbc1 4078 return job_callback_read(j, (int) kev->ident);
5b0a4722
A
4079 case EVFILT_MACHPORT:
4080 return (void)job_dispatch(j, true);
4081 default:
4082 return (void)job_assumes(j, false);
ed34e3c3
A
4083 }
4084}
4085
4086void
5b0a4722 4087job_start(job_t j)
ed34e3c3 4088{
ddbbfbc1 4089 uint64_t td;
ed34e3c3
A
4090 int spair[2];
4091 int execspair[2];
5b0a4722 4092 int oepair[2];
ed34e3c3
A
4093 char nbuf[64];
4094 pid_t c;
4095 bool sipc = false;
f271391c 4096 u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC;
ddbbfbc1 4097
5b0a4722 4098 if (!job_assumes(j, j->mgr != NULL)) {
ed34e3c3 4099 return;
5b0a4722 4100 }
ddbbfbc1
A
4101
4102 if (unlikely(job_active(j))) {
ed34e3c3
A
4103 job_log(j, LOG_DEBUG, "Already started");
4104 return;
5b0a4722 4105 }
ddbbfbc1 4106
5b0a4722
A
4107 /*
4108 * Some users adjust the wall-clock and then expect software to not notice.
ddbbfbc1
A
4109 * Therefore, launchd must use an absolute clock instead of the wall clock
4110 * wherever possible.
5b0a4722 4111 */
ddbbfbc1 4112 td = runtime_get_nanoseconds_since(j->start_time);
5b0a4722 4113 td /= NSEC_PER_SEC;
ddbbfbc1 4114
5b0a4722
A
4115 if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
4116 time_t respawn_delta = j->min_run_time - (uint32_t)td;
dcace88f 4117
5b0a4722
A
4118 /*
4119 * We technically should ref-count throttled jobs to prevent idle exit,
4120 * but we're not directly tracking the 'throttled' state at the moment.
4121 */
dcace88f
A
4122 int level = LOG_WARNING;
4123 if (!j->did_exec && ((j->fail_cnt - 1) % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
4124 level = LOG_DEBUG;
4125 }
4126
4127 job_log(j, level, "Throttling respawn: Will start in %ld seconds", respawn_delta);
4128 (void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
5b0a4722 4129 job_ignore(j);
ed34e3c3
A
4130 return;
4131 }
ddbbfbc1
A
4132
4133 if (likely(!j->legacy_mach_job)) {
4134 sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_special_privileges;
5b0a4722 4135 }
ed34e3c3 4136
dcace88f
A
4137 if (sipc) {
4138 (void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
5b0a4722 4139 }
ddbbfbc1 4140
dcace88f 4141 (void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);
ddbbfbc1
A
4142
4143 if (likely(!j->legacy_mach_job) && job_assumes(j, pipe(oepair) != -1)) {
5b0a4722 4144 j->log_redirect_fd = _fd(oepair[0]);
dcace88f
A
4145 (void)job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
4146 (void)job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
ed34e3c3 4147 }
ddbbfbc1 4148
5b0a4722 4149 switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
ed34e3c3
A
4150 case -1:
4151 job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
dcace88f 4152 (void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j) != -1);
ddbbfbc1
A
4153 job_ignore(j);
4154
dcace88f
A
4155 (void)job_assumes(j, runtime_close(execspair[0]) == 0);
4156 (void)job_assumes(j, runtime_close(execspair[1]) == 0);
ed34e3c3 4157 if (sipc) {
dcace88f
A
4158 (void)job_assumes(j, runtime_close(spair[0]) == 0);
4159 (void)job_assumes(j, runtime_close(spair[1]) == 0);
ed34e3c3 4160 }
ddbbfbc1 4161 if (likely(!j->legacy_mach_job)) {
dcace88f
A
4162 (void)job_assumes(j, runtime_close(oepair[0]) != -1);
4163 (void)job_assumes(j, runtime_close(oepair[1]) != -1);
fe044cc9
A
4164 j->log_redirect_fd = 0;
4165 }
ed34e3c3
A
4166 break;
4167 case 0:
ddbbfbc1 4168 if (unlikely(_vproc_post_fork_ping())) {
5b0a4722
A
4169 _exit(EXIT_FAILURE);
4170 }
4171 if (!j->legacy_mach_job) {
dcace88f
A
4172 (void)job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
4173 (void)job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
4174 (void)job_assumes(j, runtime_close(oepair[1]) != -1);
5b0a4722 4175 }
dcace88f 4176 (void)job_assumes(j, runtime_close(execspair[0]) == 0);
ed34e3c3
A
4177 /* wait for our parent to say they've attached a kevent to us */
4178 read(_fd(execspair[1]), &c, sizeof(c));
ddbbfbc1 4179
ed34e3c3 4180 if (sipc) {
dcace88f 4181 (void)job_assumes(j, runtime_close(spair[0]) == 0);
5b0a4722 4182 snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
ed34e3c3
A
4183 setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
4184 }
5b0a4722 4185 job_start_child(j);
ed34e3c3
A
4186 break;
4187 default:
ddbbfbc1
A
4188 j->start_time = runtime_get_opaque_time();
4189
5b0a4722 4190 job_log(j, LOG_DEBUG, "Started as PID: %u", c);
ddbbfbc1
A
4191
4192 j->did_exec = false;
dcace88f 4193 j->xpcproxy_did_exec = false;
ddbbfbc1 4194 j->checkedin = false;
5b0a4722 4195 j->start_pending = false;
ddbbfbc1
A
4196 j->reaped = false;
4197 j->crashed = false;
4198 j->stopped = false;
dcace88f 4199 if (j->needs_kickoff) {
ddbbfbc1
A
4200 j->needs_kickoff = false;
4201
dcace88f 4202 if (SLIST_EMPTY(&j->semaphores)) {
ddbbfbc1
A
4203 j->ondemand = false;
4204 }
4205 }
4206
dcace88f 4207 if (j->has_console) {
ddbbfbc1
A
4208 g_wsp = c;
4209 }
4210
5b0a4722 4211 runtime_add_ref();
ed34e3c3 4212 total_children++;
5b0a4722 4213 LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
ddbbfbc1
A
4214
4215 if (likely(!j->legacy_mach_job)) {
dcace88f 4216 (void)job_assumes(j, runtime_close(oepair[1]) != -1);
5b0a4722
A
4217 }
4218 j->p = c;
dcace88f
A
4219
4220 j->mgr->normal_active_cnt++;
ddbbfbc1 4221 j->fork_fd = _fd(execspair[0]);
dcace88f 4222 (void)job_assumes(j, runtime_close(execspair[1]) == 0);
ed34e3c3 4223 if (sipc) {
dcace88f 4224 (void)job_assumes(j, runtime_close(spair[1]) == 0);
ed34e3c3
A
4225 ipc_open(_fd(spair[0]), j);
4226 }
5b0a4722
A
4227 if (job_assumes(j, kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1)) {
4228 job_ignore(j);
ed34e3c3 4229 } else {
5b0a4722 4230 job_reap(j);
ed34e3c3 4231 }
ddbbfbc1
A
4232
4233 j->wait4debugger_oneshot = false;
ed34e3c3 4234
ddbbfbc1 4235 struct envitem *ei = NULL, *et = NULL;
dcace88f
A
4236 SLIST_FOREACH_SAFE(ei, &j->env, sle, et) {
4237 if (ei->one_shot) {
ddbbfbc1
A
4238 SLIST_REMOVE(&j->env, ei, envitem, sle);
4239 }
4240 }
4241
4242 if (likely(!j->stall_before_exec)) {
5b0a4722 4243 job_uncork_fork(j);
ed34e3c3
A
4244 }
4245 break;
4246 }
4247}
4248
5b0a4722
A
4249void
4250job_start_child(job_t j)
ed34e3c3 4251{
ddbbfbc1 4252 typeof(posix_spawn) *psf;
ed34e3c3
A
4253 const char *file2exec = "/usr/libexec/launchproxy";
4254 const char **argv;
5b0a4722 4255 posix_spawnattr_t spattr;
ed34e3c3
A
4256 int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
4257 glob_t g;
5b0a4722
A
4258 short spflags = POSIX_SPAWN_SETEXEC;
4259 size_t binpref_out_cnt = 0;
ddbbfbc1 4260 size_t i;
5b0a4722 4261
dcace88f 4262 (void)job_assumes(j, posix_spawnattr_init(&spattr) == 0);
5b0a4722 4263
ed34e3c3
A
4264 job_setup_attributes(j);
4265
ddbbfbc1 4266 if (unlikely(j->argv && j->globargv)) {
ed34e3c3
A
4267 g.gl_offs = 1;
4268 for (i = 0; i < j->argc; i++) {
5b0a4722 4269 if (i > 0) {
ed34e3c3 4270 gflags |= GLOB_APPEND;
5b0a4722 4271 }
ed34e3c3
A
4272 if (glob(j->argv[i], gflags, NULL, &g) != 0) {
4273 job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
4274 exit(EXIT_FAILURE);
4275 }
4276 }
4277 g.gl_pathv[0] = (char *)file2exec;
4278 argv = (const char **)g.gl_pathv;
ddbbfbc1 4279 } else if (likely(j->argv)) {
ed34e3c3
A
4280 argv = alloca((j->argc + 2) * sizeof(char *));
4281 argv[0] = file2exec;
5b0a4722 4282 for (i = 0; i < j->argc; i++) {
ed34e3c3 4283 argv[i + 1] = j->argv[i];
5b0a4722 4284 }
ed34e3c3
A
4285 argv[i + 1] = NULL;
4286 } else {
4287 argv = alloca(3 * sizeof(char *));
4288 argv[0] = file2exec;
4289 argv[1] = j->prog;
4290 argv[2] = NULL;
4291 }
4292
ddbbfbc1 4293 if (likely(!j->inetcompat)) {
ed34e3c3 4294 argv++;
5b0a4722
A
4295 }
4296
ddbbfbc1 4297 if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
dcace88f
A
4298 if (!j->legacy_LS_job) {
4299 job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
4300 }
5b0a4722
A
4301 spflags |= POSIX_SPAWN_START_SUSPENDED;
4302 }
4303
dcace88f
A
4304 if (unlikely(j->disable_aslr)) {
4305 spflags |= _POSIX_SPAWN_DISABLE_ASLR;
4306 }
4307 spflags |= j->pstype;
4308
4309 (void)job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);
5b0a4722 4310
ddbbfbc1 4311 if (unlikely(j->j_binpref_cnt)) {
dcace88f
A
4312 (void)job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
4313 (void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
5b0a4722
A
4314 }
4315
f36da725 4316#if HAVE_QUARANTINE
5b0a4722
A
4317 if (j->quarantine_data) {
4318 qtn_proc_t qp;
ed34e3c3 4319
5b0a4722
A
4320 if (job_assumes(j, qp = qtn_proc_alloc())) {
4321 if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
dcace88f 4322 (void)job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
5b0a4722
A
4323 }
4324 }
4325 }
f36da725 4326#endif
ed34e3c3 4327
f36da725 4328#if HAVE_SANDBOX
5b0a4722
A
4329 if (j->seatbelt_profile) {
4330 char *seatbelt_err_buf = NULL;
ed34e3c3 4331
5b0a4722
A
4332 if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
4333 if (seatbelt_err_buf) {
4334 job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
4335 }
4336 goto out_bad;
4337 }
ed34e3c3 4338 }
f36da725 4339#endif
ed34e3c3 4340
ddbbfbc1
A
4341 psf = j->prog ? posix_spawn : posix_spawnp;
4342
4343 if (likely(!j->inetcompat)) {
4344 file2exec = j->prog ? j->prog : argv[0];
ed34e3c3
A
4345 }
4346
dcace88f
A
4347 errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);
4348 if (errno != EBADARCH) {
4349 int level = LOG_ERR;
4350 if ((j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
4351 level = LOG_DEBUG;
4352 }
4353 job_log_error(j, level, "posix_spawn(\"%s\", ...)", file2exec);
4354 errno = EXIT_FAILURE;
4355 }
4356
ef398931 4357#if HAVE_SANDBOX
5b0a4722 4358out_bad:
ef398931 4359#endif
dcace88f 4360 _exit(errno);
ed34e3c3
A
4361}
4362
5b0a4722
A
4363void
4364jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
ed34e3c3 4365{
5b0a4722 4366 launch_data_t tmp;
ed34e3c3 4367 struct envitem *ei;
5b0a4722 4368 job_t ji;
ed34e3c3 4369
5b0a4722
A
4370 if (jm->parentmgr) {
4371 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4372 } else {
4373 char **tmpenviron = environ;
4374 for (; *tmpenviron; tmpenviron++) {
4375 char envkey[1024];
4376 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4377 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
4378 strncpy(envkey, *tmpenviron, sizeof(envkey));
4379 *(strchr(envkey, '=')) = '\0';
4380 launch_data_dict_insert(dict, s, envkey);
4381 }
4382 }
ed34e3c3 4383
5b0a4722
A
4384 LIST_FOREACH(ji, &jm->jobs, sle) {
4385 SLIST_FOREACH(ei, &ji->global_env, sle) {
4386 if ((tmp = launch_data_new_string(ei->value))) {
4387 launch_data_dict_insert(dict, tmp, ei->key);
4388 }
4389 }
4390 }
ed34e3c3
A
4391}
4392
4393void
5b0a4722 4394jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
ed34e3c3 4395{
ed34e3c3 4396 struct envitem *ei;
5b0a4722 4397 job_t ji;
ed34e3c3 4398
5b0a4722
A
4399 if (jm->parentmgr) {
4400 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4401 }
ed34e3c3 4402
ddbbfbc1 4403 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
5b0a4722
A
4404 SLIST_FOREACH(ei, &ji->global_env, sle) {
4405 setenv(ei->key, ei->value, 1);
ed34e3c3 4406 }
5b0a4722
A
4407 }
4408}
ed34e3c3 4409
5b0a4722 4410void
ddbbfbc1 4411job_log_pids_with_weird_uids(job_t j)
5b0a4722 4412{
dcace88f
A
4413 size_t len = sizeof(pid_t) * get_kern_max_proc();
4414 pid_t *pids = NULL;
5b0a4722 4415 uid_t u = j->mach_uid;
dcace88f
A
4416 int i = 0, kp_cnt = 0;
4417
ddbbfbc1 4418 if (!do_apple_internal_logging) {
f36da725
A
4419 return;
4420 }
ddbbfbc1 4421
dcace88f
A
4422 pids = malloc(len);
4423 if (!job_assumes(j, pids != NULL)) {
5b0a4722
A
4424 return;
4425 }
ddbbfbc1
A
4426
4427 runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);
4428
dcace88f
A
4429 /* libproc actually has some serious performance drawbacks when used over sysctl(3) in
4430 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
4431 * one kernel call, libproc requires that we get a list of PIDs we're interested in
4432 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
4433 * struct back in a single call for each one.
4434 *
4435 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
4436 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
4437 * libproc could go stale before we call proc_pidinfo().
4438 *
4439 * Note that proc_list*() APIs return the number of PIDs given back, not the number
4440 * of bytes written to the buffer.
4441 */
4442 if (!job_assumes(j, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
5b0a4722 4443 goto out;
ed34e3c3
A
4444 }
4445
5b0a4722 4446 for (i = 0; i < kp_cnt; i++) {
dcace88f
A
4447 struct proc_bsdshortinfo proc;
4448 /* We perhaps should not log a bug here if we get ESRCH back, due to the race
4449 * detailed above.
4450 */
4451 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
4452 if (errno != ESRCH) {
4453 job_assumes(j, errno == 0);
4454 }
4455 continue;
4456 }
4457
4458 uid_t i_euid = proc.pbsi_uid;
4459 uid_t i_uid = proc.pbsi_ruid;
4460 uid_t i_svuid = proc.pbsi_svuid;
4461 pid_t i_pid = pids[i];
5b0a4722
A
4462
4463 if (i_euid != u && i_uid != u && i_svuid != u) {
4464 continue;
4465 }
ed34e3c3 4466
dcace88f 4467 job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);
5b0a4722
A
4468
4469/* Temporarily disabled due to 5423935 and 4946119. */
4470#if 0
4471 /* Ask the accountless process to exit. */
dcace88f 4472 (void)job_assumes(j, runtime_kill(i_pid, SIGTERM) != -1);
5b0a4722 4473#endif
ed34e3c3 4474 }
5b0a4722
A
4475
4476out:
dcace88f 4477 free(pids);
5b0a4722
A
4478}
4479
5c88273d
A
4480static struct passwd *
4481job_getpwnam(job_t j, const char *name)
4482{
4483 /*
4484 * methodology for system daemons
4485 *
4486 * first lookup user record without any opendirectoryd interaction,
4487 * we don't know what interprocess dependencies might be in flight.
4488 * if that fails, we re-enable opendirectoryd interaction and
4489 * re-issue the lookup. We have to disable the libinfo L1 cache
4490 * otherwise libinfo will return the negative cache entry on the retry
4491 */
4492
4493#if !TARGET_OS_EMBEDDED
4494 struct passwd *pw = NULL;
4495
4496 if (pid1_magic && j->mgr == root_jobmgr) {
4497 si_search_module_set_flags("ds", 1 /* SEARCH_MODULE_FLAG_DISABLED */);
4498 gL1CacheEnabled = false;
4499
4500 pw = getpwnam(name);
4501
4502 si_search_module_set_flags("ds", 0);
4503 }
4504
4505 if (pw == NULL) {
4506 pw = getpwnam(name);
4507 }
4508
4509 return pw;
4510#else
4511 return getpwnam(name);
4512#endif
4513}
4514
4515static struct group *
4516job_getgrnam(job_t j, const char *name)
4517{
4518#if !TARGET_OS_EMBEDDED
4519 struct group *gr = NULL;
4520
4521 if (pid1_magic && j->mgr == root_jobmgr) {
4522 si_search_module_set_flags("ds", 1 /* SEARCH_MODULE_FLAG_DISABLED */);
4523 gL1CacheEnabled = false;
4524
4525 gr = getgrnam(name);
4526
4527 si_search_module_set_flags("ds", 0);
4528 }
4529
4530 if (gr == NULL) {
4531 gr = getgrnam(name);
4532 }
4533
4534 return gr;
4535#else
4536#pragma unused (j)
4537 return getgrnam(name);
4538#endif
4539}
4540
cf0bacfd 4541void
ddbbfbc1 4542job_postfork_test_user(job_t j)
cf0bacfd 4543{
ddbbfbc1 4544 /* This function is all about 5201578 */
cf0bacfd 4545
ddbbfbc1
A
4546 const char *home_env_var = getenv("HOME");
4547 const char *user_env_var = getenv("USER");
4548 const char *logname_env_var = getenv("LOGNAME");
4549 uid_t tmp_uid, local_uid = getuid();
4550 gid_t tmp_gid, local_gid = getgid();
4551 char shellpath[PATH_MAX];
4552 char homedir[PATH_MAX];
4553 char loginname[2000];
4554 struct passwd *pwe;
4555
4556
4557 if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
4558 && strcmp(user_env_var, logname_env_var) == 0)) {
4559 goto out_bad;
cf0bacfd
A
4560 }
4561
5c88273d 4562 if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
ddbbfbc1
A
4563 job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
4564 goto out_bad;
cf0bacfd 4565 }
ddbbfbc1
A
4566
4567 /*
4568 * We must copy the results of getpw*().
4569 *
4570 * Why? Because subsequent API calls may call getpw*() as a part of
4571 * their implementation. Since getpw*() returns a [now thread scoped]
4572 * global, we must therefore cache the results before continuing.
4573 */
4574
4575 tmp_uid = pwe->pw_uid;
4576 tmp_gid = pwe->pw_gid;
4577
4578 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4579 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4580 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4581
4582 if (strcmp(loginname, logname_env_var) != 0) {
4583 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
4584 goto out_bad;
4585 }
4586 if (strcmp(homedir, home_env_var) != 0) {
4587 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
4588 goto out_bad;
4589 }
4590 if (local_uid != tmp_uid) {
4591 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4592 'U', tmp_uid, local_uid);
4593 goto out_bad;
4594 }
4595 if (local_gid != tmp_gid) {
4596 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4597 'G', tmp_gid, local_gid);
4598 goto out_bad;
4599 }
4600
4601 return;
4602out_bad:
4603#if 0
dcace88f 4604 (void)job_assumes(j, runtime_kill(getppid(), SIGTERM) != -1);
ddbbfbc1
A
4605 _exit(EXIT_FAILURE);
4606#else
4607 job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
ef398931 4608#endif
ddbbfbc1 4609}
cf0bacfd 4610
5b0a4722
A
4611void
4612job_postfork_become_user(job_t j)
4613{
4614 char loginname[2000];
4615 char tmpdirpath[PATH_MAX];
4616 char shellpath[PATH_MAX];
4617 char homedir[PATH_MAX];
4618 struct passwd *pwe;
4619 size_t r;
4620 gid_t desired_gid = -1;
4621 uid_t desired_uid = -1;
4622
4623 if (getuid() != 0) {
ddbbfbc1 4624 return job_postfork_test_user(j);
5b0a4722
A
4625 }
4626
4627 /*
4628 * I contend that having UID == 0 and GID != 0 is of dubious value.
4629 * Nevertheless, this used to work in Tiger. See: 5425348
4630 */
4631 if (j->groupname && !j->username) {
4632 j->username = "root";
4633 }
4634
4635 if (j->username) {
5c88273d 4636 if ((pwe = job_getpwnam(j, j->username)) == NULL) {
5b0a4722
A
4637 job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
4638 _exit(EXIT_FAILURE);
4639 }
4640 } else if (j->mach_uid) {
4641 if ((pwe = getpwuid(j->mach_uid)) == NULL) {
4642 job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
ddbbfbc1 4643 job_log_pids_with_weird_uids(j);
5b0a4722
A
4644 _exit(EXIT_FAILURE);
4645 }
4646 } else {
4647 return;
4648 }
4649
4650 /*
4651 * We must copy the results of getpw*().
4652 *
4653 * Why? Because subsequent API calls may call getpw*() as a part of
4654 * their implementation. Since getpw*() returns a [now thread scoped]
4655 * global, we must therefore cache the results before continuing.
4656 */
4657
4658 desired_uid = pwe->pw_uid;
4659 desired_gid = pwe->pw_gid;
4660
4661 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4662 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4663 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4664
ddbbfbc1 4665 if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
5b0a4722
A
4666 job_log(j, LOG_ERR, "Expired account");
4667 _exit(EXIT_FAILURE);
4668 }
4669
4670
ddbbfbc1 4671 if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
5b0a4722 4672 job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
ddbbfbc1 4673 } else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
5b0a4722 4674 job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
ed34e3c3 4675 }
5b0a4722 4676
ed34e3c3 4677 if (j->groupname) {
5b0a4722
A
4678 struct group *gre;
4679
5c88273d 4680 if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
ed34e3c3 4681 job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
5b0a4722 4682 _exit(EXIT_FAILURE);
ed34e3c3 4683 }
5b0a4722
A
4684
4685 desired_gid = gre->gr_gid;
4686 }
4687
4688 if (!job_assumes(j, setlogin(loginname) != -1)) {
4689 _exit(EXIT_FAILURE);
ed34e3c3 4690 }
ed34e3c3 4691
5b0a4722
A
4692 if (!job_assumes(j, setgid(desired_gid) != -1)) {
4693 _exit(EXIT_FAILURE);
4694 }
ed34e3c3 4695
5b0a4722
A
4696 /*
4697 * The kernel team and the DirectoryServices team want initgroups()
4698 * called after setgid(). See 4616864 for more information.
4699 */
ed34e3c3 4700
ddbbfbc1
A
4701 if (likely(!j->no_init_groups)) {
4702 #if 1
5b0a4722
A
4703 if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
4704 _exit(EXIT_FAILURE);
ed34e3c3 4705 }
ddbbfbc1
A
4706 #else
4707 /* Do our own little initgroups(). We do this to guarantee that we're
4708 * always opted into dynamic group resolution in the kernel. initgroups(3)
4709 * does not make this guarantee.
4710 */
4711 int groups[NGROUPS], ngroups;
4712
4713 /* A failure here isn't fatal, and we'll still get data we can use. */
dcace88f 4714 (void)job_assumes(j, getgrouplist(j->username, desired_gid, groups, &ngroups) != -1);
ddbbfbc1 4715
dcace88f 4716 if (!job_assumes(j, syscall(SYS_initgroups, ngroups, groups, desired_uid) != -1)) {
ddbbfbc1
A
4717 _exit(EXIT_FAILURE);
4718 }
4719 #endif
ed34e3c3 4720 }
5b0a4722
A
4721
4722 if (!job_assumes(j, setuid(desired_uid) != -1)) {
4723 _exit(EXIT_FAILURE);
ed34e3c3 4724 }
5b0a4722
A
4725
4726 r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));
4727
ddbbfbc1 4728 if (likely(r > 0 && r < sizeof(tmpdirpath))) {
5b0a4722 4729 setenv("TMPDIR", tmpdirpath, 0);
ed34e3c3 4730 }
5b0a4722
A
4731
4732 setenv("SHELL", shellpath, 0);
4733 setenv("HOME", homedir, 0);
4734 setenv("USER", loginname, 0);
4735 setenv("LOGNAME", loginname, 0);
4736}
4737
4738void
4739job_setup_attributes(job_t j)
4740{
4741 struct limititem *li;
4742 struct envitem *ei;
4743
ddbbfbc1 4744 if (unlikely(j->setnice)) {
dcace88f 4745 (void)job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
5b0a4722
A
4746 }
4747
4748 SLIST_FOREACH(li, &j->limits, sle) {
4749 struct rlimit rl;
4750
4751 if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
4752 continue;
4753 }
4754
4755 if (li->sethard) {
4756 rl.rlim_max = li->lim.rlim_max;
4757 }
4758 if (li->setsoft) {
4759 rl.rlim_cur = li->lim.rlim_cur;
4760 }
4761
4762 if (setrlimit(li->which, &rl) == -1) {
4763 job_log_error(j, LOG_WARNING, "setrlimit()");
ed34e3c3
A
4764 }
4765 }
4766
ddbbfbc1 4767 if (unlikely(!j->inetcompat && j->session_create)) {
5b0a4722
A
4768 launchd_SessionCreate();
4769 }
4770
ddbbfbc1 4771 if (unlikely(j->low_pri_io)) {
dcace88f 4772 (void)job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
5b0a4722 4773 }
ddbbfbc1 4774 if (unlikely(j->rootdir)) {
dcace88f
A
4775 (void)job_assumes(j, chroot(j->rootdir) != -1);
4776 (void)job_assumes(j, chdir(".") != -1);
5b0a4722
A
4777 }
4778
4779 job_postfork_become_user(j);
4780
ddbbfbc1 4781 if (unlikely(j->workingdir)) {
dcace88f 4782 (void)job_assumes(j, chdir(j->workingdir) != -1);
5b0a4722
A
4783 }
4784
ddbbfbc1 4785 if (unlikely(j->setmask)) {
5b0a4722
A
4786 umask(j->mask);
4787 }
4788
ddbbfbc1 4789 if (j->stdin_fd) {
dcace88f 4790 (void)job_assumes(j, dup2(j->stdin_fd, STDIN_FILENO) != -1);
ddbbfbc1
A
4791 } else {
4792 job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
4793 }
4794 job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
4795 job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
ed34e3c3 4796
5b0a4722
A
4797 jobmgr_setup_env_from_other_jobs(j->mgr);
4798
4799 SLIST_FOREACH(ei, &j->env, sle) {
ed34e3c3 4800 setenv(ei->key, ei->value, 1);
5b0a4722
A
4801 }
4802
dcace88f 4803 if (do_apple_internal_logging) {
ddbbfbc1
A
4804 setenv(LAUNCHD_DO_APPLE_INTERNAL_LOGGING, "true", 1);
4805 }
4806
4807#if !TARGET_OS_EMBEDDED
dcace88f
A
4808 if (j->jetsam_properties) {
4809 (void)job_assumes(j, proc_setpcontrol(PROC_SETPC_TERMINATE) == 0);
ddbbfbc1
A
4810 }
4811#endif
4812
4813#if TARGET_OS_EMBEDDED
dcace88f 4814 if (j->main_thread_priority != 0) {
ddbbfbc1
A
4815 struct sched_param params;
4816 bzero(&params, sizeof(params));
4817 params.sched_priority = j->main_thread_priority;
dcace88f 4818 (void)job_assumes(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params) != -1);
ddbbfbc1
A
4819 }
4820#endif
4821
5b0a4722
A
4822 /*
4823 * We'd like to call setsid() unconditionally, but we have reason to
4824 * believe that prevents launchd from being able to send signals to
4825 * setuid children. We'll settle for process-groups.
4826 */
4827 if (getppid() != 1) {
dcace88f 4828 (void)job_assumes(j, setpgid(0, 0) != -1);
5b0a4722 4829 } else {
dcace88f 4830 (void)job_assumes(j, setsid() != -1);
5b0a4722
A
4831 }
4832}
4833
4834void
4835job_setup_fd(job_t j, int target_fd, const char *path, int flags)
4836{
4837 int fd;
4838
4839 if (!path) {
4840 return;
4841 }
4842
4843 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
4844 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
4845 return;
4846 }
ed34e3c3 4847
dcace88f
A
4848 (void)job_assumes(j, dup2(fd, target_fd) != -1);
4849 (void)job_assumes(j, runtime_close(fd) == 0);
ed34e3c3
A
4850}
4851
4852int
5b0a4722 4853dir_has_files(job_t j, const char *path)
ed34e3c3
A
4854{
4855 DIR *dd = opendir(path);
4856 struct dirent *de;
4857 bool r = 0;
4858
ddbbfbc1 4859 if (unlikely(!dd)) {
ed34e3c3 4860 return -1;
5b0a4722 4861 }
ed34e3c3
A
4862
4863 while ((de = readdir(dd))) {
4864 if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
4865 r = 1;
4866 break;
4867 }
4868 }
4869
dcace88f 4870 (void)job_assumes(j, closedir(dd) == 0);
ed34e3c3
A
4871 return r;
4872}
4873
4874void
5b0a4722 4875calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
ed34e3c3 4876{
5b0a4722
A
4877 struct calendarinterval *ci_iter, *ci_prev = NULL;
4878 time_t later, head_later;
ed34e3c3
A
4879
4880 later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);
4881
4882 if (ci->when.tm_wday != -1) {
4883 time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);
4884
4885 if (ci->when.tm_mday == -1) {
4886 later = otherlater;
4887 } else {
4888 later = later < otherlater ? later : otherlater;
4889 }
4890 }
4891
5b0a4722
A
4892 ci->when_next = later;
4893
4894 LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
4895 if (ci->when_next < ci_iter->when_next) {
4896 LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
4897 break;
4898 }
4899
4900 ci_prev = ci_iter;
ed34e3c3 4901 }
ed34e3c3 4902
5b0a4722
A
4903 if (ci_iter == NULL) {
4904 /* ci must want to fire after every other timer, or there are no timers */
ed34e3c3 4905
5b0a4722
A
4906 if (LIST_EMPTY(&sorted_calendar_events)) {
4907 LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
ed34e3c3 4908 } else {
5b0a4722 4909 LIST_INSERT_AFTER(ci_prev, ci, global_sle);
ed34e3c3
A
4910 }
4911 }
ed34e3c3 4912
5b0a4722 4913 head_later = LIST_FIRST(&sorted_calendar_events)->when_next;
ed34e3c3 4914
5b0a4722
A
4915 if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
4916 char time_string[100];
4917 size_t time_string_len;
ed34e3c3 4918
5b0a4722
A
4919 ctime_r(&later, time_string);
4920 time_string_len = strlen(time_string);
ed34e3c3 4921
ddbbfbc1 4922 if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
5b0a4722
A
4923 time_string[time_string_len - 1] = '\0';
4924 }
ed34e3c3 4925
5b0a4722
A
4926 job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
4927 }
ed34e3c3
A
4928}
4929
4930void
5b0a4722 4931extract_rcsid_substr(const char *i, char *o, size_t osz)
ed34e3c3 4932{
5b0a4722 4933 char *rcs_rev_tmp = strchr(i, ' ');
ed34e3c3 4934
5b0a4722
A
4935 if (!rcs_rev_tmp) {
4936 strlcpy(o, i, osz);
4937 } else {
4938 strlcpy(o, rcs_rev_tmp + 1, osz);
4939 rcs_rev_tmp = strchr(o, ' ');
4940 if (rcs_rev_tmp) {
4941 *rcs_rev_tmp = '\0';
4942 }
4943 }
ed34e3c3
A
4944}
4945
5b0a4722 4946void
ddbbfbc1 4947jobmgr_log_bug(jobmgr_t jm, unsigned int line)
ed34e3c3 4948{
ddbbfbc1 4949 static const char *file;
5b0a4722 4950 int saved_errno = errno;
5b0a4722 4951 char buf[100];
ed34e3c3 4952
ddbbfbc1
A
4953 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4954
4955 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
ed34e3c3 4956
5b0a4722 4957 if (!file) {
ddbbfbc1
A
4958 file = strrchr(__FILE__, '/');
4959 if (!file) {
4960 file = __FILE__;
4961 } else {
4962 file += 1;
4963 }
5b0a4722 4964 }
ed34e3c3 4965
ddbbfbc1
A
4966 /* the only time 'jm' should not be set is if setting up the first bootstrap fails for some reason */
4967 if (likely(jm)) {
4968 jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4969 } else {
4970 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4971 }
5b0a4722 4972}
ed34e3c3
A
4973
4974void
ddbbfbc1 4975job_log_bug(job_t j, unsigned int line)
ed34e3c3 4976{
ddbbfbc1 4977 static const char *file;
5b0a4722 4978 int saved_errno = errno;
5b0a4722 4979 char buf[100];
ed34e3c3 4980
ddbbfbc1
A
4981 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4982
4983 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
ed34e3c3 4984
5b0a4722 4985 if (!file) {
ddbbfbc1
A
4986 file = strrchr(__FILE__, '/');
4987 if (!file) {
4988 file = __FILE__;
4989 } else {
4990 file += 1;
4991 }
ed34e3c3 4992 }
5b0a4722 4993
ddbbfbc1
A
4994 if (likely(j)) {
4995 job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4996 } else {
4997 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4998 }
ed34e3c3
A
4999}
5000
5001void
5b0a4722 5002job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
ed34e3c3 5003{
ddbbfbc1
A
5004 const char *label2use = j ? j->label : "com.apple.launchd.NULL";
5005 const char *mgr2use = j ? j->mgr->name : "NULL";
5006 struct runtime_syslog_attr attr = { g_my_label, label2use, mgr2use, pri, getuid(), getpid(), j ? j->p : 0 };
5b0a4722
A
5007 char *newmsg;
5008 int oldmask = 0;
5009 size_t newmsgsz;
ed34e3c3 5010
5b0a4722
A
5011 /*
5012 * Hack: If bootstrap_port is set, we must be on the child side of a
5013 * fork(), but before the exec*(). Let's route the log message back to
5014 * launchd proper.
5015 */
5016 if (bootstrap_port) {
5017 return _vproc_logv(pri, err, msg, ap);
5018 }
ed34e3c3 5019
5b0a4722
A
5020 newmsgsz = strlen(msg) + 200;
5021 newmsg = alloca(newmsgsz);
ed34e3c3 5022
5b0a4722 5023 if (err) {
dcace88f 5024#if !TARGET_OS_EMBEDDED
5b0a4722 5025 snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
dcace88f 5026#else
ddbbfbc1 5027 snprintf(newmsg, newmsgsz, "(%s) %s: %s", label2use, msg, strerror(err));
dcace88f 5028#endif
5b0a4722 5029 } else {
dcace88f 5030#if !TARGET_OS_EMBEDDED
5b0a4722 5031 snprintf(newmsg, newmsgsz, "%s", msg);
dcace88f 5032#else
ddbbfbc1 5033 snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
dcace88f 5034#endif
5b0a4722 5035 }
ed34e3c3 5036
dcace88f 5037 if (j && unlikely(j->debug)) {
5b0a4722
A
5038 oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
5039 }
ed34e3c3 5040
5b0a4722 5041 runtime_vsyslog(&attr, newmsg, ap);
ed34e3c3 5042
dcace88f 5043 if (j && unlikely(j->debug)) {
5b0a4722 5044 setlogmask(oldmask);
ed34e3c3
A
5045 }
5046}
5047
5048void
5b0a4722 5049job_log_error(job_t j, int pri, const char *msg, ...)
ed34e3c3 5050{
5b0a4722 5051 va_list ap;
ed34e3c3 5052
5b0a4722
A
5053 va_start(ap, msg);
5054 job_logv(j, pri, errno, msg, ap);
5055 va_end(ap);
ed34e3c3
A
5056}
5057
5b0a4722
A
5058void
5059job_log(job_t j, int pri, const char *msg, ...)
ed34e3c3 5060{
5b0a4722 5061 va_list ap;
ed34e3c3 5062
5b0a4722
A
5063 va_start(ap, msg);
5064 job_logv(j, pri, 0, msg, ap);
5065 va_end(ap);
5066}
ed34e3c3 5067
5b0a4722
A
5068#if 0
5069void
5070jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
5071{
5072 va_list ap;
ed34e3c3 5073
5b0a4722
A
5074 va_start(ap, msg);
5075 jobmgr_logv(jm, pri, errno, msg, ap);
5076 va_end(ap);
5077}
5078#endif
ed34e3c3 5079
5b0a4722
A
5080void
5081jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
5082{
5083 va_list ap;
ed34e3c3 5084
5b0a4722
A
5085 va_start(ap, msg);
5086 jobmgr_logv(jm, pri, 0, msg, ap);
5087 va_end(ap);
ed34e3c3
A
5088}
5089
5b0a4722
A
5090void
5091jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
ed34e3c3 5092{
5b0a4722
A
5093 char *newmsg;
5094 char *newname;
5095 size_t i, o, jmname_len = strlen(jm->name), newmsgsz;
ed34e3c3 5096
5b0a4722
A
5097 newname = alloca((jmname_len + 1) * 2);
5098 newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
5099 newmsg = alloca(newmsgsz);
ed34e3c3 5100
5b0a4722
A
5101 for (i = 0, o = 0; i < jmname_len; i++, o++) {
5102 if (jm->name[i] == '%') {
5103 newname[o] = '%';
5104 o++;
5105 }
5106 newname[o] = jm->name[i];
5107 }
5108 newname[o] = '\0';
ed34e3c3 5109
5b0a4722
A
5110 if (err) {
5111 snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
5112 } else {
5113 snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
5114 }
ed34e3c3 5115
5b0a4722
A
5116 if (jm->parentmgr) {
5117 jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
5118 } else {
ddbbfbc1 5119 struct runtime_syslog_attr attr = { g_my_label, g_my_label, jm->name, pri, getuid(), getpid(), getpid() };
ed34e3c3 5120
5b0a4722
A
5121 runtime_vsyslog(&attr, newmsg, ap);
5122 }
ed34e3c3
A
5123}
5124
5125void
5b0a4722 5126semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
ed34e3c3 5127{
5b0a4722
A
5128 if (si->fd != -1) {
5129 job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
dcace88f 5130 (void)job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
5b0a4722 5131 }
ed34e3c3
A
5132}
5133
5134void
5b0a4722 5135semaphoreitem_watch(job_t j, struct semaphoreitem *si)
ed34e3c3 5136{
f36da725 5137 char *parentdir, tmp_path[PATH_MAX];
5b0a4722 5138 int saved_errno = 0;
ddbbfbc1 5139 int fflags = NOTE_DELETE|NOTE_RENAME;
ed34e3c3 5140
5b0a4722 5141 switch (si->why) {
5b0a4722
A
5142 case DIR_NOT_EMPTY:
5143 case PATH_CHANGES:
ddbbfbc1
A
5144 fflags |= NOTE_ATTRIB|NOTE_LINK;
5145 /* fall through */
5146 case PATH_EXISTS:
5147 fflags |= NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
5148 /* fall through */
5149 case PATH_MISSING:
5b0a4722
A
5150 break;
5151 default:
5152 return;
ed34e3c3
A
5153 }
5154
f36da725
A
5155 /* dirname() may modify tmp_path */
5156 strlcpy(tmp_path, si->what, sizeof(tmp_path));
5157
5158 if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
5159 return;
5160 }
5161
5b0a4722
A
5162 /* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
5163 do {
5164 if (si->fd == -1) {
ddbbfbc1 5165 struct stat sb;
dcace88f 5166 if (stat(si->what, &sb) == 0) {
ddbbfbc1
A
5167 /* If we're watching a character or block device, only watch the parent directory.
5168 * See rdar://problem/6489900 for the gory details. Basically, holding an open file
5169 * descriptor to a devnode could end up (a) blocking us on open(2) until someone else
5170 * open(2)s the file (like a character device that waits for a carrier signal) or
5171 * (b) preventing other processes from obtaining an exclusive lock on the file, even
5172 * though we're opening it with O_EVTONLY.
5173 *
5174 * The main point of contention is that O_EVTONLY doesn't actually mean "event only".
5175 * It means "Don't prevent unmounts of this descriptor's volume". We work around this
5176 * for dev nodes by only watching the parent directory and stat(2)ing our desired file
5177 * each time the parent changes to see if it appeared or disappeared.
5178 */
dcace88f 5179 if (S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode)) {
ddbbfbc1
A
5180 si->fd = _fd(open(si->what, O_EVTONLY | O_NOCTTY | O_NONBLOCK));
5181 }
5182 }
5183
dcace88f 5184 if (si->fd == -1) {
ddbbfbc1
A
5185 si->watching_parent = job_assumes(j, (si->fd = _fd(open(parentdir, O_EVTONLY | O_NOCTTY | O_NONBLOCK))) != -1);
5186 } else {
5187 si->watching_parent = false;
5b0a4722
A
5188 }
5189 }
5190
5191 if (si->fd == -1) {
ddbbfbc1 5192 return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", si->what);
5b0a4722
A
5193 }
5194
ddbbfbc1 5195 job_log(j, LOG_DEBUG, "Watching %svnode (%s): %d", si->watching_parent ? "parent ": "", si->what, si->fd);
5b0a4722
A
5196
5197 if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
5198 saved_errno = errno;
5199 /*
5200 * The FD can be revoked between the open() and kevent().
5201 * This is similar to the inability for kevents to be
5202 * attached to short lived zombie processes after fork()
5203 * but before kevent().
5204 */
dcace88f 5205 (void)job_assumes(j, runtime_close(si->fd) == 0);
5b0a4722
A
5206 si->fd = -1;
5207 }
ddbbfbc1 5208 } while (unlikely((si->fd == -1) && (saved_errno == ENOENT)));
5b0a4722
A
5209
5210 if (saved_errno == ENOTSUP) {
5211 /*
5212 * 3524219 NFS needs kqueue support
5213 * 4124079 VFS needs generic kqueue support
5214 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
5215 */
5216 job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);
5217
5218 if (!j->poll_for_vfs_changes) {
5219 j->poll_for_vfs_changes = true;
dcace88f 5220 (void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
5b0a4722 5221 }
ed34e3c3
A
5222 }
5223}
5224
5b0a4722
A
5225void
5226semaphoreitem_callback(job_t j, struct kevent *kev)
ed34e3c3 5227{
5b0a4722
A
5228 char invalidation_reason[100] = "";
5229 struct semaphoreitem *si;
5230
5231 SLIST_FOREACH(si, &j->semaphores, sle) {
5232 switch (si->why) {
5233 case PATH_CHANGES:
5234 case PATH_EXISTS:
5235 case PATH_MISSING:
5236 case DIR_NOT_EMPTY:
ddbbfbc1 5237 job_log(j, LOG_DEBUG, "P%s changed (%u): %s", si->watching_parent ? "arent path" : "ath", si->why, si->what);
5b0a4722
A
5238 break;
5239 default:
5240 continue;
5241 }
5242
5243 if (si->fd == (int)kev->ident) {
5244 break;
5245 }
5246 }
5247
5248 if (!job_assumes(j, si != NULL)) {
5249 return;
5250 }
5251
5252 if (NOTE_DELETE & kev->fflags) {
5253 strcat(invalidation_reason, "deleted");
5254 }
5255
5256 if (NOTE_RENAME & kev->fflags) {
5257 if (invalidation_reason[0]) {
5258 strcat(invalidation_reason, "/renamed");
5259 } else {
5260 strcat(invalidation_reason, "renamed");
5261 }
5262 }
5263
5264 if (NOTE_REVOKE & kev->fflags) {
5265 if (invalidation_reason[0]) {
5266 strcat(invalidation_reason, "/revoked");
5267 } else {
5268 strcat(invalidation_reason, "revoked");
5269 }
5270 }
5271
5272 if (invalidation_reason[0]) {
5273 job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
dcace88f 5274 (void)job_assumes(j, runtime_close(si->fd) == 0);
5b0a4722
A
5275 si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
5276 }
5277
dcace88f 5278 if (!si->watching_parent) {
ddbbfbc1
A
5279 if (si->why == PATH_CHANGES) {
5280 j->start_pending = true;
5281 } else {
5282 semaphoreitem_watch(j, si);
5283 }
5284 } else { /* Something happened to the parent directory. See if our target file appeared. */
dcace88f
A
5285 if (!invalidation_reason[0]) {
5286 (void)job_assumes(j, runtime_close(si->fd) == 0);
ddbbfbc1
A
5287 si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
5288 semaphoreitem_watch(j, si);
5289 }
5290 /* Need to think about what should happen if the parent directory goes invalid. */
5b0a4722 5291 }
ddbbfbc1 5292
5b0a4722
A
5293 job_dispatch(j, false);
5294}
5295
ddbbfbc1
A
5296struct cal_dict_walk {
5297 job_t j;
5298 struct tm tmptm;
5299};
5300
5b0a4722
A
5301void
5302calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
5303{
ddbbfbc1
A
5304 struct cal_dict_walk *cdw = context;
5305 struct tm *tmptm = &cdw->tmptm;
5306 job_t j = cdw->j;
5b0a4722
A
5307 int64_t val;
5308
ddbbfbc1 5309 if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
5b0a4722
A
5310 /* hack to let caller know something went wrong */
5311 tmptm->tm_sec = -1;
5312 return;
5313 }
5314
5315 val = launch_data_get_integer(obj);
5316
ddbbfbc1
A
5317 if (val < 0) {
5318 job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
5319 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
dcace88f 5320 if (val > 59) {
ddbbfbc1
A
5321 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
5322 tmptm->tm_sec = -1;
5323 } else {
5324 tmptm->tm_min = (typeof(tmptm->tm_min)) val;
5325 }
5b0a4722 5326 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
dcace88f 5327 if (val > 23) {
ddbbfbc1
A
5328 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
5329 tmptm->tm_sec = -1;
5330 } else {
5331 tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
5332 }
5b0a4722 5333 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
dcace88f 5334 if (val < 1 || val > 31) {
ddbbfbc1
A
5335 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
5336 tmptm->tm_sec = -1;
5337 } else {
5338 tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
5339 }
5b0a4722 5340 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
dcace88f 5341 if (val > 7) {
ddbbfbc1
A
5342 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
5343 tmptm->tm_sec = -1;
5344 } else {
5345 tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
5346 }
5b0a4722 5347 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
dcace88f 5348 if (val > 12) {
ddbbfbc1
A
5349 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
5350 tmptm->tm_sec = -1;
5351 } else {
5352 tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
5353 tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
5354 }
5b0a4722
A
5355 }
5356}
5357
5358bool
5359calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5360{
ddbbfbc1 5361 struct cal_dict_walk cdw;
5b0a4722 5362
ddbbfbc1
A
5363 cdw.j = j;
5364 memset(&cdw.tmptm, 0, sizeof(0));
5b0a4722 5365
ddbbfbc1
A
5366 cdw.tmptm.tm_min = -1;
5367 cdw.tmptm.tm_hour = -1;
5368 cdw.tmptm.tm_mday = -1;
5369 cdw.tmptm.tm_wday = -1;
5370 cdw.tmptm.tm_mon = -1;
5b0a4722
A
5371
5372 if (!job_assumes(j, obj != NULL)) {
5373 return false;
5374 }
5375
ddbbfbc1 5376 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5b0a4722
A
5377 return false;
5378 }
5379
ddbbfbc1 5380 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5b0a4722 5381
ddbbfbc1 5382 if (unlikely(cdw.tmptm.tm_sec == -1)) {
5b0a4722
A
5383 return false;
5384 }
5385
ddbbfbc1 5386 return calendarinterval_new(j, &cdw.tmptm);
5b0a4722
A
5387}
5388
5389bool
5390calendarinterval_new(job_t j, struct tm *w)
5391{
5392 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5393
5394 if (!job_assumes(j, ci != NULL)) {
5395 return false;
5396 }
5397
5398 ci->when = *w;
5399 ci->job = j;
5400
5401 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
5402
5403 calendarinterval_setalarm(j, ci);
5404
ddbbfbc1 5405 runtime_add_weak_ref();
5b0a4722
A
5406
5407 return true;
5408}
5409
5410void
5411calendarinterval_delete(job_t j, struct calendarinterval *ci)
5412{
5413 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
5414 LIST_REMOVE(ci, global_sle);
5415
5416 free(ci);
5417
ddbbfbc1 5418 runtime_del_weak_ref();
5b0a4722
A
5419}
5420
5421void
5422calendarinterval_sanity_check(void)
5423{
5424 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5425 time_t now = time(NULL);
5426
ddbbfbc1 5427 if (unlikely(ci && (ci->when_next < now))) {
dcace88f 5428 (void)jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
5b0a4722
A
5429 }
5430}
5431
5432void
5433calendarinterval_callback(void)
5434{
5435 struct calendarinterval *ci, *ci_next;
5436 time_t now = time(NULL);
5437
5438 LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
5439 job_t j = ci->job;
5440
5441 if (ci->when_next > now) {
5442 break;
5443 }
5444
5445 LIST_REMOVE(ci, global_sle);
5446 calendarinterval_setalarm(j, ci);
5447
5448 j->start_pending = true;
5449 job_dispatch(j, false);
5450 }
5451}
5452
5453bool
ddbbfbc1 5454socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds)
5b0a4722
A
5455{
5456 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5457
5458 if (!job_assumes(j, sg != NULL)) {
5459 return false;
5460 }
ed34e3c3
A
5461
5462 sg->fds = calloc(1, fd_cnt * sizeof(int));
5463 sg->fd_cnt = fd_cnt;
5464 sg->junkfds = junkfds;
5465
5b0a4722 5466 if (!job_assumes(j, sg->fds != NULL)) {
ed34e3c3
A
5467 free(sg);
5468 return false;
5469 }
5470
5471 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
f36da725 5472 strcpy(sg->name_init, name);
ed34e3c3
A
5473
5474 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5475
ddbbfbc1 5476 runtime_add_weak_ref();
5b0a4722 5477
ed34e3c3
A
5478 return true;
5479}
5480
5481void
5b0a4722 5482socketgroup_delete(job_t j, struct socketgroup *sg)
ed34e3c3
A
5483{
5484 unsigned int i;
5485
5b0a4722
A
5486 for (i = 0; i < sg->fd_cnt; i++) {
5487#if 0
5488 struct sockaddr_storage ss;
5489 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
5490 socklen_t ss_len = sizeof(ss);
5491
5492 /* 5480306 */
5493 if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
5494 && job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
dcace88f 5495 (void)job_assumes(j, unlink(sun->sun_path) != -1);
5b0a4722
A
5496 /* We might conditionally need to delete a directory here */
5497 }
5498#endif
dcace88f 5499 (void)job_assumes(j, runtime_close(sg->fds[i]) != -1);
5b0a4722 5500 }
ed34e3c3
A
5501
5502 SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);
5503
5504 free(sg->fds);
5505 free(sg);
5b0a4722 5506
ddbbfbc1 5507 runtime_del_weak_ref();
ed34e3c3
A
5508}
5509
5510void
5b0a4722 5511socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
ed34e3c3 5512{
5b0a4722 5513 struct kevent kev[sg->fd_cnt];
ed34e3c3
A
5514 char buf[10000];
5515 unsigned int i, buf_off = 0;
5516
ddbbfbc1 5517 if (unlikely(sg->junkfds)) {
ed34e3c3 5518 return;
5b0a4722
A
5519 }
5520
5521 for (i = 0; i < sg->fd_cnt; i++) {
5522 EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
5523 buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
5524 }
ed34e3c3 5525
5b0a4722 5526 job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);
ed34e3c3 5527
dcace88f 5528 (void)job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);
ed34e3c3 5529
5b0a4722 5530 for (i = 0; i < sg->fd_cnt; i++) {
dcace88f 5531 (void)job_assumes(j, kev[i].flags & EV_ERROR);
ddbbfbc1 5532 errno = (typeof(errno)) kev[i].data;
dcace88f 5533 (void)job_assumes(j, kev[i].data == 0);
5b0a4722 5534 }
ed34e3c3
A
5535}
5536
5537void
5b0a4722 5538socketgroup_ignore(job_t j, struct socketgroup *sg)
ed34e3c3 5539{
5b0a4722
A
5540 socketgroup_kevent_mod(j, sg, false);
5541}
ed34e3c3 5542
5b0a4722
A
5543void
5544socketgroup_watch(job_t j, struct socketgroup *sg)
5545{
5546 socketgroup_kevent_mod(j, sg, true);
ed34e3c3
A
5547}
5548
5549void
5b0a4722 5550socketgroup_callback(job_t j)
ed34e3c3 5551{
5b0a4722 5552 job_dispatch(j, true);
ed34e3c3
A
5553}
5554
5555bool
ddbbfbc1 5556envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot)
ed34e3c3
A
5557{
5558 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
5559
5b0a4722 5560 if (!job_assumes(j, ei != NULL)) {
ed34e3c3 5561 return false;
5b0a4722 5562 }
ed34e3c3 5563
f36da725
A
5564 strcpy(ei->key_init, k);
5565 ei->value = ei->key_init + strlen(k) + 1;
ed34e3c3 5566 strcpy(ei->value, v);
ddbbfbc1 5567 ei->one_shot = one_shot;
ed34e3c3
A
5568
5569 if (global) {
ddbbfbc1
A
5570 if (SLIST_EMPTY(&j->global_env)) {
5571 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5572 }
ed34e3c3
A
5573 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5574 } else {
5575 SLIST_INSERT_HEAD(&j->env, ei, sle);
5576 }
5577
5b0a4722
A
5578 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
5579
ed34e3c3
A
5580 return true;
5581}
5582
5583void
5b0a4722 5584envitem_delete(job_t j, struct envitem *ei, bool global)
ed34e3c3
A
5585{
5586 if (global) {
5587 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
ddbbfbc1
A
5588 if (SLIST_EMPTY(&j->global_env)) {
5589 LIST_REMOVE(j, global_env_sle);
5590 }
ed34e3c3
A
5591 } else {
5592 SLIST_REMOVE(&j->env, ei, envitem, sle);
5593 }
5594
5595 free(ei);
5596}
5597
5598void
5599envitem_setup(launch_data_t obj, const char *key, void *context)
5600{
5b0a4722 5601 job_t j = context;
ed34e3c3 5602
5b0a4722 5603 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
ed34e3c3 5604 return;
5b0a4722 5605 }
ed34e3c3 5606
dcace88f 5607 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
ddbbfbc1
A
5608 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, false);
5609 } else {
5610 job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5611 }
5612}
5613
5614void
5615envitem_setup_one_shot(launch_data_t obj, const char *key, void *context)
5616{
5617 job_t j = context;
5618
5619 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5620 return;
5621 }
5622
dcace88f 5623 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
ddbbfbc1
A
5624 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, true);
5625 } else {
5626 job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5627 }
ed34e3c3
A
5628}
5629
5630bool
5b0a4722 5631limititem_update(job_t j, int w, rlim_t r)
ed34e3c3
A
5632{
5633 struct limititem *li;
5634
5635 SLIST_FOREACH(li, &j->limits, sle) {
5b0a4722 5636 if (li->which == w) {
ed34e3c3 5637 break;
5b0a4722 5638 }
ed34e3c3
A
5639 }
5640
5641 if (li == NULL) {
5642 li = calloc(1, sizeof(struct limititem));
5643
5b0a4722 5644 if (!job_assumes(j, li != NULL)) {
ed34e3c3 5645 return false;
5b0a4722
A
5646 }
5647
5648 SLIST_INSERT_HEAD(&j->limits, li, sle);
ed34e3c3
A
5649
5650 li->which = w;
5651 }
5652
5653 if (j->importing_hard_limits) {
5654 li->lim.rlim_max = r;
5655 li->sethard = true;
5656 } else {
5657 li->lim.rlim_cur = r;
5658 li->setsoft = true;
5659 }
5660
5661 return true;
5662}
5663
5664void
5b0a4722 5665limititem_delete(job_t j, struct limititem *li)
ed34e3c3
A
5666{
5667 SLIST_REMOVE(&j->limits, li, limititem, sle);
5668
5669 free(li);
5670}
5671
f36da725 5672#if HAVE_SANDBOX
5b0a4722
A
5673void
5674seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
5675{
5676 job_t j = context;
5677
5678 if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
5679 job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
5680 return;
5681 }
5682
5683 if (launch_data_get_bool(obj) == false) {
5684 return;
5685 }
5686
5687 if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
5688 j->seatbelt_flags |= SANDBOX_NAMED;
5689 }
5690}
f36da725 5691#endif
5b0a4722 5692
ed34e3c3
A
5693void
5694limititem_setup(launch_data_t obj, const char *key, void *context)
5695{
5b0a4722 5696 job_t j = context;
ddbbfbc1 5697 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
ed34e3c3
A
5698 rlim_t rl;
5699
5b0a4722 5700 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
ed34e3c3 5701 return;
5b0a4722 5702 }
ed34e3c3
A
5703
5704 rl = launch_data_get_integer(obj);
5705
5706 for (i = 0; i < limits_cnt; i++) {
5b0a4722 5707 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
ed34e3c3 5708 break;
5b0a4722 5709 }
ed34e3c3
A
5710 }
5711
5b0a4722 5712 if (i == limits_cnt) {
ed34e3c3 5713 return;
5b0a4722 5714 }
ed34e3c3
A
5715
5716 limititem_update(j, launchd_keys2limits[i].val, rl);
5717}
5718
5719bool
5b0a4722 5720job_useless(job_t j)
ed34e3c3 5721{
ddbbfbc1
A
5722 if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
5723 if (j->legacy_LS_job && j->j_port) {
5b0a4722
A
5724 return false;
5725 }
ed34e3c3
A
5726 job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
5727 return true;
5b0a4722
A
5728 } else if (j->removal_pending) {
5729 job_log(j, LOG_DEBUG, "Exited while removal was pending.");
ed34e3c3 5730 return true;
dcace88f
A
5731 } else if (j->shutdown_monitor) {
5732 return false;
5733 } else if (j->mgr->shutting_down) {
5b0a4722 5734 job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
dcace88f
A
5735 if (total_children == 0 && !j->anonymous) {
5736 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
ddbbfbc1 5737 }
ed34e3c3 5738 return true;
5b0a4722
A
5739 } else if (j->legacy_mach_job) {
5740 if (SLIST_EMPTY(&j->machservices)) {
5741 job_log(j, LOG_INFO, "Garbage collecting");
5742 return true;
5743 } else if (!j->checkedin) {
5744 job_log(j, LOG_WARNING, "Failed to check-in!");
5745 return true;
5746 }
dcace88f
A
5747 } else {
5748 /* If the job's executable does not have any valid architectures (for
5749 * example, if it's a PowerPC-only job), then we don't even bother
5750 * trying to relaunch it, as we have no reasonable expectation that
5751 * the situation will change.
5752 *
5753 * <rdar://problem/9106979>
5754 */
5755 if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
5756 job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
5757 return true;
5758 }
ed34e3c3
A
5759 }
5760
5761 return false;
5762}
5763
5764bool
5b0a4722 5765job_keepalive(job_t j)
ed34e3c3
A
5766{
5767 mach_msg_type_number_t statusCnt;
5768 mach_port_status_t status;
5769 struct semaphoreitem *si;
5770 struct machservice *ms;
5771 struct stat sb;
5772 bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
ddbbfbc1 5773 bool is_not_kextd = (do_apple_internal_logging || (strcmp(j->label, "com.apple.kextd") != 0));
ed34e3c3 5774
ddbbfbc1 5775 if (unlikely(j->mgr->shutting_down)) {
f36da725
A
5776 return false;
5777 }
5778
5b0a4722
A
5779 /*
5780 * 5066316
5781 *
5782 * We definitely need to revisit this after Leopard ships. Please see
5783 * launchctl.c for the other half of this hack.
5784 */
ddbbfbc1
A
5785 if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
5786 return false;
5787 }
5788
dcace88f 5789 if (unlikely(j->needs_kickoff)) {
ddbbfbc1 5790 job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
5b0a4722
A
5791 return false;
5792 }
5793
5794 if (j->start_pending) {
5795 job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
ed34e3c3
A
5796 return true;
5797 }
5798
5799 if (!j->ondemand) {
5800 job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
5801 return true;
5802 }
5803
5804 SLIST_FOREACH(ms, &j->machservices, sle) {
5805 statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
5806 if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
5b0a4722 5807 (mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
ed34e3c3 5808 continue;
5b0a4722 5809 }
ed34e3c3 5810 if (status.mps_msgcount) {
ddbbfbc1 5811 job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
ed34e3c3
A
5812 status.mps_msgcount, ms->name);
5813 return true;
5814 }
5815 }
dcace88f
A
5816
5817 /* TODO: Coalesce external events and semaphore items, since they're basically
5818 * the same thing.
5819 */
5820 struct externalevent *ei = NULL;
5821 LIST_FOREACH(ei, &j->events, job_le) {
5822 if (ei->state == ei->wanted_state) {
5823 return true;
5824 }
5825 }
5826
ed34e3c3
A
5827 SLIST_FOREACH(si, &j->semaphores, sle) {
5828 bool wanted_state = false;
5b0a4722
A
5829 int qdir_file_cnt;
5830 job_t other_j;
5831
ed34e3c3
A
5832 switch (si->why) {
5833 case NETWORK_UP:
5834 wanted_state = true;
5835 case NETWORK_DOWN:
5836 if (network_up == wanted_state) {
5b0a4722 5837 job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
ed34e3c3
A
5838 return true;
5839 }
5840 break;
5841 case SUCCESSFUL_EXIT:
5842 wanted_state = true;
5843 case FAILED_EXIT:
5844 if (good_exit == wanted_state) {
5b0a4722
A
5845 job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
5846 return true;
5847 }
5848 break;
dcace88f
A
5849 case CRASHED:
5850 wanted_state = true;
5851 case DID_NOT_CRASH:
5852 if (j->crashed == wanted_state) {
5853 return true;
5854 }
5855 break;
5b0a4722
A
5856 case OTHER_JOB_ENABLED:
5857 wanted_state = true;
5858 case OTHER_JOB_DISABLED:
dcace88f 5859 if ((bool)job_find(NULL, si->what) == wanted_state) {
5b0a4722 5860 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
ed34e3c3
A
5861 return true;
5862 }
5863 break;
5b0a4722
A
5864 case OTHER_JOB_ACTIVE:
5865 wanted_state = true;
5866 case OTHER_JOB_INACTIVE:
dcace88f 5867 if ((other_j = job_find(NULL, si->what))) {
5b0a4722
A
5868 if ((bool)other_j->p == wanted_state) {
5869 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
5870 return true;
5871 }
5872 }
5873 break;
ed34e3c3
A
5874 case PATH_EXISTS:
5875 wanted_state = true;
5876 case PATH_MISSING:
5877 if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
5b0a4722
A
5878 job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
5879 return true;
ddbbfbc1 5880 } else {
dcace88f
A
5881 if (wanted_state) { /* File is not there but we wish it was. */
5882 if (si->fd != -1 && !si->watching_parent) { /* Need to be watching the parent now. */
5883 (void)job_assumes(j, runtime_close(si->fd) == 0);
ddbbfbc1
A
5884 si->fd = -1;
5885 semaphoreitem_watch(j, si);
5886 }
5887 } else { /* File is there but we wish it wasn't. */
dcace88f
A
5888 if (si->fd != -1 && si->watching_parent) { /* Need to watch the file now. */
5889 (void)job_assumes(j, runtime_close(si->fd) == 0);
ddbbfbc1
A
5890 si->fd = -1;
5891 semaphoreitem_watch(j, si);
5892 }
5893 }
5b0a4722
A
5894 }
5895 break;
5896 case PATH_CHANGES:
5897 break;
5898 case DIR_NOT_EMPTY:
5899 if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
5900 job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
5901 } else if (qdir_file_cnt > 0) {
5902 job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
ed34e3c3
A
5903 return true;
5904 }
ed34e3c3
A
5905 break;
5906 }
5907 }
5908
ed34e3c3
A
5909 return false;
5910}
5911
5b0a4722
A
5912const char *
5913job_active(job_t j)
ed34e3c3
A
5914{
5915 struct machservice *ms;
dcace88f
A
5916 if (j->p && j->shutdown_monitor) {
5917 return "Monitoring shutdown";
5918 }
5b0a4722
A
5919 if (j->p) {
5920 return "PID is still valid";
5921 }
ed34e3c3 5922
5b0a4722 5923 if (j->mgr->shutting_down && j->log_redirect_fd) {
dcace88f 5924 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5b0a4722
A
5925 j->log_redirect_fd = 0;
5926 }
ed34e3c3 5927
5b0a4722 5928 if (j->log_redirect_fd) {
ddbbfbc1 5929 if (job_assumes(j, j->legacy_LS_job)) {
5b0a4722
A
5930 return "Standard out/error is still valid";
5931 } else {
dcace88f 5932 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5b0a4722 5933 j->log_redirect_fd = 0;
ed34e3c3 5934 }
5b0a4722
A
5935 }
5936
5937 if (j->priv_port_has_senders) {
5938 return "Privileged Port still has outstanding senders";
ed34e3c3
A
5939 }
5940
5941 SLIST_FOREACH(ms, &j->machservices, sle) {
ddbbfbc1 5942 if (ms->recv && machservice_active(ms)) {
5b0a4722
A
5943 return "Mach service is still active";
5944 }
ed34e3c3
A
5945 }
5946
5b0a4722 5947 return NULL;
ed34e3c3
A
5948}
5949
5b0a4722
A
5950void
5951machservice_watch(job_t j, struct machservice *ms)
ed34e3c3 5952{
5b0a4722 5953 if (ms->recv) {
dcace88f 5954 (void)job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
5b0a4722 5955 }
ed34e3c3
A
5956}
5957
5b0a4722
A
5958void
5959machservice_ignore(job_t j, struct machservice *ms)
ed34e3c3 5960{
dcace88f 5961 (void)job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
ed34e3c3
A
5962}
5963
5964void
5b0a4722 5965machservice_resetport(job_t j, struct machservice *ms)
ed34e3c3 5966{
5b0a4722 5967 LIST_REMOVE(ms, port_hash_sle);
dcace88f
A
5968 (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
5969 (void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
5b0a4722 5970 ms->gen_num++;
dcace88f
A
5971 (void)job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
5972 (void)job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
5b0a4722 5973 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
ed34e3c3
A
5974}
5975
5976struct machservice *
5b0a4722 5977machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
ed34e3c3 5978{
ddbbfbc1 5979 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
ed34e3c3 5980
ddbbfbc1 5981 if (!job_assumes(j, ms != NULL)) {
ed34e3c3 5982 return NULL;
5b0a4722 5983 }
ed34e3c3 5984
5b0a4722 5985 strcpy((char *)ms->name, name);
ed34e3c3 5986 ms->job = j;
ddbbfbc1 5987 ms->gen_num = 1;
5b0a4722 5988 ms->per_pid = pid_local;
ed34e3c3 5989
ddbbfbc1 5990 if (likely(*serviceport == MACH_PORT_NULL)) {
5b0a4722 5991 if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
ed34e3c3 5992 goto out_bad;
5b0a4722 5993 }
ed34e3c3 5994
5b0a4722 5995 if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
ed34e3c3 5996 goto out_bad2;
5b0a4722 5997 }
ed34e3c3 5998 *serviceport = ms->port;
ed34e3c3
A
5999 ms->recv = true;
6000 } else {
6001 ms->port = *serviceport;
6002 ms->isActive = true;
6003 }
6004
6005 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
dcace88f
A
6006
6007 jobmgr_t where2put = j->mgr;
6008 /* XPC domains are separate from Mach bootstraps. */
6009 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
6010 if (g_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
6011 where2put = root_jobmgr;
6012 }
ddbbfbc1
A
6013 }
6014
dcace88f
A
6015 /* Don't allow MachServices added by multiple-instance jobs to be looked up by others.
6016 * We could just do this with a simple bit, but then we'd have to uniquify the
6017 * names ourselves to avoid collisions. This is just easier.
6018 */
6019 if (!j->dedicated_instance) {
6020 LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
6021 }
5b0a4722 6022 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
ed34e3c3 6023
ddbbfbc1 6024 job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);
ed34e3c3
A
6025
6026 return ms;
6027out_bad2:
dcace88f 6028 (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
ed34e3c3
A
6029out_bad:
6030 free(ms);
6031 return NULL;
6032}
6033
dcace88f
A
6034#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
6035struct machservice *
6036machservice_new_alias(job_t j, struct machservice *orig)
6037{
6038 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
6039 if (job_assumes(j, ms != NULL)) {
6040 strcpy((char *)ms->name, orig->name);
6041 ms->alias = orig;
6042 ms->job = j;
6043
6044 LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
6045 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
6046 jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
6047 }
6048
6049 return ms;
6050}
5c88273d 6051#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
dcace88f 6052
ed34e3c3
A
6053bootstrap_status_t
6054machservice_status(struct machservice *ms)
6055{
dcace88f 6056 ms = ms->alias ? ms->alias : ms;
ed34e3c3
A
6057 if (ms->isActive) {
6058 return BOOTSTRAP_STATUS_ACTIVE;
6059 } else if (ms->job->ondemand) {
6060 return BOOTSTRAP_STATUS_ON_DEMAND;
6061 } else {
6062 return BOOTSTRAP_STATUS_INACTIVE;
6063 }
6064}
6065
6066void
5b0a4722 6067job_setup_exception_port(job_t j, task_t target_task)
ed34e3c3 6068{
fe044cc9 6069 struct machservice *ms;
ed34e3c3 6070 thread_state_flavor_t f = 0;
fe044cc9 6071 mach_port_t exc_port = the_exception_server;
5b0a4722 6072
ddbbfbc1 6073 if (unlikely(j->alt_exc_handler)) {
fe044cc9 6074 ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
ddbbfbc1 6075 if (likely(ms)) {
fe044cc9
A
6076 exc_port = machservice_port(ms);
6077 } else {
6078 job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
6079 }
ddbbfbc1 6080 } else if (unlikely(j->internal_exc_handler)) {
fe044cc9 6081 exc_port = runtime_get_kernel_port();
ddbbfbc1 6082 } else if (unlikely(!exc_port)) {
5b0a4722
A
6083 return;
6084 }
ed34e3c3 6085
ddbbfbc1 6086#if defined (__ppc__) || defined(__ppc64__)
ed34e3c3 6087 f = PPC_THREAD_STATE64;
ef398931 6088#elif defined(__i386__) || defined(__x86_64__)
ed34e3c3 6089 f = x86_THREAD_STATE;
f36da725
A
6090#elif defined(__arm__)
6091 f = ARM_THREAD_STATE;
6092#else
6093#error "unknown architecture"
ed34e3c3
A
6094#endif
6095
ddbbfbc1 6096 if (likely(target_task)) {
dcace88f 6097 (void)job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
ddbbfbc1 6098 } else if (pid1_magic && the_exception_server) {
5b0a4722 6099 mach_port_t mhp = mach_host_self();
dcace88f 6100 (void)job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
5b0a4722
A
6101 job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
6102 }
5b0a4722
A
6103}
6104
6105void
ddbbfbc1 6106job_set_exception_port(job_t j, mach_port_t port)
5b0a4722 6107{
ddbbfbc1 6108 if (unlikely(!the_exception_server)) {
5b0a4722
A
6109 the_exception_server = port;
6110 job_setup_exception_port(j, 0);
6111 } else {
6112 job_log(j, LOG_WARNING, "The exception server is already claimed!");
6113 }
6114}
6115
6116void
6117machservice_setup_options(launch_data_t obj, const char *key, void *context)
6118{
6119 struct machservice *ms = context;
6120 mach_port_t mhp = mach_host_self();
6121 int which_port;
6122 bool b;
6123
6124 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
ed34e3c3
A
6125 return;
6126 }
6127
6128 switch (launch_data_get_type(obj)) {
6129 case LAUNCH_DATA_INTEGER:
ddbbfbc1 6130 which_port = (int)launch_data_get_integer(obj); /* XXX we should bound check this... */
ed34e3c3 6131 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
5b0a4722
A
6132 switch (which_port) {
6133 case TASK_KERNEL_PORT:
6134 case TASK_HOST_PORT:
6135 case TASK_NAME_PORT:
6136 case TASK_BOOTSTRAP_PORT:
ddbbfbc1
A
6137 /* I find it a little odd that zero isn't reserved in the header.
6138 * Normally Mach is fairly good about this convention... */
5b0a4722
A
6139 case 0:
6140 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
6141 break;
6142 default:
6143 ms->special_port_num = which_port;
6144 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
6145 break;
6146 }
ddbbfbc1 6147 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
5b0a4722 6148 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
dcace88f 6149 (void)job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
5b0a4722
A
6150 } else {
6151 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
6152 }
ed34e3c3
A
6153 }
6154 case LAUNCH_DATA_BOOL:
6155 b = launch_data_get_bool(obj);
5b0a4722
A
6156 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
6157 ms->debug_on_close = b;
6158 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
ed34e3c3
A
6159 ms->reset = b;
6160 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
6161 ms->hide = b;
6162 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
ddbbfbc1 6163 job_set_exception_port(ms->job, ms->port);
ed34e3c3
A
6164 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
6165 ms->kUNCServer = b;
dcace88f
A
6166 (void)job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
6167 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_PINGEVENTUPDATES) == 0) {
6168 ms->event_update_port = b;
ed34e3c3
A
6169 }
6170 break;
ddbbfbc1 6171 case LAUNCH_DATA_STRING:
dcace88f 6172 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
ddbbfbc1 6173 const char *option = launch_data_get_string(obj);
dcace88f 6174 if (strcasecmp(option, "One") == 0) {
ddbbfbc1 6175 ms->drain_one_on_crash = true;
dcace88f 6176 } else if (strcasecmp(option, "All") == 0) {
ddbbfbc1
A
6177 ms->drain_all_on_crash = true;
6178 }
6179 }
6180 break;
5b0a4722 6181 case LAUNCH_DATA_DICTIONARY:
ddbbfbc1 6182 job_set_exception_port(ms->job, ms->port);
5b0a4722 6183 break;
ed34e3c3
A
6184 default:
6185 break;
6186 }
6187
5b0a4722 6188 job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
ed34e3c3
A
6189}
6190
6191void
6192machservice_setup(launch_data_t obj, const char *key, void *context)
6193{
5b0a4722 6194 job_t j = context;
ed34e3c3
A
6195 struct machservice *ms;
6196 mach_port_t p = MACH_PORT_NULL;
6197
ddbbfbc1 6198 if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
ed34e3c3
A
6199 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
6200 return;
6201 }
6202
ddbbfbc1 6203 if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
ed34e3c3
A
6204 return;
6205 }
6206
6207 ms->isActive = false;
dcace88f
A
6208 ms->upfront = true;
6209
ed34e3c3
A
6210 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
6211 launch_data_dict_iterate(obj, machservice_setup_options, ms);
6212 }
6213}
6214
5b0a4722
A
6215jobmgr_t
6216jobmgr_do_garbage_collection(jobmgr_t jm)
ed34e3c3 6217{
ddbbfbc1 6218 jobmgr_t jmi = NULL, jmn = NULL;
5b0a4722
A
6219 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6220 jobmgr_do_garbage_collection(jmi);
6221 }
ed34e3c3 6222
dcace88f 6223 if (!jm->shutting_down) {
5b0a4722
A
6224 return jm;
6225 }
ddbbfbc1 6226
dcace88f 6227 if (SLIST_EMPTY(&jm->submgrs)) {
ddbbfbc1
A
6228 jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
6229 } else {
6230 jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
5c88273d
A
6231 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
6232 jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
6233 }
5b0a4722 6234 }
dcace88f
A
6235
6236 size_t actives = 0;
6237 job_t ji = NULL, jn = NULL;
6238 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
6239 if (ji->anonymous) {
6240 continue;
6241 }
6242
6243 /* Let the shutdown monitor be up until the very end. */
6244 if (ji->shutdown_monitor) {
6245 continue;
5b0a4722
A
6246 }
6247
dcace88f
A
6248 /* On our first pass through, open a transaction for all the jobs that
6249 * need to be dirty at shutdown. We'll close these transactions once the
6250 * jobs that do not need to be dirty at shutdown have all exited.
6251 */
6252 if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
6253 job_open_shutdown_transaction(ji);
6254 }
6255
6256 const char *active = job_active(ji);
6257 if (!active) {
6258 job_remove(ji);
6259 } else {
6260 job_log(ji, LOG_DEBUG, "Job is active: %s", active);
6261 job_stop(ji);
6262
6263 if (ji->p && !ji->dirty_at_shutdown) {
6264 /* We really only care if the job has not yet been reaped.
6265 * There's no reason to delay shutdown if a Mach port has not
6266 * yet been sent back to us, for example. While we're shutting
6267 * all the "normal" jobs down, do not count the
6268 * dirty-at-shutdown jobs toward the total of actives.
6269 *
6270 * Note that there's a potential race here where we may not get
6271 * a port back in time, so that when we hit jobmgr_remove(), we
6272 * end up removing the job and then our attempt to close the
6273 * Mach port will fail. But at that point, the failure won't
6274 * even make it to the syslog, so not a big deal.
6275 */
6276 actives++;
ddbbfbc1 6277 }
dcace88f
A
6278
6279 if (ji->clean_kill) {
6280 job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
ddbbfbc1 6281 } else {
dcace88f
A
6282 job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
6283 }
6284 }
6285 }
6286
6287 jm->shutdown_jobs_dirtied = true;
6288 if (actives == 0) {
6289 if (!jm->shutdown_jobs_cleaned) {
6290 LIST_FOREACH(ji, &jm->jobs, sle) {
6291 if (!ji->anonymous) {
6292 job_close_shutdown_transaction(ji);
6293 actives++;
ddbbfbc1
A
6294 }
6295 }
dcace88f
A
6296
6297 jm->shutdown_jobs_cleaned = true;
6298 } else if (jm->monitor_shutdown && _s_shutdown_monitor) {
6299 /* The rest of shutdown has completed, so we can kill the shutdown
6300 * monitor now like it was any other job.
6301 */
6302 _s_shutdown_monitor->shutdown_monitor = false;
6303 actives = 1;
6304
6305 job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
6306 job_stop(_s_shutdown_monitor);
6307 _s_shutdown_monitor = NULL;
ddbbfbc1 6308 }
dcace88f
A
6309 }
6310
ddbbfbc1 6311 jobmgr_t r = jm;
dcace88f 6312 if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
ddbbfbc1 6313 jobmgr_log(jm, LOG_DEBUG, "Removing.");
ddbbfbc1
A
6314 jobmgr_remove(jm);
6315 r = NULL;
5b0a4722 6316 }
ddbbfbc1
A
6317
6318 return r;
6319}
5b0a4722 6320
ddbbfbc1
A
6321void
6322jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6323{
ddbbfbc1
A
6324 /* I maintain that stray processes should be at the mercy of launchd during shutdown,
6325 * but nevertheless, things like diskimages-helper can stick around, and SIGKILLing
6326 * them can result in data loss. So we send SIGTERM to all the strays and don't wait
6327 * for them to exit before moving on.
6328 *
6329 * See rdar://problem/6562592
6330 */
6331 size_t i = 0;
dcace88f
A
6332 for (i = 0; i < np; i++) {
6333 if (p[i] != 0) {
ddbbfbc1 6334 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
dcace88f 6335 (void)jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
ddbbfbc1
A
6336 }
6337 }
ed34e3c3
A
6338}
6339
6340void
ddbbfbc1 6341jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
ed34e3c3 6342{
dcace88f
A
6343 size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6344 pid_t *pids = NULL;
6345 int i = 0, kp_cnt = 0;
6346
ddbbfbc1 6347 if (likely(jm->parentmgr || !pid1_magic)) {
5b0a4722
A
6348 return;
6349 }
ed34e3c3 6350
dcace88f 6351 if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
5b0a4722
A
6352 return;
6353 }
ddbbfbc1
A
6354
6355 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6356
dcace88f 6357 if (!jobmgr_assumes(jm, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
5b0a4722
A
6358 goto out;
6359 }
6360
ddbbfbc1 6361 pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
5b0a4722 6362 for (i = 0; i < kp_cnt; i++) {
dcace88f
A
6363 struct proc_bsdshortinfo proc;
6364 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6365 if (errno != ESRCH) {
6366 jobmgr_assumes(jm, errno == 0);
6367 }
6368
6369 kp_skipped++;
6370 continue;
6371 }
6372
6373 pid_t p_i = pids[i];
6374 pid_t pp_i = proc.pbsi_ppid;
6375 pid_t pg_i = proc.pbsi_pgid;
6376 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6377 const char *n = proc.pbsi_comm;
5b0a4722 6378
ddbbfbc1
A
6379 if (unlikely(p_i == 0 || p_i == 1)) {
6380 kp_skipped++;
5b0a4722 6381 continue;
ed34e3c3 6382 }
dcace88f
A
6383
6384 if (_s_shutdown_monitor && pp_i == _s_shutdown_monitor->p) {
6385 kp_skipped++;
6386 continue;
6387 }
ddbbfbc1
A
6388
6389 /* We might have some jobs hanging around that we've decided to shut down in spite of. */
6390 job_t j = jobmgr_find_by_pid(jm, p_i, false);
dcace88f 6391 if (!j || (j && j->anonymous)) {
ddbbfbc1
A
6392 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
6393
6394 int status = 0;
dcace88f
A
6395 if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
6396 if (jobmgr_assumes(jm, waitpid(p_i, &status, WNOHANG) == 0)) {
ddbbfbc1
A
6397 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6398 }
6399 kp_skipped++;
6400 } else {
6401 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6402 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6403 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6404 * hints to the kernel along the way, so that it could shutdown certain subsystems when
6405 * their userspace emissaries go away, before the call to reboot(2).
6406 */
dcace88f 6407 if (leader && leader->ignore_pg_at_shutdown) {
ddbbfbc1
A
6408 kp_skipped++;
6409 } else {
6410 ps[i] = p_i;
6411 }
6412 }
6413 } else {
6414 kp_skipped++;
6415 }
6416 }
5b0a4722 6417
dcace88f
A
6418 if ((kp_cnt - kp_skipped > 0) && kill_strays) {
6419 jobmgr_kill_stray_children(jm, ps, kp_cnt - kp_skipped);
ed34e3c3
A
6420 }
6421
ddbbfbc1 6422 free(ps);
5b0a4722 6423out:
dcace88f 6424 free(pids);
ed34e3c3
A
6425}
6426
5b0a4722
A
6427jobmgr_t
6428jobmgr_parent(jobmgr_t jm)
ed34e3c3 6429{
5b0a4722
A
6430 return jm->parentmgr;
6431}
ed34e3c3 6432
5b0a4722
A
6433void
6434job_uncork_fork(job_t j)
6435{
6436 pid_t c = j->p;
6437
6438 job_log(j, LOG_DEBUG, "Uncorking the fork().");
6439 /* this unblocks the child and avoids a race
6440 * between the above fork() and the kevent_mod() */
dcace88f
A
6441 (void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
6442 (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
ddbbfbc1 6443 j->fork_fd = 0;
5b0a4722
A
6444}
6445
6446jobmgr_t
dcace88f 6447jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
5b0a4722
A
6448{
6449 mach_msg_size_t mxmsgsz;
6450 job_t bootstrapper = NULL;
6451 jobmgr_t jmr;
6452
6453 launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);
6454
ddbbfbc1 6455 if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
5b0a4722 6456 jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
ed34e3c3
A
6457 return NULL;
6458 }
6459
ddbbfbc1
A
6460 jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));
6461
6462 if (!jobmgr_assumes(jm, jmr != NULL)) {
ed34e3c3 6463 return NULL;
5b0a4722 6464 }
ed34e3c3 6465
dcace88f
A
6466 if (jm == NULL) {
6467 root_jobmgr = jmr;
6468 }
6469
5b0a4722 6470 jmr->kqjobmgr_callback = jobmgr_callback;
f36da725 6471 strcpy(jmr->name_init, name ? name : "Under construction");
5b0a4722
A
6472
6473 jmr->req_port = requestorport;
6474
6475 if ((jmr->parentmgr = jm)) {
6476 SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
6477 }
6478
6479 if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
ed34e3c3
A
6480 goto out_bad;
6481 }
6482
5b0a4722 6483 if (transfer_port != MACH_PORT_NULL) {
dcace88f 6484 (void)jobmgr_assumes(jmr, jm != NULL);
5b0a4722 6485 jmr->jm_port = transfer_port;
ddbbfbc1 6486 } else if (!jm && !pid1_magic) {
5b0a4722
A
6487 char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
6488 name_t service_buf;
6489
6490 snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());
6491
6492 if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
6493 goto out_bad;
6494 }
6495
6496 if (trusted_fd) {
ddbbfbc1 6497 int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);
5b0a4722
A
6498
6499 if ((dfd = dup(lfd)) >= 0) {
dcace88f
A
6500 (void)jobmgr_assumes(jmr, runtime_close(dfd) != -1);
6501 (void)jobmgr_assumes(jmr, runtime_close(lfd) != -1);
5b0a4722
A
6502 }
6503
6504 unsetenv(LAUNCHD_TRUSTED_FD_ENV);
6505 }
6506
6507 /* cut off the Libc cache, we don't want to deadlock against ourself */
6508 inherited_bootstrap_port = bootstrap_port;
6509 bootstrap_port = MACH_PORT_NULL;
6510 launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);
ed34e3c3 6511
5b0a4722
A
6512 /* We set this explicitly as we start each child */
6513 launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
6514 } else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
ed34e3c3 6515 goto out_bad;
5b0a4722 6516 }
ed34e3c3 6517
5b0a4722 6518 if (!name) {
f36da725 6519 sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
ed34e3c3
A
6520 }
6521
5b0a4722 6522 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
ddbbfbc1 6523 mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
5b0a4722
A
6524 if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
6525 mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
6526 }
6527
dcace88f
A
6528 /* Total hacks. But the MIG server loop is too generic, and the more dynamic
6529 * parts of it haven't been tested, or if they have, it was a very long time
6530 * ago.
6531 */
6532 if (xpc_events_xpc_events_subsystem.maxsize > mxmsgsz) {
6533 mxmsgsz = xpc_events_xpc_events_subsystem.maxsize;
6534 }
6535 if (xpc_domain_xpc_domain_subsystem.maxsize > mxmsgsz) {
6536 mxmsgsz = xpc_domain_xpc_domain_subsystem.maxsize;
6537 }
6538
5b0a4722 6539 if (!jm) {
dcace88f
A
6540 (void)jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
6541 (void)jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
6542 (void)jobmgr_assumes(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
6543 (void)jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
5b0a4722
A
6544 }
6545
dcace88f 6546 if (name && !skip_init) {
5b0a4722
A
6547 bootstrapper = jobmgr_init_session(jmr, name, sflag);
6548 }
6549
6550 if (!bootstrapper || !bootstrapper->weird_bootstrap) {
6551 if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
6552 goto out_bad;
6553 }
6554 }
6555
6556 jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");
6557
6558 if (bootstrapper) {
dcace88f 6559 bootstrapper->asport = asport;
ddbbfbc1 6560
dcace88f
A
6561 jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
6562 (void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
6563 } else {
6564 jmr->req_asport = asport;
6565 }
6566
6567 if (asport != MACH_PORT_NULL) {
6568 (void)jobmgr_assumes(jmr, launchd_mport_copy_send(asport) == KERN_SUCCESS);
5b0a4722
A
6569 }
6570
6571 if (jmr->parentmgr) {
ddbbfbc1 6572 runtime_add_weak_ref();
5b0a4722
A
6573 }
6574
6575 return jmr;
ed34e3c3
A
6576
6577out_bad:
5b0a4722
A
6578 if (jmr) {
6579 jobmgr_remove(jmr);
dcace88f
A
6580 if (jm == NULL) {
6581 root_jobmgr = NULL;
6582 }
5b0a4722 6583 }
ed34e3c3
A
6584 return NULL;
6585}
6586
dcace88f
A
6587#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
6588jobmgr_t
6589jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
6590{
6591 jobmgr_t new = NULL;
6592
6593 /* These job managers are basically singletons, so we use the root Mach
6594 * bootstrap port as their requestor ports so they'll never go away.
6595 */
6596 mach_port_t req_port = root_jobmgr->jm_port;
6597 if (jobmgr_assumes(jm, launchd_mport_make_send(req_port) == KERN_SUCCESS)) {
6598 new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
6599 if (new) {
6600 new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
6601 new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
6602 new->xpc_singleton = true;
6603 }
6604 }
6605
6606 return new;
6607}
6608
6609jobmgr_t
6610jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
6611{
6612 jobmgr_t jmi = NULL;
6613 LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
6614 if (jmi->req_euid == uid) {
6615 return jmi;
6616 }
6617 }
6618
6619 name_t name;
6620 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
6621 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6622 if (jobmgr_assumes(jm, jmi != NULL)) {
6623 /* We need to create a per-user launchd for this UID if there isn't one
6624 * already so we can grab the bootstrap port.
6625 */
6626 job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
6627 if (jobmgr_assumes(jmi, puj != NULL)) {
6628 (void)jobmgr_assumes(jmi, launchd_mport_copy_send(puj->asport) == KERN_SUCCESS);
6629 (void)jobmgr_assumes(jmi, launchd_mport_copy_send(jmi->req_bsport) == KERN_SUCCESS);
6630 jmi->shortdesc = "per-user";
6631 jmi->req_asport = puj->asport;
6632 jmi->req_asid = puj->asid;
6633 jmi->req_euid = uid;
6634 jmi->req_egid = -1;
6635
6636 LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
6637 } else {
6638 jobmgr_remove(jmi);
6639 }
6640 }
6641
6642 return jmi;
6643}
6644
6645jobmgr_t
6646jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
6647{
6648 jobmgr_t jmi = NULL;
6649 LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
6650 if (jmi->req_asid == asid) {
6651 return jmi;
6652 }
6653 }
6654
6655 name_t name;
6656 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
6657 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6658 if (jobmgr_assumes(jm, jmi != NULL)) {
6659 (void)jobmgr_assumes(jmi, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
6660 jmi->shortdesc = "per-session";
6661 jmi->req_bsport = root_jobmgr->jm_port;
6662 (void)jobmgr_assumes(jmi, audit_session_port(asid, &jmi->req_asport) == 0);
6663 jmi->req_asid = asid;
6664 jmi->req_euid = -1;
6665 jmi->req_egid = -1;
6666
6667 LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
6668 } else {
6669 jobmgr_remove(jmi);
6670 }
6671
6672 return jmi;
6673}
5c88273d 6674#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
dcace88f 6675
5b0a4722
A
6676job_t
6677jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
6678{
6679 const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
6680 char thelabel[1000];
6681 job_t bootstrapper;
6682
6683 snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
6684 bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
ddbbfbc1 6685
dcace88f 6686 if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
ddbbfbc1 6687 bootstrapper->is_bootstrapper = true;
5b0a4722
A
6688 char buf[100];
6689
6690 /* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
6691 snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
ddbbfbc1 6692 envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false, false);
5b0a4722 6693 bootstrapper->weird_bootstrap = true;
dcace88f
A
6694 (void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
6695 } else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
ddbbfbc1 6696 bootstrapper->is_bootstrapper = true;
dcace88f 6697 if (jobmgr_assumes(jm, pid1_magic)) {
ddbbfbc1
A
6698 /* Have our system bootstrapper print out to the console. */
6699 bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
6700 bootstrapper->stderrpath = strdup(_PATH_CONSOLE);
6701
dcace88f
A
6702 if (g_console) {
6703 (void)jobmgr_assumes(jm, kevent_mod((uintptr_t)fileno(g_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
ddbbfbc1
A
6704 }
6705 }
5b0a4722
A
6706 }
6707
6708 jm->session_initialized = true;
5b0a4722
A
6709 return bootstrapper;
6710}
6711
6712jobmgr_t
6713jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
ed34e3c3
A
6714{
6715 struct machservice *ms, *next_ms;
5b0a4722 6716 jobmgr_t jmi, jmn;
ed34e3c3
A
6717
6718 /* Mach ports, unlike Unix descriptors, are reference counted. In other
6719 * words, when some program hands us a second or subsequent send right
6720 * to a port we already have open, the Mach kernel gives us the same
6721 * port number back and increments an reference count associated with
6722 * the port. This forces us, when discovering that a receive right at
6723 * the other end has been deleted, to wander all of our objects to see
6724 * what weird places clients might have handed us the same send right
6725 * to use.
6726 */
6727
5b0a4722
A
6728 if (jm == root_jobmgr) {
6729 if (port == inherited_bootstrap_port) {
dcace88f 6730 (void)jobmgr_assumes(jm, launchd_mport_deallocate(port) == KERN_SUCCESS);
5b0a4722 6731 inherited_bootstrap_port = MACH_PORT_NULL;
ed34e3c3 6732
5b0a4722
A
6733 return jobmgr_shutdown(jm);
6734 }
6735
6736 LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
ddbbfbc1 6737 if (ms->port == port && !ms->recv) {
5b0a4722
A
6738 machservice_delete(ms->job, ms, true);
6739 }
6740 }
6741 }
6742
6743 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6744 jobmgr_delete_anything_with_port(jmi, port);
6745 }
ed34e3c3 6746
5b0a4722 6747 if (jm->req_port == port) {
ddbbfbc1 6748 jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
5b0a4722 6749 return jobmgr_shutdown(jm);
ed34e3c3 6750 }
5b0a4722
A
6751
6752 return jm;
ed34e3c3
A
6753}
6754
6755struct machservice *
5b0a4722 6756jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
ed34e3c3
A
6757{
6758 struct machservice *ms;
ddbbfbc1 6759 job_t target_j;
ed34e3c3 6760
ddbbfbc1 6761 jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);
ed34e3c3 6762
ddbbfbc1
A
6763 if (target_pid) {
6764 /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
6765 * bootstrap in other bootstraps.
6766 */
6767
6768 /* Start in the given bootstrap. */
dcace88f 6769 if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
ddbbfbc1
A
6770 /* If we fail, do a deep traversal. */
6771 if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
6772 jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
6773 return NULL;
6774 }
6775 }
6776
6777 SLIST_FOREACH(ms, &target_j->machservices, sle) {
6778 if (ms->per_pid && strcmp(name, ms->name) == 0) {
ed34e3c3 6779 return ms;
5b0a4722 6780 }
ed34e3c3 6781 }
ed34e3c3 6782
ddbbfbc1 6783 job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
ed34e3c3 6784 return NULL;
5b0a4722 6785 }
ddbbfbc1 6786
dcace88f
A
6787 jobmgr_t where2look = jm;
6788 /* XPC domains are separate from Mach bootstraps. */
6789 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
6790 if (g_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
6791 where2look = root_jobmgr;
6792 }
6793 }
6794
6795 LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
6796 if (!ms->per_pid && strcmp(name, ms->name) == 0) {
6797 return ms;
ddbbfbc1
A
6798 }
6799 }
ed34e3c3 6800
ddbbfbc1 6801 if (jm->parentmgr == NULL || !check_parent) {
ed34e3c3 6802 return NULL;
5b0a4722 6803 }
ed34e3c3 6804
5b0a4722 6805 return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
ed34e3c3
A
6806}
6807
6808mach_port_t
6809machservice_port(struct machservice *ms)
6810{
6811 return ms->port;
6812}
6813
5b0a4722 6814job_t
ed34e3c3
A
6815machservice_job(struct machservice *ms)
6816{
6817 return ms->job;
6818}
6819
6820bool
6821machservice_hidden(struct machservice *ms)
6822{
6823 return ms->hide;
6824}
6825
6826bool
6827machservice_active(struct machservice *ms)
6828{
6829 return ms->isActive;
6830}
6831
6832const char *
6833machservice_name(struct machservice *ms)
6834{
6835 return ms->name;
6836}
6837
ddbbfbc1
A
6838void
6839machservice_drain_port(struct machservice *ms)
6840{
6841 bool drain_one = ms->drain_one_on_crash;
6842 bool drain_all = ms->drain_all_on_crash;
6843
dcace88f 6844 if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
ddbbfbc1
A
6845 return;
6846 }
6847
6848 job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);
6849
6850 char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
6851 char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
6852 mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
6853 mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;
6854
6855 mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
6856
6857 do {
6858 /* This should be a direct check on the Mach service to see if it's an exception-handling
6859 * port, and it will break things if ReportCrash or SafetyNet start advertising other
6860 * Mach services. But for now, it should be okay.
6861 */
dcace88f 6862 if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
ddbbfbc1
A
6863 mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
6864 } else {
6865 mach_msg_options_t options = MACH_RCV_MSG |
6866 MACH_RCV_TIMEOUT ;
6867
6868 mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
dcace88f
A
6869 switch (mr) {
6870 case MACH_MSG_SUCCESS:
6871 mach_msg_destroy((mach_msg_header_t *)req_hdr);
6872 break;
6873 case MACH_RCV_TIMED_OUT:
6874 break;
6875 case MACH_RCV_TOO_LARGE:
6876 runtime_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
6877 break;
6878 default:
6879 break;
ddbbfbc1
A
6880 }
6881 }
dcace88f 6882 } while (drain_all && mr != MACH_RCV_TIMED_OUT);
ddbbfbc1
A
6883}
6884
ed34e3c3 6885void
5b0a4722 6886machservice_delete(job_t j, struct machservice *ms, bool port_died)
ed34e3c3 6887{
dcace88f
A
6888 if (ms->alias) {
6889 /* HACK: Egregious code duplication. But dealing with aliases is a
6890 * pretty simple affair since they can't and shouldn't have any complex
6891 * behaviors associated with them.
6892 */
6893 LIST_REMOVE(ms, name_hash_sle);
6894 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
6895 free(ms);
6896 return;
6897 }
6898
ddbbfbc1 6899 if (unlikely(ms->debug_on_close)) {
5b0a4722 6900 job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
dcace88f 6901 (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
5b0a4722
A
6902 }
6903
ddbbfbc1
A
6904 if (ms->recv && job_assumes(j, !machservice_active(ms))) {
6905 job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
dcace88f 6906 (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
5b0a4722
A
6907 }
6908
dcace88f 6909 (void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
5b0a4722 6910
ddbbfbc1 6911 if (unlikely(ms->port == the_exception_server)) {
5b0a4722 6912 the_exception_server = 0;
ed34e3c3
A
6913 }
6914
ddbbfbc1 6915 job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);
ed34e3c3 6916
5b0a4722
A
6917 if (ms->special_port_num) {
6918 SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
6919 }
5b0a4722 6920 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
dcace88f
A
6921
6922 if (!(j->dedicated_instance || ms->event_channel)) {
6923 LIST_REMOVE(ms, name_hash_sle);
6924 }
5b0a4722 6925 LIST_REMOVE(ms, port_hash_sle);
ed34e3c3
A
6926
6927 free(ms);
6928}
6929
6930void
5b0a4722 6931machservice_request_notifications(struct machservice *ms)
ed34e3c3
A
6932{
6933 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
6934
6935 ms->isActive = true;
6936
5b0a4722 6937 if (ms->recv) {
ed34e3c3
A
6938 which = MACH_NOTIFY_PORT_DESTROYED;
6939 job_checkin(ms->job);
6940 }
6941
dcace88f 6942 (void)job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
ed34e3c3
A
6943}
6944
5b0a4722
A
6945#define NELEM(x) (sizeof(x)/sizeof(x[0]))
6946#define END_OF(x) (&(x)[NELEM(x)])
ed34e3c3
A
6947
6948char **
6949mach_cmd2argv(const char *string)
6950{
6951 char *argv[100], args[1000];
6952 const char *cp;
6953 char *argp = args, term, **argv_ret, *co;
6954 unsigned int nargs = 0, i;
6955
6956 for (cp = string; *cp;) {
6957 while (isspace(*cp))
6958 cp++;
6959 term = (*cp == '"') ? *cp++ : '\0';
5b0a4722 6960 if (nargs < NELEM(argv)) {
ed34e3c3 6961 argv[nargs++] = argp;
5b0a4722 6962 }
ed34e3c3 6963 while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args)) {
5b0a4722 6964 if (*cp == '\\') {
ed34e3c3 6965 cp++;
5b0a4722 6966 }
ed34e3c3 6967 *argp++ = *cp;
5b0a4722 6968 if (*cp) {
ed34e3c3 6969 cp++;
5b0a4722 6970 }
ed34e3c3
A
6971 }
6972 *argp++ = '\0';
6973 }
6974 argv[nargs] = NULL;
6975
5b0a4722 6976 if (nargs == 0) {
ed34e3c3 6977 return NULL;
5b0a4722 6978 }
ed34e3c3
A
6979
6980 argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);
6981
5b0a4722 6982 if (!launchd_assumes(argv_ret != NULL)) {
ed34e3c3 6983 return NULL;
5b0a4722 6984 }
ed34e3c3
A
6985
6986 co = (char *)argv_ret + (nargs + 1) * sizeof(char *);
6987
6988 for (i = 0; i < nargs; i++) {
6989 strcpy(co, argv[i]);
6990 argv_ret[i] = co;
6991 co += strlen(argv[i]) + 1;
6992 }
6993 argv_ret[i] = NULL;
6994
6995 return argv_ret;
6996}
6997
6998void
5b0a4722 6999job_checkin(job_t j)
ed34e3c3
A
7000{
7001 j->checkedin = true;
7002}
7003
ddbbfbc1
A
7004bool job_is_god(job_t j)
7005{
7006 return j->embedded_special_privileges;
7007}
7008
ed34e3c3 7009bool
5b0a4722 7010job_ack_port_destruction(mach_port_t p)
ed34e3c3 7011{
ed34e3c3 7012 struct machservice *ms;
ddbbfbc1 7013 job_t j;
ed34e3c3 7014
5b0a4722
A
7015 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
7016 if (ms->recv && (ms->port == p)) {
ed34e3c3 7017 break;
5b0a4722 7018 }
ed34e3c3
A
7019 }
7020
ddbbfbc1 7021 if (!jobmgr_assumes(root_jobmgr, ms != NULL)) {
ed34e3c3 7022 return false;
5b0a4722 7023 }
ed34e3c3 7024
ddbbfbc1 7025 j = ms->job;
ed34e3c3 7026
ddbbfbc1
A
7027 jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
7028
7029 /* Without being the exception handler, NOTE_EXIT is our only way to tell if the job
7030 * crashed, and we can't rely on NOTE_EXIT always being processed after all the job's
7031 * receive rights have been returned.
7032 *
7033 * So when we get receive rights back, check to see if the job has been reaped yet. If
7034 * not, then we add this service to a list of services to be drained on crash if it's
7035 * requested that behavior. So, for a job with N receive rights all requesting that they
7036 * be drained on crash, we can safely handle the following sequence of events.
7037 *
7038 * ReceiveRight0Returned
7039 * ReceiveRight1Returned
7040 * ReceiveRight2Returned
7041 * NOTE_EXIT (reap, get exit status)
7042 * ReceiveRight3Returned
7043 * .
7044 * .
7045 * .
7046 * ReceiveRight(N - 1)Returned
7047 */
7048
dcace88f
A
7049 if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
7050 if (j->crashed && j->reaped) {
ddbbfbc1
A
7051 job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
7052 machservice_drain_port(ms);
dcace88f 7053 } else if (!(j->crashed || j->reaped)) {
ddbbfbc1
A
7054 job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
7055 }
5b0a4722 7056 }
ddbbfbc1 7057
dcace88f
A
7058 /* If we get this notification after the job has been reaped, then we want to ping
7059 * the event port to keep things going.
7060 */
7061 if (ms->event_update_port && !j->p && job_assumes(j, j->event_monitor)) {
7062 if (_s_event_update_port == MACH_PORT_NULL) {
7063 (void)job_assumes(j, launchd_mport_make_send_once(ms->port, &_s_event_update_port) == KERN_SUCCESS);
7064 }
7065 eventsystem_ping();
7066 }
7067
ddbbfbc1
A
7068 ms->isActive = false;
7069 if (ms->delete_on_destruction) {
7070 machservice_delete(j, ms, false);
7071 } else if (ms->reset) {
7072 machservice_resetport(j, ms);
7073 }
7074
7075 job_dispatch(j, false);
ed34e3c3 7076
5b0a4722 7077 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
ed34e3c3
A
7078
7079 return true;
7080}
7081
7082void
5b0a4722 7083job_ack_no_senders(job_t j)
ed34e3c3
A
7084{
7085 j->priv_port_has_senders = false;
7086
dcace88f 7087 (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
5b0a4722
A
7088 j->j_port = 0;
7089
ed34e3c3
A
7090 job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");
7091
5b0a4722 7092 job_dispatch(j, false);
ed34e3c3
A
7093}
7094
ed34e3c3 7095bool
5b0a4722 7096semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
ed34e3c3
A
7097{
7098 struct semaphoreitem *si;
7099 size_t alloc_sz = sizeof(struct semaphoreitem);
7100
5b0a4722 7101 if (what) {
ed34e3c3 7102 alloc_sz += strlen(what) + 1;
5b0a4722 7103 }
ed34e3c3 7104
5b0a4722 7105 if (!job_assumes(j, si = calloc(1, alloc_sz))) {
ed34e3c3 7106 return false;
5b0a4722 7107 }
ed34e3c3 7108
5b0a4722 7109 si->fd = -1;
ed34e3c3
A
7110 si->why = why;
7111
5b0a4722 7112 if (what) {
f36da725 7113 strcpy(si->what_init, what);
5b0a4722 7114 }
ed34e3c3
A
7115
7116 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
ddbbfbc1 7117
dcace88f 7118 if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
ddbbfbc1
A
7119 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
7120 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
7121 j->nosy = true;
7122 }
ed34e3c3 7123
5b0a4722
A
7124 semaphoreitem_runtime_mod_ref(si, true);
7125
ed34e3c3
A
7126 return true;
7127}
7128
7129void
5b0a4722
A
7130semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
7131{
7132 /*
7133 * External events need to be tracked.
7134 * Internal events do NOT need to be tracked.
7135 */
7136
7137 switch (si->why) {
7138 case SUCCESSFUL_EXIT:
7139 case FAILED_EXIT:
7140 case OTHER_JOB_ENABLED:
7141 case OTHER_JOB_DISABLED:
7142 case OTHER_JOB_ACTIVE:
7143 case OTHER_JOB_INACTIVE:
7144 return;
7145 default:
7146 break;
7147 }
7148
7149 if (add) {
ddbbfbc1 7150 runtime_add_weak_ref();
5b0a4722 7151 } else {
ddbbfbc1 7152 runtime_del_weak_ref();
5b0a4722
A
7153 }
7154}
7155
7156void
7157semaphoreitem_delete(job_t j, struct semaphoreitem *si)
ed34e3c3 7158{
5b0a4722
A
7159 semaphoreitem_runtime_mod_ref(si, false);
7160
7161 SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
ed34e3c3 7162
5b0a4722 7163 if (si->fd != -1) {
dcace88f 7164 (void)job_assumes(j, runtime_close(si->fd) != -1);
5b0a4722 7165 }
ddbbfbc1
A
7166
7167 /* We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores. */
dcace88f 7168 if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
ddbbfbc1
A
7169 j->nosy = false;
7170 SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
7171 }
dcace88f 7172
5b0a4722 7173 free(si);
ed34e3c3
A
7174}
7175
7176void
5b0a4722 7177semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
ed34e3c3 7178{
5b0a4722 7179 struct semaphoreitem_dict_iter_context *sdic = context;
ed34e3c3
A
7180 semaphore_reason_t why;
7181
5b0a4722 7182 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
ed34e3c3 7183
5b0a4722 7184 semaphoreitem_new(sdic->j, why, key);
ed34e3c3
A
7185}
7186
7187void
7188semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
7189{
5b0a4722
A
7190 struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
7191 job_t j = context;
ed34e3c3
A
7192 semaphore_reason_t why;
7193
5b0a4722
A
7194 switch (launch_data_get_type(obj)) {
7195 case LAUNCH_DATA_BOOL:
7196 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
7197 why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
7198 semaphoreitem_new(j, why, NULL);
7199 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
7200 why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
7201 semaphoreitem_new(j, why, NULL);
7202 j->start_pending = true;
dcace88f 7203 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
ddbbfbc1 7204 j->needs_kickoff = launch_data_get_bool(obj);
dcace88f
A
7205 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
7206 why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
7207 semaphoreitem_new(j, why, NULL);
7208 j->start_pending = true;
5b0a4722 7209 } else {
dcace88f 7210 (void)job_assumes(j, false);
5b0a4722
A
7211 }
7212 break;
7213 case LAUNCH_DATA_DICTIONARY:
7214 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
7215 sdic.why_true = PATH_EXISTS;
7216 sdic.why_false = PATH_MISSING;
7217 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
7218 sdic.why_true = OTHER_JOB_ACTIVE;
7219 sdic.why_false = OTHER_JOB_INACTIVE;
7220 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
7221 sdic.why_true = OTHER_JOB_ENABLED;
7222 sdic.why_false = OTHER_JOB_DISABLED;
7223 } else {
dcace88f 7224 (void)job_assumes(j, false);
5b0a4722
A
7225 break;
7226 }
7227
7228 launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
7229 break;
7230 default:
dcace88f 7231 (void)job_assumes(j, false);
5b0a4722 7232 break;
ed34e3c3
A
7233 }
7234}
7235
dcace88f
A
7236bool
7237externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event)
7238{
7239 struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
7240 if (job_assumes(j, ee != NULL)) {
7241 ee->event = launch_data_copy(event);
7242 if (job_assumes(j, ee->event != NULL)) {
7243 strcpy(ee->name, evname);
7244 ee->job = j;
7245 ee->id = sys->curid;
7246 ee->sys = sys;
7247 ee->state = false;
7248 ee->wanted_state = true;
7249 sys->curid++;
7250
7251 LIST_INSERT_HEAD(&j->events, ee, job_le);
7252 LIST_INSERT_HEAD(&sys->events, ee, sys_le);
7253
7254 job_log(j, LOG_DEBUG, "New event: %s:%s", sys->name, evname);
7255 } else {
7256 free(ee);
7257 ee = NULL;
7258 }
7259 }
7260
7261 eventsystem_ping();
7262 return ee;
7263}
7264
7265void
7266externalevent_delete(struct externalevent *ee)
7267{
7268 launch_data_free(ee->event);
7269 LIST_REMOVE(ee, job_le);
7270 LIST_REMOVE(ee, sys_le);
7271
7272 free(ee);
7273
7274 eventsystem_ping();
7275}
7276
7277void
7278externalevent_setup(launch_data_t obj, const char *key, void *context)
7279{
7280 struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
7281 (void)job_assumes(ctx->j, externalevent_new(ctx->j, ctx->sys, (char *)key, obj));
7282}
7283
7284struct externalevent *
7285externalevent_find(const char *sysname, uint64_t id)
7286{
7287 struct externalevent *ei = NULL;
7288
7289 struct eventsystem *es = eventsystem_find(sysname);
7290 if (launchd_assumes(es != NULL)) {
7291 LIST_FOREACH(ei, &es->events, sys_le) {
7292 if (ei->id == id) {
7293 break;
7294 }
7295 }
7296 }
7297
7298 return ei;
7299}
7300
7301struct eventsystem *
7302eventsystem_new(const char *name)
7303{
7304 struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
7305 if (launchd_assumes(es != NULL)) {
7306 strcpy(es->name, name);
7307 LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
7308 }
7309
7310 return es;
7311}
7312
7313void
7314eventsystem_delete(struct eventsystem *es)
7315{
7316 struct externalevent *ei = NULL;
7317 while ((ei = LIST_FIRST(&es->events))) {
7318 externalevent_delete(ei);
7319 }
7320
7321 LIST_REMOVE(es, global_le);
7322
7323 free(es);
7324}
7325
7326void
7327eventsystem_setup(launch_data_t obj, const char *key, void *context)
7328{
7329 job_t j = (job_t)context;
7330 if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
7331 return;
7332 }
7333
7334 struct eventsystem *sys = eventsystem_find(key);
7335 if (unlikely(sys == NULL)) {
7336 sys = eventsystem_new(key);
7337 job_log(j, LOG_DEBUG, "New event system: %s", key);
7338 }
7339
7340 if (job_assumes(j, sys != NULL)) {
7341 struct externalevent_iter_ctx ctx = {
7342 .j = j,
7343 .sys = sys,
7344 };
7345 launch_data_dict_iterate(obj, externalevent_setup, &ctx);
7346 sys->has_updates = true;
7347 }
7348}
7349
7350struct eventsystem *
7351eventsystem_find(const char *name)
7352{
7353 struct eventsystem *esi = NULL;
7354 LIST_FOREACH(esi, &_s_event_systems, global_le) {
7355 if (strcmp(name, esi->name) == 0) {
7356 break;
7357 }
7358 }
7359
7360 return esi;
7361}
7362
7363void
7364eventsystem_ping(void)
7365{
7366 /* We don't wrap this in an assumes() macro because we could potentially
7367 * call this function many times before the helper job gets back to us
7368 * and gives us another send-once right. So if it's MACH_PORT_NULL, that
7369 * means that we've sent a ping, but the helper hasn't yet checked in to
7370 * get the new set of notifications.
7371 */
7372 if (_s_event_update_port != MACH_PORT_NULL) {
7373 kern_return_t kr = helper_downcall_ping(_s_event_update_port);
7374 if (kr != KERN_SUCCESS) {
7375 runtime_syslog(LOG_NOTICE, "helper_downcall_ping(): kr = 0x%x", kr);
7376 }
7377 _s_event_update_port = MACH_PORT_NULL;
7378 }
7379}
7380
ed34e3c3 7381void
5b0a4722 7382jobmgr_dispatch_all_semaphores(jobmgr_t jm)
ed34e3c3 7383{
5b0a4722
A
7384 jobmgr_t jmi, jmn;
7385 job_t ji, jn;
ed34e3c3 7386
ed34e3c3 7387
5b0a4722
A
7388 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7389 jobmgr_dispatch_all_semaphores(jmi);
7390 }
ed34e3c3 7391
5b0a4722
A
7392 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7393 if (!SLIST_EMPTY(&ji->semaphores)) {
7394 job_dispatch(ji, false);
7395 }
7396 }
ed34e3c3
A
7397}
7398
7399time_t
7400cronemu(int mon, int mday, int hour, int min)
7401{
7402 struct tm workingtm;
7403 time_t now;
7404
7405 now = time(NULL);
7406 workingtm = *localtime(&now);
7407
7408 workingtm.tm_isdst = -1;
7409 workingtm.tm_sec = 0;
7410 workingtm.tm_min++;
7411
7412 while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
7413 workingtm.tm_year++;
7414 workingtm.tm_mon = 0;
7415 workingtm.tm_mday = 1;
7416 workingtm.tm_hour = 0;
7417 workingtm.tm_min = 0;
7418 mktime(&workingtm);
7419 }
7420
7421 return mktime(&workingtm);
7422}
7423
7424time_t
7425cronemu_wday(int wday, int hour, int min)
7426{
7427 struct tm workingtm;
7428 time_t now;
7429
7430 now = time(NULL);
7431 workingtm = *localtime(&now);
7432
7433 workingtm.tm_isdst = -1;
7434 workingtm.tm_sec = 0;
7435 workingtm.tm_min++;
7436
5b0a4722 7437 if (wday == 7) {
ed34e3c3 7438 wday = 0;
5b0a4722 7439 }
ed34e3c3
A
7440
7441 while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
7442 workingtm.tm_mday++;
7443 workingtm.tm_hour = 0;
7444 workingtm.tm_min = 0;
7445 mktime(&workingtm);
7446 }
7447
7448 return mktime(&workingtm);
7449}
7450
7451bool
7452cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
7453{
7454 if (mon == -1) {
7455 struct tm workingtm = *wtm;
7456 int carrytest;
7457
7458 while (!cronemu_mday(&workingtm, mday, hour, min)) {
7459 workingtm.tm_mon++;
7460 workingtm.tm_mday = 1;
7461 workingtm.tm_hour = 0;
7462 workingtm.tm_min = 0;
7463 carrytest = workingtm.tm_mon;
7464 mktime(&workingtm);
5b0a4722 7465 if (carrytest != workingtm.tm_mon) {
ed34e3c3 7466 return false;
5b0a4722 7467 }
ed34e3c3
A
7468 }
7469 *wtm = workingtm;
7470 return true;
7471 }
7472
5b0a4722 7473 if (mon < wtm->tm_mon) {
ed34e3c3 7474 return false;
5b0a4722 7475 }
ed34e3c3 7476
5b0a4722 7477 if (mon > wtm->tm_mon) {
ed34e3c3
A
7478 wtm->tm_mon = mon;
7479 wtm->tm_mday = 1;
7480 wtm->tm_hour = 0;
7481 wtm->tm_min = 0;
7482 }
7483
7484 return cronemu_mday(wtm, mday, hour, min);
7485}
7486
7487bool
7488cronemu_mday(struct tm *wtm, int mday, int hour, int min)
7489{
7490 if (mday == -1) {
7491 struct tm workingtm = *wtm;
7492 int carrytest;
7493
7494 while (!cronemu_hour(&workingtm, hour, min)) {
7495 workingtm.tm_mday++;
7496 workingtm.tm_hour = 0;
7497 workingtm.tm_min = 0;
7498 carrytest = workingtm.tm_mday;
7499 mktime(&workingtm);
5b0a4722 7500 if (carrytest != workingtm.tm_mday) {
ed34e3c3 7501 return false;
5b0a4722 7502 }
ed34e3c3
A
7503 }
7504 *wtm = workingtm;
7505 return true;
7506 }
7507
5b0a4722 7508 if (mday < wtm->tm_mday) {
ed34e3c3 7509 return false;
5b0a4722 7510 }
ed34e3c3 7511
5b0a4722 7512 if (mday > wtm->tm_mday) {
ed34e3c3
A
7513 wtm->tm_mday = mday;
7514 wtm->tm_hour = 0;
7515 wtm->tm_min = 0;
7516 }
7517
7518 return cronemu_hour(wtm, hour, min);
7519}
7520
7521bool
7522cronemu_hour(struct tm *wtm, int hour, int min)
7523{
7524 if (hour == -1) {
7525 struct tm workingtm = *wtm;
7526 int carrytest;
7527
7528 while (!cronemu_min(&workingtm, min)) {
7529 workingtm.tm_hour++;
7530 workingtm.tm_min = 0;
7531 carrytest = workingtm.tm_hour;
7532 mktime(&workingtm);
5b0a4722 7533 if (carrytest != workingtm.tm_hour) {
ed34e3c3 7534 return false;
5b0a4722 7535 }
ed34e3c3
A
7536 }
7537 *wtm = workingtm;
7538 return true;
7539 }
7540
5b0a4722 7541 if (hour < wtm->tm_hour) {
ed34e3c3 7542 return false;
5b0a4722 7543 }
ed34e3c3
A
7544
7545 if (hour > wtm->tm_hour) {
7546 wtm->tm_hour = hour;
7547 wtm->tm_min = 0;
7548 }
7549
7550 return cronemu_min(wtm, min);
7551}
7552
7553bool
7554cronemu_min(struct tm *wtm, int min)
7555{
5b0a4722 7556 if (min == -1) {
ed34e3c3 7557 return true;
5b0a4722 7558 }
ed34e3c3 7559
5b0a4722 7560 if (min < wtm->tm_min) {
ed34e3c3 7561 return false;
5b0a4722 7562 }
ed34e3c3
A
7563
7564 if (min > wtm->tm_min) {
7565 wtm->tm_min = min;
7566 }
7567
7568 return true;
7569}
5b0a4722 7570
ddbbfbc1
A
7571kern_return_t
7572job_mig_setup_shmem(job_t j, mach_port_t *shmem_port)
7573{
7574 memory_object_size_t size_of_page, size_of_page_orig;
7575 vm_address_t vm_addr;
7576 kern_return_t kr;
7577
7578 if (!launchd_assumes(j != NULL)) {
7579 return BOOTSTRAP_NO_MEMORY;
7580 }
7581
7582 if (unlikely(j->anonymous)) {
7583 job_log(j, LOG_DEBUG, "Anonymous job tried to setup shared memory");
7584 return BOOTSTRAP_NOT_PRIVILEGED;
7585 }
7586
7587 if (unlikely(j->shmem)) {
7588 job_log(j, LOG_ERR, "Tried to setup shared memory more than once");
7589 return BOOTSTRAP_NOT_PRIVILEGED;
7590 }
7591
7592 size_of_page_orig = size_of_page = getpagesize();
7593
7594 kr = vm_allocate(mach_task_self(), &vm_addr, size_of_page, true);
7595
7596 if (!job_assumes(j, kr == 0)) {
7597 return kr;
7598 }
7599
7600 j->shmem = (typeof(j->shmem))vm_addr;
7601 j->shmem->vp_shmem_standby_timeout = j->timeout;
7602
7603 kr = mach_make_memory_entry_64(mach_task_self(), &size_of_page,
7604 (memory_object_offset_t)vm_addr, VM_PROT_READ|VM_PROT_WRITE, shmem_port, 0);
7605
7606 if (job_assumes(j, kr == 0)) {
dcace88f 7607 (void)job_assumes(j, size_of_page == size_of_page_orig);
ddbbfbc1
A
7608 }
7609
7610 /* no need to inherit this in child processes */
dcace88f 7611 (void)job_assumes(j, vm_inherit(mach_task_self(), (vm_address_t)j->shmem, size_of_page_orig, VM_INHERIT_NONE) == 0);
ddbbfbc1
A
7612
7613 return kr;
7614}
7615
5b0a4722
A
7616kern_return_t
7617job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
7618{
ddbbfbc1 7619 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
7620 job_t js;
7621
7622 if (!launchd_assumes(j != NULL)) {
7623 return BOOTSTRAP_NO_MEMORY;
7624 }
7625
f36da725
A
7626 if (unlikely(j->deny_job_creation)) {
7627 return BOOTSTRAP_NOT_PRIVILEGED;
7628 }
7629
ddbbfbc1
A
7630#if HAVE_SANDBOX
7631 const char **argv = (const char **)mach_cmd2argv(server_cmd);
7632 if (unlikely(argv == NULL)) {
7633 return BOOTSTRAP_NO_MEMORY;
7634 }
7635 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
7636 free(argv);
7637 return BOOTSTRAP_NOT_PRIVILEGED;
7638 }
7639 free(argv);
7640#endif
5b0a4722
A
7641
7642 job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);
7643
ddbbfbc1
A
7644 if (pid1_magic) {
7645 if (ldc->euid || ldc->uid) {
7646 job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
7647 return VPROC_ERR_TRY_PER_USER;
5b0a4722 7648 }
ddbbfbc1
A
7649 } else {
7650 if (unlikely(server_uid != getuid())) {
5b0a4722
A
7651 job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
7652 server_cmd, getuid(), server_uid);
7653 }
7654 server_uid = 0; /* zero means "do nothing" */
7655 }
7656
7657 js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);
7658
ddbbfbc1 7659 if (unlikely(js == NULL)) {
5b0a4722
A
7660 return BOOTSTRAP_NO_MEMORY;
7661 }
7662
7663 *server_portp = js->j_port;
7664 return BOOTSTRAP_SUCCESS;
7665}
7666
7667kern_return_t
7668job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
7669{
ddbbfbc1 7670 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
7671 job_t otherj;
7672
7673 if (!launchd_assumes(j != NULL)) {
7674 return BOOTSTRAP_NO_MEMORY;
7675 }
7676
dcace88f 7677 if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
ddbbfbc1 7678 #if TARGET_OS_EMBEDDED
dcace88f 7679 if (!j->embedded_special_privileges) {
ddbbfbc1
A
7680 return BOOTSTRAP_NOT_PRIVILEGED;
7681 }
7682 #else
5b0a4722 7683 return BOOTSTRAP_NOT_PRIVILEGED;
ddbbfbc1 7684 #endif
5b0a4722
A
7685 }
7686
ddbbfbc1
A
7687#if HAVE_SANDBOX
7688 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
7689 return BOOTSTRAP_NOT_PRIVILEGED;
7690 }
7691#endif
7692
dcace88f 7693 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
5b0a4722
A
7694 return BOOTSTRAP_UNKNOWN_SERVICE;
7695 }
7696
ddbbfbc1 7697#if TARGET_OS_EMBEDDED
dcace88f 7698 if (j->embedded_special_privileges && strcmp(j->username, otherj->username) != 0) {
ddbbfbc1
A
7699 return BOOTSTRAP_NOT_PRIVILEGED;
7700 }
7701#endif
7702
7703 if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
7704 bool do_block = otherj->p;
7705
5b0a4722
A
7706 if (otherj->anonymous) {
7707 return BOOTSTRAP_NOT_PRIVILEGED;
7708 }
7709
7710 job_remove(otherj);
7711
7712 if (do_block) {
7713 job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
7714 /* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
dcace88f 7715 (void)job_assumes(otherj, waiting4removal_new(otherj, srp));
5b0a4722
A
7716 return MIG_NO_REPLY;
7717 } else {
7718 return 0;
7719 }
ddbbfbc1
A
7720 } else if (sig == VPROC_MAGIC_TRYKILL_SIGNAL) {
7721 if (!j->kill_via_shmem) {
7722 return BOOTSTRAP_NOT_PRIVILEGED;
7723 }
7724
7725 if (!j->shmem) {
7726 j->sent_kill_via_shmem = true;
dcace88f 7727 (void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
ddbbfbc1
A
7728 return 0;
7729 }
dcace88f
A
7730
7731#if !TARGET_OS_EMBEDDED
ddbbfbc1
A
7732 if (__sync_bool_compare_and_swap(&j->shmem->vp_shmem_transaction_cnt, 0, -1)) {
7733 j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
7734 j->sent_kill_via_shmem = true;
dcace88f 7735 (void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
ddbbfbc1
A
7736 return 0;
7737 }
dcace88f 7738#endif
ddbbfbc1 7739 return BOOTSTRAP_NOT_PRIVILEGED;
5b0a4722 7740 } else if (otherj->p) {
dcace88f 7741 (void)job_assumes(j, runtime_kill(otherj->p, sig) != -1);
5b0a4722
A
7742 }
7743
7744 return 0;
7745}
7746
7747kern_return_t
7748job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
7749{
ddbbfbc1 7750 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
7751
7752 if (!launchd_assumes(j != NULL)) {
7753 return BOOTSTRAP_NO_MEMORY;
7754 }
7755
7756 if (!job_assumes(j, j->per_user)) {
7757 return BOOTSTRAP_NOT_PRIVILEGED;
7758 }
7759
ddbbfbc1 7760 return runtime_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
5b0a4722
A
7761}
7762
7763kern_return_t
7764job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
7765{
ddbbfbc1 7766 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
7767
7768 if (!launchd_assumes(j != NULL)) {
7769 return BOOTSTRAP_NO_MEMORY;
7770 }
7771
ddbbfbc1 7772 if (unlikely(ldc->euid)) {
5b0a4722
A
7773 return BOOTSTRAP_NOT_PRIVILEGED;
7774 }
7775
7776 return runtime_log_drain(srp, outval, outvalCnt);
7777}
7778
7779kern_return_t
dcace88f 7780job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
5b0a4722
A
7781{
7782 const char *action;
ddbbfbc1 7783 launch_data_t input_obj = NULL, output_obj = NULL;
5b0a4722
A
7784 size_t data_offset = 0;
7785 size_t packed_size;
ddbbfbc1 7786 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
7787
7788 if (!launchd_assumes(j != NULL)) {
7789 return BOOTSTRAP_NO_MEMORY;
7790 }
ddbbfbc1 7791 if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
5b0a4722
A
7792 return BOOTSTRAP_NOT_PRIVILEGED;
7793 }
ddbbfbc1 7794 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
5b0a4722
A
7795 return 1;
7796 }
7797
7798 if (inkey && outkey) {
7799 action = "Swapping";
7800 } else if (inkey) {
7801 action = "Setting";
7802 } else {
7803 action = "Getting";
7804 }
7805
7806 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
7807
7808 *outvalCnt = 20 * 1024 * 1024;
7809 mig_allocate(outval, *outvalCnt);
7810 if (!job_assumes(j, *outval != 0)) {
7811 return 1;
7812 }
7813
dcace88f
A
7814 /* Note to future maintainers: launch_data_unpack() does NOT return a heap object. The data
7815 * is decoded in-place. So do not call launch_data_free() on input_obj.
7816 */
ddbbfbc1
A
7817 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
7818 if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
5b0a4722
A
7819 goto out_bad;
7820 }
7821
7822 switch (outkey) {
7823 case VPROC_GSK_ENVIRONMENT:
7824 if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
7825 goto out_bad;
7826 }
7827 jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
ddbbfbc1 7828 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
5b0a4722
A
7829 if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
7830 goto out_bad;
7831 }
7832 launch_data_free(output_obj);
7833 break;
7834 case VPROC_GSK_ALLJOBS:
7835 if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
7836 goto out_bad;
7837 }
7838 ipc_revoke_fds(output_obj);
ddbbfbc1
A
7839 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
7840 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7841 if (!job_assumes(j, packed_size != 0)) {
7842 goto out_bad;
7843 }
7844 launch_data_free(output_obj);
7845 break;
7846 case VPROC_GSK_MGR_NAME:
dcace88f 7847 if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
ddbbfbc1
A
7848 goto out_bad;
7849 }
7850 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7851 if (!job_assumes(j, packed_size != 0)) {
7852 goto out_bad;
7853 }
dcace88f 7854
ddbbfbc1
A
7855 launch_data_free(output_obj);
7856 break;
7857 case VPROC_GSK_JOB_OVERRIDES_DB:
dcace88f 7858 if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_OVERRIDES))) != NULL)) {
ddbbfbc1
A
7859 goto out_bad;
7860 }
7861 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7862 if (!job_assumes(j, packed_size != 0)) {
7863 goto out_bad;
7864 }
7865
7866 launch_data_free(output_obj);
7867 break;
7868 case VPROC_GSK_JOB_CACHE_DB:
dcace88f 7869 if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_JOBCACHE))) != NULL)) {
ddbbfbc1
A
7870 goto out_bad;
7871 }
5b0a4722
A
7872 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7873 if (!job_assumes(j, packed_size != 0)) {
7874 goto out_bad;
7875 }
ddbbfbc1
A
7876
7877 job_log(j, LOG_DEBUG, "Location of job cache database: %s", launch_data_get_string(output_obj));
dcace88f 7878
5b0a4722
A
7879 launch_data_free(output_obj);
7880 break;
7881 case 0:
7882 mig_deallocate(*outval, *outvalCnt);
7883 *outval = 0;
7884 *outvalCnt = 0;
7885 break;
7886 default:
7887 goto out_bad;
7888 }
7889
7890 if (invalCnt) switch (inkey) {
dcace88f
A
7891 case VPROC_GSK_ENVIRONMENT:
7892 if (launch_data_get_type(input_obj) == LAUNCH_DATA_DICTIONARY) {
7893 if (j->p) {
7894 job_log(j, LOG_INFO, "Setting environment for a currently active job. This environment will take effect on the next invocation of the job.");
7895 }
7896 launch_data_dict_iterate(input_obj, envitem_setup_one_shot, j);
ddbbfbc1 7897 }
dcace88f
A
7898 break;
7899 case 0:
7900 break;
7901 default:
7902 goto out_bad;
5b0a4722 7903 }
dcace88f 7904
5b0a4722 7905 mig_deallocate(inval, invalCnt);
5b0a4722 7906 return 0;
dcace88f 7907
5b0a4722 7908out_bad:
dcace88f 7909 mig_deallocate(inval, invalCnt);
5b0a4722
A
7910 if (*outval) {
7911 mig_deallocate(*outval, *outvalCnt);
7912 }
dcace88f
A
7913 if (output_obj) {
7914 launch_data_free(output_obj);
7915 }
7916
5b0a4722
A
7917 return 1;
7918}
7919
7920kern_return_t
7921job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
7922{
7923 const char *action;
7924 kern_return_t kr = 0;
ddbbfbc1 7925 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
7926 int oldmask;
7927
5b0a4722
A
7928 if (!launchd_assumes(j != NULL)) {
7929 return BOOTSTRAP_NO_MEMORY;
7930 }
7931
ddbbfbc1 7932 if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
5b0a4722
A
7933 return BOOTSTRAP_NOT_PRIVILEGED;
7934 }
7935
ddbbfbc1 7936 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
5b0a4722
A
7937 return 1;
7938 }
7939
7940 if (inkey && outkey) {
7941 action = "Swapping";
7942 } else if (inkey) {
7943 action = "Setting";
7944 } else {
7945 action = "Getting";
7946 }
7947
7948 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
7949
7950 switch (outkey) {
ddbbfbc1
A
7951 case VPROC_GSK_ABANDON_PROCESS_GROUP:
7952 *outval = j->abandon_pg;
7953 break;
5b0a4722
A
7954 case VPROC_GSK_LAST_EXIT_STATUS:
7955 *outval = j->last_exit_status;
7956 break;
7957 case VPROC_GSK_MGR_UID:
7958 *outval = getuid();
7959 break;
7960 case VPROC_GSK_MGR_PID:
7961 *outval = getpid();
7962 break;
7963 case VPROC_GSK_IS_MANAGED:
7964 *outval = j->anonymous ? 0 : 1;
7965 break;
7966 case VPROC_GSK_BASIC_KEEPALIVE:
7967 *outval = !j->ondemand;
7968 break;
7969 case VPROC_GSK_START_INTERVAL:
7970 *outval = j->start_interval;
7971 break;
7972 case VPROC_GSK_IDLE_TIMEOUT:
7973 *outval = j->timeout;
7974 break;
7975 case VPROC_GSK_EXIT_TIMEOUT:
7976 *outval = j->exit_timeout;
7977 break;
7978 case VPROC_GSK_GLOBAL_LOG_MASK:
7979 oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
7980 *outval = oldmask;
7981 runtime_setlogmask(oldmask);
7982 break;
7983 case VPROC_GSK_GLOBAL_UMASK:
7984 oldmask = umask(0);
7985 *outval = oldmask;
7986 umask(oldmask);
7987 break;
ddbbfbc1
A
7988 case VPROC_GSK_TRANSACTIONS_ENABLED:
7989 job_log(j, LOG_DEBUG, "Reading transaction model status.");
7990 *outval = j->kill_via_shmem;
7991 break;
7992 case VPROC_GSK_WAITFORDEBUGGER:
7993 *outval = j->wait4debugger;
7994 break;
7995 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
7996 *outval = j->embedded_special_privileges;
7997 break;
5b0a4722
A
7998 case 0:
7999 *outval = 0;
8000 break;
8001 default:
8002 kr = 1;
8003 break;
8004 }
8005
8006 switch (inkey) {
ddbbfbc1
A
8007 case VPROC_GSK_ABANDON_PROCESS_GROUP:
8008 j->abandon_pg = (bool)inval;
8009 break;
5b0a4722 8010 case VPROC_GSK_GLOBAL_ON_DEMAND:
ddbbfbc1 8011 job_log(j, LOG_DEBUG, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
5b0a4722
A
8012 kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
8013 break;
8014 case VPROC_GSK_BASIC_KEEPALIVE:
8015 j->ondemand = !inval;
8016 break;
8017 case VPROC_GSK_START_INTERVAL:
ddbbfbc1 8018 if (inval > UINT32_MAX || inval < 0) {
5b0a4722
A
8019 kr = 1;
8020 } else if (inval) {
8021 if (j->start_interval == 0) {
ddbbfbc1 8022 runtime_add_weak_ref();
5b0a4722 8023 }
ddbbfbc1 8024 j->start_interval = (typeof(j->start_interval)) inval;
dcace88f 8025 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
5b0a4722 8026 } else if (j->start_interval) {
dcace88f 8027 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
5b0a4722 8028 if (j->start_interval != 0) {
ddbbfbc1 8029 runtime_del_weak_ref();
5b0a4722
A
8030 }
8031 j->start_interval = 0;
8032 }
8033 break;
8034 case VPROC_GSK_IDLE_TIMEOUT:
ddbbfbc1
A
8035 if (inval < 0 || inval > UINT32_MAX) {
8036 kr = 1;
8037 } else {
8038 j->timeout = (typeof(j->timeout)) inval;
5b0a4722
A
8039 }
8040 break;
8041 case VPROC_GSK_EXIT_TIMEOUT:
ddbbfbc1
A
8042 if (inval < 0 || inval > UINT32_MAX) {
8043 kr = 1;
8044 } else {
8045 j->exit_timeout = (typeof(j->exit_timeout)) inval;
5b0a4722
A
8046 }
8047 break;
8048 case VPROC_GSK_GLOBAL_LOG_MASK:
ddbbfbc1
A
8049 if (inval < 0 || inval > UINT32_MAX) {
8050 kr = 1;
8051 } else {
8052 runtime_setlogmask((int) inval);
8053 }
5b0a4722
A
8054 break;
8055 case VPROC_GSK_GLOBAL_UMASK:
ddbbfbc1
A
8056 launchd_assert(sizeof (mode_t) == 2);
8057 if (inval < 0 || inval > UINT16_MAX) {
8058 kr = 1;
8059 } else {
dcace88f
A
8060#if HAVE_SANDBOX
8061 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8062 kr = 1;
8063 } else {
8064 umask((mode_t) inval);
8065 }
8066#endif
ddbbfbc1
A
8067 }
8068 break;
8069 case VPROC_GSK_TRANSACTIONS_ENABLED:
dcace88f 8070 if (!job_assumes(j, inval != 0)) {
ddbbfbc1
A
8071 job_log(j, LOG_WARNING, "Attempt to unregister from transaction model. This is not supported.");
8072 kr = 1;
8073 } else {
ddbbfbc1 8074 j->kill_via_shmem = (bool)inval;
ddbbfbc1
A
8075 }
8076 break;
8077 case VPROC_GSK_WEIRD_BOOTSTRAP:
dcace88f 8078 if (job_assumes(j, j->weird_bootstrap)) {
ddbbfbc1
A
8079 job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
8080
8081 mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
8082
8083 if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
8084 mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
8085 }
8086
dcace88f 8087 (void)job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
ddbbfbc1
A
8088 j->weird_bootstrap = false;
8089 }
8090 break;
8091 case VPROC_GSK_WAITFORDEBUGGER:
8092 j->wait4debugger_oneshot = inval;
8093 break;
8094 case VPROC_GSK_PERUSER_SUSPEND:
dcace88f 8095 if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
ddbbfbc1 8096 mach_port_t junk = MACH_PORT_NULL;
dcace88f
A
8097 job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
8098 if (job_assumes(j, jpu != NULL)) {
ddbbfbc1 8099 struct suspended_peruser *spi = NULL;
dcace88f
A
8100 LIST_FOREACH(spi, &j->suspended_perusers, sle) {
8101 if ((int64_t)(spi->j->mach_uid) == inval) {
ddbbfbc1
A
8102 job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
8103 break;
8104 }
8105 }
8106
dcace88f 8107 if (spi == NULL) {
ddbbfbc1
A
8108 job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
8109 spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
dcace88f
A
8110 if (job_assumes(j, spi != NULL)) {
8111 /* Stop listening for events.
8112 *
8113 * See <rdar://problem/9014146>.
8114 */
8115 if (jpu->peruser_suspend_count == 0) {
8116 job_ignore(jpu);
8117 }
8118
ddbbfbc1
A
8119 spi->j = jpu;
8120 spi->j->peruser_suspend_count++;
8121 LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
8122 job_stop(spi->j);
dcace88f 8123 *outval = jpu->p;
ddbbfbc1
A
8124 } else {
8125 kr = BOOTSTRAP_NO_MEMORY;
8126 }
8127 }
8128 }
8129 } else {
8130 kr = 1;
8131 }
8132 break;
8133 case VPROC_GSK_PERUSER_RESUME:
dcace88f 8134 if (job_assumes(j, pid1_magic == true)) {
ddbbfbc1 8135 struct suspended_peruser *spi = NULL, *spt = NULL;
dcace88f
A
8136 LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
8137 if ((int64_t)(spi->j->mach_uid) == inval) {
ddbbfbc1
A
8138 spi->j->peruser_suspend_count--;
8139 LIST_REMOVE(spi, sle);
8140 job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
8141 break;
8142 }
8143 }
8144
dcace88f 8145 if (!job_assumes(j, spi != NULL)) {
ddbbfbc1
A
8146 job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
8147 kr = BOOTSTRAP_NOT_PRIVILEGED;
dcace88f
A
8148 } else if (spi->j->peruser_suspend_count == 0) {
8149 job_watch(spi->j);
ddbbfbc1
A
8150 job_dispatch(spi->j, false);
8151 free(spi);
8152 }
8153 } else {
8154 kr = 1;
8155 }
5b0a4722
A
8156 break;
8157 case 0:
8158 break;
8159 default:
8160 kr = 1;
8161 break;
8162 }
8163
8164 return kr;
8165}
8166
8167kern_return_t
dcace88f 8168job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
5b0a4722
A
8169{
8170 struct machservice *ms;
8171
8172 if (!launchd_assumes(j != NULL)) {
8173 return BOOTSTRAP_NO_MEMORY;
8174 }
8175
8176 job_log(j, LOG_DEBUG, "Post fork ping.");
8177
8178 job_setup_exception_port(j, child_task);
8179
8180 SLIST_FOREACH(ms, &special_ports, special_port_sle) {
8181 if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
8182 /* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
8183 continue;
8184 }
8185
8186 errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
8187
ddbbfbc1 8188 if (unlikely(errno)) {
5b0a4722
A
8189 int desired_log_level = LOG_ERR;
8190
8191 if (j->anonymous) {
8192 /* 5338127 */
8193
8194 desired_log_level = LOG_WARNING;
8195
8196 if (ms->special_port_num == TASK_SEATBELT_PORT) {
8197 desired_log_level = LOG_DEBUG;
8198 }
8199 }
8200
8201 job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
8202 }
8203 }
8204
dcace88f
A
8205 /* MIG will not zero-initialize this pointer, so we must always do so. See
8206 * <rdar://problem/8562593>.
8207 */
8208 *asport = MACH_PORT_NULL;
ddbbfbc1 8209#if !TARGET_OS_EMBEDDED
dcace88f
A
8210 if (!j->anonymous) {
8211 /* XPC services will spawn into the root security session by default.
8212 * xpcproxy will switch them away if needed.
8213 */
8214 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
8215 job_log(j, LOG_DEBUG, "Returning j->asport: %u", j->asport);
8216 *asport = j->asport;
8217 }
ddbbfbc1 8218 }
dcace88f
A
8219#endif
8220 (void)job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);
5b0a4722
A
8221
8222 return 0;
8223}
8224
8225kern_return_t
8226job_mig_reboot2(job_t j, uint64_t flags)
8227{
8228 char who_started_the_reboot[2048] = "";
dcace88f 8229 struct proc_bsdshortinfo proc;
ddbbfbc1 8230 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
8231 pid_t pid_to_log;
8232
8233 if (!launchd_assumes(j != NULL)) {
8234 return BOOTSTRAP_NO_MEMORY;
8235 }
8236
ddbbfbc1 8237 if (unlikely(!pid1_magic)) {
5b0a4722
A
8238 return BOOTSTRAP_NOT_PRIVILEGED;
8239 }
8240
ddbbfbc1
A
8241#if !TARGET_OS_EMBEDDED
8242 if (unlikely(ldc->euid)) {
8243#else
dcace88f 8244 if (unlikely(ldc->euid) && !j->embedded_special_privileges) {
ddbbfbc1 8245#endif
5b0a4722
A
8246 return BOOTSTRAP_NOT_PRIVILEGED;
8247 }
8248
dcace88f
A
8249 for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
8250 size_t who_offset;
8251 if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
8252 if (errno != ESRCH) {
8253 job_assumes(j, errno == 0);
8254 }
5b0a4722
A
8255 return 1;
8256 }
8257
dcace88f 8258 if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
587e987e 8259 job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
dcace88f 8260 snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
587e987e
A
8261 break;
8262 }
8263
5b0a4722
A
8264 who_offset = strlen(who_started_the_reboot);
8265 snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
dcace88f 8266 " %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
5b0a4722
A
8267 }
8268
8269 root_jobmgr->reboot_flags = (int)flags;
5b0a4722 8270 job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
dcace88f 8271 launchd_shutdown();
5b0a4722
A
8272
8273 return 0;
8274}
8275
8276kern_return_t
8277job_mig_getsocket(job_t j, name_t spr)
8278{
8279 if (!launchd_assumes(j != NULL)) {
8280 return BOOTSTRAP_NO_MEMORY;
8281 }
8282
dcace88f
A
8283 if (j->deny_job_creation) {
8284 return BOOTSTRAP_NOT_PRIVILEGED;
8285 }
8286
8287#if HAVE_SANDBOX
8288 struct ldcred *ldc = runtime_get_caller_creds();
8289 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
ddbbfbc1
A
8290 return BOOTSTRAP_NOT_PRIVILEGED;
8291 }
dcace88f 8292#endif
ddbbfbc1 8293
5b0a4722
A
8294 ipc_server_init();
8295
ddbbfbc1 8296 if (unlikely(!sockpath)) {
5b0a4722
A
8297 return BOOTSTRAP_NO_MEMORY;
8298 }
8299
8300 strncpy(spr, sockpath, sizeof(name_t));
8301
8302 return BOOTSTRAP_SUCCESS;
8303}
8304
8305kern_return_t
8306job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8307{
8308 if (!launchd_assumes(j != NULL)) {
8309 return BOOTSTRAP_NO_MEMORY;
8310 }
8311
8312 if ((errno = err)) {
8313 job_log_error(j, pri, "%s", msg);
8314 } else {
8315 job_log(j, pri, "%s", msg);
8316 }
8317
8318 return 0;
8319}
8320
ddbbfbc1 8321job_t
dcace88f 8322jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
5b0a4722 8323{
ddbbfbc1 8324 job_t ji = NULL;
5b0a4722
A
8325 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8326 if (!ji->per_user) {
8327 continue;
8328 }
8329 if (ji->mach_uid != which_user) {
8330 continue;
8331 }
8332 if (SLIST_EMPTY(&ji->machservices)) {
8333 continue;
8334 }
8335 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8336 continue;
8337 }
8338 break;
8339 }
ddbbfbc1 8340
dcace88f 8341 if (unlikely(ji == NULL)) {
5b0a4722
A
8342 struct machservice *ms;
8343 char lbuf[1024];
ddbbfbc1 8344
5b0a4722 8345 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
ddbbfbc1 8346
5b0a4722 8347 sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
ddbbfbc1 8348
5b0a4722 8349 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
ddbbfbc1 8350
dcace88f
A
8351 if (ji != NULL) {
8352 auditinfo_addr_t auinfo = {
8353 .ai_termid = { .at_type = AU_IPv4 },
8354 .ai_auid = which_user,
8355 .ai_asid = AU_ASSIGN_ASID,
8356 };
8357
8358 if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8359 job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8360 (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8361
8362 /* Kinda lame that we have to do this, but we can't create an
8363 * audit session without joining it.
8364 */
8365 (void)job_assumes(ji, audit_session_join(g_audit_session_port));
8366 ji->asid = auinfo.ai_asid;
8367 } else {
8368 job_log(ji, LOG_WARNING, "Could not set audit session!");
8369 job_remove(ji);
8370 return NULL;
8371 }
8372
ddbbfbc1
A
8373 ji->mach_uid = which_user;
8374 ji->per_user = true;
8375 ji->kill_via_shmem = true;
dcace88f 8376
ddbbfbc1
A
8377 struct stat sb;
8378 char pu_db[PATH_MAX];
8379 snprintf(pu_db, sizeof(pu_db), LAUNCHD_DB_PREFIX "/%s", lbuf);
8380
8381 bool created = false;
8382 int err = stat(pu_db, &sb);
dcace88f
A
8383 if ((err == -1 && errno == ENOENT) || (err == 0 && !S_ISDIR(sb.st_mode))) {
8384 if (err == 0) {
ddbbfbc1
A
8385 char move_aside[PATH_MAX];
8386 snprintf(move_aside, sizeof(move_aside), LAUNCHD_DB_PREFIX "/%s.movedaside", lbuf);
8387
dcace88f 8388 (void)job_assumes(ji, rename(pu_db, move_aside) != -1);
ddbbfbc1 8389 }
5b0a4722 8390
dcace88f
A
8391 (void)job_assumes(ji, mkdir(pu_db, S_IRWXU) != -1);
8392 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
ddbbfbc1
A
8393 created = true;
8394 }
8395
dcace88f
A
8396 if (!created) {
8397 if (!job_assumes(ji, sb.st_uid == which_user)) {
8398 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
ddbbfbc1 8399 }
dcace88f
A
8400 if (!job_assumes(ji, sb.st_gid == 0)) {
8401 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
ddbbfbc1 8402 }
dcace88f
A
8403 if (!job_assumes(ji, sb.st_mode == (S_IRWXU | S_IFDIR))) {
8404 (void)job_assumes(ji, chmod(pu_db, S_IRWXU) != -1);
ddbbfbc1
A
8405 }
8406 }
8407
8408 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8409 job_remove(ji);
8410 ji = NULL;
8411 } else {
8412 ms->per_user_hack = true;
8413 ms->hide = true;
dcace88f
A
8414
8415 ji = job_dispatch(ji, false);
ddbbfbc1 8416 }
5b0a4722 8417 }
5b0a4722 8418 } else {
ddbbfbc1 8419 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
5b0a4722
A
8420 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8421 }
ddbbfbc1
A
8422
8423 return ji;
8424}
5b0a4722 8425
ddbbfbc1
A
8426kern_return_t
8427job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
8428{
8429 struct ldcred *ldc = runtime_get_caller_creds();
8430 job_t jpu;
8431
8432#if TARGET_OS_EMBEDDED
8433 /* There is no need for per-user launchd's on embedded. */
8434 job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
8435 return BOOTSTRAP_NOT_PRIVILEGED;
8436#endif
8437
8438#if HAVE_SANDBOX
8439 if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
8440 return BOOTSTRAP_NOT_PRIVILEGED;
5b0a4722 8441 }
ddbbfbc1
A
8442#endif
8443
8444 if (!launchd_assumes(j != NULL)) {
8445 return BOOTSTRAP_NO_MEMORY;
8446 }
8447
8448 job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
8449
8450 if (unlikely(!pid1_magic)) {
8451 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
8452 return BOOTSTRAP_NOT_PRIVILEGED;
8453 }
8454
8455 if (ldc->euid || ldc->uid) {
8456 which_user = ldc->euid ?: ldc->uid;
8457 }
8458
8459 *up_cont = MACH_PORT_NULL;
8460
dcace88f 8461 jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);
ddbbfbc1 8462
5b0a4722
A
8463 return 0;
8464}
8465
8466kern_return_t
dcace88f 8467job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
5b0a4722 8468{
ddbbfbc1 8469 bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
dcace88f 8470 bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
ddbbfbc1 8471 struct ldcred *ldc = runtime_get_caller_creds();
dcace88f 8472 struct machservice *ms = NULL;
ddbbfbc1 8473 job_t jo;
5b0a4722
A
8474
8475 if (!launchd_assumes(j != NULL)) {
8476 return BOOTSTRAP_NO_MEMORY;
8477 }
8478
dcace88f
A
8479 if (j->dedicated_instance) {
8480 struct machservice *msi = NULL;
8481 SLIST_FOREACH(msi, &j->machservices, sle) {
8482 if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
8483 uuid_copy(instance_id, j->instance_id);
8484 ms = msi;
8485 break;
8486 }
ddbbfbc1 8487 }
dcace88f
A
8488 } else {
8489 ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
8490 }
ddbbfbc1 8491
dcace88f
A
8492 if (strict) {
8493 if (likely(ms != NULL)) {
8494 if (ms->job != j) {
8495 return BOOTSTRAP_NOT_PRIVILEGED;
8496 } else if (ms->isActive) {
8497 return BOOTSTRAP_SERVICE_ACTIVE;
8498 }
8499 } else {
8500 return BOOTSTRAP_UNKNOWN_SERVICE;
8501 }
8502 } else if (ms == NULL) {
8503 if (job_assumes(j, !j->dedicated_instance)) {
8504 *serviceportp = MACH_PORT_NULL;
8505
8506 if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
8507 return BOOTSTRAP_NO_MEMORY;
8508 }
8509
8510 /* Treat this like a legacy job. */
8511 if (!j->legacy_mach_job) {
8512 ms->isActive = true;
8513 ms->recv = false;
8514 }
8515
8516 if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
8517 job_log(j, LOG_SCOLDING, "Please add the following service to the configuration file for this job: %s", servicename);
8518 }
8519 } else {
8520 return BOOTSTRAP_UNKNOWN_SERVICE;
ddbbfbc1
A
8521 }
8522 } else {
8523 if (unlikely((jo = machservice_job(ms)) != j)) {
8524 static pid_t last_warned_pid;
8525
8526 if (last_warned_pid != ldc->pid) {
8527 job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
8528 last_warned_pid = ldc->pid;
8529 }
8530
8531 return BOOTSTRAP_NOT_PRIVILEGED;
8532 }
8533 if (unlikely(machservice_active(ms))) {
8534 job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
8535 return BOOTSTRAP_SERVICE_ACTIVE;
5b0a4722 8536 }
5b0a4722
A
8537 }
8538
ddbbfbc1 8539 job_checkin(j);
5b0a4722
A
8540 machservice_request_notifications(ms);
8541
8542 job_log(j, LOG_INFO, "Check-in of service: %s", servicename);
8543
8544 *serviceportp = machservice_port(ms);
8545 return BOOTSTRAP_SUCCESS;
8546}
8547
8548kern_return_t
8549job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
8550{
8551 struct machservice *ms;
ddbbfbc1 8552 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
8553
8554 if (!launchd_assumes(j != NULL)) {
8555 return BOOTSTRAP_NO_MEMORY;
8556 }
8557
ddbbfbc1
A
8558 if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
8559 job_log(j, LOG_SCOLDING, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
8560 }
5b0a4722
A
8561
8562 job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
8563
f36da725
A
8564 /* 5641783 for the embedded hack */
8565#if !TARGET_OS_EMBEDDED
5b0a4722
A
8566 /*
8567 * From a per-user/session launchd's perspective, SecurityAgent (UID
8568 * 92) is a rogue application (not our UID, not root and not a child of
8569 * us). We'll have to reconcile this design friction at a later date.
8570 */
ddbbfbc1
A
8571 if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
8572 if (pid1_magic) {
5b0a4722
A
8573 return VPROC_ERR_TRY_PER_USER;
8574 } else {
8575 return BOOTSTRAP_NOT_PRIVILEGED;
8576 }
8577 }
f36da725 8578#endif
5b0a4722 8579
ddbbfbc1 8580 ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);
5b0a4722 8581
ddbbfbc1 8582 if (unlikely(ms)) {
5b0a4722
A
8583 if (machservice_job(ms) != j) {
8584 return BOOTSTRAP_NOT_PRIVILEGED;
8585 }
8586 if (machservice_active(ms)) {
8587 job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
8588 return BOOTSTRAP_SERVICE_ACTIVE;
8589 }
ddbbfbc1
A
8590 if (ms->recv && (serviceport != MACH_PORT_NULL)) {
8591 job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
8592 return BOOTSTRAP_NOT_PRIVILEGED;
8593 }
5b0a4722
A
8594 job_checkin(j);
8595 machservice_delete(j, ms, false);
8596 }
8597
ddbbfbc1
A
8598 if (likely(serviceport != MACH_PORT_NULL)) {
8599 if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
5b0a4722
A
8600 machservice_request_notifications(ms);
8601 } else {
8602 return BOOTSTRAP_NO_MEMORY;
8603 }
8604 }
8605
ddbbfbc1 8606
5b0a4722
A
8607 return BOOTSTRAP_SUCCESS;
8608}
8609
8610kern_return_t
dcace88f 8611job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
5b0a4722 8612{
dcace88f 8613 struct machservice *ms = NULL;
ddbbfbc1 8614 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722 8615 kern_return_t kr;
ddbbfbc1 8616 bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
dcace88f
A
8617 bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
8618 bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
8619 bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;
5b0a4722
A
8620
8621 if (!launchd_assumes(j != NULL)) {
8622 return BOOTSTRAP_NO_MEMORY;
8623 }
8624
dcace88f
A
8625 bool xpc_req = j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN;
8626
f36da725
A
8627 /* 5641783 for the embedded hack */
8628#if !TARGET_OS_EMBEDDED
ddbbfbc1 8629 if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
5b0a4722
A
8630 return VPROC_ERR_TRY_PER_USER;
8631 }
f36da725 8632#endif
5b0a4722 8633
ddbbfbc1 8634#if HAVE_SANDBOX
dcace88f
A
8635 /* We don't do sandbox checking for XPC domains because, by definition, all
8636 * the services within your domain should be accessibly to you.
8637 */
8638 if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
5b0a4722
A
8639 return BOOTSTRAP_NOT_PRIVILEGED;
8640 }
ddbbfbc1 8641#endif
5b0a4722 8642
ddbbfbc1 8643 if (per_pid_lookup) {
5b0a4722
A
8644 ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
8645 } else {
dcace88f
A
8646 if (xpc_req) {
8647 /* Requests from XPC domains stay local. */
8648 ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
8649 } else {
8650 /* A strict lookup which is privileged won't even bother trying to
8651 * find a service if we're not hosting the root Mach bootstrap.
8652 */
8653 if (strict_lookup && privileged) {
8654 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8655 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
8656 }
8657 } else {
8658 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
8659 }
8660 }
5b0a4722
A
8661 }
8662
ddbbfbc1 8663 if (likely(ms)) {
dcace88f
A
8664 ms = ms->alias ? ms->alias : ms;
8665 if (unlikely(specific_instance && ms->job->multiple_instances)) {
8666 job_t ji = NULL;
8667 job_t instance = NULL;
8668 LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
8669 if (uuid_compare(instance_id, ji->instance_id) == 0) {
8670 instance = ji;
8671 break;
8672 }
8673 }
8674
8675 if (unlikely(instance == NULL)) {
8676 job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
8677 instance = job_new_subjob(ms->job, instance_id);
8678 if (job_assumes(j, instance != NULL)) {
8679 /* Disable this support for now. We only support having
8680 * multi-instance jobs within private XPC domains.
8681 */
8682#if 0
8683 /* If the job is multi-instance, in a singleton XPC domain
8684 * and the request is not coming from within that singleton
8685 * domain, we need to alias the new job into the requesting
8686 * domain.
8687 */
8688 if (!j->mgr->xpc_singleton && xpc_req) {
8689 (void)job_assumes(instance, job_new_alias(j->mgr, instance));
8690 }
8691#endif
8692 job_dispatch(instance, false);
8693 }
8694 }
8695
ddbbfbc1 8696 ms = NULL;
dcace88f
A
8697 if (job_assumes(j, instance != NULL)) {
8698 struct machservice *msi = NULL;
8699 SLIST_FOREACH(msi, &instance->machservices, sle) {
8700 /* sizeof(servicename) will return the size of a pointer, even though it's
8701 * an array type, because when passing arrays as parameters in C, they
8702 * implicitly degrade to pointers.
8703 */
8704 if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
8705 ms = msi;
8706 break;
8707 }
8708 }
8709 }
8710 } else {
8711 if (machservice_hidden(ms) && !machservice_active(ms)) {
8712 ms = NULL;
8713 } else if (unlikely(ms->per_user_hack)) {
8714 ms = NULL;
8715 }
ddbbfbc1 8716 }
5b0a4722
A
8717 }
8718
ddbbfbc1 8719 if (likely(ms)) {
dcace88f 8720 (void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
ddbbfbc1
A
8721 job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
8722
8723 if (unlikely(!per_pid_lookup && j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user)) {
8724 /* we need to think more about the per_pid_lookup logic before logging about repeated lookups */
8725 job_log(j, LOG_DEBUG, "Performance: Please fix the framework that talks to \"%s\" to cache the Mach port for service: %s", ms->job->label, servicename);
5b0a4722 8726 }
ddbbfbc1 8727
5b0a4722
A
8728 j->lastlookup = ms;
8729 j->lastlookup_gennum = ms->gen_num;
ddbbfbc1 8730
5b0a4722 8731 *serviceportp = machservice_port(ms);
ddbbfbc1 8732
5b0a4722 8733 kr = BOOTSTRAP_SUCCESS;
dcace88f
A
8734 } else if (strict_lookup && !privileged) {
8735 /* Hack: We need to simulate XPC's desire not to establish a hierarchy. So if
8736 * XPC is doing the lookup, and it's not a privileged lookup, we won't forward.
8737 * But if it is a privileged lookup (that is, was looked up in XPC_DOMAIN_LOCAL_SYSTEM)
8738 * then we must forward.
8739 */
8740 return BOOTSTRAP_UNKNOWN_SERVICE;
8741 } else if (inherited_bootstrap_port != MACH_PORT_NULL) {
8742 /* Requests from within an XPC domain don't get forwarded. */
5b0a4722 8743 job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
ddbbfbc1 8744 /* Clients potentially check the audit token of the reply to verify that the returned send right is trustworthy. */
dcace88f 8745 (void)job_assumes(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags) == 0);
ddbbfbc1
A
8746 /* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
8747 return MIG_NO_REPLY;
8748 } else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
5b0a4722
A
8749 /*
8750 * 5240036 Should start background session when a lookup of CCacheServer occurs
8751 *
8752 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
8753 * If we find a EUID that isn't root, we force it over to the per-user context.
8754 */
8755 return VPROC_ERR_TRY_PER_USER;
8756 } else {
ddbbfbc1 8757 job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
5b0a4722
A
8758 kr = BOOTSTRAP_UNKNOWN_SERVICE;
8759 }
8760
8761 return kr;
8762}
8763
8764kern_return_t
ddbbfbc1 8765job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
5b0a4722
A
8766{
8767 if (!launchd_assumes(j != NULL)) {
8768 return BOOTSTRAP_NO_MEMORY;
8769 }
8770
8771 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
8772 jobmgr_t jm = j->mgr;
8773
5b0a4722
A
8774 if (jobmgr_parent(jm)) {
8775 *parentport = jobmgr_parent(jm)->jm_port;
8776 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
8777 *parentport = jm->jm_port;
8778 } else {
dcace88f 8779 (void)job_assumes(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp) == 0);
ddbbfbc1
A
8780 /* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
8781 return MIG_NO_REPLY;
5b0a4722
A
8782 }
8783 return BOOTSTRAP_SUCCESS;
8784}
8785
8786kern_return_t
dcace88f
A
8787job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
8788{
5c88273d
A
8789 if (!j) {
8790 return BOOTSTRAP_NO_MEMORY;
8791 }
8792
dcace88f
A
8793 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8794 *rootbsp = root_jobmgr->jm_port;
8795 (void)job_assumes(j, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
8796 } else {
8797 *rootbsp = inherited_bootstrap_port;
8798 (void)job_assumes(j, launchd_mport_copy_send(inherited_bootstrap_port) == KERN_SUCCESS);
8799 }
8800
8801 return BOOTSTRAP_SUCCESS;
8802}
8803
8804kern_return_t
8805job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt, name_array_t *servicejobsp, unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt, uint64_t flags)
5b0a4722
A
8806{
8807 name_array_t service_names = NULL;
ddbbfbc1 8808 name_array_t service_jobs = NULL;
5b0a4722
A
8809 bootstrap_status_array_t service_actives = NULL;
8810 unsigned int cnt = 0, cnt2 = 0;
5b0a4722 8811 jobmgr_t jm;
cf0bacfd 8812
5b0a4722
A
8813 if (!launchd_assumes(j != NULL)) {
8814 return BOOTSTRAP_NO_MEMORY;
8815 }
8816
dcace88f
A
8817 if (g_flat_mach_namespace) {
8818 if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
ddbbfbc1
A
8819 jm = j->mgr;
8820 } else {
8821 jm = root_jobmgr;
8822 }
8823 } else {
8824 jm = j->mgr;
8825 }
5b0a4722 8826
ddbbfbc1
A
8827 unsigned int i = 0;
8828 struct machservice *msi = NULL;
dcace88f
A
8829 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8830 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
ddbbfbc1 8831 cnt += !msi->per_pid ? 1 : 0;
5b0a4722
A
8832 }
8833 }
8834
8835 if (cnt == 0) {
8836 goto out;
8837 }
8838
8839 mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
ddbbfbc1
A
8840 if (!job_assumes(j, service_names != NULL)) {
8841 goto out_bad;
8842 }
8843
8844 mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
8845 if (!job_assumes(j, service_jobs != NULL)) {
5b0a4722
A
8846 goto out_bad;
8847 }
8848
8849 mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
ddbbfbc1 8850 if (!job_assumes(j, service_actives != NULL)) {
5b0a4722
A
8851 goto out_bad;
8852 }
8853
dcace88f
A
8854 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8855 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
8856 if (!msi->per_pid) {
ddbbfbc1 8857 strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
dcace88f
A
8858 msi = msi->alias ? msi->alias : msi;
8859 if (msi->job->mgr->shortdesc) {
8860 strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
8861 } else {
8862 strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
8863 }
ddbbfbc1 8864 service_actives[cnt2] = machservice_status(msi);
5b0a4722
A
8865 cnt2++;
8866 }
8867 }
8868 }
8869
dcace88f 8870 (void)job_assumes(j, cnt == cnt2);
5b0a4722
A
8871
8872out:
8873 *servicenamesp = service_names;
ddbbfbc1 8874 *servicejobsp = service_jobs;
5b0a4722 8875 *serviceactivesp = service_actives;
ddbbfbc1 8876 *servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;
5b0a4722
A
8877
8878 return BOOTSTRAP_SUCCESS;
8879
8880out_bad:
8881 if (service_names) {
8882 mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
8883 }
ddbbfbc1
A
8884 if (service_jobs) {
8885 mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
8886 }
5b0a4722
A
8887 if (service_actives) {
8888 mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
8889 }
8890
8891 return BOOTSTRAP_NO_MEMORY;
8892}
8893
ddbbfbc1 8894kern_return_t
dcace88f 8895job_mig_lookup_children(job_t j, mach_port_array_t *child_ports, mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names, mach_msg_type_number_t *child_names_cnt, bootstrap_property_array_t *child_properties,mach_msg_type_number_t *child_properties_cnt)
5b0a4722 8896{
ddbbfbc1 8897 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
dcace88f 8898 if (!launchd_assumes(j != NULL)) {
ddbbfbc1
A
8899 return BOOTSTRAP_NO_MEMORY;
8900 }
8901
8902 struct ldcred *ldc = runtime_get_caller_creds();
8903
8904 /* Only allow root processes to look up children, even if we're in the per-user launchd.
8905 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
8906 * in a non-flat namespace.
8907 */
dcace88f 8908 if (ldc->euid != 0) {
ddbbfbc1
A
8909 job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
8910 return BOOTSTRAP_NOT_PRIVILEGED;
8911 }
8912
8913 unsigned int cnt = 0;
8914
8915 jobmgr_t jmr = j->mgr;
8916 jobmgr_t jmi = NULL;
dcace88f 8917 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
ddbbfbc1
A
8918 cnt++;
8919 }
8920
8921 /* Find our per-user launchds if we're PID 1. */
8922 job_t ji = NULL;
dcace88f
A
8923 if (pid1_magic) {
8924 LIST_FOREACH(ji, &jmr->jobs, sle) {
ddbbfbc1
A
8925 cnt += ji->per_user ? 1 : 0;
8926 }
8927 }
8928
dcace88f 8929 if (cnt == 0) {
ddbbfbc1
A
8930 return BOOTSTRAP_NO_CHILDREN;
8931 }
8932
8933 mach_port_array_t _child_ports = NULL;
8934 mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
dcace88f 8935 if (!job_assumes(j, _child_ports != NULL)) {
ddbbfbc1
A
8936 kr = BOOTSTRAP_NO_MEMORY;
8937 goto out_bad;
8938 }
8939
8940 name_array_t _child_names = NULL;
8941 mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
dcace88f 8942 if (!job_assumes(j, _child_names != NULL)) {
ddbbfbc1
A
8943 kr = BOOTSTRAP_NO_MEMORY;
8944 goto out_bad;
8945 }
8946
8947 bootstrap_property_array_t _child_properties = NULL;
8948 mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
dcace88f 8949 if (!job_assumes(j, _child_properties != NULL)) {
ddbbfbc1
A
8950 kr = BOOTSTRAP_NO_MEMORY;
8951 goto out_bad;
8952 }
8953
8954 unsigned int cnt2 = 0;
dcace88f
A
8955 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8956 if (jobmgr_assumes(jmi, launchd_mport_make_send(jmi->jm_port) == KERN_SUCCESS)) {
ddbbfbc1
A
8957 _child_ports[cnt2] = jmi->jm_port;
8958 } else {
8959 _child_ports[cnt2] = MACH_PORT_NULL;
8960 }
8961
8962 strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
8963 _child_properties[cnt2] = jmi->properties;
8964
8965 cnt2++;
8966 }
8967
dcace88f
A
8968 if (pid1_magic) LIST_FOREACH( ji, &jmr->jobs, sle) {
8969 if (ji->per_user) {
8970 if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
ddbbfbc1
A
8971 mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));
8972
dcace88f 8973 if (job_assumes(ji, launchd_mport_copy_send(port) == KERN_SUCCESS)) {
ddbbfbc1
A
8974 _child_ports[cnt2] = port;
8975 } else {
8976 _child_ports[cnt2] = MACH_PORT_NULL;
8977 }
8978 } else {
8979 _child_ports[cnt2] = MACH_PORT_NULL;
8980 }
8981
8982 strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
8983 _child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;
8984
8985 cnt2++;
8986 }
8987 }
8988
8989 *child_names_cnt = cnt;
8990 *child_ports_cnt = cnt;
8991 *child_properties_cnt = cnt;
8992
8993 *child_names = _child_names;
8994 *child_ports = _child_ports;
8995 *child_properties = _child_properties;
8996
8997 unsigned int i = 0;
dcace88f 8998 for (i = 0; i < cnt; i++) {
ddbbfbc1
A
8999 job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
9000 }
9001
9002 return BOOTSTRAP_SUCCESS;
9003out_bad:
dcace88f 9004 if (_child_ports) {
ddbbfbc1
A
9005 mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
9006 }
9007
dcace88f 9008 if (_child_names) {
ddbbfbc1
A
9009 mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_ports[0]));
9010 }
9011
dcace88f 9012 if (_child_properties) {
ddbbfbc1
A
9013 mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
9014 }
9015
9016 return kr;
9017}
9018
9019kern_return_t
9020job_mig_transaction_count_for_pid(job_t j, pid_t p, int32_t *cnt, boolean_t *condemned)
9021{
5c88273d
A
9022 if (!j) {
9023 return BOOTSTRAP_NO_MEMORY;
9024 }
9025
ddbbfbc1
A
9026 kern_return_t kr = KERN_FAILURE;
9027 struct ldcred *ldc = runtime_get_caller_creds();
dcace88f 9028 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
ddbbfbc1
A
9029 return BOOTSTRAP_NOT_PRIVILEGED;
9030 }
9031
9032 job_t j_for_pid = jobmgr_find_by_pid_deep(j->mgr, p, false);
dcace88f
A
9033 if (j_for_pid) {
9034 if (j_for_pid->kill_via_shmem) {
9035 if (j_for_pid->shmem) {
ddbbfbc1
A
9036 *cnt = j_for_pid->shmem->vp_shmem_transaction_cnt;
9037 *condemned = j_for_pid->shmem->vp_shmem_flags & VPROC_SHMEM_EXITING;
9038 *cnt += *condemned ? 1 : 0;
9039 } else {
9040 *cnt = 0;
9041 *condemned = false;
9042 }
9043
9044 kr = BOOTSTRAP_SUCCESS;
9045 } else {
9046 kr = BOOTSTRAP_NO_MEMORY;
9047 }
9048 } else {
9049 kr = BOOTSTRAP_UNKNOWN_SERVICE;
9050 }
9051
9052 return kr;
9053}
9054
9055kern_return_t
9056job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
9057{
9058 struct ldcred *ldc = runtime_get_caller_creds();
dcace88f 9059 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
ddbbfbc1
A
9060 return BOOTSTRAP_NOT_PRIVILEGED;
9061 }
9062
9063 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
9064 * directly by launchd as agents.
9065 */
9066 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
dcace88f 9067 if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
ddbbfbc1
A
9068 *managed = true;
9069 }
9070
9071 return BOOTSTRAP_SUCCESS;
9072}
9073
9074kern_return_t
9075job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
9076{
5c88273d
A
9077 if (!j) {
9078 return BOOTSTRAP_NO_MEMORY;
9079 }
9080
ddbbfbc1
A
9081 struct ldcred *ldc = runtime_get_caller_creds();
9082 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
dcace88f
A
9083
9084#if HAVE_SANDBOX
9085 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
9086 return BOOTSTRAP_NOT_PRIVILEGED;
9087 }
9088#endif
9089
ddbbfbc1 9090 mach_port_t _mp = MACH_PORT_NULL;
dcace88f
A
9091 if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
9092 job_t target_j = job_find(NULL, label);
9093 if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
9094 if (target_j->j_port == MACH_PORT_NULL) {
9095 (void)job_assumes(target_j, job_setup_machport(target_j) == true);
ddbbfbc1
A
9096 }
9097
9098 _mp = target_j->j_port;
9099 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
9100 } else {
9101 kr = BOOTSTRAP_NO_MEMORY;
9102 }
9103 }
9104
9105 *mp = _mp;
9106 return kr;
9107}
5b0a4722 9108
ddbbfbc1
A
9109#if !TARGET_OS_EMBEDDED
9110kern_return_t
dcace88f 9111job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
ddbbfbc1 9112{
5c88273d
A
9113 if (!j) {
9114 return BOOTSTRAP_NO_MEMORY;
9115 }
9116
ddbbfbc1
A
9117 uuid_string_t uuid_str;
9118 uuid_unparse(uuid, uuid_str);
dcace88f 9119 job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);
ddbbfbc1
A
9120
9121 job_t ji = NULL, jt = NULL;
dcace88f 9122 LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
ddbbfbc1
A
9123 uuid_string_t uuid_str2;
9124 uuid_unparse(ji->expected_audit_uuid, uuid_str2);
9125
dcace88f 9126 if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
ddbbfbc1 9127 uuid_clear(ji->expected_audit_uuid);
dcace88f
A
9128 if (asport != MACH_PORT_NULL ) {
9129 job_log(ji, LOG_DEBUG, "Job should join session with port %u", asport);
9130 (void)job_assumes(j, launchd_mport_copy_send(asport) == KERN_SUCCESS);
ddbbfbc1
A
9131 } else {
9132 job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
9133 }
9134
dcace88f 9135 ji->asport = asport;
ddbbfbc1
A
9136 LIST_REMOVE(ji, needing_session_sle);
9137 job_dispatch(ji, false);
9138 }
9139 }
9140
9141 /* Each job that the session port was set for holds a reference. At the end of
9142 * the loop, there will be one extra reference belonging to this MiG protocol.
9143 * We need to release it so that the session goes away when all the jobs
9144 * referencing it are unloaded.
9145 */
dcace88f 9146 (void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
ddbbfbc1
A
9147
9148 return KERN_SUCCESS;
9149}
9150#else
9151kern_return_t
9152job_mig_set_security_session(job_t j __attribute__((unused)), uuid_t uuid __attribute__((unused)), mach_port_t session __attribute__((unused)))
9153{
9154 return KERN_SUCCESS;
9155}
9156#endif
9157
9158jobmgr_t
9159jobmgr_find_by_name(jobmgr_t jm, const char *where)
9160{
9161 jobmgr_t jmi, jmi2;
5b0a4722
A
9162
9163 /* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
9164 if (where == NULL) {
ddbbfbc1 9165 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
5b0a4722
A
9166 where = VPROCMGR_SESSION_LOGINWINDOW;
9167 } else {
9168 where = VPROCMGR_SESSION_AQUA;
9169 }
9170 }
9171
ddbbfbc1
A
9172 if (strcasecmp(jm->name, where) == 0) {
9173 return jm;
9174 }
9175
dcace88f 9176 if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
ddbbfbc1
A
9177 jmi = root_jobmgr;
9178 goto jm_found;
5b0a4722
A
9179 }
9180
ddbbfbc1
A
9181 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9182 if (unlikely(jmi->shutting_down)) {
5b0a4722 9183 continue;
dcace88f
A
9184 } else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
9185 continue;
5b0a4722
A
9186 } else if (strcasecmp(jmi->name, where) == 0) {
9187 goto jm_found;
ddbbfbc1 9188 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
5b0a4722
A
9189 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
9190 if (strcasecmp(jmi2->name, where) == 0) {
9191 jmi = jmi2;
9192 goto jm_found;
9193 }
9194 }
9195 }
9196 }
ddbbfbc1 9197
5b0a4722 9198jm_found:
ddbbfbc1 9199 return jmi;
5b0a4722
A
9200}
9201
9202kern_return_t
dcace88f 9203job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
5b0a4722
A
9204{
9205 mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
9206 mach_port_array_t l2l_ports = NULL;
9207 mach_port_t reqport, rcvright;
9208 kern_return_t kr = 1;
9209 launch_data_t out_obj_array = NULL;
ddbbfbc1 9210 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722
A
9211 jobmgr_t jmr = NULL;
9212
9213 if (!launchd_assumes(j != NULL)) {
9214 return BOOTSTRAP_NO_MEMORY;
9215 }
9216
ddbbfbc1 9217 if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
5b0a4722
A
9218 job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");
9219
9220 kr = BOOTSTRAP_NOT_PRIVILEGED;
9221 goto out;
9222 }
9223
9224 job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);
9225
ddbbfbc1 9226 kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
5b0a4722
A
9227
9228 if (!job_assumes(j, kr == 0)) {
9229 goto out;
9230 }
9231
9232 launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);
9233
dcace88f 9234 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
5b0a4722
A
9235 kr = BOOTSTRAP_NO_MEMORY;
9236 goto out;
9237 }
9238
ddbbfbc1
A
9239 jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;
9240
9241 /* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
9242 * processing an IPC request, we'll do this action before the new job manager can get any IPC
9243 * requests. This serialization is guaranteed since we are single-threaded in that respect.
9244 */
dcace88f 9245 if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
ddbbfbc1
A
9246 /* This is so awful. */
9247 /* Remove the job from its current job manager. */
9248 LIST_REMOVE(j, sle);
9249 LIST_REMOVE(j, pid_hash_sle);
9250
9251 /* Put the job into the target job manager. */
9252 LIST_INSERT_HEAD(&jmr->jobs, j, sle);
9253 LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
9254
9255 j->mgr = jmr;
9256 job_set_global_on_demand(j, true);
587e987e 9257
dcace88f 9258 if (!j->holds_ref) {
587e987e
A
9259 j->holds_ref = true;
9260 runtime_add_ref();
9261 }
ddbbfbc1
A
9262 }
9263
5b0a4722
A
9264 for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
9265 launch_data_t tmp, obj_at_idx;
9266 struct machservice *ms;
9267 job_t j_for_service;
9268 const char *serv_name;
9269 pid_t target_pid;
9270 bool serv_perpid;
9271
dcace88f
A
9272 (void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
9273 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
5b0a4722 9274 target_pid = (pid_t)launch_data_get_integer(tmp);
dcace88f 9275 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
5b0a4722 9276 serv_perpid = launch_data_get_bool(tmp);
dcace88f 9277 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
5b0a4722
A
9278 serv_name = launch_data_get_string(tmp);
9279
9280 j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);
9281
ddbbfbc1 9282 if (unlikely(!j_for_service)) {
5b0a4722 9283 /* The PID probably exited */
dcace88f 9284 (void)job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
5b0a4722
A
9285 continue;
9286 }
9287
ddbbfbc1
A
9288 if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
9289 job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
5b0a4722
A
9290 machservice_request_notifications(ms);
9291 }
9292 }
9293
9294 kr = 0;
9295
9296out:
9297 if (out_obj_array) {
9298 launch_data_free(out_obj_array);
9299 }
9300
9301 if (l2l_ports) {
9302 mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
9303 }
9304
9305 if (kr == 0) {
9306 if (target_subset) {
dcace88f
A
9307 (void)job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
9308 }
9309 if (asport) {
9310 (void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
5b0a4722
A
9311 }
9312 } else if (jmr) {
9313 jobmgr_shutdown(jmr);
9314 }
9315
9316 return kr;
9317}
9318
ddbbfbc1 9319kern_return_t
dcace88f 9320job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
ddbbfbc1 9321{
5c88273d
A
9322 if (!j) {
9323 return BOOTSTRAP_NO_MEMORY;
9324 }
9325
ddbbfbc1
A
9326 job_t j2;
9327
9328 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9329 if (j->mgr->session_initialized) {
9330 job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
9331 kr = BOOTSTRAP_NOT_PRIVILEGED;
9332 } else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9333 jobmgr_t jmi;
9334
9335 /*
9336 * 5330262
9337 *
9338 * We're working around LoginWindow and the WindowServer.
9339 *
9340 * In practice, there is only one LoginWindow session. Unfortunately, for certain
9341 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
9342 * spawns a replacement loginwindow session before cleaning up the previous one.
9343 *
9344 * We're going to use the creation of a new LoginWindow context as a clue that the
9345 * previous LoginWindow context is on the way out and therefore we should just
9346 * kick-start the shutdown of it.
9347 */
9348
9349 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9350 if (unlikely(jmi->shutting_down)) {
9351 continue;
9352 } else if (strcasecmp(jmi->name, session_type) == 0) {
9353 jobmgr_shutdown(jmi);
9354 break;
9355 }
9356 }
9357 }
9358
9359 jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
9360 strcpy(j->mgr->name_init, session_type);
9361
9362 if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
dcace88f
A
9363 j2->asport = asport;
9364 (void)job_assumes(j, job_dispatch(j2, true));
ddbbfbc1
A
9365 kr = BOOTSTRAP_SUCCESS;
9366 }
9367
9368 return kr;
9369}
9370
9371kern_return_t
dcace88f 9372job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
ddbbfbc1 9373{
62123c11
A
9374 struct ldcred *ldc = runtime_get_caller_creds();
9375 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9376 jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
9377 return BOOTSTRAP_NO_MEMORY;
9378 }
9379
5c88273d
A
9380 if (j->mgr->shutting_down) {
9381 return BOOTSTRAP_UNKNOWN_SERVICE;
9382 }
9383
ddbbfbc1
A
9384 job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);
9385
dcace88f 9386 if (!job_assumes(j, pid1_magic == false)) {
ddbbfbc1
A
9387 job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
9388 return BOOTSTRAP_NOT_PRIVILEGED;
9389 }
9390
dcace88f 9391 if (!j->anonymous) {
ddbbfbc1
A
9392 job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
9393 return BOOTSTRAP_NOT_PRIVILEGED;
9394 }
9395
9396 jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
dcace88f 9397 if (target_jm == j->mgr) {
ddbbfbc1
A
9398 job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
9399 *new_bsport = target_jm->jm_port;
9400 return BOOTSTRAP_SUCCESS;
9401 }
9402
dcace88f
A
9403 if (!target_jm) {
9404 target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
9405 if (target_jm) {
ddbbfbc1 9406 target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
dcace88f 9407 (void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
ddbbfbc1
A
9408 }
9409 }
9410
dcace88f 9411 if (!job_assumes(j, target_jm != NULL)) {
ddbbfbc1
A
9412 job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
9413 return BOOTSTRAP_NO_MEMORY;
9414 }
9415
9416 /* Remove the job from it's current job manager. */
9417 LIST_REMOVE(j, sle);
9418 LIST_REMOVE(j, pid_hash_sle);
9419
9420 job_t ji = NULL, jit = NULL;
dcace88f
A
9421 LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
9422 if (ji == j) {
ddbbfbc1
A
9423 LIST_REMOVE(ji, global_env_sle);
9424 break;
9425 }
9426 }
9427
9428 /* Put the job into the target job manager. */
9429 LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
9430 LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
9431
dcace88f 9432 if (ji) {
ddbbfbc1
A
9433 LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
9434 }
9435
9436 /* Move our Mach services over if we're not in a flat namespace. */
dcace88f 9437 if (!g_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
ddbbfbc1 9438 struct machservice *msi = NULL, *msit = NULL;
dcace88f 9439 SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
ddbbfbc1
A
9440 LIST_REMOVE(msi, name_hash_sle);
9441 LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
9442 }
9443 }
9444
9445 j->mgr = target_jm;
ddbbfbc1 9446
dcace88f 9447 if (!j->holds_ref) {
587e987e
A
9448 /* Anonymous jobs which move around are particularly interesting to us, so we want to
9449 * stick around while they're still around.
9450 * For example, login calls into the PAM launchd module, which moves the process into
9451 * the StandardIO session by default. So we'll hold a reference on that job to prevent
9452 * ourselves from going away.
9453 */
9454 j->holds_ref = true;
9455 runtime_add_ref();
9456 }
9457
9458 *new_bsport = target_jm->jm_port;
ddbbfbc1
A
9459
9460 return KERN_SUCCESS;
9461}
9462
5b0a4722
A
9463kern_return_t
9464job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
9465 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
9466 mach_port_array_t *portsp, unsigned int *ports_cnt)
9467{
9468 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
9469 mach_port_array_t ports = NULL;
9470 unsigned int cnt = 0, cnt2 = 0;
9471 size_t packed_size;
9472 struct machservice *ms;
9473 jobmgr_t jm;
9474 job_t ji;
9475
9476 if (!launchd_assumes(j != NULL)) {
9477 return BOOTSTRAP_NO_MEMORY;
9478 }
9479
9480 jm = j->mgr;
9481
ddbbfbc1 9482 if (unlikely(!pid1_magic)) {
5b0a4722
A
9483 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
9484 return BOOTSTRAP_NOT_PRIVILEGED;
ddbbfbc1
A
9485 }
9486 if (unlikely(jobmgr_parent(jm) == NULL)) {
5b0a4722
A
9487 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
9488 return BOOTSTRAP_NOT_PRIVILEGED;
ddbbfbc1
A
9489 }
9490 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
5b0a4722
A
9491 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
9492 return BOOTSTRAP_NOT_PRIVILEGED;
ddbbfbc1
A
9493 }
9494 if (unlikely(!j->anonymous)) {
5b0a4722
A
9495 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
9496 return BOOTSTRAP_NOT_PRIVILEGED;
9497 }
9498
9499 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
9500
9501 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
9502 if (!job_assumes(j, outdata_obj_array)) {
9503 goto out_bad;
9504 }
9505
9506 *outdataCnt = 20 * 1024 * 1024;
9507 mig_allocate(outdata, *outdataCnt);
9508 if (!job_assumes(j, *outdata != 0)) {
9509 return 1;
9510 }
9511
9512 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9513 if (!ji->anonymous) {
9514 continue;
9515 }
9516 SLIST_FOREACH(ms, &ji->machservices, sle) {
9517 cnt++;
9518 }
9519 }
9520
9521 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
ddbbfbc1 9522 if (!job_assumes(j, ports != NULL)) {
5b0a4722
A
9523 goto out_bad;
9524 }
9525
9526 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9527 if (!ji->anonymous) {
9528 continue;
9529 }
9530
9531 SLIST_FOREACH(ms, &ji->machservices, sle) {
9532 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
dcace88f 9533 (void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
5b0a4722
A
9534 } else {
9535 goto out_bad;
9536 }
9537
9538 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
dcace88f 9539 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
5b0a4722
A
9540 } else {
9541 goto out_bad;
9542 }
9543
9544 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
dcace88f 9545 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
5b0a4722
A
9546 } else {
9547 goto out_bad;
9548 }
9549
9550 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
dcace88f 9551 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
5b0a4722
A
9552 } else {
9553 goto out_bad;
9554 }
9555
9556 ports[cnt2] = machservice_port(ms);
9557
9558 /* Increment the send right by one so we can shutdown the jobmgr cleanly */
dcace88f 9559 (void)jobmgr_assumes(jm, (errno = launchd_mport_copy_send(ports[cnt2])) == KERN_SUCCESS);
5b0a4722
A
9560 cnt2++;
9561 }
9562 }
9563
dcace88f 9564 (void)job_assumes(j, cnt == cnt2);
5b0a4722 9565
ddbbfbc1 9566 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
5b0a4722
A
9567 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
9568 if (!job_assumes(j, packed_size != 0)) {
9569 goto out_bad;
9570 }
9571
9572 launch_data_free(outdata_obj_array);
9573
9574 *portsp = ports;
9575 *ports_cnt = cnt;
9576
9577 *reqport = jm->req_port;
9578 *rcvright = jm->jm_port;
9579
9580 jm->req_port = 0;
9581 jm->jm_port = 0;
9582
9583 workaround_5477111 = j;
9584
9585 jobmgr_shutdown(jm);
9586
9587 return BOOTSTRAP_SUCCESS;
9588
9589out_bad:
9590 if (outdata_obj_array) {
9591 launch_data_free(outdata_obj_array);
9592 }
9593 if (*outdata) {
9594 mig_deallocate(*outdata, *outdataCnt);
9595 }
9596 if (ports) {
9597 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
9598 }
9599
9600 return BOOTSTRAP_NO_MEMORY;
9601}
9602
9603kern_return_t
9604job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
9605{
9606 int bsdepth = 0;
9607 jobmgr_t jmr;
9608
9609 if (!launchd_assumes(j != NULL)) {
9610 return BOOTSTRAP_NO_MEMORY;
9611 }
5c88273d
A
9612 if (j->mgr->shutting_down) {
9613 return BOOTSTRAP_UNKNOWN_SERVICE;
9614 }
5b0a4722
A
9615
9616 jmr = j->mgr;
9617
9618 while ((jmr = jobmgr_parent(jmr)) != NULL) {
9619 bsdepth++;
9620 }
9621
9622 /* Since we use recursion, we need an artificial depth for subsets */
ddbbfbc1 9623 if (unlikely(bsdepth > 100)) {
5b0a4722
A
9624 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
9625 return BOOTSTRAP_NO_MEMORY;
9626 }
9627
ddbbfbc1
A
9628 char name[NAME_MAX];
9629 snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
9630
dcace88f 9631 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
ddbbfbc1 9632 if (unlikely(requestorport == MACH_PORT_NULL)) {
5b0a4722
A
9633 return BOOTSTRAP_NOT_PRIVILEGED;
9634 }
9635 return BOOTSTRAP_NO_MEMORY;
9636 }
9637
9638 *subsetportp = jmr->jm_port;
ddbbfbc1
A
9639 jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
9640
587e987e
A
9641 /* A job could create multiple subsets, so only add a reference the first time
9642 * it does so we don't have to keep a count.
9643 */
dcace88f 9644 if (j->anonymous && !j->holds_ref) {
587e987e
A
9645 j->holds_ref = true;
9646 runtime_add_ref();
9647 }
9648
ddbbfbc1 9649 job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
5b0a4722
A
9650 return BOOTSTRAP_SUCCESS;
9651}
9652
dcace88f
A
9653#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
9654job_t
9655xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
9656{
9657 jobmgr_t where2put = NULL;
9658
9659 launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
9660 if (destname) {
9661 if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
9662 const char *str = launch_data_get_string(destname);
9663 if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
9664 where2put = _s_xpc_system_domain;
9665 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
9666 where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
9667 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
9668 where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
9669 } else {
9670 jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
9671 errno = EINVAL;
9672 }
9673 } else {
9674 jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
9675 errno = EINVAL;
9676 }
f36da725 9677
dcace88f
A
9678 if (where2put) {
9679 launch_data_t mi = NULL;
9680 if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
9681 if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
9682 jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
9683 where2put = NULL;
9684 errno = EINVAL;
9685 }
9686 }
9687 }
9688 } else {
9689 where2put = jm;
9690 }
9691
9692 job_t j = NULL;
9693 if (where2put) {
9694 jobmgr_log(where2put, LOG_DEBUG, "Importing service...");
9695 j = jobmgr_import2(where2put, pload);
9696 if (j) {
9697 j->xpc_service = true;
9698 if (where2put->xpc_singleton) {
9699 /* If the service was destined for one of the global domains,
9700 * then we have to alias it into our local domain to reserve the
9701 * name.
9702 */
9703 job_t ja = job_new_alias(jm, j);
9704 if (!ja) {
9705 /* If we failed to alias the job because of a conflict over
9706 * the label, then we remove it from the global domain. We
9707 * don't want to risk having imported a malicious job into
9708 * one of the global domains.
9709 */
9710 if (errno != EEXIST) {
9711 job_assumes(j, errno == 0);
9712 } else {
9713 job_log(j, LOG_ERR, "Failed to alias job into: %s", where2put->name);
9714 }
9715
9716 job_remove(j);
9717 } else {
9718 ja->xpc_service = true;
9719 j = ja;
9720 }
9721 }
9722 }
9723 }
9724
9725 return j;
f36da725
A
9726}
9727
9728kern_return_t
dcace88f 9729xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
f36da725 9730{
dcace88f
A
9731 if (unlikely(!pid1_magic)) {
9732 job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
9733 return BOOTSTRAP_NOT_PRIVILEGED;
9734 }
5c88273d 9735 if (!j || !MACH_PORT_VALID(reqport)) {
dcace88f
A
9736 return BOOTSTRAP_UNKNOWN_SERVICE;
9737 }
5c88273d
A
9738 if (root_jobmgr->shutting_down) {
9739 jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
9740 return BOOTSTRAP_NOT_PRIVILEGED;
9741 }
a6e7a709
A
9742 if (!j->xpc_bootstrapper) {
9743 job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
9744 return BOOTSTRAP_NOT_PRIVILEGED;
9745 }
f36da725 9746
dcace88f
A
9747 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9748 /* All XPC domains are children of the root job manager. What we're creating
9749 * here is really just a skeleton. By creating it, we're adding reqp to our
9750 * port set. It will have two messages on it. The first specifies the
9751 * environment of the originator. This is so we can cache it and hand it to
9752 * xpcproxy to bootstrap our services. The second is the set of jobs that is
9753 * to be bootstrapped in.
9754 */
9755 jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
9756 if (job_assumes(j, jm != NULL)) {
9757 jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
9758 jm->shortdesc = "private";
9759 kr = BOOTSTRAP_SUCCESS;
f36da725
A
9760 }
9761
dcace88f
A
9762 return kr;
9763}
9764
9765kern_return_t
9766xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
9767{
9768 if (!j) {
9769 /* Due to the whacky nature of XPC service bootstrapping, we can end up
9770 * getting this message long after the requesting process has gone away.
9771 * See <rdar://problem/8593143>.
9772 */
f36da725
A
9773 return BOOTSTRAP_UNKNOWN_SERVICE;
9774 }
9775
dcace88f
A
9776 jobmgr_t jm = j->mgr;
9777 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9778 return BOOTSTRAP_NOT_PRIVILEGED;
9779 }
9780
9781 if (jm->req_asport != MACH_PORT_NULL) {
9782 return BOOTSTRAP_NOT_PRIVILEGED;
9783 }
9784
9785 struct ldcred *ldc = runtime_get_caller_creds();
9786 struct proc_bsdshortinfo proc;
9787 if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
9788 if (errno != ESRCH) {
9789 jobmgr_assumes(jm, errno == 0);
9790 }
ddbbfbc1 9791
dcace88f
A
9792 jm->error = errno;
9793 jobmgr_remove(jm);
9794 return BOOTSTRAP_NO_MEMORY;
9795 }
9796
9797 if (!jobmgr_assumes(jm, audit_session_port(ldc->asid, &jm->req_asport) == 0)) {
9798 jm->error = EPERM;
9799 jobmgr_remove(jm);
9800 job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
f36da725
A
9801 return BOOTSTRAP_NOT_PRIVILEGED;
9802 }
9803
dcace88f
A
9804 (void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s[%i]", proc.pbsi_comm, ldc->pid);
9805 strlcpy(jm->owner, proc.pbsi_comm, sizeof(jm->owner));
9806 jm->req_bsport = bsport;
9807 jm->req_excport = excport;
9808 jm->req_rport = rp;
9809 jm->req_ctx = ctx;
9810 jm->req_ctx_sz = ctx_sz;
9811 jm->req_pid = ldc->pid;
9812 jm->req_euid = ldc->euid;
9813 jm->req_egid = ldc->egid;
9814 jm->req_asid = ldc->asid;
9815
9816 return KERN_SUCCESS;
9817}
9818
9819kern_return_t
9820xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
9821{
9822 if (!j) {
9823 return BOOTSTRAP_UNKNOWN_SERVICE;
ddbbfbc1
A
9824 }
9825
a6e7a709
A
9826 job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
9827 if (!(rootj && rootj->xpc_bootstrapper)) {
9828 job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
9829 return BOOTSTRAP_NOT_PRIVILEGED;
9830 }
9831
dcace88f
A
9832 /* This is just for XPC domains (for now). */
9833 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9834 return BOOTSTRAP_NOT_PRIVILEGED;
9835 }
9836 if (j->mgr->session_initialized) {
9837 jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
9838 return BOOTSTRAP_NOT_PRIVILEGED;
9839 }
f36da725 9840
dcace88f
A
9841 size_t offset = 0;
9842 launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
9843 if (!jobmgr_assumes(j->mgr, services != NULL)) {
f36da725
A
9844 return BOOTSTRAP_NO_MEMORY;
9845 }
9846
dcace88f
A
9847 size_t i = 0;
9848 size_t c = launch_data_array_get_count(services);
9849 for (i = 0; i < c; i++) {
9850 job_t nj = NULL;
9851 launch_data_t ploadi = launch_data_array_get_index(services, i);
9852 if (!(nj = xpc_domain_import_service(j->mgr, ploadi))) {
9853 /* If loading one job fails, just fail the whole thing. At this
9854 * point, xpchelper should receive the failure and then just refuse
9855 * to launch the application, since its XPC services could not be
9856 * fully bootstrapped.
9857 *
9858 * Take care to not reference the job or its manager after this
9859 * point.
9860 */
9861 if (errno == EINVAL) {
9862 jobmgr_log(j->mgr, LOG_ERR, "Service at index is not valid: %lu", i);
9863 } else if (errno == EEXIST) {
9864 /* If we get back EEXIST, we know that the payload was a
9865 * dictionary with a label. But, well, I guess it never hurts to
9866 * check.
9867 */
9868 char *label = "(bogus)";
9869 if (launch_data_get_type(ploadi) == LAUNCH_DATA_DICTIONARY) {
9870 launch_data_t llabel = launch_data_dict_lookup(ploadi, LAUNCH_JOBKEY_LABEL);
9871 if (launch_data_get_type(llabel) == LAUNCH_DATA_STRING) {
9872 label = (char *)launch_data_get_string(llabel);
9873 }
9874 }
9875 jobmgr_log(j->mgr, LOG_ERR, "Service name conflict: %s", label);
9876 }
ddbbfbc1 9877
dcace88f
A
9878 j->mgr->error = errno;
9879 jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
9880 jobmgr_remove(j->mgr);
9881 break;
9882 } else {
9883 jobmgr_log(j->mgr, LOG_DEBUG, "Imported service %s", nj->label);
9884 job_dispatch(nj, false);
9885 }
f36da725
A
9886 }
9887
dcace88f
A
9888 kern_return_t result = BOOTSTRAP_NO_MEMORY;
9889 if (i == c) {
9890 j->mgr->session_initialized = true;
9891 (void)jobmgr_assumes(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS) == KERN_SUCCESS);
9892 j->mgr->req_rport = MACH_PORT_NULL;
9893
9894 /* Returning a failure code will destroy the message, whereas returning
9895 * success will not, so we need to clean up here.
9896 */
9897 mig_deallocate(services_buff, services_sz);
9898 result = BOOTSTRAP_SUCCESS;
ddbbfbc1 9899 }
f36da725 9900
dcace88f 9901 return result;
f36da725
A
9902}
9903
5b0a4722 9904kern_return_t
dcace88f 9905xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport, mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid, int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
5b0a4722 9906{
dcace88f
A
9907 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9908 return BOOTSTRAP_UNKNOWN_SERVICE;
5b0a4722 9909 }
dcace88f
A
9910 jobmgr_t jm = j->mgr;
9911 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9912 return BOOTSTRAP_NOT_PRIVILEGED;
ddbbfbc1 9913 }
ddbbfbc1 9914
dcace88f
A
9915 if (jm->req_asport == MACH_PORT_NULL) {
9916 return BOOTSTRAP_NOT_PRIVILEGED;
9917 }
9918
9919 *bsport = jm->req_bsport;
9920 *sbsport = root_jobmgr->jm_port;
9921 *excport = jm->req_excport;
9922 *asport = jm->req_asport;
9923 *uid = jm->req_euid;
9924 *gid = jm->req_egid;
9925 *asid = jm->req_asid;
9926
9927 *ctx = jm->req_ctx;
9928 *ctx_sz = jm->req_ctx_sz;
9929
9930 return KERN_SUCCESS;
5b0a4722
A
9931}
9932
9933kern_return_t
dcace88f 9934xpc_domain_get_service_name(job_t j, event_name_t name)
5b0a4722 9935{
dcace88f 9936 if (!j) {
5b0a4722
A
9937 return BOOTSTRAP_NO_MEMORY;
9938 }
dcace88f
A
9939 if (!j->xpc_service) {
9940 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
9941 return BOOTSTRAP_NOT_PRIVILEGED;
5b0a4722 9942 }
dcace88f
A
9943
9944 struct machservice * ms = SLIST_FIRST(&j->machservices);
9945 if (!ms) {
9946 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name of job with no machservices: %s", j->label);
9947 return BOOTSTRAP_UNKNOWN_SERVICE;
5b0a4722 9948 }
dcace88f
A
9949
9950 (void)strlcpy(name, ms->name, sizeof(event_name_t));
9951 return BOOTSTRAP_SUCCESS;
9952}
5c88273d 9953#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
dcace88f
A
9954
9955kern_return_t
9956xpc_events_get_channel_name(job_t j __attribute__((unused)), event_name_t stream __attribute__((unused)), uint64_t token __attribute__((unused)), event_name_t name __attribute__((unused)))
9957{
9958 return KERN_FAILURE;
9959}
9960
9961kern_return_t
9962xpc_events_get_event_name(job_t j, event_name_t stream, uint64_t token, event_name_t name)
9963{
9964 struct externalevent *event = externalevent_find(stream, token);
9965 if (event && j->event_monitor) {
9966 (void)strcpy(name, event->name);
9967 } else {
9968 event = NULL;
cf0bacfd 9969 }
dcace88f
A
9970
9971 return event ? BOOTSTRAP_SUCCESS : BOOTSTRAP_UNKNOWN_SERVICE;
9972}
ddbbfbc1 9973
dcace88f
A
9974kern_return_t
9975xpc_events_set_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t event, mach_msg_type_number_t eventCnt)
9976{
9977 if (j->anonymous) {
9978 return BOOTSTRAP_NOT_PRIVILEGED;
ededfeb7 9979 }
dcace88f
A
9980
9981 struct externalevent *eei = NULL;
9982 LIST_FOREACH(eei, &j->events, job_le) {
9983 if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
9984 externalevent_delete(eei);
9985 eventsystem_ping();
9986 break;
9987 }
9988 }
9989
9990 bool success = false;
9991 struct eventsystem *es = eventsystem_find(stream);
9992 if (!es) {
9993 es = eventsystem_new(stream);
9994 (void)job_assumes(j, es != NULL);
9995 }
9996
9997 if (es) {
9998 size_t offset = 0;
9999 launch_data_t unpacked = launch_data_unpack((void *)event, eventCnt, NULL, 0, &offset, 0);
10000 if (unpacked && launch_data_get_type(unpacked) == LAUNCH_DATA_DICTIONARY) {
10001 success = externalevent_new(j, es, key, unpacked);
10002 }
10003 }
10004
10005 if (!success) {
10006 mig_deallocate(event, eventCnt);
10007 }
10008
10009 return KERN_SUCCESS;
ddbbfbc1 10010}
cf0bacfd 10011
ddbbfbc1 10012kern_return_t
dcace88f
A
10013xpc_events_get_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t *event, mach_msg_type_number_t *eventCnt)
10014{
10015 struct externalevent *eei = NULL;
10016 LIST_FOREACH(eei, &j->events, job_le) {
10017 if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
10018 /* Big enough. */
10019 *eventCnt = 10 * 1024;
10020 mig_allocate(event, *eventCnt);
10021
10022 size_t sz = launch_data_pack(eei->event, (void *)*event, *eventCnt, NULL, NULL);
10023 if (!job_assumes(j, sz != 0)) {
10024 mig_deallocate(*event, *eventCnt);
10025 return BOOTSTRAP_NO_MEMORY;
10026 }
10027
10028 return BOOTSTRAP_SUCCESS;
10029 }
10030 }
10031
10032 return BOOTSTRAP_UNKNOWN_SERVICE;
10033}
10034
10035struct machservice *
10036xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p)
ddbbfbc1 10037{
dcace88f
A
10038 struct machservice *msi = NULL;
10039 SLIST_FOREACH(msi, &j->machservices, sle) {
10040 if (strcmp(stream, msi->name) == 0) {
10041 break;
10042 }
10043 }
10044
10045 if (!msi) {
10046 mach_port_t sp = MACH_PORT_NULL;
10047 msi = machservice_new(j, stream, &sp, false);
10048 if (job_assumes(j, msi)) {
10049 /* Hack to keep this from being publicly accessible through
10050 * bootstrap_look_up().
10051 */
10052 LIST_REMOVE(msi, name_hash_sle);
10053 msi->event_channel = true;
10054 *p = sp;
10055
f70834ee
A
10056 /* If we call job_dispatch() here before the audit session for the
10057 * job has been set, we'll end up not watching this service. But we
10058 * also have to take care not to watch the port if the job is
10059 * active.
10060 *
10061 * See <rdar://problem/10357855>.
10062 */
10063 if (!j->currently_ignored) {
10064 machservice_watch(j, msi);
10065 }
dcace88f
A
10066 } else {
10067 errno = BOOTSTRAP_NO_MEMORY;
10068 }
10069 } else {
10070 if (!msi->event_channel) {
10071 job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
10072 msi = NULL;
10073 errno = BOOTSTRAP_NAME_IN_USE;
10074 } else {
10075 *p = msi->port;
10076 }
10077 }
10078
10079 return msi;
10080}
10081
10082kern_return_t
10083xpc_events_channel_check_in(job_t j, event_name_t stream, uint64_t flags __attribute__((unused)), mach_port_t *p)
10084{
10085 struct machservice *ms = xpc_events_find_channel(j, stream, p);
10086 if (ms) {
10087 if (ms->isActive) {
10088 job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
10089 *p = MACH_PORT_NULL;
10090 errno = BOOTSTRAP_SERVICE_ACTIVE;
10091 } else {
10092 job_checkin(j);
10093 machservice_request_notifications(ms);
10094 errno = BOOTSTRAP_SUCCESS;
10095 }
10096 }
10097
10098 return errno;
10099}
10100
10101kern_return_t
10102xpc_events_channel_look_up(job_t j, event_name_t stream, event_token_t token, uint64_t flags __attribute__((unused)), mach_port_t *p)
10103{
10104 if (!j->event_monitor) {
10105 return BOOTSTRAP_NOT_PRIVILEGED;
10106 }
10107
10108 struct externalevent *ee = externalevent_find(stream, token);
10109 if (!ee) {
10110 return BOOTSTRAP_UNKNOWN_SERVICE;
10111 }
10112
10113 struct machservice *ms = xpc_events_find_channel(ee->job, stream, p);
10114 if (ms) {
10115 errno = BOOTSTRAP_SUCCESS;
10116 }
10117
10118 return errno;
10119}
10120
10121kern_return_t
10122job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
10123{
10124 struct ldcred *ldc = runtime_get_caller_creds();
10125 job_t otherj;
10126
ddbbfbc1 10127 if (!launchd_assumes(j != NULL)) {
5b0a4722
A
10128 return BOOTSTRAP_NO_MEMORY;
10129 }
10130
dcace88f
A
10131 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
10132 return BOOTSTRAP_UNKNOWN_SERVICE;
5b0a4722
A
10133 }
10134
dcace88f
A
10135#if TARGET_OS_EMBEDDED
10136 bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
10137#else
10138 bool allow_non_root_kickstart = false;
10139#endif
10140
10141 if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
10142 return BOOTSTRAP_NOT_PRIVILEGED;
10143 }
10144
10145#if HAVE_SANDBOX
10146 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10147 return BOOTSTRAP_NOT_PRIVILEGED;
10148 }
10149#endif
10150
10151 if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
10152 return BOOTSTRAP_SERVICE_ACTIVE;
10153 }
10154
10155 otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
10156 otherj = job_dispatch(otherj, true);
10157
10158 if (!job_assumes(j, otherj && otherj->p)) {
10159 /* <rdar://problem/6787083> Clear this flag if we failed to start the job. */
10160 otherj->stall_before_exec = false;
10161 return BOOTSTRAP_NO_MEMORY;
10162 }
10163
10164 *out_pid = otherj->p;
10165
5b0a4722
A
10166 return 0;
10167}
10168
10169kern_return_t
dcace88f 10170job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
5b0a4722 10171{
dcace88f 10172 launch_data_t jobdata = NULL;
5b0a4722 10173 size_t data_offset = 0;
ddbbfbc1 10174 struct ldcred *ldc = runtime_get_caller_creds();
5b0a4722 10175 job_t jr;
dcace88f 10176
5b0a4722
A
10177 if (!launchd_assumes(j != NULL)) {
10178 return BOOTSTRAP_NO_MEMORY;
10179 }
10180
f36da725
A
10181 if (unlikely(j->deny_job_creation)) {
10182 return BOOTSTRAP_NOT_PRIVILEGED;
10183 }
10184
ddbbfbc1
A
10185#if HAVE_SANDBOX
10186 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10187 return BOOTSTRAP_NOT_PRIVILEGED;
10188 }
10189#endif
dcace88f 10190
ddbbfbc1 10191 if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
5b0a4722
A
10192 job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
10193 return VPROC_ERR_TRY_PER_USER;
10194 }
10195
10196 if (!job_assumes(j, indataCnt != 0)) {
10197 return 1;
10198 }
10199
ddbbfbc1 10200 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
dcace88f 10201 if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
5b0a4722
A
10202 return 1;
10203 }
10204
ddbbfbc1 10205 jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
dcace88f
A
10206 if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
10207 jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
ddbbfbc1
A
10208 return 1;
10209 }
10210
dcace88f
A
10211 jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);
10212
10213 launch_data_t label = NULL;
10214 launch_data_t wait4debugger = NULL;
10215 if (!jr) {
5b0a4722
A
10216 switch (errno) {
10217 case EEXIST:
dcace88f
A
10218 /* If EEXIST was returned, we know that there is a label string in
10219 * the dictionary. So we don't need to check the types here; that
10220 * has already been done.
10221 */
10222 label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
10223 jr = job_find(NULL, launch_data_get_string(label));
10224 if (job_assumes(j, jr != NULL) && !jr->p) {
10225 wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
10226 if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
10227 if (launch_data_get_bool(wait4debugger)) {
10228 /* If the job exists, we're going to kick-start it, but
10229 * we need to give the caller the opportunity to start
10230 * it suspended if it so desires. But this will only
10231 * take effect if the job isn't running.
10232 */
10233 jr->wait4debugger_oneshot = true;
10234 }
10235 }
10236 }
10237
10238 *outj = jr;
5b0a4722
A
10239 return BOOTSTRAP_NAME_IN_USE;
10240 default:
10241 return BOOTSTRAP_NO_MEMORY;
10242 }
10243 }
10244
ddbbfbc1
A
10245 if (pid1_magic) {
10246 jr->mach_uid = ldc->uid;
5b0a4722
A
10247 }
10248
ddbbfbc1 10249 jr->legacy_LS_job = true;
5b0a4722 10250 jr->abandon_pg = true;
dcace88f 10251 jr->asport = asport;
ddbbfbc1 10252 uuid_clear(jr->expected_audit_uuid);
5b0a4722
A
10253 jr = job_dispatch(jr, true);
10254
10255 if (!job_assumes(j, jr != NULL)) {
fe044cc9
A
10256 job_remove(jr);
10257 return BOOTSTRAP_NO_MEMORY;
10258 }
5b0a4722 10259
dcace88f 10260 if (!job_assumes(jr, jr->p)) {
5b0a4722
A
10261 job_remove(jr);
10262 return BOOTSTRAP_NO_MEMORY;
10263 }
10264
fe044cc9 10265 job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
dcace88f 10266 *outj = jr;
5b0a4722 10267
dcace88f
A
10268 return BOOTSTRAP_SUCCESS;
10269}
10270
10271kern_return_t
10272job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
10273{
10274 job_t nj = NULL;
10275 kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
10276 if (likely(kr == KERN_SUCCESS)) {
10277 if (job_setup_exit_port(nj) != KERN_SUCCESS) {
10278 job_remove(nj);
10279 kr = BOOTSTRAP_NO_MEMORY;
10280 } else {
10281 /* Do not return until the job has called exec(3), thereby making it
10282 * safe for the caller to send it SIGCONT.
10283 *
10284 * <rdar://problem/9042798>
10285 */
10286 nj->spawn_reply_port = rp;
10287 kr = MIG_NO_REPLY;
10288 }
10289 } else if (kr == BOOTSTRAP_NAME_IN_USE) {
10290 bool was_running = nj->p;
10291 if (job_dispatch(nj, true)) {
10292 if (!was_running) {
10293 job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");
10294
10295 if (job_setup_exit_port(nj) == KERN_SUCCESS) {
10296 nj->spawn_reply_port = rp;
10297 kr = MIG_NO_REPLY;
10298 } else {
10299 kr = BOOTSTRAP_NO_MEMORY;
10300 }
10301 } else {
10302 *obsvr_port = MACH_PORT_NULL;
10303 *child_pid = nj->p;
10304 kr = KERN_SUCCESS;
10305 }
10306 } else {
10307 job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
10308 kr = BOOTSTRAP_UNKNOWN_SERVICE;
10309 }
10310 }
5b0a4722
A
10311
10312 mig_deallocate(indata, indataCnt);
dcace88f
A
10313 return kr;
10314}
10315
10316kern_return_t
10317job_mig_event_source_check_in(job_t j, name_t name, mach_port_t ping_port, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt, uint64_t *tokens)
10318{
10319 if (!j || !j->event_monitor) {
10320 return BOOTSTRAP_NOT_PRIVILEGED;
10321 }
10322
10323 /* Update our ping-port. One ping will force all the notification systems
10324 * to check in, so they'll all give us send-once rights. It doesn't really
10325 * matter which one we keep around. It's not the most efficient thing ever,
10326 * but keep in mind that, by doing this over one channel, we can do it over
10327 * the job's MachService. This means that we'll get it back when the job dies,
10328 * and we can create ourselves a send-once right if we didn't have one already,
10329 * and we can just keep the helper alive without it needing to bootstrap
10330 * communication.
10331 *
10332 * So we're trading efficiency for robustness. In this case, the checkins
10333 * should happen pretty infrequently, so it's pretty worth it.
10334 */
10335 if (_s_event_update_port != MACH_PORT_NULL) {
10336 (void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
10337 }
10338 _s_event_update_port = ping_port;
10339
10340 kern_return_t result = BOOTSTRAP_NO_MEMORY;
10341 launch_data_t arr = launch_data_alloc(LAUNCH_DATA_ARRAY);
10342 if (job_assumes(j, arr != NULL)) {
10343 struct eventsystem *es = eventsystem_find(name);
10344 if (unlikely(es == NULL)) {
10345 es = eventsystem_new(name);
10346 }
10347
10348 if (job_assumes(j, es != NULL)) {
10349 struct externalevent *ei = NULL;
10350 size_t i = 0;
10351 LIST_FOREACH(ei, &es->events, sys_le) {
10352 (void)job_assumes(j, launch_data_array_set_index(arr, ei->event, i));
10353 if (job_assumes(j, i < 1024)) {
10354 tokens[i] = ei->id;
10355 } else {
10356 break;
10357 }
10358 i++;
10359 }
10360
10361 /* Big enough. */
10362 *outvalCnt = 10 * 1024;
10363 mig_allocate(outval, *outvalCnt);
10364
10365 size_t sz = launch_data_pack(arr, (void *)*outval, *outvalCnt, NULL, NULL);
10366 if (job_assumes(j, sz != 0)) {
10367 result = BOOTSTRAP_SUCCESS;
10368 } else {
10369 mig_deallocate(*outval, *outvalCnt);
10370 }
10371 }
10372
10373 /* Total hack, but launch_data doesn't do ref-counting. */
10374 struct _launch_data *hack = (struct _launch_data *)arr;
10375 free(hack->_array);
10376 free(arr);
10377 }
10378
10379 return result;
10380}
5b0a4722 10381
dcace88f
A
10382kern_return_t
10383job_mig_event_set_state(job_t j, name_t name, uint64_t token, boolean_t state)
10384{
5c88273d 10385 if (!j || !j->event_monitor) {
dcace88f
A
10386 return BOOTSTRAP_NOT_PRIVILEGED;
10387 }
10388
10389 struct externalevent *ei = externalevent_find(name, token);
10390 if (job_assumes(j, ei != NULL)) {
10391 ei->state = state;
10392 if(job_dispatch(ei->job, false) == NULL) {
10393 if (errno == EPERM) {
10394 return BOOTSTRAP_NOT_PRIVILEGED;
10395 }
10396 return BOOTSTRAP_NO_MEMORY;
10397 }
10398 } else {
10399 return BOOTSTRAP_NO_MEMORY;
10400 }
10401
5b0a4722
A
10402 return BOOTSTRAP_SUCCESS;
10403}
10404
10405void
10406jobmgr_init(bool sflag)
10407{
ddbbfbc1
A
10408 const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
10409 SLIST_INIT(&s_curious_jobs);
10410 LIST_INIT(&s_needing_sessions);
10411
10412 launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
dcace88f
A
10413#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
10414 launchd_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
10415 _s_xpc_system_domain->req_asid = g_audit_session;
10416 _s_xpc_system_domain->req_asport = g_audit_session_port;
10417 _s_xpc_system_domain->shortdesc = "system";
5c88273d 10418#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
dcace88f
A
10419 if (pid1_magic) {
10420 root_jobmgr->monitor_shutdown = true;
10421 }
10422
ddbbfbc1
A
10423 uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
10424 s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
dcace88f
A
10425 if (likely(s_no_hang_fd == -1)) {
10426 if (jobmgr_assumes(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK)) != -1)) {
10427 (void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr) != -1);
ddbbfbc1
A
10428 }
10429 }
10430 s_no_hang_fd = _fd(s_no_hang_fd);
5b0a4722
A
10431}
10432
10433size_t
10434our_strhash(const char *s)
10435{
10436 size_t c, r = 5381;
10437
10438 /* djb2
10439 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
10440 */
10441
10442 while ((c = *s++)) {
10443 r = ((r << 5) + r) + c; /* hash*33 + c */
10444 }
10445
10446 return r;
10447}
10448
10449size_t
10450hash_label(const char *label)
10451{
10452 return our_strhash(label) % LABEL_HASH_SIZE;
10453}
10454
10455size_t
10456hash_ms(const char *msstr)
10457{
10458 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
10459}
10460
10461bool
ddbbfbc1 10462waiting4removal_new(job_t j, mach_port_t rp)
5b0a4722 10463{
ddbbfbc1 10464 struct waiting_for_removal *w4r;
5b0a4722 10465
ddbbfbc1 10466 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
5b0a4722
A
10467 return false;
10468 }
10469
ddbbfbc1 10470 w4r->reply_port = rp;
5b0a4722 10471
ddbbfbc1 10472 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
5b0a4722
A
10473
10474 return true;
10475}
10476
10477void
ddbbfbc1 10478waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
5b0a4722 10479{
dcace88f 10480 (void)job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);
5b0a4722 10481
ddbbfbc1 10482 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
5b0a4722 10483
ddbbfbc1 10484 free(w4r);
5b0a4722
A
10485}
10486
f36da725
A
10487size_t
10488get_kern_max_proc(void)
10489{
10490 int mib[] = { CTL_KERN, KERN_MAXPROC };
10491 int max = 100;
10492 size_t max_sz = sizeof(max);
ddbbfbc1 10493
dcace88f 10494 (void)launchd_assumes(sysctl(mib, 2, &max, &max_sz, NULL, 0) != -1);
ddbbfbc1 10495
f36da725
A
10496 return max;
10497}
10498
ddbbfbc1 10499/* See rdar://problem/6271234 */
5b0a4722 10500void
ddbbfbc1 10501eliminate_double_reboot(void)
5b0a4722 10502{
dcace88f 10503 if (unlikely(!pid1_magic)) {
ddbbfbc1
A
10504 return;
10505 }
10506
f36da725 10507 struct stat sb;
ddbbfbc1
A
10508 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
10509 char *try_again = "Will try again at next boot.";
10510 int result = ~0;
10511
dcace88f 10512 if (unlikely(stat(argv[1], &sb) != -1)) {
ddbbfbc1
A
10513 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
10514
10515 int wstatus;
10516 pid_t p;
10517
dcace88f 10518 (void)jobmgr_assumes(root_jobmgr, (errno = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ)) == 0);
ddbbfbc1
A
10519
10520 if (errno) {
10521 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script! %s", try_again);
10522 goto out;
10523 }
10524
dcace88f 10525 if (!jobmgr_assumes(root_jobmgr, waitpid(p, &wstatus, 0) != -1)) {
ddbbfbc1
A
10526 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't confirm that deferred install script exited successfully! %s", try_again);
10527 goto out;
10528 }
10529
dcace88f
A
10530 if (jobmgr_assumes(root_jobmgr, WIFEXITED(wstatus) != 0)) {
10531 if (jobmgr_assumes(root_jobmgr, (result = WEXITSTATUS(wstatus)) == EXIT_SUCCESS)) {
ddbbfbc1
A
10532 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
10533 } else {
10534 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script exited with status %d. %s", WEXITSTATUS(wstatus), try_again);
10535 }
10536 } else {
10537 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Confirmed that deferred install script exited, but couldn't confirm that it was successful. %s", try_again);
10538 }
10539 }
10540out:
dcace88f 10541 if (result == 0) {
ddbbfbc1
A
10542 /* If the unlink(2) was to fail, it would be most likely fail with EBUSY. All the other
10543 * failure cases for unlink(2) don't apply when we're running under PID 1 and have verified
10544 * that the file exists. Outside of someone deliberately messing with us (like if /etc/rc.deferredinstall
10545 * is actually a looping sym-link or a mount point for a filesystem) and I/O errors, we should be good.
10546 */
dcace88f 10547 if (!jobmgr_assumes(root_jobmgr, unlink(argv[1]) != -1)) {
ddbbfbc1
A
10548 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script couldn't be removed!");
10549 }
10550 }
10551}
10552
587e987e
A
10553void
10554jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
10555{
10556 job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
dcace88f 10557 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
587e987e
A
10558 j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
10559 job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
dcace88f 10560 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
587e987e
A
10561 j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
10562 job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
dcace88f 10563 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
587e987e
A
10564 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
10565 * You can't set this in a plist.
10566 */
dcace88f 10567 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
587e987e
A
10568 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
10569 * complain about it.
10570 */
10571 } else {
10572 job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
ddbbfbc1
A
10573 }
10574
dcace88f 10575 if (unlikely(!j->jetsam_properties)) {
587e987e
A
10576 j->jetsam_properties = true;
10577 LIST_INSERT_HEAD(&j->mgr->jetsam_jobs, j, jetsam_sle);
10578 j->mgr->jetsam_jobs_cnt++;
10579 }
dcace88f
A
10580
10581 j->jetsam_seq = s_jetsam_sequence_id++;
ddbbfbc1 10582}
5b0a4722 10583
ddbbfbc1
A
10584int
10585launchd_set_jetsam_priorities(launch_data_t priorities)
10586{
dcace88f 10587 if (!launchd_assumes(launch_data_get_type(priorities) == LAUNCH_DATA_ARRAY)) {
ddbbfbc1
A
10588 return EINVAL;
10589 }
587e987e 10590
ddbbfbc1
A
10591 jobmgr_t jm = NULL;
10592#if !TARGET_OS_EMBEDDED
10593 /* For testing. */
10594 jm = jobmgr_find_by_name(root_jobmgr, VPROCMGR_SESSION_AQUA);
dcace88f 10595 if (!launchd_assumes(jm != NULL)) {
ddbbfbc1
A
10596 return EINVAL;
10597 }
10598#else
10599 /* Since this is for embedded, we can assume that the root job manager holds the Jetsam jobs. */
10600 jm = root_jobmgr;
10601
dcace88f 10602 if (!g_embedded_privileged_action) {
ddbbfbc1 10603 return EPERM;
f36da725 10604 }
ddbbfbc1
A
10605#endif
10606
10607 size_t npris = launch_data_array_get_count(priorities);
10608
10609 job_t ji = NULL;
10610 size_t i = 0;
dcace88f 10611 for (i = 0; i < npris; i++) {
ddbbfbc1 10612 launch_data_t ldi = launch_data_array_get_index(priorities, i);
dcace88f 10613 if (!launchd_assumes(launch_data_get_type(ldi) == LAUNCH_DATA_DICTIONARY)) {
ddbbfbc1
A
10614 continue;
10615 }
10616
10617 launch_data_t label = NULL;
dcace88f 10618 if (!launchd_assumes(label = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMLABEL))) {
ddbbfbc1
A
10619 continue;
10620 }
10621 const char *_label = launch_data_get_string(label);
10622
dcace88f
A
10623 ji = job_find(NULL, _label);
10624 if (!launchd_assumes(ji != NULL)) {
ddbbfbc1
A
10625 continue;
10626 }
587e987e
A
10627
10628 launch_data_dict_iterate(ldi, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, ji);
10629
ddbbfbc1 10630 launch_data_t frontmost = NULL;
dcace88f 10631 if ((frontmost = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMFRONTMOST)) && launch_data_get_type(frontmost) == LAUNCH_DATA_BOOL) {
587e987e 10632 ji->jetsam_frontmost = launch_data_get_bool(frontmost);
ddbbfbc1 10633 }
ddbbfbc1
A
10634 }
10635
10636 i = 0;
10637 job_t *jobs = (job_t *)calloc(jm->jetsam_jobs_cnt, sizeof(job_t));
dcace88f
A
10638 if (launchd_assumes(jobs != NULL)) {
10639 LIST_FOREACH(ji, &jm->jetsam_jobs, jetsam_sle) {
10640 if (ji->p) {
587e987e
A
10641 jobs[i] = ji;
10642 i++;
10643 }
ddbbfbc1
A
10644 }
10645 }
587e987e 10646
ddbbfbc1
A
10647 size_t totalpris = i;
10648
10649 int result = EINVAL;
587e987e
A
10650
10651 /* It is conceivable that there could be no Jetsam jobs running. */
dcace88f 10652 if (totalpris > 0) {
587e987e
A
10653 /* Yay blocks! */
10654 qsort_b((void *)jobs, totalpris, sizeof(job_t), ^ int (const void *lhs, const void *rhs) {
10655 job_t _lhs = *(job_t *)lhs;
10656 job_t _rhs = *(job_t *)rhs;
10657 /* Sort in descending order. (Priority correlates to the soonishness with which you will be killed.) */
dcace88f 10658 if (_lhs->jetsam_priority > _rhs->jetsam_priority) {
587e987e 10659 return -1;
dcace88f 10660 } else if (_lhs->jetsam_priority < _rhs->jetsam_priority) {
587e987e
A
10661 return 1;
10662 }
dcace88f
A
10663 /* Priority is equal, so sort by sequence ID to maintain LRU order */
10664 if( (int)(_lhs->jetsam_seq - _rhs->jetsam_seq) > 0 ) {
10665 return 1;
10666 } else if( (int)(_lhs->jetsam_seq - _rhs->jetsam_seq) < 0 ) {
10667 return -1;
10668 }
587e987e
A
10669
10670 return 0;
10671 });
ddbbfbc1
A
10672
10673 jetsam_priority_entry_t *jpris = (jetsam_priority_entry_t *)calloc(totalpris, sizeof(jetsam_priority_entry_t));
dcace88f 10674 if (!launchd_assumes(jpris != NULL)) {
ddbbfbc1
A
10675 result = ENOMEM;
10676 } else {
dcace88f 10677 for (i = 0; i < totalpris; i++) {
587e987e
A
10678 jpris[i].pid = jobs[i]->p; /* Subject to time-of-use vs. time-of-check, obviously. */
10679 jpris[i].flags |= jobs[i]->jetsam_frontmost ? kJetsamFlagsFrontmost : 0;
10680 jpris[i].hiwat_pages = jobs[i]->jetsam_memlimit;
ddbbfbc1
A
10681 }
10682
dcace88f 10683 (void)launchd_assumes((result = sysctlbyname("kern.memorystatus_priority_list", NULL, NULL, &jpris[0], totalpris * sizeof(jetsam_priority_entry_t))) != -1);
587e987e 10684 result = result != 0 ? errno : 0;
ddbbfbc1
A
10685
10686 free(jpris);
10687 }
10688 }
587e987e 10689
dcace88f 10690 if (jobs) {
587e987e
A
10691 free(jobs);
10692 }
ddbbfbc1
A
10693
10694 return result;
5b0a4722 10695}