]> git.saurik.com Git - apple/launchd.git/blob - launchd/src/launchd_core_logic.c
launchd-392.38.tar.gz
[apple/launchd.git] / launchd / src / launchd_core_logic.c
1 /*
2 * @APPLE_APACHE_LICENSE_HEADER_START@
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * @APPLE_APACHE_LICENSE_HEADER_END@
17 */
18
19 static const char *const __rcs_file_version__ = "$Revision: 25693 $";
20
21 #include "config.h"
22 #include "launchd_core_logic.h"
23 #include "launch_internal.h"
24 #include "launchd_helper.h"
25
26 #include <TargetConditionals.h>
27 #include <mach/mach.h>
28 #include <mach/mach_error.h>
29 #include <mach/boolean.h>
30 #include <mach/message.h>
31 #include <mach/notify.h>
32 #include <mach/mig_errors.h>
33 #include <mach/mach_traps.h>
34 #include <mach/mach_interface.h>
35 #include <mach/host_info.h>
36 #include <mach/mach_host.h>
37 #include <mach/exception.h>
38 #include <mach/host_reboot.h>
39 #include <sys/types.h>
40 #include <sys/queue.h>
41 #include <sys/event.h>
42 #include <sys/stat.h>
43 #include <sys/ucred.h>
44 #include <sys/fcntl.h>
45 #include <sys/un.h>
46 #include <sys/reboot.h>
47 #include <sys/wait.h>
48 #include <sys/sysctl.h>
49 #include <sys/sockio.h>
50 #include <sys/time.h>
51 #include <sys/resource.h>
52 #include <sys/ioctl.h>
53 #include <sys/mount.h>
54 #include <sys/pipe.h>
55 #include <sys/mman.h>
56 #include <sys/socket.h>
57 #include <sys/syscall.h>
58 #include <net/if.h>
59 #include <netinet/in.h>
60 #include <netinet/in_var.h>
61 #include <netinet6/nd6.h>
62 #include <bsm/libbsm.h>
63 #include <unistd.h>
64 #include <signal.h>
65 #include <errno.h>
66 #include <libgen.h>
67 #include <stdio.h>
68 #include <stdlib.h>
69 #include <stdarg.h>
70 #include <stdbool.h>
71 #include <paths.h>
72 #include <pwd.h>
73 #include <grp.h>
74 #include <ttyent.h>
75 #include <dlfcn.h>
76 #include <dirent.h>
77 #include <string.h>
78 #include <ctype.h>
79 #include <glob.h>
80 #include <System/sys/spawn.h>
81 #include <spawn.h>
82 #include <time.h>
83 #include <libinfo.h>
84
85 #include <libproc.h>
86 #include <malloc/malloc.h>
87 #include <pthread.h>
88 #include <libproc.h>
89 #if HAVE_SANDBOX
90 #define __APPLE_API_PRIVATE
91 #include <sandbox.h>
92 #endif
93 #if HAVE_QUARANTINE
94 #include <quarantine.h>
95 #endif
96 #if TARGET_OS_EMBEDDED
97 #include <sys/kern_memorystatus.h>
98 #else
99 extern int gL1CacheEnabled;
100 /* To make my life easier. */
/* Stand-in for the kernel's jetsam priority entry; presumably mirrors the
 * layout in <sys/kern_memorystatus.h>, which is only included on embedded
 * builds above — TODO confirm the layouts stay in sync. */
typedef struct jetsam_priority_entry {
	pid_t pid;		/* Process the priority applies to. */
	uint32_t priority;
	uint32_t flags;		/* kJetsamFlags* bits, defined below. */
	int32_t hiwat_pages;
	int32_t hiwat_reserved1;
	int32_t hiwat_reserved2;
	int32_t hiwat_reserved3;
} jetsam_priority_entry_t;
110
/* Bits for jetsam_priority_entry_t.flags. */
enum {
	kJetsamFlagsFrontmost = (1 << 0),
	kJetsamFlagsKilled = (1 << 1)
};
115 #endif
116
117 #include "launch.h"
118 #include "launch_priv.h"
119 #include "launch_internal.h"
120 #include "bootstrap.h"
121 #include "bootstrap_priv.h"
122 #include "vproc.h"
123 #include "vproc_internal.h"
124
125 #include "reboot2.h"
126
127 #include "launchd.h"
128 #include "launchd_runtime.h"
129 #include "launchd_unix_ipc.h"
130 #include "protocol_vproc.h"
131 #include "protocol_vprocServer.h"
132 #include "protocol_job_reply.h"
133 #include "protocol_job_forward.h"
134 #include "mach_excServer.h"
135 #if !TARGET_OS_EMBEDDED
136 #include "domainServer.h"
137 #include "init.h"
138 #endif /* !TARGET_OS_EMBEDDED */
139 #include "eventsServer.h"
140
141 #ifndef POSIX_SPAWN_OSX_TALAPP_START
142 #define POSIX_SPAWN_OSX_TALAPP_START 0x0400
143 #endif
144
145 #ifndef POSIX_SPAWN_OSX_WIDGET_START
146 #define POSIX_SPAWN_OSX_WIDGET_START 0x0800
147 #endif
148
149 #ifndef POSIX_SPAWN_IOS_APP_START
150 #define POSIX_SPAWN_IOS_APP_START 0x1000
151 #endif
152
153 /* LAUNCHD_DEFAULT_EXIT_TIMEOUT
154 * If the job hasn't exited in the given number of seconds after sending
 * it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
156 */
157 #define LAUNCHD_MIN_JOB_RUN_TIME 10
158 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
159 #define LAUNCHD_SIGKILL_TIMER 2
160 #define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
161
162 #define SHUTDOWN_LOG_DIR "/var/log/shutdown"
163
164 #define TAKE_SUBSET_NAME "TakeSubsetName"
165 #define TAKE_SUBSET_PID "TakeSubsetPID"
166 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
167
168 #define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
169
170 extern char **environ;
171
/* A client blocked waiting for a job's removal; managed by the
 * waiting4removal_new/delete helpers below. */
struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle;
	mach_port_t reply_port;	/* Mach port to reply on once removal completes. */
};
176
177 static bool waiting4removal_new(job_t j, mach_port_t rp);
178 static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
179
/* A Mach service registered by (or declared for) a job. Entries live on the
 * owning job's list, in the global name/port hashes, and optionally on the
 * special_ports list. `alias` links an XPC-domain alias back to the
 * original service (see machservice_new_alias). */
struct machservice {
	SLIST_ENTRY(machservice) sle;
	SLIST_ENTRY(machservice) special_port_sle;
	LIST_ENTRY(machservice) name_hash_sle;
	LIST_ENTRY(machservice) port_hash_sle;
	struct machservice *alias;	/* Original service when this entry is an alias. */
	job_t job;			/* Owning job. */
	unsigned int gen_num;		/* Generation number, bumped when the port is reset. NOTE(review): inferred from machservice_resetport — confirm. */
	mach_port_name_t port;
	unsigned int
		isActive :1,
		reset :1,
		recv :1,
		hide :1,
		kUNCServer :1,
		per_user_hack :1,
		debug_on_close :1,
		per_pid :1,
		delete_on_destruction :1,
		drain_one_on_crash :1,
		drain_all_on_crash :1,
		event_update_port :1, /* The job which owns this port is the event monitor. */
		upfront :1, /* This service was declared in the plist. */
		event_channel :1, /* The job is to receive events on this channel. */
		/* Don't let the size of this field get too small. It has to be large
		 * enough to represent the reasonable range of special port numbers.
		 */
		special_port_num :18;
	const char name[0];	/* Flexible trailing storage for the service name. */
};
210
211 static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */
212
213 #define PORT_HASH_SIZE 32
214 #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
215
216 static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
217
218 static void machservice_setup(launch_data_t obj, const char *key, void *context);
219 static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
220 static void machservice_resetport(job_t j, struct machservice *ms);
221 static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
222 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
223 static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
224 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
225 static void machservice_ignore(job_t j, struct machservice *ms);
226 static void machservice_watch(job_t j, struct machservice *ms);
227 static void machservice_delete(job_t j, struct machservice *, bool port_died);
228 static void machservice_request_notifications(struct machservice *);
229 static mach_port_t machservice_port(struct machservice *);
230 static job_t machservice_job(struct machservice *);
231 static bool machservice_hidden(struct machservice *);
232 static bool machservice_active(struct machservice *);
233 static const char *machservice_name(struct machservice *);
234 static bootstrap_status_t machservice_status(struct machservice *);
235 void machservice_drain_port(struct machservice *);
236 static struct machservice *xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p);
237
/* A set of listening descriptors declared under one Sockets key of a job. */
struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;
	int *fds;		/* Array of fd_cnt file descriptors. */
	unsigned int junkfds:1, fd_cnt:31;
	union {
		const char name[0];	/* Group name; read-only after setup. */
		char name_init[0];	/* Writable alias of name, used during initialization. */
	};
};
247
248 static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds);
249 static void socketgroup_delete(job_t j, struct socketgroup *sg);
250 static void socketgroup_watch(job_t j, struct socketgroup *sg);
251 static void socketgroup_ignore(job_t j, struct socketgroup *sg);
252 static void socketgroup_callback(job_t j);
253 static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
254 static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
255
/* One StartCalendarInterval entry for a job. Entries are also linked into
 * the global sorted_calendar_events list (see below). */
struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle;
	SLIST_ENTRY(calendarinterval) sle;
	job_t job;		/* Owning job. */
	struct tm when;		/* Requested fire date (cron-style fields; see cronemu_*). */
	time_t when_next;	/* NOTE(review): appears to cache the next fire time — confirm against calendarinterval_setalarm. */
};
263
264 static LIST_HEAD(, calendarinterval) sorted_calendar_events;
265
266 static bool calendarinterval_new(job_t j, struct tm *w);
267 static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
268 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
269 static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
270 static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
271 static void calendarinterval_callback(void);
272 static void calendarinterval_sanity_check(void);
273
/* One environment variable (key/value) attached to a job, or globally. */
struct envitem {
	SLIST_ENTRY(envitem) sle;
	bool one_shot;		/* Set via envitem_setup_one_shot; presumably applied only once — confirm. */
	char *value;
	union {
		const char key[0];	/* Variable name; read-only after setup. */
		char key_init[0];	/* Writable alias of key, used during initialization. */
	};
};
283
284 static bool envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot);
285 static void envitem_delete(job_t j, struct envitem *ei, bool global);
286 static void envitem_setup(launch_data_t obj, const char *key, void *context);
287 static void envitem_setup_one_shot(launch_data_t obj, const char *key, void *context);
288
/* One resource-limit request for a job. */
struct limititem {
	SLIST_ENTRY(limititem) sle;
	struct rlimit lim;
	unsigned int setsoft:1, sethard:1, which:30;	/* `which` is an RLIMIT_* id (see launchd_keys2limits). */
};
294
295 static bool limititem_update(job_t j, int w, rlim_t r);
296 static void limititem_delete(job_t j, struct limititem *li);
297 static void limititem_setup(launch_data_t obj, const char *key, void *context);
298 #if HAVE_SANDBOX
299 static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
300 #endif
301
302 static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
303
/* The condition a KeepAlive "semaphore" tests (see struct semaphoreitem). */
typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	CRASHED,
	DID_NOT_CRASH,
	PATH_EXISTS,
	PATH_MISSING,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
	PATH_CHANGES,
	DIR_NOT_EMPTY,
	// FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
} semaphore_reason_t;
321
/* One KeepAlive criterion for a job. `why` selects the condition type and
 * `what` carries its argument (path or other-job label, depending on why). */
struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle;
	semaphore_reason_t why;
	bool watching_parent;	/* NOTE(review): presumably true when the parent dir is watched instead of the path — confirm. */
	int fd;			/* Descriptor used for the kevent watch, when applicable. */

	union {
		const char what[0];	/* Criterion argument; read-only after setup. */
		char what_init[0];	/* Writable alias of what, used during initialization. */
	};
};
333
/* Iteration context for semaphoreitem_setup_dict_iter(): the reasons to
 * record for true and false boolean values in the dictionary. */
struct semaphoreitem_dict_iter_context {
	job_t j;
	semaphore_reason_t why_true;
	semaphore_reason_t why_false;
};
339
340 static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
341 static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
342 static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
343 static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
344 static void semaphoreitem_callback(job_t j, struct kevent *kev);
345 static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
346 static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
347 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
348
/* An externally-delivered event, linked both into its event system and into
 * the job that registered interest in it. */
struct externalevent {
	LIST_ENTRY(externalevent) sys_le;
	LIST_ENTRY(externalevent) job_le;
	struct eventsystem *sys;	/* Owning event system. */

	uint64_t id;		/* Identifier used by externalevent_find(); unique per system. */
	job_t job;		/* Job that receives this event. */
	bool state;		/* Current state vs. the state the job wants. */
	bool wanted_state;
	launch_data_t event;	/* Event payload as launch_data. */

	char name[0];		/* Flexible trailing storage for the event name. */
};
362
/* Iteration context pairing a job with an event system (see
 * externalevent_setup). */
struct externalevent_iter_ctx {
	job_t j;
	struct eventsystem *sys;
};
367
368 static bool externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event);
369 static void externalevent_delete(struct externalevent *ee);
370 static void externalevent_setup(launch_data_t obj, const char *key, void *context);
371 static struct externalevent *externalevent_find(const char *sysname, uint64_t id);
372
/* A named stream of external events; owns all externalevent records
 * registered under this name. */
struct eventsystem {
	LIST_ENTRY(eventsystem) global_le;
	LIST_HEAD(, externalevent) events;
	uint64_t curid;		/* NOTE(review): appears to be the next event id to assign — confirm. */
	bool has_updates;
	char name[0];		/* Flexible trailing storage for the system name. */
};
380
381 static struct eventsystem *eventsystem_new(const char *name);
382 static void eventsystem_delete(struct eventsystem *sys);
383 static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
384 static struct eventsystem *eventsystem_find(const char *name);
385 static void eventsystem_ping(void);
386
387 #define ACTIVE_JOB_HASH_SIZE 32
388 #define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))
389
390 #define MACHSERVICE_HASH_SIZE 37
391
392 #define LABEL_HASH_SIZE 53
/* A job manager: one bootstrap namespace/session owning a set of jobs.
 * Managers form a tree via parentmgr/submgrs, rooted at root_jobmgr; XPC
 * domains are managers with xpc_singleton set. kqjobmgr_callback must stay
 * first so the run loop can dispatch on the struct pointer. */
struct jobmgr_s {
	kq_callback kqjobmgr_callback;
	LIST_ENTRY(jobmgr_s) xpc_le;
	SLIST_ENTRY(jobmgr_s) sle;
	SLIST_HEAD(, jobmgr_s) submgrs;
	LIST_HEAD(, job_s) jobs;
	LIST_HEAD(, job_s) jetsam_jobs;

	/* For legacy reasons, we keep all job labels that are imported in the
	 * root job manager's label hash. If a job manager is an XPC domain, then
	 * it gets its own label hash that is separate from the "global" one
	 * stored in the root job manager.
	 */
	LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
	LIST_HEAD(, job_s) global_env_jobs;
	mach_port_t jm_port;	/* This manager's bootstrap port. */
	mach_port_t req_port;
	jobmgr_t parentmgr;	/* NULL for the root manager. */
	int reboot_flags;
	time_t shutdown_time;
	unsigned int global_on_demand_cnt;
	unsigned int normal_active_cnt;
	unsigned int jetsam_jobs_cnt;
	unsigned int
		shutting_down :1,
		session_initialized :1,
		killed_stray_jobs :1,
		monitor_shutdown :1,
		shutdown_jobs_dirtied :1,
		shutdown_jobs_cleaned :1,
		xpc_singleton :1;
	uint32_t properties;
	/* XPC-specific properties. */
	char owner[MAXCOMLEN];
	char *shortdesc;
	mach_port_t req_bsport;
	mach_port_t req_excport;
	mach_port_t req_asport;
	pid_t req_pid;
	uid_t req_euid;
	gid_t req_egid;
	au_asid_t req_asid;
	vm_offset_t req_ctx;
	mach_msg_type_number_t req_ctx_sz;
	mach_port_t req_rport;
	kern_return_t error;
	union {
		const char name[0];	/* Manager name; read-only after setup. */
		char name_init[0];	/* Writable alias of name, used during initialization. */
	};
};
446
447 /* Global XPC domains. */
448 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
449 static jobmgr_t _s_xpc_system_domain;
450 static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
451 static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
452 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
453
/* Assert-and-log helper: if `e` is false, log the failing line for this job
 * manager and yield false; otherwise yield true. The comma operator keeps
 * the whole expansion usable as an expression. */
#define jobmgr_assumes(jm, e) \
	(unlikely(!(e)) ? jobmgr_log_bug(jm, __LINE__), false : true)
456
457 static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
458 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
459 static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
460 static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
461 static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
462 static job_t xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
463 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
464 static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
465 static jobmgr_t jobmgr_parent(jobmgr_t jm);
466 static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
467 static bool jobmgr_label_test(jobmgr_t jm, const char *str);
468 static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
469 static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
470 static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
471 static void jobmgr_remove(jobmgr_t jm);
472 static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
473 static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
474 static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
475 static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
476 static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
477 static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
478 static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
479 static void job_export_all2(jobmgr_t jm, launch_data_t where);
480 static void jobmgr_callback(void *obj, struct kevent *kev);
481 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
482 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
483 static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
484 static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
485 static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
486 /* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
487 static void jobmgr_log_bug(jobmgr_t jm, unsigned int line);
488
489 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
490 #define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
491 #define AUTO_PICK_XPC_LABEL (const char *)(~2)
492
/* List node recording one job that suspended a per-user launchd (see
 * job_s.suspended_perusers and peruser_suspend_count). */
struct suspended_peruser {
	LIST_ENTRY(suspended_peruser) sle;
	job_t j;
};
497
/* The core per-job record: one per job launchd knows about, anonymous jobs
 * included. The flexible trailing `label` array stores the job's label. */
struct job_s {
	kq_callback kqjob_callback; /* MUST be first element of this structure for benefit of launchd's run loop. */
	LIST_ENTRY(job_s) sle;
	LIST_ENTRY(job_s) subjob_sle;
	LIST_ENTRY(job_s) needing_session_sle;
	LIST_ENTRY(job_s) jetsam_sle;
	LIST_ENTRY(job_s) pid_hash_sle;
	LIST_ENTRY(job_s) label_hash_sle;
	LIST_ENTRY(job_s) global_env_sle;
	SLIST_ENTRY(job_s) curious_jobs_sle;
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;
	LIST_HEAD(, job_s) subjobs;
	LIST_HEAD(, externalevent) events;
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
	job_t alias;
	struct rusage ru;
	cpu_type_t *j_binpref;		/* Binary architecture preferences, j_binpref_cnt entries. */
	size_t j_binpref_cnt;
	mach_port_t j_port;
	mach_port_t exit_status_dest;
	mach_port_t exit_status_port;
	mach_port_t spawn_reply_port;
	uid_t mach_uid;
	jobmgr_t mgr;			/* Owning job manager. */
	size_t argc;
	char **argv;
	char *prog;
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdinpath;
	char *stdoutpath;
	char *stderrpath;
	char *alt_exc_handler;
	struct vproc_shmem_s *shmem;	/* Shared-memory transaction region (EnableTransactions). */
	struct machservice *lastlookup;
	unsigned int lastlookup_gennum;
#if HAVE_SANDBOX
	char *seatbelt_profile;
	uint64_t seatbelt_flags;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p;			/* PID of the job's process; job_stop() treats 0 as "not running". */
	int last_exit_status;
	int stdin_fd;
	int fork_fd;
	int log_redirect_fd;
	int nice;
	int stdout_err_fd;
	uint32_t pstype;
	int32_t jetsam_priority;
	int32_t jetsam_memlimit;
	int32_t jetsam_seq;
	int32_t main_thread_priority;
	uint32_t timeout;
	uint32_t exit_timeout;
	uint64_t sent_signal_time;
	uint64_t start_time;
	uint32_t min_run_time;
	uint32_t start_interval;
	uint32_t peruser_suspend_count; /* The number of jobs that have disabled this per-user launchd. */
	uuid_t instance_id;
	uint32_t fail_cnt;
#if 0
	/* someday ... */
	enum {
		J_TYPE_ANONYMOUS = 1,
		J_TYPE_LANCHSERVICES,
		J_TYPE_MACHINIT,
		J_TYPE_INETD,
	} j_type;
#endif
	bool
		debug :1, /* man launchd.plist --> Debug */
		ondemand :1, /* man launchd.plist --> KeepAlive == false */
		session_create :1, /* man launchd.plist --> SessionCreate */
		low_pri_io :1, /* man launchd.plist --> LowPriorityIO */
		no_init_groups :1, /* man launchd.plist --> InitGroups */
		priv_port_has_senders :1, /* a legacy mach_init concept to make bootstrap_create_server/service() work */
		importing_global_env :1, /* a hack during job importing */
		importing_hard_limits :1, /* a hack during job importing */
		setmask :1, /* man launchd.plist --> Umask */
		anonymous :1, /* a process that launchd knows about, but isn't managed by launchd */
		checkedin :1, /* a legacy mach_init concept to detect sick jobs */
		legacy_mach_job :1, /* a job created via bootstrap_create_server() */
		legacy_LS_job :1, /* a job created via spawn_via_launchd() */
		inetcompat :1, /* a legacy job that wants inetd compatible semantics */
		inetcompat_wait :1, /* a twist on inetd compatibility */
		start_pending :1, /* an event fired and the job should start, but not necessarily right away */
		globargv :1, /* man launchd.plist --> EnableGlobbing */
		wait4debugger :1, /* man launchd.plist --> WaitForDebugger */
		wait4debugger_oneshot :1, /* One-shot WaitForDebugger. */
		internal_exc_handler :1, /* MachExceptionHandler == true */
		stall_before_exec :1, /* a hack to support an option of spawn_via_launchd() */
		only_once :1, /* man launchd.plist --> LaunchOnlyOnce. Note: 5465184 Rename this to "HopefullyNeverExits" */
		currently_ignored :1, /* Make job_ignore() / job_watch() work. If these calls were balanced, then this wouldn't be necessary. */
		forced_peers_to_demand_mode :1, /* A job that forced all other jobs to be temporarily launch-on-demand */
		setnice :1, /* man launchd.plist --> Nice */
		removal_pending :1, /* a job was asked to be unloaded/removed while running, we'll remove it after it exits */
		sent_sigkill :1, /* job_kill() was called */
		debug_before_kill :1, /* enter the kernel debugger before killing a job */
		weird_bootstrap :1, /* a hack that launchd+launchctl use during jobmgr_t creation */
		start_on_mount :1, /* man launchd.plist --> StartOnMount */
		per_user :1, /* This job is a per-user launchd managed by the PID 1 launchd */
		unload_at_mig_return :1, /* A job thoroughly confused launchd. We need to unload it ASAP */
		abandon_pg :1, /* man launchd.plist --> AbandonProcessGroup */
		ignore_pg_at_shutdown :1, /* During shutdown, do not send SIGTERM to stray processes in the process group of this job. */
		poll_for_vfs_changes :1, /* a hack to work around the fact that kqueues don't work on all filesystems */
		deny_job_creation :1, /* Don't let this job create new 'job_t' objects in launchd */
		kill_via_shmem :1, /* man launchd.plist --> EnableTransactions */
		sent_kill_via_shmem :1, /* We need to 'kill_via_shmem' once-and-only-once */
		clean_kill :1, /* The job was sent SIGKILL because it was clean. */
		kill_after_sample :1, /* The job is to be killed after sampling. */
		reap_after_trace :1, /* The job exited before sample did, so we should reap it after sample is done. */
		nosy :1, /* The job has an OtherJobEnabled KeepAlive criterion. */
		crashed :1, /* The job is the default Mach exception handler, and it crashed. */
		reaped :1, /* We've received NOTE_EXIT for the job. */
		stopped :1, /* job_stop() was called. */
		jetsam_frontmost :1, /* The job is considered "frontmost" by Jetsam. */
		needs_kickoff :1, /* The job is to be kept alive continuously, but it must be initially kicked off. */
		is_bootstrapper :1, /* The job is a bootstrapper. */
		has_console :1, /* The job owns the console. */
		embedded_special_privileges :1, /* The job runs as a non-root user on embedded but has select privileges of the root user. */
		did_exec :1, /* The job exec(2)ed successfully. */
		xpcproxy_did_exec :1, /* The job is an XPC service, and XPC proxy successfully exec(3)ed. */
		holds_ref :1, /* The (anonymous) job called vprocmgr_switch_to_session(). */
		jetsam_properties :1, /* The job has Jetsam limits in place. */
		dedicated_instance :1, /* This job was created as the result of a look up of a service provided by a per-lookup job. */
		multiple_instances :1, /* The job supports creating additional instances of itself. */
		former_subjob :1, /* The sub-job was already removed from the parent's list of sub-jobs. */
		event_monitor :1, /* The job is responsible for monitoring external events for this launchd. */
		removing :1, /* A lame hack. */
		disable_aslr :1, /* Disable ASLR when launching this job. */
		xpc_service :1, /* The job is an XPC Service. */
		shutdown_monitor :1, /* The job is the Performance team's shutdown monitor. */
		dirty_at_shutdown :1, /* We should open a transaction for the job when shutdown begins. */
		workaround9359725 :1, /* The job was sent SIGKILL but did not exit in a timely fashion, indicating a kernel bug. */
		xpc_bootstrapper :1;

	mode_t mask;
	pid_t tracing_pid;
	mach_port_t asport;
	/* Only set for per-user launchd's. */
	au_asid_t asid;
	uuid_t expected_audit_uuid;
	const char label[0];	/* Flexible trailing storage for the job label. */
};
657
658 static size_t hash_label(const char *label) __attribute__((pure));
659 static size_t hash_ms(const char *msstr) __attribute__((pure));
660 static SLIST_HEAD(, job_s) s_curious_jobs;
661
/* Assert-and-log helper: if `e` is false, log the failing line for this job
 * and yield false; otherwise yield true. The comma operator keeps the whole
 * expansion usable as an expression. */
#define job_assumes(j, e) \
	(unlikely(!(e)) ? job_log_bug(j, __LINE__), false : true)
664
665 static void job_import_keys(launch_data_t obj, const char *key, void *context);
666 static void job_import_bool(job_t j, const char *key, bool value);
667 static void job_import_string(job_t j, const char *key, const char *value);
668 static void job_import_integer(job_t j, const char *key, long long value);
669 static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
670 static void job_import_array(job_t j, const char *key, launch_data_t value);
671 static void job_import_opaque(job_t j, const char *key, launch_data_t value);
672 static bool job_set_global_on_demand(job_t j, bool val);
673 static const char *job_active(job_t j);
674 static void job_watch(job_t j);
675 static void job_ignore(job_t j);
676 static void job_cleanup_after_tracer(job_t j);
677 static void job_reap(job_t j);
678 static bool job_useless(job_t j);
679 static bool job_keepalive(job_t j);
680 static void job_dispatch_curious_jobs(job_t j);
681 static void job_start(job_t j);
682 static void job_start_child(job_t j) __attribute__((noreturn));
683 static void job_setup_attributes(job_t j);
684 static bool job_setup_machport(job_t j);
685 static kern_return_t job_setup_exit_port(job_t j);
686 static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
687 static void job_postfork_become_user(job_t j);
688 static void job_postfork_test_user(job_t j);
689 static void job_log_pids_with_weird_uids(job_t j);
690 static void job_setup_exception_port(job_t j, task_t target_task);
691 static void job_callback(void *obj, struct kevent *kev);
692 static void job_callback_proc(job_t j, struct kevent *kev);
693 static void job_callback_timer(job_t j, void *ident);
694 static void job_callback_read(job_t j, int ident);
695 static void job_log_stray_pg(job_t j);
696 static void job_log_children_without_exec(job_t j);
697 static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
698 static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
699 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
700 static job_t job_new_alias(jobmgr_t jm, job_t src);
701 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
702 static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
703 static job_t job_new_subjob(job_t j, uuid_t identifier);
704 static void job_kill(job_t j);
705 static void job_uncork_fork(job_t j);
706 static void job_log_stdouterr(job_t j);
707 static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
708 static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
709 static void job_log_bug(job_t j, unsigned int line);
710 static void job_log_stdouterr2(job_t j, const char *msg, ...);
711 static void job_set_exception_port(job_t j, mach_port_t port);
712 static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
713 static void job_open_shutdown_transaction(job_t ji);
714 static void job_close_shutdown_transaction(job_t ji);
715
/* Maps launchd.plist ResourceLimit/HardResourceLimit keys to the
 * corresponding setrlimit(2) resource identifiers. */
static const struct {
	const char *key;
	int val;
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
730
731 static time_t cronemu(int mon, int mday, int hour, int min);
732 static time_t cronemu_wday(int wday, int hour, int min);
/* cron-style calendar interval emulation helpers (defined later in this file) */
static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
static bool cronemu_hour(struct tm *wtm, int hour, int min);
static bool cronemu_min(struct tm *wtm, int min);

/* These functions are a total nightmare to get to through headers.
 * See rdar://problem/8223092.
 */
typedef __darwin_mach_port_t fileport_t;
#define FILEPORT_NULL ((fileport_t)0)
extern int fileport_makeport(int, fileport_t *);
extern int fileport_makefd(fileport_t);

/* miscellaneous file local functions */
static size_t get_kern_max_proc(void);
static int dir_has_files(job_t j, const char *path);
static char **mach_cmd2argv(const char *string);
static size_t our_strhash(const char *s) __attribute__((pure));
static void extract_rcsid_substr(const char *i, char *o, size_t osz);

void eliminate_double_reboot(void);

/* file local globals */
static size_t total_children;			/* count of managed (non-anonymous) child processes */
static size_t total_anon_children;		/* count of anonymous (externally spawned) children */
static mach_port_t the_exception_server;
static job_t workaround_5477111;		/* see rdar://problem/5477111 */
static LIST_HEAD(, job_s) s_needing_sessions;	/* jobs waiting for an audit session to arrive */
static LIST_HEAD(, eventsystem) _s_event_systems;
static job_t _s_event_monitor;			/* the job that services LaunchEvents subsystems */
static job_t _s_xpc_bootstrapper;
static job_t _s_shutdown_monitor;		/* spawned by jobmgr_shutdown() on PID-1 shutdown */
static mach_port_t _s_event_update_port;	/* send right used to ping the event monitor */
mach_port_t g_audit_session_port = MACH_PORT_NULL;
static uint32_t s_jetsam_sequence_id;

#if !TARGET_OS_EMBEDDED
static job_t s_embedded_privileged_job = (job_t)&root_jobmgr;
au_asid_t g_audit_session = AU_DEFAUDITSID;
#else
static job_t s_embedded_privileged_job = NULL;
pid_t g_audit_session = 0;
#endif

static int s_no_hang_fd = -1;

/* process wide globals */
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool g_shutdown_debugging = false;
bool g_verbose_boot = false;
bool g_embedded_privileged_action = false;
bool g_runtime_busy_time = false;
786
/* Stop delivering events for a job: detach every socket, Mach service, and
 * semaphore event source, and cancel any VFS-polling timer. Idempotent.
 */
void
job_ignore(job_t j)
{
	struct socketgroup *sg_item;
	struct machservice *ms_item;
	struct semaphoreitem *sem_item;

	/* Already ignoring? Nothing to do. */
	if (j->currently_ignored) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring...");

	j->currently_ignored = true;

	/* Tear down the VFS-changes polling timer, if one is armed. */
	if (j->poll_for_vfs_changes) {
		j->poll_for_vfs_changes = false;
		(void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}

	SLIST_FOREACH(sg_item, &j->sockets, sle) {
		socketgroup_ignore(j, sg_item);
	}

	SLIST_FOREACH(ms_item, &j->machservices, sle) {
		machservice_ignore(j, ms_item);
	}

	SLIST_FOREACH(sem_item, &j->semaphores, sle) {
		semaphoreitem_ignore(j, sem_item);
	}
}
819
820 void
821 job_watch(job_t j)
822 {
823 struct semaphoreitem *si;
824 struct socketgroup *sg;
825 struct machservice *ms;
826
827 if (!j->currently_ignored) {
828 return;
829 }
830
831 job_log(j, LOG_DEBUG, "Watching...");
832
833 j->currently_ignored = false;
834
835 SLIST_FOREACH(sg, &j->sockets, sle) {
836 socketgroup_watch(j, sg);
837 }
838
839 SLIST_FOREACH(ms, &j->machservices, sle) {
840 machservice_watch(j, ms);
841 }
842
843 SLIST_FOREACH(si, &j->semaphores, sle) {
844 semaphoreitem_watch(j, si);
845 }
846 }
847
/* Ask a running job to exit. For transaction-aware ("kill_via_shmem") jobs,
 * the shared-memory transaction count decides between a SIGTERM with an exit
 * timeout and an immediate "clean" kill. No-op for anonymous jobs, jobs with
 * no process, or jobs already stopped.
 */
void
job_stop(job_t j)
{
	char extralog[100];
	int32_t newval = 1;

	if (unlikely(!j->p || j->stopped || j->anonymous)) {
		return;
	}

#if TARGET_OS_EMBEDDED
	/* Embedded privilege model: a privileged action may only stop jobs owned
	 * by the same user as the designated privileged job.
	 */
	if (g_embedded_privileged_action && s_embedded_privileged_job) {
		if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return;
	}
#endif

	if (j->kill_via_shmem) {
		if (j->shmem) {
			if (!j->sent_kill_via_shmem) {
				/* Tell the child we're exiting and drop launchd's implicit
				 * reference on the transaction count (atomically).
				 */
				j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
				newval = __sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
				j->sent_kill_via_shmem = true;
			} else {
				newval = j->shmem->vp_shmem_transaction_cnt;
			}
		} else {
			/* No shared memory region: treat as no outstanding transactions. */
			newval = -1;
		}
	}

	j->sent_signal_time = runtime_get_opaque_time();

	if (newval < 0) {
		/* Transaction count went negative: nothing in flight, kill outright. */
		j->clean_kill = true;
		job_kill(j);
	} else {
		(void)job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);

		/* Arm a one-shot timer so we can escalate if the job ignores SIGTERM. */
		if (j->exit_timeout) {
			(void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j) != -1);
		} else {
			job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
		}

		if (j->kill_via_shmem) {
			/* +1 to report the count before launchd dropped its own reference. */
			snprintf(extralog, sizeof(extralog), ": %d remaining transactions", newval + 1);
		} else {
			extralog[0] = '\0';
		}

		job_log(j, LOG_DEBUG, "Sent SIGTERM signal%s", extralog);
	}

	j->stopped = true;
}
914
/* Serialize a job's externally visible state into a launch_data dictionary
 * (the representation returned to clients, e.g. `launchctl list <label>`).
 * Returns NULL only if the top-level dictionary cannot be allocated; failed
 * allocations of individual entries simply omit that entry.
 */
launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}
	if ((tmp = launch_data_new_integer(j->last_exit_status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}
	/* PID is only present while the job has a live process. */
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->kill_via_shmem && (tmp = launch_data_new_bool(true))) {
		int32_t tmp_cnt = -1;

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);

		if (j->shmem) {
			tmp_cnt = j->shmem->vp_shmem_transaction_cnt;
		}

		/* Re-add the reference job_stop() subtracted so clients see the
		 * count of the job's own outstanding transactions.
		 */
		if (j->sent_kill_via_shmem) {
			tmp_cnt++;
		}

		if ((tmp = launch_data_new_integer(tmp_cnt))) {
			launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TRANSACTIONCOUNT);
		}
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			/* junkfds groups are placeholders, not exportable sockets. */
			if (sg->junkfds) {
				continue;
			}
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		/* tmp3 lazily becomes the per-PID services sub-dictionary. */
		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					/* Only the service names are exported; ports are not
					 * handed out here, hence MACH_PORT_NULL.
					 */
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
1045
1046 static void
1047 jobmgr_log_active_jobs(jobmgr_t jm)
1048 {
1049 const char *why_active;
1050 jobmgr_t jmi;
1051 job_t ji;
1052
1053 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
1054 jobmgr_log_active_jobs(jmi);
1055 }
1056
1057 LIST_FOREACH(ji, &jm->jobs, sle) {
1058 if ((why_active = job_active(ji))) {
1059 if (ji->p != 1) {
1060 job_log(ji, LOG_DEBUG | LOG_CONSOLE, "%s", why_active);
1061 }
1062 }
1063 }
1064 }
1065
/* Periodic shutdown heartbeat: report how many managed/anonymous children
 * remain and log why each still-active job has not exited yet.
 */
static void
jobmgr_still_alive_with_check(jobmgr_t jm)
{
	jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
	jobmgr_log_active_jobs(jm);
}
1072
/* Begin shutting down a job manager and, recursively, all of its
 * sub-managers. For the root manager of a PID-1 launchd this also arms a
 * 5-second heartbeat timer and spawns the shutdown monitor job. Returns the
 * result of an immediate garbage-collection pass (the surviving manager, or
 * NULL when fully collected).
 */
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	/* Trim the new line that asctime_r(3) puts there for some reason. */
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	jm->shutting_down = true;

	/* SAFE variant: jobmgr_shutdown() can garbage-collect (free) a submgr. */
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (jm->parentmgr == NULL && pid1_magic) {
		/* 5-second repeating heartbeat; serviced elsewhere to log progress. */
		(void)jobmgr_assumes(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));

		/* Spawn the shutdown monitor. */
		if (_s_shutdown_monitor && !_s_shutdown_monitor->p) {
			job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
			job_dispatch(_s_shutdown_monitor, true);
		}
	}

	return jobmgr_do_garbage_collection(jm);
}
1113
/* Destroy a job manager: remove all sub-managers and jobs, release all Mach
 * ports it holds, and log shutdown timing. For the root manager of a PID-1
 * launchd this ends in reboot(2); for any other parentless manager it ends
 * in exit(3). Only managers with a parent return to the caller.
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	/* Sub-managers should already be gone; drain any stragglers anyway. */
	if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
		}
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		/* A non-anonymous job should have no process at this point. Clear the
		 * PID so job_remove() doesn't pend the removal on a process exit.
		 */
		if (!ji->anonymous && !job_assumes(ji, ji->p == 0)) {
			ji->p = 0;
		}
		job_remove(ji);
	}

	if (jm->req_port) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_bsport) == KERN_SUCCESS);
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_excport) == KERN_SUCCESS);
	}
	if (jm->req_asport) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_asport) == KERN_SUCCESS);
	}
#if !TARGET_OS_EMBEDDED
	if (jm->req_rport) {
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			errno = kr;
			(void)jobmgr_assumes(jm, kr == KERN_SUCCESS);
		}
	}
#endif /* !TARGET_OS_EMBEDDED */
	if (jm->req_ctx) {
		(void)jobmgr_assumes(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz) == KERN_SUCCESS);
	}

	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	/* Trim asctime_r(3)'s trailing newline. */
	date[24] = 0;

	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (pid1_magic) {
		/* Root manager of PID 1: this is the end of userspace shutdown. */
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		(void)jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
	} else {
		/* Parentless, non-PID-1 launchd (e.g. a per-user instance): exit. */
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		runtime_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
1201
/* Remove a job from its manager and free all of its resources. If the job
 * still has a live (non-anonymous) process, removal is deferred: the job is
 * marked removal_pending, job_stop() is issued, and this function returns;
 * the reaper will call back in here once the process exits. Aliases take a
 * short-circuit teardown path. Recursively removes sub-jobs at the end.
 */
void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

	if (j->alias) {
		/* HACK: Egregious code duplication. But as with machservice_delete(),
		 * job aliases can't (and shouldn't) have any complex behaviors
		 * associated with them.
		 */
		while ((ms = SLIST_FIRST(&j->machservices))) {
			machservice_delete(j, ms, false);
		}

		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, label_hash_sle);
		free(j);
		return;
	}

#if TARGET_OS_EMBEDDED
	/* Embedded privilege model: privileged actions may only remove jobs owned
	 * by the same user as the designated privileged job.
	 */
	if (g_embedded_privileged_action && s_embedded_privileged_job) {
		if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return;
	}
#endif

	/* Do this BEFORE we check and see whether the job is still active. If we're a
	 * sub-job, we're being removed due to the parent job removing us. Therefore, the
	 * parent job will free itself after this call completes. So if we defer removing
	 * ourselves from the parent's list, we'll crash when we finally get around to it.
	 */
	if (j->dedicated_instance && !j->former_subjob) {
		LIST_REMOVE(j, subjob_sle);
		j->former_subjob = true;
	}

	if (unlikely(j->p)) {
		if (j->anonymous) {
			job_reap(j);
		} else {
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}

			/* The reaper re-enters job_remove() after the process exits. */
			return;
		}
	}

	if (!j->removing) {
		j->removing = true;
		job_dispatch_curious_jobs(j);
	}

	ipc_close_all_with_job(j);

	job_log(j, LOG_INFO, "Total rusage: utime %ld.%06u stime %ld.%06u maxrss %lu ixrss %lu idrss %lu isrss %lu minflt %lu majflt %lu nswap %lu inblock %lu oublock %lu msgsnd %lu msgrcv %lu nsignals %lu nvcsw %lu nivcsw %lu",
			j->ru.ru_utime.tv_sec, j->ru.ru_utime.tv_usec,
			j->ru.ru_stime.tv_sec, j->ru.ru_stime.tv_usec,
			j->ru.ru_maxrss, j->ru.ru_ixrss, j->ru.ru_idrss, j->ru.ru_isrss,
			j->ru.ru_minflt, j->ru.ru_majflt,
			j->ru.ru_nswap, j->ru.ru_inblock, j->ru.ru_oublock,
			j->ru.ru_msgsnd, j->ru.ru_msgrcv,
			j->ru.ru_nsignals, j->ru.ru_nvcsw, j->ru.ru_nivcsw);

	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	/* Close any descriptors the job still holds open in launchd. */
	if (!job_assumes(j, j->fork_fd == 0)) {
		(void)job_assumes(j, runtime_close(j->fork_fd) != -1);
	}

	if (j->stdin_fd) {
		(void)job_assumes(j, runtime_close(j->stdin_fd) != -1);
	}

	if (!job_assumes(j, j->log_redirect_fd == 0)) {
		(void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
	}

	if (j->j_port) {
		(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	}

	/* Drain every per-job list; each delete unlinks the head element. */
	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	struct externalevent *eei = NULL;
	while ((eei = LIST_FIRST(&j->events))) {
		/* Notify the event monitor that this job's events are going away. */
		eventsystem_ping();
		externalevent_delete(eei);
	}

#if 0
	/* Event systems exist independently of an actual monitor job. They're
	 * created on-demand when a job has a LaunchEvents dictionary. So we
	 * really don't need to get rid of them.
	 */
	if (j->event_monitor) {
		struct eventsystem *esi = NULL;
		while ((esi = LIST_FIRST(&_s_event_systems))) {
			eventsystem_delete(esi);
		}
	}
#else
	if (false) {
		/* Make gcc happy. */
		eventsystem_delete(NULL);
	}
	if (j->event_monitor) {
		/* The event monitor is dying: release the update port and clear the
		 * file-scope reference to it.
		 */
		if (_s_event_update_port != MACH_PORT_NULL) {
			(void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
			_s_event_update_port = MACH_PORT_NULL;
		}
		_s_event_monitor = NULL;
	}
#endif

	/* Free all owned strings and buffers. */
	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	/* Cancel any timers keyed on this job's addresses. */
	if (j->start_interval) {
		runtime_del_weak_ref();
		(void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
	}
	if (j->poll_for_vfs_changes) {
		(void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}
	if (j->exit_timeout) {
		/* Not a big deal if this fails. It means that the timer's already been freed. */
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->jetsam_properties) {
		LIST_REMOVE(j, jetsam_sle);
		j->mgr->jetsam_jobs_cnt--;
	}
	if (j->asport != MACH_PORT_NULL) {
		(void)job_assumes(j, launchd_mport_deallocate(j->asport) == KERN_SUCCESS);
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	/* Clear file-scope references if this was one of the special jobs. */
	if (j->embedded_special_privileges) {
		s_embedded_privileged_job = NULL;
	}
	if (j->shutdown_monitor) {
		_s_shutdown_monitor = NULL;
	}

	kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	job_t ji = NULL;
	job_t jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
		job_remove(ji);
	}

	job_log(j, LOG_DEBUG, "Removed");

	/* Poison the callback so a stale kevent on this freed job is obvious. */
	j->kqjob_callback = (kq_callback)0x8badf00d;
	free(j);
}
1449
1450 void
1451 socketgroup_setup(launch_data_t obj, const char *key, void *context)
1452 {
1453 launch_data_t tmp_oai;
1454 job_t j = context;
1455 size_t i, fd_cnt = 1;
1456 int *fds;
1457
1458 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1459 fd_cnt = launch_data_array_get_count(obj);
1460 }
1461
1462 fds = alloca(fd_cnt * sizeof(int));
1463
1464 for (i = 0; i < fd_cnt; i++) {
1465 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1466 tmp_oai = launch_data_array_get_index(obj, i);
1467 } else {
1468 tmp_oai = obj;
1469 }
1470
1471 fds[i] = launch_data_get_fd(tmp_oai);
1472 }
1473
1474 socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);
1475
1476 ipc_revoke_fds(obj);
1477 }
1478
1479 bool
1480 job_set_global_on_demand(job_t j, bool val)
1481 {
1482 if (j->forced_peers_to_demand_mode && val) {
1483 return false;
1484 } else if (!j->forced_peers_to_demand_mode && !val) {
1485 return false;
1486 }
1487
1488 if ((j->forced_peers_to_demand_mode = val)) {
1489 j->mgr->global_on_demand_cnt++;
1490 } else {
1491 j->mgr->global_on_demand_cnt--;
1492 }
1493
1494 if (j->mgr->global_on_demand_cnt == 0) {
1495 jobmgr_dispatch_all(j->mgr, false);
1496 }
1497
1498 return true;
1499 }
1500
/* Create the job's bootstrap receive port, register it with the runtime's
 * MIG demuxer, and request a no-senders notification so launchd learns when
 * the last client goes away. Returns false (with the port torn down) on any
 * failure.
 */
bool
job_setup_machport(job_t j)
{
	mach_msg_size_t mxmsgsz;

	if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
		goto out_bad2;
	}

	if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
		(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
		goto out_bad;
	}

	return true;
out_bad2:
	/* Undo the receive-right creation before reporting failure. */
	(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
out_bad:
	return false;
}
1531
/* Create the port pair used for the child's last-gasp exit-status message:
 * a receive right held by launchd (queue limit 1, since only a single exit
 * status is ever expected) and a send-once right handed to the child.
 * Returns KERN_SUCCESS or the first Mach error encountered.
 */
kern_return_t
job_setup_exit_port(job_t j)
{
	kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
	if (!job_assumes(j, kr == KERN_SUCCESS)) {
		/* Bug fix: this used to `return MACH_PORT_NULL`, but MACH_PORT_NULL
		 * and KERN_SUCCESS are both 0, so a failed port allocation was
		 * silently reported to the caller as success. Return the actual
		 * error code instead (kr is known non-zero on this path).
		 */
		return kr;
	}

	struct mach_port_limits limits = {
		.mpl_qlimit = 1,
	};
	kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
	(void)job_assumes(j, kr == KERN_SUCCESS);

	kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
	if (!job_assumes(j, kr == KERN_SUCCESS)) {
		/* Couldn't mint the send-once right: tear the receive right back down. */
		(void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
		j->exit_status_port = MACH_PORT_NULL;
	}

	return kr;
}
1554
/* Create a legacy mach_init-style on-demand server job from a command line.
 * `cmd` is split into an argv (mach_cmd2argv() returns a single allocation,
 * so one free() releases it); the new job inherits j's manager. Returns the
 * new job, or NULL (with any partial job removed) on failure.
 */
job_t
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
{
	const char **argv = (const char **)mach_cmd2argv(cmd);
	job_t jr = NULL;

	if (!job_assumes(j, argv != NULL)) {
		goto out_bad;
	}

	jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);

	/* job_new() copies what it needs; the argv block is ours to free. */
	free(argv);

	/* jobs can easily be denied creation during shutdown */
	if (unlikely(jr == NULL)) {
		goto out_bad;
	}

	jr->mach_uid = uid;
	jr->ondemand = ond;
	jr->legacy_mach_job = true;
	jr->abandon_pg = true;
	jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */

	if (!job_setup_machport(jr)) {
		goto out_bad;
	}

	job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");

	return jr;

out_bad:
	if (jr) {
		job_remove(jr);
	}
	return NULL;
}
1594
/* Create an "anonymous" job to track a process launchd did not spawn.
 * Validates the PID against libproc, tolerates zombies, and hooks up a
 * kevent to observe exec/fork/exit. Works even during shutdown (the
 * shutting_down flag is temporarily cleared so job_new() will cooperate).
 * Returns the new job or NULL with errno set.
 */
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	struct proc_bsdshortinfo proc;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (!jobmgr_assumes(jm, anonpid != 0)) {
		errno = EINVAL;
		return NULL;
	}

	if (!jobmgr_assumes(jm, anonpid < 100000)) {
		/* The kernel current defines PID_MAX to be 99999, but that define isn't exported */
		errno = EINVAL;
		return NULL;
	}

	/* libproc returns the number of bytes written into the buffer upon success,
	 * zero on failure.
	 */
	if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes(jm, errno == 0);
		}
		return NULL;
	}

	if (!jobmgr_assumes(jm, proc.pbsi_comm[0] != '\0')) {
		errno = EINVAL;
		return NULL;
	}

	/* Zombies and setugid processes are tolerated, just logged. */
	if (unlikely(proc.pbsi_status == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
	}

	if (unlikely(proc.pbsi_flags & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
	}

	kp_euid = proc.pbsi_uid;
	kp_uid = proc.pbsi_ruid;
	kp_svuid = proc.pbsi_svuid;
	kp_egid = proc.pbsi_gid;
	kp_gid = proc.pbsi_rgid;
	kp_svgid = proc.pbsi_svgid;

	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
	}

	/* "Fix" for a problem that shouldn't even exist.
	 * See rdar://problem/7264615 for the symptom and rdar://problem/5020256
	 * as to why this can happen.
	 */
	if (!jobmgr_assumes(jm, (pid_t)proc.pbsi_ppid != anonpid)) {
		jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). It should find a different way to do whatever it's doing. Setting PPID to 0: %s", proc.pbsi_comm);
		errno = EINVAL;
		return NULL;
	}

	/* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	/* We only set requestor_pid for XPC domains. */
	const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
	if (jobmgr_assumes(jm, (jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL)) != NULL)) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		/* anonymous process reaping is messy */
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1) && job_assumes(jr, errno == ESRCH)) {
			/* zombies are weird */
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state)) {
			job_log(jr, LOG_SCOLDING, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
	}

	/* Restore the flag we cleared above. */
	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	/* This is down here to mitigate the effects of rdar://problem/7264615, in which a process
	 * attaches to its own parent. We need to make sure that the anonymous job has been added
	 * to the process list so that, if it's used ptrace(3) to cause a cycle in the process
	 * tree (thereby making it not a tree anymore), we'll find the tracing parent PID of the
	 * parent process, which is the child, when we go looking for it in jobmgr_find_by_pid().
	 */
	switch (proc.pbsi_ppid) {
	case 0:
		/* the kernel */
		break;
	case 1:
		if (!pid1_magic) {
			/* we cannot possibly find a parent job_t that is useful in this function */
			break;
		}
		/* fall through */
	default:
		jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
		if (jobmgr_assumes(jm, jp != NULL)) {
			if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
				job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
			}
		}
		break;
	}

	return jr;
}
1722
/* Create a dedicated-instance sub-job of j, labeled "<parent-label>.<uuid>".
 * The sub-job inherits the parent's configuration (flags, upfront Mach
 * services, argv, environment, paths, sandbox/quarantine data, binprefs,
 * audit session) and is linked into the parent's subjob list. Returns the
 * new job, or NULL if allocation fails.
 *
 * Fixes versus the original:
 *  - stdoutpath was copied from j->stdinpath (copy-paste bug): the sub-job
 *    got the wrong stdout path, and launchd crashed via strdup(NULL) when
 *    StandardOutPath was set without StandardInPath.
 *  - the binpref copy used memcpy(&nj->j_binpref, &j->j_binpref, sz), which
 *    overwrote sz bytes of the job structs (the pointer fields themselves)
 *    instead of copying the arrays, and leaked the fresh allocation.
 *  - the label length is now measured with the standard snprintf(NULL, 0, …)
 *    idiom instead of writing through a zero-length array.
 */
job_t
job_new_subjob(job_t j, uuid_t identifier)
{
	uuid_string_t idstr;
	uuid_unparse(identifier, idstr);
	/* Measure the formatted label; snprintf with a NULL buffer and zero size
	 * returns the length that would have been written (C99).
	 */
	size_t label_sz = snprintf(NULL, 0, "%s.%s", j->label, idstr);

	/* The label is stored in the flexible tail of struct job_s. */
	job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
	if (launchd_assumes(nj != NULL)) {
		nj->kqjob_callback = job_callback;
		nj->mgr = j->mgr;
		nj->min_run_time = j->min_run_time;
		nj->timeout = j->timeout;
		nj->exit_timeout = j->exit_timeout;

		snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);

		/* Set all our simple Booleans that are applicable. */
		nj->debug = j->debug;
		nj->ondemand = j->ondemand;
		nj->checkedin = true;
		nj->low_pri_io = j->low_pri_io;
		nj->setmask = j->setmask;
		nj->wait4debugger = j->wait4debugger;
		nj->internal_exc_handler = j->internal_exc_handler;
		nj->setnice = j->setnice;
		nj->abandon_pg = j->abandon_pg;
		nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
		nj->deny_job_creation = j->deny_job_creation;
		nj->kill_via_shmem = j->kill_via_shmem;
		nj->needs_kickoff = j->needs_kickoff;
		nj->currently_ignored = true;
		nj->dedicated_instance = true;
		nj->xpc_service = j->xpc_service;
		nj->xpc_bootstrapper = j->xpc_bootstrapper;

		nj->mask = j->mask;
		uuid_copy(nj->instance_id, identifier);

		/* These jobs are purely on-demand Mach jobs. */

		/* {Hard | Soft}ResourceLimits are not supported. */

		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			/* Only copy MachServices that were actually declared in the plist.
			 * So skip over per-PID ones and ones that were created via
			 * bootstrap_register().
			 */
			if (msi->upfront) {
				mach_port_t mp = MACH_PORT_NULL;
				struct machservice *msj = machservice_new(nj, msi->name, &mp, msi->per_pid);
				if (job_assumes(nj, msj != NULL)) {
					msj->reset = msi->reset;
					msj->delete_on_destruction = msi->delete_on_destruction;
					msj->drain_one_on_crash = msi->drain_one_on_crash;
					msj->drain_all_on_crash = msi->drain_all_on_crash;
				}
			}
		}

		if (j->prog) {
			nj->prog = strdup(j->prog);
		}
		if (j->argv) {
			/* The parent's argv is one malloc block: a pointer table followed
			 * by the strings. Clone it wholesale and fix up the pointers.
			 */
			size_t sz = malloc_size(j->argv);
			nj->argv = (char **)malloc(sz);
			if (job_assumes(nj, nj->argv != NULL)) {
				/* This is the start of our strings. */
				char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));

				size_t i = 0;
				for (i = 0; i < j->argc; i++) {
					(void)strcpy(p, j->argv[i]);
					nj->argv[i] = p;
					p += (strlen(j->argv[i]) + 1);
				}
				nj->argv[i] = NULL;
			}

			nj->argc = j->argc;
		}

		/* We ignore global environment variables. */
		struct envitem *ei = NULL;
		SLIST_FOREACH(ei, &j->env, sle) {
			(void)job_assumes(nj, envitem_new(nj, ei->key, ei->value, false, false));
		}
		uuid_string_t val;
		uuid_unparse(identifier, val);
		(void)job_assumes(nj, envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false, false));

		if (j->rootdir) {
			nj->rootdir = strdup(j->rootdir);
		}
		if (j->workingdir) {
			nj->workingdir = strdup(j->workingdir);
		}
		if (j->username) {
			nj->username = strdup(j->username);
		}
		if (j->groupname) {
			nj->groupname = strdup(j->groupname);
		}
		/* FIXME: We shouldn't redirect all the output from these jobs to the same
		 * file. We should uniquify the file names.
		 */
		if (j->stdinpath) {
			nj->stdinpath = strdup(j->stdinpath);
		}
		if (j->stdoutpath) {
			/* Bug fix: this used to strdup(j->stdinpath). */
			nj->stdoutpath = strdup(j->stdoutpath);
		}
		if (j->stderrpath) {
			nj->stderrpath = strdup(j->stderrpath);
		}
		if (j->alt_exc_handler) {
			nj->alt_exc_handler = strdup(j->alt_exc_handler);
		}
#if HAVE_SANDBOX
		if (j->seatbelt_profile) {
			nj->seatbelt_profile = strdup(j->seatbelt_profile);
		}
#endif

#if HAVE_QUARANTINE
		if (j->quarantine_data) {
			nj->quarantine_data = strdup(j->quarantine_data);
		}
		nj->quarantine_data_sz = j->quarantine_data_sz;
#endif
		if (j->j_binpref) {
			size_t sz = malloc_size(j->j_binpref);
			nj->j_binpref = (cpu_type_t *)malloc(sz);
			if (job_assumes(nj, nj->j_binpref)) {
				/* Bug fix: copy the pointed-to arrays, not the pointer
				 * fields themselves (was memcpy(&nj->j_binpref, ...), which
				 * stomped sz bytes of the job structs).
				 */
				memcpy(nj->j_binpref, j->j_binpref, sz);
			}
		}

		/* JetsamPriority is unsupported. */

		if (j->asport != MACH_PORT_NULL) {
			(void)job_assumes(nj, launchd_mport_copy_send(j->asport) == KERN_SUCCESS);
			nj->asport = j->asport;
		}

		LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);

		/* XPC-domain jobs hash their labels in their own manager; everything
		 * else goes in the root manager's label hash.
		 */
		jobmgr_t where2put = root_jobmgr;
		if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
			where2put = j->mgr;
		}
		LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
		LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
	}

	return nj;
}
1882
1883 job_t
1884 job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
1885 {
1886 const char *const *argv_tmp = argv;
1887 char tmp_path[PATH_MAX];
1888 char auto_label[1000];
1889 const char *bn = NULL;
1890 char *co;
1891 size_t minlabel_len;
1892 size_t i, cc = 0;
1893 job_t j;
1894
1895 launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);
1896
1897 if (unlikely(jm->shutting_down)) {
1898 errno = EINVAL;
1899 return NULL;
1900 }
1901
1902 if (unlikely(prog == NULL && argv == NULL)) {
1903 errno = EINVAL;
1904 return NULL;
1905 }
1906
1907 char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
1908 if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
1909 if (prog) {
1910 bn = prog;
1911 } else {
1912 strlcpy(tmp_path, argv[0], sizeof(tmp_path));
1913 bn = basename(tmp_path); /* prog for auto labels is kp.kp_kproc.p_comm */
1914 }
1915 snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
1916 label = auto_label;
1917 /* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
1918 minlabel_len = strlen(label) + MAXCOMLEN;
1919 } else {
1920 if (label == AUTO_PICK_XPC_LABEL) {
1921 minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
1922 } else {
1923 minlabel_len = strlen(label);
1924 }
1925 }
1926
1927 j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);
1928
1929 if (!jobmgr_assumes(jm, j != NULL)) {
1930 return NULL;
1931 }
1932
1933 if (unlikely(label == auto_label)) {
1934 snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
1935 } else {
1936 strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
1937 }
1938 j->kqjob_callback = job_callback;
1939 j->mgr = jm;
1940 j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
1941 j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
1942 j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
1943 j->currently_ignored = true;
1944 j->ondemand = true;
1945 j->checkedin = true;
1946 j->jetsam_priority = -1;
1947 j->jetsam_memlimit = -1;
1948 j->jetsam_seq = -1;
1949 uuid_clear(j->expected_audit_uuid);
1950
1951 if (prog) {
1952 j->prog = strdup(prog);
1953 if (!job_assumes(j, j->prog != NULL)) {
1954 goto out_bad;
1955 }
1956 }
1957
1958 if (likely(argv)) {
1959 while (*argv_tmp++) {
1960 j->argc++;
1961 }
1962
1963 for (i = 0; i < j->argc; i++) {
1964 cc += strlen(argv[i]) + 1;
1965 }
1966
1967 j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
1968
1969 if (!job_assumes(j, j->argv != NULL)) {
1970 goto out_bad;
1971 }
1972
1973 co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));
1974
1975 for (i = 0; i < j->argc; i++) {
1976 j->argv[i] = co;
1977 strcpy(co, argv[i]);
1978 co += strlen(argv[i]) + 1;
1979 }
1980 j->argv[i] = NULL;
1981 }
1982
1983 if (strcmp(j->label, "com.apple.WindowServer") == 0) {
1984 j->has_console = true;
1985 }
1986
1987 LIST_INSERT_HEAD(&jm->jobs, j, sle);
1988
1989 jobmgr_t where2put_label = root_jobmgr;
1990 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
1991 where2put_label = j->mgr;
1992 }
1993 LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
1994 uuid_clear(j->expected_audit_uuid);
1995
1996 job_log(j, LOG_DEBUG, "Conceived");
1997
1998 return j;
1999
2000 out_bad:
2001 if (j->prog) {
2002 free(j->prog);
2003 }
2004 free(j);
2005
2006 return NULL;
2007 }
2008
2009 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
2010 job_t
2011 job_new_alias(jobmgr_t jm, job_t src)
2012 {
2013 job_t j = NULL;
2014 if (job_find(jm, src->label)) {
2015 errno = EEXIST;
2016 } else {
2017 j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2018 if (jobmgr_assumes(jm, j != NULL)) {
2019 strcpy((char *)j->label, src->label);
2020 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2021 LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2022 /* Bad jump address. The kqueue callback for aliases should never be
2023 * invoked.
2024 */
2025 j->kqjob_callback = (kq_callback)0xfa1afe1;
2026 j->alias = src;
2027 j->mgr = jm;
2028
2029 struct machservice *msi = NULL;
2030 SLIST_FOREACH(msi, &src->machservices, sle) {
2031 if (!machservice_new_alias(j, msi)) {
2032 jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2033 errno = EINVAL;
2034 job_remove(j);
2035 j = NULL;
2036 break;
2037 }
2038 }
2039 }
2040
2041 if (j) {
2042 job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2043 }
2044 }
2045
2046 return j;
2047 }
2048 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
2049
2050 job_t
2051 job_import(launch_data_t pload)
2052 {
2053 job_t j = jobmgr_import2(root_jobmgr, pload);
2054
2055 if (unlikely(j == NULL)) {
2056 return NULL;
2057 }
2058
2059 /* Since jobs are effectively stalled until they get security sessions assigned
2060 * to them, we may wish to reconsider this behavior of calling the job "enabled"
2061 * as far as other jobs with the OtherJobEnabled KeepAlive criterion set.
2062 */
2063 job_dispatch_curious_jobs(j);
2064 return job_dispatch(j, false);
2065 }
2066
2067 launch_data_t
2068 job_import_bulk(launch_data_t pload)
2069 {
2070 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
2071 job_t *ja;
2072 size_t i, c = launch_data_array_get_count(pload);
2073
2074 ja = alloca(c * sizeof(job_t));
2075
2076 for (i = 0; i < c; i++) {
2077 if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
2078 errno = 0;
2079 }
2080 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
2081 }
2082
2083 for (i = 0; i < c; i++) {
2084 if (likely(ja[i])) {
2085 job_dispatch_curious_jobs(ja[i]);
2086 job_dispatch(ja[i], false);
2087 }
2088 }
2089
2090 return resp;
2091 }
2092
2093 void
2094 job_import_bool(job_t j, const char *key, bool value)
2095 {
2096 bool found_key = false;
2097
2098 switch (key[0]) {
2099 case 'a':
2100 case 'A':
2101 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2102 j->abandon_pg = value;
2103 found_key = true;
2104 }
2105 break;
2106 case 'b':
2107 case 'B':
2108 if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2109 j->dirty_at_shutdown = value;
2110 found_key = true;
2111 }
2112 break;
2113 case 'k':
2114 case 'K':
2115 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2116 j->ondemand = !value;
2117 found_key = true;
2118 }
2119 break;
2120 case 'o':
2121 case 'O':
2122 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
2123 j->ondemand = value;
2124 found_key = true;
2125 }
2126 break;
2127 case 'd':
2128 case 'D':
2129 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
2130 j->debug = value;
2131 found_key = true;
2132 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
2133 (void)job_assumes(j, !value);
2134 found_key = true;
2135 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2136 j->disable_aslr = value;
2137 found_key = true;
2138 }
2139 break;
2140 case 'h':
2141 case 'H':
2142 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
2143 job_log(j, LOG_INFO, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
2144 j->dirty_at_shutdown = value;
2145 found_key = true;
2146 }
2147 break;
2148 case 's':
2149 case 'S':
2150 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
2151 j->session_create = value;
2152 found_key = true;
2153 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2154 j->start_on_mount = value;
2155 found_key = true;
2156 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
2157 /* this only does something on Mac OS X 10.4 "Tiger" */
2158 found_key = true;
2159 } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
2160 if (_s_shutdown_monitor) {
2161 job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2162 } else {
2163 j->shutdown_monitor = true;
2164 _s_shutdown_monitor = j;
2165 }
2166 found_key = true;
2167 }
2168 break;
2169 case 'l':
2170 case 'L':
2171 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
2172 j->low_pri_io = value;
2173 found_key = true;
2174 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2175 j->only_once = value;
2176 found_key = true;
2177 }
2178 break;
2179 case 'm':
2180 case 'M':
2181 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2182 j->internal_exc_handler = value;
2183 found_key = true;
2184 } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2185 j->multiple_instances = value;
2186 found_key = true;
2187 }
2188 break;
2189 case 'i':
2190 case 'I':
2191 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2192 if (getuid() != 0) {
2193 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2194 return;
2195 }
2196 j->no_init_groups = !value;
2197 found_key = true;
2198 } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
2199 j->ignore_pg_at_shutdown = value;
2200 found_key = true;
2201 }
2202 break;
2203 case 'r':
2204 case 'R':
2205 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2206 if (value) {
2207 /* We don't want value == false to change j->start_pending */
2208 j->start_pending = true;
2209 }
2210 found_key = true;
2211 }
2212 break;
2213 case 'e':
2214 case 'E':
2215 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
2216 j->globargv = value;
2217 found_key = true;
2218 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
2219 j->kill_via_shmem = value;
2220 found_key = true;
2221 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2222 j->debug_before_kill = value;
2223 found_key = true;
2224 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
2225 if (!s_embedded_privileged_job) {
2226 j->embedded_special_privileges = value;
2227 s_embedded_privileged_job = j;
2228 } else {
2229 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2230 }
2231 found_key = true;
2232 } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
2233 if (job_assumes(j, _s_event_monitor == NULL)) {
2234 j->event_monitor = value;
2235 if (value) {
2236 _s_event_monitor = j;
2237 }
2238 } else {
2239 job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility!");
2240 }
2241 found_key = true;
2242 }
2243 break;
2244 case 'w':
2245 case 'W':
2246 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
2247 j->wait4debugger = value;
2248 found_key = true;
2249 }
2250 break;
2251 case 'x':
2252 case 'X':
2253 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
2254 if (pid1_magic) {
2255 if (_s_xpc_bootstrapper) {
2256 job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _s_xpc_bootstrapper->label);
2257 } else {
2258 _s_xpc_bootstrapper = j;
2259 j->xpc_bootstrapper = value;
2260 }
2261 } else {
2262 job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
2263 }
2264 }
2265 found_key = true;
2266 break;
2267 default:
2268 break;
2269 }
2270
2271 if (unlikely(!found_key)) {
2272 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2273 }
2274 }
2275
/* Apply one string-valued key from a job's property list to job `j`.
 *
 * Most keys simply strdup() the value into a job field (via `where2put`).
 * Keys that launchctl already handled, or that are processed elsewhere,
 * return early without action. Keys requiring root (RootDirectory, UserName,
 * GroupName) are ignored with a warning for non-root launchd instances.
 */
void
job_import_string(job_t j, const char *key, const char *value)
{
	/* Destination field for the strdup'd value; NULL means the key either
	 * needs no storage or is unknown. */
	char **where2put = NULL;

	switch (key[0]) {
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			where2put = &j->alt_exc_handler;
		}
		break;
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
			/* Program was already consumed by jobmgr_import2(). */
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0) {
			if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
				j->pstype = POSIX_SPAWN_OSX_TALAPP_START;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_WIDGET) == 0) {
				j->pstype = POSIX_SPAWN_OSX_WIDGET_START;
			}
#if TARGET_OS_EMBEDDED
			else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_IOSAPP) == 0) {
				j->pstype = POSIX_SPAWN_IOS_APP_START;
			}
#endif /* TARGET_OS_EMBEDDED */
			else {
				job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
			}
			return;
		}
		break;
	case 'l':
	case 'L':
		/* Label and LimitLoadTo/From* were already handled during import. */
		if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			return;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
			/* chroot()ing a job requires root. */
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			where2put = &j->rootdir;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
			where2put = &j->workingdir;
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "root") == 0) {
				/* Already running as root; nothing to do. */
				return;
			}
			where2put = &j->username;
		}
		break;
	case 'g':
	case 'G':
		if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "wheel") == 0) {
				/* wheel is the default group for root; nothing to do. */
				return;
			}
			where2put = &j->groupname;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
			where2put = &j->stdoutpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
			where2put = &j->stderrpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
			where2put = &j->stdinpath;
			/* Open (creating if necessary) the stdin file now; O_NONBLOCK so
			 * that open() cannot hang on a FIFO with no writer. */
			j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
			if (job_assumes(j, j->stdin_fd != -1)) {
				/* open() should not block, but regular IO by the job should */
				(void)job_assumes(j, fcntl(j->stdin_fd, F_SETFL, 0) != -1);
				/* XXX -- EV_CLEAR should make named pipes happy? */
				(void)job_assumes(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j) != -1);
			} else {
				j->stdin_fd = 0;
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
			where2put = &j->seatbelt_profile;
#endif
		}
		break;
	case 'X':
	case 'x':
		if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
			return;
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
		break;
	}

	if (likely(where2put)) {
		(void)job_assumes(j, (*where2put = strdup(value)) != NULL);
	} else {
		/* See rdar://problem/5496612. These two are okay. */
		if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) != 0 && strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) != 0) {
			job_log(j, LOG_WARNING, "Unknown key: %s", key);
		}
	}
}
2404
2405 void
2406 job_import_integer(job_t j, const char *key, long long value)
2407 {
2408 switch (key[0]) {
2409 case 'e':
2410 case 'E':
2411 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
2412 if (unlikely(value < 0)) {
2413 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2414 } else if (unlikely(value > UINT32_MAX)) {
2415 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2416 } else {
2417 j->exit_timeout = (typeof(j->exit_timeout)) value;
2418 }
2419 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
2420 j->main_thread_priority = value;
2421 }
2422 break;
2423 case 'j':
2424 case 'J':
2425 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
2426 job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2427
2428 launch_data_t pri = launch_data_new_integer(value);
2429 if (job_assumes(j, pri != NULL)) {
2430 jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2431 launch_data_free(pri);
2432 }
2433 }
2434 case 'n':
2435 case 'N':
2436 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
2437 if (unlikely(value < PRIO_MIN)) {
2438 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2439 } else if (unlikely(value > PRIO_MAX)) {
2440 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2441 } else {
2442 j->nice = (typeof(j->nice)) value;
2443 j->setnice = true;
2444 }
2445 }
2446 break;
2447 case 't':
2448 case 'T':
2449 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
2450 if (unlikely(value < 0)) {
2451 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2452 } else if (unlikely(value > UINT32_MAX)) {
2453 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2454 } else {
2455 j->timeout = (typeof(j->timeout)) value;
2456 }
2457 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2458 if (value < 0) {
2459 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2460 } else if (value > UINT32_MAX) {
2461 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2462 } else {
2463 j->min_run_time = (typeof(j->min_run_time)) value;
2464 }
2465 }
2466 break;
2467 case 'u':
2468 case 'U':
2469 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2470 j->mask = value;
2471 j->setmask = true;
2472 }
2473 break;
2474 case 's':
2475 case 'S':
2476 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
2477 if (unlikely(value <= 0)) {
2478 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2479 } else if (unlikely(value > UINT32_MAX)) {
2480 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2481 } else {
2482 runtime_add_weak_ref();
2483 j->start_interval = (typeof(j->start_interval)) value;
2484
2485 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
2486 }
2487 #if HAVE_SANDBOX
2488 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2489 j->seatbelt_flags = value;
2490 #endif
2491 }
2492
2493 break;
2494 default:
2495 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2496 break;
2497 }
2498 }
2499
2500 void
2501 job_import_opaque(job_t j __attribute__((unused)),
2502 const char *key, launch_data_t value __attribute__((unused)))
2503 {
2504 switch (key[0]) {
2505 case 'q':
2506 case 'Q':
2507 #if HAVE_QUARANTINE
2508 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2509 size_t tmpsz = launch_data_get_opaque_size(value);
2510
2511 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2512 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2513 j->quarantine_data_sz = tmpsz;
2514 }
2515 }
2516 #endif
2517 case 's':
2518 case 'S':
2519 if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
2520 size_t tmpsz = launch_data_get_opaque_size(value);
2521 if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
2522 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2523 }
2524 }
2525 break;
2526 default:
2527 break;
2528 }
2529 }
2530
2531 static void
2532 policy_setup(launch_data_t obj, const char *key, void *context)
2533 {
2534 job_t j = context;
2535 bool found_key = false;
2536
2537 switch (key[0]) {
2538 case 'd':
2539 case 'D':
2540 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2541 j->deny_job_creation = launch_data_get_bool(obj);
2542 found_key = true;
2543 }
2544 break;
2545 default:
2546 break;
2547 }
2548
2549 if (unlikely(!found_key)) {
2550 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2551 }
2552 }
2553
2554 void
2555 job_import_dictionary(job_t j, const char *key, launch_data_t value)
2556 {
2557 launch_data_t tmp;
2558
2559 switch (key[0]) {
2560 case 'p':
2561 case 'P':
2562 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2563 launch_data_dict_iterate(value, policy_setup, j);
2564 }
2565 break;
2566 case 'k':
2567 case 'K':
2568 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2569 launch_data_dict_iterate(value, semaphoreitem_setup, j);
2570 }
2571 break;
2572 case 'i':
2573 case 'I':
2574 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2575 j->inetcompat = true;
2576 j->abandon_pg = true;
2577 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2578 j->inetcompat_wait = launch_data_get_bool(tmp);
2579 }
2580 }
2581 break;
2582 case 'j':
2583 case 'J':
2584 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
2585 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2586 }
2587 case 'e':
2588 case 'E':
2589 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2590 launch_data_dict_iterate(value, envitem_setup, j);
2591 }
2592 break;
2593 case 'u':
2594 case 'U':
2595 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2596 j->importing_global_env = true;
2597 launch_data_dict_iterate(value, envitem_setup, j);
2598 j->importing_global_env = false;
2599 }
2600 break;
2601 case 's':
2602 case 'S':
2603 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2604 launch_data_dict_iterate(value, socketgroup_setup, j);
2605 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2606 calendarinterval_new_from_obj(j, value);
2607 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2608 launch_data_dict_iterate(value, limititem_setup, j);
2609 #if HAVE_SANDBOX
2610 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2611 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2612 #endif
2613 }
2614 break;
2615 case 'h':
2616 case 'H':
2617 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2618 j->importing_hard_limits = true;
2619 launch_data_dict_iterate(value, limititem_setup, j);
2620 j->importing_hard_limits = false;
2621 }
2622 break;
2623 case 'm':
2624 case 'M':
2625 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2626 launch_data_dict_iterate(value, machservice_setup, j);
2627 }
2628 break;
2629 case 'l':
2630 case 'L':
2631 if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2632 launch_data_dict_iterate(value, eventsystem_setup, j);
2633 } else {
2634 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2635 return;
2636 }
2637 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2638 return;
2639 }
2640 }
2641 break;
2642 default:
2643 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2644 break;
2645 }
2646 }
2647
/* Apply one array-valued key from a job's property list to job `j`.
 *
 * ProgramArguments and LimitLoadTo/FromHosts were already consumed during
 * import and return early. Other keys create semaphore items, sockets,
 * calendar intervals, or the binary-order preference list. Unrecognized keys
 * are reported at LOG_WARNING.
 */
void
job_import_array(job_t j, const char *key, launch_data_t value)
{
	size_t i, value_cnt = launch_data_array_get_count(value);
	const char *str;

	switch (key[0]) {
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
			/* Already consumed by jobmgr_import2(). */
			return;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
			return;
		}
		break;
	case 'q':
	case 'Q':
		if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
			/* One DIR_NOT_EMPTY semaphore per watched queue directory. */
			for (i = 0; i < value_cnt; i++) {
				str = launch_data_get_string(launch_data_array_get_index(value, i));
				if (job_assumes(j, str != NULL)) {
					semaphoreitem_new(j, DIR_NOT_EMPTY, str);
				}
			}

		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
			/* One PATH_CHANGES semaphore per watched path. */
			for (i = 0; i < value_cnt; i++) {
				str = launch_data_get_string(launch_data_array_get_index(value, i));
				if (job_assumes(j, str != NULL)) {
					semaphoreitem_new(j, PATH_CHANGES, str);
				}
			}
		}
		break;
	case 'b':
	case 'B':
		if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
			socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
			/* Copy the array of preferred CPU types for binary selection. */
			if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
				j->j_binpref_cnt = value_cnt;
				for (i = 0; i < value_cnt; i++) {
					j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
				}
			}
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
			/* Each array element is its own calendar-interval dictionary. */
			for (i = 0; i < value_cnt; i++) {
				calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
			}
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
		break;
	}
}
2721
2722 void
2723 job_import_keys(launch_data_t obj, const char *key, void *context)
2724 {
2725 job_t j = context;
2726 launch_data_type_t kind;
2727
2728 if (!launchd_assumes(obj != NULL)) {
2729 return;
2730 }
2731
2732 kind = launch_data_get_type(obj);
2733
2734 switch (kind) {
2735 case LAUNCH_DATA_BOOL:
2736 job_import_bool(j, key, launch_data_get_bool(obj));
2737 break;
2738 case LAUNCH_DATA_STRING:
2739 job_import_string(j, key, launch_data_get_string(obj));
2740 break;
2741 case LAUNCH_DATA_INTEGER:
2742 job_import_integer(j, key, launch_data_get_integer(obj));
2743 break;
2744 case LAUNCH_DATA_DICTIONARY:
2745 job_import_dictionary(j, key, obj);
2746 break;
2747 case LAUNCH_DATA_ARRAY:
2748 job_import_array(j, key, obj);
2749 break;
2750 case LAUNCH_DATA_OPAQUE:
2751 job_import_opaque(j, key, obj);
2752 break;
2753 default:
2754 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
2755 break;
2756 }
2757 }
2758
/* Validate and import a job property list (`pload`) into job manager `jm`.
 *
 * Validates the plist shape (dictionary with a string Label and at least one
 * of Program/ProgramArguments), resolves LimitLoadToSessionType to a session
 * job manager, rejects duplicate labels (except XPC singletons, where the
 * existing job is returned), then creates the job and applies every key via
 * job_import_keys().
 *
 * Returns the job on success (errno is set to ENEEDAUTH when the job must
 * wait for a security session), or NULL with errno set to EINVAL / EPERM /
 * EEXIST on failure.
 */
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	if (!jobmgr_assumes(jm, pload != NULL)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
		errno = EINVAL;
		return NULL;
	}

	/* A string-valued Label is mandatory. */
	if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(label = launch_data_get_string(tmp)))) {
		errno = EINVAL;
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* On embedded, a privileged action may only import jobs running as the
	 * same user as the job holding the embedded privilege dispensation. */
	if (unlikely(g_embedded_privileged_action && s_embedded_privileged_job)) {
		if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
			errno = EPERM;
			return NULL;
		}

		const char *username = NULL;
		if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
			username = launch_data_get_string(tmp);
		} else {
			errno = EPERM;
			return NULL;
		}

		if (!jobmgr_assumes(jm, s_embedded_privileged_job->username != NULL && username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (unlikely(strcmp(s_embedded_privileged_job->username, username) != 0)) {
			errno = EPERM;
			return NULL;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return NULL;
	}
#endif

	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
		(launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	int argc = 0;
	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		/* NULL-terminated argv of pointers into the plist's own strings;
		 * stack-allocated because job_new() copies what it needs. */
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		argv[i] = NULL;
		argc = i;
	}

	if (!prog && argc == 0) {
		jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
		errno = EINVAL;
		return NULL;
	}

	/* Find the requested session. You cannot load services into XPC domains in
	 * this manner.
	 */
	launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	if (session) {
		jobmgr_t jmt = NULL;
		if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
			jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
			if (!jmt) {
				jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
			} else {
				jm = jmt;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
		}

		if (!jmt) {
			errno = EINVAL;
			return NULL;
		}
	}

	/* For legacy reasons, we have a global hash of all labels in all job
	 * managers. So rather than make it a global, we store it in the root job
	 * manager. But for an XPC domain, we store a local hash of all services in
	 * the domain.
	 */
	jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
	if (unlikely((j = job_find(where2look, label)) != NULL)) {
		if (jm->xpc_singleton) {
			/* There can (and probably will be) multiple attempts to import the
			 * same XPC service from the same framework. This is okay. It's
			 * treated as a singleton, so just return the existing one so that
			 * it may be aliased into the requesting process' XPC domain.
			 */
			return j;
		} else {
			/* If we're not a global XPC domain, then it's an error to try
			 * importing the same job/service multiple times.
			 */
			errno = EEXIST;
			return NULL;
		}
	} else if (unlikely(!jobmgr_label_test(where2look, label))) {
		errno = EINVAL;
		return NULL;
	}
	jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);

	if (likely(j = job_new(jm, label, prog, argv))) {
		/* Apply every remaining plist key to the freshly created job. */
		launch_data_dict_iterate(pload, job_import_keys, j);
		if (!uuid_is_null(j->expected_audit_uuid)) {
			/* The job must wait for its security session before running. */
			uuid_string_t uuid_str;
			uuid_unparse(j->expected_audit_uuid, uuid_str);
			job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
			LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
			errno = ENEEDAUTH;
		} else {
			job_log(j, LOG_DEBUG, "No security session specified.");
			j->asport = MACH_PORT_NULL;
		}

		if (j->event_monitor) {
			/* The event monitor must declare LaunchEvents-driven services;
			 * find its event-update Mach service and prime it. */
			if (job_assumes(j, LIST_FIRST(&j->events) == NULL)) {
				struct machservice *msi = NULL;
				SLIST_FOREACH(msi, &j->machservices, sle) {
					if (msi->event_update_port) {
						break;
					}
				}

				if (job_assumes(j, msi != NULL)) {
					/* Create our send-once right so we can kick things off. */
					(void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
					if (!LIST_EMPTY(&_s_event_systems)) {
						eventsystem_ping();
					}
				}
			} else {
				job_log(j, LOG_ERR, "The event monitor job may not have a LaunchEvents dictionary.");
				job_remove(j);
				j = NULL;
			}
		}
	}

	return j;
}
2950
2951 bool
2952 jobmgr_label_test(jobmgr_t jm, const char *str)
2953 {
2954 char *endstr = NULL;
2955 const char *ptr;
2956
2957 if (str[0] == '\0') {
2958 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
2959 return false;
2960 }
2961
2962 for (ptr = str; *ptr; ptr++) {
2963 if (iscntrl(*ptr)) {
2964 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
2965 return false;
2966 }
2967 }
2968
2969 strtoll(str, &endstr, 0);
2970
2971 if (str != endstr) {
2972 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to begin with numbers: %s", str);
2973 return false;
2974 }
2975
2976 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
2977 (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
2978 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
2979 return false;
2980 }
2981
2982 return true;
2983 }
2984
2985 job_t
2986 job_find(jobmgr_t jm, const char *label)
2987 {
2988 job_t ji;
2989
2990 if (!jm) {
2991 jm = root_jobmgr;
2992 }
2993
2994 LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
2995 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
2996 continue; /* 5351245 and 5488633 respectively */
2997 }
2998
2999 if (strcmp(ji->label, label) == 0) {
3000 return ji;
3001 }
3002 }
3003
3004 errno = ESRCH;
3005 return NULL;
3006 }
3007
3008 /* Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid(). */
3009 job_t
3010 jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
3011 {
3012 job_t ji = NULL;
3013 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3014 if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay)) ) {
3015 return ji;
3016 }
3017 }
3018
3019 jobmgr_t jmi = NULL;
3020 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3021 if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
3022 break;
3023 }
3024 }
3025
3026 return ji;
3027 }
3028
3029 job_t
3030 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3031 {
3032 job_t ji;
3033
3034 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3035 if (ji->p == p) {
3036 return ji;
3037 }
3038 }
3039
3040 return create_anon ? job_new_anonymous(jm, p) : NULL;
3041 }
3042
3043 job_t
3044 job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
3045 {
3046 jobmgr_t jmi;
3047 job_t ji;
3048
3049 if (jm->jm_port == mport) {
3050 return jobmgr_find_by_pid(jm, upid, true);
3051 }
3052
3053 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3054 job_t jr;
3055
3056 if ((jr = job_mig_intran2(jmi, mport, upid))) {
3057 return jr;
3058 }
3059 }
3060
3061 LIST_FOREACH(ji, &jm->jobs, sle) {
3062 if (ji->j_port == mport) {
3063 return ji;
3064 }
3065 }
3066
3067 return NULL;
3068 }
3069
/* MIG intran routine: translate an incoming request port into the job_t it
 * belongs to, using the caller's audit credentials for PID-based lookup.
 * On lookup failure, logs diagnostic information about the confused caller.
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	jr = job_mig_intran2(root_jobmgr, p, ldc->pid);

	if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
		struct proc_bsdshortinfo proc;
		/* proc_pidinfo() returning 0 indicates failure; distinguish a
		 * genuinely dead PID (ESRCH) from an unexpected error.
		 */
		if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)jobmgr_assumes(root_jobmgr, errno == 0);
			} else {
				jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc->pid, ldc->uid, ldc->euid, p, proc.pbsi_comm);
			}
		}
	}

	return jr;
}
3091
3092 job_t
3093 job_find_by_service_port(mach_port_t p)
3094 {
3095 struct machservice *ms;
3096
3097 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
3098 if (ms->recv && (ms->port == p)) {
3099 return ms->job;
3100 }
3101 }
3102
3103 return NULL;
3104 }
3105
/* MIG destructor: runs after every MIG routine returns. Handles jobs that
 * were flagged for unloading at MIG return and clears the 5477111
 * workaround marker.
 */
void
job_mig_destructor(job_t j)
{
	/*
	 * 5477111
	 *
	 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
	 */

	/* Skip the job that the workaround global says was already removed
	 * during the MIG call itself (its pointer would be dangling).
	 */
	if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
		job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
		job_remove(j);
	}

	workaround_5477111 = NULL;

	calendarinterval_sanity_check();
}
3124
3125 void
3126 job_export_all2(jobmgr_t jm, launch_data_t where)
3127 {
3128 jobmgr_t jmi;
3129 job_t ji;
3130
3131 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3132 job_export_all2(jmi, where);
3133 }
3134
3135 LIST_FOREACH(ji, &jm->jobs, sle) {
3136 launch_data_t tmp;
3137
3138 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3139 launch_data_dict_insert(where, tmp, ji->label);
3140 }
3141 }
3142 }
3143
3144 launch_data_t
3145 job_export_all(void)
3146 {
3147 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3148
3149 if (launchd_assumes(resp != NULL)) {
3150 job_export_all2(root_jobmgr, resp);
3151 }
3152
3153 return resp;
3154 }
3155
3156 void
3157 job_log_stray_pg(job_t j)
3158 {
3159 pid_t *pids = NULL;
3160 size_t len = sizeof(pid_t) * get_kern_max_proc();
3161 int i = 0, kp_cnt = 0;
3162
3163 if (!do_apple_internal_logging) {
3164 return;
3165 }
3166
3167 runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);
3168
3169 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
3170 return;
3171 }
3172 if (!job_assumes(j, (kp_cnt = proc_listpgrppids(j->p, pids, len)) != -1)) {
3173 goto out;
3174 }
3175
3176 for (i = 0; i < kp_cnt; i++) {
3177 pid_t p_i = pids[i];
3178 if (p_i == j->p) {
3179 continue;
3180 } else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
3181 continue;
3182 }
3183
3184 struct proc_bsdshortinfo proc;
3185 if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3186 if (errno != ESRCH) {
3187 job_assumes(j, errno == 0);
3188 }
3189 continue;
3190 }
3191
3192 pid_t pp_i = proc.pbsi_ppid;
3193 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
3194 const char *n = proc.pbsi_comm;
3195
3196 job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
3197 }
3198
3199 out:
3200 free(pids);
3201 }
3202
/* Reap a job whose process has exited: collect its wait status and rusage,
 * tear down per-process resources (shared memory, redirected stdio, fork
 * fd), log how it died, reply to any outstanding spawn/exit-status waiters,
 * and reset per-run bookkeeping. On return j->reaped is set and j->p is 0.
 * The ordering here is deliberate and fragile; do not reorder casually.
 */
void
job_reap(job_t j)
{
	struct rusage ru;
	int status;

	/* The system bootstrapper is the PID-1 job at the root manager. */
	bool is_system_bootstrapper = j->is_bootstrapper && pid1_magic && !j->mgr->parentmgr;

	job_log(j, LOG_DEBUG, "Reaping");

	if (j->shmem) {
		(void)job_assumes(j, vm_deallocate(mach_task_self(), (vm_address_t)j->shmem, getpagesize()) == 0);
		j->shmem = NULL;
	}

	if (unlikely(j->weird_bootstrap)) {
		int64_t junk = 0;
		job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
	}

	if (j->log_redirect_fd && !j->legacy_LS_job) {
		job_log_stdouterr(j); /* one last chance */

		/* job_log_stdouterr() may itself have closed the fd; re-check. */
		if (j->log_redirect_fd) {
			(void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
			j->log_redirect_fd = 0;
		}
	}

	if (j->fork_fd) {
		(void)job_assumes(j, runtime_close(j->fork_fd) != -1);
		j->fork_fd = 0;
	}

	if (j->anonymous) {
		/* Anonymous jobs are not our children; there is nothing to
		 * wait4() on, so synthesize a clean exit.
		 */
		status = 0;
		memset(&ru, 0, sizeof(ru));
	} else {
		/*
		 * The job is dead. While the PID/PGID is still known to be
		 * valid, try to kill abandoned descendant processes.
		 */
		job_log_stray_pg(j);
		if (!j->abandon_pg) {
			if (unlikely(runtime_killpg(j->p, SIGTERM) == -1 && errno != ESRCH)) {
#ifdef __LP64__
				job_log(j, LOG_APPLEONLY, "Bug: 5487498");
#else
				(void)job_assumes(j, false);
#endif
			}
		}

		/* We have to work around one of two kernel bugs here. ptrace(3) may
		 * have abducted the child away from us and reparented it to the tracing
		 * process. If the process then exits, we still get NOTE_EXIT, but we
		 * cannot reap it because the kernel may not have restored the true
		 * parent/child relationship in time.
		 *
		 * See <rdar://problem/5020256>.
		 *
		 * The other bug is if the shutdown monitor has suspended a task and not
		 * resumed it before exiting. In this case, the kernel will not clean up
		 * after the shutdown monitor. It will, instead, leave the task
		 * task suspended and not process any pending signals on the event loop
		 * for the task.
		 *
		 * There are a variety of other kernel bugs that could prevent a process
		 * from exiting, usually having to do with faulty hardware or talking to
		 * misbehaving drivers that mark a thread as uninterruptible and
		 * deadlock/hang before unmarking it as such. So we have to work around
		 * that too.
		 *
		 * See <rdar://problem/9284889&9359725>.
		 */
		if (j->workaround9359725) {
			job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
			status = W_EXITCODE(-1, SIGSEGV);
			memset(&ru, 0, sizeof(ru));
		} else if (wait4(j->p, &status, 0, &ru) == -1) {
			job_log(j, LOG_NOTICE, "Assuming job exited: <rdar://problem/5020256>: %d: %s", errno, strerror(errno));
			status = W_EXITCODE(-1, SIGSEGV);
			memset(&ru, 0, sizeof(ru));
		}
	}

	if (j->exit_timeout) {
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}

	LIST_REMOVE(j, pid_hash_sle);

	if (j->sent_signal_time) {
		uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);

		td_sec = td / NSEC_PER_SEC;
		td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

		job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
	}

	/* Accumulate this run's resource usage into the job's lifetime totals. */
	timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
	timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
	j->ru.ru_maxrss += ru.ru_maxrss;
	j->ru.ru_ixrss += ru.ru_ixrss;
	j->ru.ru_idrss += ru.ru_idrss;
	j->ru.ru_isrss += ru.ru_isrss;
	j->ru.ru_minflt += ru.ru_minflt;
	j->ru.ru_majflt += ru.ru_majflt;
	j->ru.ru_nswap += ru.ru_nswap;
	j->ru.ru_inblock += ru.ru_inblock;
	j->ru.ru_oublock += ru.ru_oublock;
	j->ru.ru_msgsnd += ru.ru_msgsnd;
	j->ru.ru_msgrcv += ru.ru_msgrcv;
	j->ru.ru_nsignals += ru.ru_nsignals;
	j->ru.ru_nvcsw += ru.ru_nvcsw;
	j->ru.ru_nivcsw += ru.ru_nivcsw;

	if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
		int level = LOG_WARNING;
		/* Rate-limit repeated failed-exec warnings to every
		 * LAUNCHD_LOG_FAILED_EXEC_FREQ-th occurrence.
		 */
		if (!j->did_exec && (j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
			level = LOG_DEBUG;
		}

		job_log(j, level, "Exited with code: %d", WEXITSTATUS(status));
	} else {
		j->fail_cnt = 0;
	}

	if (WIFSIGNALED(status)) {
		int s = WTERMSIG(status);
		/* SIGKILL/SIGTERM on a job we did not stop ourselves is just a
		 * normal (if noisy) exit, not a crash.
		 */
		if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
			job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
		} else if (!j->stopped && !j->clean_kill) {
			switch (s) {
			/* Signals which indicate a crash. */
			case SIGILL:
			case SIGABRT:
			case SIGFPE:
			case SIGBUS:
			case SIGSEGV:
			case SIGSYS:
			/* If the kernel has posted NOTE_EXIT and the signal sent to the process was
			 * SIGTRAP, assume that it's a crash.
			 */
			case SIGTRAP:
				j->crashed = true;
				job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
				break;
			default:
				job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
				break;
			}

			if (is_system_bootstrapper && j->crashed) {
				job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
			}
		}
	}

	j->reaped = true;

	/* On crash (or a child that never exec'd), drain and/or reset the
	 * job's Mach service ports as each service's flags request.
	 */
	struct machservice *msi = NULL;
	if (j->crashed || !(j->did_exec || j->anonymous)) {
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
				machservice_drain_port(msi);
			}

			if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
				machservice_resetport(j, msi);
			}
		}
	}

	/* HACK: Essentially duplicating the logic directly above. But this has
	 * gotten really hairy, and I don't want to try consolidating it right now.
	 */
	if (j->xpc_service && !j->xpcproxy_did_exec) {
		job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
		SLIST_FOREACH(msi, &j->machservices, sle) {
			/* Drain the messages but do not reset the port. If xpcproxy could
			 * not exec(3), then we don't want to continue trying, since there
			 * is very likely a serious configuration error with the service.
			 *
			 * <rdar://problem/8986802>
			 */
			machservice_resetport(j, msi);
		}
	}

	/* Forcibly resume any per-user launchds this job suspended but never
	 * got around to resuming before it died.
	 */
	struct suspended_peruser *spi = NULL;
	while ((spi = LIST_FIRST(&j->suspended_perusers))) {
		job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
		spi->j->peruser_suspend_count--;
		if (spi->j->peruser_suspend_count == 0) {
			job_dispatch(spi->j, false);
		}
		LIST_REMOVE(spi, sle);
		free(spi);
	}

	j->last_exit_status = status;

	if (j->exit_status_dest) {
		errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
		if (errno && errno != MACH_SEND_INVALID_DEST) {
			(void)job_assumes(j, errno == 0);
		}

		j->exit_status_dest = MACH_PORT_NULL;
	}

	if (j->spawn_reply_port) {
		/* If the child never called exec(3), we must send a spawn() reply so
		 * that the requestor can get exit status from it. If we fail to send
		 * the reply for some reason, we have to deallocate the exit status port
		 * ourselves.
		 */
		kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
		if (kr) {
			if (kr != MACH_SEND_INVALID_DEST) {
				errno = kr;
				(void)job_assumes(j, errno == KERN_SUCCESS);
			}

			(void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
		}

		j->exit_status_port = MACH_PORT_NULL;
		j->spawn_reply_port = MACH_PORT_NULL;
	}

	/* Anonymous jobs only hold a runtime ref when flagged to; real jobs
	 * always do.
	 */
	if (j->anonymous) {
		total_anon_children--;
		if (j->holds_ref) {
			runtime_del_ref();
		}
	} else {
		runtime_del_ref();
		total_children--;
	}

	if (j->has_console) {
		g_wsp = 0;
	}

	if (j->shutdown_monitor) {
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
		_s_shutdown_monitor = NULL;
		j->shutdown_monitor = false;
	}

	if (j->event_monitor && !j->mgr->shutting_down) {
		msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (msi->event_update_port) {
				break;
			}
		}
		/* Only do this if we've gotten the port-destroyed notification already.
		 * If we haven't yet, the port destruction handler will do this.
		 */
		if (job_assumes(j, msi != NULL) && !msi->isActive) {
			if (_s_event_update_port == MACH_PORT_NULL) {
				(void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
			}
			eventsystem_ping();
		}
	}

	/* Reset per-run state so the job can be dispatched again cleanly. */
	if (!j->anonymous) {
		j->mgr->normal_active_cnt--;
	}
	j->sent_signal_time = 0;
	j->sent_sigkill = false;
	j->clean_kill = false;
	j->sent_kill_via_shmem = false;
	j->lastlookup = NULL;
	j->lastlookup_gennum = 0;
	j->p = 0;
}
3485
3486 void
3487 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3488 {
3489 jobmgr_t jmi, jmn;
3490 job_t ji, jn;
3491
3492 if (jm->shutting_down) {
3493 return;
3494 }
3495
3496 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3497 jobmgr_dispatch_all(jmi, newmounthack);
3498 }
3499
3500 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3501 if (newmounthack && ji->start_on_mount) {
3502 ji->start_pending = true;
3503 }
3504
3505 job_dispatch(ji, false);
3506 }
3507 }
3508
/* Dispatch any "curious" jobs — jobs with OtherJobEnabled/OtherJobDisabled
 * semaphores — whose semaphore names j's label, since j's state change may
 * satisfy their keepalive condition.
 */
void
job_dispatch_curious_jobs(job_t j)
{
	job_t ji = NULL, jt = NULL;
	/* SAFE iteration: dispatching ji may remove it from s_curious_jobs. */
	SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
		struct semaphoreitem *si = NULL;
		SLIST_FOREACH(si, &ji->semaphores, sle) {
			if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
				continue;
			}

			if (strcmp(si->what, j->label) == 0) {
				job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);

				/* A job being removed that is still curious about us
				 * indicates a dependency cycle; don't re-dispatch it.
				 */
				if (!ji->removing) {
					job_dispatch(ji, false);
				} else {
					job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
				}

				/* ji could be removed here, so don't do anything with it or its semaphores
				 * after this point.
				 */
				break;
			}
		}
	}
}
3537
/* Decide what to do with a job right now: remove it if useless, start it
 * if kickstarted or its keepalive criteria are met, otherwise (re)arm its
 * watches. Returns the (possibly aliased) job, or NULL if the job was
 * removed, is awaiting an audit session, or (embedded) fails the
 * privileged-action check. Callers must treat a NULL return as "j may be
 * gone".
 */
job_t
job_dispatch(job_t j, bool kickstart)
{
	/* Don't dispatch a job if it has no audit session set. */
	if (!uuid_is_null(j->expected_audit_uuid)) {
		return NULL;
	}
	if (j->alias) {
		j = j->alias;
	}

#if TARGET_OS_EMBEDDED
	/* Privileged actions may only target jobs owned by the same user as
	 * the privileged job that initiated the action.
	 */
	if (g_embedded_privileged_action && s_embedded_privileged_job) {
		if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
			errno = EPERM;
			return NULL;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return NULL;
	}
#endif

	/*
	 * The whole job removal logic needs to be consolidated. The fact that
	 * a job can be removed from just about anywhere makes it easy to have
	 * stale pointers left behind somewhere on the stack that might get
	 * used after the deallocation. In particular, during job iteration.
	 *
	 * This is a classic example. The act of dispatching a job may delete it.
	 */
	if (!job_active(j)) {
		if (job_useless(j)) {
			job_remove(j);
			return NULL;
		}
		/* Suspended per-user launchds must not be restarted until resumed. */
		if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
			return NULL;
		}

		if (kickstart || job_keepalive(j)) {
			job_log(j, LOG_DEBUG, "Starting job (kickstart = %s)", kickstart ? "true" : "false");
			job_start(j);
		} else {
			job_log(j, LOG_DEBUG, "Watching job (kickstart = %s)", kickstart ? "true" : "false");
			job_watch(j);

			/*
			 * 5455720
			 *
			 * Path checking and monitoring is really racy right now.
			 * We should clean this up post Leopard.
			 */
			if (job_keepalive(j)) {
				job_start(j);
			}
		}
	} else {
		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job (%s).", job_active(j));
	}

	return j;
}
3606
/* Emit one line of a job's captured stdout/stderr to syslog at LOG_NOTICE,
 * attributed to the job (label, manager name, PID) rather than to launchd.
 */
void
job_log_stdouterr2(job_t j, const char *msg, ...)
{
	/* Positional initializer; field order must match struct runtime_syslog_attr. */
	struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
	va_list ap;

	va_start(ap, msg);
	runtime_vsyslog(&attr, msg, ap);
	va_end(ap);
}
3617
/* Drain the job's stdout/stderr redirection pipe, logging each non-empty
 * line via job_log_stdouterr2(). On EOF or a non-EAGAIN read error, the
 * pipe is closed and the job is re-dispatched (its keepalive state may
 * have changed).
 */
void
job_log_stdouterr(job_t j)
{
	char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
	bool close_log_redir = false;
	ssize_t rsz;

	if (!job_assumes(j, buf != NULL)) {
		return;
	}

	bufindex = buf;

	/* +1 above leaves room for the NUL terminator written below. */
	rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);

	if (unlikely(rsz == 0)) {
		job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
		close_log_redir = true;
	} else if (rsz == -1) {
		/* EAGAIN just means the non-blocking pipe is empty; anything
		 * else is treated as fatal for the redirection.
		 */
		if (!job_assumes(j, errno == EAGAIN)) {
			close_log_redir = true;
		}
	} else {
		buf[rsz] = '\0';

		/* strsep() consumes the buffer, splitting on CR and LF. */
		while ((msg = strsep(&bufindex, "\n\r"))) {
			if (msg[0]) {
				job_log_stdouterr2(j, "%s", msg);
			}
		}
	}

	free(buf);

	if (unlikely(close_log_redir)) {
		(void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
		job_dispatch(j, false);
	}
}
3658
/* Send SIGKILL to the job's process and arm a one-shot timer so we can
 * detect processes that fail to die even after SIGKILL. Anonymous jobs and
 * jobs without a live PID are ignored.
 */
void
job_kill(job_t j)
{
	if (unlikely(j->anonymous || j->p == 0)) {
		return;
	}

	(void)job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);

	j->sent_sigkill = true;
	(void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j) != -1);

	job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
}
3673
3674 void
3675 job_open_shutdown_transaction(job_t j)
3676 {
3677 if (j->kill_via_shmem) {
3678 if (j->shmem) {
3679 job_log(j, LOG_DEBUG, "Opening shutdown transaction for job.");
3680 (void)__sync_add_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
3681 } else {
3682 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it has not set up shared memory. Treating normally.");
3683 j->dirty_at_shutdown = false;
3684 }
3685 } else {
3686 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
3687 j->dirty_at_shutdown = false;
3688 }
3689 }
3690
/* Close a previously-opened shutdown transaction. When the decrement takes
 * the shared-memory transaction count below zero, the job is considered
 * clean and is killed immediately.
 */
void
job_close_shutdown_transaction(job_t j)
{
	/* NOTE(review): j->shmem is dereferenced without a NULL check here.
	 * This presumably relies on job_open_shutdown_transaction() having
	 * cleared dirty_at_shutdown whenever shmem was absent — confirm that
	 * no other path sets dirty_at_shutdown without shmem.
	 */
	if (j->dirty_at_shutdown) {
		job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
		if (__sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1) == -1) {
			job_log(j, LOG_DEBUG, "Job is now clean. Killing.");
			job_kill(j);
		}
		j->dirty_at_shutdown = false;
	}
}
3703
3704 void
3705 job_log_children_without_exec(job_t j)
3706 {
3707 pid_t *pids = NULL;
3708 size_t len = sizeof(pid_t) * get_kern_max_proc();
3709 int i = 0, kp_cnt = 0;
3710
3711 if (!do_apple_internal_logging || j->anonymous || j->per_user) {
3712 return;
3713 }
3714
3715 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
3716 return;
3717 }
3718 if (!job_assumes(j, (kp_cnt = proc_listchildpids(j->p, pids, len)) != -1)) {
3719 goto out;
3720 }
3721
3722 for (i = 0; i < kp_cnt; i++) {
3723 struct proc_bsdshortinfo proc;
3724 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3725 if (errno != ESRCH) {
3726 job_assumes(j, errno == 0);
3727 }
3728 continue;
3729 }
3730 if (proc.pbsi_flags & P_EXEC) {
3731 continue;
3732 }
3733
3734 job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
3735 }
3736
3737 out:
3738 free(pids);
3739 }
3740
/* Finish reaping a job whose exit was deferred because a tracer (ptrace)
 * had abducted it. Clears the tracing state and, if the job's exit was
 * pending, synthesizes the NOTE_EXIT handling we skipped earlier.
 */
void
job_cleanup_after_tracer(job_t j)
{
	j->tracing_pid = 0;
	if (j->reap_after_trace) {
		job_log(j, LOG_DEBUG | LOG_CONSOLE, "Reaping job now that attached tracer is gone.");
		struct kevent kev;
		EV_SET(&kev, j->p, 0, 0, NOTE_EXIT, 0, 0);

		/* Fake a kevent to keep our logic consistent. */
		job_callback_proc(j, &kev);

		/* Normally, after getting a EVFILT_PROC event, we do garbage collection
		 * on the root job manager. To make our fakery complete, we will do garbage
		 * collection at the beginning of the next run loop cycle (after we're done
		 * draining the current queue of kevents).
		 */
		(void)job_assumes(j, kevent_mod((uintptr_t)&root_jobmgr->reboot_flags, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, root_jobmgr) != -1);
	}
}
3761
/* Handle an EVFILT_PROC kevent for a job: NOTE_EXIT (possibly deferred
 * behind an attached tracer), NOTE_EXEC (relabel anonymous jobs, complete
 * spawn replies), and NOTE_FORK (diagnostics). May remove the job; callers
 * must not touch j afterwards when it was anonymous and exited.
 */
void
job_callback_proc(job_t j, struct kevent *kev)
{
	bool program_changed = false;
	int fflags = kev->fflags;

	job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
	log_kevent_struct(LOG_DEBUG, kev, 0);

	if (fflags & NOTE_EXIT) {
		if (j->p == (pid_t)kev->ident && !j->anonymous) {
			/* Note that the third argument to proc_pidinfo() is a magic argument for
			 * PROC_PIDT_SHORTBSDINFO. Specifically, passing 1 means "don't fail on a zombie
			 * PID".
			 */
			struct proc_bsdshortinfo proc;
			if (job_assumes(j, proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0)) {
				if (!job_assumes(j, (pid_t)proc.pbsi_ppid == getpid())) {
					/* Someone has attached to the process with ptrace(). There's a race here.
					 * If we determine that we are not the parent process and then fail to attach
					 * a kevent to the parent PID (who is probably using ptrace()), we can take that as an
					 * indication that the parent exited between sysctl(3) and kevent_mod(). The
					 * reparenting of the PID should be atomic to us, so in that case, we reap the
					 * job as normal.
					 *
					 * Otherwise, we wait for the death of the parent tracer and then reap, just as we
					 * would if a job died while we were sampling it at shutdown.
					 *
					 * Note that we foolishly assume that in the process *tree* a node cannot be its
					 * own parent. Apparently, that is not correct. If this is the case, we forsake
					 * the process to its own devices. Let it reap itself.
					 */
					if (!job_assumes(j, proc.pbsi_ppid != kev->ident)) {
						job_log(j, LOG_WARNING, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
						return;
					}
					if (job_assumes(j, kevent_mod(proc.pbsi_ppid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j) != -1)) {
						j->tracing_pid = proc.pbsi_ppid;
						j->reap_after_trace = true;
						return;
					}
				}
			}
		} else if (!j->anonymous) {
			/* The exiting PID is not the job's own: it is either the
			 * tracer we were waiting on, or the job exiting while a
			 * trace is still in progress.
			 */
			if (j->tracing_pid == (pid_t)kev->ident) {
				job_cleanup_after_tracer(j);

				return;
			} else if (j->tracing_pid && !j->reap_after_trace) {
				/* The job exited before our sample completed. */
				job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job has exited. Will reap after tracing PID %i exits.", j->tracing_pid);
				j->reap_after_trace = true;
				return;
			}
		}
	}

	if (fflags & NOTE_EXEC) {
		program_changed = true;

		if (j->anonymous) {
			struct proc_bsdshortinfo proc;
			if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
				char newlabel[1000];

				snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);

				job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);
				j->lastlookup = NULL;
				j->lastlookup_gennum = 0;

				/* Re-hash the job under its new label. */
				LIST_REMOVE(j, label_hash_sle);
				/* NOTE(review): unchecked strcpy into j->label; presumably the
				 * anonymous-job label allocation is sized for this — confirm
				 * against job_new_anonymous().
				 */
				strcpy((char *)j->label, newlabel);

				jobmgr_t where2put = root_jobmgr;
				if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
					where2put = j->mgr;
				}
				LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
			} else if (errno != ESRCH) {
				job_assumes(j, errno == 0);
			}
		} else {
			/* A managed job exec()ed: the spawn reply (if any) can now be
			 * delivered so the requestor can await exit status.
			 */
			if (j->spawn_reply_port) {
				errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
				if (errno) {
					if (errno != MACH_SEND_INVALID_DEST) {
						(void)job_assumes(j, errno == KERN_SUCCESS);
					}
					(void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
				}

				j->spawn_reply_port = MACH_PORT_NULL;
				j->exit_status_port = MACH_PORT_NULL;
			}

			/* Second exec for an XPC service means xpcproxy itself exec'd
			 * the real service binary.
			 */
			if (j->xpc_service && j->did_exec) {
				j->xpcproxy_did_exec = true;
			}

			j->did_exec = true;
			job_log(j, LOG_DEBUG, "Program changed");
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
		job_log_children_without_exec(j);
	}

	if (fflags & NOTE_EXIT) {
		job_reap(j);

		if (j->anonymous) {
			job_remove(j);
			j = NULL;
		} else {
			j = job_dispatch(j, false);
		}
	}
}
3883
/* Handle an EVFILT_TIMER kevent for a job. The ident pointer selects the
 * timer: the job itself (kickstart), its semaphores or start interval
 * (re-dispatch), or its exit timeout (escalate to SIGKILL, or simulate
 * exit if SIGKILL already failed to kill it).
 */
void
job_callback_timer(job_t j, void *ident)
{
	if (j == ident) {
		job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (!job_assumes(j, j->p != 0)) {
			return;
		}

		if (j->sent_sigkill) {
			/* SIGKILL already went out and the process is still here:
			 * the kernel is wedged (see the rdar below), so pretend the
			 * process exited.
			 */
			uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);

			td /= NSEC_PER_SEC;
			td -= j->clean_kill ? 0 : j->exit_timeout;

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
			j->workaround9359725 = true;

			if (g_trap_sigkill_bugs) {
				job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
				(void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
			}

			/* We've simulated the exit, so we have to cancel the kevent for
			 * this job, otherwise we may get a kevent later down the road that
			 * has a stale context pointer (if we've removed the job). Or worse,
			 * it'll corrupt our data structures if the job still exists or the
			 * allocation was recycled.
			 *
			 * If the failing process had a tracer attached to it, we need to
			 * remove out NOTE_EXIT for that tracer too, otherwise the same
			 * thing might happen.
			 *
			 * Note that, if we're not shutting down, this will result in a
			 * zombie process just hanging around forever. But if the process
			 * didn't exit after receiving SIGKILL, odds are it would've just
			 * stuck around forever anyway.
			 *
			 * See <rdar://problem/9481630>.
			 */
			kevent_mod((uintptr_t)j->p, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
			if (j->tracing_pid) {
				kevent_mod((uintptr_t)j->tracing_pid, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
			}

			/* Inject a synthetic NOTE_EXIT through the normal callback path. */
			struct kevent bogus_exit;
			EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
			jobmgr_callback(j->mgr, &bogus_exit);
		} else {
			if (unlikely(j->debug_before_kill)) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
				(void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
			}

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
			job_kill(j);
		}
	} else {
		(void)job_assumes(j, false);
	}
}
3954
3955 void
3956 job_callback_read(job_t j, int ident)
3957 {
3958 if (ident == j->log_redirect_fd) {
3959 job_log_stdouterr(j);
3960 } else if (ident == j->stdin_fd) {
3961 job_dispatch(j, true);
3962 } else {
3963 socketgroup_callback(j);
3964 }
3965 }
3966
/* Propagate a process kevent to whichever job (in this manager or any
 * submanager) owns the PID in kev->ident. The kevent's udata is pointed at
 * the matching job before its callback runs.
 */
void
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
{
	jobmgr_t sub;
	job_t match;

	SLIST_FOREACH(sub, &jm->submgrs, sle) {
		jobmgr_reap_bulk(sub, kev);
	}

	match = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false);
	if (match != NULL) {
		kev->udata = match;
		job_callback(match, kev);
	}
}
3982
/*
 * Top-level kqueue callback for a job manager. Demultiplexes events that
 * were registered with the jobmgr (rather than an individual job) as the
 * kevent udata target: child-process state changes, signals, filesystem
 * mounts, timers, and vnode events on a couple of special descriptors.
 */
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	jobmgr_t jm = obj;
	job_t ji;

	switch (kev->filter) {
	case EVFILT_PROC:
		/* A child exited/forked/execed: route the event to the owning job
		 * anywhere in the manager tree, then run garbage collection. */
		jobmgr_reap_bulk(jm, kev);
		root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:
			jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		case SIGUSR2:
			/* Fake a shutdown for debugging: crank logging up to DEBUG and,
			 * when we're PID 1, forward SIGUSR2 to per-user launchds. */
			fake_shutdown_in_progress = true;
			runtime_setlogmask(LOG_UPTO(LOG_DEBUG));

			runtime_closelog(); /* HACK -- force 'start' time to be set */

			if (pid1_magic) {
				int64_t now = runtime_get_wall_time();

				jobmgr_log(jm, LOG_NOTICE, "Anticipatory shutdown began at: %lld.%06llu", now / USEC_PER_SEC, now % USEC_PER_SEC);

				LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
					if (ji->per_user && ji->p) {
						(void)job_assumes(ji, runtime_kill(ji->p, SIGUSR2) != -1);
					}
				}
			} else {
				jobmgr_log(jm, LOG_NOTICE, "Anticipatory per-user launchd shutdown");
			}

			return;
		default:
			return (void)jobmgr_assumes(jm, false);
		}
		break;
	case EVFILT_FS:
		/* A volume was mounted: jobs waiting on paths may now be runnable. */
		if (kev->fflags & VQ_MOUNT) {
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		/* The timer's ident encodes which logical timer fired; each case
		 * below matches the address used when the timer was registered. */
		if (kev->ident == (uintptr_t)&sorted_calendar_events) {
			calendarinterval_callback();
		} else if (kev->ident == (uintptr_t)jm) {
			jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
			jobmgr_still_alive_with_check(jm);
		} else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
			jobmgr_do_garbage_collection(jm);
		} else if (kev->ident == (uintptr_t)&g_runtime_busy_time) {
			jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
			if (jobmgr_assumes(jm, runtime_busy_cnt == 0)) {
				return launchd_shutdown();
			}
		}
		break;
	case EVFILT_VNODE:
		if (kev->ident == (uintptr_t)s_no_hang_fd) {
			/* /dev/autofs_nowait has become available: switch our cached
			 * descriptor over to it and drop the watch on the old one. */
			int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
			if (unlikely(_no_hang_fd != -1)) {
				jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
				(void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
				(void)jobmgr_assumes(root_jobmgr, runtime_close(s_no_hang_fd) != -1);
				s_no_hang_fd = _fd(_no_hang_fd);
			}
		} else if (pid1_magic && g_console && kev->ident == (uintptr_t)fileno(g_console)) {
			/* The console device went away (e.g. was revoked); reopen it. */
			int cfd = -1;
			if (launchd_assumes((cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1)) {
				_fd(cfd);
				if (!launchd_assumes((g_console = fdopen(cfd, "w")) != NULL)) {
					close(cfd);
				}
			}
		}
		break;
	default:
		return (void)jobmgr_assumes(jm, false);
	}
}
4070
4071 void
4072 job_callback(void *obj, struct kevent *kev)
4073 {
4074 job_t j = obj;
4075
4076 job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
4077
4078 switch (kev->filter) {
4079 case EVFILT_PROC:
4080 return job_callback_proc(j, kev);
4081 case EVFILT_TIMER:
4082 return job_callback_timer(j, (void *) kev->ident);
4083 case EVFILT_VNODE:
4084 return semaphoreitem_callback(j, kev);
4085 case EVFILT_READ:
4086 return job_callback_read(j, (int) kev->ident);
4087 case EVFILT_MACHPORT:
4088 return (void)job_dispatch(j, true);
4089 default:
4090 return (void)job_assumes(j, false);
4091 }
4092 }
4093
/*
 * Launch the job 'j': apply respawn throttling, set up the IPC/exec/log
 * descriptors, fork, and register a NOTE_EXIT|NOTE_FORK|NOTE_EXEC kevent
 * on the child. The child side of the fork blocks until the parent has
 * attached that kevent (the "uncork" handshake over execspair).
 */
void
job_start(job_t j)
{
	uint64_t td;
	int spair[2];	/* trusted-fd IPC socketpair (only when sipc) */
	int execspair[2];	/* parent->child "go ahead" handshake */
	int oepair[2];	/* pipe capturing the child's stdout/stderr */
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (unlikely(job_active(j))) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of the wall clock
	 * wherever possible.
	 */
	td = runtime_get_nanoseconds_since(j->start_time);
	td /= NSEC_PER_SEC;

	/* Throttle: if the job last started less than min_run_time seconds ago,
	 * defer the respawn with a one-shot timer instead of starting now. */
	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;

		/*
		 * We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */
		int level = LOG_WARNING;
		if (!j->did_exec && ((j->fail_cnt - 1) % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
			level = LOG_DEBUG;
		}

		job_log(j, level, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		(void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
		job_ignore(j);
		return;
	}

	/* Decide whether the child needs a trusted IPC socket to check in over. */
	if (likely(!j->legacy_mach_job)) {
		sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_special_privileges;
	}

	if (sipc) {
		(void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
	}

	(void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);

	/* Capture the child's stdout/stderr through a non-blocking pipe that we
	 * watch with EVFILT_READ (see job_callback_read / job_log_stdouterr). */
	if (likely(!j->legacy_mach_job) && job_assumes(j, pipe(oepair) != -1)) {
		j->log_redirect_fd = _fd(oepair[0]);
		(void)job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
		(void)job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
	}

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		/* fork() failed: close everything we set up and retry in a second. */
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		(void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j) != -1);
		job_ignore(j);

		(void)job_assumes(j, runtime_close(execspair[0]) == 0);
		(void)job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			(void)job_assumes(j, runtime_close(spair[0]) == 0);
			(void)job_assumes(j, runtime_close(spair[1]) == 0);
		}
		if (likely(!j->legacy_mach_job)) {
			(void)job_assumes(j, runtime_close(oepair[0]) != -1);
			(void)job_assumes(j, runtime_close(oepair[1]) != -1);
			j->log_redirect_fd = 0;
		}
		break;
	case 0:
		/* Child side. */
		if (unlikely(_vproc_post_fork_ping())) {
			_exit(EXIT_FAILURE);
		}
		if (!j->legacy_mach_job) {
			/* Point stdout/stderr at the logging pipe's write end. */
			(void)job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
			(void)job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
			(void)job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		(void)job_assumes(j, runtime_close(execspair[0]) == 0);
		/* wait for our parent to say they've attached a kevent to us */
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			/* Publish the trusted IPC fd to the child via the environment. */
			(void)job_assumes(j, runtime_close(spair[0]) == 0);
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		/* Parent side: reset per-run state and start tracking the child. */
		j->start_time = runtime_get_opaque_time();

		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->did_exec = false;
		j->xpcproxy_did_exec = false;
		j->checkedin = false;
		j->start_pending = false;
		j->reaped = false;
		j->crashed = false;
		j->stopped = false;
		if (j->needs_kickoff) {
			j->needs_kickoff = false;

			if (SLIST_EMPTY(&j->semaphores)) {
				j->ondemand = false;
			}
		}

		if (j->has_console) {
			g_wsp = c;
		}

		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);

		if (likely(!j->legacy_mach_job)) {
			(void)job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		j->p = c;

		j->mgr->normal_active_cnt++;
		j->fork_fd = _fd(execspair[0]);
		(void)job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			(void)job_assumes(j, runtime_close(spair[1]) == 0);
			ipc_open(_fd(spair[0]), j);
		}
		/* Attach the NOTE_EXIT kevent; if that fails, reap immediately so
		 * we never lose track of the child. */
		if (job_assumes(j, kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1)) {
			job_ignore(j);
		} else {
			job_reap(j);
		}

		j->wait4debugger_oneshot = false;

		/* One-shot environment entries only apply to this launch. */
		struct envitem *ei = NULL, *et = NULL;
		SLIST_FOREACH_SAFE(ei, &j->env, sle, et) {
			if (ei->one_shot) {
				SLIST_REMOVE(&j->env, ei, envitem, sle);
			}
		}

		if (likely(!j->stall_before_exec)) {
			job_uncork_fork(j);
		}
		break;
	}
}
4256
/*
 * Child-side exec path: build the argv (optionally globbing it), apply
 * spawn attributes (suspension for debuggers, ASLR, binary preferences,
 * quarantine, sandbox) and exec the job via posix_spawn with
 * POSIX_SPAWN_SETEXEC (i.e. exec-in-place, no second fork).
 * Never returns; _exit()s on failure.
 */
void
job_start_child(job_t j)
{
	typeof(posix_spawn) *psf;
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC;
	size_t binpref_out_cnt = 0;
	size_t i;

	(void)job_assumes(j, posix_spawnattr_init(&spattr) == 0);

	job_setup_attributes(j);

	/* Build argv. GLOB_DOOFFS + gl_offs==1 reserves slot 0 so that
	 * launchproxy can be prepended for inetd-compatible jobs. */
	if (unlikely(j->argv && j->globargv)) {
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (likely(j->argv)) {
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	/* Non-inetd jobs don't go through launchproxy: skip the slot we
	 * reserved for it. */
	if (likely(!j->inetcompat)) {
		argv++;
	}

	if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
		if (!j->legacy_LS_job) {
			job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		}
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

	if (unlikely(j->disable_aslr)) {
		spflags |= _POSIX_SPAWN_DISABLE_ASLR;
	}
	spflags |= j->pstype;

	(void)job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);

	/* Preferred CPU/binary architecture ordering, if configured. */
	if (unlikely(j->j_binpref_cnt)) {
		(void)job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
		(void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

#if HAVE_QUARANTINE
	/* Apply quarantine info to ourselves pre-exec so the job inherits it. */
	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
				(void)job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
			}
		}
	}
#endif

#if HAVE_SANDBOX
	/* Enter the sandbox profile before exec; failure aborts the launch. */
	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}
#endif

	/* posix_spawn() requires a full path; posix_spawnp() searches PATH. */
	psf = j->prog ? posix_spawn : posix_spawnp;

	if (likely(!j->inetcompat)) {
		file2exec = j->prog ? j->prog : argv[0];
	}

	errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);
	/* With SETEXEC, reaching here at all means the spawn failed. EBADARCH
	 * is logged elsewhere; rate-limit logging of other exec failures. */
	if (errno != EBADARCH) {
		int level = LOG_ERR;
		if ((j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
			level = LOG_DEBUG;
		}
		job_log_error(j, level, "posix_spawn(\"%s\", ...)", file2exec);
		errno = EXIT_FAILURE;
	}

#if HAVE_SANDBOX
out_bad:
#endif
	_exit(errno);
}
4370
4371 void
4372 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
4373 {
4374 launch_data_t tmp;
4375 struct envitem *ei;
4376 job_t ji;
4377
4378 if (jm->parentmgr) {
4379 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4380 } else {
4381 char **tmpenviron = environ;
4382 for (; *tmpenviron; tmpenviron++) {
4383 char envkey[1024];
4384 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4385 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
4386 strncpy(envkey, *tmpenviron, sizeof(envkey));
4387 *(strchr(envkey, '=')) = '\0';
4388 launch_data_dict_insert(dict, s, envkey);
4389 }
4390 }
4391
4392 LIST_FOREACH(ji, &jm->jobs, sle) {
4393 SLIST_FOREACH(ei, &ji->global_env, sle) {
4394 if ((tmp = launch_data_new_string(ei->value))) {
4395 launch_data_dict_insert(dict, tmp, ei->key);
4396 }
4397 }
4398 }
4399 }
4400
4401 void
4402 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
4403 {
4404 struct envitem *ei;
4405 job_t ji;
4406
4407 if (jm->parentmgr) {
4408 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4409 }
4410
4411 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
4412 SLIST_FOREACH(ei, &ji->global_env, sle) {
4413 setenv(ei->key, ei->value, 1);
4414 }
4415 }
4416 }
4417
/*
 * Diagnostic helper: scan every process on the system and log any whose
 * real/effective/saved UID matches j->mach_uid — used when a getpwuid()
 * lookup for that UID fails, to identify processes running under an
 * account that no longer exists. Apple-internal logging only.
 */
void
job_log_pids_with_weird_uids(job_t j)
{
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	pid_t *pids = NULL;
	uid_t u = j->mach_uid;
	int i = 0, kp_cnt = 0;

	if (!do_apple_internal_logging) {
		return;
	}

	pids = malloc(len);
	if (!job_assumes(j, pids != NULL)) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);

	/* libproc actually has some serious performance drawbacks when used over sysctl(3) in
	 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
	 * one kernel call, libproc requires that we get a list of PIDs we're interested in
	 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
	 * struct back in a single call for each one.
	 *
	 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
	 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
	 * libproc could go stale before we call proc_pidinfo().
	 *
	 * Note that proc_list*() APIs return the number of PIDs given back, not the number
	 * of bytes written to the buffer.
	 */
	if (!job_assumes(j, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		/* We perhaps should not log a bug here if we get ESRCH back, due to the race
		 * detailed above.
		 */
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				job_assumes(j, errno == 0);
			}
			continue;
		}

		uid_t i_euid = proc.pbsi_uid;
		uid_t i_uid = proc.pbsi_ruid;
		uid_t i_svuid = proc.pbsi_svuid;
		pid_t i_pid = pids[i];

		/* Only report processes that match the orphaned UID. */
		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);

/* Temporarily disabled due to 5423935 and 4946119. */
#if 0
		/* Ask the accountless process to exit. */
		(void)job_assumes(j, runtime_kill(i_pid, SIGTERM) != -1);
#endif
	}

out:
	free(pids);
}
4487
4488 static struct passwd *
4489 job_getpwnam(job_t j, const char *name)
4490 {
4491 /*
4492 * methodology for system daemons
4493 *
4494 * first lookup user record without any opendirectoryd interaction,
4495 * we don't know what interprocess dependencies might be in flight.
4496 * if that fails, we re-enable opendirectoryd interaction and
4497 * re-issue the lookup. We have to disable the libinfo L1 cache
4498 * otherwise libinfo will return the negative cache entry on the retry
4499 */
4500
4501 #if !TARGET_OS_EMBEDDED
4502 struct passwd *pw = NULL;
4503
4504 if (pid1_magic && j->mgr == root_jobmgr) {
4505 si_search_module_set_flags("ds", 1 /* SEARCH_MODULE_FLAG_DISABLED */);
4506 gL1CacheEnabled = false;
4507
4508 pw = getpwnam(name);
4509
4510 si_search_module_set_flags("ds", 0);
4511 }
4512
4513 if (pw == NULL) {
4514 pw = getpwnam(name);
4515 }
4516
4517 return pw;
4518 #else
4519 return getpwnam(name);
4520 #endif
4521 }
4522
/*
 * Look up a group record for 'name' on behalf of job 'j'. Same strategy
 * as job_getpwnam(): in PID 1's root manager, first try without
 * opendirectoryd (and with the libinfo L1 cache disabled so a negative
 * entry isn't returned on retry), then fall back to a full lookup.
 * Returns libinfo's thread-scoped record or NULL; callers must copy it.
 */
static struct group *
job_getgrnam(job_t j, const char *name)
{
#if !TARGET_OS_EMBEDDED
	struct group *gr = NULL;

	if (pid1_magic && j->mgr == root_jobmgr) {
		/* 1 == SEARCH_MODULE_FLAG_DISABLED: skip opendirectoryd. */
		si_search_module_set_flags("ds", 1 /* SEARCH_MODULE_FLAG_DISABLED */);
		gL1CacheEnabled = false;

		gr = getgrnam(name);

		si_search_module_set_flags("ds", 0);
	}

	if (gr == NULL) {
		gr = getgrnam(name);
	}

	return gr;
#else
#pragma unused (j)
	return getgrnam(name);
#endif
}
4548
/*
 * Sanity-check the inherited environment of a non-root per-user launch:
 * verify that HOME/USER/LOGNAME agree with the account database and with
 * our actual UID/GID. Currently logs on mismatch rather than exiting.
 */
void
job_postfork_test_user(job_t j)
{
	/* This function is all about 5201578 */

	const char *home_env_var = getenv("HOME");
	const char *user_env_var = getenv("USER");
	const char *logname_env_var = getenv("LOGNAME");
	uid_t tmp_uid, local_uid = getuid();
	gid_t tmp_gid, local_gid = getgid();
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	char loginname[2000];
	struct passwd *pwe;


	if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
				&& strcmp(user_env_var, logname_env_var) == 0)) {
		goto out_bad;
	}

	if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
		job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
		goto out_bad;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	tmp_uid = pwe->pw_uid;
	tmp_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	/* Compare cached account data against the inherited environment. */
	if (strcmp(loginname, logname_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
		goto out_bad;
	}
	if (strcmp(homedir, home_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
		goto out_bad;
	}
	if (local_uid != tmp_uid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'U', tmp_uid, local_uid);
		goto out_bad;
	}
	if (local_gid != tmp_gid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'G', tmp_gid, local_gid);
		goto out_bad;
	}

	return;
out_bad:
	/* Intentionally non-fatal for now; see the warning below. */
#if 0
	(void)job_assumes(j, runtime_kill(getppid(), SIGTERM) != -1);
	_exit(EXIT_FAILURE);
#else
	job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
#endif
}
4618
/*
 * Drop privileges in the child to the job's configured user/group:
 * setlogin(), then setgid(), then initgroups(), then setuid() — in that
 * order (see the 4616864 note below) — and finally populate the standard
 * account environment variables. _exit()s on any failure. When not
 * running as root, delegates to job_postfork_test_user() instead.
 */
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	if (getuid() != 0) {
		return job_postfork_test_user(j);
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	/* Resolve the target account by name or, failing that, by mach_uid. */
	if (j->username) {
		if ((pwe = job_getpwnam(j, j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(EXIT_FAILURE);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_log_pids_with_weird_uids(j);
			_exit(EXIT_FAILURE);
		}
	} else {
		/* No user configured: stay root. */
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	/* An explicit GroupName overrides the account's primary group. */
	if (j->groupname) {
		struct group *gre;

		if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(EXIT_FAILURE);
		}

		desired_gid = gre->gr_gid;
	}

	if (!job_assumes(j, setlogin(loginname) != -1)) {
		_exit(EXIT_FAILURE);
	}

	if (!job_assumes(j, setgid(desired_gid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (likely(!j->no_init_groups)) {
#if 1
		if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
			_exit(EXIT_FAILURE);
		}
#else
		/* Do our own little initgroups(). We do this to guarantee that we're
		 * always opted into dynamic group resolution in the kernel. initgroups(3)
		 * does not make this guarantee.
		 */
		int groups[NGROUPS], ngroups;

		/* A failure here isn't fatal, and we'll still get data we can use. */
		(void)job_assumes(j, getgrouplist(j->username, desired_gid, groups, &ngroups) != -1);

		if (!job_assumes(j, syscall(SYS_initgroups, ngroups, groups, desired_uid) != -1)) {
			_exit(EXIT_FAILURE);
		}
#endif
	}

	/* setuid() must come last: it forfeits the right to do the above. */
	if (!job_assumes(j, setuid(desired_uid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (likely(r > 0 && r < sizeof(tmpdirpath))) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	/* Don't overwrite values the job's plist explicitly configured. */
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
4745
/*
 * Child-side (pre-exec) process setup: nice value, resource limits,
 * session, I/O policy, chroot, user/group switch, working directory,
 * umask, standard descriptors, environment, and process group/session.
 * Order matters: chroot happens while still root, before the user switch;
 * chdir(WorkingDirectory) happens after, so it resolves as the target user.
 */
void
job_setup_attributes(job_t j)
{
	struct limititem *li;
	struct envitem *ei;

	if (unlikely(j->setnice)) {
		(void)job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
	}

	/* Apply configured soft/hard rlimits on top of the inherited values. */
	SLIST_FOREACH(li, &j->limits, sle) {
		struct rlimit rl;

		if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
			continue;
		}

		if (li->sethard) {
			rl.rlim_max = li->lim.rlim_max;
		}
		if (li->setsoft) {
			rl.rlim_cur = li->lim.rlim_cur;
		}

		if (setrlimit(li->which, &rl) == -1) {
			job_log_error(j, LOG_WARNING, "setrlimit()");
		}
	}

	if (unlikely(!j->inetcompat && j->session_create)) {
		launchd_SessionCreate();
	}

	if (unlikely(j->low_pri_io)) {
		(void)job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
	}
	if (unlikely(j->rootdir)) {
		(void)job_assumes(j, chroot(j->rootdir) != -1);
		(void)job_assumes(j, chdir(".") != -1);
	}

	job_postfork_become_user(j);

	if (unlikely(j->workingdir)) {
		(void)job_assumes(j, chdir(j->workingdir) != -1);
	}

	if (unlikely(j->setmask)) {
		umask(j->mask);
	}

	/* Wire up stdin/stdout/stderr from either an inherited fd or paths. */
	if (j->stdin_fd) {
		(void)job_assumes(j, dup2(j->stdin_fd, STDIN_FILENO) != -1);
	} else {
		job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
	}
	job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
	job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);

	jobmgr_setup_env_from_other_jobs(j->mgr);

	/* The job's own environment entries override anything set above. */
	SLIST_FOREACH(ei, &j->env, sle) {
		setenv(ei->key, ei->value, 1);
	}

	if (do_apple_internal_logging) {
		setenv(LAUNCHD_DO_APPLE_INTERNAL_LOGGING, "true", 1);
	}

#if !TARGET_OS_EMBEDDED	
	if (j->jetsam_properties) {
		(void)job_assumes(j, proc_setpcontrol(PROC_SETPC_TERMINATE) == 0);
	}
#endif

#if TARGET_OS_EMBEDDED
	if (j->main_thread_priority != 0) {
		struct sched_param params;
		bzero(&params, sizeof(params));
		params.sched_priority = j->main_thread_priority;
		(void)job_assumes(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params) != -1);
	}
#endif

	/*
	 * We'd like to call setsid() unconditionally, but we have reason to
	 * believe that prevents launchd from being able to send signals to
	 * setuid children. We'll settle for process-groups.
	 */
	if (getppid() != 1) {
		(void)job_assumes(j, setpgid(0, 0) != -1);
	} else {
		(void)job_assumes(j, setsid() != -1);
	}
}
4841
4842 void
4843 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
4844 {
4845 int fd;
4846
4847 if (!path) {
4848 return;
4849 }
4850
4851 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
4852 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
4853 return;
4854 }
4855
4856 (void)job_assumes(j, dup2(fd, target_fd) != -1);
4857 (void)job_assumes(j, runtime_close(fd) == 0);
4858 }
4859
4860 int
4861 dir_has_files(job_t j, const char *path)
4862 {
4863 DIR *dd = opendir(path);
4864 struct dirent *de;
4865 bool r = 0;
4866
4867 if (unlikely(!dd)) {
4868 return -1;
4869 }
4870
4871 while ((de = readdir(dd))) {
4872 if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
4873 r = 1;
4874 break;
4875 }
4876 }
4877
4878 (void)job_assumes(j, closedir(dd) == 0);
4879 return r;
4880 }
4881
/*
 * Compute the next fire time for calendar interval 'ci', insert it into
 * the global list sorted by when_next, and (re)arm the single shared
 * absolute-time kevent timer to the earliest pending fire time.
 */
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	/* A weekday constraint competes with (or replaces) the day-of-month
	 * one; cron semantics take whichever matches sooner. */
	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	/* Insert in ascending when_next order; ci_prev trails the cursor so we
	 * can append at the end if no later entry is found. */
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		/* ci must want to fire after every other timer, or there are no timers */

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	/* One shared timer covers all calendar events: arm it for the head. */
	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
		char time_string[100];
		size_t time_string_len;

		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		/* Strip ctime_r()'s trailing newline before logging. */
		if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
4937
/*
 * Extract the revision number from an RCS keyword string such as
 * "$Revision: 25693 $": copy the token between the first space and the
 * next space into 'o'. If 'i' contains no space, copy it verbatim.
 * The output is always NUL-terminated (for osz > 0) and truncated to fit.
 *
 * Uses snprintf() for the bounded copy instead of the non-standard BSD
 * strlcpy(); behavior (truncate + always terminate) is identical.
 */
void
extract_rcsid_substr(const char *i, char *o, size_t osz)
{
	char *rcs_rev_tmp = strchr(i, ' ');

	if (!rcs_rev_tmp) {
		(void)snprintf(o, osz, "%s", i);
	} else {
		(void)snprintf(o, osz, "%s", rcs_rev_tmp + 1);
		/* Terminate at the space that precedes the closing '$'. */
		rcs_rev_tmp = strchr(o, ' ');
		if (rcs_rev_tmp) {
			*rcs_rev_tmp = '\0';
		}
	}
}
4953
4954 void
4955 jobmgr_log_bug(jobmgr_t jm, unsigned int line)
4956 {
4957 static const char *file;
4958 int saved_errno = errno;
4959 char buf[100];
4960
4961 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4962
4963 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
4964
4965 if (!file) {
4966 file = strrchr(__FILE__, '/');
4967 if (!file) {
4968 file = __FILE__;
4969 } else {
4970 file += 1;
4971 }
4972 }
4973
4974 /* the only time 'jm' should not be set is if setting up the first bootstrap fails for some reason */
4975 if (likely(jm)) {
4976 jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4977 } else {
4978 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4979 }
4980 }
4981
4982 void
4983 job_log_bug(job_t j, unsigned int line)
4984 {
4985 static const char *file;
4986 int saved_errno = errno;
4987 char buf[100];
4988
4989 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4990
4991 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
4992
4993 if (!file) {
4994 file = strrchr(__FILE__, '/');
4995 if (!file) {
4996 file = __FILE__;
4997 } else {
4998 file += 1;
4999 }
5000 }
5001
5002 if (likely(j)) {
5003 job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
5004 } else {
5005 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
5006 }
5007 }
5008
/*
 * Core logging routine for job-scoped messages: formats 'msg' (optionally
 * appending strerror(err)), tags it with the job's label and manager, and
 * hands it to the runtime syslog machinery. Honors the job's per-job
 * debug flag by temporarily widening the log mask.
 */
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	const char *label2use = j ? j->label : "com.apple.launchd.NULL";
	const char *mgr2use = j ? j->mgr->name : "NULL";
	struct runtime_syslog_attr attr = { g_my_label, label2use, mgr2use, pri, getuid(), getpid(), j ? j->p : 0 };
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	/*
	 * Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(), but before the exec*(). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	/* 200 bytes of headroom covers the label prefix and strerror text. */
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
#else
		snprintf(newmsg, newmsgsz, "(%s) %s: %s", label2use, msg, strerror(err));
#endif
	} else {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s", msg);
#else
		snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
#endif
	}

	/* Per-job debugging: let everything through for this one message. */
	if (j && unlikely(j->debug)) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	runtime_vsyslog(&attr, newmsg, ap);

	if (j && unlikely(j->debug)) {
		setlogmask(oldmask);
	}
}
5055
5056 void
5057 job_log_error(job_t j, int pri, const char *msg, ...)
5058 {
5059 va_list ap;
5060
5061 va_start(ap, msg);
5062 job_logv(j, pri, errno, msg, ap);
5063 va_end(ap);
5064 }
5065
5066 void
5067 job_log(job_t j, int pri, const char *msg, ...)
5068 {
5069 va_list ap;
5070
5071 va_start(ap, msg);
5072 job_logv(j, pri, 0, msg, ap);
5073 va_end(ap);
5074 }
5075
#if 0
/* Currently compiled out: jobmgr counterpart of job_log_error(). Kept
 * for symmetry until a caller needs errno-annotated manager logging. */
void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}
#endif
5087
/* Log a message for job manager "jm" at syslog priority "pri".
 * Thin variadic front end for jobmgr_logv(); passes err == 0 so no
 * strerror() text is appended. */
void
jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, 0, msg, ap);
	va_end(ap);
}
5097
/* Core logging routine for job managers.
 *
 * Prepends the manager's name to "msg" and walks up the parent chain so
 * the final message carries the full manager hierarchy. The combined
 * string is later consumed as a printf-style format (by the recursive
 * call / runtime_vsyslog()), so any '%' in the manager name is escaped
 * as "%%" first. When "err" is non-zero its strerror() text is folded
 * into the message here — which is why the recursive call passes 0. */
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	/* Worst case every character of the name is '%', doubling its length.
	 * (jmname_len + 1) * 2 covers that plus the NUL terminator. */
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	/* Copy the manager name, escaping '%' as '%%'. */
	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		/* Bubble up; each level prepends its own (escaped) name. */
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		/* Root manager: emit the accumulated message. "ap" has only been
		 * forwarded, never consumed, so it is still valid here. */
		struct runtime_syslog_attr attr = { g_my_label, g_my_label, jm->name, pri, getuid(), getpid(), getpid() };

		runtime_vsyslog(&attr, newmsg, ap);
	}
}
5132
/* Stop delivering vnode events for this semaphore item. The fd (if any)
 * stays open; only the kevent registration is removed. */
void
semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
{
	if (si->fd == -1) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
	(void)job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
}
5141
/* Begin (or re-establish) vnode monitoring for a KeepAlive path item.
 *
 * Opens the target path with O_EVTONLY and attaches an EVFILT_VNODE
 * kevent. For device nodes, or when the target does not yet exist, the
 * parent directory is watched instead (si->watching_parent). On
 * filesystems without kqueue support (ENOTSUP) the job falls back to a
 * 3-second polling timer. */
void
semaphoreitem_watch(job_t j, struct semaphoreitem *si)
{
	char *parentdir, tmp_path[PATH_MAX];
	int saved_errno = 0;
	int fflags = NOTE_DELETE|NOTE_RENAME;

	/* Build up the event mask; the cases intentionally cascade so each
	 * reason gets the union of the flags below it. */
	switch (si->why) {
	case DIR_NOT_EMPTY:
	case PATH_CHANGES:
		fflags |= NOTE_ATTRIB|NOTE_LINK;
		/* fall through */
	case PATH_EXISTS:
		fflags |= NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
		/* fall through */
	case PATH_MISSING:
		break;
	default:
		return;
	}

	/* dirname() may modify tmp_path */
	strlcpy(tmp_path, si->what, sizeof(tmp_path));

	if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
		return;
	}

	/* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
	do {
		if (si->fd == -1) {
			struct stat sb;
			if (stat(si->what, &sb) == 0) {
				/* If we're watching a character or block device, only watch the parent directory.
				 * See rdar://problem/6489900 for the gory details. Basically, holding an open file
				 * descriptor to a devnode could end up (a) blocking us on open(2) until someone else
				 * open(2)s the file (like a character device that waits for a carrier signal) or
				 * (b) preventing other processes from obtaining an exclusive lock on the file, even
				 * though we're opening it with O_EVTONLY.
				 *
				 * The main point of contention is that O_EVTONLY doesn't actually mean "event only".
				 * It means "Don't prevent unmounts of this descriptor's volume". We work around this
				 * for dev nodes by only watching the parent directory and stat(2)ing our desired file
				 * each time the parent changes to see if it appeared or disappeared.
				 */
				if (S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode)) {
					si->fd = _fd(open(si->what, O_EVTONLY | O_NOCTTY | O_NONBLOCK));
				}
			}

			/* Could not (or chose not to) watch the target directly:
			 * watch its parent instead. */
			if (si->fd == -1) {
				si->watching_parent = job_assumes(j, (si->fd = _fd(open(parentdir, O_EVTONLY | O_NOCTTY | O_NONBLOCK))) != -1);
			} else {
				si->watching_parent = false;
			}
		}

		if (si->fd == -1) {
			return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", si->what);
		}

		job_log(j, LOG_DEBUG, "Watching %svnode (%s): %d", si->watching_parent ? "parent ": "", si->what, si->fd);

		if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
			saved_errno = errno;
			/*
			 * The FD can be revoked between the open() and kevent().
			 * This is similar to the inability for kevents to be
			 * attached to short lived zombie processes after fork()
			 * but before kevent().
			 */
			(void)job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1;
		}
	} while (unlikely((si->fd == -1) && (saved_errno == ENOENT)));

	if (saved_errno == ENOTSUP) {
		/*
		 * 3524219 NFS needs kqueue support
		 * 4124079 VFS needs generic kqueue support
		 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
		 */
		job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);

		if (!j->poll_for_vfs_changes) {
			j->poll_for_vfs_changes = true;
			/* 3-second periodic timer keyed on the semaphore list head. */
			(void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
		}
	}
}
5232
/* EVFILT_VNODE event handler for a job's path-based KeepAlive items.
 *
 * Finds the semaphore item whose fd matches the kevent, closes and marks
 * the fd for re-establishment if the vnode was invalidated (deleted,
 * renamed, or revoked), re-arms the watch as needed, and finally gives
 * the job a chance to launch. */
void
semaphoreitem_callback(job_t j, struct kevent *kev)
{
	char invalidation_reason[100] = "";
	struct semaphoreitem *si;

	/* Locate the path-type item that owns this kevent. If the loop runs
	 * to completion without a break, SLIST_FOREACH leaves si == NULL. */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		switch (si->why) {
		case PATH_CHANGES:
		case PATH_EXISTS:
		case PATH_MISSING:
		case DIR_NOT_EMPTY:
			job_log(j, LOG_DEBUG, "P%s changed (%u): %s", si->watching_parent ? "arent path" : "ath", si->why, si->what);
			break;
		default:
			continue;
		}

		if (si->fd == (int)kev->ident) {
			break;
		}
	}

	if (!job_assumes(j, si != NULL)) {
		return;
	}

	/* Build a human-readable list of invalidation causes. The strcat()s
	 * are bounded: at most "deleted/renamed/revoked" fits well within
	 * the 100-byte buffer. */
	if (NOTE_DELETE & kev->fflags) {
		strcat(invalidation_reason, "deleted");
	}

	if (NOTE_RENAME & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/renamed");
		} else {
			strcat(invalidation_reason, "renamed");
		}
	}

	if (NOTE_REVOKE & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/revoked");
		} else {
			strcat(invalidation_reason, "revoked");
		}
	}

	if (invalidation_reason[0]) {
		job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
		(void)job_assumes(j, runtime_close(si->fd) == 0);
		si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
	}

	if (!si->watching_parent) {
		if (si->why == PATH_CHANGES) {
			j->start_pending = true;
		} else {
			semaphoreitem_watch(j, si);
		}
	} else { /* Something happened to the parent directory. See if our target file appeared. */
		if (!invalidation_reason[0]) {
			(void)job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
			semaphoreitem_watch(j, si);
		}
		/* Need to think about what should happen if the parent directory goes invalid. */
	}

	job_dispatch(j, false);
}
5303
/* Context threaded through launch_data_dict_iterate() while parsing a
 * StartCalendarInterval dictionary into a struct tm. */
struct cal_dict_walk {
	job_t j;		/* job being configured (used for logging) */
	struct tm tmptm;	/* accumulated fields; tm_sec == -1 flags a parse error */
};
5308
/* Dictionary-iteration callback: validate one StartCalendarInterval key
 * (Minute/Hour/Day/Weekday/Month) and store it into the struct tm being
 * built. Out-of-range values set tm_sec to -1, which the caller treats
 * as "parse failed". */
void
calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
{
	struct cal_dict_walk *cdw = context;
	struct tm *tmptm = &cdw->tmptm;
	job_t j = cdw->j;
	int64_t val;

	if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
		/* hack to let caller know something went wrong */
		tmptm->tm_sec = -1;
		return;
	}

	val = launch_data_get_integer(obj);

	if (val < 0) {
		job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
		if (val > 59) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_min = (typeof(tmptm->tm_min)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
		if (val > 23) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
		if (val < 1 || val > 31) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
		if (val > 7) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
		if (val > 12) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
			tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
		}
	}
}
5365
5366 bool
5367 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5368 {
5369 struct cal_dict_walk cdw;
5370
5371 cdw.j = j;
5372 memset(&cdw.tmptm, 0, sizeof(0));
5373
5374 cdw.tmptm.tm_min = -1;
5375 cdw.tmptm.tm_hour = -1;
5376 cdw.tmptm.tm_mday = -1;
5377 cdw.tmptm.tm_wday = -1;
5378 cdw.tmptm.tm_mon = -1;
5379
5380 if (!job_assumes(j, obj != NULL)) {
5381 return false;
5382 }
5383
5384 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5385 return false;
5386 }
5387
5388 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5389
5390 if (unlikely(cdw.tmptm.tm_sec == -1)) {
5391 return false;
5392 }
5393
5394 return calendarinterval_new(j, &cdw.tmptm);
5395 }
5396
5397 bool
5398 calendarinterval_new(job_t j, struct tm *w)
5399 {
5400 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5401
5402 if (!job_assumes(j, ci != NULL)) {
5403 return false;
5404 }
5405
5406 ci->when = *w;
5407 ci->job = j;
5408
5409 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
5410
5411 calendarinterval_setalarm(j, ci);
5412
5413 runtime_add_weak_ref();
5414
5415 return true;
5416 }
5417
5418 void
5419 calendarinterval_delete(job_t j, struct calendarinterval *ci)
5420 {
5421 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
5422 LIST_REMOVE(ci, global_sle);
5423
5424 free(ci);
5425
5426 runtime_del_weak_ref();
5427 }
5428
5429 void
5430 calendarinterval_sanity_check(void)
5431 {
5432 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5433 time_t now = time(NULL);
5434
5435 if (unlikely(ci && (ci->when_next < now))) {
5436 (void)jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
5437 }
5438 }
5439
/* Timer callback: fire every calendar interval whose deadline has
 * arrived, re-arm each one, and dispatch its job. */
void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		/* The global list is sorted by when_next, so stop at the first
		 * interval still in the future. */
		if (ci->when_next > now) {
			break;
		}

		/* NOTE(review): calendarinterval_setalarm() appears to re-insert
		 * ci into the sorted list with its next fire time — confirm in
		 * its definition elsewhere in this file. */
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
5460
5461 bool
5462 socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds)
5463 {
5464 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5465
5466 if (!job_assumes(j, sg != NULL)) {
5467 return false;
5468 }
5469
5470 sg->fds = calloc(1, fd_cnt * sizeof(int));
5471 sg->fd_cnt = fd_cnt;
5472 sg->junkfds = junkfds;
5473
5474 if (!job_assumes(j, sg->fds != NULL)) {
5475 free(sg);
5476 return false;
5477 }
5478
5479 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
5480 strcpy(sg->name_init, name);
5481
5482 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5483
5484 runtime_add_weak_ref();
5485
5486 return true;
5487 }
5488
/* Close every descriptor in the socket group, unlink it from the job,
 * and free it. */
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		/* Disabled: clean up the filesystem path behind UNIX-domain
		 * sockets when the group goes away. */
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		/* 5480306 */
		if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			(void)job_assumes(j, unlink(sun->sun_path) != -1);
			/* We might conditionally need to delete a directory here */
		}
#endif
		(void)job_assumes(j, runtime_close(sg->fds[i]) != -1);
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	/* One fewer reason for launchd to stay alive. */
	runtime_del_weak_ref();
}
5517
/* Add or remove EVFILT_READ kevents for every fd in the socket group in
 * one bulk call, then verify the per-event results.
 *
 * Fixes over the previous version: buf is initialized (it was printed
 * uninitialized when fd_cnt == 0), and buf_off is clamped — snprintf()
 * returns the would-be length on truncation, so buf_off could exceed
 * sizeof(buf) and the unsigned subtraction would wrap, handing snprintf
 * a huge size and an out-of-bounds pointer. */
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	struct kevent kev[sg->fd_cnt];
	char buf[10000];
	unsigned int i, buf_off = 0;

	if (unlikely(sg->junkfds)) {
		return;
	}

	buf[0] = '\0';

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		/* Append " %d" to the debug string, but never let buf_off pass
		 * the end of buf (snprintf reports would-be lengths). */
		if (buf_off < sizeof(buf)) {
			int len = snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
			if (len > 0) {
				buf_off += (unsigned int)len;
			}
		}
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	(void)job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);

	/* kevent_bulk_mod() uses EV_RECEIPT semantics: each entry comes back
	 * flagged EV_ERROR with its individual status in .data (0 == OK). */
	for (i = 0; i < sg->fd_cnt; i++) {
		(void)job_assumes(j, kev[i].flags & EV_ERROR);
		errno = (typeof(errno)) kev[i].data;
		(void)job_assumes(j, kev[i].data == 0);
	}
}
5544
/* Stop watching the group's sockets for readability. */
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, false);
}
5550
/* Begin watching the group's sockets for readability (demand launch). */
void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, true);
}
5556
/* A watched socket became readable: launch the job if appropriate. */
void
socketgroup_callback(job_t j)
{
	job_dispatch(j, true);
}
5562
5563 bool
5564 envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot)
5565 {
5566 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
5567
5568 if (!job_assumes(j, ei != NULL)) {
5569 return false;
5570 }
5571
5572 strcpy(ei->key_init, k);
5573 ei->value = ei->key_init + strlen(k) + 1;
5574 strcpy(ei->value, v);
5575 ei->one_shot = one_shot;
5576
5577 if (global) {
5578 if (SLIST_EMPTY(&j->global_env)) {
5579 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5580 }
5581 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5582 } else {
5583 SLIST_INSERT_HEAD(&j->env, ei, sle);
5584 }
5585
5586 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
5587
5588 return true;
5589 }
5590
5591 void
5592 envitem_delete(job_t j, struct envitem *ei, bool global)
5593 {
5594 if (global) {
5595 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
5596 if (SLIST_EMPTY(&j->global_env)) {
5597 LIST_REMOVE(j, global_env_sle);
5598 }
5599 } else {
5600 SLIST_REMOVE(&j->env, ei, envitem, sle);
5601 }
5602
5603 free(ei);
5604 }
5605
5606 void
5607 envitem_setup(launch_data_t obj, const char *key, void *context)
5608 {
5609 job_t j = context;
5610
5611 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5612 return;
5613 }
5614
5615 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5616 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, false);
5617 } else {
5618 job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5619 }
5620 }
5621
5622 void
5623 envitem_setup_one_shot(launch_data_t obj, const char *key, void *context)
5624 {
5625 job_t j = context;
5626
5627 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5628 return;
5629 }
5630
5631 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5632 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, true);
5633 } else {
5634 job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5635 }
5636 }
5637
5638 bool
5639 limititem_update(job_t j, int w, rlim_t r)
5640 {
5641 struct limititem *li;
5642
5643 SLIST_FOREACH(li, &j->limits, sle) {
5644 if (li->which == w) {
5645 break;
5646 }
5647 }
5648
5649 if (li == NULL) {
5650 li = calloc(1, sizeof(struct limititem));
5651
5652 if (!job_assumes(j, li != NULL)) {
5653 return false;
5654 }
5655
5656 SLIST_INSERT_HEAD(&j->limits, li, sle);
5657
5658 li->which = w;
5659 }
5660
5661 if (j->importing_hard_limits) {
5662 li->lim.rlim_max = r;
5663 li->sethard = true;
5664 } else {
5665 li->lim.rlim_cur = r;
5666 li->setsoft = true;
5667 }
5668
5669 return true;
5670 }
5671
5672 void
5673 limititem_delete(job_t j, struct limititem *li)
5674 {
5675 SLIST_REMOVE(&j->limits, li, limititem, sle);
5676
5677 free(li);
5678 }
5679
#if HAVE_SANDBOX
/* Dictionary-iteration callback for SandboxFlags: accumulate sandbox
 * flag bits on the job in "context". Only boolean true values have any
 * effect; currently only the "Named" flag is recognized. */
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	if (launch_data_get_bool(obj) == false) {
		return;
	}

	if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
5700
5701 void
5702 limititem_setup(launch_data_t obj, const char *key, void *context)
5703 {
5704 job_t j = context;
5705 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
5706 rlim_t rl;
5707
5708 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
5709 return;
5710 }
5711
5712 rl = launch_data_get_integer(obj);
5713
5714 for (i = 0; i < limits_cnt; i++) {
5715 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
5716 break;
5717 }
5718 }
5719
5720 if (i == limits_cnt) {
5721 return;
5722 }
5723
5724 limititem_update(j, launchd_keys2limits[i].val, rl);
5725 }
5726
/* Decide whether an exited job should be removed rather than kept for
 * relaunch. Returns true when the job has no further reason to exist. */
bool
job_useless(job_t j)
{
	/* Run-once (and legacy LaunchServices) jobs are done after one run —
	 * unless the legacy job still holds its privileged port. */
	if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
		if (j->legacy_LS_job && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->shutdown_monitor) {
		/* The shutdown monitor must survive shutdown itself. */
		return false;
	} else if (j->mgr->shutting_down) {
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		if (total_children == 0 && !j->anonymous) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
		}
		return true;
	} else if (j->legacy_mach_job) {
		/* Legacy Mach jobs live only as long as they export services and
		 * checked in properly. */
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	} else {
		/* If the job's executable does not have any valid architectures (for
		 * example, if it's a PowerPC-only job), then we don't even bother
		 * trying to relaunch it, as we have no reasonable expectation that
		 * the situation will change.
		 *
		 * <rdar://problem/9106979>
		 */
		if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
			job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
			return true;
		}
	}

	return false;
}
5771
/* Evaluate a job's KeepAlive criteria. Returns true when any configured
 * condition says the job should (re)launch now. As a side effect, some
 * path-based checks re-arm their vnode watches. */
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	struct stat sb;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
	bool is_not_kextd = (do_apple_internal_logging || (strcmp(j->label, "com.apple.kextd") != 0));

	if (unlikely(j->mgr->shutting_down)) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
		return false;
	}

	if (unlikely(j->needs_kickoff)) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	/* A message queued on any of the job's Mach service ports is a
	 * reason to launch. */
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}

	/* TODO: Coalesce external events and semaphore items, since they're basically
	 * the same thing.
	 */
	struct externalevent *ei = NULL;
	LIST_FOREACH(ei, &j->events, job_le) {
		if (ei->state == ei->wanted_state) {
			return true;
		}
	}

	/* Each positive case below sets wanted_state = true and deliberately
	 * falls through into its negative twin, so the pair shares a single
	 * comparison. */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		int qdir_file_cnt;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			/* FALLTHROUGH */
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			/* FALLTHROUGH */
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case CRASHED:
			wanted_state = true;
			/* FALLTHROUGH */
		case DID_NOT_CRASH:
			if (j->crashed == wanted_state) {
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(NULL, si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(NULL, si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		case PATH_EXISTS:
			wanted_state = true;
			/* FALLTHROUGH */
		case PATH_MISSING:
			if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
				return true;
			} else {
				/* Condition not met: make sure the vnode watch points at
				 * the right object (parent dir vs. the file itself) so we
				 * notice when the condition flips. */
				if (wanted_state) { /* File is not there but we wish it was. */
					if (si->fd != -1 && !si->watching_parent) { /* Need to be watching the parent now. */
						(void)job_assumes(j, runtime_close(si->fd) == 0);
						si->fd = -1;
						semaphoreitem_watch(j, si);
					}
				} else { /* File is there but we wish it wasn't. */
					if (si->fd != -1 && si->watching_parent) { /* Need to watch the file now. */
						(void)job_assumes(j, runtime_close(si->fd) == 0);
						si->fd = -1;
						semaphoreitem_watch(j, si);
					}
				}
			}
			break;
		case PATH_CHANGES:
			/* Purely event-driven; handled in semaphoreitem_callback(). */
			break;
		case DIR_NOT_EMPTY:
			if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
				job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
			} else if (qdir_file_cnt > 0) {
				job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
				return true;
			}
			break;
		}
	}

	return false;
}
5919
/* Report why a job is still considered active, or NULL if it is idle.
 * The returned string is a static human-readable reason for logging.
 * As a side effect, stale stdout/stderr redirection fds are closed. */
const char *
job_active(job_t j)
{
	struct machservice *ms;
	if (j->p && j->shutdown_monitor) {
		return "Monitoring shutdown";
	}
	if (j->p) {
		return "PID is still valid";
	}

	/* During shutdown, don't let a lingering log pipe keep the job alive. */
	if (j->mgr->shutting_down && j->log_redirect_fd) {
		(void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
	}

	if (j->log_redirect_fd) {
		/* Only legacy LaunchServices jobs are expected to hold a
		 * redirect fd past exit; anyone else gets it closed. */
		if (job_assumes(j, j->legacy_LS_job)) {
			return "Standard out/error is still valid";
		} else {
			(void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
			j->log_redirect_fd = 0;
		}
	}

	if (j->priv_port_has_senders) {
		return "Privileged Port still has outstanding senders";
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		if (ms->recv && machservice_active(ms)) {
			return "Mach service is still active";
		}
	}

	return NULL;
}
5957
/* Resume watching a Mach service's receive right for incoming messages
 * (demand launch). Only services we hold the receive right for. */
void
machservice_watch(job_t j, struct machservice *ms)
{
	if (ms->recv) {
		(void)job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
	}
}
5965
/* Stop watching a Mach service's port; messages queue until the owning
 * job takes over the receive right. */
void
machservice_ignore(job_t j, struct machservice *ms)
{
	(void)job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
}
5971
/* Destroy and recreate a Mach service's port (e.g. ResetAtClose).
 * Bumps gen_num so stale send rights from the previous generation can be
 * distinguished, and re-files the service in the port hash under the new
 * port name. */
void
machservice_resetport(job_t j, struct machservice *ms)
{
	LIST_REMOVE(ms, port_hash_sle);
	(void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	(void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
	ms->gen_num++;
	(void)job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
	(void)job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
5983
/* Create a Mach service named "name" for job "j" and register it in the
 * appropriate bootstrap namespace.
 *
 * If *serviceport is MACH_PORT_NULL a fresh receive right is created and
 * returned through it (ms->recv set); otherwise the caller-supplied port
 * is adopted and the service starts out active. "pid_local" marks a
 * per-PID service. Returns NULL on failure. */
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);

	if (!job_assumes(j, ms != NULL)) {
		return NULL;
	}

	/* ms->name is a trailing array sized by the strlen(name) + 1 above. */
	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->gen_num = 1;
	ms->per_pid = pid_local;

	if (likely(*serviceport == MACH_PORT_NULL)) {
		if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
			goto out_bad;
		}

		if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);

	jobmgr_t where2put = j->mgr;
	/* XPC domains are separate from Mach bootstraps. */
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		/* In a flat namespace, lookups go through the root manager
		 * unless this manager is an explicit private subset. */
		if (g_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2put = root_jobmgr;
		}
	}

	/* Don't allow MachServices added by multiple-instance jobs to be looked up by others.
	 * We could just do this with a simple bit, but then we'd have to uniquify the
	 * names ourselves to avoid collisions. This is just easier.
	 */
	if (!j->dedicated_instance) {
		LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	}
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);

	return ms;
out_bad2:
	(void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
out_bad:
	free(ms);
	return NULL;
}
6041
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
/* Create an alias of Mach service "orig" inside job "j"'s manager, so
 * the service is visible there under the same name. The alias carries no
 * port of its own; lookups follow ms->alias back to the original.
 * Returns NULL on allocation failure. */
struct machservice *
machservice_new_alias(job_t j, struct machservice *orig)
{
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
	if (job_assumes(j, ms != NULL)) {
		strcpy((char *)ms->name, orig->name);
		ms->alias = orig;
		ms->job = j;

		LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
		SLIST_INSERT_HEAD(&j->machservices, ms, sle);
		jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
	}

	return ms;
}
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
6060
6061 bootstrap_status_t
6062 machservice_status(struct machservice *ms)
6063 {
6064 ms = ms->alias ? ms->alias : ms;
6065 if (ms->isActive) {
6066 return BOOTSTRAP_STATUS_ACTIVE;
6067 } else if (ms->job->ondemand) {
6068 return BOOTSTRAP_STATUS_ON_DEMAND;
6069 } else {
6070 return BOOTSTRAP_STATUS_INACTIVE;
6071 }
6072 }
6073
/* Install the crash (EXC_MASK_CRASH) exception handler for a job.
 *
 * Precedence: the job's alternate handler service, then launchd's
 * internal handler, then the global exception server. With a valid
 * target_task the handler is installed on that task; otherwise (PID 1
 * only) it becomes the host-level handler. */
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (unlikely(j->alt_exc_handler)) {
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (likely(ms)) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (unlikely(j->internal_exc_handler)) {
		exc_port = runtime_get_kernel_port();
	} else if (unlikely(!exc_port)) {
		return;
	}

	/* Pick the thread-state flavor delivered with the exception. */
#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (likely(target_task)) {
		(void)job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
	} else if (pid1_magic && the_exception_server) {
		mach_port_t mhp = mach_host_self();
		(void)job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
		/* (void) added for consistency with the file's job_assumes convention. */
		(void)job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
	}
}
6112
/* Claim "port" as the global exception server and install it
 * immediately. Only the first claimant wins; later attempts are logged
 * and ignored. */
void
job_set_exception_port(job_t j, mach_port_t port)
{
	if (likely(the_exception_server)) {
		job_log(j, LOG_WARNING, "The exception server is already claimed!");
		return;
	}

	the_exception_server = port;
	job_setup_exception_port(j, 0);
}
6123
/* Dictionary-iteration callback: apply one per-service option from a
 * MachServices sub-dictionary (special ports, reset/hide flags, drain
 * policy, exception-server designation) to the machservice in "context".
 *
 * Fixes: the LAUNCH_DATA_INTEGER case previously fell straight through
 * into LAUNCH_DATA_BOOL with no break and no fallthrough comment —
 * harmless only because the two key sets are disjoint; a break is now in
 * place. The final job_assumes() also gains the (void) cast used
 * everywhere else in this file. */
void
machservice_setup_options(launch_data_t obj, const char *key, void *context)
{
	struct machservice *ms = context;
	mach_port_t mhp = mach_host_self();
	int which_port;
	bool b;

	if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
		return;
	}

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_INTEGER:
		which_port = (int)launch_data_get_integer(obj); /* XXX we should bound check this... */
		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
			switch (which_port) {
			case TASK_KERNEL_PORT:
			case TASK_HOST_PORT:
			case TASK_NAME_PORT:
			case TASK_BOOTSTRAP_PORT:
			/* I find it a little odd that zero isn't reserved in the header.
			 * Normally Mach is fairly good about this convention... */
			case 0:
				job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
				break;
			default:
				ms->special_port_num = which_port;
				SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
				break;
			}
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
			if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
				(void)job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
			} else {
				job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
			}
		}
		break;
	case LAUNCH_DATA_BOOL:
		b = launch_data_get_bool(obj);
		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
			ms->debug_on_close = b;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
			ms->reset = b;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
			ms->hide = b;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
			job_set_exception_port(ms->job, ms->port);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
			ms->kUNCServer = b;
			(void)job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_PINGEVENTUPDATES) == 0) {
			ms->event_update_port = b;
		}
		break;
	case LAUNCH_DATA_STRING:
		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
			const char *option = launch_data_get_string(obj);
			if (strcasecmp(option, "One") == 0) {
				ms->drain_one_on_crash = true;
			} else if (strcasecmp(option, "All") == 0) {
				ms->drain_all_on_crash = true;
			}
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		job_set_exception_port(ms->job, ms->port);
		break;
	default:
		break;
	}

	(void)job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
}
6198
void
machservice_setup(launch_data_t obj, const char *key, void *context)
{
	/* Register the Mach service named `key` for the job in `context`, then
	 * apply any per-service options from the value's dictionary. */
	job_t j = context;
	mach_port_t p = MACH_PORT_NULL;
	struct machservice *ms = jobmgr_lookup_service(j->mgr, key, false, 0);

	/* Refuse to register a name some other job already owns. */
	if (unlikely(ms != NULL)) {
		job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
		return;
	}

	ms = machservice_new(j, key, &p, false);
	if (!job_assumes(j, ms != NULL)) {
		return;
	}

	/* Services declared up front are inactive until the job checks in. */
	ms->isActive = false;
	ms->upfront = true;

	/* Per-service options, if any, arrive as a nested dictionary. */
	if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
		launch_data_dict_iterate(obj, machservice_setup_options, ms);
	}
}
6222
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	/* Shutdown-time garbage collection for `jm` and (recursively) its
	 * submanagers. Returns `jm` while the manager is still alive, or NULL
	 * once it has been removed. Called repeatedly as jobs exit. */
	jobmgr_t jmi = NULL, jmn = NULL;
	/* Collect depth-first so submanagers can disappear before their parent. */
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	/* Nothing to collect unless this manager is shutting down. */
	if (!jm->shutting_down) {
		return jm;
	}

	if (SLIST_EMPTY(&jm->submgrs)) {
		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
		SLIST_FOREACH(jmi, &jm->submgrs, sle) {
			jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
		}
	}

	/* Count jobs that still block shutdown, stopping the rest as we go. */
	size_t actives = 0;
	job_t ji = NULL, jn = NULL;
	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
		if (ji->anonymous) {
			continue;
		}

		/* Let the shutdown monitor be up until the very end. */
		if (ji->shutdown_monitor) {
			continue;
		}

		/* On our first pass through, open a transaction for all the jobs that
		 * need to be dirty at shutdown. We'll close these transactions once the
		 * jobs that do not need to be dirty at shutdown have all exited.
		 */
		if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
			job_open_shutdown_transaction(ji);
		}

		const char *active = job_active(ji);
		if (!active) {
			/* Fully idle jobs can be removed immediately. */
			job_remove(ji);
		} else {
			job_log(ji, LOG_DEBUG, "Job is active: %s", active);
			job_stop(ji);

			if (ji->p && !ji->dirty_at_shutdown) {
				/* We really only care if the job has not yet been reaped.
				 * There's no reason to delay shutdown if a Mach port has not
				 * yet been sent back to us, for example. While we're shutting
				 * all the "normal" jobs down, do not count the
				 * dirty-at-shutdown jobs toward the total of actives.
				 *
				 * Note that there's a potential race here where we may not get
				 * a port back in time, so that when we hit jobmgr_remove(), we
				 * end up removing the job and then our attempt to close the
				 * Mach port will fail. But at that point, the failure won't
				 * even make it to the syslog, so not a big deal.
				 */
				actives++;
			}

			if (ji->clean_kill) {
				job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
			} else {
				job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
			}
		}
	}

	/* First pass is done; dirty-at-shutdown transactions are now open. */
	jm->shutdown_jobs_dirtied = true;
	if (actives == 0) {
		if (!jm->shutdown_jobs_cleaned) {
			/* All normal jobs are gone: release the dirty-at-shutdown jobs
			 * and keep the manager alive until they exit too. */
			LIST_FOREACH(ji, &jm->jobs, sle) {
				if (!ji->anonymous) {
					job_close_shutdown_transaction(ji);
					actives++;
				}
			}

			jm->shutdown_jobs_cleaned = true;
		} else if (jm->monitor_shutdown && _s_shutdown_monitor) {
			/* The rest of shutdown has completed, so we can kill the shutdown
			 * monitor now like it was any other job.
			 */
			_s_shutdown_monitor->shutdown_monitor = false;
			actives = 1;

			job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
			job_stop(_s_shutdown_monitor);
			_s_shutdown_monitor = NULL;
		}
	}

	/* Only remove the manager once nothing beneath it remains. */
	jobmgr_t r = jm;
	if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
		jobmgr_log(jm, LOG_DEBUG, "Removing.");
		jobmgr_remove(jm);
		r = NULL;
	}

	return r;
}
6328
6329 void
6330 jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6331 {
6332 /* I maintain that stray processes should be at the mercy of launchd during shutdown,
6333 * but nevertheless, things like diskimages-helper can stick around, and SIGKILLing
6334 * them can result in data loss. So we send SIGTERM to all the strays and don't wait
6335 * for them to exit before moving on.
6336 *
6337 * See rdar://problem/6562592
6338 */
6339 size_t i = 0;
6340 for (i = 0; i < np; i++) {
6341 if (p[i] != 0) {
6342 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
6343 (void)jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
6344 }
6345 }
6346 }
6347
6348 void
6349 jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
6350 {
6351 size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6352 pid_t *pids = NULL;
6353 int i = 0, kp_cnt = 0;
6354
6355 if (likely(jm->parentmgr || !pid1_magic)) {
6356 return;
6357 }
6358
6359 if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
6360 return;
6361 }
6362
6363 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6364
6365 if (!jobmgr_assumes(jm, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
6366 goto out;
6367 }
6368
6369 pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
6370 for (i = 0; i < kp_cnt; i++) {
6371 struct proc_bsdshortinfo proc;
6372 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6373 if (errno != ESRCH) {
6374 jobmgr_assumes(jm, errno == 0);
6375 }
6376
6377 kp_skipped++;
6378 continue;
6379 }
6380
6381 pid_t p_i = pids[i];
6382 pid_t pp_i = proc.pbsi_ppid;
6383 pid_t pg_i = proc.pbsi_pgid;
6384 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6385 const char *n = proc.pbsi_comm;
6386
6387 if (unlikely(p_i == 0 || p_i == 1)) {
6388 kp_skipped++;
6389 continue;
6390 }
6391
6392 if (_s_shutdown_monitor && pp_i == _s_shutdown_monitor->p) {
6393 kp_skipped++;
6394 continue;
6395 }
6396
6397 /* We might have some jobs hanging around that we've decided to shut down in spite of. */
6398 job_t j = jobmgr_find_by_pid(jm, p_i, false);
6399 if (!j || (j && j->anonymous)) {
6400 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
6401
6402 int status = 0;
6403 if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
6404 if (jobmgr_assumes(jm, waitpid(p_i, &status, WNOHANG) == 0)) {
6405 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6406 }
6407 kp_skipped++;
6408 } else {
6409 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6410 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6411 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6412 * hints to the kernel along the way, so that it could shutdown certain subsystems when
6413 * their userspace emissaries go away, before the call to reboot(2).
6414 */
6415 if (leader && leader->ignore_pg_at_shutdown) {
6416 kp_skipped++;
6417 } else {
6418 ps[i] = p_i;
6419 }
6420 }
6421 } else {
6422 kp_skipped++;
6423 }
6424 }
6425
6426 if ((kp_cnt - kp_skipped > 0) && kill_strays) {
6427 jobmgr_kill_stray_children(jm, ps, kp_cnt - kp_skipped);
6428 }
6429
6430 free(ps);
6431 out:
6432 free(pids);
6433 }
6434
6435 jobmgr_t
6436 jobmgr_parent(jobmgr_t jm)
6437 {
6438 return jm->parentmgr;
6439 }
6440
6441 void
6442 job_uncork_fork(job_t j)
6443 {
6444 pid_t c = j->p;
6445
6446 job_log(j, LOG_DEBUG, "Uncorking the fork().");
6447 /* this unblocks the child and avoids a race
6448 * between the above fork() and the kevent_mod() */
6449 (void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
6450 (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
6451 j->fork_fd = 0;
6452 }
6453
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
{
	/* Create a new job manager (Mach sub-bootstrap).
	 *
	 * jm            parent manager, or NULL to create the root manager
	 * requestorport requesting process's port; required when jm != NULL, and
	 *               watched for death so the sub-bootstrap can be torn down
	 * transfer_port existing bootstrap receive right to adopt, or
	 *               MACH_PORT_NULL to check in / create a fresh one
	 * sflag         forwarded to the session bootstrapper ("-s")
	 * name          session name; NULL yields a port-index-derived name
	 * skip_init     when true, do not spawn the launchctl bootstrapper
	 * asport        audit session port to associate with the manager
	 *
	 * Returns the new manager, or NULL on failure (partially-built state is
	 * torn down via out_bad). */
	mach_msg_size_t mxmsgsz;
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	/* The kqueue dispatch machinery relies on the callback being first. */
	launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	/* The name is stored inline after the struct (name_init). */
	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));

	if (!jobmgr_assumes(jm, jmr != NULL)) {
		return NULL;
	}

	if (jm == NULL) {
		root_jobmgr = jmr;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	/* Ask for a dead-name notification so we notice the requestor dying. */
	if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		/* Adopt a bootstrap port handed to us (sub-managers only). */
		(void)jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && !pid1_magic) {
		/* Per-user launchd root: check in with the system launchd for our
		 * bootstrap port rather than creating a fresh receive right. */
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);

			/* If the inherited fd is still open, close both it and the probe. */
			if ((dfd = dup(lfd)) >= 0) {
				(void)jobmgr_assumes(jmr, runtime_close(dfd) != -1);
				(void)jobmgr_assumes(jmr, runtime_close(lfd) != -1);
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		/* cut off the Libc cache, we don't want to deadlock against ourself */
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);

		/* We set this explicitly as we start each child */
		launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
	} else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (!name) {
		/* Anonymous managers are named after their port index. */
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	/* Total hacks. But the MIG server loop is too generic, and the more dynamic
	 * parts of it haven't been tested, or if they have, it was a very long time
	 * ago.
	 */
	if (xpc_events_xpc_events_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = xpc_events_xpc_events_subsystem.maxsize;
	}
	if (xpc_domain_xpc_domain_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = xpc_domain_xpc_domain_subsystem.maxsize;
	}

	/* The root manager owns the process-wide signal and filesystem kevents. */
	if (!jm) {
		(void)jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		(void)jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		(void)jobmgr_assumes(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		(void)jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
	}

	if (name && !skip_init) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	/* Begin servicing MIG requests on the bootstrap port, unless a "weird"
	 * bootstrapper will take over that responsibility itself. */
	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		bootstrapper->asport = asport;

		jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
		(void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	} else {
		jmr->req_asport = asport;
	}

	if (asport != MACH_PORT_NULL) {
		/* Hold our own send right on the audit session port. */
		(void)jobmgr_assumes(jmr, launchd_mport_copy_send(asport) == KERN_SUCCESS);
	}

	if (jmr->parentmgr) {
		runtime_add_weak_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
		if (jm == NULL) {
			root_jobmgr = NULL;
		}
	}
	return NULL;
}
6594
6595 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
jobmgr_t
jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
{
	jobmgr_t singleton = NULL;

	/* These job managers are basically singletons, so we use the root Mach
	 * bootstrap port as their requestor ports so they'll never go away.
	 */
	mach_port_t req_port = root_jobmgr->jm_port;
	if (jobmgr_assumes(jm, launchd_mport_make_send(req_port) == KERN_SUCCESS)) {
		singleton = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
		if (singleton != NULL) {
			singleton->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
			singleton->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
			singleton->xpc_singleton = true;
		}
	}

	return singleton;
}
6616
jobmgr_t
jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
{
	/* Return the XPC per-user domain for `uid`, creating (and caching in
	 * _s_xpc_user_domains) a new singleton domain on first lookup.
	 * Returns NULL if creation fails. */
	jobmgr_t jmi = NULL;
	LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
		if (jmi->req_euid == uid) {
			return jmi;
		}
	}

	name_t name;
	(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
	jmi = jobmgr_new_xpc_singleton_domain(jm, name);
	if (jobmgr_assumes(jm, jmi != NULL)) {
		/* We need to create a per-user launchd for this UID if there isn't one
		 * already so we can grab the bootstrap port.
		 */
		job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
		if (jobmgr_assumes(jmi, puj != NULL)) {
			/* Take our own send rights on the audit-session and bootstrap
			 * ports before stashing them on the domain. */
			(void)jobmgr_assumes(jmi, launchd_mport_copy_send(puj->asport) == KERN_SUCCESS);
			(void)jobmgr_assumes(jmi, launchd_mport_copy_send(jmi->req_bsport) == KERN_SUCCESS);
			jmi->shortdesc = "per-user";
			jmi->req_asport = puj->asport;
			jmi->req_asid = puj->asid;
			jmi->req_euid = uid;
			jmi->req_egid = -1;

			LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
		} else {
			/* No per-user launchd: discard the half-built domain. */
			jobmgr_remove(jmi);
		}
	}

	return jmi;
}
6652
jobmgr_t
jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
{
	/* Return the XPC per-session domain for audit session `asid`, creating
	 * (and caching in _s_xpc_session_domains) a singleton domain on first
	 * lookup. Returns NULL if creation fails. */
	jobmgr_t jmi = NULL;
	LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
		if (jmi->req_asid == asid) {
			return jmi;
		}
	}

	name_t name;
	(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
	jmi = jobmgr_new_xpc_singleton_domain(jm, name);
	if (jobmgr_assumes(jm, jmi != NULL)) {
		/* Hold a send right on the root bootstrap port for this domain. */
		(void)jobmgr_assumes(jmi, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
		jmi->shortdesc = "per-session";
		jmi->req_bsport = root_jobmgr->jm_port;
		(void)jobmgr_assumes(jmi, audit_session_port(asid, &jmi->req_asport) == 0);
		jmi->req_asid = asid;
		jmi->req_euid = -1;
		jmi->req_egid = -1;

		LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
	} else {
		/* NOTE(review): this branch runs only when jmi is NULL, so
		 * jobmgr_remove(NULL) looks like dead/incorrect cleanup — compare
		 * jobmgr_find_xpc_per_user_domain(), where the remove is guarded by
		 * a non-NULL jmi. Worth confirming against jobmgr_remove()'s NULL
		 * handling. */
		jobmgr_remove(jmi);
	}

	return jmi;
}
6682 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
6683
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	/* Create the "launchctl bootstrap" job that initializes a session of
	 * `session_type` within `jm`. Returns the bootstrapper job (or NULL if
	 * job_new failed). `sflag` appends "-s" to the bootstrap command. */
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);

	if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
		/* Non-system sessions get a "weird" bootstrapper that services the
		 * bootstrap port itself. */
		bootstrapper->is_bootstrapper = true;
		char buf[100];

		/* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false, false);
		bootstrapper->weird_bootstrap = true;
		(void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	} else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
		bootstrapper->is_bootstrapper = true;
		if (jobmgr_assumes(jm, pid1_magic)) {
			/* Have our system bootstrapper print out to the console. */
			bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
			bootstrapper->stderrpath = strdup(_PATH_CONSOLE);

			if (g_console) {
				/* Watch for the console being revoked so we can reopen it. */
				(void)jobmgr_assumes(jm, kevent_mod((uintptr_t)fileno(g_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
			}
		}
	}

	jm->session_initialized = true;
	return bootstrapper;
}
6719
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	/* Purge every reference to `port` from `jm` and its submanagers after
	 * the receive right at the other end died. Returns `jm` if it survives,
	 * or NULL if this event triggered the manager's own shutdown. */
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right
	 * to a port we already have open, the Mach kernel gives us the same
	 * port number back and increments an reference count associated with
	 * the port. This forces us, when discovering that a receive right at
	 * the other end has been deleted, to wander all of our objects to see
	 * what weird places clients might have handed us the same send right
	 * to use.
	 */

	if (jm == root_jobmgr) {
		if (port == inherited_bootstrap_port) {
			/* Our parent launchd's bootstrap died: drop our reference and
			 * begin shutting down. */
			(void)jobmgr_assumes(jm, launchd_mport_deallocate(port) == KERN_SUCCESS);
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		/* The port hash is global, so only the root pass walks it. */
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port && !ms->recv) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
		return jobmgr_shutdown(jm);
	}

	return jm;
}
6762
struct machservice *
jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
{
	/* Find the Mach service `name` visible from `jm`.
	 *
	 * target_pid != 0 restricts the search to per-PID services registered
	 * by that process; otherwise the manager's (or, with a flat namespace,
	 * the root manager's) hash is searched, optionally recursing into
	 * parents when `check_parent` is set. Returns NULL when not found. */
	struct machservice *ms;
	job_t target_j;

	jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);

	if (target_pid) {
		/* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
		 * bootstrap in other bootstraps.
		 */

		/* Start in the given bootstrap. */
		if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
			/* If we fail, do a deep traversal. */
			if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
				jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
				return NULL;
			}
		}

		SLIST_FOREACH(ms, &target_j->machservices, sle) {
			if (ms->per_pid && strcmp(name, ms->name) == 0) {
				return ms;
			}
		}

		job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
		return NULL;
	}

	jobmgr_t where2look = jm;
	/* XPC domains are separate from Mach bootstraps. */
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		/* In a flat namespace, all services live in the root manager's hash,
		 * except within explicit subsets. */
		if (g_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2look = root_jobmgr;
		}
	}

	LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
		if (!ms->per_pid && strcmp(name, ms->name) == 0) {
			return ms;
		}
	}

	if (jm->parentmgr == NULL || !check_parent) {
		return NULL;
	}

	/* Fall back to the parent bootstrap's namespace. */
	return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
}
6815
mach_port_t
machservice_port(struct machservice *ms)
{
	/* Accessor: the Mach port backing this service. */
	return ms->port;
}
6821
6822 job_t
6823 machservice_job(struct machservice *ms)
6824 {
6825 return ms->job;
6826 }
6827
6828 bool
6829 machservice_hidden(struct machservice *ms)
6830 {
6831 return ms->hide;
6832 }
6833
6834 bool
6835 machservice_active(struct machservice *ms)
6836 {
6837 return ms->isActive;
6838 }
6839
6840 const char *
6841 machservice_name(struct machservice *ms)
6842 {
6843 return ms->name;
6844 }
6845
void
machservice_drain_port(struct machservice *ms)
{
	/* Drain queued messages from a crashed job's service port — either one
	 * message or all of them, per the service's DrainMessagesOnCrash
	 * setting. Exception-handler ports are drained through the exception
	 * runtime; ordinary ports are received and destroyed directly. */
	bool drain_one = ms->drain_one_on_crash;
	bool drain_all = ms->drain_all_on_crash;

	if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
		return;
	}

	job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);

	char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
	char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
	mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
	mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;

	/* Anything other than MACH_MSG_SUCCESS, so the loop runs at least once. */
	mach_msg_return_t mr = ~MACH_MSG_SUCCESS;

	do {
		/* This should be a direct check on the Mach service to see if it's an exception-handling
		 * port, and it will break things if ReportCrash or SafetyNet start advertising other
		 * Mach services. But for now, it should be okay.
		 */
		if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
			mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
		} else {
			/* Zero timeout: receive only what is already queued. */
			mach_msg_options_t options = MACH_RCV_MSG |
				MACH_RCV_TIMEOUT;

			mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
			switch (mr) {
			case MACH_MSG_SUCCESS:
				/* Release any rights/memory the drained message carried. */
				mach_msg_destroy((mach_msg_header_t *)req_hdr);
				break;
			case MACH_RCV_TIMED_OUT:
				/* Queue is empty. */
				break;
			case MACH_RCV_TOO_LARGE:
				runtime_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
				break;
			default:
				break;
			}
		}
	} while (drain_all && mr != MACH_RCV_TIMED_OUT);
}
6892
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	/* Remove `ms` from job `j`, releasing its port rights and unlinking it
	 * from every list/hash it sits on. `port_died` is for logging only. */
	if (ms->alias) {
		/* HACK: Egregious code duplication. But dealing with aliases is a
		 * pretty simple affair since they can't and shouldn't have any complex
		 * behaviors associated with them.
		 */
		LIST_REMOVE(ms, name_hash_sle);
		SLIST_REMOVE(&j->machservices, ms, machservice, sle);
		free(ms);
		return;
	}

	if (unlikely(ms->debug_on_close)) {
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		(void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
	}

	/* Close the receive right only if the service isn't checked out. */
	if (ms->recv && job_assumes(j, !machservice_active(ms))) {
		job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
		(void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	}

	/* Drop our send-right reference on the port. */
	(void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);

	if (unlikely(ms->port == the_exception_server)) {
		the_exception_server = 0;
	}

	job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}
	SLIST_REMOVE(&j->machservices, ms, machservice, sle);

	/* Dedicated-instance and event-channel services were never entered in
	 * the name hash. */
	if (!(j->dedicated_instance || ms->event_channel)) {
		LIST_REMOVE(ms, name_hash_sle);
	}
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
6937
void
machservice_request_notifications(struct machservice *ms)
{
	mach_msg_id_t which;

	ms->isActive = true;

	if (ms->recv) {
		/* We hold the receive right: watch for the port's destruction and
		 * treat handing it out as the job checking in. */
		which = MACH_NOTIFY_PORT_DESTROYED;
		job_checkin(ms->job);
	} else {
		/* Send-right only: a dead-name notification is all we can get. */
		which = MACH_NOTIFY_DEAD_NAME;
	}

	(void)job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
}
6952
6953 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
6954 #define END_OF(x) (&(x)[NELEM(x)])
6955
char **
mach_cmd2argv(const char *string)
{
	/* Split a command line into an argv vector. Whitespace separates
	 * arguments, double quotes group them, and backslash escapes the next
	 * character. Returns a single heap allocation holding the NULL-
	 * terminated pointer array followed by the argument strings (caller
	 * releases everything with one free()), or NULL if the string has no
	 * arguments or allocation fails. */
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		while (isspace((unsigned char)*cp)) {
			cp++;
		}
		if (*cp == '\0') {
			/* Trailing whitespace: don't record a spurious empty argument. */
			break;
		}
		term = (*cp == '"') ? *cp++ : '\0';
		if (nargs >= NELEM(argv) - 1 || argp >= END_OF(args) - 1) {
			/* Fix: out of argv slots or string space — stop parsing rather
			 * than overrun. The old code could write argv[100] (past the
			 * array) via the NULL sentinel, and could write the '\0'
			 * terminator one past the end of args[]. */
			break;
		}
		argv[nargs++] = argp;
		while (*cp && (term ? *cp != term : !isspace((unsigned char)*cp))) {
			if (*cp == '\\') {
				cp++;
			}
			/* Copy while room remains; always consume the input so an
			 * oversized token can't stall the loop. */
			if (argp < END_OF(args) - 1) {
				*argp++ = *cp;
			}
			if (*cp) {
				cp++;
			}
		}
		/* Bounded above, so the terminator always fits. */
		*argp++ = '\0';
	}
	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	/* strlen(string) + 1 is an upper bound on the total copied text, since
	 * quoting/escaping only shrinks it. */
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!launchd_assumes(argv_ret != NULL)) {
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
7005
7006 void
7007 job_checkin(job_t j)
7008 {
7009 j->checkedin = true;
7010 }
7011
7012 bool job_is_god(job_t j)
7013 {
7014 return j->embedded_special_privileges;
7015 }
7016
bool
job_ack_port_destruction(mach_port_t p)
{
	/* Handle a port-destroyed notification: the receive right for one of
	 * our registered services came back to us. Returns false if `p` is not
	 * a known service port; true after processing. */
	struct machservice *ms;
	job_t j;

	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->recv && (ms->port == p)) {
			break;
		}
	}

	if (!jobmgr_assumes(root_jobmgr, ms != NULL)) {
		return false;
	}

	j = ms->job;

	jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);

	/* Without being the exception handler, NOTE_EXIT is our only way to tell if the job
	 * crashed, and we can't rely on NOTE_EXIT always being processed after all the job's
	 * receive rights have been returned.
	 *
	 * So when we get receive rights back, check to see if the job has been reaped yet. If
	 * not, then we add this service to a list of services to be drained on crash if it's
	 * requested that behavior. So, for a job with N receive rights all requesting that they
	 * be drained on crash, we can safely handle the following sequence of events.
	 *
	 * ReceiveRight0Returned
	 * ReceiveRight1Returned
	 * ReceiveRight2Returned
	 * NOTE_EXIT (reap, get exit status)
	 * ReceiveRight3Returned
	 * .
	 * .
	 * .
	 * ReceiveRight(N - 1)Returned
	 */

	if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
		if (j->crashed && j->reaped) {
			job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
			machservice_drain_port(ms);
		} else if (!(j->crashed || j->reaped)) {
			job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
		}
	}

	/* If we get this notification after the job has been reaped, then we want to ping
	 * the event port to keep things going.
	 */
	if (ms->event_update_port && !j->p && job_assumes(j, j->event_monitor)) {
		if (_s_event_update_port == MACH_PORT_NULL) {
			(void)job_assumes(j, launchd_mport_make_send_once(ms->port, &_s_event_update_port) == KERN_SUCCESS);
		}
		eventsystem_ping();
	}

	/* The service is no longer checked out; optionally recycle it. */
	ms->isActive = false;
	if (ms->delete_on_destruction) {
		machservice_delete(j, ms, false);
	} else if (ms->reset) {
		machservice_resetport(j, ms);
	}

	/* Port hand-back may make the job eligible to run (or exit) again. */
	job_dispatch(j, false);

	root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);

	return true;
}
7089
7090 void
7091 job_ack_no_senders(job_t j)
7092 {
7093 j->priv_port_has_senders = false;
7094
7095 (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
7096 j->j_port = 0;
7097
7098 job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");
7099
7100 job_dispatch(j, false);
7101 }
7102
7103 bool
7104 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
7105 {
7106 struct semaphoreitem *si;
7107 size_t alloc_sz = sizeof(struct semaphoreitem);
7108
7109 if (what) {
7110 alloc_sz += strlen(what) + 1;
7111 }
7112
7113 if (!job_assumes(j, si = calloc(1, alloc_sz))) {
7114 return false;
7115 }
7116
7117 si->fd = -1;
7118 si->why = why;
7119
7120 if (what) {
7121 strcpy(si->what_init, what);
7122 }
7123
7124 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
7125
7126 if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
7127 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
7128 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
7129 j->nosy = true;
7130 }
7131
7132 semaphoreitem_runtime_mod_ref(si, true);
7133
7134 return true;
7135 }
7136
7137 void
7138 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
7139 {
7140 /*
7141 * External events need to be tracked.
7142 * Internal events do NOT need to be tracked.
7143 */
7144
7145 switch (si->why) {
7146 case SUCCESSFUL_EXIT:
7147 case FAILED_EXIT:
7148 case OTHER_JOB_ENABLED:
7149 case OTHER_JOB_DISABLED:
7150 case OTHER_JOB_ACTIVE:
7151 case OTHER_JOB_INACTIVE:
7152 return;
7153 default:
7154 break;
7155 }
7156
7157 if (add) {
7158 runtime_add_weak_ref();
7159 } else {
7160 runtime_del_weak_ref();
7161 }
7162 }
7163
7164 void
7165 semaphoreitem_delete(job_t j, struct semaphoreitem *si)
7166 {
7167 semaphoreitem_runtime_mod_ref(si, false);
7168
7169 SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
7170
7171 if (si->fd != -1) {
7172 (void)job_assumes(j, runtime_close(si->fd) != -1);
7173 }
7174
7175 /* We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores. */
7176 if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
7177 j->nosy = false;
7178 SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
7179 }
7180
7181 free(si);
7182 }
7183
7184 void
7185 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
7186 {
7187 struct semaphoreitem_dict_iter_context *sdic = context;
7188 semaphore_reason_t why;
7189
7190 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
7191
7192 semaphoreitem_new(sdic->j, why, key);
7193 }
7194
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	/* KeepAlive dictionary iterator: translate each KeepAlive condition
	 * into one or more semaphoreitems on the job (`context`). Boolean
	 * conditions map directly; dictionary conditions expand per-entry via
	 * semaphoreitem_setup_dict_iter(). */
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			/* Exit-status conditions require at least one initial launch. */
			j->start_pending = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
			j->needs_kickoff = launch_data_get_bool(obj);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
			why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
			semaphoreitem_new(j, why, NULL);
			j->start_pending = true;
		} else {
			/* Unknown boolean KeepAlive key. */
			(void)job_assumes(j, false);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		/* Choose the true/false reason pair for this dictionary condition. */
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
			sdic.why_true = PATH_EXISTS;
			sdic.why_false = PATH_MISSING;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			(void)job_assumes(j, false);
			break;
		}

		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		/* Unsupported value type for a KeepAlive condition. */
		(void)job_assumes(j, false);
		break;
	}
}
7243
bool
externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event)
{
	/* Create a new external event named evname under event system sys and
	 * attach it to job j. Returns true (non-NULL) on success. The name is
	 * stored inline in the structure's trailing buffer.
	 */
	struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
	if (job_assumes(j, ee != NULL)) {
		/* Keep our own copy of the event payload; the caller retains
		 * ownership of 'event'.
		 */
		ee->event = launch_data_copy(event);
		if (job_assumes(j, ee->event != NULL)) {
			strcpy(ee->name, evname);
			ee->job = j;
			/* IDs increase monotonically within a system. */
			ee->id = sys->curid;
			ee->sys = sys;
			ee->state = false;
			ee->wanted_state = true;
			sys->curid++;

			LIST_INSERT_HEAD(&j->events, ee, job_le);
			LIST_INSERT_HEAD(&sys->events, ee, sys_le);

			job_log(j, LOG_DEBUG, "New event: %s:%s", sys->name, evname);
		} else {
			free(ee);
			ee = NULL;
		}
	}

	/* Notify the event-monitoring helper (pinged even on failure). */
	eventsystem_ping();
	return ee;
}
7272
void
externalevent_delete(struct externalevent *ee)
{
	/* Release the event's payload, unlink it from both the owning job's
	 * list and its event system's list, free it, then ping the helper so
	 * it can pick up the changed event set.
	 */
	launch_data_free(ee->event);
	LIST_REMOVE(ee, job_le);
	LIST_REMOVE(ee, sys_le);

	free(ee);

	eventsystem_ping();
}
7284
void
externalevent_setup(launch_data_t obj, const char *key, void *context)
{
	/* Dictionary-iteration callback: each key/value pair in an event
	 * dictionary becomes one external event on the job carried in the
	 * iteration context.
	 */
	struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
	(void)job_assumes(ctx->j, externalevent_new(ctx->j, ctx->sys, (char *)key, obj));
}
7291
7292 struct externalevent *
7293 externalevent_find(const char *sysname, uint64_t id)
7294 {
7295 struct externalevent *ei = NULL;
7296
7297 struct eventsystem *es = eventsystem_find(sysname);
7298 if (launchd_assumes(es != NULL)) {
7299 LIST_FOREACH(ei, &es->events, sys_le) {
7300 if (ei->id == id) {
7301 break;
7302 }
7303 }
7304 }
7305
7306 return ei;
7307 }
7308
7309 struct eventsystem *
7310 eventsystem_new(const char *name)
7311 {
7312 struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
7313 if (launchd_assumes(es != NULL)) {
7314 strcpy(es->name, name);
7315 LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
7316 }
7317
7318 return es;
7319 }
7320
7321 void
7322 eventsystem_delete(struct eventsystem *es)
7323 {
7324 struct externalevent *ei = NULL;
7325 while ((ei = LIST_FIRST(&es->events))) {
7326 externalevent_delete(ei);
7327 }
7328
7329 LIST_REMOVE(es, global_le);
7330
7331 free(es);
7332 }
7333
7334 void
7335 eventsystem_setup(launch_data_t obj, const char *key, void *context)
7336 {
7337 job_t j = (job_t)context;
7338 if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
7339 return;
7340 }
7341
7342 struct eventsystem *sys = eventsystem_find(key);
7343 if (unlikely(sys == NULL)) {
7344 sys = eventsystem_new(key);
7345 job_log(j, LOG_DEBUG, "New event system: %s", key);
7346 }
7347
7348 if (job_assumes(j, sys != NULL)) {
7349 struct externalevent_iter_ctx ctx = {
7350 .j = j,
7351 .sys = sys,
7352 };
7353 launch_data_dict_iterate(obj, externalevent_setup, &ctx);
7354 sys->has_updates = true;
7355 }
7356 }
7357
7358 struct eventsystem *
7359 eventsystem_find(const char *name)
7360 {
7361 struct eventsystem *esi = NULL;
7362 LIST_FOREACH(esi, &_s_event_systems, global_le) {
7363 if (strcmp(name, esi->name) == 0) {
7364 break;
7365 }
7366 }
7367
7368 return esi;
7369 }
7370
7371 void
7372 eventsystem_ping(void)
7373 {
7374 /* We don't wrap this in an assumes() macro because we could potentially
7375 * call this function many times before the helper job gets back to us
7376 * and gives us another send-once right. So if it's MACH_PORT_NULL, that
7377 * means that we've sent a ping, but the helper hasn't yet checked in to
7378 * get the new set of notifications.
7379 */
7380 if (_s_event_update_port != MACH_PORT_NULL) {
7381 kern_return_t kr = helper_downcall_ping(_s_event_update_port);
7382 if (kr != KERN_SUCCESS) {
7383 runtime_syslog(LOG_NOTICE, "helper_downcall_ping(): kr = 0x%x", kr);
7384 }
7385 _s_event_update_port = MACH_PORT_NULL;
7386 }
7387 }
7388
7389 void
7390 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
7391 {
7392 jobmgr_t jmi, jmn;
7393 job_t ji, jn;
7394
7395
7396 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7397 jobmgr_dispatch_all_semaphores(jmi);
7398 }
7399
7400 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7401 if (!SLIST_EMPTY(&ji->semaphores)) {
7402 job_dispatch(ji, false);
7403 }
7404 }
7405 }
7406
time_t
cronemu(int mon, int mday, int hour, int min)
{
	/* Compute the next wall-clock time matching the crontab-style spec
	 * (-1 in any field acts as a wildcard), starting from the minute
	 * after now.
	 */
	time_t now = time(NULL);
	struct tm scratch = *localtime(&now);

	scratch.tm_isdst = -1;	/* let mktime() decide DST */
	scratch.tm_sec = 0;
	scratch.tm_min++;	/* never match the current minute */

	while (!cronemu_mon(&scratch, mon, mday, hour, min)) {
		/* No match left this year: roll to Jan 1, 00:00 of the next. */
		scratch.tm_year++;
		scratch.tm_mon = 0;
		scratch.tm_mday = 1;
		scratch.tm_hour = 0;
		scratch.tm_min = 0;
		mktime(&scratch);
	}

	return mktime(&scratch);
}
7431
time_t
cronemu_wday(int wday, int hour, int min)
{
	/* Compute the next wall-clock time falling on the given weekday and
	 * matching the hour/minute spec (-1 is a wildcard), starting from the
	 * minute after now.
	 */
	time_t now = time(NULL);
	struct tm scratch = *localtime(&now);

	scratch.tm_isdst = -1;	/* let mktime() decide DST */
	scratch.tm_sec = 0;
	scratch.tm_min++;	/* never match the current minute */

	/* Both 0 and 7 mean Sunday, per crontab convention. */
	if (wday == 7) {
		wday = 0;
	}

	while (!(scratch.tm_wday == wday && cronemu_hour(&scratch, hour, min))) {
		scratch.tm_mday++;
		scratch.tm_hour = 0;
		scratch.tm_min = 0;
		mktime(&scratch);	/* normalize and refresh tm_wday */
	}

	return mktime(&scratch);
}
7458
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	/* Advance *wtm to the next time matching the spec, starting with the
	 * month field. mon == -1 is a wildcard: try successive months and use
	 * mktime()'s field normalization to detect running off the end of the
	 * year — the carry changes tm_mon so carrytest no longer matches, and
	 * we return false so the caller (cronemu) can advance the year.
	 * Only commits to *wtm on success.
	 */
	if (mon == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	/* A fixed month that has already passed cannot match this year. */
	if (mon < wtm->tm_mon) {
		return false;
	}

	/* Jump forward to the requested month; reset all lower-order fields. */
	if (mon > wtm->tm_mon) {
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
7494
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	/* Advance *wtm within the current month to match the day-of-month
	 * spec. mday == -1 is a wildcard: try successive days and rely on
	 * mktime() normalization to detect month overflow — tm_mday snaps
	 * back, carrytest stops matching, and we return false so the caller
	 * can advance the month. Only commits to *wtm on success.
	 */
	if (mday == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	/* A fixed day that has already passed cannot match this month. */
	if (mday < wtm->tm_mday) {
		return false;
	}

	/* Jump forward to the requested day; reset hour and minute. */
	if (mday > wtm->tm_mday) {
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
7528
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	/* Advance *wtm within the current day to match the hour spec.
	 * hour == -1 is a wildcard: try successive hours and rely on mktime()
	 * normalization to detect day overflow — tm_hour wraps, carrytest
	 * stops matching, and we return false so the caller can advance the
	 * day. Only commits to *wtm on success.
	 */
	if (hour == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_min(&workingtm, min)) {
			workingtm.tm_hour++;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_hour;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_hour) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	/* A fixed hour that has already passed cannot match today. */
	if (hour < wtm->tm_hour) {
		return false;
	}

	/* Jump forward to the requested hour; reset the minute. */
	if (hour > wtm->tm_hour) {
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
7560
bool
cronemu_min(struct tm *wtm, int min)
{
	/* Match the minute field of a crontab-style spec against *wtm,
	 * advancing tm_min forward when needed. Returns false when the
	 * requested minute has already passed this hour.
	 */
	if (min == -1) {
		/* Wildcard: any minute satisfies the spec. */
		return true;
	}

	if (min < wtm->tm_min) {
		/* Already past it this hour; the caller must carry. */
		return false;
	}

	if (min > wtm->tm_min) {
		/* Jump forward within the current hour. */
		wtm->tm_min = min;
	}

	return true;
}
7578
kern_return_t
job_mig_setup_shmem(job_t j, mach_port_t *shmem_port)
{
	/* Allocate one page of memory shared with the job and return a memory
	 * entry port for it in *shmem_port. The page holds the vproc shmem
	 * structure used elsewhere for transaction counting (see
	 * job_mig_send_signal's use of vp_shmem_transaction_cnt).
	 */
	memory_object_size_t size_of_page, size_of_page_orig;
	vm_address_t vm_addr;
	kern_return_t kr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Anonymous jobs were not spawned by launchd; they get no region. */
	if (unlikely(j->anonymous)) {
		job_log(j, LOG_DEBUG, "Anonymous job tried to setup shared memory");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Only one region per job. */
	if (unlikely(j->shmem)) {
		job_log(j, LOG_ERR, "Tried to setup shared memory more than once");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_of_page_orig = size_of_page = getpagesize();

	kr = vm_allocate(mach_task_self(), &vm_addr, size_of_page, true);

	if (!job_assumes(j, kr == 0)) {
		return kr;
	}

	j->shmem = (typeof(j->shmem))vm_addr;
	j->shmem->vp_shmem_standby_timeout = j->timeout;

	/* mach_make_memory_entry_64() may adjust size_of_page; verify below
	 * that it still covers exactly the page we allocated.
	 */
	kr = mach_make_memory_entry_64(mach_task_self(), &size_of_page,
			(memory_object_offset_t)vm_addr, VM_PROT_READ|VM_PROT_WRITE, shmem_port, 0);

	if (job_assumes(j, kr == 0)) {
		(void)job_assumes(j, size_of_page == size_of_page_orig);
	}

	/* no need to inherit this in child processes */
	(void)job_assumes(j, vm_inherit(mach_task_self(), (vm_address_t)j->shmem, size_of_page_orig, VM_INHERIT_NONE) == 0);

	return kr;
}
7623
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	/* Legacy mach_init-style server creation: spawn server_cmd as a new
	 * job and return its job port in *server_portp.
	 */
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t js;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	/* The sandbox check needs argv[0]; mach_cmd2argv() returns a single
	 * heap allocation that must be freed on every path.
	 */
	const char **argv = (const char **)mach_cmd2argv(server_cmd);
	if (unlikely(argv == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
		free(argv);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	free(argv);
#endif

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

	if (pid1_magic) {
		/* The PID-1 launchd does not run servers for non-root callers;
		 * redirect them to their per-user launchd.
		 */
		if (ldc->euid || ldc->uid) {
			job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
			return VPROC_ERR_TRY_PER_USER;
		}
	} else {
		if (unlikely(server_uid != getuid())) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; /* zero means "do nothing" */
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (unlikely(js == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
7674
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	/* Deliver a signal — or one of the VPROC_MAGIC_* pseudo-signals — to
	 * the job named by targetlabel on behalf of the calling job j. srp is
	 * the MIG send-once reply right, consumed only on the blocking unload
	 * path.
	 */
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t otherj;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Callers must be root or our own UID, and must themselves be allowed
	 * to create jobs (embedded special-privileged jobs are exempt).
	 */
	if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
#if TARGET_OS_EMBEDDED
		if (!j->embedded_special_privileges) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
#else
		return BOOTSTRAP_NOT_PRIVILEGED;
#endif
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	/* Special-privileged embedded jobs may only touch jobs that run as
	 * the same user.
	 */
	if (j->embedded_special_privileges && strcmp(j->username, otherj->username) != 0) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		/* Pseudo-signal: unload the target job. If it has a running
		 * process, hold the MIG reply until its removal completes.
		 */
		bool do_block = otherj->p;

		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			/* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
			(void)job_assumes(otherj, waiting4removal_new(otherj, srp));
			return MIG_NO_REPLY;
		} else {
			return 0;
		}
	} else if (sig == VPROC_MAGIC_TRYKILL_SIGNAL) {
		/* Pseudo-signal: SIGKILL the target only if it is not in the
		 * middle of a transaction (tracked in the shared-memory page).
		 */
		if (!j->kill_via_shmem) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		if (!j->shmem) {
			j->sent_kill_via_shmem = true;
			(void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
			return 0;
		}

#if !TARGET_OS_EMBEDDED
		/* Atomically claim the transaction count: 0 -> -1 marks the job
		 * as exiting so no new transactions can begin.
		 */
		if (__sync_bool_compare_and_swap(&j->shmem->vp_shmem_transaction_cnt, 0, -1)) {
			j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
			j->sent_kill_via_shmem = true;
			(void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
			return 0;
		}
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	} else if (otherj->p) {
		/* A plain signal number: forward it to the target's process. */
		(void)job_assumes(j, runtime_kill(otherj->p, sig) != -1);
	}

	return 0;
}
7754
7755 kern_return_t
7756 job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
7757 {
7758 struct ldcred *ldc = runtime_get_caller_creds();
7759
7760 if (!launchd_assumes(j != NULL)) {
7761 return BOOTSTRAP_NO_MEMORY;
7762 }
7763
7764 if (!job_assumes(j, j->per_user)) {
7765 return BOOTSTRAP_NOT_PRIVILEGED;
7766 }
7767
7768 return runtime_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
7769 }
7770
7771 kern_return_t
7772 job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
7773 {
7774 struct ldcred *ldc = runtime_get_caller_creds();
7775
7776 if (!launchd_assumes(j != NULL)) {
7777 return BOOTSTRAP_NO_MEMORY;
7778 }
7779
7780 if (unlikely(ldc->euid)) {
7781 return BOOTSTRAP_NOT_PRIVILEGED;
7782 }
7783
7784 return runtime_log_drain(srp, outval, outvalCnt);
7785 }
7786
kern_return_t
job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	/* Get and/or set a "complex" (serialized launch_data) property.
	 * outkey selects what gets packed into the freshly allocated *outval
	 * buffer; inkey selects what to apply from the caller-supplied inval.
	 * Returns 0 on success, 1 on generic failure, or a bootstrap error.
	 */
	const char *action;
	launch_data_t input_obj = NULL, output_obj = NULL;
	size_t data_offset = 0;
	size_t packed_size;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	/* Setting requires root or our own UID; getting has no such check. */
	if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	/* A combined get+set ("swap") is only supported on a single key. */
	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	/* Fixed 20 MB reply buffer; it is released on the outkey == 0 path
	 * and on any failure below.
	 */
	*outvalCnt = 20 * 1024 * 1024;
	mig_allocate(outval, *outvalCnt);
	if (!job_assumes(j, *outval != 0)) {
		return 1;
	}

	/* Note to future maintainers: launch_data_unpack() does NOT return a heap object. The data
	 * is decoded in-place. So do not call launch_data_free() on input_obj.
	 */
	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
		goto out_bad;
	}

	/* First: produce the requested output, if any. */
	switch (outkey) {
	case VPROC_GSK_ENVIRONMENT:
		/* Aggregate environment exported from the other jobs in this manager. */
		if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
			goto out_bad;
		}
		jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ALLJOBS:
		if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
			goto out_bad;
		}
		/* Strip descriptors from the export; they cannot cross this boundary. */
		ipc_revoke_fds(output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_MGR_NAME:
		if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_JOB_OVERRIDES_DB:
		if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_OVERRIDES))) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_JOB_CACHE_DB:
		if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_JOBCACHE))) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		job_log(j, LOG_DEBUG, "Location of job cache database: %s", launch_data_get_string(output_obj));

		launch_data_free(output_obj);
		break;
	case 0:
		/* Nothing to get: return an empty reply and free the buffer. */
		mig_deallocate(*outval, *outvalCnt);
		*outval = 0;
		*outvalCnt = 0;
		break;
	default:
		goto out_bad;
	}

	/* Second: apply the input, if any. */
	if (invalCnt) switch (inkey) {
	case VPROC_GSK_ENVIRONMENT:
		if (launch_data_get_type(input_obj) == LAUNCH_DATA_DICTIONARY) {
			if (j->p) {
				job_log(j, LOG_INFO, "Setting environment for a currently active job. This environment will take effect on the next invocation of the job.");
			}
			launch_data_dict_iterate(input_obj, envitem_setup_one_shot, j);
		}
		break;
	case 0:
		break;
	default:
		goto out_bad;
	}

	mig_deallocate(inval, invalCnt);
	return 0;

out_bad:
	/* Failure path: release the request buffer, the reply buffer (unless
	 * already handed back), and any partially built output object.
	 * NOTE(review): output_obj is not reset to NULL after the successful
	 * launch_data_free() calls in the outkey cases above, so reaching
	 * out_bad from the inkey switch after a successful "get" would free
	 * it a second time — confirm whether that path is reachable/intended.
	 */
	mig_deallocate(inval, invalCnt);
	if (*outval) {
		mig_deallocate(*outval, *outvalCnt);
	}
	if (output_obj) {
		launch_data_free(output_obj);
	}

	return 1;
}
7927
kern_return_t
job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
{
	/* Get and/or set a scalar property of the job (or of launchd itself).
	 * outkey selects the value read into *outval; inkey selects the value
	 * written from inval. Returns 0 on success, nonzero on failure.
	 */
	const char *action;
	kern_return_t kr = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	int oldmask;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Setting requires root or our own UID. */
	if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* A combined get+set ("swap") is only supported on a single key. */
	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	/* The "get" half. */
	switch (outkey) {
	case VPROC_GSK_ABANDON_PROCESS_GROUP:
		*outval = j->abandon_pg;
		break;
	case VPROC_GSK_LAST_EXIT_STATUS:
		*outval = j->last_exit_status;
		break;
	case VPROC_GSK_MGR_UID:
		*outval = getuid();
		break;
	case VPROC_GSK_MGR_PID:
		*outval = getpid();
		break;
	case VPROC_GSK_IS_MANAGED:
		*outval = j->anonymous ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		*outval = !j->ondemand;
		break;
	case VPROC_GSK_START_INTERVAL:
		*outval = j->start_interval;
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		*outval = j->timeout;
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		*outval = j->exit_timeout;
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		/* There is no read-only accessor: set a throwaway mask to learn
		 * the old one, then restore it.
		 */
		oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
		*outval = oldmask;
		runtime_setlogmask(oldmask);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		/* Same read-modify-restore dance as the log mask. */
		oldmask = umask(0);
		*outval = oldmask;
		umask(oldmask);
		break;
	case VPROC_GSK_TRANSACTIONS_ENABLED:
		job_log(j, LOG_DEBUG, "Reading transaction model status.");
		*outval = j->kill_via_shmem;
		break;
	case VPROC_GSK_WAITFORDEBUGGER:
		*outval = j->wait4debugger;
		break;
	case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
		*outval = j->embedded_special_privileges;
		break;
	case 0:
		*outval = 0;
		break;
	default:
		kr = 1;
		break;
	}

	/* The "set" half. */
	switch (inkey) {
	case VPROC_GSK_ABANDON_PROCESS_GROUP:
		j->abandon_pg = (bool)inval;
		break;
	case VPROC_GSK_GLOBAL_ON_DEMAND:
		job_log(j, LOG_DEBUG, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
		kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		j->ondemand = !inval;
		break;
	case VPROC_GSK_START_INTERVAL:
		if (inval > UINT32_MAX || inval < 0) {
			kr = 1;
		} else if (inval) {
			/* Only the first registration takes a weak runtime ref; a
			 * re-registration just rearms the timer below.
			 */
			if (j->start_interval == 0) {
				runtime_add_weak_ref();
			}
			j->start_interval = (typeof(j->start_interval)) inval;
			(void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
		} else if (j->start_interval) {
			(void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
			/* NOTE(review): this inner check is redundant — the
			 * enclosing branch already guarantees it is nonzero.
			 */
			if (j->start_interval != 0) {
				runtime_del_weak_ref();
			}
			j->start_interval = 0;
		}
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			j->timeout = (typeof(j->timeout)) inval;
		}
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			j->exit_timeout = (typeof(j->exit_timeout)) inval;
		}
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			runtime_setlogmask((int) inval);
		}
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		launchd_assert(sizeof (mode_t) == 2);
		if (inval < 0 || inval > UINT16_MAX) {
			kr = 1;
		} else {
#if HAVE_SANDBOX
			if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
				kr = 1;
			} else {
				umask((mode_t) inval);
			}
#endif
			/* NOTE(review): when HAVE_SANDBOX is 0, the umask() call is
			 * compiled out and an in-range request is silently ignored
			 * (kr stays 0) — confirm this is intended.
			 */
		}
		break;
	case VPROC_GSK_TRANSACTIONS_ENABLED:
		/* Joining the transaction model is one-way. */
		if (!job_assumes(j, inval != 0)) {
			job_log(j, LOG_WARNING, "Attempt to unregister from transaction model. This is not supported.");
			kr = 1;
		} else {
			j->kill_via_shmem = (bool)inval;
		}
		break;
	case VPROC_GSK_WEIRD_BOOTSTRAP:
		if (job_assumes(j, j->weird_bootstrap)) {
			job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");

			mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);

			if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
				mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
			}

			(void)job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
			j->weird_bootstrap = false;
		}
		break;
	case VPROC_GSK_WAITFORDEBUGGER:
		j->wait4debugger_oneshot = inval;
		break;
	case VPROC_GSK_PERUSER_SUSPEND:
		/* Suspend the per-user launchd for UID inval; PID 1 + root only. */
		if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
			mach_port_t junk = MACH_PORT_NULL;
			job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
			if (job_assumes(j, jpu != NULL)) {
				struct suspended_peruser *spi = NULL;
				LIST_FOREACH(spi, &j->suspended_perusers, sle) {
					if ((int64_t)(spi->j->mach_uid) == inval) {
						job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
						break;
					}
				}

				if (spi == NULL) {
					job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
					spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
					if (job_assumes(j, spi != NULL)) {
						/* Stop listening for events.
						 *
						 * See <rdar://problem/9014146>.
						 */
						if (jpu->peruser_suspend_count == 0) {
							job_ignore(jpu);
						}

						spi->j = jpu;
						spi->j->peruser_suspend_count++;
						LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
						job_stop(spi->j);
						*outval = jpu->p;
					} else {
						kr = BOOTSTRAP_NO_MEMORY;
					}
				}
			}
		} else {
			kr = 1;
		}
		break;
	case VPROC_GSK_PERUSER_RESUME:
		/* Resume a per-user launchd this job previously suspended. */
		if (job_assumes(j, pid1_magic == true)) {
			struct suspended_peruser *spi = NULL, *spt = NULL;
			LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
				if ((int64_t)(spi->j->mach_uid) == inval) {
					spi->j->peruser_suspend_count--;
					LIST_REMOVE(spi, sle);
					job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
					break;
				}
			}

			if (!job_assumes(j, spi != NULL)) {
				job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
				kr = BOOTSTRAP_NOT_PRIVILEGED;
			} else if (spi->j->peruser_suspend_count == 0) {
				/* Last suspender is gone: watch for events again and
				 * let the job be redispatched.
				 */
				job_watch(spi->j);
				job_dispatch(spi->j, false);
				free(spi);
			}
		} else {
			kr = 1;
		}
		break;
	case 0:
		break;
	default:
		kr = 1;
		break;
	}

	return kr;
}
8174
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
{
	/* Called by a freshly forked child: install its exception port and
	 * the registered Mach special ports, and return the job's audit
	 * session port (if applicable) in *asport.
	 */
	struct machservice *ms;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	job_setup_exception_port(j, child_task);

	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			/* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
			continue;
		}

		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);

		if (unlikely(errno)) {
			int desired_log_level = LOG_ERR;

			/* Failures for anonymous jobs are expected to be less
			 * interesting; log them at a lower severity.
			 */
			if (j->anonymous) {
				/* 5338127 */

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* MIG will not zero-initialize this pointer, so we must always do so. See
	 * <rdar://problem/8562593>.
	 */
	*asport = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
	if (!j->anonymous) {
		/* XPC services will spawn into the root security session by default.
		 * xpcproxy will switch them away if needed.
		 */
		if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
			job_log(j, LOG_DEBUG, "Returning j->asport: %u", j->asport);
			*asport = j->asport;
		}
	}
#endif
	/* Drop our reference on the child's task port. */
	(void)job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);

	return 0;
}
8232
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	/* Initiate a shutdown/reboot on behalf of the calling job; only the
	 * PID-1 launchd honors this. The caller's process ancestry is logged
	 * for diagnostics before launchd_shutdown() is invoked.
	 */
	char who_started_the_reboot[2048] = "";
	struct proc_bsdshortinfo proc;
	struct ldcred *ldc = runtime_get_caller_creds();
	pid_t pid_to_log;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!pid1_magic)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Root only, except special-privileged jobs on embedded. */
#if !TARGET_OS_EMBEDDED
	if (unlikely(ldc->euid)) {
#else
	if (unlikely(ldc->euid) && !j->embedded_special_privileges) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Walk the caller's parent chain, appending "name[pid] ->" for each
	 * ancestor until PID 0 is reached.
	 */
	for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
		size_t who_offset;
		if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				job_assumes(j, errno == 0);
			}
			return 1;
		}

		/* Guard against a process that is its own parent, which would
		 * otherwise loop forever.
		 */
		if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
			job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
			snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
			break;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;
	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
	launchd_shutdown();

	return 0;
}
8283
8284 kern_return_t
8285 job_mig_getsocket(job_t j, name_t spr)
8286 {
8287 if (!launchd_assumes(j != NULL)) {
8288 return BOOTSTRAP_NO_MEMORY;
8289 }
8290
8291 if (j->deny_job_creation) {
8292 return BOOTSTRAP_NOT_PRIVILEGED;
8293 }
8294
8295 #if HAVE_SANDBOX
8296 struct ldcred *ldc = runtime_get_caller_creds();
8297 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8298 return BOOTSTRAP_NOT_PRIVILEGED;
8299 }
8300 #endif
8301
8302 ipc_server_init();
8303
8304 if (unlikely(!sockpath)) {
8305 return BOOTSTRAP_NO_MEMORY;
8306 }
8307
8308 strncpy(spr, sockpath, sizeof(name_t));
8309
8310 return BOOTSTRAP_SUCCESS;
8311 }
8312
8313 kern_return_t
8314 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8315 {
8316 if (!launchd_assumes(j != NULL)) {
8317 return BOOTSTRAP_NO_MEMORY;
8318 }
8319
8320 if ((errno = err)) {
8321 job_log_error(j, pri, "%s", msg);
8322 } else {
8323 job_log(j, pri, "%s", msg);
8324 }
8325
8326 return 0;
8327 }
8328
8329 job_t
8330 jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
8331 {
8332 job_t ji = NULL;
8333 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8334 if (!ji->per_user) {
8335 continue;
8336 }
8337 if (ji->mach_uid != which_user) {
8338 continue;
8339 }
8340 if (SLIST_EMPTY(&ji->machservices)) {
8341 continue;
8342 }
8343 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8344 continue;
8345 }
8346 break;
8347 }
8348
8349 if (unlikely(ji == NULL)) {
8350 struct machservice *ms;
8351 char lbuf[1024];
8352
8353 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
8354
8355 sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
8356
8357 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
8358
8359 if (ji != NULL) {
8360 auditinfo_addr_t auinfo = {
8361 .ai_termid = { .at_type = AU_IPv4 },
8362 .ai_auid = which_user,
8363 .ai_asid = AU_ASSIGN_ASID,
8364 };
8365
8366 if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8367 job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8368 (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8369
8370 /* Kinda lame that we have to do this, but we can't create an
8371 * audit session without joining it.
8372 */
8373 (void)job_assumes(ji, audit_session_join(g_audit_session_port));
8374 ji->asid = auinfo.ai_asid;
8375 } else {
8376 job_log(ji, LOG_WARNING, "Could not set audit session!");
8377 job_remove(ji);
8378 return NULL;
8379 }
8380
8381 ji->mach_uid = which_user;
8382 ji->per_user = true;
8383 ji->kill_via_shmem = true;
8384
8385 struct stat sb;
8386 char pu_db[PATH_MAX];
8387 snprintf(pu_db, sizeof(pu_db), LAUNCHD_DB_PREFIX "/%s", lbuf);
8388
8389 bool created = false;
8390 int err = stat(pu_db, &sb);
8391 if ((err == -1 && errno == ENOENT) || (err == 0 && !S_ISDIR(sb.st_mode))) {
8392 if (err == 0) {
8393 char move_aside[PATH_MAX];
8394 snprintf(move_aside, sizeof(move_aside), LAUNCHD_DB_PREFIX "/%s.movedaside", lbuf);
8395
8396 (void)job_assumes(ji, rename(pu_db, move_aside) != -1);
8397 }
8398
8399 (void)job_assumes(ji, mkdir(pu_db, S_IRWXU) != -1);
8400 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
8401 created = true;
8402 }
8403
8404 if (!created) {
8405 if (!job_assumes(ji, sb.st_uid == which_user)) {
8406 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
8407 }
8408 if (!job_assumes(ji, sb.st_gid == 0)) {
8409 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
8410 }
8411 if (!job_assumes(ji, sb.st_mode == (S_IRWXU | S_IFDIR))) {
8412 (void)job_assumes(ji, chmod(pu_db, S_IRWXU) != -1);
8413 }
8414 }
8415
8416 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8417 job_remove(ji);
8418 ji = NULL;
8419 } else {
8420 ms->per_user_hack = true;
8421 ms->hide = true;
8422
8423 ji = job_dispatch(ji, false);
8424 }
8425 }
8426 } else {
8427 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
8428 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8429 }
8430
8431 return ji;
8432 }
8433
8434 kern_return_t
8435 job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
8436 {
8437 struct ldcred *ldc = runtime_get_caller_creds();
8438 job_t jpu;
8439
8440 #if TARGET_OS_EMBEDDED
8441 /* There is no need for per-user launchd's on embedded. */
8442 job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
8443 return BOOTSTRAP_NOT_PRIVILEGED;
8444 #endif
8445
8446 #if HAVE_SANDBOX
8447 if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
8448 return BOOTSTRAP_NOT_PRIVILEGED;
8449 }
8450 #endif
8451
8452 if (!launchd_assumes(j != NULL)) {
8453 return BOOTSTRAP_NO_MEMORY;
8454 }
8455
8456 job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
8457
8458 if (unlikely(!pid1_magic)) {
8459 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
8460 return BOOTSTRAP_NOT_PRIVILEGED;
8461 }
8462
8463 if (ldc->euid || ldc->uid) {
8464 which_user = ldc->euid ?: ldc->uid;
8465 }
8466
8467 *up_cont = MACH_PORT_NULL;
8468
8469 jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);
8470
8471 return 0;
8472 }
8473
/* MIG handler for bootstrap_check_in(): hand the receive right for
 * 'servicename' to the job that owns it.
 *
 * flags may carry BOOTSTRAP_PER_PID_SERVICE (service is scoped to the
 * caller's PID) and BOOTSTRAP_STRICT_CHECKIN (the service must pre-exist,
 * belong to the caller and be inactive). On success the service's port is
 * returned via 'serviceportp'; for dedicated instances, the job's instance
 * UUID is copied back into 'instance_id'.
 */
kern_return_t
job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
{
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
	struct ldcred *ldc = runtime_get_caller_creds();
	struct machservice *ms = NULL;
	job_t jo;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->dedicated_instance) {
		/* Dedicated instances may only check in their own services; report
		 * the instance UUID back to the caller on a match.
		 */
		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
				uuid_copy(instance_id, j->instance_id);
				ms = msi;
				break;
			}
		}
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
	}

	if (strict) {
		/* Strict check-in: never create the service implicitly. */
		if (likely(ms != NULL)) {
			if (ms->job != j) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			} else if (ms->isActive) {
				return BOOTSTRAP_SERVICE_ACTIVE;
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else if (ms == NULL) {
		/* Non-strict check-in of an unknown service: create it on the fly
		 * for the caller (legacy behavior).
		 */
		if (job_assumes(j, !j->dedicated_instance)) {
			*serviceportp = MACH_PORT_NULL;

			if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
				return BOOTSTRAP_NO_MEMORY;
			}

			/* Treat this like a legacy job. */
			if (!j->legacy_mach_job) {
				ms->isActive = true;
				ms->recv = false;
			}

			if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
				job_log(j, LOG_SCOLDING, "Please add the following service to the configuration file for this job: %s", servicename);
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else {
		/* The service exists: it must belong to the caller and be inactive. */
		if (unlikely((jo = machservice_job(ms)) != j)) {
			static pid_t last_warned_pid;

			/* Rate-limit the hijack warning to once per offending PID. */
			if (last_warned_pid != ldc->pid) {
				job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
				last_warned_pid = ldc->pid;
			}

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (unlikely(machservice_active(ms))) {
			job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
	}

	job_checkin(j);
	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
8555
/* MIG handler for bootstrap_register(): (re)register 'servicename' with the
 * send right 'serviceport' supplied by the caller. Passing MACH_PORT_NULL
 * effectively deregisters the service. Any existing inactive registration
 * owned by the caller is replaced.
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
		job_log(j, LOG_SCOLDING, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
	}

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
		if (pid1_magic) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);

	if (unlikely(ms)) {
		/* An existing registration can only be replaced by its owner while
		 * the service is inactive and was itself registered (not checked in).
		 */
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		if (ms->recv && (serviceport != MACH_PORT_NULL)) {
			job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		job_checkin(j);
		/* Drop the old registration; a new one is created below. */
		machservice_delete(j, ms, false);
	}

	if (likely(serviceport != MACH_PORT_NULL)) {
		/* Note: assignment inside the condition is intentional here. */
		if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}


	return BOOTSTRAP_SUCCESS;
}
8617
/* MIG handler for bootstrap_look_up2(): resolve 'servicename' to a send
 * right, returned via 'serviceportp'.
 *
 * flags select per-PID lookup, strict lookup, privileged-server lookup and
 * specific-instance lookup (using 'instance_id'). Unresolved lookups may be
 * forwarded up the bootstrap hierarchy via 'srp' (in which case MIG_NO_REPLY
 * is returned because the forward consumed the reply port), or redirected to
 * the caller's per-user context with VPROC_ERR_TRY_PER_USER.
 */
kern_return_t
job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
{
	struct machservice *ms = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr;
	bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
	bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
	bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	bool xpc_req = j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN;

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	/* Non-root anonymous callers at the root bootstrap are sent to their
	 * per-user launchd instead.
	 */
	if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

#if HAVE_SANDBOX
	/* We don't do sandbox checking for XPC domains because, by definition, all
	 * the services within your domain should be accessibly to you.
	 */
	if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (per_pid_lookup) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		if (xpc_req) {
			/* Requests from XPC domains stay local. */
			ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
		} else {
			/* A strict lookup which is privileged won't even bother trying to
			 * find a service if we're not hosting the root Mach bootstrap.
			 */
			if (strict_lookup && privileged) {
				if (inherited_bootstrap_port == MACH_PORT_NULL) {
					ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
				}
			} else {
				ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
			}
		}
	}

	if (likely(ms)) {
		/* Follow aliases to the real service record. */
		ms = ms->alias ? ms->alias : ms;
		if (unlikely(specific_instance && ms->job->multiple_instances)) {
			/* Multi-instance job: find (or spawn) the sub-job matching the
			 * requested instance UUID, then look the service up on it.
			 */
			job_t ji = NULL;
			job_t instance = NULL;
			LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
				if (uuid_compare(instance_id, ji->instance_id) == 0) {
					instance = ji;
					break;
				}
			}

			if (unlikely(instance == NULL)) {
				job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
				instance = job_new_subjob(ms->job, instance_id);
				if (job_assumes(j, instance != NULL)) {
					/* Disable this support for now. We only support having
					 * multi-instance jobs within private XPC domains.
					 */
#if 0
					/* If the job is multi-instance, in a singleton XPC domain
					 * and the request is not coming from within that singleton
					 * domain, we need to alias the new job into the requesting
					 * domain.
					 */
					if (!j->mgr->xpc_singleton && xpc_req) {
						(void)job_assumes(instance, job_new_alias(j->mgr, instance));
					}
#endif
					job_dispatch(instance, false);
				}
			}

			ms = NULL;
			if (job_assumes(j, instance != NULL)) {
				struct machservice *msi = NULL;
				SLIST_FOREACH(msi, &instance->machservices, sle) {
					/* sizeof(servicename) will return the size of a pointer, even though it's
					 * an array type, because when passing arrays as parameters in C, they
					 * implicitly degrade to pointers.
					 */
					if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
						ms = msi;
						break;
					}
				}
			}
		} else {
			/* Hidden-and-inactive services and the per-user bootstrap hack
			 * services are not visible to ordinary lookups.
			 */
			if (machservice_hidden(ms) && !machservice_active(ms)) {
				ms = NULL;
			} else if (unlikely(ms->per_user_hack)) {
				ms = NULL;
			}
		}
	}

	if (likely(ms)) {
		(void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);

		if (unlikely(!per_pid_lookup && j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user)) {
			/* we need to think more about the per_pid_lookup logic before logging about repeated lookups */
			job_log(j, LOG_DEBUG, "Performance: Please fix the framework that talks to \"%s\" to cache the Mach port for service: %s", ms->job->label, servicename);
		}

		j->lastlookup = ms;
		j->lastlookup_gennum = ms->gen_num;

		*serviceportp = machservice_port(ms);

		kr = BOOTSTRAP_SUCCESS;
	} else if (strict_lookup && !privileged) {
		/* Hack: We need to simulate XPC's desire not to establish a hierarchy. So if
		 * XPC is doing the lookup, and it's not a privileged lookup, we won't forward.
		 * But if it is a privileged lookup (that is, was looked up in XPC_DOMAIN_LOCAL_SYSTEM)
		 * then we must forward.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	} else if (inherited_bootstrap_port != MACH_PORT_NULL) {
		/* Requests from within an XPC domain don't get forwarded. */
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		/* Clients potentially check the audit token of the reply to verify that the returned send right is trustworthy. */
		(void)job_assumes(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags) == 0);
		/* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
		return MIG_NO_REPLY;
	} else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/*
		 * 5240036 Should start background session when a lookup of CCacheServer occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
		 * If we find a EUID that isn't root, we force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
8771
8772 kern_return_t
8773 job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
8774 {
8775 if (!launchd_assumes(j != NULL)) {
8776 return BOOTSTRAP_NO_MEMORY;
8777 }
8778
8779 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
8780 jobmgr_t jm = j->mgr;
8781
8782 if (jobmgr_parent(jm)) {
8783 *parentport = jobmgr_parent(jm)->jm_port;
8784 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
8785 *parentport = jm->jm_port;
8786 } else {
8787 (void)job_assumes(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp) == 0);
8788 /* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
8789 return MIG_NO_REPLY;
8790 }
8791 return BOOTSTRAP_SUCCESS;
8792 }
8793
8794 kern_return_t
8795 job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
8796 {
8797 if (!j) {
8798 return BOOTSTRAP_NO_MEMORY;
8799 }
8800
8801 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8802 *rootbsp = root_jobmgr->jm_port;
8803 (void)job_assumes(j, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
8804 } else {
8805 *rootbsp = inherited_bootstrap_port;
8806 (void)job_assumes(j, launchd_mport_copy_send(inherited_bootstrap_port) == KERN_SUCCESS);
8807 }
8808
8809 return BOOTSTRAP_SUCCESS;
8810 }
8811
/* MIG handler for bootstrap_info(): enumerate the Mach services visible to
 * the caller's namespace. Returns three parallel MIG-allocated arrays —
 * service names, hosting job labels (or job-manager short descriptions) and
 * activity status — all of the same count. Per-PID services are excluded.
 * On allocation failure everything is rolled back and BOOTSTRAP_NO_MEMORY is
 * returned.
 */
kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt, name_array_t *servicejobsp, unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt, uint64_t flags)
{
	name_array_t service_names = NULL;
	name_array_t service_jobs = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	jobmgr_t jm;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* In a flat namespace, enumerate the root manager unless the caller is in
	 * an explicit subset or specifically asked for its local namespace.
	 */
	if (g_flat_mach_namespace) {
		if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
			jm = j->mgr;
		} else {
			jm = root_jobmgr;
		}
	} else {
		jm = j->mgr;
	}

	/* First pass: count the non-per-PID services so we can size the arrays. */
	unsigned int i = 0;
	struct machservice *msi = NULL;
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			cnt += !msi->per_pid ? 1 : 0;
		}
	}

	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!job_assumes(j, service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
	if (!job_assumes(j, service_jobs != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!job_assumes(j, service_actives != NULL)) {
		goto out_bad;
	}

	/* Second pass: fill the parallel arrays. */
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			if (!msi->per_pid) {
				strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
				/* Report against the aliased (real) service where one exists. */
				msi = msi->alias ? msi->alias : msi;
				if (msi->job->mgr->shortdesc) {
					strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
				} else {
					strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
				}
				service_actives[cnt2] = machservice_status(msi);
				cnt2++;
			}
		}
	}

	(void)job_assumes(j, cnt == cnt2);

out:
	*servicenamesp = service_names;
	*servicejobsp = service_jobs;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_jobs) {
		mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
8901
8902 kern_return_t
8903 job_mig_lookup_children(job_t j, mach_port_array_t *child_ports, mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names, mach_msg_type_number_t *child_names_cnt, bootstrap_property_array_t *child_properties,mach_msg_type_number_t *child_properties_cnt)
8904 {
8905 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
8906 if (!launchd_assumes(j != NULL)) {
8907 return BOOTSTRAP_NO_MEMORY;
8908 }
8909
8910 struct ldcred *ldc = runtime_get_caller_creds();
8911
8912 /* Only allow root processes to look up children, even if we're in the per-user launchd.
8913 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
8914 * in a non-flat namespace.
8915 */
8916 if (ldc->euid != 0) {
8917 job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
8918 return BOOTSTRAP_NOT_PRIVILEGED;
8919 }
8920
8921 unsigned int cnt = 0;
8922
8923 jobmgr_t jmr = j->mgr;
8924 jobmgr_t jmi = NULL;
8925 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8926 cnt++;
8927 }
8928
8929 /* Find our per-user launchds if we're PID 1. */
8930 job_t ji = NULL;
8931 if (pid1_magic) {
8932 LIST_FOREACH(ji, &jmr->jobs, sle) {
8933 cnt += ji->per_user ? 1 : 0;
8934 }
8935 }
8936
8937 if (cnt == 0) {
8938 return BOOTSTRAP_NO_CHILDREN;
8939 }
8940
8941 mach_port_array_t _child_ports = NULL;
8942 mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
8943 if (!job_assumes(j, _child_ports != NULL)) {
8944 kr = BOOTSTRAP_NO_MEMORY;
8945 goto out_bad;
8946 }
8947
8948 name_array_t _child_names = NULL;
8949 mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
8950 if (!job_assumes(j, _child_names != NULL)) {
8951 kr = BOOTSTRAP_NO_MEMORY;
8952 goto out_bad;
8953 }
8954
8955 bootstrap_property_array_t _child_properties = NULL;
8956 mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
8957 if (!job_assumes(j, _child_properties != NULL)) {
8958 kr = BOOTSTRAP_NO_MEMORY;
8959 goto out_bad;
8960 }
8961
8962 unsigned int cnt2 = 0;
8963 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8964 if (jobmgr_assumes(jmi, launchd_mport_make_send(jmi->jm_port) == KERN_SUCCESS)) {
8965 _child_ports[cnt2] = jmi->jm_port;
8966 } else {
8967 _child_ports[cnt2] = MACH_PORT_NULL;
8968 }
8969
8970 strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
8971 _child_properties[cnt2] = jmi->properties;
8972
8973 cnt2++;
8974 }
8975
8976 if (pid1_magic) LIST_FOREACH( ji, &jmr->jobs, sle) {
8977 if (ji->per_user) {
8978 if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
8979 mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));
8980
8981 if (job_assumes(ji, launchd_mport_copy_send(port) == KERN_SUCCESS)) {
8982 _child_ports[cnt2] = port;
8983 } else {
8984 _child_ports[cnt2] = MACH_PORT_NULL;
8985 }
8986 } else {
8987 _child_ports[cnt2] = MACH_PORT_NULL;
8988 }
8989
8990 strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
8991 _child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;
8992
8993 cnt2++;
8994 }
8995 }
8996
8997 *child_names_cnt = cnt;
8998 *child_ports_cnt = cnt;
8999 *child_properties_cnt = cnt;
9000
9001 *child_names = _child_names;
9002 *child_ports = _child_ports;
9003 *child_properties = _child_properties;
9004
9005 unsigned int i = 0;
9006 for (i = 0; i < cnt; i++) {
9007 job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
9008 }
9009
9010 return BOOTSTRAP_SUCCESS;
9011 out_bad:
9012 if (_child_ports) {
9013 mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
9014 }
9015
9016 if (_child_names) {
9017 mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_ports[0]));
9018 }
9019
9020 if (_child_properties) {
9021 mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
9022 }
9023
9024 return kr;
9025 }
9026
9027 kern_return_t
9028 job_mig_transaction_count_for_pid(job_t j, pid_t p, int32_t *cnt, boolean_t *condemned)
9029 {
9030 if (!j) {
9031 return BOOTSTRAP_NO_MEMORY;
9032 }
9033
9034 kern_return_t kr = KERN_FAILURE;
9035 struct ldcred *ldc = runtime_get_caller_creds();
9036 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
9037 return BOOTSTRAP_NOT_PRIVILEGED;
9038 }
9039
9040 job_t j_for_pid = jobmgr_find_by_pid_deep(j->mgr, p, false);
9041 if (j_for_pid) {
9042 if (j_for_pid->kill_via_shmem) {
9043 if (j_for_pid->shmem) {
9044 *cnt = j_for_pid->shmem->vp_shmem_transaction_cnt;
9045 *condemned = j_for_pid->shmem->vp_shmem_flags & VPROC_SHMEM_EXITING;
9046 *cnt += *condemned ? 1 : 0;
9047 } else {
9048 *cnt = 0;
9049 *condemned = false;
9050 }
9051
9052 kr = BOOTSTRAP_SUCCESS;
9053 } else {
9054 kr = BOOTSTRAP_NO_MEMORY;
9055 }
9056 } else {
9057 kr = BOOTSTRAP_UNKNOWN_SERVICE;
9058 }
9059
9060 return kr;
9061 }
9062
9063 kern_return_t
9064 job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
9065 {
9066 struct ldcred *ldc = runtime_get_caller_creds();
9067 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
9068 return BOOTSTRAP_NOT_PRIVILEGED;
9069 }
9070
9071 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
9072 * directly by launchd as agents.
9073 */
9074 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
9075 if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
9076 *managed = true;
9077 }
9078
9079 return BOOTSTRAP_SUCCESS;
9080 }
9081
9082 kern_return_t
9083 job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
9084 {
9085 if (!j) {
9086 return BOOTSTRAP_NO_MEMORY;
9087 }
9088
9089 struct ldcred *ldc = runtime_get_caller_creds();
9090 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
9091
9092 #if HAVE_SANDBOX
9093 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
9094 return BOOTSTRAP_NOT_PRIVILEGED;
9095 }
9096 #endif
9097
9098 mach_port_t _mp = MACH_PORT_NULL;
9099 if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
9100 job_t target_j = job_find(NULL, label);
9101 if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
9102 if (target_j->j_port == MACH_PORT_NULL) {
9103 (void)job_assumes(target_j, job_setup_machport(target_j) == true);
9104 }
9105
9106 _mp = target_j->j_port;
9107 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
9108 } else {
9109 kr = BOOTSTRAP_NO_MEMORY;
9110 }
9111 }
9112
9113 *mp = _mp;
9114 return kr;
9115 }
9116
#if !TARGET_OS_EMBEDDED
/* MIG handler: attach the audit-session port 'asport' to every job that is
 * waiting on the security session identified by 'uuid', then dispatch those
 * jobs. Each matched job gets its own send right to the session port; the
 * extra right belonging to this MiG call is released at the end.
 */
kern_return_t
job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	uuid_string_t uuid_str;
	uuid_unparse(uuid, uuid_str);
	job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);

	job_t ji = NULL, jt = NULL;
	LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
		/* NOTE(review): uuid_str2 is computed but never used — looks like
		 * leftover debugging scaffolding.
		 */
		uuid_string_t uuid_str2;
		uuid_unparse(ji->expected_audit_uuid, uuid_str2);

		if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
			uuid_clear(ji->expected_audit_uuid);
			if (asport != MACH_PORT_NULL ) {
				job_log(ji, LOG_DEBUG, "Job should join session with port %u", asport);
				/* Each joining job holds its own send right to the session. */
				(void)job_assumes(j, launchd_mport_copy_send(asport) == KERN_SUCCESS);
			} else {
				job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
			}

			ji->asport = asport;
			LIST_REMOVE(ji, needing_session_sle);
			job_dispatch(ji, false);
		}
	}

	/* Each job that the session port was set for holds a reference. At the end of
	 * the loop, there will be one extra reference belonging to this MiG protocol.
	 * We need to release it so that the session goes away when all the jobs
	 * referencing it are unloaded.
	 */
	(void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);

	return KERN_SUCCESS;
}
#else
/* Embedded platforms have no security sessions; accept and ignore. */
kern_return_t
job_mig_set_security_session(job_t j __attribute__((unused)), uuid_t uuid __attribute__((unused)), mach_port_t session __attribute__((unused)))
{
	return KERN_SUCCESS;
}
#endif
9165
9166 jobmgr_t
9167 jobmgr_find_by_name(jobmgr_t jm, const char *where)
9168 {
9169 jobmgr_t jmi, jmi2;
9170
9171 /* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
9172 if (where == NULL) {
9173 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9174 where = VPROCMGR_SESSION_LOGINWINDOW;
9175 } else {
9176 where = VPROCMGR_SESSION_AQUA;
9177 }
9178 }
9179
9180 if (strcasecmp(jm->name, where) == 0) {
9181 return jm;
9182 }
9183
9184 if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
9185 jmi = root_jobmgr;
9186 goto jm_found;
9187 }
9188
9189 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9190 if (unlikely(jmi->shutting_down)) {
9191 continue;
9192 } else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
9193 continue;
9194 } else if (strcasecmp(jmi->name, where) == 0) {
9195 goto jm_found;
9196 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
9197 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
9198 if (strcasecmp(jmi2->name, where) == 0) {
9199 jmi = jmi2;
9200 goto jm_found;
9201 }
9202 }
9203 }
9204 }
9205
9206 jm_found:
9207 return jmi;
9208 }
9209
/* MIG handler: adopt an entire bootstrap subset from another launchd. Grabs
 * the subset's request/receive ports and its registered services from
 * 'target_subset', wraps them in a new job manager under the caller's
 * manager, and re-imports each service onto the job that owns it (matched by
 * PID). With LAUNCH_GLOBAL_ON_DEMAND, the calling job itself is re-homed
 * into the new manager. On failure the partially-built manager is shut down.
 */
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	jobmgr_t jmr = NULL;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Refuse to adopt one of our own bootstraps. */
	if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	/* Pull the subset's ports and a description of its services. */
	kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);

	if (!job_assumes(j, kr == 0)) {
		goto out;
	}

	/* One descriptor dictionary per transferred service port. */
	launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;

	/* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
	 * processing an IPC request, we'll do this action before the new job manager can get any IPC
	 * requests. This serialization is guaranteed since we are single-threaded in that respect.
	 */
	if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
		/* This is so awful. */
		/* Remove the job from its current job manager. */
		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, pid_hash_sle);

		/* Put the job into the target job manager. */
		LIST_INSERT_HEAD(&jmr->jobs, j, sle);
		LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

		j->mgr = jmr;
		job_set_global_on_demand(j, true);

		if (!j->holds_ref) {
			j->holds_ref = true;
			runtime_add_ref();
		}
	}

	/* Re-attach every transferred service port to the job (matched by PID)
	 * that owns it in the new manager.
	 */
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		(void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (unlikely(!j_for_service)) {
			/* The PID probably exited */
			(void)job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
			continue;
		}

		if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	/* On success we own and must release the caller-supplied rights; on
	 * failure, tear down the half-constructed manager instead.
	 */
	if (kr == 0) {
		if (target_subset) {
			(void)job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
		}
		if (asport) {
			(void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
		}
	} else if (jmr) {
		jobmgr_shutdown(jmr);
	}

	return kr;
}
9326
/* MIG server routine: initialize the caller's job manager as a session of the
 * given type (Aqua, LoginWindow, StandardIO, ...).
 *
 * j: the job making the request (NULL when the sender already exited).
 * session_type: MIG name_t giving the requested session name; copied into the
 *               manager's name_init.
 * asport: audit session port adopted by the session job on success.
 *
 * Returns BOOTSTRAP_SUCCESS, BOOTSTRAP_NOT_PRIVILEGED if the session was
 * already initialized, or BOOTSTRAP_NO_MEMORY on failure.
 */
kern_return_t
job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_t j2;

	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (j->mgr->session_initialized) {
		job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
		kr = BOOTSTRAP_NOT_PRIVILEGED;
	} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		jobmgr_t jmi;

		/*
		 * 5330262
		 *
		 * We're working around LoginWindow and the WindowServer.
		 *
		 * In practice, there is only one LoginWindow session. Unfortunately, for certain
		 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
		 * spawns a replacement loginwindow session before cleaning up the previous one.
		 *
		 * We're going to use the creation of a new LoginWindow context as a clue that the
		 * previous LoginWindow context is on the way out and therefore we should just
		 * kick-start the shutdown of it.
		 */

		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if (unlikely(jmi->shutting_down)) {
				continue;
			} else if (strcasecmp(jmi->name, session_type) == 0) {
				jobmgr_shutdown(jmi);
				break;
			}
		}
	}

	jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
	/* session_type is a MIG name_t, so it is bounded to the same size as
	 * name_init — presumably why plain strcpy() is considered safe here. */
	strcpy(j->mgr->name_init, session_type);

	/* Even when the LoginWindow special case above ran, we still initialize the
	 * new session and dispatch its bootstrapper job. */
	if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
		j2->asport = asport;
		(void)job_assumes(j, job_dispatch(j2, true));
		kr = BOOTSTRAP_SUCCESS;
	}

	return kr;
}
9378
/* MIG server routine: move an anonymous job from its current session's job
 * manager into the named session, creating the session manager on demand.
 *
 * j: the (anonymous) job requesting the move.
 * requestor_port: requestor port for a newly created session manager.
 * session_name: target session.
 * asport: audit session port; consumed (deallocated) only when a new manager
 *         is created. NOTE(review): when the target manager already exists,
 *         asport does not appear to be deallocated on this path — confirm
 *         whether the MIG stub owns it in that case.
 * new_bsport: out — the bootstrap port of the target session.
 *
 * Only permitted for anonymous jobs and never in the PID-1 bootstrap.
 */
kern_return_t
job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);

	if (!job_assumes(j, pid1_magic == false)) {
		job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!j->anonymous) {
		job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
	if (target_jm == j->mgr) {
		job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
		*new_bsport = target_jm->jm_port;
		return BOOTSTRAP_SUCCESS;
	}

	if (!target_jm) {
		/* Implicitly create the session as a subset; the new manager adopts
		 * asport, so we drop our send right on it here. */
		target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
		if (target_jm) {
			target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
			(void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
		}
	}

	if (!job_assumes(j, target_jm != NULL)) {
		job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Remove the job from its current job manager. */
	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, pid_hash_sle);

	/* Remember (via ji) whether the job was on the global-environment list so
	 * we can re-insert it into the target manager's list below. LIST_FOREACH_SAFE
	 * leaves ji NULL if the loop completes without a break. */
	job_t ji = NULL, jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
		if (ji == j) {
			LIST_REMOVE(ji, global_env_sle);
			break;
		}
	}

	/* Put the job into the target job manager. */
	LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
	LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

	if (ji) {
		LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
	}

	/* Move our Mach services over if we're not in a flat namespace. */
	if (!g_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
		struct machservice *msi = NULL, *msit = NULL;
		SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
			LIST_REMOVE(msi, name_hash_sle);
			LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}

	j->mgr = target_jm;

	if (!j->holds_ref) {
		/* Anonymous jobs which move around are particularly interesting to us, so we want to
		 * stick around while they're still around.
		 * For example, login calls into the PAM launchd module, which moves the process into
		 * the StandardIO session by default. So we'll hold a reference on that job to prevent
		 * ourselves from going away.
		 */
		j->holds_ref = true;
		runtime_add_ref();
	}

	*new_bsport = target_jm->jm_port;

	return KERN_SUCCESS;
}
9470
/* MIG server routine: transfer this job manager's sub-bootstrap — the set of
 * Mach services registered by anonymous jobs — to the per-session launchd.
 *
 * Out-parameters on success:
 *   reqport/rcvright: the manager's requestor and bootstrap ports (ownership
 *                     moves to the caller; cleared locally).
 *   outdata/outdataCnt: mig-allocated buffer holding a packed launch_data
 *                       array of {name, pid, per-pid} dictionaries.
 *   portsp/ports_cnt: mig-allocated array of the service ports.
 *
 * Returns BOOTSTRAP_SUCCESS, or BOOTSTRAP_NOT_PRIVILEGED /
 * BOOTSTRAP_NO_MEMORY on failure (all partially built state is released).
 *
 * Fixes vs. previous revision:
 *  - a failed mig_allocate() used to `return 1`, leaking outdata_obj_array;
 *    it now unwinds through out_bad like every other failure.
 *  - *outdata/*outdataCnt are zeroed up front so the out_bad path never reads
 *    an uninitialized out-parameter when we bail before mig_allocate().
 */
kern_return_t
job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
		vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
		mach_port_array_t *portsp, unsigned int *ports_cnt)
{
	launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
	mach_port_array_t ports = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	size_t packed_size;
	struct machservice *ms;
	jobmgr_t jm;
	job_t ji;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	jm = j->mgr;

	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (unlikely(jobmgr_parent(jm) == NULL)) {
		job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
		job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (unlikely(!j->anonymous)) {
		job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");

	/* Make the out-parameters well-defined before any failure can reach
	 * out_bad, which tests *outdata. */
	*outdata = 0;
	*outdataCnt = 0;

	outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
	if (!job_assumes(j, outdata_obj_array)) {
		goto out_bad;
	}

	/* "Big enough" fixed-size reply buffer for the packed array. */
	*outdataCnt = 20 * 1024 * 1024;
	mig_allocate(outdata, *outdataCnt);
	if (!job_assumes(j, *outdata != 0)) {
		goto out_bad;
	}

	/* First pass: count the services registered by anonymous jobs so we can
	 * size the port array. */
	LIST_FOREACH(ji, &j->mgr->jobs, sle) {
		if (!ji->anonymous) {
			continue;
		}
		SLIST_FOREACH(ms, &ji->machservices, sle) {
			cnt++;
		}
	}

	mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
	if (!job_assumes(j, ports != NULL)) {
		goto out_bad;
	}

	/* Second pass: build one {name, pid, per-pid} dictionary per service and
	 * collect its port. */
	LIST_FOREACH(ji, &j->mgr->jobs, sle) {
		if (!ji->anonymous) {
			continue;
		}

		SLIST_FOREACH(ms, &ji->machservices, sle) {
			if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
				(void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
			} else {
				goto out_bad;
			}

			if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
				(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
			} else {
				goto out_bad;
			}

			if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
				(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
			} else {
				goto out_bad;
			}

			if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
				(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
			} else {
				goto out_bad;
			}

			ports[cnt2] = machservice_port(ms);

			/* Increment the send right by one so we can shutdown the jobmgr cleanly */
			(void)jobmgr_assumes(jm, (errno = launchd_mport_copy_send(ports[cnt2])) == KERN_SUCCESS);
			cnt2++;
		}
	}

	(void)job_assumes(j, cnt == cnt2);

	runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
	packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
	if (!job_assumes(j, packed_size != 0)) {
		goto out_bad;
	}

	launch_data_free(outdata_obj_array);

	*portsp = ports;
	*ports_cnt = cnt;

	/* Hand the manager's ports to the caller and drop our local claims so
	 * jobmgr_shutdown() won't destroy them. */
	*reqport = jm->req_port;
	*rcvright = jm->jm_port;

	jm->req_port = 0;
	jm->jm_port = 0;

	workaround_5477111 = j;

	jobmgr_shutdown(jm);

	return BOOTSTRAP_SUCCESS;

out_bad:
	if (outdata_obj_array) {
		launch_data_free(outdata_obj_array);
	}
	if (*outdata) {
		mig_deallocate(*outdata, *outdataCnt);
	}
	if (ports) {
		mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
9610
/* MIG server routine: create a Mach bootstrap subset beneath the caller's job
 * manager and return its bootstrap port through subsetportp. */
kern_return_t
job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
{
	jobmgr_t ancestor;
	jobmgr_t jmr;
	int bsdepth = 0;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	/* Count how many managers sit above us in the hierarchy. */
	for (ancestor = jobmgr_parent(j->mgr); ancestor != NULL; ancestor = jobmgr_parent(ancestor)) {
		bsdepth++;
	}

	/* Since we use recursion, we need an artificial depth for subsets */
	if (unlikely(bsdepth > 100)) {
		job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Derive a human-readable name for the subset from the requesting job. */
	char name[NAME_MAX];
	snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));

	jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport);
	if (!job_assumes(j, jmr != NULL)) {
		return unlikely(requestorport == MACH_PORT_NULL) ? BOOTSTRAP_NOT_PRIVILEGED : BOOTSTRAP_NO_MEMORY;
	}

	*subsetportp = jmr->jm_port;
	jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;

	/* A job could create multiple subsets, so only add a reference the first time
	 * it does so we don't have to keep a count.
	 */
	if (j->anonymous && !j->holds_ref) {
		j->holds_ref = true;
		runtime_add_ref();
	}

	job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
	return BOOTSTRAP_SUCCESS;
}
9660
9661 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
9662 job_t
9663 xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
9664 {
9665 jobmgr_t where2put = NULL;
9666
9667 launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
9668 if (destname) {
9669 if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
9670 const char *str = launch_data_get_string(destname);
9671 if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
9672 where2put = _s_xpc_system_domain;
9673 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
9674 where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
9675 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
9676 where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
9677 } else {
9678 jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
9679 errno = EINVAL;
9680 }
9681 } else {
9682 jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
9683 errno = EINVAL;
9684 }
9685
9686 if (where2put) {
9687 launch_data_t mi = NULL;
9688 if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
9689 if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
9690 jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
9691 where2put = NULL;
9692 errno = EINVAL;
9693 }
9694 }
9695 }
9696 } else {
9697 where2put = jm;
9698 }
9699
9700 job_t j = NULL;
9701 if (where2put) {
9702 jobmgr_log(where2put, LOG_DEBUG, "Importing service...");
9703 j = jobmgr_import2(where2put, pload);
9704 if (j) {
9705 j->xpc_service = true;
9706 if (where2put->xpc_singleton) {
9707 /* If the service was destined for one of the global domains,
9708 * then we have to alias it into our local domain to reserve the
9709 * name.
9710 */
9711 job_t ja = job_new_alias(jm, j);
9712 if (!ja) {
9713 /* If we failed to alias the job because of a conflict over
9714 * the label, then we remove it from the global domain. We
9715 * don't want to risk having imported a malicious job into
9716 * one of the global domains.
9717 */
9718 if (errno != EEXIST) {
9719 job_assumes(j, errno == 0);
9720 } else {
9721 job_log(j, LOG_ERR, "Failed to alias job into: %s", where2put->name);
9722 }
9723
9724 job_remove(j);
9725 } else {
9726 ja->xpc_service = true;
9727 j = ja;
9728 }
9729 }
9730 }
9731 }
9732
9733 return j;
9734 }
9735
/* MIG server routine: create the skeleton of a new XPC domain under the root
 * job manager. Only the designated XPC bootstrapper job may call this, and
 * only in PID 1. */
kern_return_t
xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
{
	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (!j || !MACH_PORT_VALID(reqport)) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}
	if (root_jobmgr->shutting_down) {
		jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (!j->xpc_bootstrapper) {
		job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* All XPC domains are children of the root job manager. What we're creating
	 * here is really just a skeleton. By creating it, we're adding reqp to our
	 * port set. It will have two messages on it. The first specifies the
	 * environment of the originator. This is so we can cache it and hand it to
	 * xpcproxy to bootstrap our services. The second is the set of jobs that is
	 * to be bootstrapped in.
	 */
	jobmgr_t dom = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
	if (!job_assumes(j, dom != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	dom->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
	dom->shortdesc = "private";
	return BOOTSTRAP_SUCCESS;
}
9772
/* MIG server routine: record the originating process's environment (ports,
 * credentials, audit session, opaque context) on a freshly created XPC domain
 * skeleton. This is the first of the two messages xpc_domain_import2() expects
 * on the domain's request port. May only be applied once per domain.
 */
kern_return_t
xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
{
	if (!j) {
		/* Due to the whacky nature of XPC service bootstrapping, we can end up
		 * getting this message long after the requesting process has gone away.
		 * See <rdar://problem/8593143>.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jobmgr_t jm = j->mgr;
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* A non-NULL req_asport means the environment was already set. */
	if (jm->req_asport != MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	struct proc_bsdshortinfo proc;
	if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		/* ESRCH just means the requestor exited; anything else is unexpected. */
		if (errno != ESRCH) {
			jobmgr_assumes(jm, errno == 0);
		}

		/* The domain is useless without its originator; tear it down. */
		jm->error = errno;
		jobmgr_remove(jm);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!jobmgr_assumes(jm, audit_session_port(ldc->asid, &jm->req_asport) == 0)) {
		jm->error = EPERM;
		jobmgr_remove(jm);
		job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Name the domain after the originating process for diagnostics, then
	 * cache everything needed to later bootstrap services via xpcproxy. */
	(void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s[%i]", proc.pbsi_comm, ldc->pid);
	strlcpy(jm->owner, proc.pbsi_comm, sizeof(jm->owner));
	jm->req_bsport = bsport;
	jm->req_excport = excport;
	jm->req_rport = rp;
	jm->req_ctx = ctx;
	jm->req_ctx_sz = ctx_sz;
	jm->req_pid = ldc->pid;
	jm->req_euid = ldc->euid;
	jm->req_egid = ldc->egid;
	jm->req_asid = ldc->asid;

	return KERN_SUCCESS;
}
9826
/* MIG server routine: bootstrap a packed array of service payloads into an
 * XPC domain. This is the second message a new domain expects (after
 * xpc_domain_set_environment()). Only the XPC bootstrapper job may send it,
 * and only once per domain. If any single import fails, the whole domain is
 * obliterated.
 *
 * Fix vs. previous revision: in the EEXIST diagnostic path, the LABEL lookup
 * result was passed to launch_data_get_type() without a NULL check; a
 * dictionary payload lacking a Label key would have crashed launchd.
 */
kern_return_t
xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!(rootj && rootj->xpc_bootstrapper)) {
		job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* This is just for XPC domains (for now). */
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (j->mgr->session_initialized) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!jobmgr_assumes(j->mgr, services != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	size_t i = 0;
	size_t c = launch_data_array_get_count(services);
	for (i = 0; i < c; i++) {
		job_t nj = NULL;
		launch_data_t ploadi = launch_data_array_get_index(services, i);
		if (!(nj = xpc_domain_import_service(j->mgr, ploadi))) {
			/* If loading one job fails, just fail the whole thing. At this
			 * point, xpchelper should receive the failure and then just refuse
			 * to launch the application, since its XPC services could not be
			 * fully bootstrapped.
			 *
			 * Take care to not reference the job or its manager after this
			 * point.
			 */
			if (errno == EINVAL) {
				jobmgr_log(j->mgr, LOG_ERR, "Service at index is not valid: %lu", i);
			} else if (errno == EEXIST) {
				/* If we get back EEXIST, we know that the payload was a
				 * dictionary with a label. But, well, I guess it never hurts to
				 * check.
				 */
				char *label = "(bogus)";
				if (launch_data_get_type(ploadi) == LAUNCH_DATA_DICTIONARY) {
					launch_data_t llabel = launch_data_dict_lookup(ploadi, LAUNCH_JOBKEY_LABEL);
					/* llabel can be NULL if the Label key is absent; don't
					 * pass NULL into launch_data_get_type(). */
					if (llabel && launch_data_get_type(llabel) == LAUNCH_DATA_STRING) {
						label = (char *)launch_data_get_string(llabel);
					}
				}
				jobmgr_log(j->mgr, LOG_ERR, "Service name conflict: %s", label);
			}

			j->mgr->error = errno;
			jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
			jobmgr_remove(j->mgr);
			break;
		} else {
			jobmgr_log(j->mgr, LOG_DEBUG, "Imported service %s", nj->label);
			job_dispatch(nj, false);
		}
	}

	kern_return_t result = BOOTSTRAP_NO_MEMORY;
	if (i == c) {
		/* All services imported: mark the session live and wake the waiter. */
		j->mgr->session_initialized = true;
		(void)jobmgr_assumes(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS) == KERN_SUCCESS);
		j->mgr->req_rport = MACH_PORT_NULL;

		/* Returning a failure code will destroy the message, whereas returning
		 * success will not, so we need to clean up here.
		 */
		mig_deallocate(services_buff, services_sz);
		result = BOOTSTRAP_SUCCESS;
	}

	return result;
}
9911
/* MIG server routine: hand the cached originator environment of an XPC domain
 * back to a checking-in service (ports, credentials, audit session, and the
 * opaque bootstrap context). Fails unless the domain's environment has
 * already been set via xpc_domain_set_environment(). */
kern_return_t
xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport, mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid, int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
{
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jobmgr_t dom = j->mgr;
	if (!(dom->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* No audit session port means the environment was never recorded. */
	if (dom->req_asport == MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	*bsport = dom->req_bsport;
	*sbsport = root_jobmgr->jm_port;
	*excport = dom->req_excport;
	*asport = dom->req_asport;
	*uid = dom->req_euid;
	*gid = dom->req_egid;
	*asid = dom->req_asid;
	*ctx = dom->req_ctx;
	*ctx_sz = dom->req_ctx_sz;

	return KERN_SUCCESS;
}
9940
9941 kern_return_t
9942 xpc_domain_get_service_name(job_t j, event_name_t name)
9943 {
9944 if (!j) {
9945 return BOOTSTRAP_NO_MEMORY;
9946 }
9947 if (!j->xpc_service) {
9948 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
9949 return BOOTSTRAP_NOT_PRIVILEGED;
9950 }
9951
9952 struct machservice * ms = SLIST_FIRST(&j->machservices);
9953 if (!ms) {
9954 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name of job with no machservices: %s", j->label);
9955 return BOOTSTRAP_UNKNOWN_SERVICE;
9956 }
9957
9958 (void)strlcpy(name, ms->name, sizeof(event_name_t));
9959 return BOOTSTRAP_SUCCESS;
9960 }
9961 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
9962
/* MIG server routine: not implemented — always reports failure. */
kern_return_t
xpc_events_get_channel_name(job_t j __attribute__((unused)), event_name_t stream __attribute__((unused)), uint64_t token __attribute__((unused)), event_name_t name __attribute__((unused)))
{
	/* Channel-name lookup is unsupported in this build. */
	return KERN_FAILURE;
}
9968
9969 kern_return_t
9970 xpc_events_get_event_name(job_t j, event_name_t stream, uint64_t token, event_name_t name)
9971 {
9972 struct externalevent *event = externalevent_find(stream, token);
9973 if (event && j->event_monitor) {
9974 (void)strcpy(name, event->name);
9975 } else {
9976 event = NULL;
9977 }
9978
9979 return event ? BOOTSTRAP_SUCCESS : BOOTSTRAP_UNKNOWN_SERVICE;
9980 }
9981
/* MIG server routine: (re)register an external event for job `j` on the named
 * event stream. An existing event with the same (stream, key) is replaced.
 * The packed `event` buffer is consumed on success (ownership passes to the
 * unpacked launch_data) and deallocated here on failure; the routine always
 * returns KERN_SUCCESS so MIG does not double-free the out-of-line data.
 *
 * Fix vs. previous revision: j was dereferenced without a NULL check. The
 * sibling handlers guard against a NULL job (the sender can die before the
 * message is serviced), so do the same here.
 */
kern_return_t
xpc_events_set_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t event, mach_msg_type_number_t eventCnt)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (j->anonymous) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Replace any existing event with the same stream/key pair. */
	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			externalevent_delete(eei);
			eventsystem_ping();
			break;
		}
	}

	bool success = false;
	struct eventsystem *es = eventsystem_find(stream);
	if (!es) {
		/* Lazily create the event system the first time its name is seen. */
		es = eventsystem_new(stream);
		(void)job_assumes(j, es != NULL);
	}

	if (es) {
		size_t offset = 0;
		launch_data_t unpacked = launch_data_unpack((void *)event, eventCnt, NULL, 0, &offset, 0);
		if (unpacked && launch_data_get_type(unpacked) == LAUNCH_DATA_DICTIONARY) {
			success = externalevent_new(j, es, key, unpacked);
		}
	}

	if (!success) {
		mig_deallocate(event, eventCnt);
	}

	return KERN_SUCCESS;
}
10019
/* MIG server routine: return the packed launch_data dictionary previously set
 * via xpc_events_set_event() for (stream, key). The reply buffer is
 * mig-allocated and owned by the caller on success.
 *
 * Fixes vs. previous revision:
 *  - the mig_allocate() result was never checked before launch_data_pack()
 *    wrote into it; a failed allocation would have been a NULL write.
 *  - added a NULL-j guard consistent with the sibling handlers.
 */
kern_return_t
xpc_events_get_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t *event, mach_msg_type_number_t *eventCnt)
{
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			/* Big enough. */
			*eventCnt = 10 * 1024;
			mig_allocate(event, *eventCnt);
			if (!job_assumes(j, *event != 0)) {
				return BOOTSTRAP_NO_MEMORY;
			}

			size_t sz = launch_data_pack(eei->event, (void *)*event, *eventCnt, NULL, NULL);
			if (!job_assumes(j, sz != 0)) {
				mig_deallocate(*event, *eventCnt);
				return BOOTSTRAP_NO_MEMORY;
			}

			return BOOTSTRAP_SUCCESS;
		}
	}

	return BOOTSTRAP_UNKNOWN_SERVICE;
}
10042
/* Find — or lazily create — the event-channel MachService named `stream` on
 * job `j`, returning its port through `p`.
 *
 * On failure, returns NULL and sets errno to a bootstrap error
 * (BOOTSTRAP_NO_MEMORY on allocation failure, BOOTSTRAP_NAME_IN_USE when the
 * job already registered an ordinary MachService of the same name); callers
 * forward errno as the MIG result.
 */
struct machservice *
xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p)
{
	/* Look for an existing service with the requested name. */
	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (strcmp(stream, msi->name) == 0) {
			break;
		}
	}

	if (!msi) {
		mach_port_t sp = MACH_PORT_NULL;
		msi = machservice_new(j, stream, &sp, false);
		if (job_assumes(j, msi)) {
			/* Hack to keep this from being publicly accessible through
			 * bootstrap_look_up().
			 */
			LIST_REMOVE(msi, name_hash_sle);
			msi->event_channel = true;
			*p = sp;

			/* If we call job_dispatch() here before the audit session for the
			 * job has been set, we'll end up not watching this service. But we
			 * also have to take care not to watch the port if the job is
			 * active.
			 *
			 * See <rdar://problem/10357855>.
			 */
			if (!j->currently_ignored) {
				machservice_watch(j, msi);
			}
		} else {
			errno = BOOTSTRAP_NO_MEMORY;
		}
	} else {
		if (!msi->event_channel) {
			/* The name collides with a regular MachService registration. */
			job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
			msi = NULL;
			errno = BOOTSTRAP_NAME_IN_USE;
		} else {
			*p = msi->port;
		}
	}

	return msi;
}
10089
/* MIG server routine: a job checks in on its own event channel, receiving the
 * channel port through `p`. Returns the bootstrap status via errno (which
 * xpc_events_find_channel() also uses to report failure). */
kern_return_t
xpc_events_channel_check_in(job_t j, event_name_t stream, uint64_t flags __attribute__((unused)), mach_port_t *p)
{
	struct machservice *ms = xpc_events_find_channel(j, stream, p);
	if (ms == NULL) {
		/* errno was set by xpc_events_find_channel(). */
		return errno;
	}

	if (ms->isActive) {
		job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
		*p = MACH_PORT_NULL;
		errno = BOOTSTRAP_SERVICE_ACTIVE;
	} else {
		job_checkin(j);
		machservice_request_notifications(ms);
		errno = BOOTSTRAP_SUCCESS;
	}

	return errno;
}
10108
/* MIG server routine: the event-monitor job looks up the event channel of the
 * job owning the external event identified by (stream, token). Only the
 * monitor is allowed to call this. */
kern_return_t
xpc_events_channel_look_up(job_t j, event_name_t stream, event_token_t token, uint64_t flags __attribute__((unused)), mach_port_t *p)
{
	struct externalevent *ee;
	struct machservice *ms;

	if (!j->event_monitor) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	ee = externalevent_find(stream, token);
	if (ee == NULL) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	/* On failure, xpc_events_find_channel() leaves the error in errno. */
	ms = xpc_events_find_channel(ee->job, stream, p);
	if (ms != NULL) {
		errno = BOOTSTRAP_SUCCESS;
	}

	return errno;
}
10128
10129 kern_return_t
10130 job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
10131 {
10132 struct ldcred *ldc = runtime_get_caller_creds();
10133 job_t otherj;
10134
10135 if (!launchd_assumes(j != NULL)) {
10136 return BOOTSTRAP_NO_MEMORY;
10137 }
10138
10139 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
10140 return BOOTSTRAP_UNKNOWN_SERVICE;
10141 }
10142
10143 #if TARGET_OS_EMBEDDED
10144 bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
10145 #else
10146 bool allow_non_root_kickstart = false;
10147 #endif
10148
10149 if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
10150 return BOOTSTRAP_NOT_PRIVILEGED;
10151 }
10152
10153 #if HAVE_SANDBOX
10154 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10155 return BOOTSTRAP_NOT_PRIVILEGED;
10156 }
10157 #endif
10158
10159 if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
10160 return BOOTSTRAP_SERVICE_ACTIVE;
10161 }
10162
10163 otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
10164 otherj = job_dispatch(otherj, true);
10165
10166 if (!job_assumes(j, otherj && otherj->p)) {
10167 /* <rdar://problem/6787083> Clear this flag if we failed to start the job. */
10168 otherj->stall_before_exec = false;
10169 return BOOTSTRAP_NO_MEMORY;
10170 }
10171
10172 *out_pid = otherj->p;
10173
10174 return 0;
10175 }
10176
/* Core of the legacy LaunchServices spawn path: unpack a job plist from the
 * caller, import it into the caller's Aqua session, and dispatch it.
 *
 * indata/indataCnt: packed launch_data dictionary describing the job.
 * asport: audit session port adopted by the spawned job.
 * outj: out — the imported (or pre-existing) job.
 *
 * Returns BOOTSTRAP_SUCCESS; BOOTSTRAP_NAME_IN_USE with *outj set when a job
 * with the same label already exists (possibly kick-startable by the caller);
 * VPROC_ERR_TRY_PER_USER to punt to the per-user launchd; 1 for malformed
 * input; or BOOTSTRAP_NO_MEMORY.
 *
 * Fix vs. previous revision: when job_dispatch() failed and returned NULL,
 * the error path called job_remove(NULL); job_dispatch() returning NULL means
 * the job is already gone, so there is nothing to remove.
 */
kern_return_t
job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
{
	launch_data_t jobdata = NULL;
	size_t data_offset = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	/* In PID 1, non-root callers are serviced by their per-user launchd. */
	if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
	if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
		jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
		return 1;
	}

	jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);

	launch_data_t label = NULL;
	launch_data_t wait4debugger = NULL;
	if (!jr) {
		switch (errno) {
		case EEXIST:
			/* If EEXIST was returned, we know that there is a label string in
			 * the dictionary. So we don't need to check the types here; that
			 * has already been done.
			 */
			label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
			jr = job_find(NULL, launch_data_get_string(label));
			if (job_assumes(j, jr != NULL) && !jr->p) {
				wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
				if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
					if (launch_data_get_bool(wait4debugger)) {
						/* If the job exists, we're going to kick-start it, but
						 * we need to give the caller the opportunity to start
						 * it suspended if it so desires. But this will only
						 * take effect if the job isn't running.
						 */
						jr->wait4debugger_oneshot = true;
					}
				}
			}

			/* NB: jr may still be NULL here; callers must check *outj before
			 * dereferencing it on the BOOTSTRAP_NAME_IN_USE path. */
			*outj = jr;
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	if (pid1_magic) {
		jr->mach_uid = ldc->uid;
	}

	jr->legacy_LS_job = true;
	jr->abandon_pg = true;
	jr->asport = asport;
	uuid_clear(jr->expected_audit_uuid);
	jr = job_dispatch(jr, true);

	if (!job_assumes(j, jr != NULL)) {
		/* job_dispatch() returning NULL means the job has already been torn
		 * down; there is nothing left to remove. */
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
	*outj = jr;

	return BOOTSTRAP_SUCCESS;
}
10278
/* MIG server routine: spawn (or kick-start) a job described by a packed plist
 * and arrange for the reply to be sent only after the child has exec(3)'d, so
 * the caller can safely SIGCONT it. On the name-collision path, an existing
 * stopped job is kick-started instead.
 *
 * Fix vs. previous revision: job_mig_spawn_internal() can return
 * BOOTSTRAP_NAME_IN_USE with *outj == NULL (the pre-existing job could not be
 * found); nj->p was dereferenced without a NULL check on that path.
 */
kern_return_t
job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
{
	job_t nj = NULL;
	kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
	if (likely(kr == KERN_SUCCESS)) {
		if (job_setup_exit_port(nj) != KERN_SUCCESS) {
			job_remove(nj);
			kr = BOOTSTRAP_NO_MEMORY;
		} else {
			/* Do not return until the job has called exec(3), thereby making it
			 * safe for the caller to send it SIGCONT.
			 *
			 * <rdar://problem/9042798>
			 */
			nj->spawn_reply_port = rp;
			kr = MIG_NO_REPLY;
		}
	} else if (kr == BOOTSTRAP_NAME_IN_USE) {
		if (!nj) {
			/* The label was in use but the existing job could not be found;
			 * there is nothing to kick-start. */
			kr = BOOTSTRAP_UNKNOWN_SERVICE;
		} else {
			bool was_running = nj->p;
			if (job_dispatch(nj, true)) {
				if (!was_running) {
					job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");

					if (job_setup_exit_port(nj) == KERN_SUCCESS) {
						nj->spawn_reply_port = rp;
						kr = MIG_NO_REPLY;
					} else {
						kr = BOOTSTRAP_NO_MEMORY;
					}
				} else {
					*obsvr_port = MACH_PORT_NULL;
					*child_pid = nj->p;
					kr = KERN_SUCCESS;
				}
			} else {
				job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
				kr = BOOTSTRAP_UNKNOWN_SERVICE;
			}
		}
	}

	mig_deallocate(indata, indataCnt);
	return kr;
}
10323
/* MIG handler: the event-monitor helper checks in for the event system `name`,
 * receiving the serialized list of registered events (outval/outvalCnt) and
 * their tokens (tokens). Only the designated event-monitor job may call this.
 *
 * Returns BOOTSTRAP_NOT_PRIVILEGED for any other caller, BOOTSTRAP_NO_MEMORY
 * on allocation/packing failure, BOOTSTRAP_SUCCESS otherwise.
 */
kern_return_t
job_mig_event_source_check_in(job_t j, name_t name, mach_port_t ping_port, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt, uint64_t *tokens)
{
	if (!j || !j->event_monitor) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Update our ping-port. One ping will force all the notification systems
	 * to check in, so they'll all give us send-once rights. It doesn't really
	 * matter which one we keep around. It's not the most efficient thing ever,
	 * but keep in mind that, by doing this over one channel, we can do it over
	 * the job's MachService. This means that we'll get it back when the job dies,
	 * and we can create ourselves a send-once right if we didn't have one already,
	 * and we can just keep the helper alive without it needing to bootstrap
	 * communication.
	 *
	 * So we're trading efficiency for robustness. In this case, the checkins
	 * should happen pretty infrequently, so it's pretty worth it.
	 */
	if (_s_event_update_port != MACH_PORT_NULL) {
		/* Drop our right to the previous ping port before replacing it. */
		(void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
	}
	_s_event_update_port = ping_port;

	kern_return_t result = BOOTSTRAP_NO_MEMORY;
	launch_data_t arr = launch_data_alloc(LAUNCH_DATA_ARRAY);
	if (job_assumes(j, arr != NULL)) {
		/* Create the event system on first check-in for this name. */
		struct eventsystem *es = eventsystem_find(name);
		if (unlikely(es == NULL)) {
			es = eventsystem_new(name);
		}

		if (job_assumes(j, es != NULL)) {
			struct externalevent *ei = NULL;
			size_t i = 0;
			LIST_FOREACH(ei, &es->events, sys_le) {
				/* NOTE(review): the array slot is populated before the bounds
				 * check below, so on the 1024th-and-beyond event the array
				 * still grows while the token write is skipped — presumably
				 * the caller's tokens buffer holds 1024 entries; confirm
				 * against the MIG definition.
				 */
				(void)job_assumes(j, launch_data_array_set_index(arr, ei->event, i));
				if (job_assumes(j, i < 1024)) {
					tokens[i] = ei->id;
				} else {
					break;
				}
				i++;
			}

			/* Big enough. (Fixed-size reply buffer; assumes the packed array
			 * never exceeds 10KiB — launch_data_pack below returns 0 if it
			 * doesn't fit, which we treat as failure.)
			 */
			*outvalCnt = 10 * 1024;
			mig_allocate(outval, *outvalCnt);

			size_t sz = launch_data_pack(arr, (void *)*outval, *outvalCnt, NULL, NULL);
			if (job_assumes(j, sz != 0)) {
				result = BOOTSTRAP_SUCCESS;
			} else {
				mig_deallocate(*outval, *outvalCnt);
			}
		}

		/* Total hack, but launch_data doesn't do ref-counting. The elements of
		 * arr are still owned by their externalevent entries, so free only the
		 * backing array and the container itself — not the elements.
		 */
		struct _launch_data *hack = (struct _launch_data *)arr;
		free(hack->_array);
		free(arr);
	}

	return result;
}
10389
10390 kern_return_t
10391 job_mig_event_set_state(job_t j, name_t name, uint64_t token, boolean_t state)
10392 {
10393 if (!j || !j->event_monitor) {
10394 return BOOTSTRAP_NOT_PRIVILEGED;
10395 }
10396
10397 struct externalevent *ei = externalevent_find(name, token);
10398 if (job_assumes(j, ei != NULL)) {
10399 ei->state = state;
10400 if(job_dispatch(ei->job, false) == NULL) {
10401 if (errno == EPERM) {
10402 return BOOTSTRAP_NOT_PRIVILEGED;
10403 }
10404 return BOOTSTRAP_NO_MEMORY;
10405 }
10406 } else {
10407 return BOOTSTRAP_NO_MEMORY;
10408 }
10409
10410 return BOOTSTRAP_SUCCESS;
10411 }
10412
/* One-time initialization of the job-manager hierarchy.
 *
 * sflag - single-user-mode flag, forwarded to the root job manager.
 *
 * Creates the root job manager (SYSTEM session under PID 1, BACKGROUND
 * otherwise), the XPC system domain (unless disabled), and opens a vnode
 * watch used to keep /dev responsive during shutdown. Both jobmgr_new calls
 * are fatal on failure (launchd_assert).
 */
void
jobmgr_init(bool sflag)
{
	const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
	SLIST_INIT(&s_curious_jobs);
	LIST_INIT(&s_needing_sessions);

	launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
	/* The XPC system domain inherits our audit session. */
	launchd_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
	_s_xpc_system_domain->req_asid = g_audit_session;
	_s_xpc_system_domain->req_asport = g_audit_session_port;
	_s_xpc_system_domain->shortdesc = "system";
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
	if (pid1_magic) {
		root_jobmgr->monitor_shutdown = true;
	}

	uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
	s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
	/* NOTE(review): likely() marks the failure path (== -1) of the open above
	 * as the expected case — confirm whether this hint is intentional or
	 * inverted; behavior is unaffected either way.
	 */
	if (likely(s_no_hang_fd == -1)) {
		/* Fall back to watching /dev directly via kqueue. */
		if (jobmgr_assumes(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK)) != -1)) {
			(void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr) != -1);
		}
	}
	s_no_hang_fd = _fd(s_no_hang_fd);
}
10440
/* Hash a NUL-terminated string with djb2 (hash = hash * 33 + c).
 *
 * This algorithm was first reported by Dan Bernstein many years ago in
 * comp.lang.c.
 *
 * Bytes are read as unsigned char so the result is identical regardless of
 * whether the platform's plain char is signed; previously, bytes >= 0x80
 * would sign-extend on signed-char targets and yield a different (still
 * valid, but non-portable) hash.
 */
size_t
our_strhash(const char *s)
{
	size_t c, r = 5381;

	while ((c = *(const unsigned char *)s++)) {
		r = ((r << 5) + r) + c; /* hash*33 + c */
	}

	return r;
}
10456
10457 size_t
10458 hash_label(const char *label)
10459 {
10460 return our_strhash(label) % LABEL_HASH_SIZE;
10461 }
10462
10463 size_t
10464 hash_ms(const char *msstr)
10465 {
10466 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
10467 }
10468
10469 bool
10470 waiting4removal_new(job_t j, mach_port_t rp)
10471 {
10472 struct waiting_for_removal *w4r;
10473
10474 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
10475 return false;
10476 }
10477
10478 w4r->reply_port = rp;
10479
10480 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
10481
10482 return true;
10483 }
10484
10485 void
10486 waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
10487 {
10488 (void)job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);
10489
10490 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
10491
10492 free(w4r);
10493 }
10494
/* Query the kernel's process-table limit (kern.maxproc). Falls back to 100
 * if the sysctl unexpectedly fails.
 */
size_t
get_kern_max_proc(void)
{
	int name[] = { CTL_KERN, KERN_MAXPROC };
	int maxproc = 100;
	size_t len = sizeof(maxproc);

	(void)launchd_assumes(sysctl(name, 2, &maxproc, &len, NULL, 0) != -1);

	return maxproc;
}
10506
/* See rdar://problem/6271234 */
/* Run the deferred software-install script (/etc/rc.deferred_install) exactly
 * once at boot, then remove it so it doesn't run again — avoiding a second
 * reboot cycle. PID 1 only; a no-op everywhere else.
 */
void
eliminate_double_reboot(void)
{
	if (unlikely(!pid1_magic)) {
		return;
	}

	struct stat sb;
	const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
	char *try_again = "Will try again at next boot.";
	/* ~0 is the "did not complete successfully" sentinel; only an exit status
	 * of 0 from the script triggers the unlink below.
	 */
	int result = ~0;

	if (unlikely(stat(argv[1], &sb) != -1)) {
		jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");

		int wstatus;
		pid_t p;

		/* posix_spawnp(3) returns the error number directly; stashing it in
		 * errno lets the check below read naturally.
		 */
		(void)jobmgr_assumes(root_jobmgr, (errno = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ)) == 0);

		if (errno) {
			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script! %s", try_again);
			goto out;
		}

		if (!jobmgr_assumes(root_jobmgr, waitpid(p, &wstatus, 0) != -1)) {
			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't confirm that deferred install script exited successfully! %s", try_again);
			goto out;
		}

		if (jobmgr_assumes(root_jobmgr, WIFEXITED(wstatus) != 0)) {
			if (jobmgr_assumes(root_jobmgr, (result = WEXITSTATUS(wstatus)) == EXIT_SUCCESS)) {
				jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
			} else {
				jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script exited with status %d. %s", WEXITSTATUS(wstatus), try_again);
			}
		} else {
			/* Script terminated by a signal (or stopped) rather than exiting. */
			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Confirmed that deferred install script exited, but couldn't confirm that it was successful. %s", try_again);
		}
	}
out:
	if (result == 0) {
		/* If the unlink(2) was to fail, it would be most likely fail with EBUSY. All the other
		 * failure cases for unlink(2) don't apply when we're running under PID 1 and have verified
		 * that the file exists. Outside of someone deliberately messing with us (like if /etc/rc.deferredinstall
		 * is actually a looping sym-link or a mount point for a filesystem) and I/O errors, we should be good.
		 */
		if (!jobmgr_assumes(root_jobmgr, unlink(argv[1]) != -1)) {
			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script couldn't be removed!");
		}
	}
}
10560
10561 void
10562 jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
10563 {
10564 job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
10565 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10566 j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
10567 job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
10568 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10569 j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
10570 job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
10571 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
10572 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
10573 * You can't set this in a plist.
10574 */
10575 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
10576 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
10577 * complain about it.
10578 */
10579 } else {
10580 job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
10581 }
10582
10583 if (unlikely(!j->jetsam_properties)) {
10584 j->jetsam_properties = true;
10585 LIST_INSERT_HEAD(&j->mgr->jetsam_jobs, j, jetsam_sle);
10586 j->mgr->jetsam_jobs_cnt++;
10587 }
10588
10589 j->jetsam_seq = s_jetsam_sequence_id++;
10590 }
10591
/* Apply Jetsam (memory-pressure kill) priorities from `priorities`, an array
 * of per-job dictionaries (each carrying at least LAUNCH_KEY_JETSAMLABEL plus
 * Jetsam properties), then push the resulting list of running Jetsam jobs to
 * the kernel, sorted most-killable first.
 *
 * Returns 0 on success, EINVAL on malformed input or no running Jetsam jobs,
 * EPERM/ENOMEM/errno on the respective failures.
 */
int
launchd_set_jetsam_priorities(launch_data_t priorities)
{
	if (!launchd_assumes(launch_data_get_type(priorities) == LAUNCH_DATA_ARRAY)) {
		return EINVAL;
	}

	jobmgr_t jm = NULL;
#if !TARGET_OS_EMBEDDED
	/* For testing. */
	jm = jobmgr_find_by_name(root_jobmgr, VPROCMGR_SESSION_AQUA);
	if (!launchd_assumes(jm != NULL)) {
		return EINVAL;
	}
#else
	/* Since this is for embedded, we can assume that the root job manager holds the Jetsam jobs. */
	jm = root_jobmgr;

	if (!g_embedded_privileged_action) {
		return EPERM;
	}
#endif

	size_t npris = launch_data_array_get_count(priorities);

	/* First pass: apply each dictionary's properties to the named job.
	 * Malformed entries and unknown labels are skipped, not fatal.
	 */
	job_t ji = NULL;
	size_t i = 0;
	for (i = 0; i < npris; i++) {
		launch_data_t ldi = launch_data_array_get_index(priorities, i);
		if (!launchd_assumes(launch_data_get_type(ldi) == LAUNCH_DATA_DICTIONARY)) {
			continue;
		}

		launch_data_t label = NULL;
		if (!launchd_assumes(label = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMLABEL))) {
			continue;
		}
		const char *_label = launch_data_get_string(label);

		ji = job_find(NULL, _label);
		if (!launchd_assumes(ji != NULL)) {
			continue;
		}

		launch_data_dict_iterate(ldi, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, ji);

		/* Frontmost is handled here rather than in jetsam_property_setup. */
		launch_data_t frontmost = NULL;
		if ((frontmost = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMFRONTMOST)) && launch_data_get_type(frontmost) == LAUNCH_DATA_BOOL) {
			ji->jetsam_frontmost = launch_data_get_bool(frontmost);
		}
	}

	/* Second pass: collect the currently-running Jetsam jobs. */
	i = 0;
	job_t *jobs = (job_t *)calloc(jm->jetsam_jobs_cnt, sizeof(job_t));
	if (launchd_assumes(jobs != NULL)) {
		LIST_FOREACH(ji, &jm->jetsam_jobs, jetsam_sle) {
			if (ji->p) {
				jobs[i] = ji;
				i++;
			}
		}
	}

	size_t totalpris = i;

	int result = EINVAL;

	/* It is conceivable that there could be no Jetsam jobs running. */
	if (totalpris > 0) {
		/* Yay blocks! */
		qsort_b((void *)jobs, totalpris, sizeof(job_t), ^ int (const void *lhs, const void *rhs) {
			job_t _lhs = *(job_t *)lhs;
			job_t _rhs = *(job_t *)rhs;
			/* Sort in descending order. (Priority correlates to the soonishness with which you will be killed.) */
			if (_lhs->jetsam_priority > _rhs->jetsam_priority) {
				return -1;
			} else if (_lhs->jetsam_priority < _rhs->jetsam_priority) {
				return 1;
			}
			/* Priority is equal, so sort by sequence ID to maintain LRU order */
			if( (int)(_lhs->jetsam_seq - _rhs->jetsam_seq) > 0 ) {
				return 1;
			} else if( (int)(_lhs->jetsam_seq - _rhs->jetsam_seq) < 0 ) {
				return -1;
			}

			return 0;
		});

		/* Translate the sorted jobs into the kernel's priority-entry format
		 * and hand the whole list to the kernel in one shot.
		 */
		jetsam_priority_entry_t *jpris = (jetsam_priority_entry_t *)calloc(totalpris, sizeof(jetsam_priority_entry_t));
		if (!launchd_assumes(jpris != NULL)) {
			result = ENOMEM;
		} else {
			for (i = 0; i < totalpris; i++) {
				jpris[i].pid = jobs[i]->p; /* Subject to time-of-use vs. time-of-check, obviously. */
				jpris[i].flags |= jobs[i]->jetsam_frontmost ? kJetsamFlagsFrontmost : 0;
				jpris[i].hiwat_pages = jobs[i]->jetsam_memlimit;
			}

			/* NOTE(review): the list is passed as the "new value" (newp) with
			 * no oldp — this sysctl is write-only from our side; confirm the
			 * kern.memorystatus_priority_list contract if touching this.
			 */
			(void)launchd_assumes((result = sysctlbyname("kern.memorystatus_priority_list", NULL, NULL, &jpris[0], totalpris * sizeof(jetsam_priority_entry_t))) != -1);
			result = result != 0 ? errno : 0;

			free(jpris);
		}
	}

	if (jobs) {
		free(jobs);
	}

	return result;
}