]> git.saurik.com Git - apple/launchd.git/blob - src/core.c
61c4c3309c25d1abb5fc6c661aadee6ea514ba51
[apple/launchd.git] / src / core.c
1 /*
2 * @APPLE_APACHE_LICENSE_HEADER_START@
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * @APPLE_APACHE_LICENSE_HEADER_END@
17 */
18
19 #include "config.h"
20 #include "core.h"
21 #include "internal.h"
22 #include "helper.h"
23
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/boolean.h>
28 #include <mach/message.h>
29 #include <mach/notify.h>
30 #include <mach/mig_errors.h>
31 #include <mach/mach_traps.h>
32 #include <mach/mach_interface.h>
33 #include <mach/host_info.h>
34 #include <mach/mach_host.h>
35 #include <mach/exception.h>
36 #include <mach/host_reboot.h>
37 #include <sys/types.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
40 #include <sys/stat.h>
41 #include <sys/ucred.h>
42 #include <sys/fcntl.h>
43 #include <sys/un.h>
44 #include <sys/reboot.h>
45 #include <sys/wait.h>
46 #include <sys/sysctl.h>
47 #include <sys/sockio.h>
48 #include <sys/time.h>
49 #include <sys/resource.h>
50 #include <sys/ioctl.h>
51 #include <sys/mount.h>
52 #include <sys/pipe.h>
53 #include <sys/mman.h>
54 #include <sys/socket.h>
55 #include <sys/syscall.h>
56 #include <sys/kern_memorystatus.h>
57 #include <net/if.h>
58 #include <netinet/in.h>
59 #include <netinet/in_var.h>
60 #include <netinet6/nd6.h>
61 #include <bsm/libbsm.h>
62 #include <unistd.h>
63 #include <signal.h>
64 #include <errno.h>
65 #include <libgen.h>
66 #include <stdio.h>
67 #include <stdlib.h>
68 #include <stdarg.h>
69 #include <stdbool.h>
70 #include <paths.h>
71 #include <pwd.h>
72 #include <grp.h>
73 #include <ttyent.h>
74 #include <dlfcn.h>
75 #include <dirent.h>
76 #include <string.h>
77 #include <ctype.h>
78 #include <glob.h>
79 #include <System/sys/spawn.h>
80 #include <System/sys/spawn_internal.h>
81 #include <spawn.h>
82 #include <spawn_private.h>
83 #include <time.h>
84 #include <libinfo.h>
85 #include <os/assumes.h>
86 #include <xpc/launchd.h>
87 #include <asl.h>
88 #include <_simple.h>
89
90 #include <libproc.h>
91 #include <libproc_internal.h>
92 #include <System/sys/proc_info.h>
93 #include <malloc/malloc.h>
94 #include <pthread.h>
95 #if HAVE_SANDBOX
96 #define __APPLE_API_PRIVATE
97 #include <sandbox.h>
98 #endif
99 #if HAVE_QUARANTINE
100 #include <quarantine.h>
101 #endif
102 #if HAVE_RESPONSIBILITY
103 #include <responsibility.h>
104 #endif
105 #if !TARGET_OS_EMBEDDED
106 extern int gL1CacheEnabled;
107 #endif
108 #if HAVE_SYSTEMSTATS
109 #include <systemstats/systemstats.h>
110 #endif
111
112 #include "launch.h"
113 #include "launch_priv.h"
114 #include "launch_internal.h"
115 #include "bootstrap.h"
116 #include "bootstrap_priv.h"
117 #include "vproc.h"
118 #include "vproc_internal.h"
119
120 #include "reboot2.h"
121
122 #include "launchd.h"
123 #include "runtime.h"
124 #include "ipc.h"
125 #include "job.h"
126 #include "jobServer.h"
127 #include "job_reply.h"
128 #include "job_forward.h"
129 #include "mach_excServer.h"
130
131 #define POSIX_SPAWN_IOS_INTERACTIVE 0
132
133 #if TARGET_OS_EMBEDDED
134 /* Default memory highwatermark for daemons as set out in <rdar://problem/10307788>. */
135 #define DEFAULT_JETSAM_DAEMON_HIGHWATERMARK 5
136 #endif
137
138 /* LAUNCHD_DEFAULT_EXIT_TIMEOUT
139 * If the job hasn't exited in the given number of seconds after sending
140 * it a SIGTERM, SIGKILL it. Can be overriden in the job plist.
141 */
/* Seconds; presumably the floor below which a job run counts as "too short"
 * (compare the per-job min_run_time field below) — confirm at use site.
 */
142 #define LAUNCHD_MIN_JOB_RUN_TIME 10
143 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
/* Seconds; NOTE(review): looks like the grace period after SIGKILL — confirm. */
144 #define LAUNCHD_SIGKILL_TIMER 4
/* NOTE(review): presumably a rate limit (seconds) on logging repeated
 * exec(3) failures — confirm against the waiting4ok handling.
 */
145 #define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
146
/* Directory used for shutdown-time log files. */
147 #define SHUTDOWN_LOG_DIR "/var/log/shutdown"
148
/* Dictionary keys for the bootstrap "take subset" handoff. */
149 #define TAKE_SUBSET_NAME "TakeSubsetName"
150 #define TAKE_SUBSET_PID "TakeSubsetPID"
151 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
152
153 #define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
154
155 extern char **environ;
156
/* A parked Mach reply port for a client blocked until a job is removed;
 * presumably the reply is sent when the job finally goes away (see
 * waiting4removal_new/waiting4removal_delete below).
 */
157 struct waiting_for_removal {
// Link in the owning job's removal_watchers list.
158 SLIST_ENTRY(waiting_for_removal) sle;
// Where to send the "job removed" reply.
159 mach_port_t reply_port;
160 };
161
162 static bool waiting4removal_new(job_t j, mach_port_t rp);
163 static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
164
/* A Mach service advertised through a bootstrap namespace. An instance is
 * linked into the owning job's service list (sle), optionally the global
 * special-ports list (special_port_sle), and the name/port hash tables
 * declared below.
 */
165 struct machservice {
166 SLIST_ENTRY(machservice) sle;
167 SLIST_ENTRY(machservice) special_port_sle;
168 LIST_ENTRY(machservice) name_hash_sle;
169 LIST_ENTRY(machservice) port_hash_sle;
// Original service this entry aliases (see machservice_new_alias); NULL otherwise.
170 struct machservice *alias;
// Job that owns/provides this service.
171 job_t job;
// NOTE(review): presumably bumped when the port is reset (machservice_resetport) — confirm.
172 unsigned int gen_num;
// The service's Mach port name in launchd's IPC space.
173 mach_port_name_t port;
174 unsigned int
175 isActive:1,
176 reset:1,
177 recv:1,
178 hide:1,
179 kUNCServer:1,
180 per_user_hack:1,
181 debug_on_close:1,
182 per_pid:1,
183 delete_on_destruction:1,
184 drain_one_on_crash:1,
185 drain_all_on_crash:1,
186 upfront:1,
187 event_channel:1,
188 recv_race_hack :1,
189 /* Don't let the size of this field to get too small. It has to be large
190 * enough to represent the reasonable range of special port numbers.
191 */
192 special_port_num:17;
// Service name, allocated inline at the tail of the structure.
193 const char name[0];
194 };
195
196 // HACK: This should be per jobmgr_t
197 static SLIST_HEAD(, machservice) special_ports;
198
199 #define PORT_HASH_SIZE 32
/* Bucket index for a Mach port name. The power-of-two test lets the
 * expression reduce to a cheap mask when PORT_HASH_SIZE permits, with a
 * modulo fallback otherwise.
 */
200 #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
201
/* Global table of machservices keyed by port (see port_hash_sle above). */
202 static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
203
204 static void machservice_setup(launch_data_t obj, const char *key, void *context);
205 static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
206 static void machservice_resetport(job_t j, struct machservice *ms);
207 static void machservice_stamp_port(job_t j, struct machservice *ms);
208 static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
209 static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
210 static void machservice_ignore(job_t j, struct machservice *ms);
211 static void machservice_watch(job_t j, struct machservice *ms);
212 static void machservice_delete(job_t j, struct machservice *, bool port_died);
213 static void machservice_request_notifications(struct machservice *);
214 static mach_port_t machservice_port(struct machservice *);
215 static job_t machservice_job(struct machservice *);
216 static bool machservice_hidden(struct machservice *);
217 static bool machservice_active(struct machservice *);
218 static const char *machservice_name(struct machservice *);
219 static bootstrap_status_t machservice_status(struct machservice *);
220 void machservice_drain_port(struct machservice *);
221
/* A named group of listen descriptors for a job (launchd socket activation). */
222 struct socketgroup {
// Link in the owning job's sockets list.
223 SLIST_ENTRY(socketgroup) sle;
// Array of fd_cnt file descriptors watched on behalf of the job.
224 int *fds;
225 unsigned int fd_cnt;
/* Group name stored inline; name_init is the writable alias used while
 * the structure is being initialized.
 */
226 union {
227 const char name[0];
228 char name_init[0];
229 };
230 };
231
232 static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt);
233 static void socketgroup_delete(job_t j, struct socketgroup *sg);
234 static void socketgroup_watch(job_t j, struct socketgroup *sg);
235 static void socketgroup_ignore(job_t j, struct socketgroup *sg);
236 static void socketgroup_callback(job_t j);
237 static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
238 static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
239
/* One StartCalendarInterval entry: a cron-like firing specification plus
 * the next computed fire time.
 */
240 struct calendarinterval {
// Link in the global sorted_calendar_events list below.
241 LIST_ENTRY(calendarinterval) global_sle;
// Link in the owning job's cal_intervals list.
242 SLIST_ENTRY(calendarinterval) sle;
243 job_t job;
// The requested wall-clock pattern (cron-style fields; see cronemu_* below).
244 struct tm when;
// Next absolute time this interval should fire.
245 time_t when_next;
246 };
247
/* All calendar intervals, across jobs; NOTE(review): name suggests it is kept
 * ordered by when_next (see calendarinterval_setalarm) — confirm.
 */
248 static LIST_HEAD(, calendarinterval) sorted_calendar_events;
249
250 static bool calendarinterval_new(job_t j, struct tm *w);
251 static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
252 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
253 static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
254 static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
255 static void calendarinterval_callback(void);
256 static void calendarinterval_sanity_check(void);
257
/* One environment variable (key/value) for a job, or for the global
 * environment when imported via EnvironmentVariables at global scope.
 */
258 struct envitem {
259 SLIST_ENTRY(envitem) sle;
// Heap-allocated value string.
260 char *value;
/* Key stored inline; key_init is the writable alias used during setup. */
261 union {
262 const char key[0];
263 char key_init[0];
264 };
265 };
266
267 static bool envitem_new(job_t j, const char *k, const char *v, bool global);
268 static void envitem_delete(job_t j, struct envitem *ei, bool global);
269 static void envitem_setup(launch_data_t obj, const char *key, void *context);
270
/* One resource limit requested by a job plist. */
271 struct limititem {
272 SLIST_ENTRY(limititem) sle;
// The limit values to apply.
273 struct rlimit lim;
/* setsoft/sethard say which half of lim is meaningful; which holds the
 * RLIMIT_* selector (see launchd_keys2limits below).
 */
274 unsigned int setsoft:1, sethard:1, which:30;
275 };
276
277 static bool limititem_update(job_t j, int w, rlim_t r);
278 static void limititem_delete(job_t j, struct limititem *li);
279 static void limititem_setup(launch_data_t obj, const char *key, void *context);
280 #if HAVE_SANDBOX
281 static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
282 #endif
283
284 static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
285
/* Conditions a KeepAlive semaphore item can express (see struct
 * semaphoreitem below); the OTHER_JOB_* values name another job's label
 * in the item's `what` field.
 */
286 typedef enum {
287 NETWORK_UP = 1,
288 NETWORK_DOWN,
289 SUCCESSFUL_EXIT,
290 FAILED_EXIT,
291 CRASHED,
292 DID_NOT_CRASH,
293 OTHER_JOB_ENABLED,
294 OTHER_JOB_DISABLED,
295 OTHER_JOB_ACTIVE,
296 OTHER_JOB_INACTIVE,
297 } semaphore_reason_t;
298
/* One KeepAlive criterion for a job: why it should (or should not) run,
 * plus an optional subject string (e.g. another job's label or a path).
 */
299 struct semaphoreitem {
300 SLIST_ENTRY(semaphoreitem) sle;
// Which condition this item encodes.
301 semaphore_reason_t why;
302
/* Subject stored inline; what_init is the writable alias used during setup. */
303 union {
304 const char what[0];
305 char what_init[0];
306 };
307 };
308
/* Context for walking a KeepAlive sub-dictionary (see
 * semaphoreitem_setup_dict_iter): each boolean entry maps to why_true or
 * why_false depending on its value.
 */
309 struct semaphoreitem_dict_iter_context {
310 job_t j;
311 semaphore_reason_t why_true;
312 semaphore_reason_t why_false;
313 };
314
315 static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
316 static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
317 static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
318 static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
319 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
320
/* One externally-monitored event (XPC Events): linked both into its event
 * system's list (sys_le) and its subscribing job's list (job_le).
 */
321 struct externalevent {
322 LIST_ENTRY(externalevent) sys_le;
323 LIST_ENTRY(externalevent) job_le;
// Owning event system (stream).
324 struct eventsystem *sys;
325
// Identifier unique within the event system (see eventsystem.curid).
326 uint64_t id;
// Subscribing job.
327 job_t job;
// Current state vs. the state the job wants to be told about.
328 bool state;
329 bool wanted_state;
330 bool internal;
// Event payload and any entitlement requirements, as XPC objects.
331 xpc_object_t event;
332 xpc_object_t entitlements;
333
// Event name, allocated inline at the tail.
334 char name[0];
335 };
336
/* Context passed while iterating a job's events for one event system. */
337 struct externalevent_iter_ctx {
338 job_t j;
339 struct eventsystem *sys;
340 };
341
342 static bool externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags);
343 static void externalevent_delete(struct externalevent *ee);
344 static void externalevent_setup(launch_data_t obj, const char *key, void *context);
345 static struct externalevent *externalevent_find(const char *sysname, uint64_t id);
346
/* An event stream (XPC Events system): a named collection of
 * externalevents plus the next id to hand out.
 */
347 struct eventsystem {
// Link in the global _s_event_systems list.
348 LIST_ENTRY(eventsystem) global_le;
349 LIST_HEAD(, externalevent) events;
// Source for externalevent.id values.
350 uint64_t curid;
// System name, allocated inline at the tail.
351 char name[0];
352 };
353
354 static struct eventsystem *eventsystem_new(const char *name);
355 static void eventsystem_delete(struct eventsystem *sys) __attribute__((unused));
356 static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
357 static struct eventsystem *eventsystem_find(const char *name);
358 static void eventsystem_ping(void);
359
/* A debugger/tool waiting to attach to a process in a job manager:
 * records the reply port, the target pid, and the XPC service type.
 */
360 struct waiting4attach {
361 LIST_ENTRY(waiting4attach) le;
// Port to notify when the attach can proceed.
362 mach_port_t port;
// Target pid the waiter is interested in.
363 pid_t dest;
364 xpc_service_type_t type;
// Name of the awaited service, allocated inline at the tail.
365 char name[0];
366 };
367
/* Waiters registered against the whole launchd domain rather than a
 * particular job manager (managers keep their own `attaches` list).
 */
368 static LIST_HEAD(, waiting4attach) _launchd_domain_waiters;
369
370 static struct waiting4attach *waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type);
371 static void waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a);
372 static struct waiting4attach *waiting4attach_find(jobmgr_t jm, job_t j);
373
#define ACTIVE_JOB_HASH_SIZE 32
/* Bucket index for a pid in the active-jobs tables.
 *
 * The parameter is parenthesized (CERT PRE01-C): the original expansion
 * used bare `x`, so an expression argument such as `a ^ b` would bind as
 * `a ^ (b & (SIZE - 1))` instead of hashing the whole value. The
 * power-of-two test lets the compiler reduce this to a mask.
 */
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? ((x) & (ACTIVE_JOB_HASH_SIZE - 1)) : ((x) % ACTIVE_JOB_HASH_SIZE))
376
377 #define MACHSERVICE_HASH_SIZE 37
378
379 #define LABEL_HASH_SIZE 53
/* A job manager: a bootstrap namespace (or XPC domain) holding jobs,
 * sub-managers, and the hash tables used to find jobs and services.
 * The kqueue callback MUST stay first so the generic kevent dispatch can
 * treat the pointer as a kq_callback (same trick as struct job_s below).
 */
380 struct jobmgr_s {
381 kq_callback kqjobmgr_callback;
382 LIST_ENTRY(jobmgr_s) xpc_le;
383 SLIST_ENTRY(jobmgr_s) sle;
// Child managers (sub-bootstraps) and the jobs owned by this manager.
384 SLIST_HEAD(, jobmgr_s) submgrs;
385 LIST_HEAD(, job_s) jobs;
386 LIST_HEAD(, waiting4attach) attaches;
387
388 /* For legacy reasons, we keep all job labels that are imported in the root
389 * job manager's label hash. If a job manager is an XPC domain, then it gets
390 * its own label hash that is separate from the "global" one stored in the
391 * root job manager.
392 */
393 LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
394 LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
395 LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
396 LIST_HEAD(, job_s) global_env_jobs;
// Receive port this manager serves; req_port is the requestor's port.
397 mach_port_t jm_port;
398 mach_port_t req_port;
// Parent manager, or NULL for the root.
399 jobmgr_t parentmgr;
400 int reboot_flags;
401 time_t shutdown_time;
// Counts used to decide dispatch behavior during global-on-demand mode.
402 unsigned int global_on_demand_cnt;
403 unsigned int normal_active_cnt;
404 unsigned int
405 shutting_down:1,
406 session_initialized:1,
407 killed_stray_jobs:1,
408 monitor_shutdown:1,
409 shutdown_jobs_dirtied:1,
410 shutdown_jobs_cleaned:1,
411 xpc_singleton:1;
412 uint32_t properties;
413 // XPC-specific properties.
414 char owner[MAXCOMLEN];
415 char *shortdesc;
// req_* fields describe the requestor that created this (XPC) domain.
416 mach_port_t req_bsport;
417 mach_port_t req_excport;
418 mach_port_t req_asport;
419 mach_port_t req_gui_asport;
420 pid_t req_pid;
421 uid_t req_euid;
422 gid_t req_egid;
423 au_asid_t req_asid;
424 vm_offset_t req_ctx;
425 mach_msg_type_number_t req_ctx_sz;
426 mach_port_t req_rport;
427 uint64_t req_uniqueid;
428 kern_return_t error;
/* Manager name stored inline; name_init is the writable alias for setup. */
429 union {
430 const char name[0];
431 char name_init[0];
432 };
433 };
434
435 // Global XPC domains.
436 static jobmgr_t _s_xpc_system_domain;
437 static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
438 static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
439
/* Assertion wrappers that report through jobmgr_log_bug with the manager
 * as logging context.
 */
440 #define jobmgr_assumes(jm, e) os_assumes_ctx(jobmgr_log_bug, jm, (e))
441 #define jobmgr_assumes_zero(jm, e) os_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
442 #define jobmgr_assumes_zero_p(jm, e) posix_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
443
444 static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
445 static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
446 static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
447 static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
448 static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
449 static jobmgr_t jobmgr_parent(jobmgr_t jm);
450 static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
451 static bool jobmgr_label_test(jobmgr_t jm, const char *str);
452 static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
453 static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
454 static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
455 static void jobmgr_remove(jobmgr_t jm);
456 static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
457 static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
458 static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
459 static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
460 static job_t managed_job(pid_t p);
461 static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
462 static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
463 static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
464 static void job_export_all2(jobmgr_t jm, launch_data_t where);
465 static void jobmgr_callback(void *obj, struct kevent *kev);
466 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
467 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
468 static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
469 static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
470 static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
471 static void jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children);
472 // static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
473 static bool jobmgr_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);
474
/* Sentinel "label" pointer values (never dereferenced) that tell the
 * import path to synthesize a label of the given flavor instead of using
 * a caller-supplied string.
 */
475 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
476 #define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
477 #define AUTO_PICK_XPC_LABEL (const char *)(~2)
478
/* List node recording a per-user launchd that this job has suspended
 * (see job_s.suspended_perusers / peruser_suspend_count).
 */
479 struct suspended_peruser {
480 LIST_ENTRY(suspended_peruser) sle;
481 job_t j;
482 };
483
/* The central per-job record: identity, spawn configuration imported from
 * the plist, runtime state (pid, ports, timers), and a large set of
 * single-bit state flags. Jobs are linked into many lists/hashes at once
 * (per-manager, per-pid, per-label, etc.) via the *_sle members.
 */
484 struct job_s {
485 // MUST be first element of this structure.
486 kq_callback kqjob_callback;
487 LIST_ENTRY(job_s) sle;
488 LIST_ENTRY(job_s) subjob_sle;
489 LIST_ENTRY(job_s) needing_session_sle;
490 LIST_ENTRY(job_s) jetsam_sle;
491 LIST_ENTRY(job_s) pid_hash_sle;
492 LIST_ENTRY(job_s) global_pid_hash_sle;
493 LIST_ENTRY(job_s) label_hash_sle;
494 LIST_ENTRY(job_s) global_env_sle;
495 SLIST_ENTRY(job_s) curious_jobs_sle;
496 LIST_HEAD(, suspended_peruser) suspended_perusers;
497 LIST_HEAD(, waiting_for_exit) exit_watchers;
498 LIST_HEAD(, job_s) subjobs;
499 LIST_HEAD(, externalevent) events;
500 SLIST_HEAD(, socketgroup) sockets;
501 SLIST_HEAD(, calendarinterval) cal_intervals;
502 SLIST_HEAD(, envitem) global_env;
503 SLIST_HEAD(, envitem) env;
504 SLIST_HEAD(, limititem) limits;
505 SLIST_HEAD(, machservice) machservices;
506 SLIST_HEAD(, semaphoreitem) semaphores;
507 SLIST_HEAD(, waiting_for_removal) removal_watchers;
// Pending attach waiter for this job, if any (see waiting4attach_find).
508 struct waiting4attach *w4a;
/* NOTE(review): original/alias presumably link alias jobs to the job they
 * alias (job_new_alias) — confirm against the alias-creation path.
 */
509 job_t original;
510 job_t alias;
// Preferred binary CPU types and their count (posix_spawn binpref).
511 cpu_type_t *j_binpref;
512 size_t j_binpref_cnt;
// Mach ports used for bootstrap service, exit-status delivery, and spawn replies.
513 mach_port_t j_port;
514 mach_port_t exit_status_dest;
515 mach_port_t exit_status_port;
516 mach_port_t spawn_reply_port;
517 uid_t mach_uid;
// Owning job manager.
518 jobmgr_t mgr;
// Program arguments and paths imported from the job plist.
519 size_t argc;
520 char **argv;
521 char *prog;
522 char *rootdir;
523 char *workingdir;
524 char *username;
525 char *groupname;
526 char *stdinpath;
527 char *stdoutpath;
528 char *stderrpath;
529 char *alt_exc_handler;
530 char *cfbundleidentifier;
// Number of times the job has been run (see LAUNCHD_LOG_FAILED_EXEC_FREQ use).
531 unsigned int nruns;
532 uint64_t trt;
533 #if HAVE_SANDBOX
534 char *seatbelt_profile;
535 uint64_t seatbelt_flags;
536 char *container_identifier;
537 #endif
538 #if HAVE_QUARANTINE
539 void *quarantine_data;
540 size_t quarantine_data_sz;
541 #endif
// Current process id; uniqueid identifies the job record itself.
542 pid_t p;
543 uint64_t uniqueid;
544 int last_exit_status;
545 int stdin_fd;
546 int fork_fd;
547 int nice;
548 uint32_t pstype;
549 uint32_t psproctype;
550 int32_t jetsam_priority;
551 int32_t jetsam_memlimit;
552 int32_t main_thread_priority;
// Seconds for the two job timeouts (see LAUNCHD_DEFAULT_EXIT_TIMEOUT).
553 uint32_t timeout;
554 uint32_t exit_timeout;
555 uint64_t sent_signal_time;
556 uint64_t start_time;
557 uint32_t min_run_time;
558 bool unthrottle;
559 uint32_t start_interval;
560 uint32_t peruser_suspend_count;
561 uuid_t instance_id;
// Umask applied when setmask (below) is true.
562 mode_t mask;
// Audit session port/id for the job.
563 mach_port_t asport;
564 au_asid_t asid;
565 uuid_t expected_audit_uuid;
566 bool
567 // man launchd.plist --> Debug
568 debug:1,
569 // man launchd.plist --> KeepAlive == false
570 ondemand:1,
571 // man launchd.plist --> SessionCreate
572 session_create:1,
573 // man launchd.plist --> LowPriorityIO
574 low_pri_io:1,
575 // man launchd.plist --> InitGroups
576 no_init_groups:1,
577 /* A legacy mach_init concept to make bootstrap_create_server/service()
578 * work
579 */
580 priv_port_has_senders:1,
581 // A hack during job importing
582 importing_global_env:1,
583 // A hack during job importing
584 importing_hard_limits:1,
585 // man launchd.plist --> Umask
586 setmask:1,
587 // A process that launchd knows about but doesn't manage.
588 anonymous:1,
589 // A legacy mach_init concept to detect sick jobs
590 checkedin:1,
591 // A job created via bootstrap_create_server()
592 legacy_mach_job:1,
593 // A job created via spawn_via_launchd()
594 legacy_LS_job:1,
595 // A legacy job that wants inetd compatible semantics
596 inetcompat:1,
597 // A twist on inetd compatibility
598 inetcompat_wait:1,
599 /* An event fired and the job should start, but not necessarily right
600 * away.
601 */
602 start_pending:1,
603 // man launchd.plist --> EnableGlobbing
604 globargv:1,
605 // man launchd.plist --> WaitForDebugger
606 wait4debugger:1,
607 // One-shot WaitForDebugger.
608 wait4debugger_oneshot:1,
609 // MachExceptionHandler == true
610 internal_exc_handler:1,
611 // A hack to support an option of spawn_via_launchd()
612 stall_before_exec:1,
613 /* man launchd.plist --> LaunchOnlyOnce.
614 *
615 * Note: <rdar://problem/5465184> Rename this to "HopefullyNeverExits".
616 */
617 only_once:1,
618 /* Make job_ignore() / job_watch() work. If these calls were balanced,
619 * then this wouldn't be necessarily.
620 */
621 currently_ignored:1,
622 /* A job that forced all other jobs to be temporarily launch-on-
623 * demand
624 */
625 forced_peers_to_demand_mode:1,
626 // man launchd.plist --> Nice
627 setnice:1,
628 /* A job was asked to be unloaded/removed while running, we'll remove it
629 * after it exits.
630 */
631 removal_pending:1,
632 // job_kill() was called.
633 sent_sigkill:1,
634 // Enter the kernel debugger before killing a job.
635 debug_before_kill:1,
636 // A hack that launchd+launchctl use during jobmgr_t creation.
637 weird_bootstrap:1,
638 // man launchd.plist --> StartOnMount
639 start_on_mount:1,
640 // This job is a per-user launchd managed by the PID 1 launchd.
641 per_user:1,
642 // A job thoroughly confused launchd. We need to unload it ASAP.
643 unload_at_mig_return:1,
644 // man launchd.plist --> AbandonProcessGroup
645 abandon_pg:1,
646 /* During shutdown, do not send SIGTERM to stray processes in the
647 * process group of this job.
648 */
649 ignore_pg_at_shutdown:1,
650 /* Don't let this job create new 'job_t' objects in launchd. Has been
651 * seriously overloaded for the purposes of sandboxing.
652 */
653 deny_job_creation:1,
654 // man launchd.plist --> EnableTransactions
655 enable_transactions:1,
656 // The job was sent SIGKILL because it was clean.
657 clean_kill:1,
658 // The job has an OtherJobEnabled KeepAlive criterion.
659 nosy:1,
660 // The job exited due to a crash.
661 crashed:1,
662 // We've received NOTE_EXIT for the job and reaped it.
663 reaped:1,
664 // job_stop() was called.
665 stopped:1,
666 /* The job is to be kept alive continuously, but it must first get an
667 * initial kick off.
668 */
669 needs_kickoff:1,
670 // The job is a bootstrapper.
671 is_bootstrapper:1,
672 // The job owns the console.
673 has_console:1,
674 /* The job runs as a non-root user on embedded but has select privileges
675 * of the root user. This is SpringBoard.
676 */
677 embedded_god:1,
678 // The job is responsible for drawing the home screen on embedded.
679 embedded_home:1,
680 // We got NOTE_EXEC for the job.
681 did_exec:1,
682 // The job is an XPC service, and XPC proxy successfully exec(3)ed.
683 xpcproxy_did_exec:1,
684 // The (anonymous) job called vprocmgr_switch_to_session().
685 holds_ref:1,
686 // The job has Jetsam limits in place.
687 jetsam_properties:1,
688 // The job's Jetsam memory limits should only be applied in the background
689 jetsam_memory_limit_background:1,
690 /* This job was created as the result of a look up of a service provided
691 * by a MultipleInstance job.
692 */
693 dedicated_instance:1,
694 // The job supports creating additional instances of itself.
695 multiple_instances:1,
696 /* The sub-job was already removed from the parent's list of
697 * sub-jobs.
698 */
699 former_subjob:1,
700 /* The job is responsible for monitoring external events for this
701 * launchd.
702 */
703 event_monitor:1,
704 // The event monitor job has retrieved the initial list of events.
705 event_monitor_ready2signal:1,
706 // A lame hack.
707 removing:1,
708 // Disable ASLR when launching this job.
709 disable_aslr:1,
710 // The job is an XPC Service.
711 xpc_service:1,
712 // The job is the Performance team's shutdown monitor.
713 shutdown_monitor:1,
714 // We should open a transaction for the job when shutdown begins.
715 dirty_at_shutdown:1,
716 /* The job was sent SIGKILL but did not exit in a timely fashion,
717 * indicating a kernel bug.
718 */
719 workaround9359725:1,
720 // The job is the XPC domain bootstrapper.
721 xpc_bootstrapper:1,
722 // The job is an app (on either iOS or OS X) and has different resource
723 // limitations.
724 app:1,
725 // FairPlay decryption failed on the job. This should only ever happen
726 // to apps.
727 fpfail:1,
728 // The job failed to exec(3) for reasons that may be transient, so we're
729 // waiting for UserEventAgent to tell us when it's okay to try spawning
730 // again (i.e. when the executable path appears, when the UID appears,
731 // etc.).
732 waiting4ok:1,
733 // The job exited due to memory pressure.
734 jettisoned:1,
735 // The job supports idle-exit.
736 idle_exit:1,
737 // The job was implicitly reaped by the kernel.
738 implicit_reap:1,
// NOTE(review): the four flags below are undocumented upstream; meanings
// inferred from their names only — confirm at their use sites.
739 system_app :1,
740 joins_gui_session :1,
741 low_priority_background_io :1,
742 legacy_timers :1;
743
// Job label (unique identifier), allocated inline at the tail.
744 const char label[0];
745 };
746
// Hash functions for the label and machservice-name tables.
747 static size_t hash_label(const char *label) __attribute__((pure));
748 static size_t hash_ms(const char *msstr) __attribute__((pure));
// Jobs with OtherJob* KeepAlive criteria, and the global pid hash of active jobs.
749 static SLIST_HEAD(, job_s) s_curious_jobs;
750 static LIST_HEAD(, job_s) managed_actives[ACTIVE_JOB_HASH_SIZE];
751
/* Assertion wrappers that report through job_log_bug with the job as
 * logging context (mirrors the jobmgr_assumes* macros above).
 */
752 #define job_assumes(j, e) os_assumes_ctx(job_log_bug, j, (e))
753 #define job_assumes_zero(j, e) os_assumes_zero_ctx(job_log_bug, j, (e))
754 #define job_assumes_zero_p(j, e) posix_assumes_zero_ctx(job_log_bug, j, (e))
755
756 static void job_import_keys(launch_data_t obj, const char *key, void *context);
757 static void job_import_bool(job_t j, const char *key, bool value);
758 static void job_import_string(job_t j, const char *key, const char *value);
759 static void job_import_integer(job_t j, const char *key, long long value);
760 static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
761 static void job_import_array(job_t j, const char *key, launch_data_t value);
762 static void job_import_opaque(job_t j, const char *key, launch_data_t value);
763 static bool job_set_global_on_demand(job_t j, bool val);
764 static const char *job_active(job_t j);
765 static void job_watch(job_t j);
766 static void job_ignore(job_t j);
767 static void job_reap(job_t j);
768 static bool job_useless(job_t j);
769 static bool job_keepalive(job_t j);
770 static void job_dispatch_curious_jobs(job_t j);
771 static void job_start(job_t j);
772 static void job_start_child(job_t j) __attribute__((noreturn));
773 static void job_setup_attributes(job_t j);
774 static bool job_setup_machport(job_t j);
775 static kern_return_t job_setup_exit_port(job_t j);
776 static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
777 static void job_postfork_become_user(job_t j);
778 static void job_postfork_test_user(job_t j);
779 static void job_log_pids_with_weird_uids(job_t j);
780 static void job_setup_exception_port(job_t j, task_t target_task);
781 static void job_callback(void *obj, struct kevent *kev);
782 static void job_callback_proc(job_t j, struct kevent *kev);
783 static void job_callback_timer(job_t j, void *ident);
784 static void job_callback_read(job_t j, int ident);
785 static void job_log_stray_pg(job_t j);
786 static void job_log_children_without_exec(job_t j);
787 static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
788 static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
789 static job_t job_new_alias(jobmgr_t jm, job_t src);
790 static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
791 static job_t job_new_subjob(job_t j, uuid_t identifier);
792 static void job_kill(job_t j);
793 static void job_uncork_fork(job_t j);
794 static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
795 static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
796 static bool job_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);
797 static void job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status);
798 #if HAVE_SYSTEMSTATS
799 static void job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status);
800 #endif
801 static void job_set_exception_port(job_t j, mach_port_t port);
802 static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
803 static void job_open_shutdown_transaction(job_t ji);
804 static void job_close_shutdown_transaction(job_t ji);
805 static launch_data_t job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport);
806 static void job_setup_per_user_directory(job_t j, uid_t uid, const char *path);
807 static void job_setup_per_user_directories(job_t j, uid_t uid, const char *label);
808 static void job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data);
809 static void job_update_jetsam_memory_limit(job_t j, int32_t limit);
810
811 #if TARGET_OS_EMBEDDED
812 static bool job_import_defaults(launch_data_t pload);
813 #endif
814
/* Maps XPC jetsam bands to kernel jetsam priorities (used by
 * job_update_jetsam_properties / xpc_process_set_jetsam_band).
 */
815 static struct priority_properties_t {
816 long long band;
817 int priority;
818 } _launchd_priority_map[] = {
819 { XPC_JETSAM_BAND_SUSPENDED, JETSAM_PRIORITY_IDLE },
820 { XPC_JETSAM_BAND_BACKGROUND_OPPORTUNISTIC, JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC },
821 { XPC_JETSAM_BAND_BACKGROUND, JETSAM_PRIORITY_BACKGROUND },
822 { XPC_JETSAM_BAND_MAIL, JETSAM_PRIORITY_MAIL },
823 { XPC_JETSAM_BAND_PHONE, JETSAM_PRIORITY_PHONE },
824 { XPC_JETSAM_BAND_UI_SUPPORT, JETSAM_PRIORITY_UI_SUPPORT },
825 { XPC_JETSAM_BAND_FOREGROUND_SUPPORT, JETSAM_PRIORITY_FOREGROUND_SUPPORT },
826 { XPC_JETSAM_BAND_FOREGROUND, JETSAM_PRIORITY_FOREGROUND },
// Note: AUDIO and ACCESSORY intentionally share one kernel priority.
827 { XPC_JETSAM_BAND_AUDIO, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
828 { XPC_JETSAM_BAND_ACCESSORY, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
829 { XPC_JETSAM_BAND_CRITICAL, JETSAM_PRIORITY_CRITICAL },
830 { XPC_JETSAM_BAND_TELEPHONY, JETSAM_PRIORITY_TELEPHONY },
831 };
832
/* Maps launchd.plist ResourceLimits/HardResourceLimits keys to the
 * corresponding RLIMIT_* selectors (consumed by limititem_setup).
 */
833 static const struct {
834 const char *key;
835 int val;
836 } launchd_keys2limits[] = {
837 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
838 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
839 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
840 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
841 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
842 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
843 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
844 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
845 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
846 };
847
848 static time_t cronemu(int mon, int mday, int hour, int min);
849 static time_t cronemu_wday(int wday, int hour, int min);
850 static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
851 static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
852 static bool cronemu_hour(struct tm *wtm, int hour, int min);
853 static bool cronemu_min(struct tm *wtm, int min);
854
855 // miscellaneous file local functions
856 static size_t get_kern_max_proc(void);
857 static char **mach_cmd2argv(const char *string);
858 static size_t our_strhash(const char *s) __attribute__((pure));
859
860 void eliminate_double_reboot(void);
861
862 #pragma mark XPC Domain Forward Declarations
863 static job_t _xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
864 static int _xpc_domain_import_services(job_t j, launch_data_t services);
865
866 #pragma mark XPC Event Forward Declarations
867 static int xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms);
868 static int xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply);
869 static int xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply);
870 static int xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply);
871 static int xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
872 static int xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply);
873 static int xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
874 static int xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply);
875
876 #pragma mark XPC Process Forward Declarations
877 static int xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply);
878 static int xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply);
879
// file local globals
// The single job granted "hand of god" powers on embedded; gates job_stop()
// and job_remove() by username when launchd_embedded_handofgod is set.
static job_t _launchd_embedded_god = NULL;
static job_t _launchd_embedded_home = NULL;
// Counts of managed vs. anonymous child jobs; total_anon_children is bumped
// in job_new_anonymous() and both are reported by jobmgr_still_alive_with_check().
static size_t total_children;
static size_t total_anon_children;
static mach_port_t the_exception_server;
// NOTE(review): presumably a workaround keyed to rdar://5477111 — purpose not
// visible in this file chunk; confirm before relying on it.
static job_t workaround_5477111;
// Jobs waiting to be adopted into an audit session (see expected_audit_uuid
// handling in job_remove()).
static LIST_HEAD(, job_s) s_needing_sessions;
static LIST_HEAD(, eventsystem) _s_event_systems;
static struct eventsystem *_launchd_support_system;
// Singleton special-role jobs; cleared in job_remove() when the owning job
// goes away (event_monitor / xpc_bootstrapper / shutdown_monitor flags).
static job_t _launchd_event_monitor;
static job_t _launchd_xpc_bootstrapper;
static job_t _launchd_shutdown_monitor;

#if TARGET_OS_EMBEDDED
static xpc_object_t _launchd_defaults_cache;

// Embedded has no audit sessions: dead-name the port and use a plain pid_t.
mach_port_t launchd_audit_port = MACH_PORT_DEAD;
pid_t launchd_audit_session = 0;
#else
mach_port_t launchd_audit_port = MACH_PORT_NULL;
au_asid_t launchd_audit_session = AU_DEFAUDITSID;
#endif

// NOTE(review): looks like a file descriptor held open for some purpose not
// visible in this chunk; -1 means "not open".
static int s_no_hang_fd = -1;

// process wide globals
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool launchd_shutdown_debugging = false;
bool launchd_verbose_boot = false;
// When true, only the "god" job (or matching username) may stop/remove jobs;
// see the TARGET_OS_EMBEDDED checks in job_stop()/job_remove().
bool launchd_embedded_handofgod = false;
bool launchd_runtime_busy_time = false;
913
914 void
915 job_ignore(job_t j)
916 {
917 struct socketgroup *sg;
918 struct machservice *ms;
919
920 if (j->currently_ignored) {
921 return;
922 }
923
924 job_log(j, LOG_DEBUG, "Ignoring...");
925
926 j->currently_ignored = true;
927
928 SLIST_FOREACH(sg, &j->sockets, sle) {
929 socketgroup_ignore(j, sg);
930 }
931
932 SLIST_FOREACH(ms, &j->machservices, sle) {
933 machservice_ignore(j, ms);
934 }
935 }
936
937 void
938 job_watch(job_t j)
939 {
940 struct socketgroup *sg;
941 struct machservice *ms;
942
943 if (!j->currently_ignored) {
944 return;
945 }
946
947 job_log(j, LOG_DEBUG, "Watching...");
948
949 j->currently_ignored = false;
950
951 SLIST_FOREACH(sg, &j->sockets, sle) {
952 socketgroup_watch(j, sg);
953 }
954
955 SLIST_FOREACH(ms, &j->machservices, sle) {
956 machservice_watch(j, ms);
957 }
958 }
959
/* Ask a running job to stop, preferring proc_terminate() (which picks the
 * signal) and falling back to a plain SIGTERM via kill2(). No-op for jobs
 * that have no process, are already stopping, or are anonymous (we don't own
 * their lifecycle). On embedded, the "hand of god" policy may veto the stop
 * with EPERM/EINVAL. Arms an exit-timeout kevent so we can escalate if the
 * job doesn't die.
 */
void
job_stop(job_t j)
{
	int sig;

	if (unlikely(!j->p || j->stopped || j->anonymous)) {
		return;
	}

#if TARGET_OS_EMBEDDED
	// Under hand-of-god mode, only jobs whose username matches the god job's
	// username may be stopped; everything else fails with EPERM/EINVAL.
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!_launchd_embedded_god->username || !j->username) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	// Record when we asked the job to die so exit-timeout accounting works.
	j->sent_signal_time = runtime_get_opaque_time();

	job_log(j, LOG_DEBUG | LOG_CONSOLE, "Stopping job...");

	int error = -1;
	error = proc_terminate(j->p, &sig);
	if (error) {
		// proc_terminate() failed; fall back to delivering SIGTERM ourselves.
		job_log(j, LOG_ERR | LOG_CONSOLE, "Could not terminate job: %d: %s", error, strerror(error));
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Using fallback option to terminate job...");
		error = kill2(j->p, SIGTERM);
		if (error) {
			job_log(j, LOG_ERR, "Could not signal job: %d: %s", error, strerror(error));
		} else {
			sig = SIGTERM;
		}
	}

	if (!error) {
		switch (sig) {
		case SIGKILL:
			j->sent_sigkill = true;
			j->clean_kill = true;

			/* We cannot effectively simulate an exit for jobs during the course
			 * of a normal run. Even if we pretend that the job exited, we will
			 * still not have gotten the receive rights associated with the
			 * job's MachServices back, so we cannot safely respawn it.
			 */
			if (j->mgr->shutting_down) {
				error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j);
				(void)job_assumes_zero_p(j, error);
			}

			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sent job SIGKILL.");
			break;
		case SIGTERM:
			// Arm the escalation timer; when it fires we escalate to SIGKILL.
			// exit_timeout == 0 means "wait forever".
			if (j->exit_timeout) {
				error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j);
				(void)job_assumes_zero_p(j, error);
			} else {
				job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
			}
			job_log(j, LOG_DEBUG, "Sent job SIGTERM.");
			break;
		default:
			job_log(j, LOG_ERR | LOG_CONSOLE, "Job was sent unexpected signal: %d: %s", sig, strsignal(sig));
			break;
		}
	}

	// Marked stopped even if signalling failed, so we don't retry endlessly.
	j->stopped = true;
}
1038
1039 launch_data_t
1040 job_export(job_t j)
1041 {
1042 launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
1043
1044 if (r == NULL) {
1045 return NULL;
1046 }
1047
1048 if ((tmp = launch_data_new_string(j->label))) {
1049 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
1050 }
1051 if ((tmp = launch_data_new_string(j->mgr->name))) {
1052 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
1053 }
1054 if ((tmp = launch_data_new_bool(j->ondemand))) {
1055 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
1056 }
1057
1058 long long status = j->last_exit_status;
1059 if (j->fpfail) {
1060 status = LAUNCH_EXITSTATUS_FAIRPLAY_FAIL;
1061 }
1062 if ((tmp = launch_data_new_integer(status))) {
1063 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
1064 }
1065
1066 if (j->p && (tmp = launch_data_new_integer(j->p))) {
1067 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
1068 }
1069 if ((tmp = launch_data_new_integer(j->timeout))) {
1070 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
1071 }
1072 if (j->prog && (tmp = launch_data_new_string(j->prog))) {
1073 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
1074 }
1075 if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
1076 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
1077 }
1078 if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
1079 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
1080 }
1081 if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
1082 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
1083 }
1084 if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
1085 size_t i;
1086
1087 for (i = 0; i < j->argc; i++) {
1088 if ((tmp2 = launch_data_new_string(j->argv[i]))) {
1089 launch_data_array_set_index(tmp, tmp2, i);
1090 }
1091 }
1092
1093 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
1094 }
1095
1096 if (j->enable_transactions && (tmp = launch_data_new_bool(true))) {
1097 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);
1098 }
1099
1100 if (j->session_create && (tmp = launch_data_new_bool(true))) {
1101 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
1102 }
1103
1104 if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
1105 if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
1106 launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
1107 }
1108 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
1109 }
1110
1111 if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
1112 struct socketgroup *sg;
1113 unsigned int i;
1114
1115 SLIST_FOREACH(sg, &j->sockets, sle) {
1116 if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
1117 for (i = 0; i < sg->fd_cnt; i++) {
1118 if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
1119 launch_data_array_set_index(tmp2, tmp3, i);
1120 }
1121 }
1122 launch_data_dict_insert(tmp, tmp2, sg->name);
1123 }
1124 }
1125
1126 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
1127 }
1128
1129 if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
1130 struct machservice *ms;
1131
1132 tmp3 = NULL;
1133
1134 SLIST_FOREACH(ms, &j->machservices, sle) {
1135 if (ms->per_pid) {
1136 if (tmp3 == NULL) {
1137 tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
1138 }
1139 if (tmp3) {
1140 tmp2 = launch_data_new_machport(MACH_PORT_NULL);
1141 launch_data_dict_insert(tmp3, tmp2, ms->name);
1142 }
1143 } else {
1144 tmp2 = launch_data_new_machport(MACH_PORT_NULL);
1145 launch_data_dict_insert(tmp, tmp2, ms->name);
1146 }
1147 }
1148
1149 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);
1150
1151 if (tmp3) {
1152 launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
1153 }
1154 }
1155
1156 return r;
1157 }
1158
1159 static void
1160 jobmgr_log_active_jobs(jobmgr_t jm)
1161 {
1162 const char *why_active;
1163 jobmgr_t jmi;
1164 job_t ji;
1165
1166 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
1167 jobmgr_log_active_jobs(jmi);
1168 }
1169
1170 int level = LOG_DEBUG;
1171 if (pid1_magic) {
1172 level |= LOG_CONSOLE;
1173 }
1174
1175 LIST_FOREACH(ji, &jm->jobs, sle) {
1176 if ((why_active = job_active(ji))) {
1177 if (ji->p != 1) {
1178 job_log(ji, level, "%s", why_active);
1179
1180 uint32_t flags = 0;
1181 (void)proc_get_dirty(ji->p, &flags);
1182 if (!(flags & PROC_DIRTY_TRACKED)) {
1183 continue;
1184 }
1185
1186 char *dirty = "clean";
1187 if (flags & PROC_DIRTY_IS_DIRTY) {
1188 dirty = "dirty";
1189 }
1190
1191 char *idle_exit = "idle-exit unsupported";
1192 if (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT) {
1193 idle_exit = "idle-exit supported";
1194 }
1195
1196 job_log(ji, level, "Killability: %s/%s", dirty, idle_exit);
1197 }
1198 }
1199 }
1200 }
1201
1202 static void
1203 jobmgr_still_alive_with_check(jobmgr_t jm)
1204 {
1205 int level = LOG_DEBUG;
1206 if (pid1_magic) {
1207 level |= LOG_CONSOLE;
1208 }
1209
1210 jobmgr_log(jm, level, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
1211 jobmgr_log_active_jobs(jm);
1212 launchd_log_push();
1213 }
1214
/* Begin shutting down a job manager and, recursively, all of its sub-managers.
 * Records the shutdown start time, marks the manager as shutting down, and —
 * for the root manager — spawns the shutdown monitor (PID 1 only) and arms a
 * repeating 5-second timer used to report shutdown progress. Returns the
 * result of kicking off garbage collection on this manager.
 */
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim the new line that asctime_r(3) puts there for some reason.
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	// Set before recursing so sub-managers see a shutting-down parent.
	jm->shutting_down = true;

	// SAFE variant: jobmgr_shutdown() may cause a submgr to be removed.
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (!jm->parentmgr) {
		if (pid1_magic) {
			// Spawn the shutdown monitor.
			if (_launchd_shutdown_monitor && !_launchd_shutdown_monitor->p) {
				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
				job_dispatch(_launchd_shutdown_monitor, true);
			}
		}

		// Progress timer: fires every 5 seconds during shutdown (handled via
		// jobmgr_still_alive_with_check).
		(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));
	}

	return jobmgr_do_garbage_collection(jm);
}
1257
/* Tear down a job manager: recursively remove all sub-managers, remove every
 * job (forcibly clearing the PID on jobs still marked active), drop all
 * attach-waiters, release all Mach ports held on behalf of the requestor,
 * and log the shutdown duration. For the root manager under PID 1 this ends
 * in reboot(2); for any other parentless manager it ends in exit(3). Frees
 * the manager itself on return (when it returns at all).
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!SLIST_EMPTY(&jm->submgrs)) {
		// Count the submgrs we remove; the assumes() below flags the case
		// where the list was non-empty but nothing got removed.
		size_t cnt = 0;
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
			cnt++;
		}

		(void)jobmgr_assumes_zero(jm, cnt);
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && ji->p != 0) {
			// Clear the PID so job_remove() doesn't pend the removal on exit.
			job_log(ji, LOG_ERR, "Job is still active at job manager teardown.");
			ji->p = 0;
		}

		job_remove(ji);
	}

	struct waiting4attach *w4ai = NULL;
	while ((w4ai = LIST_FIRST(&jm->attaches))) {
		waiting4attach_delete(jm, w4ai);
	}

	// Release the Mach rights this manager holds on behalf of its creator.
	if (jm->req_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_port));
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_close_recv(jm->jm_port));
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_bsport));
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_excport));
	}
	if (MACH_PORT_VALID(jm->req_asport)) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_asport));
	}
	if (jm->req_rport) {
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			(void)jobmgr_assumes_zero(jm, kr);
		}
	}
	if (jm->req_ctx) {
		(void)jobmgr_assumes_zero(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz));
	}

	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim asctime_r(3)'s trailing newline.
	date[24] = 0;

	// Elapsed time since jobmgr_shutdown() stamped shutdown_time.
	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);

		// Hack for the guest user so that its stuff doesn't persist.
		//
		// <rdar://problem/14527875>
		if (strcmp(jm->name, VPROCMGR_SESSION_AQUA) == 0 && getuid() == 201) {
			raise(SIGTERM);
		}
	} else if (pid1_magic) {
		// Root manager under PID 1: this is the end of userspace shutdown.
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		launchd_closelog();
		(void)jobmgr_assumes_zero_p(jm, reboot(jm->reboot_flags));
	} else {
		// Parentless non-PID-1 launchd (per-user instance): just exit.
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		launchd_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
1360
/* Remove a job entirely: detach it from its manager, tear down every attached
 * resource (sockets, calendar intervals, environment, limits, MachServices,
 * semaphores, events, timers, ports), recursively remove sub-jobs, and free
 * the job structure. If the job still has a non-anonymous process, removal is
 * pended until the process exits (job_stop() is issued instead). Aliases get
 * a minimal teardown. On embedded, the hand-of-god policy may veto the
 * removal with EPERM/EINVAL.
 */
void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

	if (j->alias) {
		/* HACK: Egregious code duplication. But as with machservice_delete(),
		 * job aliases can't (and shouldn't) have any complex behaviors
		 * associated with them.
		 */
		while ((ms = SLIST_FIRST(&j->machservices))) {
			machservice_delete(j, ms, false);
		}

		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, label_hash_sle);
		free(j);
		return;
	}

#if TARGET_OS_EMBEDDED
	// Same username-gated hand-of-god policy as job_stop().
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!(_launchd_embedded_god->username && j->username)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	/* Do this BEFORE we check and see whether the job is still active. If we're
	 * a sub-job, we're being removed due to the parent job removing us.
	 * Therefore, the parent job will free itself after this call completes. So
	 * if we defer removing ourselves from the parent's list, we'll crash when
	 * we finally get around to it.
	 */
	if (j->dedicated_instance && !j->former_subjob) {
		LIST_REMOVE(j, subjob_sle);
		j->former_subjob = true;
	}

	if (unlikely(j->p)) {
		if (j->anonymous) {
			// Anonymous jobs have no exit path through us; reap immediately.
			job_reap(j);
		} else {
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}

			// Removal resumes when the process-exit event arrives.
			return;
		}
	}

	if (!j->removing) {
		j->removing = true;
		job_dispatch_curious_jobs(j);
	}

	ipc_close_all_with_job(j);

	// Undo a global-on-demand override this job may have imposed on peers.
	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	if (job_assumes_zero(j, j->fork_fd)) {
		(void)posix_assumes_zero(runtime_close(j->fork_fd));
	}

	if (j->stdin_fd) {
		(void)posix_assumes_zero(runtime_close(j->stdin_fd));
	}

	if (j->j_port) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	}

	// Drain every per-job resource list.
	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	struct externalevent *eei = NULL;
	while ((eei = LIST_FIRST(&j->events))) {
		externalevent_delete(eei);
	}

	// Clear the singleton-role globals if this job held one of them.
	if (j->event_monitor) {
		_launchd_event_monitor = NULL;
	}
	if (j->xpc_bootstrapper) {
		_launchd_xpc_bootstrapper = NULL;
	}

	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
	if (j->cfbundleidentifier) {
		free(j->cfbundleidentifier);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
	if (j->container_identifier) {
		free(j->container_identifier);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	if (j->start_interval) {
		runtime_del_weak_ref();
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
	}
	if (j->exit_timeout) {
		/* If this fails, it just means the timer's already fired, so no need to
		 * wrap it in an assumes() macro.
		 */
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->asport != MACH_PORT_NULL) {
		(void)job_assumes_zero(j, launchd_mport_deallocate(j->asport));
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if (j->embedded_god) {
		_launchd_embedded_god = NULL;
	}
	if (j->embedded_home) {
		_launchd_embedded_home = NULL;
	}
	if (j->shutdown_monitor) {
		_launchd_shutdown_monitor = NULL;
	}

	(void)kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	// Sub-jobs cannot outlive their parent.
	job_t ji = NULL;
	job_t jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
		job_remove(ji);
	}

	job_log(j, LOG_DEBUG, "Removed");

	// Poison the callback so a stale kevent on this job is caught loudly.
	j->kqjob_callback = (kq_callback)0x8badf00d;
	free(j);
}
1581
1582 void
1583 socketgroup_setup(launch_data_t obj, const char *key, void *context)
1584 {
1585 launch_data_t tmp_oai;
1586 job_t j = context;
1587 size_t i, fd_cnt = 1;
1588 int *fds;
1589
1590 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1591 fd_cnt = launch_data_array_get_count(obj);
1592 }
1593
1594 fds = alloca(fd_cnt * sizeof(int));
1595
1596 for (i = 0; i < fd_cnt; i++) {
1597 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1598 tmp_oai = launch_data_array_get_index(obj, i);
1599 } else {
1600 tmp_oai = obj;
1601 }
1602
1603 fds[i] = launch_data_get_fd(tmp_oai);
1604 }
1605
1606 socketgroup_new(j, key, fds, fd_cnt);
1607
1608 ipc_revoke_fds(obj);
1609 }
1610
1611 bool
1612 job_set_global_on_demand(job_t j, bool val)
1613 {
1614 if (j->forced_peers_to_demand_mode && val) {
1615 return false;
1616 } else if (!j->forced_peers_to_demand_mode && !val) {
1617 return false;
1618 }
1619
1620 if ((j->forced_peers_to_demand_mode = val)) {
1621 j->mgr->global_on_demand_cnt++;
1622 } else {
1623 j->mgr->global_on_demand_cnt--;
1624 }
1625
1626 if (j->mgr->global_on_demand_cnt == 0) {
1627 jobmgr_dispatch_all(j->mgr, false);
1628 }
1629
1630 return true;
1631 }
1632
1633 bool
1634 job_setup_machport(job_t j)
1635 {
1636 if (job_assumes_zero(j, launchd_mport_create_recv(&j->j_port)) != KERN_SUCCESS) {
1637 goto out_bad;
1638 }
1639
1640 if (job_assumes_zero(j, runtime_add_mport(j->j_port, job_server)) != KERN_SUCCESS) {
1641 goto out_bad2;
1642 }
1643
1644 if (job_assumes_zero(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS)) != KERN_SUCCESS) {
1645 (void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
1646 goto out_bad;
1647 }
1648
1649 return true;
1650 out_bad2:
1651 (void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
1652 out_bad:
1653 return false;
1654 }
1655
1656 kern_return_t
1657 job_setup_exit_port(job_t j)
1658 {
1659 kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
1660 if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
1661 return MACH_PORT_NULL;
1662 }
1663
1664 struct mach_port_limits limits = {
1665 .mpl_qlimit = 1,
1666 };
1667 kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
1668 (void)job_assumes_zero(j, kr);
1669
1670 kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
1671 if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
1672 (void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
1673 j->exit_status_port = MACH_PORT_NULL;
1674 }
1675
1676 return kr;
1677 }
1678
1679 job_t
1680 job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
1681 {
1682 const char **argv = (const char **)mach_cmd2argv(cmd);
1683 job_t jr = NULL;
1684
1685 if (!argv) {
1686 goto out_bad;
1687 }
1688
1689 jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
1690 free(argv);
1691
1692 // Job creation can be denied during shutdown.
1693 if (unlikely(jr == NULL)) {
1694 goto out_bad;
1695 }
1696
1697 jr->mach_uid = uid;
1698 jr->ondemand = ond;
1699 jr->legacy_mach_job = true;
1700 jr->abandon_pg = true;
1701 jr->priv_port_has_senders = true; // the IPC that called us will make-send on this port
1702
1703 if (!job_setup_machport(jr)) {
1704 goto out_bad;
1705 }
1706
1707 job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");
1708
1709 return jr;
1710
1711 out_bad:
1712 if (jr) {
1713 job_remove(jr);
1714 }
1715 return NULL;
1716 }
1717
/* Create an "anonymous" job to track a process launchd did not spawn. The
 * process's identity is pulled from libproc; various kernel inconsistencies
 * (zombie state, P_SUGID, mixed credentials, ptrace-induced parent cycles)
 * are logged or rejected. A kevent is registered to track exec/fork/exit for
 * the PID. Returns the new job or NULL with errno set.
 */
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	struct proc_bsdshortinfo proc;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (anonpid == 0) {
		errno = EINVAL;
		return NULL;
	}

	if (anonpid >= 100000) {
		/* The kernel current defines PID_MAX to be 99999, but that define isn't
		 * exported.
		 */
		launchd_syslog(LOG_WARNING, "Did PID_MAX change? Got request from PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	/* libproc returns the number of bytes written into the buffer upon success,
	 * zero on failure. I'd much rather it return -1 on failure, like sysctl(3).
	 */
	if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}
		return NULL;
	}

	if (proc.pbsi_comm[0] == '\0') {
		launchd_syslog(LOG_WARNING, "Blank command for PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(proc.pbsi_status == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
	}

	if (unlikely(proc.pbsi_flags & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
	}

	kp_euid = proc.pbsi_uid;
	kp_uid = proc.pbsi_ruid;
	kp_svuid = proc.pbsi_svuid;
	kp_egid = proc.pbsi_gid;
	kp_gid = proc.pbsi_rgid;
	kp_svgid = proc.pbsi_svgid;

	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
	}

	/* "Fix" for when the kernel turns the process tree into a weird, cyclic
	 * graph.
	 *
	 * See <rdar://problem/7264615> for the symptom and <rdar://problem/5020256>
	 * as to why this can happen.
	 */
	if ((pid_t)proc.pbsi_ppid == anonpid) {
		jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). Ignoring: %s", proc.pbsi_comm);
		errno = EINVAL;
		return NULL;
	}

	/* HACK: Normally, job_new() returns an error during shutdown, but anonymous
	 * jobs can pop up during shutdown and need to talk to us.
	 */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	// We only set requestor_pid for XPC domains.
	const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
	if ((jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL))) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		// Anonymous process reaping is messy.
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1)) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(jr, errno);
			}

			// Zombies interact weirdly with kevent(3).
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state)) {
			job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		// NOTE(review): jp is still NULL here (it's only assigned in the
		// switch below), so the parent label is never appended to this log.
		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
	} else {
		(void)os_assumes_zero(errno);
	}

	// Undo our hack from above.
	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	/* This is down here to prevent infinite recursion due to a process
	 * attaching to its parent through ptrace(3) -- causing a cycle in the
	 * process tree and thereby not making it a tree anymore. We need to make
	 * sure that the anonymous job has been added to the process list so that
	 * we'll find the tracing parent PID of the parent process, which is the
	 * child, when we go looking for it in jobmgr_find_by_pid().
	 *
	 * <rdar://problem/7264615>
	 */
	switch (proc.pbsi_ppid) {
	case 0:
		// The kernel.
		break;
	case 1:
		if (!pid1_magic) {
			break;
		}
		// Fall through.
	default:
		jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
		if (jobmgr_assumes(jm, jp != NULL)) {
			if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
				job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
			}
		}
		break;
	}

	return jr;
}
1862
1863 job_t
1864 job_new_subjob(job_t j, uuid_t identifier)
1865 {
1866 char label[0];
1867 uuid_string_t idstr;
1868 uuid_unparse(identifier, idstr);
1869 size_t label_sz = snprintf(label, 0, "%s.%s", j->label, idstr);
1870
1871 job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
1872 if (nj != NULL) {
1873 nj->kqjob_callback = job_callback;
1874 nj->original = j;
1875 nj->mgr = j->mgr;
1876 nj->min_run_time = j->min_run_time;
1877 nj->timeout = j->timeout;
1878 nj->exit_timeout = j->exit_timeout;
1879
1880 snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
1881
1882 // Set all our simple Booleans that are applicable.
1883 nj->debug = j->debug;
1884 nj->ondemand = j->ondemand;
1885 nj->checkedin = true;
1886 nj->low_pri_io = j->low_pri_io;
1887 nj->setmask = j->setmask;
1888 nj->wait4debugger = j->wait4debugger;
1889 nj->internal_exc_handler = j->internal_exc_handler;
1890 nj->setnice = j->setnice;
1891 nj->abandon_pg = j->abandon_pg;
1892 nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
1893 nj->deny_job_creation = j->deny_job_creation;
1894 nj->enable_transactions = j->enable_transactions;
1895 nj->needs_kickoff = j->needs_kickoff;
1896 nj->currently_ignored = true;
1897 nj->dedicated_instance = true;
1898 nj->xpc_service = j->xpc_service;
1899 nj->xpc_bootstrapper = j->xpc_bootstrapper;
1900 nj->jetsam_priority = j->jetsam_priority;
1901 nj->jetsam_memlimit = j->jetsam_memlimit;
1902 nj->psproctype = j->psproctype;
1903
1904 nj->mask = j->mask;
1905 uuid_copy(nj->instance_id, identifier);
1906
1907 // These jobs are purely on-demand Mach jobs.
1908 // {Hard | Soft}ResourceLimits are not supported.
1909 // JetsamPriority is not supported.
1910
1911 if (j->prog) {
1912 nj->prog = strdup(j->prog);
1913 }
1914 if (j->argv) {
1915 size_t sz = malloc_size(j->argv);
1916 nj->argv = (char **)malloc(sz);
1917 if (nj->argv != NULL) {
1918 // This is the start of our strings.
1919 char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
1920
1921 size_t i = 0;
1922 for (i = 0; i < j->argc; i++) {
1923 (void)strcpy(p, j->argv[i]);
1924 nj->argv[i] = p;
1925 p += (strlen(j->argv[i]) + 1);
1926 }
1927 nj->argv[i] = NULL;
1928 } else {
1929 (void)job_assumes_zero(nj, errno);
1930 }
1931
1932 nj->argc = j->argc;
1933 }
1934
1935 struct machservice *msi = NULL;
1936 SLIST_FOREACH(msi, &j->machservices, sle) {
1937 /* Only copy MachServices that were actually declared in the plist.
1938 * So skip over per-PID ones and ones that were created via
1939 * bootstrap_register().
1940 */
1941 if (msi->upfront) {
1942 mach_port_t mp = MACH_PORT_NULL;
1943 struct machservice *msj = machservice_new(nj, msi->name, &mp, false);
1944 if (msj != NULL) {
1945 msj->reset = msi->reset;
1946 msj->delete_on_destruction = msi->delete_on_destruction;
1947 msj->drain_one_on_crash = msi->drain_one_on_crash;
1948 msj->drain_all_on_crash = msi->drain_all_on_crash;
1949
1950 kern_return_t kr = mach_port_set_attributes(mach_task_self(), msj->port, MACH_PORT_TEMPOWNER, NULL, 0);
1951 (void)job_assumes_zero(j, kr);
1952 } else {
1953 (void)job_assumes_zero(nj, errno);
1954 }
1955 }
1956 }
1957
1958 // We ignore global environment variables.
1959 struct envitem *ei = NULL;
1960 SLIST_FOREACH(ei, &j->env, sle) {
1961 if (envitem_new(nj, ei->key, ei->value, false)) {
1962 (void)job_assumes_zero(nj, errno);
1963 }
1964 }
1965 uuid_string_t val;
1966 uuid_unparse(identifier, val);
1967 if (envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false)) {
1968 (void)job_assumes_zero(nj, errno);
1969 }
1970
1971 if (j->rootdir) {
1972 nj->rootdir = strdup(j->rootdir);
1973 }
1974 if (j->workingdir) {
1975 nj->workingdir = strdup(j->workingdir);
1976 }
1977 if (j->username) {
1978 nj->username = strdup(j->username);
1979 }
1980 if (j->groupname) {
1981 nj->groupname = strdup(j->groupname);
1982 }
1983
1984 /* FIXME: We shouldn't redirect all the output from these jobs to the
1985 * same file. We should uniquify the file names. But this hasn't shown
1986 * to be a problem in practice.
1987 */
1988 if (j->stdinpath) {
1989 nj->stdinpath = strdup(j->stdinpath);
1990 }
1991 if (j->stdoutpath) {
1992 nj->stdoutpath = strdup(j->stdinpath);
1993 }
1994 if (j->stderrpath) {
1995 nj->stderrpath = strdup(j->stderrpath);
1996 }
1997 if (j->alt_exc_handler) {
1998 nj->alt_exc_handler = strdup(j->alt_exc_handler);
1999 }
2000 if (j->cfbundleidentifier) {
2001 nj->cfbundleidentifier = strdup(j->cfbundleidentifier);
2002 }
2003 #if HAVE_SANDBOX
2004 if (j->seatbelt_profile) {
2005 nj->seatbelt_profile = strdup(j->seatbelt_profile);
2006 }
2007 if (j->container_identifier) {
2008 nj->container_identifier = strdup(j->container_identifier);
2009 }
2010 #endif
2011
2012 #if HAVE_QUARANTINE
2013 if (j->quarantine_data) {
2014 nj->quarantine_data = strdup(j->quarantine_data);
2015 }
2016 nj->quarantine_data_sz = j->quarantine_data_sz;
2017 #endif
2018 if (j->j_binpref) {
2019 size_t sz = malloc_size(j->j_binpref);
2020 nj->j_binpref = (cpu_type_t *)malloc(sz);
2021 if (nj->j_binpref) {
2022 memcpy(&nj->j_binpref, &j->j_binpref, sz);
2023 } else {
2024 (void)job_assumes_zero(nj, errno);
2025 }
2026 }
2027
2028 if (j->asport != MACH_PORT_NULL) {
2029 (void)job_assumes_zero(nj, launchd_mport_copy_send(j->asport));
2030 nj->asport = j->asport;
2031 }
2032
2033 LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
2034
2035 jobmgr_t where2put = root_jobmgr;
2036 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
2037 where2put = j->mgr;
2038 }
2039 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
2040 LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
2041 } else {
2042 (void)os_assumes_zero(errno);
2043 }
2044
2045 return nj;
2046 }
2047
job_t
job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
{
	const char *const *argv_tmp = argv;
	char tmp_path[PATH_MAX];
	char auto_label[1000];
	const char *bn = NULL;
	char *co;
	size_t minlabel_len;
	size_t i, cc = 0;
	job_t j;

	// A job pointer doubles as kevent udata; the kqueue callback must be
	// the very first member for that to work.
	__OS_COMPILETIME_ASSERT__(offsetof(struct job_s, kqjob_callback) == 0);

	// No new jobs may be created once the manager starts shutting down.
	if (unlikely(jm->shutting_down)) {
		errno = EINVAL;
		return NULL;
	}

	// A job must supply either Program or ProgramArguments.
	if (unlikely(prog == NULL && argv == NULL)) {
		errno = EINVAL;
		return NULL;
	}

	/* I'd really like to redo this someday. Anonymous jobs carry all the
	 * baggage of managed jobs with them, even though most of it is unused.
	 * Maybe when we have Objective-C objects in libSystem, there can be a base
	 * job type that anonymous and managed jobs inherit from...
	 */
	/* NOTE: the AUTO_PICK_* values are sentinel pointers; the comparisons
	 * against `label` below are identity checks, not string compares.
	 */
	char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
	if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
		if (prog) {
			bn = prog;
		} else {
			// basename(3) may modify its argument, so work on a copy.
			strlcpy(tmp_path, argv[0], sizeof(tmp_path));
			// prog for auto labels is kp.kp_kproc.p_comm.
			bn = basename(tmp_path);
		}

		/* The placeholder address reserves space for the widest pointer
		 * rendering; it is rewritten with the real job address once the
		 * allocation exists below.
		 */
		(void)snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
		label = auto_label;
		/* This is so we can do gross things later. See NOTE_EXEC for anonymous
		 * jobs.
		 */
		minlabel_len = strlen(label) + MAXCOMLEN;
	} else {
		if (label == AUTO_PICK_XPC_LABEL) {
			minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
		} else {
			minlabel_len = strlen(label);
		}
	}

	// The label is stored in the flexible space at the end of struct job_s.
	j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);

	if (!j) {
		(void)os_assumes_zero(errno);
		return NULL;
	}

	if (unlikely(label == auto_label)) {
		// Replace the placeholder address with this job's actual address.
		(void)snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
	} else {
		(void)strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
	}

	// Default job parameters; importers may override them later.
	j->kqjob_callback = job_callback;
	j->mgr = jm;
	j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
	j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
	j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
	j->currently_ignored = true;
	j->ondemand = true;
	j->checkedin = true;
	j->jetsam_priority = DEFAULT_JETSAM_PRIORITY;
	j->jetsam_memlimit = -1;
	uuid_clear(j->expected_audit_uuid);
#if TARGET_OS_EMBEDDED
	/* Run embedded daemons as background by default. SpringBoard jobs are
	 * Interactive by default. Unfortunately, so many daemons have opted into
	 * this priority band that its usefulness is highly questionable.
	 *
	 * See <rdar://problem/9539873>.
	 *
	 * Also ensure that daemons have a default memory highwatermark unless
	 * otherwise specified, as per <rdar://problem/10307814>.
	 */
	if (launchd_embedded_handofgod) {
		j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
		j->app = true;
	} else {
		j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
		j->jetsam_memlimit = DEFAULT_JETSAM_DAEMON_HIGHWATERMARK;
	}
#else
	/* Jobs on OS X that just come from disk are "standard" by default so that
	 * third-party daemons/agents don't encounter unexpected throttling.
	 */
	j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
#endif

	if (prog) {
		j->prog = strdup(prog);
		if (!j->prog) {
			(void)os_assumes_zero(errno);
			goto out_bad;
		}
	}

	if (likely(argv)) {
		// Count the NULL-terminated argv entries.
		while (*argv_tmp++) {
			j->argc++;
		}

		// Total bytes needed for every argument string, NULs included.
		for (i = 0; i < j->argc; i++) {
			cc += strlen(argv[i]) + 1;
		}

		// One allocation: the pointer table followed by the string bytes.
		j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
		if (!j->argv) {
			(void)job_assumes_zero(j, errno);
			goto out_bad;
		}

		// co walks the string area immediately after the pointer table.
		co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));

		for (i = 0; i < j->argc; i++) {
			j->argv[i] = co;
			(void)strcpy(co, argv[i]);
			co += strlen(argv[i]) + 1;
		}
		j->argv[i] = NULL;
	}

	// Sssshhh... don't tell anyone.
	if (strcmp(j->label, "com.apple.WindowServer") == 0) {
		j->has_console = true;
	}

	LIST_INSERT_HEAD(&jm->jobs, j, sle);

	// XPC domains keep their own label hash; everyone else hashes into the
	// root job manager.
	jobmgr_t where2put_label = root_jobmgr;
	if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
		where2put_label = j->mgr;
	}
	LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
	uuid_clear(j->expected_audit_uuid);

	job_log(j, LOG_DEBUG, "Conceived");

	return j;

out_bad:
	// j->argv is NULL on every path that reaches here, so freeing prog and
	// the job struct is sufficient.
	if (j->prog) {
		free(j->prog);
	}
	free(j);

	return NULL;
}
2208
2209 job_t
2210 job_new_alias(jobmgr_t jm, job_t src)
2211 {
2212 if (job_find(jm, src->label)) {
2213 errno = EEXIST;
2214 return NULL;
2215 }
2216
2217 job_t j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2218 if (!j) {
2219 (void)os_assumes_zero(errno);
2220 return NULL;
2221 }
2222
2223 (void)strcpy((char *)j->label, src->label);
2224 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2225 LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2226 /* Bad jump address. The kqueue callback for aliases should never be
2227 * invoked.
2228 */
2229 j->kqjob_callback = (kq_callback)0xfa1afe1;
2230 j->alias = src;
2231 j->mgr = jm;
2232
2233 struct machservice *msi = NULL;
2234 SLIST_FOREACH(msi, &src->machservices, sle) {
2235 if (!machservice_new_alias(j, msi)) {
2236 jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2237 errno = EINVAL;
2238 job_remove(j);
2239 j = NULL;
2240 break;
2241 }
2242 }
2243
2244 if (j) {
2245 job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2246 }
2247
2248 return j;
2249 }
2250
2251 job_t
2252 job_import(launch_data_t pload)
2253 {
2254 #if TARGET_OS_EMBEDDED
2255 /* If this is the special payload of default values, handle it here */
2256 if (unlikely(launch_data_dict_lookup(pload, LAUNCH_JOBKEY_DEFAULTS))) {
2257 job_import_defaults(pload);
2258 return NULL;
2259 }
2260 #endif
2261
2262 job_t j = jobmgr_import2(root_jobmgr, pload);
2263
2264 if (unlikely(j == NULL)) {
2265 return NULL;
2266 }
2267
2268 /* Since jobs are effectively stalled until they get security sessions
2269 * assigned to them, we may wish to reconsider this behavior of calling the
2270 * job "enabled" as far as other jobs with the OtherJobEnabled KeepAlive
2271 * criterion set.
2272 */
2273 job_dispatch_curious_jobs(j);
2274 return job_dispatch(j, false);
2275 }
2276
2277 #if TARGET_OS_EMBEDDED
2278
2279 bool
2280 job_import_defaults(launch_data_t pload)
2281 {
2282 bool result = false;
2283 xpc_object_t xd = NULL, defaults;
2284
2285 if (_launchd_defaults_cache) {
2286 xpc_release(_launchd_defaults_cache);
2287 _launchd_defaults_cache = NULL;
2288 }
2289
2290 xd = ld2xpc(pload);
2291 if (!xd || xpc_get_type(xd) != XPC_TYPE_DICTIONARY) {
2292 goto out;
2293 }
2294
2295 defaults = xpc_dictionary_get_value(xd, LAUNCHD_JOB_DEFAULTS);
2296 if (!defaults || xpc_get_type(defaults) != XPC_TYPE_DICTIONARY) {
2297 goto out;
2298 }
2299
2300 _launchd_defaults_cache = xpc_copy(defaults);
2301 result = true;
2302 out:
2303 if (xd) {
2304 xpc_release(xd);
2305 }
2306
2307 return result;
2308 }
2309
bool
job_apply_defaults(job_t j) {
	const char *test_prefix = "com.apple.test.";

	char *sb_prefix_end, *sb_suffix_start;
	// VLA scratch space: the substring extracted below is always strictly
	// shorter than the whole label, so strlen(j->label) bytes suffice.
	char true_job_label[strlen(j->label)];
	const char *label;

	if (((sb_prefix_end = strchr(j->label, ':')) != NULL) &&
		((sb_suffix_start = strchr(sb_prefix_end + 1, '[')) != NULL)) {
		/*
		 * Workaround 'UIKitApplication:com.apple.foo[bar]' convention for the processes
		 * we're interested in. To be removed when <rdar://problem/13066361> is addressed.
		 */
		// snprintf size is (suffix - prefix): exactly the bytes between
		// ':' and '[', plus room for the terminating NUL.
		snprintf(true_job_label, sb_suffix_start - sb_prefix_end, "%s", sb_prefix_end + 1);
		label = true_job_label;
	} else {
		/* Just test the standard label */
		label = j->label;
	}

	/* Test for cache presence and apply if found */
	if (_launchd_defaults_cache) {
		xpc_object_t props = xpc_dictionary_get_value(_launchd_defaults_cache, label);
		if (props && xpc_get_type(props) == XPC_TYPE_DICTIONARY) {
			// Convert back to launch_data and feed it through the normal
			// per-key import path.
			launch_data_t lv = xpc2ld(props);
			launch_data_dict_iterate(lv, job_import_keys, j);
			launch_data_free(lv);
			return true;
		}
	}

	/* Limit free? Disable the memory limit if this is a test job; see <rdar://problem/13180697> */
	if (!strncmp(label, test_prefix, strlen(test_prefix))) {
		j->jetsam_memlimit = -1;
		return true;
	}

	return false;
}
2350
2351 #endif
2352
launch_data_t
job_import_bulk(launch_data_t pload)
{
	// The response is an array of per-job errno values, index-aligned with
	// the submitted payload array.
	launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
	job_t *ja;
	size_t i, c = launch_data_array_get_count(pload);

	// Stack scratch table of imported jobs; bulk loads are bounded by the
	// IPC message size, so this stays small.
	ja = alloca(c * sizeof(job_t));

	for (i = 0; i < c; i++) {
		/* jobmgr_import2() reports detail via errno. On success, clear it
		 * before recording — unless the job still needs authorization
		 * (ENEEDAUTH), which the client must be told about.
		 */
		if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
			errno = 0;
		}
		launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
	}

	// Dispatch only after everything is imported, so dependencies between
	// jobs in the same batch can resolve.
	for (i = 0; i < c; i++) {
		if (likely(ja[i])) {
			job_dispatch_curious_jobs(ja[i]);
			job_dispatch(ja[i], false);
		}
	}

	return resp;
}
2378
2379 void
2380 job_import_bool(job_t j, const char *key, bool value)
2381 {
2382 bool found_key = false;
2383
2384 switch (key[0]) {
2385 case 'a':
2386 case 'A':
2387 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2388 j->abandon_pg = value;
2389 found_key = true;
2390 }
2391 break;
2392 case 'b':
2393 case 'B':
2394 if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2395 j->dirty_at_shutdown = value;
2396 found_key = true;
2397 }
2398 break;
2399 case 'j':
2400 case 'J':
2401 if (strcasecmp(key, LAUNCH_JOBKEY_JOINGUISESSION) == 0) {
2402 j->joins_gui_session = value;
2403 found_key = true;
2404 }
2405 break;
2406 case 'k':
2407 case 'K':
2408 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2409 j->ondemand = !value;
2410 found_key = true;
2411 }
2412 break;
2413 case 'o':
2414 case 'O':
2415 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
2416 j->ondemand = value;
2417 found_key = true;
2418 }
2419 break;
2420 case 'd':
2421 case 'D':
2422 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
2423 j->debug = value;
2424 found_key = true;
2425 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
2426 (void)job_assumes(j, !value);
2427 found_key = true;
2428 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2429 j->disable_aslr = value;
2430 found_key = true;
2431 }
2432 break;
2433 case 'h':
2434 case 'H':
2435 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
2436 job_log(j, LOG_PERF, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
2437 j->dirty_at_shutdown = value;
2438 found_key = true;
2439 }
2440 break;
2441 case 's':
2442 case 'S':
2443 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
2444 j->session_create = value;
2445 found_key = true;
2446 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2447 j->start_on_mount = value;
2448 found_key = true;
2449 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
2450 // this only does something on Mac OS X 10.4 "Tiger"
2451 found_key = true;
2452 } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
2453 if (_launchd_shutdown_monitor) {
2454 job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2455 } else {
2456 j->shutdown_monitor = true;
2457 _launchd_shutdown_monitor = j;
2458 }
2459 found_key = true;
2460 }
2461 break;
2462 case 'l':
2463 case 'L':
2464 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
2465 j->low_pri_io = value;
2466 found_key = true;
2467 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2468 j->only_once = value;
2469 found_key = true;
2470 } else if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYBACKGROUNDIO) == 0) {
2471 j->low_priority_background_io = true;
2472 found_key = true;
2473 } else if (strcasecmp(key, LAUNCH_JOBKEY_LEGACYTIMERS) == 0) {
2474 #if !TARGET_OS_EMBEDDED
2475 j->legacy_timers = value;
2476 #else // !TARGET_OS_EMBEDDED
2477 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2478 #endif // !TARGET_OS_EMBEDDED
2479 found_key = true;
2480 }
2481 break;
2482 case 'm':
2483 case 'M':
2484 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2485 j->internal_exc_handler = value;
2486 found_key = true;
2487 } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2488 j->multiple_instances = value;
2489 found_key = true;
2490 }
2491 break;
2492 case 'i':
2493 case 'I':
2494 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2495 if (getuid() != 0) {
2496 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2497 return;
2498 }
2499 j->no_init_groups = !value;
2500 found_key = true;
2501 } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
2502 j->ignore_pg_at_shutdown = value;
2503 found_key = true;
2504 }
2505 break;
2506 case 'r':
2507 case 'R':
2508 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2509 if (value) {
2510 // We don't want value == false to change j->start_pending
2511 j->start_pending = true;
2512 }
2513 found_key = true;
2514 }
2515 break;
2516 case 'e':
2517 case 'E':
2518 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
2519 j->globargv = value;
2520 found_key = true;
2521 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
2522 j->enable_transactions = value;
2523 found_key = true;
2524 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2525 j->debug_before_kill = value;
2526 found_key = true;
2527 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
2528 #if TARGET_OS_EMBEDDED
2529 if (!_launchd_embedded_god) {
2530 if ((j->embedded_god = value)) {
2531 _launchd_embedded_god = j;
2532 }
2533 } else {
2534 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2535 }
2536 #else
2537 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2538 #endif
2539 found_key = true;
2540 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDHOMESCREEN) == 0) {
2541 #if TARGET_OS_EMBEDDED
2542 if (!_launchd_embedded_home) {
2543 if ((j->embedded_home = value)) {
2544 _launchd_embedded_home = j;
2545 }
2546 } else {
2547 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2548 }
2549 #else
2550 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2551 #endif
2552 } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
2553 if (!_launchd_event_monitor) {
2554 j->event_monitor = value;
2555 if (value) {
2556 _launchd_event_monitor = j;
2557 }
2558 } else {
2559 job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility from: %s", _launchd_event_monitor->label);
2560 }
2561 found_key = true;
2562 }
2563 break;
2564 case 'w':
2565 case 'W':
2566 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
2567 j->wait4debugger = value;
2568 found_key = true;
2569 }
2570 break;
2571 case 'x':
2572 case 'X':
2573 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
2574 if (pid1_magic) {
2575 if (_launchd_xpc_bootstrapper) {
2576 job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _launchd_xpc_bootstrapper->label);
2577 } else {
2578 _launchd_xpc_bootstrapper = j;
2579 j->xpc_bootstrapper = value;
2580 }
2581 } else {
2582 job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
2583 }
2584 }
2585 found_key = true;
2586 break;
2587 default:
2588 break;
2589 }
2590
2591 if (unlikely(!found_key)) {
2592 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2593 }
2594 }
2595
void
job_import_string(job_t j, const char *key, const char *value)
{
	// Destination slot for keys whose value is simply strdup()'d onto the job.
	char **where2put = NULL;

	switch (key[0]) {
	case 'c':
	case 'C':
		if (strcasecmp(key, LAUNCH_JOBKEY_CFBUNDLEIDENTIFIER) == 0) {
			where2put = &j->cfbundleidentifier;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			where2put = &j->alt_exc_handler;
		}
		break;
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
			// Program is handled by the dedicated import path; skip here.
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0
			|| strcasecmp(key, LAUNCH_JOBKEY_PROCESSTYPE) == 0) {
			// Map the plist string onto a posix_spawn process type.
			if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_INTERACTIVE) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_ADAPTIVE) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_STANDARD) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_BACKGROUND) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_TAL;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_SYSTEMAPP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
				j->system_app = true;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_APP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
				j->app = true;
			} else {
				job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
			}
			return;
		}
		break;
	case 'l':
	case 'L':
		// These keys are consumed by launchctl / the import front-end and
		// carry no string payload for the job itself.
		if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			return;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
			// Only root may request a chroot.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			where2put = &j->rootdir;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
			where2put = &j->workingdir;
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
			// Only root may switch users; "root" itself is the default.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "root") == 0) {
				return;
			}
			where2put = &j->username;
		}
		break;
	case 'g':
	case 'G':
		if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
			// Only root may switch groups; "wheel" itself is the default.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "wheel") == 0) {
				return;
			}
			where2put = &j->groupname;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
			where2put = &j->stdoutpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
			where2put = &j->stderrpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
			where2put = &j->stdinpath;
			// Open stdin now so readiness on it can demand-launch the job.
			j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
			if (job_assumes_zero_p(j, j->stdin_fd) != -1) {
				// open() should not block, but regular IO by the job should
				(void)job_assumes_zero_p(j, fcntl(j->stdin_fd, F_SETFL, 0));
				// XXX -- EV_CLEAR should make named pipes happy?
				(void)job_assumes_zero_p(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j));
			} else {
				j->stdin_fd = 0;
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
			where2put = &j->seatbelt_profile;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXCONTAINER) == 0) {
			where2put = &j->container_identifier;
#endif
		}
		break;
	case 'X':
	case 'x':
		if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
			return;
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
		break;
	}

	if (likely(where2put)) {
		if (!(*where2put = strdup(value))) {
			(void)job_assumes_zero(j, errno);
		}
	} else {
		// See rdar://problem/5496612. These two are okay.
		if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) == 0
			|| strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) == 0) {
			job_log(j, LOG_APPLEONLY, "This key is no longer relevant and should be removed: %s", key);
		} else {
			job_log(j, LOG_WARNING, "Unknown key: %s", key);
		}
	}
}
2744
2745 void
2746 job_import_integer(job_t j, const char *key, long long value)
2747 {
2748 switch (key[0]) {
2749 case 'a':
2750 case 'A':
2751 #if TARGET_OS_EMBEDDED
2752 if (strcasecmp(key, LAUNCH_JOBKEY_ASID) == 0) {
2753 if (launchd_embedded_handofgod) {
2754 if (audit_session_port((au_asid_t)value, &j->asport) == -1 && errno != ENOSYS) {
2755 (void)job_assumes_zero(j, errno);
2756 }
2757 }
2758 }
2759 #endif
2760 case 'e':
2761 case 'E':
2762 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
2763 if (unlikely(value < 0)) {
2764 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2765 } else if (unlikely(value > UINT32_MAX)) {
2766 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2767 } else {
2768 j->exit_timeout = (typeof(j->exit_timeout)) value;
2769 }
2770 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
2771 j->main_thread_priority = value;
2772 }
2773 break;
2774 case 'j':
2775 case 'J':
2776 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
2777 job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2778
2779 launch_data_t pri = launch_data_new_integer(value);
2780 if (job_assumes(j, pri != NULL)) {
2781 jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2782 launch_data_free(pri);
2783 }
2784 }
2785 case 'n':
2786 case 'N':
2787 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
2788 if (unlikely(value < PRIO_MIN)) {
2789 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2790 } else if (unlikely(value > PRIO_MAX)) {
2791 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2792 } else {
2793 j->nice = (typeof(j->nice)) value;
2794 j->setnice = true;
2795 }
2796 }
2797 break;
2798 case 't':
2799 case 'T':
2800 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
2801 if (unlikely(value < 0)) {
2802 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2803 } else if (unlikely(value > UINT32_MAX)) {
2804 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2805 } else {
2806 j->timeout = (typeof(j->timeout)) value;
2807 }
2808 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2809 if (value < 0) {
2810 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2811 } else if (value > UINT32_MAX) {
2812 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2813 } else {
2814 j->min_run_time = (typeof(j->min_run_time)) value;
2815 }
2816 }
2817 break;
2818 case 'u':
2819 case 'U':
2820 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2821 j->mask = value;
2822 j->setmask = true;
2823 }
2824 break;
2825 case 's':
2826 case 'S':
2827 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
2828 if (unlikely(value <= 0)) {
2829 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2830 } else if (unlikely(value > UINT32_MAX)) {
2831 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2832 } else {
2833 runtime_add_weak_ref();
2834 j->start_interval = (typeof(j->start_interval)) value;
2835
2836 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
2837 }
2838 #if HAVE_SANDBOX
2839 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2840 j->seatbelt_flags = value;
2841 #endif
2842 }
2843
2844 break;
2845 default:
2846 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2847 break;
2848 }
2849 }
2850
2851 void
2852 job_import_opaque(job_t j __attribute__((unused)), const char *key, launch_data_t value __attribute__((unused)))
2853 {
2854 switch (key[0]) {
2855 case 'q':
2856 case 'Q':
2857 #if HAVE_QUARANTINE
2858 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2859 size_t tmpsz = launch_data_get_opaque_size(value);
2860
2861 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2862 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2863 j->quarantine_data_sz = tmpsz;
2864 }
2865 }
2866 #endif
2867 case 's':
2868 case 'S':
2869 if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
2870 size_t tmpsz = launch_data_get_opaque_size(value);
2871 if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
2872 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2873 }
2874 }
2875 break;
2876 default:
2877 break;
2878 }
2879 }
2880
2881 static void
2882 policy_setup(launch_data_t obj, const char *key, void *context)
2883 {
2884 job_t j = context;
2885 bool found_key = false;
2886
2887 switch (key[0]) {
2888 case 'd':
2889 case 'D':
2890 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2891 j->deny_job_creation = launch_data_get_bool(obj);
2892 found_key = true;
2893 }
2894 break;
2895 default:
2896 break;
2897 }
2898
2899 if (unlikely(!found_key)) {
2900 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2901 }
2902 }
2903
2904 void
2905 job_import_dictionary(job_t j, const char *key, launch_data_t value)
2906 {
2907 launch_data_t tmp;
2908
2909 switch (key[0]) {
2910 case 'p':
2911 case 'P':
2912 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2913 launch_data_dict_iterate(value, policy_setup, j);
2914 }
2915 break;
2916 case 'k':
2917 case 'K':
2918 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2919 launch_data_dict_iterate(value, semaphoreitem_setup, j);
2920 }
2921 break;
2922 case 'i':
2923 case 'I':
2924 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2925 j->inetcompat = true;
2926 j->abandon_pg = true;
2927 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2928 j->inetcompat_wait = launch_data_get_bool(tmp);
2929 }
2930 }
2931 break;
2932 case 'j':
2933 case 'J':
2934 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
2935 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2936 }
2937 case 'e':
2938 case 'E':
2939 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2940 launch_data_dict_iterate(value, envitem_setup, j);
2941 }
2942 break;
2943 case 'u':
2944 case 'U':
2945 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2946 j->importing_global_env = true;
2947 launch_data_dict_iterate(value, envitem_setup, j);
2948 j->importing_global_env = false;
2949 }
2950 break;
2951 case 's':
2952 case 'S':
2953 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2954 launch_data_dict_iterate(value, socketgroup_setup, j);
2955 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2956 calendarinterval_new_from_obj(j, value);
2957 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2958 launch_data_dict_iterate(value, limititem_setup, j);
2959 #if HAVE_SANDBOX
2960 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2961 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2962 #endif
2963 }
2964 break;
2965 case 'h':
2966 case 'H':
2967 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2968 j->importing_hard_limits = true;
2969 launch_data_dict_iterate(value, limititem_setup, j);
2970 j->importing_hard_limits = false;
2971 }
2972 break;
2973 case 'm':
2974 case 'M':
2975 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2976 launch_data_dict_iterate(value, machservice_setup, j);
2977 }
2978 break;
2979 case 'l':
2980 case 'L':
2981 if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2982 launch_data_dict_iterate(value, eventsystem_setup, j);
2983 } else {
2984 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2985 return;
2986 }
2987 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2988 return;
2989 }
2990 }
2991 break;
2992 default:
2993 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2994 break;
2995 }
2996 }
2997
2998 void
2999 job_import_array(job_t j, const char *key, launch_data_t value)
3000 {
3001 size_t i, value_cnt = launch_data_array_get_count(value);
3002
3003 switch (key[0]) {
3004 case 'p':
3005 case 'P':
3006 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
3007 return;
3008 }
3009 break;
3010 case 'l':
3011 case 'L':
3012 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
3013 return;
3014 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
3015 return;
3016 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
3017 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
3018 return;
3019 }
3020 break;
3021 case 'b':
3022 case 'B':
3023 if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
3024 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
3025 j->j_binpref_cnt = value_cnt;
3026 for (i = 0; i < value_cnt; i++) {
3027 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
3028 }
3029 }
3030 }
3031 break;
3032 case 's':
3033 case 'S':
3034 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
3035 for (i = 0; i < value_cnt; i++) {
3036 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
3037 }
3038 }
3039 break;
3040 default:
3041 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
3042 break;
3043 }
3044 }
3045
3046 void
3047 job_import_keys(launch_data_t obj, const char *key, void *context)
3048 {
3049 job_t j = context;
3050 launch_data_type_t kind;
3051
3052 if (!obj) {
3053 launchd_syslog(LOG_ERR, "NULL object given to job_import_keys().");
3054 return;
3055 }
3056
3057 kind = launch_data_get_type(obj);
3058
3059 switch (kind) {
3060 case LAUNCH_DATA_BOOL:
3061 job_import_bool(j, key, launch_data_get_bool(obj));
3062 break;
3063 case LAUNCH_DATA_STRING:
3064 job_import_string(j, key, launch_data_get_string(obj));
3065 break;
3066 case LAUNCH_DATA_INTEGER:
3067 job_import_integer(j, key, launch_data_get_integer(obj));
3068 break;
3069 case LAUNCH_DATA_DICTIONARY:
3070 job_import_dictionary(j, key, obj);
3071 break;
3072 case LAUNCH_DATA_ARRAY:
3073 job_import_array(j, key, obj);
3074 break;
3075 case LAUNCH_DATA_OPAQUE:
3076 job_import_opaque(j, key, obj);
3077 break;
3078 default:
3079 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
3080 break;
3081 }
3082 }
3083
/* Import a property-list dictionary as a new job in the given job manager.
 *
 * Validates the payload (must be a dictionary with a string Label, and either
 * a Program string or a ProgramArguments array), resolves the requested
 * session, rejects duplicate or malformed labels, then creates the job and
 * imports the remaining keys via job_import_keys().
 *
 * Returns the new job, or NULL with errno set (EINVAL/EPERM/EEXIST). On the
 * XPC-singleton duplicate path it returns the EXISTING job with errno set to
 * EEXIST; callers must check errno, not just the return value.
 */
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	if (!jobmgr_assumes(jm, pload != NULL)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
		errno = EINVAL;
		return NULL;
	}

	// Label is mandatory and must be a non-NULL string.
	if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(label = launch_data_get_string(tmp)))) {
		errno = EINVAL;
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* Hand-of-god mode: the payload must name a UserName matching the god
	 * process's username, otherwise the import is refused.
	 */
	if (unlikely(launchd_embedded_handofgod && _launchd_embedded_god)) {
		if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
			errno = EPERM;
			return NULL;
		}

		const char *username = NULL;
		if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
			username = launch_data_get_string(tmp);
		} else {
			errno = EPERM;
			return NULL;
		}

		if (!jobmgr_assumes(jm, _launchd_embedded_god->username != NULL && username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (unlikely(strcmp(_launchd_embedded_god->username, username) != 0)) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM))
		&& (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	int argc = 0;
	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		/* argv is stack-allocated (alloca) and only valid for the duration of
		 * this call; job_new() is expected to copy what it needs.
		 */
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		// NULL-terminate, exec(3)-style.
		argv[i] = NULL;
		argc = i;
	}

	if (!prog && argc == 0) {
		jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
		errno = EINVAL;
		return NULL;
	}

	/* Find the requested session. You cannot load services into XPC domains in
	 * this manner.
	 */
	launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	if (session) {
		jobmgr_t jmt = NULL;
		if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
			jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
			if (!jmt) {
				jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
			} else {
				// Retarget the import at the requested session's manager.
				jm = jmt;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
		}

		if (!jmt) {
			errno = EINVAL;
			return NULL;
		}
	}

	/* For legacy reasons, we have a global hash of all labels in all job
	 * managers. So rather than make it a global, we store it in the root job
	 * manager. But for an XPC domain, we store a local hash of all services in
	 * the domain.
	 */
	jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
	if (unlikely((j = job_find(where2look, label)) != NULL)) {
		if (jm->xpc_singleton) {
			/* There can (and probably will be) multiple attempts to import the
			 * same XPC service from the same framework. This is okay. It's
			 * treated as a singleton, so just return the existing one so that
			 * it may be aliased into the requesting process' XPC domain.
			 */
			errno = EEXIST;
			return j;
		} else {
			/* If we're not a global XPC domain, then it's an error to try
			 * importing the same job/service multiple times.
			 */
			errno = EEXIST;
			return NULL;
		}
	} else if (unlikely(!jobmgr_label_test(where2look, label))) {
		errno = EINVAL;
		return NULL;
	}
	jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);

	if (likely(j = job_new(jm, label, prog, argv))) {
#if TARGET_OS_EMBEDDED
		job_apply_defaults(j);
#endif
		launch_data_dict_iterate(pload, job_import_keys, j);
		if (!uuid_is_null(j->expected_audit_uuid)) {
			// Park the job until its audit session shows up.
			uuid_string_t uuid_str;
			uuid_unparse(j->expected_audit_uuid, uuid_str);
			job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
			LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
			errno = ENEEDAUTH;
		} else {
			job_log(j, LOG_DEBUG, "No security session specified.");
			j->asport = MACH_PORT_NULL;
		}

		if (pid1_magic && !jm->parentmgr) {
			/* Workaround reentrancy in CF. We don't make this a global variable
			 * because we don't want per-user launchd's to inherit it. So we
			 * just set it for every job that we import into the System session.
			 *
			 * See <rdar://problem/9468837>.
			 */
			envitem_new(j, "__CF_USER_TEXT_ENCODING", "0x0:0:0", false);
		}

		if (j->event_monitor) {
			eventsystem_ping();
		}

#if TARGET_OS_EMBEDDED
		/* SpringBoard and backboardd must run at elevated priority.
		 *
		 * See <rdar://problem/9539873> and <rdar://problem/10984383>.
		 */
		if (j->embedded_god || j->embedded_home) {
			j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
		}
#endif
	}

	return j;
}
3280
3281 bool
3282 jobmgr_label_test(jobmgr_t jm, const char *str)
3283 {
3284 const char *ptr;
3285
3286 if (str[0] == '\0') {
3287 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
3288 return false;
3289 }
3290
3291 for (ptr = str; *ptr; ptr++) {
3292 if (iscntrl(*ptr)) {
3293 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
3294 return false;
3295 }
3296 }
3297
3298 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0)
3299 || (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
3300 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
3301 return false;
3302 }
3303
3304 return true;
3305 }
3306
3307 job_t
3308 job_find(jobmgr_t jm, const char *label)
3309 {
3310 job_t ji;
3311
3312 if (!jm) {
3313 jm = root_jobmgr;
3314 }
3315
3316 LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
3317 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
3318 // 5351245 and 5488633 respectively
3319 continue;
3320 }
3321
3322 if (strcmp(ji->label, label) == 0) {
3323 return ji;
3324 }
3325 }
3326
3327 errno = ESRCH;
3328 return NULL;
3329 }
3330
3331 // Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid().
3332 job_t
3333 jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
3334 {
3335 job_t ji = NULL;
3336 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3337 if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay))) {
3338 return ji;
3339 }
3340 }
3341
3342 jobmgr_t jmi = NULL;
3343 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3344 if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
3345 break;
3346 }
3347 }
3348
3349 return ji;
3350 }
3351
3352 job_t
3353 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3354 {
3355 job_t ji;
3356
3357 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3358 if (ji->p == p) {
3359 return ji;
3360 }
3361 }
3362
3363 return create_anon ? job_new_anonymous(jm, p) : NULL;
3364 }
3365
3366 job_t
3367 managed_job(pid_t p)
3368 {
3369 job_t ji;
3370
3371 LIST_FOREACH(ji, &managed_actives[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3372 if (ji->p == p) {
3373 return ji;
3374 }
3375 }
3376
3377 return NULL;
3378 }
3379
/* Resolve a MIG request port to a job, searching this manager and all of its
 * submanagers. A message on a manager's own bootstrap port is attributed to
 * the sending PID (creating an anonymous job if needed); otherwise the port
 * is matched against each job's own port.
 */
job_t
job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
{
	if (mport == jm->jm_port) {
		return jobmgr_find_by_pid(jm, upid, true);
	}

	jobmgr_t sub;
	SLIST_FOREACH(sub, &jm->submgrs, sle) {
		job_t match = job_mig_intran2(sub, mport, upid);
		if (match != NULL) {
			return match;
		}
	}

	job_t ji;
	LIST_FOREACH(ji, &jm->jobs, sle) {
		if (ji->j_port == mport) {
			return ji;
		}
	}

	return NULL;
}
3406
/* MIG intran routine: map an incoming request port to the job that sent it,
 * using the caller's credentials for PID attribution. Logs when the sender
 * vanished before we could attribute the message.
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	jr = job_mig_intran2(root_jobmgr, p, ldc->pid);

	if (!jr) {
		/* Zero-initialize: if proc_pidinfo() fails (the ESRCH branch below),
		 * the struct was never filled in and the log would otherwise read
		 * indeterminate bytes through proc.pbsi_comm.
		 */
		struct proc_bsdshortinfo proc = { 0 };
		if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)jobmgr_assumes_zero(root_jobmgr, errno);
			} else {
				jobmgr_log(root_jobmgr, LOG_ERR, "%s[%i] disappeared out from under us (UID: %u EUID: %u)", proc.pbsi_comm, ldc->pid, ldc->uid, ldc->euid);
			}
		}
	}

	return jr;
}
3428
/* Find the job that owns the given Mach service receive right, or NULL.
 * Only services for which we hold the receive right are considered.
 */
job_t
job_find_by_service_port(mach_port_t p)
{
	struct machservice *ms;

	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->port == p && ms->recv) {
			return ms->job;
		}
	}

	return NULL;
}
3442
3443 void
3444 job_mig_destructor(job_t j)
3445 {
3446 /* The job can go invalid before this point.
3447 *
3448 * <rdar://problem/5477111>
3449 */
3450 if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
3451 job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
3452 job_remove(j);
3453 }
3454
3455 workaround_5477111 = NULL;
3456
3457 calendarinterval_sanity_check();
3458 }
3459
3460 void
3461 job_export_all2(jobmgr_t jm, launch_data_t where)
3462 {
3463 jobmgr_t jmi;
3464 job_t ji;
3465
3466 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3467 job_export_all2(jmi, where);
3468 }
3469
3470 LIST_FOREACH(ji, &jm->jobs, sle) {
3471 launch_data_t tmp;
3472
3473 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3474 launch_data_dict_insert(where, tmp, ji->label);
3475 }
3476 }
3477 }
3478
3479 launch_data_t
3480 job_export_all(void)
3481 {
3482 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3483
3484 if (resp != NULL) {
3485 job_export_all2(root_jobmgr, resp);
3486 } else {
3487 (void)os_assumes_zero(errno);
3488 }
3489
3490 return resp;
3491 }
3492
/* Log any processes that still belong to this (dead) job's process group.
 * Diagnostic only — active only on AppleInternal builds. Must be called while
 * j->p is still known to be valid.
 */
void
job_log_stray_pg(job_t j)
{
	pid_t *pids = NULL;
	// Worst case: one pid_t slot per possible process on the system.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	if (job_assumes_zero_p(j, (kp_cnt = proc_listpgrppids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = pids[i];
		if (p_i == j->p) {
			// The dead job itself.
			continue;
		} else if (p_i == 0 || p_i == 1) {
			// Kernel idle process and launchd are never stray.
			continue;
		}

		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		pid_t pp_i = proc.pbsi_ppid;
		const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
		const char *n = proc.pbsi_comm;

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
	}

out:
	free(pids);
}
3539
3540 #if HAVE_SYSTEMSTATS
// Periodic timer callback: log performance statistics for every job.
static void
systemstats_timer_callback(void)
{
	jobmgr_log_perf_statistics(root_jobmgr, true);
}
3546
/* Lazily initialize the systemstats writer and, in PID 1, arm the periodic
 * logging timer. Returns whether systemstats logging is enabled.
 *
 * NOTE(review): the static flag only latches on success, so a failed init is
 * retried (and the timer possibly re-armed) on every call — presumably
 * intentional retry behavior; confirm.
 */
static bool
systemstats_is_enabled(void)
{
	static bool systemstats_enabled;

	if (!systemstats_enabled) {
		char *store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_LOGS, NULL);
		systemstats_enabled = systemstats_init(SYSTEMSTATS_WRITER_launchd, store);
		free(store);

		uint64_t interval;
		interval = systemstats_get_log_interval(SYSTEMSTATS_WRITER_launchd);

		if (pid1_magic && systemstats_enabled && interval) {
			// The callback function's address doubles as the kevent ident.
			jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)systemstats_timer_callback, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, interval, root_jobmgr));
		}
	}

	return systemstats_enabled;
}
3567 #endif // HAVE_SYSTEMSTATS
3568
/* Reap a job whose process has exited: collect its exit status, kill stray
 * process-group members, tear down per-process bookkeeping (hashes, timers,
 * ports, suspended per-user launchds), notify waiters, and reset the job's
 * runtime state so it can be redispatched. Called from the NOTE_EXIT path.
 */
void
job_reap(job_t j)
{
	bool is_system_bootstrapper = ((j->is_bootstrapper && pid1_magic) && !j->mgr->parentmgr);

	job_log(j, LOG_DEBUG, "Reaping");

	if (unlikely(j->weird_bootstrap)) {
		int64_t junk = 0;
		job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
	}

	if (j->fork_fd) {
		(void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
		j->fork_fd = 0;
	}

	// Dirty/idle-exit state only applies to managed, explicitly-reaped jobs.
	bool was_dirty = false;
	if (!(j->anonymous || j->implicit_reap)) {
		uint32_t flags = 0;
		(void)job_assumes_zero(j, proc_get_dirty(j->p, &flags));

		j->idle_exit = (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT);
		was_dirty = (flags & PROC_DIRTY_IS_DIRTY);

		job_log(j, LOG_DEBUG, "%sob exited %s.", j->idle_exit ? "Idle-exit j" : "J", was_dirty ? "while dirty" : "cleanly");
	}

	if (j->idle_exit && was_dirty) {
		if (j->jettisoned) {
			// Jettisoned while dirty: the job still had work to do.
			job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned while dirty. Will respawn immediately.");
			j->unthrottle = true;
			j->start_pending = true;
		} else {
			job_log(j, LOG_INFO, "Idle-exit job exited while dirty.");
		}
	} else if (j->idle_exit && j->jettisoned) {
		/* If an idle-exit job is jettisoned, then we shouldn't throttle its
		 * next respawn because it could not help when it exited. If it ran for
		 * the minimum runtime, then this doesn't really matter. If it ran for
		 * less than the minimum runtime, it will not be throttled.
		 *
		 * <rdar://problem/12098667>
		 */
		job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned. Will bypass throttle interval for next on-demand launch.");
		j->unthrottle = true;
	}

	if (j->anonymous) {
		// Anonymous jobs were not spawned by us; there is nothing to wait for.
		j->last_exit_status = 0;
	} else {
		// Accumulate total runtime and run count for perf accounting.
		uint64_t rt = runtime_get_nanoseconds_since(j->start_time);
		j->trt += rt;

		job_log(j, LOG_PERF, "Last instance wall time: %06f", (double)rt / (double)NSEC_PER_SEC);
		j->nruns++;

		/* The job is dead. While the PID/PGID is still known to be valid, try
		 * to kill abandoned descendant processes.
		 */
		job_log_stray_pg(j);
		if (!j->abandon_pg) {
			if (unlikely(killpg2(j->p, SIGTERM) == -1 && errno != ESRCH)) {
				job_log(j, LOG_APPLEONLY, "Bug: 5487498");
			}
		}

		int r = -1;
		if (!j->implicit_reap) {
			/* If the shutdown monitor has suspended a task and not resumed it
			 * before exiting, the kernel will not clean up after the
			 * shutdown monitor. It will, instead, leave the task suspended and
			 * not process any pending signals on the event loop for the task.
			 *
			 * There are a variety of other kernel bugs that could prevent a
			 * process from exiting, usually having to do with faulty hardware
			 * or talking to misbehaving drivers that mark a thread as
			 * uninterruptible and deadlock/hang before unmarking it as such. So
			 * we have to work around that too.
			 *
			 * See <rdar://problem/9284889&9359725>.
			 */
			if (j->workaround9359725) {
				job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
				j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
			} else {
#if HAVE_SYSTEMSTATS
				// Snapshot rusage BEFORE wait4() reaps the process.
				int r2;
				struct rusage_info_v1 ri;
				r2 = job_assumes_zero(j, proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)&ri));
#endif
				if ((r = wait4(j->p, &j->last_exit_status, 0, NULL)) == -1) {
					job_log(j, LOG_ERR, "Reap failed. Assuming job exited: %d: %s", errno, strerror(errno));
					j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
				}

				if (j->idle_exit && j->jettisoned) {
					// Treat idle-exit jettisons as successful exit.
					//
					// <rdar://problem/13338973>
					(void)job_assumes_zero(j, WTERMSIG(j->last_exit_status));
					j->last_exit_status = W_EXITCODE(0, 0);
				}
#if HAVE_SYSTEMSTATS
				if (r2 == 0) {
					job_log_perf_statistics(j, &ri, j->last_exit_status);
				}
#endif
			}
		} else {
			job_log(j, LOG_INFO, "Job was implicitly reaped by the kernel.");
		}
	}

	if (j->exit_timeout) {
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}

	LIST_REMOVE(j, pid_hash_sle);
	if (!j->anonymous) {
		LIST_REMOVE(j, global_pid_hash_sle);
	}

	if (j->sent_signal_time) {
		uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);

		td_sec = td / NSEC_PER_SEC;
		td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

		job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
	}

	int exit_status = WEXITSTATUS(j->last_exit_status);
	if (WIFEXITED(j->last_exit_status) && exit_status != 0) {
		if (!j->did_exec && _launchd_support_system) {
			// A non-zero exit before exec(3) carries the errno from the
			// failed exec as the exit status.
			xpc_object_t event = NULL;
			switch (exit_status) {
			case ENOENT:
			case ENOTDIR:
			case ESRCH:
				job_log(j, LOG_NOTICE, "Job failed to exec(3). Setting up event to tell us when to try again: %d: %s", exit_status, strerror(exit_status));
				event = xpc_dictionary_create(NULL, NULL, 0);
				xpc_dictionary_set_string(event, "Executable", j->prog ? j->prog : j->argv[0]);
				if (j->mach_uid) {
					xpc_dictionary_set_uint64(event, "UID", j->mach_uid);
				} else if (j->username) {
					xpc_dictionary_set_string(event, "UserName", j->username);
				}

				if (j->groupname) {
					xpc_dictionary_set_string(event, "GroupName", j->groupname);
				}

				(void)externalevent_new(j, _launchd_support_system, j->label, event, 0);
				xpc_release(event);

				j->waiting4ok = true;
				// NOTE(review): no break here — the ENOENT/ENOTDIR/ESRCH
				// cases fall through and also emit the "weird reason" log
				// below. Looks like a missing break; confirm intent before
				// changing, since the extra log may be relied upon.
			default:
				job_log(j, LOG_NOTICE, "Job failed to exec(3) for weird reason: %d", exit_status);
			}
		} else {
			// NOTE(review): exit_status != 0 is already guaranteed by the
			// enclosing if, so level is always LOG_ERR here.
			int level = LOG_INFO;
			if (exit_status != 0) {
				level = LOG_ERR;
			}

			job_log(j, level, "Exited with code: %d", exit_status);
		}
	}

	if (WIFSIGNALED(j->last_exit_status)) {
		int s = WTERMSIG(j->last_exit_status);
		if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
			job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
		} else if (!(j->stopped || j->clean_kill || j->jettisoned)) {
			switch (s) {
			// Signals which indicate a crash.
			case SIGILL:
			case SIGABRT:
			case SIGFPE:
			case SIGBUS:
			case SIGSEGV:
			case SIGSYS:
			/* If the kernel has posted NOTE_EXIT and the signal sent to the process was
			 * SIGTRAP, assume that it's a crash.
			 */
			case SIGTRAP:
				j->crashed = true;
				job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
				break;
			default:
				job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
				break;
			}

			if (is_system_bootstrapper && j->crashed) {
				job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
			}
		}
	}

	j->reaped = true;

	// Crash/exec-failure handling for the job's Mach services.
	struct machservice *msi = NULL;
	if (j->crashed || !(j->did_exec || j->anonymous)) {
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
				machservice_drain_port(msi);
			}

			if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
				machservice_resetport(j, msi);
			}
		}
	}

	/* HACK: Essentially duplicating the logic directly above. But this has
	 * gotten really hairy, and I don't want to try consolidating it right now.
	 */
	if (j->xpc_service && !j->xpcproxy_did_exec) {
		job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
		SLIST_FOREACH(msi, &j->machservices, sle) {
			/* Drain the messages but do not reset the port. If xpcproxy could
			 * not exec(3), then we don't want to continue trying, since there
			 * is very likely a serious configuration error with the service.
			 *
			 * The above comment is weird. I originally said we should drain
			 * messages but not reset the port, but that's exactly what we do
			 * below, and I'm not sure which is the mistake, the comment or the
			 * actual behavior.
			 *
			 * Since it's always been this way, I'll assume that the comment is
			 * incorrect, but I'll leave it in place just to remind myself to
			 * actually look into it at some point.
			 *
			 * <rdar://problem/8986802>
			 */
			if (msi->upfront && job_assumes(j, !msi->isActive)) {
				machservice_resetport(j, msi);
			}
		}
	}

	// Forcibly resume any per-user launchds this job suspended and never resumed.
	struct suspended_peruser *spi = NULL;
	while ((spi = LIST_FIRST(&j->suspended_perusers))) {
		job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
		spi->j->peruser_suspend_count--;
		if (spi->j->peruser_suspend_count == 0) {
			job_dispatch(spi->j, false);
		}
		LIST_REMOVE(spi, sle);
		free(spi);
	}

	// Deliver the exit status to anyone waiting on it.
	if (j->exit_status_dest) {
		errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
		if (errno && errno != MACH_SEND_INVALID_DEST) {
			(void)job_assumes_zero(j, errno);
		}

		j->exit_status_dest = MACH_PORT_NULL;
	}

	if (j->spawn_reply_port) {
		/* If the child never called exec(3), we must send a spawn() reply so
		 * that the requestor can get exit status from it. If we fail to send
		 * the reply for some reason, we have to deallocate the exit status port
		 * ourselves.
		 */
		kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
		if (kr) {
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			}

			(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
		}

		j->exit_status_port = MACH_PORT_NULL;
		j->spawn_reply_port = MACH_PORT_NULL;
	}

	// Per-kind child accounting and runtime refcounts.
	if (j->anonymous) {
		total_anon_children--;
		if (j->holds_ref) {
			job_log(j, LOG_PERF, "Anonymous job exited holding reference.");
			runtime_del_ref();
		}
	} else {
		job_log(j, LOG_PERF, "Job exited.");
		runtime_del_ref();
		total_children--;
	}

	if (j->has_console) {
		launchd_wsp = 0;
	}

	if (j->shutdown_monitor) {
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
		_launchd_shutdown_monitor = NULL;
		j->shutdown_monitor = false;
	}

	if (!j->anonymous) {
		j->mgr->normal_active_cnt--;
	}
	// Reset per-run state so the job can be dispatched again.
	j->sent_signal_time = 0;
	j->sent_sigkill = false;
	j->clean_kill = false;
	j->event_monitor_ready2signal = false;
	j->p = 0;
	j->uniqueid = 0;
}
3883
3884 void
3885 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3886 {
3887 jobmgr_t jmi, jmn;
3888 job_t ji, jn;
3889
3890 if (jm->shutting_down) {
3891 return;
3892 }
3893
3894 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3895 jobmgr_dispatch_all(jmi, newmounthack);
3896 }
3897
3898 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3899 if (newmounthack && ji->start_on_mount) {
3900 ji->start_pending = true;
3901 }
3902
3903 job_dispatch(ji, false);
3904 }
3905 }
3906
3907 void
3908 job_dispatch_curious_jobs(job_t j)
3909 {
3910 job_t ji = NULL, jt = NULL;
3911 SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
3912 struct semaphoreitem *si = NULL;
3913 SLIST_FOREACH(si, &ji->semaphores, sle) {
3914 if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
3915 continue;
3916 }
3917
3918 if (strcmp(si->what, j->label) == 0) {
3919 job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);
3920
3921 if (!ji->removing) {
3922 job_dispatch(ji, false);
3923 } else {
3924 job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
3925 }
3926
3927 /* ji could be removed here, so don't do anything with it or its semaphores
3928 * after this point.
3929 */
3930 break;
3931 }
3932 }
3933 }
3934 }
3935
/* Decide what to do with a job: remove it if useless, start it when demanded
 * (kickstart) or kept alive, otherwise (re)arm its watches.
 *
 * Returns j on success, NULL when the job was not dispatchable. WARNING: a
 * NULL return may mean the job was REMOVED (freed); callers must not touch j
 * afterwards.
 */
job_t
job_dispatch(job_t j, bool kickstart)
{
	// Don't dispatch a job if it has no audit session set.
	if (!uuid_is_null(j->expected_audit_uuid)) {
		job_log(j, LOG_DEBUG, "Job is still awaiting its audit session UUID. Not dispatching.");
		return NULL;
	}
	if (j->alias) {
		job_log(j, LOG_DEBUG, "Job is an alias. Not dispatching.");
		return NULL;
	}

	// Set when a previous exec(3) failed; an external event re-enables it.
	if (j->waiting4ok) {
		job_log(j, LOG_DEBUG, "Job cannot exec(3). Not dispatching.");
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	// Hand-of-god mode: only jobs belonging to the god process's user may run.
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!job_assumes(j, _launchd_embedded_god->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	/*
	 * The whole job removal logic needs to be consolidated. The fact that
	 * a job can be removed from just about anywhere makes it easy to have
	 * stale pointers left behind somewhere on the stack that might get
	 * used after the deallocation. In particular, during job iteration.
	 *
	 * This is a classic example. The act of dispatching a job may delete it.
	 */
	if (!job_active(j)) {
		if (job_useless(j)) {
			job_log(j, LOG_DEBUG, "Job is useless. Removing.");
			job_remove(j);
			return NULL;
		}
		if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
			job_log(j, LOG_DEBUG, "Per-user launchd is suspended. Not dispatching.");
			return NULL;
		}

		if (kickstart || job_keepalive(j)) {
			job_log(j, LOG_DEBUG, "%starting job", kickstart ? "Kicks" : "S");
			job_start(j);
		} else {
			job_log(j, LOG_DEBUG, "Watching job.");
			job_watch(j);
		}
	} else {
		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job: %s.", job_active(j));
	}

	return j;
}
4003
/* Send SIGKILL to the job's process and arm a one-shot timer so we notice if
 * the kill does not take effect. No-op for anonymous jobs or jobs with no
 * live process.
 */
void
job_kill(job_t j)
{
	if (unlikely(j->p == 0 || j->anonymous)) {
		return;
	}

	(void)job_assumes_zero_p(j, kill2(j->p, SIGKILL));

	j->sent_sigkill = true;
	(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j));

	job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
}
4018
4019 void
4020 job_open_shutdown_transaction(job_t j)
4021 {
4022 int rv = proc_set_dirty(j->p, true);
4023 if (rv != 0) {
4024 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
4025 j->dirty_at_shutdown = false;
4026 }
4027 }
4028
4029 void
4030 job_close_shutdown_transaction(job_t j)
4031 {
4032 if (j->dirty_at_shutdown) {
4033 job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
4034 (void)job_assumes_zero(j, proc_set_dirty(j->p, false));
4035 j->dirty_at_shutdown = false;
4036 }
4037 }
4038
/* Diagnostic (AppleInternal builds only): log children of this job that were
 * fork(2)ed but never called exec(3). Skipped for anonymous and per-user jobs.
 */
void
job_log_children_without_exec(job_t j)
{
	pid_t *pids = NULL;
	// Worst case: one pid_t slot per possible process on the system.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal || j->anonymous || j->per_user) {
		return;
	}

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	if (job_assumes_zero_p(j, (kp_cnt = proc_listchildpids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}
		// P_EXEC set means this child did exec(3); it's not interesting.
		if (proc.pbsi_flags & P_EXEC) {
			continue;
		}

		job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
	}

out:
	free(pids);
}
4075
// Handle an EVFILT_PROC kevent for a job: exec (NOTE_EXEC), fork
// (NOTE_FORK), and exit (NOTE_EXIT) notifications. May remove the job
// entirely (anonymous jobs) -- callers must not touch j afterward.
void
job_callback_proc(job_t j, struct kevent *kev)
{
	bool program_changed = false;
	int fflags = kev->fflags;

	job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
	log_kevent_struct(LOG_DEBUG, kev, 0);

	if (fflags & NOTE_EXEC) {
		program_changed = true;

		if (j->anonymous) {
			// An adopted (anonymous) process exec'd a new image: rebuild its
			// label from the new command name and re-hash the job under it.
			struct proc_bsdshortinfo proc;
			if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
				char newlabel[1000];

				snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);

				job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);

				LIST_REMOVE(j, label_hash_sle);
				// NOTE(review): copies into the existing j->label storage with
				// no visible bounds check here -- presumably the anonymous-job
				// allocation reserves enough room for any such label; verify
				// against the anonymous job creation path.
				strcpy((char *)j->label, newlabel);

				jobmgr_t where2put = root_jobmgr;
				if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
					where2put = j->mgr;
				}
				LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
			} else if (errno != ESRCH) {
				// ESRCH just means the process already died; anything else is
				// an assumption failure.
				(void)job_assumes_zero(j, errno);
			}
		} else {
			// A managed job exec'd: deliver the deferred spawn reply, if any.
			if (j->spawn_reply_port) {
				errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
				if (errno) {
					if (errno != MACH_SEND_INVALID_DEST) {
						(void)job_assumes_zero(j, errno);
					}
					(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
				}

				j->spawn_reply_port = MACH_PORT_NULL;
				j->exit_status_port = MACH_PORT_NULL;
			}

			// For XPC services, the second exec is xpcproxy exec'ing the real
			// service binary.
			if (j->xpc_service && j->did_exec) {
				j->xpcproxy_did_exec = true;
			}

			j->did_exec = true;
			job_log(j, LOG_DEBUG, "Program changed");
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
		job_log_children_without_exec(j);
	}

	if (fflags & NOTE_EXIT) {
		// kev->data carries extra exit-detail flags when NOTE_EXIT_DETAIL
		// was requested at registration time (see job_start()'s proc_fflags).
		if (kev->data & NOTE_EXIT_DECRYPTFAIL) {
			j->fpfail = true;
			job_log(j, LOG_WARNING, "FairPlay decryption failed on binary for job.");
		} else if (kev->data & NOTE_EXIT_MEMORY) {
			j->jettisoned = true;
			job_log(j, LOG_INFO, "Job was killed due to memory pressure.");
		}

		job_reap(j);

		if (j->anonymous) {
			// Anonymous jobs exist only while their process does.
			job_remove(j);
			j = NULL;
		} else {
			// Drop any attach-waiters that were targeting the dead PID.
			struct waiting4attach *w4ai = NULL;
			struct waiting4attach *w4ait = NULL;
			LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
				if (w4ai->dest == (pid_t)kev->ident) {
					waiting4attach_delete(j->mgr, w4ai);
				}
			}

			// Give the job a chance to respawn (KeepAlive, pending starts).
			(void)job_dispatch(j, false);
		}
	}
}
4163
// Handle an EVFILT_TIMER kevent for a job. The timer ident distinguishes
// which timer fired: the job pointer itself (respawn-throttle / fork-retry),
// its semaphore list, its start interval, or its exit timeout.
void
job_callback_timer(job_t j, void *ident)
{
	if (j == ident) {
		// Throttle/retry timer: force a start attempt.
		job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		// StartInterval fired: mark a start pending and dispatch.
		job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (!job_assumes(j, j->p != 0)) {
			return;
		}

		if (j->sent_sigkill) {
			// SIGKILL was already sent and the process still hasn't exited.
			uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);

			td /= NSEC_PER_SEC;
			td -= j->clean_kill ? 0 : j->exit_timeout;

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
			j->workaround9359725 = true;

			// This basically has to be done off the main thread. We have no
			// mechanism for draining the main queue in our run loop (like CF
			// does), and the kevent mechanism wants an object to be associated
			// as the callback. So we just create a dispatch source and reap the
			// errant PID whenever we can. Note that it is not safe for us to do
			// any logging in this block, since logging requires exclusive
			// access to global data structures that is only protected by the
			// main thread.
			dispatch_source_t hack_13570156 = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, j->p, DISPATCH_PROC_EXIT, dispatch_get_global_queue(0, 0));
			dispatch_source_set_event_handler(hack_13570156, ^{
				pid_t pid = (pid_t)dispatch_source_get_handle(hack_13570156);

				int status = 0;
				(void)waitpid(pid, &status, 0);
				dispatch_release(hack_13570156);
			});

			dispatch_resume(hack_13570156);

			if (launchd_trap_sigkill_bugs) {
				job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			// Fabricate a NOTE_EXIT event so the normal reaping machinery
			// runs even though the kernel never delivered one.
			struct kevent bogus_exit;
			EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
			jobmgr_callback(j->mgr, &bogus_exit);
		} else {
			// First exit-timeout expiry: escalate to SIGKILL.
			if (unlikely(j->debug_before_kill)) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
			job_kill(j);
		}
	} else {
		job_log(j, LOG_ERR, "Unrecognized job timer callback: %p", ident);
	}
}
4231
4232 void
4233 job_callback_read(job_t j, int ident)
4234 {
4235 if (ident == j->stdin_fd) {
4236 job_dispatch(j, true);
4237 } else {
4238 socketgroup_callback(j);
4239 }
4240 }
4241
// Propagate an EVFILT_PROC kevent through this manager and all of its
// submanagers, delivering it to whichever job owns the PID.
void
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
{
	jobmgr_t submgr;

	// Depth-first: give every submanager a chance to claim the PID first.
	SLIST_FOREACH(submgr, &jm->submgrs, sle) {
		jobmgr_reap_bulk(submgr, kev);
	}

	job_t j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false);
	if (j != NULL) {
		kev->udata = j;
		job_callback(j, kev);
	}
}
4257
// Central kevent callback for a job manager: routes process, signal,
// filesystem, timer, and vnode events to the appropriate handlers.
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	jobmgr_t jm = obj;

#if TARGET_OS_EMBEDDED
	int flag2check = VQ_MOUNT;
#else
	int flag2check = VQ_UPDATE;
#endif

	switch (kev->filter) {
	case EVFILT_PROC:
		// A tracked process died or changed: let the manager tree reap it,
		// then garbage-collect managers that may now be empty.
		jobmgr_reap_bulk(jm, kev);
		root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:
			jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		case SIGUSR2:
			// Turn on all logging.
			launchd_log_perf = true;
			launchd_log_debug = true;
			launchd_log_shutdown = true;
			/* Hopefully /var is available by this point. If not, uh, oh well.
			 * It's just a debugging facility.
			 */
			return jobmgr_log_perf_statistics(jm, false);
		case SIGINFO:
			return jobmgr_log_perf_statistics(jm, true);
		default:
			jobmgr_log(jm, LOG_ERR, "Unrecognized signal: %lu: %s", kev->ident, strsignal(kev->ident));
		}
		break;
	case EVFILT_FS:
		// Filesystem events: flag2check (platform-dependent) may mean /var
		// has become writable; any other mount may satisfy waiting jobs.
		if (kev->fflags & flag2check) {
			if (!launchd_var_available) {
				struct stat sb;
				if (stat("/var/log", &sb) == 0 && (sb.st_mode & S_IWUSR)) {
					launchd_var_available = true;
				}
			}
		} else if (kev->fflags & VQ_MOUNT) {
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		// The ident identifies which global/manager timer fired.
		if (kev->ident == (uintptr_t)&sorted_calendar_events) {
			calendarinterval_callback();
		} else if (kev->ident == (uintptr_t)jm) {
			jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
			jobmgr_still_alive_with_check(jm);
		} else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
			jobmgr_do_garbage_collection(jm);
		} else if (kev->ident == (uintptr_t)&launchd_runtime_busy_time) {
			jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
			if (jobmgr_assumes_zero(jm, runtime_busy_cnt) == 0) {
				return launchd_shutdown();
			}
#if HAVE_SYSTEMSTATS
		} else if (kev->ident == (uintptr_t)systemstats_timer_callback) {
			systemstats_timer_callback();
#endif
		}
		break;
	case EVFILT_VNODE:
		if (kev->ident == (uintptr_t)s_no_hang_fd) {
			// /dev/autofs_nowait appeared: open it for real and retire the
			// placeholder descriptor we were watching.
			int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
			if (unlikely(_no_hang_fd != -1)) {
				jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
				(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL));
				(void)jobmgr_assumes_zero_p(root_jobmgr, runtime_close(s_no_hang_fd));
				s_no_hang_fd = _fd(_no_hang_fd);
			}
		} else if (pid1_magic && launchd_console && kev->ident == (uintptr_t)fileno(launchd_console)) {
			// The console device changed out from under us; re-open it.
			int cfd = -1;
			if (jobmgr_assumes_zero_p(jm, cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1) {
				_fd(cfd);
				if (!(launchd_console = fdopen(cfd, "w"))) {
					(void)jobmgr_assumes_zero(jm, errno);
					(void)close(cfd);
				}
			}
		}
		break;
	default:
		jobmgr_log(jm, LOG_ERR, "Unrecognized kevent filter: %hd", kev->filter);
	}
}
4352
// Per-job kevent entry point: fan a kevent out to the filter-specific
// handler for this job.
void
job_callback(void *obj, struct kevent *kev)
{
	job_t j = obj;

	job_log(j, LOG_DEBUG, "Dispatching kevent callback.");

	switch (kev->filter) {
	case EVFILT_PROC:
		job_callback_proc(j, kev);
		break;
	case EVFILT_TIMER:
		job_callback_timer(j, (void *)kev->ident);
		break;
	case EVFILT_READ:
		job_callback_read(j, (int)kev->ident);
		break;
	case EVFILT_MACHPORT:
		(void)job_dispatch(j, true);
		break;
	default:
		job_log(j, LOG_ERR, "Unrecognized job callback filter: %hd", kev->filter);
		break;
	}
}
4373
// Fork and launch the job's process: throttles over-eager respawns, sets up
// the exec-synchronization and IPC socketpairs, forks, and (in the parent)
// records the new PID and attaches the EVFILT_PROC kevent that drives all
// subsequent lifecycle handling.
void
job_start(job_t j)
{
	uint64_t td;
	int spair[2];     // launchd <-> child IPC (check-in) channel
	int execspair[2]; // parent->child "kevent is attached, go" handshake
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXIT_DETAIL|NOTE_EXITSTATUS;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (unlikely(job_active(j))) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	if (!LIST_EMPTY(&j->mgr->attaches)) {
		job_log(j, LOG_DEBUG, "Looking for attachments for job: %s", j->label);
		(void)waiting4attach_find(j->mgr, j);
	}

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of the wall clock
	 * wherever possible.
	 */
	td = runtime_get_nanoseconds_since(j->start_time);
	td /= NSEC_PER_SEC;

	// Throttle: if the job last started less than min_run_time ago, defer the
	// respawn with a one-shot timer instead of starting now.
	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat && !j->unthrottle) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;
		/* We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */
		job_log(j, LOG_NOTICE, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j));
		job_ignore(j);
		return;
	}

	// The check-in IPC channel is needed when the job has sockets or Mach
	// services (unless job creation is denied for it).
	if (likely(!j->legacy_mach_job)) {
		sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_god;
	}

	if (sipc) {
		(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair));
	}

	(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair));

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		// fork failed: schedule a retry in one second and tear down the pipes.
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j));
		job_ignore(j);

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			(void)job_assumes_zero(j, runtime_close(spair[1]));
		}
		break;
	case 0:
		// Child side: block until the parent has attached its kevent, then
		// hand off to job_start_child() which execs and never returns.
		if (unlikely(_vproc_post_fork_ping())) {
			_exit(EXIT_FAILURE);
		}

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		// wait for our parent to say they've attached a kevent to us
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		// Parent side: record the launch and reset all per-run state bits.
		j->start_time = runtime_get_opaque_time();

		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->did_exec = false;
		j->fpfail = false;
		j->jettisoned = false;
		j->xpcproxy_did_exec = false;
		j->checkedin = false;
		j->start_pending = false;
		j->reaped = false;
		j->crashed = false;
		j->stopped = false;
		j->workaround9359725 = false;
		j->implicit_reap = false;
		j->unthrottle = false;
		if (j->needs_kickoff) {
			j->needs_kickoff = false;

			if (SLIST_EMPTY(&j->semaphores)) {
				j->ondemand = false;
			}
		}

		if (j->has_console) {
			launchd_wsp = c;
		}

		job_log(j, LOG_PERF, "Job started.");
		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
		LIST_INSERT_HEAD(&managed_actives[ACTIVE_JOB_HASH(c)], j, global_pid_hash_sle);
		j->p = c;

		struct proc_uniqidentifierinfo info;
		if (proc_pidinfo(c, PROC_PIDUNIQIDENTIFIERINFO, 0, &info, PROC_PIDUNIQIDENTIFIERINFO_SIZE) != 0) {
			// ignore errors here, kevent_mod below will catch them and clean up
			j->uniqueid = info.p_uniqueid;
		}

		j->mgr->normal_active_cnt++;
		j->fork_fd = _fd(execspair[0]);
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[1]));
			ipc_open(_fd(spair[0]), j);
		}
		// Attach the process kevent; on success, stop watching the job's
		// start triggers while it runs.
		if (kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1) {
			job_ignore(j);
		} else {
			if (errno == ESRCH) {
				job_log(j, LOG_ERR, "Child was killed before we could attach a kevent.");
			} else {
				(void)job_assumes(j, errno == ESRCH);
			}
			job_reap(j);

			/* If we have reaped this job within this same run loop pass, then
			 * it will be currently ignored. So if there's a failure to attach a
			 * kevent, we need to make sure that we watch the job so that we can
			 * respawn it.
			 *
			 * See <rdar://problem/10140809>.
			 */
			job_watch(j);
		}

#if HAVE_SYSTEMSTATS
		if (systemstats_is_enabled()) {
			/* We don't really *need* to make the full rusage call -- it
			 * will be mostly 0s and very small numbers. We only need
			 * ri_proc_start_abstime, because that's how we disambiguiate
			 * PIDs when they wrap around; and the UUID.
			 * In the future we should use the 64-bit process unique ID,
			 * so there's nothing to disambiguiate, and skip the full
			 * rusage call here.
			 *
			 * Well, the future is now.
			 */
			if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_LAUNCHD_SHOULD_LOG_JOB_START)) {
				job_log_perf_statistics(j, NULL, -3);
			}
		}
#endif
		j->wait4debugger_oneshot = false;
		if (likely(!j->stall_before_exec)) {
			job_uncork_fork(j);
		}
		break;
	}
}
4550
// Runs in the forked child: builds argv, applies posix_spawn attributes
// (process type, jetsam, CPU monitor, sandbox, quarantine, ...), then execs
// the job's binary with POSIX_SPAWN_SETEXEC. Never returns: on failure it
// _exit()s with errno as the status.
void
job_start_child(job_t j)
{
	typeof(posix_spawn) *psf;
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC;
	int psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
	size_t binpref_out_cnt = 0;
	size_t i;

	(void)job_assumes_zero(j, posix_spawnattr_init(&spattr));

	job_setup_attributes(j);

	// If a debugger-attach is pending for this job, launch via xpcproxy
	// (unless the job is an XPC service, which already goes through it).
	bool use_xpcproxy = false;
	struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
	if (w4a) {
		(void)setenv(XPC_SERVICE_ENV_ATTACHED, "1", 1);
		if (!j->xpc_service) {
			use_xpcproxy = true;
		}
	}

	// Build argv: xpcproxy wrapper, glob-expanded argv, plain argv, or the
	// Program key alone. Slot 0 is reserved for launchproxy/xpcproxy.
	if (use_xpcproxy) {
		argv = alloca(3 * sizeof(char *));
		argv[0] = "/usr/libexec/xpcproxy";
		argv[1] = "-debug";
		argv[2] = NULL;

		file2exec = argv[0];
	} else if (unlikely(j->argv && j->globargv)) {
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (likely(j->argv)) {
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	// Skip the reserved slot 0 when no proxy binary is in use.
	if (likely(!(j->inetcompat || use_xpcproxy))) {
		argv++;
	}

	if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
		if (!j->app) {
			job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		}
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

#if !TARGET_OS_EMBEDDED
	if (unlikely(j->disable_aslr)) {
		spflags |= _POSIX_SPAWN_DISABLE_ASLR;
	}
#endif
	spflags |= j->pstype;

	(void)job_assumes_zero(j, posix_spawnattr_setflags(&spattr, spflags));
	if (unlikely(j->j_binpref_cnt)) {
		(void)job_assumes_zero(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt));
		(void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

	psproctype = j->psproctype;
	(void)job_assumes_zero(j, posix_spawnattr_setprocesstype_np(&spattr, psproctype));

#if TARGET_OS_EMBEDDED
	/* Set jetsam attributes. POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY guards
	 * against a race which arises if, during spawn, an initial jetsam property
	 * update occurs before the values below are applied. In this case, the flag
	 * ensures that the subsequent change is ignored; the explicit update should
	 * be given priority.
	 */
	(void)job_assumes_zero(j, posix_spawnattr_setjetsam(&spattr,
			POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY | (j->jetsam_memory_limit_background ? POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND : 0),
			j->jetsam_priority, j->jetsam_memlimit));
#endif

	mach_port_array_t sports = NULL;
	mach_msg_type_number_t sports_cnt = 0;
	kern_return_t kr = vproc_mig_get_listener_port_rights(bootstrap_port, &sports, &sports_cnt);
	if (kr == 0 && sports_cnt) {
		/* For some reason, this SPI takes a count as a signed quantity. */
		(void)posix_spawnattr_set_importancewatch_port_np(&spattr, (int)sports_cnt, sports);

		/* All "count" parameters in MIG are counts of the array. So an array of
		 * mach_port_t containing 10 elements will have a count of ten, but it
		 * will occupy 40 bytes. So we must do the multiplication here to pass
		 * the correct size.
		 *
		 * Note that we do NOT release the send rights. We need them to be valid
		 * at the time they are passed to posix_spawn(2). When we exec(3) using
		 * posix_spawn(2), they'll be cleaned up anyway.
		 */
		mig_deallocate((vm_address_t)sports, sports_cnt * sizeof(sports[0]));
	} else if (kr != BOOTSTRAP_UNKNOWN_SERVICE) {
		(void)job_assumes_zero(j, kr);
	}

#if TARGET_OS_EMBEDDED
	if (!j->app || j->system_app) {
		(void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
	}
#else
	(void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
#endif

#if !TARGET_OS_EMBEDDED
	struct task_qos_policy qosinfo = {
		.task_latency_qos_tier = LATENCY_QOS_LAUNCH_DEFAULT_TIER,
		.task_throughput_qos_tier = THROUGHPUT_QOS_LAUNCH_DEFAULT_TIER,
	};

	if (!j->legacy_timers) {
		kr = task_policy_set(mach_task_self(), TASK_BASE_QOS_POLICY, (task_policy_t)&qosinfo, TASK_QOS_POLICY_COUNT);
		(void)job_assumes_zero_p(j, kr);
	}
#endif

#if HAVE_RESPONSIBILITY
	/* Specify which process is responsible for the new job. Per-app XPC
	 * services are the responsibility of the app. Other processes are
	 * responsible for themselves. This decision is final and also applies
	 * to the process's children, so don't initialize responsibility when
	 * starting a per-user launchd.
	 */
	if (j->mgr->req_pid) {
		responsibility_init2(j->mgr->req_pid, NULL);
	} else if (!j->per_user) {
		responsibility_init2(getpid(), j->prog ? j->prog : j->argv[0]);
	}
#endif

#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			// NOTE(review): job_assumes_zero() is handed the boolean result
			// of (init == 0), so a successful init (returning 0) evaluates
			// to 1 and looks like an assumption failure, even though the
			// quarantine is then correctly applied. The conventional
			// spelling would be job_assumes_zero(j, init(...)) == 0 --
			// verify against the job_assumes_zero() definition.
			if (job_assumes_zero(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
				(void)job_assumes_zero(j, qtn_proc_apply_to_self(qp));
			}
		}
	}
#endif

#if HAVE_SANDBOX
#if TARGET_OS_EMBEDDED
	struct sandbox_spawnattrs sbattrs;
	if (j->seatbelt_profile || j->container_identifier) {
		sandbox_spawnattrs_init(&sbattrs);
		if (j->seatbelt_profile) {
			sandbox_spawnattrs_setprofilename(&sbattrs, j->seatbelt_profile);
		}
		if (j->container_identifier) {
			sandbox_spawnattrs_setcontainer(&sbattrs, j->container_identifier);
		}
		(void)job_assumes_zero(j, posix_spawnattr_setmacpolicyinfo_np(&spattr, "Sandbox", &sbattrs, sizeof(sbattrs)));
	}
#else
	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (job_assumes_zero_p(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf)) == -1) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}
#endif
#endif

	// posix_spawn() takes the path as-is; posix_spawnp() searches PATH.
	psf = j->prog ? posix_spawn : posix_spawnp;

	if (likely(!(j->inetcompat || use_xpcproxy))) {
		file2exec = j->prog ? j->prog : argv[0];
	}

	// With POSIX_SPAWN_SETEXEC this replaces the current image; returning
	// at all means failure, and errno carries the reason out via _exit().
	errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);

#if HAVE_SANDBOX && !TARGET_OS_EMBEDDED
out_bad:
#endif
	_exit(errno);
}
4758
4759 void
4760 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
4761 {
4762 launch_data_t tmp;
4763 struct envitem *ei;
4764 job_t ji;
4765
4766 if (jm->parentmgr) {
4767 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4768 } else {
4769 char **tmpenviron = environ;
4770 for (; *tmpenviron; tmpenviron++) {
4771 char envkey[1024];
4772 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4773 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
4774 strncpy(envkey, *tmpenviron, sizeof(envkey));
4775 *(strchr(envkey, '=')) = '\0';
4776 launch_data_dict_insert(dict, s, envkey);
4777 }
4778 }
4779
4780 LIST_FOREACH(ji, &jm->jobs, sle) {
4781 SLIST_FOREACH(ei, &ji->global_env, sle) {
4782 if ((tmp = launch_data_new_string(ei->value))) {
4783 launch_data_dict_insert(dict, tmp, ei->key);
4784 }
4785 }
4786 }
4787 }
4788
4789 void
4790 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
4791 {
4792 struct envitem *ei;
4793 job_t ji;
4794
4795 if (jm->parentmgr) {
4796 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4797 }
4798
4799 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
4800 SLIST_FOREACH(ei, &ji->global_env, sle) {
4801 setenv(ei->key, ei->value, 1);
4802 }
4803 }
4804 }
4805
4806 void
4807 job_log_pids_with_weird_uids(job_t j)
4808 {
4809 size_t len = sizeof(pid_t) * get_kern_max_proc();
4810 pid_t *pids = NULL;
4811 uid_t u = j->mach_uid;
4812 int i = 0, kp_cnt = 0;
4813
4814 if (!launchd_apple_internal) {
4815 return;
4816 }
4817
4818 pids = malloc(len);
4819 if (!job_assumes(j, pids != NULL)) {
4820 return;
4821 }
4822
4823 runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);
4824
4825 /* libproc actually has some serious performance drawbacks when used over sysctl(3) in
4826 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
4827 * one kernel call, libproc requires that we get a list of PIDs we're interested in
4828 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
4829 * struct back in a single call for each one.
4830 *
4831 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
4832 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
4833 * libproc could go stale before we call proc_pidinfo().
4834 *
4835 * Note that proc_list*() APIs return the number of PIDs given back, not the number
4836 * of bytes written to the buffer.
4837 */
4838 if (job_assumes_zero_p(j, (kp_cnt = proc_listallpids(pids, len))) == -1) {
4839 goto out;
4840 }
4841
4842 for (i = 0; i < kp_cnt; i++) {
4843 struct proc_bsdshortinfo proc;
4844 /* We perhaps should not log a bug here if we get ESRCH back, due to the race
4845 * detailed above.
4846 */
4847 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
4848 if (errno != ESRCH) {
4849 (void)job_assumes_zero(j, errno);
4850 }
4851 continue;
4852 }
4853
4854 uid_t i_euid = proc.pbsi_uid;
4855 uid_t i_uid = proc.pbsi_ruid;
4856 uid_t i_svuid = proc.pbsi_svuid;
4857 pid_t i_pid = pids[i];
4858
4859 if (i_euid != u && i_uid != u && i_svuid != u) {
4860 continue;
4861 }
4862
4863 job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);
4864
4865 // Temporarily disabled due to 5423935 and 4946119.
4866 #if 0
4867 // Ask the accountless process to exit.
4868 (void)job_assumes_zero_p(j, kill2(i_pid, SIGTERM));
4869 #endif
4870 }
4871
4872 out:
4873 free(pids);
4874 }
4875
4876 static struct passwd *
4877 job_getpwnam(job_t j, const char *name)
4878 {
4879 /*
4880 * methodology for system daemons
4881 *
4882 * first lookup user record without any opendirectoryd interaction,
4883 * we don't know what interprocess dependencies might be in flight.
4884 * if that fails, we re-enable opendirectoryd interaction and
4885 * re-issue the lookup. We have to disable the libinfo L1 cache
4886 * otherwise libinfo will return the negative cache entry on the retry
4887 */
4888 #if !TARGET_OS_EMBEDDED
4889 struct passwd *pw = NULL;
4890
4891 if (pid1_magic && j->mgr == root_jobmgr) {
4892 // 1 == SEARCH_MODULE_FLAG_DISABLED
4893 si_search_module_set_flags("ds", 1);
4894 gL1CacheEnabled = false;
4895
4896 pw = getpwnam(name);
4897 si_search_module_set_flags("ds", 0);
4898 }
4899
4900 if (pw == NULL) {
4901 pw = getpwnam(name);
4902 }
4903
4904 return pw;
4905 #else
4906 #pragma unused (j)
4907 return getpwnam(name);
4908 #endif
4909 }
4910
4911 static struct group *
4912 job_getgrnam(job_t j, const char *name)
4913 {
4914 #if !TARGET_OS_EMBEDDED
4915 struct group *gr = NULL;
4916
4917 if (pid1_magic && j->mgr == root_jobmgr) {
4918 si_search_module_set_flags("ds", 1);
4919 gL1CacheEnabled = false;
4920
4921 gr = getgrnam(name);
4922
4923 si_search_module_set_flags("ds", 0);
4924 }
4925
4926 if (gr == NULL) {
4927 gr = getgrnam(name);
4928 }
4929
4930 return gr;
4931 #else
4932 #pragma unused (j)
4933 return getgrnam(name);
4934 #endif
4935 }
4936
// Called from job_postfork_become_user() in the forked child when we are
// not running as root: verify that HOME/USER/LOGNAME still agree with the
// user record and our real UID/GID. This function is all about 5201578.
// Today a mismatch only warns (see out_bad).
void
job_postfork_test_user(job_t j)
{
	const char *home_env_var = getenv("HOME");
	const char *user_env_var = getenv("USER");
	const char *logname_env_var = getenv("LOGNAME");
	uid_t tmp_uid, local_uid = getuid();
	gid_t tmp_gid, local_gid = getgid();
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	char loginname[2000];
	struct passwd *pwe;


	if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
			&& strcmp(user_env_var, logname_env_var) == 0)) {
		goto out_bad;
	}

	if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
		job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
		goto out_bad;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	tmp_uid = pwe->pw_uid;
	tmp_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	// Each mismatch below means the environment or account changed between
	// job setup and now.
	if (strcmp(loginname, logname_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
		goto out_bad;
	}
	if (strcmp(homedir, home_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
		goto out_bad;
	}
	if (local_uid != tmp_uid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'U', tmp_uid, local_uid);
		goto out_bad;
	}
	if (local_gid != tmp_gid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'G', tmp_gid, local_gid);
		goto out_bad;
	}

	return;
out_bad:
#if 0
	(void)job_assumes_zero_p(j, kill2(getppid(), SIGTERM));
	_exit(EXIT_FAILURE);
#else
	job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
#endif
}
5006
void
job_postfork_become_user(job_t j)
{
	/* Runs in the forked child, before exec: drop from root to the job's
	 * configured user/group and prime the login environment. Any failure is
	 * fatal to the child (_exit) -- we must never exec as the wrong identity.
	 */
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	// A non-root launchd cannot switch identities; just sanity-check ours.
	if (getuid() != 0) {
		return job_postfork_test_user(j);
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = job_getpwnam(j, j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(ESRCH);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_log_pids_with_weird_uids(j);
			_exit(ESRCH);
		}
	} else {
		// Neither a user nor a Mach UID was requested: stay root.
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	// Refuse to launch jobs on behalf of expired accounts.
	if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	// Warn (but proceed) when the directory service's answer disagrees
	// with what the job asked for by name/UID.
	if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	// An explicit GroupName overrides the account's primary group.
	if (j->groupname) {
		struct group *gre;

		if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(ESRCH);
		}

		desired_gid = gre->gr_gid;
	}

	if (job_assumes_zero_p(j, setlogin(loginname)) == -1) {
		_exit(EXIT_FAILURE);
	}

	// setgid() must precede setuid(): once we are no longer root we cannot
	// change groups.
	if (job_assumes_zero_p(j, setgid(desired_gid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (likely(!j->no_init_groups)) {
#if 1
		if (job_assumes_zero_p(j, initgroups(loginname, desired_gid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#else
		/* Do our own little initgroups(). We do this to guarantee that we're
		 * always opted into dynamic group resolution in the kernel. initgroups(3)
		 * does not make this guarantee.
		 */
		int groups[NGROUPS], ngroups;

		// A failure here isn't fatal, and we'll still get data we can use.
		(void)job_assumes_zero_p(j, getgrouplist(j->username, desired_gid, groups, &ngroups));

		if (job_assumes_zero_p(j, syscall(SYS_initgroups, ngroups, groups, desired_uid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#endif
	}

	// Point of no return: drop root last.
	if (job_assumes_zero_p(j, setuid(desired_uid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	// Only export TMPDIR if confstr() succeeded without truncation.
	if (likely(r > 0 && r < sizeof(tmpdirpath))) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	// Final argument 0: never clobber values the job's plist already set.
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
5133
5134 void
5135 job_setup_attributes(job_t j)
5136 {
5137 struct limititem *li;
5138 struct envitem *ei;
5139
5140 if (unlikely(j->setnice)) {
5141 (void)job_assumes_zero_p(j, setpriority(PRIO_PROCESS, 0, j->nice));
5142 }
5143
5144 SLIST_FOREACH(li, &j->limits, sle) {
5145 struct rlimit rl;
5146
5147 if (job_assumes_zero_p(j, getrlimit(li->which, &rl) == -1)) {
5148 continue;
5149 }
5150
5151 if (li->sethard) {
5152 rl.rlim_max = li->lim.rlim_max;
5153 }
5154 if (li->setsoft) {
5155 rl.rlim_cur = li->lim.rlim_cur;
5156 }
5157
5158 if (setrlimit(li->which, &rl) == -1) {
5159 job_log_error(j, LOG_WARNING, "setrlimit()");
5160 }
5161 }
5162
5163 if (unlikely(!j->inetcompat && j->session_create)) {
5164 launchd_SessionCreate();
5165 }
5166
5167 if (unlikely(j->low_pri_io)) {
5168 (void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE));
5169 }
5170 if (j->low_priority_background_io) {
5171 (void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_DARWIN_BG, IOPOL_THROTTLE));
5172 }
5173 if (unlikely(j->rootdir)) {
5174 (void)job_assumes_zero_p(j, chroot(j->rootdir));
5175 (void)job_assumes_zero_p(j, chdir("."));
5176 }
5177
5178 job_postfork_become_user(j);
5179
5180 if (unlikely(j->workingdir)) {
5181 if (chdir(j->workingdir) == -1) {
5182 if (errno == ENOENT || errno == ENOTDIR) {
5183 job_log(j, LOG_ERR, "Job specified non-existent working directory: %s", j->workingdir);
5184 } else {
5185 (void)job_assumes_zero(j, errno);
5186 }
5187 }
5188 }
5189
5190 if (unlikely(j->setmask)) {
5191 umask(j->mask);
5192 }
5193
5194 if (j->stdin_fd) {
5195 (void)job_assumes_zero_p(j, dup2(j->stdin_fd, STDIN_FILENO));
5196 } else {
5197 job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
5198 }
5199 job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
5200 job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
5201
5202 jobmgr_setup_env_from_other_jobs(j->mgr);
5203
5204 SLIST_FOREACH(ei, &j->env, sle) {
5205 setenv(ei->key, ei->value, 1);
5206 }
5207
5208 #if !TARGET_OS_EMBEDDED
5209 if (j->jetsam_properties) {
5210 (void)job_assumes_zero(j, proc_setpcontrol(PROC_SETPC_TERMINATE));
5211 }
5212 #endif
5213
5214 #if TARGET_OS_EMBEDDED
5215 if (j->main_thread_priority != 0) {
5216 struct sched_param params;
5217 bzero(&params, sizeof(params));
5218 params.sched_priority = j->main_thread_priority;
5219 (void)job_assumes_zero_p(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params));
5220 }
5221 #endif
5222
5223 /*
5224 * We'd like to call setsid() unconditionally, but we have reason to
5225 * believe that prevents launchd from being able to send signals to
5226 * setuid children. We'll settle for process-groups.
5227 */
5228 if (getppid() != 1) {
5229 (void)job_assumes_zero_p(j, setpgid(0, 0));
5230 } else {
5231 (void)job_assumes_zero_p(j, setsid());
5232 }
5233 }
5234
5235 void
5236 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
5237 {
5238 int fd;
5239
5240 if (!path) {
5241 return;
5242 }
5243
5244 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
5245 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
5246 return;
5247 }
5248
5249 (void)job_assumes_zero_p(j, dup2(fd, target_fd));
5250 (void)job_assumes_zero(j, runtime_close(fd));
5251 }
5252
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	/* Compute ci's next fire time, insert it into the global list sorted by
	 * fire time, and (re)arm the single shared EVFILT_TIMER kevent for the
	 * earliest event in that list.
	 */
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	// A weekday constraint contributes its own candidate time; with both
	// mday and wday set, cron semantics fire on whichever comes first.
	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	// Walk the sorted list to find the first event that fires after us.
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		// ci must want to fire after every other timer, or there are no timers

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	// One absolute-time kevent covers all calendar events; key it on the
	// list head's address and arm it for the earliest deadline.
	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	if (job_assumes_zero_p(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr)) != -1) {
		char time_string[100];
		size_t time_string_len;

		// ctime_r() appends a trailing newline; strip it for clean logging.
		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
5308
5309 bool
5310 jobmgr_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
5311 {
5312 jobmgr_t jm = ctx;
5313 jobmgr_log(jm, LOG_ERR, "%s", message);
5314
5315 return true;
5316 }
5317
5318 bool
5319 job_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
5320 {
5321 job_t j = ctx;
5322 job_log(j, LOG_ERR, "%s", message);
5323
5324 return true;
5325 }
5326
// ri: NULL = please sample j->p; non-NULL = use this sample
void
job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status)
{
#if HAVE_SYSTEMSTATS
	// Only real, currently-running managed jobs are worth sampling.
	if (j->anonymous || !j->p) {
		return;
	}
	if (!systemstats_is_enabled()) {
		return;
	}
	// Prefer the bundle identifier for attribution; fall back to the label.
	const char *name;
	if (j->cfbundleidentifier) {
		name = j->cfbundleidentifier;
	} else {
		name = j->label;
	}
	int r = 0;
	struct rusage_info_v1 ris;
	// No sample supplied: take one ourselves from the live process.
	if (ri == NULL) {
		ri = &ris;
		r = proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)ri);
	}
	if (r == -1) {
		return;
	}
	job_log_systemstats(j->p, j->uniqueid, runtime_get_uniqueid(), j->mgr->req_pid, j->mgr->req_uniqueid, name, ri, exit_status);
#else
#pragma unused (j, ri, exit_status)
#endif
}
5358
#if HAVE_SYSTEMSTATS
// ri: NULL = don't write fields from ri; non-NULL = use this sample
// Translate a rusage sample into a systemstats process-usage record and
// submit it. Fields are gated on the systemstats API version available at
// build time.
static
void
job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status)
{
	if (!systemstats_is_enabled()) {
		return;
	}

	struct systemstats_process_usage_s info;
	bzero(&info, sizeof(info));
	info.name = name;
	info.pid = pid;
	info.exit_status = exit_status;
	info.uid = getuid();
	info.ppid = getpid();
	info.responsible_pid = req_pid;

	if (likely(ri)) {
		info.macho_uuid = (const uint8_t *)&ri->ri_uuid;
		info.user_time = ri->ri_user_time;
		info.system_time = ri->ri_system_time;
		info.pkg_idle_wkups = ri->ri_pkg_idle_wkups;
		info.interrupt_wkups = ri->ri_interrupt_wkups;
		info.proc_start_abstime = ri->ri_proc_start_abstime;
		info.proc_exit_abstime = ri->ri_proc_exit_abstime;
#if SYSTEMSTATS_API_VERSION >= 20130319
		info.pageins = ri->ri_pageins;
		info.wired_size = ri->ri_wired_size;
		info.resident_size = ri->ri_resident_size;
		info.phys_footprint = ri->ri_phys_footprint;
		// info.purgeablesize = ???
#endif
#if SYSTEMSTATS_API_VERSION >= 20130328
		info.child_user_time = ri->ri_child_user_time;
		info.child_system_time = ri->ri_child_system_time;
		info.child_pkg_idle_wkups = ri->ri_child_pkg_idle_wkups;
		info.child_interrupt_wkups = ri->ri_child_interrupt_wkups;
		info.child_pageins = ri->ri_child_pageins;
		info.child_elapsed_abstime = ri->ri_child_elapsed_abstime;
#endif
	}
#if SYSTEMSTATS_API_VERSION >= 20130410
	info.uniqueid = uniqueid;
	info.parent_uniqueid = parent_uniqueid;
	info.responsible_uniqueid = req_uniqueid;
#endif
	systemstats_write_process_usage(&info);
}
#endif /* HAVE_SYSTEMSTATS */
5410
5411 struct waiting4attach *
5412 waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type)
5413 {
5414 size_t xtra = strlen(name) + 1;
5415
5416 struct waiting4attach *w4a = malloc(sizeof(*w4a) + xtra);
5417 if (!w4a) {
5418 return NULL;
5419 }
5420
5421 w4a->port = port;
5422 w4a->dest = dest;
5423 w4a->type = type;
5424 (void)strcpy(w4a->name, name);
5425
5426 if (dest) {
5427 LIST_INSERT_HEAD(&_launchd_domain_waiters, w4a, le);
5428 } else {
5429 LIST_INSERT_HEAD(&jm->attaches, w4a, le);
5430 }
5431
5432
5433 (void)jobmgr_assumes_zero(jm, launchd_mport_notify_req(port, MACH_NOTIFY_DEAD_NAME));
5434 return w4a;
5435 }
5436
void
waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a)
{
	/* Tear down a waiter record: unlink it, cancel the dead-name
	 * notification armed in waiting4attach_new(), and drop our send right.
	 */
	jobmgr_log(jm, LOG_DEBUG, "Canceling dead-name notification for waiter port: 0x%x", w4a->port);

	LIST_REMOVE(w4a, le);

	// Passing MACH_PORT_NULL as the notify port cancels the registration;
	// the previously-registered send-once right comes back in `previous`
	// and must be deallocated or it would leak.
	mach_port_t previous = MACH_PORT_NULL;
	(void)jobmgr_assumes_zero(jm, mach_port_request_notification(mach_task_self(), w4a->port, MACH_NOTIFY_DEAD_NAME, 0, MACH_PORT_NULL, MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous));
	if (previous) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(previous));
	}

	jobmgr_assumes_zero(jm, launchd_mport_deallocate(w4a->port));
	free(w4a);
}
5453
5454 struct waiting4attach *
5455 waiting4attach_find(jobmgr_t jm, job_t j)
5456 {
5457 char *name2use = (char *)j->label;
5458 if (j->app) {
5459 struct envitem *ei = NULL;
5460 SLIST_FOREACH(ei, &j->env, sle) {
5461 if (strcmp(ei->key, XPC_SERVICE_RENDEZVOUS_TOKEN) == 0) {
5462 name2use = ei->value;
5463 break;
5464 }
5465 }
5466 }
5467
5468 struct waiting4attach *w4ai = NULL;
5469 LIST_FOREACH(w4ai, &jm->attaches, le) {
5470 if (strcmp(name2use, w4ai->name) == 0) {
5471 job_log(j, LOG_DEBUG, "Found attachment: %s", name2use);
5472 break;
5473 }
5474 }
5475
5476 return w4ai;
5477 }
5478
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	/* Core formatter behind job_log()/job_log_error(): decorate msg with
	 * the errno string (when err != 0) and hand it to launchd's syslog
	 * shim, attributed to the job and its manager. j may be NULL.
	 */
	const char *label2use = j ? j->label : "com.apple.launchd.job-unknown";
	const char *mgr2use = j ? j->mgr->name : "com.apple.launchd.jobmanager-unknown";
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	struct launchd_syslog_attr attr = {
		.from_name = launchd_label,
		.about_name = label2use,
		.session_name = mgr2use,
		.priority = pri,
		.from_uid = getuid(),
		.from_pid = getpid(),
		.about_pid = j ? j->p : 0,
	};

	/* Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(2), but before the exec*(3). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	// 200 bytes of headroom for the errno/label decoration below.
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s: %d: %s", msg, err, strerror(err));
#else
		snprintf(newmsg, newmsgsz, "(%s) %s: %d: %s", label2use, msg, err, strerror(err));
#endif
	} else {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s", msg);
#else
		snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
#endif
	}

	// Debug-flagged jobs temporarily open the log mask up to LOG_DEBUG.
	if (j && unlikely(j->debug)) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	launchd_vsyslog(&attr, newmsg, ap);

	if (j && unlikely(j->debug)) {
		setlogmask(oldmask);
	}
}
5533
5534 void
5535 job_log_error(job_t j, int pri, const char *msg, ...)
5536 {
5537 va_list ap;
5538
5539 va_start(ap, msg);
5540 job_logv(j, pri, errno, msg, ap);
5541 va_end(ap);
5542 }
5543
5544 void
5545 job_log(job_t j, int pri, const char *msg, ...)
5546 {
5547 va_list ap;
5548
5549 va_start(ap, msg);
5550 job_logv(j, pri, 0, msg, ap);
5551 va_end(ap);
5552 }
5553
#if 0
// Compiled out: errno-decorating variant of jobmgr_log(). Kept for
// reference; see jobmgr_logv() for the underlying implementation.
void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}
#endif
5565
void
jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children)
{
	/* Recursively dump performance statistics for this job manager, its
	 * sub-managers, and every job they contain. When signal_children is
	 * set, per-user launchd instances are poked with SIGINFO so they dump
	 * their own statistics too.
	 */
#if HAVE_SYSTEMSTATS
	// Log information for kernel_task and pid 1 launchd.
	if (systemstats_is_enabled() && pid1_magic && jm == root_jobmgr) {
#if SYSTEMSTATS_API_VERSION >= 20130328
		if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_SHOULD_LOG_ENERGY_STATISTICS)) {
			systemstats_write_intel_energy_statistics(NULL);
		}
#else
		systemstats_write_intel_energy_statistics(NULL);
#endif
		job_log_systemstats(0, 0, 0, 0, 0, "com.apple.kernel", NULL, -1);
		job_log_systemstats(1, 1, 0, 1, 1, "com.apple.launchd", NULL, -1);
	}
#endif
	// Depth-first recursion into sub-managers.
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_perf_statistics(jmi, signal_children);
	}

	if (jm->xpc_singleton) {
		jobmgr_log(jm, LOG_PERF, "XPC Singleton Domain: %s", jm->shortdesc);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
		jobmgr_log(jm, LOG_PERF, "XPC Private Domain: %s", jm->owner);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) {
		jobmgr_log(jm, LOG_PERF, "Created via bootstrap_subset()");
	}

	jobmgr_log(jm, LOG_PERF, "Jobs in job manager:");

	job_t ji = NULL;
	LIST_FOREACH(ji, &jm->jobs, sle) {
		job_log_perf_statistics(ji, NULL, -1);
		// strstr(...) == label checks for the per-user launchd label prefix.
		if (unlikely(signal_children) && unlikely(strstr(ji->label, "com.apple.launchd.peruser.") == ji->label)) {
			jobmgr_log(jm, LOG_PERF, "Sending SIGINFO to peruser launchd %d", ji->p);
			kill(ji->p, SIGINFO);
		}
	}

	jobmgr_log(jm, LOG_PERF, "End of job list.");
}
5609
5610 void
5611 jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
5612 {
5613 va_list ap;
5614
5615 va_start(ap, msg);
5616 jobmgr_logv(jm, pri, 0, msg, ap);
5617 va_end(ap);
5618 }
5619
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	/* Prefix msg with this manager's name and bubble it up the manager
	 * hierarchy, so the root manager emits a message carrying the full
	 * chain of names. A NULL jm means the root manager.
	 */
	if (!jm) {
		jm = root_jobmgr;
	}

	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	// Worst case: every character of the name is '%' and must be doubled.
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	// Escape '%' in the manager name: newmsg is reused as a format string
	// by the recursive call below.
	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		// Recurse upward; errno was already folded in, so pass 0.
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct launchd_syslog_attr attr = {
			.from_name = launchd_label,
			.about_name = launchd_label,
			.session_name = jm->name,
			.priority = pri,
			.from_uid = getuid(),
			.from_pid = getpid(),
			.about_pid = getpid(),
		};

		launchd_vsyslog(&attr, newmsg, ap);
	}
}
5666
// Context threaded through launch_data_dict_iterate() while parsing a
// StartCalendarInterval dictionary.
struct cal_dict_walk {
	job_t j;         // job being configured (used for logging)
	struct tm tmptm; // accumulated calendar fields; tm_sec == -1 signals a parse error
};
5671
void
calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
{
	/* Dictionary-iteration callback: validate one StartCalendarInterval
	 * entry and store it into the struct tm being accumulated in context.
	 * Out-of-range values set tm_sec = -1 so the caller can reject the
	 * whole dictionary.
	 */
	struct cal_dict_walk *cdw = context;
	struct tm *tmptm = &cdw->tmptm;
	job_t j = cdw->j;
	int64_t val;

	if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
		// hack to let caller know something went wrong
		tmptm->tm_sec = -1;
		return;
	}

	val = launch_data_get_integer(obj);

	// Note: a negative value only warns; the field keeps its -1 wildcard.
	if (val < 0) {
		job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
		if (val > 59) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_min = (typeof(tmptm->tm_min)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
		if (val > 23) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
		if (val < 1 || val > 31) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
		// 0 through 7 accepted, cron-style (0 and 7 both meaning Sunday --
		// presumably; confirm against cronemu_wday()).
		if (val > 7) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
		if (val > 12) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
			tmptm->tm_mon -= 1; // 4798263 cron compatibility
		}
	}
}
5728
5729 bool
5730 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5731 {
5732 struct cal_dict_walk cdw;
5733
5734 cdw.j = j;
5735 memset(&cdw.tmptm, 0, sizeof(0));
5736
5737 cdw.tmptm.tm_min = -1;
5738 cdw.tmptm.tm_hour = -1;
5739 cdw.tmptm.tm_mday = -1;
5740 cdw.tmptm.tm_wday = -1;
5741 cdw.tmptm.tm_mon = -1;
5742
5743 if (!job_assumes(j, obj != NULL)) {
5744 return false;
5745 }
5746
5747 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5748 return false;
5749 }
5750
5751 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5752
5753 if (unlikely(cdw.tmptm.tm_sec == -1)) {
5754 return false;
5755 }
5756
5757 return calendarinterval_new(j, &cdw.tmptm);
5758 }
5759
5760 bool
5761 calendarinterval_new(job_t j, struct tm *w)
5762 {
5763 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5764
5765 if (!job_assumes(j, ci != NULL)) {
5766 return false;
5767 }
5768
5769 ci->when = *w;
5770 ci->job = j;
5771
5772 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
5773
5774 calendarinterval_setalarm(j, ci);
5775
5776 runtime_add_weak_ref();
5777
5778 return true;
5779 }
5780
5781 void
5782 calendarinterval_delete(job_t j, struct calendarinterval *ci)
5783 {
5784 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
5785 LIST_REMOVE(ci, global_sle);
5786
5787 free(ci);
5788
5789 runtime_del_weak_ref();
5790 }
5791
5792 void
5793 calendarinterval_sanity_check(void)
5794 {
5795 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5796 time_t now = time(NULL);
5797
5798 if (unlikely(ci && (ci->when_next < now))) {
5799 (void)jobmgr_assumes_zero_p(root_jobmgr, raise(SIGUSR1));
5800 }
5801 }
5802
void
calendarinterval_callback(void)
{
	/* Shared EVFILT_TIMER fired: start every job whose calendar event is
	 * due, and re-insert each fired event with its next fire time.
	 */
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	// _SAFE iteration: setalarm() below re-inserts ci elsewhere in the list.
	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		// The list is sorted by fire time; stop at the first future event.
		if (ci->when_next > now) {
			break;
		}

		// Remove first: setalarm() computes the next fire time and inserts
		// ci back into its new sorted position.
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
5823
5824 bool
5825 socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt)
5826 {
5827 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5828
5829 if (!job_assumes(j, sg != NULL)) {
5830 return false;
5831 }
5832
5833 sg->fds = calloc(1, fd_cnt * sizeof(int));
5834 sg->fd_cnt = fd_cnt;
5835
5836 if (!job_assumes(j, sg->fds != NULL)) {
5837 free(sg);
5838 return false;
5839 }
5840
5841 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
5842 strcpy(sg->name_init, name);
5843
5844 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5845
5846 runtime_add_weak_ref();
5847
5848 return true;
5849 }
5850
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	/* Close every descriptor in the group, unlink it from the job, and
	 * free its storage.
	 */
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		// Disabled: unlink the filesystem path behind AF_UNIX sockets.
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		// 5480306
		if (job_assumes_zero(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
			&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			(void)job_assumes(j, unlink(sun->sun_path) != -1);
			// We might conditionally need to delete a directory here
		}
#endif
		(void)job_assumes_zero_p(j, runtime_close(sg->fds[i]));
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	runtime_del_weak_ref();
}
5879
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	/* Register (do_add) or deregister EVFILT_READ kevents for every fd in
	 * the group, logging the list of descriptors for debugging.
	 */
	struct kevent kev[sg->fd_cnt];
	char buf[10000];
	unsigned int i, buf_off = 0;

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		/* Robustness fix: snprintf() returns the length it *wanted* to
		 * write, so unchecked accumulation past sizeof(buf) would underflow
		 * the size argument and index beyond buf. Stop appending once full;
		 * the log line is merely truncated.
		 */
		if (buf_off < sizeof(buf)) {
			buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
		}
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	(void)job_assumes_zero_p(j, kevent_bulk_mod(kev, sg->fd_cnt));

	// Each entry is expected back flagged EV_ERROR with its per-entry
	// status in kev[i].data (0 on success) -- receipt-style reporting from
	// kevent_bulk_mod(); confirm against its implementation.
	for (i = 0; i < sg->fd_cnt; i++) {
		(void)job_assumes(j, kev[i].flags & EV_ERROR);
		errno = (typeof(errno)) kev[i].data;
		(void)job_assumes_zero(j, kev[i].data);
	}
}
5902
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	// Stop watching the group's descriptors (e.g. while the job runs).
	socketgroup_kevent_mod(j, sg, false);
}
5908
void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	// Resume watching the group's descriptors for incoming activity.
	socketgroup_kevent_mod(j, sg, true);
}
5914
void
socketgroup_callback(job_t j)
{
	// Activity on a watched socket: (re)launch the job on demand.
	job_dispatch(j, true);
}
5920
5921 bool
5922 envitem_new(job_t j, const char *k, const char *v, bool global)
5923 {
5924 if (global && !launchd_allow_global_dyld_envvars) {
5925 if (strncmp("DYLD_", k, sizeof("DYLD_") - 1) == 0) {
5926 job_log(j, LOG_ERR, "Ignoring global environment variable submitted by job (variable=value): %s=%s", k, v);
5927 return false;
5928 }
5929 }
5930
5931 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
5932
5933 if (!job_assumes(j, ei != NULL)) {
5934 return false;
5935 }
5936
5937 strcpy(ei->key_init, k);
5938 ei->value = ei->key_init + strlen(k) + 1;
5939 strcpy(ei->value, v);
5940
5941 if (global) {
5942 if (SLIST_EMPTY(&j->global_env)) {
5943 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5944 }
5945 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5946 } else {
5947 SLIST_INSERT_HEAD(&j->env, ei, sle);
5948 }
5949
5950 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
5951
5952 return true;
5953 }
5954
5955 void
5956 envitem_delete(job_t j, struct envitem *ei, bool global)
5957 {
5958 if (global) {
5959 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
5960 if (SLIST_EMPTY(&j->global_env)) {
5961 LIST_REMOVE(j, global_env_sle);
5962 }
5963 } else {
5964 SLIST_REMOVE(&j->env, ei, envitem, sle);
5965 }
5966
5967 free(ei);
5968 }
5969
5970 void
5971 envitem_setup(launch_data_t obj, const char *key, void *context)
5972 {
5973 job_t j = context;
5974
5975 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5976 return;
5977 }
5978
5979 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5980 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
5981 } else {
5982 job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5983 }
5984 }
5985
5986 bool
5987 limititem_update(job_t j, int w, rlim_t r)
5988 {
5989 struct limititem *li;
5990
5991 SLIST_FOREACH(li, &j->limits, sle) {
5992 if (li->which == w) {
5993 break;
5994 }
5995 }
5996
5997 if (li == NULL) {
5998 li = calloc(1, sizeof(struct limititem));
5999
6000 if (!job_assumes(j, li != NULL)) {
6001 return false;
6002 }
6003
6004 SLIST_INSERT_HEAD(&j->limits, li, sle);
6005
6006 li->which = w;
6007 }
6008
6009 if (j->importing_hard_limits) {
6010 li->lim.rlim_max = r;
6011 li->sethard = true;
6012 } else {
6013 li->lim.rlim_cur = r;
6014 li->setsoft = true;
6015 }
6016
6017 return true;
6018 }
6019
6020 void
6021 limititem_delete(job_t j, struct limititem *li)
6022 {
6023 SLIST_REMOVE(&j->limits, li, limititem, sle);
6024
6025 free(li);
6026 }
6027
#if HAVE_SANDBOX
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	/* Dictionary-iteration callback: translate boolean sandbox-flag keys
	 * into bits on the job's seatbelt_flags.
	 */
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	// A false value is simply "flag not set"; nothing to record.
	if (launch_data_get_bool(obj) == false) {
		return;
	}

	if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
6048
6049 void
6050 limititem_setup(launch_data_t obj, const char *key, void *context)
6051 {
6052 job_t j = context;
6053 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
6054 rlim_t rl;
6055
6056 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
6057 return;
6058 }
6059
6060 rl = launch_data_get_integer(obj);
6061
6062 for (i = 0; i < limits_cnt; i++) {
6063 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
6064 break;
6065 }
6066 }
6067
6068 if (i == limits_cnt) {
6069 return;
6070 }
6071
6072 limititem_update(j, launchd_keys2limits[i].val, rl);
6073 }
6074
bool
job_useless(job_t j)
{
	/* Decide whether an exited job should be garbage-collected rather than
	 * kept around for relaunch. Returns true when the job serves no
	 * further purpose.
	 */
	if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
		// A legacy LaunchServices job with a live bootstrap port is still
		// reachable; keep it.
		if (j->legacy_LS_job && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->shutdown_monitor) {
		// The shutdown monitor must outlive everything else.
		return false;
	} else if (j->mgr->shutting_down && !j->mgr->parentmgr) {
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		if (total_children == 0 && !j->anonymous) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
		}
		return true;
	} else if (j->legacy_mach_job) {
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	} else {
		/* If the job's executable does not have any valid architectures (for
		 * example, if it's a PowerPC-only job), then we don't even bother
		 * trying to relaunch it, as we have no reasonable expectation that
		 * the situation will change.
		 *
		 * <rdar://problem/9106979>
		 */
		if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
			job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
			return true;
		}
	}

	return false;
}
6119
bool
job_keepalive(job_t j)
{
	/* Evaluate the job's KeepAlive criteria: returns true when the job
	 * should be (re)started now, false when it should stay idle.
	 */
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
	bool is_not_kextd = (launchd_apple_internal || (strcmp(j->label, "com.apple.kextd") != 0));

	if (unlikely(j->mgr->shutting_down)) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
		return false;
	}

	if (unlikely(j->needs_kickoff)) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	// Queued messages on any of the job's Mach service ports demand a start.
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
			(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
				status.mps_msgcount, ms->name);
			return true;
		}
	}

	/* TODO: Coalesce external events and semaphore items, since they're basically
	 * the same thing.
	 */
	struct externalevent *ei = NULL;
	LIST_FOREACH(ei, &j->events, job_le) {
		if (ei->state == ei->wanted_state) {
			return true;
		}
	}

	/* Each positive/negative pair of cases shares its comparison: the
	 * positive case sets wanted_state = true and deliberately falls
	 * through into the negative case, which tests against wanted_state
	 * (still false when entered directly).
	 */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			// FALLTHROUGH
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			// FALLTHROUGH
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case CRASHED:
			wanted_state = true;
			// FALLTHROUGH
		case DID_NOT_CRASH:
			if (j->crashed == wanted_state) {
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			// FALLTHROUGH
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(NULL, si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			// FALLTHROUGH
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(NULL, si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		}
	}

	return false;
}
6233
6234 const char *
6235 job_active(job_t j)
6236 {
6237 if (j->p && j->shutdown_monitor) {
6238 return "Monitoring shutdown";
6239 }
6240 if (j->p) {
6241 return "PID is still valid";
6242 }
6243
6244 if (j->priv_port_has_senders) {
6245 return "Privileged Port still has outstanding senders";
6246 }
6247
6248 struct machservice *ms;
6249 SLIST_FOREACH(ms, &j->machservices, sle) {
6250 /* If we've simulated an exit, we mark the job as non-active, even
6251 * though doing so will leave it in an unsafe state. We do this so that
6252 * shutdown can proceed. See <rdar://problem/11126530>.
6253 */
6254 if (!j->workaround9359725 && ms->recv && machservice_active(ms)) {
6255 job_log(j, LOG_INFO, "Mach service is still active: %s", ms->name);
6256 return "Mach service is still active";
6257 }
6258 }
6259
6260 return NULL;
6261 }
6262
6263 void
6264 machservice_watch(job_t j, struct machservice *ms)
6265 {
6266 if (ms->recv) {
6267 if (job_assumes_zero(j, runtime_add_mport(ms->port, NULL)) == KERN_INVALID_RIGHT) {
6268 ms->recv_race_hack = true;
6269 }
6270 }
6271 }
6272
// Stop watching a Mach service's port by removing it from launchd's port set.
void
machservice_ignore(job_t j, struct machservice *ms)
{
	/* We only add ports whose receive rights we control into the port set, so
	 * don't attempt to remove the service from the port set if we didn't put it
	 * there in the first place. Otherwise, we could wind up trying to access a
	 * bogus index (like MACH_PORT_DEAD) or zeroing a valid one out.
	 *
	 * <rdar://problem/10898014>
	 */
	if (ms->recv) {
		(void)job_assumes_zero(j, runtime_remove_mport(ms->port));
	}
}
6287
/* Destroy a service's current receive right and replace it with a brand-new
 * port, bumping the generation number so stale send rights are detectable.
 * The port-hash entry must be removed before the close and re-added after the
 * new port exists, because the hash bucket is keyed on the port name.
 */
void
machservice_resetport(job_t j, struct machservice *ms)
{
	LIST_REMOVE(ms, port_hash_sle);
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	ms->gen_num++;
	(void)job_assumes_zero(j, launchd_mport_create_recv(&ms->port));
	(void)job_assumes_zero(j, launchd_mport_make_send(ms->port));
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
6300
/* Stamp the first few characters of the owning program's basename into the
 * port's kernel context word — presumably so debugging tools can identify
 * which program a port belongs to (TODO: confirm consumer).
 */
void
machservice_stamp_port(job_t j, struct machservice *ms)
{
	mach_port_context_t ctx = 0;
	char *where2get = j->prog ? j->prog : j->argv[0];

	// Strip any directory prefix; use only the executable's basename.
	char *prog = NULL;
	if ((prog = strrchr(where2get, '/'))) {
		prog++;
	} else {
		prog = where2get;
	}

	/* Intentionally non-terminating copy: ctx is a fixed-size integer being
	 * used as a raw character buffer, so no NUL terminator is needed.
	 */
	(void)strncpy((char *)&ctx, prog, sizeof(ctx));
#if __LITTLE_ENDIAN__
#if __LP64__
	// Swap so the bytes read as the original string when ctx is viewed big-endian.
	ctx = OSSwapBigToHostInt64(ctx);
#else
	ctx = OSSwapBigToHostInt32(ctx);
#endif
#endif

	(void)job_assumes_zero(j, mach_port_set_context(mach_task_self(), ms->port, ctx));
}
6325
/* Create a new Mach service record for job `j` under `name`.
 *
 * If *serviceport is MACH_PORT_NULL, launchd allocates a fresh receive right
 * (and a send right) and returns it through *serviceport; the service is then
 * "owned" (ms->recv). Otherwise the caller-supplied send right is adopted and
 * the service is immediately considered active.
 *
 * Returns the new record, or NULL on failure (dead port or allocation/port
 * errors). On failure, *serviceport is left untouched.
 */
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	/* Don't create new MachServices for dead ports. This is primarily for
	 * clients who use bootstrap_register2(). They can pass in a send right, but
	 * then that port can immediately go dead. Hilarity ensues.
	 *
	 * <rdar://problem/10898014>
	 */
	if (*serviceport == MACH_PORT_DEAD) {
		return NULL;
	}

	// The service name lives in trailing storage after the struct.
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
	if (!job_assumes(j, ms != NULL)) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->gen_num = 1;
	ms->per_pid = pid_local;

	if (likely(*serviceport == MACH_PORT_NULL)) {
		if (job_assumes_zero(j, launchd_mport_create_recv(&ms->port)) != KERN_SUCCESS) {
			goto out_bad;
		}

		if (job_assumes_zero(j, launchd_mport_make_send(ms->port)) != KERN_SUCCESS) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		// Caller provided a send right; we have no receive right for it.
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);

	jobmgr_t where2put = j->mgr;
	// XPC domains are separate from Mach bootstraps.
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		if (launchd_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2put = root_jobmgr;
		}
	}

	/* Don't allow MachServices added by multiple-instance jobs to be looked up
	 * by others. We could just do this with a simple bit, but then we'd have to
	 * uniquify the names ourselves to avoid collisions. This is just easier.
	 */
	if (!j->dedicated_instance) {
		LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	}
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	if (ms->recv) {
		machservice_stamp_port(j, ms);
	}

	job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);

	return ms;
out_bad2:
	// Undo the receive-right allocation before freeing the record.
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
out_bad:
	free(ms);
	return NULL;
}
6396
6397 struct machservice *
6398 machservice_new_alias(job_t j, struct machservice *orig)
6399 {
6400 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
6401 if (job_assumes(j, ms != NULL)) {
6402 strcpy((char *)ms->name, orig->name);
6403 ms->alias = orig;
6404 ms->job = j;
6405
6406 LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
6407 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
6408 jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
6409 }
6410
6411 return ms;
6412 }
6413
6414 bootstrap_status_t
6415 machservice_status(struct machservice *ms)
6416 {
6417 ms = ms->alias ? ms->alias : ms;
6418 if (ms->isActive) {
6419 return BOOTSTRAP_STATUS_ACTIVE;
6420 } else if (ms->job->ondemand) {
6421 return BOOTSTRAP_STATUS_ON_DEMAND;
6422 } else {
6423 return BOOTSTRAP_STATUS_INACTIVE;
6424 }
6425 }
6426
/* Install the appropriate Mach exception port on `target_task` (or, when
 * target_task is 0 and we are PID 1, on the host). The port used is, in
 * priority order: the job's alternate exception handler service, launchd's
 * internal kernel port, or the globally-claimed exception server.
 */
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (unlikely(j->alt_exc_handler)) {
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (likely(ms)) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (unlikely(j->internal_exc_handler)) {
		exc_port = runtime_get_kernel_port();
	} else if (unlikely(!exc_port)) {
		// No exception server claimed and no per-job handler: nothing to do.
		return;
	}

	// Pick the thread-state flavor matching the build architecture.
#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (likely(target_task)) {
		kern_return_t kr = task_set_exception_ports(target_task, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f);
		if (kr) {
			// A dead task is an expected race at teardown, not an error.
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			} else {
				job_log(j, LOG_WARNING, "Task died before exception port could be set.");
			}
		}
	} else if (pid1_magic && the_exception_server) {
		// No specific task: as PID 1, install the server host-wide.
		mach_port_t mhp = mach_host_self();
		(void)job_assumes_zero(j, host_set_exception_ports(mhp, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f));
		(void)job_assumes_zero(j, launchd_mport_deallocate(mhp));
	}
}
6472
6473 void
6474 job_set_exception_port(job_t j, mach_port_t port)
6475 {
6476 if (unlikely(!the_exception_server)) {
6477 the_exception_server = port;
6478 job_setup_exception_port(j, 0);
6479 } else {
6480 job_log(j, LOG_WARNING, "The exception server is already claimed!");
6481 }
6482 }
6483
6484 void
6485 machservice_setup_options(launch_data_t obj, const char *key, void *context)
6486 {
6487 struct machservice *ms = context;
6488 mach_port_t mhp = mach_host_self();
6489 int which_port;
6490 bool b;
6491
6492 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
6493 return;
6494 }
6495
6496 switch (launch_data_get_type(obj)) {
6497 case LAUNCH_DATA_INTEGER:
6498 which_port = (int)launch_data_get_integer(obj); // XXX we should bound check this...
6499 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
6500 switch (which_port) {
6501 case TASK_KERNEL_PORT:
6502 case TASK_HOST_PORT:
6503 case TASK_NAME_PORT:
6504 case TASK_BOOTSTRAP_PORT:
6505 /* I find it a little odd that zero isn't reserved in the header.
6506 * Normally Mach is fairly good about this convention...
6507 */
6508 case 0:
6509 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
6510 break;
6511 default:
6512 ms->special_port_num = which_port;
6513 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
6514 break;
6515 }
6516 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
6517 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
6518 (void)job_assumes_zero(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)));
6519 } else {
6520 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
6521 }
6522 }
6523 case LAUNCH_DATA_BOOL:
6524 b = launch_data_get_bool(obj);
6525 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
6526 ms->debug_on_close = b;
6527 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
6528 ms->reset = b;
6529 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
6530 ms->hide = b;
6531 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
6532 job_set_exception_port(ms->job, ms->port);
6533 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
6534 ms->kUNCServer = b;
6535 (void)job_assumes_zero(ms->job, host_set_UNDServer(mhp, ms->port));
6536 }
6537 break;
6538 case LAUNCH_DATA_STRING:
6539 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
6540 const char *option = launch_data_get_string(obj);
6541 if (strcasecmp(option, "One") == 0) {
6542 ms->drain_one_on_crash = true;
6543 } else if (strcasecmp(option, "All") == 0) {
6544 ms->drain_all_on_crash = true;
6545 }
6546 }
6547 break;
6548 case LAUNCH_DATA_DICTIONARY:
6549 if (launch_data_dict_get_count(obj) == 0) {
6550 job_set_exception_port(ms->job, ms->port);
6551 }
6552 break;
6553 default:
6554 break;
6555 }
6556
6557 (void)job_assumes_zero(ms->job, launchd_mport_deallocate(mhp));
6558 }
6559
6560 void
6561 machservice_setup(launch_data_t obj, const char *key, void *context)
6562 {
6563 job_t j = context;
6564 struct machservice *ms;
6565 mach_port_t p = MACH_PORT_NULL;
6566
6567 if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
6568 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
6569 return;
6570 }
6571
6572 if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
6573 return;
6574 }
6575
6576 ms->isActive = false;
6577 ms->upfront = true;
6578
6579 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
6580 launch_data_dict_iterate(obj, machservice_setup_options, ms);
6581 }
6582
6583 kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
6584 (void)job_assumes_zero(j, kr);
6585 }
6586
/* One pass of shutdown garbage collection for a job manager and (recursively)
 * its submanagers. Stops active jobs, removes inactive ones, and removes the
 * manager itself once it is empty. Returns `jm`, or NULL if it removed `jm`.
 *
 * The pass structure matters: dirty-at-shutdown jobs are kept alive (via an
 * open transaction) until all ordinary jobs have exited, and the shutdown
 * monitor outlives everything else.
 */
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	// Depth-first: collect submanagers before judging this manager.
	jobmgr_t jmi = NULL, jmn = NULL;
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	if (!jm->shutting_down) {
		return jm;
	}

	if (SLIST_EMPTY(&jm->submgrs)) {
		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
		SLIST_FOREACH(jmi, &jm->submgrs, sle) {
			jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
		}
	}

	// Count of jobs still active that are NOT dirty-at-shutdown.
	size_t actives = 0;
	job_t ji = NULL, jn = NULL;
	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
		if (ji->anonymous) {
			continue;
		}

		// Let the shutdown monitor be up until the very end.
		if (ji->shutdown_monitor) {
			continue;
		}

		/* On our first pass through, open a transaction for all the jobs that
		 * need to be dirty at shutdown. We'll close these transactions once the
		 * jobs that do not need to be dirty at shutdown have all exited.
		 */
		if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
			job_open_shutdown_transaction(ji);
		}

		const char *active = job_active(ji);
		if (!active) {
			job_remove(ji);
		} else {
			job_log(ji, LOG_DEBUG, "Job is active: %s", active);
			job_stop(ji);

			// Dirty-at-shutdown jobs are expected to linger; don't count them.
			if (!ji->dirty_at_shutdown) {
				actives++;
			}

			if (ji->clean_kill) {
				job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
			} else {
				job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
			}
		}
	}

	jm->shutdown_jobs_dirtied = true;
	if (actives == 0) {
		if (!jm->shutdown_jobs_cleaned) {
			/* Once all normal jobs have exited, we clean the dirty-at-shutdown
			 * jobs and make them into normal jobs so that the above loop will
			 * handle them appropriately.
			 */
			LIST_FOREACH(ji, &jm->jobs, sle) {
				if (ji->anonymous) {
					continue;
				}

				if (!job_active(ji)) {
					continue;
				}

				if (ji->shutdown_monitor) {
					continue;
				}

				job_close_shutdown_transaction(ji);
				// They now count as active so we wait at least one more pass.
				actives++;
			}

			jm->shutdown_jobs_cleaned = true;
		}

		if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
			/* We may be in a situation where the shutdown monitor is all that's
			 * left, in which case we want to stop it. Like dirty-at-shutdown
			 * jobs, we turn it back into a normal job so that the main loop
			 * treats it appropriately.
			 *
			 * See:
			 * <rdar://problem/10756306>
			 * <rdar://problem/11034971>
			 * <rdar://problem/11549541>
			 */
			if (jm->monitor_shutdown && _launchd_shutdown_monitor) {
				/* The rest of shutdown has completed, so we can kill the shutdown
				 * monitor now like it was any other job.
				 */
				_launchd_shutdown_monitor->shutdown_monitor = false;

				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
				job_stop(_launchd_shutdown_monitor);
				_launchd_shutdown_monitor = NULL;
			} else {
				jobmgr_log(jm, LOG_DEBUG, "Removing.");
				jobmgr_remove(jm);
				return NULL;
			}
		}
	}

	return jm;
}
6704
6705 void
6706 jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6707 {
6708 /* I maintain that stray processes should be at the mercy of launchd during
6709 * shutdown, but nevertheless, things like diskimages-helper can stick
6710 * around, and SIGKILLing them can result in data loss. So we send SIGTERM
6711 * to all the strays and don't wait for them to exit before moving on.
6712 *
6713 * See rdar://problem/6562592
6714 */
6715 size_t i = 0;
6716 for (i = 0; i < np; i++) {
6717 if (p[i] != 0) {
6718 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
6719 (void)jobmgr_assumes_zero_p(jm, kill2(p[i], SIGTERM));
6720 }
6721 }
6722 }
6723
6724 void
6725 jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
6726 {
6727 size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6728 pid_t *pids = NULL;
6729 int i = 0, kp_cnt = 0;
6730
6731 if (likely(jm->parentmgr || !pid1_magic)) {
6732 return;
6733 }
6734
6735 if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
6736 return;
6737 }
6738
6739 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6740
6741 if (jobmgr_assumes_zero_p(jm, (kp_cnt = proc_listallpids(pids, len))) == -1) {
6742 goto out;
6743 }
6744
6745 pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
6746 for (i = 0; i < kp_cnt; i++) {
6747 struct proc_bsdshortinfo proc;
6748 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6749 if (errno != ESRCH) {
6750 (void)jobmgr_assumes_zero(jm, errno);
6751 }
6752
6753 kp_skipped++;
6754 continue;
6755 }
6756
6757 pid_t p_i = pids[i];
6758 pid_t pp_i = proc.pbsi_ppid;
6759 pid_t pg_i = proc.pbsi_pgid;
6760 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6761 const char *n = proc.pbsi_comm;
6762
6763 if (unlikely(p_i == 0 || p_i == 1)) {
6764 kp_skipped++;
6765 continue;
6766 }
6767
6768 if (_launchd_shutdown_monitor && pp_i == _launchd_shutdown_monitor->p) {
6769 kp_skipped++;
6770 continue;
6771 }
6772
6773 // We might have some jobs hanging around that we've decided to shut down in spite of.
6774 job_t j = jobmgr_find_by_pid(jm, p_i, false);
6775 if (!j || (j && j->anonymous)) {
6776 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
6777
6778 int status = 0;
6779 if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
6780 if (jobmgr_assumes_zero(jm, waitpid(p_i, &status, WNOHANG)) == 0) {
6781 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6782 }
6783 kp_skipped++;
6784 } else {
6785 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6786 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6787 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6788 * hints to the kernel along the way, so that it could shutdown certain subsystems when
6789 * their userspace emissaries go away, before the call to reboot(2).
6790 */
6791 if (leader && leader->ignore_pg_at_shutdown) {
6792 kp_skipped++;
6793 } else {
6794 ps[i] = p_i;
6795 }
6796 }
6797 } else {
6798 kp_skipped++;
6799 }
6800 }
6801
6802 if ((kp_cnt - kp_skipped > 0) && kill_strays) {
6803 jobmgr_kill_stray_children(jm, ps, kp_cnt - kp_skipped);
6804 }
6805
6806 free(ps);
6807 out:
6808 free(pids);
6809 }
6810
// Accessor: a job manager's parent manager (NULL for the root manager).
jobmgr_t
jobmgr_parent(jobmgr_t jm)
{
	return jm->parentmgr;
}
6816
6817 void
6818 job_uncork_fork(job_t j)
6819 {
6820 pid_t c = j->p;
6821
6822 job_log(j, LOG_DEBUG, "Uncorking the fork().");
6823 /* this unblocks the child and avoids a race
6824 * between the above fork() and the kevent_mod() */
6825 (void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
6826 (void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
6827 j->fork_fd = 0;
6828 }
6829
/* Create a new job manager (Mach sub-bootstrap).
 *
 * jm            parent manager, or NULL for a root manager.
 * requestorport port whose death triggers this manager's teardown; required
 *               for sub-bootstraps.
 * transfer_port pre-existing bootstrap port to adopt, or MACH_PORT_NULL to
 *               allocate/check in one.
 * sflag         passed through to the session bootstrapper ("-s").
 * name          session name; when non-NULL (and !skip_init) a launchctl
 *               bootstrapper job is created for it.
 * asport        audit session port for the bootstrapper / manager.
 *
 * Returns the new manager, or NULL on failure (partially-constructed state is
 * torn down via jobmgr_remove).
 */
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
{
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	// The kqueue callback machinery assumes the callback is the first member.
	__OS_COMPILETIME_ASSERT__(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	// Trailing storage holds the manager's name (NAME_MAX when not yet known).
	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));

	if (!jobmgr_assumes(jm, jmr != NULL)) {
		return NULL;
	}

	if (jm == NULL) {
		root_jobmgr = jmr;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	// Watch the requestor port so the sub-bootstrap dies with its requestor.
	if (jm && jobmgr_assumes_zero(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME)) != KERN_SUCCESS) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		(void)jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && !pid1_magic) {
		// Per-user launchd: check in with the system launchd for our port.
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (jobmgr_assumes_zero(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port)) != 0) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);

			// Probe via dup(): only close lfd if it is actually open.
			if ((dfd = dup(lfd)) >= 0) {
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(dfd));
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(lfd));
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		// cut off the Libc cache, we don't want to deadlock against ourself
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		os_assert_zero(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME));

		// We set this explicitly as we start each child
		os_assert_zero(launchd_set_bport(MACH_PORT_NULL));
	} else if (jobmgr_assumes_zero(jmr, launchd_mport_create_recv(&jmr->jm_port)) != KERN_SUCCESS) {
		goto out_bad;
	}

	if (!name) {
		/* name_init has at least NAME_MAX + 1 bytes here (see calloc above),
		 * so this sprintf of a port index cannot overflow in practice.
		 */
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	// The root manager owns the global signal and filesystem event sources.
	if (!jm) {
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGINFO, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr));
	}

	if (name && !skip_init) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (jobmgr_assumes_zero(jmr, runtime_add_mport(jmr->jm_port, job_server)) != KERN_SUCCESS) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		bootstrapper->asport = asport;

		jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
		(void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	} else {
		jmr->req_asport = asport;
	}

	if (asport != MACH_PORT_NULL) {
		(void)jobmgr_assumes_zero(jmr, launchd_mport_copy_send(asport));
	}

	if (jmr->parentmgr) {
		runtime_add_weak_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
		if (jm == NULL) {
			root_jobmgr = NULL;
		}
	}
	return NULL;
}
6953
6954 jobmgr_t
6955 jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
6956 {
6957 jobmgr_t new = NULL;
6958
6959 /* These job managers are basically singletons, so we use the root Mach
6960 * bootstrap port as their requestor ports so they'll never go away.
6961 */
6962 mach_port_t req_port = root_jobmgr->jm_port;
6963 if (jobmgr_assumes_zero(jm, launchd_mport_make_send(req_port)) == KERN_SUCCESS) {
6964 new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
6965 if (new) {
6966 new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
6967 new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
6968 new->xpc_singleton = true;
6969 }
6970 }
6971
6972 return new;
6973 }
6974
/* Find (or lazily create) the per-user XPC domain for `uid`.
 * Newly-created domains are backed by that user's per-user launchd, whose
 * bootstrap port and audit session they adopt. Returns NULL on failure.
 */
jobmgr_t
jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
{
	jobmgr_t jmi = NULL;
	LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
		if (jmi->req_euid == uid) {
			return jmi;
		}
	}

	name_t name;
	(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
	jmi = jobmgr_new_xpc_singleton_domain(jm, name);
	if (jobmgr_assumes(jm, jmi != NULL)) {
		/* We need to create a per-user launchd for this UID if there isn't one
		 * already so we can grab the bootstrap port.
		 */
		job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
		if (jobmgr_assumes(jmi, puj != NULL)) {
			// Take our own references on the audit session and bootstrap ports.
			(void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(puj->asport));
			(void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(jmi->req_bsport));
			jmi->shortdesc = "per-user";
			jmi->req_asport = puj->asport;
			jmi->req_asid = puj->asid;
			jmi->req_euid = uid;
			jmi->req_egid = -1;

			LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
		} else {
			// Couldn't attach a per-user launchd; tear the new domain down.
			jobmgr_remove(jmi);
		}
	}

	return jmi;
}
7010
7011 jobmgr_t
7012 jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
7013 {
7014 jobmgr_t jmi = NULL;
7015 LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
7016 if (jmi->req_asid == asid) {
7017 return jmi;
7018 }
7019 }
7020
7021 name_t name;
7022 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
7023 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
7024 if (jobmgr_assumes(jm, jmi != NULL)) {
7025 (void)jobmgr_assumes_zero(jmi, launchd_mport_make_send(root_jobmgr->jm_port));
7026 jmi->shortdesc = "per-session";
7027 jmi->req_bsport = root_jobmgr->jm_port;
7028 (void)jobmgr_assumes_zero(jmi, audit_session_port(asid, &jmi->req_asport));
7029 jmi->req_asid = asid;
7030 jmi->req_euid = -1;
7031 jmi->req_egid = -1;
7032
7033 LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
7034 } else {
7035 jobmgr_remove(jmi);
7036 }
7037
7038 return jmi;
7039 }
7040
/* Create the `launchctl bootstrap` job that initializes a session of type
 * `session_type` in manager `jm`. For the PID-1 system session the
 * bootstrapper's stdio is pointed at the console. Returns the bootstrapper
 * job, or NULL if job_new failed.
 */
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);

	if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
		// Non-system sessions: the bootstrapper checks in through a "weird" port.
		bootstrapper->is_bootstrapper = true;
		char buf[100];

		// <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs)
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
		bootstrapper->weird_bootstrap = true;
		(void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	} else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
#if TARGET_OS_EMBEDDED
		bootstrapper->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
#endif
		bootstrapper->is_bootstrapper = true;
		if (jobmgr_assumes(jm, pid1_magic)) {
			// Have our system bootstrapper print out to the console.
			bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
			bootstrapper->stderrpath = strdup(_PATH_CONSOLE);

			// Watch for the console device being revoked so we can reopen it.
			if (launchd_console) {
				(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)fileno(launchd_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm));
			}
		}
	}

	jm->session_initialized = true;
	return bootstrapper;
}
7079
/* Purge every reference to `port` from manager `jm` and its submanagers:
 * aliased send-right services, pending attach waiters, and — if the port was
 * this manager's requestor port or the inherited bootstrap — the manager
 * itself. Returns `jm`, or NULL if `jm` was shut down as a result.
 */
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right to a
	 * port we already have open, the Mach kernel gives us the same port number
	 * back and increments a reference count associated with the port. This
	 * forces us, when discovering that a receive right at the other end
	 * has been deleted, to wander all of our objects to see what weird places
	 * clients might have handed us the same send right to use.
	 */

	if (jm == root_jobmgr) {
		if (port == inherited_bootstrap_port) {
			(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(port));
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		// Delete any send-right-only services registered against this port.
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port && !ms->recv) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
		return jobmgr_shutdown(jm);
	}

	// Drop any attach waiter that was waiting on this port.
	struct waiting4attach *w4ai = NULL;
	struct waiting4attach *w4ait = NULL;
	LIST_FOREACH_SAFE(w4ai, &jm->attaches, le, w4ait) {
		if (port == w4ai->port) {
			waiting4attach_delete(jm, w4ai);
			break;
		}
	}

	return jm;
}
7130
/* Look up a Mach service by name.
 *
 * With a nonzero target_pid, searches only that process's per-PID services
 * (falling back to a deep traversal from the root to find the process).
 * Otherwise searches this manager's namespace (or the root's, when the flat
 * namespace applies) and, if check_parent is set, walks up through parent
 * managers. Returns NULL when not found.
 */
struct machservice *
jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
{
	struct machservice *ms;
	job_t target_j;

	jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);

	if (target_pid) {
		/* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
		 * bootstrap in other bootstraps.
		 */

		// Start in the given bootstrap.
		if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
			// If we fail, do a deep traversal.
			if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
				jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
				return NULL;
			}
		}

		SLIST_FOREACH(ms, &target_j->machservices, sle) {
			if (ms->per_pid && strcmp(name, ms->name) == 0) {
				return ms;
			}
		}

		job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
		return NULL;
	}

	jobmgr_t where2look = jm;
	// XPC domains are separate from Mach bootstraps.
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		if (launchd_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2look = root_jobmgr;
		}
	}

	LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
		if (!ms->per_pid && strcmp(name, ms->name) == 0) {
			return ms;
		}
	}

	// Not here; optionally recurse up the bootstrap hierarchy.
	if (jm->parentmgr == NULL || !check_parent) {
		return NULL;
	}

	return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
}
7183
// Accessor: the Mach port backing this service registration.
mach_port_t
machservice_port(struct machservice *ms)
{
	return ms->port;
}
7189
// Accessor: the job that owns this Mach service.
job_t
machservice_job(struct machservice *ms)
{
	return ms->job;
}
7195
// Accessor: whether the service is hidden (the `hide` flag).
bool
machservice_hidden(struct machservice *ms)
{
	return ms->hide;
}
7201
// Accessor: whether the service's receive right is currently checked out.
bool
machservice_active(struct machservice *ms)
{
	return ms->isActive;
}
7207
// Accessor: the service's registered bootstrap name.
const char *
machservice_name(struct machservice *ms)
{
	return ms->name;
}
7213
/* Drain queued messages from a service port after its job crashed.
 *
 * Only called for services flagged drain_one_on_crash/drain_all_on_crash.
 * Exception-handler jobs get the exception-runtime path; everything else is
 * received with a zero timeout and destroyed. With drain_all set, we loop
 * until the receive times out (i.e. the queue is empty).
 */
void
machservice_drain_port(struct machservice *ms)
{
	bool drain_one = ms->drain_one_on_crash;
	bool drain_all = ms->drain_all_on_crash;

	// Caller contract: at least one drain flag must be set.
	if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
		return;
	}

	job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);

	// Buffers sized for the mach exception subsystem's request/reply unions.
	char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
	char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
	mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
	mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;

	// Seed with a value that is guaranteed not to be MACH_RCV_TIMED_OUT.
	mach_msg_return_t mr = ~MACH_MSG_SUCCESS;

	do {
		/* This should be a direct check on the Mach service to see if it's an exception-handling
		 * port, and it will break things if ReportCrash or SafetyNet start advertising other
		 * Mach services. But for now, it should be okay.
		 */
		if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
			mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
		} else {
			// Zero timeout: receive only what is already queued.
			mach_msg_options_t options = MACH_RCV_MSG |
				MACH_RCV_TIMEOUT ;

			mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
			switch (mr) {
			case MACH_MSG_SUCCESS:
				// Release any rights/OOL memory carried by the message.
				mach_msg_destroy((mach_msg_header_t *)req_hdr);
				break;
			case MACH_RCV_TIMED_OUT:
				// Queue is empty; the loop condition will terminate us.
				break;
			case MACH_RCV_TOO_LARGE:
				launchd_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
				break;
			default:
				break;
			}
		}
	} while (drain_all && mr != MACH_RCV_TIMED_OUT);
}
7260
/* Tear down a Mach service registration and free it.
 *
 * Aliases are just unlinked and freed. Real services additionally close the
 * receive right (if we still hold it and the service is inactive), drop our
 * send-right reference, and unlink from the special-port, per-job, name-hash
 * and port-hash lists. `port_died` only affects logging.
 */
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	if (ms->alias) {
		/* HACK: Egregious code duplication. But dealing with aliases is a
		 * pretty simple affair since they can't and shouldn't have any complex
		 * behaviors associated with them.
		 */
		LIST_REMOVE(ms, name_hash_sle);
		SLIST_REMOVE(&j->machservices, ms, machservice, sle);
		free(ms);
		return;
	}

	if (unlikely(ms->debug_on_close)) {
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
	}

	// Only close the receive right if the job hasn't checked it out.
	if (ms->recv && job_assumes(j, !machservice_active(ms))) {
		job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
		(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	}

	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	if (unlikely(ms->port == the_exception_server)) {
		the_exception_server = 0;
	}

	job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}
	SLIST_REMOVE(&j->machservices, ms, machservice, sle);

	// Dedicated-instance and event-channel services were never name-hashed.
	if (!(j->dedicated_instance || ms->event_channel)) {
		LIST_REMOVE(ms, name_hash_sle);
	}
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
7305
/* Mark a service's port as checked out and arm the appropriate kernel
 * notification: port-destroyed for receive rights we handed out (so the
 * right comes back to us), dead-name otherwise.
 */
void
machservice_request_notifications(struct machservice *ms)
{
	mach_msg_id_t notification = MACH_NOTIFY_DEAD_NAME;

	ms->isActive = true;

	if (ms->recv) {
		job_checkin(ms->job);
		notification = MACH_NOTIFY_PORT_DESTROYED;
	}

	(void)job_assumes_zero(ms->job, launchd_mport_notify_req(ms->port, notification));
}
7320
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
#define END_OF(x) (&(x)[NELEM(x)])

/* Split a mach_init-style command line into an argv vector.
 *
 * Tokens are separated by whitespace; a token beginning with '"' runs to the
 * next '"'; backslash escapes the following character. The result is a single
 * heap allocation (pointer vector followed by the string storage) that the
 * caller frees with one free(). Returns NULL on empty input or allocation
 * failure.
 */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		// Cast to unsigned char: isspace() on a negative char is UB.
		while (isspace((unsigned char)*cp)) {
			cp++;
		}
		if (*cp == '\0') {
			// Only trailing whitespace remained; the original fabricated a
			// spurious empty final argument here.
			break;
		}
		term = (*cp == '"') ? *cp++ : '\0';
		// Reserve one slot so the NULL terminator below stays in bounds
		// (the original wrote argv[100] when exactly 100 args were parsed).
		if (nargs < NELEM(argv) - 1) {
			argv[nargs++] = argp;
		}
		// Reserve one byte so the '\0' below can never write past args[].
		while (*cp && (term ? *cp != term : !isspace((unsigned char)*cp)) && argp < END_OF(args) - 1) {
			if (*cp == '\\') {
				cp++;
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		*argp++ = '\0';
	}
	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	// One block: (nargs + 1) pointers, then the copied token storage.
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!argv_ret) {
		(void)os_assumes_zero(errno);
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
7374
// Record that the job has checked in with launchd.
void
job_checkin(job_t j)
{
	j->checkedin = true;
}
7380
// Accessor: whether the job has embedded root-equivalent ("god") status.
bool job_is_god(job_t j)
{
	return j->embedded_god;
}
7385
/* Handle a MACH_NOTIFY_PORT_DESTROYED notification: a receive right that a
 * job had checked out has been returned to launchd. Optionally drains the
 * port (for crashed jobs), re-arms it, and re-dispatches the job.
 * Returns false if no matching receive right is known.
 */
bool
job_ack_port_destruction(mach_port_t p)
{
	struct machservice *ms;
	job_t j;

	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->recv && (ms->port == p)) {
			break;
		}
	}

	if (!ms) {
		launchd_syslog(LOG_WARNING, "Could not find MachService to match receive right: 0x%x", p);
		return false;
	}

	j = ms->job;

	jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);

	/* Without being the exception handler, NOTE_EXIT is our only way to tell if
	 * the job crashed, and we can't rely on NOTE_EXIT always being processed
	 * after all the job's receive rights have been returned.
	 *
	 * So when we get receive rights back, check to see if the job has been
	 * reaped yet. If not, then we add this service to a list of services to be
	 * drained on crash if it's requested that behavior. So, for a job with N
	 * receive rights all requesting that they be drained on crash, we can
	 * safely handle the following sequence of events.
	 *
	 * ReceiveRight0Returned
	 * ReceiveRight1Returned
	 * ReceiveRight2Returned
	 * NOTE_EXIT (reap, get exit status)
	 * ReceiveRight3Returned
	 * .
	 * .
	 * .
	 * ReceiveRight(N - 1)Returned
	 */
	if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
		if (j->crashed && j->reaped) {
			job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
			machservice_drain_port(ms);
		} else if (!(j->crashed || j->reaped)) {
			job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
		}
	}

	ms->isActive = false;
	if (ms->delete_on_destruction) {
		/* machservice_delete() frees ms. The original fell through and kept
		 * dereferencing ms->port afterwards (use-after-free), so finish the
		 * job-level bookkeeping here without touching ms again.
		 */
		machservice_delete(j, ms, false);
		job_dispatch(j, false);
		root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		return true;
	}
	if (ms->reset) {
		machservice_resetport(j, ms);
	}

	// TEMPOWNER lets the port be handed back out to the next owner.
	kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
	(void)job_assumes_zero(j, kr);
	machservice_stamp_port(j, ms);
	job_dispatch(j, false);

	if (ms->recv_race_hack) {
		ms->recv_race_hack = false;
		machservice_watch(ms->job, ms);
	}

	root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);

	return true;
}
7457
/* Handle a no-senders notification on a job's privileged bootstrap port:
 * nothing holds a send right anymore, so close our receive right, clear the
 * port, and let job_dispatch() decide what happens to the job next.
 */
void
job_ack_no_senders(job_t j)
{
	j->priv_port_has_senders = false;

	(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	j->j_port = 0;

	job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");

	job_dispatch(j, false);
}
7470
7471 bool
7472 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
7473 {
7474 struct semaphoreitem *si;
7475 size_t alloc_sz = sizeof(struct semaphoreitem);
7476
7477 if (what) {
7478 alloc_sz += strlen(what) + 1;
7479 }
7480
7481 if (job_assumes(j, si = calloc(1, alloc_sz)) == NULL) {
7482 return false;
7483 }
7484
7485 si->why = why;
7486
7487 if (what) {
7488 strcpy(si->what_init, what);
7489 }
7490
7491 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
7492
7493 if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
7494 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
7495 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
7496 j->nosy = true;
7497 }
7498
7499 semaphoreitem_runtime_mod_ref(si, true);
7500
7501 return true;
7502 }
7503
7504 void
7505 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
7506 {
7507 /*
7508 * External events need to be tracked.
7509 * Internal events do NOT need to be tracked.
7510 */
7511
7512 switch (si->why) {
7513 case SUCCESSFUL_EXIT:
7514 case FAILED_EXIT:
7515 case OTHER_JOB_ENABLED:
7516 case OTHER_JOB_DISABLED:
7517 case OTHER_JOB_ACTIVE:
7518 case OTHER_JOB_INACTIVE:
7519 return;
7520 default:
7521 break;
7522 }
7523
7524 if (add) {
7525 runtime_add_weak_ref();
7526 } else {
7527 runtime_del_weak_ref();
7528 }
7529 }
7530
/* Destroy a KeepAlive semaphore: drop its runtime weak ref, unlink it from
 * the job, and de-register the job's "curious" status when applicable.
 */
void
semaphoreitem_delete(job_t j, struct semaphoreitem *si)
{
	semaphoreitem_runtime_mod_ref(si, false);

	SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);

	// We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores.
	if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
		j->nosy = false;
		SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
	}

	free(si);
}
7546
7547 void
7548 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
7549 {
7550 struct semaphoreitem_dict_iter_context *sdic = context;
7551 semaphore_reason_t why;
7552
7553 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
7554
7555 semaphoreitem_new(sdic->j, why, key);
7556 }
7557
/* launch_data_dict_iterate() callback for a job's KeepAlive dictionary.
 * Translates each recognized key into one or more semaphoreitems on the job
 * passed as `context`; unknown keys and types are logged and skipped.
 */
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			j->start_pending = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
			j->needs_kickoff = launch_data_get_bool(obj);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
			why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
			semaphoreitem_new(j, why, NULL);
			j->start_pending = true;
		} else {
			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		// Sub-dictionaries map other-job labels to booleans; the iterator
		// context carries the reason pair the boolean selects between.
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
			break;
		}

		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		job_log(j, LOG_ERR, "Unrecognized KeepAlive type: %u", launch_data_get_type(obj));
		break;
	}
}
7603
/* Register a new external (LaunchEvents / XPC Events) event for a job.
 * The event name is stored inline after the struct and `event` is retained.
 * IDs are allocated monotonically per event system. Fails for the event
 * monitor job itself and on allocation failure.
 */
bool
externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags)
{
	if (j->event_monitor) {
		job_log(j, LOG_ERR, "The event monitor job cannot use LaunchEvents or XPC Events.");
		return false;
	}

	struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
	if (!ee) {
		return false;
	}

	ee->event = xpc_retain(event);
	(void)strcpy(ee->name, evname);
	ee->job = j;
	ee->id = sys->curid;
	ee->sys = sys;
	ee->state = false;
	ee->wanted_state = true;
	sys->curid++;

	if (flags & XPC_EVENT_FLAG_ENTITLEMENTS) {
		// Capture the requesting process's entitlements alongside the event.
		struct ldcred *ldc = runtime_get_caller_creds();
		if (ldc) {
			ee->entitlements = xpc_copy_entitlements_for_pid(ldc->pid);
		}
	}

	if (sys == _launchd_support_system) {
		ee->internal = true;
	}

	LIST_INSERT_HEAD(&j->events, ee, job_le);
	LIST_INSERT_HEAD(&sys->events, ee, sys_le);

	job_log(j, LOG_DEBUG, "New event: %s/%s", sys->name, evname);

	// Poke the event monitor so it notices the new registration.
	eventsystem_ping();
	return true;
}
7645
7646 void
7647 externalevent_delete(struct externalevent *ee)
7648 {
7649 xpc_release(ee->event);
7650 if (ee->entitlements) {
7651 xpc_release(ee->entitlements);
7652 }
7653 LIST_REMOVE(ee, job_le);
7654 LIST_REMOVE(ee, sys_le);
7655
7656 free(ee);
7657
7658 eventsystem_ping();
7659 }
7660
7661 void
7662 externalevent_setup(launch_data_t obj, const char *key, void *context)
7663 {
7664 /* This method can ONLY be called on the job_import() path, as it assumes
7665 * the input is a launch_data_t.
7666 */
7667 struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
7668
7669 xpc_object_t xobj = ld2xpc(obj);
7670 if (xobj) {
7671 job_log(ctx->j, LOG_DEBUG, "Importing stream/event: %s/%s", ctx->sys->name, key);
7672 externalevent_new(ctx->j, ctx->sys, key, xobj, 0);
7673 xpc_release(xobj);
7674 } else {
7675 job_log(ctx->j, LOG_ERR, "Could not import event for job: %s", key);
7676 }
7677 }
7678
7679 struct externalevent *
7680 externalevent_find(const char *sysname, uint64_t id)
7681 {
7682 struct externalevent *ei = NULL;
7683
7684 struct eventsystem *es = eventsystem_find(sysname);
7685 if (es != NULL) {
7686 LIST_FOREACH(ei, &es->events, sys_le) {
7687 if (ei->id == id) {
7688 break;
7689 }
7690 }
7691 } else {
7692 launchd_syslog(LOG_ERR, "Could not find event system: %s", sysname);
7693 }
7694
7695 return ei;
7696 }
7697
7698 struct eventsystem *
7699 eventsystem_new(const char *name)
7700 {
7701 struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
7702 if (es != NULL) {
7703 es->curid = 1;
7704 (void)strcpy(es->name, name);
7705 LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
7706 } else {
7707 (void)os_assumes_zero(errno);
7708 }
7709
7710 return es;
7711 }
7712
7713 void
7714 eventsystem_delete(struct eventsystem *es)
7715 {
7716 struct externalevent *ei = NULL;
7717 while ((ei = LIST_FIRST(&es->events))) {
7718 externalevent_delete(ei);
7719 }
7720
7721 LIST_REMOVE(es, global_le);
7722
7723 free(es);
7724 }
7725
/* launch_data_dict_iterate() callback for a job's LaunchEvents dictionary:
 * each key names an event stream and the dictionary value holds that
 * stream's events. The stream is created on first use.
 */
void
eventsystem_setup(launch_data_t obj, const char *key, void *context)
{
	job_t j = (job_t)context;
	if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
		return;
	}

	struct eventsystem *sys = eventsystem_find(key);
	if (unlikely(sys == NULL)) {
		sys = eventsystem_new(key);
		job_log(j, LOG_DEBUG, "New event system: %s", key);
	}

	// sys can still be NULL here if eventsystem_new() failed to allocate.
	if (job_assumes(j, sys != NULL)) {
		struct externalevent_iter_ctx ctx = {
			.j = j,
			.sys = sys,
		};

		job_log(j, LOG_DEBUG, "Importing events for stream: %s", key);
		launch_data_dict_iterate(obj, externalevent_setup, &ctx);
	}
}
7750
7751 struct eventsystem *
7752 eventsystem_find(const char *name)
7753 {
7754 struct eventsystem *esi = NULL;
7755 LIST_FOREACH(esi, &_s_event_systems, global_le) {
7756 if (strcmp(name, esi->name) == 0) {
7757 break;
7758 }
7759 }
7760
7761 return esi;
7762 }
7763
7764 void
7765 eventsystem_ping(void)
7766 {
7767 if (!_launchd_event_monitor) {
7768 return;
7769 }
7770
7771 if (!_launchd_event_monitor->p) {
7772 (void)job_dispatch(_launchd_event_monitor, true);
7773 } else {
7774 if (_launchd_event_monitor->event_monitor_ready2signal) {
7775 (void)job_assumes_zero_p(_launchd_event_monitor, kill(_launchd_event_monitor->p, SIGUSR1));
7776 }
7777 }
7778 }
7779
7780 void
7781 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
7782 {
7783 jobmgr_t jmi, jmn;
7784 job_t ji, jn;
7785
7786
7787 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7788 jobmgr_dispatch_all_semaphores(jmi);
7789 }
7790
7791 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7792 if (!SLIST_EMPTY(&ji->semaphores)) {
7793 job_dispatch(ji, false);
7794 }
7795 }
7796 }
7797
/* Compute the next wall-clock time matching crontab-style fields
 * (-1 means wildcard), starting from the minute after now.
 */
time_t
cronemu(int mon, int mday, int hour, int min)
{
	time_t current = time(NULL);
	struct tm candidate = *localtime(&current);

	candidate.tm_isdst = -1;	// let mktime() decide DST
	candidate.tm_sec = 0;
	candidate.tm_min++;		// never fire in the current minute

	while (!cronemu_mon(&candidate, mon, mday, hour, min)) {
		// No match left this year; restart at Jan 1 of the next year.
		candidate.tm_year++;
		candidate.tm_mon = 0;
		candidate.tm_mday = 1;
		candidate.tm_hour = 0;
		candidate.tm_min = 0;
		mktime(&candidate);
	}

	return mktime(&candidate);
}
7822
/* Compute the next wall-clock time falling on weekday `wday` (0 or 7 both
 * meaning Sunday, crontab-style) at the given hour/minute (-1 = wildcard),
 * starting from the minute after now.
 */
time_t
cronemu_wday(int wday, int hour, int min)
{
	time_t current = time(NULL);
	struct tm candidate = *localtime(&current);

	candidate.tm_isdst = -1;	// let mktime() decide DST
	candidate.tm_sec = 0;
	candidate.tm_min++;		// never fire in the current minute

	if (wday == 7) {
		wday = 0;
	}

	while (!(candidate.tm_wday == wday && cronemu_hour(&candidate, hour, min))) {
		// Advance a day at a time until both weekday and time match.
		candidate.tm_mday++;
		candidate.tm_hour = 0;
		candidate.tm_min = 0;
		mktime(&candidate);
	}

	return mktime(&candidate);
}
7849
/* Match/advance the month field of *wtm against `mon` (-1 = wildcard).
 * Returns false when no match exists within the current year (the caller
 * then carries into the next year).
 */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		// Wildcard: scan forward month by month until the rest matches.
		struct tm scratch = *wtm;
		int pre_normalize;

		while (!cronemu_mday(&scratch, mday, hour, min)) {
			scratch.tm_mon++;
			scratch.tm_mday = 1;
			scratch.tm_hour = 0;
			scratch.tm_min = 0;
			pre_normalize = scratch.tm_mon;
			mktime(&scratch);
			if (pre_normalize != scratch.tm_mon) {
				// mktime() carried into the next year: no match this year.
				return false;
			}
		}
		*wtm = scratch;
		return true;
	}

	if (mon < wtm->tm_mon) {
		// Requested month already passed this year.
		return false;
	}

	if (mon > wtm->tm_mon) {
		// Jump ahead to the requested month and reset the finer fields.
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
7885
/* Match/advance the day-of-month field of *wtm against `mday` (-1 =
 * wildcard). Returns false when no match exists within the current month.
 */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		// Wildcard: scan forward day by day until the time-of-day matches.
		struct tm scratch = *wtm;
		int pre_normalize;

		while (!cronemu_hour(&scratch, hour, min)) {
			scratch.tm_mday++;
			scratch.tm_hour = 0;
			scratch.tm_min = 0;
			pre_normalize = scratch.tm_mday;
			mktime(&scratch);
			if (pre_normalize != scratch.tm_mday) {
				// mktime() carried into the next month: no match this month.
				return false;
			}
		}
		*wtm = scratch;
		return true;
	}

	if (mday < wtm->tm_mday) {
		// Requested day already passed this month.
		return false;
	}

	if (mday > wtm->tm_mday) {
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
7919
/* Match/advance the hour field of *wtm against `hour` (-1 = wildcard).
 * Returns false when no match exists within the current day.
 */
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		// Wildcard: scan forward hour by hour until the minute matches.
		struct tm scratch = *wtm;
		int pre_normalize;

		while (!cronemu_min(&scratch, min)) {
			scratch.tm_hour++;
			scratch.tm_min = 0;
			pre_normalize = scratch.tm_hour;
			mktime(&scratch);
			if (pre_normalize != scratch.tm_hour) {
				// mktime() carried into the next day: no match today.
				return false;
			}
		}
		*wtm = scratch;
		return true;
	}

	if (hour < wtm->tm_hour) {
		// Requested hour already passed today.
		return false;
	}

	if (hour > wtm->tm_hour) {
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
7951
/* Match/advance the minute field of *wtm against `min` (-1 = wildcard).
 * Returns false when the requested minute has already passed in this hour
 * (the caller handles the carry); otherwise advances tm_min as needed.
 */
bool
cronemu_min(struct tm *wtm, int min)
{
	if (min == -1) {
		return true;
	}

	if (min < wtm->tm_min) {
		return false;
	}

	if (min > wtm->tm_min) {
		wtm->tm_min = min;
	}

	return true;
}
7969
/* MIG: bootstrap_create_server(). Creates a mach_init-style server job that
 * runs server_cmd. In the PID-1 instance, non-root callers are redirected to
 * their per-user launchd; in per-user instances the requested server_uid is
 * ignored. Returns the new job's privileged bootstrap port via server_portp.
 */
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t js;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	// The sandbox check is made against the first word of the command line.
	const char **argv = (const char **)mach_cmd2argv(server_cmd);
	if (unlikely(argv == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
		free(argv);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	free(argv);
#endif

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

	if (pid1_magic) {
		if (ldc->euid || ldc->uid) {
			job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
			return VPROC_ERR_TRY_PER_USER;
		}
	} else {
		if (unlikely(server_uid != getuid())) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
				server_cmd, getuid(), server_uid);
		}
		server_uid = 0; // zero means "do nothing"
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (unlikely(js == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
8020
/* MIG: deliver a signal to the job named by targetlabel, or unload it when
 * sig is VPROC_MAGIC_UNLOAD_SIGNAL. The unload path may withhold the MIG
 * reply (MIG_NO_REPLY) until the target's process is gone. Callers must be
 * root or the same UID as launchd; on embedded, "god" jobs are additionally
 * allowed but restricted to targets with the same username.
 */
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t otherj;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
#if TARGET_OS_EMBEDDED
		if (!j->embedded_god) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
#else
		return BOOTSTRAP_NOT_PRIVILEGED;
#endif
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// God jobs may only signal jobs belonging to the same username.
	if (j->embedded_god) {
		if (j->username && otherj->username) {
			if (strcmp(j->username, otherj->username) != 0) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		// Only block the reply if the target actually has a process to reap.
		bool do_block = otherj->p;

		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			// this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first...
			(void)job_assumes(otherj, waiting4removal_new(otherj, srp));
			return MIG_NO_REPLY;
		} else {
			return 0;
		}
	} else if (otherj->p) {
		(void)job_assumes_zero_p(j, kill2(otherj->p, sig));
	}

	return 0;
}
8086
/* MIG: accept a batch of log data forwarded from a per-user launchd and
 * hand it to the logging subsystem tagged with the caller's credentials.
 */
kern_return_t
job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	struct ldcred *creds = runtime_get_caller_creds();

	if (j == NULL) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Only per-user launchd instances may forward logs to us.
	if (!job_assumes(j, j->per_user)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return launchd_log_forward(creds->euid, creds->egid, inval, invalCnt);
}
8102
/* MIG: hand the caller's reply port to the logging subsystem so buffered
 * log data can be streamed back. Restricted to root (euid 0).
 */
kern_return_t
job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	struct ldcred *creds = runtime_get_caller_creds();

	if (j == NULL) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(creds->euid)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return launchd_log_drain(srp, outval, outvalCnt);
}
8118
/* MIG: get and/or set a complex (serialized launch_data_t) per-job value.
 * inkey/outkey select the property; inval/invalCnt carry the packed input
 * and *outval/*outvalCnt return packed output in mig_allocate()d memory.
 * When both keys are given they must match ("swap"). Returns 0 on success,
 * 1 on failure. The input buffer is always deallocated before returning.
 */
kern_return_t
job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
	vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval,
	mach_msg_type_number_t *outvalCnt)
{
	const char *action;
	launch_data_t input_obj = NULL, output_obj = NULL;
	size_t data_offset = 0;
	size_t packed_size;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Setting a value on another job requires root or launchd's own UID.
	if (inkey && ldc->pid != j->p) {
		if (ldc->euid && ldc->euid != getuid()) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}

	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	// Fixed-size scratch buffer for the packed reply (20 MiB).
	*outvalCnt = 20 * 1024 * 1024;
	mig_allocate(outval, *outvalCnt);
	if (!job_assumes(j, *outval != 0)) {
		return 1;
	}

	/* Note to future maintainers: launch_data_unpack() does NOT return a heap
	 * object. The data is decoded in-place. So do not call launch_data_free()
	 * on input_obj.
	 */
	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
		goto out_bad;
	}

	char *store = NULL;
	switch (outkey) {
	case VPROC_GSK_ENVIRONMENT:
		// Export the environment accumulated from other jobs in this manager.
		if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
			goto out_bad;
		}
		jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ALLJOBS:
		if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
			goto out_bad;
		}
		// Strip file descriptors before shipping the snapshot to the caller.
		ipc_revoke_fds(output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_MGR_NAME:
		if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_JOB_OVERRIDES_DB:
		// Returns the path to the overrides database in the persistent store.
		store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_DB, "overrides.plist");
		if (!store || !job_assumes(j, (output_obj = launch_data_new_string(store)) != NULL)) {
			free(store);
			goto out_bad;
		}

		free(store);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ZERO:
		// Nothing to return: give back the reply buffer immediately.
		mig_deallocate(*outval, *outvalCnt);
		*outval = 0;
		*outvalCnt = 0;
		break;
	default:
		goto out_bad;
	}

	mig_deallocate(inval, invalCnt);
	return 0;

out_bad:
	mig_deallocate(inval, invalCnt);
	if (*outval) {
		mig_deallocate(*outval, *outvalCnt);
	}
	if (output_obj) {
		launch_data_free(output_obj);
	}

	return 1;
}
8243
8244 kern_return_t
8245 job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
8246 {
8247 const char *action;
8248 kern_return_t kr = 0;
8249 struct ldcred *ldc = runtime_get_caller_creds();
8250 int oldmask;
8251
8252 if (!j) {
8253 return BOOTSTRAP_NO_MEMORY;
8254 }
8255
8256 if (inkey && ldc->pid != j->p) {
8257 if (ldc->euid && ldc->euid != getuid()) {
8258 return BOOTSTRAP_NOT_PRIVILEGED;
8259 }
8260 }
8261
8262 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
8263 return 1;
8264 }
8265
8266 if (inkey && outkey) {
8267 action = "Swapping";
8268 } else if (inkey) {
8269 action = "Setting";
8270 } else {
8271 action = "Getting";
8272 }
8273
8274 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
8275
8276 switch (outkey) {
8277 case VPROC_GSK_ABANDON_PROCESS_GROUP:
8278 *outval = j->abandon_pg;
8279 break;
8280 case VPROC_GSK_LAST_EXIT_STATUS:
8281 *outval = j->last_exit_status;
8282 break;
8283 case VPROC_GSK_MGR_UID:
8284 *outval = getuid();
8285 break;
8286 case VPROC_GSK_MGR_PID:
8287 *outval = getpid();
8288 break;
8289 case VPROC_GSK_IS_MANAGED:
8290 *outval = j->anonymous ? 0 : 1;
8291 break;
8292 case VPROC_GSK_BASIC_KEEPALIVE:
8293 *outval = !j->ondemand;
8294 break;
8295 case VPROC_GSK_START_INTERVAL:
8296 *outval = j->start_interval;
8297 break;
8298 case VPROC_GSK_IDLE_TIMEOUT:
8299 *outval = j->timeout;
8300 break;
8301 case VPROC_GSK_EXIT_TIMEOUT:
8302 *outval = j->exit_timeout;
8303 break;
8304 case VPROC_GSK_GLOBAL_LOG_MASK:
8305 oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
8306 *outval = oldmask;
8307 runtime_setlogmask(oldmask);
8308 break;
8309 case VPROC_GSK_GLOBAL_UMASK:
8310 oldmask = umask(0);
8311 *outval = oldmask;
8312 umask(oldmask);
8313 break;
8314 case VPROC_GSK_TRANSACTIONS_ENABLED:
8315 job_log(j, LOG_DEBUG, "Reading EnableTransactions value.");
8316 *outval = j->enable_transactions;
8317 break;
8318 case VPROC_GSK_WAITFORDEBUGGER:
8319 *outval = j->wait4debugger;
8320 break;
8321 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
8322 *outval = j->embedded_god;
8323 break;
8324 case VPROC_GSK_ZERO:
8325 *outval = 0;
8326 break;
8327 default:
8328 kr = 1;
8329 break;
8330 }
8331
8332 switch (inkey) {
8333 case VPROC_GSK_ABANDON_PROCESS_GROUP:
8334 j->abandon_pg = (bool)inval;
8335 break;
8336 case VPROC_GSK_GLOBAL_ON_DEMAND:
8337 job_log(j, LOG_DEBUG, "Job has set global on-demand mode to: %s", inval ? "true" : "false");
8338 kr = job_set_global_on_demand(j, inval);
8339 break;
8340 case VPROC_GSK_BASIC_KEEPALIVE:
8341 j->ondemand = !inval;
8342 break;
8343 case VPROC_GSK_START_INTERVAL:
8344 if (inval > UINT32_MAX || inval < 0) {
8345 kr = 1;
8346 } else if (inval) {
8347 if (j->start_interval == 0) {
8348 runtime_add_weak_ref();
8349 }
8350 j->start_interval = (typeof(j->start_interval)) inval;
8351 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
8352 } else if (j->start_interval) {
8353 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
8354 if (j->start_interval != 0) {
8355 runtime_del_weak_ref();
8356 }
8357 j->start_interval = 0;
8358 }
8359 break;
8360 case VPROC_GSK_IDLE_TIMEOUT:
8361 if (inval < 0 || inval > UINT32_MAX) {
8362 kr = 1;
8363 } else {
8364 j->timeout = (typeof(j->timeout)) inval;
8365 }
8366 break;
8367 case VPROC_GSK_EXIT_TIMEOUT:
8368 if (inval < 0 || inval > UINT32_MAX) {
8369 kr = 1;
8370 } else {
8371 j->exit_timeout = (typeof(j->exit_timeout)) inval;
8372 }
8373 break;
8374 case VPROC_GSK_GLOBAL_LOG_MASK:
8375 if (inval < 0 || inval > UINT32_MAX) {
8376 kr = 1;
8377 } else {
8378 runtime_setlogmask((int) inval);
8379 }
8380 break;
8381 case VPROC_GSK_GLOBAL_UMASK:
8382 __OS_COMPILETIME_ASSERT__(sizeof (mode_t) == 2);
8383 if (inval < 0 || inval > UINT16_MAX) {
8384 kr = 1;
8385 } else {
8386 #if HAVE_SANDBOX
8387 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8388 kr = 1;
8389 } else {
8390 umask((mode_t) inval);
8391 }
8392 #endif
8393 }
8394 break;
8395 case VPROC_GSK_TRANSACTIONS_ENABLED:
8396 /* No-op. */
8397 break;
8398 case VPROC_GSK_WEIRD_BOOTSTRAP:
8399 if (job_assumes(j, j->weird_bootstrap)) {
8400 job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
8401
8402 mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_job_subsystem);
8403
8404 if (job_mig_job_subsystem.maxsize > mxmsgsz) {
8405 mxmsgsz = job_mig_job_subsystem.maxsize;
8406 }
8407
8408 (void)job_assumes_zero(j, runtime_add_mport(j->mgr->jm_port, job_server));
8409 j->weird_bootstrap = false;
8410 }
8411 break;
8412 case VPROC_GSK_WAITFORDEBUGGER:
8413 j->wait4debugger_oneshot = inval;
8414 break;
8415 case VPROC_GSK_PERUSER_SUSPEND:
8416 if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
8417 mach_port_t junk = MACH_PORT_NULL;
8418 job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
8419 if (job_assumes(j, jpu != NULL)) {
8420 struct suspended_peruser *spi = NULL;
8421 LIST_FOREACH(spi, &j->suspended_perusers, sle) {
8422 if ((int64_t)(spi->j->mach_uid) == inval) {
8423 job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
8424 break;
8425 }
8426 }
8427
8428 if (spi == NULL) {
8429 job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
8430 spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
8431 if (job_assumes(j, spi != NULL)) {
8432 /* Stop listening for events.
8433 *
8434 * See <rdar://problem/9014146>.
8435 */
8436 if (jpu->peruser_suspend_count == 0) {
8437 job_ignore(jpu);
8438 }
8439
8440 spi->j = jpu;
8441 spi->j->peruser_suspend_count++;
8442 LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
8443 job_stop(spi->j);
8444 *outval = jpu->p;
8445 } else {
8446 kr = BOOTSTRAP_NO_MEMORY;
8447 }
8448 }
8449 }
8450 } else {
8451 kr = 1;
8452 }
8453 break;
8454 case VPROC_GSK_PERUSER_RESUME:
8455 if (job_assumes(j, pid1_magic == true)) {
8456 struct suspended_peruser *spi = NULL, *spt = NULL;
8457 LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
8458 if ((int64_t)(spi->j->mach_uid) == inval) {
8459 spi->j->peruser_suspend_count--;
8460 LIST_REMOVE(spi, sle);
8461 job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
8462 break;
8463 }
8464 }
8465
8466 if (!job_assumes(j, spi != NULL)) {
8467 job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
8468 kr = BOOTSTRAP_NOT_PRIVILEGED;
8469 } else if (spi->j->peruser_suspend_count == 0) {
8470 job_watch(spi->j);
8471 job_dispatch(spi->j, false);
8472 free(spi);
8473 }
8474 } else {
8475 kr = 1;
8476 }
8477 break;
8478 case VPROC_GSK_ZERO:
8479 break;
8480 default:
8481 kr = 1;
8482 break;
8483 }
8484
8485 return kr;
8486 }
8487
/* MIG handler: called by a freshly-forked child of a managed job so launchd
 * can install per-task Mach state (exception port, host special ports, and
 * the audit session port) into the child task.
 *
 * On success, *asport receives the job's audit session port (or
 * MACH_PORT_NULL). The send right to child_task is consumed.
 */
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	struct machservice *ms;
	job_setup_exception_port(j, child_task);
	// Propagate each registered special port into the child task.
	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			// The TASK_ACCESS_PORT funny business is to workaround 5325399.
			continue;
		}

		// Stash the kern_return_t in errno so job_log_error-style helpers see it.
		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
		if (errno) {
			if (errno == MACH_SEND_INVALID_DEST) {
				// The child is already gone; no point trying the remaining ports.
				job_log(j, LOG_WARNING, "Task died before special ports could be set.");
				break;
			}

			// Anonymous jobs fail this often enough that we log more quietly.
			int desired_log_level = LOG_ERR;
			if (j->anonymous) {
				// 5338127

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* MIG will not zero-initialize this pointer, so we must always do so.
	 *
	 * <rdar://problem/8562593>.
	 */
	*asport = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
	if (!j->anonymous) {
		/* XPC services will spawn into the root security session by default.
		 * xpcproxy will switch them away if needed.
		 */
		if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
			job_log(j, LOG_DEBUG, "Returning session port: 0x%x", j->asport);
			*asport = j->asport;
		}
	}
#endif
	// Drop the send right to the child task that MIG handed us.
	(void)job_assumes_zero(j, launchd_mport_deallocate(child_task));

	return 0;
}
8547
/* MIG handler: return the receive rights for all of this job's "up-front"
 * Mach services. The array is mig_allocate()d; ownership passes to MIG.
 */
kern_return_t
job_mig_get_listener_port_rights(job_t j, mach_port_array_t *sports, mach_msg_type_number_t *sports_cnt)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// First pass: count qualifying services so we know how much to allocate.
	size_t count = 0;
	struct machservice *svc = NULL;
	SLIST_FOREACH(svc, &j->machservices, sle) {
		if (svc->upfront && job_assumes(j, svc->recv)) {
			count++;
		}
	}

	if (count == 0) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	mach_port_array_t ports = NULL;
	mig_allocate((vm_address_t *)&ports, count * sizeof(ports[0]));
	if (!ports) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Second pass: collect the ports into the reply array.
	size_t idx = 0;
	SLIST_FOREACH(svc, &j->machservices, sle) {
		if (svc->upfront && svc->recv) {
			ports[idx] = svc->port;
			idx++;
		}
	}

	*sports = ports;
	*sports_cnt = count;

	return KERN_SUCCESS;
}
8586
/* MIG handler: a per-user launchd registers (or replaces) the audit session
 * port for its user's GUI session.
 *
 * Takes ownership of the asport send right on success.
 */
kern_return_t
job_mig_register_gui_session(job_t j, mach_port_t asport)
{
	/* MIG can invoke a handler with a NULL job when the sender could not be
	 * resolved; every other handler in this file guards against that before
	 * dereferencing j, so do the same here.
	 */
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Only per-user launchds may register a GUI session.
	if (!j->per_user) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jobmgr_t jm = jobmgr_find_xpc_per_user_domain(root_jobmgr, j->mach_uid);
	if (!jm) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	if (jm->req_gui_asport) {
		// This job manager persists, so we need to allow the per-user launchd
		// to update the GUI session as it comes and goes.
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_gui_asport));
	}

	jm->req_gui_asport = asport;
	return KERN_SUCCESS;
}
8608
/* MIG handler: initiate a system reboot/shutdown on behalf of the caller.
 * Only meaningful in the PID-1 launchd; records who asked for diagnostics,
 * stores the reboot flags on the root job manager, and begins shutdown.
 */
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct proc_bsdshortinfo proc;
	struct ldcred *ldc = runtime_get_caller_creds();
	pid_t pid_to_log;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!pid1_magic)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* On desktop, only root may reboot; on embedded, a job marked as
	 * root-equivalent (embedded_god) is also allowed.
	 */
#if !TARGET_OS_EMBEDDED
	if (unlikely(ldc->euid)) {
#else
	if (unlikely(ldc->euid) && !j->embedded_god) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Walk the caller's process ancestry to record who initiated the reboot.
	for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
		size_t who_offset;
		if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			return 1;
		}

		// Guard against an ancestry cycle (a process that is its own parent).
		if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
			job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
			snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
			break;
		}

		// Append this ancestor to the audit string: " name[pid] ->" ...
		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;
	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
	launchd_shutdown();

	return 0;
}
8659
8660 kern_return_t
8661 job_mig_getsocket(job_t j, name_t spr)
8662 {
8663 if (!j) {
8664 return BOOTSTRAP_NO_MEMORY;
8665 }
8666
8667 if (j->deny_job_creation) {
8668 return BOOTSTRAP_NOT_PRIVILEGED;
8669 }
8670
8671 #if HAVE_SANDBOX
8672 struct ldcred *ldc = runtime_get_caller_creds();
8673 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8674 return BOOTSTRAP_NOT_PRIVILEGED;
8675 }
8676 #endif
8677
8678 ipc_server_init();
8679
8680 if (unlikely(!sockpath)) {
8681 return BOOTSTRAP_NO_MEMORY;
8682 }
8683
8684 strncpy(spr, sockpath, sizeof(name_t));
8685
8686 return BOOTSTRAP_SUCCESS;
8687 }
8688
8689 kern_return_t
8690 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8691 {
8692 if (!j) {
8693 return BOOTSTRAP_NO_MEMORY;
8694 }
8695
8696 if ((errno = err)) {
8697 job_log_error(j, pri, "%s", msg);
8698 } else {
8699 job_log(j, pri, "%s", msg);
8700 }
8701
8702 return 0;
8703 }
8704
8705 void
8706 job_setup_per_user_directory(job_t j, uid_t uid, const char *path)
8707 {
8708 struct stat sb;
8709
8710 bool created = false;
8711 int r = stat(path, &sb);
8712 if ((r == -1 && errno == ENOENT) || (r == 0 && !S_ISDIR(sb.st_mode))) {
8713 if (r == 0) {
8714 job_log(j, LOG_NOTICE, "File at location of per-user launchd directory is not a directory. Moving aside: %s", path);
8715
8716 char old[PATH_MAX];
8717 snprintf(old, sizeof(old), "%s.movedaside", path);
8718 (void)job_assumes_zero_p(j, rename(path, old));
8719 }
8720
8721 (void)job_assumes_zero_p(j, mkdir(path, S_IRWXU));
8722 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8723 created = true;
8724 }
8725
8726 if (!created) {
8727 if (sb.st_uid != uid) {
8728 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper user ownership. Repairing: %s", path);
8729 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8730 }
8731 if (sb.st_gid != 0) {
8732 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper group ownership. Repairing: %s", path);
8733 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8734 }
8735 if (sb.st_mode != (S_IRWXU | S_IFDIR)) {
8736 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper mode. Repairing: %s", path);
8737 (void)job_assumes_zero_p(j, chmod(path, S_IRWXU));
8738 }
8739 }
8740 }
8741
8742 void
8743 job_setup_per_user_directories(job_t j, uid_t uid, const char *label)
8744 {
8745 char path[PATH_MAX];
8746
8747 (void)snprintf(path, sizeof(path), LAUNCHD_DB_PREFIX "/%s", label);
8748 job_setup_per_user_directory(j, uid, path);
8749
8750 (void)snprintf(path, sizeof(path), LAUNCHD_LOG_PREFIX "/%s", label);
8751 job_setup_per_user_directory(j, uid, path);
8752 }
8753
8754 job_t
8755 jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
8756 {
8757 job_t ji = NULL;
8758 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8759 if (!ji->per_user) {
8760 continue;
8761 }
8762 if (ji->mach_uid != which_user) {
8763 continue;
8764 }
8765 if (SLIST_EMPTY(&ji->machservices)) {
8766 continue;
8767 }
8768 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8769 continue;
8770 }
8771 break;
8772 }
8773
8774 if (unlikely(ji == NULL)) {
8775 struct machservice *ms;
8776 char lbuf[1024];
8777
8778 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
8779
8780 sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
8781
8782 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
8783
8784 if (ji != NULL) {
8785 auditinfo_addr_t auinfo = {
8786 .ai_termid = {
8787 .at_type = AU_IPv4
8788 },
8789 .ai_auid = which_user,
8790 .ai_asid = AU_ASSIGN_ASID,
8791 };
8792
8793 if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8794 job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8795 (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8796
8797 /* Kinda lame that we have to do this, but we can't create an
8798 * audit session without joining it.
8799 */
8800 (void)job_assumes(ji, audit_session_join(launchd_audit_port));
8801 ji->asid = auinfo.ai_asid;
8802 } else {
8803 job_log(ji, LOG_WARNING, "Could not set audit session!");
8804 job_remove(ji);
8805 return NULL;
8806 }
8807
8808 ji->mach_uid = which_user;
8809 ji->per_user = true;
8810 ji->enable_transactions = true;
8811 job_setup_per_user_directories(ji, which_user, lbuf);
8812
8813 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8814 job_remove(ji);
8815 ji = NULL;
8816 } else {
8817 ms->upfront = true;
8818 ms->per_user_hack = true;
8819 ms->hide = true;
8820
8821 ji = job_dispatch(ji, false);
8822 }
8823 }
8824 } else {
8825 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
8826 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8827 }
8828
8829 return ji;
8830 }
8831
/* MIG handler: look up (or create) the per-user launchd for which_user and
 * return its bootstrap port in *up_cont. Non-root callers are forced to
 * their own UID. Only supported in the PID-1 launchd on non-embedded OSes.
 */
kern_return_t
job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
{
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (launchd_osinstaller) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// There is no need for per-user launchd's on embedded.
	job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
	return BOOTSTRAP_UNKNOWN_SERVICE;
#endif

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);

	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Non-root callers may only reach their own per-user launchd.
	if (ldc->euid || ldc->uid) {
		which_user = ldc->euid ?: ldc->uid;
	}

	*up_cont = MACH_PORT_NULL;

	// The helper fills *up_cont; the returned job itself is not needed here
	// (previously stored into an unused local).
	(void)jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);

	return 0;
}
8875
/* MIG handler: bootstrap_check_in() — hand the job the receive right for one
 * of its declared Mach services, optionally creating the service on the fly
 * for legacy jobs. With BOOTSTRAP_STRICT_CHECKIN, only pre-declared, inactive
 * services owned by the caller may be checked in.
 */
kern_return_t
job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
{
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
	struct ldcred *ldc = runtime_get_caller_creds();
	struct machservice *ms = NULL;
	job_t jo;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->dedicated_instance) {
		// Dedicated instances only see their own services; report our instance ID back.
		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
				uuid_copy(instance_id, j->instance_id);
				ms = msi;
				break;
			}
		}
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
	}

	if (strict) {
		// Strict check-in: the service must exist, belong to us, and be inactive.
		if (likely(ms != NULL)) {
			if (ms->job != j) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			} else if (ms->isActive) {
				return BOOTSTRAP_SERVICE_ACTIVE;
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else if (ms == NULL) {
		// Non-strict and unknown service: create it on demand (legacy behavior).
		if (job_assumes(j, !j->dedicated_instance)) {
			*serviceportp = MACH_PORT_NULL;

#if HAVE_SANDBOX
			if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
#endif
			if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
				return BOOTSTRAP_NO_MEMORY;
			}

			// Treat this like a legacy job.
			if (!j->legacy_mach_job) {
				ms->isActive = true;
				ms->recv = false;
			}

			if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
				job_log(j, LOG_APPLEONLY, "Please add the following service to the configuration file for this job: %s", servicename);
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else {
		// Service exists: refuse check-in by anyone but the owning job.
		if (unlikely((jo = machservice_job(ms)) != j)) {
			static pid_t last_warned_pid;

			// Rate-limit the hijack warning to one per offending PID.
			if (last_warned_pid != ldc->pid) {
				job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
				last_warned_pid = ldc->pid;
			}

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (unlikely(machservice_active(ms))) {
			job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
	}

	job_checkin(j);
	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
8962
/* MIG handler: bootstrap_register() — associate a caller-supplied send right
 * with a service name. Deprecated in favor of bootstrap_check_in(); replaces
 * any existing inactive registration owned by the same job.
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!per_pid_service && !j->legacy_LS_job) {
		job_log(j, LOG_APPLEONLY, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
	}

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
		if (pid1_magic) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);

	if (unlikely(ms)) {
		// The name is taken: only its owner may re-register, and only while inactive.
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		// A service we hold the receive right for must use check-in, not register.
		if (ms->recv && (serviceport != MACH_PORT_NULL)) {
			job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		job_checkin(j);
		// Drop the old registration before installing the new port.
		machservice_delete(j, ms, false);
	}

	if (likely(serviceport != MACH_PORT_NULL)) {
		if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}


	return BOOTSTRAP_SUCCESS;
}
9031
/* MIG handler: bootstrap_look_up() — resolve a service name to a send right.
 * Handles per-PID services, XPC-domain locality, strict/privileged lookups,
 * multi-instance subjob creation, and forwarding unresolved lookups to the
 * bootstrap port we inherited.
 */
kern_return_t
job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
{
	struct machservice *ms = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr;
	bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
	bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
	bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	bool xpc_req = (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN);

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	// Redirect non-root rogue lookups against the root bootstrap to the per-user launchd.
	if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

#if HAVE_SANDBOX
	/* We don't do sandbox checking for XPC domains because, by definition, all
	 * the services within your domain should be accessible to you.
	 */
	if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (per_pid_lookup) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		if (xpc_req) {
			// Requests from XPC domains stay local.
			ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
		} else {
			/* A strict lookup which is privileged won't even bother trying to
			 * find a service if we're not hosting the root Mach bootstrap.
			 */
			if (strict_lookup && privileged) {
				if (inherited_bootstrap_port == MACH_PORT_NULL) {
					ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
				}
			} else {
				ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
			}
		}
	}

	if (likely(ms)) {
		// Resolve aliases to the real service before any further checks.
		ms = ms->alias ? ms->alias : ms;
		if (unlikely(specific_instance && ms->job->multiple_instances)) {
			// The caller wants a particular instance of a multi-instance job.
			job_t ji = NULL;
			job_t instance = NULL;
			LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
				if (uuid_compare(instance_id, ji->instance_id) == 0) {
					instance = ji;
					break;
				}
			}

			if (unlikely(instance == NULL)) {
				// No such instance yet; spawn one for this lookup.
				job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
				instance = job_new_subjob(ms->job, instance_id);
				if (job_assumes(j, instance != NULL)) {
					/* Disable this support for now. We only support having
					 * multi-instance jobs within private XPC domains.
					 */
#if 0
					/* If the job is multi-instance, in a singleton XPC domain
					 * and the request is not coming from within that singleton
					 * domain, we need to alias the new job into the requesting
					 * domain.
					 */
					if (!j->mgr->xpc_singleton && xpc_req) {
						(void)job_assumes(instance, job_new_alias(j->mgr, instance));
					}
#endif
					job_dispatch(instance, false);
				}
			}

			// Re-resolve the service against the instance's own service list.
			ms = NULL;
			if (job_assumes(j, instance != NULL)) {
				struct machservice *msi = NULL;
				SLIST_FOREACH(msi, &instance->machservices, sle) {
					/* sizeof(servicename) will return the size of a pointer,
					 * even though it's an array type, because when passing
					 * arrays as parameters in C, they implicitly degrade to
					 * pointers.
					 */
					if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
						ms = msi;
						break;
					}
				}
			}
		} else {
			// Hidden-and-inactive or per-user-hack services are not returned.
			if (machservice_hidden(ms) && !machservice_active(ms)) {
				ms = NULL;
			} else if (unlikely(ms->per_user_hack)) {
				ms = NULL;
			}
		}
	}

	if (likely(ms)) {
		(void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
		*serviceportp = machservice_port(ms);

		kr = BOOTSTRAP_SUCCESS;
	} else if (strict_lookup && !privileged) {
		/* Hack: We need to simulate XPC's desire not to establish a hierarchy.
		 * So if XPC is doing the lookup, and it's not a privileged lookup, we
		 * won't forward. But if it is a privileged lookup, then we must
		 * forward.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	} else if (inherited_bootstrap_port != MACH_PORT_NULL) {
		// Requests from within an XPC domain don't get forwarded.
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		/* Clients potentially check the audit token of the reply to verify that
		 * the returned send right is trustworthy.
		 */
		(void)job_assumes_zero(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags));
		// The forward consumed the reply port; tell MIG not to reply here.
		return MIG_NO_REPLY;
	} else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/* 5240036 Should start background session when a lookup of CCacheServer
		 * occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt
		 * to guess what it is up to. If we find a EUID that isn't root, we
		 * force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
9179
/* MIG handler: return the parent bootstrap port of the caller's job manager,
 * forwarding to the inherited launchd when we have no local parent.
 */
kern_return_t
job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Requested parent bootstrap port");

	jobmgr_t jm = j->mgr;
	jobmgr_t parent = jobmgr_parent(jm);

	if (parent != NULL) {
		// We host the parent bootstrap ourselves.
		*parentport = parent->jm_port;
	} else if (MACH_PORT_NULL == inherited_bootstrap_port) {
		// Root of the hierarchy: the root bootstrap is its own parent.
		*parentport = jm->jm_port;
	} else {
		// Delegate the answer to the launchd we inherited our bootstrap from.
		(void)job_assumes_zero(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp));
		// The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now
		return MIG_NO_REPLY;
	}
	return BOOTSTRAP_SUCCESS;
}
9201
/* MIG handler: return a send right to the root bootstrap port — ours if we
 * are the root launchd, otherwise the one we inherited.
 */
kern_return_t
job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	bool we_are_root = (inherited_bootstrap_port == MACH_PORT_NULL);
	if (we_are_root) {
		// Hand out our own root job manager port with a fresh send right.
		*rootbsp = root_jobmgr->jm_port;
		(void)job_assumes_zero(j, launchd_mport_make_send(root_jobmgr->jm_port));
	} else {
		// Pass along a copy of the bootstrap port we inherited.
		*rootbsp = inherited_bootstrap_port;
		(void)job_assumes_zero(j, launchd_mport_copy_send(inherited_bootstrap_port));
	}

	return BOOTSTRAP_SUCCESS;
}
9219
/* MIG handler: bootstrap_info() — enumerate the (non-per-PID) Mach services
 * visible to the caller, returning parallel mig_allocate()d arrays of
 * service names, owning job labels, and active status.
 */
kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp,
		unsigned int *servicenames_cnt, name_array_t *servicejobsp,
		unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp,
		unsigned int *serviceactives_cnt, uint64_t flags)
{
	name_array_t service_names = NULL;
	name_array_t service_jobs = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	jobmgr_t jm;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

#if TARGET_OS_EMBEDDED
	struct ldcred *ldc = runtime_get_caller_creds();
	if (ldc->euid) {
		return EPERM;
	}
#endif // TARGET_OS_EMBEDDED

	// Pick which job manager's namespace to enumerate.
	if (launchd_flat_mach_namespace) {
		if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
			jm = j->mgr;
		} else {
			jm = root_jobmgr;
		}
	} else {
		jm = j->mgr;
	}

	// First pass over the hash table: count non-per-PID services.
	unsigned int i = 0;
	struct machservice *msi = NULL;
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			cnt += !msi->per_pid ? 1 : 0;
		}
	}

	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!job_assumes(j, service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
	if (!job_assumes(j, service_jobs != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!job_assumes(j, service_actives != NULL)) {
		goto out_bad;
	}

	// Second pass: fill the parallel arrays.
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			if (!msi->per_pid) {
				strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
				// Report the aliased (real) service's owner and status.
				msi = msi->alias ? msi->alias : msi;
				if (msi->job->mgr->shortdesc) {
					strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
				} else {
					strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
				}
				service_actives[cnt2] = machservice_status(msi);
				cnt2++;
			}
		}
	}

	(void)job_assumes(j, cnt == cnt2);

out:
	*servicenamesp = service_names;
	*servicejobsp = service_jobs;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	// Release whatever subset of the arrays we managed to allocate.
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_jobs) {
		mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
9319
/* MIG handler: enumerate the child bootstraps of the caller's job manager —
 * sub job managers plus (in PID 1) per-user launchds — returning parallel
 * mig_allocate()d arrays of ports, names, and property flags. Root only.
 */
kern_return_t
job_mig_lookup_children(job_t j, mach_port_array_t *child_ports,
		mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names,
		mach_msg_type_number_t *child_names_cnt,
		bootstrap_property_array_t *child_properties,
		mach_msg_type_number_t *child_properties_cnt)
{
	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	struct ldcred *ldc = runtime_get_caller_creds();

	/* Only allow root processes to look up children, even if we're in the per-user launchd.
	 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
	 * in a non-flat namespace.
	 */
	if (ldc->euid != 0) {
		job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// First pass: count sub job managers (and per-user launchds if we're PID 1).
	unsigned int cnt = 0;

	jobmgr_t jmr = j->mgr;
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		cnt++;
	}

	// Find our per-user launchds if we're PID 1.
	job_t ji = NULL;
	if (pid1_magic) {
		LIST_FOREACH(ji, &jmr->jobs, sle) {
			cnt += ji->per_user ? 1 : 0;
		}
	}

	if (cnt == 0) {
		return BOOTSTRAP_NO_CHILDREN;
	}

	mach_port_array_t _child_ports = NULL;
	name_array_t _child_names = NULL;
	bootstrap_property_array_t _child_properties = NULL;

	mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
	if (!job_assumes(j, _child_ports != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
	if (!job_assumes(j, _child_names != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
	if (!job_assumes(j, _child_properties != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	// Second pass: fill in sub job managers first...
	unsigned int cnt2 = 0;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		if (jobmgr_assumes_zero(jmi, launchd_mport_make_send(jmi->jm_port)) == KERN_SUCCESS) {
			_child_ports[cnt2] = jmi->jm_port;
		} else {
			_child_ports[cnt2] = MACH_PORT_NULL;
		}

		strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
		_child_properties[cnt2] = jmi->properties;

		cnt2++;
	}

	// ...then the per-user launchds (PID 1 only).
	if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
		if (ji->per_user) {
			if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
				mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));

				if (job_assumes_zero(ji, launchd_mport_copy_send(port)) == KERN_SUCCESS) {
					_child_ports[cnt2] = port;
				} else {
					_child_ports[cnt2] = MACH_PORT_NULL;
				}
			} else {
				_child_ports[cnt2] = MACH_PORT_NULL;
			}

			strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
			// mig_allocate() memory arrives zeroed, so |= sets just this flag.
			_child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;

			cnt2++;
		}
	}

	*child_names_cnt = cnt;
	*child_ports_cnt = cnt;
	*child_properties_cnt = cnt;

	*child_names = _child_names;
	*child_ports = _child_ports;
	*child_properties = _child_properties;

	unsigned int i = 0;
	for (i = 0; i < cnt; i++) {
		job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
	}

	return BOOTSTRAP_SUCCESS;
out_bad:
	// Release whatever subset of the arrays we managed to allocate.
	if (_child_ports) {
		mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
	}

	if (_child_names) {
		mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
	}

	if (_child_properties) {
		mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
	}

	return kr;
}
9449
9450 kern_return_t
9451 job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
9452 {
9453 struct ldcred *ldc = runtime_get_caller_creds();
9454 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
9455 return BOOTSTRAP_NOT_PRIVILEGED;
9456 }
9457
9458 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
9459 * directly by launchd as agents.
9460 */
9461 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
9462 if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
9463 *managed = true;
9464 }
9465
9466 return BOOTSTRAP_SUCCESS;
9467 }
9468
9469 kern_return_t
9470 job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
9471 {
9472 if (!j) {
9473 return BOOTSTRAP_NO_MEMORY;
9474 }
9475
9476 struct ldcred *ldc = runtime_get_caller_creds();
9477 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
9478
9479 #if HAVE_SANDBOX
9480 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
9481 return BOOTSTRAP_NOT_PRIVILEGED;
9482 }
9483 #endif
9484
9485 mach_port_t _mp = MACH_PORT_NULL;
9486 if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
9487 job_t target_j = job_find(NULL, label);
9488 if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
9489 if (target_j->j_port == MACH_PORT_NULL) {
9490 (void)job_assumes(target_j, job_setup_machport(target_j) == true);
9491 }
9492
9493 _mp = target_j->j_port;
9494 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
9495 } else {
9496 kr = BOOTSTRAP_NO_MEMORY;
9497 }
9498 }
9499
9500 *mp = _mp;
9501 return kr;
9502 }
9503
/* MiG routine: associate an audit-session port with every job that was loaded
 * with an ExpectedAuditUUID matching `uuid`, then dispatch (or ping) those
 * jobs. Each adopting job takes its own send-right reference on the port.
 */
kern_return_t
job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
{
#if TARGET_OS_EMBEDDED
	// Audit sessions are not used on embedded; accept and ignore the request.
	return KERN_SUCCESS;
#endif

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	uuid_string_t uuid_str;
	uuid_unparse(uuid, uuid_str);
	// mach_port_t is an unsigned integer port name, hence %u.
	job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);

	job_t ji = NULL, jt = NULL;
	// _SAFE iteration: matching entries are unlinked inside the loop body.
	LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
		uuid_string_t uuid_str2;
		// NOTE(review): uuid_str2 is unparsed but never used — apparently a
		// leftover from debugging.
		uuid_unparse(ji->expected_audit_uuid, uuid_str2);

		if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
			uuid_clear(ji->expected_audit_uuid);
			if (asport != MACH_PORT_NULL) {
				job_log(ji, LOG_DEBUG, "Job should join session with port 0x%x", asport);
				// Each job that adopts the session holds its own send right.
				(void)job_assumes_zero(j, launchd_mport_copy_send(asport));
			} else {
				job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
			}

			ji->asport = asport;
			LIST_REMOVE(ji, needing_session_sle);

			// The event monitor is special-cased: ping it instead of a
			// normal dispatch.
			if (ji->event_monitor) {
				eventsystem_ping();
			} else {
				job_dispatch(ji, false);
			}
		}
	}

	/* Each job that the session port was set for holds a reference. At the end of
	 * the loop, there will be one extra reference belonging to this MiG protocol.
	 * We need to release it so that the session goes away when all the jobs
	 * referencing it are unloaded.
	 */
	(void)job_assumes_zero(j, launchd_mport_deallocate(asport));

	return KERN_SUCCESS;
}
9553
9554 jobmgr_t
9555 jobmgr_find_by_name(jobmgr_t jm, const char *where)
9556 {
9557 jobmgr_t jmi, jmi2;
9558
9559 // NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic.
9560 if (where == NULL) {
9561 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9562 where = VPROCMGR_SESSION_LOGINWINDOW;
9563 } else {
9564 where = VPROCMGR_SESSION_AQUA;
9565 }
9566 }
9567
9568 if (strcasecmp(jm->name, where) == 0) {
9569 return jm;
9570 }
9571
9572 if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
9573 jmi = root_jobmgr;
9574 goto jm_found;
9575 }
9576
9577 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9578 if (unlikely(jmi->shutting_down)) {
9579 continue;
9580 } else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
9581 continue;
9582 } else if (strcasecmp(jmi->name, where) == 0) {
9583 goto jm_found;
9584 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
9585 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
9586 if (strcasecmp(jmi2->name, where) == 0) {
9587 jmi = jmi2;
9588 goto jm_found;
9589 }
9590 }
9591 }
9592 }
9593
9594 jm_found:
9595 return jmi;
9596 }
9597
/* MiG routine: absorb a Mach sub-bootstrap from another launchd instance.
 * Grabs the remote subset's request/receive rights and its service table,
 * builds a new job manager around them, and re-creates each remote Mach
 * service locally. With LAUNCH_GLOBAL_ON_DEMAND, the calling job itself is
 * migrated into the new manager as well.
 */
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	jobmgr_t jmr = NULL;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// If the target port translates to one of our own managers, the caller is
	// trying to move a session into itself — refuse.
	if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	// Pull the remote bootstrap's ports and packed service descriptions.
	kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
	if (job_assumes_zero(j, kr) != 0) {
		goto out;
	}

	// The descriptor array and the port array must be parallel; if they are
	// not, only an empty port set is acceptable.
	if (launch_data_array_get_count(out_obj_array) != l2l_port_cnt) {
		os_assert_zero(l2l_port_cnt);
	}

	// Build the new manager around the grabbed request/receive rights.
	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
		jobmgr_log(jmr, LOG_NOTICE, "Registering new GUI session.");
		kr = vproc_mig_register_gui_session(inherited_bootstrap_port, asport);
		if (kr) {
			// Log-and-continue: registration failure is not fatal here.
			jobmgr_log(jmr, LOG_ERR, "Failed to register GUI session with PID 1: 0x%x/0x%x", inherited_bootstrap_port, kr);
		}
	}

	jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;

	/* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
	 * processing an IPC request, we'll do this action before the new job manager can get any IPC
	 * requests. This serialization is guaranteed since we are single-threaded in that respect.
	 */
	if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
		// This is so awful.
		// Remove the job from its current job manager.
		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, pid_hash_sle);

		// Put the job into the target job manager.
		LIST_INSERT_HEAD(&jmr->jobs, j, sle);
		LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

		j->mgr = jmr;
		job_set_global_on_demand(j, true);

		// Keep launchd alive while this migrated job exists.
		if (!j->holds_ref) {
			job_log(j, LOG_PERF, "Job moved subset into: %s", j->mgr->name);
			j->holds_ref = true;
			runtime_add_ref();
		}
	}

	// Recreate each remote Mach service locally, attached to the anonymous
	// job representing its original owning PID.
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		(void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (unlikely(!j_for_service)) {
			// The PID probably exited
			(void)job_assumes_zero(j, launchd_mport_deallocate(l2l_ports[l2l_i]));
			continue;
		}

		// machservice_new() takes ownership of the port on success.
		if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	// On success, the MiG references on the input ports are consumed here;
	// on failure, tear down the half-constructed manager instead.
	if (kr == 0) {
		if (target_subset) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(target_subset));
		}
		if (asport) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	} else if (jmr) {
		jobmgr_shutdown(jmr);
	}

	return kr;
}
9724
/* MiG routine: initialize the caller's job manager as the given session type
 * (Aqua, LoginWindow, ...), recording the supplied audit-session port on the
 * bootstrapper job that jobmgr_init_session() returns.
 */
kern_return_t
job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_t j2;

	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (j->mgr->session_initialized) {
		// NOTE(review): this sets an error code but does NOT return early;
		// execution still falls through to jobmgr_init_session() below,
		// which can overwrite kr with BOOTSTRAP_SUCCESS. Confirm whether
		// that is intentional.
		job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
		kr = BOOTSTRAP_NOT_PRIVILEGED;
	} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		jobmgr_t jmi;

		/*
		 * 5330262
		 *
		 * We're working around LoginWindow and the WindowServer.
		 *
		 * In practice, there is only one LoginWindow session. Unfortunately, for certain
		 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
		 * spawns a replacement loginwindow session before cleaning up the previous one.
		 *
		 * We're going to use the creation of a new LoginWindow context as a clue that the
		 * previous LoginWindow context is on the way out and therefore we should just
		 * kick-start the shutdown of it.
		 */

		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if (unlikely(jmi->shutting_down)) {
				continue;
			} else if (strcasecmp(jmi->name, session_type) == 0) {
				jobmgr_shutdown(jmi);
				break;
			}
		}
	} else if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
		// NOTE(review): drops the manager's receive right from the port set;
		// presumably the GUI session re-establishes it elsewhere — confirm.
		(void)job_assumes_zero(j, runtime_remove_mport(j->mgr->jm_port));
	}

	jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
	// session_type is a fixed-size MiG name_t; name_init is assumed to be at
	// least as large, so the unbounded strcpy is tolerated here.
	strcpy(j->mgr->name_init, session_type);

	if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
		j2->asport = asport;
		(void)job_assumes(j, job_dispatch(j2, true));
		kr = BOOTSTRAP_SUCCESS;
	}

	return kr;
}
9778
/* MiG routine: move the calling job into the session named `session_name`,
 * creating that session as an implicit subset if it does not exist yet.
 * Only permitted outside the system (PID 1) bootstrap, and only for
 * anonymous jobs; managed jobs should use LimitLoadToSessionType.
 */
kern_return_t
job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);

	if (!job_assumes(j, pid1_magic == false)) {
		job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!j->anonymous) {
		job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
	if (target_jm == j->mgr) {
		// Already there: drop the MiG references on both ports and return
		// our current bootstrap port.
		job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
		(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		(void)job_assumes_zero(j, launchd_mport_deallocate(requestor_port));
		*new_bsport = target_jm->jm_port;
		return BOOTSTRAP_SUCCESS;
	}

	if (!target_jm) {
		target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
		if (target_jm) {
			target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
			// NOTE(review): jobmgr_new() appears to retain asport itself;
			// this drops the reference belonging to this MiG call — confirm.
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	}

	if (!job_assumes(j, target_jm != NULL)) {
		job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
		return BOOTSTRAP_NO_MEMORY;
	}

	// Remove the job from its current job manager.
	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, pid_hash_sle);

	// Remember (via ji) whether the job was on the global-environment list,
	// so its membership can be re-created in the target manager below.
	job_t ji = NULL, jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
		if (ji == j) {
			LIST_REMOVE(ji, global_env_sle);
			break;
		}
	}

	// Put the job into the target job manager.
	LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
	LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

	if (ji) {
		LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
	}

	// Move our Mach services over if we're not in a flat namespace.
	if (!launchd_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
		struct machservice *msi = NULL, *msit = NULL;
		SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
			LIST_REMOVE(msi, name_hash_sle);
			LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}

	j->mgr = target_jm;

	if (!j->holds_ref) {
		/* Anonymous jobs which move around are particularly interesting to us, so we want to
		 * stick around while they're still around.
		 * For example, login calls into the PAM launchd module, which moves the process into
		 * the StandardIO session by default. So we'll hold a reference on that job to prevent
		 * ourselves from going away.
		 */
		j->holds_ref = true;
		job_log(j, LOG_PERF, "Job switched into manager: %s", j->mgr->name);
		runtime_add_ref();
	}

	*new_bsport = target_jm->jm_port;

	return KERN_SUCCESS;
}
9873
9874 kern_return_t
9875 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
9876 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
9877 mach_port_array_t *portsp, unsigned int *ports_cnt)
9878 {
9879 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
9880 mach_port_array_t ports = NULL;
9881 unsigned int cnt = 0, cnt2 = 0;
9882 size_t packed_size;
9883 struct machservice *ms;
9884 jobmgr_t jm;
9885 job_t ji;
9886
9887 if (!j) {
9888 return BOOTSTRAP_NO_MEMORY;
9889 }
9890
9891 jm = j->mgr;
9892
9893 if (unlikely(!pid1_magic)) {
9894 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
9895 return BOOTSTRAP_NOT_PRIVILEGED;
9896 }
9897 if (unlikely(jobmgr_parent(jm) == NULL)) {
9898 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
9899 return BOOTSTRAP_NOT_PRIVILEGED;
9900 }
9901 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
9902 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
9903 return BOOTSTRAP_NOT_PRIVILEGED;
9904 }
9905 if (unlikely(!j->anonymous)) {
9906 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
9907 return BOOTSTRAP_NOT_PRIVILEGED;
9908 }
9909
9910 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
9911
9912 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
9913 if (!job_assumes(j, outdata_obj_array)) {
9914 goto out_bad;
9915 }
9916
9917 *outdataCnt = 20 * 1024 * 1024;
9918 mig_allocate(outdata, *outdataCnt);
9919 if (!job_assumes(j, *outdata != 0)) {
9920 return 1;
9921 }
9922
9923 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9924 if (!ji->anonymous) {
9925 continue;
9926 }
9927 SLIST_FOREACH(ms, &ji->machservices, sle) {
9928 cnt++;
9929 }
9930 }
9931
9932 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
9933 if (!job_assumes(j, ports != NULL)) {
9934 goto out_bad;
9935 }
9936
9937 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9938 if (!ji->anonymous) {
9939 continue;
9940 }
9941
9942 SLIST_FOREACH(ms, &ji->machservices, sle) {
9943 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
9944 (void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
9945 } else {
9946 goto out_bad;
9947 }
9948
9949 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
9950 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
9951 } else {
9952 goto out_bad;
9953 }
9954
9955 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
9956 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
9957 } else {
9958 goto out_bad;
9959 }
9960
9961 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
9962 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
9963 } else {
9964 goto out_bad;
9965 }
9966
9967 ports[cnt2] = machservice_port(ms);
9968
9969 // Increment the send right by one so we can shutdown the jobmgr cleanly
9970 (void)jobmgr_assumes_zero(jm, launchd_mport_copy_send(ports[cnt2]));
9971 cnt2++;
9972 }
9973 }
9974
9975 (void)job_assumes(j, cnt == cnt2);
9976
9977 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
9978 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
9979 if (!job_assumes(j, packed_size != 0)) {
9980 goto out_bad;
9981 }
9982
9983 launch_data_free(outdata_obj_array);
9984
9985 *portsp = ports;
9986 *ports_cnt = cnt;
9987
9988 *reqport = jm->req_port;
9989 *rcvright = jm->jm_port;
9990
9991 jm->req_port = 0;
9992 jm->jm_port = 0;
9993
9994 workaround_5477111 = j;
9995
9996 jobmgr_shutdown(jm);
9997
9998 return BOOTSTRAP_SUCCESS;
9999
10000 out_bad:
10001 if (outdata_obj_array) {
10002 launch_data_free(outdata_obj_array);
10003 }
10004 if (*outdata) {
10005 mig_deallocate(*outdata, *outdataCnt);
10006 }
10007 if (ports) {
10008 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
10009 }
10010
10011 return BOOTSTRAP_NO_MEMORY;
10012 }
10013
10014 kern_return_t
10015 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
10016 {
10017 int bsdepth = 0;
10018 jobmgr_t jmr;
10019
10020 if (!j) {
10021 return BOOTSTRAP_NO_MEMORY;
10022 }
10023 if (j->mgr->shutting_down) {
10024 return BOOTSTRAP_UNKNOWN_SERVICE;
10025 }
10026
10027 jmr = j->mgr;
10028
10029 while ((jmr = jobmgr_parent(jmr)) != NULL) {
10030 bsdepth++;
10031 }
10032
10033 // Since we use recursion, we need an artificial depth for subsets
10034 if (unlikely(bsdepth > 100)) {
10035 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
10036 return BOOTSTRAP_NO_MEMORY;
10037 }
10038
10039 char name[NAME_MAX];
10040 snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
10041
10042 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
10043 if (unlikely(requestorport == MACH_PORT_NULL)) {
10044 return BOOTSTRAP_NOT_PRIVILEGED;
10045 }
10046 return BOOTSTRAP_NO_MEMORY;
10047 }
10048
10049 *subsetportp = jmr->jm_port;
10050 jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
10051
10052 /* A job could create multiple subsets, so only add a reference the first time
10053 * it does so we don't have to keep a count.
10054 */
10055 if (j->anonymous && !j->holds_ref) {
10056 job_log(j, LOG_PERF, "Job created subset: %s", jmr->name);
10057 j->holds_ref = true;
10058 runtime_add_ref();
10059 }
10060
10061 job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
10062 return BOOTSTRAP_SUCCESS;
10063 }
10064
/* Import a single XPC service plist (`pload`) into a domain. The optional
 * XPCDomain key redirects the import into the system, per-user, or
 * per-session singleton domain; otherwise the service lands in `jm` itself.
 * Returns the imported (or aliased) job, or NULL with errno set on failure.
 */
job_t
_xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
{
	jobmgr_t where2put = NULL;

	if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
		errno = EINVAL;
		return NULL;
	}

	launch_data_t ldlabel = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL);
	if (!ldlabel || launch_data_get_type(ldlabel) != LAUNCH_DATA_STRING) {
		errno = EINVAL;
		return NULL;
	}

	const char *label = launch_data_get_string(ldlabel);
	jobmgr_log(jm, LOG_DEBUG, "Importing service: %s", label);

	launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
	if (destname) {
		bool supported_domain = false;

		if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
			const char *str = launch_data_get_string(destname);
			if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
				where2put = _s_xpc_system_domain;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
				where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
				// NOTE(review): only the per-user case sets supported_domain,
				// so the MultipleInstances rejection below applies to the
				// system and per-session domains — confirm that asymmetry
				// is intended.
				supported_domain = true;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
				where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
			} else {
				jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
				errno = EINVAL;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
			errno = EINVAL;
		}

		// Multiple-instance services cannot live in domains that do not
		// support them; veto the destination in that case.
		if (where2put && !supported_domain) {
			launch_data_t mi = NULL;
			if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
				if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
					jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
					where2put = NULL;
					errno = EINVAL;
				}
			}
		}
	} else {
		// No XPCDomain key: import directly into the requesting domain.
		where2put = jm;
	}

	job_t j = NULL;
	if (where2put) {
		/* Gross. If the service already exists in a singleton domain, then
		 * jobmgr_import2() will return the existing job. But if we fail to alias
		 * this job, we will normally want to remove it. But if we did not create
		 * it in the first place, then we need to avoid removing it. So check
		 * errno against EEXIST in the success case and if it's EEXIST, then do
		 * not remove the original job in the event of a failed alias.
		 *
		 * This really needs to be re-thought, but I think it'll require a larger
		 * evaluation of launchd's data structures. Right now, once a job is
		 * imported into a singleton domain, it won't be removed until the system
		 * shuts down, but that may not always be true. If it ever changes, we'll
		 * have a problem because we'll have to account for all existing aliases
		 * and clean them up somehow. Or just start ref-counting. I knew this
		 * aliasing stuff would be trouble...
		 *
		 * <rdar://problem/10646503>
		 */
		jobmgr_log(where2put, LOG_DEBUG, "Importing service...");

		// errno is cleared so that EEXIST below reliably means "pre-existing".
		errno = 0;
		if ((j = jobmgr_import2(where2put, pload))) {
			bool created = (errno != EEXIST);
			j->xpc_service = true;

			if (where2put->xpc_singleton) {
				/* If the service was destined for one of the global domains,
				 * then we have to alias it into our local domain to reserve the
				 * name.
				 */
				job_t ja = NULL;
				if (!(ja = job_new_alias(jm, j))) {
					/* If we failed to alias the job because of a conflict over
					 * the label, then we remove it from the global domain. We
					 * don't want to risk having imported a malicious job into
					 * one of the global domains.
					 */
					if (errno != EEXIST) {
						job_log(j, LOG_ERR, "Failed to alias job into: %s: %d: %s", where2put->name, errno, strerror(errno));
					} else {
						errno = 0;
					}

					// Only remove the global-domain job if this call created it.
					if (created) {
						jobmgr_log(jm, LOG_WARNING, "Singleton service already existed in job-local namespace. Removing: %s", j->label);
						job_remove(j);
					}

					j = NULL;
				} else {
					jobmgr_log(jm, LOG_DEBUG, "Aliased service into local domain: %s", j->label);
					(void)job_dispatch(j, false);
					ja->xpc_service = true;
					// Hand the alias back, not the singleton-domain original.
					j = ja;
				}
			} else {
				(void)job_dispatch(j, false);
			}
		}
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Could not find destination for service: %s", label);
	}

	return j;
}
10186
10187 int
10188 _xpc_domain_import_services(job_t j, launch_data_t services)
10189 {
10190 int error = EINVAL;
10191 if (launch_data_get_type(services) != LAUNCH_DATA_ARRAY) {
10192 return error;
10193 }
10194
10195 size_t i = 0;
10196 size_t c = launch_data_array_get_count(services);
10197 jobmgr_log(j->mgr, LOG_DEBUG, "Importing new services: %lu", c);
10198
10199 for (i = 0; i < c; i++) {
10200 jobmgr_log(j->mgr, LOG_DEBUG, "Importing service at index: %lu", i);
10201
10202 job_t nj = NULL;
10203 launch_data_t ploadi = launch_data_array_get_index(services, i);
10204 if (!(nj = _xpc_domain_import_service(j->mgr, ploadi))) {
10205 if (!j->mgr->session_initialized && errno) {
10206 /* Service import failures are only fatal if the domain is being
10207 * initialized. If we're extending the domain, we can run into
10208 * errors with services already existing, so we just ignore them.
10209 * In the case of a domain extension, we don't want to halt the
10210 * operation if we run into an error with one service.
10211 *
10212 * <rdar://problem/10842779>
10213 */
10214 jobmgr_log(j->mgr, LOG_ERR, "Failed to import service at index: %lu: %d: %s", i, errno, strerror(errno));
10215 error = errno;
10216 break;
10217 }
10218 } else {
10219 jobmgr_log(j->mgr, LOG_DEBUG, "Imported service: %s", nj->label);
10220 }
10221 }
10222
10223 if (i == c) {
10224 error = 0;
10225 }
10226
10227 return error;
10228 }
10229
10230 kern_return_t
10231 xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
10232 {
10233 if (unlikely(!pid1_magic)) {
10234 job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
10235 return BOOTSTRAP_NOT_PRIVILEGED;
10236 }
10237 if (!j || !MACH_PORT_VALID(reqport)) {
10238 return BOOTSTRAP_UNKNOWN_SERVICE;
10239 }
10240 if (root_jobmgr->shutting_down) {
10241 jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
10242 return BOOTSTRAP_NOT_PRIVILEGED;
10243 }
10244 if (!j->xpc_bootstrapper) {
10245 job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
10246 return BOOTSTRAP_NOT_PRIVILEGED;
10247 }
10248
10249 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
10250 /* All XPC domains are children of the root job manager. What we're creating
10251 * here is really just a skeleton. By creating it, we're adding reqp to our
10252 * port set. It will have two messages on it. The first specifies the
10253 * environment of the originator. This is so we can cache it and hand it to
10254 * xpcproxy to bootstrap our services. The second is the set of jobs that is
10255 * to be bootstrapped in.
10256 */
10257 jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
10258 if (job_assumes(j, jm != NULL)) {
10259 jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
10260 jm->shortdesc = "private";
10261 kr = BOOTSTRAP_SUCCESS;
10262 }
10263
10264 return kr;
10265 }
10266
/* MiG routine: record the originating process's environment (ports, audit
 * session, credentials, opaque context) on a freshly created XPC domain.
 * This is the first of the two messages an XPC domain skeleton expects
 * (see xpc_domain_import2). May only be applied once per domain.
 */
kern_return_t
xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
{
	if (!j) {
		/* Due to the whacky nature of XPC service bootstrapping, we can end up
		 * getting this message long after the requesting process has gone away.
		 * See <rdar://problem/8593143>.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jobmgr_t jm = j->mgr;
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// A non-null req_asport means the environment was already set; refuse
	// a second attempt.
	if (jm->req_asport != MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	struct proc_bsdinfowithuniqid proc;
	if (proc_pidinfo(ldc->pid, PROC_PIDT_BSDINFOWITHUNIQID, 1, &proc, PROC_PIDT_BSDINFOWITHUNIQID_SIZE) == 0) {
		// The requestor vanished (ESRCH) or the lookup failed; the domain
		// skeleton is useless without its environment, so tear it down.
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}

		jm->error = errno;
		jobmgr_remove(jm);
		return BOOTSTRAP_NO_MEMORY;
	}

#if !TARGET_OS_EMBEDDED
	if (jobmgr_assumes_zero(jm, audit_session_port(ldc->asid, &jm->req_asport)) != 0) {
		jm->error = EPERM;
		jobmgr_remove(jm);
		job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#else
	// No audit sessions on embedded; use a sentinel so the "already set"
	// check above still works.
	jm->req_asport = MACH_PORT_DEAD;
#endif

	// Any attach requests that were waiting for this domain's originator
	// PID now belong to the domain itself.
	struct waiting4attach *w4ai = NULL;
	struct waiting4attach *w4ait = NULL;
	LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
		if (w4ai->dest == ldc->pid) {
			jobmgr_log(jm, LOG_DEBUG, "Migrating attach for: %s", w4ai->name);
			LIST_REMOVE(w4ai, le);
			LIST_INSERT_HEAD(&jm->attaches, w4ai, le);
			w4ai->dest = 0;
		}
	}

	(void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s.%d", proc.pbsd.pbi_comm, ldc->pid);
	strlcpy(jm->owner, proc.pbsd.pbi_comm, sizeof(jm->owner));
	// Cache the originator's ports, context blob, and credentials for later
	// hand-off to xpcproxy (see xpc_domain_check_in).
	jm->req_bsport = bsport;
	jm->req_excport = excport;
	jm->req_rport = rp;
	jm->req_ctx = ctx;
	jm->req_ctx_sz = ctx_sz;
	jm->req_pid = ldc->pid;
	jm->req_euid = ldc->euid;
	jm->req_egid = ldc->egid;
	jm->req_asid = ldc->asid;
	jm->req_uniqueid = proc.p_uniqidentifier.p_uniqueid;

	return KERN_SUCCESS;
}
10336
/* MiG routine: unpack and import the set of services for an XPC domain.
 * This is the second of the two messages an XPC domain skeleton expects
 * (see xpc_domain_import2). Only the XPC bootstrapper may load services,
 * and only into a domain that has not been initialized yet. On success,
 * wakes the originator waiting on req_rport.
 */
kern_return_t
xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	// The caller's PID must correspond to the xpc_bootstrapper job in the
	// root manager, not just any job in the domain.
	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!(rootj && rootj->xpc_bootstrapper)) {
		job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// This is just for XPC domains (for now).
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (j->mgr->session_initialized) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!services) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// NOTE(review): _xpc_domain_import_services() returns an errno-style int,
	// which is returned here as a kern_return_t on failure — the caller
	// apparently only distinguishes zero/non-zero.
	int error = _xpc_domain_import_services(j, services);
	if (error) {
		j->mgr->error = error;
		jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
		jobmgr_remove(j->mgr);
	} else {
		j->mgr->session_initialized = true;
		// Wake the originator blocked in xpc_domain_import2()'s protocol.
		(void)jobmgr_assumes_zero(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS));
		j->mgr->req_rport = MACH_PORT_NULL;

		/* Returning a failure code will destroy the message, whereas returning
		 * success will not, so we need to clean up here.
		 */
		mig_deallocate(services_buff, services_sz);
		error = BOOTSTRAP_SUCCESS;
	}

	return error;
}
10384
10385 kern_return_t
10386 xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport,
10387 mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid,
10388 int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
10389 {
10390 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
10391 return BOOTSTRAP_UNKNOWN_SERVICE;
10392 }
10393 jobmgr_t jm = j->mgr;
10394 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
10395 return BOOTSTRAP_NOT_PRIVILEGED;
10396 }
10397
10398 if (jm->req_asport == MACH_PORT_NULL) {
10399 return BOOTSTRAP_NOT_PRIVILEGED;
10400 }
10401
10402 *bsport = jm->req_bsport;
10403 *sbsport = root_jobmgr->jm_port;
10404 *excport = jm->req_excport;
10405 if (j->joins_gui_session) {
10406 if (jm->req_gui_asport) {
10407 *asport = jm->req_gui_asport;
10408 } else {
10409 job_log(j, LOG_NOTICE, "No GUI session set for UID of user service. This service may not act properly.");
10410 *asport = jm->req_asport;
10411 }
10412 } else {
10413 *asport = jm->req_asport;
10414 }
10415
10416 *uid = jm->req_euid;
10417 *gid = jm->req_egid;
10418 *asid = jm->req_asid;
10419
10420 *ctx = jm->req_ctx;
10421 *ctx_sz = jm->req_ctx_sz;
10422
10423 return KERN_SUCCESS;
10424 }
10425
10426 kern_return_t
10427 xpc_domain_get_service_name(job_t j, event_name_t name)
10428 {
10429 if (!j) {
10430 return BOOTSTRAP_NO_MEMORY;
10431 }
10432
10433 if (!j->xpc_service) {
10434 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
10435 return BOOTSTRAP_NOT_PRIVILEGED;
10436 }
10437
10438 const char *what2find = j->label;
10439 if (j->dedicated_instance) {
10440 what2find = j->original->label;
10441 }
10442
10443 struct machservice *msi = NULL;
10444 SLIST_FOREACH(msi, &j->machservices, sle) {
10445 if (strcmp(msi->name, what2find) == 0) {
10446 break;
10447 }
10448 }
10449
10450 if (!msi) {
10451 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name that does not exist: %s", j->label);
10452 return BOOTSTRAP_UNKNOWN_SERVICE;
10453 }
10454
10455 (void)strlcpy(name, msi->name, sizeof(event_name_t));
10456 return BOOTSTRAP_SUCCESS;
10457 }
10458
#if XPC_LPI_VERSION >= 20111216
/* MIG routine: import additional services into an existing XPC domain. Only
 * the domain's XPC bootstrapper process may do this, and only for XPC domains.
 */
kern_return_t
xpc_domain_add_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_t bootstrapper = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!bootstrapper || !bootstrapper->xpc_bootstrapper) {
		job_log(j, LOG_ERR, "Attempt to add service to XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t service_list = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!service_list) {
		return BOOTSTRAP_NO_MEMORY;
	}

	int result = _xpc_domain_import_services(j, service_list);
	if (result == 0) {
		/* Returning success does not destroy the request message, so the
		 * out-of-line buffer must be deallocated here; on failure MIG
		 * destroys it for us.
		 */
		mig_deallocate(services_buff, services_sz);
	}

	return result;
}
#endif
10491
10492 #pragma mark XPC Events
/* Find (or lazily create) the event-channel MachService named `stream` on job
 * `j`. On success, returns 0 with *ms pointing at the channel. Returns EEXIST
 * when the job already registered an ordinary (non-channel) MachService with
 * the same name, and EXNOMEM when allocation fails.
 */
int
xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms)
{
	int error = EXNOMEM;
	// First look for an existing service with the requested name.
	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (strcmp(stream, msi->name) == 0) {
			break;
		}
	}

	if (!msi) {
		mach_port_t sp = MACH_PORT_NULL;
		msi = machservice_new(j, stream, &sp, false);
		if (!msi) {
			return EXNOMEM;
		}

		job_log(j, LOG_DEBUG, "Creating new MachService for stream: %s", stream);
		/* Hack to keep this from being publicly accessible through
		 * bootstrap_look_up().
		 */
		if (!j->dedicated_instance) {
			LIST_REMOVE(msi, name_hash_sle);
		}
		msi->event_channel = true;

		/* If we call job_dispatch() here before the audit session for the job
		 * has been set, we'll end up not watching this service. But we also have
		 * to take care not to watch the port if the job is active.
		 *
		 * See <rdar://problem/10357855>.
		 */
		if (!j->currently_ignored) {
			machservice_watch(j, msi);
		}

		error = 0;
		*ms = msi;
	} else if (!msi->event_channel) {
		// Name collision with a regular MachService: refuse.
		job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
		error = EEXIST;
	} else {
		// Channel already exists; hand it back.
		error = 0;
		*ms = msi;
	}

	return error;
}
10542
10543 int
10544 xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply)
10545 {
10546 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10547 if (!stream) {
10548 return EXINVAL;
10549 }
10550
10551 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10552 if (!token) {
10553 return EXINVAL;
10554 }
10555
10556 job_log(j, LOG_DEBUG, "Getting event name for stream/token: %s/0x%llu", stream, token);
10557
10558 int result = ESRCH;
10559 struct externalevent *event = externalevent_find(stream, token);
10560 if (event && j->event_monitor) {
10561 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10562 xpc_dictionary_set_string(reply2, XPC_EVENT_ROUTINE_KEY_NAME, event->name);
10563 *reply = reply2;
10564
10565 job_log(j, LOG_DEBUG, "Found: %s", event->name);
10566 result = 0;
10567 }
10568
10569 return result;
10570 }
10571
10572 int
10573 xpc_event_copy_entitlements(job_t j, xpc_object_t request, xpc_object_t *reply)
10574 {
10575 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10576 if (!stream) {
10577 return EXINVAL;
10578 }
10579
10580 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10581 if (!token) {
10582 return EXINVAL;
10583 }
10584
10585 job_log(j, LOG_DEBUG, "Getting entitlements for stream/token: %s/0x%llu", stream, token);
10586
10587 int result = ESRCH;
10588 struct externalevent *event = externalevent_find(stream, token);
10589 if (event && j->event_monitor) {
10590 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10591 xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_ENTITLEMENTS, event->entitlements);
10592 *reply = reply2;
10593
10594 job_log(j, LOG_DEBUG, "Found: %s", event->name);
10595 result = 0;
10596 }
10597
10598 return result;
10599 }
10600
10601 // TODO - can be removed with rdar://problem/12666150
10602 #ifndef XPC_EVENT_FLAG_ALLOW_UNMANAGED
10603 #define XPC_EVENT_FLAG_ALLOW_UNMANAGED (1 << 1)
10604 #endif
10605
/* Handler for XPC_EVENT_SET: register (event != NULL) or remove (event ==
 * NULL) the event named `key` on stream `stream` for the requesting job. Any
 * existing registration with the same (stream, key) is removed first.
 *
 * Returns 0 on success (with *reply set), EXINVAL for malformed requests,
 * EPERM for unmanaged callers, or EXNOMEM when the stream/event could not be
 * created.
 */
int
xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);
	if (!key) {
		return EXINVAL;
	}

	// Event payload is optional (NULL means "remove"), but must be a dictionary.
	xpc_object_t event = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_EVENT);
	if (event && xpc_get_type(event) != XPC_TYPE_DICTIONARY) {
		return EXINVAL;
	}

	uint64_t flags = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_FLAGS);

	/* Don't allow events to be set for anonymous jobs unless specifically
	 * requested in the flags. Only permit this for internal development.
	 */
	if (j->anonymous && ((flags & XPC_EVENT_FLAG_ALLOW_UNMANAGED) == 0 || !launchd_apple_internal)) {
		job_log(j, LOG_ERR, "Unmanaged jobs may not make XPC Events requests.");
		return EPERM;
	}

	job_log(j, LOG_DEBUG, "%s event for stream/key: %s/%s", event ? "Setting" : "Removing", stream, key);

	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		/* If the event for the given key already exists for the job, we need to
		 * remove the old one first.
		 */
		// externalevent_delete() frees eei, so we break out immediately.
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			job_log(j, LOG_DEBUG, "Event exists. Removing.");
			externalevent_delete(eei);
			break;
		}
	}

	int result = EXNOMEM;
	if (event) {
		// Streams are created lazily on first registration.
		struct eventsystem *es = eventsystem_find(stream);
		if (!es) {
			job_log(j, LOG_DEBUG, "Creating stream.");
			es = eventsystem_new(stream);
		}

		if (es) {
			job_log(j, LOG_DEBUG, "Adding event.");
			if (externalevent_new(j, es, key, event, flags)) {
				job_log(j, LOG_DEBUG, "Added new event for key: %s", key);
				result = 0;
			} else {
				job_log(j, LOG_ERR, "Could not create event for key: %s", key);
			}
		} else {
			job_log(j, LOG_ERR, "Event stream could not be created: %s", stream);
		}
	} else {
		/* If the event was NULL, then we just remove it and return. */
		result = 0;
	}

	if (result == 0) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		*reply = reply2;
	}

	return result;
}
10679
10680 int
10681 xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply)
10682 {
10683 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10684 const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);
10685
10686 bool all_streams = (stream == NULL);
10687 bool all_events = (key == NULL || strcmp(key, "") == 0); // strcmp for libxpc compatibility
10688 xpc_object_t events = NULL;
10689
10690 if (all_streams && !all_events) {
10691 return EXINVAL;
10692 }
10693
10694 if (all_streams || all_events) {
10695 job_log(j, LOG_DEBUG, "Fetching all events%s%s", stream ? " for stream: " : "", stream ? stream : "");
10696 events = xpc_dictionary_create(NULL, NULL, 0);
10697 } else {
10698 job_log(j, LOG_DEBUG, "Fetching stream/key: %s/%s", stream, key);
10699 }
10700
10701 int result = ESRCH;
10702 struct externalevent *eei = NULL;
10703 LIST_FOREACH(eei, &j->events, job_le) {
10704 if (all_streams) {
10705 xpc_object_t sub = xpc_dictionary_get_value(events, eei->sys->name);
10706 if (sub == NULL) {
10707 sub = xpc_dictionary_create(NULL, NULL, 0);
10708 xpc_dictionary_set_value(events, eei->sys->name, sub);
10709 xpc_release(sub);
10710 }
10711 xpc_dictionary_set_value(sub, eei->name, eei->event);
10712 } else if (strcmp(eei->sys->name, stream) == 0) {
10713 if (all_events) {
10714 xpc_dictionary_set_value(events, eei->name, eei->event);
10715 } else if (strcmp(eei->name, key) == 0) {
10716 job_log(j, LOG_DEBUG, "Found event.");
10717 events = xpc_retain(eei->event);
10718 break;
10719 }
10720 }
10721 }
10722
10723 if (events) {
10724 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10725 xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENT, events);
10726 xpc_release(events);
10727
10728 *reply = reply2;
10729 result = 0;
10730 }
10731
10732 return result;
10733 }
10734
10735 int
10736 xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
10737 {
10738 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10739 if (!stream) {
10740 return EXINVAL;
10741 }
10742
10743 job_log(j, LOG_DEBUG, "Checking in stream: %s", stream);
10744
10745 struct machservice *ms = NULL;
10746 int error = xpc_event_find_channel(j, stream, &ms);
10747 if (error) {
10748 job_log(j, LOG_ERR, "Failed to check in: 0x%x: %s", error, xpc_strerror(error));
10749 } else if (ms->isActive) {
10750 job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
10751 error = EBUSY;
10752 } else {
10753 machservice_request_notifications(ms);
10754
10755 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10756 xpc_dictionary_set_mach_recv(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10757 *reply = reply2;
10758 error = 0;
10759 }
10760
10761 return error;
10762 }
10763
10764 int
10765 xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply)
10766 {
10767 if (!j->event_monitor) {
10768 return EPERM;
10769 }
10770
10771 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10772 if (!stream) {
10773 return EXINVAL;
10774 }
10775
10776 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10777 if (!token) {
10778 return EXINVAL;
10779 }
10780
10781 job_log(j, LOG_DEBUG, "Looking up channel for stream/token: %s/%llu", stream, token);
10782
10783 struct externalevent *ee = externalevent_find(stream, token);
10784 if (!ee) {
10785 return ESRCH;
10786 }
10787
10788 struct machservice *ms = NULL;
10789 int error = xpc_event_find_channel(ee->job, stream, &ms);
10790 if (!error) {
10791 job_log(j, LOG_DEBUG, "Found event channel port: 0x%x", ms->port);
10792 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10793 xpc_dictionary_set_mach_send(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10794 *reply = reply2;
10795 error = 0;
10796 } else {
10797 job_log(j, LOG_ERR, "Could not find event channel for stream/token: %s/%llu: 0x%x: %s", stream, token, error, xpc_strerror(error));
10798 }
10799
10800 return error;
10801 }
10802
10803 int
10804 xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
10805 {
10806 if (!j->event_monitor) {
10807 return EPERM;
10808 }
10809
10810 /* This indicates that the event monitor is now safe to signal. This state
10811 * is independent of whether this operation actually succeeds; we just need
10812 * it to ignore SIGUSR1.
10813 */
10814 j->event_monitor_ready2signal = true;
10815
10816 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10817 if (!stream) {
10818 return EXINVAL;
10819 }
10820
10821 job_log(j, LOG_DEBUG, "Provider checking in for stream: %s", stream);
10822
10823 xpc_object_t events = xpc_array_create(NULL, 0);
10824 struct eventsystem *es = eventsystem_find(stream);
10825 if (!es) {
10826 /* If we had to create the event stream, there were no events, so just
10827 * give back the empty array.
10828 */
10829 job_log(j, LOG_DEBUG, "Creating event stream.");
10830 es = eventsystem_new(stream);
10831 if (!job_assumes(j, es)) {
10832 xpc_release(events);
10833 return EXNOMEM;
10834 }
10835
10836 if (strcmp(stream, "com.apple.launchd.helper") == 0) {
10837 _launchd_support_system = es;
10838 }
10839 } else {
10840 job_log(j, LOG_DEBUG, "Filling event array.");
10841
10842 struct externalevent *ei = NULL;
10843 LIST_FOREACH(ei, &es->events, sys_le) {
10844 xpc_array_set_uint64(events, XPC_ARRAY_APPEND, ei->id);
10845 xpc_array_append_value(events, ei->event);
10846 }
10847 }
10848
10849 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10850 xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENTS, events);
10851 xpc_release(events);
10852 *reply = reply2;
10853
10854 return 0;
10855 }
10856
/* Handler for XPC_EVENT_PROVIDER_SET_STATE (event monitor only): record
 * whether the event identified by (stream, token) is currently firing, then
 * re-dispatch the owning job so the state change takes effect.
 */
int
xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	job_t other_j = NULL;

	if (!j->event_monitor) {
		return EPERM;
	}

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
	if (!token) {
		return EXINVAL;
	}

	// The state value must be present and must be a boolean.
	bool state = false;
	xpc_object_t xstate = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_STATE);
	if (!xstate || xpc_get_type(xstate) != XPC_TYPE_BOOL) {
		return EXINVAL;
	} else {
		state = xpc_bool_get_value(xstate);
	}

	job_log(j, LOG_DEBUG, "Setting event state to %s for stream/token: %s/%llu", state ? "true" : "false", stream, token);

	struct externalevent *ei = externalevent_find(stream, token);
	if (!ei) {
		job_log(j, LOG_ERR, "Could not find stream/token: %s/%llu", stream, token);
		return ESRCH;
	}

	// Capture the owning job now: externalevent_delete() below frees ei.
	other_j = ei->job;
	ei->state = state;

	if (ei->internal) {
		// Internal events gate exec(3); consume the one-shot event here.
		job_log(ei->job, LOG_NOTICE, "Job should be able to exec(3) now.");
		ei->job->waiting4ok = false;
		externalevent_delete(ei);
	}

	// Use the saved job pointer; ei may be dangling after the delete above.
	(void)job_dispatch(other_j, false);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
10908
/* Demultiplex an incoming XPC Events request arriving on port `p`. Returns
 * false only when the message carries no opcode (i.e. is not an events
 * request); otherwise a reply (possibly an in-band error) is produced and
 * true is returned.
 */
bool
xpc_event_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	// Record the caller's credentials for the handlers below.
	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	struct ldcred *ldc = runtime_get_caller_creds();
	job_t j = managed_job(ldc->pid);
	if (!j) {
		j = job_mig_intran(p);
		if (!j) {
			/* No resolvable job: force op to (uint64_t)-1 so the switch below
			 * routes to the EINVAL case instead of calling a handler with
			 * j == NULL.
			 */
			op = -1;
		}
	}

	job_log(j, LOG_DEBUG, "Incoming XPC event request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_EVENT_GET_NAME:
		error = xpc_event_get_event_name(j, request, reply);
		break;
	case XPC_EVENT_SET:
		error = xpc_event_set_event(j, request, reply);
		break;
	case XPC_EVENT_COPY:
		error = xpc_event_copy_event(j, request, reply);
		break;
	case XPC_EVENT_CHECK_IN:
		error = xpc_event_channel_check_in(j, request, reply);
		break;
	case XPC_EVENT_LOOK_UP:
		error = xpc_event_channel_look_up(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_CHECK_IN:
		error = xpc_event_provider_check_in(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_SET_STATE:
		error = xpc_event_provider_set_state(j, request, reply);
		break;
	case -1:
		// Reached only via the forced op above (caller had no job).
		error = EINVAL;
		break;
	default:
		job_log(j, LOG_ERR, "Bogus opcode.");
		error = EDOM;
	}

	if (error) {
		// Report failures in-band so libxpc sees a structured error.
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_uint64(reply2, XPC_EVENT_ROUTINE_KEY_ERROR, error);
		*reply = reply2;
	}

	return true;
}
10974
10975 uint64_t
10976 xpc_get_jetsam_entitlement(const char *key)
10977 {
10978 uint64_t entitlement = 0;
10979
10980 audit_token_t *token = runtime_get_caller_token();
10981 xpc_object_t value = xpc_copy_entitlement_for_token(key, token);
10982 if (value) {
10983 if (xpc_get_type(value) == XPC_TYPE_UINT64) {
10984 entitlement = xpc_uint64_get_value(value);
10985 }
10986
10987 xpc_release(value);
10988 }
10989
10990 return entitlement;
10991 }
10992
10993 int
10994 xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply)
10995 {
10996 if (!j) {
10997 return EINVAL;
10998 }
10999
11000 const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
11001 if (!label) {
11002 return EXINVAL;
11003 }
11004
11005 xpc_jetsam_band_t entitled_band = -1;
11006 xpc_jetsam_band_t requested_band = (xpc_jetsam_band_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_PRIORITY_BAND);
11007 if (!requested_band) {
11008 return EXINVAL;
11009 }
11010
11011 if (!(requested_band >= XPC_JETSAM_BAND_SUSPENDED && requested_band < XPC_JETSAM_BAND_LAST)) {
11012 return EXINVAL;
11013 }
11014
11015 uint64_t rcdata = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_RCDATA);
11016
11017 job_t tj = job_find(root_jobmgr, label);
11018 if (!tj) {
11019 return EXSRCH;
11020 }
11021
11022 boolean_t allow = false;
11023 if (j->embedded_god) {
11024 allow = true;
11025 } else {
11026 entitled_band = xpc_get_jetsam_entitlement("com.apple.private.jetsam.modify-priority");
11027 if (entitled_band >= requested_band) {
11028 allow = true;
11029 }
11030 }
11031
11032 if (!allow) {
11033 if (launchd_no_jetsam_perm_check) {
11034 job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set priority: %d", requested_band);
11035 } else {
11036 job_log(j, LOG_ERR, "Job cannot decrease Jetsam priority band (requested/maximum): %d/%d", requested_band, entitled_band);
11037 return EPERM;
11038 }
11039 }
11040
11041 job_log(j, LOG_INFO, "Setting Jetsam band: %d.", requested_band);
11042 job_update_jetsam_properties(tj, requested_band, rcdata);
11043
11044 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
11045 *reply = reply2;
11046
11047 return 0;
11048 }
11049
11050 int
11051 xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply)
11052 {
11053 if (!j) {
11054 return EINVAL;
11055 }
11056
11057 const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
11058 if (!label) {
11059 return EXINVAL;
11060 }
11061
11062 int32_t entitlement_limit = 0;
11063 int32_t requested_limit = (int32_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_MEMORY_LIMIT);
11064
11065 job_t tj = job_find(root_jobmgr, label);
11066 if (!tj) {
11067 return EXSRCH;
11068 }
11069
11070 boolean_t allow = false;
11071 if (j->embedded_god) {
11072 allow = true;
11073 } else {
11074 entitlement_limit = (int32_t)xpc_get_jetsam_entitlement("com.apple.private.jetsam.memory_limit");
11075 if (entitlement_limit >= requested_limit) {
11076 allow = true;
11077 }
11078 }
11079
11080 if (!allow) {
11081 if (launchd_no_jetsam_perm_check) {
11082 job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set memory limit: %d", requested_limit);
11083 } else {
11084 job_log(j, LOG_ERR, "Job cannot set Jetsam memory limit (requested/maximum): %d/%d", requested_limit, entitlement_limit);
11085 return EPERM;
11086 }
11087 }
11088
11089 job_log(j, LOG_INFO, "Setting Jetsam memory limit: %d.", requested_limit);
11090 job_update_jetsam_memory_limit(tj, requested_limit);
11091
11092 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
11093 *reply = reply2;
11094
11095 return 0;
11096 }
11097
11098 static jobmgr_t
11099 _xpc_process_find_target_manager(job_t j, xpc_service_type_t type, pid_t pid)
11100 {
11101 jobmgr_t target = NULL;
11102 if (type == XPC_SERVICE_TYPE_BUNDLED) {
11103 job_log(j, LOG_DEBUG, "Bundled service. Searching for XPC domains for PID: %d", pid);
11104
11105 jobmgr_t jmi = NULL;
11106 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
11107 if (jmi->req_pid && jmi->req_pid == pid) {
11108 jobmgr_log(jmi, LOG_DEBUG, "Found job manager for PID.");
11109 target = jmi;
11110 break;
11111 }
11112 }
11113 } else if (type == XPC_SERVICE_TYPE_LAUNCHD || type == XPC_SERVICE_TYPE_APP) {
11114 target = j->mgr;
11115 }
11116
11117 return target;
11118 }
11119
/* Handler for XPC_PROCESS_SERVICE_ATTACH: register interest in a named
 * service so the caller is notified of new instances, and report any
 * currently-running instance's PID. Requires the attach entitlement.
 */
static int
xpc_process_attach(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j) {
		return EINVAL;
	}

	// Gate on the attach entitlement of the calling process.
	audit_token_t *token = runtime_get_caller_token();
	xpc_object_t entitlement = xpc_copy_entitlement_for_token(XPC_SERVICE_ENTITLEMENT_ATTACH, token);
	if (!entitlement) {
		job_log(j, LOG_ERR, "Job does not have entitlement: %s", XPC_SERVICE_ENTITLEMENT_ATTACH);
		return EPERM;
	}

	if (entitlement != XPC_BOOL_TRUE) {
		char *desc = xpc_copy_description(entitlement);
		job_log(j, LOG_ERR, "Job has bad value for entitlement: %s:\n%s", XPC_SERVICE_ENTITLEMENT_ATTACH, desc);
		free(desc);

		xpc_release(entitlement);
		return EPERM;
	}
	// NOTE(review): entitlement is not released on the success path; it
	// compared pointer-equal to the XPC_BOOL_TRUE singleton, which
	// presumably makes the release unnecessary — confirm.

	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
	if (!name) {
		return EXINVAL;
	}

	xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
	if (!type) {
		return EXINVAL;
	}

	// Send right the new instance will be delivered over.
	mach_port_t port = xpc_dictionary_copy_mach_send(request, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT);
	if (!MACH_PORT_VALID(port)) {
		return EXINVAL;
	}

	pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_HANDLE);

	job_log(j, LOG_DEBUG, "Attaching to service: %s", name);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
	if (target) {
		jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
		(void)jobmgr_assumes(target, waiting4attach_new(target, name, port, 0, type));

		/* HACK: This is awful. For legacy reasons, launchd job labels are all
		 * stored in a global namespace, which is stored in the root job
		 * manager. But XPC domains have a per-domain namespace. So if we're
		 * looking for a legacy launchd job, we have to redirect any attachment
		 * attempts to the root job manager to find existing instances.
		 *
		 * But because we store attachments on a per-job manager basis, we have
		 * to create the new attachment in the actual target job manager, hence
		 * why we change the target only after we've created the attachment.
		 */
		if (strcmp(target->name, VPROCMGR_SESSION_AQUA) == 0) {
			target = root_jobmgr;
		}

		job_t existing = job_find(target, name);
		if (existing && existing->p) {
			job_log(existing, LOG_DEBUG, "Found existing instance of service.");
			xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_PID, existing->p);
		} else {
			// No live instance yet; the caller waits for the notification port.
			xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
		}
	} else if (type == XPC_SERVICE_TYPE_BUNDLED) {
		// NOTE(review): target is NULL here; waiting4attach_new() presumably
		// records a pending attach keyed by pid in that case — confirm.
		(void)job_assumes(j, waiting4attach_new(target, name, port, pid, type));
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
	} else {
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, EXSRCH);
	}

	*reply = reply2;
	return 0;
}
11199
11200 static int
11201 xpc_process_detach(job_t j, xpc_object_t request, xpc_object_t *reply __unused)
11202 {
11203 if (!j) {
11204 return EINVAL;
11205 }
11206
11207 const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
11208 if (!name) {
11209 return EXINVAL;
11210 }
11211
11212 xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
11213 if (!type) {
11214 return EXINVAL;
11215 }
11216
11217 job_log(j, LOG_DEBUG, "Deatching from service: %s", name);
11218
11219 pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_PID);
11220 jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
11221 if (target) {
11222 jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
11223
11224 struct waiting4attach *w4ai = NULL;
11225 struct waiting4attach *w4ait = NULL;
11226 LIST_FOREACH_SAFE(w4ai, &target->attaches, le, w4ait) {
11227 if (strcmp(name, w4ai->name) == 0) {
11228 jobmgr_log(target, LOG_DEBUG, "Found attachment. Deleting.");
11229 waiting4attach_delete(target, w4ai);
11230 break;
11231 }
11232 }
11233 }
11234
11235 return 0;
11236 }
11237
11238 static int
11239 xpc_process_get_properties(job_t j, xpc_object_t request, xpc_object_t *reply)
11240 {
11241 if (j->anonymous) {
11242 /* Total hack. libxpc will send requests to the pipe created out of the
11243 * process' bootstrap port, so when job_mig_intran() tries to resolve
11244 * the process into a job, it'll wind up creating an anonymous job if
11245 * the requestor was an XPC service, whose job manager is an XPC domain.
11246 */
11247 pid_t pid = j->p;
11248 jobmgr_t jmi = NULL;
11249 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
11250 if ((j = jobmgr_find_by_pid(jmi, pid, false))) {
11251 break;
11252 }
11253 }
11254 }
11255
11256 if (!j || j->anonymous) {
11257 return EXINVAL;
11258 }
11259
11260 struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
11261 if (!w4a) {
11262 return EXINVAL;
11263 }
11264
11265 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
11266 xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_TYPE, w4a->type);
11267 xpc_dictionary_set_mach_send(reply2, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT, w4a->port);
11268 if (j->prog) {
11269 xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->prog);
11270 } else {
11271 xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->argv[0]);
11272 }
11273
11274 if (j->argv) {
11275 xpc_object_t xargv = xpc_array_create(NULL, 0);
11276
11277 size_t i = 0;
11278 for (i = 0; i < j->argc; i++) {
11279 if (j->argv[i]) {
11280 xpc_array_set_string(xargv, XPC_ARRAY_APPEND, j->argv[i]);
11281 }
11282 }
11283
11284 xpc_dictionary_set_value(reply2, XPC_PROCESS_ROUTINE_KEY_ARGV, xargv);
11285 xpc_release(xargv);
11286 }
11287
11288 *reply = reply2;
11289 return 0;
11290 }
11291
11292 static int
11293 xpc_process_service_kill(job_t j, xpc_object_t request, xpc_object_t *reply)
11294 {
11295 #if XPC_LPI_VERSION >= 20130426
11296 if (!j) {
11297 return ESRCH;
11298 }
11299
11300 jobmgr_t jm = _xpc_process_find_target_manager(j, XPC_SERVICE_TYPE_BUNDLED, j->p);
11301 if (!jm) {
11302 return ENOENT;
11303 }
11304
11305 const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
11306 if (!name) {
11307 return EINVAL;
11308 }
11309
11310 int64_t whichsig = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_SIGNAL);
11311 if (!whichsig) {
11312 return EINVAL;
11313 }
11314
11315 job_t j2kill = job_find(jm, name);
11316 if (!j2kill) {
11317 return ESRCH;
11318 }
11319
11320 if (j2kill->alias) {
11321 // Only allow for private instances to be killed.
11322 return EPERM;
11323 }
11324
11325 struct proc_bsdshortinfo proc;
11326 if (proc_pidinfo(j2kill->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
11327 if (errno != ESRCH) {
11328 (void)jobmgr_assumes_zero(root_jobmgr, errno);
11329 }
11330
11331 return errno;
11332 }
11333
11334 struct ldcred *ldc = runtime_get_caller_creds();
11335 if (proc.pbsi_uid != ldc->euid) {
11336 // Do not allow non-root to kill RoleAccount services running as a
11337 // different user.
11338 return EPERM;
11339 }
11340
11341 if (!j2kill->p) {
11342 return EALREADY;
11343 }
11344
11345 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
11346 if (!reply2) {
11347 return EINVAL;
11348 }
11349
11350 int error = 0;
11351 int ret = kill(j2kill->p, whichsig);
11352 if (ret) {
11353 error = errno;
11354 }
11355
11356 xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
11357 *reply = reply2;
11358 return 0;
11359 #else
11360 return ENOTSUP;
11361 #endif
11362 }
11363
bool
xpc_process_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	/* Demultiplex an incoming XPC "process" request on port p to the
	 * appropriate handler, based on the opcode in the request dictionary.
	 *
	 * Returns false only when the request carries no opcode (i.e. it is not
	 * one of ours); returns true whenever the request was consumed, even if
	 * the individual handler failed.
	 */
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	// Record the caller's audit token so handlers can consult
	// runtime_get_caller_creds() for authorization checks.
	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	// Resolve the job associated with the request port.
	job_t j = job_mig_intran(p);
	job_log(j, LOG_DEBUG, "Incoming XPC process request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_PROCESS_JETSAM_SET_BAND:
		error = xpc_process_set_jetsam_band(j, request, reply);
		break;
	case XPC_PROCESS_JETSAM_SET_MEMORY_LIMIT:
		error = xpc_process_set_jetsam_memory_limit(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_ATTACH:
		error = xpc_process_attach(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_DETACH:
		error = xpc_process_detach(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_GET_PROPERTIES:
		error = xpc_process_get_properties(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_KILL:
		error = xpc_process_service_kill(j, request, reply);
		break;
	default:
		job_log(j, LOG_ERR, "Bogus process opcode.");
		error = EDOM;
	}

	/* On handler failure, synthesize an error reply. Handlers that succeed
	 * (error == 0) have already populated *reply themselves. If creating the
	 * error reply fails, *reply is set to NULL and the caller sends nothing.
	 */
	if (error) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		if (reply2) {
			xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
		}

		*reply = reply2;
	}

	return true;
}
11415
11416 kern_return_t
11417 job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
11418 {
11419 struct ldcred *ldc = runtime_get_caller_creds();
11420 job_t otherj;
11421
11422 if (!j) {
11423 return BOOTSTRAP_NO_MEMORY;
11424 }
11425
11426 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
11427 return BOOTSTRAP_UNKNOWN_SERVICE;
11428 }
11429
11430 #if TARGET_OS_EMBEDDED
11431 bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
11432 #else
11433 bool allow_non_root_kickstart = false;
11434 #endif
11435
11436 if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
11437 return BOOTSTRAP_NOT_PRIVILEGED;
11438 }
11439
11440 #if HAVE_SANDBOX
11441 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
11442 return BOOTSTRAP_NOT_PRIVILEGED;
11443 }
11444 #endif
11445
11446 if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
11447 return BOOTSTRAP_SERVICE_ACTIVE;
11448 }
11449
11450 otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
11451 otherj = job_dispatch(otherj, true);
11452
11453 if (!job_assumes(j, otherj && otherj->p)) {
11454 // <rdar://problem/6787083> Clear this flag if we failed to start the job.
11455 otherj->stall_before_exec = false;
11456 return BOOTSTRAP_NO_MEMORY;
11457 }
11458
11459 *out_pid = otherj->p;
11460
11461 return 0;
11462 }
11463
11464 kern_return_t
11465 job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
11466 {
11467 launch_data_t jobdata = NULL;
11468 size_t data_offset = 0;
11469 struct ldcred *ldc = runtime_get_caller_creds();
11470 job_t jr;
11471
11472 if (!j) {
11473 return BOOTSTRAP_NO_MEMORY;
11474 }
11475
11476 if (unlikely(j->deny_job_creation)) {
11477 return BOOTSTRAP_NOT_PRIVILEGED;
11478 }
11479
11480 #if HAVE_SANDBOX
11481 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
11482 return BOOTSTRAP_NOT_PRIVILEGED;
11483 }
11484 #endif
11485
11486 if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
11487 job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
11488 return VPROC_ERR_TRY_PER_USER;
11489 }
11490
11491 if (!job_assumes(j, indataCnt != 0)) {
11492 return 1;
11493 }
11494
11495 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
11496 if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
11497 return 1;
11498 }
11499
11500 jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
11501 if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
11502 jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
11503 return 1;
11504 }
11505
11506 jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);
11507
11508 launch_data_t label = NULL;
11509 launch_data_t wait4debugger = NULL;
11510 if (!jr) {
11511 switch (errno) {
11512 case EEXIST:
11513 /* If EEXIST was returned, we know that there is a label string in
11514 * the dictionary. So we don't need to check the types here; that
11515 * has already been done.
11516 */
11517 label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
11518 jr = job_find(NULL, launch_data_get_string(label));
11519 if (job_assumes(j, jr != NULL) && !jr->p) {
11520 wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
11521 if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
11522 if (launch_data_get_bool(wait4debugger)) {
11523 /* If the job exists, we're going to kick-start it, but
11524 * we need to give the caller the opportunity to start
11525 * it suspended if it so desires. But this will only
11526 * take effect if the job isn't running.
11527 */
11528 jr->wait4debugger_oneshot = true;
11529 }
11530 }
11531 }
11532
11533 *outj = jr;
11534 return BOOTSTRAP_NAME_IN_USE;
11535 default:
11536 return BOOTSTRAP_NO_MEMORY;
11537 }
11538 }
11539
11540 if (pid1_magic) {
11541 jr->mach_uid = ldc->uid;
11542 }
11543
11544 // TODO: Consolidate the app and legacy_LS_job bits.
11545 jr->legacy_LS_job = true;
11546 jr->abandon_pg = true;
11547 jr->asport = asport;
11548 jr->app = true;
11549 uuid_clear(jr->expected_audit_uuid);
11550 jr = job_dispatch(jr, true);
11551
11552 if (!job_assumes(j, jr != NULL)) {
11553 job_remove(jr);
11554 return BOOTSTRAP_NO_MEMORY;
11555 }
11556
11557 if (!job_assumes(jr, jr->p)) {
11558 job_remove(jr);
11559 return BOOTSTRAP_NO_MEMORY;
11560 }
11561
11562 job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
11563 *outj = jr;
11564
11565 return BOOTSTRAP_SUCCESS;
11566 }
11567
kern_return_t
job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
{
	/* MIG entry point wrapping job_mig_spawn_internal(). Sets up an exit-port
	 * observer for the spawned job and, for freshly-started jobs, defers the
	 * MIG reply (MIG_NO_REPLY) until the child has exec(3)'d so that the
	 * caller can safely send it SIGCONT.
	 */
	job_t nj = NULL;
	kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
	if (likely(kr == KERN_SUCCESS)) {
		if (job_setup_exit_port(nj) != KERN_SUCCESS) {
			job_remove(nj);
			kr = BOOTSTRAP_NO_MEMORY;
		} else {
			/* Do not return until the job has called exec(3), thereby making it
			 * safe for the caller to send it SIGCONT.
			 *
			 * <rdar://problem/9042798>
			 */
			nj->spawn_reply_port = rp;
			kr = MIG_NO_REPLY;
		}
	} else if (kr == BOOTSTRAP_NAME_IN_USE) {
		// The internal routine set nj (*outj) on this path, so it is safe to
		// dereference: the job already existed.
		bool was_running = nj->p;
		if (job_dispatch(nj, true)) {
			if (!was_running) {
				job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");

				if (job_setup_exit_port(nj) == KERN_SUCCESS) {
					// Defer the reply until exec(3), as above.
					nj->spawn_reply_port = rp;
					kr = MIG_NO_REPLY;
				} else {
					kr = BOOTSTRAP_NO_MEMORY;
				}
			} else {
				// Already running: reply immediately with its PID, no observer.
				*obsvr_port = MACH_PORT_NULL;
				*child_pid = nj->p;
				kr = KERN_SUCCESS;
			}
		} else {
			job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
			kr = BOOTSTRAP_UNKNOWN_SERVICE;
		}
	}

	// The out-of-line request buffer is ours to free in every outcome.
	mig_deallocate(indata, indataCnt);
	return kr;
}
11612
11613 launch_data_t
11614 job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport __attribute__((unused)))
11615 {
11616 launch_data_t reply = NULL;
11617
11618 errno = ENOTSUP;
11619 if (launch_data_get_type(request) == LAUNCH_DATA_STRING) {
11620 if (strcmp(launch_data_get_string(request), LAUNCH_KEY_CHECKIN) == 0) {
11621 reply = job_export(j);
11622 job_checkin(j);
11623 }
11624 }
11625
11626 return reply;
11627 }
11628
11629 #define LAUNCHD_MAX_LEGACY_FDS 128
11630 #define countof(x) (sizeof((x)) / sizeof((x[0])))
11631
11632 kern_return_t
11633 job_mig_legacy_ipc_request(job_t j, vm_offset_t request,
11634 mach_msg_type_number_t requestCnt, mach_port_array_t request_fds,
11635 mach_msg_type_number_t request_fdsCnt, vm_offset_t *reply,
11636 mach_msg_type_number_t *replyCnt, mach_port_array_t *reply_fdps,
11637 mach_msg_type_number_t *reply_fdsCnt, mach_port_t asport)
11638 {
11639 if (!j) {
11640 return BOOTSTRAP_NO_MEMORY;
11641 }
11642
11643 /* TODO: Once we support actions other than checking in, we must check the
11644 * sandbox capabilities and EUID of the requestort.
11645 */
11646 size_t nout_fdps = 0;
11647 size_t nfds = request_fdsCnt / sizeof(request_fds[0]);
11648 if (nfds > LAUNCHD_MAX_LEGACY_FDS) {
11649 job_log(j, LOG_ERR, "Too many incoming descriptors: %lu", nfds);
11650 return BOOTSTRAP_NO_MEMORY;
11651 }
11652
11653 int in_fds[LAUNCHD_MAX_LEGACY_FDS];
11654 size_t i = 0;
11655 for (i = 0; i < nfds; i++) {
11656 in_fds[i] = fileport_makefd(request_fds[i]);
11657 if (in_fds[i] == -1) {
11658 job_log(j, LOG_ERR, "Bad descriptor passed in legacy IPC request at index: %lu", i);
11659 }
11660 }
11661
11662 // DON'T goto outbad before this point.
11663 *reply = 0;
11664 *reply_fdps = NULL;
11665 launch_data_t ldreply = NULL;
11666
11667 size_t dataoff = 0;
11668 size_t fdoff = 0;
11669 launch_data_t ldrequest = launch_data_unpack((void *)request, requestCnt, in_fds, nfds, &dataoff, &fdoff);
11670 if (!ldrequest) {
11671 job_log(j, LOG_ERR, "Invalid legacy IPC request passed.");
11672 goto out_bad;
11673 }
11674
11675 ldreply = job_do_legacy_ipc_request(j, ldrequest, asport);
11676 if (!ldreply) {
11677 ldreply = launch_data_new_errno(errno);
11678 if (!ldreply) {
11679 goto out_bad;
11680 }
11681 }
11682
11683 *replyCnt = 10 * 1024 * 1024;
11684 mig_allocate(reply, *replyCnt);
11685 if (!*reply) {
11686 goto out_bad;
11687 }
11688
11689 int out_fds[LAUNCHD_MAX_LEGACY_FDS];
11690 size_t nout_fds = 0;
11691 size_t sz = launch_data_pack(ldreply, (void *)*reply, *replyCnt, out_fds, &nout_fds);
11692 if (!sz) {
11693 job_log(j, LOG_ERR, "Could not pack legacy IPC reply.");
11694 goto out_bad;
11695 }
11696
11697 if (nout_fds) {
11698 if (nout_fds > 128) {
11699 job_log(j, LOG_ERR, "Too many outgoing descriptors: %lu", nout_fds);
11700 goto out_bad;
11701 }
11702
11703 *reply_fdsCnt = nout_fds * sizeof((*reply_fdps)[0]);
11704 mig_allocate((vm_address_t *)reply_fdps, *reply_fdsCnt);
11705 if (!*reply_fdps) {
11706 goto out_bad;
11707 }
11708
11709 for (i = 0; i < nout_fds; i++) {
11710 mach_port_t fp = MACH_PORT_NULL;
11711 /* Whatever. Worst case is that we insert MACH_PORT_NULL. Not a big
11712 * deal. Note, these get stuffed into an array whose disposition is
11713 * mach_port_move_send_t, so we don't have to worry about them after
11714 * returning.
11715 */
11716 if (fileport_makeport(out_fds[i], &fp) != 0) {
11717 job_log(j, LOG_ERR, "Could not pack response descriptor at index: %lu: %d: %s", i, errno, strerror(errno));
11718 }
11719 (*reply_fdps)[i] = fp;
11720 }
11721
11722 nout_fdps = nout_fds;
11723 } else {
11724 *reply_fdsCnt = 0;
11725 }
11726
11727 mig_deallocate(request, requestCnt);
11728 launch_data_free(ldreply);
11729 ldreply = NULL;
11730
11731 // Unused for now.
11732 (void)launchd_mport_deallocate(asport);
11733
11734 return BOOTSTRAP_SUCCESS;
11735
11736 out_bad:
11737 for (i = 0; i < nfds; i++) {
11738 (void)close(in_fds[i]);
11739 }
11740
11741 for (i = 0; i < nout_fds; i++) {
11742 (void)launchd_mport_deallocate((*reply_fdps)[i]);
11743 }
11744
11745 if (*reply) {
11746 mig_deallocate(*reply, *replyCnt);
11747 }
11748
11749 /* We should never hit this since the last goto out is in the case that
11750 * allocating this fails.
11751 */
11752 if (*reply_fdps) {
11753 mig_deallocate((vm_address_t)*reply_fdps, *reply_fdsCnt);
11754 }
11755
11756 if (ldreply) {
11757 launch_data_free(ldreply);
11758 }
11759
11760 return BOOTSTRAP_NO_MEMORY;
11761 }
11762
void
jobmgr_init(bool sflag)
{
	/* One-time bootstrap of the job-manager hierarchy: creates the root job
	 * manager and the XPC system domain. When running as PID 1, the root
	 * session type is "System"; otherwise "Background".
	 *
	 * sflag is forwarded to jobmgr_new() (single-user-boot flag).
	 */
	const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
	SLIST_INIT(&s_curious_jobs);
	LIST_INIT(&s_needing_sessions);

	os_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
	os_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
	_s_xpc_system_domain->req_asid = launchd_audit_session;
	_s_xpc_system_domain->req_asport = launchd_audit_port;
	_s_xpc_system_domain->shortdesc = "system";
	if (pid1_magic) {
		root_jobmgr->monitor_shutdown = true;
	}

	uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
	// Keep a descriptor open on an autofs "nowait" node (or /dev as fallback).
	s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
	if (likely(s_no_hang_fd == -1)) {
		/* NOTE(review): likely() here implies /dev/autofs_nowait is expected
		 * to be absent at this point (presumably autofs is not yet loaded) —
		 * confirm; on that path we watch /dev via kqueue instead.
		 */
		if (jobmgr_assumes_zero_p(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK))) != -1) {
			(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr));
		}
	}
	s_no_hang_fd = _fd(s_no_hang_fd);
}
11788
size_t
our_strhash(const char *s)
{
	/* djb2 string hash, as first reported by Dan Bernstein in comp.lang.c:
	 * hash = hash * 33 + c, seeded with 5381.
	 */
	size_t hash = 5381;

	for (; *s != '\0'; s++) {
		hash = (hash << 5) + hash + (size_t)*s; // hash * 33 + c
	}

	return hash;
}
11804
11805 size_t
11806 hash_label(const char *label)
11807 {
11808 return our_strhash(label) % LABEL_HASH_SIZE;
11809 }
11810
11811 size_t
11812 hash_ms(const char *msstr)
11813 {
11814 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
11815 }
11816
11817 bool
11818 waiting4removal_new(job_t j, mach_port_t rp)
11819 {
11820 struct waiting_for_removal *w4r;
11821
11822 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
11823 return false;
11824 }
11825
11826 w4r->reply_port = rp;
11827
11828 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
11829
11830 return true;
11831 }
11832
void
waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
{
	/* Deliver the deferred removal reply (status 0) to the waiting client,
	 * then unlink and free the tracking record registered by
	 * waiting4removal_new().
	 */
	(void)job_assumes_zero(j, job_mig_send_signal_reply(w4r->reply_port, 0));

	SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);

	free(w4r);
}
11842
size_t
get_kern_max_proc(void)
{
	/* Query the kernel's maximum process count via sysctl(KERN_MAXPROC).
	 * Falls back to 100 if the sysctl fails (failure is logged by the
	 * posix_assumes_zero() wrapper).
	 */
	int name[] = { CTL_KERN, KERN_MAXPROC };
	int maxproc = 100;
	size_t len = sizeof(maxproc);

	(void)posix_assumes_zero(sysctl(name, 2, &maxproc, &len, NULL, 0));

	return maxproc;
}
11854
11855 // See rdar://problem/6271234
11856 void
11857 eliminate_double_reboot(void)
11858 {
11859 if (unlikely(!pid1_magic)) {
11860 return;
11861 }
11862
11863 struct stat sb;
11864 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
11865 int result = -1;
11866
11867 if (unlikely(stat(argv[1], &sb) != -1)) {
11868 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
11869
11870 pid_t p = 0;
11871 result = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ);
11872 if (result == -1) {
11873 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script: %d: %s", result, strerror(result));
11874 goto out;
11875 }
11876
11877 int wstatus = 0;
11878 result = waitpid(p, &wstatus, 0);
11879 if (result == -1) {
11880 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to reap deferred install script: %d: %s", errno, strerror(errno));
11881 goto out;
11882 }
11883
11884 if (WIFEXITED(wstatus)) {
11885 if ((result = WEXITSTATUS(wstatus)) == 0) {
11886 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
11887 } else {
11888 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script failed with status: %d", WEXITSTATUS(wstatus));
11889 }
11890 } else {
11891 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Weirdness with install script: %d", wstatus);
11892 }
11893 }
11894 out:
11895 if (result == 0) {
11896 /* If the unlink(2) was to fail, it would be most likely fail with
11897 * EBUSY. All the other failure cases for unlink(2) don't apply when
11898 * we're running under PID 1 and have verified that the file exists.
11899 * Outside of someone deliberately messing with us (like if
11900 * /etc/rc.deferredinstall is actually a looping sym-link or a mount
11901 * point for a filesystem) and I/O errors, we should be good.
11902 */
11903 if (unlink(argv[1]) == -1) {
11904 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to remove deferred install script: %d: %s", errno, strerror(errno));
11905 }
11906 }
11907 }
11908
11909 void
11910 jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
11911 {
11912 job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
11913 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
11914 j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
11915
11916 #if XPC_LPI_VERSION >= 20120810
11917 if (j->jetsam_priority > XPC_JETSAM_PRIORITY_RESERVED && j->jetsam_priority < XPC_JETSAM_PRIORITY_RESERVED + XPC_JETSAM_BAND_LAST) {
11918 size_t band = j->jetsam_priority - XPC_JETSAM_PRIORITY_RESERVED;
11919 j->jetsam_priority = _launchd_priority_map[band - 1].priority;
11920 }
11921 #endif
11922 job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
11923 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
11924 j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
11925 job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
11926 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMITBACKGROUND) == 0) {
11927 j->jetsam_memory_limit_background = true;
11928 job_log(j, LOG_DEBUG, "Memory limit is for background state only");
11929 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
11930 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
11931 * You can't set this in a plist.
11932 */
11933 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMACTIVE) == 0) {
11934 // Ignore.
11935 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
11936 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
11937 * complain about it.
11938 */
11939 } else {
11940 job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
11941 }
11942
11943 if (unlikely(!j->jetsam_properties)) {
11944 j->jetsam_properties = true;
11945 }
11946 }
11947
11948 void
11949 job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data)
11950 {
11951 #if TARGET_OS_EMBEDDED
11952 j->jetsam_priority = _launchd_priority_map[band - 1].priority;
11953 j->jetsam_properties = true;
11954
11955 memorystatus_priority_properties_t mjp;
11956 mjp.priority = j->jetsam_priority;
11957 mjp.user_data = user_data;
11958
11959 size_t size = sizeof(mjp);
11960 int r = memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, j->p, 0, &mjp, size);
11961 if (r == -1 && errno != ESRCH) {
11962 (void)job_assumes_zero(j, errno);
11963 }
11964 #else
11965 #pragma unused(j, band, user_data)
11966 #endif
11967 }
11968
11969 void
11970 job_update_jetsam_memory_limit(job_t j, int32_t limit)
11971 {
11972 #if TARGET_OS_EMBEDDED
11973 j->jetsam_memlimit = limit;
11974 j->jetsam_properties = true;
11975
11976 int r = memorystatus_control(MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK, j->p, limit, NULL, 0);
11977 if (r == -1 && errno != ESRCH) {
11978 (void)job_assumes_zero(j, errno);
11979 }
11980 #else
11981 #pragma unused(j, limit)
11982 #endif
11983 }