/* apple/launchd — src/core.c (gitweb scrape header removed; kept as a comment
 * so the file remains valid C).
 */
1 /*
2 * @APPLE_APACHE_LICENSE_HEADER_START@
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * @APPLE_APACHE_LICENSE_HEADER_END@
17 */
18
19 #include "config.h"
20 #include "core.h"
21 #include "internal.h"
22 #include "helper.h"
23
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/boolean.h>
28 #include <mach/message.h>
29 #include <mach/notify.h>
30 #include <mach/mig_errors.h>
31 #include <mach/mach_traps.h>
32 #include <mach/mach_interface.h>
33 #include <mach/host_info.h>
34 #include <mach/mach_host.h>
35 #include <mach/exception.h>
36 #include <mach/host_reboot.h>
37 #include <sys/types.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
40 #include <sys/stat.h>
41 #include <sys/ucred.h>
42 #include <sys/fcntl.h>
43 #include <sys/un.h>
44 #include <sys/reboot.h>
45 #include <sys/wait.h>
46 #include <sys/sysctl.h>
47 #include <sys/sockio.h>
48 #include <sys/time.h>
49 #include <sys/resource.h>
50 #include <sys/ioctl.h>
51 #include <sys/mount.h>
52 #include <sys/pipe.h>
53 #include <sys/mman.h>
54 #include <sys/socket.h>
55 #include <sys/syscall.h>
56 #include <sys/kern_memorystatus.h>
57 #include <net/if.h>
58 #include <netinet/in.h>
59 #include <netinet/in_var.h>
60 #include <netinet6/nd6.h>
61 #include <bsm/libbsm.h>
62 #include <unistd.h>
63 #include <signal.h>
64 #include <errno.h>
65 #include <libgen.h>
66 #include <stdio.h>
67 #include <stdlib.h>
68 #include <stdarg.h>
69 #include <stdbool.h>
70 #include <paths.h>
71 #include <pwd.h>
72 #include <grp.h>
73 #include <ttyent.h>
74 #include <dlfcn.h>
75 #include <dirent.h>
76 #include <string.h>
77 #include <ctype.h>
78 #include <glob.h>
79 #include <System/sys/spawn.h>
80 #include <System/sys/spawn_internal.h>
81 #include <spawn.h>
82 #include <spawn_private.h>
83 #include <time.h>
84 #include <libinfo.h>
85 #include <os/assumes.h>
86 #include <xpc/launchd.h>
87 #include <asl.h>
88 #include <_simple.h>
89
90 #include <libproc.h>
91 #include <libproc_internal.h>
92 #include <System/sys/proc_info.h>
93 #include <malloc/malloc.h>
94 #include <pthread.h>
95 #if HAVE_SANDBOX
96 #define __APPLE_API_PRIVATE
97 #include <sandbox.h>
98 #endif
99 #if HAVE_QUARANTINE
100 #include <quarantine.h>
101 #endif
102 #if HAVE_RESPONSIBILITY
103 #include <responsibility.h>
104 #endif
105 #if !TARGET_OS_EMBEDDED
106 extern int gL1CacheEnabled;
107 #endif
108 #if HAVE_SYSTEMSTATS
109 #include <systemstats/systemstats.h>
110 #endif
111
112 #include "launch.h"
113 #include "launch_priv.h"
114 #include "launch_internal.h"
115 #include "bootstrap.h"
116 #include "bootstrap_priv.h"
117 #include "vproc.h"
118 #include "vproc_internal.h"
119
120 #include "reboot2.h"
121
122 #include "launchd.h"
123 #include "runtime.h"
124 #include "ipc.h"
125 #include "job.h"
126 #include "jobServer.h"
127 #include "job_reply.h"
128 #include "job_forward.h"
129 #include "mach_excServer.h"
130
#define POSIX_SPAWN_IOS_INTERACTIVE 0

#if TARGET_OS_EMBEDDED
/* Default memory highwatermark for daemons as set out in <rdar://problem/10307788>. */
#define DEFAULT_JETSAM_DAEMON_HIGHWATERMARK 5
#endif

/* LAUNCHD_DEFAULT_EXIT_TIMEOUT
 * If the job hasn't exited in the given number of seconds after sending
 * it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
 */
#define LAUNCHD_MIN_JOB_RUN_TIME 10
#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
#define LAUNCHD_SIGKILL_TIMER 4
#define LAUNCHD_LOG_FAILED_EXEC_FREQ 10

#define SHUTDOWN_LOG_DIR "/var/log/shutdown"

#define TAKE_SUBSET_NAME "TakeSubsetName"
#define TAKE_SUBSET_PID "TakeSubsetPID"
#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"

/* True iff v is a non-zero power of two (standard v & (v - 1) trick).
 * The argument is fully parenthesized so operand expressions with lower
 * precedence than `&`/`-` (e.g. IS_POWER_OF_TWO(a ? 4 : 3)) expand
 * correctly; the unparenthesized form mis-binds for such arguments.
 */
#define IS_POWER_OF_TWO(v) (!((v) & ((v) - 1)) && (v))

extern char **environ;
156
/* One client blocked waiting for a job to be removed: records the Mach reply
 * port to answer once removal completes. Hangs off job_s.removal_watchers.
 */
struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle;
	mach_port_t reply_port; // send-once right to reply on when j is gone
};

// Allocate a watcher for job j replying on rp; returns false on failure.
static bool waiting4removal_new(job_t j, mach_port_t rp);
// Unlink and free a watcher (replying/disposing of the port as appropriate).
static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
164
/* One Mach service advertised in a bootstrap namespace. launchd typically
 * holds the receive right and hands it to the owning job on demand.
 */
struct machservice {
	SLIST_ENTRY(machservice) sle;              // owning job's machservices list
	SLIST_ENTRY(machservice) special_port_sle; // global special_ports list (see below)
	LIST_ENTRY(machservice) name_hash_sle;     // jobmgr ms_hash bucket, keyed by name
	LIST_ENTRY(machservice) port_hash_sle;     // global port_hash bucket, keyed by port
	struct machservice *alias;                 // service this one aliases, or NULL
	job_t job;                                 // owning job
	unsigned int gen_num;                      // generation; presumably bumped on port reset — TODO confirm
	mach_port_name_t port;                     // the service port name in launchd's IPC space
	unsigned int
		isActive:1,            // receive right currently checked out to the job
		reset:1,               // recreate the port when the job exits
		recv:1,                // launchd holds the receive right
		hide:1,                // not returned by lookups from other jobs
		kUNCServer:1,          // NOTE(review): special UNC-server handling — semantics not visible here
		per_user_hack:1,       // compatibility hack for per-user launchd services
		debug_on_close:1,      // enter debugger when the port is destroyed
		per_pid:1,             // service is registered per-PID, not per-namespace
		delete_on_destruction:1,
		drain_one_on_crash:1,  // drain one queued message if the owner crashes
		drain_all_on_crash:1,  // drain every queued message if the owner crashes
		upfront:1,             // declared in the plist (vs. registered at runtime) — TODO confirm
		event_channel:1,       // this port is an XPC event channel
		recv_race_hack :1,
		/* Don't let the size of this field to get too small. It has to be large
		 * enough to represent the reasonable range of special port numbers.
		 */
		special_port_num:17;
	const char name[0]; // legacy zero-length trailing array (flexible-member idiom)
};
195
// HACK: This should be per jobmgr_t
static SLIST_HEAD(, machservice) special_ports;

/* Global port -> machservice reverse map, bucketed by MACH_PORT_INDEX. */
#define PORT_HASH_SIZE 32
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))

static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];

// Plist-walk callbacks for a job's MachServices dictionary.
static void machservice_setup(launch_data_t obj, const char *key, void *context);
static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
static void machservice_resetport(job_t j, struct machservice *ms);
static void machservice_stamp_port(job_t j, struct machservice *ms);
static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
// ignore/watch toggle whether launchd listens for messages on the port.
static void machservice_ignore(job_t j, struct machservice *ms);
static void machservice_watch(job_t j, struct machservice *ms);
static void machservice_delete(job_t j, struct machservice *, bool port_died);
static void machservice_request_notifications(struct machservice *);
// Simple accessors.
static mach_port_t machservice_port(struct machservice *);
static job_t machservice_job(struct machservice *);
static bool machservice_hidden(struct machservice *);
static bool machservice_active(struct machservice *);
static const char *machservice_name(struct machservice *);
static bootstrap_status_t machservice_status(struct machservice *);
// External linkage: also called from outside this file.
void machservice_drain_port(struct machservice *);
221
/* A named group of sockets (from a job's Sockets plist entry) that launchd
 * listens on and passes to the job at check-in.
 */
struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;
	int *fds;            // descriptors held open by launchd on the job's behalf
	unsigned int fd_cnt; // number of entries in fds
	union {
		const char name[0];      // read-only view of the group name
		char name_init[0];       // writable alias used only while constructing
	};
};

static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt);
static void socketgroup_delete(job_t j, struct socketgroup *sg);
// watch/ignore arm and disarm kevent read filters on the group's fds.
static void socketgroup_watch(job_t j, struct socketgroup *sg);
static void socketgroup_ignore(job_t j, struct socketgroup *sg);
static void socketgroup_callback(job_t j);
// Plist-walk callback for the Sockets dictionary.
static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
239
/* One StartCalendarInterval entry: a cron-like schedule for a job.
 * `when` presumably uses -1 fields as wildcards (cron semantics) — TODO
 * confirm against the cronemu helpers at the bottom of this file.
 */
struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle; // sorted_calendar_events membership
	SLIST_ENTRY(calendarinterval) sle;       // owning job's cal_intervals list
	job_t job;
	struct tm when;      // schedule pattern
	time_t when_next;    // absolute time of the next fire
};

// All intervals, globally ordered so the soonest fire is cheap to find.
static LIST_HEAD(, calendarinterval) sorted_calendar_events;

static bool calendarinterval_new(job_t j, struct tm *w);
static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
static void calendarinterval_callback(void);
static void calendarinterval_sanity_check(void);
257
/* One environment variable (key/value) from EnvironmentVariables. */
struct envitem {
	SLIST_ENTRY(envitem) sle;
	char *value;               // heap-allocated value string
	union {
		const char key[0];     // read-only view of the key
		char key_init[0];      // writable alias used only while constructing
	};
};

// `global` selects the job's global_env list rather than its env list.
static bool envitem_new(job_t j, const char *k, const char *v, bool global);
static void envitem_delete(job_t j, struct envitem *ei, bool global);
// Plist-walk callback for environment dictionaries.
static void envitem_setup(launch_data_t obj, const char *key, void *context);
270
/* One resource limit from the plist's (Hard)ResourceLimits dictionaries. */
struct limititem {
	SLIST_ENTRY(limititem) sle;
	struct rlimit lim;                              // values to apply via setrlimit(2)
	unsigned int setsoft:1, sethard:1, which:30;    // which = RLIMIT_* selector
};

static bool limititem_update(job_t j, int w, rlim_t r);
static void limititem_delete(job_t j, struct limititem *li);
// Plist-walk callback for the resource-limit dictionaries.
static void limititem_setup(launch_data_t obj, const char *key, void *context);
#if HAVE_SANDBOX
// Plist-walk callback for SandboxFlags.
static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
#endif

// Plist-walk callback for a job's Jetsam (memory-pressure) properties.
static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
285
/* Why a KeepAlive semaphore holds a job alive (or lets it idle). The values
 * pair up: each condition has a positive and negative form.
 */
typedef enum {
	NETWORK_UP = 1,      // KeepAlive.NetworkState == true
	NETWORK_DOWN,        // KeepAlive.NetworkState == false
	SUCCESSFUL_EXIT,     // KeepAlive.SuccessfulExit == true
	FAILED_EXIT,         // KeepAlive.SuccessfulExit == false
	CRASHED,             // KeepAlive.Crashed == true
	DID_NOT_CRASH,       // KeepAlive.Crashed == false
	OTHER_JOB_ENABLED,   // KeepAlive.OtherJobEnabled
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,    // KeepAlive.OtherJobActive
	OTHER_JOB_INACTIVE,
} semaphore_reason_t;
298
/* One KeepAlive criterion attached to a job. */
struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle;
	semaphore_reason_t why; // which condition this item expresses

	union {
		const char what[0];  // condition operand (e.g. the other job's label)
		char what_init[0];   // writable alias used only while constructing
	};
};

/* Context threaded through the dictionary walk: maps a boolean plist value
 * to the positive or negative reason.
 */
struct semaphoreitem_dict_iter_context {
	job_t j;
	semaphore_reason_t why_true;  // reason when the plist value is true
	semaphore_reason_t why_false; // reason when the plist value is false
};

static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
// Plist-walk callbacks for the KeepAlive dictionary.
static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
320
/* One externally-monitored event (LaunchEvents): a named condition owned by
 * an event system whose state transitions can start a job.
 */
struct externalevent {
	LIST_ENTRY(externalevent) sys_le; // membership in the owning eventsystem
	LIST_ENTRY(externalevent) job_le; // membership in the owning job's events list
	struct eventsystem *sys;

	uint64_t id;          // identifier unique within sys (from sys->curid)
	job_t job;            // job to dispatch when the event fires
	bool state;           // current state as reported by the event monitor
	bool wanted_state;    // state that should trigger the job
	bool internal;        // NOTE(review): appears to mark launchd-internal events — confirm
	xpc_object_t event;   // the event descriptor handed to the monitor
	xpc_object_t entitlements;

	char name[0];         // event name (flexible trailing array)
};

/* Context for walking a job's LaunchEvents sub-dictionaries. */
struct externalevent_iter_ctx {
	job_t j;
	struct eventsystem *sys;
};

static bool externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags);
static void externalevent_delete(struct externalevent *ee);
// Plist-walk callback for LaunchEvents.
static void externalevent_setup(launch_data_t obj, const char *key, void *context);
static struct externalevent *externalevent_find(const char *sysname, uint64_t id);
346
/* A named event stream (e.g. an IOKit or BSD notification domain) that
 * groups externalevents and allocates their per-system ids.
 */
struct eventsystem {
	LIST_ENTRY(eventsystem) global_le; // _s_event_systems membership
	LIST_HEAD(, externalevent) events; // events registered under this system
	uint64_t curid;                    // next event id to hand out
	char name[0];                      // stream name (flexible trailing array)
};

static struct eventsystem *eventsystem_new(const char *name);
static void eventsystem_delete(struct eventsystem *sys) __attribute__((unused));
// Plist-walk callback creating systems named by LaunchEvents keys.
static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
static struct eventsystem *eventsystem_find(const char *name);
// Poke the event-monitor job so it re-fetches the event list.
static void eventsystem_ping(void);
359
/* A debugger/tool waiting to attach to a process launchd will spawn. */
struct waiting4attach {
	LIST_ENTRY(waiting4attach) le;
	mach_port_t port;        // where to send the attach notification
	pid_t dest;              // target pid (or the job to be spawned)
	xpc_service_type_t type; // service type the waiter cares about
	char name[0];            // service/job name (flexible trailing array)
};

// Waiters registered against the whole launchd domain rather than one jobmgr.
static LIST_HEAD(, waiting4attach) _launchd_domain_waiters;

static struct waiting4attach *waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type);
static void waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a);
static struct waiting4attach *waiting4attach_find(jobmgr_t jm, job_t j);
373
/* Hash-table sizes for the per-jobmgr lookup tables below. */
#define ACTIVE_JOB_HASH_SIZE 32
/* Bucket a pid into active_jobs[]. The argument is parenthesized so an
 * expression argument (e.g. ACTIVE_JOB_HASH(a + b)) is bucketed as a whole;
 * without the parentheses, `&`/`%` would bind to only part of it.
 */
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? ((x) & (ACTIVE_JOB_HASH_SIZE - 1)) : ((x) % ACTIVE_JOB_HASH_SIZE))

#define MACHSERVICE_HASH_SIZE 37

#define LABEL_HASH_SIZE 53
/* A job manager: one bootstrap namespace / session (or XPC domain). Managers
 * form a tree rooted at the PID-1 manager via parentmgr/submgrs.
 */
struct jobmgr_s {
	// First member so the object doubles as a kq_callback (mirrors job_s).
	kq_callback kqjobmgr_callback;
	LIST_ENTRY(jobmgr_s) xpc_le;       // membership in an XPC-domain list
	SLIST_ENTRY(jobmgr_s) sle;         // membership in parent's submgrs
	SLIST_HEAD(, jobmgr_s) submgrs;    // child managers
	LIST_HEAD(, job_s) jobs;           // all jobs owned by this manager
	LIST_HEAD(, waiting4attach) attaches; // pending debugger attaches

	/* For legacy reasons, we keep all job labels that are imported in the root
	 * job manager's label hash. If a job manager is an XPC domain, then it gets
	 * its own label hash that is separate from the "global" one stored in the
	 * root job manager.
	 */
	LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];   // running jobs by pid
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE]; // services by name
	LIST_HEAD(, job_s) global_env_jobs; // jobs that export global environment
	mach_port_t jm_port;   // this namespace's bootstrap port
	mach_port_t req_port;  // port of whoever requested this manager's creation
	jobmgr_t parentmgr;    // NULL for the root manager
	int reboot_flags;      // flags to pass to reboot(2) at shutdown
	time_t shutdown_time;  // when shutdown of this manager began
	unsigned int global_on_demand_cnt; // jobs forcing peers to on-demand mode
	unsigned int normal_active_cnt;    // running non-anonymous jobs
	unsigned int
		shutting_down:1,
		session_initialized:1,
		killed_stray_jobs:1,
		monitor_shutdown:1,
		shutdown_jobs_dirtied:1,
		shutdown_jobs_cleaned:1,
		xpc_singleton:1;
	uint32_t properties;
	// XPC-specific properties.
	char owner[MAXCOMLEN];
	char *shortdesc;
	mach_port_t req_bsport;
	mach_port_t req_excport;
	mach_port_t req_asport;
	mach_port_t req_gui_asport;
	pid_t req_pid;
	uid_t req_euid;
	gid_t req_egid;
	au_asid_t req_asid;       // audit session of the requestor
	vm_offset_t req_ctx;      // requestor-supplied context blob
	mach_msg_type_number_t req_ctx_sz;
	mach_port_t req_rport;
	uint64_t req_uniqueid;
	kern_return_t error;
	union {
		const char name[0];   // manager/session name (flexible trailing array)
		char name_init[0];    // writable alias used only while constructing
	};
};
434
// Global XPC domains.
static jobmgr_t _s_xpc_system_domain;                  // the singleton system domain
static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;      // per-user domains, by uid
static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;   // per-audit-session domains

/* Assertion wrappers that log through jobmgr_log_bug with the manager as
 * context; *_zero variants expect 0, *_zero_p additionally captures errno.
 */
#define jobmgr_assumes(jm, e) os_assumes_ctx(jobmgr_log_bug, jm, (e))
#define jobmgr_assumes_zero(jm, e) os_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
#define jobmgr_assumes_zero_p(jm, e) posix_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
443
// Forward declarations: jobmgr_t lifecycle, lookup, dispatch, and logging.
static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
// Import one job plist into the manager; returns the new (or existing) job.
static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
static jobmgr_t jobmgr_parent(jobmgr_t jm);
static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
static bool jobmgr_label_test(jobmgr_t jm, const char *str);
static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
static void jobmgr_remove(jobmgr_t jm);
static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
static job_t managed_job(pid_t p);
static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
static void job_export_all2(jobmgr_t jm, launch_data_t where);
// kevent callback for manager-level events.
static void jobmgr_callback(void *obj, struct kevent *kev);
static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static void jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children);
// static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static bool jobmgr_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);

/* Sentinel "labels" passed to job_new() to request an auto-generated label.
 * They are invalid pointers (~0, ~1, ~2) that are compared, never dereferenced.
 */
#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
#define AUTO_PICK_XPC_LABEL (const char *)(~2)
478
/* List node recording one per-user launchd job that this job has suspended. */
struct suspended_peruser {
	LIST_ENTRY(suspended_peruser) sle;
	job_t j; // the suspended per-user launchd job
};
483
/* A single launchd job: everything known about one managed (or anonymous)
 * process — its plist configuration, runtime state, Mach ports, and the many
 * lists it participates in. Referred to everywhere via the job_t typedef.
 */
struct job_s {
	// MUST be first element of this structure.
	kq_callback kqjob_callback;
	LIST_ENTRY(job_s) sle;                  // owning jobmgr's jobs list
	LIST_ENTRY(job_s) subjob_sle;           // parent job's subjobs list
	LIST_ENTRY(job_s) needing_session_sle;  // s_needing_sessions membership
	LIST_ENTRY(job_s) jetsam_sle;           // jetsam-priority list membership
	LIST_ENTRY(job_s) pid_hash_sle;         // jobmgr active_jobs bucket
	LIST_ENTRY(job_s) global_pid_hash_sle;  // managed_actives bucket
	LIST_ENTRY(job_s) label_hash_sle;       // jobmgr label_hash bucket
	LIST_ENTRY(job_s) global_env_sle;       // jobmgr global_env_jobs membership
	SLIST_ENTRY(job_s) curious_jobs_sle;    // s_curious_jobs membership
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;
	LIST_HEAD(, job_s) subjobs;             // per-instance sub-jobs
	LIST_HEAD(, externalevent) events;      // LaunchEvents registered for this job
	SLIST_HEAD(, socketgroup) sockets;          // Sockets
	SLIST_HEAD(, calendarinterval) cal_intervals; // StartCalendarInterval
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;                  // EnvironmentVariables
	SLIST_HEAD(, limititem) limits;             // (Hard)ResourceLimits
	SLIST_HEAD(, machservice) machservices;     // MachServices
	SLIST_HEAD(, semaphoreitem) semaphores;     // KeepAlive criteria
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
	struct waiting4attach *w4a;   // pending debugger attach, if any
	job_t original;               // for an alias job: the job it aliases
	job_t alias;                  // alias of this job in another jobmgr, if any
	cpu_type_t *j_binpref;        // preferred binary architectures (posix_spawn)
	size_t j_binpref_cnt;
	mach_port_t j_port;           // the job's bootstrap port
	mach_port_t exit_status_dest;
	mach_port_t exit_status_port;
	mach_port_t spawn_reply_port;
	uid_t mach_uid;               // uid for legacy mach_init per-uid lookups
	jobmgr_t mgr;                 // owning job manager
	size_t argc;
	char **argv;                  // ProgramArguments
	char *prog;                   // Program
	char *rootdir;                // RootDirectory (chroot)
	char *workingdir;             // WorkingDirectory
	char *username;               // UserName
	char *groupname;              // GroupName
	char *stdinpath;              // StandardInPath
	char *stdoutpath;             // StandardOutPath
	char *stderrpath;             // StandardErrorPath
	char *alt_exc_handler;        // alternate Mach exception handler label
	char *cfbundleidentifier;
	unsigned int nruns;           // number of times the job has run
	uint64_t trt;                 // NOTE(review): accumulated run time? — confirm at use sites
#if HAVE_SANDBOX
	char *seatbelt_profile;       // SandboxProfile
	uint64_t seatbelt_flags;      // SandboxFlags
	char *container_identifier;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p;                      // current pid, 0 when not running
	uint64_t uniqueid;
	int last_exit_status;         // wait(2)-style status from the last run
	int stdin_fd;
	int fork_fd;                  // pipe used to stall the child before exec
	int nice;                     // Nice
	uint32_t pstype;              // posix_spawn process type
	uint32_t psproctype;
	int32_t jetsam_priority;
	int32_t jetsam_memlimit;
	int32_t main_thread_priority;
	uint32_t timeout;             // TimeOut (idle timeout, seconds)
	uint32_t exit_timeout;        // ExitTimeOut (SIGTERM -> SIGKILL, seconds)
	uint64_t sent_signal_time;    // when SIGTERM was sent (for exit_timeout)
	uint64_t start_time;          // when the job last started
	uint32_t min_run_time;        // throttle interval between respawns
	bool unthrottle;              // skip the respawn throttle once
	uint32_t start_interval;      // StartInterval (seconds)
	uint32_t peruser_suspend_count;
	uuid_t instance_id;           // identifier for a MultipleInstances instance
	mode_t mask;                  // Umask
	mach_port_t asport;           // audit session port
	au_asid_t asid;               // audit session id
	uuid_t expected_audit_uuid;
	bool
	// man launchd.plist --> Debug
	debug:1,
	// man launchd.plist --> KeepAlive == false
	ondemand:1,
	// man launchd.plist --> SessionCreate
	session_create:1,
	// man launchd.plist --> LowPriorityIO
	low_pri_io:1,
	// man launchd.plist --> InitGroups
	no_init_groups:1,
	/* A legacy mach_init concept to make bootstrap_create_server/service()
	 * work
	 */
	priv_port_has_senders:1,
	// A hack during job importing
	importing_global_env:1,
	// A hack during job importing
	importing_hard_limits:1,
	// man launchd.plist --> Umask
	setmask:1,
	// A process that launchd knows about but doesn't manage.
	anonymous:1,
	// A legacy mach_init concept to detect sick jobs
	checkedin:1,
	// A job created via bootstrap_create_server()
	legacy_mach_job:1,
	// A job created via spawn_via_launchd()
	legacy_LS_job:1,
	// A legacy job that wants inetd compatible semantics
	inetcompat:1,
	// A twist on inetd compatibility
	inetcompat_wait:1,
	/* An event fired and the job should start, but not necessarily right
	 * away.
	 */
	start_pending:1,
	// man launchd.plist --> EnableGlobbing
	globargv:1,
	// man launchd.plist --> WaitForDebugger
	wait4debugger:1,
	// One-shot WaitForDebugger.
	wait4debugger_oneshot:1,
	// MachExceptionHandler == true
	internal_exc_handler:1,
	// A hack to support an option of spawn_via_launchd()
	stall_before_exec:1,
	/* man launchd.plist --> LaunchOnlyOnce.
	 *
	 * Note: <rdar://problem/5465184> Rename this to "HopefullyNeverExits".
	 */
	only_once:1,
	/* Make job_ignore() / job_watch() work. If these calls were balanced,
	 * then this wouldn't be necessarily.
	 */
	currently_ignored:1,
	/* A job that forced all other jobs to be temporarily launch-on-
	 * demand
	 */
	forced_peers_to_demand_mode:1,
	// man launchd.plist --> Nice
	setnice:1,
	/* A job was asked to be unloaded/removed while running, we'll remove it
	 * after it exits.
	 */
	removal_pending:1,
	// job_kill() was called.
	sent_sigkill:1,
	// Enter the kernel debugger before killing a job.
	debug_before_kill:1,
	// A hack that launchd+launchctl use during jobmgr_t creation.
	weird_bootstrap:1,
	// man launchd.plist --> StartOnMount
	start_on_mount:1,
	// This job is a per-user launchd managed by the PID 1 launchd.
	per_user:1,
	// A job thoroughly confused launchd. We need to unload it ASAP.
	unload_at_mig_return:1,
	// man launchd.plist --> AbandonProcessGroup
	abandon_pg:1,
	/* During shutdown, do not send SIGTERM to stray processes in the
	 * process group of this job.
	 */
	ignore_pg_at_shutdown:1,
	/* Don't let this job create new 'job_t' objects in launchd. Has been
	 * seriously overloaded for the purposes of sandboxing.
	 */
	deny_job_creation:1,
	// man launchd.plist --> EnableTransactions
	enable_transactions:1,
	// The job was sent SIGKILL because it was clean.
	clean_kill:1,
	// The job has an OtherJobEnabled KeepAlive criterion.
	nosy:1,
	// The job exited due to a crash.
	crashed:1,
	// We've received NOTE_EXIT for the job and reaped it.
	reaped:1,
	// job_stop() was called.
	stopped:1,
	/* The job is to be kept alive continuously, but it must first get an
	 * initial kick off.
	 */
	needs_kickoff:1,
	// The job is a bootstrapper.
	is_bootstrapper:1,
	// The job owns the console.
	has_console:1,
	/* The job runs as a non-root user on embedded but has select privileges
	 * of the root user. This is SpringBoard.
	 */
	embedded_god:1,
	// The job is responsible for drawing the home screen on embedded.
	embedded_home:1,
	// We got NOTE_EXEC for the job.
	did_exec:1,
	// The job is an XPC service, and XPC proxy successfully exec(3)ed.
	xpcproxy_did_exec:1,
	// The (anonymous) job called vprocmgr_switch_to_session().
	holds_ref:1,
	// The job has Jetsam limits in place.
	jetsam_properties:1,
	// The job's Jetsam memory limits should only be applied in the background
	jetsam_memory_limit_background:1,
	/* This job was created as the result of a look up of a service provided
	 * by a MultipleInstance job.
	 */
	dedicated_instance:1,
	// The job supports creating additional instances of itself.
	multiple_instances:1,
	/* The sub-job was already removed from the parent's list of
	 * sub-jobs.
	 */
	former_subjob:1,
	/* The job is responsible for monitoring external events for this
	 * launchd.
	 */
	event_monitor:1,
	// The event monitor job has retrieved the initial list of events.
	event_monitor_ready2signal:1,
	// A lame hack.
	removing:1,
	// Disable ASLR when launching this job.
	disable_aslr:1,
	// The job is an XPC Service.
	xpc_service:1,
	// The job is the Performance team's shutdown monitor.
	shutdown_monitor:1,
	// We should open a transaction for the job when shutdown begins.
	dirty_at_shutdown:1,
	/* The job was sent SIGKILL but did not exit in a timely fashion,
	 * indicating a kernel bug.
	 */
	workaround9359725:1,
	// The job is the XPC domain bootstrapper.
	xpc_bootstrapper:1,
	// The job is an app (on either iOS or OS X) and has different resource
	// limitations.
	app:1,
	// FairPlay decryption failed on the job. This should only ever happen
	// to apps.
	fpfail:1,
	// The job failed to exec(3) for reasons that may be transient, so we're
	// waiting for UserEventAgent to tell us when it's okay to try spawning
	// again (i.e. when the executable path appears, when the UID appears,
	// etc.).
	waiting4ok:1,
	// The job exited due to memory pressure.
	jettisoned:1,
	// The job supports idle-exit.
	idle_exit:1,
	// The job was implicitly reaped by the kernel.
	implicit_reap:1,
	// The job is a system app on embedded.
	system_app :1,
	// The job should be attached to the GUI audit session.
	joins_gui_session :1,
	// The job's background I/O should be deprioritized.
	low_priority_background_io :1;

	const char label[0]; // the job's unique label (flexible trailing array)
};
745
// String hash helpers for the label and machservice tables.
static size_t hash_label(const char *label) __attribute__((pure));
static size_t hash_ms(const char *msstr) __attribute__((pure));
// Jobs whose OtherJob* KeepAlive criteria need re-evaluation.
static SLIST_HEAD(, job_s) s_curious_jobs;
// Global pid -> job map across all managers (see global_pid_hash_sle).
static LIST_HEAD(, job_s) managed_actives[ACTIVE_JOB_HASH_SIZE];

/* Assertion wrappers that log through job_log_bug with the job as context;
 * *_zero variants expect 0, *_zero_p additionally captures errno.
 */
#define job_assumes(j, e) os_assumes_ctx(job_log_bug, j, (e))
#define job_assumes_zero(j, e) os_assumes_zero_ctx(job_log_bug, j, (e))
#define job_assumes_zero_p(j, e) posix_assumes_zero_ctx(job_log_bug, j, (e))
754
// Forward declarations: job_t import, lifecycle, spawning, and logging.

// Plist-import walkers (one per launch_data type).
static void job_import_keys(launch_data_t obj, const char *key, void *context);
static void job_import_bool(job_t j, const char *key, bool value);
static void job_import_string(job_t j, const char *key, const char *value);
static void job_import_integer(job_t j, const char *key, long long value);
static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
static void job_import_array(job_t j, const char *key, launch_data_t value);
static void job_import_opaque(job_t j, const char *key, launch_data_t value);
static bool job_set_global_on_demand(job_t j, bool val);
// Returns a human-readable reason the job is active, or NULL if idle.
static const char *job_active(job_t j);
static void job_watch(job_t j);
static void job_ignore(job_t j);
static void job_reap(job_t j);
static bool job_useless(job_t j);
static bool job_keepalive(job_t j);
static void job_dispatch_curious_jobs(job_t j);
// Spawning and post-fork setup.
static void job_start(job_t j);
static void job_start_child(job_t j) __attribute__((noreturn));
static void job_setup_attributes(job_t j);
static bool job_setup_machport(job_t j);
static kern_return_t job_setup_exit_port(job_t j);
static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
static void job_postfork_become_user(job_t j);
static void job_postfork_test_user(job_t j);
static void job_log_pids_with_weird_uids(job_t j);
static void job_setup_exception_port(job_t j, task_t target_task);
// kevent callbacks for job-level events.
static void job_callback(void *obj, struct kevent *kev);
static void job_callback_proc(job_t j, struct kevent *kev);
static void job_callback_timer(job_t j, void *ident);
static void job_callback_read(job_t j, int ident);
static void job_log_stray_pg(job_t j);
static void job_log_children_without_exec(job_t j);
// Constructors for the various job flavors.
static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
static job_t job_new_alias(jobmgr_t jm, job_t src);
static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new_subjob(job_t j, uuid_t identifier);
static void job_kill(job_t j);
static void job_uncork_fork(job_t j);
// Logging helpers.
static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static bool job_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);
static void job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status);
#if HAVE_SYSTEMSTATS
static void job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status);
#endif
static void job_set_exception_port(job_t j, mach_port_t port);
static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
static void job_open_shutdown_transaction(job_t ji);
static void job_close_shutdown_transaction(job_t ji);
static launch_data_t job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport);
static void job_setup_per_user_directory(job_t j, uid_t uid, const char *path);
static void job_setup_per_user_directories(job_t j, uid_t uid, const char *label);
static void job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data);
static void job_update_jetsam_memory_limit(job_t j, int32_t limit);

#if TARGET_OS_EMBEDDED
static bool job_import_defaults(launch_data_t pload);
#endif
813
/* Translation table from XPC jetsam bands to kernel jetsam priorities.
 * Note: AUDIO and ACCESSORY bands intentionally map to the same priority.
 */
static struct priority_properties_t {
	long long band;  // XPC_JETSAM_BAND_* value
	int priority;    // corresponding JETSAM_PRIORITY_* value
} _launchd_priority_map[] = {
	{ XPC_JETSAM_BAND_SUSPENDED, JETSAM_PRIORITY_IDLE },
	{ XPC_JETSAM_BAND_BACKGROUND_OPPORTUNISTIC, JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC },
	{ XPC_JETSAM_BAND_BACKGROUND, JETSAM_PRIORITY_BACKGROUND },
	{ XPC_JETSAM_BAND_MAIL, JETSAM_PRIORITY_MAIL },
	{ XPC_JETSAM_BAND_PHONE, JETSAM_PRIORITY_PHONE },
	{ XPC_JETSAM_BAND_UI_SUPPORT, JETSAM_PRIORITY_UI_SUPPORT },
	{ XPC_JETSAM_BAND_FOREGROUND_SUPPORT, JETSAM_PRIORITY_FOREGROUND_SUPPORT },
	{ XPC_JETSAM_BAND_FOREGROUND, JETSAM_PRIORITY_FOREGROUND },
	{ XPC_JETSAM_BAND_AUDIO, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
	{ XPC_JETSAM_BAND_ACCESSORY, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
	{ XPC_JETSAM_BAND_CRITICAL, JETSAM_PRIORITY_CRITICAL },
	{ XPC_JETSAM_BAND_TELEPHONY, JETSAM_PRIORITY_TELEPHONY },
};
831
/* Maps ResourceLimits plist keys to their setrlimit(2) RLIMIT_* selectors. */
static const struct {
	const char *key; // LAUNCH_JOBKEY_RESOURCELIMIT_* plist key
	int val;         // matching RLIMIT_* constant
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
846
/* Cron-schedule emulation used by StartCalendarInterval: compute the next
 * absolute fire time for a (month, day, hour, minute) pattern; presumably
 * -1 acts as a wildcard, matching cron semantics — TODO confirm at the
 * definitions.
 */
static time_t cronemu(int mon, int mday, int hour, int min);
static time_t cronemu_wday(int wday, int hour, int min);
static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
static bool cronemu_hour(struct tm *wtm, int hour, int min);
static bool cronemu_min(struct tm *wtm, int min);

// miscellaneous file local functions
static size_t get_kern_max_proc(void);
// Split a mach_init-style command string into an argv vector.
static char **mach_cmd2argv(const char *string);
static size_t our_strhash(const char *s) __attribute__((pure));

// External linkage: also called from outside this file.
void eliminate_double_reboot(void);
860
861 #pragma mark XPC Domain Forward Declarations
862 static job_t _xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
863 static int _xpc_domain_import_services(job_t j, launch_data_t services);
864
865 #pragma mark XPC Event Forward Declarations
866 static int xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms);
867 static int xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply);
868 static int xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply);
869 static int xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply);
870 static int xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
871 static int xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply);
872 static int xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
873 static int xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply);
874
875 #pragma mark XPC Process Forward Declarations
876 static int xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply);
877 static int xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply);
878
879 // file local globals
880 static job_t _launchd_embedded_god = NULL;
881 static job_t _launchd_embedded_home = NULL;
882 static size_t total_children;
883 static size_t total_anon_children;
884 static mach_port_t the_exception_server;
885 static job_t workaround_5477111;
886 static LIST_HEAD(, job_s) s_needing_sessions;
887 static LIST_HEAD(, eventsystem) _s_event_systems;
888 static struct eventsystem *_launchd_support_system;
889 static job_t _launchd_event_monitor;
890 static job_t _launchd_xpc_bootstrapper;
891 static job_t _launchd_shutdown_monitor;
892
893 #if TARGET_OS_EMBEDDED
894 static xpc_object_t _launchd_defaults_cache;
895
896 mach_port_t launchd_audit_port = MACH_PORT_DEAD;
897 pid_t launchd_audit_session = 0;
898 #else
899 mach_port_t launchd_audit_port = MACH_PORT_NULL;
900 au_asid_t launchd_audit_session = AU_DEFAUDITSID;
901 #endif
902
903 static int s_no_hang_fd = -1;
904
905 // process wide globals
906 mach_port_t inherited_bootstrap_port;
907 jobmgr_t root_jobmgr;
908 bool launchd_shutdown_debugging = false;
909 bool launchd_verbose_boot = false;
910 bool launchd_embedded_handofgod = false;
911 bool launchd_runtime_busy_time = false;
912
913 void
914 job_ignore(job_t j)
915 {
916 struct socketgroup *sg;
917 struct machservice *ms;
918
919 if (j->currently_ignored) {
920 return;
921 }
922
923 job_log(j, LOG_DEBUG, "Ignoring...");
924
925 j->currently_ignored = true;
926
927 SLIST_FOREACH(sg, &j->sockets, sle) {
928 socketgroup_ignore(j, sg);
929 }
930
931 SLIST_FOREACH(ms, &j->machservices, sle) {
932 machservice_ignore(j, ms);
933 }
934 }
935
936 void
937 job_watch(job_t j)
938 {
939 struct socketgroup *sg;
940 struct machservice *ms;
941
942 if (!j->currently_ignored) {
943 return;
944 }
945
946 job_log(j, LOG_DEBUG, "Watching...");
947
948 j->currently_ignored = false;
949
950 SLIST_FOREACH(sg, &j->sockets, sle) {
951 socketgroup_watch(j, sg);
952 }
953
954 SLIST_FOREACH(ms, &j->machservices, sle) {
955 machservice_watch(j, ms);
956 }
957 }
958
959 void
960 job_stop(job_t j)
961 {
962 int sig;
963
964 if (unlikely(!j->p || j->stopped || j->anonymous)) {
965 return;
966 }
967
968 #if TARGET_OS_EMBEDDED
969 if (launchd_embedded_handofgod && _launchd_embedded_god) {
970 if (!_launchd_embedded_god->username || !j->username) {
971 errno = EPERM;
972 return;
973 }
974
975 if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
976 errno = EPERM;
977 return;
978 }
979 } else if (launchd_embedded_handofgod) {
980 errno = EINVAL;
981 return;
982 }
983 #endif
984
985 j->sent_signal_time = runtime_get_opaque_time();
986
987 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Stopping job...");
988
989 int error = -1;
990 error = proc_terminate(j->p, &sig);
991 if (error) {
992 job_log(j, LOG_ERR | LOG_CONSOLE, "Could not terminate job: %d: %s", error, strerror(error));
993 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Using fallback option to terminate job...");
994 error = kill2(j->p, SIGTERM);
995 if (error) {
996 job_log(j, LOG_ERR, "Could not signal job: %d: %s", error, strerror(error));
997 } else {
998 sig = SIGTERM;
999 }
1000 }
1001
1002 if (!error) {
1003 switch (sig) {
1004 case SIGKILL:
1005 j->sent_sigkill = true;
1006 j->clean_kill = true;
1007
1008 /* We cannot effectively simulate an exit for jobs during the course
1009 * of a normal run. Even if we pretend that the job exited, we will
1010 * still not have gotten the receive rights associated with the
1011 * job's MachServices back, so we cannot safely respawn it.
1012 */
1013 if (j->mgr->shutting_down) {
1014 error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j);
1015 (void)job_assumes_zero_p(j, error);
1016 }
1017
1018 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sent job SIGKILL.");
1019 break;
1020 case SIGTERM:
1021 if (j->exit_timeout) {
1022 error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j);
1023 (void)job_assumes_zero_p(j, error);
1024 } else {
1025 job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
1026 }
1027 job_log(j, LOG_DEBUG, "Sent job SIGTERM.");
1028 break;
1029 default:
1030 job_log(j, LOG_ERR | LOG_CONSOLE, "Job was sent unexpected signal: %d: %s", sig, strsignal(sig));
1031 break;
1032 }
1033 }
1034
1035 j->stopped = true;
1036 }
1037
/* Serialize a job into a launch_data dictionary for the legacy launchctl
 * IPC protocol. Only keys that are meaningful for this job are emitted.
 * Returns NULL only if the top-level dictionary cannot be allocated;
 * individual entries are best-effort and silently skipped on allocation
 * failure.
 */
launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}

	// FairPlay decryption failures are reported via a sentinel exit status.
	long long status = j->last_exit_status;
	if (j->fpfail) {
		status = LAUNCH_EXITSTATUS_FAIRPLAY_FAIL;
	}
	if ((tmp = launch_data_new_integer(status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}

	// PID is only present while the job has a running process.
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->enable_transactions && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	// Sockets: a dictionary of socket-group name -> array of fds.
	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		unsigned int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	// MachServices: names only; actual ports are never exported, so
	// MACH_PORT_NULL placeholders are used.
	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		// tmp3 is lazily allocated to hold per-PID MachServices, if any.
		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
1157
1158 static void
1159 jobmgr_log_active_jobs(jobmgr_t jm)
1160 {
1161 const char *why_active;
1162 jobmgr_t jmi;
1163 job_t ji;
1164
1165 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
1166 jobmgr_log_active_jobs(jmi);
1167 }
1168
1169 int level = LOG_DEBUG;
1170 if (pid1_magic) {
1171 level |= LOG_CONSOLE;
1172 }
1173
1174 LIST_FOREACH(ji, &jm->jobs, sle) {
1175 if ((why_active = job_active(ji))) {
1176 if (ji->p != 1) {
1177 job_log(ji, level, "%s", why_active);
1178
1179 uint32_t flags = 0;
1180 (void)proc_get_dirty(ji->p, &flags);
1181 if (!(flags & PROC_DIRTY_TRACKED)) {
1182 continue;
1183 }
1184
1185 char *dirty = "clean";
1186 if (flags & PROC_DIRTY_IS_DIRTY) {
1187 dirty = "dirty";
1188 }
1189
1190 char *idle_exit = "idle-exit unsupported";
1191 if (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT) {
1192 idle_exit = "idle-exit supported";
1193 }
1194
1195 job_log(ji, level, "Killability: %s/%s", dirty, idle_exit);
1196 }
1197 }
1198 }
1199 }
1200
1201 static void
1202 jobmgr_still_alive_with_check(jobmgr_t jm)
1203 {
1204 int level = LOG_DEBUG;
1205 if (pid1_magic) {
1206 level |= LOG_CONSOLE;
1207 }
1208
1209 jobmgr_log(jm, level, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
1210 jobmgr_log_active_jobs(jm);
1211 launchd_log_push();
1212 }
1213
/* Begin shutting down a job manager and, recursively, all of its
 * submanagers. Records and logs the shutdown start time, spawns the
 * shutdown monitor when running as PID 1, and arms a repeating 5-second
 * timer on the root manager to drive periodic progress checks. Returns
 * the result of an immediate garbage-collection pass.
 */
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	// asctime_r(3) needs a buffer of at least 26 bytes.
	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim the new line that asctime_r(3) puts there for some reason.
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	jm->shutting_down = true;

	// SAFE variant: submanagers may unlink themselves as they collapse.
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (!jm->parentmgr) {
		if (pid1_magic) {
			// Spawn the shutdown monitor.
			if (_launchd_shutdown_monitor && !_launchd_shutdown_monitor->p) {
				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
				job_dispatch(_launchd_shutdown_monitor, true);
			}
		}

		// Arm the periodic "still alive" timer on the root manager.
		(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));
	}

	return jobmgr_do_garbage_collection(jm);
}
1256
/* Tear down a job manager: recursively remove all submanagers, remove all
 * jobs, delete pending attach waiters, release every Mach port the
 * manager holds, and log shutdown timing. If this is the root manager of
 * a PID-1 launchd, this function ends in reboot(2); for a non-root,
 * non-PID-1 launchd it calls exit(3). Otherwise the manager unlinks
 * itself from its parent and is freed.
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!SLIST_EMPTY(&jm->submgrs)) {
		size_t cnt = 0;
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
			cnt++;
		}

		// NOTE(review): counts removed submanagers and asserts the count is
		// zero via the assumes macro — this will log whenever submanagers
		// existed. Presumably intentional as a diagnostic; confirm.
		(void)jobmgr_assumes_zero(jm, cnt);
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && ji->p != 0) {
			job_log(ji, LOG_ERR, "Job is still active at job manager teardown.");
			// Clear the PID so job_remove() does not defer removal.
			ji->p = 0;
		}

		job_remove(ji);
	}

	struct waiting4attach *w4ai = NULL;
	while ((w4ai = LIST_FIRST(&jm->attaches))) {
		waiting4attach_delete(jm, w4ai);
	}

	// Release the manager's Mach ports: requestor port, our receive right,
	// bootstrap/exception/audit-session ports.
	if (jm->req_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_port));
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_close_recv(jm->jm_port));
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_bsport));
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_excport));
	}
	if (MACH_PORT_VALID(jm->req_asport)) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_asport));
	}
	if (jm->req_rport) {
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			(void)jobmgr_assumes_zero(jm, kr);
		}
	}
	if (jm->req_ctx) {
		(void)jobmgr_assumes_zero(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz));
	}

	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim asctime_r(3)'s trailing newline.
	date[24] = 0;

	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);

		// Hack for the guest user so that its stuff doesn't persist.
		//
		// <rdar://problem/14527875>
		if (strcmp(jm->name, VPROCMGR_SESSION_AQUA) == 0 && getuid() == 201) {
			raise(SIGTERM);
		}
	} else if (pid1_magic) {
		// Root manager of PID 1: this is the end of userspace shutdown.
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		launchd_closelog();
		(void)jobmgr_assumes_zero_p(jm, reboot(jm->reboot_flags));
	} else {
		// A per-user/session launchd simply exits.
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		launchd_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
1359
/* Remove a job from its manager and free it. For jobs with a live process
 * this defers removal until the process exits (after requesting a stop).
 * Aliases take a short path that only deletes MachServices and unlinks.
 * Otherwise: closes IPC, deletes all attached resources (sockets,
 * calendar intervals, env items, limits, MachServices, semaphores,
 * watchers, events), clears distinguished-job globals, tears down
 * kevents and ports, removes subjobs, and finally frees the job.
 * The teardown order here is significant; do not reorder casually.
 */
void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

	if (j->alias) {
		/* HACK: Egregious code duplication. But as with machservice_delete(),
		 * job aliases can't (and shouldn't) have any complex behaviors
		 * associated with them.
		 */
		while ((ms = SLIST_FIRST(&j->machservices))) {
			machservice_delete(j, ms, false);
		}

		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, label_hash_sle);
		free(j);
		return;
	}

#if TARGET_OS_EMBEDDED
	// Hand-of-god mode: only same-user jobs may be removed.
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!(_launchd_embedded_god->username && j->username)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	/* Do this BEFORE we check and see whether the job is still active. If we're
	 * a sub-job, we're being removed due to the parent job removing us.
	 * Therefore, the parent job will free itself after this call completes. So
	 * if we defer removing ourselves from the parent's list, we'll crash when
	 * we finally get around to it.
	 */
	if (j->dedicated_instance && !j->former_subjob) {
		LIST_REMOVE(j, subjob_sle);
		j->former_subjob = true;
	}

	if (unlikely(j->p)) {
		if (j->anonymous) {
			job_reap(j);
		} else {
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			// Ask the job to stop once; actual removal happens at reap time.
			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}

			return;
		}
	}

	if (!j->removing) {
		j->removing = true;
		job_dispatch_curious_jobs(j);
	}

	ipc_close_all_with_job(j);

	// Undo global on-demand mode if this job had forced it.
	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	// NOTE(review): this closes fork_fd only when it is non-zero, but routes
	// the check through job_assumes_zero(), which logs a non-zero value.
	// Looks intentional as a "should have been closed already" diagnostic —
	// confirm before changing.
	if (job_assumes_zero(j, j->fork_fd)) {
		(void)posix_assumes_zero(runtime_close(j->fork_fd));
	}

	if (j->stdin_fd) {
		(void)posix_assumes_zero(runtime_close(j->stdin_fd));
	}

	if (j->j_port) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	}

	// Delete every resource list attached to the job.
	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	struct externalevent *eei = NULL;
	while ((eei = LIST_FIRST(&j->events))) {
		externalevent_delete(eei);
	}

	// Clear distinguished-job globals if this job held one of those roles.
	if (j->event_monitor) {
		_launchd_event_monitor = NULL;
	}
	if (j->xpc_bootstrapper) {
		_launchd_xpc_bootstrapper = NULL;
	}

	// Free owned strings and buffers. free(NULL) would be a no-op, but the
	// existing guards are kept as-is.
	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
	if (j->cfbundleidentifier) {
		free(j->cfbundleidentifier);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
	if (j->container_identifier) {
		free(j->container_identifier);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	if (j->start_interval) {
		runtime_del_weak_ref();
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
	}
	if (j->exit_timeout) {
		/* If this fails, it just means the timer's already fired, so no need to
		 * wrap it in an assumes() macro.
		 */
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->asport != MACH_PORT_NULL) {
		(void)job_assumes_zero(j, launchd_mport_deallocate(j->asport));
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if (j->embedded_god) {
		_launchd_embedded_god = NULL;
	}
	if (j->embedded_home) {
		_launchd_embedded_home = NULL;
	}
	if (j->shutdown_monitor) {
		_launchd_shutdown_monitor = NULL;
	}

	(void)kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	// Removing a subjob unlinks it from our list (see dedicated_instance
	// handling above), hence the SAFE iteration.
	job_t ji = NULL;
	job_t jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
		job_remove(ji);
	}

	job_log(j, LOG_DEBUG, "Removed");

	// Poison the callback pointer to make use-after-free obvious.
	j->kqjob_callback = (kq_callback)0x8badf00d;
	free(j);
}
1580
1581 void
1582 socketgroup_setup(launch_data_t obj, const char *key, void *context)
1583 {
1584 launch_data_t tmp_oai;
1585 job_t j = context;
1586 size_t i, fd_cnt = 1;
1587 int *fds;
1588
1589 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1590 fd_cnt = launch_data_array_get_count(obj);
1591 }
1592
1593 fds = alloca(fd_cnt * sizeof(int));
1594
1595 for (i = 0; i < fd_cnt; i++) {
1596 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1597 tmp_oai = launch_data_array_get_index(obj, i);
1598 } else {
1599 tmp_oai = obj;
1600 }
1601
1602 fds[i] = launch_data_get_fd(tmp_oai);
1603 }
1604
1605 socketgroup_new(j, key, fds, fd_cnt);
1606
1607 ipc_revoke_fds(obj);
1608 }
1609
1610 bool
1611 job_set_global_on_demand(job_t j, bool val)
1612 {
1613 if (j->forced_peers_to_demand_mode && val) {
1614 return false;
1615 } else if (!j->forced_peers_to_demand_mode && !val) {
1616 return false;
1617 }
1618
1619 if ((j->forced_peers_to_demand_mode = val)) {
1620 j->mgr->global_on_demand_cnt++;
1621 } else {
1622 j->mgr->global_on_demand_cnt--;
1623 }
1624
1625 if (j->mgr->global_on_demand_cnt == 0) {
1626 jobmgr_dispatch_all(j->mgr, false);
1627 }
1628
1629 return true;
1630 }
1631
1632 bool
1633 job_setup_machport(job_t j)
1634 {
1635 if (job_assumes_zero(j, launchd_mport_create_recv(&j->j_port)) != KERN_SUCCESS) {
1636 goto out_bad;
1637 }
1638
1639 if (job_assumes_zero(j, runtime_add_mport(j->j_port, job_server)) != KERN_SUCCESS) {
1640 goto out_bad2;
1641 }
1642
1643 if (job_assumes_zero(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS)) != KERN_SUCCESS) {
1644 (void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
1645 goto out_bad;
1646 }
1647
1648 return true;
1649 out_bad2:
1650 (void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
1651 out_bad:
1652 return false;
1653 }
1654
1655 kern_return_t
1656 job_setup_exit_port(job_t j)
1657 {
1658 kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
1659 if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
1660 return MACH_PORT_NULL;
1661 }
1662
1663 struct mach_port_limits limits = {
1664 .mpl_qlimit = 1,
1665 };
1666 kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
1667 (void)job_assumes_zero(j, kr);
1668
1669 kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
1670 if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
1671 (void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
1672 j->exit_status_port = MACH_PORT_NULL;
1673 }
1674
1675 return kr;
1676 }
1677
1678 job_t
1679 job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
1680 {
1681 const char **argv = (const char **)mach_cmd2argv(cmd);
1682 job_t jr = NULL;
1683
1684 if (!argv) {
1685 goto out_bad;
1686 }
1687
1688 jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
1689 free(argv);
1690
1691 // Job creation can be denied during shutdown.
1692 if (unlikely(jr == NULL)) {
1693 goto out_bad;
1694 }
1695
1696 jr->mach_uid = uid;
1697 jr->ondemand = ond;
1698 jr->legacy_mach_job = true;
1699 jr->abandon_pg = true;
1700 jr->priv_port_has_senders = true; // the IPC that called us will make-send on this port
1701
1702 if (!job_setup_machport(jr)) {
1703 goto out_bad;
1704 }
1705
1706 job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");
1707
1708 return jr;
1709
1710 out_bad:
1711 if (jr) {
1712 job_remove(jr);
1713 }
1714 return NULL;
1715 }
1716
/* Adopt an existing process (one launchd did not spawn) as an "anonymous"
 * job so it can participate in bootstrap lookups and be tracked. Gathers
 * the process's credentials via libproc, sanity-checks them, creates the
 * job, hashes it by PID, and registers a kevent to observe exec/fork/exit.
 * Returns NULL with errno set on invalid PID, lookup failure, or creation
 * denial. Contains deliberate workarounds for ptrace-induced process-tree
 * cycles and for job creation during shutdown; see comments below.
 */
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	struct proc_bsdshortinfo proc;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (anonpid == 0) {
		errno = EINVAL;
		return NULL;
	}

	if (anonpid >= 100000) {
		/* The kernel current defines PID_MAX to be 99999, but that define isn't
		 * exported.
		 */
		launchd_syslog(LOG_WARNING, "Did PID_MAX change? Got request from PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	/* libproc returns the number of bytes written into the buffer upon success,
	 * zero on failure. I'd much rather it return -1 on failure, like sysctl(3).
	 */
	if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}
		return NULL;
	}

	if (proc.pbsi_comm[0] == '\0') {
		launchd_syslog(LOG_WARNING, "Blank command for PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(proc.pbsi_status == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
	}

	if (unlikely(proc.pbsi_flags & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
	}

	kp_euid = proc.pbsi_uid;
	kp_uid = proc.pbsi_ruid;
	kp_svuid = proc.pbsi_svuid;
	kp_egid = proc.pbsi_gid;
	kp_gid = proc.pbsi_rgid;
	kp_svgid = proc.pbsi_svgid;

	// Mixed effective/real/saved credentials are merely logged, not rejected.
	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
	}

	/* "Fix" for when the kernel turns the process tree into a weird, cyclic
	 * graph.
	 *
	 * See <rdar://problem/7264615> for the symptom and <rdar://problem/5020256>
	 * as to why this can happen.
	 */
	if ((pid_t)proc.pbsi_ppid == anonpid) {
		jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). Ignoring: %s", proc.pbsi_comm);
		errno = EINVAL;
		return NULL;
	}

	/* HACK: Normally, job_new() returns an error during shutdown, but anonymous
	 * jobs can pop up during shutdown and need to talk to us.
	 */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	// We only set requestor_pid for XPC domains.
	const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
	if ((jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL))) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		// Anonymous process reaping is messy.
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1)) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(jr, errno);
			}

			// Zombies interact weirdly with kevent(3).
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state)) {
			job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		// NOTE(review): jp is always NULL at this point (it is only looked up
		// below), so the PPID suffix here never prints a parent label.
		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
	} else {
		(void)os_assumes_zero(errno);
	}

	// Undo our hack from above.
	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	/* This is down here to prevent infinite recursion due to a process
	 * attaching to its parent through ptrace(3) -- causing a cycle in the
	 * process tree and thereby not making it a tree anymore. We need to make
	 * sure that the anonymous job has been added to the process list so that
	 * we'll find the tracing parent PID of the parent process, which is the
	 * child, when we go looking for it in jobmgr_find_by_pid().
	 *
	 * <rdar://problem/7264615>
	 */
	switch (proc.pbsi_ppid) {
	case 0:
		// The kernel.
		break;
	case 1:
		if (!pid1_magic) {
			break;
		}
		// Fall through.
	default:
		jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
		if (jobmgr_assumes(jm, jp != NULL)) {
			if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
				job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
			}
		}
		break;
	}

	return jr;
}
1861
1862 job_t
1863 job_new_subjob(job_t j, uuid_t identifier)
1864 {
1865 char label[0];
1866 uuid_string_t idstr;
1867 uuid_unparse(identifier, idstr);
1868 size_t label_sz = snprintf(label, 0, "%s.%s", j->label, idstr);
1869
1870 job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
1871 if (nj != NULL) {
1872 nj->kqjob_callback = job_callback;
1873 nj->original = j;
1874 nj->mgr = j->mgr;
1875 nj->min_run_time = j->min_run_time;
1876 nj->timeout = j->timeout;
1877 nj->exit_timeout = j->exit_timeout;
1878
1879 snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
1880
1881 // Set all our simple Booleans that are applicable.
1882 nj->debug = j->debug;
1883 nj->ondemand = j->ondemand;
1884 nj->checkedin = true;
1885 nj->low_pri_io = j->low_pri_io;
1886 nj->setmask = j->setmask;
1887 nj->wait4debugger = j->wait4debugger;
1888 nj->internal_exc_handler = j->internal_exc_handler;
1889 nj->setnice = j->setnice;
1890 nj->abandon_pg = j->abandon_pg;
1891 nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
1892 nj->deny_job_creation = j->deny_job_creation;
1893 nj->enable_transactions = j->enable_transactions;
1894 nj->needs_kickoff = j->needs_kickoff;
1895 nj->currently_ignored = true;
1896 nj->dedicated_instance = true;
1897 nj->xpc_service = j->xpc_service;
1898 nj->xpc_bootstrapper = j->xpc_bootstrapper;
1899 nj->jetsam_priority = j->jetsam_priority;
1900 nj->jetsam_memlimit = j->jetsam_memlimit;
1901 nj->psproctype = j->psproctype;
1902
1903 nj->mask = j->mask;
1904 uuid_copy(nj->instance_id, identifier);
1905
1906 // These jobs are purely on-demand Mach jobs.
1907 // {Hard | Soft}ResourceLimits are not supported.
1908 // JetsamPriority is not supported.
1909
1910 if (j->prog) {
1911 nj->prog = strdup(j->prog);
1912 }
1913 if (j->argv) {
1914 size_t sz = malloc_size(j->argv);
1915 nj->argv = (char **)malloc(sz);
1916 if (nj->argv != NULL) {
1917 // This is the start of our strings.
1918 char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
1919
1920 size_t i = 0;
1921 for (i = 0; i < j->argc; i++) {
1922 (void)strcpy(p, j->argv[i]);
1923 nj->argv[i] = p;
1924 p += (strlen(j->argv[i]) + 1);
1925 }
1926 nj->argv[i] = NULL;
1927 } else {
1928 (void)job_assumes_zero(nj, errno);
1929 }
1930
1931 nj->argc = j->argc;
1932 }
1933
1934 struct machservice *msi = NULL;
1935 SLIST_FOREACH(msi, &j->machservices, sle) {
1936 /* Only copy MachServices that were actually declared in the plist.
1937 * So skip over per-PID ones and ones that were created via
1938 * bootstrap_register().
1939 */
1940 if (msi->upfront) {
1941 mach_port_t mp = MACH_PORT_NULL;
1942 struct machservice *msj = machservice_new(nj, msi->name, &mp, false);
1943 if (msj != NULL) {
1944 msj->reset = msi->reset;
1945 msj->delete_on_destruction = msi->delete_on_destruction;
1946 msj->drain_one_on_crash = msi->drain_one_on_crash;
1947 msj->drain_all_on_crash = msi->drain_all_on_crash;
1948
1949 kern_return_t kr = mach_port_set_attributes(mach_task_self(), msj->port, MACH_PORT_TEMPOWNER, NULL, 0);
1950 (void)job_assumes_zero(j, kr);
1951 } else {
1952 (void)job_assumes_zero(nj, errno);
1953 }
1954 }
1955 }
1956
1957 // We ignore global environment variables.
1958 struct envitem *ei = NULL;
1959 SLIST_FOREACH(ei, &j->env, sle) {
1960 if (envitem_new(nj, ei->key, ei->value, false)) {
1961 (void)job_assumes_zero(nj, errno);
1962 }
1963 }
1964 uuid_string_t val;
1965 uuid_unparse(identifier, val);
1966 if (envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false)) {
1967 (void)job_assumes_zero(nj, errno);
1968 }
1969
1970 if (j->rootdir) {
1971 nj->rootdir = strdup(j->rootdir);
1972 }
1973 if (j->workingdir) {
1974 nj->workingdir = strdup(j->workingdir);
1975 }
1976 if (j->username) {
1977 nj->username = strdup(j->username);
1978 }
1979 if (j->groupname) {
1980 nj->groupname = strdup(j->groupname);
1981 }
1982
1983 /* FIXME: We shouldn't redirect all the output from these jobs to the
1984 * same file. We should uniquify the file names. But this hasn't shown
1985 * to be a problem in practice.
1986 */
1987 if (j->stdinpath) {
1988 nj->stdinpath = strdup(j->stdinpath);
1989 }
1990 if (j->stdoutpath) {
1991 nj->stdoutpath = strdup(j->stdinpath);
1992 }
1993 if (j->stderrpath) {
1994 nj->stderrpath = strdup(j->stderrpath);
1995 }
1996 if (j->alt_exc_handler) {
1997 nj->alt_exc_handler = strdup(j->alt_exc_handler);
1998 }
1999 if (j->cfbundleidentifier) {
2000 nj->cfbundleidentifier = strdup(j->cfbundleidentifier);
2001 }
2002 #if HAVE_SANDBOX
2003 if (j->seatbelt_profile) {
2004 nj->seatbelt_profile = strdup(j->seatbelt_profile);
2005 }
2006 if (j->container_identifier) {
2007 nj->container_identifier = strdup(j->container_identifier);
2008 }
2009 #endif
2010
2011 #if HAVE_QUARANTINE
2012 if (j->quarantine_data) {
2013 nj->quarantine_data = strdup(j->quarantine_data);
2014 }
2015 nj->quarantine_data_sz = j->quarantine_data_sz;
2016 #endif
2017 if (j->j_binpref) {
2018 size_t sz = malloc_size(j->j_binpref);
2019 nj->j_binpref = (cpu_type_t *)malloc(sz);
2020 if (nj->j_binpref) {
2021 memcpy(&nj->j_binpref, &j->j_binpref, sz);
2022 } else {
2023 (void)job_assumes_zero(nj, errno);
2024 }
2025 }
2026
2027 if (j->asport != MACH_PORT_NULL) {
2028 (void)job_assumes_zero(nj, launchd_mport_copy_send(j->asport));
2029 nj->asport = j->asport;
2030 }
2031
2032 LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
2033
2034 jobmgr_t where2put = root_jobmgr;
2035 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
2036 where2put = j->mgr;
2037 }
2038 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
2039 LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
2040 } else {
2041 (void)os_assumes_zero(errno);
2042 }
2043
2044 return nj;
2045 }
2046
job_t
job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
{
	/* Create a new job in job manager jm.
	 *
	 * label may be one of the AUTO_PICK_* sentinel pointers:
	 *  - AUTO_PICK_LEGACY_LABEL / AUTO_PICK_ANONYMOUS_LABEL: a label is
	 *    synthesized from prog (or basename(argv[0])) plus the job's own
	 *    address once the job has been allocated.
	 *  - AUTO_PICK_XPC_LABEL: the label is derived from jm->owner.
	 * Otherwise label is copied verbatim.
	 *
	 * Returns the new job, or NULL with errno set when jm is shutting
	 * down, neither prog nor argv was supplied, or allocation failed.
	 */
	const char *const *argv_tmp = argv;
	char tmp_path[PATH_MAX];
	char auto_label[1000];
	const char *bn = NULL;
	char *co;
	size_t minlabel_len;
	size_t i, cc = 0;
	job_t j;

	// job_t pointers double as kq_callback pointers, so that member must be first.
	__OS_COMPILETIME_ASSERT__(offsetof(struct job_s, kqjob_callback) == 0);

	if (unlikely(jm->shutting_down)) {
		errno = EINVAL;
		return NULL;
	}

	// A job must have either a program path or an argument vector.
	if (unlikely(prog == NULL && argv == NULL)) {
		errno = EINVAL;
		return NULL;
	}

	/* I'd really like to redo this someday. Anonymous jobs carry all the
	 * baggage of managed jobs with them, even though most of it is unused.
	 * Maybe when we have Objective-C objects in libSystem, there can be a base
	 * job type that anonymous and managed jobs inherit from...
	 */
	char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
	if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
		if (prog) {
			bn = prog;
		} else {
			strlcpy(tmp_path, argv[0], sizeof(tmp_path));
			// prog for auto labels is kp.kp_kproc.p_comm.
			bn = basename(tmp_path);
		}

		/* The pointer-width hex placeholder reserves exactly the room that
		 * the real "%p" of the job's address needs when it is formatted
		 * into j->label further below.
		 */
		(void)snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
		label = auto_label;
		/* This is so we can do gross things later. See NOTE_EXEC for anonymous
		 * jobs.
		 */
		minlabel_len = strlen(label) + MAXCOMLEN;
	} else {
		if (label == AUTO_PICK_XPC_LABEL) {
			minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
		} else {
			minlabel_len = strlen(label);
		}
	}

	// The label is stored inline at the end of the job structure.
	j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);

	if (!j) {
		(void)os_assumes_zero(errno);
		return NULL;
	}

	if (unlikely(label == auto_label)) {
		// Substitute the job's own address for the placeholder chosen above.
		(void)snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
	} else {
		(void)strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
	}

	// Defaults for every job; importers may override most of these later.
	j->kqjob_callback = job_callback;
	j->mgr = jm;
	j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
	j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
	j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
	j->currently_ignored = true;
	j->ondemand = true;
	j->checkedin = true;
	j->jetsam_priority = DEFAULT_JETSAM_PRIORITY;
	j->jetsam_memlimit = -1;
	uuid_clear(j->expected_audit_uuid);
#if TARGET_OS_EMBEDDED
	/* Run embedded daemons as background by default. SpringBoard jobs are
	 * Interactive by default. Unfortunately, so many daemons have opted into
	 * this priority band that its usefulness is highly questionable.
	 *
	 * See <rdar://problem/9539873>.
	 *
	 * Also ensure that daemons have a default memory highwatermark unless
	 * otherwise specified, as per <rdar://problem/10307814>.
	 */
	if (launchd_embedded_handofgod) {
		j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
		j->app = true;
	} else {
		j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
		j->jetsam_memlimit = DEFAULT_JETSAM_DAEMON_HIGHWATERMARK;
	}
#else
	/* Jobs on OS X that just come from disk are "standard" by default so that
	 * third-party daemons/agents don't encounter unexpected throttling.
	 */
	j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
#endif

	if (prog) {
		j->prog = strdup(prog);
		if (!j->prog) {
			(void)os_assumes_zero(errno);
			goto out_bad;
		}
	}

	if (likely(argv)) {
		// Count the entries in the NULL-terminated argument vector.
		while (*argv_tmp++) {
			j->argc++;
		}

		// Total bytes needed for all argument strings, NUL terminators included.
		for (i = 0; i < j->argc; i++) {
			cc += strlen(argv[i]) + 1;
		}

		// Single allocation: (argc+1) pointers followed by the string data.
		j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
		if (!j->argv) {
			(void)job_assumes_zero(j, errno);
			goto out_bad;
		}

		// co walks the string area immediately after the pointer array.
		co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));

		for (i = 0; i < j->argc; i++) {
			j->argv[i] = co;
			(void)strcpy(co, argv[i]);
			co += strlen(argv[i]) + 1;
		}
		j->argv[i] = NULL;
	}

	// Sssshhh... don't tell anyone.
	if (strcmp(j->label, "com.apple.WindowServer") == 0) {
		j->has_console = true;
	}

	LIST_INSERT_HEAD(&jm->jobs, j, sle);

	// Labels hash into the root manager unless this is an XPC domain.
	jobmgr_t where2put_label = root_jobmgr;
	if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
		where2put_label = j->mgr;
	}
	LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
	uuid_clear(j->expected_audit_uuid);

	job_log(j, LOG_DEBUG, "Conceived");

	return j;

out_bad:
	// Only prog and j itself can have been allocated when we get here.
	if (j->prog) {
		free(j->prog);
	}
	free(j);

	return NULL;
}
2207
2208 job_t
2209 job_new_alias(jobmgr_t jm, job_t src)
2210 {
2211 if (job_find(jm, src->label)) {
2212 errno = EEXIST;
2213 return NULL;
2214 }
2215
2216 job_t j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2217 if (!j) {
2218 (void)os_assumes_zero(errno);
2219 return NULL;
2220 }
2221
2222 (void)strcpy((char *)j->label, src->label);
2223 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2224 LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2225 /* Bad jump address. The kqueue callback for aliases should never be
2226 * invoked.
2227 */
2228 j->kqjob_callback = (kq_callback)0xfa1afe1;
2229 j->alias = src;
2230 j->mgr = jm;
2231
2232 struct machservice *msi = NULL;
2233 SLIST_FOREACH(msi, &src->machservices, sle) {
2234 if (!machservice_new_alias(j, msi)) {
2235 jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2236 errno = EINVAL;
2237 job_remove(j);
2238 j = NULL;
2239 break;
2240 }
2241 }
2242
2243 if (j) {
2244 job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2245 }
2246
2247 return j;
2248 }
2249
2250 job_t
2251 job_import(launch_data_t pload)
2252 {
2253 #if TARGET_OS_EMBEDDED
2254 /* If this is the special payload of default values, handle it here */
2255 if (unlikely(launch_data_dict_lookup(pload, LAUNCH_JOBKEY_DEFAULTS))) {
2256 job_import_defaults(pload);
2257 return NULL;
2258 }
2259 #endif
2260
2261 job_t j = jobmgr_import2(root_jobmgr, pload);
2262
2263 if (unlikely(j == NULL)) {
2264 return NULL;
2265 }
2266
2267 /* Since jobs are effectively stalled until they get security sessions
2268 * assigned to them, we may wish to reconsider this behavior of calling the
2269 * job "enabled" as far as other jobs with the OtherJobEnabled KeepAlive
2270 * criterion set.
2271 */
2272 job_dispatch_curious_jobs(j);
2273 return job_dispatch(j, false);
2274 }
2275
2276 #if TARGET_OS_EMBEDDED
2277
2278 bool
2279 job_import_defaults(launch_data_t pload)
2280 {
2281 bool result = false;
2282 xpc_object_t xd = NULL, defaults;
2283
2284 if (_launchd_defaults_cache) {
2285 xpc_release(_launchd_defaults_cache);
2286 _launchd_defaults_cache = NULL;
2287 }
2288
2289 xd = ld2xpc(pload);
2290 if (!xd || xpc_get_type(xd) != XPC_TYPE_DICTIONARY) {
2291 goto out;
2292 }
2293
2294 defaults = xpc_dictionary_get_value(xd, LAUNCHD_JOB_DEFAULTS);
2295 if (!defaults || xpc_get_type(defaults) != XPC_TYPE_DICTIONARY) {
2296 goto out;
2297 }
2298
2299 _launchd_defaults_cache = xpc_copy(defaults);
2300 result = true;
2301 out:
2302 if (xd) {
2303 xpc_release(xd);
2304 }
2305
2306 return result;
2307 }
2308
bool
job_apply_defaults(job_t j) {
	/* Apply any entry of the cached defaults dictionary that matches this
	 * job's (normalized) label. Returns true when defaults were applied,
	 * or when the job is a com.apple.test.* job whose memory limit was
	 * lifted; false otherwise.
	 */
	const char *test_prefix = "com.apple.test.";

	char *sb_prefix_end, *sb_suffix_start;
	/* NOTE(review): sized strlen(j->label) with no +1, but the snprintf
	 * below writes at most (sb_suffix_start - sb_prefix_end) bytes, which
	 * is strictly less than strlen(j->label) since both pointers lie
	 * inside the label — so this still fits.
	 */
	char true_job_label[strlen(j->label)];
	const char *label;

	if (((sb_prefix_end = strchr(j->label, ':')) != NULL) &&
		((sb_suffix_start = strchr(sb_prefix_end + 1, '[')) != NULL)) {
		/*
		 * Workaround 'UIKitApplication:com.apple.foo[bar]' convention for the processes
		 * we're interested in. To be removed when <rdar://problem/13066361> is addressed.
		 */
		// Copy the substring between ':' and '[' as the job's true label.
		snprintf(true_job_label, sb_suffix_start - sb_prefix_end, "%s", sb_prefix_end + 1);
		label = true_job_label;
	} else {
		/* Just test the standard label */
		label = j->label;
	}

	/* Test for cache presence and apply if found */
	if (_launchd_defaults_cache) {
		xpc_object_t props = xpc_dictionary_get_value(_launchd_defaults_cache, label);
		if (props && xpc_get_type(props) == XPC_TYPE_DICTIONARY) {
			// Replay the cached dictionary through the normal key importer.
			launch_data_t lv = xpc2ld(props);
			launch_data_dict_iterate(lv, job_import_keys, j);
			launch_data_free(lv);
			return true;
		}
	}

	/* Limit free? Disable the memory limit if this is a test job; see <rdar://problem/13180697> */
	if (!strncmp(label, test_prefix, strlen(test_prefix))) {
		j->jetsam_memlimit = -1;
		return true;
	}

	return false;
}
2349
2350 #endif
2351
launch_data_t
job_import_bulk(launch_data_t pload)
{
	/* Import an array of job plists. Returns a launch_data array of errno
	 * values, one per input plist, in the same order as the inputs.
	 */
	launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
	job_t *ja;
	size_t i, c = launch_data_array_get_count(pload);

	// Scratch array of imported jobs; stack-allocated, freed on return.
	ja = alloca(c * sizeof(job_t));

	for (i = 0; i < c; i++) {
		/* jobmgr_import2() sets errno on failure. ENEEDAUTH survives even
		 * a successful import so the client still sees it in the reply.
		 */
		if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
			errno = 0;
		}
		launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
	}

	// Second pass: dispatch only after every plist has been imported,
	// presumably so that jobs referencing each other all exist — verify.
	for (i = 0; i < c; i++) {
		if (likely(ja[i])) {
			job_dispatch_curious_jobs(ja[i]);
			job_dispatch(ja[i], false);
		}
	}

	return resp;
}
2377
2378 void
2379 job_import_bool(job_t j, const char *key, bool value)
2380 {
2381 bool found_key = false;
2382
2383 switch (key[0]) {
2384 case 'a':
2385 case 'A':
2386 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2387 j->abandon_pg = value;
2388 found_key = true;
2389 }
2390 break;
2391 case 'b':
2392 case 'B':
2393 if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2394 j->dirty_at_shutdown = value;
2395 found_key = true;
2396 }
2397 break;
2398 case 'j':
2399 case 'J':
2400 if (strcasecmp(key, LAUNCH_JOBKEY_JOINGUISESSION) == 0) {
2401 j->joins_gui_session = value;
2402 found_key = true;
2403 }
2404 break;
2405 case 'k':
2406 case 'K':
2407 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2408 j->ondemand = !value;
2409 found_key = true;
2410 }
2411 break;
2412 case 'o':
2413 case 'O':
2414 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
2415 j->ondemand = value;
2416 found_key = true;
2417 }
2418 break;
2419 case 'd':
2420 case 'D':
2421 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
2422 j->debug = value;
2423 found_key = true;
2424 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
2425 (void)job_assumes(j, !value);
2426 found_key = true;
2427 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2428 j->disable_aslr = value;
2429 found_key = true;
2430 }
2431 break;
2432 case 'h':
2433 case 'H':
2434 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
2435 job_log(j, LOG_PERF, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
2436 j->dirty_at_shutdown = value;
2437 found_key = true;
2438 }
2439 break;
2440 case 's':
2441 case 'S':
2442 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
2443 j->session_create = value;
2444 found_key = true;
2445 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2446 j->start_on_mount = value;
2447 found_key = true;
2448 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
2449 // this only does something on Mac OS X 10.4 "Tiger"
2450 found_key = true;
2451 } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
2452 if (_launchd_shutdown_monitor) {
2453 job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2454 } else {
2455 j->shutdown_monitor = true;
2456 _launchd_shutdown_monitor = j;
2457 }
2458 found_key = true;
2459 }
2460 break;
2461 case 'l':
2462 case 'L':
2463 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
2464 j->low_pri_io = value;
2465 found_key = true;
2466 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2467 j->only_once = value;
2468 found_key = true;
2469 } else if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYBACKGROUNDIO) == 0) {
2470 j->low_priority_background_io = true;
2471 found_key = true;
2472 }
2473 break;
2474 case 'm':
2475 case 'M':
2476 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2477 j->internal_exc_handler = value;
2478 found_key = true;
2479 } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2480 j->multiple_instances = value;
2481 found_key = true;
2482 }
2483 break;
2484 case 'i':
2485 case 'I':
2486 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2487 if (getuid() != 0) {
2488 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2489 return;
2490 }
2491 j->no_init_groups = !value;
2492 found_key = true;
2493 } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
2494 j->ignore_pg_at_shutdown = value;
2495 found_key = true;
2496 }
2497 break;
2498 case 'r':
2499 case 'R':
2500 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2501 if (value) {
2502 // We don't want value == false to change j->start_pending
2503 j->start_pending = true;
2504 }
2505 found_key = true;
2506 }
2507 break;
2508 case 'e':
2509 case 'E':
2510 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
2511 j->globargv = value;
2512 found_key = true;
2513 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
2514 j->enable_transactions = value;
2515 found_key = true;
2516 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2517 j->debug_before_kill = value;
2518 found_key = true;
2519 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
2520 #if TARGET_OS_EMBEDDED
2521 if (!_launchd_embedded_god) {
2522 if ((j->embedded_god = value)) {
2523 _launchd_embedded_god = j;
2524 }
2525 } else {
2526 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2527 }
2528 #else
2529 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2530 #endif
2531 found_key = true;
2532 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDHOMESCREEN) == 0) {
2533 #if TARGET_OS_EMBEDDED
2534 if (!_launchd_embedded_home) {
2535 if ((j->embedded_home = value)) {
2536 _launchd_embedded_home = j;
2537 }
2538 } else {
2539 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2540 }
2541 #else
2542 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2543 #endif
2544 } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
2545 if (!_launchd_event_monitor) {
2546 j->event_monitor = value;
2547 if (value) {
2548 _launchd_event_monitor = j;
2549 }
2550 } else {
2551 job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility from: %s", _launchd_event_monitor->label);
2552 }
2553 found_key = true;
2554 }
2555 break;
2556 case 'w':
2557 case 'W':
2558 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
2559 j->wait4debugger = value;
2560 found_key = true;
2561 }
2562 break;
2563 case 'x':
2564 case 'X':
2565 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
2566 if (pid1_magic) {
2567 if (_launchd_xpc_bootstrapper) {
2568 job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _launchd_xpc_bootstrapper->label);
2569 } else {
2570 _launchd_xpc_bootstrapper = j;
2571 j->xpc_bootstrapper = value;
2572 }
2573 } else {
2574 job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
2575 }
2576 }
2577 found_key = true;
2578 break;
2579 default:
2580 break;
2581 }
2582
2583 if (unlikely(!found_key)) {
2584 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2585 }
2586 }
2587
void
job_import_string(job_t j, const char *key, const char *value)
{
	/* Apply a single string plist key to job j. Most keys simply stash a
	 * strdup()'d copy of value into the matching job field through
	 * where2put; a few are handled inline or deliberately ignored here.
	 */
	char **where2put = NULL;

	switch (key[0]) {
	case 'c':
	case 'C':
		if (strcasecmp(key, LAUNCH_JOBKEY_CFBUNDLEIDENTIFIER) == 0) {
			where2put = &j->cfbundleidentifier;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			where2put = &j->alt_exc_handler;
		}
		break;
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
			// Deliberately ignored here.
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0
			|| strcasecmp(key, LAUNCH_JOBKEY_PROCESSTYPE) == 0) {
			// Map the process-type string onto a posix_spawn proc type.
			if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_INTERACTIVE) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_ADAPTIVE) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_STANDARD) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_BACKGROUND) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_TAL;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_SYSTEMAPP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
				j->system_app = true;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_APP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
				j->app = true;
			} else {
				job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
			}
			return;
		}
		break;
	case 'l':
	case 'L':
		// Label and LimitLoadTo* keys are deliberately ignored here.
		if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			return;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
			// Only root may chroot its jobs.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			where2put = &j->rootdir;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
			where2put = &j->workingdir;
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
			// Only root may change user; "root" itself is a no-op.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "root") == 0) {
				return;
			}
			where2put = &j->username;
		}
		break;
	case 'g':
	case 'G':
		if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
			// Only root may change group; "wheel" itself is a no-op.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "wheel") == 0) {
				return;
			}
			where2put = &j->groupname;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
			where2put = &j->stdoutpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
			where2put = &j->stderrpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
			where2put = &j->stdinpath;
			// Open non-blocking so a FIFO with no writer cannot hang launchd.
			j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
			if (job_assumes_zero_p(j, j->stdin_fd) != -1) {
				// open() should not block, but regular IO by the job should
				(void)job_assumes_zero_p(j, fcntl(j->stdin_fd, F_SETFL, 0));
				// XXX -- EV_CLEAR should make named pipes happy?
				(void)job_assumes_zero_p(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j));
			} else {
				j->stdin_fd = 0;
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
			where2put = &j->seatbelt_profile;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXCONTAINER) == 0) {
			where2put = &j->container_identifier;
#endif
		}
		break;
	case 'X':
	case 'x':
		if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
			// Deliberately ignored here.
			return;
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
		break;
	}

	if (likely(where2put)) {
		if (!(*where2put = strdup(value))) {
			(void)job_assumes_zero(j, errno);
		}
	} else {
		// See rdar://problem/5496612. These two are okay.
		if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) == 0
			|| strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) == 0) {
			job_log(j, LOG_APPLEONLY, "This key is no longer relevant and should be removed: %s", key);
		} else {
			job_log(j, LOG_WARNING, "Unknown key: %s", key);
		}
	}
}
2736
2737 void
2738 job_import_integer(job_t j, const char *key, long long value)
2739 {
2740 switch (key[0]) {
2741 case 'a':
2742 case 'A':
2743 #if TARGET_OS_EMBEDDED
2744 if (strcasecmp(key, LAUNCH_JOBKEY_ASID) == 0) {
2745 if (launchd_embedded_handofgod) {
2746 if (audit_session_port((au_asid_t)value, &j->asport) == -1 && errno != ENOSYS) {
2747 (void)job_assumes_zero(j, errno);
2748 }
2749 }
2750 }
2751 #endif
2752 case 'e':
2753 case 'E':
2754 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
2755 if (unlikely(value < 0)) {
2756 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2757 } else if (unlikely(value > UINT32_MAX)) {
2758 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2759 } else {
2760 j->exit_timeout = (typeof(j->exit_timeout)) value;
2761 }
2762 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
2763 j->main_thread_priority = value;
2764 }
2765 break;
2766 case 'j':
2767 case 'J':
2768 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
2769 job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2770
2771 launch_data_t pri = launch_data_new_integer(value);
2772 if (job_assumes(j, pri != NULL)) {
2773 jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2774 launch_data_free(pri);
2775 }
2776 }
2777 case 'n':
2778 case 'N':
2779 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
2780 if (unlikely(value < PRIO_MIN)) {
2781 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2782 } else if (unlikely(value > PRIO_MAX)) {
2783 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2784 } else {
2785 j->nice = (typeof(j->nice)) value;
2786 j->setnice = true;
2787 }
2788 }
2789 break;
2790 case 't':
2791 case 'T':
2792 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
2793 if (unlikely(value < 0)) {
2794 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2795 } else if (unlikely(value > UINT32_MAX)) {
2796 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2797 } else {
2798 j->timeout = (typeof(j->timeout)) value;
2799 }
2800 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2801 if (value < 0) {
2802 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2803 } else if (value > UINT32_MAX) {
2804 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2805 } else {
2806 j->min_run_time = (typeof(j->min_run_time)) value;
2807 }
2808 }
2809 break;
2810 case 'u':
2811 case 'U':
2812 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2813 j->mask = value;
2814 j->setmask = true;
2815 }
2816 break;
2817 case 's':
2818 case 'S':
2819 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
2820 if (unlikely(value <= 0)) {
2821 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2822 } else if (unlikely(value > UINT32_MAX)) {
2823 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2824 } else {
2825 runtime_add_weak_ref();
2826 j->start_interval = (typeof(j->start_interval)) value;
2827
2828 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
2829 }
2830 #if HAVE_SANDBOX
2831 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2832 j->seatbelt_flags = value;
2833 #endif
2834 }
2835
2836 break;
2837 default:
2838 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2839 break;
2840 }
2841 }
2842
2843 void
2844 job_import_opaque(job_t j __attribute__((unused)), const char *key, launch_data_t value __attribute__((unused)))
2845 {
2846 switch (key[0]) {
2847 case 'q':
2848 case 'Q':
2849 #if HAVE_QUARANTINE
2850 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2851 size_t tmpsz = launch_data_get_opaque_size(value);
2852
2853 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2854 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2855 j->quarantine_data_sz = tmpsz;
2856 }
2857 }
2858 #endif
2859 case 's':
2860 case 'S':
2861 if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
2862 size_t tmpsz = launch_data_get_opaque_size(value);
2863 if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
2864 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2865 }
2866 }
2867 break;
2868 default:
2869 break;
2870 }
2871 }
2872
2873 static void
2874 policy_setup(launch_data_t obj, const char *key, void *context)
2875 {
2876 job_t j = context;
2877 bool found_key = false;
2878
2879 switch (key[0]) {
2880 case 'd':
2881 case 'D':
2882 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2883 j->deny_job_creation = launch_data_get_bool(obj);
2884 found_key = true;
2885 }
2886 break;
2887 default:
2888 break;
2889 }
2890
2891 if (unlikely(!found_key)) {
2892 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2893 }
2894 }
2895
2896 void
2897 job_import_dictionary(job_t j, const char *key, launch_data_t value)
2898 {
2899 launch_data_t tmp;
2900
2901 switch (key[0]) {
2902 case 'p':
2903 case 'P':
2904 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2905 launch_data_dict_iterate(value, policy_setup, j);
2906 }
2907 break;
2908 case 'k':
2909 case 'K':
2910 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2911 launch_data_dict_iterate(value, semaphoreitem_setup, j);
2912 }
2913 break;
2914 case 'i':
2915 case 'I':
2916 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2917 j->inetcompat = true;
2918 j->abandon_pg = true;
2919 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2920 j->inetcompat_wait = launch_data_get_bool(tmp);
2921 }
2922 }
2923 break;
2924 case 'j':
2925 case 'J':
2926 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
2927 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2928 }
2929 case 'e':
2930 case 'E':
2931 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2932 launch_data_dict_iterate(value, envitem_setup, j);
2933 }
2934 break;
2935 case 'u':
2936 case 'U':
2937 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2938 j->importing_global_env = true;
2939 launch_data_dict_iterate(value, envitem_setup, j);
2940 j->importing_global_env = false;
2941 }
2942 break;
2943 case 's':
2944 case 'S':
2945 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2946 launch_data_dict_iterate(value, socketgroup_setup, j);
2947 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2948 calendarinterval_new_from_obj(j, value);
2949 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2950 launch_data_dict_iterate(value, limititem_setup, j);
2951 #if HAVE_SANDBOX
2952 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2953 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2954 #endif
2955 }
2956 break;
2957 case 'h':
2958 case 'H':
2959 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2960 j->importing_hard_limits = true;
2961 launch_data_dict_iterate(value, limititem_setup, j);
2962 j->importing_hard_limits = false;
2963 }
2964 break;
2965 case 'm':
2966 case 'M':
2967 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2968 launch_data_dict_iterate(value, machservice_setup, j);
2969 }
2970 break;
2971 case 'l':
2972 case 'L':
2973 if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2974 launch_data_dict_iterate(value, eventsystem_setup, j);
2975 } else {
2976 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2977 return;
2978 }
2979 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2980 return;
2981 }
2982 }
2983 break;
2984 default:
2985 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2986 break;
2987 }
2988 }
2989
2990 void
2991 job_import_array(job_t j, const char *key, launch_data_t value)
2992 {
2993 size_t i, value_cnt = launch_data_array_get_count(value);
2994
2995 switch (key[0]) {
2996 case 'p':
2997 case 'P':
2998 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
2999 return;
3000 }
3001 break;
3002 case 'l':
3003 case 'L':
3004 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
3005 return;
3006 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
3007 return;
3008 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
3009 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
3010 return;
3011 }
3012 break;
3013 case 'b':
3014 case 'B':
3015 if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
3016 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
3017 j->j_binpref_cnt = value_cnt;
3018 for (i = 0; i < value_cnt; i++) {
3019 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
3020 }
3021 }
3022 }
3023 break;
3024 case 's':
3025 case 'S':
3026 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
3027 for (i = 0; i < value_cnt; i++) {
3028 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
3029 }
3030 }
3031 break;
3032 default:
3033 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
3034 break;
3035 }
3036 }
3037
3038 void
3039 job_import_keys(launch_data_t obj, const char *key, void *context)
3040 {
3041 job_t j = context;
3042 launch_data_type_t kind;
3043
3044 if (!obj) {
3045 launchd_syslog(LOG_ERR, "NULL object given to job_import_keys().");
3046 return;
3047 }
3048
3049 kind = launch_data_get_type(obj);
3050
3051 switch (kind) {
3052 case LAUNCH_DATA_BOOL:
3053 job_import_bool(j, key, launch_data_get_bool(obj));
3054 break;
3055 case LAUNCH_DATA_STRING:
3056 job_import_string(j, key, launch_data_get_string(obj));
3057 break;
3058 case LAUNCH_DATA_INTEGER:
3059 job_import_integer(j, key, launch_data_get_integer(obj));
3060 break;
3061 case LAUNCH_DATA_DICTIONARY:
3062 job_import_dictionary(j, key, obj);
3063 break;
3064 case LAUNCH_DATA_ARRAY:
3065 job_import_array(j, key, obj);
3066 break;
3067 case LAUNCH_DATA_OPAQUE:
3068 job_import_opaque(j, key, obj);
3069 break;
3070 default:
3071 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
3072 break;
3073 }
3074 }
3075
/* Import a job definition (a launchd property-list dictionary) into the given
 * job manager. Returns the new job, an existing job (errno = EEXIST, XPC
 * singleton case), or NULL with errno set on failure.
 */
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	/* The payload must be a dictionary carrying a non-empty string Label. */
	if (!jobmgr_assumes(jm, pload != NULL)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(label = launch_data_get_string(tmp)))) {
		errno = EINVAL;
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* NOTE(review): in "hand of god" mode, imports appear to be restricted to
	 * jobs whose UserName matches the god process's username — confirm.
	 */
	if (unlikely(launchd_embedded_handofgod && _launchd_embedded_god)) {
		if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
			errno = EPERM;
			return NULL;
		}

		const char *username = NULL;
		if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
			username = launch_data_get_string(tmp);
		} else {
			errno = EPERM;
			return NULL;
		}

		if (!jobmgr_assumes(jm, _launchd_embedded_god->username != NULL && username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (unlikely(strcmp(_launchd_embedded_god->username, username) != 0)) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM))
		&& (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	int argc = 0;
	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		/* argv lives on this stack frame (alloca); it is only valid for the
		 * duration of this call, so job_new() below must copy what it needs.
		 */
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		/* NULL-terminate like a conventional exec-style argv. */
		argv[i] = NULL;
		argc = i;
	}

	/* A job with neither Program nor ProgramArguments has nothing to run. */
	if (!prog && argc == 0) {
		jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
		errno = EINVAL;
		return NULL;
	}

	/* Find the requested session. You cannot load services into XPC domains in
	 * this manner.
	 */
	launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	if (session) {
		jobmgr_t jmt = NULL;
		if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
			jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
			if (!jmt) {
				jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
			} else {
				/* Retarget the import at the requested session's manager. */
				jm = jmt;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
		}

		/* Fail if the key was present but unusable (wrong type or unknown). */
		if (!jmt) {
			errno = EINVAL;
			return NULL;
		}
	}

	/* For legacy reasons, we have a global hash of all labels in all job
	 * managers. So rather than make it a global, we store it in the root job
	 * manager. But for an XPC domain, we store a local hash of all services in
	 * the domain.
	 */
	jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
	if (unlikely((j = job_find(where2look, label)) != NULL)) {
		if (jm->xpc_singleton) {
			/* There can (and probably will be) multiple attemtps to import the
			 * same XPC service from the same framework. This is okay. It's
			 * treated as a singleton, so just return the existing one so that
			 * it may be aliased into the requesting process' XPC domain.
			 */
			errno = EEXIST;
			return j;
		} else {
			/* If we're not a global XPC domain, then it's an error to try
			 * importing the same job/service multiple times.
			 */
			errno = EEXIST;
			return NULL;
		}
	} else if (unlikely(!jobmgr_label_test(where2look, label))) {
		errno = EINVAL;
		return NULL;
	}
	jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);

	if (likely(j = job_new(jm, label, prog, argv))) {
#if TARGET_OS_EMBEDDED
		job_apply_defaults(j);
#endif
		/* Walk every key of the plist and apply it to the freshly-made job. */
		launch_data_dict_iterate(pload, job_import_keys, j);
		if (!uuid_is_null(j->expected_audit_uuid)) {
			uuid_string_t uuid_str;
			uuid_unparse(j->expected_audit_uuid, uuid_str);
			job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
			LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
			/* Caller sees ENEEDAUTH alongside a valid job pointer. */
			errno = ENEEDAUTH;
		} else {
			job_log(j, LOG_DEBUG, "No security session specified.");
			j->asport = MACH_PORT_NULL;
		}

		if (pid1_magic && !jm->parentmgr) {
			/* Workaround reentrancy in CF. We don't make this a global variable
			 * because we don't want per-user launchd's to inherit it. So we
			 * just set it for every job that we import into the System session.
			 *
			 * See <rdar://problem/9468837>.
			 */
			envitem_new(j, "__CF_USER_TEXT_ENCODING", "0x0:0:0", false);
		}

		if (j->event_monitor) {
			eventsystem_ping();
		}

#if TARGET_OS_EMBEDDED
		/* SpringBoard and backboardd must run at elevated priority.
		 *
		 * See <rdar://problem/9539873> and <rdar://problem/10984383>.
		 */
		if (j->embedded_god || j->embedded_home) {
			j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
		}
#endif
	}

	return j;
}
3272
3273 bool
3274 jobmgr_label_test(jobmgr_t jm, const char *str)
3275 {
3276 const char *ptr;
3277
3278 if (str[0] == '\0') {
3279 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
3280 return false;
3281 }
3282
3283 for (ptr = str; *ptr; ptr++) {
3284 if (iscntrl(*ptr)) {
3285 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
3286 return false;
3287 }
3288 }
3289
3290 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0)
3291 || (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
3292 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
3293 return false;
3294 }
3295
3296 return true;
3297 }
3298
3299 job_t
3300 job_find(jobmgr_t jm, const char *label)
3301 {
3302 job_t ji;
3303
3304 if (!jm) {
3305 jm = root_jobmgr;
3306 }
3307
3308 LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
3309 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
3310 // 5351245 and 5488633 respectively
3311 continue;
3312 }
3313
3314 if (strcmp(ji->label, label) == 0) {
3315 return ji;
3316 }
3317 }
3318
3319 errno = ESRCH;
3320 return NULL;
3321 }
3322
3323 // Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid().
3324 job_t
3325 jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
3326 {
3327 job_t ji = NULL;
3328 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3329 if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay))) {
3330 return ji;
3331 }
3332 }
3333
3334 jobmgr_t jmi = NULL;
3335 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3336 if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
3337 break;
3338 }
3339 }
3340
3341 return ji;
3342 }
3343
3344 job_t
3345 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3346 {
3347 job_t ji;
3348
3349 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3350 if (ji->p == p) {
3351 return ji;
3352 }
3353 }
3354
3355 return create_anon ? job_new_anonymous(jm, p) : NULL;
3356 }
3357
3358 job_t
3359 managed_job(pid_t p)
3360 {
3361 job_t ji;
3362
3363 LIST_FOREACH(ji, &managed_actives[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3364 if (ji->p == p) {
3365 return ji;
3366 }
3367 }
3368
3369 return NULL;
3370 }
3371
/* Resolve a MIG request port to a job within this manager hierarchy: the
 * manager's own port maps to the sending PID, otherwise the port is matched
 * against submanagers (recursively) and then per-job ports.
 */
job_t
job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
{
	jobmgr_t sub;
	job_t candidate;

	/* A message on the manager's own bootstrap port came from some process in
	 * that bootstrap: resolve the sender by PID, creating an anonymous job if
	 * necessary.
	 */
	if (jm->jm_port == mport) {
		return jobmgr_find_by_pid(jm, upid, true);
	}

	/* Depth-first search of child managers. */
	SLIST_FOREACH(sub, &jm->submgrs, sle) {
		job_t found = job_mig_intran2(sub, mport, upid);
		if (found) {
			return found;
		}
	}

	/* Finally, match against the per-job ports in this manager. */
	LIST_FOREACH(candidate, &jm->jobs, sle) {
		if (candidate->j_port == mport) {
			return candidate;
		}
	}

	return NULL;
}
3398
/* MIG intran routine: translate an incoming request port into the job_t that
 * sent the message, using the cached caller credentials for the sender's PID.
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	jr = job_mig_intran2(root_jobmgr, p, ldc->pid);

	if (!jr) {
		/* Lookup failed: distinguish "sender died mid-request" (ESRCH) from
		 * an unexpected proc_pidinfo() failure, which we record.
		 */
		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)jobmgr_assumes_zero(root_jobmgr, errno);
			} else {
				jobmgr_log(root_jobmgr, LOG_ERR, "%s[%i] disappeared out from under us (UID: %u EUID: %u)", proc.pbsi_comm, ldc->pid, ldc->uid, ldc->euid);
			}
		}
	}

	return jr;
}
3420
/* Map a Mach service receive right back to the job that owns it, via the
 * global port hash. Only services for which we hold the receive right match.
 */
job_t
job_find_by_service_port(mach_port_t p)
{
	struct machservice *svc;

	LIST_FOREACH(svc, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (svc->recv && svc->port == p) {
			return svc->job;
		}
	}

	return NULL;
}
3434
/* MIG destructor: runs after a MIG routine's reply has been produced, which
 * is the safe point to remove a job that asked to be unloaded during the
 * call, and to clear the 5477111 workaround marker.
 */
void
job_mig_destructor(job_t j)
{
	/* The job can go invalid before this point.
	 *
	 * <rdar://problem/5477111>
	 */
	if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
		job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
		job_remove(j);
	}

	workaround_5477111 = NULL;

	/* NOTE(review): runs on every MIG return — presumably a cheap consistency
	 * check of the calendar-interval list; confirm against its definition.
	 */
	calendarinterval_sanity_check();
}
3451
3452 void
3453 job_export_all2(jobmgr_t jm, launch_data_t where)
3454 {
3455 jobmgr_t jmi;
3456 job_t ji;
3457
3458 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3459 job_export_all2(jmi, where);
3460 }
3461
3462 LIST_FOREACH(ji, &jm->jobs, sle) {
3463 launch_data_t tmp;
3464
3465 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3466 launch_data_dict_insert(where, tmp, ji->label);
3467 }
3468 }
3469 }
3470
3471 launch_data_t
3472 job_export_all(void)
3473 {
3474 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3475
3476 if (resp != NULL) {
3477 job_export_all2(root_jobmgr, resp);
3478 } else {
3479 (void)os_assumes_zero(errno);
3480 }
3481
3482 return resp;
3483 }
3484
/* Diagnostic (Apple-internal builds only): after a job dies, log any other
 * processes still carrying its process-group ID. Nothing is signalled here;
 * killing the group happens in job_reap().
 */
void
job_log_stray_pg(job_t j)
{
	pid_t *pids = NULL;
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	/* Fetch every PID in the dead job's process group. */
	if (job_assumes_zero_p(j, (kp_cnt = proc_listpgrppids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = pids[i];
		if (p_i == j->p) {
			/* Skip the job's own PID. */
			continue;
		} else if (p_i == 0 || p_i == 1) {
			/* Never report the kernel (0) or launchd itself (1). */
			continue;
		}

		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			/* ESRCH just means the process exited between listing and query. */
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		pid_t pp_i = proc.pbsi_ppid;
		const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
		const char *n = proc.pbsi_comm;

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
	}

out:
	free(pids);
}
3531
#if HAVE_SYSTEMSTATS
/* Periodic timer target (armed in systemstats_is_enabled()): log performance
 * statistics for the whole job-manager tree.
 */
static void
systemstats_timer_callback(void)
{
	jobmgr_log_perf_statistics(root_jobmgr, true);
}
3538
/* Lazily initialize the systemstats writer and report whether it is usable.
 *
 * NOTE(review): while initialization keeps failing, the whole setup is
 * retried on every call (the static flag stays false) — presumably intended
 * so the store can become writable later; confirm.
 */
static bool
systemstats_is_enabled(void)
{
	static bool systemstats_enabled;

	if (!systemstats_enabled) {
		char *store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_LOGS, NULL);
		systemstats_enabled = systemstats_init(SYSTEMSTATS_WRITER_launchd, store);
		free(store);

		uint64_t interval;
		interval = systemstats_get_log_interval(SYSTEMSTATS_WRITER_launchd);

		/* Only PID 1 arms the periodic stats-logging timer. */
		if (pid1_magic && systemstats_enabled && interval) {
			jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)systemstats_timer_callback, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, interval, root_jobmgr));
		}
	}

	return systemstats_enabled;
}
#endif // HAVE_SYSTEMSTATS
3560
/* Reap a dead job: collect its exit status, log how it died, kill stray
 * process-group members, notify waiters and spawners, and reset the job's
 * per-instance state so it can be redispatched.
 */
void
job_reap(job_t j)
{
	bool is_system_bootstrapper = ((j->is_bootstrapper && pid1_magic) && !j->mgr->parentmgr);

	job_log(j, LOG_DEBUG, "Reaping");

	if (unlikely(j->weird_bootstrap)) {
		int64_t junk = 0;
		job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
	}

	if (j->fork_fd) {
		(void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
		j->fork_fd = 0;
	}

	/* Query dirty/idle-exit state before the PID becomes meaningless. */
	bool was_dirty = false;
	if (!(j->anonymous || j->implicit_reap)) {
		uint32_t flags = 0;
		(void)job_assumes_zero(j, proc_get_dirty(j->p, &flags));

		j->idle_exit = (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT);
		was_dirty = (flags & PROC_DIRTY_IS_DIRTY);

		job_log(j, LOG_DEBUG, "%sob exited %s.", j->idle_exit ? "Idle-exit j" : "J", was_dirty ? "while dirty" : "cleanly");
	}

	if (j->idle_exit && was_dirty) {
		if (j->jettisoned) {
			job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned while dirty. Will respawn immediately.");
			j->unthrottle = true;
			j->start_pending = true;
		} else {
			job_log(j, LOG_INFO, "Idle-exit job exited while dirty.");
		}
	} else if (j->idle_exit && j->jettisoned) {
		/* If an idle-exit job is jettisoned, then we shouldn't throttle its
		 * next respawn because it could not help when it exited. If it ran for
		 * the minimum runtime, then this doesn't really matter. If it ran for
		 * less than the minimum runtime, it will not be throttled.
		 *
		 * <rdar://problem/12098667>
		 */
		job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned. Will bypass throttle interval for next on-demand launch.");
		j->unthrottle = true;
	}

	if (j->anonymous) {
		/* We never waited on anonymous jobs, so there is no status to read. */
		j->last_exit_status = 0;
	} else {
		uint64_t rt = runtime_get_nanoseconds_since(j->start_time);
		j->trt += rt;

		job_log(j, LOG_PERF, "Last instance wall time: %06f", (double)rt / (double)NSEC_PER_SEC);
		j->nruns++;

		/* The job is dead. While the PID/PGID is still known to be valid, try
		 * to kill abandoned descendant processes.
		 */
		job_log_stray_pg(j);
		if (!j->abandon_pg) {
			if (unlikely(killpg2(j->p, SIGTERM) == -1 && errno != ESRCH)) {
				job_log(j, LOG_APPLEONLY, "Bug: 5487498");
			}
		}

		int r = -1;
		if (!j->implicit_reap) {
			/* If the shutdown monitor has suspended a task and not resumed it
			 * resumed it before exiting, the kernel will not clean up after the
			 * shutdown monitor. It will, instead, leave the task suspended and
			 * not process any pending signals on the event loop for the task.
			 *
			 * There are a variety of other kernel bugs that could prevent a
			 * process from exiting, usually having to do with faulty hardware
			 * or talking to misbehaving drivers that mark a thread as
			 * uninterruptible and deadlock/hang before unmarking it as such. So
			 * we have to work around that too.
			 *
			 * See <rdar://problem/9284889&9359725>.
			 */
			if (j->workaround9359725) {
				job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
				j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
			} else {
#if HAVE_SYSTEMSTATS
				int r2;
				struct rusage_info_v1 ri;
				r2 = job_assumes_zero(j, proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)&ri));
#endif
				if ((r = wait4(j->p, &j->last_exit_status, 0, NULL)) == -1) {
					job_log(j, LOG_ERR, "Reap failed. Assuming job exited: %d: %s", errno, strerror(errno));
					j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
				}

				if (j->idle_exit && j->jettisoned) {
					// Treat idle-exit jettisons as successful exit.
					//
					// <rdar://problem/13338973>
					(void)job_assumes_zero(j, WTERMSIG(j->last_exit_status));
					j->last_exit_status = W_EXITCODE(0, 0);
				}
#if HAVE_SYSTEMSTATS
				if (r2 == 0) {
					job_log_perf_statistics(j, &ri, j->last_exit_status);
				}
#endif
			}
		} else {
			job_log(j, LOG_INFO, "Job was implicitly reaped by the kernel.");
		}
	}

	if (j->exit_timeout) {
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}

	LIST_REMOVE(j, pid_hash_sle);
	if (!j->anonymous) {
		LIST_REMOVE(j, global_pid_hash_sle);
	}

	if (j->sent_signal_time) {
		uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);

		td_sec = td / NSEC_PER_SEC;
		td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

		job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
	}

	int exit_status = WEXITSTATUS(j->last_exit_status);
	if (WIFEXITED(j->last_exit_status) && exit_status != 0) {
		if (!j->did_exec && _launchd_support_system) {
			/* The child died before exec(3); its exit status is the errno of
			 * the failed exec attempt (ENOENT/ENOTDIR/ESRCH mean the binary
			 * may appear later, so arm an event to retry).
			 */
			xpc_object_t event = NULL;
			switch (exit_status) {
			case ENOENT:
			case ENOTDIR:
			case ESRCH:
				job_log(j, LOG_NOTICE, "Job failed to exec(3). Setting up event to tell us when to try again: %d: %s", exit_status, strerror(exit_status));
				event = xpc_dictionary_create(NULL, NULL, 0);
				xpc_dictionary_set_string(event, "Executable", j->prog ? j->prog : j->argv[0]);
				if (j->mach_uid) {
					xpc_dictionary_set_uint64(event, "UID", j->mach_uid);
				} else if (j->username) {
					xpc_dictionary_set_string(event, "UserName", j->username);
				}

				if (j->groupname) {
					xpc_dictionary_set_string(event, "GroupName", j->groupname);
				}

				(void)externalevent_new(j, _launchd_support_system, j->label, event, 0);
				xpc_release(event);

				j->waiting4ok = true;
				/* This break was previously missing: the retryable cases fell
				 * through and also logged the contradictory "weird reason"
				 * message below.
				 */
				break;
			default:
				job_log(j, LOG_NOTICE, "Job failed to exec(3) for weird reason: %d", exit_status);
			}
		} else {
			/* Simplified: the enclosing condition already guarantees a
			 * non-zero exit status, so the level is always LOG_ERR.
			 */
			job_log(j, LOG_ERR, "Exited with code: %d", exit_status);
		}
	}

	if (WIFSIGNALED(j->last_exit_status)) {
		int s = WTERMSIG(j->last_exit_status);
		if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
			job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
		} else if (!(j->stopped || j->clean_kill || j->jettisoned)) {
			switch (s) {
			// Signals which indicate a crash.
			case SIGILL:
			case SIGABRT:
			case SIGFPE:
			case SIGBUS:
			case SIGSEGV:
			case SIGSYS:
			/* If the kernel has posted NOTE_EXIT and the signal sent to the process was
			 * SIGTRAP, assume that it's a crash.
			 */
			case SIGTRAP:
				j->crashed = true;
				job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
				break;
			default:
				job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
				break;
			}

			if (is_system_bootstrapper && j->crashed) {
				job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
			}
		}
	}

	j->reaped = true;

	/* On crash or failed exec, drain/reset the job's Mach service ports as
	 * each service requested.
	 */
	struct machservice *msi = NULL;
	if (j->crashed || !(j->did_exec || j->anonymous)) {
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
				machservice_drain_port(msi);
			}

			if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
				machservice_resetport(j, msi);
			}
		}
	}

	/* HACK: Essentially duplicating the logic directly above. But this has
	 * gotten really hairy, and I don't want to try consolidating it right now.
	 */
	if (j->xpc_service && !j->xpcproxy_did_exec) {
		job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
		SLIST_FOREACH(msi, &j->machservices, sle) {
			/* Drain the messages but do not reset the port. If xpcproxy could
			 * not exec(3), then we don't want to continue trying, since there
			 * is very likely a serious configuration error with the service.
			 *
			 * The above comment is weird. I originally said we should drain
			 * messages but not reset the port, but that's exactly what we do
			 * below, and I'm not sure which is the mistake, the comment or the
			 * actual behavior.
			 *
			 * Since it's always been this way, I'll assume that the comment is
			 * incorrect, but I'll leave it in place just to remind myself to
			 * actually look into it at some point.
			 *
			 * <rdar://problem/8986802>
			 */
			if (msi->upfront && job_assumes(j, !msi->isActive)) {
				machservice_resetport(j, msi);
			}
		}
	}

	/* Forcibly resume any per-user launchds this job suspended and never
	 * resumed before dying.
	 */
	struct suspended_peruser *spi = NULL;
	while ((spi = LIST_FIRST(&j->suspended_perusers))) {
		job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
		spi->j->peruser_suspend_count--;
		if (spi->j->peruser_suspend_count == 0) {
			job_dispatch(spi->j, false);
		}
		LIST_REMOVE(spi, sle);
		free(spi);
	}

	if (j->exit_status_dest) {
		errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
		if (errno && errno != MACH_SEND_INVALID_DEST) {
			(void)job_assumes_zero(j, errno);
		}

		j->exit_status_dest = MACH_PORT_NULL;
	}

	if (j->spawn_reply_port) {
		/* If the child never called exec(3), we must send a spawn() reply so
		 * that the requestor can get exit status from it. If we fail to send
		 * the reply for some reason, we have to deallocate the exit status port
		 * ourselves.
		 */
		kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
		if (kr) {
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			}

			(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
		}

		j->exit_status_port = MACH_PORT_NULL;
		j->spawn_reply_port = MACH_PORT_NULL;
	}

	if (j->anonymous) {
		total_anon_children--;
		if (j->holds_ref) {
			job_log(j, LOG_PERF, "Anonymous job exited holding reference.");
			runtime_del_ref();
		}
	} else {
		job_log(j, LOG_PERF, "Job exited.");
		runtime_del_ref();
		total_children--;
	}

	if (j->has_console) {
		launchd_wsp = 0;
	}

	if (j->shutdown_monitor) {
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
		_launchd_shutdown_monitor = NULL;
		j->shutdown_monitor = false;
	}

	if (!j->anonymous) {
		j->mgr->normal_active_cnt--;
	}
	/* Reset per-instance state so the job can be started again cleanly. */
	j->sent_signal_time = 0;
	j->sent_sigkill = false;
	j->clean_kill = false;
	j->event_monitor_ready2signal = false;
	j->p = 0;
	j->uniqueid = 0;
}
3875
3876 void
3877 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3878 {
3879 jobmgr_t jmi, jmn;
3880 job_t ji, jn;
3881
3882 if (jm->shutting_down) {
3883 return;
3884 }
3885
3886 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3887 jobmgr_dispatch_all(jmi, newmounthack);
3888 }
3889
3890 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3891 if (newmounthack && ji->start_on_mount) {
3892 ji->start_pending = true;
3893 }
3894
3895 job_dispatch(ji, false);
3896 }
3897 }
3898
/* Job j's state changed: dispatch any jobs whose KeepAlive semaphores watch
 * it (OtherJobEnabled/OtherJobDisabled entries matching j's label).
 */
void
job_dispatch_curious_jobs(job_t j)
{
	job_t ji = NULL, jt = NULL;
	/* _SAFE iteration: dispatching a curious job can remove it from the list. */
	SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
		struct semaphoreitem *si = NULL;
		SLIST_FOREACH(si, &ji->semaphores, sle) {
			if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
				continue;
			}

			if (strcmp(si->what, j->label) == 0) {
				job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);

				if (!ji->removing) {
					job_dispatch(ji, false);
				} else {
					job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
				}

				/* ji could be removed here, so don't do anything with it or its semaphores
				 * after this point.
				 */
				break;
			}
		}
	}
}
3927
/* Decide what to do with a job right now: remove it if useless, start it if
 * kickstarted or its KeepAlive criteria demand it, otherwise arm its watches.
 * Returns j, or NULL when the job was not dispatched (and possibly removed —
 * callers must not reuse a pointer after a NULL return).
 */
job_t
job_dispatch(job_t j, bool kickstart)
{
	// Don't dispatch a job if it has no audit session set.
	if (!uuid_is_null(j->expected_audit_uuid)) {
		job_log(j, LOG_DEBUG, "Job is still awaiting its audit session UUID. Not dispatching.");
		return NULL;
	}
	if (j->alias) {
		job_log(j, LOG_DEBUG, "Job is an alias. Not dispatching.");
		return NULL;
	}

	/* Set in job_reap() when exec(3) failed retryably; we wait for an event. */
	if (j->waiting4ok) {
		job_log(j, LOG_DEBUG, "Job cannot exec(3). Not dispatching.");
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* Mirrors the hand-of-god gate in jobmgr_import2(): only jobs owned by
	 * the god process's user may be dispatched in this mode.
	 */
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!job_assumes(j, _launchd_embedded_god->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	/*
	 * The whole job removal logic needs to be consolidated. The fact that
	 * a job can be removed from just about anywhere makes it easy to have
	 * stale pointers left behind somewhere on the stack that might get
	 * used after the deallocation. In particular, during job iteration.
	 *
	 * This is a classic example. The act of dispatching a job may delete it.
	 */
	if (!job_active(j)) {
		if (job_useless(j)) {
			job_log(j, LOG_DEBUG, "Job is useless. Removing.");
			job_remove(j);
			return NULL;
		}
		if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
			job_log(j, LOG_DEBUG, "Per-user launchd is suspended. Not dispatching.");
			return NULL;
		}

		if (kickstart || job_keepalive(j)) {
			job_log(j, LOG_DEBUG, "%starting job", kickstart ? "Kicks" : "S");
			job_start(j);
		} else {
			job_log(j, LOG_DEBUG, "Watching job.");
			job_watch(j);
		}
	} else {
		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job: %s.", job_active(j));
	}

	return j;
}
3995
/* Send SIGKILL to the job's process and arm a one-shot timer so we notice if
 * the kill does not take effect. No-op for dead or anonymous jobs.
 */
void
job_kill(job_t j)
{
	if (unlikely(!j->p || j->anonymous)) {
		return;
	}

	(void)job_assumes_zero_p(j, kill2(j->p, SIGKILL));
	j->sent_sigkill = true;

	/* One-shot watchdog: fires if the process survives the SIGKILL window. */
	(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j));

	job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
}
4010
4011 void
4012 job_open_shutdown_transaction(job_t j)
4013 {
4014 int rv = proc_set_dirty(j->p, true);
4015 if (rv != 0) {
4016 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
4017 j->dirty_at_shutdown = false;
4018 }
4019 }
4020
4021 void
4022 job_close_shutdown_transaction(job_t j)
4023 {
4024 if (j->dirty_at_shutdown) {
4025 job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
4026 (void)job_assumes_zero(j, proc_set_dirty(j->p, false));
4027 j->dirty_at_shutdown = false;
4028 }
4029 }
4030
/* Diagnostic (Apple-internal builds only): log any children of this job that
 * were forked but never exec(3)'d — i.e. raw fork(2) usage we want developers
 * to replace with posix_spawn*()/pthreads/launchd.
 */
void
job_log_children_without_exec(job_t j)
{
	pid_t *pids = NULL;
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal || j->anonymous || j->per_user) {
		return;
	}

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	/* List the job's direct children while its PID is still valid. */
	if (job_assumes_zero_p(j, (kp_cnt = proc_listchildpids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			/* ESRCH just means the child exited between listing and query. */
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}
		/* P_EXEC set means this child did exec(3); it is not interesting. */
		if (proc.pbsi_flags & P_EXEC) {
			continue;
		}

		job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
	}

out:
	free(pids);
}
4067
// EVFILT_PROC handler for a single job: reacts to exec(), fork() and exit
// notifications delivered by the kernel for the job's process.
void
job_callback_proc(job_t j, struct kevent *kev)
{
	bool program_changed = false;
	int fflags = kev->fflags;

	job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
	log_kevent_struct(LOG_DEBUG, kev, 0);

	if (fflags & NOTE_EXEC) {
		program_changed = true;

		if (j->anonymous) {
			// An adopted (anonymous) process exec()ed a new image: rename the
			// job after the new program so logs stay meaningful, and re-hash
			// it under the new label.
			struct proc_bsdshortinfo proc;
			if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
				char newlabel[1000];

				snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);

				job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);

				// NOTE(review): strcpy() assumes the original label buffer is
				// at least as large as newlabel — presumably guaranteed by how
				// anonymous labels are sized at job creation; verify there.
				LIST_REMOVE(j, label_hash_sle);
				strcpy((char *)j->label, newlabel);

				jobmgr_t where2put = root_jobmgr;
				if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
					where2put = j->mgr;
				}
				LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
			} else if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
		} else {
			if (j->spawn_reply_port) {
				// Complete a pending spawn request now that the child has
				// exec()ed; on send failure, close our receive right for the
				// exit-status port since no one will use it.
				errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
				if (errno) {
					if (errno != MACH_SEND_INVALID_DEST) {
						(void)job_assumes_zero(j, errno);
					}
					(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
				}

				j->spawn_reply_port = MACH_PORT_NULL;
				j->exit_status_port = MACH_PORT_NULL;
			}

			// For XPC services the first exec() is xpcproxy itself; a second
			// exec() means the real service binary is now running.
			if (j->xpc_service && j->did_exec) {
				j->xpcproxy_did_exec = true;
			}

			j->did_exec = true;
			job_log(j, LOG_DEBUG, "Program changed");
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
		job_log_children_without_exec(j);
	}

	if (fflags & NOTE_EXIT) {
		// With NOTE_EXIT_DETAIL requested, kev->data carries flags describing
		// why the process exited.
		if (kev->data & NOTE_EXIT_DECRYPTFAIL) {
			j->fpfail = true;
			job_log(j, LOG_WARNING, "FairPlay decryption failed on binary for job.");
		} else if (kev->data & NOTE_EXIT_MEMORY) {
			j->jettisoned = true;
			job_log(j, LOG_INFO, "Job was killed due to memory pressure.");
		}

		job_reap(j);

		if (j->anonymous) {
			// Anonymous jobs have no configuration to respawn from; drop them.
			job_remove(j);
			j = NULL;
		} else {
			// Tear down any debugger-attach waiters targeting this PID.
			struct waiting4attach *w4ai = NULL;
			struct waiting4attach *w4ait = NULL;
			LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
				if (w4ai->dest == (pid_t)kev->ident) {
					waiting4attach_delete(j->mgr, w4ai);
				}
			}

			// Re-dispatch so KeepAlive/on-demand policy decides what happens next.
			(void)job_dispatch(j, false);
		}
	}
}
4155
// EVFILT_TIMER handler for a single job. The timer ident encodes which of the
// job's timers fired: the job pointer itself (respawn throttle), its
// semaphores, its start interval, or its exit timeout.
void
job_callback_timer(job_t j, void *ident)
{
	if (j == ident) {
		// Respawn-throttle timer expired: force a start attempt now.
		job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		// StartInterval fired: mark a pending start and let dispatch decide.
		job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (!job_assumes(j, j->p != 0)) {
			return;
		}

		if (j->sent_sigkill) {
			// SIGKILL was already sent and the process *still* hasn't exited.
			uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);

			td /= NSEC_PER_SEC;
			// For a non-clean kill, subtract the exit timeout to report only
			// the time elapsed since the SIGKILL itself.
			td -= j->clean_kill ? 0 : j->exit_timeout;

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
			j->workaround9359725 = true;

			// This basically has to be done off the main thread. We have no
			// mechanism for draining the main queue in our run loop (like CF
			// does), and the kevent mechanism wants an object to be associated
			// as the callback. So we just create a dispatch source and reap the
			// errant PID whenever we can. Note that it is not safe for us to do
			// any logging in this block, since logging requires exclusive
			// access to global data structures that is only protected by the
			// main thread.
			dispatch_source_t hack_13570156 = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, j->p, DISPATCH_PROC_EXIT, dispatch_get_global_queue(0, 0));
			dispatch_source_set_event_handler(hack_13570156, ^{
				pid_t pid = (pid_t)dispatch_source_get_handle(hack_13570156);

				int status = 0;
				(void)waitpid(pid, &status, 0);
				dispatch_release(hack_13570156);
			});

			dispatch_resume(hack_13570156);

			if (launchd_trap_sigkill_bugs) {
				job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			// Synthesize a NOTE_EXIT event so the normal reaping path runs.
			struct kevent bogus_exit;
			EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
			jobmgr_callback(j->mgr, &bogus_exit);
		} else {
			// Exit timeout elapsed after SIGTERM: escalate to SIGKILL.
			if (unlikely(j->debug_before_kill)) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
			job_kill(j);
		}
	} else {
		job_log(j, LOG_ERR, "Unrecognized job timer callback: %p", ident);
	}
}
4223
4224 void
4225 job_callback_read(job_t j, int ident)
4226 {
4227 if (ident == j->stdin_fd) {
4228 job_dispatch(j, true);
4229 } else {
4230 socketgroup_callback(j);
4231 }
4232 }
4233
void
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
{
	jobmgr_t submgr;
	job_t match;

	// Depth-first: let every child manager claim this PID before we do.
	SLIST_FOREACH(submgr, &jm->submgrs, sle) {
		jobmgr_reap_bulk(submgr, kev);
	}

	match = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false);
	if (match != NULL) {
		kev->udata = match;
		job_callback(match, kev);
	}
}
4249
// Central kevent dispatcher for a job manager: handles bulk process reaping,
// signals, filesystem (mount-table) events, timers and vnode events.
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	jobmgr_t jm = obj;

#if TARGET_OS_EMBEDDED
	int flag2check = VQ_MOUNT;
#else
	int flag2check = VQ_UPDATE;
#endif

	switch (kev->filter) {
	case EVFILT_PROC:
		jobmgr_reap_bulk(jm, kev);
		root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:
			jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		case SIGUSR2:
			// Turn on all logging.
			launchd_log_perf = true;
			launchd_log_debug = true;
			launchd_log_shutdown = true;
			/* Hopefully /var is available by this point. If not, uh, oh well.
			 * It's just a debugging facility.
			 */
			return jobmgr_log_perf_statistics(jm, false);
		case SIGINFO:
			return jobmgr_log_perf_statistics(jm, true);
		default:
			jobmgr_log(jm, LOG_ERR, "Unrecognized signal: %lu: %s", kev->ident, strsignal(kev->ident));
		}
		break;
	case EVFILT_FS:
		// A mount-table update may mean /var/log just became writable.
		if (kev->fflags & flag2check) {
			if (!launchd_var_available) {
				struct stat sb;
				if (stat("/var/log", &sb) == 0 && (sb.st_mode & S_IWUSR)) {
					launchd_var_available = true;
				}
			}
		} else if (kev->fflags & VQ_MOUNT) {
			// A new mount may satisfy jobs waiting on paths: re-dispatch all.
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		// Timers are disambiguated by the address they were armed with.
		if (kev->ident == (uintptr_t)&sorted_calendar_events) {
			calendarinterval_callback();
		} else if (kev->ident == (uintptr_t)jm) {
			jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
			jobmgr_still_alive_with_check(jm);
		} else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
			jobmgr_do_garbage_collection(jm);
		} else if (kev->ident == (uintptr_t)&launchd_runtime_busy_time) {
			jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
			if (jobmgr_assumes_zero(jm, runtime_busy_cnt) == 0) {
				return launchd_shutdown();
			}
#if HAVE_SYSTEMSTATS
		} else if (kev->ident == (uintptr_t)systemstats_timer_callback) {
			systemstats_timer_callback();
#endif
		}
		break;
	case EVFILT_VNODE:
		if (kev->ident == (uintptr_t)s_no_hang_fd) {
			// autofs trigger device appeared: swap our placeholder fd for it.
			int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
			if (unlikely(_no_hang_fd != -1)) {
				jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
				(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL));
				(void)jobmgr_assumes_zero_p(root_jobmgr, runtime_close(s_no_hang_fd));
				s_no_hang_fd = _fd(_no_hang_fd);
			}
		} else if (pid1_magic && launchd_console && kev->ident == (uintptr_t)fileno(launchd_console)) {
			// The console device changed out from under us: reopen it.
			int cfd = -1;
			if (jobmgr_assumes_zero_p(jm, cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1) {
				_fd(cfd);
				if (!(launchd_console = fdopen(cfd, "w"))) {
					(void)jobmgr_assumes_zero(jm, errno);
					(void)close(cfd);
				}
			}
		}
		break;
	default:
		jobmgr_log(jm, LOG_ERR, "Unrecognized kevent filter: %hd", kev->filter);
	}
}
4344
// Per-job kevent entry point: fans out to the handler for the event's filter.
void
job_callback(void *obj, struct kevent *kev)
{
	job_t j = obj;

	job_log(j, LOG_DEBUG, "Dispatching kevent callback.");

	switch (kev->filter) {
	case EVFILT_PROC:
		job_callback_proc(j, kev);
		break;
	case EVFILT_TIMER:
		job_callback_timer(j, (void *)kev->ident);
		break;
	case EVFILT_READ:
		job_callback_read(j, (int)kev->ident);
		break;
	case EVFILT_MACHPORT:
		(void)job_dispatch(j, true);
		break;
	default:
		job_log(j, LOG_ERR, "Unrecognized job callback filter: %hd", kev->filter);
	}
}
4365
// Start a job's process: enforce respawn throttling, fork, set up IPC and the
// exec-handshake socketpair, register the child with kqueue, and update all
// bookkeeping. The child side calls job_start_child() and never returns.
void
job_start(job_t j)
{
	uint64_t td;
	int spair[2];
	int execspair[2];
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXIT_DETAIL|NOTE_EXITSTATUS;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (unlikely(job_active(j))) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	if (!LIST_EMPTY(&j->mgr->attaches)) {
		job_log(j, LOG_DEBUG, "Looking for attachments for job: %s", j->label);
		(void)waiting4attach_find(j->mgr, j);
	}

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of the wall clock
	 * wherever possible.
	 */
	td = runtime_get_nanoseconds_since(j->start_time);
	td /= NSEC_PER_SEC;

	// Respawn throttle: if the job died too recently, defer the start with a
	// one-shot timer instead of spinning.
	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat && !j->unthrottle) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;
		/* We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */
		job_log(j, LOG_NOTICE, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j));
		job_ignore(j);
		return;
	}

	// Jobs with sockets or Mach services get an IPC socketpair to check in on.
	if (likely(!j->legacy_mach_job)) {
		sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_god;
	}

	if (sipc) {
		(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair));
	}

	// execspair synchronizes the child with the parent's kevent registration.
	(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair));

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		// fork() failed: retry in one second and release both socketpairs.
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j));
		job_ignore(j);

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			(void)job_assumes_zero(j, runtime_close(spair[1]));
		}
		break;
	case 0:
		// Child side.
		if (unlikely(_vproc_post_fork_ping())) {
			_exit(EXIT_FAILURE);
		}

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		// wait for our parent to say they've attached a kevent to us
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			// Hand the check-in fd to the child via the environment.
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		// Parent side: reset per-run state and register the child.
		j->start_time = runtime_get_opaque_time();

		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->did_exec = false;
		j->fpfail = false;
		j->jettisoned = false;
		j->xpcproxy_did_exec = false;
		j->checkedin = false;
		j->start_pending = false;
		j->reaped = false;
		j->crashed = false;
		j->stopped = false;
		j->workaround9359725 = false;
		j->implicit_reap = false;
		j->unthrottle = false;
		if (j->needs_kickoff) {
			j->needs_kickoff = false;

			if (SLIST_EMPTY(&j->semaphores)) {
				j->ondemand = false;
			}
		}

		if (j->has_console) {
			launchd_wsp = c;
		}

		job_log(j, LOG_PERF, "Job started.");
		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
		LIST_INSERT_HEAD(&managed_actives[ACTIVE_JOB_HASH(c)], j, global_pid_hash_sle);
		j->p = c;

		// proc_pidinfo() returns > 0 on success (bytes filled in).
		struct proc_uniqidentifierinfo info;
		if (proc_pidinfo(c, PROC_PIDUNIQIDENTIFIERINFO, 0, &info, PROC_PIDUNIQIDENTIFIERINFO_SIZE) != 0) {
			// ignore errors here, kevent_mod below will catch them and clean up
			j->uniqueid = info.p_uniqueid;
		}

		j->mgr->normal_active_cnt++;
		j->fork_fd = _fd(execspair[0]);
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[1]));
			ipc_open(_fd(spair[0]), j);
		}
		if (kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1) {
			job_ignore(j);
		} else {
			if (errno == ESRCH) {
				job_log(j, LOG_ERR, "Child was killed before we could attach a kevent.");
			} else {
				// Intentionally always logs: any other errno is a bug.
				(void)job_assumes(j, errno == ESRCH);
			}
			job_reap(j);

			/* If we have reaped this job within this same run loop pass, then
			 * it will be currently ignored. So if there's a failure to attach a
			 * kevent, we need to make sure that we watch the job so that we can
			 * respawn it.
			 *
			 * See <rdar://problem/10140809>.
			 */
			job_watch(j);
		}

#if HAVE_SYSTEMSTATS
		if (systemstats_is_enabled()) {
			/* We don't really *need* to make the full rusage call -- it
			 * will be mostly 0s and very small numbers. We only need
			 * ri_proc_start_abstime, because that's how we disambiguiate
			 * PIDs when they wrap around; and the UUID.
			 * In the future we should use the 64-bit process unique ID,
			 * so there's nothing to disambiguiate, and skip the full
			 * rusage call here.
			 *
			 * Well, the future is now.
			 */
			if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_LAUNCHD_SHOULD_LOG_JOB_START)) {
				job_log_perf_statistics(j, NULL, -3);
			}
		}
#endif
		j->wait4debugger_oneshot = false;
		if (likely(!j->stall_before_exec)) {
			job_uncork_fork(j);
		}
		break;
	}
}
4542
// Runs in the freshly-forked child: build argv, configure posix_spawn
// attributes (spawn flags, binprefs, jetsam, QoS, importance ports,
// quarantine, sandbox) and exec the job's program via POSIX_SPAWN_SETEXEC.
// Never returns; on failure _exit()s with errno.
void
job_start_child(job_t j)
{
	typeof(posix_spawn) *psf;
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC;
	int psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
	size_t binpref_out_cnt = 0;
	size_t i;

	(void)job_assumes_zero(j, posix_spawnattr_init(&spattr));

	job_setup_attributes(j);

	// A pending attach request routes non-XPC jobs through xpcproxy so the
	// debugger can pick them up.
	bool use_xpcproxy = false;
	struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
	if (w4a) {
		(void)setenv(XPC_SERVICE_ENV_ATTACHED, "1", 1);
		if (!j->xpc_service) {
			use_xpcproxy = true;
		}
	}

	if (use_xpcproxy) {
		argv = alloca(3 * sizeof(char *));
		argv[0] = "/usr/libexec/xpcproxy";
		argv[1] = "-debug";
		argv[2] = NULL;

		file2exec = argv[0];
	} else if (unlikely(j->argv && j->globargv)) {
		// Glob-expand each argument; gl_offs reserves slot 0 for file2exec.
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (likely(j->argv)) {
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	// Slot 0 (launchproxy/xpcproxy) is only used for inetd-compatible or
	// attach-shimmed jobs; everyone else execs their own argv[0].
	if (likely(!(j->inetcompat || use_xpcproxy))) {
		argv++;
	}

	if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
		if (!j->app) {
			job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		}
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

#if !TARGET_OS_EMBEDDED
	if (unlikely(j->disable_aslr)) {
		spflags |= _POSIX_SPAWN_DISABLE_ASLR;
	}
#endif
	spflags |= j->pstype;

	(void)job_assumes_zero(j, posix_spawnattr_setflags(&spattr, spflags));
	if (unlikely(j->j_binpref_cnt)) {
		(void)job_assumes_zero(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt));
		(void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

	psproctype = j->psproctype;
	(void)job_assumes_zero(j, posix_spawnattr_setprocesstype_np(&spattr, psproctype));

#if TARGET_OS_EMBEDDED
	/* Set jetsam attributes. POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY guards
	 * against a race which arises if, during spawn, an initial jetsam property
	 * update occurs before the values below are applied. In this case, the flag
	 * ensures that the subsequent change is ignored; the explicit update should
	 * be given priority.
	 */
	(void)job_assumes_zero(j, posix_spawnattr_setjetsam(&spattr,
	        POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY | (j->jetsam_memory_limit_background ? POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND : 0),
	        j->jetsam_priority, j->jetsam_memlimit));
#endif

	mach_port_array_t sports = NULL;
	mach_msg_type_number_t sports_cnt = 0;
	kern_return_t kr = vproc_mig_get_listener_port_rights(bootstrap_port, &sports, &sports_cnt);
	if (kr == 0 && sports_cnt) {
		/* For some reason, this SPI takes a count as a signed quantity. */
		(void)posix_spawnattr_set_importancewatch_port_np(&spattr, (int)sports_cnt, sports);

		/* All "count" parameters in MIG are counts of the array. So an array of
		 * mach_port_t containing 10 elements will have a count of ten, but it
		 * will occupy 40 bytes. So we must do the multiplication here to pass
		 * the correct size.
		 *
		 * Note that we do NOT release the send rights. We need them to be valid
		 * at the time they are passed to posix_spawn(2). When we exec(3) using
		 * posix_spawn(2), they'll be cleaned up anyway.
		 */
		mig_deallocate((vm_address_t)sports, sports_cnt * sizeof(sports[0]));
	} else if (kr != BOOTSTRAP_UNKNOWN_SERVICE) {
		(void)job_assumes_zero(j, kr);
	}

#if TARGET_OS_EMBEDDED
	if (!j->app || j->system_app) {
		(void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
	}
#else
	(void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
#endif

#if !TARGET_OS_EMBEDDED
	struct task_qos_policy qosinfo = {
		.task_latency_qos_tier = LATENCY_QOS_LAUNCH_DEFAULT_TIER,
		.task_throughput_qos_tier = THROUGHPUT_QOS_LAUNCH_DEFAULT_TIER,
	};

	kr = task_policy_set(mach_task_self(), TASK_BASE_QOS_POLICY, (task_policy_t)&qosinfo, TASK_QOS_POLICY_COUNT);
	(void)job_assumes_zero_p(j, kr);
#endif

#if HAVE_RESPONSIBILITY
	/* Specify which process is responsible for the new job. Per-app XPC
	 * services are the responsibility of the app. Other processes are
	 * responsible for themselves. This decision is final and also applies
	 * to the process's children, so don't initialize responsibility when
	 * starting a per-user launchd.
	 */
	if (j->mgr->req_pid) {
		responsibility_init2(j->mgr->req_pid, NULL);
	} else if (!j->per_user) {
		responsibility_init2(getpid(), j->prog ? j->prog : j->argv[0]);
	}
#endif

#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			// Fixed: job_assumes_zero() logs when its argument is NON-zero,
			// so the "== 0" comparison must be applied to the macro's return
			// value, not folded into its argument. The previous form
			// `job_assumes_zero(j, init(...) == 0)` logged a spurious
			// assumption failure on success and stayed silent on failure.
			if (job_assumes_zero(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz)) == 0) {
				(void)job_assumes_zero(j, qtn_proc_apply_to_self(qp));
			}
		}
	}
#endif

#if HAVE_SANDBOX
#if TARGET_OS_EMBEDDED
	struct sandbox_spawnattrs sbattrs;
	if (j->seatbelt_profile || j->container_identifier) {
		sandbox_spawnattrs_init(&sbattrs);
		if (j->seatbelt_profile) {
			sandbox_spawnattrs_setprofilename(&sbattrs, j->seatbelt_profile);
		}
		if (j->container_identifier) {
			sandbox_spawnattrs_setcontainer(&sbattrs, j->container_identifier);
		}
		(void)job_assumes_zero(j, posix_spawnattr_setmacpolicyinfo_np(&spattr, "Sandbox", &sbattrs, sizeof(sbattrs)));
	}
#else
	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (job_assumes_zero_p(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf)) == -1) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}
#endif
#endif

	// Absolute paths exec directly; otherwise search PATH via posix_spawnp.
	psf = j->prog ? posix_spawn : posix_spawnp;

	if (likely(!(j->inetcompat || use_xpcproxy))) {
		file2exec = j->prog ? j->prog : argv[0];
	}

	// POSIX_SPAWN_SETEXEC makes this behave like exec(): it only returns on
	// error, with the error code as the return value.
	errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);

#if HAVE_SANDBOX && !TARGET_OS_EMBEDDED
out_bad:
#endif
	_exit(errno);
}
4748
4749 void
4750 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
4751 {
4752 launch_data_t tmp;
4753 struct envitem *ei;
4754 job_t ji;
4755
4756 if (jm->parentmgr) {
4757 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4758 } else {
4759 char **tmpenviron = environ;
4760 for (; *tmpenviron; tmpenviron++) {
4761 char envkey[1024];
4762 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4763 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
4764 strncpy(envkey, *tmpenviron, sizeof(envkey));
4765 *(strchr(envkey, '=')) = '\0';
4766 launch_data_dict_insert(dict, s, envkey);
4767 }
4768 }
4769
4770 LIST_FOREACH(ji, &jm->jobs, sle) {
4771 SLIST_FOREACH(ei, &ji->global_env, sle) {
4772 if ((tmp = launch_data_new_string(ei->value))) {
4773 launch_data_dict_insert(dict, tmp, ei->key);
4774 }
4775 }
4776 }
4777 }
4778
4779 void
4780 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
4781 {
4782 struct envitem *ei;
4783 job_t ji;
4784
4785 if (jm->parentmgr) {
4786 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4787 }
4788
4789 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
4790 SLIST_FOREACH(ei, &ji->global_env, sle) {
4791 setenv(ei->key, ei->value, 1);
4792 }
4793 }
4794 }
4795
// On Apple-internal builds, log every process on the system whose real,
// effective, or saved UID matches this job's mach_uid but has no backing
// account — used to diagnose orphaned per-UID Mach bootstrap requests.
void
job_log_pids_with_weird_uids(job_t j)
{
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	pid_t *pids = NULL;
	uid_t u = j->mach_uid;
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	pids = malloc(len);
	if (!job_assumes(j, pids != NULL)) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);

	/* libproc actually has some serious performance drawbacks when used over sysctl(3) in
	 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
	 * one kernel call, libproc requires that we get a list of PIDs we're interested in
	 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
	 * struct back in a single call for each one.
	 *
	 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
	 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
	 * libproc could go stale before we call proc_pidinfo().
	 *
	 * Note that proc_list*() APIs return the number of PIDs given back, not the number
	 * of bytes written to the buffer.
	 */
	if (job_assumes_zero_p(j, (kp_cnt = proc_listallpids(pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		/* We perhaps should not log a bug here if we get ESRCH back, due to the race
		 * detailed above.
		 */
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		// Cache the UIDs before comparing; pids[i] may have gone stale.
		uid_t i_euid = proc.pbsi_uid;
		uid_t i_uid = proc.pbsi_ruid;
		uid_t i_svuid = proc.pbsi_svuid;
		pid_t i_pid = pids[i];

		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);

		// Temporarily disabled due to 5423935 and 4946119.
#if 0
		// Ask the accountless process to exit.
		(void)job_assumes_zero_p(j, kill2(i_pid, SIGTERM));
#endif
	}

out:
	free(pids);
}
4865
// getpwnam() wrapper for system daemons: first tries a lookup with
// opendirectoryd interaction disabled (to avoid IPC deadlocks during early
// boot), then falls back to a normal lookup. Returns NULL if the user is
// unknown; result points at getpwnam()'s per-thread buffer.
static struct passwd *
job_getpwnam(job_t j, const char *name)
{
	/*
	 * methodology for system daemons
	 *
	 * first lookup user record without any opendirectoryd interaction,
	 * we don't know what interprocess dependencies might be in flight.
	 * if that fails, we re-enable opendirectoryd interaction and
	 * re-issue the lookup. We have to disable the libinfo L1 cache
	 * otherwise libinfo will return the negative cache entry on the retry
	 */
#if !TARGET_OS_EMBEDDED
	struct passwd *pw = NULL;

	// Only PID 1's root job manager needs the deadlock-avoidance dance.
	if (pid1_magic && j->mgr == root_jobmgr) {
		// 1 == SEARCH_MODULE_FLAG_DISABLED
		si_search_module_set_flags("ds", 1);
		gL1CacheEnabled = false;

		pw = getpwnam(name);
		si_search_module_set_flags("ds", 0);
	}

	// Retry (or do the first lookup) with directory services enabled.
	if (pw == NULL) {
		pw = getpwnam(name);
	}

	return pw;
#else
#pragma unused (j)
	return getpwnam(name);
#endif
}
4900
// getgrnam() wrapper mirroring job_getpwnam(): try the lookup with
// opendirectoryd interaction (and the libinfo L1 cache) disabled first, then
// retry normally. See job_getpwnam() for the rationale.
static struct group *
job_getgrnam(job_t j, const char *name)
{
#if !TARGET_OS_EMBEDDED
	struct group *gr = NULL;

	if (pid1_magic && j->mgr == root_jobmgr) {
		// 1 == SEARCH_MODULE_FLAG_DISABLED
		si_search_module_set_flags("ds", 1);
		gL1CacheEnabled = false;

		gr = getgrnam(name);

		si_search_module_set_flags("ds", 0);
	}

	if (gr == NULL) {
		gr = getgrnam(name);
	}

	return gr;
#else
#pragma unused (j)
	return getgrnam(name);
#endif
}
4926
// Post-fork sanity check for non-root launchd instances: verify that the
// HOME/USER/LOGNAME environment variables still agree with the account we
// are actually running as (the account may have been edited or deleted
// while we were running). Currently logs instead of exiting on mismatch.
void
job_postfork_test_user(job_t j)
{
	// This function is all about 5201578

	const char *home_env_var = getenv("HOME");
	const char *user_env_var = getenv("USER");
	const char *logname_env_var = getenv("LOGNAME");
	uid_t tmp_uid, local_uid = getuid();
	gid_t tmp_gid, local_gid = getgid();
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	char loginname[2000];
	struct passwd *pwe;


	// USER and LOGNAME are expected to be set and identical.
	if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
					&& strcmp(user_env_var, logname_env_var) == 0)) {
		goto out_bad;
	}

	if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
		job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
		goto out_bad;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	tmp_uid = pwe->pw_uid;
	tmp_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	// Cross-check each cached value against the live environment/credentials.
	if (strcmp(loginname, logname_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
		goto out_bad;
	}
	if (strcmp(homedir, home_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
		goto out_bad;
	}
	if (local_uid != tmp_uid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'U', tmp_uid, local_uid);
		goto out_bad;
	}
	if (local_gid != tmp_gid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'G', tmp_gid, local_gid);
		goto out_bad;
	}

	return;
out_bad:
#if 0
	(void)job_assumes_zero_p(j, kill2(getppid(), SIGTERM));
	_exit(EXIT_FAILURE);
#else
	job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
#endif
}
4996
/* Drop privileges in the forked child before exec(3).
 *
 * Resolves the target account from j->username (preferred) or j->mach_uid,
 * then applies setlogin()/setgid()/initgroups()/setuid() in exactly that
 * order; setuid() must come last because after it we no longer have the
 * privilege to change anything else. Finally seeds TMPDIR/SHELL/HOME/USER/
 * LOGNAME without clobbering values the job configured itself (overwrite=0).
 * Any failure to become the requested user is fatal to the child (_exit).
 */
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	// Not root: we cannot switch identities; just verify the requested
	// account matches who we already are.
	if (getuid() != 0) {
		return job_postfork_test_user(j);
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = job_getpwnam(j, j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(ESRCH);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_log_pids_with_weird_uids(j);
			_exit(ESRCH);
		}
	} else {
		// No user requested: stay root.
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	// pw_expire == 0 means "never expires"; otherwise refuse to run jobs
	// for accounts whose expiry time has passed.
	if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	// Warn (but proceed) when the directory service maps the requested
	// name/UID to a different canonical account.
	if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	// An explicit GroupName overrides the account's primary group.
	if (j->groupname) {
		struct group *gre;

		if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(ESRCH);
		}

		desired_gid = gre->gr_gid;
	}

	if (job_assumes_zero_p(j, setlogin(loginname)) == -1) {
		_exit(EXIT_FAILURE);
	}

	if (job_assumes_zero_p(j, setgid(desired_gid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (likely(!j->no_init_groups)) {
#if 1
		if (job_assumes_zero_p(j, initgroups(loginname, desired_gid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#else
		/* Do our own little initgroups(). We do this to guarantee that we're
		 * always opted into dynamic group resolution in the kernel. initgroups(3)
		 * does not make this guarantee.
		 */
		int groups[NGROUPS], ngroups;

		// A failure here isn't fatal, and we'll still get data we can use.
		(void)job_assumes_zero_p(j, getgrouplist(j->username, desired_gid, groups, &ngroups));

		if (job_assumes_zero_p(j, syscall(SYS_initgroups, ngroups, groups, desired_uid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#endif
	}

	// Point of no return: after this we are the target user.
	if (job_assumes_zero_p(j, setuid(desired_uid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	// confstr() must run *after* setuid() so the per-user temp dir belongs
	// to the target user; r >= bufsize means the result was truncated.
	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (likely(r > 0 && r < sizeof(tmpdirpath))) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
5123
5124 void
5125 job_setup_attributes(job_t j)
5126 {
5127 struct limititem *li;
5128 struct envitem *ei;
5129
5130 if (unlikely(j->setnice)) {
5131 (void)job_assumes_zero_p(j, setpriority(PRIO_PROCESS, 0, j->nice));
5132 }
5133
5134 SLIST_FOREACH(li, &j->limits, sle) {
5135 struct rlimit rl;
5136
5137 if (job_assumes_zero_p(j, getrlimit(li->which, &rl) == -1)) {
5138 continue;
5139 }
5140
5141 if (li->sethard) {
5142 rl.rlim_max = li->lim.rlim_max;
5143 }
5144 if (li->setsoft) {
5145 rl.rlim_cur = li->lim.rlim_cur;
5146 }
5147
5148 if (setrlimit(li->which, &rl) == -1) {
5149 job_log_error(j, LOG_WARNING, "setrlimit()");
5150 }
5151 }
5152
5153 if (unlikely(!j->inetcompat && j->session_create)) {
5154 launchd_SessionCreate();
5155 }
5156
5157 if (unlikely(j->low_pri_io)) {
5158 (void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE));
5159 }
5160 if (j->low_priority_background_io) {
5161 (void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_DARWIN_BG, IOPOL_THROTTLE));
5162 }
5163 if (unlikely(j->rootdir)) {
5164 (void)job_assumes_zero_p(j, chroot(j->rootdir));
5165 (void)job_assumes_zero_p(j, chdir("."));
5166 }
5167
5168 job_postfork_become_user(j);
5169
5170 if (unlikely(j->workingdir)) {
5171 if (chdir(j->workingdir) == -1) {
5172 if (errno == ENOENT || errno == ENOTDIR) {
5173 job_log(j, LOG_ERR, "Job specified non-existent working directory: %s", j->workingdir);
5174 } else {
5175 (void)job_assumes_zero(j, errno);
5176 }
5177 }
5178 }
5179
5180 if (unlikely(j->setmask)) {
5181 umask(j->mask);
5182 }
5183
5184 if (j->stdin_fd) {
5185 (void)job_assumes_zero_p(j, dup2(j->stdin_fd, STDIN_FILENO));
5186 } else {
5187 job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
5188 }
5189 job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
5190 job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
5191
5192 jobmgr_setup_env_from_other_jobs(j->mgr);
5193
5194 SLIST_FOREACH(ei, &j->env, sle) {
5195 setenv(ei->key, ei->value, 1);
5196 }
5197
5198 #if !TARGET_OS_EMBEDDED
5199 if (j->jetsam_properties) {
5200 (void)job_assumes_zero(j, proc_setpcontrol(PROC_SETPC_TERMINATE));
5201 }
5202 #endif
5203
5204 #if TARGET_OS_EMBEDDED
5205 if (j->main_thread_priority != 0) {
5206 struct sched_param params;
5207 bzero(&params, sizeof(params));
5208 params.sched_priority = j->main_thread_priority;
5209 (void)job_assumes_zero_p(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params));
5210 }
5211 #endif
5212
5213 /*
5214 * We'd like to call setsid() unconditionally, but we have reason to
5215 * believe that prevents launchd from being able to send signals to
5216 * setuid children. We'll settle for process-groups.
5217 */
5218 if (getppid() != 1) {
5219 (void)job_assumes_zero_p(j, setpgid(0, 0));
5220 } else {
5221 (void)job_assumes_zero_p(j, setsid());
5222 }
5223 }
5224
5225 void
5226 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
5227 {
5228 int fd;
5229
5230 if (!path) {
5231 return;
5232 }
5233
5234 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
5235 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
5236 return;
5237 }
5238
5239 (void)job_assumes_zero_p(j, dup2(fd, target_fd));
5240 (void)job_assumes_zero(j, runtime_close(fd));
5241 }
5242
/* Compute the next fire time for ci and (re)insert it into the global
 * sorted_calendar_events list, which is kept ordered by when_next so the
 * single kernel timer below only ever needs to track the list head.
 */
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	// cron semantics: when both a weekday and a month-day are given, the
	// job fires at whichever comes first.
	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	// Sorted insert: stop at the first entry that fires after us.
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		// ci must want to fire after every other timer, or there are no timers

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	// Re-arm the shared one-shot absolute timer for the earliest event
	// (which may or may not be the ci we just inserted).
	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	if (job_assumes_zero_p(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr)) != -1) {
		char time_string[100];
		size_t time_string_len;

		// ctime_r() appends a '\n'; strip it before logging.
		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
5298
5299 bool
5300 jobmgr_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
5301 {
5302 jobmgr_t jm = ctx;
5303 jobmgr_log(jm, LOG_ERR, "%s", message);
5304
5305 return true;
5306 }
5307
5308 bool
5309 job_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
5310 {
5311 job_t j = ctx;
5312 job_log(j, LOG_ERR, "%s", message);
5313
5314 return true;
5315 }
5316
// ri: NULL = please sample j->p; non-NULL = use this sample
/* Emit a systemstats usage record for a real (non-anonymous), running job.
 * exit_status of -1 is used by callers for "still running" samples.
 */
void
job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status)
{
#if HAVE_SYSTEMSTATS
	// Anonymous jobs and jobs without a live pid have nothing to sample.
	if (j->anonymous || !j->p) {
		return;
	}
	if (!systemstats_is_enabled()) {
		return;
	}
	// Prefer the bundle identifier over the launchd label when available.
	const char *name;
	if (j->cfbundleidentifier) {
		name = j->cfbundleidentifier;
	} else {
		name = j->label;
	}
	int r = 0;
	struct rusage_info_v1 ris;
	if (ri == NULL) {
		// Caller wants us to take the sample ourselves.
		ri = &ris;
		r = proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)ri);
	}
	if (r == -1) {
		return;
	}
	job_log_systemstats(j->p, j->uniqueid, runtime_get_uniqueid(), j->mgr->req_pid, j->mgr->req_uniqueid, name, ri, exit_status);
#else
#pragma unused (j, ri, exit_status)
#endif
}
5348
#if HAVE_SYSTEMSTATS
// ri: NULL = don't write fields from ri; non-NULL = use this sample
/* Fill a systemstats_process_usage_s from the supplied identity fields and
 * optional rusage sample, gated on the systemstats API version compiled
 * against, then hand it to systemstats_write_process_usage().
 */
static
void
job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status)
{
	if (!systemstats_is_enabled()) {
		return;
	}

	struct systemstats_process_usage_s info;
	bzero(&info, sizeof(info));
	info.name = name;
	info.pid = pid;
	info.exit_status = exit_status;
	info.uid = getuid();
	// launchd is the parent of the jobs it reports on.
	info.ppid = getpid();
	info.responsible_pid = req_pid;

	if (likely(ri)) {
		info.macho_uuid = (const uint8_t *)&ri->ri_uuid;
		info.user_time = ri->ri_user_time;
		info.system_time = ri->ri_system_time;
		info.pkg_idle_wkups = ri->ri_pkg_idle_wkups;
		info.interrupt_wkups = ri->ri_interrupt_wkups;
		info.proc_start_abstime = ri->ri_proc_start_abstime;
		info.proc_exit_abstime = ri->ri_proc_exit_abstime;
#if SYSTEMSTATS_API_VERSION >= 20130319
		info.pageins = ri->ri_pageins;
		info.wired_size = ri->ri_wired_size;
		info.resident_size = ri->ri_resident_size;
		info.phys_footprint = ri->ri_phys_footprint;
		// info.purgeablesize = ???
#endif
#if SYSTEMSTATS_API_VERSION >= 20130328
		info.child_user_time = ri->ri_child_user_time;
		info.child_system_time = ri->ri_child_system_time;
		info.child_pkg_idle_wkups = ri->ri_child_pkg_idle_wkups;
		info.child_interrupt_wkups = ri->ri_child_interrupt_wkups;
		info.child_pageins = ri->ri_child_pageins;
		info.child_elapsed_abstime = ri->ri_child_elapsed_abstime;
#endif
	}
#if SYSTEMSTATS_API_VERSION >= 20130410
	info.uniqueid = uniqueid;
	info.parent_uniqueid = parent_uniqueid;
	info.responsible_uniqueid = req_uniqueid;
#endif
	systemstats_write_process_usage(&info);
}
#endif /* HAVE_SYSTEMSTATS */
5400
5401 struct waiting4attach *
5402 waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type)
5403 {
5404 size_t xtra = strlen(name) + 1;
5405
5406 struct waiting4attach *w4a = malloc(sizeof(*w4a) + xtra);
5407 if (!w4a) {
5408 return NULL;
5409 }
5410
5411 w4a->port = port;
5412 w4a->dest = dest;
5413 w4a->type = type;
5414 (void)strcpy(w4a->name, name);
5415
5416 if (dest) {
5417 LIST_INSERT_HEAD(&_launchd_domain_waiters, w4a, le);
5418 } else {
5419 LIST_INSERT_HEAD(&jm->attaches, w4a, le);
5420 }
5421
5422
5423 (void)jobmgr_assumes_zero(jm, launchd_mport_notify_req(port, MACH_NOTIFY_DEAD_NAME));
5424 return w4a;
5425 }
5426
5427 void
5428 waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a)
5429 {
5430 jobmgr_log(jm, LOG_DEBUG, "Canceling dead-name notification for waiter port: 0x%x", w4a->port);
5431
5432 LIST_REMOVE(w4a, le);
5433
5434 mach_port_t previous = MACH_PORT_NULL;
5435 (void)jobmgr_assumes_zero(jm, mach_port_request_notification(mach_task_self(), w4a->port, MACH_NOTIFY_DEAD_NAME, 0, MACH_PORT_NULL, MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous));
5436 if (previous) {
5437 (void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(previous));
5438 }
5439
5440 jobmgr_assumes_zero(jm, launchd_mport_deallocate(w4a->port));
5441 free(w4a);
5442 }
5443
5444 struct waiting4attach *
5445 waiting4attach_find(jobmgr_t jm, job_t j)
5446 {
5447 char *name2use = (char *)j->label;
5448 if (j->app) {
5449 struct envitem *ei = NULL;
5450 SLIST_FOREACH(ei, &j->env, sle) {
5451 if (strcmp(ei->key, XPC_SERVICE_RENDEZVOUS_TOKEN) == 0) {
5452 name2use = ei->value;
5453 break;
5454 }
5455 }
5456 }
5457
5458 struct waiting4attach *w4ai = NULL;
5459 LIST_FOREACH(w4ai, &jm->attaches, le) {
5460 if (strcmp(name2use, w4ai->name) == 0) {
5461 job_log(j, LOG_DEBUG, "Found attachment: %s", name2use);
5462 break;
5463 }
5464 }
5465
5466 return w4ai;
5467 }
5468
/* Core formatter behind job_log()/job_log_error(): prefixes the message
 * with errno text (and on embedded, the job label), attributes it to the
 * job and its manager, and hands it to launchd_vsyslog(). j may be NULL.
 */
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	const char *label2use = j ? j->label : "com.apple.launchd.job-unknown";
	const char *mgr2use = j ? j->mgr->name : "com.apple.launchd.jobmanager-unknown";
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	struct launchd_syslog_attr attr = {
		.from_name = launchd_label,
		.about_name = label2use,
		.session_name = mgr2use,
		.priority = pri,
		.from_uid = getuid(),
		.from_pid = getpid(),
		.about_pid = j ? j->p : 0,
	};

	/* Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(2), but before the exec*(3). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	// 200 bytes of headroom covers the errno/label decoration added below.
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s: %d: %s", msg, err, strerror(err));
#else
		snprintf(newmsg, newmsgsz, "(%s) %s: %d: %s", label2use, msg, err, strerror(err));
#endif
	} else {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s", msg);
#else
		snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
#endif
	}

	// Per-job Debug key: temporarily open the log mask so LOG_DEBUG
	// messages from this job get through, then restore it.
	if (j && unlikely(j->debug)) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	launchd_vsyslog(&attr, newmsg, ap);

	if (j && unlikely(j->debug)) {
		setlogmask(oldmask);
	}
}
5523
/* Variadic front end to job_logv() that appends strerror(errno) — use
 * immediately after a failing call, before errno can be clobbered.
 */
void
job_log_error(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, errno, msg, ap);
	va_end(ap);
}
5533
// Variadic front end to job_logv() with no errno decoration (err = 0).
void
job_log(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, 0, msg, ap);
	va_end(ap);
}
5543
#if 0
/* Compiled out: the errno-appending job-manager logger currently has no
 * call sites; kept for symmetry with job_log_error() should one appear.
 */
void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}
#endif
5555
/* Recursively dump perf/accounting information for this job manager, its
 * sub-managers, and every job they contain. When signal_children is set,
 * also poke per-user launchd instances with SIGINFO so they dump theirs.
 */
void
jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children)
{
#if HAVE_SYSTEMSTATS
	// Log information for kernel_task and pid 1 launchd.
	if (systemstats_is_enabled() && pid1_magic && jm == root_jobmgr) {
#if SYSTEMSTATS_API_VERSION >= 20130328
		if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_SHOULD_LOG_ENERGY_STATISTICS)) {
			systemstats_write_intel_energy_statistics(NULL);
		}
#else
		systemstats_write_intel_energy_statistics(NULL);
#endif
		job_log_systemstats(0, 0, 0, 0, 0, "com.apple.kernel", NULL, -1);
		job_log_systemstats(1, 1, 0, 1, 1, "com.apple.launchd", NULL, -1);
	}
#endif
	// Depth-first: report sub-managers before this manager's own jobs.
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_perf_statistics(jmi, signal_children);
	}

	if (jm->xpc_singleton) {
		jobmgr_log(jm, LOG_PERF, "XPC Singleton Domain: %s", jm->shortdesc);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
		jobmgr_log(jm, LOG_PERF, "XPC Private Domain: %s", jm->owner);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) {
		jobmgr_log(jm, LOG_PERF, "Created via bootstrap_subset()");
	}

	jobmgr_log(jm, LOG_PERF, "Jobs in job manager:");

	job_t ji = NULL;
	LIST_FOREACH(ji, &jm->jobs, sle) {
		job_log_perf_statistics(ji, NULL, -1);
		// strstr(...) == label checks for the prefix at position 0.
		if (unlikely(signal_children) && unlikely(strstr(ji->label, "com.apple.launchd.peruser.") == ji->label)) {
			jobmgr_log(jm, LOG_PERF, "Sending SIGINFO to peruser launchd %d", ji->p);
			kill(ji->p, SIGINFO);
		}
	}

	jobmgr_log(jm, LOG_PERF, "End of job list.");
}
5599
// Variadic front end to jobmgr_logv() with no errno decoration (err = 0).
void
jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, 0, msg, ap);
	va_end(ap);
}
5609
/* Log on behalf of a job manager, prefixing the message with the manager
 * name at each level and recursing up through parent managers so the root
 * emits a fully-qualified "parent: child: message" line. A NULL jm is
 * treated as the root manager.
 */
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	if (!jm) {
		jm = root_jobmgr;
	}

	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	// The manager name is folded into what becomes the *format* string for
	// the recursive call, so every '%' in it must be escaped as "%%".
	// Worst case doubles the name, hence the (len + 1) * 2 allocations.
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		// err was already folded into newmsg above; pass 0 upward. ap is
		// only consumed once, at the root, by launchd_vsyslog().
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct launchd_syslog_attr attr = {
			.from_name = launchd_label,
			.about_name = launchd_label,
			.session_name = jm->name,
			.priority = pri,
			.from_uid = getuid(),
			.from_pid = getpid(),
			.about_pid = getpid(),
		};

		launchd_vsyslog(&attr, newmsg, ap);
	}
}
5656
/* Context threaded through calendarinterval_new_from_obj_dict_walk():
 * the job being configured plus the struct tm being accumulated from the
 * StartCalendarInterval dictionary (tm_sec == -1 signals a parse error).
 */
struct cal_dict_walk {
	job_t j;
	struct tm tmptm;
};
5661
5662 void
5663 calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
5664 {
5665 struct cal_dict_walk *cdw = context;
5666 struct tm *tmptm = &cdw->tmptm;
5667 job_t j = cdw->j;
5668 int64_t val;
5669
5670 if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
5671 // hack to let caller know something went wrong
5672 tmptm->tm_sec = -1;
5673 return;
5674 }
5675
5676 val = launch_data_get_integer(obj);
5677
5678 if (val < 0) {
5679 job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
5680 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
5681 if (val > 59) {
5682 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
5683 tmptm->tm_sec = -1;
5684 } else {
5685 tmptm->tm_min = (typeof(tmptm->tm_min)) val;
5686 }
5687 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
5688 if (val > 23) {
5689 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
5690 tmptm->tm_sec = -1;
5691 } else {
5692 tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
5693 }
5694 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
5695 if (val < 1 || val > 31) {
5696 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
5697 tmptm->tm_sec = -1;
5698 } else {
5699 tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
5700 }
5701 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
5702 if (val > 7) {
5703 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
5704 tmptm->tm_sec = -1;
5705 } else {
5706 tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
5707 }
5708 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
5709 if (val > 12) {
5710 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
5711 tmptm->tm_sec = -1;
5712 } else {
5713 tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
5714 tmptm->tm_mon -= 1; // 4798263 cron compatibility
5715 }
5716 }
5717 }
5718
5719 bool
5720 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5721 {
5722 struct cal_dict_walk cdw;
5723
5724 cdw.j = j;
5725 memset(&cdw.tmptm, 0, sizeof(0));
5726
5727 cdw.tmptm.tm_min = -1;
5728 cdw.tmptm.tm_hour = -1;
5729 cdw.tmptm.tm_mday = -1;
5730 cdw.tmptm.tm_wday = -1;
5731 cdw.tmptm.tm_mon = -1;
5732
5733 if (!job_assumes(j, obj != NULL)) {
5734 return false;
5735 }
5736
5737 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5738 return false;
5739 }
5740
5741 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5742
5743 if (unlikely(cdw.tmptm.tm_sec == -1)) {
5744 return false;
5745 }
5746
5747 return calendarinterval_new(j, &cdw.tmptm);
5748 }
5749
5750 bool
5751 calendarinterval_new(job_t j, struct tm *w)
5752 {
5753 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5754
5755 if (!job_assumes(j, ci != NULL)) {
5756 return false;
5757 }
5758
5759 ci->when = *w;
5760 ci->job = j;
5761
5762 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
5763
5764 calendarinterval_setalarm(j, ci);
5765
5766 runtime_add_weak_ref();
5767
5768 return true;
5769 }
5770
/* Unregister a calendar trigger: remove it from both the job's own list
 * and the global sorted list, free it, and drop the weak runtime reference
 * taken in calendarinterval_new().
 */
void
calendarinterval_delete(job_t j, struct calendarinterval *ci)
{
	SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
	LIST_REMOVE(ci, global_sle);

	free(ci);

	runtime_del_weak_ref();
}
5781
5782 void
5783 calendarinterval_sanity_check(void)
5784 {
5785 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5786 time_t now = time(NULL);
5787
5788 if (unlikely(ci && (ci->when_next < now))) {
5789 (void)jobmgr_assumes_zero_p(root_jobmgr, raise(SIGUSR1));
5790 }
5791 }
5792
/* Timer kevent handler: fire every calendar event whose deadline has
 * arrived. The list is sorted by when_next, so we stop at the first
 * not-yet-due entry.
 */
void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	// _SAFE variant: calendarinterval_setalarm() re-inserts ci elsewhere
	// in this same list while we iterate.
	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		if (ci->when_next > now) {
			break;
		}

		// Remove, recompute the next occurrence, and re-insert in sorted
		// position (also re-arms the shared timer for the new head).
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
5813
5814 bool
5815 socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt)
5816 {
5817 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5818
5819 if (!job_assumes(j, sg != NULL)) {
5820 return false;
5821 }
5822
5823 sg->fds = calloc(1, fd_cnt * sizeof(int));
5824 sg->fd_cnt = fd_cnt;
5825
5826 if (!job_assumes(j, sg->fds != NULL)) {
5827 free(sg);
5828 return false;
5829 }
5830
5831 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
5832 strcpy(sg->name_init, name);
5833
5834 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5835
5836 runtime_add_weak_ref();
5837
5838 return true;
5839 }
5840
/* Close every fd in the group, unlink it from the job, and free it,
 * dropping the weak runtime reference taken in socketgroup_new().
 */
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		// 5480306
		if (job_assumes_zero(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			(void)job_assumes(j, unlink(sun->sun_path) != -1);
			// We might conditionally need to delete a directory here
		}
#endif
		(void)job_assumes_zero_p(j, runtime_close(sg->fds[i]));
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	runtime_del_weak_ref();
}
5869
5870 void
5871 socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
5872 {
5873 struct kevent kev[sg->fd_cnt];
5874 char buf[10000];
5875 unsigned int i, buf_off = 0;
5876
5877 for (i = 0; i < sg->fd_cnt; i++) {
5878 EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
5879 buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
5880 }
5881
5882 job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);
5883
5884 (void)job_assumes_zero_p(j, kevent_bulk_mod(kev, sg->fd_cnt));
5885
5886 for (i = 0; i < sg->fd_cnt; i++) {
5887 (void)job_assumes(j, kev[i].flags & EV_ERROR);
5888 errno = (typeof(errno)) kev[i].data;
5889 (void)job_assumes_zero(j, kev[i].data);
5890 }
5891 }
5892
// Stop watching the group's fds for readability (EV_DELETE).
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, false);
}
5898
// Start watching the group's fds for readability (EV_ADD).
void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, true);
}
5904
// A watched socket became readable: dispatch the job (kickstart = true).
void
socketgroup_callback(job_t j)
{
	job_dispatch(j, true);
}
5910
5911 bool
5912 envitem_new(job_t j, const char *k, const char *v, bool global)
5913 {
5914 if (global && !launchd_allow_global_dyld_envvars) {
5915 if (strncmp("DYLD_", k, sizeof("DYLD_") - 1) == 0) {
5916 job_log(j, LOG_ERR, "Ignoring global environment variable submitted by job (variable=value): %s=%s", k, v);
5917 return false;
5918 }
5919 }
5920
5921 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
5922
5923 if (!job_assumes(j, ei != NULL)) {
5924 return false;
5925 }
5926
5927 strcpy(ei->key_init, k);
5928 ei->value = ei->key_init + strlen(k) + 1;
5929 strcpy(ei->value, v);
5930
5931 if (global) {
5932 if (SLIST_EMPTY(&j->global_env)) {
5933 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5934 }
5935 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5936 } else {
5937 SLIST_INSERT_HEAD(&j->env, ei, sle);
5938 }
5939
5940 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
5941
5942 return true;
5943 }
5944
5945 void
5946 envitem_delete(job_t j, struct envitem *ei, bool global)
5947 {
5948 if (global) {
5949 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
5950 if (SLIST_EMPTY(&j->global_env)) {
5951 LIST_REMOVE(j, global_env_sle);
5952 }
5953 } else {
5954 SLIST_REMOVE(&j->env, ei, envitem, sle);
5955 }
5956
5957 free(ei);
5958 }
5959
5960 void
5961 envitem_setup(launch_data_t obj, const char *key, void *context)
5962 {
5963 job_t j = context;
5964
5965 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5966 return;
5967 }
5968
5969 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5970 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
5971 } else {
5972 job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5973 }
5974 }
5975
5976 bool
5977 limititem_update(job_t j, int w, rlim_t r)
5978 {
5979 struct limititem *li;
5980
5981 SLIST_FOREACH(li, &j->limits, sle) {
5982 if (li->which == w) {
5983 break;
5984 }
5985 }
5986
5987 if (li == NULL) {
5988 li = calloc(1, sizeof(struct limititem));
5989
5990 if (!job_assumes(j, li != NULL)) {
5991 return false;
5992 }
5993
5994 SLIST_INSERT_HEAD(&j->limits, li, sle);
5995
5996 li->which = w;
5997 }
5998
5999 if (j->importing_hard_limits) {
6000 li->lim.rlim_max = r;
6001 li->sethard = true;
6002 } else {
6003 li->lim.rlim_cur = r;
6004 li->setsoft = true;
6005 }
6006
6007 return true;
6008 }
6009
// Unlink and free one resource-limit record from job j.
void
limititem_delete(job_t j, struct limititem *li)
{
	SLIST_REMOVE(&j->limits, li, limititem, sle);

	free(li);
}
6017
#if HAVE_SANDBOX
/* launch_data dictionary iterator for SandboxFlags: only boolean values
 * are accepted, and only true values set a flag. Currently the only
 * recognized key is SandboxNamed.
 */
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	if (!launch_data_get_bool(obj)) {
		return;
	}

	if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
6038
6039 void
6040 limititem_setup(launch_data_t obj, const char *key, void *context)
6041 {
6042 job_t j = context;
6043 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
6044 rlim_t rl;
6045
6046 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
6047 return;
6048 }
6049
6050 rl = launch_data_get_integer(obj);
6051
6052 for (i = 0; i < limits_cnt; i++) {
6053 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
6054 break;
6055 }
6056 }
6057
6058 if (i == limits_cnt) {
6059 return;
6060 }
6061
6062 limititem_update(j, launchd_keys2limits[i].val, rl);
6063 }
6064
/* Decide whether an exited job has no further reason to exist — true means
 * the caller should garbage-collect it rather than keep it around for
 * relaunch. NOTE(review): exact caller semantics inferred from the log
 * messages; confirm against the job-removal path.
 */
bool
job_useless(job_t j)
{
	// One-shot jobs that have already run once are done — except a legacy
	// LaunchServices job still holding its port.
	if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
		if (j->legacy_LS_job && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->shutdown_monitor) {
		return false;
	} else if (j->mgr->shutting_down && !j->mgr->parentmgr) {
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		if (total_children == 0 && !j->anonymous) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
		}
		return true;
	} else if (j->legacy_mach_job) {
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	} else {
		/* If the job's executable does not have any valid architectures (for
		 * example, if it's a PowerPC-only job), then we don't even bother
		 * trying to relaunch it, as we have no reasonable expectation that
		 * the situation will change.
		 *
		 * <rdar://problem/9106979>
		 */
		if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
			job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
			return true;
		}
	}

	return false;
}
6109
/* Evaluate the job's KeepAlive criteria: returns true if any configured
 * condition currently demands the job be (re)started. The switch below
 * relies on deliberate fallthrough: each "positive" case sets
 * wanted_state = true and falls into its "negative" twin, which holds the
 * shared comparison.
 */
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
	bool is_not_kextd = (launchd_apple_internal || (strcmp(j->label, "com.apple.kextd") != 0));

	if (unlikely(j->mgr->shutting_down)) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
		return false;
	}

	if (unlikely(j->needs_kickoff)) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	// Pending messages on any of the job's Mach services demand a launch.
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}

	/* TODO: Coalesce external events and semaphore items, since they're basically
	 * the same thing.
	 */
	struct externalevent *ei = NULL;
	LIST_FOREACH(ei, &j->events, job_le) {
		if (ei->state == ei->wanted_state) {
			return true;
		}
	}

	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			// fall through — shares the network_up comparison below
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			// fall through — shares the good_exit comparison below
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case CRASHED:
			wanted_state = true;
			// fall through — shares the j->crashed comparison below
		case DID_NOT_CRASH:
			if (j->crashed == wanted_state) {
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			// fall through — shares the job_find() comparison below
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(NULL, si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			// fall through — shares the other_j->p comparison below
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(NULL, si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		}
	}

	return false;
}
6223
6224 const char *
6225 job_active(job_t j)
6226 {
6227 if (j->p && j->shutdown_monitor) {
6228 return "Monitoring shutdown";
6229 }
6230 if (j->p) {
6231 return "PID is still valid";
6232 }
6233
6234 if (j->priv_port_has_senders) {
6235 return "Privileged Port still has outstanding senders";
6236 }
6237
6238 struct machservice *ms;
6239 SLIST_FOREACH(ms, &j->machservices, sle) {
6240 /* If we've simulated an exit, we mark the job as non-active, even
6241 * though doing so will leave it in an unsafe state. We do this so that
6242 * shutdown can proceed. See <rdar://problem/11126530>.
6243 */
6244 if (!j->workaround9359725 && ms->recv && machservice_active(ms)) {
6245 job_log(j, LOG_INFO, "Mach service is still active: %s", ms->name);
6246 return "Mach service is still active";
6247 }
6248 }
6249
6250 return NULL;
6251 }
6252
6253 void
6254 machservice_watch(job_t j, struct machservice *ms)
6255 {
6256 if (ms->recv) {
6257 if (job_assumes_zero(j, runtime_add_mport(ms->port, NULL)) == KERN_INVALID_RIGHT) {
6258 ms->recv_race_hack = true;
6259 }
6260 }
6261 }
6262
6263 void
6264 machservice_ignore(job_t j, struct machservice *ms)
6265 {
6266 /* We only add ports whose receive rights we control into the port set, so
6267 * don't attempt to remove te service from the port set if we didn't put it
6268 * there in the first place. Otherwise, we could wind up trying to access a
6269 * bogus index (like MACH_PORT_DEAD) or zeroing a valid one out.
6270 *
6271 * <rdar://problem/10898014>
6272 */
6273 if (ms->recv) {
6274 (void)job_assumes_zero(j, runtime_remove_mport(ms->port));
6275 }
6276 }
6277
/* Destroy and recreate the service's port, giving it a brand-new receive
 * right. The sequence is order-sensitive: unhash, tear down old right,
 * bump generation, create new right, rehash under the new port name.
 */
void
machservice_resetport(job_t j, struct machservice *ms)
{
	// Unhash first: the port name is about to change.
	LIST_REMOVE(ms, port_hash_sle);
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	// NOTE(review): gen_num presumably lets stale check-ins be detected
	// against the recreated port — confirm against the check-in path.
	ms->gen_num++;
	(void)job_assumes_zero(j, launchd_mport_create_recv(&ms->port));
	(void)job_assumes_zero(j, launchd_mport_make_send(ms->port));
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
6290
/* Stamp the service's receive right with a kernel "context" value packed
 * from the owning program's basename. NOTE(review): presumably for
 * debugging/identification of ports — confirm consumers of the context.
 */
void
machservice_stamp_port(job_t j, struct machservice *ms)
{
	mach_port_context_t ctx = 0;
	// Prefer the explicit Program key; fall back to argv[0].
	char *where2get = j->prog ? j->prog : j->argv[0];

	// Strip any directory prefix so only the basename is stamped.
	char *prog = NULL;
	if ((prog = strrchr(where2get, '/'))) {
		prog++;
	} else {
		prog = where2get;
	}

	/* Deliberate strncpy: this packs the first sizeof(ctx) bytes of the name
	 * into an integer (zero-padded when shorter) — it is not building a
	 * NUL-terminated C string.
	 */
	(void)strncpy((char *)&ctx, prog, sizeof(ctx));
#if __LITTLE_ENDIAN__
#if __LP64__
	// Byte-swap so the stamped characters read in natural order.
	ctx = OSSwapBigToHostInt64(ctx);
#else
	ctx = OSSwapBigToHostInt32(ctx);
#endif
#endif

	(void)job_assumes_zero(j, mach_port_set_context(mach_task_self(), ms->port, ctx));
}
6315
/* Create and register a Mach service named `name` for job `j`.
 * If *serviceport is MACH_PORT_NULL, launchd creates the receive right and
 * returns a send right through *serviceport; otherwise the caller-supplied
 * send right is adopted and the service is considered already active.
 * pid_local marks the service as per-PID. Returns NULL on failure.
 */
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	/* Don't create new MachServices for dead ports. This is primarily for
	 * clients who use bootstrap_register2(). They can pass in a send right, but
	 * then that port can immediately go dead. Hilarity ensues.
	 *
	 * <rdar://problem/10898014>
	 */
	if (*serviceport == MACH_PORT_DEAD) {
		return NULL;
	}

	// The name is stored inline in trailing storage after the struct.
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
	if (!job_assumes(j, ms != NULL)) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->gen_num = 1;
	ms->per_pid = pid_local;

	if (likely(*serviceport == MACH_PORT_NULL)) {
		// We own the receive right; hand a send right back to the caller.
		if (job_assumes_zero(j, launchd_mport_create_recv(&ms->port)) != KERN_SUCCESS) {
			goto out_bad;
		}

		if (job_assumes_zero(j, launchd_mport_make_send(ms->port)) != KERN_SUCCESS) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		// Caller-supplied send right: the service is already being served.
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);

	jobmgr_t where2put = j->mgr;
	// XPC domains are separate from Mach bootstraps.
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		// With a flat namespace, non-subset services hash into the root manager.
		if (launchd_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2put = root_jobmgr;
		}
	}

	/* Don't allow MachServices added by multiple-instance jobs to be looked up
	 * by others. We could just do this with a simple bit, but then we'd have to
	 * uniquify the names ourselves to avoid collisions. This is just easier.
	 */
	if (!j->dedicated_instance) {
		LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	}
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	if (ms->recv) {
		machservice_stamp_port(j, ms);
	}

	job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);

	return ms;
out_bad2:
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
out_bad:
	free(ms);
	return NULL;
}
6386
6387 struct machservice *
6388 machservice_new_alias(job_t j, struct machservice *orig)
6389 {
6390 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
6391 if (job_assumes(j, ms != NULL)) {
6392 strcpy((char *)ms->name, orig->name);
6393 ms->alias = orig;
6394 ms->job = j;
6395
6396 LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
6397 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
6398 jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
6399 }
6400
6401 return ms;
6402 }
6403
6404 bootstrap_status_t
6405 machservice_status(struct machservice *ms)
6406 {
6407 ms = ms->alias ? ms->alias : ms;
6408 if (ms->isActive) {
6409 return BOOTSTRAP_STATUS_ACTIVE;
6410 } else if (ms->job->ondemand) {
6411 return BOOTSTRAP_STATUS_ON_DEMAND;
6412 } else {
6413 return BOOTSTRAP_STATUS_INACTIVE;
6414 }
6415 }
6416
/* Install the crash/guard/resource exception handler on target_task, or on
 * the host when launchd is PID 1 and no target task is given.
 * Handler precedence: per-job alternate handler service, then launchd's
 * internal handler, then the global exception server (if any).
 */
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (unlikely(j->alt_exc_handler)) {
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (likely(ms)) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (unlikely(j->internal_exc_handler)) {
		exc_port = runtime_get_kernel_port();
	} else if (unlikely(!exc_port)) {
		// No handler configured anywhere; nothing to install.
		return;
	}

	// Pick the architecture-appropriate thread state flavor.
#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (likely(target_task)) {
		kern_return_t kr = task_set_exception_ports(target_task, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f);
		if (kr) {
			// MACH_SEND_INVALID_DEST just means the task exited first.
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			} else {
				job_log(j, LOG_WARNING, "Task died before exception port could be set.");
			}
		}
	} else if (pid1_magic && the_exception_server) {
		// PID 1 with no target: install the handler host-wide.
		mach_port_t mhp = mach_host_self();
		(void)job_assumes_zero(j, host_set_exception_ports(mhp, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f));
		(void)job_assumes_zero(j, launchd_mport_deallocate(mhp));
	}
}
6462
6463 void
6464 job_set_exception_port(job_t j, mach_port_t port)
6465 {
6466 if (unlikely(!the_exception_server)) {
6467 the_exception_server = port;
6468 job_setup_exception_port(j, 0);
6469 } else {
6470 job_log(j, LOG_WARNING, "The exception server is already claimed!");
6471 }
6472 }
6473
6474 void
6475 machservice_setup_options(launch_data_t obj, const char *key, void *context)
6476 {
6477 struct machservice *ms = context;
6478 mach_port_t mhp = mach_host_self();
6479 int which_port;
6480 bool b;
6481
6482 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
6483 return;
6484 }
6485
6486 switch (launch_data_get_type(obj)) {
6487 case LAUNCH_DATA_INTEGER:
6488 which_port = (int)launch_data_get_integer(obj); // XXX we should bound check this...
6489 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
6490 switch (which_port) {
6491 case TASK_KERNEL_PORT:
6492 case TASK_HOST_PORT:
6493 case TASK_NAME_PORT:
6494 case TASK_BOOTSTRAP_PORT:
6495 /* I find it a little odd that zero isn't reserved in the header.
6496 * Normally Mach is fairly good about this convention...
6497 */
6498 case 0:
6499 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
6500 break;
6501 default:
6502 ms->special_port_num = which_port;
6503 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
6504 break;
6505 }
6506 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
6507 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
6508 (void)job_assumes_zero(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)));
6509 } else {
6510 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
6511 }
6512 }
6513 case LAUNCH_DATA_BOOL:
6514 b = launch_data_get_bool(obj);
6515 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
6516 ms->debug_on_close = b;
6517 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
6518 ms->reset = b;
6519 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
6520 ms->hide = b;
6521 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
6522 job_set_exception_port(ms->job, ms->port);
6523 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
6524 ms->kUNCServer = b;
6525 (void)job_assumes_zero(ms->job, host_set_UNDServer(mhp, ms->port));
6526 }
6527 break;
6528 case LAUNCH_DATA_STRING:
6529 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
6530 const char *option = launch_data_get_string(obj);
6531 if (strcasecmp(option, "One") == 0) {
6532 ms->drain_one_on_crash = true;
6533 } else if (strcasecmp(option, "All") == 0) {
6534 ms->drain_all_on_crash = true;
6535 }
6536 }
6537 break;
6538 case LAUNCH_DATA_DICTIONARY:
6539 if (launch_data_dict_get_count(obj) == 0) {
6540 job_set_exception_port(ms->job, ms->port);
6541 }
6542 break;
6543 default:
6544 break;
6545 }
6546
6547 (void)job_assumes_zero(ms->job, launchd_mport_deallocate(mhp));
6548 }
6549
/* launch_data_dict_iterate() callback for a job's MachServices dictionary:
 * register the service named `key` for the job in `context`, then apply any
 * per-service options.
 */
void
machservice_setup(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;
	struct machservice *ms;
	mach_port_t p = MACH_PORT_NULL;

	// Refuse to register a name that some other job already owns.
	if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
		job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
		return;
	}

	if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
		return;
	}

	ms->isActive = false;
	ms->upfront = true;

	// Per-service options live in a nested dictionary value.
	if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
		launch_data_dict_iterate(obj, machservice_setup_options, ms);
	}

	// NOTE(review): MACH_PORT_TEMPOWNER presumably relates to importance
	// donation/ownership transfer on this port — confirm against the Mach
	// port attribute documentation.
	kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
	(void)job_assumes_zero(j, kr);
}
6576
/* One shutdown garbage-collection pass over `jm` and (recursively) its
 * submanagers. Removes inactive jobs, stops active ones, and finally removes
 * the manager itself once everything is gone. Returns `jm`, or NULL when the
 * manager was removed.
 */
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	// Depth-first: collect submanagers before considering this manager.
	jobmgr_t jmi = NULL, jmn = NULL;
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	// Nothing to collect unless this manager is shutting down.
	if (!jm->shutting_down) {
		return jm;
	}

	if (SLIST_EMPTY(&jm->submgrs)) {
		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
		SLIST_FOREACH(jmi, &jm->submgrs, sle) {
			jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
		}
	}

	// Count of jobs still active that we are actually waiting on.
	size_t actives = 0;
	job_t ji = NULL, jn = NULL;
	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
		if (ji->anonymous) {
			continue;
		}

		// Let the shutdown monitor be up until the very end.
		if (ji->shutdown_monitor) {
			continue;
		}

		/* On our first pass through, open a transaction for all the jobs that
		 * need to be dirty at shutdown. We'll close these transactions once the
		 * jobs that do not need to be dirty at shutdown have all exited.
		 */
		if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
			job_open_shutdown_transaction(ji);
		}

		const char *active = job_active(ji);
		if (!active) {
			job_remove(ji);
		} else {
			job_log(ji, LOG_DEBUG, "Job is active: %s", active);
			job_stop(ji);

			// Dirty-at-shutdown jobs don't count yet; they get to linger.
			if (!ji->dirty_at_shutdown) {
				actives++;
			}

			if (ji->clean_kill) {
				job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
			} else {
				job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
			}
		}
	}

	jm->shutdown_jobs_dirtied = true;
	if (actives == 0) {
		if (!jm->shutdown_jobs_cleaned) {
			/* Once all normal jobs have exited, we clean the dirty-at-shutdown
			 * jobs and make them into normal jobs so that the above loop will
			 * handle them appropriately.
			 */
			LIST_FOREACH(ji, &jm->jobs, sle) {
				if (ji->anonymous) {
					continue;
				}

				if (!job_active(ji)) {
					continue;
				}

				if (ji->shutdown_monitor) {
					continue;
				}

				job_close_shutdown_transaction(ji);
				actives++;
			}

			jm->shutdown_jobs_cleaned = true;
		}

		if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
			/* We may be in a situation where the shutdown monitor is all that's
			 * left, in which case we want to stop it. Like dirty-at-shutdown
			 * jobs, we turn it back into a normal job so that the main loop
			 * treats it appropriately.
			 *
			 * See:
			 * <rdar://problem/10756306>
			 * <rdar://problem/11034971>
			 * <rdar://problem/11549541>
			 */
			if (jm->monitor_shutdown && _launchd_shutdown_monitor) {
				/* The rest of shutdown has completed, so we can kill the shutdown
				 * monitor now like it was any other job.
				 */
				_launchd_shutdown_monitor->shutdown_monitor = false;

				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
				job_stop(_launchd_shutdown_monitor);
				_launchd_shutdown_monitor = NULL;
			} else {
				jobmgr_log(jm, LOG_DEBUG, "Removing.");
				jobmgr_remove(jm);
				return NULL;
			}
		}
	}

	return jm;
}
6694
6695 void
6696 jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6697 {
6698 /* I maintain that stray processes should be at the mercy of launchd during
6699 * shutdown, but nevertheless, things like diskimages-helper can stick
6700 * around, and SIGKILLing them can result in data loss. So we send SIGTERM
6701 * to all the strays and don't wait for them to exit before moving on.
6702 *
6703 * See rdar://problem/6562592
6704 */
6705 size_t i = 0;
6706 for (i = 0; i < np; i++) {
6707 if (p[i] != 0) {
6708 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
6709 (void)jobmgr_assumes_zero_p(jm, kill2(p[i], SIGTERM));
6710 }
6711 }
6712 }
6713
6714 void
6715 jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
6716 {
6717 size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6718 pid_t *pids = NULL;
6719 int i = 0, kp_cnt = 0;
6720
6721 if (likely(jm->parentmgr || !pid1_magic)) {
6722 return;
6723 }
6724
6725 if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
6726 return;
6727 }
6728
6729 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6730
6731 if (jobmgr_assumes_zero_p(jm, (kp_cnt = proc_listallpids(pids, len))) == -1) {
6732 goto out;
6733 }
6734
6735 pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
6736 for (i = 0; i < kp_cnt; i++) {
6737 struct proc_bsdshortinfo proc;
6738 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6739 if (errno != ESRCH) {
6740 (void)jobmgr_assumes_zero(jm, errno);
6741 }
6742
6743 kp_skipped++;
6744 continue;
6745 }
6746
6747 pid_t p_i = pids[i];
6748 pid_t pp_i = proc.pbsi_ppid;
6749 pid_t pg_i = proc.pbsi_pgid;
6750 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6751 const char *n = proc.pbsi_comm;
6752
6753 if (unlikely(p_i == 0 || p_i == 1)) {
6754 kp_skipped++;
6755 continue;
6756 }
6757
6758 if (_launchd_shutdown_monitor && pp_i == _launchd_shutdown_monitor->p) {
6759 kp_skipped++;
6760 continue;
6761 }
6762
6763 // We might have some jobs hanging around that we've decided to shut down in spite of.
6764 job_t j = jobmgr_find_by_pid(jm, p_i, false);
6765 if (!j || (j && j->anonymous)) {
6766 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
6767
6768 int status = 0;
6769 if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
6770 if (jobmgr_assumes_zero(jm, waitpid(p_i, &status, WNOHANG)) == 0) {
6771 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6772 }
6773 kp_skipped++;
6774 } else {
6775 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6776 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6777 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6778 * hints to the kernel along the way, so that it could shutdown certain subsystems when
6779 * their userspace emissaries go away, before the call to reboot(2).
6780 */
6781 if (leader && leader->ignore_pg_at_shutdown) {
6782 kp_skipped++;
6783 } else {
6784 ps[i] = p_i;
6785 }
6786 }
6787 } else {
6788 kp_skipped++;
6789 }
6790 }
6791
6792 if ((kp_cnt - kp_skipped > 0) && kill_strays) {
6793 jobmgr_kill_stray_children(jm, ps, kp_cnt - kp_skipped);
6794 }
6795
6796 free(ps);
6797 out:
6798 free(pids);
6799 }
6800
// Accessor: the parent job manager (NULL for the root manager).
jobmgr_t
jobmgr_parent(jobmgr_t jm)
{
	return jm->parentmgr;
}
6806
/* Release a forked child that is blocked reading from the fork pipe, then
 * close our end of the pipe.
 */
void
job_uncork_fork(job_t j)
{
	pid_t c = j->p;

	job_log(j, LOG_DEBUG, "Uncorking the fork().");
	/* this unblocks the child and avoids a race
	 * between the above fork() and the kevent_mod() */
	(void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
	(void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
	// NOTE(review): 0 (not -1) is used as the "no fd" sentinel here.
	j->fork_fd = 0;
}
6819
/* Create a new job manager (bootstrap). `jm` is the parent (NULL for the
 * root). `requestorport` is the port whose death tears the sub-bootstrap
 * down; `transfer_port` optionally supplies an existing bootstrap port.
 * `name`/`skip_init` control session bootstrapping; `asport` is the audit
 * session port. Returns the new manager, or NULL on failure.
 */
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
{
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	// The kevent callback dispatch requires the callback to be the first field.
	__OS_COMPILETIME_ASSERT__(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	// The name lives in trailing storage; reserve NAME_MAX when unnamed.
	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));

	if (!jobmgr_assumes(jm, jmr != NULL)) {
		return NULL;
	}

	if (jm == NULL) {
		root_jobmgr = jmr;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	// Ask for a dead-name notification so the sub-bootstrap is torn down
	// when its requestor port dies (see jobmgr_delete_anything_with_port).
	if (jm && jobmgr_assumes_zero(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME)) != KERN_SUCCESS) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		// Adopt a bootstrap port handed to us (sub-bootstraps only).
		(void)jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && !pid1_magic) {
		// Per-user launchd: check in with the system launchd for our port.
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (jobmgr_assumes_zero(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port)) != 0) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);

			// dup() probes whether the trusted fd is still valid before closing.
			if ((dfd = dup(lfd)) >= 0) {
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(dfd));
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(lfd));
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		// cut off the Libc cache, we don't want to deadlock against ourself
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		os_assert_zero(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME));

		// We set this explicitly as we start each child
		os_assert_zero(launchd_set_bport(MACH_PORT_NULL));
	} else if (jobmgr_assumes_zero(jmr, launchd_mport_create_recv(&jmr->jm_port)) != KERN_SUCCESS) {
		goto out_bad;
	}

	if (!name) {
		// Unnamed managers are labeled with their bootstrap port index.
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	if (!jm) {
		// Only the root manager owns the global signal and filesystem kevents.
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGINFO, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr));
	}

	if (name && !skip_init) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (jobmgr_assumes_zero(jmr, runtime_add_mport(jmr->jm_port, job_server)) != KERN_SUCCESS) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		bootstrapper->asport = asport;

		jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
		(void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	} else {
		jmr->req_asport = asport;
	}

	if (asport != MACH_PORT_NULL) {
		(void)jobmgr_assumes_zero(jmr, launchd_mport_copy_send(asport));
	}

	if (jmr->parentmgr) {
		runtime_add_weak_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
		if (jm == NULL) {
			root_jobmgr = NULL;
		}
	}
	return NULL;
}
6943
6944 jobmgr_t
6945 jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
6946 {
6947 jobmgr_t new = NULL;
6948
6949 /* These job managers are basically singletons, so we use the root Mach
6950 * bootstrap port as their requestor ports so they'll never go away.
6951 */
6952 mach_port_t req_port = root_jobmgr->jm_port;
6953 if (jobmgr_assumes_zero(jm, launchd_mport_make_send(req_port)) == KERN_SUCCESS) {
6954 new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
6955 if (new) {
6956 new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
6957 new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
6958 new->xpc_singleton = true;
6959 }
6960 }
6961
6962 return new;
6963 }
6964
/* Find the per-user XPC domain for `uid`, creating it (and a per-user
 * launchd, if needed) on first use. Returns the domain, or NULL on failure.
 */
jobmgr_t
jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
{
	// Fast path: return the domain if it is already registered.
	jobmgr_t jmi = NULL;
	LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
		if (jmi->req_euid == uid) {
			return jmi;
		}
	}

	name_t name;
	(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
	jmi = jobmgr_new_xpc_singleton_domain(jm, name);
	if (jobmgr_assumes(jm, jmi != NULL)) {
		/* We need to create a per-user launchd for this UID if there isn't one
		 * already so we can grab the bootstrap port.
		 */
		job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
		if (jobmgr_assumes(jmi, puj != NULL)) {
			// Take our own send-right references on the session/bootstrap ports.
			(void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(puj->asport));
			(void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(jmi->req_bsport));
			jmi->shortdesc = "per-user";
			jmi->req_asport = puj->asport;
			jmi->req_asid = puj->asid;
			jmi->req_euid = uid;
			jmi->req_egid = -1;

			LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
		} else {
			// NOTE(review): jmi is returned below even after jobmgr_remove();
			// if jobmgr_remove() frees the manager this returns a dangling
			// pointer — confirm jobmgr_remove()'s ownership semantics.
			jobmgr_remove(jmi);
		}
	}

	return jmi;
}
7000
7001 jobmgr_t
7002 jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
7003 {
7004 jobmgr_t jmi = NULL;
7005 LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
7006 if (jmi->req_asid == asid) {
7007 return jmi;
7008 }
7009 }
7010
7011 name_t name;
7012 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
7013 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
7014 if (jobmgr_assumes(jm, jmi != NULL)) {
7015 (void)jobmgr_assumes_zero(jmi, launchd_mport_make_send(root_jobmgr->jm_port));
7016 jmi->shortdesc = "per-session";
7017 jmi->req_bsport = root_jobmgr->jm_port;
7018 (void)jobmgr_assumes_zero(jmi, audit_session_port(asid, &jmi->req_asport));
7019 jmi->req_asid = asid;
7020 jmi->req_euid = -1;
7021 jmi->req_egid = -1;
7022
7023 LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
7024 } else {
7025 jobmgr_remove(jmi);
7026 }
7027
7028 return jmi;
7029 }
7030
/* Create the bootstrapper job for a new session of type `session_type` in
 * manager `jm`. The bootstrapper runs `launchctl bootstrap -S <type> [-s]`.
 * Returns the bootstrapper job, or NULL if job_new() failed.
 */
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);

	// Sub-managers (or non-PID-1 launchds) get the "weird bootstrap" path.
	if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
		bootstrapper->is_bootstrapper = true;
		char buf[100];

		// <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs)
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
		bootstrapper->weird_bootstrap = true;
		(void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	} else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
#if TARGET_OS_EMBEDDED
		bootstrapper->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
#endif
		bootstrapper->is_bootstrapper = true;
		if (jobmgr_assumes(jm, pid1_magic)) {
			// Have our system bootstrapper print out to the console.
			bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
			bootstrapper->stderrpath = strdup(_PATH_CONSOLE);

			// Watch for the console being revoked so output can be re-routed.
			if (launchd_console) {
				(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)fileno(launchd_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm));
			}
		}
	}

	// Marked initialized even if job_new() failed; callers see NULL then.
	jm->session_initialized = true;
	return bootstrapper;
}
7069
/* React to a port's receive right dying: purge every reference to `port`
 * from this manager and its submanagers (services registered with it,
 * pending attach waiters, the manager's own requestor port). Returns the
 * manager, or NULL if the manager itself was shut down as a result.
 */
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right to a
	 * port we already have open, the Mach kernel gives us the same port number
	 * back and increments a reference count associated with the port. This
	 * forces us, when discovering that a receive right at the other end
	 * has been deleted, to wander all of our objects to see what weird places
	 * clients might have handed us the same send right to use.
	 */

	if (jm == root_jobmgr) {
		if (port == inherited_bootstrap_port) {
			// Our parent bootstrap died: drop it and shut down entirely.
			(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(port));
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		// Delete foreign (send-right-only) services registered with this port.
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port && !ms->recv) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	// If this manager's requestor died, the whole sub-bootstrap goes away.
	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
		return jobmgr_shutdown(jm);
	}

	struct waiting4attach *w4ai = NULL;
	struct waiting4attach *w4ait = NULL;
	LIST_FOREACH_SAFE(w4ai, &jm->attaches, le, w4ait) {
		if (port == w4ai->port) {
			waiting4attach_delete(jm, w4ai);
			break;
		}
	}

	return jm;
}
7120
/* Resolve a Mach service by name within a bootstrap hierarchy.
 *
 * jm           job manager (bootstrap) to start the search in
 * name         service name to look up
 * check_parent when true, fall back to ancestor bootstraps on a miss
 * target_pid   when non-zero, look up a per-PID service registered by that
 *              process instead of a per-bootstrap service
 *
 * Returns the matching machservice, or NULL if none was found.
 */
struct machservice *
jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
{
	struct machservice *ms;
	job_t target_j;

	jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);

	if (target_pid) {
		/* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
		 * bootstrap in other bootstraps.
		 */

		// Start in the given bootstrap.
		if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
			// If we fail, do a deep traversal.
			if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
				jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
				return NULL;
			}
		}

		// Only services the target job registered as per-PID qualify here.
		SLIST_FOREACH(ms, &target_j->machservices, sle) {
			if (ms->per_pid && strcmp(name, ms->name) == 0) {
				return ms;
			}
		}

		job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
		return NULL;
	}

	jobmgr_t where2look = jm;
	// XPC domains are separate from Mach bootstraps.
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		// In a flat namespace, non-explicit subsets search the root bootstrap's hash.
		if (launchd_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2look = root_jobmgr;
		}
	}

	LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
		if (!ms->per_pid && strcmp(name, ms->name) == 0) {
			return ms;
		}
	}

	if (jm->parentmgr == NULL || !check_parent) {
		return NULL;
	}

	// Recurse upward; per-PID lookups never reach this point (they return above).
	return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
}
7173
/* Accessor: the Mach port backing this service registration. */
mach_port_t
machservice_port(struct machservice *ms)
{
	return ms->port;
}
7179
/* Accessor: the job associated with this Mach service. */
job_t
machservice_job(struct machservice *ms)
{
	return ms->job;
}
7185
/* Accessor: the service's "hide" flag. */
bool
machservice_hidden(struct machservice *ms)
{
	return ms->hide;
}
7191
/* Accessor: whether the service is currently marked active (checked out). */
bool
machservice_active(struct machservice *ms)
{
	return ms->isActive;
}
7197
/* Accessor: the service's registered name. */
const char *
machservice_name(struct machservice *ms)
{
	return ms->name;
}
7203
/* Drain queued messages from a crashed job's service port.
 *
 * Only called for services that opted in via drain_one_on_crash or
 * drain_all_on_crash (asserted below). Exception-handler ports are drained
 * through the MIG exception machinery so queued exceptions get replies;
 * everything else is received non-blocking and destroyed.
 */
void
machservice_drain_port(struct machservice *ms)
{
	bool drain_one = ms->drain_one_on_crash;
	bool drain_all = ms->drain_all_on_crash;

	if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
		return;
	}

	job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);

	// Buffers sized from the mach_exc MIG subsystem's request/reply unions.
	char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
	char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
	mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
	mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;

	// Anything that isn't MACH_MSG_SUCCESS, so the loop body runs at least once.
	mach_msg_return_t mr = ~MACH_MSG_SUCCESS;

	do {
		/* This should be a direct check on the Mach service to see if it's an exception-handling
		 * port, and it will break things if ReportCrash or SafetyNet start advertising other
		 * Mach services. But for now, it should be okay.
		 */
		if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
			mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
		} else {
			// Non-blocking receive: MACH_RCV_TIMEOUT with a 0 timeout just
			// takes whatever is already queued.
			mach_msg_options_t options = MACH_RCV_MSG |
				MACH_RCV_TIMEOUT ;

			mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
			switch (mr) {
			case MACH_MSG_SUCCESS:
				// Destroy the message to release any rights/OOL memory it carried.
				mach_msg_destroy((mach_msg_header_t *)req_hdr);
				break;
			case MACH_RCV_TIMED_OUT:
				// Queue is empty.
				break;
			case MACH_RCV_TOO_LARGE:
				launchd_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
				break;
			default:
				break;
			}
		}
	} while (drain_all && mr != MACH_RCV_TIMED_OUT);
}
7250
/* Tear down a Mach service registration belonging to job j.
 *
 * port_died only affects the log message: it records whether deletion was
 * triggered by the port itself dying. Alias entries are pure bookkeeping and
 * skip all port management.
 */
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	if (ms->alias) {
		/* HACK: Egregious code duplication. But dealing with aliases is a
		 * pretty simple affair since they can't and shouldn't have any complex
		 * behaviors associated with them.
		 */
		LIST_REMOVE(ms, name_hash_sle);
		SLIST_REMOVE(&j->machservices, ms, machservice, sle);
		free(ms);
		return;
	}

	if (unlikely(ms->debug_on_close)) {
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
	}

	// Only close the receive right if we hold it; the service must not be
	// checked out (active) at this point.
	if (ms->recv && job_assumes(j, !machservice_active(ms))) {
		job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
		(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	}

	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	if (unlikely(ms->port == the_exception_server)) {
		the_exception_server = 0;
	}

	job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}
	SLIST_REMOVE(&j->machservices, ms, machservice, sle);

	// NOTE(review): presumably dedicated-instance jobs' services and event
	// channels were never inserted into the name hash — verify against the
	// corresponding insertion site before changing this condition.
	if (!(j->dedicated_instance || ms->event_channel)) {
		LIST_REMOVE(ms, name_hash_sle);
	}
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
7295
7296 void
7297 machservice_request_notifications(struct machservice *ms)
7298 {
7299 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
7300
7301 ms->isActive = true;
7302
7303 if (ms->recv) {
7304 which = MACH_NOTIFY_PORT_DESTROYED;
7305 job_checkin(ms->job);
7306 }
7307
7308 (void)job_assumes_zero(ms->job, launchd_mport_notify_req(ms->port, which));
7309 }
7310
// NELEM: element count of a fixed-size array (do not use on pointers).
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
// END_OF: pointer one past the last element of a fixed-size array.
#define END_OF(x) (&(x)[NELEM(x)])
7313
/* Split a command line into a NULL-terminated argv vector.
 *
 * Returns a single heap allocation holding both the pointer table and the
 * copied argument strings, so the caller frees everything with one free().
 * Returns NULL for an empty/whitespace-only string or on allocation failure.
 *
 * Supports double-quoted arguments and backslash escapes. Input is capped at
 * NELEM(argv) - 1 arguments and sizeof(args) - 1 bytes of argument text.
 *
 * Fixes over the previous version:
 *  - argv[nargs] = NULL could write one past the end of argv[] when exactly
 *    NELEM(argv) arguments were parsed; one slot is now reserved.
 *  - the terminating '\0' could be written one past the end of args[]; the
 *    copy loop now reserves a byte and parsing stops when the buffer fills.
 *  - trailing whitespace no longer produces a spurious empty final argument.
 *  - isspace() is given an unsigned char, avoiding UB on negative chars.
 */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		// Skip inter-argument whitespace.
		while (isspace((unsigned char)*cp)) {
			cp++;
		}
		if (*cp == '\0') {
			// Trailing whitespace: don't record an empty argument.
			break;
		}
		term = (*cp == '"') ? *cp++ : '\0';
		// Reserve one argv slot for the NULL terminator below.
		if (nargs < NELEM(argv) - 1) {
			argv[nargs++] = argp;
		}
		// Reserve one byte in args for this argument's terminator.
		while (*cp && (term ? *cp != term : !isspace((unsigned char)*cp)) && argp < END_OF(args) - 1) {
			if (*cp == '\\') {
				cp++;
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		*argp++ = '\0';
		if (argp >= END_OF(args)) {
			// Argument buffer exhausted; stop parsing.
			break;
		}
	}
	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	// One allocation: pointer table up front, string data behind it. The
	// copied text can never exceed strlen(string) bytes.
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!argv_ret) {
		(void)os_assumes_zero(errno);
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
7364
/* Record that the job has checked in with launchd. */
void
job_checkin(job_t j)
{
	j->checkedin = true;
}
7370
7371 bool job_is_god(job_t j)
7372 {
7373 return j->embedded_god;
7374 }
7375
7376 bool
7377 job_ack_port_destruction(mach_port_t p)
7378 {
7379 struct machservice *ms;
7380 job_t j;
7381
7382 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
7383 if (ms->recv && (ms->port == p)) {
7384 break;
7385 }
7386 }
7387
7388 if (!ms) {
7389 launchd_syslog(LOG_WARNING, "Could not find MachService to match receive right: 0x%x", p);
7390 return false;
7391 }
7392
7393 j = ms->job;
7394
7395 jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
7396
7397 /* Without being the exception handler, NOTE_EXIT is our only way to tell if
7398 * the job crashed, and we can't rely on NOTE_EXIT always being processed
7399 * after all the job's receive rights have been returned.
7400 *
7401 * So when we get receive rights back, check to see if the job has been
7402 * reaped yet. If not, then we add this service to a list of services to be
7403 * drained on crash if it's requested that behavior. So, for a job with N
7404 * receive rights all requesting that they be drained on crash, we can
7405 * safely handle the following sequence of events.
7406 *
7407 * ReceiveRight0Returned
7408 * ReceiveRight1Returned
7409 * ReceiveRight2Returned
7410 * NOTE_EXIT (reap, get exit status)
7411 * ReceiveRight3Returned
7412 * .
7413 * .
7414 * .
7415 * ReceiveRight(N - 1)Returned
7416 */
7417 if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
7418 if (j->crashed && j->reaped) {
7419 job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
7420 machservice_drain_port(ms);
7421 } else if (!(j->crashed || j->reaped)) {
7422 job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
7423 }
7424 }
7425
7426 ms->isActive = false;
7427 if (ms->delete_on_destruction) {
7428 machservice_delete(j, ms, false);
7429 } else if (ms->reset) {
7430 machservice_resetport(j, ms);
7431 }
7432
7433 kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
7434 (void)job_assumes_zero(j, kr);
7435 machservice_stamp_port(j, ms);
7436 job_dispatch(j, false);
7437
7438 if (ms->recv_race_hack) {
7439 ms->recv_race_hack = false;
7440 machservice_watch(ms->job, ms);
7441 }
7442
7443 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
7444
7445 return true;
7446 }
7447
/* Handle a no-senders notification on a job's privileged bootstrap port:
 * the last client went away, so close the receive right, clear the port,
 * and re-evaluate the job.
 */
void
job_ack_no_senders(job_t j)
{
	j->priv_port_has_senders = false;

	(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	j->j_port = 0;

	job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");

	job_dispatch(j, false);
}
7460
7461 bool
7462 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
7463 {
7464 struct semaphoreitem *si;
7465 size_t alloc_sz = sizeof(struct semaphoreitem);
7466
7467 if (what) {
7468 alloc_sz += strlen(what) + 1;
7469 }
7470
7471 if (job_assumes(j, si = calloc(1, alloc_sz)) == NULL) {
7472 return false;
7473 }
7474
7475 si->why = why;
7476
7477 if (what) {
7478 strcpy(si->what_init, what);
7479 }
7480
7481 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
7482
7483 if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
7484 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
7485 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
7486 j->nosy = true;
7487 }
7488
7489 semaphoreitem_runtime_mod_ref(si, true);
7490
7491 return true;
7492 }
7493
7494 void
7495 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
7496 {
7497 /*
7498 * External events need to be tracked.
7499 * Internal events do NOT need to be tracked.
7500 */
7501
7502 switch (si->why) {
7503 case SUCCESSFUL_EXIT:
7504 case FAILED_EXIT:
7505 case OTHER_JOB_ENABLED:
7506 case OTHER_JOB_DISABLED:
7507 case OTHER_JOB_ACTIVE:
7508 case OTHER_JOB_INACTIVE:
7509 return;
7510 default:
7511 break;
7512 }
7513
7514 if (add) {
7515 runtime_add_weak_ref();
7516 } else {
7517 runtime_del_weak_ref();
7518 }
7519 }
7520
7521 void
7522 semaphoreitem_delete(job_t j, struct semaphoreitem *si)
7523 {
7524 semaphoreitem_runtime_mod_ref(si, false);
7525
7526 SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
7527
7528 // We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores.
7529 if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
7530 j->nosy = false;
7531 SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
7532 }
7533
7534 free(si);
7535 }
7536
7537 void
7538 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
7539 {
7540 struct semaphoreitem_dict_iter_context *sdic = context;
7541 semaphore_reason_t why;
7542
7543 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
7544
7545 semaphoreitem_new(sdic->j, why, key);
7546 }
7547
/* launch_data dictionary callback: translate one KeepAlive sub-key into the
 * corresponding semaphore item(s) on job j (passed as context).
 *
 * Boolean keys map directly to a reason pair; dictionary keys (OtherJob*)
 * are iterated again with semaphoreitem_setup_dict_iter.
 */
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			// Exit-status keepalives imply an initial start.
			j->start_pending = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
			j->needs_kickoff = launch_data_get_bool(obj);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
			why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
			semaphoreitem_new(j, why, NULL);
			j->start_pending = true;
		} else {
			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
			break;
		}

		// Each entry in the sub-dictionary becomes its own semaphore item.
		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		job_log(j, LOG_ERR, "Unrecognized KeepAlive type: %u", launch_data_get_type(obj));
		break;
	}
}
7593
/* Register a new external (LaunchEvents / XPC Events) event for job j.
 *
 * sys     the event system (stream) the event belongs to
 * evname  event name; copied into the struct's trailing storage
 * event   event descriptor; retained for the lifetime of the entry
 * flags   XPC_EVENT_FLAG_ENTITLEMENTS captures the calling process's
 *         entitlements alongside the event
 *
 * Returns false if j is the event monitor itself or allocation fails.
 */
bool
externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags)
{
	if (j->event_monitor) {
		job_log(j, LOG_ERR, "The event monitor job cannot use LaunchEvents or XPC Events.");
		return false;
	}

	// Trailing storage holds the copied event name.
	struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
	if (!ee) {
		return false;
	}

	ee->event = xpc_retain(event);
	(void)strcpy(ee->name, evname);
	ee->job = j;
	// IDs are handed out sequentially per event system.
	ee->id = sys->curid;
	ee->sys = sys;
	ee->state = false;
	ee->wanted_state = true;
	sys->curid++;

	if (flags & XPC_EVENT_FLAG_ENTITLEMENTS) {
		struct ldcred *ldc = runtime_get_caller_creds();
		if (ldc) {
			ee->entitlements = xpc_copy_entitlements_for_pid(ldc->pid);
		}
	}

	if (sys == _launchd_support_system) {
		ee->internal = true;
	}

	LIST_INSERT_HEAD(&j->events, ee, job_le);
	LIST_INSERT_HEAD(&sys->events, ee, sys_le);

	job_log(j, LOG_DEBUG, "New event: %s/%s", sys->name, evname);

	// Tell the event monitor the event set changed.
	eventsystem_ping();
	return true;
}
7635
7636 void
7637 externalevent_delete(struct externalevent *ee)
7638 {
7639 xpc_release(ee->event);
7640 if (ee->entitlements) {
7641 xpc_release(ee->entitlements);
7642 }
7643 LIST_REMOVE(ee, job_le);
7644 LIST_REMOVE(ee, sys_le);
7645
7646 free(ee);
7647
7648 eventsystem_ping();
7649 }
7650
7651 void
7652 externalevent_setup(launch_data_t obj, const char *key, void *context)
7653 {
7654 /* This method can ONLY be called on the job_import() path, as it assumes
7655 * the input is a launch_data_t.
7656 */
7657 struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
7658
7659 xpc_object_t xobj = ld2xpc(obj);
7660 if (xobj) {
7661 job_log(ctx->j, LOG_DEBUG, "Importing stream/event: %s/%s", ctx->sys->name, key);
7662 externalevent_new(ctx->j, ctx->sys, key, xobj, 0);
7663 xpc_release(xobj);
7664 } else {
7665 job_log(ctx->j, LOG_ERR, "Could not import event for job: %s", key);
7666 }
7667 }
7668
7669 struct externalevent *
7670 externalevent_find(const char *sysname, uint64_t id)
7671 {
7672 struct externalevent *ei = NULL;
7673
7674 struct eventsystem *es = eventsystem_find(sysname);
7675 if (es != NULL) {
7676 LIST_FOREACH(ei, &es->events, sys_le) {
7677 if (ei->id == id) {
7678 break;
7679 }
7680 }
7681 } else {
7682 launchd_syslog(LOG_ERR, "Could not find event system: %s", sysname);
7683 }
7684
7685 return ei;
7686 }
7687
7688 struct eventsystem *
7689 eventsystem_new(const char *name)
7690 {
7691 struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
7692 if (es != NULL) {
7693 es->curid = 1;
7694 (void)strcpy(es->name, name);
7695 LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
7696 } else {
7697 (void)os_assumes_zero(errno);
7698 }
7699
7700 return es;
7701 }
7702
7703 void
7704 eventsystem_delete(struct eventsystem *es)
7705 {
7706 struct externalevent *ei = NULL;
7707 while ((ei = LIST_FIRST(&es->events))) {
7708 externalevent_delete(ei);
7709 }
7710
7711 LIST_REMOVE(es, global_le);
7712
7713 free(es);
7714 }
7715
7716 void
7717 eventsystem_setup(launch_data_t obj, const char *key, void *context)
7718 {
7719 job_t j = (job_t)context;
7720 if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
7721 return;
7722 }
7723
7724 struct eventsystem *sys = eventsystem_find(key);
7725 if (unlikely(sys == NULL)) {
7726 sys = eventsystem_new(key);
7727 job_log(j, LOG_DEBUG, "New event system: %s", key);
7728 }
7729
7730 if (job_assumes(j, sys != NULL)) {
7731 struct externalevent_iter_ctx ctx = {
7732 .j = j,
7733 .sys = sys,
7734 };
7735
7736 job_log(j, LOG_DEBUG, "Importing events for stream: %s", key);
7737 launch_data_dict_iterate(obj, externalevent_setup, &ctx);
7738 }
7739 }
7740
7741 struct eventsystem *
7742 eventsystem_find(const char *name)
7743 {
7744 struct eventsystem *esi = NULL;
7745 LIST_FOREACH(esi, &_s_event_systems, global_le) {
7746 if (strcmp(name, esi->name) == 0) {
7747 break;
7748 }
7749 }
7750
7751 return esi;
7752 }
7753
7754 void
7755 eventsystem_ping(void)
7756 {
7757 if (!_launchd_event_monitor) {
7758 return;
7759 }
7760
7761 if (!_launchd_event_monitor->p) {
7762 (void)job_dispatch(_launchd_event_monitor, true);
7763 } else {
7764 if (_launchd_event_monitor->event_monitor_ready2signal) {
7765 (void)job_assumes_zero_p(_launchd_event_monitor, kill(_launchd_event_monitor->p, SIGUSR1));
7766 }
7767 }
7768 }
7769
7770 void
7771 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
7772 {
7773 jobmgr_t jmi, jmn;
7774 job_t ji, jn;
7775
7776
7777 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7778 jobmgr_dispatch_all_semaphores(jmi);
7779 }
7780
7781 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7782 if (!SLIST_EMPTY(&ji->semaphores)) {
7783 job_dispatch(ji, false);
7784 }
7785 }
7786 }
7787
/* Compute the next calendar time matching the cron-style spec
 * (month, day-of-month, hour, minute), where -1 is a wildcard.
 */
time_t
cronemu(int mon, int mday, int hour, int min)
{
	time_t now = time(NULL);
	struct tm candidate = *localtime(&now);

	// Start at the next whole minute; let mktime decide DST.
	candidate.tm_isdst = -1;
	candidate.tm_sec = 0;
	candidate.tm_min++;

	// Roll forward a year at a time until the spec matches.
	while (!cronemu_mon(&candidate, mon, mday, hour, min)) {
		candidate.tm_year++;
		candidate.tm_mon = 0;
		candidate.tm_mday = 1;
		candidate.tm_hour = 0;
		candidate.tm_min = 0;
		mktime(&candidate);
	}

	return mktime(&candidate);
}
7812
/* Compute the next calendar time falling on the given weekday at the given
 * hour/minute (-1 wildcards). Both 0 and 7 mean Sunday, as in cron.
 */
time_t
cronemu_wday(int wday, int hour, int min)
{
	time_t now = time(NULL);
	struct tm candidate = *localtime(&now);

	candidate.tm_isdst = -1;	// let mktime determine DST
	candidate.tm_sec = 0;
	candidate.tm_min++;		// never fire within the current minute

	if (wday == 7) {
		wday = 0;
	}

	// Walk forward one day at a time until weekday and time both match.
	while (!(candidate.tm_wday == wday && cronemu_hour(&candidate, hour, min))) {
		candidate.tm_mday++;
		candidate.tm_hour = 0;
		candidate.tm_min = 0;
		mktime(&candidate);
	}

	return mktime(&candidate);
}
7839
/* Advance *wtm to the first time at or after it matching (mon, mday, hour,
 * min); -1 wildcards a field. Returns false when no match exists within the
 * current year (wildcard case) or the requested month already passed.
 */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		// Wildcard month: scan forward month by month.
		struct tm scratch = *wtm;
		int before_norm;

		while (!cronemu_mday(&scratch, mday, hour, min)) {
			scratch.tm_mon++;
			scratch.tm_mday = 1;
			scratch.tm_hour = 0;
			scratch.tm_min = 0;
			before_norm = scratch.tm_mon;
			mktime(&scratch);
			// mktime normalizing tm_mon means we rolled into the next year.
			if (before_norm != scratch.tm_mon) {
				return false;
			}
		}
		*wtm = scratch;
		return true;
	}

	if (mon < wtm->tm_mon) {
		// The requested month already passed this year.
		return false;
	}

	if (mon > wtm->tm_mon) {
		// Jump to the very start of the requested month.
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
7875
/* Advance *wtm to the first time at or after it matching (mday, hour, min);
 * -1 wildcards a field. Returns false when no match exists within the
 * current month (wildcard case) or the requested day already passed.
 */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		// Wildcard day: scan forward day by day.
		struct tm scratch = *wtm;
		int before_norm;

		while (!cronemu_hour(&scratch, hour, min)) {
			scratch.tm_mday++;
			scratch.tm_hour = 0;
			scratch.tm_min = 0;
			before_norm = scratch.tm_mday;
			mktime(&scratch);
			// mktime normalizing tm_mday means we rolled into the next month.
			if (before_norm != scratch.tm_mday) {
				return false;
			}
		}
		*wtm = scratch;
		return true;
	}

	if (mday < wtm->tm_mday) {
		// The requested day already passed this month.
		return false;
	}

	if (mday > wtm->tm_mday) {
		// Jump to midnight on the requested day.
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
7909
/* Advance *wtm to the first time at or after it matching (hour, min);
 * -1 wildcards a field. Returns false when no match exists within the
 * current day (wildcard case) or the requested hour already passed.
 */
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		// Wildcard hour: scan forward hour by hour.
		struct tm scratch = *wtm;
		int before_norm;

		while (!cronemu_min(&scratch, min)) {
			scratch.tm_hour++;
			scratch.tm_min = 0;
			before_norm = scratch.tm_hour;
			mktime(&scratch);
			// mktime normalizing tm_hour means we rolled into the next day.
			if (before_norm != scratch.tm_hour) {
				return false;
			}
		}
		*wtm = scratch;
		return true;
	}

	if (hour < wtm->tm_hour) {
		// The requested hour already passed today.
		return false;
	}

	if (hour > wtm->tm_hour) {
		// Jump to the top of the requested hour.
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
7941
/* Advance *wtm to the requested minute within the current hour.
 * -1 is the wildcard (any minute matches). Returns false when the requested
 * minute already passed in this hour.
 */
bool
cronemu_min(struct tm *wtm, int min)
{
	if (min == -1) {
		return true;
	}

	if (min < wtm->tm_min) {
		return false;
	}

	if (min > wtm->tm_min) {
		wtm->tm_min = min;
	}

	return true;
}
7959
/* MIG handler: bootstrap_create_server(). Create an on-demand mach_init-style
 * server job running server_cmd as server_uid.
 *
 * Returns the new job's privileged bootstrap port via server_portp, or a
 * bootstrap error; non-root callers of PID 1 are redirected to their
 * per-user launchd.
 */
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t js;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	// Sandbox check is against the server binary's path (argv[0]).
	// mach_cmd2argv() returns one allocation, so a single free() releases it.
	const char **argv = (const char **)mach_cmd2argv(server_cmd);
	if (unlikely(argv == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
		free(argv);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	free(argv);
#endif

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

	if (pid1_magic) {
		// PID 1 only creates servers for root callers; everyone else is
		// redirected to their per-user launchd.
		if (ldc->euid || ldc->uid) {
			job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
			return VPROC_ERR_TRY_PER_USER;
		}
	} else {
		if (unlikely(server_uid != getuid())) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; // zero means "do nothing"
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (unlikely(js == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
8010
/* MIG handler: send a signal to (or, with VPROC_MAGIC_UNLOAD_SIGNAL, unload)
 * the job named by targetlabel.
 *
 * srp is the MIG reply port; for a blocking unload the reply is deferred
 * (MIG_NO_REPLY) until the target is fully removed.
 */
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t otherj;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Caller must be root or the same UID we run as, and allowed to manage jobs.
	if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
#if TARGET_OS_EMBEDDED
		// On embedded, a job with root-equivalence may still proceed.
		if (!j->embedded_god) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
#else
		return BOOTSTRAP_NOT_PRIVILEGED;
#endif
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// Root-equivalent embedded jobs may only signal jobs running as the same user.
	if (j->embedded_god) {
		if (j->username && otherj->username) {
			if (strcmp(j->username, otherj->username) != 0) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		// Pseudo-signal: remove the job instead of signaling it.
		bool do_block = otherj->p;

		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			// this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first...
			(void)job_assumes(otherj, waiting4removal_new(otherj, srp));
			return MIG_NO_REPLY;
		} else {
			return 0;
		}
	} else if (otherj->p) {
		(void)job_assumes_zero_p(j, kill2(otherj->p, sig));
	}

	return 0;
}
8076
8077 kern_return_t
8078 job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
8079 {
8080 struct ldcred *ldc = runtime_get_caller_creds();
8081
8082 if (!j) {
8083 return BOOTSTRAP_NO_MEMORY;
8084 }
8085
8086 if (!job_assumes(j, j->per_user)) {
8087 return BOOTSTRAP_NOT_PRIVILEGED;
8088 }
8089
8090 return launchd_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
8091 }
8092
8093 kern_return_t
8094 job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
8095 {
8096 struct ldcred *ldc = runtime_get_caller_creds();
8097
8098 if (!j) {
8099 return BOOTSTRAP_NO_MEMORY;
8100 }
8101
8102 if (unlikely(ldc->euid)) {
8103 return BOOTSTRAP_NOT_PRIVILEGED;
8104 }
8105
8106 return launchd_log_drain(srp, outval, outvalCnt);
8107 }
8108
/* MIG handler: get/set/swap a complex (launch_data-encoded) vproc key.
 *
 * inval/invalCnt carry the packed input (for "set"/"swap"); outval/outvalCnt
 * receive a freshly mig_allocate()d buffer with the packed result (for
 * "get"/"swap"). On success, ownership of *outval transfers to the caller;
 * inval is consumed either way. Returns 0 on success, 1 on failure.
 */
kern_return_t
job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
		vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval,
		mach_msg_type_number_t *outvalCnt)
{
	const char *action;
	launch_data_t input_obj = NULL, output_obj = NULL;
	size_t data_offset = 0;
	size_t packed_size;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Setting a key on a job other than one's own requires root or our UID.
	if (inkey && ldc->pid != j->p) {
		if (ldc->euid && ldc->euid != getuid()) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}

	// A swap must name the same key on both sides.
	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	// Fixed-size 20 MiB output buffer; unused space is the caller's problem
	// (or deallocated below for VPROC_GSK_ZERO).
	*outvalCnt = 20 * 1024 * 1024;
	mig_allocate(outval, *outvalCnt);
	if (!job_assumes(j, *outval != 0)) {
		return 1;
	}

	/* Note to future maintainers: launch_data_unpack() does NOT return a heap
	 * object. The data is decoded in-place. So do not call launch_data_free()
	 * on input_obj.
	 */
	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
		goto out_bad;
	}

	char *store = NULL;
	switch (outkey) {
	case VPROC_GSK_ENVIRONMENT:
		// Export the environment accumulated from other jobs as a dictionary.
		if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
			goto out_bad;
		}
		jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ALLJOBS:
		// Export every job; strip file descriptors before packing.
		if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
			goto out_bad;
		}
		ipc_revoke_fds(output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_MGR_NAME:
		if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_JOB_OVERRIDES_DB:
		// Path to the overrides database; `store` is heap-allocated.
		store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_DB, "overrides.plist");
		if (!store || !job_assumes(j, (output_obj = launch_data_new_string(store)) != NULL)) {
			free(store);
			goto out_bad;
		}

		free(store);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ZERO:
		// No output requested: release the pre-allocated buffer.
		mig_deallocate(*outval, *outvalCnt);
		*outval = 0;
		*outvalCnt = 0;
		break;
	default:
		goto out_bad;
	}

	mig_deallocate(inval, invalCnt);
	return 0;

out_bad:
	// Consume the input and release any partially-built output.
	mig_deallocate(inval, invalCnt);
	if (*outval) {
		mig_deallocate(*outval, *outvalCnt);
	}
	if (output_obj) {
		launch_data_free(output_obj);
	}

	return 1;
}
8233
8234 kern_return_t
8235 job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
8236 {
8237 const char *action;
8238 kern_return_t kr = 0;
8239 struct ldcred *ldc = runtime_get_caller_creds();
8240 int oldmask;
8241
8242 if (!j) {
8243 return BOOTSTRAP_NO_MEMORY;
8244 }
8245
8246 if (inkey && ldc->pid != j->p) {
8247 if (ldc->euid && ldc->euid != getuid()) {
8248 return BOOTSTRAP_NOT_PRIVILEGED;
8249 }
8250 }
8251
8252 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
8253 return 1;
8254 }
8255
8256 if (inkey && outkey) {
8257 action = "Swapping";
8258 } else if (inkey) {
8259 action = "Setting";
8260 } else {
8261 action = "Getting";
8262 }
8263
8264 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
8265
8266 switch (outkey) {
8267 case VPROC_GSK_ABANDON_PROCESS_GROUP:
8268 *outval = j->abandon_pg;
8269 break;
8270 case VPROC_GSK_LAST_EXIT_STATUS:
8271 *outval = j->last_exit_status;
8272 break;
8273 case VPROC_GSK_MGR_UID:
8274 *outval = getuid();
8275 break;
8276 case VPROC_GSK_MGR_PID:
8277 *outval = getpid();
8278 break;
8279 case VPROC_GSK_IS_MANAGED:
8280 *outval = j->anonymous ? 0 : 1;
8281 break;
8282 case VPROC_GSK_BASIC_KEEPALIVE:
8283 *outval = !j->ondemand;
8284 break;
8285 case VPROC_GSK_START_INTERVAL:
8286 *outval = j->start_interval;
8287 break;
8288 case VPROC_GSK_IDLE_TIMEOUT:
8289 *outval = j->timeout;
8290 break;
8291 case VPROC_GSK_EXIT_TIMEOUT:
8292 *outval = j->exit_timeout;
8293 break;
8294 case VPROC_GSK_GLOBAL_LOG_MASK:
8295 oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
8296 *outval = oldmask;
8297 runtime_setlogmask(oldmask);
8298 break;
8299 case VPROC_GSK_GLOBAL_UMASK:
8300 oldmask = umask(0);
8301 *outval = oldmask;
8302 umask(oldmask);
8303 break;
8304 case VPROC_GSK_TRANSACTIONS_ENABLED:
8305 job_log(j, LOG_DEBUG, "Reading EnableTransactions value.");
8306 *outval = j->enable_transactions;
8307 break;
8308 case VPROC_GSK_WAITFORDEBUGGER:
8309 *outval = j->wait4debugger;
8310 break;
8311 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
8312 *outval = j->embedded_god;
8313 break;
8314 case VPROC_GSK_ZERO:
8315 *outval = 0;
8316 break;
8317 default:
8318 kr = 1;
8319 break;
8320 }
8321
8322 switch (inkey) {
8323 case VPROC_GSK_ABANDON_PROCESS_GROUP:
8324 j->abandon_pg = (bool)inval;
8325 break;
8326 case VPROC_GSK_GLOBAL_ON_DEMAND:
8327 job_log(j, LOG_DEBUG, "Job has set global on-demand mode to: %s", inval ? "true" : "false");
8328 kr = job_set_global_on_demand(j, inval);
8329 break;
8330 case VPROC_GSK_BASIC_KEEPALIVE:
8331 j->ondemand = !inval;
8332 break;
8333 case VPROC_GSK_START_INTERVAL:
8334 if (inval > UINT32_MAX || inval < 0) {
8335 kr = 1;
8336 } else if (inval) {
8337 if (j->start_interval == 0) {
8338 runtime_add_weak_ref();
8339 }
8340 j->start_interval = (typeof(j->start_interval)) inval;
8341 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
8342 } else if (j->start_interval) {
8343 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
8344 if (j->start_interval != 0) {
8345 runtime_del_weak_ref();
8346 }
8347 j->start_interval = 0;
8348 }
8349 break;
8350 case VPROC_GSK_IDLE_TIMEOUT:
8351 if (inval < 0 || inval > UINT32_MAX) {
8352 kr = 1;
8353 } else {
8354 j->timeout = (typeof(j->timeout)) inval;
8355 }
8356 break;
8357 case VPROC_GSK_EXIT_TIMEOUT:
8358 if (inval < 0 || inval > UINT32_MAX) {
8359 kr = 1;
8360 } else {
8361 j->exit_timeout = (typeof(j->exit_timeout)) inval;
8362 }
8363 break;
8364 case VPROC_GSK_GLOBAL_LOG_MASK:
8365 if (inval < 0 || inval > UINT32_MAX) {
8366 kr = 1;
8367 } else {
8368 runtime_setlogmask((int) inval);
8369 }
8370 break;
8371 case VPROC_GSK_GLOBAL_UMASK:
8372 __OS_COMPILETIME_ASSERT__(sizeof (mode_t) == 2);
8373 if (inval < 0 || inval > UINT16_MAX) {
8374 kr = 1;
8375 } else {
8376 #if HAVE_SANDBOX
8377 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8378 kr = 1;
8379 } else {
8380 umask((mode_t) inval);
8381 }
8382 #endif
8383 }
8384 break;
8385 case VPROC_GSK_TRANSACTIONS_ENABLED:
8386 /* No-op. */
8387 break;
8388 case VPROC_GSK_WEIRD_BOOTSTRAP:
8389 if (job_assumes(j, j->weird_bootstrap)) {
8390 job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
8391
8392 mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_job_subsystem);
8393
8394 if (job_mig_job_subsystem.maxsize > mxmsgsz) {
8395 mxmsgsz = job_mig_job_subsystem.maxsize;
8396 }
8397
8398 (void)job_assumes_zero(j, runtime_add_mport(j->mgr->jm_port, job_server));
8399 j->weird_bootstrap = false;
8400 }
8401 break;
8402 case VPROC_GSK_WAITFORDEBUGGER:
8403 j->wait4debugger_oneshot = inval;
8404 break;
8405 case VPROC_GSK_PERUSER_SUSPEND:
8406 if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
8407 mach_port_t junk = MACH_PORT_NULL;
8408 job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
8409 if (job_assumes(j, jpu != NULL)) {
8410 struct suspended_peruser *spi = NULL;
8411 LIST_FOREACH(spi, &j->suspended_perusers, sle) {
8412 if ((int64_t)(spi->j->mach_uid) == inval) {
8413 job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
8414 break;
8415 }
8416 }
8417
8418 if (spi == NULL) {
8419 job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
8420 spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
8421 if (job_assumes(j, spi != NULL)) {
8422 /* Stop listening for events.
8423 *
8424 * See <rdar://problem/9014146>.
8425 */
8426 if (jpu->peruser_suspend_count == 0) {
8427 job_ignore(jpu);
8428 }
8429
8430 spi->j = jpu;
8431 spi->j->peruser_suspend_count++;
8432 LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
8433 job_stop(spi->j);
8434 *outval = jpu->p;
8435 } else {
8436 kr = BOOTSTRAP_NO_MEMORY;
8437 }
8438 }
8439 }
8440 } else {
8441 kr = 1;
8442 }
8443 break;
8444 case VPROC_GSK_PERUSER_RESUME:
8445 if (job_assumes(j, pid1_magic == true)) {
8446 struct suspended_peruser *spi = NULL, *spt = NULL;
8447 LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
8448 if ((int64_t)(spi->j->mach_uid) == inval) {
8449 spi->j->peruser_suspend_count--;
8450 LIST_REMOVE(spi, sle);
8451 job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
8452 break;
8453 }
8454 }
8455
8456 if (!job_assumes(j, spi != NULL)) {
8457 job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
8458 kr = BOOTSTRAP_NOT_PRIVILEGED;
8459 } else if (spi->j->peruser_suspend_count == 0) {
8460 job_watch(spi->j);
8461 job_dispatch(spi->j, false);
8462 free(spi);
8463 }
8464 } else {
8465 kr = 1;
8466 }
8467 break;
8468 case VPROC_GSK_ZERO:
8469 break;
8470 default:
8471 kr = 1;
8472 break;
8473 }
8474
8475 return kr;
8476 }
8477
/* MIG server routine: called by a child process right after fork() so that
 * launchd can finish configuring the child's Mach task. Installs the
 * exception port and any registered host special ports into the child task,
 * then returns the job's audit session port (if any) through *asport.
 */
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	// Copy our exception port and each registered special port into the
	// freshly-forked child task.
	struct machservice *ms;
	job_setup_exception_port(j, child_task);
	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			// The TASK_ACCESS_PORT funny business is to workaround 5325399.
			continue;
		}

		// Note: errno is (ab)used here to hold the Mach kern_return_t.
		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
		if (errno) {
			if (errno == MACH_SEND_INVALID_DEST) {
				// The child task port is dead; no point trying the rest.
				job_log(j, LOG_WARNING, "Task died before special ports could be set.");
				break;
			}

			int desired_log_level = LOG_ERR;
			if (j->anonymous) {
				// 5338127

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* MIG will not zero-initialize this pointer, so we must always do so.
	 *
	 * <rdar://problem/8562593>.
	 */
	*asport = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
	if (!j->anonymous) {
		/* XPC services will spawn into the root security session by default.
		 * xpcproxy will switch them away if needed.
		 */
		if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
			job_log(j, LOG_DEBUG, "Returning session port: 0x%x", j->asport);
			*asport = j->asport;
		}
	}
#endif
	// Drop our reference to the child task port now that we are done with it.
	(void)job_assumes_zero(j, launchd_mport_deallocate(child_task));

	return 0;
}
8537
8538 kern_return_t
8539 job_mig_get_listener_port_rights(job_t j, mach_port_array_t *sports, mach_msg_type_number_t *sports_cnt)
8540 {
8541 if (!j) {
8542 return BOOTSTRAP_NO_MEMORY;
8543 }
8544
8545 size_t cnt = 0;
8546 struct machservice *msi = NULL;
8547 SLIST_FOREACH(msi, &j->machservices, sle) {
8548 if (msi->upfront && job_assumes(j, msi->recv)) {
8549 cnt++;
8550 }
8551 }
8552
8553 if (cnt == 0) {
8554 return BOOTSTRAP_UNKNOWN_SERVICE;
8555 }
8556
8557 mach_port_array_t sports2 = NULL;
8558 mig_allocate((vm_address_t *)&sports2, cnt * sizeof(sports2[0]));
8559 if (!sports2) {
8560 return BOOTSTRAP_NO_MEMORY;
8561 }
8562
8563 size_t i = 0;
8564 SLIST_FOREACH(msi, &j->machservices, sle) {
8565 if (msi->upfront && msi->recv) {
8566 sports2[i] = msi->port;
8567 i++;
8568 }
8569 }
8570
8571 *sports = sports2;
8572 *sports_cnt = cnt;
8573
8574 return KERN_SUCCESS;
8575 }
8576
8577 kern_return_t
8578 job_mig_register_gui_session(job_t j, mach_port_t asport)
8579 {
8580 if (!j->per_user) {
8581 return BOOTSTRAP_NOT_PRIVILEGED;
8582 }
8583
8584 jobmgr_t jm = jobmgr_find_xpc_per_user_domain(root_jobmgr, j->mach_uid);
8585 if (!jm) {
8586 return BOOTSTRAP_UNKNOWN_SERVICE;
8587 }
8588
8589 if (jm->req_gui_asport) {
8590 // This job manager persists, so we need to allow the per-user launchd
8591 // to update the GUI session as it comes and goes.
8592 jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_gui_asport));
8593 }
8594
8595 jm->req_gui_asport = asport;
8596 return KERN_SUCCESS;
8597 }
8598
/* MIG server routine: initiate a system reboot/shutdown with the given
 * flags. Only honored by PID 1 and only for privileged callers. Before
 * shutting down, walks the caller's process ancestry to log who asked
 * for the reboot.
 */
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct proc_bsdshortinfo proc;
	struct ldcred *ldc = runtime_get_caller_creds();
	pid_t pid_to_log;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!pid1_magic)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// On embedded, a job marked "embedded god" may reboot even when not
	// running as root; everywhere else only euid 0 may.
#if !TARGET_OS_EMBEDDED
	if (unlikely(ldc->euid)) {
#else
	if (unlikely(ldc->euid) && !j->embedded_god) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Walk up the caller's ancestry (pid -> ppid -> ...) building a
	// human-readable chain for the log. Stops at PID 0 or if a process
	// claims to be its own parent (which would loop forever).
	for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
		size_t who_offset;
		if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			// ESRCH just means the process exited mid-walk; anything else
			// is unexpected and worth recording.
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			return 1;
		}

		if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
			job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
			snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
			break;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;
	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
	launchd_shutdown();

	return 0;
}
8649
8650 kern_return_t
8651 job_mig_getsocket(job_t j, name_t spr)
8652 {
8653 if (!j) {
8654 return BOOTSTRAP_NO_MEMORY;
8655 }
8656
8657 if (j->deny_job_creation) {
8658 return BOOTSTRAP_NOT_PRIVILEGED;
8659 }
8660
8661 #if HAVE_SANDBOX
8662 struct ldcred *ldc = runtime_get_caller_creds();
8663 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8664 return BOOTSTRAP_NOT_PRIVILEGED;
8665 }
8666 #endif
8667
8668 ipc_server_init();
8669
8670 if (unlikely(!sockpath)) {
8671 return BOOTSTRAP_NO_MEMORY;
8672 }
8673
8674 strncpy(spr, sockpath, sizeof(name_t));
8675
8676 return BOOTSTRAP_SUCCESS;
8677 }
8678
8679 kern_return_t
8680 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8681 {
8682 if (!j) {
8683 return BOOTSTRAP_NO_MEMORY;
8684 }
8685
8686 if ((errno = err)) {
8687 job_log_error(j, pri, "%s", msg);
8688 } else {
8689 job_log(j, pri, "%s", msg);
8690 }
8691
8692 return 0;
8693 }
8694
8695 void
8696 job_setup_per_user_directory(job_t j, uid_t uid, const char *path)
8697 {
8698 struct stat sb;
8699
8700 bool created = false;
8701 int r = stat(path, &sb);
8702 if ((r == -1 && errno == ENOENT) || (r == 0 && !S_ISDIR(sb.st_mode))) {
8703 if (r == 0) {
8704 job_log(j, LOG_NOTICE, "File at location of per-user launchd directory is not a directory. Moving aside: %s", path);
8705
8706 char old[PATH_MAX];
8707 snprintf(old, sizeof(old), "%s.movedaside", path);
8708 (void)job_assumes_zero_p(j, rename(path, old));
8709 }
8710
8711 (void)job_assumes_zero_p(j, mkdir(path, S_IRWXU));
8712 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8713 created = true;
8714 }
8715
8716 if (!created) {
8717 if (sb.st_uid != uid) {
8718 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper user ownership. Repairing: %s", path);
8719 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8720 }
8721 if (sb.st_gid != 0) {
8722 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper group ownership. Repairing: %s", path);
8723 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8724 }
8725 if (sb.st_mode != (S_IRWXU | S_IFDIR)) {
8726 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper mode. Repairing: %s", path);
8727 (void)job_assumes_zero_p(j, chmod(path, S_IRWXU));
8728 }
8729 }
8730 }
8731
8732 void
8733 job_setup_per_user_directories(job_t j, uid_t uid, const char *label)
8734 {
8735 char path[PATH_MAX];
8736
8737 (void)snprintf(path, sizeof(path), LAUNCHD_DB_PREFIX "/%s", label);
8738 job_setup_per_user_directory(j, uid, path);
8739
8740 (void)snprintf(path, sizeof(path), LAUNCHD_LOG_PREFIX "/%s", label);
8741 job_setup_per_user_directory(j, uid, path);
8742 }
8743
8744 job_t
8745 jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
8746 {
8747 job_t ji = NULL;
8748 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8749 if (!ji->per_user) {
8750 continue;
8751 }
8752 if (ji->mach_uid != which_user) {
8753 continue;
8754 }
8755 if (SLIST_EMPTY(&ji->machservices)) {
8756 continue;
8757 }
8758 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8759 continue;
8760 }
8761 break;
8762 }
8763
8764 if (unlikely(ji == NULL)) {
8765 struct machservice *ms;
8766 char lbuf[1024];
8767
8768 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
8769
8770 sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
8771
8772 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
8773
8774 if (ji != NULL) {
8775 auditinfo_addr_t auinfo = {
8776 .ai_termid = {
8777 .at_type = AU_IPv4
8778 },
8779 .ai_auid = which_user,
8780 .ai_asid = AU_ASSIGN_ASID,
8781 };
8782
8783 if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8784 job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8785 (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8786
8787 /* Kinda lame that we have to do this, but we can't create an
8788 * audit session without joining it.
8789 */
8790 (void)job_assumes(ji, audit_session_join(launchd_audit_port));
8791 ji->asid = auinfo.ai_asid;
8792 } else {
8793 job_log(ji, LOG_WARNING, "Could not set audit session!");
8794 job_remove(ji);
8795 return NULL;
8796 }
8797
8798 ji->mach_uid = which_user;
8799 ji->per_user = true;
8800 ji->enable_transactions = true;
8801 job_setup_per_user_directories(ji, which_user, lbuf);
8802
8803 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8804 job_remove(ji);
8805 ji = NULL;
8806 } else {
8807 ms->upfront = true;
8808 ms->per_user_hack = true;
8809 ms->hide = true;
8810
8811 ji = job_dispatch(ji, false);
8812 }
8813 }
8814 } else {
8815 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
8816 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8817 }
8818
8819 return ji;
8820 }
8821
8822 kern_return_t
8823 job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
8824 {
8825 struct ldcred *ldc = runtime_get_caller_creds();
8826 job_t jpu;
8827
8828 if (!j) {
8829 return BOOTSTRAP_NO_MEMORY;
8830 }
8831
8832 if (launchd_osinstaller) {
8833 return BOOTSTRAP_UNKNOWN_SERVICE;
8834 }
8835
8836 #if TARGET_OS_EMBEDDED
8837 // There is no need for per-user launchd's on embedded.
8838 job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
8839 return BOOTSTRAP_UNKNOWN_SERVICE;
8840 #endif
8841
8842 #if HAVE_SANDBOX
8843 if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
8844 return BOOTSTRAP_NOT_PRIVILEGED;
8845 }
8846 #endif
8847
8848 job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
8849
8850 if (unlikely(!pid1_magic)) {
8851 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
8852 return BOOTSTRAP_NOT_PRIVILEGED;
8853 }
8854
8855 if (ldc->euid || ldc->uid) {
8856 which_user = ldc->euid ?: ldc->uid;
8857 }
8858
8859 *up_cont = MACH_PORT_NULL;
8860
8861 jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);
8862
8863 return 0;
8864 }
8865
/* MIG server routine: bootstrap_check_in() — hand the receive right for a
 * declared Mach service back to the job that owns it. Supports strict
 * check-ins (service must already exist and belong to the caller),
 * per-PID services, dedicated job instances, and a legacy path that
 * creates the service on the fly for old-style jobs.
 */
kern_return_t
job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
{
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
	struct ldcred *ldc = runtime_get_caller_creds();
	struct machservice *ms = NULL;
	job_t jo;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->dedicated_instance) {
		// Dedicated instances only see their own services; also report our
		// instance UUID back to the caller.
		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
				uuid_copy(instance_id, j->instance_id);
				ms = msi;
				break;
			}
		}
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
	}

	if (strict) {
		// Strict check-in: the service must exist, belong to this job,
		// and not already be checked in.
		if (likely(ms != NULL)) {
			if (ms->job != j) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			} else if (ms->isActive) {
				return BOOTSTRAP_SERVICE_ACTIVE;
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else if (ms == NULL) {
		// Non-strict and not found: create the service on the fly (legacy
		// behavior), unless this is a dedicated instance.
		if (job_assumes(j, !j->dedicated_instance)) {
			*serviceportp = MACH_PORT_NULL;

#if HAVE_SANDBOX
			if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
#endif
			if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
				return BOOTSTRAP_NO_MEMORY;
			}

			// Treat this like a legacy job.
			if (!j->legacy_mach_job) {
				ms->isActive = true;
				ms->recv = false;
			}

			if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
				job_log(j, LOG_APPLEONLY, "Please add the following service to the configuration file for this job: %s", servicename);
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else {
		// Service exists: refuse check-ins by jobs that don't own it, and
		// rate-limit the hijack warning to one per offending PID.
		if (unlikely((jo = machservice_job(ms)) != j)) {
			static pid_t last_warned_pid;

			if (last_warned_pid != ldc->pid) {
				job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
				last_warned_pid = ldc->pid;
			}

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (unlikely(machservice_active(ms))) {
			job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
	}

	job_checkin(j);
	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
8952
/* MIG server routine: bootstrap_register() — register an externally-held
 * send right under a service name. Deprecated in favor of declaring
 * services in the job's plist and using bootstrap_check_in(); a NULL
 * serviceport deregisters an existing (inactive) registration.
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!per_pid_service && !j->legacy_LS_job) {
		job_log(j, LOG_APPLEONLY, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
	}

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
		if (pid1_magic) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);

	if (unlikely(ms)) {
		// A service with this name already exists: only its owning job may
		// replace it, it must be inactive, and a job that declared the
		// service (recv right held here) must use check-in, not register.
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		if (ms->recv && (serviceport != MACH_PORT_NULL)) {
			job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		// Tear down the old registration before installing the new port.
		job_checkin(j);
		machservice_delete(j, ms, false);
	}

	if (likely(serviceport != MACH_PORT_NULL)) {
		if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}


	return BOOTSTRAP_SUCCESS;
}
9021
/* MIG server routine: bootstrap_look_up() — resolve a service name to a
 * send right. Handles per-PID services, XPC-domain-local lookups,
 * strict/privileged lookup semantics, multi-instance jobs (spawning a new
 * instance on demand), and forwarding unresolved lookups to the inherited
 * parent bootstrap (returning MIG_NO_REPLY because the reply port moves
 * with the forward).
 */
kern_return_t
job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
{
	struct machservice *ms = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr;
	bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
	bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
	bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	bool xpc_req = (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN);

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

#if HAVE_SANDBOX
	/* We don't do sandbox checking for XPC domains because, by definition, all
	 * the services within your domain should be accessible to you.
	 */
	if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (per_pid_lookup) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		if (xpc_req) {
			// Requests from XPC domains stay local.
			ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
		} else {
			/* A strict lookup which is privileged won't even bother trying to
			 * find a service if we're not hosting the root Mach bootstrap.
			 */
			if (strict_lookup && privileged) {
				if (inherited_bootstrap_port == MACH_PORT_NULL) {
					ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
				}
			} else {
				ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
			}
		}
	}

	if (likely(ms)) {
		// Resolve aliases to the real service record first.
		ms = ms->alias ? ms->alias : ms;
		if (unlikely(specific_instance && ms->job->multiple_instances)) {
			// The caller asked for a specific instance of a multi-instance
			// job; find it among the subjobs, creating it if necessary.
			job_t ji = NULL;
			job_t instance = NULL;
			LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
				if (uuid_compare(instance_id, ji->instance_id) == 0) {
					instance = ji;
					break;
				}
			}

			if (unlikely(instance == NULL)) {
				job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
				instance = job_new_subjob(ms->job, instance_id);
				if (job_assumes(j, instance != NULL)) {
					/* Disable this support for now. We only support having
					 * multi-instance jobs within private XPC domains.
					 */
#if 0
					/* If the job is multi-instance, in a singleton XPC domain
					 * and the request is not coming from within that singleton
					 * domain, we need to alias the new job into the requesting
					 * domain.
					 */
					if (!j->mgr->xpc_singleton && xpc_req) {
						(void)job_assumes(instance, job_new_alias(j->mgr, instance));
					}
#endif
					job_dispatch(instance, false);
				}
			}

			// Re-resolve the service name against the chosen instance's
			// own machservice list.
			ms = NULL;
			if (job_assumes(j, instance != NULL)) {
				struct machservice *msi = NULL;
				SLIST_FOREACH(msi, &instance->machservices, sle) {
					/* sizeof(servicename) will return the size of a pointer,
					 * even though it's an array type, because when passing
					 * arrays as parameters in C, they implicitly degrade to
					 * pointers.
					 */
					if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
						ms = msi;
						break;
					}
				}
			}
		} else {
			// Hidden-but-inactive and per-user-hack services are not
			// returned from ordinary lookups.
			if (machservice_hidden(ms) && !machservice_active(ms)) {
				ms = NULL;
			} else if (unlikely(ms->per_user_hack)) {
				ms = NULL;
			}
		}
	}

	if (likely(ms)) {
		(void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
		*serviceportp = machservice_port(ms);

		kr = BOOTSTRAP_SUCCESS;
	} else if (strict_lookup && !privileged) {
		/* Hack: We need to simulate XPC's desire not to establish a hierarchy.
		 * So if XPC is doing the lookup, and it's not a privileged lookup, we
		 * won't forward. But if it is a privileged lookup, then we must
		 * forward.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	} else if (inherited_bootstrap_port != MACH_PORT_NULL) {
		// Requests from within an XPC domain don't get forwarded.
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		/* Clients potentially check the audit token of the reply to verify that
		 * the returned send right is trustworthy.
		 */
		(void)job_assumes_zero(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags));
		// The forward consumed the reply port; we must not reply ourselves.
		return MIG_NO_REPLY;
	} else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/* 5240036 Should start background session when a lookup of CCacheServer
		 * occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt
		 * to guess what it is up to. If we find a EUID that isn't root, we
		 * force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
9169
9170 kern_return_t
9171 job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
9172 {
9173 if (!j) {
9174 return BOOTSTRAP_NO_MEMORY;
9175 }
9176
9177 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
9178 jobmgr_t jm = j->mgr;
9179
9180 if (jobmgr_parent(jm)) {
9181 *parentport = jobmgr_parent(jm)->jm_port;
9182 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
9183 *parentport = jm->jm_port;
9184 } else {
9185 (void)job_assumes_zero(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp));
9186 // The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now
9187 return MIG_NO_REPLY;
9188 }
9189 return BOOTSTRAP_SUCCESS;
9190 }
9191
9192 kern_return_t
9193 job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
9194 {
9195 if (!j) {
9196 return BOOTSTRAP_NO_MEMORY;
9197 }
9198
9199 if (inherited_bootstrap_port == MACH_PORT_NULL) {
9200 *rootbsp = root_jobmgr->jm_port;
9201 (void)job_assumes_zero(j, launchd_mport_make_send(root_jobmgr->jm_port));
9202 } else {
9203 *rootbsp = inherited_bootstrap_port;
9204 (void)job_assumes_zero(j, launchd_mport_copy_send(inherited_bootstrap_port));
9205 }
9206
9207 return BOOTSTRAP_SUCCESS;
9208 }
9209
/* MIG server routine: bootstrap_info() — return three parallel MIG-allocated
 * arrays describing every non-per-PID Mach service visible to the caller:
 * service names, owning job labels, and active status. In a flat namespace
 * the root job manager is consulted unless the caller's manager is an
 * explicit subset or BOOTSTRAP_FORCE_LOCAL was passed.
 */
kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp,
		unsigned int *servicenames_cnt, name_array_t *servicejobsp,
		unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp,
		unsigned int *serviceactives_cnt, uint64_t flags)
{
	name_array_t service_names = NULL;
	name_array_t service_jobs = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	jobmgr_t jm;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

#if TARGET_OS_EMBEDDED
	struct ldcred *ldc = runtime_get_caller_creds();
	if (ldc->euid) {
		return EPERM;
	}
#endif // TARGET_OS_EMBEDDED

	// Pick which job manager's service table to report on.
	if (launchd_flat_mach_namespace) {
		if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
			jm = j->mgr;
		} else {
			jm = root_jobmgr;
		}
	} else {
		jm = j->mgr;
	}

	// First pass over the hash table: count non-per-PID services so we
	// know how much to mig_allocate.
	unsigned int i = 0;
	struct machservice *msi = NULL;
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			cnt += !msi->per_pid ? 1 : 0;
		}
	}

	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!job_assumes(j, service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
	if (!job_assumes(j, service_jobs != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!job_assumes(j, service_actives != NULL)) {
		goto out_bad;
	}

	// Second pass: fill in the three parallel arrays, resolving aliases
	// to the real service before reporting the owning job.
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			if (!msi->per_pid) {
				strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
				msi = msi->alias ? msi->alias : msi;
				if (msi->job->mgr->shortdesc) {
					strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
				} else {
					strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
				}
				service_actives[cnt2] = machservice_status(msi);
				cnt2++;
			}
		}
	}

	(void)job_assumes(j, cnt == cnt2);

out:
	*servicenamesp = service_names;
	*servicejobsp = service_jobs;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	// Partial allocation failure: release whatever was allocated.
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_jobs) {
		mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
9309
/* MIG server routine: return the child bootstraps of the caller's job
 * manager — each submanager's port, name, and properties, plus (when we
 * are PID 1) every per-user launchd. Restricted to root callers to avoid
 * cross-session access in a non-flat namespace. The three MIG-allocated
 * arrays transfer to the client on success.
 */
kern_return_t
job_mig_lookup_children(job_t j, mach_port_array_t *child_ports,
		mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names,
		mach_msg_type_number_t *child_names_cnt,
		bootstrap_property_array_t *child_properties,
		mach_msg_type_number_t *child_properties_cnt)
{
	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	struct ldcred *ldc = runtime_get_caller_creds();

	/* Only allow root processes to look up children, even if we're in the per-user launchd.
	 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
	 * in a non-flat namespace.
	 */
	if (ldc->euid != 0) {
		job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Count submanagers (and per-user launchds when we're PID 1) up front
	// so the MIG buffers can be sized exactly.
	unsigned int cnt = 0;

	jobmgr_t jmr = j->mgr;
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		cnt++;
	}

	// Find our per-user launchds if we're PID 1.
	job_t ji = NULL;
	if (pid1_magic) {
		LIST_FOREACH(ji, &jmr->jobs, sle) {
			cnt += ji->per_user ? 1 : 0;
		}
	}

	if (cnt == 0) {
		return BOOTSTRAP_NO_CHILDREN;
	}

	mach_port_array_t _child_ports = NULL;
	name_array_t _child_names = NULL;
	bootstrap_property_array_t _child_properties = NULL;

	mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
	if (!job_assumes(j, _child_ports != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
	if (!job_assumes(j, _child_names != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
	if (!job_assumes(j, _child_properties != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	// Fill in one entry per submanager; a failed send-right creation
	// yields MACH_PORT_NULL rather than aborting the whole reply.
	unsigned int cnt2 = 0;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		if (jobmgr_assumes_zero(jmi, launchd_mport_make_send(jmi->jm_port)) == KERN_SUCCESS) {
			_child_ports[cnt2] = jmi->jm_port;
		} else {
			_child_ports[cnt2] = MACH_PORT_NULL;
		}

		strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
		_child_properties[cnt2] = jmi->properties;

		cnt2++;
	}

	// Then append the per-user launchds (PID 1 only), exposing each one's
	// bootstrap machservice port.
	if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
		if (ji->per_user) {
			if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
				mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));

				if (job_assumes_zero(ji, launchd_mport_copy_send(port)) == KERN_SUCCESS) {
					_child_ports[cnt2] = port;
				} else {
					_child_ports[cnt2] = MACH_PORT_NULL;
				}
			} else {
				_child_ports[cnt2] = MACH_PORT_NULL;
			}

			strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
			_child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;

			cnt2++;
		}
	}

	*child_names_cnt = cnt;
	*child_ports_cnt = cnt;
	*child_properties_cnt = cnt;

	*child_names = _child_names;
	*child_ports = _child_ports;
	*child_properties = _child_properties;

	unsigned int i = 0;
	for (i = 0; i < cnt; i++) {
		job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
	}

	return BOOTSTRAP_SUCCESS;
out_bad:
	// Partial allocation failure: release whatever was allocated.
	if (_child_ports) {
		mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
	}

	if (_child_names) {
		mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
	}

	if (_child_properties) {
		mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
	}

	return kr;
}
9439
9440 kern_return_t
9441 job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
9442 {
9443 struct ldcred *ldc = runtime_get_caller_creds();
9444 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
9445 return BOOTSTRAP_NOT_PRIVILEGED;
9446 }
9447
9448 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
9449 * directly by launchd as agents.
9450 */
9451 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
9452 if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
9453 *managed = true;
9454 }
9455
9456 return BOOTSTRAP_SUCCESS;
9457 }
9458
9459 kern_return_t
9460 job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
9461 {
9462 if (!j) {
9463 return BOOTSTRAP_NO_MEMORY;
9464 }
9465
9466 struct ldcred *ldc = runtime_get_caller_creds();
9467 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
9468
9469 #if HAVE_SANDBOX
9470 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
9471 return BOOTSTRAP_NOT_PRIVILEGED;
9472 }
9473 #endif
9474
9475 mach_port_t _mp = MACH_PORT_NULL;
9476 if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
9477 job_t target_j = job_find(NULL, label);
9478 if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
9479 if (target_j->j_port == MACH_PORT_NULL) {
9480 (void)job_assumes(target_j, job_setup_machport(target_j) == true);
9481 }
9482
9483 _mp = target_j->j_port;
9484 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
9485 } else {
9486 kr = BOOTSTRAP_NO_MEMORY;
9487 }
9488 }
9489
9490 *mp = _mp;
9491 return kr;
9492 }
9493
/* MiG routine: hand the audit-session port identified by `uuid` to every job
 * that registered itself as waiting for that session (s_needing_sessions),
 * then dispatch those jobs. Each matched job takes its own send-right
 * reference on `asport`; the MiG-protocol reference is dropped at the end.
 */
kern_return_t
job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
{
#if TARGET_OS_EMBEDDED
	return KERN_SUCCESS;
#endif

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	uuid_string_t uuid_str;
	uuid_unparse(uuid, uuid_str);
	job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);

	job_t ji = NULL, jt = NULL;
	// _SAFE variant: matched jobs are LIST_REMOVE'd inside the loop body.
	LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
		uuid_string_t uuid_str2;
		// NOTE(review): uuid_str2 is unparsed but never logged or otherwise
		// used — presumably a leftover debugging aid; confirm before removal.
		uuid_unparse(ji->expected_audit_uuid, uuid_str2);

		if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
			uuid_clear(ji->expected_audit_uuid);
			if (asport != MACH_PORT_NULL) {
				job_log(ji, LOG_DEBUG, "Job should join session with port 0x%x", asport);
				// Take an extra send right so this job owns its own reference.
				(void)job_assumes_zero(j, launchd_mport_copy_send(asport));
			} else {
				job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
			}

			ji->asport = asport;
			LIST_REMOVE(ji, needing_session_sle);

			// The event monitor is kicked rather than dispatched normally.
			if (ji->event_monitor) {
				eventsystem_ping();
			} else {
				job_dispatch(ji, false);
			}
		}
	}

	/* Each job that the session port was set for holds a reference. At the end of
	 * the loop, there will be one extra reference belonging to this MiG protocol.
	 * We need to release it so that the session goes away when all the jobs
	 * referencing it are unloaded.
	 */
	(void)job_assumes_zero(j, launchd_mport_deallocate(asport));

	return KERN_SUCCESS;
}
9543
9544 jobmgr_t
9545 jobmgr_find_by_name(jobmgr_t jm, const char *where)
9546 {
9547 jobmgr_t jmi, jmi2;
9548
9549 // NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic.
9550 if (where == NULL) {
9551 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9552 where = VPROCMGR_SESSION_LOGINWINDOW;
9553 } else {
9554 where = VPROCMGR_SESSION_AQUA;
9555 }
9556 }
9557
9558 if (strcasecmp(jm->name, where) == 0) {
9559 return jm;
9560 }
9561
9562 if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
9563 jmi = root_jobmgr;
9564 goto jm_found;
9565 }
9566
9567 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9568 if (unlikely(jmi->shutting_down)) {
9569 continue;
9570 } else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
9571 continue;
9572 } else if (strcasecmp(jmi->name, where) == 0) {
9573 goto jm_found;
9574 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
9575 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
9576 if (strcasecmp(jmi2->name, where) == 0) {
9577 jmi = jmi2;
9578 goto jm_found;
9579 }
9580 }
9581 }
9582 }
9583
9584 jm_found:
9585 return jmi;
9586 }
9587
/* MiG routine: absorb the Mach sub-bootstrap behind `target_subset` into a
 * new job manager created under j's manager, re-importing the remote
 * bootstrap's registered Mach services. On success (kr == 0) the send rights
 * for target_subset and asport are consumed per MiG ownership conventions;
 * on failure a partially-built manager is shut down.
 */
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	jobmgr_t jmr = NULL;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Refuse when target_subset already names a bootstrap inside this launchd.
	if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	// Ask the remote launchd for its request/receive ports, a packed
	// description of its services, and the corresponding service ports.
	kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
	if (job_assumes_zero(j, kr) != 0) {
		goto out;
	}

	// The descriptor array and the port array must be parallel.
	if (launch_data_array_get_count(out_obj_array) != l2l_port_cnt) {
		os_assert_zero(l2l_port_cnt);
	}

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
		jobmgr_log(jmr, LOG_NOTICE, "Registering new GUI session.");
		kr = vproc_mig_register_gui_session(inherited_bootstrap_port, asport);
		if (kr) {
			jobmgr_log(jmr, LOG_ERR, "Failed to register GUI session with PID 1: 0x%x/0x%x", inherited_bootstrap_port, kr);
		}
	}

	jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;

	/* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
	 * processing an IPC request, we'll do this action before the new job manager can get any IPC
	 * requests. This serialization is guaranteed since we are single-threaded in that respect.
	 */
	if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
		// This is so awful.
		// Remove the job from its current job manager.
		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, pid_hash_sle);

		// Put the job into the target job manager.
		LIST_INSERT_HEAD(&jmr->jobs, j, sle);
		LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

		j->mgr = jmr;
		job_set_global_on_demand(j, true);

		// Keep launchd alive while this relocated job exists.
		if (!j->holds_ref) {
			job_log(j, LOG_PERF, "Job moved subset into: %s", j->mgr->name);
			j->holds_ref = true;
			runtime_add_ref();
		}
	}

	// Re-register each transferred Mach service under its owning PID's job.
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		(void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (unlikely(!j_for_service)) {
			// The PID probably exited
			(void)job_assumes_zero(j, launchd_mport_deallocate(l2l_ports[l2l_i]));
			continue;
		}

		if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	// Success: consume the caller-supplied rights. Failure: tear down any
	// manager we created so we don't strand a half-moved subset.
	if (kr == 0) {
		if (target_subset) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(target_subset));
		}
		if (asport) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	} else if (jmr) {
		jobmgr_shutdown(jmr);
	}

	return kr;
}
9714
/* MiG routine: turn j's (anonymous) job manager into a named session of the
 * given type and dispatch the session's bootstrapper job. Fails if the
 * session was already initialized.
 */
kern_return_t
job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_t j2;

	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (j->mgr->session_initialized) {
		job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
		kr = BOOTSTRAP_NOT_PRIVILEGED;
	} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		jobmgr_t jmi;

		/*
		 * 5330262
		 *
		 * We're working around LoginWindow and the WindowServer.
		 *
		 * In practice, there is only one LoginWindow session. Unfortunately, for certain
		 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
		 * spawns a replacement loginwindow session before cleaning up the previous one.
		 *
		 * We're going to use the creation of a new LoginWindow context as a clue that the
		 * previous LoginWindow context is on the way out and therefore we should just
		 * kick-start the shutdown of it.
		 */

		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if (unlikely(jmi->shutting_down)) {
				continue;
			} else if (strcasecmp(jmi->name, session_type) == 0) {
				jobmgr_shutdown(jmi);
				break;
			}
		}
	} else if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
		// Aqua sessions get their port pulled from the port set.
		(void)job_assumes_zero(j, runtime_remove_mport(j->mgr->jm_port));
	}

	jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
	/* NOTE(review): unbounded strcpy. session_type is a fixed-size MiG name_t,
	 * and name_init presumably is sized to hold one (it appears to be a
	 * flexible/overlay field, so sizeof-based bounding isn't possible here) —
	 * confirm against the jobmgr_s definition.
	 */
	strcpy(j->mgr->name_init, session_type);

	if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
		j2->asport = asport;
		(void)job_assumes(j, job_dispatch(j2, true));
		kr = BOOTSTRAP_SUCCESS;
	}

	return kr;
}
9768
/* MiG routine: move an anonymous job from its current session into the named
 * session, creating that session as an implicit subset if it doesn't exist.
 * Only permitted for anonymous jobs in non-PID-1 launchds. The job's list
 * memberships and (in non-flat namespaces) its Mach service hash entries are
 * migrated to the target manager.
 */
kern_return_t
job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);

	if (!job_assumes(j, pid1_magic == false)) {
		job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!j->anonymous) {
		job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
	if (target_jm == j->mgr) {
		// Already there: just release the MiG-supplied rights and hand back
		// the existing bootstrap port.
		job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
		(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		(void)job_assumes_zero(j, launchd_mport_deallocate(requestor_port));
		*new_bsport = target_jm->jm_port;
		return BOOTSTRAP_SUCCESS;
	}

	if (!target_jm) {
		target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
		if (target_jm) {
			target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
			// jobmgr_new took its own reference on asport.
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	}

	if (!job_assumes(j, target_jm != NULL)) {
		job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
		return BOOTSTRAP_NO_MEMORY;
	}

	// Remove the job from its current job manager.
	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, pid_hash_sle);

	// Remember whether j was on the global-environment list so we can
	// re-insert it on the target's list below.
	job_t ji = NULL, jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
		if (ji == j) {
			LIST_REMOVE(ji, global_env_sle);
			break;
		}
	}

	// Put the job into the target job manager.
	LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
	LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

	if (ji) {
		LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
	}

	// Move our Mach services over if we're not in a flat namespace.
	if (!launchd_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
		struct machservice *msi = NULL, *msit = NULL;
		SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
			LIST_REMOVE(msi, name_hash_sle);
			LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}

	j->mgr = target_jm;

	if (!j->holds_ref) {
		/* Anonymous jobs which move around are particularly interesting to us, so we want to
		 * stick around while they're still around.
		 * For example, login calls into the PAM launchd module, which moves the process into
		 * the StandardIO session by default. So we'll hold a reference on that job to prevent
		 * ourselves from going away.
		 */
		j->holds_ref = true;
		job_log(j, LOG_PERF, "Job switched into manager: %s", j->mgr->name);
		runtime_add_ref();
	}

	*new_bsport = target_jm->jm_port;

	return KERN_SUCCESS;
}
9863
9864 kern_return_t
9865 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
9866 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
9867 mach_port_array_t *portsp, unsigned int *ports_cnt)
9868 {
9869 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
9870 mach_port_array_t ports = NULL;
9871 unsigned int cnt = 0, cnt2 = 0;
9872 size_t packed_size;
9873 struct machservice *ms;
9874 jobmgr_t jm;
9875 job_t ji;
9876
9877 if (!j) {
9878 return BOOTSTRAP_NO_MEMORY;
9879 }
9880
9881 jm = j->mgr;
9882
9883 if (unlikely(!pid1_magic)) {
9884 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
9885 return BOOTSTRAP_NOT_PRIVILEGED;
9886 }
9887 if (unlikely(jobmgr_parent(jm) == NULL)) {
9888 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
9889 return BOOTSTRAP_NOT_PRIVILEGED;
9890 }
9891 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
9892 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
9893 return BOOTSTRAP_NOT_PRIVILEGED;
9894 }
9895 if (unlikely(!j->anonymous)) {
9896 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
9897 return BOOTSTRAP_NOT_PRIVILEGED;
9898 }
9899
9900 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
9901
9902 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
9903 if (!job_assumes(j, outdata_obj_array)) {
9904 goto out_bad;
9905 }
9906
9907 *outdataCnt = 20 * 1024 * 1024;
9908 mig_allocate(outdata, *outdataCnt);
9909 if (!job_assumes(j, *outdata != 0)) {
9910 return 1;
9911 }
9912
9913 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9914 if (!ji->anonymous) {
9915 continue;
9916 }
9917 SLIST_FOREACH(ms, &ji->machservices, sle) {
9918 cnt++;
9919 }
9920 }
9921
9922 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
9923 if (!job_assumes(j, ports != NULL)) {
9924 goto out_bad;
9925 }
9926
9927 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9928 if (!ji->anonymous) {
9929 continue;
9930 }
9931
9932 SLIST_FOREACH(ms, &ji->machservices, sle) {
9933 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
9934 (void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
9935 } else {
9936 goto out_bad;
9937 }
9938
9939 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
9940 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
9941 } else {
9942 goto out_bad;
9943 }
9944
9945 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
9946 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
9947 } else {
9948 goto out_bad;
9949 }
9950
9951 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
9952 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
9953 } else {
9954 goto out_bad;
9955 }
9956
9957 ports[cnt2] = machservice_port(ms);
9958
9959 // Increment the send right by one so we can shutdown the jobmgr cleanly
9960 (void)jobmgr_assumes_zero(jm, launchd_mport_copy_send(ports[cnt2]));
9961 cnt2++;
9962 }
9963 }
9964
9965 (void)job_assumes(j, cnt == cnt2);
9966
9967 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
9968 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
9969 if (!job_assumes(j, packed_size != 0)) {
9970 goto out_bad;
9971 }
9972
9973 launch_data_free(outdata_obj_array);
9974
9975 *portsp = ports;
9976 *ports_cnt = cnt;
9977
9978 *reqport = jm->req_port;
9979 *rcvright = jm->jm_port;
9980
9981 jm->req_port = 0;
9982 jm->jm_port = 0;
9983
9984 workaround_5477111 = j;
9985
9986 jobmgr_shutdown(jm);
9987
9988 return BOOTSTRAP_SUCCESS;
9989
9990 out_bad:
9991 if (outdata_obj_array) {
9992 launch_data_free(outdata_obj_array);
9993 }
9994 if (*outdata) {
9995 mig_deallocate(*outdata, *outdataCnt);
9996 }
9997 if (ports) {
9998 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
9999 }
10000
10001 return BOOTSTRAP_NO_MEMORY;
10002 }
10003
10004 kern_return_t
10005 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
10006 {
10007 int bsdepth = 0;
10008 jobmgr_t jmr;
10009
10010 if (!j) {
10011 return BOOTSTRAP_NO_MEMORY;
10012 }
10013 if (j->mgr->shutting_down) {
10014 return BOOTSTRAP_UNKNOWN_SERVICE;
10015 }
10016
10017 jmr = j->mgr;
10018
10019 while ((jmr = jobmgr_parent(jmr)) != NULL) {
10020 bsdepth++;
10021 }
10022
10023 // Since we use recursion, we need an artificial depth for subsets
10024 if (unlikely(bsdepth > 100)) {
10025 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
10026 return BOOTSTRAP_NO_MEMORY;
10027 }
10028
10029 char name[NAME_MAX];
10030 snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
10031
10032 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
10033 if (unlikely(requestorport == MACH_PORT_NULL)) {
10034 return BOOTSTRAP_NOT_PRIVILEGED;
10035 }
10036 return BOOTSTRAP_NO_MEMORY;
10037 }
10038
10039 *subsetportp = jmr->jm_port;
10040 jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
10041
10042 /* A job could create multiple subsets, so only add a reference the first time
10043 * it does so we don't have to keep a count.
10044 */
10045 if (j->anonymous && !j->holds_ref) {
10046 job_log(j, LOG_PERF, "Job created subset: %s", jmr->name);
10047 j->holds_ref = true;
10048 runtime_add_ref();
10049 }
10050
10051 job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
10052 return BOOTSTRAP_SUCCESS;
10053 }
10054
/* Import one XPC service plist (`pload`) into the appropriate domain: the
 * local manager `jm` by default, or a singleton domain (system / per-user /
 * per-session) when the payload names one via XPCDomain. Singleton-domain
 * imports are additionally aliased into the local domain to reserve the
 * label. Returns the imported (or aliased) job, or NULL with errno set.
 */
job_t
_xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
{
	jobmgr_t where2put = NULL;

	if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
		errno = EINVAL;
		return NULL;
	}

	launch_data_t ldlabel = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL);
	if (!ldlabel || launch_data_get_type(ldlabel) != LAUNCH_DATA_STRING) {
		errno = EINVAL;
		return NULL;
	}

	const char *label = launch_data_get_string(ldlabel);
	jobmgr_log(jm, LOG_DEBUG, "Importing service: %s", label);

	launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
	if (destname) {
		// NOTE(review): supported_domain is set only for the per-user case,
		// so the multiple-instances restriction below is skipped for it but
		// enforced for system and per-session domains — confirm intentional.
		bool supported_domain = false;

		if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
			const char *str = launch_data_get_string(destname);
			if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
				where2put = _s_xpc_system_domain;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
				where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
				supported_domain = true;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
				where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
			} else {
				jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
				errno = EINVAL;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
			errno = EINVAL;
		}

		if (where2put && !supported_domain) {
			// Multiple-instance services cannot live in singleton domains.
			launch_data_t mi = NULL;
			if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
				if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
					jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
					where2put = NULL;
					errno = EINVAL;
				}
			}
		}
	} else {
		where2put = jm;
	}

	job_t j = NULL;
	if (where2put) {
		/* Gross. If the service already exists in a singleton domain, then
		 * jobmgr_import2() will return the existing job. But if we fail to alias
		 * this job, we will normally want to remove it. But if we did not create
		 * it in the first place, then we need to avoid removing it. So check
		 * errno against EEXIST in the success case and if it's EEXIST, then do
		 * not remove the original job in the event of a failed alias.
		 *
		 * This really needs to be re-thought, but I think it'll require a larger
		 * evaluation of launchd's data structures. Right now, once a job is
		 * imported into a singleton domain, it won't be removed until the system
		 * shuts down, but that may not always be true. If it ever changes, we'll
		 * have a problem because we'll have to account for all existing aliases
		 * and clean them up somehow. Or just start ref-counting. I knew this
		 * aliasing stuff would be trouble...
		 *
		 * <rdar://problem/10646503>
		 */
		jobmgr_log(where2put, LOG_DEBUG, "Importing service...");

		// errno is the side-channel telling us whether import2 created the
		// job (0) or returned an existing one (EEXIST).
		errno = 0;
		if ((j = jobmgr_import2(where2put, pload))) {
			bool created = (errno != EEXIST);
			j->xpc_service = true;

			if (where2put->xpc_singleton) {
				/* If the service was destined for one of the global domains,
				 * then we have to alias it into our local domain to reserve the
				 * name.
				 */
				job_t ja = NULL;
				if (!(ja = job_new_alias(jm, j))) {
					/* If we failed to alias the job because of a conflict over
					 * the label, then we remove it from the global domain. We
					 * don't want to risk having imported a malicious job into
					 * one of the global domains.
					 */
					if (errno != EEXIST) {
						job_log(j, LOG_ERR, "Failed to alias job into: %s: %d: %s", where2put->name, errno, strerror(errno));
					} else {
						errno = 0;
					}

					if (created) {
						jobmgr_log(jm, LOG_WARNING, "Singleton service already existed in job-local namespace. Removing: %s", j->label);
						job_remove(j);
					}

					j = NULL;
				} else {
					jobmgr_log(jm, LOG_DEBUG, "Aliased service into local domain: %s", j->label);
					(void)job_dispatch(j, false);
					ja->xpc_service = true;
					j = ja;
				}
			} else {
				(void)job_dispatch(j, false);
			}
		}
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Could not find destination for service: %s", label);
	}

	return j;
}
10176
10177 int
10178 _xpc_domain_import_services(job_t j, launch_data_t services)
10179 {
10180 int error = EINVAL;
10181 if (launch_data_get_type(services) != LAUNCH_DATA_ARRAY) {
10182 return error;
10183 }
10184
10185 size_t i = 0;
10186 size_t c = launch_data_array_get_count(services);
10187 jobmgr_log(j->mgr, LOG_DEBUG, "Importing new services: %lu", c);
10188
10189 for (i = 0; i < c; i++) {
10190 jobmgr_log(j->mgr, LOG_DEBUG, "Importing service at index: %lu", i);
10191
10192 job_t nj = NULL;
10193 launch_data_t ploadi = launch_data_array_get_index(services, i);
10194 if (!(nj = _xpc_domain_import_service(j->mgr, ploadi))) {
10195 if (!j->mgr->session_initialized && errno) {
10196 /* Service import failures are only fatal if the domain is being
10197 * initialized. If we're extending the domain, we can run into
10198 * errors with services already existing, so we just ignore them.
10199 * In the case of a domain extension, we don't want to halt the
10200 * operation if we run into an error with one service.
10201 *
10202 * <rdar://problem/10842779>
10203 */
10204 jobmgr_log(j->mgr, LOG_ERR, "Failed to import service at index: %lu: %d: %s", i, errno, strerror(errno));
10205 error = errno;
10206 break;
10207 }
10208 } else {
10209 jobmgr_log(j->mgr, LOG_DEBUG, "Imported service: %s", nj->label);
10210 }
10211 }
10212
10213 if (i == c) {
10214 error = 0;
10215 }
10216
10217 return error;
10218 }
10219
10220 kern_return_t
10221 xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
10222 {
10223 if (unlikely(!pid1_magic)) {
10224 job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
10225 return BOOTSTRAP_NOT_PRIVILEGED;
10226 }
10227 if (!j || !MACH_PORT_VALID(reqport)) {
10228 return BOOTSTRAP_UNKNOWN_SERVICE;
10229 }
10230 if (root_jobmgr->shutting_down) {
10231 jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
10232 return BOOTSTRAP_NOT_PRIVILEGED;
10233 }
10234 if (!j->xpc_bootstrapper) {
10235 job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
10236 return BOOTSTRAP_NOT_PRIVILEGED;
10237 }
10238
10239 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
10240 /* All XPC domains are children of the root job manager. What we're creating
10241 * here is really just a skeleton. By creating it, we're adding reqp to our
10242 * port set. It will have two messages on it. The first specifies the
10243 * environment of the originator. This is so we can cache it and hand it to
10244 * xpcproxy to bootstrap our services. The second is the set of jobs that is
10245 * to be bootstrapped in.
10246 */
10247 jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
10248 if (job_assumes(j, jm != NULL)) {
10249 jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
10250 jm->shortdesc = "private";
10251 kr = BOOTSTRAP_SUCCESS;
10252 }
10253
10254 return kr;
10255 }
10256
/* MiG routine: record the originating process's environment (ports, creds,
 * audit session, opaque context) on an XPC-domain job manager created by
 * xpc_domain_import2(). May only be called once per domain (req_asport acts
 * as the "already set" sentinel). Also migrates any pending debugger
 * attach-waiters destined for the originator's PID into this domain.
 */
kern_return_t
xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
{
	if (!j) {
		/* Due to the whacky nature of XPC service bootstrapping, we can end up
		 * getting this message long after the requesting process has gone away.
		 * See <rdar://problem/8593143>.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jobmgr_t jm = j->mgr;
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// A non-NULL req_asport means the environment was already set.
	if (jm->req_asport != MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Snapshot the caller's BSD process info; if it's gone, the skeleton
	// domain is useless, so tear it down.
	struct ldcred *ldc = runtime_get_caller_creds();
	struct proc_bsdinfowithuniqid proc;
	if (proc_pidinfo(ldc->pid, PROC_PIDT_BSDINFOWITHUNIQID, 1, &proc, PROC_PIDT_BSDINFOWITHUNIQID_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}

		jm->error = errno;
		jobmgr_remove(jm);
		return BOOTSTRAP_NO_MEMORY;
	}

#if !TARGET_OS_EMBEDDED
	if (jobmgr_assumes_zero(jm, audit_session_port(ldc->asid, &jm->req_asport)) != 0) {
		jm->error = EPERM;
		jobmgr_remove(jm);
		job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#else
	jm->req_asport = MACH_PORT_DEAD;
#endif

	// Move any attach requests that were waiting for this PID's domain.
	struct waiting4attach *w4ai = NULL;
	struct waiting4attach *w4ait = NULL;
	LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
		if (w4ai->dest == ldc->pid) {
			jobmgr_log(jm, LOG_DEBUG, "Migrating attach for: %s", w4ai->name);
			LIST_REMOVE(w4ai, le);
			LIST_INSERT_HEAD(&jm->attaches, w4ai, le);
			w4ai->dest = 0;
		}
	}

	// Name the domain after the originator and cache its environment.
	(void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s.%d", proc.pbsd.pbi_comm, ldc->pid);
	strlcpy(jm->owner, proc.pbsd.pbi_comm, sizeof(jm->owner));
	jm->req_bsport = bsport;
	jm->req_excport = excport;
	jm->req_rport = rp;
	jm->req_ctx = ctx;
	jm->req_ctx_sz = ctx_sz;
	jm->req_pid = ldc->pid;
	jm->req_euid = ldc->euid;
	jm->req_egid = ldc->egid;
	jm->req_asid = ldc->asid;
	jm->req_uniqueid = proc.p_uniqidentifier.p_uniqueid;

	return KERN_SUCCESS;
}
10326
kern_return_t
xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	/* One-shot initialization of an XPC domain: unpack the serialized
	 * service definitions sent by the domain's bootstrapper and import them
	 * into this job's manager. On import failure the whole domain is torn
	 * down.
	 */
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	/* Only the process registered as the domain's XPC bootstrapper may load
	 * services into it.
	 */
	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!(rootj && rootj->xpc_bootstrapper)) {
		job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// This is just for XPC domains (for now).
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	/* A domain may only be initialized once. */
	if (j->mgr->session_initialized) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!services) {
		return BOOTSTRAP_NO_MEMORY;
	}

	int error = _xpc_domain_import_services(j, services);
	if (error) {
		/* Import failure is fatal to the whole domain: record the error and
		 * remove the manager.
		 */
		j->mgr->error = error;
		jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
		jobmgr_remove(j->mgr);
	} else {
		j->mgr->session_initialized = true;
		/* Wake the domain's creator; the reply port is one-shot, so clear it
		 * after use.
		 */
		(void)jobmgr_assumes_zero(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS));
		j->mgr->req_rport = MACH_PORT_NULL;

		/* Returning a failure code will destroy the message, whereas returning
		 * success will not, so we need to clean up here.
		 */
		mig_deallocate(services_buff, services_sz);
		error = BOOTSTRAP_SUCCESS;
	}

	return error;
}
10374
10375 kern_return_t
10376 xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport,
10377 mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid,
10378 int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
10379 {
10380 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
10381 return BOOTSTRAP_UNKNOWN_SERVICE;
10382 }
10383 jobmgr_t jm = j->mgr;
10384 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
10385 return BOOTSTRAP_NOT_PRIVILEGED;
10386 }
10387
10388 if (jm->req_asport == MACH_PORT_NULL) {
10389 return BOOTSTRAP_NOT_PRIVILEGED;
10390 }
10391
10392 *bsport = jm->req_bsport;
10393 *sbsport = root_jobmgr->jm_port;
10394 *excport = jm->req_excport;
10395 if (j->joins_gui_session) {
10396 if (jm->req_gui_asport) {
10397 *asport = jm->req_gui_asport;
10398 } else {
10399 job_log(j, LOG_NOTICE, "No GUI session set for UID of user service. This service may not act properly.");
10400 *asport = jm->req_asport;
10401 }
10402 } else {
10403 *asport = jm->req_asport;
10404 }
10405
10406 *uid = jm->req_euid;
10407 *gid = jm->req_egid;
10408 *asid = jm->req_asid;
10409
10410 *ctx = jm->req_ctx;
10411 *ctx_sz = jm->req_ctx_sz;
10412
10413 return KERN_SUCCESS;
10414 }
10415
10416 kern_return_t
10417 xpc_domain_get_service_name(job_t j, event_name_t name)
10418 {
10419 if (!j) {
10420 return BOOTSTRAP_NO_MEMORY;
10421 }
10422
10423 if (!j->xpc_service) {
10424 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
10425 return BOOTSTRAP_NOT_PRIVILEGED;
10426 }
10427
10428 const char *what2find = j->label;
10429 if (j->dedicated_instance) {
10430 what2find = j->original->label;
10431 }
10432
10433 struct machservice *msi = NULL;
10434 SLIST_FOREACH(msi, &j->machservices, sle) {
10435 if (strcmp(msi->name, what2find) == 0) {
10436 break;
10437 }
10438 }
10439
10440 if (!msi) {
10441 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name that does not exist: %s", j->label);
10442 return BOOTSTRAP_UNKNOWN_SERVICE;
10443 }
10444
10445 (void)strlcpy(name, msi->name, sizeof(event_name_t));
10446 return BOOTSTRAP_SUCCESS;
10447 }
10448
#if XPC_LPI_VERSION >= 20111216
kern_return_t
xpc_domain_add_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	/* Import additional services into an XPC domain after its initial
	 * bootstrap. Only the domain's designated bootstrapper job may do this.
	 */
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_t bootstrapper = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!bootstrapper || !bootstrapper->xpc_bootstrapper) {
		job_log(j, LOG_ERR, "Attempt to add service to XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (services == NULL) {
		return BOOTSTRAP_NO_MEMORY;
	}

	int error = _xpc_domain_import_services(j, services);
	if (error == 0) {
		/* A success return does not destroy the MIG message, so the buffer
		 * must be released here; a failure return destroys it for us.
		 */
		mig_deallocate(services_buff, services_sz);
	}

	return error;
}
#endif
10481
10482 #pragma mark XPC Events
int
xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms)
{
	/* Find (or lazily create) the MachService that serves as the private
	 * event channel between job `j` and the event stream named `stream`.
	 * Returns 0 with *ms set on success, EXNOMEM on allocation failure, or
	 * EEXIST when the job already registered a non-channel service under
	 * the stream's name.
	 */
	int error = EXNOMEM;
	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (strcmp(stream, msi->name) == 0) {
			break;
		}
	}

	if (!msi) {
		/* No existing service: create a fresh one named after the stream. */
		mach_port_t sp = MACH_PORT_NULL;
		msi = machservice_new(j, stream, &sp, false);
		if (!msi) {
			return EXNOMEM;
		}

		job_log(j, LOG_DEBUG, "Creating new MachService for stream: %s", stream);
		/* Hack to keep this from being publicly accessible through
		 * bootstrap_look_up().
		 */
		if (!j->dedicated_instance) {
			LIST_REMOVE(msi, name_hash_sle);
		}
		msi->event_channel = true;

		/* If we call job_dispatch() here before the audit session for the job
		 * has been set, we'll end up not watching this service. But we also have
		 * to take care not to watch the port if the job is active.
		 *
		 * See <rdar://problem/10357855>.
		 */
		if (!j->currently_ignored) {
			machservice_watch(j, msi);
		}

		error = 0;
		*ms = msi;
	} else if (!msi->event_channel) {
		job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
		error = EEXIST;
	} else {
		/* Existing channel: hand it back as-is. */
		error = 0;
		*ms = msi;
	}

	return error;
}
10532
10533 int
10534 xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply)
10535 {
10536 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10537 if (!stream) {
10538 return EXINVAL;
10539 }
10540
10541 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10542 if (!token) {
10543 return EXINVAL;
10544 }
10545
10546 job_log(j, LOG_DEBUG, "Getting event name for stream/token: %s/0x%llu", stream, token);
10547
10548 int result = ESRCH;
10549 struct externalevent *event = externalevent_find(stream, token);
10550 if (event && j->event_monitor) {
10551 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10552 xpc_dictionary_set_string(reply2, XPC_EVENT_ROUTINE_KEY_NAME, event->name);
10553 *reply = reply2;
10554
10555 job_log(j, LOG_DEBUG, "Found: %s", event->name);
10556 result = 0;
10557 }
10558
10559 return result;
10560 }
10561
10562 int
10563 xpc_event_copy_entitlements(job_t j, xpc_object_t request, xpc_object_t *reply)
10564 {
10565 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10566 if (!stream) {
10567 return EXINVAL;
10568 }
10569
10570 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10571 if (!token) {
10572 return EXINVAL;
10573 }
10574
10575 job_log(j, LOG_DEBUG, "Getting entitlements for stream/token: %s/0x%llu", stream, token);
10576
10577 int result = ESRCH;
10578 struct externalevent *event = externalevent_find(stream, token);
10579 if (event && j->event_monitor) {
10580 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10581 xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_ENTITLEMENTS, event->entitlements);
10582 *reply = reply2;
10583
10584 job_log(j, LOG_DEBUG, "Found: %s", event->name);
10585 result = 0;
10586 }
10587
10588 return result;
10589 }
10590
10591 // TODO - can be removed with rdar://problem/12666150
10592 #ifndef XPC_EVENT_FLAG_ALLOW_UNMANAGED
10593 #define XPC_EVENT_FLAG_ALLOW_UNMANAGED (1 << 1)
10594 #endif
10595
int
xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Register (event != NULL) or remove (event == NULL) an event for this
	 * job under the given stream/key pair. Setting replaces any existing
	 * event with the same stream and key. Replies with an empty dictionary
	 * on success.
	 */
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);
	if (!key) {
		return EXINVAL;
	}

	/* The event payload is optional; when present it must be a dictionary. */
	xpc_object_t event = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_EVENT);
	if (event && xpc_get_type(event) != XPC_TYPE_DICTIONARY) {
		return EXINVAL;
	}

	uint64_t flags = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_FLAGS);

	/* Don't allow events to be set for anonymous jobs unless specifically
	 * requested in the flags. Only permit this for internal development.
	 */
	if (j->anonymous && ((flags & XPC_EVENT_FLAG_ALLOW_UNMANAGED) == 0 || !launchd_apple_internal)) {
		job_log(j, LOG_ERR, "Unmanaged jobs may not make XPC Events requests.");
		return EPERM;
	}

	job_log(j, LOG_DEBUG, "%s event for stream/key: %s/%s", event ? "Setting" : "Removing", stream, key);

	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		/* If the event for the given key already exists for the job, we need to
		 * remove the old one first.
		 */
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			job_log(j, LOG_DEBUG, "Event exists. Removing.");
			externalevent_delete(eei);
			break;
		}
	}

	int result = EXNOMEM;
	if (event) {
		/* Streams are created lazily on first use. */
		struct eventsystem *es = eventsystem_find(stream);
		if (!es) {
			job_log(j, LOG_DEBUG, "Creating stream.");
			es = eventsystem_new(stream);
		}

		if (es) {
			job_log(j, LOG_DEBUG, "Adding event.");
			if (externalevent_new(j, es, key, event, flags)) {
				job_log(j, LOG_DEBUG, "Added new event for key: %s", key);
				result = 0;
			} else {
				job_log(j, LOG_ERR, "Could not create event for key: %s", key);
			}
		} else {
			job_log(j, LOG_ERR, "Event stream could not be created: %s", stream);
		}
	} else {
		/* If the event was NULL, then we just remove it and return. */
		result = 0;
	}

	if (result == 0) {
		/* Success replies carry no payload; creating the reply is the ack. */
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		*reply = reply2;
	}

	return result;
}
10669
int
xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Copy back events registered by this job. The reply shape depends on
	 * which request keys are present:
	 *   - no stream: dictionary of stream-name -> { key -> event } for all
	 *     of the job's events (key must also be absent or empty);
	 *   - stream but no key: dictionary of key -> event for that stream;
	 *   - stream and key: the single matching event dictionary.
	 */
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);

	bool all_streams = (stream == NULL);
	bool all_events = (key == NULL || strcmp(key, "") == 0); // strcmp for libxpc compatibility
	xpc_object_t events = NULL;

	/* Asking for one specific key across all streams is not meaningful. */
	if (all_streams && !all_events) {
		return EXINVAL;
	}

	if (all_streams || all_events) {
		job_log(j, LOG_DEBUG, "Fetching all events%s%s", stream ? " for stream: " : "", stream ? stream : "");
		events = xpc_dictionary_create(NULL, NULL, 0);
	} else {
		job_log(j, LOG_DEBUG, "Fetching stream/key: %s/%s", stream, key);
	}

	int result = ESRCH;
	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		if (all_streams) {
			/* Group events into one sub-dictionary per stream. */
			xpc_object_t sub = xpc_dictionary_get_value(events, eei->sys->name);
			if (sub == NULL) {
				sub = xpc_dictionary_create(NULL, NULL, 0);
				xpc_dictionary_set_value(events, eei->sys->name, sub);
				xpc_release(sub);
			}
			xpc_dictionary_set_value(sub, eei->name, eei->event);
		} else if (strcmp(eei->sys->name, stream) == 0) {
			if (all_events) {
				xpc_dictionary_set_value(events, eei->name, eei->event);
			} else if (strcmp(eei->name, key) == 0) {
				/* Exact match: hand back a retained reference to the event
				 * itself instead of a container.
				 */
				job_log(j, LOG_DEBUG, "Found event.");
				events = xpc_retain(eei->event);
				break;
			}
		}
	}

	if (events) {
		/* The reply dictionary takes its own reference; drop ours. */
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENT, events);
		xpc_release(events);

		*reply = reply2;
		result = 0;
	}

	return result;
}
10724
10725 int
10726 xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
10727 {
10728 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10729 if (!stream) {
10730 return EXINVAL;
10731 }
10732
10733 job_log(j, LOG_DEBUG, "Checking in stream: %s", stream);
10734
10735 struct machservice *ms = NULL;
10736 int error = xpc_event_find_channel(j, stream, &ms);
10737 if (error) {
10738 job_log(j, LOG_ERR, "Failed to check in: 0x%x: %s", error, xpc_strerror(error));
10739 } else if (ms->isActive) {
10740 job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
10741 error = EBUSY;
10742 } else {
10743 machservice_request_notifications(ms);
10744
10745 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10746 xpc_dictionary_set_mach_recv(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10747 *reply = reply2;
10748 error = 0;
10749 }
10750
10751 return error;
10752 }
10753
10754 int
10755 xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply)
10756 {
10757 if (!j->event_monitor) {
10758 return EPERM;
10759 }
10760
10761 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10762 if (!stream) {
10763 return EXINVAL;
10764 }
10765
10766 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10767 if (!token) {
10768 return EXINVAL;
10769 }
10770
10771 job_log(j, LOG_DEBUG, "Looking up channel for stream/token: %s/%llu", stream, token);
10772
10773 struct externalevent *ee = externalevent_find(stream, token);
10774 if (!ee) {
10775 return ESRCH;
10776 }
10777
10778 struct machservice *ms = NULL;
10779 int error = xpc_event_find_channel(ee->job, stream, &ms);
10780 if (!error) {
10781 job_log(j, LOG_DEBUG, "Found event channel port: 0x%x", ms->port);
10782 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10783 xpc_dictionary_set_mach_send(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10784 *reply = reply2;
10785 error = 0;
10786 } else {
10787 job_log(j, LOG_ERR, "Could not find event channel for stream/token: %s/%llu: 0x%x: %s", stream, token, error, xpc_strerror(error));
10788 }
10789
10790 return error;
10791 }
10792
int
xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Event-monitor-only: check a provider in for a stream and reply with
	 * the stream's current events as a flat array of alternating
	 * (token, event-dictionary) entries.
	 */
	if (!j->event_monitor) {
		return EPERM;
	}

	/* This indicates that the event monitor is now safe to signal. This state
	 * is independent of whether this operation actually succeeds; we just need
	 * it to ignore SIGUSR1.
	 */
	j->event_monitor_ready2signal = true;

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	job_log(j, LOG_DEBUG, "Provider checking in for stream: %s", stream);

	xpc_object_t events = xpc_array_create(NULL, 0);
	struct eventsystem *es = eventsystem_find(stream);
	if (!es) {
		/* If we had to create the event stream, there were no events, so just
		 * give back the empty array.
		 */
		job_log(j, LOG_DEBUG, "Creating event stream.");
		es = eventsystem_new(stream);
		if (!job_assumes(j, es)) {
			xpc_release(events);
			return EXNOMEM;
		}

		/* Remember the helper stream globally for later use. */
		if (strcmp(stream, "com.apple.launchd.helper") == 0) {
			_launchd_support_system = es;
		}
	} else {
		job_log(j, LOG_DEBUG, "Filling event array.");

		struct externalevent *ei = NULL;
		LIST_FOREACH(ei, &es->events, sys_le) {
			/* Each event contributes two consecutive slots: its token, then
			 * its payload dictionary.
			 */
			xpc_array_set_uint64(events, XPC_ARRAY_APPEND, ei->id);
			xpc_array_append_value(events, ei->event);
		}
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENTS, events);
	xpc_release(events);
	*reply = reply2;

	return 0;
}
10846
int
xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Event-monitor-only: record whether the event identified by
	 * stream/token has fired, then dispatch the owning job so it can react.
	 */
	job_t other_j = NULL;

	if (!j->event_monitor) {
		return EPERM;
	}

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
	if (!token) {
		return EXINVAL;
	}

	/* The state must be an explicit XPC boolean. */
	bool state = false;
	xpc_object_t xstate = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_STATE);
	if (!xstate || xpc_get_type(xstate) != XPC_TYPE_BOOL) {
		return EXINVAL;
	} else {
		state = xpc_bool_get_value(xstate);
	}

	job_log(j, LOG_DEBUG, "Setting event state to %s for stream/token: %s/%llu", state ? "true" : "false", stream, token);

	struct externalevent *ei = externalevent_find(stream, token);
	if (!ei) {
		job_log(j, LOG_ERR, "Could not find stream/token: %s/%llu", stream, token);
		return ESRCH;
	}

	/* Save the owning job first: the event may be deleted below, after which
	 * `ei` must not be touched.
	 */
	other_j = ei->job;
	ei->state = state;

	if (ei->internal) {
		/* Internal events gate exec(2); once fired, the gate and the event
		 * itself are removed.
		 */
		job_log(ei->job, LOG_NOTICE, "Job should be able to exec(3) now.");
		ei->job->waiting4ok = false;
		externalevent_delete(ei);
	}

	(void)job_dispatch(other_j, false);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
10898
bool
xpc_event_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	/* Dispatch an incoming XPC Events request to its handler. Returns false
	 * only when the request carries no opcode, so other demuxers can try
	 * it; otherwise a reply (possibly an error reply) is always produced.
	 */
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	/* Resolve the caller to a job: prefer a managed job for the caller's
	 * PID, falling back to port-based MIG translation. If neither works,
	 * force the EINVAL path below via the -1 sentinel.
	 */
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t j = managed_job(ldc->pid);
	if (!j) {
		j = job_mig_intran(p);
		if (!j) {
			op = -1;
		}
	}

	job_log(j, LOG_DEBUG, "Incoming XPC event request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_EVENT_GET_NAME:
		error = xpc_event_get_event_name(j, request, reply);
		break;
	case XPC_EVENT_SET:
		error = xpc_event_set_event(j, request, reply);
		break;
	case XPC_EVENT_COPY:
		error = xpc_event_copy_event(j, request, reply);
		break;
	case XPC_EVENT_CHECK_IN:
		error = xpc_event_channel_check_in(j, request, reply);
		break;
	case XPC_EVENT_LOOK_UP:
		error = xpc_event_channel_look_up(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_CHECK_IN:
		error = xpc_event_provider_check_in(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_SET_STATE:
		error = xpc_event_provider_set_state(j, request, reply);
		break;
	case -1:
		/* Sentinel from the failed job resolution above. `op` is unsigned,
		 * so the -1 case label converts to the same value as the -1 stored
		 * into `op`.
		 */
		error = EINVAL;
		break;
	default:
		job_log(j, LOG_ERR, "Bogus opcode.");
		error = EDOM;
	}

	if (error) {
		/* Failed handlers do not build a reply; report the error code. */
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_uint64(reply2, XPC_EVENT_ROUTINE_KEY_ERROR, error);
		*reply = reply2;
	}

	return true;
}
10964
10965 uint64_t
10966 xpc_get_jetsam_entitlement(const char *key)
10967 {
10968 uint64_t entitlement = 0;
10969
10970 audit_token_t *token = runtime_get_caller_token();
10971 xpc_object_t value = xpc_copy_entitlement_for_token(key, token);
10972 if (value) {
10973 if (xpc_get_type(value) == XPC_TYPE_UINT64) {
10974 entitlement = xpc_uint64_get_value(value);
10975 }
10976
10977 xpc_release(value);
10978 }
10979
10980 return entitlement;
10981 }
10982
10983 int
10984 xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply)
10985 {
10986 if (!j) {
10987 return EINVAL;
10988 }
10989
10990 const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
10991 if (!label) {
10992 return EXINVAL;
10993 }
10994
10995 xpc_jetsam_band_t entitled_band = -1;
10996 xpc_jetsam_band_t requested_band = (xpc_jetsam_band_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_PRIORITY_BAND);
10997 if (!requested_band) {
10998 return EXINVAL;
10999 }
11000
11001 if (!(requested_band >= XPC_JETSAM_BAND_SUSPENDED && requested_band < XPC_JETSAM_BAND_LAST)) {
11002 return EXINVAL;
11003 }
11004
11005 uint64_t rcdata = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_RCDATA);
11006
11007 job_t tj = job_find(root_jobmgr, label);
11008 if (!tj) {
11009 return EXSRCH;
11010 }
11011
11012 boolean_t allow = false;
11013 if (j->embedded_god) {
11014 allow = true;
11015 } else {
11016 entitled_band = xpc_get_jetsam_entitlement("com.apple.private.jetsam.modify-priority");
11017 if (entitled_band >= requested_band) {
11018 allow = true;
11019 }
11020 }
11021
11022 if (!allow) {
11023 if (launchd_no_jetsam_perm_check) {
11024 job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set priority: %d", requested_band);
11025 } else {
11026 job_log(j, LOG_ERR, "Job cannot decrease Jetsam priority band (requested/maximum): %d/%d", requested_band, entitled_band);
11027 return EPERM;
11028 }
11029 }
11030
11031 job_log(j, LOG_INFO, "Setting Jetsam band: %d.", requested_band);
11032 job_update_jetsam_properties(tj, requested_band, rcdata);
11033
11034 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
11035 *reply = reply2;
11036
11037 return 0;
11038 }
11039
11040 int
11041 xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply)
11042 {
11043 if (!j) {
11044 return EINVAL;
11045 }
11046
11047 const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
11048 if (!label) {
11049 return EXINVAL;
11050 }
11051
11052 int32_t entitlement_limit = 0;
11053 int32_t requested_limit = (int32_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_MEMORY_LIMIT);
11054
11055 job_t tj = job_find(root_jobmgr, label);
11056 if (!tj) {
11057 return EXSRCH;
11058 }
11059
11060 boolean_t allow = false;
11061 if (j->embedded_god) {
11062 allow = true;
11063 } else {
11064 entitlement_limit = (int32_t)xpc_get_jetsam_entitlement("com.apple.private.jetsam.memory_limit");
11065 if (entitlement_limit >= requested_limit) {
11066 allow = true;
11067 }
11068 }
11069
11070 if (!allow) {
11071 if (launchd_no_jetsam_perm_check) {
11072 job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set memory limit: %d", requested_limit);
11073 } else {
11074 job_log(j, LOG_ERR, "Job cannot set Jetsam memory limit (requested/maximum): %d/%d", requested_limit, entitlement_limit);
11075 return EPERM;
11076 }
11077 }
11078
11079 job_log(j, LOG_INFO, "Setting Jetsam memory limit: %d.", requested_limit);
11080 job_update_jetsam_memory_limit(tj, requested_limit);
11081
11082 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
11083 *reply = reply2;
11084
11085 return 0;
11086 }
11087
11088 static jobmgr_t
11089 _xpc_process_find_target_manager(job_t j, xpc_service_type_t type, pid_t pid)
11090 {
11091 jobmgr_t target = NULL;
11092 if (type == XPC_SERVICE_TYPE_BUNDLED) {
11093 job_log(j, LOG_DEBUG, "Bundled service. Searching for XPC domains for PID: %d", pid);
11094
11095 jobmgr_t jmi = NULL;
11096 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
11097 if (jmi->req_pid && jmi->req_pid == pid) {
11098 jobmgr_log(jmi, LOG_DEBUG, "Found job manager for PID.");
11099 target = jmi;
11100 break;
11101 }
11102 }
11103 } else if (type == XPC_SERVICE_TYPE_LAUNCHD || type == XPC_SERVICE_TYPE_APP) {
11104 target = j->mgr;
11105 }
11106
11107 return target;
11108 }
11109
static int
xpc_process_attach(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Attach the (entitled) caller to a named service so it can learn about
	 * new instances via the supplied port. Replies with the PID of an
	 * already-running instance, or an ESRCH/EXSRCH error key when none
	 * exists yet.
	 */
	if (!j) {
		return EINVAL;
	}

	/* Attachment requires the attach entitlement to be boolean true. */
	audit_token_t *token = runtime_get_caller_token();
	xpc_object_t entitlement = xpc_copy_entitlement_for_token(XPC_SERVICE_ENTITLEMENT_ATTACH, token);
	if (!entitlement) {
		job_log(j, LOG_ERR, "Job does not have entitlement: %s", XPC_SERVICE_ENTITLEMENT_ATTACH);
		return EPERM;
	}

	if (entitlement != XPC_BOOL_TRUE) {
		char *desc = xpc_copy_description(entitlement);
		job_log(j, LOG_ERR, "Job has bad value for entitlement: %s:\n%s", XPC_SERVICE_ENTITLEMENT_ATTACH, desc);
		free(desc);

		xpc_release(entitlement);
		return EPERM;
	}

	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
	if (!name) {
		return EXINVAL;
	}

	xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
	if (!type) {
		return EXINVAL;
	}

	/* Port over which the caller will be told about new instances. */
	mach_port_t port = xpc_dictionary_copy_mach_send(request, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT);
	if (!MACH_PORT_VALID(port)) {
		return EXINVAL;
	}

	pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_HANDLE);

	job_log(j, LOG_DEBUG, "Attaching to service: %s", name);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
	if (target) {
		jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
		(void)jobmgr_assumes(target, waiting4attach_new(target, name, port, 0, type));

		/* HACK: This is awful. For legacy reasons, launchd job labels are all
		 * stored in a global namespace, which is stored in the root job
		 * manager. But XPC domains have a per-domain namespace. So if we're
		 * looking for a legacy launchd job, we have to redirect any attachment
		 * attempts to the root job manager to find existing instances.
		 *
		 * But because we store attachments on a per-job manager basis, we have
		 * to create the new attachment in the actual target job manager, hence
		 * why we change the target only after we've created the attachment.
		 */
		if (strcmp(target->name, VPROCMGR_SESSION_AQUA) == 0) {
			target = root_jobmgr;
		}

		job_t existing = job_find(target, name);
		if (existing && existing->p) {
			job_log(existing, LOG_DEBUG, "Found existing instance of service.");
			xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_PID, existing->p);
		} else {
			xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
		}
	} else if (type == XPC_SERVICE_TYPE_BUNDLED) {
		/* NOTE(review): `target` is NULL on this path, so waiting4attach_new()
		 * receives a NULL manager — presumably it tolerates that and parks
		 * the attachment by PID; confirm against its implementation.
		 */
		(void)job_assumes(j, waiting4attach_new(target, name, port, pid, type));
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
	} else {
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, EXSRCH);
	}

	*reply = reply2;
	return 0;
}
11189
11190 static int
11191 xpc_process_detach(job_t j, xpc_object_t request, xpc_object_t *reply __unused)
11192 {
11193 if (!j) {
11194 return EINVAL;
11195 }
11196
11197 const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
11198 if (!name) {
11199 return EXINVAL;
11200 }
11201
11202 xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
11203 if (!type) {
11204 return EXINVAL;
11205 }
11206
11207 job_log(j, LOG_DEBUG, "Deatching from service: %s", name);
11208
11209 pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_PID);
11210 jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
11211 if (target) {
11212 jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
11213
11214 struct waiting4attach *w4ai = NULL;
11215 struct waiting4attach *w4ait = NULL;
11216 LIST_FOREACH_SAFE(w4ai, &target->attaches, le, w4ait) {
11217 if (strcmp(name, w4ai->name) == 0) {
11218 jobmgr_log(target, LOG_DEBUG, "Found attachment. Deleting.");
11219 waiting4attach_delete(target, w4ai);
11220 break;
11221 }
11222 }
11223 }
11224
11225 return 0;
11226 }
11227
static int
xpc_process_get_properties(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Reply with the attachment properties recorded for this job: service
	 * type, new-instance port, executable path, and argv.
	 */
	if (j->anonymous) {
		/* Total hack. libxpc will send requests to the pipe created out of the
		 * process' bootstrap port, so when job_mig_intran() tries to resolve
		 * the process into a job, it'll wind up creating an anonymous job if
		 * the requestor was an XPC service, whose job manager is an XPC domain.
		 */
		pid_t pid = j->p;
		jobmgr_t jmi = NULL;
		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if ((j = jobmgr_find_by_pid(jmi, pid, false))) {
				break;
			}
		}
	}

	if (!j || j->anonymous) {
		return EXINVAL;
	}

	struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
	if (!w4a) {
		return EXINVAL;
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_TYPE, w4a->type);
	xpc_dictionary_set_mach_send(reply2, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT, w4a->port);
	/* NOTE(review): when j->prog is NULL this reads j->argv[0] without
	 * checking j->argv — presumably non-anonymous jobs always have one of
	 * prog/argv set; confirm before relying on it.
	 */
	if (j->prog) {
		xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->prog);
	} else {
		xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->argv[0]);
	}

	if (j->argv) {
		xpc_object_t xargv = xpc_array_create(NULL, 0);

		size_t i = 0;
		for (i = 0; i < j->argc; i++) {
			if (j->argv[i]) {
				xpc_array_set_string(xargv, XPC_ARRAY_APPEND, j->argv[i]);
			}
		}

		xpc_dictionary_set_value(reply2, XPC_PROCESS_ROUTINE_KEY_ARGV, xargv);
		xpc_release(xargv);
	}

	*reply = reply2;
	return 0;
}
11281
static int
xpc_process_service_kill(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Send a signal to a named service in the caller's bundled-service
	 * domain. The kill(2) result is reported in the reply's error key; the
	 * routine itself returns 0 once a reply is built.
	 */
#if XPC_LPI_VERSION >= 20130426
	if (!j) {
		return ESRCH;
	}

	jobmgr_t jm = _xpc_process_find_target_manager(j, XPC_SERVICE_TYPE_BUNDLED, j->p);
	if (!jm) {
		return ENOENT;
	}

	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
	if (!name) {
		return EINVAL;
	}

	int64_t whichsig = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_SIGNAL);
	if (!whichsig) {
		return EINVAL;
	}

	job_t j2kill = job_find(jm, name);
	if (!j2kill) {
		return ESRCH;
	}

	if (j2kill->alias) {
		// Only allow for private instances to be killed.
		return EPERM;
	}

	/* NOTE(review): proc_pidinfo() runs before the !j2kill->p check below,
	 * so it may be called with pid 0; also, `errno` is returned on failure
	 * without being reset first, so a stale 0 would read as success.
	 * Confirm intent before changing either.
	 */
	struct proc_bsdshortinfo proc;
	if (proc_pidinfo(j2kill->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(root_jobmgr, errno);
		}

		return errno;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	if (proc.pbsi_uid != ldc->euid) {
		// Do not allow non-root to kill RoleAccount services running as a
		// different user.
		return EPERM;
	}

	if (!j2kill->p) {
		return EALREADY;
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	if (!reply2) {
		return EINVAL;
	}

	/* Report the kill(2) outcome through the reply rather than the return. */
	int error = 0;
	int ret = kill(j2kill->p, whichsig);
	if (ret) {
		error = errno;
	}

	xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
	*reply = reply2;
	return 0;
#else
	return ENOTSUP;
#endif
}
11353
11354 bool
11355 xpc_process_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
11356 {
11357 uint64_t op = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_OP);
11358 if (!op) {
11359 return false;
11360 }
11361
11362 audit_token_t token;
11363 xpc_dictionary_get_audit_token(request, &token);
11364 runtime_record_caller_creds(&token);
11365
11366 job_t j = job_mig_intran(p);
11367 job_log(j, LOG_DEBUG, "Incoming XPC process request: %llu", op);
11368
11369 int error = -1;
11370 switch (op) {
11371 case XPC_PROCESS_JETSAM_SET_BAND:
11372 error = xpc_process_set_jetsam_band(j, request, reply);
11373 break;
11374 case XPC_PROCESS_JETSAM_SET_MEMORY_LIMIT:
11375 error = xpc_process_set_jetsam_memory_limit(j, request, reply);
11376 break;
11377 case XPC_PROCESS_SERVICE_ATTACH:
11378 error = xpc_process_attach(j, request, reply);
11379 break;
11380 case XPC_PROCESS_SERVICE_DETACH:
11381 error = xpc_process_detach(j, request, reply);
11382 break;
11383 case XPC_PROCESS_SERVICE_GET_PROPERTIES:
11384 error = xpc_process_get_properties(j, request, reply);
11385 break;
11386 case XPC_PROCESS_SERVICE_KILL:
11387 error = xpc_process_service_kill(j, request, reply);
11388 break;
11389 default:
11390 job_log(j, LOG_ERR, "Bogus process opcode.");
11391 error = EDOM;
11392 }
11393
11394 if (error) {
11395 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
11396 if (reply2) {
11397 xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
11398 }
11399
11400 *reply = reply2;
11401 }
11402
11403 return true;
11404 }
11405
11406 kern_return_t
11407 job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
11408 {
11409 struct ldcred *ldc = runtime_get_caller_creds();
11410 job_t otherj;
11411
11412 if (!j) {
11413 return BOOTSTRAP_NO_MEMORY;
11414 }
11415
11416 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
11417 return BOOTSTRAP_UNKNOWN_SERVICE;
11418 }
11419
11420 #if TARGET_OS_EMBEDDED
11421 bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
11422 #else
11423 bool allow_non_root_kickstart = false;
11424 #endif
11425
11426 if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
11427 return BOOTSTRAP_NOT_PRIVILEGED;
11428 }
11429
11430 #if HAVE_SANDBOX
11431 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
11432 return BOOTSTRAP_NOT_PRIVILEGED;
11433 }
11434 #endif
11435
11436 if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
11437 return BOOTSTRAP_SERVICE_ACTIVE;
11438 }
11439
11440 otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
11441 otherj = job_dispatch(otherj, true);
11442
11443 if (!job_assumes(j, otherj && otherj->p)) {
11444 // <rdar://problem/6787083> Clear this flag if we failed to start the job.
11445 otherj->stall_before_exec = false;
11446 return BOOTSTRAP_NO_MEMORY;
11447 }
11448
11449 *out_pid = otherj->p;
11450
11451 return 0;
11452 }
11453
kern_return_t
job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
{
	/* Core of the legacy spawn path: unpack a serialized job dictionary from
	 * the caller, import it into the caller's (Aqua) job manager, and dispatch
	 * it. On success *outj is the newly created job. If a job with the same
	 * label already exists, returns BOOTSTRAP_NAME_IN_USE and hands back the
	 * existing job through *outj (which may be NULL if the lookup fails —
	 * callers must check).
	 *
	 * NOTE: indata is MIG-owned; the caller (job_mig_spawn2) deallocates it.
	 */
	launch_data_t jobdata = NULL;
	size_t data_offset = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	// Sandboxed callers need the "job-creation" capability.
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	/* In the PID-1 instance, non-root callers are redirected to their
	 * per-user launchd.
	 */
	if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
	if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
		jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
		return 1;
	}

	jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);

	launch_data_t label = NULL;
	launch_data_t wait4debugger = NULL;
	if (!jr) {
		switch (errno) {
		case EEXIST:
			/* If EEXIST was returned, we know that there is a label string in
			 * the dictionary. So we don't need to check the types here; that
			 * has already been done.
			 */
			label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
			jr = job_find(NULL, launch_data_get_string(label));
			if (job_assumes(j, jr != NULL) && !jr->p) {
				wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
				if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
					if (launch_data_get_bool(wait4debugger)) {
						/* If the job exists, we're going to kick-start it, but
						 * we need to give the caller the opportunity to start
						 * it suspended if it so desires. But this will only
						 * take effect if the job isn't running.
						 */
						jr->wait4debugger_oneshot = true;
					}
				}
			}

			// NOTE(review): jr may still be NULL here if job_find() failed;
			// callers of this routine must tolerate *outj == NULL on
			// BOOTSTRAP_NAME_IN_USE.
			*outj = jr;
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	// Remember which user asked PID 1 to spawn this, for later lookups.
	if (pid1_magic) {
		jr->mach_uid = ldc->uid;
	}

	// TODO: Consolidate the app and legacy_LS_job bits.
	jr->legacy_LS_job = true;
	jr->abandon_pg = true;
	jr->asport = asport;
	jr->app = true;
	uuid_clear(jr->expected_audit_uuid);
	jr = job_dispatch(jr, true);

	// NOTE(review): if job_dispatch() returned NULL, job_remove(NULL) is
	// called below — presumably job_remove tolerates/asserts on NULL; verify.
	if (!job_assumes(j, jr != NULL)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
	*outj = jr;

	return BOOTSTRAP_SUCCESS;
}
11557
11558 kern_return_t
11559 job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
11560 {
11561 job_t nj = NULL;
11562 kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
11563 if (likely(kr == KERN_SUCCESS)) {
11564 if (job_setup_exit_port(nj) != KERN_SUCCESS) {
11565 job_remove(nj);
11566 kr = BOOTSTRAP_NO_MEMORY;
11567 } else {
11568 /* Do not return until the job has called exec(3), thereby making it
11569 * safe for the caller to send it SIGCONT.
11570 *
11571 * <rdar://problem/9042798>
11572 */
11573 nj->spawn_reply_port = rp;
11574 kr = MIG_NO_REPLY;
11575 }
11576 } else if (kr == BOOTSTRAP_NAME_IN_USE) {
11577 bool was_running = nj->p;
11578 if (job_dispatch(nj, true)) {
11579 if (!was_running) {
11580 job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");
11581
11582 if (job_setup_exit_port(nj) == KERN_SUCCESS) {
11583 nj->spawn_reply_port = rp;
11584 kr = MIG_NO_REPLY;
11585 } else {
11586 kr = BOOTSTRAP_NO_MEMORY;
11587 }
11588 } else {
11589 *obsvr_port = MACH_PORT_NULL;
11590 *child_pid = nj->p;
11591 kr = KERN_SUCCESS;
11592 }
11593 } else {
11594 job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
11595 kr = BOOTSTRAP_UNKNOWN_SERVICE;
11596 }
11597 }
11598
11599 mig_deallocate(indata, indataCnt);
11600 return kr;
11601 }
11602
11603 launch_data_t
11604 job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport __attribute__((unused)))
11605 {
11606 launch_data_t reply = NULL;
11607
11608 errno = ENOTSUP;
11609 if (launch_data_get_type(request) == LAUNCH_DATA_STRING) {
11610 if (strcmp(launch_data_get_string(request), LAUNCH_KEY_CHECKIN) == 0) {
11611 reply = job_export(j);
11612 job_checkin(j);
11613 }
11614 }
11615
11616 return reply;
11617 }
11618
11619 #define LAUNCHD_MAX_LEGACY_FDS 128
11620 #define countof(x) (sizeof((x)) / sizeof((x[0])))
11621
11622 kern_return_t
11623 job_mig_legacy_ipc_request(job_t j, vm_offset_t request,
11624 mach_msg_type_number_t requestCnt, mach_port_array_t request_fds,
11625 mach_msg_type_number_t request_fdsCnt, vm_offset_t *reply,
11626 mach_msg_type_number_t *replyCnt, mach_port_array_t *reply_fdps,
11627 mach_msg_type_number_t *reply_fdsCnt, mach_port_t asport)
11628 {
11629 if (!j) {
11630 return BOOTSTRAP_NO_MEMORY;
11631 }
11632
11633 /* TODO: Once we support actions other than checking in, we must check the
11634 * sandbox capabilities and EUID of the requestort.
11635 */
11636 size_t nout_fdps = 0;
11637 size_t nfds = request_fdsCnt / sizeof(request_fds[0]);
11638 if (nfds > LAUNCHD_MAX_LEGACY_FDS) {
11639 job_log(j, LOG_ERR, "Too many incoming descriptors: %lu", nfds);
11640 return BOOTSTRAP_NO_MEMORY;
11641 }
11642
11643 int in_fds[LAUNCHD_MAX_LEGACY_FDS];
11644 size_t i = 0;
11645 for (i = 0; i < nfds; i++) {
11646 in_fds[i] = fileport_makefd(request_fds[i]);
11647 if (in_fds[i] == -1) {
11648 job_log(j, LOG_ERR, "Bad descriptor passed in legacy IPC request at index: %lu", i);
11649 }
11650 }
11651
11652 // DON'T goto outbad before this point.
11653 *reply = 0;
11654 *reply_fdps = NULL;
11655 launch_data_t ldreply = NULL;
11656
11657 size_t dataoff = 0;
11658 size_t fdoff = 0;
11659 launch_data_t ldrequest = launch_data_unpack((void *)request, requestCnt, in_fds, nfds, &dataoff, &fdoff);
11660 if (!ldrequest) {
11661 job_log(j, LOG_ERR, "Invalid legacy IPC request passed.");
11662 goto out_bad;
11663 }
11664
11665 ldreply = job_do_legacy_ipc_request(j, ldrequest, asport);
11666 if (!ldreply) {
11667 ldreply = launch_data_new_errno(errno);
11668 if (!ldreply) {
11669 goto out_bad;
11670 }
11671 }
11672
11673 *replyCnt = 10 * 1024 * 1024;
11674 mig_allocate(reply, *replyCnt);
11675 if (!*reply) {
11676 goto out_bad;
11677 }
11678
11679 int out_fds[LAUNCHD_MAX_LEGACY_FDS];
11680 size_t nout_fds = 0;
11681 size_t sz = launch_data_pack(ldreply, (void *)*reply, *replyCnt, out_fds, &nout_fds);
11682 if (!sz) {
11683 job_log(j, LOG_ERR, "Could not pack legacy IPC reply.");
11684 goto out_bad;
11685 }
11686
11687 if (nout_fds) {
11688 if (nout_fds > 128) {
11689 job_log(j, LOG_ERR, "Too many outgoing descriptors: %lu", nout_fds);
11690 goto out_bad;
11691 }
11692
11693 *reply_fdsCnt = nout_fds * sizeof((*reply_fdps)[0]);
11694 mig_allocate((vm_address_t *)reply_fdps, *reply_fdsCnt);
11695 if (!*reply_fdps) {
11696 goto out_bad;
11697 }
11698
11699 for (i = 0; i < nout_fds; i++) {
11700 mach_port_t fp = MACH_PORT_NULL;
11701 /* Whatever. Worst case is that we insert MACH_PORT_NULL. Not a big
11702 * deal. Note, these get stuffed into an array whose disposition is
11703 * mach_port_move_send_t, so we don't have to worry about them after
11704 * returning.
11705 */
11706 if (fileport_makeport(out_fds[i], &fp) != 0) {
11707 job_log(j, LOG_ERR, "Could not pack response descriptor at index: %lu: %d: %s", i, errno, strerror(errno));
11708 }
11709 (*reply_fdps)[i] = fp;
11710 }
11711
11712 nout_fdps = nout_fds;
11713 } else {
11714 *reply_fdsCnt = 0;
11715 }
11716
11717 mig_deallocate(request, requestCnt);
11718 launch_data_free(ldreply);
11719 ldreply = NULL;
11720
11721 // Unused for now.
11722 (void)launchd_mport_deallocate(asport);
11723
11724 return BOOTSTRAP_SUCCESS;
11725
11726 out_bad:
11727 for (i = 0; i < nfds; i++) {
11728 (void)close(in_fds[i]);
11729 }
11730
11731 for (i = 0; i < nout_fds; i++) {
11732 (void)launchd_mport_deallocate((*reply_fdps)[i]);
11733 }
11734
11735 if (*reply) {
11736 mig_deallocate(*reply, *replyCnt);
11737 }
11738
11739 /* We should never hit this since the last goto out is in the case that
11740 * allocating this fails.
11741 */
11742 if (*reply_fdps) {
11743 mig_deallocate((vm_address_t)*reply_fdps, *reply_fdsCnt);
11744 }
11745
11746 if (ldreply) {
11747 launch_data_free(ldreply);
11748 }
11749
11750 return BOOTSTRAP_NO_MEMORY;
11751 }
11752
void
jobmgr_init(bool sflag)
{
	/* Bootstrap the job-manager hierarchy: create the root job manager (the
	 * System session under PID 1, Background otherwise) and the
	 * com.apple.xpc.system singleton XPC domain. Aborts (os_assert) if either
	 * cannot be created — launchd cannot run without them.
	 */
	const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
	SLIST_INIT(&s_curious_jobs);
	LIST_INIT(&s_needing_sessions);

	os_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
	os_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
	_s_xpc_system_domain->req_asid = launchd_audit_session;
	_s_xpc_system_domain->req_asport = launchd_audit_port;
	_s_xpc_system_domain->shortdesc = "system";
	if (pid1_magic) {
		// Only PID 1 is responsible for tracking system shutdown.
		root_jobmgr->monitor_shutdown = true;
	}

	/* Keep a descriptor on /dev/autofs_nowait (or /dev as a fallback) so that
	 * path lookups don't hang on unmounted autofs triggers.
	 */
	uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
	s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
	// NOTE(review): likely() on the failure path looks inverted — presumably
	// unlikely() was intended. Behavior is unaffected (it is only a branch
	// hint); confirm before changing.
	if (likely(s_no_hang_fd == -1)) {
		if (jobmgr_assumes_zero_p(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK))) != -1) {
			(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr));
		}
	}
	// Mark the descriptor close-on-exec via the _fd() helper.
	s_no_hang_fd = _fd(s_no_hang_fd);
}
11778
size_t
our_strhash(const char *s)
{
	/* djb2 string hash: h(i) = h(i-1) * 33 + c, seeded with 5381.
	 * This algorithm was first reported by Dan Bernstein many years ago in
	 * comp.lang.c.
	 */
	size_t hash = 5381;
	size_t ch;

	while ((ch = *s++) != 0) {
		hash = (hash << 5) + hash + ch; // hash * 33 + ch
	}

	return hash;
}
11794
11795 size_t
11796 hash_label(const char *label)
11797 {
11798 return our_strhash(label) % LABEL_HASH_SIZE;
11799 }
11800
11801 size_t
11802 hash_ms(const char *msstr)
11803 {
11804 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
11805 }
11806
11807 bool
11808 waiting4removal_new(job_t j, mach_port_t rp)
11809 {
11810 struct waiting_for_removal *w4r;
11811
11812 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
11813 return false;
11814 }
11815
11816 w4r->reply_port = rp;
11817
11818 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
11819
11820 return true;
11821 }
11822
void
waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
{
	/* Complete one removal watcher: send the pending MIG reply (success),
	 * unlink the record from the job's watcher list, and free it.
	 */
	(void)job_assumes_zero(j, job_mig_send_signal_reply(w4r->reply_port, 0));

	SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);

	free(w4r);
}
11832
size_t
get_kern_max_proc(void)
{
	/* Query kern.maxproc from the kernel; the 100 default only survives if
	 * the sysctl(3) call fails (which is logged via posix_assumes_zero).
	 */
	int name[] = { CTL_KERN, KERN_MAXPROC };
	int maxproc = 100;
	size_t len = sizeof(maxproc);

	(void)posix_assumes_zero(sysctl(name, 2, &maxproc, &len, NULL, 0));

	return maxproc;
}
11844
11845 // See rdar://problem/6271234
11846 void
11847 eliminate_double_reboot(void)
11848 {
11849 if (unlikely(!pid1_magic)) {
11850 return;
11851 }
11852
11853 struct stat sb;
11854 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
11855 int result = -1;
11856
11857 if (unlikely(stat(argv[1], &sb) != -1)) {
11858 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
11859
11860 pid_t p = 0;
11861 result = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ);
11862 if (result == -1) {
11863 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script: %d: %s", result, strerror(result));
11864 goto out;
11865 }
11866
11867 int wstatus = 0;
11868 result = waitpid(p, &wstatus, 0);
11869 if (result == -1) {
11870 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to reap deferred install script: %d: %s", errno, strerror(errno));
11871 goto out;
11872 }
11873
11874 if (WIFEXITED(wstatus)) {
11875 if ((result = WEXITSTATUS(wstatus)) == 0) {
11876 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
11877 } else {
11878 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script failed with status: %d", WEXITSTATUS(wstatus));
11879 }
11880 } else {
11881 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Weirdness with install script: %d", wstatus);
11882 }
11883 }
11884 out:
11885 if (result == 0) {
11886 /* If the unlink(2) was to fail, it would be most likely fail with
11887 * EBUSY. All the other failure cases for unlink(2) don't apply when
11888 * we're running under PID 1 and have verified that the file exists.
11889 * Outside of someone deliberately messing with us (like if
11890 * /etc/rc.deferredinstall is actually a looping sym-link or a mount
11891 * point for a filesystem) and I/O errors, we should be good.
11892 */
11893 if (unlink(argv[1]) == -1) {
11894 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to remove deferred install script: %d: %s", errno, strerror(errno));
11895 }
11896 }
11897 }
11898
11899 void
11900 jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
11901 {
11902 job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
11903 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
11904 j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
11905
11906 #if XPC_LPI_VERSION >= 20120810
11907 if (j->jetsam_priority > XPC_JETSAM_PRIORITY_RESERVED && j->jetsam_priority < XPC_JETSAM_PRIORITY_RESERVED + XPC_JETSAM_BAND_LAST) {
11908 size_t band = j->jetsam_priority - XPC_JETSAM_PRIORITY_RESERVED;
11909 j->jetsam_priority = _launchd_priority_map[band - 1].priority;
11910 }
11911 #endif
11912 job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
11913 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
11914 j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
11915 job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
11916 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMITBACKGROUND) == 0) {
11917 j->jetsam_memory_limit_background = true;
11918 job_log(j, LOG_DEBUG, "Memory limit is for background state only");
11919 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
11920 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
11921 * You can't set this in a plist.
11922 */
11923 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMACTIVE) == 0) {
11924 // Ignore.
11925 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
11926 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
11927 * complain about it.
11928 */
11929 } else {
11930 job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
11931 }
11932
11933 if (unlikely(!j->jetsam_properties)) {
11934 j->jetsam_properties = true;
11935 }
11936 }
11937
11938 void
11939 job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data)
11940 {
11941 #if TARGET_OS_EMBEDDED
11942 j->jetsam_priority = _launchd_priority_map[band - 1].priority;
11943 j->jetsam_properties = true;
11944
11945 memorystatus_priority_properties_t mjp;
11946 mjp.priority = j->jetsam_priority;
11947 mjp.user_data = user_data;
11948
11949 size_t size = sizeof(mjp);
11950 int r = memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, j->p, 0, &mjp, size);
11951 if (r == -1 && errno != ESRCH) {
11952 (void)job_assumes_zero(j, errno);
11953 }
11954 #else
11955 #pragma unused(j, band, user_data)
11956 #endif
11957 }
11958
11959 void
11960 job_update_jetsam_memory_limit(job_t j, int32_t limit)
11961 {
11962 #if TARGET_OS_EMBEDDED
11963 j->jetsam_memlimit = limit;
11964 j->jetsam_properties = true;
11965
11966 int r = memorystatus_control(MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK, j->p, limit, NULL, 0);
11967 if (r == -1 && errno != ESRCH) {
11968 (void)job_assumes_zero(j, errno);
11969 }
11970 #else
11971 #pragma unused(j, limit)
11972 #endif
11973 }