/*
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "config.h"
#include "core.h"
#include "internal.h"
#include "helper.h"

#include <TargetConditionals.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/boolean.h>
#include <mach/message.h>
#include <mach/notify.h>
#include <mach/mig_errors.h>
#include <mach/mach_traps.h>
#include <mach/mach_interface.h>
#include <mach/host_info.h>
#include <mach/mach_host.h>
#include <mach/exception.h>
#include <mach/host_reboot.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/ucred.h>
#include <sys/fcntl.h>
#include <sys/un.h>
#include <sys/reboot.h>
#include <sys/wait.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/pipe.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/kern_memorystatus.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/nd6.h>
#include <bsm/libbsm.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <paths.h>
#include <pwd.h>
#include <grp.h>
#include <ttyent.h>
#include <dlfcn.h>
#include <dirent.h>
#include <string.h>
#include <ctype.h>
#include <glob.h>
#include <System/sys/spawn.h>
#include <System/sys/spawn_internal.h>
#include <spawn.h>
#include <spawn_private.h>
#include <time.h>
#include <libinfo.h>
#include <assumes.h>
#include <xpc/launchd.h>

#include <libproc.h>
#include <System/sys/proc_info.h>
#include <malloc/malloc.h>
#include <pthread.h>
#include <libproc.h>
#if HAVE_SANDBOX
#define __APPLE_API_PRIVATE
#include <sandbox.h>
#endif
#if HAVE_QUARANTINE
#include <quarantine.h>
#endif
#if !TARGET_OS_EMBEDDED
extern int gL1CacheEnabled;
#endif

#include "launch.h"
#include "launch_priv.h"
#include "launch_internal.h"
#include "bootstrap.h"
#include "bootstrap_priv.h"
#include "vproc.h"
#include "vproc_internal.h"

#include "reboot2.h"

#include "launchd.h"
#include "runtime.h"
#include "ipc.h"
#include "job.h"
#include "jobServer.h"
#include "job_reply.h"
#include "job_forward.h"
#include "mach_excServer.h"

#define POSIX_SPAWN_IOS_INTERACTIVE 0

/* LAUNCHD_DEFAULT_EXIT_TIMEOUT
 * If the job hasn't exited in the given number of seconds after sending
 * it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
 */
#define LAUNCHD_MIN_JOB_RUN_TIME 10
#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
#define LAUNCHD_SIGKILL_TIMER 4
#define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
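
/* Illustrative sketch (not part of launchd, compiled out): how a one-shot,
 * seconds-based kqueue timer like the ones job_stop() arms below might look
 * using the raw kevent(2) API. launchd itself goes through its kevent_mod()
 * wrapper; the function and parameter names here are hypothetical.
 */
#if 0
static void
example_arm_exit_timeout(int kq, void *job_ctx)
{
	struct kevent kev;
	// One-shot timer: fires once, LAUNCHD_DEFAULT_EXIT_TIMEOUT seconds out.
	EV_SET(&kev, (uintptr_t)job_ctx, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
			NOTE_SECONDS, LAUNCHD_DEFAULT_EXIT_TIMEOUT, job_ctx);
	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
}
#endif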

#define SHUTDOWN_LOG_DIR "/var/log/shutdown"

#define TAKE_SUBSET_NAME "TakeSubsetName"
#define TAKE_SUBSET_PID "TakeSubsetPID"
#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"

#define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)

extern char **environ;

struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle;
	mach_port_t reply_port;
};

static bool waiting4removal_new(job_t j, mach_port_t rp);
static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
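
/* Illustrative sketch (not part of launchd, compiled out): the <sys/queue.h>
 * SLIST macros used throughout this file. SLIST_ENTRY embeds the link field
 * in the element and SLIST_HEAD declares the list head; insertion is O(1)
 * and iteration is O(n). The struct and variable names here are hypothetical.
 */
#if 0
struct demo_item {
	SLIST_ENTRY(demo_item) sle; // embedded forward link
	int value;
};

static SLIST_HEAD(, demo_item) demo_list = SLIST_HEAD_INITIALIZER(demo_list);

static void
example_slist_usage(struct demo_item *it)
{
	SLIST_INSERT_HEAD(&demo_list, it, sle);

	struct demo_item *ii;
	SLIST_FOREACH(ii, &demo_list, sle) {
		// Visits each element, most recently inserted first.
	}
}
#endif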

struct machservice {
	SLIST_ENTRY(machservice) sle;
	SLIST_ENTRY(machservice) special_port_sle;
	LIST_ENTRY(machservice) name_hash_sle;
	LIST_ENTRY(machservice) port_hash_sle;
	struct machservice *alias;
	job_t job;
	unsigned int gen_num;
	mach_port_name_t port;
	unsigned int
		isActive:1,
		reset:1,
		recv:1,
		hide:1,
		kUNCServer:1,
		per_user_hack:1,
		debug_on_close:1,
		per_pid:1,
		delete_on_destruction:1,
		drain_one_on_crash:1,
		drain_all_on_crash:1,
		upfront:1,
		event_channel:1,
		/* Don't let the size of this field get too small. It has to be large
		 * enough to represent the reasonable range of special port numbers.
		 */
		special_port_num:18;
	const char name[0];
};

// HACK: This should be per jobmgr_t
static SLIST_HEAD(, machservice) special_ports;

#define PORT_HASH_SIZE 32
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))

static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
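
/* Illustrative sketch (not part of launchd, compiled out): why HASH_PORT
 * above and ACTIVE_JOB_HASH below special-case power-of-two table sizes.
 * When the size n is a power of two, (x & (n - 1)) selects the low-order
 * bits of x and equals x % n while avoiding an integer division. Since
 * IS_POWER_OF_TWO evaluates to a compile-time constant here, the ternary
 * folds away entirely.
 */
#if 0
static bool
example_power_of_two_mask(unsigned int x)
{
	// 32 is a power of two, so masking by 31 agrees with x % 32.
	return (x & (32 - 1)) == (x % 32); // always true
}
#endif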

static void machservice_setup(launch_data_t obj, const char *key, void *context);
static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
static void machservice_resetport(job_t j, struct machservice *ms);
static void machservice_stamp_port(job_t j, struct machservice *ms);
static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
static void machservice_ignore(job_t j, struct machservice *ms);
static void machservice_watch(job_t j, struct machservice *ms);
static void machservice_delete(job_t j, struct machservice *, bool port_died);
static void machservice_request_notifications(struct machservice *);
static mach_port_t machservice_port(struct machservice *);
static job_t machservice_job(struct machservice *);
static bool machservice_hidden(struct machservice *);
static bool machservice_active(struct machservice *);
static const char *machservice_name(struct machservice *);
static bootstrap_status_t machservice_status(struct machservice *);
void machservice_drain_port(struct machservice *);

struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;
	int *fds;
	unsigned int fd_cnt;
	union {
		const char name[0];
		char name_init[0];
	};
};

static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt);
static void socketgroup_delete(job_t j, struct socketgroup *sg);
static void socketgroup_watch(job_t j, struct socketgroup *sg);
static void socketgroup_ignore(job_t j, struct socketgroup *sg);
static void socketgroup_callback(job_t j);
static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);

struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle;
	SLIST_ENTRY(calendarinterval) sle;
	job_t job;
	struct tm when;
	time_t when_next;
};

static LIST_HEAD(, calendarinterval) sorted_calendar_events;

static bool calendarinterval_new(job_t j, struct tm *w);
static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
static void calendarinterval_callback(void);
static void calendarinterval_sanity_check(void);

struct envitem {
	SLIST_ENTRY(envitem) sle;
	char *value;
	union {
		const char key[0];
		char key_init[0];
	};
};

static bool envitem_new(job_t j, const char *k, const char *v, bool global);
static void envitem_delete(job_t j, struct envitem *ei, bool global);
static void envitem_setup(launch_data_t obj, const char *key, void *context);

struct limititem {
	SLIST_ENTRY(limititem) sle;
	struct rlimit lim;
	unsigned int setsoft:1, sethard:1, which:30;
};

static bool limititem_update(job_t j, int w, rlim_t r);
static void limititem_delete(job_t j, struct limititem *li);
static void limititem_setup(launch_data_t obj, const char *key, void *context);
#if HAVE_SANDBOX
static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
#endif

static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);

typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	CRASHED,
	DID_NOT_CRASH,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
} semaphore_reason_t;
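
/* Illustrative sketch (not part of launchd, compiled out): how a KeepAlive
 * condition from launchd.plist(5) maps onto semaphore_reason_t. A plist with
 * KeepAlive = { SuccessfulExit = true } yields a semaphoreitem whose why is
 * SUCCESSFUL_EXIT; the false case yields FAILED_EXIT. This evaluator is
 * hypothetical shorthand for logic that actually lives in job_keepalive().
 */
#if 0
static bool
example_semaphore_holds(semaphore_reason_t why, int last_exit_status)
{
	switch (why) {
	case SUCCESSFUL_EXIT:
		return WIFEXITED(last_exit_status) && WEXITSTATUS(last_exit_status) == 0;
	case FAILED_EXIT:
		return !(WIFEXITED(last_exit_status) && WEXITSTATUS(last_exit_status) == 0);
	default:
		return false;
	}
}
#endif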

struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle;
	semaphore_reason_t why;

	union {
		const char what[0];
		char what_init[0];
	};
};

struct semaphoreitem_dict_iter_context {
	job_t j;
	semaphore_reason_t why_true;
	semaphore_reason_t why_false;
};

static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);

struct externalevent {
	LIST_ENTRY(externalevent) sys_le;
	LIST_ENTRY(externalevent) job_le;
	struct eventsystem *sys;

	uint64_t id;
	job_t job;
	bool state;
	bool wanted_state;
	bool internal;
	xpc_object_t event;

	char name[0];
};

struct externalevent_iter_ctx {
	job_t j;
	struct eventsystem *sys;
};

static bool externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event);
static void externalevent_delete(struct externalevent *ee);
static void externalevent_setup(launch_data_t obj, const char *key, void *context);
static struct externalevent *externalevent_find(const char *sysname, uint64_t id);

struct eventsystem {
	LIST_ENTRY(eventsystem) global_le;
	LIST_HEAD(, externalevent) events;
	uint64_t curid;
	char name[0];
};

static struct eventsystem *eventsystem_new(const char *name);
static void eventsystem_delete(struct eventsystem *sys) __attribute__((unused));
static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
static struct eventsystem *eventsystem_find(const char *name);
static void eventsystem_ping(void);

#define ACTIVE_JOB_HASH_SIZE 32
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))

#define MACHSERVICE_HASH_SIZE 37

#define LABEL_HASH_SIZE 53
struct jobmgr_s {
	kq_callback kqjobmgr_callback;
	LIST_ENTRY(jobmgr_s) xpc_le;
	SLIST_ENTRY(jobmgr_s) sle;
	SLIST_HEAD(, jobmgr_s) submgrs;
	LIST_HEAD(, job_s) jobs;

	/* For legacy reasons, we keep all job labels that are imported in the root
	 * job manager's label hash. If a job manager is an XPC domain, then it gets
	 * its own label hash that is separate from the "global" one stored in the
	 * root job manager.
	 */
	LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
	LIST_HEAD(, job_s) global_env_jobs;
	mach_port_t jm_port;
	mach_port_t req_port;
	jobmgr_t parentmgr;
	int reboot_flags;
	time_t shutdown_time;
	unsigned int global_on_demand_cnt;
	unsigned int normal_active_cnt;
	unsigned int
		shutting_down:1,
		session_initialized:1,
		killed_stray_jobs:1,
		monitor_shutdown:1,
		shutdown_jobs_dirtied:1,
		shutdown_jobs_cleaned:1,
		xpc_singleton:1;
	uint32_t properties;
	// XPC-specific properties.
	char owner[MAXCOMLEN];
	char *shortdesc;
	mach_port_t req_bsport;
	mach_port_t req_excport;
	mach_port_t req_asport;
	pid_t req_pid;
	uid_t req_euid;
	gid_t req_egid;
	au_asid_t req_asid;
	vm_offset_t req_ctx;
	mach_msg_type_number_t req_ctx_sz;
	mach_port_t req_rport;
	kern_return_t error;
	union {
		const char name[0];
		char name_init[0];
	};
};

// Global XPC domains.
static jobmgr_t _s_xpc_system_domain;
static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;

#define jobmgr_assumes(jm, e) osx_assumes_ctx(jobmgr_log_bug, jm, (e))
#define jobmgr_assumes_zero(jm, e) osx_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
#define jobmgr_assumes_zero_p(jm, e) posix_assumes_zero_ctx(jobmgr_log_bug, jm, (e))

static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
static jobmgr_t jobmgr_parent(jobmgr_t jm);
static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
static bool jobmgr_label_test(jobmgr_t jm, const char *str);
static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
static void jobmgr_remove(jobmgr_t jm);
static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
static void job_export_all2(jobmgr_t jm, launch_data_t where);
static void jobmgr_callback(void *obj, struct kevent *kev);
static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static void jobmgr_log_perf_statistics(jobmgr_t jm);
// static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static bool jobmgr_log_bug(aslmsg asl_message, void *ctx, const char *message);

#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
#define AUTO_PICK_XPC_LABEL (const char *)(~2)

struct suspended_peruser {
	LIST_ENTRY(suspended_peruser) sle;
	job_t j;
};

struct job_s {
	// MUST be first element of this structure.
	kq_callback kqjob_callback;
	LIST_ENTRY(job_s) sle;
	LIST_ENTRY(job_s) subjob_sle;
	LIST_ENTRY(job_s) needing_session_sle;
	LIST_ENTRY(job_s) jetsam_sle;
	LIST_ENTRY(job_s) pid_hash_sle;
	LIST_ENTRY(job_s) label_hash_sle;
	LIST_ENTRY(job_s) global_env_sle;
	SLIST_ENTRY(job_s) curious_jobs_sle;
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;
	LIST_HEAD(, job_s) subjobs;
	LIST_HEAD(, externalevent) events;
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
	job_t alias;
	struct rusage ru;
	cpu_type_t *j_binpref;
	size_t j_binpref_cnt;
	mach_port_t j_port;
	mach_port_t exit_status_dest;
	mach_port_t exit_status_port;
	mach_port_t spawn_reply_port;
	uid_t mach_uid;
	jobmgr_t mgr;
	size_t argc;
	char **argv;
	char *prog;
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdinpath;
	char *stdoutpath;
	char *stderrpath;
	char *alt_exc_handler;
	unsigned int nruns;
	uint64_t trt;
#if HAVE_SANDBOX
	char *seatbelt_profile;
	uint64_t seatbelt_flags;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p;
	int last_exit_status;
	int stdin_fd;
	int fork_fd;
	int nice;
	uint32_t pstype;
	int32_t jetsam_priority;
	int32_t jetsam_memlimit;
	int32_t main_thread_priority;
	uint32_t timeout;
	uint32_t exit_timeout;
	uint64_t sent_signal_time;
	uint64_t start_time;
	uint32_t min_run_time;
	uint32_t start_interval;
	uint32_t peruser_suspend_count;
	uuid_t instance_id;
	mode_t mask;
	pid_t tracing_pid;
	mach_port_t asport;
	// Only set for per-user launchd's.
	au_asid_t asid;
	uuid_t expected_audit_uuid;
	bool
		// man launchd.plist --> Debug
		debug:1,
		// man launchd.plist --> KeepAlive == false
		ondemand:1,
		// man launchd.plist --> SessionCreate
		session_create:1,
		// man launchd.plist --> LowPriorityIO
		low_pri_io:1,
		// man launchd.plist --> InitGroups
		no_init_groups:1,
		/* A legacy mach_init concept to make bootstrap_create_server/service()
		 * work
		 */
		priv_port_has_senders:1,
		// A hack during job importing
		importing_global_env:1,
		// A hack during job importing
		importing_hard_limits:1,
		// man launchd.plist --> Umask
		setmask:1,
		// A process that launchd knows about but doesn't manage.
		anonymous:1,
		// A legacy mach_init concept to detect sick jobs
		checkedin:1,
		// A job created via bootstrap_create_server()
		legacy_mach_job:1,
		// A job created via spawn_via_launchd()
		legacy_LS_job:1,
		// A legacy job that wants inetd compatible semantics
		inetcompat:1,
		// A twist on inetd compatibility
		inetcompat_wait:1,
		/* An event fired and the job should start, but not necessarily right
		 * away.
		 */
		start_pending:1,
		// man launchd.plist --> EnableGlobbing
		globargv:1,
		// man launchd.plist --> WaitForDebugger
		wait4debugger:1,
		// One-shot WaitForDebugger.
		wait4debugger_oneshot:1,
		// MachExceptionHandler == true
		internal_exc_handler:1,
		// A hack to support an option of spawn_via_launchd()
		stall_before_exec:1,
		/* man launchd.plist --> LaunchOnlyOnce.
		 *
		 * Note: <rdar://problem/5465184> Rename this to "HopefullyNeverExits".
		 */
		only_once:1,
		/* Make job_ignore() / job_watch() work. If these calls were balanced,
		 * then this wouldn't be necessary.
		 */
		currently_ignored:1,
		/* A job that forced all other jobs to be temporarily launch-on-
		 * demand
		 */
		forced_peers_to_demand_mode:1,
		// man launchd.plist --> Nice
		setnice:1,
		/* A job was asked to be unloaded/removed while running, we'll remove it
		 * after it exits.
		 */
		removal_pending:1,
		// job_kill() was called.
		sent_sigkill:1,
		// Enter the kernel debugger before killing a job.
		debug_before_kill:1,
		// A hack that launchd+launchctl use during jobmgr_t creation.
		weird_bootstrap:1,
		// man launchd.plist --> StartOnMount
		start_on_mount:1,
		// This job is a per-user launchd managed by the PID 1 launchd.
		per_user:1,
		// A job that thoroughly confused launchd. We need to unload it ASAP.
		unload_at_mig_return:1,
		// man launchd.plist --> AbandonProcessGroup
		abandon_pg:1,
		/* During shutdown, do not send SIGTERM to stray processes in the
		 * process group of this job.
		 */
		ignore_pg_at_shutdown:1,
		/* Don't let this job create new 'job_t' objects in launchd. Has been
		 * seriously overloaded for the purposes of sandboxing.
		 */
		deny_job_creation:1,
		// man launchd.plist --> EnableTransactions
		enable_transactions:1,
		// The job was sent SIGKILL because it was clean.
		clean_kill:1,
		/* The job has a tracing PID (probably a debugger) and exited before the
		 * tracer did. So we must defer our reap attempt until after the tracer
		 * has exited. This works around our busted ptrace(3) implementation.
		 */
		reap_after_trace:1,
		// The job has an OtherJobEnabled KeepAlive criterion.
		nosy:1,
		// The job exited due to a crash.
		crashed:1,
		// We've received NOTE_EXIT for the job and reaped it.
		reaped:1,
		// job_stop() was called.
		stopped:1,
		// The job is considered "frontmost" by Jetsam.
		jetsam_frontmost:1,
		/* The job is not frontmost, but it is considered "active" (i.e.
		 * backgrounded) by Jetsam.
		 */
		jetsam_active:1,
		/* The job is to be kept alive continuously, but it must first get an
		 * initial kick off.
		 */
		needs_kickoff:1,
		// The job is a bootstrapper.
		is_bootstrapper:1,
		// The job owns the console.
		has_console:1,
		/* The job runs as a non-root user on embedded but has select privileges
		 * of the root user. This is SpringBoard.
		 */
		embedded_god:1,
		// We got NOTE_EXEC for the job.
		did_exec:1,
		// The job is an XPC service, and XPC proxy successfully exec(3)ed.
		xpcproxy_did_exec:1,
		// The (anonymous) job called vprocmgr_switch_to_session().
		holds_ref:1,
		// The job has Jetsam limits in place.
		jetsam_properties:1,
		/* This job was created as the result of a look up of a service provided
		 * by a MultipleInstance job.
		 */
		dedicated_instance:1,
		// The job supports creating additional instances of itself.
		multiple_instances:1,
		/* The sub-job was already removed from the parent's list of
		 * sub-jobs.
		 */
		former_subjob:1,
		/* The job is responsible for monitoring external events for this
		 * launchd.
		 */
		event_monitor:1,
		// The event monitor job has retrieved the initial list of events.
		event_monitor_ready2signal:1,
		// A lame hack.
		removing:1,
		// Disable ASLR when launching this job.
		disable_aslr:1,
		// The job is an XPC Service.
		xpc_service:1,
		// The job is the Performance team's shutdown monitor.
		shutdown_monitor:1,
		// We should open a transaction for the job when shutdown begins.
		dirty_at_shutdown:1,
		/* The job was sent SIGKILL but did not exit in a timely fashion,
		 * indicating a kernel bug.
		 */
		workaround9359725:1,
		// The job is the XPC domain bootstrapper.
		xpc_bootstrapper:1,
		// The job is an app (on either iOS or OS X) and has different resource
		// limitations.
		app:1,
		// The job failed to exec(3) for reasons that may be transient, so we're
		// waiting for UserEventAgent to tell us when it's okay to try spawning
		// again (i.e. when the executable path appears, when the UID appears,
		// etc.).
		waiting4ok:1;

	const char label[0];
};
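
/* Illustrative sketch (not part of launchd, compiled out): the trailing
 * zero-length array idiom used by struct job_s (label[0]), struct
 * machservice (name[0]) and friends. The variable-length string lives in
 * the same allocation as the struct, immediately after it; job_new_subjob()
 * below does exactly this. The names here are hypothetical.
 */
#if 0
struct demo_labeled {
	int refcount;
	char label[0]; // string storage follows the struct in the same block
};

static struct demo_labeled *
example_alloc_labeled(const char *label)
{
	size_t sz = strlen(label) + 1;
	struct demo_labeled *d = calloc(1, sizeof(*d) + sz);
	if (d) {
		memcpy(d->label, label, sz);
	}
	return d;
}
#endif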

static size_t hash_label(const char *label) __attribute__((pure));
static size_t hash_ms(const char *msstr) __attribute__((pure));
static SLIST_HEAD(, job_s) s_curious_jobs;

#define job_assumes(j, e) osx_assumes_ctx(job_log_bug, j, (e))
#define job_assumes_zero(j, e) osx_assumes_zero_ctx(job_log_bug, j, (e))
#define job_assumes_zero_p(j, e) posix_assumes_zero_ctx(job_log_bug, j, (e))

static void job_import_keys(launch_data_t obj, const char *key, void *context);
static void job_import_bool(job_t j, const char *key, bool value);
static void job_import_string(job_t j, const char *key, const char *value);
static void job_import_integer(job_t j, const char *key, long long value);
static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
static void job_import_array(job_t j, const char *key, launch_data_t value);
static void job_import_opaque(job_t j, const char *key, launch_data_t value);
static bool job_set_global_on_demand(job_t j, bool val);
static const char *job_active(job_t j);
static void job_watch(job_t j);
static void job_ignore(job_t j);
static void job_cleanup_after_tracer(job_t j);
static void job_reap(job_t j);
static bool job_useless(job_t j);
static bool job_keepalive(job_t j);
static void job_dispatch_curious_jobs(job_t j);
static void job_start(job_t j);
static void job_start_child(job_t j) __attribute__((noreturn));
static void job_setup_attributes(job_t j);
static bool job_setup_machport(job_t j);
static kern_return_t job_setup_exit_port(job_t j);
static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
static void job_postfork_become_user(job_t j);
static void job_postfork_test_user(job_t j);
static void job_log_pids_with_weird_uids(job_t j);
static void job_setup_exception_port(job_t j, task_t target_task);
static void job_callback(void *obj, struct kevent *kev);
static void job_callback_proc(job_t j, struct kevent *kev);
static void job_callback_timer(job_t j, void *ident);
static void job_callback_read(job_t j, int ident);
static void job_log_stray_pg(job_t j);
static void job_log_children_without_exec(job_t j);
static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
static job_t job_new_alias(jobmgr_t jm, job_t src);
static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new_subjob(job_t j, uuid_t identifier);
static void job_kill(job_t j);
static void job_uncork_fork(job_t j);
static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static bool job_log_bug(aslmsg asl_message, void *ctx, const char *message);
static void job_log_perf_statistics(job_t j);
static void job_set_exception_port(job_t j, mach_port_t port);
static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
static void job_open_shutdown_transaction(job_t ji);
static void job_close_shutdown_transaction(job_t ji);
static launch_data_t job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport);
static void job_setup_per_user_directory(job_t j, uid_t uid, const char *path);
static void job_setup_per_user_directories(job_t j, uid_t uid, const char *label);

static const struct {
	const char *key;
	int val;
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
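
/* Illustrative sketch (not part of launchd, compiled out): how a plist
 * resource-limit key can be mapped through launchd_keys2limits to a
 * setrlimit(2) call. The real logic lives in limititem_setup() and
 * limititem_update(); this standalone helper is an approximation under
 * assumed semantics, not launchd's actual code path.
 */
#if 0
static bool
example_apply_limit(const char *key, rlim_t value)
{
	size_t i;
	for (i = 0; i < sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]); i++) {
		if (strcmp(key, launchd_keys2limits[i].key) == 0) {
			struct rlimit rl;
			if (getrlimit(launchd_keys2limits[i].val, &rl) == -1) {
				return false;
			}
			rl.rlim_cur = value; // this sketch leaves the hard limit alone
			return setrlimit(launchd_keys2limits[i].val, &rl) == 0;
		}
	}
	return false;
}
#endif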

static time_t cronemu(int mon, int mday, int hour, int min);
static time_t cronemu_wday(int wday, int hour, int min);
static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
static bool cronemu_hour(struct tm *wtm, int hour, int min);
static bool cronemu_min(struct tm *wtm, int min);

// miscellaneous file local functions
static size_t get_kern_max_proc(void);
static char **mach_cmd2argv(const char *string);
static size_t our_strhash(const char *s) __attribute__((pure));

void eliminate_double_reboot(void);

#pragma mark XPC Domain Forward Declarations
static job_t _xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
static int _xpc_domain_import_services(job_t j, launch_data_t services);

#pragma mark XPC Event Forward Declarations
static int xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms);
static int xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply);

// file local globals
static job_t _launchd_embedded_god = NULL;
static size_t total_children;
static size_t total_anon_children;
static mach_port_t the_exception_server;
static job_t workaround_5477111;
static LIST_HEAD(, job_s) s_needing_sessions;
static LIST_HEAD(, eventsystem) _s_event_systems;
static struct eventsystem *_launchd_support_system;
static job_t _launchd_event_monitor;
static job_t _launchd_xpc_bootstrapper;
static job_t _launchd_shutdown_monitor;

mach_port_t launchd_audit_port = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
au_asid_t launchd_audit_session = AU_DEFAUDITSID;
#else
pid_t launchd_audit_session = 0;
#endif

static int s_no_hang_fd = -1;

// process wide globals
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool launchd_shutdown_debugging = false;
bool launchd_verbose_boot = false;
bool launchd_embedded_handofgod = false;
bool launchd_runtime_busy_time = false;

void
job_ignore(job_t j)
{
	struct socketgroup *sg;
	struct machservice *ms;

	if (j->currently_ignored) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring...");

	j->currently_ignored = true;

	SLIST_FOREACH(sg, &j->sockets, sle) {
		socketgroup_ignore(j, sg);
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		machservice_ignore(j, ms);
	}
}

void
job_watch(job_t j)
{
	struct socketgroup *sg;
	struct machservice *ms;

	if (!j->currently_ignored) {
		return;
	}

	job_log(j, LOG_DEBUG, "Watching...");

	j->currently_ignored = false;

	SLIST_FOREACH(sg, &j->sockets, sle) {
		socketgroup_watch(j, sg);
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		machservice_watch(j, ms);
	}
}

void
job_stop(job_t j)
{
	int sig;

	if (unlikely(!j->p || j->stopped || j->anonymous)) {
		return;
	}

#if TARGET_OS_EMBEDDED
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!_launchd_embedded_god->username || !j->username) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	j->sent_signal_time = runtime_get_opaque_time();

	job_log(j, LOG_DEBUG | LOG_CONSOLE, "Stopping job...");

	int error = -1;
	error = proc_terminate(j->p, &sig);
	if (error) {
		job_log(j, LOG_ERR | LOG_CONSOLE, "Could not terminate job: %d: %s", error, strerror(error));
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Using fallback option to terminate job...");
		error = kill2(j->p, SIGTERM);
		if (error) {
			job_log(j, LOG_ERR, "Could not signal job: %d: %s", error, strerror(error));
		} else {
			sig = SIGTERM;
		}
	}

	if (!error) {
		switch (sig) {
		case SIGKILL:
			j->sent_sigkill = true;
			j->clean_kill = true;
			error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j);
			(void)job_assumes_zero_p(j, error);

			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sent job SIGKILL.");
			break;
		case SIGTERM:
			if (j->exit_timeout) {
				error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j);
				(void)job_assumes_zero_p(j, error);
			} else {
				job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
			}
			job_log(j, LOG_DEBUG, "Sent job SIGTERM.");
			break;
		default:
			job_log(j, LOG_ERR | LOG_CONSOLE, "Job was sent unexpected signal: %d: %s", sig, strsignal(sig));
			break;
		}
	}

	j->stopped = true;
}

launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}
	if ((tmp = launch_data_new_integer(j->last_exit_status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->enable_transactions && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		unsigned int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
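
/* Illustrative sketch (not part of launchd, compiled out): what a legacy
 * launch(3) client does with the dictionary job_export() builds. A client
 * can ask for a single job's export with the LAUNCH_KEY_GETJOB request and
 * read keys like LAUNCH_JOBKEY_PID back out. Error handling is elided and
 * the function name is hypothetical.
 */
#if 0
static void
example_query_job(const char *label)
{
	launch_data_t req = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
	launch_data_dict_insert(req, launch_data_new_string(label), LAUNCH_KEY_GETJOB);

	launch_data_t resp = launch_msg(req);
	launch_data_free(req);

	if (resp && launch_data_get_type(resp) == LAUNCH_DATA_DICTIONARY) {
		launch_data_t pid = launch_data_dict_lookup(resp, LAUNCH_JOBKEY_PID);
		if (pid) {
			// launch_data_get_integer(pid) is the job's current PID.
		}
	}
	if (resp) {
		launch_data_free(resp);
	}
}
#endif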

static void
jobmgr_log_active_jobs(jobmgr_t jm)
{
	const char *why_active;
	jobmgr_t jmi;
	job_t ji;

	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_active_jobs(jmi);
	}

	int level = LOG_DEBUG;
	if (pid1_magic) {
		level |= LOG_CONSOLE;
	}

	LIST_FOREACH(ji, &jm->jobs, sle) {
		if ((why_active = job_active(ji))) {
			if (ji->p != 1) {
				job_log(ji, level, "%s", why_active);

				uint32_t flags = 0;
				(void)proc_get_dirty(ji->p, &flags);
				if (!(flags & PROC_DIRTY_TRACKED)) {
					continue;
				}

				char *dirty = "clean";
				if (flags & PROC_DIRTY_IS_DIRTY) {
					dirty = "dirty";
				}

				char *idle_exit = "idle-exit unsupported";
				if (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT) {
					idle_exit = "idle-exit supported";
				}

				job_log(ji, level, "Killability: %s/%s", dirty, idle_exit);
			}
		}
	}
}

static void
jobmgr_still_alive_with_check(jobmgr_t jm)
{
	int level = LOG_DEBUG;
	if (pid1_magic) {
		level |= LOG_CONSOLE;
	}

	jobmgr_log(jm, level, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
	jobmgr_log_active_jobs(jm);
	launchd_log_push();
}

jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim the newline that asctime_r(3) puts there for some reason.
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	jm->shutting_down = true;

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (!jm->parentmgr) {
		if (pid1_magic) {
			// Spawn the shutdown monitor.
			if (_launchd_shutdown_monitor && !_launchd_shutdown_monitor->p) {
				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
				job_dispatch(_launchd_shutdown_monitor, true);
			}
		}

		(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));
	}

	return jobmgr_do_garbage_collection(jm);
}

void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!SLIST_EMPTY(&jm->submgrs)) {
		size_t cnt = 0;
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
			cnt++;
		}

		(void)jobmgr_assumes_zero(jm, cnt);
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && ji->p != 0) {
			job_log(ji, LOG_ERR, "Job is still active at job manager teardown.");
			ji->p = 0;
		}
		job_remove(ji);
	}

	if (jm->req_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_port));
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_close_recv(jm->jm_port));
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_bsport));
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_excport));
	}
	if (MACH_PORT_VALID(jm->req_asport)) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_asport));
	}
	if (jm->req_rport) {
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			(void)jobmgr_assumes_zero(jm, kr);
		}
	}
	if (jm->req_ctx) {
		(void)jobmgr_assumes_zero(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz));
	}

	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	date[24] = 0;

	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (pid1_magic) {
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		launchd_closelog();
		(void)jobmgr_assumes_zero_p(jm, reboot(jm->reboot_flags));
	} else {
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		launchd_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}

void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

	if (j->alias) {
		/* HACK: Egregious code duplication. But as with machservice_delete(),
		 * job aliases can't (and shouldn't) have any complex behaviors
		 * associated with them.
		 */
		while ((ms = SLIST_FIRST(&j->machservices))) {
			machservice_delete(j, ms, false);
		}

		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, label_hash_sle);
		free(j);
		return;
	}

#if TARGET_OS_EMBEDDED
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!(_launchd_embedded_god->username && j->username)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	/* Do this BEFORE we check and see whether the job is still active. If we're
	 * a sub-job, we're being removed due to the parent job removing us.
	 * Therefore, the parent job will free itself after this call completes. So
	 * if we defer removing ourselves from the parent's list, we'll crash when
	 * we finally get around to it.
	 */
	if (j->dedicated_instance && !j->former_subjob) {
		LIST_REMOVE(j, subjob_sle);
		j->former_subjob = true;
	}

	if (unlikely(j->p)) {
		if (j->anonymous) {
			job_reap(j);
		} else {
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}

			return;
		}
	}

	if (!j->removing) {
		j->removing = true;
		job_dispatch_curious_jobs(j);
	}

	ipc_close_all_with_job(j);

	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	if (job_assumes_zero(j, j->fork_fd)) {
		(void)posix_assumes_zero(runtime_close(j->fork_fd));
	}

	if (j->stdin_fd) {
		(void)posix_assumes_zero(runtime_close(j->stdin_fd));
	}

	if (j->j_port) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	}

	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	struct externalevent *eei = NULL;
	while ((eei = LIST_FIRST(&j->events))) {
		externalevent_delete(eei);
	}

	if (j->event_monitor) {
		_launchd_event_monitor = NULL;
	}
	if (j->xpc_bootstrapper) {
		_launchd_xpc_bootstrapper = NULL;
	}

	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	if (j->start_interval) {
		runtime_del_weak_ref();
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
	}
	if (j->exit_timeout) {
		/* If this fails, it just means the timer's already fired, so no need to
		 * wrap it in an assumes() macro.
		 */
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->asport != MACH_PORT_NULL) {
		(void)job_assumes_zero(j, launchd_mport_deallocate(j->asport));
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if (j->embedded_god) {
		_launchd_embedded_god = NULL;
	}
	if (j->shutdown_monitor) {
		_launchd_shutdown_monitor = NULL;
	}

	(void)kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	job_t ji = NULL;
	job_t jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
		job_remove(ji);
	}

	job_log(j, LOG_DEBUG, "Removed");

	j->kqjob_callback = (kq_callback)0x8badf00d;
	free(j);
}

void
socketgroup_setup(launch_data_t obj, const char *key, void *context)
{
	launch_data_t tmp_oai;
	job_t j = context;
	size_t i, fd_cnt = 1;
	int *fds;

	if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
		fd_cnt = launch_data_array_get_count(obj);
	}

	fds = alloca(fd_cnt * sizeof(int));

	for (i = 0; i < fd_cnt; i++) {
		if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
			tmp_oai = launch_data_array_get_index(obj, i);
		} else {
			tmp_oai = obj;
		}

		fds[i] = launch_data_get_fd(tmp_oai);
	}

	socketgroup_new(j, key, fds, fd_cnt);

	ipc_revoke_fds(obj);
}

bool
job_set_global_on_demand(job_t j, bool val)
{
	if (j->forced_peers_to_demand_mode && val) {
		return false;
	} else if (!j->forced_peers_to_demand_mode && !val) {
		return false;
	}

	if ((j->forced_peers_to_demand_mode = val)) {
		j->mgr->global_on_demand_cnt++;
	} else {
		j->mgr->global_on_demand_cnt--;
	}

	if (j->mgr->global_on_demand_cnt == 0) {
		jobmgr_dispatch_all(j->mgr, false);
	}

	return true;
}

bool
job_setup_machport(job_t j)
{
	if (job_assumes_zero(j, launchd_mport_create_recv(&j->j_port)) != KERN_SUCCESS) {
		goto out_bad;
	}

	if (job_assumes_zero(j, runtime_add_mport(j->j_port, job_server)) != KERN_SUCCESS) {
		goto out_bad2;
	}

	if (job_assumes_zero(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS)) != KERN_SUCCESS) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
		goto out_bad;
	}

	return true;
out_bad2:
	(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
out_bad:
	return false;
}
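
/* Illustrative sketch (not part of launchd, compiled out): the raw Mach call
 * underlying a no-senders notification request like the one
 * job_setup_machport() makes through launchd's launchd_mport_notify_req()
 * wrapper. Once every send right to `port` is deallocated, the kernel
 * delivers a MACH_NOTIFY_NO_SENDERS message to the port itself.
 */
#if 0
static kern_return_t
example_request_no_senders(mach_port_t port)
{
	mach_port_t previous = MACH_PORT_NULL;
	return mach_port_request_notification(mach_task_self(), port,
			MACH_NOTIFY_NO_SENDERS, 0 /* sync */, port,
			MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
}
#endif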

kern_return_t
job_setup_exit_port(job_t j)
{
	kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
	if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
		return MACH_PORT_NULL;
	}

	struct mach_port_limits limits = {
		.mpl_qlimit = 1,
	};
	kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
	(void)job_assumes_zero(j, kr);

	kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
	if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
		j->exit_status_port = MACH_PORT_NULL;
	}

	return kr;
}

job_t
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
{
	const char **argv = (const char **)mach_cmd2argv(cmd);
	job_t jr = NULL;

	if (!argv) {
		goto out_bad;
	}

	jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
	free(argv);

	// Job creation can be denied during shutdown.
	if (unlikely(jr == NULL)) {
		goto out_bad;
	}

	jr->mach_uid = uid;
	jr->ondemand = ond;
	jr->legacy_mach_job = true;
	jr->abandon_pg = true;
	jr->priv_port_has_senders = true; // the IPC that called us will make-send on this port

	if (!job_setup_machport(jr)) {
		goto out_bad;
	}

	job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");

	return jr;

out_bad:
	if (jr) {
		job_remove(jr);
	}
	return NULL;
}

job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	struct proc_bsdshortinfo proc;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (anonpid == 0) {
		errno = EINVAL;
		return NULL;
	}

	if (anonpid >= 100000) {
		/* The kernel currently defines PID_MAX to be 99999, but that define
		 * isn't exported.
		 */
		launchd_syslog(LOG_WARNING, "Did PID_MAX change? Got request from PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	/* libproc returns the number of bytes written into the buffer upon success,
	 * zero on failure. I'd much rather it return -1 on failure, like sysctl(3).
	 */
	if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}
		return NULL;
	}

	if (proc.pbsi_comm[0] == '\0') {
		launchd_syslog(LOG_WARNING, "Blank command for PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(proc.pbsi_status == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
	}

	if (unlikely(proc.pbsi_flags & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
	}

	kp_euid = proc.pbsi_uid;
	kp_uid = proc.pbsi_ruid;
	kp_svuid = proc.pbsi_svuid;
	kp_egid = proc.pbsi_gid;
	kp_gid = proc.pbsi_rgid;
	kp_svgid = proc.pbsi_svgid;

	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
	}

	/* "Fix" for when the kernel turns the process tree into a weird, cyclic
	 * graph.
	 *
	 * See <rdar://problem/7264615> for the symptom and <rdar://problem/5020256>
	 * as to why this can happen.
	 */
	if ((pid_t)proc.pbsi_ppid == anonpid) {
		jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). Ignoring: %s", proc.pbsi_comm);
		errno = EINVAL;
		return NULL;
	}

	/* HACK: Normally, job_new() returns an error during shutdown, but anonymous
	 * jobs can pop up during shutdown and need to talk to us.
	 */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	// We only set requestor_pid for XPC domains.
	const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
	if ((jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL))) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		// Anonymous process reaping is messy.
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1)) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(jr, errno);
			}

			// Zombies interact weirdly with kevent(3).
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state)) {
			job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
	} else {
		(void)osx_assumes_zero(errno);
	}

	// Undo our hack from above.
	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	/* This is down here to prevent infinite recursion due to a process
	 * attaching to its parent through ptrace(3) -- causing a cycle in the
	 * process tree and thereby not making it a tree anymore. We need to make
	 * sure that the anonymous job has been added to the process list so that
	 * we'll find the tracing parent PID of the parent process, which is the
	 * child, when we go looking for it in jobmgr_find_by_pid().
	 *
	 * <rdar://problem/7264615>
	 */
	switch (proc.pbsi_ppid) {
	case 0:
		// The kernel.
		break;
	case 1:
		if (!pid1_magic) {
			break;
		}
		// Fall through.
	default:
		jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
		if (jobmgr_assumes(jm, jp != NULL)) {
			if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
				job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
			}
		}
		break;
	}

	return jr;
}
1747
1748 job_t
1749 job_new_subjob(job_t j, uuid_t identifier)
1750 {
1752 uuid_string_t idstr;
1753 uuid_unparse(identifier, idstr);
1754 size_t label_sz = snprintf(NULL, 0, "%s.%s", j->label, idstr);
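/* For illustration: snprintf(NULL, 0, ...) is the standard C99 idiom for
 * measuring a formatted string without writing it. A minimal sketch of the
 * measure-then-allocate pattern used here (names are illustrative):
 *
 *	size_t need = snprintf(NULL, 0, "%s.%s", a, b);	// length, sans NUL
 *	char *buf = malloc(need + 1);
 *	if (buf != NULL) {
 *		(void)snprintf(buf, need + 1, "%s.%s", a, b);
 *	}
 */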
1755
1756 job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
1757 if (nj != NULL) {
1758 nj->kqjob_callback = job_callback;
1759 nj->mgr = j->mgr;
1760 nj->min_run_time = j->min_run_time;
1761 nj->timeout = j->timeout;
1762 nj->exit_timeout = j->exit_timeout;
1763
1764 snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
1765
1766 // Set all our simple Booleans that are applicable.
1767 nj->debug = j->debug;
1768 nj->ondemand = j->ondemand;
1769 nj->checkedin = true;
1770 nj->low_pri_io = j->low_pri_io;
1771 nj->setmask = j->setmask;
1772 nj->wait4debugger = j->wait4debugger;
1773 nj->internal_exc_handler = j->internal_exc_handler;
1774 nj->setnice = j->setnice;
1775 nj->abandon_pg = j->abandon_pg;
1776 nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
1777 nj->deny_job_creation = j->deny_job_creation;
1778 nj->enable_transactions = j->enable_transactions;
1779 nj->needs_kickoff = j->needs_kickoff;
1780 nj->currently_ignored = true;
1781 nj->dedicated_instance = true;
1782 nj->xpc_service = j->xpc_service;
1783 nj->xpc_bootstrapper = j->xpc_bootstrapper;
1784
1785 nj->mask = j->mask;
1786 uuid_copy(nj->instance_id, identifier);
1787
1788 // These jobs are purely on-demand Mach jobs.
1789 // {Hard | Soft}ResourceLimits are not supported.
1790 // JetsamPriority is not supported.
1791
1792 if (j->prog) {
1793 nj->prog = strdup(j->prog);
1794 }
1795 if (j->argv) {
1796 size_t sz = malloc_size(j->argv);
1797 nj->argv = (char **)malloc(sz);
1798 if (nj->argv != NULL) {
1799 // This is the start of our strings.
1800 char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
1801
1802 size_t i = 0;
1803 for (i = 0; i < j->argc; i++) {
1804 (void)strcpy(p, j->argv[i]);
1805 nj->argv[i] = p;
1806 p += (strlen(j->argv[i]) + 1);
1807 }
1808 nj->argv[i] = NULL;
1809 } else {
1810 (void)job_assumes_zero(nj, errno);
1811 }
1812
1813 nj->argc = j->argc;
1814 }
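/* A sketch of the copy above. The original j->argv is one allocation: an
 * argc+1 pointer array immediately followed by the packed, NUL-terminated
 * strings, which is why malloc_size() on the old block is sufficient to
 * size the new one. For argc == 2, the layout is:
 *
 *	[ptr0][ptr1][NULL]["arg0\0""arg1\0"]
 *	   |     |           ^       ^
 *	   |     +-----------|-------+
 *	   +-----------------+
 */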
1815
1816 struct machservice *msi = NULL;
1817 SLIST_FOREACH(msi, &j->machservices, sle) {
1818 /* Only copy MachServices that were actually declared in the plist.
1819 * So skip over per-PID ones and ones that were created via
1820 * bootstrap_register().
1821 */
1822 if (msi->upfront) {
1823 mach_port_t mp = MACH_PORT_NULL;
1824 struct machservice *msj = machservice_new(nj, msi->name, &mp, msi->per_pid);
1825 if (msj != NULL) {
1826 msj->reset = msi->reset;
1827 msj->delete_on_destruction = msi->delete_on_destruction;
1828 msj->drain_one_on_crash = msi->drain_one_on_crash;
1829 msj->drain_all_on_crash = msi->drain_all_on_crash;
1830 } else {
1831 (void)job_assumes_zero(nj, errno);
1832 }
1833 }
1834 }
1835
1836 // We ignore global environment variables.
1837 struct envitem *ei = NULL;
1838 SLIST_FOREACH(ei, &j->env, sle) {
1839 if (envitem_new(nj, ei->key, ei->value, false)) {
1840 (void)job_assumes_zero(nj, errno);
1841 }
1842 }
1843 uuid_string_t val;
1844 uuid_unparse(identifier, val);
1845 if (envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false)) {
1846 (void)job_assumes_zero(nj, errno);
1847 }
1848
1849 if (j->rootdir) {
1850 nj->rootdir = strdup(j->rootdir);
1851 }
1852 if (j->workingdir) {
1853 nj->workingdir = strdup(j->workingdir);
1854 }
1855 if (j->username) {
1856 nj->username = strdup(j->username);
1857 }
1858 if (j->groupname) {
1859 nj->groupname = strdup(j->groupname);
1860 }
1861
1862 /* FIXME: We shouldn't redirect all the output from these jobs to the
1863 * same file. We should uniquify the file names. But this hasn't shown
1864 * to be a problem in practice.
1865 */
1866 if (j->stdinpath) {
1867 nj->stdinpath = strdup(j->stdinpath);
1868 }
1869 if (j->stdoutpath) {
1870 nj->stdoutpath = strdup(j->stdoutpath);
1871 }
1872 if (j->stderrpath) {
1873 nj->stderrpath = strdup(j->stderrpath);
1874 }
1875 if (j->alt_exc_handler) {
1876 nj->alt_exc_handler = strdup(j->alt_exc_handler);
1877 }
1878 #if HAVE_SANDBOX
1879 if (j->seatbelt_profile) {
1880 nj->seatbelt_profile = strdup(j->seatbelt_profile);
1881 }
1882 #endif
1883
1884 #if HAVE_QUARANTINE
1885 if (j->quarantine_data) {
1886 nj->quarantine_data = strdup(j->quarantine_data);
1887 }
1888 nj->quarantine_data_sz = j->quarantine_data_sz;
1889 #endif
1890 if (j->j_binpref) {
1891 size_t sz = malloc_size(j->j_binpref);
1892 nj->j_binpref = (cpu_type_t *)malloc(sz);
1893 if (nj->j_binpref) {
1894 memcpy(nj->j_binpref, j->j_binpref, sz);
1895 } else {
1896 (void)job_assumes_zero(nj, errno);
1897 }
1898 }
1899
1900 if (j->asport != MACH_PORT_NULL) {
1901 (void)job_assumes_zero(nj, launchd_mport_copy_send(j->asport));
1902 nj->asport = j->asport;
1903 }
1904
1905 LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
1906
1907 jobmgr_t where2put = root_jobmgr;
1908 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
1909 where2put = j->mgr;
1910 }
1911 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
1912 LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
1913 } else {
1914 (void)osx_assumes_zero(errno);
1915 }
1916
1917 return nj;
1918 }
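/* A hypothetical caller sketch, for illustration only. Jobs marked
 * MultipleInstances get one sub-job per instance UUID:
 *
 *	uuid_t instance_id;
 *	uuid_generate(instance_id);
 *	job_t sub = job_new_subjob(j, instance_id);
 *	// sub's label is "<j->label>.<uuid string>" and, per the code above,
 *	// sub->dedicated_instance is true.
 */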
1919
1920 job_t
1921 job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
1922 {
1923 const char *const *argv_tmp = argv;
1924 char tmp_path[PATH_MAX];
1925 char auto_label[1000];
1926 const char *bn = NULL;
1927 char *co;
1928 size_t minlabel_len;
1929 size_t i, cc = 0;
1930 job_t j;
1931
1932 __OSX_COMPILETIME_ASSERT__(offsetof(struct job_s, kqjob_callback) == 0);
1933
1934 if (unlikely(jm->shutting_down)) {
1935 errno = EINVAL;
1936 return NULL;
1937 }
1938
1939 if (unlikely(prog == NULL && argv == NULL)) {
1940 errno = EINVAL;
1941 return NULL;
1942 }
1943
1944 /* I'd really like to redo this someday. Anonymous jobs carry all the
1945 * baggage of managed jobs with them, even though most of it is unused.
1946 * Maybe when we have Objective-C objects in libSystem, there can be a base
1947 * job type that anonymous and managed jobs inherit from...
1948 */
1949 const char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
1950 if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
1951 if (prog) {
1952 bn = prog;
1953 } else {
1954 strlcpy(tmp_path, argv[0], sizeof(tmp_path));
1955 // prog for auto labels is kp.kp_kproc.p_comm.
1956 bn = basename(tmp_path);
1957 }
1958
1959 (void)snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
1960 label = auto_label;
1961 /* This is so we can do gross things later. See NOTE_EXEC for anonymous
1962 * jobs.
1963 */
1964 minlabel_len = strlen(label) + MAXCOMLEN;
1965 } else {
1966 if (label == AUTO_PICK_XPC_LABEL) {
1967 minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
1968 } else {
1969 minlabel_len = strlen(label);
1970 }
1971 }
1972
1973 j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);
1974
1975 if (!j) {
1976 (void)osx_assumes_zero(errno);
1977 return NULL;
1978 }
1979
1980 if (unlikely(label == auto_label)) {
1981 (void)snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
1982 } else {
1983 (void)strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
1984 }
1985
1986 j->kqjob_callback = job_callback;
1987 j->mgr = jm;
1988 j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
1989 j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
1990 j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
1991 j->currently_ignored = true;
1992 j->ondemand = true;
1993 j->checkedin = true;
1994 j->jetsam_priority = DEFAULT_JETSAM_PRIORITY;
1995 j->jetsam_memlimit = -1;
1996 uuid_clear(j->expected_audit_uuid);
1997 #if TARGET_OS_EMBEDDED
1998 /* Run embedded daemons as background by default. SpringBoard jobs are
1999 * Interactive by default. Unfortunately, so many daemons have opted into
2000 * this priority band that its usefulness is highly questionable.
2001 *
2002 * See <rdar://problem/9539873>.
2003 */
2004 if (launchd_embedded_handofgod) {
2005 j->pstype = POSIX_SPAWN_IOS_INTERACTIVE;
2006 j->app = true;
2007 } else {
2008 j->pstype = POSIX_SPAWN_IOS_APPLE_DAEMON_START;
2009 }
2010 #endif
2011
2012 if (prog) {
2013 j->prog = strdup(prog);
2014 if (!j->prog) {
2015 (void)osx_assumes_zero(errno);
2016 goto out_bad;
2017 }
2018 }
2019
2020 if (likely(argv)) {
2021 while (*argv_tmp++) {
2022 j->argc++;
2023 }
2024
2025 for (i = 0; i < j->argc; i++) {
2026 cc += strlen(argv[i]) + 1;
2027 }
2028
2029 j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
2030 if (!j->argv) {
2031 (void)job_assumes_zero(j, errno);
2032 goto out_bad;
2033 }
2034
2035 co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));
2036
2037 for (i = 0; i < j->argc; i++) {
2038 j->argv[i] = co;
2039 (void)strcpy(co, argv[i]);
2040 co += strlen(argv[i]) + 1;
2041 }
2042 j->argv[i] = NULL;
2043 }
2044
2045 // Sssshhh... don't tell anyone.
2046 if (strcmp(j->label, "com.apple.WindowServer") == 0) {
2047 j->has_console = true;
2048 }
2049
2050 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2051
2052 jobmgr_t where2put_label = root_jobmgr;
2053 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
2054 where2put_label = j->mgr;
2055 }
2056 LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
2057 uuid_clear(j->expected_audit_uuid);
2058
2059 job_log(j, LOG_DEBUG, "Conceived");
2060
2061 return j;
2062
2063 out_bad:
2064 if (j->prog) {
2065 free(j->prog);
2066 }
2067 free(j);
2068
2069 return NULL;
2070 }
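/* A hypothetical invocation of job_new(), illustrating the argv packing
 * above (the label and path are made up):
 *
 *	const char *argv[] = { "/usr/libexec/exampled", "-d", NULL };
 *	job_t j = job_new(jm, "com.example.exampled", NULL, argv);
 *	// On success, j->argv points into a single allocation holding both
 *	// the pointer array and the copied strings.
 */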
2071
2072 job_t
2073 job_new_alias(jobmgr_t jm, job_t src)
2074 {
2075 if (job_find(jm, src->label)) {
2076 errno = EEXIST;
2077 return NULL;
2078 }
2079
2080 job_t j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2081 if (!j) {
2082 (void)osx_assumes_zero(errno);
2083 return NULL;
2084 }
2085
2086 (void)strcpy((char *)j->label, src->label);
2087 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2088 LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2089 /* Bad jump address. The kqueue callback for aliases should never be
2090 * invoked.
2091 */
2092 j->kqjob_callback = (kq_callback)0xfa1afe1;
2093 j->alias = src;
2094 j->mgr = jm;
2095
2096 struct machservice *msi = NULL;
2097 SLIST_FOREACH(msi, &src->machservices, sle) {
2098 if (!machservice_new_alias(j, msi)) {
2099 jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2100 errno = EINVAL;
2101 job_remove(j);
2102 j = NULL;
2103 break;
2104 }
2105 }
2106
2107 if (j) {
2108 job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2109 }
2110
2111 return j;
2112 }
2113
2114 job_t
2115 job_import(launch_data_t pload)
2116 {
2117 job_t j = jobmgr_import2(root_jobmgr, pload);
2118
2119 if (unlikely(j == NULL)) {
2120 return NULL;
2121 }
2122
2123 /* Since jobs are effectively stalled until they get security sessions
2124 * assigned to them, we may wish to reconsider this behavior of calling the
2125 * job "enabled" as far as other jobs with the OtherJobEnabled KeepAlive
2126 * criterion set.
2127 */
2128 job_dispatch_curious_jobs(j);
2129 return job_dispatch(j, false);
2130 }
2131
2132 launch_data_t
2133 job_import_bulk(launch_data_t pload)
2134 {
2135 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
2136 job_t *ja;
2137 size_t i, c = launch_data_array_get_count(pload);
2138
2139 ja = alloca(c * sizeof(job_t));
2140
2141 for (i = 0; i < c; i++) {
2142 if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
2143 errno = 0;
2144 }
2145 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
2146 }
2147
2148 for (i = 0; i < c; i++) {
2149 if (likely(ja[i])) {
2150 job_dispatch_curious_jobs(ja[i]);
2151 job_dispatch(ja[i], false);
2152 }
2153 }
2154
2155 return resp;
2156 }
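/* Note that the bulk import deliberately runs in two passes -- create every
 * job first, then dispatch -- presumably so that jobs whose KeepAlive
 * criteria reference other jobs in the same payload (OtherJobEnabled) can
 * already find them registered by the time they are dispatched.
 */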
2157
2158 void
2159 job_import_bool(job_t j, const char *key, bool value)
2160 {
2161 bool found_key = false;
2162
2163 switch (key[0]) {
2164 case 'a':
2165 case 'A':
2166 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2167 j->abandon_pg = value;
2168 found_key = true;
2169 }
2170 break;
2171 case 'b':
2172 case 'B':
2173 if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2174 j->dirty_at_shutdown = value;
2175 found_key = true;
2176 }
2177 break;
2178 case 'k':
2179 case 'K':
2180 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2181 j->ondemand = !value;
2182 found_key = true;
2183 }
2184 break;
2185 case 'o':
2186 case 'O':
2187 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
2188 j->ondemand = value;
2189 found_key = true;
2190 }
2191 break;
2192 case 'd':
2193 case 'D':
2194 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
2195 j->debug = value;
2196 found_key = true;
2197 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
2198 (void)job_assumes(j, !value);
2199 found_key = true;
2200 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2201 j->disable_aslr = value;
2202 found_key = true;
2203 }
2204 break;
2205 case 'h':
2206 case 'H':
2207 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
2208 job_log(j, LOG_PERF, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
2209 j->dirty_at_shutdown = value;
2210 found_key = true;
2211 }
2212 break;
2213 case 's':
2214 case 'S':
2215 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
2216 j->session_create = value;
2217 found_key = true;
2218 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2219 j->start_on_mount = value;
2220 found_key = true;
2221 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
2222 // This key only does something on Mac OS X 10.4 "Tiger".
2223 found_key = true;
2224 } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
2225 if (_launchd_shutdown_monitor) {
2226 job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2227 } else {
2228 j->shutdown_monitor = true;
2229 _launchd_shutdown_monitor = j;
2230 }
2231 found_key = true;
2232 }
2233 break;
2234 case 'l':
2235 case 'L':
2236 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
2237 j->low_pri_io = value;
2238 found_key = true;
2239 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2240 j->only_once = value;
2241 found_key = true;
2242 }
2243 break;
2244 case 'm':
2245 case 'M':
2246 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2247 j->internal_exc_handler = value;
2248 found_key = true;
2249 } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2250 j->multiple_instances = value;
2251 found_key = true;
2252 }
2253 break;
2254 case 'i':
2255 case 'I':
2256 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2257 if (getuid() != 0) {
2258 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2259 return;
2260 }
2261 j->no_init_groups = !value;
2262 found_key = true;
2263 } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
2264 j->ignore_pg_at_shutdown = value;
2265 found_key = true;
2266 }
2267 break;
2268 case 'r':
2269 case 'R':
2270 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2271 if (value) {
2272 // We don't want value == false to change j->start_pending
2273 j->start_pending = true;
2274 }
2275 found_key = true;
2276 }
2277 break;
2278 case 'e':
2279 case 'E':
2280 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
2281 j->globargv = value;
2282 found_key = true;
2283 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
2284 j->enable_transactions = value;
2285 found_key = true;
2286 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2287 j->debug_before_kill = value;
2288 found_key = true;
2289 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
2290 #if TARGET_OS_EMBEDDED
2291 if (!_launchd_embedded_god) {
2292 if ((j->embedded_god = value)) {
2293 _launchd_embedded_god = j;
2294 }
2295 } else {
2296 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2297 }
2298 #else
2299 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2300 #endif
2301 found_key = true;
2302 } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
2303 if (!_launchd_event_monitor) {
2304 j->event_monitor = value;
2305 if (value) {
2306 _launchd_event_monitor = j;
2307 }
2308 } else {
2309 job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility from: %s", _launchd_event_monitor->label);
2310 }
2311 found_key = true;
2312 }
2313 break;
2314 case 'w':
2315 case 'W':
2316 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
2317 j->wait4debugger = value;
2318 found_key = true;
2319 }
2320 break;
2321 case 'x':
2322 case 'X':
2323 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
2324 if (pid1_magic) {
2325 if (_launchd_xpc_bootstrapper) {
2326 job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _launchd_xpc_bootstrapper->label);
2327 } else {
2328 _launchd_xpc_bootstrapper = j;
2329 j->xpc_bootstrapper = value;
2330 }
2331 } else {
2332 job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
2333 }
2334 found_key = true;
2335 }
2336 break;
2337 default:
2338 break;
2339 }
2340
2341 if (unlikely(!found_key)) {
2342 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2343 }
2344 }
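/* The keys above arrive from a job's launchd.plist via job_import_keys().
 * A minimal illustrative mapping:
 *
 *	<key>KeepAlive</key>
 *	<true/>
 *
 * lands in the 'k' case above and leaves j->ondemand == false; that is,
 * KeepAlive is stored inverted, as the on-demand flag.
 */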
2345
2346 void
2347 job_import_string(job_t j, const char *key, const char *value)
2348 {
2349 char **where2put = NULL;
2350
2351 switch (key[0]) {
2352 case 'm':
2353 case 'M':
2354 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2355 where2put = &j->alt_exc_handler;
2356 }
2357 break;
2358 case 'p':
2359 case 'P':
2360 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
2361 return;
2362 } else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0) {
2363 if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
2364 #if !TARGET_OS_EMBEDDED
2365 j->pstype = POSIX_SPAWN_OSX_TALAPP_START;
2366 #endif
2367 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_WIDGET) == 0) {
2368 #if !TARGET_OS_EMBEDDED
2369 j->pstype = POSIX_SPAWN_OSX_DBCLIENT_START;
2370 #endif
2371 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_IOSAPP) == 0) {
2372 #if TARGET_OS_EMBEDDED
2373 j->pstype = POSIX_SPAWN_IOS_APP_START;
2374 #endif
2375 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_INTERACTIVE) == 0) {
2376 #if TARGET_OS_EMBEDDED
2377 j->pstype = POSIX_SPAWN_IOS_INTERACTIVE;
2378 #endif
2379 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_BACKGROUND) == 0) {
2380 #if TARGET_OS_EMBEDDED
2381 j->pstype = POSIX_SPAWN_IOS_APPLE_DAEMON_START;
2382 #endif
2383 } else if (strcasecmp(value, "Adaptive") == 0) {
2384 // Hack.
2385 } else {
2386 job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
2387 }
2388 return;
2389 }
2390 break;
2391 case 'l':
2392 case 'L':
2393 if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
2394 return;
2395 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2396 return;
2397 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2398 return;
2399 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2400 return;
2401 }
2402 break;
2403 case 'r':
2404 case 'R':
2405 if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
2406 if (getuid() != 0) {
2407 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2408 return;
2409 }
2410 where2put = &j->rootdir;
2411 }
2412 break;
2413 case 'w':
2414 case 'W':
2415 if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
2416 where2put = &j->workingdir;
2417 }
2418 break;
2419 case 'u':
2420 case 'U':
2421 if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
2422 if (getuid() != 0) {
2423 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2424 return;
2425 } else if (strcmp(value, "root") == 0) {
2426 return;
2427 }
2428 where2put = &j->username;
2429 }
2430 break;
2431 case 'g':
2432 case 'G':
2433 if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
2434 if (getuid() != 0) {
2435 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2436 return;
2437 } else if (strcmp(value, "wheel") == 0) {
2438 return;
2439 }
2440 where2put = &j->groupname;
2441 }
2442 break;
2443 case 's':
2444 case 'S':
2445 if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
2446 where2put = &j->stdoutpath;
2447 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
2448 where2put = &j->stderrpath;
2449 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
2450 where2put = &j->stdinpath;
2451 j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
2452 if (job_assumes_zero_p(j, j->stdin_fd) != -1) {
2453 // open() should not block, but regular IO by the job should
2454 (void)job_assumes_zero_p(j, fcntl(j->stdin_fd, F_SETFL, 0));
2455 // XXX -- EV_CLEAR should make named pipes happy?
2456 (void)job_assumes_zero_p(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j));
2457 } else {
2458 j->stdin_fd = 0;
2459 }
2460 #if HAVE_SANDBOX
2461 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
2462 where2put = &j->seatbelt_profile;
2463 #endif
2464 }
2465 break;
2466 case 'X':
2467 case 'x':
2468 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
2469 return;
2470 }
2471 break;
2472 default:
2473 job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
2474 break;
2475 }
2476
2477 if (likely(where2put)) {
2478 if (!(*where2put = strdup(value))) {
2479 (void)job_assumes_zero(j, errno);
2480 }
2481 } else {
2482 // See rdar://problem/5496612. These two are okay.
2483 if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) == 0
2484 || strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) == 0) {
2485 job_log(j, LOG_APPLEONLY, "This key is no longer relevant and should be removed: %s", key);
2486 } else {
2487 job_log(j, LOG_WARNING, "Unknown key: %s", key);
2488 }
2489 }
2490 }
2491
2492 void
2493 job_import_integer(job_t j, const char *key, long long value)
2494 {
2495 switch (key[0]) {
2496 case 'e':
2497 case 'E':
2498 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
2499 if (unlikely(value < 0)) {
2500 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2501 } else if (unlikely(value > UINT32_MAX)) {
2502 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2503 } else {
2504 j->exit_timeout = (typeof(j->exit_timeout)) value;
2505 }
2506 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
2507 j->main_thread_priority = value;
2508 }
2509 break;
2510 case 'j':
2511 case 'J':
2512 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
2513 job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2514
2515 launch_data_t pri = launch_data_new_integer(value);
2516 if (job_assumes(j, pri != NULL)) {
2517 jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2518 launch_data_free(pri);
2519 }
2520 }
break;
2521 case 'n':
2522 case 'N':
2523 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
2524 if (unlikely(value < PRIO_MIN)) {
2525 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2526 } else if (unlikely(value > PRIO_MAX)) {
2527 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2528 } else {
2529 j->nice = (typeof(j->nice)) value;
2530 j->setnice = true;
2531 }
2532 }
2533 break;
2534 case 't':
2535 case 'T':
2536 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
2537 if (unlikely(value < 0)) {
2538 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2539 } else if (unlikely(value > UINT32_MAX)) {
2540 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2541 } else {
2542 j->timeout = (typeof(j->timeout)) value;
2543 }
2544 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2545 if (value < 0) {
2546 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2547 } else if (value > UINT32_MAX) {
2548 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2549 } else {
2550 j->min_run_time = (typeof(j->min_run_time)) value;
2551 }
2552 }
2553 break;
2554 case 'u':
2555 case 'U':
2556 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2557 j->mask = value;
2558 j->setmask = true;
2559 }
2560 break;
2561 case 's':
2562 case 'S':
2563 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
2564 if (unlikely(value <= 0)) {
2565 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2566 } else if (unlikely(value > UINT32_MAX)) {
2567 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2568 } else {
2569 runtime_add_weak_ref();
2570 j->start_interval = (typeof(j->start_interval)) value;
2571
2572 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
2573 }
2574 #if HAVE_SANDBOX
2575 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2576 j->seatbelt_flags = value;
2577 #endif
2578 }
2579
2580 break;
2581 default:
2582 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2583 break;
2584 }
2585 }
2586
2587 void
2588 job_import_opaque(job_t j __attribute__((unused)), const char *key, launch_data_t value __attribute__((unused)))
2589 {
2590 switch (key[0]) {
2591 case 'q':
2592 case 'Q':
2593 #if HAVE_QUARANTINE
2594 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2595 size_t tmpsz = launch_data_get_opaque_size(value);
2596
2597 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2598 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2599 j->quarantine_data_sz = tmpsz;
2600 }
2601 }
2602 #endif
break;
2603 case 's':
2604 case 'S':
2605 if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
2606 size_t tmpsz = launch_data_get_opaque_size(value);
2607 if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
2608 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2609 }
2610 }
2611 break;
2612 default:
2613 break;
2614 }
2615 }
2616
2617 static void
2618 policy_setup(launch_data_t obj, const char *key, void *context)
2619 {
2620 job_t j = context;
2621 bool found_key = false;
2622
2623 switch (key[0]) {
2624 case 'd':
2625 case 'D':
2626 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2627 j->deny_job_creation = launch_data_get_bool(obj);
2628 found_key = true;
2629 }
2630 break;
2631 default:
2632 break;
2633 }
2634
2635 if (unlikely(!found_key)) {
2636 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2637 }
2638 }
2639
2640 void
2641 job_import_dictionary(job_t j, const char *key, launch_data_t value)
2642 {
2643 launch_data_t tmp;
2644
2645 switch (key[0]) {
2646 case 'p':
2647 case 'P':
2648 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2649 launch_data_dict_iterate(value, policy_setup, j);
2650 }
2651 break;
2652 case 'k':
2653 case 'K':
2654 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2655 launch_data_dict_iterate(value, semaphoreitem_setup, j);
2656 }
2657 break;
2658 case 'i':
2659 case 'I':
2660 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2661 j->inetcompat = true;
2662 j->abandon_pg = true;
2663 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2664 j->inetcompat_wait = launch_data_get_bool(tmp);
2665 }
2666 }
2667 break;
2668 case 'j':
2669 case 'J':
2670 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
2671 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2672 }
break;
2673 case 'e':
2674 case 'E':
2675 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2676 launch_data_dict_iterate(value, envitem_setup, j);
2677 }
2678 break;
2679 case 'u':
2680 case 'U':
2681 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2682 j->importing_global_env = true;
2683 launch_data_dict_iterate(value, envitem_setup, j);
2684 j->importing_global_env = false;
2685 }
2686 break;
2687 case 's':
2688 case 'S':
2689 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2690 launch_data_dict_iterate(value, socketgroup_setup, j);
2691 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2692 calendarinterval_new_from_obj(j, value);
2693 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2694 launch_data_dict_iterate(value, limititem_setup, j);
2695 #if HAVE_SANDBOX
2696 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2697 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2698 #endif
2699 }
2700 break;
2701 case 'h':
2702 case 'H':
2703 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2704 j->importing_hard_limits = true;
2705 launch_data_dict_iterate(value, limititem_setup, j);
2706 j->importing_hard_limits = false;
2707 }
2708 break;
2709 case 'm':
2710 case 'M':
2711 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2712 launch_data_dict_iterate(value, machservice_setup, j);
2713 }
2714 break;
2715 case 'l':
2716 case 'L':
2717 if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2718 launch_data_dict_iterate(value, eventsystem_setup, j);
2719 } else {
2720 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2721 return;
2722 }
2723 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2724 return;
2725 }
2726 }
2727 break;
2728 default:
2729 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2730 break;
2731 }
2732 }
2733
2734 void
2735 job_import_array(job_t j, const char *key, launch_data_t value)
2736 {
2737 size_t i, value_cnt = launch_data_array_get_count(value);
2738
2739 switch (key[0]) {
2740 case 'p':
2741 case 'P':
2742 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
2743 return;
2744 }
2745 break;
2746 case 'l':
2747 case 'L':
2748 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2749 return;
2750 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2751 return;
2752 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2753 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2754 return;
2755 }
2756 break;
2757 case 'b':
2758 case 'B':
2759 if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
2760 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
2761 j->j_binpref_cnt = value_cnt;
2762 for (i = 0; i < value_cnt; i++) {
2763 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
2764 }
2765 }
2766 }
2767 break;
2768 case 's':
2769 case 'S':
2770 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2771 for (i = 0; i < value_cnt; i++) {
2772 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
2773 }
2774 }
2775 break;
2776 default:
2777 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
2778 break;
2779 }
2780 }
2781
2782 void
2783 job_import_keys(launch_data_t obj, const char *key, void *context)
2784 {
2785 job_t j = context;
2786 launch_data_type_t kind;
2787
2788 if (!obj) {
2789 launchd_syslog(LOG_ERR, "NULL object given to job_import_keys().");
2790 return;
2791 }
2792
2793 kind = launch_data_get_type(obj);
2794
2795 switch (kind) {
2796 case LAUNCH_DATA_BOOL:
2797 job_import_bool(j, key, launch_data_get_bool(obj));
2798 break;
2799 case LAUNCH_DATA_STRING:
2800 job_import_string(j, key, launch_data_get_string(obj));
2801 break;
2802 case LAUNCH_DATA_INTEGER:
2803 job_import_integer(j, key, launch_data_get_integer(obj));
2804 break;
2805 case LAUNCH_DATA_DICTIONARY:
2806 job_import_dictionary(j, key, obj);
2807 break;
2808 case LAUNCH_DATA_ARRAY:
2809 job_import_array(j, key, obj);
2810 break;
2811 case LAUNCH_DATA_OPAQUE:
2812 job_import_opaque(j, key, obj);
2813 break;
2814 default:
2815 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
2816 break;
2817 }
2818 }
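/* job_import_keys() is the launch_data_dict_iterate() callback used by
 * jobmgr_import2() below. A sketch of the traversal, assuming pload is a
 * parsed plist dictionary:
 *
 *	launch_data_dict_iterate(pload, job_import_keys, j);
 *	// each value is routed by launch_data_get_type() to the matching
 *	// job_import_{bool,string,integer,dictionary,array,opaque} helper
 */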
2819
2820 job_t
2821 jobmgr_import2(jobmgr_t jm, launch_data_t pload)
2822 {
2823 launch_data_t tmp, ldpa;
2824 const char *label = NULL, *prog = NULL;
2825 const char **argv = NULL;
2826 job_t j;
2827
2828 if (!jobmgr_assumes(jm, pload != NULL)) {
2829 errno = EINVAL;
2830 return NULL;
2831 }
2832
2833 if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
2834 errno = EINVAL;
2835 return NULL;
2836 }
2837
2838 if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
2839 errno = EINVAL;
2840 return NULL;
2841 }
2842
2843 if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
2844 errno = EINVAL;
2845 return NULL;
2846 }
2847
2848 if (unlikely(!(label = launch_data_get_string(tmp)))) {
2849 errno = EINVAL;
2850 return NULL;
2851 }
2852
2853 #if TARGET_OS_EMBEDDED
2854 if (unlikely(launchd_embedded_handofgod && _launchd_embedded_god)) {
2855 if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
2856 errno = EPERM;
2857 return NULL;
2858 }
2859
2860 const char *username = NULL;
2861 if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
2862 username = launch_data_get_string(tmp);
2863 } else {
2864 errno = EPERM;
2865 return NULL;
2866 }
2867
2868 if (!jobmgr_assumes(jm, _launchd_embedded_god->username != NULL && username != NULL)) {
2869 errno = EPERM;
2870 return NULL;
2871 }
2872
2873 if (unlikely(strcmp(_launchd_embedded_god->username, username) != 0)) {
2874 errno = EPERM;
2875 return NULL;
2876 }
2877 } else if (launchd_embedded_handofgod) {
2878 errno = EINVAL;
2879 return NULL;
2880 }
2881 #endif
2882
2883 if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM))
2884 && (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
2885 prog = launch_data_get_string(tmp);
2886 }
2887
2888 int argc = 0;
2889 if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
2890 size_t i, c;
2891
2892 if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
2893 errno = EINVAL;
2894 return NULL;
2895 }
2896
2897 c = launch_data_array_get_count(ldpa);
2898
2899 argv = alloca((c + 1) * sizeof(char *));
2900
2901 for (i = 0; i < c; i++) {
2902 tmp = launch_data_array_get_index(ldpa, i);
2903
2904 if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
2905 errno = EINVAL;
2906 return NULL;
2907 }
2908
2909 argv[i] = launch_data_get_string(tmp);
2910 }
2911
2912 argv[i] = NULL;
2913 argc = i;
2914 }
2915
2916 if (!prog && argc == 0) {
2917 jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
2918 errno = EINVAL;
2919 return NULL;
2920 }
2921
2922 /* Find the requested session. You cannot load services into XPC domains in
2923 * this manner.
2924 */
2925 launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2926 if (session) {
2927 jobmgr_t jmt = NULL;
2928 if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
2929 jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
2930 if (!jmt) {
2931 jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
2932 } else {
2933 jm = jmt;
2934 }
2935 } else {
2936 jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
2937 }
2938
2939 if (!jmt) {
2940 errno = EINVAL;
2941 return NULL;
2942 }
2943 }
2944
2945 /* For legacy reasons, we have a global hash of all labels in all job
2946 * managers. So rather than make it a global, we store it in the root job
2947 * manager. But for an XPC domain, we store a local hash of all services in
2948 * the domain.
2949 */
2950 jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
2951 if (unlikely((j = job_find(where2look, label)) != NULL)) {
2952 if (jm->xpc_singleton) {
2953 /* There can be (and probably will be) multiple attempts to import the
2954 * same XPC service from the same framework. This is okay. It's
2955 * treated as a singleton, so just return the existing one so that
2956 * it may be aliased into the requesting process' XPC domain.
2957 */
2958 errno = EEXIST;
2959 return j;
2960 } else {
2961 /* If we're not a global XPC domain, then it's an error to try
2962 * importing the same job/service multiple times.
2963 */
2964 errno = EEXIST;
2965 return NULL;
2966 }
2967 } else if (unlikely(!jobmgr_label_test(where2look, label))) {
2968 errno = EINVAL;
2969 return NULL;
2970 }
2971 jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);
2972
2973 if (likely(j = job_new(jm, label, prog, argv))) {
2974 launch_data_dict_iterate(pload, job_import_keys, j);
2975 if (!uuid_is_null(j->expected_audit_uuid)) {
2976 uuid_string_t uuid_str;
2977 uuid_unparse(j->expected_audit_uuid, uuid_str);
2978 job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
2979 LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
2980 errno = ENEEDAUTH;
2981 } else {
2982 job_log(j, LOG_DEBUG, "No security session specified.");
2983 j->asport = MACH_PORT_NULL;
2984 }
2985
2986 if (pid1_magic && !jm->parentmgr) {
2987 /* Workaround reentrancy in CF. We don't make this a global variable
2988 * because we don't want per-user launchds to inherit it. So we
2989 * just set it for every job that we import into the System session.
2990 *
2991 * See <rdar://problem/9468837>.
2992 */
2993 envitem_new(j, "__CF_USER_TEXT_ENCODING", "0x0:0:0", false);
2994 }
2995
2996 if (j->event_monitor) {
2997 eventsystem_ping();
2998 }
2999
3000 #if TARGET_OS_EMBEDDED
3001 /* SpringBoard runs at Interactive priority.
3002 *
3003 * See <rdar://problem/9539873>.
3004 */
3005 if (j->embedded_god) {
3006 j->pstype = POSIX_SPAWN_IOS_INTERACTIVE;
3007 }
3008 #endif
3009 }
3010
3011 return j;
3012 }
3013
3014 bool
3015 jobmgr_label_test(jobmgr_t jm, const char *str)
3016 {
3017 char *endstr = NULL;
3018 const char *ptr;
3019
3020 if (str[0] == '\0') {
3021 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
3022 return false;
3023 }
3024
3025 for (ptr = str; *ptr; ptr++) {
3026 if (iscntrl(*ptr)) {
3027 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
3028 return false;
3029 }
3030 }
3031
3032 strtoll(str, &endstr, 0);
3033
3034 if (str != endstr) {
3035 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to begin with numbers: %s", str);
3036 return false;
3037 }
3038
3039 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0)
3040 || (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
3041 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
3042 return false;
3043 }
3044
3045 return true;
3046 }
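/* Illustrative labels this test rejects:
 *
 *	""			empty label
 *	"9to5.example"		begins with a number (strtoll() consumes it)
 *	"com.apple.launchd.x"	reserved prefix
 *	"foo\tbar"		contains an ASCII control character
 */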
3047
3048 job_t
3049 job_find(jobmgr_t jm, const char *label)
3050 {
3051 job_t ji;
3052
3053 if (!jm) {
3054 jm = root_jobmgr;
3055 }
3056
3057 LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
3058 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
3059 // 5351245 and 5488633 respectively
3060 continue;
3061 }
3062
3063 if (strcmp(ji->label, label) == 0) {
3064 return ji;
3065 }
3066 }
3067
3068 errno = ESRCH;
3069 return NULL;
3070 }
3071
3072 // Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid().
3073 job_t
3074 jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
3075 {
3076 job_t ji = NULL;
3077 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3078 if (ji->p == p && (!ji->anonymous || anon_okay)) {
3079 return ji;
3080 }
3081 }
3082
3083 jobmgr_t jmi = NULL;
3084 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3085 if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
3086 break;
3087 }
3088 }
3089
3090 return ji;
3091 }
3092
3093 job_t
3094 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3095 {
3096 job_t ji;
3097
3098 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3099 if (ji->p == p) {
3100 return ji;
3101 }
3102 }
3103
3104 return create_anon ? job_new_anonymous(jm, p) : NULL;
3105 }
3106
3107 job_t
3108 job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
3109 {
3110 jobmgr_t jmi;
3111 job_t ji;
3112
3113 if (jm->jm_port == mport) {
3114 return jobmgr_find_by_pid(jm, upid, true);
3115 }
3116
3117 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3118 job_t jr;
3119
3120 if ((jr = job_mig_intran2(jmi, mport, upid))) {
3121 return jr;
3122 }
3123 }
3124
3125 LIST_FOREACH(ji, &jm->jobs, sle) {
3126 if (ji->j_port == mport) {
3127 return ji;
3128 }
3129 }
3130
3131 return NULL;
3132 }
3133
3134 job_t
3135 job_mig_intran(mach_port_t p)
3136 {
3137 struct ldcred *ldc = runtime_get_caller_creds();
3138 job_t jr;
3139
3140 jr = job_mig_intran2(root_jobmgr, p, ldc->pid);
3141
3142 if (!jr) {
3143 struct proc_bsdshortinfo proc;
3144 if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3145 if (errno != ESRCH) {
3146 (void)jobmgr_assumes_zero(root_jobmgr, errno);
3147 } else {
// proc was not filled in on failure, so pbsi_comm is unavailable here.
3148 jobmgr_log(root_jobmgr, LOG_ERR, "PID %i disappeared out from under us (UID: %u EUID: %u)", ldc->pid, ldc->uid, ldc->euid);
3149 }
3150 }
3151 }
3152
3153 return jr;
3154 }
3155
3156 job_t
3157 job_find_by_service_port(mach_port_t p)
3158 {
3159 struct machservice *ms;
3160
3161 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
3162 if (ms->recv && (ms->port == p)) {
3163 return ms->job;
3164 }
3165 }
3166
3167 return NULL;
3168 }
3169
3170 void
3171 job_mig_destructor(job_t j)
3172 {
3173 /* The job can go invalid before this point.
3174 *
3175 * <rdar://problem/5477111>
3176 */
3177 if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
3178 job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
3179 job_remove(j);
3180 }
3181
3182 workaround_5477111 = NULL;
3183
3184 calendarinterval_sanity_check();
3185 }
3186
3187 void
3188 job_export_all2(jobmgr_t jm, launch_data_t where)
3189 {
3190 jobmgr_t jmi;
3191 job_t ji;
3192
3193 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3194 job_export_all2(jmi, where);
3195 }
3196
3197 LIST_FOREACH(ji, &jm->jobs, sle) {
3198 launch_data_t tmp;
3199
3200 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3201 launch_data_dict_insert(where, tmp, ji->label);
3202 }
3203 }
3204 }
3205
3206 launch_data_t
3207 job_export_all(void)
3208 {
3209 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3210
3211 if (resp != NULL) {
3212 job_export_all2(root_jobmgr, resp);
3213 } else {
3214 (void)osx_assumes_zero(errno);
3215 }
3216
3217 return resp;
3218 }
3219
3220 void
3221 job_log_stray_pg(job_t j)
3222 {
3223 pid_t *pids = NULL;
3224 size_t len = sizeof(pid_t) * get_kern_max_proc();
3225 int i = 0, kp_cnt = 0;
3226
3227 if (!launchd_apple_internal) {
3228 return;
3229 }
3230
3231 runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);
3232
3233 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
3234 return;
3235 }
3236 if (job_assumes_zero_p(j, (kp_cnt = proc_listpgrppids(j->p, pids, len))) == -1) {
3237 goto out;
3238 }
3239
3240 for (i = 0; i < kp_cnt; i++) {
3241 pid_t p_i = pids[i];
3242 if (p_i == j->p) {
3243 continue;
3244 } else if (p_i == 0 || p_i == 1) {
3245 continue;
3246 }
3247
3248 struct proc_bsdshortinfo proc;
3249 if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3250 if (errno != ESRCH) {
3251 (void)job_assumes_zero(j, errno);
3252 }
3253 continue;
3254 }
3255
3256 pid_t pp_i = proc.pbsi_ppid;
3257 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
3258 const char *n = proc.pbsi_comm;
3259
3260 job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
3261 }
3262
3263 out:
3264 free(pids);
3265 }
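/* The buffer above is sized for the kernel's maximum number of processes,
 * so a single proc_listpgrppids() call cannot overflow it. A leaner
 * (hypothetical) variant would query the needed size first, at the cost of
 * a race between the two calls.
 */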
3266
3267 void
3268 job_reap(job_t j)
3269 {
3270 struct rusage ru;
3271 int status;
3272
3273 bool is_system_bootstrapper = ((j->is_bootstrapper && pid1_magic) && !j->mgr->parentmgr);
3274
3275 job_log(j, LOG_DEBUG, "Reaping");
3276
3277 if (unlikely(j->weird_bootstrap)) {
3278 int64_t junk = 0;
3279 job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
3280 }
3281
3282 if (j->fork_fd) {
3283 (void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
3284 j->fork_fd = 0;
3285 }
3286
3287 if (j->anonymous) {
3288 status = 0;
3289 memset(&ru, 0, sizeof(ru));
3290 } else {
3291 uint64_t rt = runtime_get_nanoseconds_since(j->start_time);
3292 j->trt += rt;
3293
3294 job_log(j, LOG_PERF, "Last instance wall time: %06f", (double)rt / (double)NSEC_PER_SEC);
3295 j->nruns++;
3296
3297 /* The job is dead. While the PID/PGID is still known to be valid, try
3298 * to kill abandoned descendant processes.
3299 */
3300 job_log_stray_pg(j);
3301 if (!j->abandon_pg) {
3302 if (unlikely(killpg2(j->p, SIGTERM) == -1 && errno != ESRCH)) {
3303 job_log(j, LOG_APPLEONLY, "Bug: 5487498");
3304 }
3305 }
3306
3307 /* We have to work around one of two kernel bugs here. ptrace(2) may
3308 * have abducted the child away from us and reparented it to the tracing
3309 * process. If the process then exits, we still get NOTE_EXIT, but we
3310 * cannot reap it because the kernel may not have restored the true
3311 * parent/child relationship in time.
3312 *
3313 * See <rdar://problem/5020256>.
3314 *
3315 * The other bug is if the shutdown monitor has suspended a task and not
3316 * resumed it before exiting. In this case, the kernel will not clean up
3317 * after the shutdown monitor. It will, instead, leave the task
3318 * suspended and not process any pending signals on the event loop
3319 * for the task.
3320 *
3321 * There are a variety of other kernel bugs that could prevent a process
3322 * from exiting, usually having to do with faulty hardware or talking to
3323 * misbehaving drivers that mark a thread as uninterruptible and
3324 * deadlock/hang before unmarking it as such. So we have to work around
3325 * that too.
3326 *
3327 * See <rdar://problem/9284889&9359725>.
3328 */
3329 int r = -1;
3330 if (j->workaround9359725) {
3331 job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
3332 status = W_EXITCODE(-1, SIGSEGV);
3333 memset(&ru, 0, sizeof(ru));
3334 } else if ((r = wait4(j->p, &status, 0, &ru)) == -1) {
3335 job_log(j, LOG_NOTICE, "Assuming job exited: <rdar://problem/5020256>: %d: %s", errno, strerror(errno));
3336 status = W_EXITCODE(-1, SIGSEGV);
3337 memset(&ru, 0, sizeof(ru));
3338 }
3339
3340 if (launchd_log_perf && r != -1) {
3341 job_log(j, LOG_PERF, "Last instance user time: %ld.%06u", ru.ru_utime.tv_sec, ru.ru_utime.tv_usec);
3342 job_log(j, LOG_PERF, "Last instance system time: %ld.%06u", ru.ru_stime.tv_sec, ru.ru_stime.tv_usec);
3343 job_log(j, LOG_PERF, "Last instance maximum resident size: %lu", ru.ru_maxrss);
3344 job_log(j, LOG_PERF, "Last instance integral shared memory size: %lu", ru.ru_ixrss);
3345 job_log(j, LOG_PERF, "Last instance integral unshared data size: %lu", ru.ru_idrss);
3346 job_log(j, LOG_PERF, "Last instance integral unshared stack size: %lu", ru.ru_isrss);
3347 job_log(j, LOG_PERF, "Last instance page reclaims: %lu", ru.ru_minflt);
3348 job_log(j, LOG_PERF, "Last instance page faults: %lu", ru.ru_majflt);
3349 job_log(j, LOG_PERF, "Last instance swaps: %lu", ru.ru_nswap);
3350 job_log(j, LOG_PERF, "Last instance input ops: %lu", ru.ru_inblock);
3351 job_log(j, LOG_PERF, "Last instance output ops: %lu", ru.ru_oublock);
3352 job_log(j, LOG_PERF, "Last instance messages sent: %lu", ru.ru_msgsnd);
3353 job_log(j, LOG_PERF, "Last instance messages received: %lu", ru.ru_msgrcv);
3354 job_log(j, LOG_PERF, "Last instance signals received: %lu", ru.ru_nsignals);
3355 job_log(j, LOG_PERF, "Last instance voluntary context switches: %lu", ru.ru_nvcsw);
3356 job_log(j, LOG_PERF, "Last instance involuntary context switches: %lu", ru.ru_nivcsw);
3357 }
3358 }
3359
3360 if (j->exit_timeout) {
3361 (void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
3362 }
3363
3364 LIST_REMOVE(j, pid_hash_sle);
3365
3366 if (j->sent_signal_time) {
3367 uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);
3368
3369 td_sec = td / NSEC_PER_SEC;
3370 td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;
3371
3372 job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
3373 }
3374
3375 timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
3376 timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
3377 if (j->ru.ru_maxrss < ru.ru_maxrss) {
3378 j->ru.ru_maxrss = ru.ru_maxrss;
3379 }
3380
3381 j->ru.ru_ixrss += ru.ru_ixrss;
3382 j->ru.ru_idrss += ru.ru_idrss;
3383 j->ru.ru_isrss += ru.ru_isrss;
3384 j->ru.ru_minflt += ru.ru_minflt;
3385 j->ru.ru_majflt += ru.ru_majflt;
3386 j->ru.ru_nswap += ru.ru_nswap;
3387 j->ru.ru_inblock += ru.ru_inblock;
3388 j->ru.ru_oublock += ru.ru_oublock;
3389 j->ru.ru_msgsnd += ru.ru_msgsnd;
3390 j->ru.ru_msgrcv += ru.ru_msgrcv;
3391 j->ru.ru_nsignals += ru.ru_nsignals;
3392 j->ru.ru_nvcsw += ru.ru_nvcsw;
3393 j->ru.ru_nivcsw += ru.ru_nivcsw;
3394 job_log_perf_statistics(j);
3395
3396 int exit_status = WEXITSTATUS(status);
3397 if (WIFEXITED(status) && exit_status != 0) {
3398 if (!j->did_exec && _launchd_support_system) {
3399 xpc_object_t event = NULL;
3400 switch (exit_status) {
3401 case ENOENT:
3402 case ENOTDIR:
3403 case ESRCH:
3404 job_log(j, LOG_NOTICE, "Job failed to exec(3). Setting up event to tell us when to try again: %d: %s", exit_status, strerror(exit_status));
3405 event = xpc_dictionary_create(NULL, NULL, 0);
3406 xpc_dictionary_set_string(event, "Executable", j->prog ? j->prog : j->argv[0]);
3407 if (j->mach_uid) {
3408 xpc_dictionary_set_uint64(event, "UID", j->mach_uid);
3409 } else if (j->username) {
3410 xpc_dictionary_set_string(event, "UserName", j->username);
3411 }
3412
3413 if (j->groupname) {
3414 xpc_dictionary_set_string(event, "GroupName", j->groupname);
3415 }
3416
3417 (void)externalevent_new(j, _launchd_support_system, j->label, event);
3418 xpc_release(event);
3419
3420 j->waiting4ok = true;
break;
3421 default:
3422 job_log(j, LOG_NOTICE, "Job failed to exec(3) for weird reason: %d", exit_status);
3423 }
3424 } else {
3425 int level = LOG_INFO;
3426 if (exit_status != 0) {
3427 level = LOG_ERR;
3428 }
3429
3430 job_log(j, level, "Exited with code: %d", exit_status);
3431 }
3432 }
3433
3434 if (WIFSIGNALED(status)) {
3435 int s = WTERMSIG(status);
3436 if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
3437 job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
3438 } else if (!j->stopped && !j->clean_kill) {
3439 switch (s) {
3440 // Signals which indicate a crash.
3441 case SIGILL:
3442 case SIGABRT:
3443 case SIGFPE:
3444 case SIGBUS:
3445 case SIGSEGV:
3446 case SIGSYS:
3447 /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
3448 * SIGTRAP, assume that it's a crash.
3449 */
3450 case SIGTRAP:
3451 j->crashed = true;
3452 job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
3453 break;
3454 default:
3455 job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
3456 break;
3457 }
3458
3459 if (is_system_bootstrapper && j->crashed) {
3460 job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
3461 }
3462 }
3463 }
3464
3465 j->reaped = true;
3466
3467 struct machservice *msi = NULL;
3468 if (j->crashed || !(j->did_exec || j->anonymous)) {
3469 SLIST_FOREACH(msi, &j->machservices, sle) {
3470 if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
3471 machservice_drain_port(msi);
3472 }
3473
3474 if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
3475 machservice_resetport(j, msi);
3476 }
3477 }
3478 }
3479
3480 /* HACK: Essentially duplicating the logic directly above. But this has
3481 * gotten really hairy, and I don't want to try consolidating it right now.
3482 */
3483 if (j->xpc_service && !j->xpcproxy_did_exec) {
3484 job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
3485 SLIST_FOREACH(msi, &j->machservices, sle) {
3486 /* Drain the messages but do not reset the port. If xpcproxy could
3487 * not exec(3), then we don't want to continue trying, since there
3488 * is very likely a serious configuration error with the service.
3489 *
3490 * The above comment is weird. I originally said we should drain
3491 * messages but not reset the port, but that's exactly what we do
3492 * below, and I'm not sure which is the mistake, the comment or the
3493 * actual behavior.
3494 *
3495 * Since it's always been this way, I'll assume that the comment is
3496 * incorrect, but I'll leave it in place just to remind myself to
3497 * actually look into it at some point.
3498 *
3499 * <rdar://problem/8986802>
3500 */
3501 if (msi->upfront && job_assumes(j, !msi->isActive)) {
3502 machservice_resetport(j, msi);
3503 }
3504 }
3505 }
3506
3507 struct suspended_peruser *spi = NULL;
3508 while ((spi = LIST_FIRST(&j->suspended_perusers))) {
3509 job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
3510 spi->j->peruser_suspend_count--;
3511 if (spi->j->peruser_suspend_count == 0) {
3512 job_dispatch(spi->j, false);
3513 }
3514 LIST_REMOVE(spi, sle);
3515 free(spi);
3516 }
3517
3518 j->last_exit_status = status;
3519
3520 if (j->exit_status_dest) {
3521 errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
3522 if (errno && errno != MACH_SEND_INVALID_DEST) {
3523 (void)job_assumes_zero(j, errno);
3524 }
3525
3526 j->exit_status_dest = MACH_PORT_NULL;
3527 }
3528
3529 if (j->spawn_reply_port) {
3530 /* If the child never called exec(3), we must send a spawn() reply so
3531 * that the requestor can get exit status from it. If we fail to send
3532 * the reply for some reason, we have to deallocate the exit status port
3533 * ourselves.
3534 */
3535 kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3536 if (kr) {
3537 if (kr != MACH_SEND_INVALID_DEST) {
3538 (void)job_assumes_zero(j, kr);
3539 }
3540
3541 (void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
3542 }
3543
3544 j->exit_status_port = MACH_PORT_NULL;
3545 j->spawn_reply_port = MACH_PORT_NULL;
3546 }
3547
3548 if (j->anonymous) {
3549 total_anon_children--;
3550 if (j->holds_ref) {
3551 job_log(j, LOG_PERF, "Anonymous job exited holding reference.");
3552 runtime_del_ref();
3553 }
3554 } else {
3555 job_log(j, LOG_PERF, "Job exited.");
3556 runtime_del_ref();
3557 total_children--;
3558 }
3559
3560 if (j->has_console) {
3561 launchd_wsp = 0;
3562 }
3563
3564 if (j->shutdown_monitor) {
3565 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
3566 _launchd_shutdown_monitor = NULL;
3567 j->shutdown_monitor = false;
3568 }
3569
3570 if (!j->anonymous) {
3571 j->mgr->normal_active_cnt--;
3572 }
3573 j->sent_signal_time = 0;
3574 j->sent_sigkill = false;
3575 j->clean_kill = false;
3576 j->event_monitor_ready2signal = false;
3577 j->p = 0;
3578 }
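/* A decoding cheat sheet for the wait4(2) status logic in job_reap() above:
 *
 *	WIFEXITED(status)   -> WEXITSTATUS(status) is the exit code
 *	WIFSIGNALED(status) -> WTERMSIG(status) is the fatal signal
 *	W_EXITCODE(ret, sig)   builds a synthetic status, used here for the
 *	                       simulated-exit kernel-bug workarounds
 */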
3579
3580 void
3581 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3582 {
3583 jobmgr_t jmi, jmn;
3584 job_t ji, jn;
3585
3586 if (jm->shutting_down) {
3587 return;
3588 }
3589
3590 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3591 jobmgr_dispatch_all(jmi, newmounthack);
3592 }
3593
3594 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3595 if (newmounthack && ji->start_on_mount) {
3596 ji->start_pending = true;
3597 }
3598
3599 job_dispatch(ji, false);
3600 }
3601 }
3602
3603 void
3604 job_dispatch_curious_jobs(job_t j)
3605 {
3606 job_t ji = NULL, jt = NULL;
3607 SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
3608 struct semaphoreitem *si = NULL;
3609 SLIST_FOREACH(si, &ji->semaphores, sle) {
3610 if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
3611 continue;
3612 }
3613
3614 if (strcmp(si->what, j->label) == 0) {
3615 job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);
3616
3617 if (!ji->removing) {
3618 job_dispatch(ji, false);
3619 } else {
3620 job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
3621 }
3622
3623 /* ji could be removed here, so don't do anything with it or its semaphores
3624 * after this point.
3625 */
3626 break;
3627 }
3628 }
3629 }
3630 }
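/* "Curious" jobs are those whose KeepAlive dictionary names another job
 * via OtherJobEnabled/OtherJobDisabled. An illustrative plist fragment
 * (com.example.other is a hypothetical label):
 *
 *	<key>KeepAlive</key>
 *	<dict>
 *		<key>OtherJobEnabled</key>
 *		<dict>
 *			<key>com.example.other</key>
 *			<true/>
 *		</dict>
 *	</dict>
 *
 * Whenever com.example.other changes state, the loop above re-dispatches
 * every job that registered such an interest.
 */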
3631
3632 job_t
3633 job_dispatch(job_t j, bool kickstart)
3634 {
3635 // Don't dispatch a job if it has no audit session set.
3636 if (!uuid_is_null(j->expected_audit_uuid)) {
3637 job_log(j, LOG_DEBUG, "Job is still awaiting its audit session UUID. Not dispatching.");
3638 return NULL;
3639 }
3640 if (j->alias) {
3641 job_log(j, LOG_DEBUG, "Job is an alias. Not dispatching.");
3642 return NULL;
3643 }
3644
3645 if (j->waiting4ok) {
3646 job_log(j, LOG_DEBUG, "Job cannot exec(3). Not dispatching.");
3647 return NULL;
3648 }
3649
3650 #if TARGET_OS_EMBEDDED
3651 if (launchd_embedded_handofgod && _launchd_embedded_god) {
3652 if (!job_assumes(j, _launchd_embedded_god->username != NULL && j->username != NULL)) {
3653 errno = EPERM;
3654 return NULL;
3655 }
3656
3657 if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
3658 errno = EPERM;
3659 return NULL;
3660 }
3661 } else if (launchd_embedded_handofgod) {
3662 errno = EINVAL;
3663 return NULL;
3664 }
3665 #endif
3666
3667 /*
3668 * The whole job removal logic needs to be consolidated. The fact that
3669 * a job can be removed from just about anywhere makes it easy to have
3670 * stale pointers left behind somewhere on the stack that might get
3671 * used after the deallocation. In particular, during job iteration.
3672 *
3673 * This is a classic example. The act of dispatching a job may delete it.
3674 */
3675 if (!job_active(j)) {
3676 if (job_useless(j)) {
3677 job_log(j, LOG_DEBUG, "Job is useless. Removing.");
3678 job_remove(j);
3679 return NULL;
3680 }
3681 if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
3682 job_log(j, LOG_DEBUG, "Per-user launchd is suspended. Not dispatching.");
3683 return NULL;
3684 }
3685
3686 if (kickstart || job_keepalive(j)) {
3687 job_log(j, LOG_DEBUG, "%starting job", kickstart ? "Kicks" : "S");
3688 job_start(j);
3689 } else {
3690 job_log(j, LOG_DEBUG, "Watching job.");
3691 job_watch(j);
3692
3693 /*
3694 * 5455720
3695 *
3696 * Path checking and monitoring is really racy right now.
3697 * We should clean this up post Leopard.
3698 */
3699 if (job_keepalive(j)) {
3700 job_start(j);
3701 }
3702 }
3703 } else {
3704 job_log(j, LOG_DEBUG, "Tried to dispatch an already active job: %s.", job_active(j));
3705 }
3706
3707 return j;
3708 }
3709
3710 void
3711 job_kill(job_t j)
3712 {
3713 if (unlikely(!j->p || j->anonymous)) {
3714 return;
3715 }
3716
3717 (void)job_assumes_zero_p(j, kill2(j->p, SIGKILL));
3718
3719 j->sent_sigkill = true;
3720 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j));
3721
3722 job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
3723 }
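/* A minimal sketch of what the kevent_mod() call above reduces to: a
 * one-shot EVFILT_TIMER that fires once, `seconds` seconds from now.
 * The kq descriptor and identifier here are hypothetical; launchd
 * routes everything through its own wrapper.
 */
static int __attribute__((unused))
example_oneshot_timer(int kq, uintptr_t ident, int seconds, void *udata)
{
	struct kevent kev;
	EV_SET(&kev, ident, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_SECONDS, seconds, udata);
	// Returns 0 on success (no events requested back), -1 on error.
	return kevent(kq, &kev, 1, NULL, 0, NULL);
}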
3724
3725 void
3726 job_open_shutdown_transaction(job_t j)
3727 {
3728 int rv = proc_set_dirty(j->p, true);
3729 if (rv != 0) {
3730 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
3731 j->dirty_at_shutdown = false;
3732 }
3733 }
3734
3735 void
3736 job_close_shutdown_transaction(job_t j)
3737 {
3738 if (j->dirty_at_shutdown) {
3739 job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
3740 (void)job_assumes_zero(j, proc_set_dirty(j->p, false));
3741 j->dirty_at_shutdown = false;
3742 }
3743 }
3744
3745 void
3746 job_log_children_without_exec(job_t j)
3747 {
3748 pid_t *pids = NULL;
3749 size_t len = sizeof(pid_t) * get_kern_max_proc();
3750 int i = 0, kp_cnt = 0;
3751
3752 if (!launchd_apple_internal || j->anonymous || j->per_user) {
3753 return;
3754 }
3755
3756 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
3757 return;
3758 }
3759 if (job_assumes_zero_p(j, (kp_cnt = proc_listchildpids(j->p, pids, len))) == -1) {
3760 goto out;
3761 }
3762
3763 for (i = 0; i < kp_cnt; i++) {
3764 struct proc_bsdshortinfo proc;
3765 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3766 if (errno != ESRCH) {
3767 (void)job_assumes_zero(j, errno);
3768 }
3769 continue;
3770 }
3771 if (proc.pbsi_flags & P_EXEC) {
3772 continue;
3773 }
3774
3775 job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
3776 }
3777
3778 out:
3779 free(pids);
3780 }
3781
3782 void
3783 job_cleanup_after_tracer(job_t j)
3784 {
3785 j->tracing_pid = 0;
3786 if (j->reap_after_trace) {
3787 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Reaping job now that attached tracer is gone.");
3788 struct kevent kev;
3789 EV_SET(&kev, j->p, 0, 0, NOTE_EXIT, 0, 0);
3790
3791 // Fake a kevent to keep our logic consistent.
3792 job_callback_proc(j, &kev);
3793
3794 /* Normally, after getting a EVFILT_PROC event, we do garbage collection
3795 * on the root job manager. To make our fakery complete, we will do garbage
3796 * collection at the beginning of the next run loop cycle (after we're done
3797 * draining the current queue of kevents).
3798 */
3799 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&root_jobmgr->reboot_flags, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, root_jobmgr));
3800 }
3801 }
3802
3803 void
3804 job_callback_proc(job_t j, struct kevent *kev)
3805 {
3806 bool program_changed = false;
3807 int fflags = kev->fflags;
3808
3809 job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
3810 log_kevent_struct(LOG_DEBUG, kev, 0);
3811
3812 if (fflags & NOTE_EXIT) {
3813 if (j->p == (pid_t)kev->ident && !j->anonymous) {
3814 /* Note that the third argument to proc_pidinfo() is a magic
3815 * argument for PROC_PIDT_SHORTBSDINFO. Specifically, passing 1
3816 * means "don't fail on a zombie PID".
3817 */
3818 struct proc_bsdshortinfo proc;
3819 if (job_assumes(j, proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0)) {
3820 if (!job_assumes(j, (pid_t)proc.pbsi_ppid == getpid())) {
3821 /* Someone has attached to the process with ptrace().
3822 * There's a race here. If we determine that we are not the
3823 * parent process and then fail to attach a kevent to the
3824 * parent PID (who is probably using ptrace()), we can take
3825 * that as an indication that the parent exited between
3826 * sysctl(3) and kevent_mod(). The reparenting of the PID
3827 * should be atomic to us, so in that case, we reap the job
3828 * as normal.
3829 *
3830 * Otherwise, we wait for the death of the parent tracer and
3831 * then reap, just as we would if a job died while we were
3832 * sampling it at shutdown.
3833 *
3834 * Note that we foolishly assume that in the process *tree*
3835 * a node cannot be its own parent. Apparently, that is not
3836 * correct. If this is the case, we forsake the process to
3837 * its own devices. Let it reap itself.
3838 */
3839 if (!job_assumes(j, proc.pbsi_ppid != kev->ident)) {
3840 job_log(j, LOG_WARNING, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
3841 return;
3842 }
3843 if (job_assumes_zero_p(j, kevent_mod(proc.pbsi_ppid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j)) != -1) {
3844 j->tracing_pid = proc.pbsi_ppid;
3845 j->reap_after_trace = true;
3846 return;
3847 }
3848 }
3849 }
3850 } else if (!j->anonymous) {
3851 if (j->tracing_pid == (pid_t)kev->ident) {
3852 job_cleanup_after_tracer(j);
3853
3854 return;
3855 } else if (j->tracing_pid && !j->reap_after_trace) {
3856 // The job exited before our sample completed.
3857 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job has exited. Will reap after tracing PID %i exits.", j->tracing_pid);
3858 j->reap_after_trace = true;
3859 return;
3860 }
3861 }
3862 }
3863
3864 if (fflags & NOTE_EXEC) {
3865 program_changed = true;
3866
3867 if (j->anonymous) {
3868 struct proc_bsdshortinfo proc;
3869 if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
3870 char newlabel[1000];
3871
3872 snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);
3873
3874 job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);
3875
3876 LIST_REMOVE(j, label_hash_sle);
3877 strcpy((char *)j->label, newlabel);
3878
3879 jobmgr_t where2put = root_jobmgr;
3880 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
3881 where2put = j->mgr;
3882 }
3883 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
3884 } else if (errno != ESRCH) {
3885 (void)job_assumes_zero(j, errno);
3886 }
3887 } else {
3888 if (j->spawn_reply_port) {
3889 errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3890 if (errno) {
3891 if (errno != MACH_SEND_INVALID_DEST) {
3892 (void)job_assumes_zero(j, errno);
3893 }
3894 (void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
3895 }
3896
3897 j->spawn_reply_port = MACH_PORT_NULL;
3898 j->exit_status_port = MACH_PORT_NULL;
3899 }
3900
3901 if (j->xpc_service && j->did_exec) {
3902 j->xpcproxy_did_exec = true;
3903 }
3904
3905 j->did_exec = true;
3906 job_log(j, LOG_DEBUG, "Program changed");
3907 }
3908 }
3909
3910 if (fflags & NOTE_FORK) {
3911 job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
3912 job_log_children_without_exec(j);
3913 }
3914
3915 if (fflags & NOTE_EXIT) {
3916 job_reap(j);
3917
3918 if (j->anonymous) {
3919 job_remove(j);
3920 j = NULL;
3921 } else {
3922 (void)job_dispatch(j, false);
3923 }
3924 }
3925 }
3926
3927 void
3928 job_callback_timer(job_t j, void *ident)
3929 {
3930 if (j == ident) {
3931 job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
3932 job_dispatch(j, true);
3933 } else if (&j->semaphores == ident) {
3934 job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
3935 job_dispatch(j, false);
3936 } else if (&j->start_interval == ident) {
3937 job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
3938 j->start_pending = true;
3939 job_dispatch(j, false);
3940 } else if (&j->exit_timeout == ident) {
3941 if (!job_assumes(j, j->p != 0)) {
3942 return;
3943 }
3944
3945 if (j->sent_sigkill) {
3946 uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);
3947
3948 td /= NSEC_PER_SEC;
3949 td -= j->clean_kill ? 0 : j->exit_timeout;
3950
3951 job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
3952 j->workaround9359725 = true;
3953
3954 if (launchd_trap_sigkill_bugs) {
3955 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
3956 (void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
3957 }
3958
3959 /* We've simulated the exit, so we have to cancel the kevent for
3960 * this job, otherwise we may get a kevent later down the road that
3961 * has a stale context pointer (if we've removed the job). Or worse,
3962 * it'll corrupt our data structures if the job still exists or the
3963 * allocation was recycled.
3964 *
3965 * If the failing process had a tracer attached to it, we need to
3966 * remove our NOTE_EXIT for that tracer too, otherwise the same
3967 * thing might happen.
3968 *
3969 * Note that, if we're not shutting down, this will result in a
3970 * zombie process just hanging around forever. But if the process
3971 * didn't exit after receiving SIGKILL, odds are it would've just
3972 * stuck around forever anyway.
3973 *
3974 * See <rdar://problem/9481630>.
3975 */
3976 (void)kevent_mod((uintptr_t)j->p, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
3977 if (j->tracing_pid) {
3978 (void)kevent_mod((uintptr_t)j->tracing_pid, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
3979 }
3980
3981 struct kevent bogus_exit;
3982 EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
3983 jobmgr_callback(j->mgr, &bogus_exit);
3984 } else {
3985 if (unlikely(j->debug_before_kill)) {
3986 job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
3987 (void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
3988 }
3989
3990 job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
3991 job_kill(j);
3992 }
3993 } else {
3994 job_log(j, LOG_ERR, "Unrecognized job timer callback: %p", ident);
3995 }
3996 }
3997
3998 void
3999 job_callback_read(job_t j, int ident)
4000 {
4001 if (ident == j->stdin_fd) {
4002 job_dispatch(j, true);
4003 } else {
4004 socketgroup_callback(j);
4005 }
4006 }
4007
4008 void
4009 jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
4010 {
4011 jobmgr_t jmi;
4012 job_t j;
4013
4014 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
4015 jobmgr_reap_bulk(jmi, kev);
4016 }
4017
4018 if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
4019 kev->udata = j;
4020 job_callback(j, kev);
4021 }
4022 }
4023
4024 void
4025 jobmgr_callback(void *obj, struct kevent *kev)
4026 {
4027 jobmgr_t jm = obj;
4028
4029 #if TARGET_OS_EMBEDDED
4030 int flag2check = VQ_MOUNT;
4031 #else
4032 int flag2check = VQ_UPDATE;
4033 #endif
4034
4035 switch (kev->filter) {
4036 case EVFILT_PROC:
4037 jobmgr_reap_bulk(jm, kev);
4038 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
4039 break;
4040 case EVFILT_SIGNAL:
4041 switch (kev->ident) {
4042 case SIGTERM:
4043 jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
4044 return launchd_shutdown();
4045 case SIGUSR1:
4046 return calendarinterval_callback();
4047 case SIGUSR2:
4048 // Turn on all logging.
4049 launchd_log_perf = true;
4050 launchd_log_debug = true;
4051 launchd_log_shutdown = true;
4052 /* Hopefully /var is available by this point. If not, uh, oh well.
4053 * It's just a debugging facility.
4054 */
4055 return jobmgr_log_perf_statistics(jm);
4056 default:
4057 jobmgr_log(jm, LOG_ERR, "Unrecognized signal: %lu: %s", kev->ident, strsignal(kev->ident));
4058 }
4059 break;
4060 case EVFILT_FS:
4061 if (kev->fflags & flag2check) {
4062 if (!launchd_var_available) {
4063 struct stat sb;
4064 if (stat("/var/log", &sb) == 0 && (sb.st_mode & S_IWUSR)) {
4065 launchd_var_available = true;
4066 }
4067 }
4068 } else if (kev->fflags & VQ_MOUNT) {
4069 jobmgr_dispatch_all(jm, true);
4070 }
4071 jobmgr_dispatch_all_semaphores(jm);
4072 break;
4073 case EVFILT_TIMER:
4074 if (kev->ident == (uintptr_t)&sorted_calendar_events) {
4075 calendarinterval_callback();
4076 } else if (kev->ident == (uintptr_t)jm) {
4077 jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
4078 jobmgr_still_alive_with_check(jm);
4079 } else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
4080 jobmgr_do_garbage_collection(jm);
4081 } else if (kev->ident == (uintptr_t)&launchd_runtime_busy_time) {
4082 jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
4083 if (jobmgr_assumes_zero(jm, runtime_busy_cnt) == 0) {
4084 return launchd_shutdown();
4085 }
4086 }
4087 break;
4088 case EVFILT_VNODE:
4089 if (kev->ident == (uintptr_t)s_no_hang_fd) {
4090 int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
4091 if (unlikely(_no_hang_fd != -1)) {
4092 jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
4093 (void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL));
4094 (void)jobmgr_assumes_zero_p(root_jobmgr, runtime_close(s_no_hang_fd));
4095 s_no_hang_fd = _fd(_no_hang_fd);
4096 }
4097 } else if (pid1_magic && launchd_console && kev->ident == (uintptr_t)fileno(launchd_console)) {
4098 int cfd = -1;
4099 if (jobmgr_assumes_zero_p(jm, cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1) {
4100 _fd(cfd);
4101 if (!(launchd_console = fdopen(cfd, "w"))) {
4102 (void)jobmgr_assumes_zero(jm, errno);
4103 (void)close(cfd);
4104 }
4105 }
4106 }
4107 break;
4108 default:
4109 jobmgr_log(jm, LOG_ERR, "Unrecognized kevent filter: %hd", kev->filter);
4110 }
4111 }
4112
4113 void
4114 job_callback(void *obj, struct kevent *kev)
4115 {
4116 job_t j = obj;
4117
4118 job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
4119
4120 switch (kev->filter) {
4121 case EVFILT_PROC:
4122 return job_callback_proc(j, kev);
4123 case EVFILT_TIMER:
4124 return job_callback_timer(j, (void *) kev->ident);
4125 case EVFILT_READ:
4126 return job_callback_read(j, (int) kev->ident);
4127 case EVFILT_MACHPORT:
4128 return (void)job_dispatch(j, true);
4129 default:
4130 job_log(j, LOG_ERR, "Unrecognized job callback filter: %hd", kev->filter);
4131 }
4132 }
4133
4134 void
4135 job_start(job_t j)
4136 {
4137 uint64_t td;
4138 int spair[2];
4139 int execspair[2];
4140 char nbuf[64];
4141 pid_t c;
4142 bool sipc = false;
4143 u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC;
4144
4145 if (!job_assumes(j, j->mgr != NULL)) {
4146 return;
4147 }
4148
4149 if (unlikely(job_active(j))) {
4150 job_log(j, LOG_DEBUG, "Already started");
4151 return;
4152 }
4153
4154 /*
4155 * Some users adjust the wall-clock and then expect software to not notice.
4156 * Therefore, launchd must use an absolute clock instead of the wall clock
4157 * wherever possible.
4158 */
4159 td = runtime_get_nanoseconds_since(j->start_time);
4160 td /= NSEC_PER_SEC;
4161
4162 if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
4163 time_t respawn_delta = j->min_run_time - (uint32_t)td;
4164
4165 /* We technically should ref-count throttled jobs to prevent idle exit,
4166 * but we're not directly tracking the 'throttled' state at the moment.
4167 */
4168 job_log(j, LOG_NOTICE, "Throttling respawn: Will start in %ld seconds", respawn_delta);
4169 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j));
4170 job_ignore(j);
4171 return;
4172 }
4173
4174 if (likely(!j->legacy_mach_job)) {
4175 sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_god;
4176 }
4177
4178 if (sipc) {
4179 (void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair));
4180 }
4181
4182 (void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair));
4183
4184 switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
4185 case -1:
4186 job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
4187 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j));
4188 job_ignore(j);
4189
4190 (void)job_assumes_zero(j, runtime_close(execspair[0]));
4191 (void)job_assumes_zero(j, runtime_close(execspair[1]));
4192 if (sipc) {
4193 (void)job_assumes_zero(j, runtime_close(spair[0]));
4194 (void)job_assumes_zero(j, runtime_close(spair[1]));
4195 }
4196 break;
4197 case 0:
4198 if (unlikely(_vproc_post_fork_ping())) {
4199 _exit(EXIT_FAILURE);
4200 }
4201
4202 (void)job_assumes_zero(j, runtime_close(execspair[0]));
4203 // wait for our parent to say they've attached a kevent to us
4204 read(_fd(execspair[1]), &c, sizeof(c));
4205
4206 if (sipc) {
4207 (void)job_assumes_zero(j, runtime_close(spair[0]));
4208 snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
4209 setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
4210 }
4211 job_start_child(j);
4212 break;
4213 default:
4214 j->start_time = runtime_get_opaque_time();
4215
4216 job_log(j, LOG_DEBUG, "Started as PID: %u", c);
4217
4218 j->did_exec = false;
4219 j->xpcproxy_did_exec = false;
4220 j->checkedin = false;
4221 j->start_pending = false;
4222 j->reaped = false;
4223 j->crashed = false;
4224 j->stopped = false;
4225 j->workaround9359725 = false;
4226 if (j->needs_kickoff) {
4227 j->needs_kickoff = false;
4228
4229 if (SLIST_EMPTY(&j->semaphores)) {
4230 j->ondemand = false;
4231 }
4232 }
4233
4234 if (j->has_console) {
4235 launchd_wsp = c;
4236 }
4237
4238 job_log(j, LOG_PERF, "Job started.");
4239 runtime_add_ref();
4240 total_children++;
4241 LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
4242 j->p = c;
4243
4244 j->mgr->normal_active_cnt++;
4245 j->fork_fd = _fd(execspair[0]);
4246 (void)job_assumes_zero(j, runtime_close(execspair[1]));
4247 if (sipc) {
4248 (void)job_assumes_zero(j, runtime_close(spair[1]));
4249 ipc_open(_fd(spair[0]), j);
4250 }
4251 if (kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1) {
4252 job_ignore(j);
4253 } else {
4254 if (errno == ESRCH) {
4255 job_log(j, LOG_ERR, "Child was killed before we could attach a kevent.");
4256 } else {
4257 (void)job_assumes(j, errno == ESRCH);
4258 }
4259 job_reap(j);
4260
4261 /* If we have reaped this job within this same run loop pass, then
4262 * it will be currently ignored. So if there's a failure to attach a
4263 * kevent, we need to make sure that we watch the job so that we can
4264 * respawn it.
4265 *
4266 * See <rdar://problem/10140809>.
4267 */
4268 job_watch(j);
4269 }
4270
4271 j->wait4debugger_oneshot = false;
4272 if (likely(!j->stall_before_exec)) {
4273 job_uncork_fork(j);
4274 }
4275 break;
4276 }
4277 }
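/* Sketch of the exec-handshake pattern used above, assuming (as
 * job_uncork_fork() appears to do) that the parent's "go" signal is a
 * write on the socketpair: the child parks in read(2) until the parent
 * has attached its EVFILT_PROC kevent, so NOTE_EXEC/NOTE_EXIT can never
 * be missed. The helper name is hypothetical.
 */
static pid_t __attribute__((unused))
example_fork_with_barrier(void (*child_fn)(void))
{
	int sp[2];
	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sp) == -1) {
		return -1;
	}
	pid_t p = fork();
	if (p == 0) {
		char c;
		(void)close(sp[0]);
		(void)read(sp[1], &c, sizeof(c)); // block until the parent is ready
		(void)close(sp[1]);
		child_fn();
		_exit(EXIT_FAILURE);
	} else if (p > 0) {
		char c = 0;
		(void)close(sp[1]);
		// ... attach kevents to p here, then uncork the child ...
		(void)write(sp[0], &c, sizeof(c));
		(void)close(sp[0]);
	}
	return p;
}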
4278
4279 void
4280 job_start_child(job_t j)
4281 {
4282 typeof(posix_spawn) *psf;
4283 const char *file2exec = "/usr/libexec/launchproxy";
4284 const char **argv;
4285 posix_spawnattr_t spattr;
4286 int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
4287 glob_t g;
4288 short spflags = POSIX_SPAWN_SETEXEC;
4289 size_t binpref_out_cnt = 0;
4290 size_t i;
4291
4292 (void)job_assumes_zero(j, posix_spawnattr_init(&spattr));
4293
4294 job_setup_attributes(j);
4295
4296 if (unlikely(j->argv && j->globargv)) {
4297 g.gl_offs = 1;
4298 for (i = 0; i < j->argc; i++) {
4299 if (i > 0) {
4300 gflags |= GLOB_APPEND;
4301 }
4302 if (glob(j->argv[i], gflags, NULL, &g) != 0) {
4303 job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
4304 exit(EXIT_FAILURE);
4305 }
4306 }
4307 g.gl_pathv[0] = (char *)file2exec;
4308 argv = (const char **)g.gl_pathv;
4309 } else if (likely(j->argv)) {
4310 argv = alloca((j->argc + 2) * sizeof(char *));
4311 argv[0] = file2exec;
4312 for (i = 0; i < j->argc; i++) {
4313 argv[i + 1] = j->argv[i];
4314 }
4315 argv[i + 1] = NULL;
4316 } else {
4317 argv = alloca(3 * sizeof(char *));
4318 argv[0] = file2exec;
4319 argv[1] = j->prog;
4320 argv[2] = NULL;
4321 }
4322
4323 if (likely(!j->inetcompat)) {
4324 argv++;
4325 }
4326
4327 if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
4328 if (!j->legacy_LS_job) {
4329 job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
4330 }
4331 spflags |= POSIX_SPAWN_START_SUSPENDED;
4332 }
4333
4334 #if !TARGET_OS_EMBEDDED
4335 if (unlikely(j->disable_aslr)) {
4336 spflags |= _POSIX_SPAWN_DISABLE_ASLR;
4337 }
4338 #endif
4339 spflags |= j->pstype;
4340
4341 (void)job_assumes_zero(j, posix_spawnattr_setflags(&spattr, spflags));
4342 if (unlikely(j->j_binpref_cnt)) {
4343 (void)job_assumes_zero(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt));
4344 (void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
4345 }
4346
4347 #if TARGET_OS_EMBEDDED
4348 /* Set jetsam attributes. POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY guards
4349 * against a race which arises if, during spawn, an initial jetsam property
4350 * update occurs before the values below are applied. In this case, the flag
4351 * ensures that the subsequent change is ignored; the explicit update should
4352 * be given priority.
4353 */
4354 short flags = 0;
4355 if (j->jetsam_properties) {
4356 flags = POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY;
4357 }
4358
4359 (void)job_assumes_zero(j, posix_spawnattr_setjetsam(&spattr, flags, j->jetsam_priority, j->jetsam_memlimit));
4360 #endif
4361
4362 if (!j->app) {
4363 (void)job_assumes_zero(j, posix_spawnattr_setcpumonitor(&spattr, 85, 5 * 60));
4364 }
4365
4366 #if HAVE_QUARANTINE
4367 if (j->quarantine_data) {
4368 qtn_proc_t qp;
4369
4370 if (job_assumes(j, qp = qtn_proc_alloc())) {
4371 if (job_assumes_zero(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz)) == 0) {
4372 (void)job_assumes_zero(j, qtn_proc_apply_to_self(qp));
4373 }
4374 }
4375 }
4376 #endif
4377
4378 #if HAVE_SANDBOX
4379 if (j->seatbelt_profile) {
4380 char *seatbelt_err_buf = NULL;
4381
4382 if (job_assumes_zero_p(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf)) == -1) {
4383 if (seatbelt_err_buf) {
4384 job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
4385 }
4386 goto out_bad;
4387 }
4388 }
4389 #endif
4390
4391 psf = j->prog ? posix_spawn : posix_spawnp;
4392
4393 if (likely(!j->inetcompat)) {
4394 file2exec = j->prog ? j->prog : argv[0];
4395 }
4396
4397 errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);
4398
4399 #if HAVE_SANDBOX
4400 out_bad:
4401 #endif
4402 _exit(errno);
4403 }
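/* The load-bearing detail above is POSIX_SPAWN_SETEXEC: with this
 * Apple-specific flag, posix_spawn(2) replaces the calling process
 * rather than creating a new one, i.e. it acts like an execve(2) that
 * can also apply spawn attributes. A minimal sketch (argv/envp are the
 * caller's problem):
 */
static void __attribute__((unused))
example_spawn_setexec(const char *path, char *const argv[], char *const envp[])
{
	posix_spawnattr_t attr;
	if (posix_spawnattr_init(&attr) == 0
			&& posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC) == 0) {
		// Does not return on success; returns an errno value on failure.
		(void)posix_spawn(NULL, path, NULL, &attr, argv, envp);
	}
	_exit(EXIT_FAILURE);
}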
4404
4405 void
4406 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
4407 {
4408 launch_data_t tmp;
4409 struct envitem *ei;
4410 job_t ji;
4411
4412 if (jm->parentmgr) {
4413 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4414 } else {
4415 char **tmpenviron = environ;
4416 for (; *tmpenviron; tmpenviron++) {
4417 char envkey[1024];
char *eq = strchr(*tmpenviron, '=');
/* Skip malformed entries and keys too long for envkey. The previous
 * strncpy()-based copy did not NUL-terminate on truncation, so the
 * strchr() that followed could run past the end of the buffer.
 */
if (!eq || (size_t)(eq - *tmpenviron) >= sizeof(envkey)) {
	continue;
}
4418 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4419 launch_data_set_string(s, eq + 1);
memcpy(envkey, *tmpenviron, (size_t)(eq - *tmpenviron));
envkey[eq - *tmpenviron] = '\0';
4422 launch_data_dict_insert(dict, s, envkey);
4423 }
4424 }
4425
4426 LIST_FOREACH(ji, &jm->jobs, sle) {
4427 SLIST_FOREACH(ei, &ji->global_env, sle) {
4428 if ((tmp = launch_data_new_string(ei->value))) {
4429 launch_data_dict_insert(dict, tmp, ei->key);
4430 }
4431 }
4432 }
4433 }
4434
4435 void
4436 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
4437 {
4438 struct envitem *ei;
4439 job_t ji;
4440
4441 if (jm->parentmgr) {
4442 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4443 }
4444
4445 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
4446 SLIST_FOREACH(ei, &ji->global_env, sle) {
4447 setenv(ei->key, ei->value, 1);
4448 }
4449 }
4450 }
4451
4452 void
4453 job_log_pids_with_weird_uids(job_t j)
4454 {
4455 size_t len = sizeof(pid_t) * get_kern_max_proc();
4456 pid_t *pids = NULL;
4457 uid_t u = j->mach_uid;
4458 int i = 0, kp_cnt = 0;
4459
4460 if (!launchd_apple_internal) {
4461 return;
4462 }
4463
4464 pids = malloc(len);
4465 if (!job_assumes(j, pids != NULL)) {
4466 return;
4467 }
4468
4469 runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);
4470
4471 /* libproc actually has some serious performance drawbacks when used over sysctl(3) in
4472 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
4473 * one kernel call, libproc requires that we get a list of PIDs we're interested in
4474 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
4475 * struct back in a single call for each one.
4476 *
4477 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
4478 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
4479 * libproc could go stale before we call proc_pidinfo().
4480 *
4481 * Note that proc_list*() APIs return the number of PIDs given back, not the number
4482 * of bytes written to the buffer.
4483 */
4484 if (job_assumes_zero_p(j, (kp_cnt = proc_listallpids(pids, len))) == -1) {
4485 goto out;
4486 }
4487
4488 for (i = 0; i < kp_cnt; i++) {
4489 struct proc_bsdshortinfo proc;
4490 /* We perhaps should not log a bug here if we get ESRCH back, due to the race
4491 * detailed above.
4492 */
4493 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
4494 if (errno != ESRCH) {
4495 (void)job_assumes_zero(j, errno);
4496 }
4497 continue;
4498 }
4499
4500 uid_t i_euid = proc.pbsi_uid;
4501 uid_t i_uid = proc.pbsi_ruid;
4502 uid_t i_svuid = proc.pbsi_svuid;
4503 pid_t i_pid = pids[i];
4504
4505 if (i_euid != u && i_uid != u && i_svuid != u) {
4506 continue;
4507 }
4508
4509 job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);
4510
4511 // Temporarily disabled due to 5423935 and 4946119.
4512 #if 0
4513 // Ask the accountless process to exit.
4514 (void)job_assumes_zero_p(j, kill2(i_pid, SIGTERM));
4515 #endif
4516 }
4517
4518 out:
4519 free(pids);
4520 }
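/* The two-step libproc pattern described in the comment above, reduced
 * to its skeleton. Any PID may exit between the two calls, so ESRCH-ish
 * failures from proc_pidinfo() are expected here, not bugs.
 */
static void __attribute__((unused))
example_enumerate_procs(void)
{
	pid_t pids[1024];
	int i, n;

	// Returns the number of PIDs written, not a byte count.
	n = proc_listallpids(pids, (int)sizeof(pids));
	for (i = 0; i < n; i++) {
		struct proc_bsdshortinfo info;
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &info, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
			// info.pbsi_comm, info.pbsi_uid, etc. are now valid.
		}
	}
}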
4521
4522 static struct passwd *
4523 job_getpwnam(job_t j, const char *name)
4524 {
4525 /*
4526 * methodology for system daemons
4527 *
4528 * first lookup user record without any opendirectoryd interaction,
4529 * we don't know what interprocess dependencies might be in flight.
4530 * if that fails, we re-enable opendirectoryd interaction and
4531 * re-issue the lookup. We have to disable the libinfo L1 cache
4532 * otherwise libinfo will return the negative cache entry on the retry
4533 */
4534 #if !TARGET_OS_EMBEDDED
4535 struct passwd *pw = NULL;
4536
4537 if (pid1_magic && j->mgr == root_jobmgr) {
4538 // 1 == SEARCH_MODULE_FLAG_DISABLED
4539 si_search_module_set_flags("ds", 1);
4540 gL1CacheEnabled = false;
4541
4542 pw = getpwnam(name);
4543 si_search_module_set_flags("ds", 0);
4544 }
4545
4546 if (pw == NULL) {
4547 pw = getpwnam(name);
4548 }
4549
4550 return pw;
4551 #else
4552 #pragma unused (j)
4553 return getpwnam(name);
4554 #endif
4555 }
4556
4557 static struct group *
4558 job_getgrnam(job_t j, const char *name)
4559 {
4560 #if !TARGET_OS_EMBEDDED
4561 struct group *gr = NULL;
4562
4563 if (pid1_magic && j->mgr == root_jobmgr) {
4564 si_search_module_set_flags("ds", 1);
4565 gL1CacheEnabled = false;
4566
4567 gr = getgrnam(name);
4568
4569 si_search_module_set_flags("ds", 0);
4570 }
4571
4572 if (gr == NULL) {
4573 gr = getgrnam(name);
4574 }
4575
4576 return gr;
4577 #else
4578 #pragma unused (j)
4579 return getgrnam(name);
4580 #endif
4581 }
4582
4583 void
4584 job_postfork_test_user(job_t j)
4585 {
4586 // This function is all about 5201578
4587
4588 const char *home_env_var = getenv("HOME");
4589 const char *user_env_var = getenv("USER");
4590 const char *logname_env_var = getenv("LOGNAME");
4591 uid_t tmp_uid, local_uid = getuid();
4592 gid_t tmp_gid, local_gid = getgid();
4593 char shellpath[PATH_MAX];
4594 char homedir[PATH_MAX];
4595 char loginname[2000];
4596 struct passwd *pwe;
4597
4598
4599 if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
4600 && strcmp(user_env_var, logname_env_var) == 0)) {
4601 goto out_bad;
4602 }
4603
4604 if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
4605 job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
4606 goto out_bad;
4607 }
4608
4609 /*
4610 * We must copy the results of getpw*().
4611 *
4612 * Why? Because subsequent API calls may call getpw*() as a part of
4613 * their implementation. Since getpw*() returns a [now thread scoped]
4614 * global, we must therefore cache the results before continuing.
4615 */
4616
4617 tmp_uid = pwe->pw_uid;
4618 tmp_gid = pwe->pw_gid;
4619
4620 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4621 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4622 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4623
4624 if (strcmp(loginname, logname_env_var) != 0) {
4625 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
4626 goto out_bad;
4627 }
4628 if (strcmp(homedir, home_env_var) != 0) {
4629 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
4630 goto out_bad;
4631 }
4632 if (local_uid != tmp_uid) {
4633 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4634 'U', tmp_uid, local_uid);
4635 goto out_bad;
4636 }
4637 if (local_gid != tmp_gid) {
4638 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4639 'G', tmp_gid, local_gid);
4640 goto out_bad;
4641 }
4642
4643 return;
4644 out_bad:
4645 #if 0
4646 (void)job_assumes_zero_p(j, kill2(getppid(), SIGTERM));
4647 _exit(EXIT_FAILURE);
4648 #else
4649 job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
4650 #endif
4651 }
4652
4653 void
4654 job_postfork_become_user(job_t j)
4655 {
4656 char loginname[2000];
4657 char tmpdirpath[PATH_MAX];
4658 char shellpath[PATH_MAX];
4659 char homedir[PATH_MAX];
4660 struct passwd *pwe;
4661 size_t r;
4662 gid_t desired_gid = -1;
4663 uid_t desired_uid = -1;
4664
4665 if (getuid() != 0) {
4666 return job_postfork_test_user(j);
4667 }
4668
4669 /*
4670 * I contend that having UID == 0 and GID != 0 is of dubious value.
4671 * Nevertheless, this used to work in Tiger. See: 5425348
4672 */
4673 if (j->groupname && !j->username) {
4674 j->username = "root";
4675 }
4676
4677 if (j->username) {
4678 if ((pwe = job_getpwnam(j, j->username)) == NULL) {
4679 job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
4680 _exit(ESRCH);
4681 }
4682 } else if (j->mach_uid) {
4683 if ((pwe = getpwuid(j->mach_uid)) == NULL) {
4684 job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
4685 job_log_pids_with_weird_uids(j);
4686 _exit(ESRCH);
4687 }
4688 } else {
4689 return;
4690 }
4691
4692 /*
4693 * We must copy the results of getpw*().
4694 *
4695 * Why? Because subsequent API calls may call getpw*() as a part of
4696 * their implementation. Since getpw*() returns a [now thread scoped]
4697 * global, we must therefore cache the results before continuing.
4698 */
4699
4700 desired_uid = pwe->pw_uid;
4701 desired_gid = pwe->pw_gid;
4702
4703 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4704 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4705 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4706
4707 if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
4708 job_log(j, LOG_ERR, "Expired account");
4709 _exit(EXIT_FAILURE);
4710 }
4711
4712
4713 if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
4714 job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
4715 } else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
4716 job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
4717 }
4718
4719 if (j->groupname) {
4720 struct group *gre;
4721
4722 if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
4723 job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
4724 _exit(ESRCH);
4725 }
4726
4727 desired_gid = gre->gr_gid;
4728 }
4729
4730 if (job_assumes_zero_p(j, setlogin(loginname)) == -1) {
4731 _exit(EXIT_FAILURE);
4732 }
4733
4734 if (job_assumes_zero_p(j, setgid(desired_gid)) == -1) {
4735 _exit(EXIT_FAILURE);
4736 }
4737
4738 /*
4739 * The kernel team and the DirectoryServices team want initgroups()
4740 * called after setgid(). See 4616864 for more information.
4741 */
4742
4743 if (likely(!j->no_init_groups)) {
4744 #if 1
4745 if (job_assumes_zero_p(j, initgroups(loginname, desired_gid)) == -1) {
4746 _exit(EXIT_FAILURE);
4747 }
4748 #else
4749 /* Do our own little initgroups(). We do this to guarantee that we're
4750 * always opted into dynamic group resolution in the kernel. initgroups(3)
4751 * does not make this guarantee.
4752 */
4753 int groups[NGROUPS], ngroups;
4754
4755 // A failure here isn't fatal, and we'll still get data we can use.
4756 (void)job_assumes_zero_p(j, getgrouplist(j->username, desired_gid, groups, &ngroups));
4757
4758 if (job_assumes_zero_p(j, syscall(SYS_initgroups, ngroups, groups, desired_uid)) == -1) {
4759 _exit(EXIT_FAILURE);
4760 }
4761 #endif
4762 }
4763
4764 if (job_assumes_zero_p(j, setuid(desired_uid)) == -1) {
4765 _exit(EXIT_FAILURE);
4766 }
4767
4768 r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));
4769
4770 if (likely(r > 0 && r < sizeof(tmpdirpath))) {
4771 setenv("TMPDIR", tmpdirpath, 0);
4772 }
4773
4774 setenv("SHELL", shellpath, 0);
4775 setenv("HOME", homedir, 0);
4776 setenv("USER", loginname, 0);
4777 setenv("LOGNAME", loginname, 0);
4778 }
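/* The drop-privileges ordering above is load-bearing. A condensed
 * sketch, assuming `pw` is a cached copy of the passwd entry (error
 * handling shortened; in the real function every step is checked and
 * failure is fatal):
 */
static int __attribute__((unused))
example_drop_privileges(const struct passwd *pw)
{
	if (setlogin(pw->pw_name) == -1) {	// login name first
		return -1;
	}
	if (setgid(pw->pw_gid) == -1) {		// GID while still root
		return -1;
	}
	// Supplementary groups must come after setgid(); see 4616864 above.
	if (initgroups(pw->pw_name, pw->pw_gid) == -1) {
		return -1;
	}
	// UID last: once this succeeds, root privileges are gone for good.
	return setuid(pw->pw_uid);
}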
4779
4780 void
4781 job_setup_attributes(job_t j)
4782 {
4783 struct limititem *li;
4784 struct envitem *ei;
4785
4786 if (unlikely(j->setnice)) {
4787 (void)job_assumes_zero_p(j, setpriority(PRIO_PROCESS, 0, j->nice));
4788 }
4789
4790 SLIST_FOREACH(li, &j->limits, sle) {
4791 struct rlimit rl;
4792
4793 if (job_assumes_zero_p(j, getrlimit(li->which, &rl)) == -1) {
4794 continue;
4795 }
4796
4797 if (li->sethard) {
4798 rl.rlim_max = li->lim.rlim_max;
4799 }
4800 if (li->setsoft) {
4801 rl.rlim_cur = li->lim.rlim_cur;
4802 }
4803
4804 if (setrlimit(li->which, &rl) == -1) {
4805 job_log_error(j, LOG_WARNING, "setrlimit()");
4806 }
4807 }
4808
4809 if (unlikely(!j->inetcompat && j->session_create)) {
4810 launchd_SessionCreate();
4811 }
4812
4813 if (unlikely(j->low_pri_io)) {
4814 (void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE));
4815 }
4816 if (unlikely(j->rootdir)) {
4817 (void)job_assumes_zero_p(j, chroot(j->rootdir));
4818 (void)job_assumes_zero_p(j, chdir("."));
4819 }
4820
4821 job_postfork_become_user(j);
4822
4823 if (unlikely(j->workingdir)) {
4824 if (chdir(j->workingdir) == -1) {
4825 if (errno == ENOENT || errno == ENOTDIR) {
4826 job_log(j, LOG_ERR, "Job specified non-existent working directory: %s", j->workingdir);
4827 } else {
4828 (void)job_assumes_zero(j, errno);
4829 }
4830 }
4831 }
4832
4833 if (unlikely(j->setmask)) {
4834 umask(j->mask);
4835 }
4836
4837 if (j->stdin_fd) {
4838 (void)job_assumes_zero_p(j, dup2(j->stdin_fd, STDIN_FILENO));
4839 } else {
4840 job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
4841 }
4842 job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
4843 job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
4844
4845 jobmgr_setup_env_from_other_jobs(j->mgr);
4846
4847 SLIST_FOREACH(ei, &j->env, sle) {
4848 setenv(ei->key, ei->value, 1);
4849 }
4850
4851 #if !TARGET_OS_EMBEDDED
4852 if (j->jetsam_properties) {
4853 (void)job_assumes_zero(j, proc_setpcontrol(PROC_SETPC_TERMINATE));
4854 }
4855 #endif
4856
4857 #if TARGET_OS_EMBEDDED
4858 if (j->main_thread_priority != 0) {
4859 struct sched_param params;
4860 bzero(&params, sizeof(params));
4861 params.sched_priority = j->main_thread_priority;
4862 (void)job_assumes_zero_p(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params));
4863 }
4864 #endif
4865
4866 /*
4867 * We'd like to call setsid() unconditionally, but we have reason to
4868 * believe that prevents launchd from being able to send signals to
4869 * setuid children. We'll settle for process-groups.
4870 */
4871 if (getppid() != 1) {
4872 (void)job_assumes_zero_p(j, setpgid(0, 0));
4873 } else {
4874 (void)job_assumes_zero_p(j, setsid());
4875 }
4876 }
4877
4878 void
4879 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
4880 {
4881 int fd;
4882
4883 if (!path) {
4884 return;
4885 }
4886
4887 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
4888 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
4889 return;
4890 }
4891
4892 (void)job_assumes_zero_p(j, dup2(fd, target_fd));
4893 (void)job_assumes_zero(j, runtime_close(fd));
4894 }
4895
4896 void
4897 calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
4898 {
4899 struct calendarinterval *ci_iter, *ci_prev = NULL;
4900 time_t later, head_later;
4901
4902 later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);
4903
4904 if (ci->when.tm_wday != -1) {
4905 time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);
4906
4907 if (ci->when.tm_mday == -1) {
4908 later = otherlater;
4909 } else {
4910 later = later < otherlater ? later : otherlater;
4911 }
4912 }
4913
4914 ci->when_next = later;
4915
4916 LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
4917 if (ci->when_next < ci_iter->when_next) {
4918 LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
4919 break;
4920 }
4921
4922 ci_prev = ci_iter;
4923 }
4924
4925 if (ci_iter == NULL) {
4926 // ci must want to fire after every other timer, or there are no timers
4927
4928 if (LIST_EMPTY(&sorted_calendar_events)) {
4929 LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
4930 } else {
4931 LIST_INSERT_AFTER(ci_prev, ci, global_sle);
4932 }
4933 }
4934
4935 head_later = LIST_FIRST(&sorted_calendar_events)->when_next;
4936
4937 if (job_assumes_zero_p(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr)) != -1) {
4938 char time_string[100];
4939 size_t time_string_len;
4940
4941 ctime_r(&later, time_string);
4942 time_string_len = strlen(time_string);
4943
4944 if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
4945 time_string[time_string_len - 1] = '\0';
4946 }
4947
4948 job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
4949 }
4950 }
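/* Only the head of sorted_calendar_events is ever armed in the kernel:
 * one absolute EVFILT_TIMER covers every calendar job, and each firing
 * re-sorts the list and re-arms for the new head. A sketch of the
 * absolute-time registration (kq and udata hypothetical):
 */
static int __attribute__((unused))
example_absolute_timer(int kq, time_t when, void *udata)
{
	struct kevent kev;
	// With NOTE_ABSOLUTE | NOTE_SECONDS, `data` is a wall-clock time_t,
	// not a relative delay.
	EV_SET(&kev, (uintptr_t)udata, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE | NOTE_SECONDS, when, udata);
	return kevent(kq, &kev, 1, NULL, 0, NULL);
}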
4951
4952 bool
4953 jobmgr_log_bug(aslmsg asl_message __attribute__((unused)), void *ctx, const char *message)
4954 {
4955 jobmgr_t jm = ctx;
4956 jobmgr_log(jm, LOG_ERR, "%s", message);
4957
4958 return true;
4959 }
4960
4961 bool
4962 job_log_bug(aslmsg asl_message __attribute__((unused)), void *ctx, const char *message)
4963 {
4964 job_t j = ctx;
4965 job_log(j, LOG_ERR, "%s", message);
4966
4967 return true;
4968 }
4969
4970 void
4971 job_log_perf_statistics(job_t j)
4972 {
4973 if (j->anonymous) {
4974 return;
4975 }
4976 if (!launchd_log_perf) {
4977 return;
4978 }
4979
4980 job_log(j, LOG_PERF, "Job is currently %srunning.", j->p ? "" : "not ");
4981 job_log(j, LOG_PERF, "Number of runs: %u", j->nruns);
4982 if (j->nruns) {
4983 job_log(j, LOG_PERF, "Total runtime: %06f.", (double)j->trt / (double)NSEC_PER_SEC);
4984 job_log(j, LOG_PERF, "Total user time: %ld.%06u", j->ru.ru_utime.tv_sec, j->ru.ru_utime.tv_usec);
4985 job_log(j, LOG_PERF, "Total system time: %ld.%06u", j->ru.ru_stime.tv_sec, j->ru.ru_stime.tv_usec);
4986 job_log(j, LOG_PERF, "Largest maximum resident size: %lu", j->ru.ru_maxrss);
4987 job_log(j, LOG_PERF, "Total integral shared memory size: %lu", j->ru.ru_ixrss);
4988 job_log(j, LOG_PERF, "Total integral unshared data size: %lu", j->ru.ru_idrss);
4989 job_log(j, LOG_PERF, "Total integral unshared stack size: %lu", j->ru.ru_isrss);
4990 job_log(j, LOG_PERF, "Total page reclaims: %lu", j->ru.ru_minflt);
4991 job_log(j, LOG_PERF, "Total page faults: %lu", j->ru.ru_majflt);
4992 job_log(j, LOG_PERF, "Total swaps: %lu", j->ru.ru_nswap);
4993 job_log(j, LOG_PERF, "Total input ops: %lu", j->ru.ru_inblock);
4994 job_log(j, LOG_PERF, "Total output ops: %lu", j->ru.ru_oublock);
4995 job_log(j, LOG_PERF, "Total messages sent: %lu", j->ru.ru_msgsnd);
4996 job_log(j, LOG_PERF, "Total messages received: %lu", j->ru.ru_msgrcv);
4997 job_log(j, LOG_PERF, "Total signals received: %lu", j->ru.ru_nsignals);
4998 job_log(j, LOG_PERF, "Total voluntary context switches: %lu", j->ru.ru_nvcsw);
4999 job_log(j, LOG_PERF, "Total involuntary context switches: %lu", j->ru.ru_nivcsw);
5000 }
5001
5002 if (j->p) {
5003 uint64_t rt = runtime_get_nanoseconds_since(j->start_time);
5004 job_log(j, LOG_PERF, "Current instance wall time: %06f", (double)rt / (double)NSEC_PER_SEC);
5005
5006 struct proc_taskinfo ti;
5007 int r = proc_pidinfo(j->p, PROC_PIDTASKINFO, 1, &ti, PROC_PIDTASKINFO_SIZE);
5008 if (r > 0) {
5009 job_log(j, LOG_PERF, "Current instance virtual size: %llu", ti.pti_virtual_size);
5010 job_log(j, LOG_PERF, "Current instance resident size: %llu", ti.pti_resident_size);
5011 job_log(j, LOG_PERF, "Current instance user time: %06f", (double)ti.pti_total_user / (double)NSEC_PER_SEC);
5012 job_log(j, LOG_PERF, "Current instance system time: %06f", (double)ti.pti_total_system / (double)NSEC_PER_SEC);
5013 job_log(j, LOG_PERF, "Current instance number of user threads: %llu", ti.pti_threads_user);
5014 job_log(j, LOG_PERF, "Current instance number of system threads: %llu", ti.pti_threads_system);
5015 job_log(j, LOG_PERF, "Current instance default thread policy: %d", ti.pti_policy);
5016 job_log(j, LOG_PERF, "Current instance number of page faults: %d", ti.pti_faults);
5017 job_log(j, LOG_PERF, "Current instance number of page-ins: %d", ti.pti_pageins);
5018 job_log(j, LOG_PERF, "Current instance number of COW faults: %d", ti.pti_cow_faults);
5019 job_log(j, LOG_PERF, "Current instance number of Mach syscalls: %d", ti.pti_syscalls_mach);
5020 job_log(j, LOG_PERF, "Current instance number of Unix syscalls: %d", ti.pti_syscalls_unix);
5021 job_log(j, LOG_PERF, "Current instance number of threads: %d", ti.pti_threadnum);
5022 job_log(j, LOG_PERF, "Current instance number of running threads: %d", ti.pti_numrunning);
5023 job_log(j, LOG_PERF, "Current instance task priority: %d", ti.pti_priority);
5024 } else {
5025 job_log(j, LOG_PERF, "proc_pidinfo(%d): %d: %s", j->p, errno, strerror(errno));
5026 }
5027 }
5028
5029 if (!j->ondemand) {
5030 job_log(j, LOG_PERF, "Job is configured to always run.");
5031 }
5032
5033 struct machservice *msi = NULL;
5034 SLIST_FOREACH(msi, &j->machservices, sle) {
5035 if (msi->upfront) {
5036 job_log(j, LOG_PERF, "Job advertises service in plist: %s", msi->name);
5037 } else if (!(msi->event_channel || msi->per_pid)) {
5038 job_log(j, LOG_PERF, "Job has dynamically registered service: %s", msi->name);
5039 } else if (msi->per_pid) {
5040 job_log(j, LOG_PERF, "Job advertises per-PID service: %s", msi->name);
5041 }
5042 }
5043 }
5044
5045 void
5046 job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
5047 {
5048 const char *label2use = j ? j->label : "com.apple.launchd.job-unknown";
5049 const char *mgr2use = j ? j->mgr->name : "com.apple.launchd.jobmanager-unknown";
5050 char *newmsg;
5051 int oldmask = 0;
5052 size_t newmsgsz;
5053
5054 struct launchd_syslog_attr attr = {
5055 .from_name = launchd_label,
5056 .about_name = label2use,
5057 .session_name = mgr2use,
5058 .priority = pri,
5059 .from_uid = getuid(),
5060 .from_pid = getpid(),
5061 .about_pid = j ? j->p : 0,
5062 };
5063
5064 /* Hack: If bootstrap_port is set, we must be on the child side of a
5065 * fork(2), but before the exec*(3). Let's route the log message back to
5066 * launchd proper.
5067 */
5068 if (bootstrap_port) {
5069 return _vproc_logv(pri, err, msg, ap);
5070 }
5071
5072 newmsgsz = strlen(msg) + 200;
5073 newmsg = alloca(newmsgsz);
5074
5075 if (err) {
5076 #if !TARGET_OS_EMBEDDED
5077 snprintf(newmsg, newmsgsz, "%s: %d: %s", msg, err, strerror(err));
5078 #else
5079 snprintf(newmsg, newmsgsz, "(%s) %s: %d: %s", label2use, msg, err, strerror(err));
5080 #endif
5081 } else {
5082 #if !TARGET_OS_EMBEDDED
5083 snprintf(newmsg, newmsgsz, "%s", msg);
5084 #else
5085 snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
5086 #endif
5087 }
5088
5089 if (j && unlikely(j->debug)) {
5090 oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
5091 }
5092
5093 launchd_vsyslog(&attr, newmsg, ap);
5094
5095 if (j && unlikely(j->debug)) {
5096 setlogmask(oldmask);
5097 }
5098 }
5099
5100 void
5101 job_log_error(job_t j, int pri, const char *msg, ...)
5102 {
5103 va_list ap;
5104
5105 va_start(ap, msg);
5106 job_logv(j, pri, errno, msg, ap);
5107 va_end(ap);
5108 }
5109
5110 void
5111 job_log(job_t j, int pri, const char *msg, ...)
5112 {
5113 va_list ap;
5114
5115 va_start(ap, msg);
5116 job_logv(j, pri, 0, msg, ap);
5117 va_end(ap);
5118 }
5119
5120 #if 0
5121 void
5122 jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
5123 {
5124 va_list ap;
5125
5126 va_start(ap, msg);
5127 jobmgr_logv(jm, pri, errno, msg, ap);
5128 va_end(ap);
5129 }
5130 #endif
5131
5132 void
5133 jobmgr_log_perf_statistics(jobmgr_t jm)
5134 {
5135 jobmgr_t jmi = NULL;
5136 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
5137 jobmgr_log_perf_statistics(jmi);
5138 }
5139
5140 if (jm->xpc_singleton) {
5141 jobmgr_log(jm, LOG_PERF, "XPC Singleton Domain: %s", jm->shortdesc);
5142 } else if (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
5143 jobmgr_log(jm, LOG_PERF, "XPC Private Domain: %s", jm->owner);
5144 } else if (jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) {
5145 jobmgr_log(jm, LOG_PERF, "Created via bootstrap_subset()");
5146 }
5147
5148 jobmgr_log(jm, LOG_PERF, "Jobs in job manager:");
5149
5150 job_t ji = NULL;
5151 LIST_FOREACH(ji, &jm->jobs, sle) {
5152 job_log_perf_statistics(ji);
5153 }
5154
5155 jobmgr_log(jm, LOG_PERF, "End of job list.");
5156 }
5157
5158 void
5159 jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
5160 {
5161 va_list ap;
5162
5163 va_start(ap, msg);
5164 jobmgr_logv(jm, pri, 0, msg, ap);
5165 va_end(ap);
5166 }
5167
5168 void
5169 jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
5170 {
5171 if (!jm) {
5172 jm = root_jobmgr;
5173 }
5174
5175 char *newmsg;
5176 char *newname;
5177 size_t i, o, jmname_len = strlen(jm->name), newmsgsz;
5178
5179 newname = alloca((jmname_len + 1) * 2);
5180 newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
5181 newmsg = alloca(newmsgsz);
5182
5183 for (i = 0, o = 0; i < jmname_len; i++, o++) {
5184 if (jm->name[i] == '%') {
5185 newname[o] = '%';
5186 o++;
5187 }
5188 newname[o] = jm->name[i];
5189 }
5190 newname[o] = '\0';
5191
5192 if (err) {
5193 snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
5194 } else {
5195 snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
5196 }
5197
5198 if (jm->parentmgr) {
5199 jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
5200 } else {
5201 struct launchd_syslog_attr attr = {
5202 .from_name = launchd_label,
5203 .about_name = launchd_label,
5204 .session_name = jm->name,
5205 .priority = pri,
5206 .from_uid = getuid(),
5207 .from_pid = getpid(),
5208 .about_pid = getpid(),
5209 };
5210
5211 launchd_vsyslog(&attr, newmsg, ap);
5212 }
5213 }
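/* The doubling loop above exists because the manager's name is pasted
 * into what eventually travels through printf(3)-style formatting: a
 * literal '%' in the name must become "%%" (e.g. a manager named
 * "50%done" is logged via the string "50%%done"), or the conversion
 * machinery would misinterpret it.
 */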
5214
5215 struct cal_dict_walk {
5216 job_t j;
5217 struct tm tmptm;
5218 };
5219
5220 void
5221 calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
5222 {
5223 struct cal_dict_walk *cdw = context;
5224 struct tm *tmptm = &cdw->tmptm;
5225 job_t j = cdw->j;
5226 int64_t val;
5227
5228 if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
5229 // hack to let caller know something went wrong
5230 tmptm->tm_sec = -1;
5231 return;
5232 }
5233
5234 val = launch_data_get_integer(obj);
5235
5236 if (val < 0) {
5237 job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
5238 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
5239 if (val > 59) {
5240 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
5241 tmptm->tm_sec = -1;
5242 } else {
5243 tmptm->tm_min = (typeof(tmptm->tm_min)) val;
5244 }
5245 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
5246 if (val > 23) {
5247 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
5248 tmptm->tm_sec = -1;
5249 } else {
5250 tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
5251 }
5252 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
5253 if (val < 1 || val > 31) {
5254 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
5255 tmptm->tm_sec = -1;
5256 } else {
5257 tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
5258 }
5259 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
5260 if (val > 7) {
5261 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
5262 tmptm->tm_sec = -1;
5263 } else {
5264 tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
5265 }
5266 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
5267 if (val > 12) {
5268 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
5269 tmptm->tm_sec = -1;
5270 } else {
5271 tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
5272 tmptm->tm_mon -= 1; // 4798263 cron compatibility
5273 }
5274 }
5275 }
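/* The resulting struct tm uses -1 as a cron-style wildcard (see
 * calendarinterval_new_from_obj below, which pre-fills every field with
 * -1). For example, a StartCalendarInterval of { Minute = 30 } yields
 * tm_min == 30 with hour, day, weekday and month all left at -1:
 * "run at minute 30 of every hour of every day."
 */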
5276
5277 bool
5278 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5279 {
5280 struct cal_dict_walk cdw;
5281
5282 cdw.j = j;
5283 memset(&cdw.tmptm, 0, sizeof(cdw.tmptm));
5284
5285 cdw.tmptm.tm_min = -1;
5286 cdw.tmptm.tm_hour = -1;
5287 cdw.tmptm.tm_mday = -1;
5288 cdw.tmptm.tm_wday = -1;
5289 cdw.tmptm.tm_mon = -1;
5290
5291 if (!job_assumes(j, obj != NULL)) {
5292 return false;
5293 }
5294
5295 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5296 return false;
5297 }
5298
5299 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5300
5301 if (unlikely(cdw.tmptm.tm_sec == -1)) {
5302 return false;
5303 }
5304
5305 return calendarinterval_new(j, &cdw.tmptm);
5306 }
5307
5308 bool
5309 calendarinterval_new(job_t j, struct tm *w)
5310 {
5311 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5312
5313 if (!job_assumes(j, ci != NULL)) {
5314 return false;
5315 }
5316
5317 ci->when = *w;
5318 ci->job = j;
5319
5320 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
5321
5322 calendarinterval_setalarm(j, ci);
5323
5324 runtime_add_weak_ref();
5325
5326 return true;
5327 }
5328
5329 void
5330 calendarinterval_delete(job_t j, struct calendarinterval *ci)
5331 {
5332 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
5333 LIST_REMOVE(ci, global_sle);
5334
5335 free(ci);
5336
5337 runtime_del_weak_ref();
5338 }
5339
5340 void
5341 calendarinterval_sanity_check(void)
5342 {
5343 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5344 time_t now = time(NULL);
5345
5346 if (unlikely(ci && (ci->when_next < now))) {
5347 (void)jobmgr_assumes_zero_p(root_jobmgr, raise(SIGUSR1));
5348 }
5349 }
5350
5351 void
5352 calendarinterval_callback(void)
5353 {
5354 struct calendarinterval *ci, *ci_next;
5355 time_t now = time(NULL);
5356
5357 LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
5358 job_t j = ci->job;
5359
5360 if (ci->when_next > now) {
5361 break;
5362 }
5363
5364 LIST_REMOVE(ci, global_sle);
5365 calendarinterval_setalarm(j, ci);
5366
5367 j->start_pending = true;
5368 job_dispatch(j, false);
5369 }
5370 }
5371
5372 bool
5373 socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt)
5374 {
5375 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5376
5377 if (!job_assumes(j, sg != NULL)) {
5378 return false;
5379 }
5380
5381 sg->fds = calloc(1, fd_cnt * sizeof(int));
5382 sg->fd_cnt = fd_cnt;
5383
5384 if (!job_assumes(j, sg->fds != NULL)) {
5385 free(sg);
5386 return false;
5387 }
5388
5389 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
5390 strcpy(sg->name_init, name);
5391
5392 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5393
5394 runtime_add_weak_ref();
5395
5396 return true;
5397 }
5398
5399 void
5400 socketgroup_delete(job_t j, struct socketgroup *sg)
5401 {
5402 unsigned int i;
5403
5404 for (i = 0; i < sg->fd_cnt; i++) {
5405 #if 0
5406 struct sockaddr_storage ss;
5407 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
5408 socklen_t ss_len = sizeof(ss);
5409
5410 // 5480306
5411 if (job_assumes_zero(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
5412 && job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
5413 (void)job_assumes(j, unlink(sun->sun_path) != -1);
5414 // We might conditionally need to delete a directory here
5415 }
5416 #endif
5417 (void)job_assumes_zero_p(j, runtime_close(sg->fds[i]));
5418 }
5419
5420 SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);
5421
5422 free(sg->fds);
5423 free(sg);
5424
5425 runtime_del_weak_ref();
5426 }
5427
5428 void
5429 socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
5430 {
5431 struct kevent kev[sg->fd_cnt];
5432 char buf[10000];
5433 unsigned int i, buf_off = 0;
5434
5435 for (i = 0; i < sg->fd_cnt; i++) {
5436 EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
5437 buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
5438 }
5439
5440 job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);
5441
5442 (void)job_assumes_zero_p(j, kevent_bulk_mod(kev, sg->fd_cnt));
5443
5444 for (i = 0; i < sg->fd_cnt; i++) {
5445 (void)job_assumes(j, kev[i].flags & EV_ERROR);
5446 errno = (typeof(errno)) kev[i].data;
5447 (void)job_assumes_zero(j, kev[i].data);
5448 }
5449 }
5450
5451 void
5452 socketgroup_ignore(job_t j, struct socketgroup *sg)
5453 {
5454 socketgroup_kevent_mod(j, sg, false);
5455 }
5456
5457 void
5458 socketgroup_watch(job_t j, struct socketgroup *sg)
5459 {
5460 socketgroup_kevent_mod(j, sg, true);
5461 }
5462
5463 void
5464 socketgroup_callback(job_t j)
5465 {
5466 job_dispatch(j, true);
5467 }
5468
5469 bool
5470 envitem_new(job_t j, const char *k, const char *v, bool global)
5471 {
5472 if (global && !launchd_allow_global_dyld_envvars) {
5473 if (strncmp("DYLD_", k, sizeof("DYLD_") - 1) == 0) {
5474 job_log(j, LOG_ERR, "Ignoring global environment variable submitted by job (variable=value): %s=%s", k, v);
5475 return false;
5476 }
5477 }
5478
5479 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
5480
5481 if (!job_assumes(j, ei != NULL)) {
5482 return false;
5483 }
5484
5485 strcpy(ei->key_init, k);
5486 ei->value = ei->key_init + strlen(k) + 1;
5487 strcpy(ei->value, v);
5488
5489 if (global) {
5490 if (SLIST_EMPTY(&j->global_env)) {
5491 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5492 }
5493 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5494 } else {
5495 SLIST_INSERT_HEAD(&j->env, ei, sle);
5496 }
5497
5498 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
5499
5500 return true;
5501 }
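/* Illustrative sketch (not part of the original file): the DYLD_ guard above
 * applies only to *global* environment variables. A hypothetical caller would
 * see the following when launchd_allow_global_dyld_envvars is false; the
 * dylib path is made up.
 */
#if 0
static void
example_envitem_filtering(job_t j)
{
	// Rejected and logged at LOG_ERR: global DYLD_* variables are refused.
	(void)envitem_new(j, "DYLD_INSERT_LIBRARIES", "/tmp/example.dylib", true);

	// Accepted: the same variable is allowed when scoped to the job itself.
	(void)envitem_new(j, "DYLD_INSERT_LIBRARIES", "/tmp/example.dylib", false);
}
#endif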
5502
5503 void
5504 envitem_delete(job_t j, struct envitem *ei, bool global)
5505 {
5506 if (global) {
5507 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
5508 if (SLIST_EMPTY(&j->global_env)) {
5509 LIST_REMOVE(j, global_env_sle);
5510 }
5511 } else {
5512 SLIST_REMOVE(&j->env, ei, envitem, sle);
5513 }
5514
5515 free(ei);
5516 }
5517
5518 void
5519 envitem_setup(launch_data_t obj, const char *key, void *context)
5520 {
5521 job_t j = context;
5522
5523 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5524 return;
5525 }
5526
5527 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5528 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
5529 } else {
5530 job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5531 }
5532 }
5533
5534 bool
5535 limititem_update(job_t j, int w, rlim_t r)
5536 {
5537 struct limititem *li;
5538
5539 SLIST_FOREACH(li, &j->limits, sle) {
5540 if (li->which == w) {
5541 break;
5542 }
5543 }
5544
5545 if (li == NULL) {
5546 li = calloc(1, sizeof(struct limititem));
5547
5548 if (!job_assumes(j, li != NULL)) {
5549 return false;
5550 }
5551
5552 SLIST_INSERT_HEAD(&j->limits, li, sle);
5553
5554 li->which = w;
5555 }
5556
5557 if (j->importing_hard_limits) {
5558 li->lim.rlim_max = r;
5559 li->sethard = true;
5560 } else {
5561 li->lim.rlim_cur = r;
5562 li->setsoft = true;
5563 }
5564
5565 return true;
5566 }
5567
5568 void
5569 limititem_delete(job_t j, struct limititem *li)
5570 {
5571 SLIST_REMOVE(&j->limits, li, limititem, sle);
5572
5573 free(li);
5574 }
5575
5576 #if HAVE_SANDBOX
5577 void
5578 seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
5579 {
5580 job_t j = context;
5581
5582 if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
5583 job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
5584 return;
5585 }
5586
5587 if (launch_data_get_bool(obj) == false) {
5588 return;
5589 }
5590
5591 if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
5592 j->seatbelt_flags |= SANDBOX_NAMED;
5593 }
5594 }
5595 #endif
5596
5597 void
5598 limititem_setup(launch_data_t obj, const char *key, void *context)
5599 {
5600 job_t j = context;
5601 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
5602 rlim_t rl;
5603
5604 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
5605 return;
5606 }
5607
5608 rl = launch_data_get_integer(obj);
5609
5610 for (i = 0; i < limits_cnt; i++) {
5611 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
5612 break;
5613 }
5614 }
5615
5616 if (i == limits_cnt) {
5617 return;
5618 }
5619
5620 limititem_update(j, launchd_keys2limits[i].val, rl);
5621 }
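/* Illustrative sketch (not part of the original file): launchd_keys2limits is
 * defined elsewhere in launchd. A plausible shape for its entries, shown only
 * to clarify the lookup loop above, is a key-to-RLIMIT_* mapping:
 */
#if 0
static const struct {
	const char *key;	// plist key within Soft/HardResourceLimits
	int val;		// the matching setrlimit(2) resource
} example_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
};
#endif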
5622
5623 bool
5624 job_useless(job_t j)
5625 {
5626 if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
5627 if (j->legacy_LS_job && j->j_port) {
5628 return false;
5629 }
5630 job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
5631 return true;
5632 } else if (j->removal_pending) {
5633 job_log(j, LOG_DEBUG, "Exited while removal was pending.");
5634 return true;
5635 } else if (j->shutdown_monitor) {
5636 return false;
5637 } else if (j->mgr->shutting_down && !j->mgr->parentmgr) {
5638 job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
5639 if (total_children == 0 && !j->anonymous) {
5640 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
5641 }
5642 return true;
5643 } else if (j->legacy_mach_job) {
5644 if (SLIST_EMPTY(&j->machservices)) {
5645 job_log(j, LOG_INFO, "Garbage collecting");
5646 return true;
5647 } else if (!j->checkedin) {
5648 job_log(j, LOG_WARNING, "Failed to check-in!");
5649 return true;
5650 }
5651 } else {
5652 /* If the job's executable does not have any valid architectures (for
5653 * example, if it's a PowerPC-only job), then we don't even bother
5654 * trying to relaunch it, as we have no reasonable expectation that
5655 * the situation will change.
5656 *
5657 * <rdar://problem/9106979>
5658 */
5659 if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
5660 job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
5661 return true;
5662 }
5663 }
5664
5665 return false;
5666 }
5667
5668 bool
5669 job_keepalive(job_t j)
5670 {
5671 mach_msg_type_number_t statusCnt;
5672 mach_port_status_t status;
5673 struct semaphoreitem *si;
5674 struct machservice *ms;
5675 bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
5676 bool is_not_kextd = (launchd_apple_internal || (strcmp(j->label, "com.apple.kextd") != 0));
5677
5678 if (unlikely(j->mgr->shutting_down)) {
5679 return false;
5680 }
5681
5682 /*
5683 * 5066316
5684 *
5685 * We definitely need to revisit this after Leopard ships. Please see
5686 * launchctl.c for the other half of this hack.
5687 */
5688 if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
5689 return false;
5690 }
5691
5692 if (unlikely(j->needs_kickoff)) {
5693 job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
5694 return false;
5695 }
5696
5697 if (j->start_pending) {
5698 job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
5699 return true;
5700 }
5701
5702 if (!j->ondemand) {
5703 job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
5704 return true;
5705 }
5706
5707 SLIST_FOREACH(ms, &j->machservices, sle) {
5708 statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
5709 if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
5710 (mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
5711 continue;
5712 }
5713 if (status.mps_msgcount) {
5714 job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
5715 status.mps_msgcount, ms->name);
5716 return true;
5717 }
5718 }
5719
5720 /* TODO: Coalesce external events and semaphore items, since they're basically
5721 * the same thing.
5722 */
5723 struct externalevent *ei = NULL;
5724 LIST_FOREACH(ei, &j->events, job_le) {
5725 if (ei->state == ei->wanted_state) {
5726 return true;
5727 }
5728 }
5729
5730 SLIST_FOREACH(si, &j->semaphores, sle) {
5731 bool wanted_state = false;
5732 job_t other_j;
5733
5734 switch (si->why) {
5735 case NETWORK_UP:
5736 wanted_state = true; // Fall through.
5737 case NETWORK_DOWN:
5738 if (network_up == wanted_state) {
5739 job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
5740 return true;
5741 }
5742 break;
5743 case SUCCESSFUL_EXIT:
5744 wanted_state = true; // Fall through.
5745 case FAILED_EXIT:
5746 if (good_exit == wanted_state) {
5747 job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
5748 return true;
5749 }
5750 break;
5751 case CRASHED:
5752 wanted_state = true; // Fall through.
5753 case DID_NOT_CRASH:
5754 if (j->crashed == wanted_state) {
5755 return true;
5756 }
5757 break;
5758 case OTHER_JOB_ENABLED:
5759 wanted_state = true; // Fall through.
5760 case OTHER_JOB_DISABLED:
5761 if ((bool)job_find(NULL, si->what) == wanted_state) {
5762 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
5763 return true;
5764 }
5765 break;
5766 case OTHER_JOB_ACTIVE:
5767 wanted_state = true; // Fall through.
5768 case OTHER_JOB_INACTIVE:
5769 if ((other_j = job_find(NULL, si->what))) {
5770 if ((bool)other_j->p == wanted_state) {
5771 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
5772 return true;
5773 }
5774 }
5775 break;
5776 }
5777 }
5778
5779 return false;
5780 }
5781
5782 const char *
5783 job_active(job_t j)
5784 {
5785 struct machservice *ms;
5786 if (j->p && j->shutdown_monitor) {
5787 return "Monitoring shutdown";
5788 }
5789 if (j->p) {
5790 return "PID is still valid";
5791 }
5792
5793 if (j->priv_port_has_senders) {
5794 return "Privileged Port still has outstanding senders";
5795 }
5796
5797 SLIST_FOREACH(ms, &j->machservices, sle) {
5798 /* If we've simulated an exit, we mark the job as non-active, even
5799 * though doing so will leave it in an unsafe state. We do this so that
5800 * shutdown can proceed. See <rdar://problem/11126530>.
5801 *
5802 * For a more sustainable solution, see <rdar://problem/11131336>.
5803 */
5804 if (!j->workaround9359725 && ms->recv && machservice_active(ms)) {
5805 job_log(j, LOG_INFO, "Mach service is still active: %s", ms->name);
5806 return "Mach service is still active";
5807 }
5808 }
5809
5810 return NULL;
5811 }
5812
5813 void
5814 machservice_watch(job_t j, struct machservice *ms)
5815 {
5816 if (ms->recv) {
5817 (void)job_assumes_zero(j, runtime_add_mport(ms->port, NULL));
5818 }
5819 }
5820
5821 void
5822 machservice_ignore(job_t j, struct machservice *ms)
5823 {
5824 /* We only add ports whose receive rights we control into the port set, so
5825 * don't attempt to remove the service from the port set if we didn't put it
5826 * there in the first place. Otherwise, we could wind up trying to access a
5827 * bogus index (like MACH_PORT_DEAD) or zeroing a valid one out.
5828 *
5829 * <rdar://problem/10898014>
5830 */
5831 if (ms->recv) {
5832 (void)job_assumes_zero(j, runtime_remove_mport(ms->port));
5833 }
5834 }
5835
5836 void
5837 machservice_resetport(job_t j, struct machservice *ms)
5838 {
5839 LIST_REMOVE(ms, port_hash_sle);
5840 (void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
5841 (void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));
5842
5843 ms->gen_num++;
5844 (void)job_assumes_zero(j, launchd_mport_create_recv(&ms->port));
5845 (void)job_assumes_zero(j, launchd_mport_make_send(ms->port));
5846 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
5847 }
5848
5849 void
5850 machservice_stamp_port(job_t j, struct machservice *ms)
5851 {
5852 mach_port_context_t ctx = 0;
5853 char *where2get = j->prog ? j->prog : j->argv[0];
5854
5855 char *prog = NULL;
5856 if ((prog = strrchr(where2get, '/'))) {
5857 prog++;
5858 } else {
5859 prog = where2get;
5860 }
5861
5862 (void)strncpy((char *)&ctx, prog, sizeof(ctx));
5863 #if __LITTLE_ENDIAN__
5864 #if __LP64__
5865 ctx = OSSwapBigToHostInt64(ctx);
5866 #else
5867 ctx = OSSwapBigToHostInt32(ctx);
5868 #endif
5869 #endif
5870
5871 (void)job_assumes_zero(j, mach_port_set_context(mach_task_self(), ms->port, ctx));
5872 }
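/* Illustrative sketch (not part of the original file): the context stamped
 * above packs the leading bytes of the program's basename into a
 * mach_port_context_t. A debugging tool could recover it roughly like so,
 * assuming the inverse of the byte-swap convention used above.
 */
#if 0
static void
example_read_port_stamp(mach_port_t port)
{
	mach_port_context_t ctx = 0;
	char name[sizeof(ctx) + 1];

	if (mach_port_get_context(mach_task_self(), port, &ctx) != KERN_SUCCESS) {
		return;
	}
#if __LITTLE_ENDIAN__
#if __LP64__
	ctx = OSSwapHostToBigInt64(ctx);
#else
	ctx = OSSwapHostToBigInt32(ctx);
#endif
#endif
	memcpy(name, &ctx, sizeof(ctx));
	name[sizeof(ctx)] = '\0';
	// `name` now holds the first few characters of the stamping job's basename.
}
#endif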
5873
5874 struct machservice *
5875 machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
5876 {
5877 /* Don't create new MachServices for dead ports. This is primarily for
5878 * clients who use bootstrap_register2(). They can pass in a send right, but
5879 * then that port can immediately go dead. Hilarity ensues.
5880 *
5881 * <rdar://problem/10898014>
5882 */
5883 if (*serviceport == MACH_PORT_DEAD) {
5884 return NULL;
5885 }
5886
5887 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
5888 if (!job_assumes(j, ms != NULL)) {
5889 return NULL;
5890 }
5891
5892 strcpy((char *)ms->name, name);
5893 ms->job = j;
5894 ms->gen_num = 1;
5895 ms->per_pid = pid_local;
5896
5897 if (likely(*serviceport == MACH_PORT_NULL)) {
5898 if (job_assumes_zero(j, launchd_mport_create_recv(&ms->port)) != KERN_SUCCESS) {
5899 goto out_bad;
5900 }
5901
5902 if (job_assumes_zero(j, launchd_mport_make_send(ms->port)) != KERN_SUCCESS) {
5903 goto out_bad2;
5904 }
5905 *serviceport = ms->port;
5906 ms->recv = true;
5907 } else {
5908 ms->port = *serviceport;
5909 ms->isActive = true;
5910 }
5911
5912 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
5913
5914 jobmgr_t where2put = j->mgr;
5915 // XPC domains are separate from Mach bootstraps.
5916 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
5917 if (launchd_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
5918 where2put = root_jobmgr;
5919 }
5920 }
5921
5922 /* Don't allow MachServices added by multiple-instance jobs to be looked up
5923 * by others. We could just do this with a simple bit, but then we'd have to
5924 * uniquify the names ourselves to avoid collisions. This is just easier.
5925 */
5926 if (!j->dedicated_instance) {
5927 LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
5928 }
5929 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
5930
5931 if (ms->recv) {
5932 machservice_stamp_port(j, ms);
5933 }
5934
5935 job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);
5936
5937 return ms;
5938 out_bad2:
5939 (void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
5940 out_bad:
5941 free(ms);
5942 return NULL;
5943 }
5944
5945 struct machservice *
5946 machservice_new_alias(job_t j, struct machservice *orig)
5947 {
5948 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
5949 if (job_assumes(j, ms != NULL)) {
5950 strcpy((char *)ms->name, orig->name);
5951 ms->alias = orig;
5952 ms->job = j;
5953
5954 LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
5955 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
5956 jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
5957 }
5958
5959 return ms;
5960 }
5961
5962 bootstrap_status_t
5963 machservice_status(struct machservice *ms)
5964 {
5965 ms = ms->alias ? ms->alias : ms;
5966 if (ms->isActive) {
5967 return BOOTSTRAP_STATUS_ACTIVE;
5968 } else if (ms->job->ondemand) {
5969 return BOOTSTRAP_STATUS_ON_DEMAND;
5970 } else {
5971 return BOOTSTRAP_STATUS_INACTIVE;
5972 }
5973 }
5974
5975 void
5976 job_setup_exception_port(job_t j, task_t target_task)
5977 {
5978 struct machservice *ms;
5979 thread_state_flavor_t f = 0;
5980 mach_port_t exc_port = the_exception_server;
5981
5982 if (unlikely(j->alt_exc_handler)) {
5983 ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
5984 if (likely(ms)) {
5985 exc_port = machservice_port(ms);
5986 } else {
5987 job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
5988 }
5989 } else if (unlikely(j->internal_exc_handler)) {
5990 exc_port = runtime_get_kernel_port();
5991 } else if (unlikely(!exc_port)) {
5992 return;
5993 }
5994
5995 #if defined (__ppc__) || defined(__ppc64__)
5996 f = PPC_THREAD_STATE64;
5997 #elif defined(__i386__) || defined(__x86_64__)
5998 f = x86_THREAD_STATE;
5999 #elif defined(__arm__)
6000 f = ARM_THREAD_STATE;
6001 #else
6002 #error "unknown architecture"
6003 #endif
6004
6005 if (likely(target_task)) {
6006 kern_return_t kr = task_set_exception_ports(target_task, EXC_MASK_CRASH | EXC_MASK_RESOURCE, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f);
6007 if (kr) {
6008 if (kr != MACH_SEND_INVALID_DEST) {
6009 (void)job_assumes_zero(j, kr);
6010 } else {
6011 job_log(j, LOG_WARNING, "Task died before exception port could be set.");
6012 }
6013 }
6014 } else if (pid1_magic && the_exception_server) {
6015 mach_port_t mhp = mach_host_self();
6016 (void)job_assumes_zero(j, host_set_exception_ports(mhp, EXC_MASK_CRASH | EXC_MASK_RESOURCE, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f));
6017 (void)job_assumes_zero(j, launchd_mport_deallocate(mhp));
6018 }
6019 }
6020
6021 void
6022 job_set_exception_port(job_t j, mach_port_t port)
6023 {
6024 if (unlikely(!the_exception_server)) {
6025 the_exception_server = port;
6026 job_setup_exception_port(j, 0);
6027 } else {
6028 job_log(j, LOG_WARNING, "The exception server is already claimed!");
6029 }
6030 }
6031
6032 void
6033 machservice_setup_options(launch_data_t obj, const char *key, void *context)
6034 {
6035 struct machservice *ms = context;
6036 mach_port_t mhp = mach_host_self();
6037 int which_port;
6038 bool b;
6039
6040 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
6041 return;
6042 }
6043
6044 switch (launch_data_get_type(obj)) {
6045 case LAUNCH_DATA_INTEGER:
6046 which_port = (int)launch_data_get_integer(obj); // XXX we should bound check this...
6047 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
6048 switch (which_port) {
6049 case TASK_KERNEL_PORT:
6050 case TASK_HOST_PORT:
6051 case TASK_NAME_PORT:
6052 case TASK_BOOTSTRAP_PORT:
6053 /* I find it a little odd that zero isn't reserved in the header.
6054 * Normally Mach is fairly good about this convention...
6055 */
6056 case 0:
6057 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
6058 break;
6059 default:
6060 ms->special_port_num = which_port;
6061 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
6062 break;
6063 }
6064 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
6065 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
6066 (void)job_assumes_zero(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)));
6067 } else {
6068 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
6069 }
6070 }
break; // Don't fall through into the LAUNCH_DATA_BOOL case with an integer payload.
6071 case LAUNCH_DATA_BOOL:
6072 b = launch_data_get_bool(obj);
6073 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
6074 ms->debug_on_close = b;
6075 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
6076 ms->reset = b;
6077 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
6078 ms->hide = b;
6079 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
6080 job_set_exception_port(ms->job, ms->port);
6081 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
6082 ms->kUNCServer = b;
6083 (void)job_assumes_zero(ms->job, host_set_UNDServer(mhp, ms->port));
6084 }
6085 break;
6086 case LAUNCH_DATA_STRING:
6087 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
6088 const char *option = launch_data_get_string(obj);
6089 if (strcasecmp(option, "One") == 0) {
6090 ms->drain_one_on_crash = true;
6091 } else if (strcasecmp(option, "All") == 0) {
6092 ms->drain_all_on_crash = true;
6093 }
6094 }
6095 break;
6096 case LAUNCH_DATA_DICTIONARY:
6097 job_set_exception_port(ms->job, ms->port);
6098 break;
6099 default:
6100 break;
6101 }
6102
6103 (void)job_assumes_zero(ms->job, launchd_mport_deallocate(mhp));
6104 }
6105
6106 void
6107 machservice_setup(launch_data_t obj, const char *key, void *context)
6108 {
6109 job_t j = context;
6110 struct machservice *ms;
6111 mach_port_t p = MACH_PORT_NULL;
6112
6113 if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
6114 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
6115 return;
6116 }
6117
6118 if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
6119 return;
6120 }
6121
6122 ms->isActive = false;
6123 ms->upfront = true;
6124
6125 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
6126 launch_data_dict_iterate(obj, machservice_setup_options, ms);
6127 }
6128 }
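/* Illustrative sketch (not part of the original file): a MachServices entry
 * whose value is a dictionary is fed, key by key, into
 * machservice_setup_options() above. Built with liblaunch calls, such an
 * entry might look like this.
 */
#if 0
static launch_data_t
example_machservice_options(void)
{
	launch_data_t opts = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	// Hide the service from lookups until the job checks in.
	launch_data_dict_insert(opts, launch_data_new_bool(true), LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN);

	// Recreate the receive right whenever the port is closed.
	launch_data_dict_insert(opts, launch_data_new_bool(true), LAUNCH_JOBKEY_MACH_RESETATCLOSE);

	return opts;
}
#endif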
6129
6130 jobmgr_t
6131 jobmgr_do_garbage_collection(jobmgr_t jm)
6132 {
6133 jobmgr_t jmi = NULL, jmn = NULL;
6134 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6135 jobmgr_do_garbage_collection(jmi);
6136 }
6137
6138 if (!jm->shutting_down) {
6139 return jm;
6140 }
6141
6142 if (SLIST_EMPTY(&jm->submgrs)) {
6143 jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
6144 } else {
6145 jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
6146 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
6147 jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
6148 }
6149 }
6150
6151 size_t actives = 0;
6152 job_t ji = NULL, jn = NULL;
6153 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
6154 if (ji->anonymous) {
6155 continue;
6156 }
6157
6158 // Let the shutdown monitor be up until the very end.
6159 if (ji->shutdown_monitor) {
6160 continue;
6161 }
6162
6163 /* On our first pass through, open a transaction for all the jobs that
6164 * need to be dirty at shutdown. We'll close these transactions once the
6165 * jobs that do not need to be dirty at shutdown have all exited.
6166 */
6167 if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
6168 job_open_shutdown_transaction(ji);
6169 }
6170
6171 const char *active = job_active(ji);
6172 if (!active) {
6173 job_remove(ji);
6174 } else {
6175 job_log(ji, LOG_DEBUG, "Job is active: %s", active);
6176 job_stop(ji);
6177
6178 if (!ji->dirty_at_shutdown) {
6179 actives++;
6180 }
6181
6182 if (ji->clean_kill) {
6183 job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
6184 } else {
6185 job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
6186 }
6187 }
6188 }
6189
6190 jm->shutdown_jobs_dirtied = true;
6191 if (actives == 0) {
6192 if (!jm->shutdown_jobs_cleaned) {
6193 /* Once all normal jobs have exited, we clean the dirty-at-shutdown
6194 * jobs and make them into normal jobs so that the above loop will
6195 * handle them appropriately.
6196 */
6197 LIST_FOREACH(ji, &jm->jobs, sle) {
6198 if (ji->anonymous) {
6199 continue;
6200 }
6201
6202 if (!job_active(ji)) {
6203 continue;
6204 }
6205
6206 if (ji->shutdown_monitor) {
6207 continue;
6208 }
6209
6210 job_close_shutdown_transaction(ji);
6211 actives++;
6212 }
6213
6214 jm->shutdown_jobs_cleaned = true;
6215 }
6216
6217 if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
6218 /* We may be in a situation where the shutdown monitor is all that's
6219 * left, in which case we want to stop it. Like dirty-at-shutdown
6220 * jobs, we turn it back into a normal job so that the main loop
6221 * treats it appropriately.
6222 *
6223 * See:
6224 * <rdar://problem/10756306>
6225 * <rdar://problem/11034971>
6226 * <rdar://problem/11549541>
6227 */
6228 if (jm->monitor_shutdown && _launchd_shutdown_monitor) {
6229 /* The rest of shutdown has completed, so we can kill the shutdown
6230 * monitor now like it was any other job.
6231 */
6232 _launchd_shutdown_monitor->shutdown_monitor = false;
6233
6234 job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
6235 job_stop(_launchd_shutdown_monitor);
6236 _launchd_shutdown_monitor = NULL;
6237 } else {
6238 jobmgr_log(jm, LOG_DEBUG, "Removing.");
6239 jobmgr_remove(jm);
6240 return NULL;
6241 }
6242 }
6243 }
6244
6245 return jm;
6246 }
6247
6248 void
6249 jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6250 {
6251 /* I maintain that stray processes should be at the mercy of launchd during
6252 * shutdown, but nevertheless, things like diskimages-helper can stick
6253 * around, and SIGKILLing them can result in data loss. So we send SIGTERM
6254 * to all the strays and don't wait for them to exit before moving on.
6255 *
6256 * See rdar://problem/6562592
6257 */
6258 size_t i = 0;
6259 for (i = 0; i < np; i++) {
6260 if (p[i] != 0) {
6261 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
6262 (void)jobmgr_assumes_zero_p(jm, kill2(p[i], SIGTERM));
6263 }
6264 }
6265 }
6266
6267 void
6268 jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
6269 {
6270 size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6271 pid_t *pids = NULL;
6272 int i = 0, kp_cnt = 0;
6273
6274 if (likely(jm->parentmgr || !pid1_magic)) {
6275 return;
6276 }
6277
6278 if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
6279 return;
6280 }
6281
6282 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6283
6284 if (jobmgr_assumes_zero_p(jm, (kp_cnt = proc_listallpids(pids, len))) == -1) {
6285 goto out;
6286 }
6287
6288 pid_t *ps = (pid_t *)calloc(kp_cnt, sizeof(pid_t));
if (!jobmgr_assumes(jm, ps != NULL)) {
	goto out;
}
6289 for (i = 0; i < kp_cnt; i++) {
6290 struct proc_bsdshortinfo proc;
6291 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6292 if (errno != ESRCH) {
6293 (void)jobmgr_assumes_zero(jm, errno);
6294 }
6295
6296 kp_skipped++;
6297 continue;
6298 }
6299
6300 pid_t p_i = pids[i];
6301 pid_t pp_i = proc.pbsi_ppid;
6302 pid_t pg_i = proc.pbsi_pgid;
6303 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6304 const char *n = proc.pbsi_comm;
6305
6306 if (unlikely(p_i == 0 || p_i == 1)) {
6307 kp_skipped++;
6308 continue;
6309 }
6310
6311 if (_launchd_shutdown_monitor && pp_i == _launchd_shutdown_monitor->p) {
6312 kp_skipped++;
6313 continue;
6314 }
6315
6316 // We might have some jobs hanging around that we've decided to shut down in spite of.
6317 job_t j = jobmgr_find_by_pid(jm, p_i, false);
6318 if (!j || j->anonymous) {
6319 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
6320
6321 int status = 0;
6322 if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
6323 if (jobmgr_assumes_zero(jm, waitpid(p_i, &status, WNOHANG)) == 0) {
6324 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6325 }
6326 kp_skipped++;
6327 } else {
6328 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6329 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6330 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6331 * hints to the kernel along the way, so that it could shut down certain subsystems when
6332 * their userspace emissaries go away, before the call to reboot(2).
6333 */
6334 if (leader && leader->ignore_pg_at_shutdown) {
6335 kp_skipped++;
6336 } else {
6337 ps[i] = p_i;
6338 }
6339 }
6340 } else {
6341 kp_skipped++;
6342 }
6343 }
6344
6345 if ((kp_cnt - kp_skipped > 0) && kill_strays) {
	// ps[] is sparse (entries keep their original index), so scan the whole array.
6346 jobmgr_kill_stray_children(jm, ps, kp_cnt);
6347 }
6348
6349 free(ps);
6350 out:
6351 free(pids);
6352 }
6353
6354 jobmgr_t
6355 jobmgr_parent(jobmgr_t jm)
6356 {
6357 return jm->parentmgr;
6358 }
6359
6360 void
6361 job_uncork_fork(job_t j)
6362 {
6363 pid_t c = j->p;
6364
6365 job_log(j, LOG_DEBUG, "Uncorking the fork().");
6366 /* this unblocks the child and avoids a race
6367 * between the above fork() and the kevent_mod() */
6368 (void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
6369 (void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
6370 j->fork_fd = 0;
6371 }
6372
6373 jobmgr_t
6374 jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
6375 {
6376 job_t bootstrapper = NULL;
6377 jobmgr_t jmr;
6378
6379 __OSX_COMPILETIME_ASSERT__(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);
6380
6381 if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
6382 jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
6383 return NULL;
6384 }
6385
6386 jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));
6387
6388 if (!jobmgr_assumes(jm, jmr != NULL)) {
6389 return NULL;
6390 }
6391
6392 if (jm == NULL) {
6393 root_jobmgr = jmr;
6394 }
6395
6396 jmr->kqjobmgr_callback = jobmgr_callback;
6397 strcpy(jmr->name_init, name ? name : "Under construction");
6398
6399 jmr->req_port = requestorport;
6400
6401 if ((jmr->parentmgr = jm)) {
6402 SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
6403 }
6404
6405 if (jm && jobmgr_assumes_zero(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME)) != KERN_SUCCESS) {
6406 goto out_bad;
6407 }
6408
6409 if (transfer_port != MACH_PORT_NULL) {
6410 (void)jobmgr_assumes(jmr, jm != NULL);
6411 jmr->jm_port = transfer_port;
6412 } else if (!jm && !pid1_magic) {
6413 char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
6414 name_t service_buf;
6415
6416 snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());
6417
6418 if (jobmgr_assumes_zero(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port)) != 0) {
6419 goto out_bad;
6420 }
6421
6422 if (trusted_fd) {
6423 int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);
6424
6425 if ((dfd = dup(lfd)) >= 0) {
6426 (void)jobmgr_assumes_zero_p(jmr, runtime_close(dfd));
6427 (void)jobmgr_assumes_zero_p(jmr, runtime_close(lfd));
6428 }
6429
6430 unsetenv(LAUNCHD_TRUSTED_FD_ENV);
6431 }
6432
6433 // Cut off the Libc cache; we don't want to deadlock against ourselves.
6434 inherited_bootstrap_port = bootstrap_port;
6435 bootstrap_port = MACH_PORT_NULL;
6436 osx_assert_zero(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME));
6437
6438 // We set this explicitly as we start each child
6439 osx_assert_zero(launchd_set_bport(MACH_PORT_NULL));
6440 } else if (jobmgr_assumes_zero(jmr, launchd_mport_create_recv(&jmr->jm_port)) != KERN_SUCCESS) {
6441 goto out_bad;
6442 }
6443
6444 if (!name) {
6445 sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
6446 }
6447
6448 if (!jm) {
6449 (void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
6450 (void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
6451 (void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
6452 (void)jobmgr_assumes_zero_p(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr));
6453 }
6454
6455 if (name && !skip_init) {
6456 bootstrapper = jobmgr_init_session(jmr, name, sflag);
6457 }
6458
6459 if (!bootstrapper || !bootstrapper->weird_bootstrap) {
6460 if (jobmgr_assumes_zero(jmr, runtime_add_mport(jmr->jm_port, job_server)) != KERN_SUCCESS) {
6461 goto out_bad;
6462 }
6463 }
6464
6465 jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");
6466
6467 if (bootstrapper) {
6468 bootstrapper->asport = asport;
6469
6470 jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
6471 (void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
6472 } else {
6473 jmr->req_asport = asport;
6474 }
6475
6476 if (asport != MACH_PORT_NULL) {
6477 (void)jobmgr_assumes_zero(jmr, launchd_mport_copy_send(asport));
6478 }
6479
6480 if (jmr->parentmgr) {
6481 runtime_add_weak_ref();
6482 }
6483
6484 return jmr;
6485
6486 out_bad:
6487 if (jmr) {
6488 jobmgr_remove(jmr);
6489 if (jm == NULL) {
6490 root_jobmgr = NULL;
6491 }
6492 }
6493 return NULL;
6494 }
6495
6496 jobmgr_t
6497 jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
6498 {
6499 jobmgr_t new = NULL;
6500
6501 /* These job managers are basically singletons, so we use the root Mach
6502 * bootstrap port as their requestor ports so they'll never go away.
6503 */
6504 mach_port_t req_port = root_jobmgr->jm_port;
6505 if (jobmgr_assumes_zero(jm, launchd_mport_make_send(req_port)) == KERN_SUCCESS) {
6506 new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
6507 if (new) {
6508 new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
6509 new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
6510 new->xpc_singleton = true;
6511 }
6512 }
6513
6514 return new;
6515 }
6516
6517 jobmgr_t
6518 jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
6519 {
6520 jobmgr_t jmi = NULL;
6521 LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
6522 if (jmi->req_euid == uid) {
6523 return jmi;
6524 }
6525 }
6526
6527 name_t name;
6528 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
6529 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6530 if (jobmgr_assumes(jm, jmi != NULL)) {
6531 /* We need to create a per-user launchd for this UID if there isn't one
6532 * already so we can grab the bootstrap port.
6533 */
6534 job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
6535 if (jobmgr_assumes(jmi, puj != NULL)) {
6536 (void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(puj->asport));
6537 (void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(jmi->req_bsport));
6538 jmi->shortdesc = "per-user";
6539 jmi->req_asport = puj->asport;
6540 jmi->req_asid = puj->asid;
6541 jmi->req_euid = uid;
6542 jmi->req_egid = -1;
6543
6544 LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
6545 } else {
6546 jobmgr_remove(jmi);
6547 }
6548 }
6549
6550 return jmi;
6551 }
6552
6553 jobmgr_t
6554 jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
6555 {
6556 jobmgr_t jmi = NULL;
6557 LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
6558 if (jmi->req_asid == asid) {
6559 return jmi;
6560 }
6561 }
6562
6563 name_t name;
6564 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
6565 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6566 if (jobmgr_assumes(jm, jmi != NULL)) {
6567 (void)jobmgr_assumes_zero(jmi, launchd_mport_make_send(root_jobmgr->jm_port));
6568 jmi->shortdesc = "per-session";
6569 jmi->req_bsport = root_jobmgr->jm_port;
6570 (void)jobmgr_assumes_zero(jmi, audit_session_port(asid, &jmi->req_asport));
6571 jmi->req_asid = asid;
6572 jmi->req_euid = -1;
6573 jmi->req_egid = -1;
6574
6575 LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
6576 } else {
6577 jobmgr_remove(jmi);
6578 }
6579
6580 return jmi;
6581 }
6582
6583 job_t
6584 jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
6585 {
6586 const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
6587 char thelabel[1000];
6588 job_t bootstrapper;
6589
6590 snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
6591 bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
6592
6593 if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
6594 bootstrapper->is_bootstrapper = true;
6595 char buf[100];
6596
6597 // <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs)
6598 snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
6599 envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
6600 bootstrapper->weird_bootstrap = true;
6601 (void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
6602 } else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
6603 #if TARGET_OS_EMBEDDED
6604 bootstrapper->pstype = POSIX_SPAWN_IOS_INTERACTIVE;
6605 #endif
6606 bootstrapper->is_bootstrapper = true;
6607 if (jobmgr_assumes(jm, pid1_magic)) {
6608 // Have our system bootstrapper print out to the console.
6609 bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
6610 bootstrapper->stderrpath = strdup(_PATH_CONSOLE);
6611
6612 if (launchd_console) {
6613 (void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)fileno(launchd_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm));
6614 }
6615 }
6616 }
6617
6618 jm->session_initialized = true;
6619 return bootstrapper;
6620 }
6621
6622 jobmgr_t
6623 jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
6624 {
6625 struct machservice *ms, *next_ms;
6626 jobmgr_t jmi, jmn;
6627
6628 /* Mach ports, unlike Unix descriptors, are reference counted. In other
6629 * words, when some program hands us a second or subsequent send right to a
6630 * port we already have open, the Mach kernel gives us the same port number
6631 * back and increments a reference count associated with the port. This
6632 * forces us, when discovering that a receive right at the other end
6633 * has been deleted, to wander all of our objects to see what weird places
6634 * clients might have handed us the same send right to use.
6635 */
6636
6637 if (jm == root_jobmgr) {
6638 if (port == inherited_bootstrap_port) {
6639 (void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(port));
6640 inherited_bootstrap_port = MACH_PORT_NULL;
6641
6642 return jobmgr_shutdown(jm);
6643 }
6644
6645 LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
6646 if (ms->port == port && !ms->recv) {
6647 machservice_delete(ms->job, ms, true);
6648 }
6649 }
6650 }
6651
6652 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6653 jobmgr_delete_anything_with_port(jmi, port);
6654 }
6655
6656 if (jm->req_port == port) {
6657 jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
6658 return jobmgr_shutdown(jm);
6659 }
6660
6661 return jm;
6662 }
6663
6664 struct machservice *
6665 jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
6666 {
6667 struct machservice *ms;
6668 job_t target_j;
6669
6670 jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);
6671
6672 if (target_pid) {
6673 /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
6674 * bootstrap in other bootstraps.
6675 */
6676
6677 // Start in the given bootstrap.
6678 if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
6679 // If we fail, do a deep traversal.
6680 if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
6681 jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
6682 return NULL;
6683 }
6684 }
6685
6686 SLIST_FOREACH(ms, &target_j->machservices, sle) {
6687 if (ms->per_pid && strcmp(name, ms->name) == 0) {
6688 return ms;
6689 }
6690 }
6691
6692 job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
6693 return NULL;
6694 }
6695
6696 jobmgr_t where2look = jm;
6697 // XPC domains are separate from Mach bootstraps.
6698 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
6699 if (launchd_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
6700 where2look = root_jobmgr;
6701 }
6702 }
6703
6704 LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
6705 if (!ms->per_pid && strcmp(name, ms->name) == 0) {
6706 return ms;
6707 }
6708 }
6709
6710 if (jm->parentmgr == NULL || !check_parent) {
6711 return NULL;
6712 }
6713
6714 return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
6715 }
6716
6717 mach_port_t
6718 machservice_port(struct machservice *ms)
6719 {
6720 return ms->port;
6721 }
6722
6723 job_t
6724 machservice_job(struct machservice *ms)
6725 {
6726 return ms->job;
6727 }
6728
6729 bool
6730 machservice_hidden(struct machservice *ms)
6731 {
6732 return ms->hide;
6733 }
6734
6735 bool
6736 machservice_active(struct machservice *ms)
6737 {
6738 return ms->isActive;
6739 }
6740
6741 const char *
6742 machservice_name(struct machservice *ms)
6743 {
6744 return ms->name;
6745 }
6746
6747 void
6748 machservice_drain_port(struct machservice *ms)
6749 {
6750 bool drain_one = ms->drain_one_on_crash;
6751 bool drain_all = ms->drain_all_on_crash;
6752
6753 if (!job_assumes(ms->job, drain_one || drain_all)) {
6754 return;
6755 }
6756
6757 job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);
6758
6759 char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
6760 char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
6761 mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
6762 mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;
6763
6764 mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
6765
6766 do {
6767 /* This should be a direct check on the Mach service to see if it's an exception-handling
6768 * port, and it will break things if ReportCrash or SafetyNet start advertising other
6769 * Mach services. But for now, it should be okay.
6770 */
6771 if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
6772 mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
6773 } else {
6774 mach_msg_options_t options = MACH_RCV_MSG |
6775 MACH_RCV_TIMEOUT;
6776
6777 mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
6778 switch (mr) {
6779 case MACH_MSG_SUCCESS:
6780 mach_msg_destroy((mach_msg_header_t *)req_hdr);
6781 break;
6782 case MACH_RCV_TIMED_OUT:
6783 break;
6784 case MACH_RCV_TOO_LARGE:
6785 launchd_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
6786 break;
6787 default:
6788 break;
6789 }
6790 }
6791 } while (drain_all && mr != MACH_RCV_TIMED_OUT);
6792 }
6793
6794 void
6795 machservice_delete(job_t j, struct machservice *ms, bool port_died)
6796 {
6797 if (ms->alias) {
6798 /* HACK: Egregious code duplication. But dealing with aliases is a
6799 * pretty simple affair since they can't and shouldn't have any complex
6800 * behaviors associated with them.
6801 */
6802 LIST_REMOVE(ms, name_hash_sle);
6803 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
6804 free(ms);
6805 return;
6806 }
6807
6808 if (unlikely(ms->debug_on_close)) {
6809 job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
6810 (void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
6811 }
6812
6813 if (ms->recv && job_assumes(j, !machservice_active(ms))) {
6814 job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
6815 (void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
6816 }
6817
6818 (void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));
6819
6820 if (unlikely(ms->port == the_exception_server)) {
6821 the_exception_server = 0;
6822 }
6823
6824 job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);
6825
6826 if (ms->special_port_num) {
6827 SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
6828 }
6829 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
6830
6831 if (!(j->dedicated_instance || ms->event_channel)) {
6832 LIST_REMOVE(ms, name_hash_sle);
6833 }
6834 LIST_REMOVE(ms, port_hash_sle);
6835
6836 free(ms);
6837 }
6838
6839 void
6840 machservice_request_notifications(struct machservice *ms)
6841 {
6842 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
6843
6844 ms->isActive = true;
6845
6846 if (ms->recv) {
6847 which = MACH_NOTIFY_PORT_DESTROYED;
6848 job_checkin(ms->job);
6849 }
6850
6851 (void)job_assumes_zero(ms->job, launchd_mport_notify_req(ms->port, which));
6852 }
6853
6854 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
6855 #define END_OF(x) (&(x)[NELEM(x)])
6856
6857 char **
6858 mach_cmd2argv(const char *string)
6859 {
6860 char *argv[100], args[1000];
6861 const char *cp;
6862 char *argp = args, term, **argv_ret, *co;
6863 unsigned int nargs = 0, i;
6864
6865 for (cp = string; *cp;) {
6866 while (isspace(*cp))
6867 cp++;
6868 term = (*cp == '"') ? *cp++ : '\0';
6869 if (nargs < NELEM(argv)) {
6870 argv[nargs++] = argp;
6871 }
6872 while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args)) {
6873 if (*cp == '\\') {
6874 cp++;
6875 }
6876 *argp++ = *cp;
6877 if (*cp) {
6878 cp++;
6879 }
6880 }
6881 *argp++ = '\0';
6882 }
6883 argv[nargs] = NULL;
6884
6885 if (nargs == 0) {
6886 return NULL;
6887 }
6888
6889 argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);
6890
6891 if (!argv_ret) {
6892 (void)osx_assumes_zero(errno);
6893 return NULL;
6894 }
6895
6896 co = (char *)argv_ret + (nargs + 1) * sizeof(char *);
6897
6898 for (i = 0; i < nargs; i++) {
6899 strcpy(co, argv[i]);
6900 argv_ret[i] = co;
6901 co += strlen(argv[i]) + 1;
6902 }
6903 argv_ret[i] = NULL;
6904
6905 return argv_ret;
6906 }
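/* Illustrative sketch (not part of the original file): the vector and its
 * strings returned above live in a single allocation, so one free() releases
 * everything. The command string is made up.
 */
#if 0
static void
example_cmd2argv_usage(void)
{
	char **argv = mach_cmd2argv("/usr/libexec/example -flag \"two words\"");
	unsigned int i;

	if (argv == NULL) {
		return;
	}
	for (i = 0; argv[i] != NULL; i++) {
		// argv[0] is "/usr/libexec/example"; argv[2] is "two words".
	}
	free(argv);
}
#endif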
6907
6908 void
6909 job_checkin(job_t j)
6910 {
6911 j->checkedin = true;
6912 }
6913
6914 bool job_is_god(job_t j)
6915 {
6916 return j->embedded_god;
6917 }
6918
6919 bool
6920 job_ack_port_destruction(mach_port_t p)
6921 {
6922 struct machservice *ms;
6923 job_t j;
6924
6925 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
6926 if (ms->recv && (ms->port == p)) {
6927 break;
6928 }
6929 }
6930
6931 if (!ms) {
6932 launchd_syslog(LOG_WARNING, "Could not find MachService to match receive right: 0x%x", p);
6933 return false;
6934 }
6935
6936 j = ms->job;
6937
6938 jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
6939
6940 /* Without being the exception handler, NOTE_EXIT is our only way to tell if
6941 * the job crashed, and we can't rely on NOTE_EXIT always being processed
6942 * after all the job's receive rights have been returned.
6943 *
6944 * So when we get receive rights back, check to see if the job has been
6945 * reaped yet. If not, then we add this service to a list of services to be
6946 * drained on crash if it's requested that behavior. So, for a job with N
6947 * receive rights all requesting that they be drained on crash, we can
6948 * safely handle the following sequence of events.
6949 *
6950 * ReceiveRight0Returned
6951 * ReceiveRight1Returned
6952 * ReceiveRight2Returned
6953 * NOTE_EXIT (reap, get exit status)
6954 * ReceiveRight3Returned
6955 * .
6956 * .
6957 * .
6958 * ReceiveRight(N - 1)Returned
6959 */
6960 if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
6961 if (j->crashed && j->reaped) {
6962 job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
6963 machservice_drain_port(ms);
6964 } else if (!(j->crashed || j->reaped)) {
6965 job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
6966 }
6967 }
6968
6969 ms->isActive = false;
6970 if (ms->delete_on_destruction) {
6971 machservice_delete(j, ms, false);
6972 } else if (ms->reset) {
6973 machservice_resetport(j, ms);
6974 }
6975
6976 machservice_stamp_port(j, ms);
6977 job_dispatch(j, false);
6978
6979 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
6980
6981 return true;
6982 }
6983
6984 void
6985 job_ack_no_senders(job_t j)
6986 {
6987 j->priv_port_has_senders = false;
6988
6989 (void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
6990 j->j_port = 0;
6991
6992 job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");
6993
6994 job_dispatch(j, false);
6995 }
6996
6997 bool
6998 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
6999 {
7000 struct semaphoreitem *si;
7001 size_t alloc_sz = sizeof(struct semaphoreitem);
7002
7003 if (what) {
7004 alloc_sz += strlen(what) + 1;
7005 }
7006
7007 if (!job_assumes(j, (si = calloc(1, alloc_sz)) != NULL)) {
7008 return false;
7009 }
7010
7011 si->why = why;
7012
7013 if (what) {
7014 strcpy(si->what_init, what);
7015 }
7016
7017 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
7018
7019 if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
7020 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
7021 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
7022 j->nosy = true;
7023 }
7024
7025 semaphoreitem_runtime_mod_ref(si, true);
7026
7027 return true;
7028 }
7029
7030 void
7031 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
7032 {
7033 /*
7034 * External events need to be tracked.
7035 * Internal events do NOT need to be tracked.
7036 */
7037
7038 switch (si->why) {
7039 case SUCCESSFUL_EXIT:
7040 case FAILED_EXIT:
7041 case OTHER_JOB_ENABLED:
7042 case OTHER_JOB_DISABLED:
7043 case OTHER_JOB_ACTIVE:
7044 case OTHER_JOB_INACTIVE:
7045 return;
7046 default:
7047 break;
7048 }
7049
7050 if (add) {
7051 runtime_add_weak_ref();
7052 } else {
7053 runtime_del_weak_ref();
7054 }
7055 }
7056
7057 void
7058 semaphoreitem_delete(job_t j, struct semaphoreitem *si)
7059 {
7060 semaphoreitem_runtime_mod_ref(si, false);
7061
7062 SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
7063
7064 // We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores.
7065 if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
7066 j->nosy = false;
7067 SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
7068 }
7069
7070 free(si);
7071 }
7072
7073 void
7074 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
7075 {
7076 struct semaphoreitem_dict_iter_context *sdic = context;
7077 semaphore_reason_t why;
7078
7079 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
7080
7081 semaphoreitem_new(sdic->j, why, key);
7082 }
7083
7084 void
7085 semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
7086 {
7087 struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
7088 job_t j = context;
7089 semaphore_reason_t why;
7090
7091 switch (launch_data_get_type(obj)) {
7092 case LAUNCH_DATA_BOOL:
7093 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
7094 why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
7095 semaphoreitem_new(j, why, NULL);
7096 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
7097 why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
7098 semaphoreitem_new(j, why, NULL);
7099 j->start_pending = true;
7100 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
7101 j->needs_kickoff = launch_data_get_bool(obj);
7102 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
7103 why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
7104 semaphoreitem_new(j, why, NULL);
7105 j->start_pending = true;
7106 } else {
7107 job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
7108 }
7109 break;
7110 case LAUNCH_DATA_DICTIONARY:
7111 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
7112 sdic.why_true = OTHER_JOB_ACTIVE;
7113 sdic.why_false = OTHER_JOB_INACTIVE;
7114 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
7115 sdic.why_true = OTHER_JOB_ENABLED;
7116 sdic.why_false = OTHER_JOB_DISABLED;
7117 } else {
7118 job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
7119 break;
7120 }
7121
7122 launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
7123 break;
7124 default:
7125 job_log(j, LOG_ERR, "Unrecognized KeepAlive type: %u", launch_data_get_type(obj));
7126 break;
7127 }
7128 }
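/* Illustrative sketch (not part of the original file): the shape of a
 * KeepAlive dictionary this iterator consumes, built with liblaunch calls.
 * semaphoreitem_setup() receives each value below in turn via
 * launch_data_dict_iterate(). The label "com.example.helper" is hypothetical.
 */
#if 0
static launch_data_t
example_keepalive_dict(void)
{
	launch_data_t ka = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
	launch_data_t jobs = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	// SuccessfulExit = false: restart the job only after a failed exit.
	launch_data_dict_insert(ka, launch_data_new_bool(false), LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT);

	// OtherJobEnabled: keep the job alive while com.example.helper is loaded.
	launch_data_dict_insert(jobs, launch_data_new_bool(true), "com.example.helper");
	launch_data_dict_insert(ka, jobs, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED);

	return ka;
}
#endif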
7129
7130 bool
7131 externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event)
7132 {
7133 if (j->event_monitor) {
7134 job_log(j, LOG_ERR, "The event monitor job cannot use LaunchEvents or XPC Events.");
7135 return false;
7136 }
7137
7138 struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
7139 if (!ee) {
7140 return false;
7141 }
7142
7143 ee->event = xpc_retain(event);
7144 (void)strcpy(ee->name, evname);
7145 ee->job = j;
7146 ee->id = sys->curid;
7147 ee->sys = sys;
7148 ee->state = false;
7149 ee->wanted_state = true;
7150 sys->curid++;
7151
7152 if (sys == _launchd_support_system) {
7153 ee->internal = true;
7154 }
7155
7156 LIST_INSERT_HEAD(&j->events, ee, job_le);
7157 LIST_INSERT_HEAD(&sys->events, ee, sys_le);
7158
7159 job_log(j, LOG_DEBUG, "New event: %s/%s", sys->name, evname);
7160
7161 eventsystem_ping();
7162 return true;
7163 }
7164
7165 void
7166 externalevent_delete(struct externalevent *ee)
7167 {
7168 xpc_release(ee->event);
7169 LIST_REMOVE(ee, job_le);
7170 LIST_REMOVE(ee, sys_le);
7171
7172 free(ee);
7173
7174 eventsystem_ping();
7175 }
7176
7177 void
7178 externalevent_setup(launch_data_t obj, const char *key, void *context)
7179 {
7180 /* This method can ONLY be called on the job_import() path, as it assumes
7181 * the input is a launch_data_t.
7182 */
7183 struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
7184
7185 xpc_object_t xobj = ld2xpc(obj);
7186 if (xobj) {
7187 job_log(ctx->j, LOG_DEBUG, "Importing stream/event: %s/%s", ctx->sys->name, key);
7188 externalevent_new(ctx->j, ctx->sys, key, xobj);
7189 xpc_release(xobj);
7190 } else {
7191 job_log(ctx->j, LOG_ERR, "Could not import event for job: %s", key);
7192 }
7193 }
7194
7195 struct externalevent *
7196 externalevent_find(const char *sysname, uint64_t id)
7197 {
7198 struct externalevent *ei = NULL;
7199
7200 struct eventsystem *es = eventsystem_find(sysname);
7201 if (es != NULL) {
7202 LIST_FOREACH(ei, &es->events, sys_le) {
7203 if (ei->id == id) {
7204 break;
7205 }
7206 }
7207 } else {
7208 launchd_syslog(LOG_ERR, "Could not find event system: %s", sysname);
7209 }
7210
7211 return ei;
7212 }
7213
7214 struct eventsystem *
7215 eventsystem_new(const char *name)
7216 {
7217 struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
7218 if (es != NULL) {
7219 es->curid = 1;
7220 (void)strcpy(es->name, name);
7221 LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
7222 } else {
7223 (void)osx_assumes_zero(errno);
7224 }
7225
7226 return es;
7227 }
7228
7229 void
7230 eventsystem_delete(struct eventsystem *es)
7231 {
7232 struct externalevent *ei = NULL;
7233 while ((ei = LIST_FIRST(&es->events))) {
7234 externalevent_delete(ei);
7235 }
7236
7237 LIST_REMOVE(es, global_le);
7238
7239 free(es);
7240 }
7241
7242 void
7243 eventsystem_setup(launch_data_t obj, const char *key, void *context)
7244 {
7245 job_t j = (job_t)context;
7246 if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
7247 return;
7248 }
7249
7250 struct eventsystem *sys = eventsystem_find(key);
7251 if (unlikely(sys == NULL)) {
7252 sys = eventsystem_new(key);
7253 job_log(j, LOG_DEBUG, "New event system: %s", key);
7254 }
7255
7256 if (job_assumes(j, sys != NULL)) {
7257 struct externalevent_iter_ctx ctx = {
7258 .j = j,
7259 .sys = sys,
7260 };
7261
7262 job_log(j, LOG_DEBUG, "Importing events for stream: %s", key);
7263 launch_data_dict_iterate(obj, externalevent_setup, &ctx);
7264 }
7265 }
7266
7267 struct eventsystem *
7268 eventsystem_find(const char *name)
7269 {
7270 struct eventsystem *esi = NULL;
7271 LIST_FOREACH(esi, &_s_event_systems, global_le) {
7272 if (strcmp(name, esi->name) == 0) {
7273 break;
7274 }
7275 }
7276
7277 return esi;
7278 }
7279
7280 void
7281 eventsystem_ping(void)
7282 {
7283 if (!_launchd_event_monitor) {
7284 return;
7285 }
7286
7287 if (!_launchd_event_monitor->p) {
7288 (void)job_dispatch(_launchd_event_monitor, true);
7289 } else {
7290 if (_launchd_event_monitor->event_monitor_ready2signal) {
7291 (void)job_assumes_zero_p(_launchd_event_monitor, kill(_launchd_event_monitor->p, SIGUSR1));
7292 }
7293 }
7294 }
7295
7296 void
7297 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
7298 {
7299 jobmgr_t jmi, jmn;
7300 job_t ji, jn;
7301
7303 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7304 jobmgr_dispatch_all_semaphores(jmi);
7305 }
7306
7307 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7308 if (!SLIST_EMPTY(&ji->semaphores)) {
7309 job_dispatch(ji, false);
7310 }
7311 }
7312 }
7313
7314 time_t
7315 cronemu(int mon, int mday, int hour, int min)
7316 {
7317 struct tm workingtm;
7318 time_t now;
7319
7320 now = time(NULL);
7321 workingtm = *localtime(&now);
7322
7323 workingtm.tm_isdst = -1;
7324 workingtm.tm_sec = 0;
7325 workingtm.tm_min++;
7326
7327 while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
7328 workingtm.tm_year++;
7329 workingtm.tm_mon = 0;
7330 workingtm.tm_mday = 1;
7331 workingtm.tm_hour = 0;
7332 workingtm.tm_min = 0;
7333 mktime(&workingtm);
7334 }
7335
7336 return mktime(&workingtm);
7337 }
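/* Illustrative sketch (not part of the original file): cronemu() treats -1 as
 * a wildcard, so the next daily 03:30 fire date is computed like so. The
 * result presumably feeds the calendar alarm machinery above.
 */
#if 0
static void
example_next_fire_date(void)
{
	// Any month, any day of the month, at 03:30 local time.
	time_t next = cronemu(-1, -1, 3, 30);

	// `next` is strictly in the future relative to time(NULL) at call time.
	(void)next;
}
#endif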
7338
7339 time_t
7340 cronemu_wday(int wday, int hour, int min)
7341 {
7342 struct tm workingtm;
7343 time_t now;
7344
7345 now = time(NULL);
7346 workingtm = *localtime(&now);
7347
7348 workingtm.tm_isdst = -1;
7349 workingtm.tm_sec = 0;
7350 workingtm.tm_min++;
7351
7352 if (wday == 7) {
7353 wday = 0;
7354 }
7355
7356 while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
7357 workingtm.tm_mday++;
7358 workingtm.tm_hour = 0;
7359 workingtm.tm_min = 0;
7360 mktime(&workingtm);
7361 }
7362
7363 return mktime(&workingtm);
7364 }
7365
7366 bool
7367 cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
7368 {
7369 if (mon == -1) {
7370 struct tm workingtm = *wtm;
7371 int carrytest;
7372
7373 while (!cronemu_mday(&workingtm, mday, hour, min)) {
7374 workingtm.tm_mon++;
7375 workingtm.tm_mday = 1;
7376 workingtm.tm_hour = 0;
7377 workingtm.tm_min = 0;
7378 carrytest = workingtm.tm_mon;
7379 mktime(&workingtm);
7380 if (carrytest != workingtm.tm_mon) {
7381 return false;
7382 }
7383 }
7384 *wtm = workingtm;
7385 return true;
7386 }
7387
7388 if (mon < wtm->tm_mon) {
7389 return false;
7390 }
7391
7392 if (mon > wtm->tm_mon) {
7393 wtm->tm_mon = mon;
7394 wtm->tm_mday = 1;
7395 wtm->tm_hour = 0;
7396 wtm->tm_min = 0;
7397 }
7398
7399 return cronemu_mday(wtm, mday, hour, min);
7400 }
7401
7402 bool
7403 cronemu_mday(struct tm *wtm, int mday, int hour, int min)
7404 {
7405 if (mday == -1) {
7406 struct tm workingtm = *wtm;
7407 int carrytest;
7408
7409 while (!cronemu_hour(&workingtm, hour, min)) {
7410 workingtm.tm_mday++;
7411 workingtm.tm_hour = 0;
7412 workingtm.tm_min = 0;
7413 carrytest = workingtm.tm_mday;
7414 mktime(&workingtm);
7415 if (carrytest != workingtm.tm_mday) {
7416 return false;
7417 }
7418 }
7419 *wtm = workingtm;
7420 return true;
7421 }
7422
7423 if (mday < wtm->tm_mday) {
7424 return false;
7425 }
7426
7427 if (mday > wtm->tm_mday) {
7428 wtm->tm_mday = mday;
7429 wtm->tm_hour = 0;
7430 wtm->tm_min = 0;
7431 }
7432
7433 return cronemu_hour(wtm, hour, min);
7434 }
7435
7436 bool
7437 cronemu_hour(struct tm *wtm, int hour, int min)
7438 {
7439 if (hour == -1) {
7440 struct tm workingtm = *wtm;
7441 int carrytest;
7442
7443 while (!cronemu_min(&workingtm, min)) {
7444 workingtm.tm_hour++;
7445 workingtm.tm_min = 0;
7446 carrytest = workingtm.tm_hour;
7447 mktime(&workingtm);
7448 if (carrytest != workingtm.tm_hour) {
7449 return false;
7450 }
7451 }
7452 *wtm = workingtm;
7453 return true;
7454 }
7455
7456 if (hour < wtm->tm_hour) {
7457 return false;
7458 }
7459
7460 if (hour > wtm->tm_hour) {
7461 wtm->tm_hour = hour;
7462 wtm->tm_min = 0;
7463 }
7464
7465 return cronemu_min(wtm, min);
7466 }
7467
7468 bool
7469 cronemu_min(struct tm *wtm, int min)
7470 {
7471 if (min == -1) {
7472 return true;
7473 }
7474
7475 if (min < wtm->tm_min) {
7476 return false;
7477 }
7478
7479 if (min > wtm->tm_min) {
7480 wtm->tm_min = min;
7481 }
7482
7483 return true;
7484 }
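
/* The cronemu*() family above emulates cron's wildcard semantics for
 * StartCalendarInterval: -1 means "any", and each helper advances the working
 * struct tm until every field matches, using mktime(3) to detect carry into
 * the next larger unit. A rough usage sketch (the variable names are
 * illustrative, not the actual call sites):
 *
 *	// next time the wall clock reads minute 30 of any hour/day/month
 *	time_t next = cronemu(-1, -1, -1, 30);
 *
 *	// next Sunday at 03:00
 *	time_t sunday = cronemu_wday(0, 3, 0);
 *
 * The returned absolute time can then back an EVFILT_TIMER kevent armed with
 * NOTE_ABSOLUTE | NOTE_SECONDS.
 */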
7485
7486 kern_return_t
7487 job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
7488 {
7489 struct ldcred *ldc = runtime_get_caller_creds();
7490 job_t js;
7491
7492 if (!j) {
7493 return BOOTSTRAP_NO_MEMORY;
7494 }
7495
7496 if (unlikely(j->deny_job_creation)) {
7497 return BOOTSTRAP_NOT_PRIVILEGED;
7498 }
7499
7500 #if HAVE_SANDBOX
7501 const char **argv = (const char **)mach_cmd2argv(server_cmd);
7502 if (unlikely(argv == NULL)) {
7503 return BOOTSTRAP_NO_MEMORY;
7504 }
7505 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
7506 free(argv);
7507 return BOOTSTRAP_NOT_PRIVILEGED;
7508 }
7509 free(argv);
7510 #endif
7511
7512 job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);
7513
7514 if (pid1_magic) {
7515 if (ldc->euid || ldc->uid) {
7516 job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
7517 return VPROC_ERR_TRY_PER_USER;
7518 }
7519 } else {
7520 if (unlikely(server_uid != getuid())) {
7521 job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
7522 server_cmd, getuid(), server_uid);
7523 }
7524 server_uid = 0; // zero means "do nothing"
7525 }
7526
7527 js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);
7528
7529 if (unlikely(js == NULL)) {
7530 return BOOTSTRAP_NO_MEMORY;
7531 }
7532
7533 *server_portp = js->j_port;
7534 return BOOTSTRAP_SUCCESS;
7535 }
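
/* Client-side sketch: job_mig_create_server() backs the legacy
 * bootstrap_create_server() API from bootstrap.h. A mach_init-style daemon
 * would be declared roughly like so (the path and arguments below are
 * hypothetical):
 *
 *	mach_port_t server_port = MACH_PORT_NULL;
 *	kern_return_t kr = bootstrap_create_server(bootstrap_port,
 *	    "/usr/libexec/exampled -d", 0, true, &server_port);
 *
 * On success, server_port is the bootstrap port of the newly created job.
 */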
7536
7537 kern_return_t
7538 job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
7539 {
7540 struct ldcred *ldc = runtime_get_caller_creds();
7541 job_t otherj;
7542
7543 if (!j) {
7544 return BOOTSTRAP_NO_MEMORY;
7545 }
7546
7547 if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
7548 #if TARGET_OS_EMBEDDED
7549 if (!j->embedded_god) {
7550 return BOOTSTRAP_NOT_PRIVILEGED;
7551 }
7552 #else
7553 return BOOTSTRAP_NOT_PRIVILEGED;
7554 #endif
7555 }
7556
7557 #if HAVE_SANDBOX
7558 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
7559 return BOOTSTRAP_NOT_PRIVILEGED;
7560 }
7561 #endif
7562
7563 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
7564 return BOOTSTRAP_UNKNOWN_SERVICE;
7565 }
7566
7567 #if TARGET_OS_EMBEDDED
7568 if (j->embedded_god) {
7569 if (j->username && otherj->username) {
7570 if (strcmp(j->username, otherj->username) != 0) {
7571 return BOOTSTRAP_NOT_PRIVILEGED;
7572 }
7573 } else {
7574 return BOOTSTRAP_NOT_PRIVILEGED;
7575 }
7576 }
7577 #endif
7578
7579 if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
7580 bool do_block = otherj->p;
7581
7582 if (otherj->anonymous) {
7583 return BOOTSTRAP_NOT_PRIVILEGED;
7584 }
7585
7586 job_remove(otherj);
7587
7588 if (do_block) {
7589 job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
7590 // This is messy. We shouldn't access 'otherj' after job_remove(), but we checked otherj->p before removing it, so this still works.
7591 (void)job_assumes(otherj, waiting4removal_new(otherj, srp));
7592 return MIG_NO_REPLY;
7593 } else {
7594 return 0;
7595 }
7596 } else if (otherj->p) {
7597 (void)job_assumes_zero_p(j, kill2(otherj->p, sig));
7598 }
7599
7600 return 0;
7601 }
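
/* Client-side sketch: libvproc's _vproc_send_signal_by_label() funnels into
 * job_mig_send_signal(). Passing the magic VPROC_MAGIC_UNLOAD_SIGNAL value
 * removes the job instead of signaling it. The label below is hypothetical:
 *
 *	if (_vproc_send_signal_by_label("com.example.daemon", SIGTERM) != NULL) {
 *		// no such job, or insufficient privilege
 *	}
 */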
7602
7603 kern_return_t
7604 job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
7605 {
7606 struct ldcred *ldc = runtime_get_caller_creds();
7607
7608 if (!j) {
7609 return BOOTSTRAP_NO_MEMORY;
7610 }
7611
7612 if (!job_assumes(j, j->per_user)) {
7613 return BOOTSTRAP_NOT_PRIVILEGED;
7614 }
7615
7616 return launchd_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
7617 }
7618
7619 kern_return_t
7620 job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
7621 {
7622 struct ldcred *ldc = runtime_get_caller_creds();
7623
7624 if (!j) {
7625 return BOOTSTRAP_NO_MEMORY;
7626 }
7627
7628 if (unlikely(ldc->euid)) {
7629 return BOOTSTRAP_NOT_PRIVILEGED;
7630 }
7631
7632 return launchd_log_drain(srp, outval, outvalCnt);
7633 }
7634
7635 kern_return_t
7636 job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
7637 vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval,
7638 mach_msg_type_number_t *outvalCnt)
7639 {
7640 const char *action;
7641 launch_data_t input_obj = NULL, output_obj = NULL;
7642 size_t data_offset = 0;
7643 size_t packed_size;
7644 struct ldcred *ldc = runtime_get_caller_creds();
7645
7646 if (!j) {
7647 return BOOTSTRAP_NO_MEMORY;
7648 }
7649
7650 if (inkey && ldc->pid != j->p) {
7651 if (ldc->euid && ldc->euid != getuid()) {
7652 return BOOTSTRAP_NOT_PRIVILEGED;
7653 }
7654 }
7655
7656 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
7657 return 1;
7658 }
7659
7660 if (inkey && outkey) {
7661 action = "Swapping";
7662 } else if (inkey) {
7663 action = "Setting";
7664 } else {
7665 action = "Getting";
7666 }
7667
7668 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
7669
7670 *outvalCnt = 20 * 1024 * 1024;
7671 mig_allocate(outval, *outvalCnt);
7672 if (!job_assumes(j, *outval != 0)) {
7673 return 1;
7674 }
7675
7676 /* Note to future maintainers: launch_data_unpack() does NOT return a heap
7677 * object. The data is decoded in-place. So do not call launch_data_free()
7678 * on input_obj.
7679 */
7680 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
7681 if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
7682 goto out_bad;
7683 }
7684
7685 char *store = NULL;
7686 switch (outkey) {
7687 case VPROC_GSK_ENVIRONMENT:
7688 if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
7689 goto out_bad;
7690 }
7691 jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
7692 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
7693 if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
7694 goto out_bad;
7695 }
7696 launch_data_free(output_obj);
7697 break;
7698 case VPROC_GSK_ALLJOBS:
7699 if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
7700 goto out_bad;
7701 }
7702 ipc_revoke_fds(output_obj);
7703 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
7704 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7705 if (!job_assumes(j, packed_size != 0)) {
7706 goto out_bad;
7707 }
7708 launch_data_free(output_obj);
7709 break;
7710 case VPROC_GSK_MGR_NAME:
7711 if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
7712 goto out_bad;
7713 }
7714 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7715 if (!job_assumes(j, packed_size != 0)) {
7716 goto out_bad;
7717 }
7718
7719 launch_data_free(output_obj);
7720 break;
7721 case VPROC_GSK_JOB_OVERRIDES_DB:
7722 store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_DB, "overrides.plist");
7723 if (!store || !job_assumes(j, (output_obj = launch_data_new_string(store)) != NULL)) {
7724 free(store);
7725 goto out_bad;
7726 }
7727
7728 free(store);
7729 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7730 if (!job_assumes(j, packed_size != 0)) {
7731 goto out_bad;
7732 }
7733
7734 launch_data_free(output_obj);
7735 break;
7736 case VPROC_GSK_ZERO:
7737 mig_deallocate(*outval, *outvalCnt);
7738 *outval = 0;
7739 *outvalCnt = 0;
7740 break;
7741 default:
7742 goto out_bad;
7743 }
7744
7745 mig_deallocate(inval, invalCnt);
7746 return 0;
7747
7748 out_bad:
7749 mig_deallocate(inval, invalCnt);
7750 if (*outval) {
7751 mig_deallocate(*outval, *outvalCnt);
7752 }
7753 if (output_obj) {
7754 launch_data_free(output_obj);
7755 }
7756
7757 return 1;
7758 }
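
/* Client-side sketch: vproc_swap_complex() from vproc.h drives
 * job_mig_swap_complex(). For example, dumping the environment exported by
 * other jobs in the bootstrap (print_kv is a hypothetical iterator callback):
 *
 *	launch_data_t env = NULL;
 *	if (vproc_swap_complex(NULL, VPROC_GSK_ENVIRONMENT, NULL, &env) == NULL) {
 *		launch_data_dict_iterate(env, print_kv, NULL);
 *		launch_data_free(env);
 *	}
 */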
7759
7760 kern_return_t
7761 job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
7762 {
7763 const char *action;
7764 kern_return_t kr = 0;
7765 struct ldcred *ldc = runtime_get_caller_creds();
7766 int oldmask;
7767
7768 if (!j) {
7769 return BOOTSTRAP_NO_MEMORY;
7770 }
7771
7772 if (inkey && ldc->pid != j->p) {
7773 if (ldc->euid && ldc->euid != getuid()) {
7774 return BOOTSTRAP_NOT_PRIVILEGED;
7775 }
7776 }
7777
7778 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
7779 return 1;
7780 }
7781
7782 if (inkey && outkey) {
7783 action = "Swapping";
7784 } else if (inkey) {
7785 action = "Setting";
7786 } else {
7787 action = "Getting";
7788 }
7789
7790 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
7791
7792 switch (outkey) {
7793 case VPROC_GSK_ABANDON_PROCESS_GROUP:
7794 *outval = j->abandon_pg;
7795 break;
7796 case VPROC_GSK_LAST_EXIT_STATUS:
7797 *outval = j->last_exit_status;
7798 break;
7799 case VPROC_GSK_MGR_UID:
7800 *outval = getuid();
7801 break;
7802 case VPROC_GSK_MGR_PID:
7803 *outval = getpid();
7804 break;
7805 case VPROC_GSK_IS_MANAGED:
7806 *outval = j->anonymous ? 0 : 1;
7807 break;
7808 case VPROC_GSK_BASIC_KEEPALIVE:
7809 *outval = !j->ondemand;
7810 break;
7811 case VPROC_GSK_START_INTERVAL:
7812 *outval = j->start_interval;
7813 break;
7814 case VPROC_GSK_IDLE_TIMEOUT:
7815 *outval = j->timeout;
7816 break;
7817 case VPROC_GSK_EXIT_TIMEOUT:
7818 *outval = j->exit_timeout;
7819 break;
7820 case VPROC_GSK_GLOBAL_LOG_MASK:
7821 oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
7822 *outval = oldmask;
7823 runtime_setlogmask(oldmask);
7824 break;
7825 case VPROC_GSK_GLOBAL_UMASK:
7826 oldmask = umask(0);
7827 *outval = oldmask;
7828 umask(oldmask);
7829 break;
7830 case VPROC_GSK_TRANSACTIONS_ENABLED:
7831 job_log(j, LOG_DEBUG, "Reading EnableTransactions value.");
7832 *outval = j->enable_transactions;
7833 break;
7834 case VPROC_GSK_WAITFORDEBUGGER:
7835 *outval = j->wait4debugger;
7836 break;
7837 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
7838 *outval = j->embedded_god;
7839 break;
7840 case VPROC_GSK_ZERO:
7841 *outval = 0;
7842 break;
7843 default:
7844 kr = 1;
7845 break;
7846 }
7847
7848 switch (inkey) {
7849 case VPROC_GSK_ABANDON_PROCESS_GROUP:
7850 j->abandon_pg = (bool)inval;
7851 break;
7852 case VPROC_GSK_GLOBAL_ON_DEMAND:
7853 job_log(j, LOG_DEBUG, "Job has set global on-demand mode to: %s", inval ? "true" : "false");
7854 kr = job_set_global_on_demand(j, inval);
7855 break;
7856 case VPROC_GSK_BASIC_KEEPALIVE:
7857 j->ondemand = !inval;
7858 break;
7859 case VPROC_GSK_START_INTERVAL:
7860 if (inval > UINT32_MAX || inval < 0) {
7861 kr = 1;
7862 } else if (inval) {
7863 if (j->start_interval == 0) {
7864 runtime_add_weak_ref();
7865 }
7866 j->start_interval = (typeof(j->start_interval)) inval;
7867 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
7868 } else if (j->start_interval) {
7869 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
7870 // The enclosing else-if guarantees start_interval is nonzero here, so drop the weak ref unconditionally.
7871 runtime_del_weak_ref();
7872 j->start_interval = 0;
7873 }
7875 break;
7876 case VPROC_GSK_IDLE_TIMEOUT:
7877 if (inval < 0 || inval > UINT32_MAX) {
7878 kr = 1;
7879 } else {
7880 j->timeout = (typeof(j->timeout)) inval;
7881 }
7882 break;
7883 case VPROC_GSK_EXIT_TIMEOUT:
7884 if (inval < 0 || inval > UINT32_MAX) {
7885 kr = 1;
7886 } else {
7887 j->exit_timeout = (typeof(j->exit_timeout)) inval;
7888 }
7889 break;
7890 case VPROC_GSK_GLOBAL_LOG_MASK:
7891 if (inval < 0 || inval > UINT32_MAX) {
7892 kr = 1;
7893 } else {
7894 runtime_setlogmask((int) inval);
7895 }
7896 break;
7897 case VPROC_GSK_GLOBAL_UMASK:
7898 __OSX_COMPILETIME_ASSERT__(sizeof (mode_t) == 2);
7899 if (inval < 0 || inval > UINT16_MAX) {
7900 kr = 1;
7901 } else {
7902 #if HAVE_SANDBOX
7903 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
7904 kr = 1;
7905 } else {
7906 umask((mode_t) inval);
7907 }
7908 #endif
7909 }
7910 break;
7911 case VPROC_GSK_TRANSACTIONS_ENABLED:
7912 /* No-op. */
7913 break;
7914 case VPROC_GSK_WEIRD_BOOTSTRAP:
7915 if (job_assumes(j, j->weird_bootstrap)) {
7916 job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
7917
7918 mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_job_subsystem);
7919
7920 if (job_mig_job_subsystem.maxsize > mxmsgsz) {
7921 mxmsgsz = job_mig_job_subsystem.maxsize;
7922 }
7923
7924 (void)job_assumes_zero(j, runtime_add_mport(j->mgr->jm_port, job_server));
7925 j->weird_bootstrap = false;
7926 }
7927 break;
7928 case VPROC_GSK_WAITFORDEBUGGER:
7929 j->wait4debugger_oneshot = inval;
7930 break;
7931 case VPROC_GSK_PERUSER_SUSPEND:
7932 if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
7933 mach_port_t junk = MACH_PORT_NULL;
7934 job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
7935 if (job_assumes(j, jpu != NULL)) {
7936 struct suspended_peruser *spi = NULL;
7937 LIST_FOREACH(spi, &j->suspended_perusers, sle) {
7938 if ((int64_t)(spi->j->mach_uid) == inval) {
7939 job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
7940 break;
7941 }
7942 }
7943
7944 if (spi == NULL) {
7945 job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
7946 spi = (struct suspended_peruser *)calloc(1, sizeof(struct suspended_peruser));
7947 if (job_assumes(j, spi != NULL)) {
7948 /* Stop listening for events.
7949 *
7950 * See <rdar://problem/9014146>.
7951 */
7952 if (jpu->peruser_suspend_count == 0) {
7953 job_ignore(jpu);
7954 }
7955
7956 spi->j = jpu;
7957 spi->j->peruser_suspend_count++;
7958 LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
7959 job_stop(spi->j);
7960 *outval = jpu->p;
7961 } else {
7962 kr = BOOTSTRAP_NO_MEMORY;
7963 }
7964 }
7965 }
7966 } else {
7967 kr = 1;
7968 }
7969 break;
7970 case VPROC_GSK_PERUSER_RESUME:
7971 if (job_assumes(j, pid1_magic == true)) {
7972 struct suspended_peruser *spi = NULL, *spt = NULL;
7973 LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
7974 if ((int64_t)(spi->j->mach_uid) == inval) {
7975 spi->j->peruser_suspend_count--;
7976 LIST_REMOVE(spi, sle);
7977 job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
7978 break;
7979 }
7980 }
7981
7982 if (!job_assumes(j, spi != NULL)) {
7983 job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
7984 kr = BOOTSTRAP_NOT_PRIVILEGED;
7985 } else if (spi->j->peruser_suspend_count == 0) {
7986 job_watch(spi->j);
7987 job_dispatch(spi->j, false);
7988 free(spi);
7989 }
7990 } else {
7991 kr = 1;
7992 }
7993 break;
7994 case VPROC_GSK_ZERO:
7995 break;
7996 default:
7997 kr = 1;
7998 break;
7999 }
8000
8001 return kr;
8002 }
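
/* Client-side sketch: vproc_swap_integer() from vproc.h is the counterpart
 * of job_mig_swap_integer() for scalar keys. For example, a job could read
 * its last exit status and then flip global on-demand mode, assuming it has
 * the privilege to do so:
 *
 *	int64_t last = 0, on = 1;
 *	(void)vproc_swap_integer(NULL, VPROC_GSK_LAST_EXIT_STATUS, NULL, &last);
 *	(void)vproc_swap_integer(NULL, VPROC_GSK_GLOBAL_ON_DEMAND, &on, NULL);
 */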
8003
8004 kern_return_t
8005 job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
8006 {
8007 struct machservice *ms;
8008
8009 if (!j) {
8010 return BOOTSTRAP_NO_MEMORY;
8011 }
8012
8013 job_log(j, LOG_DEBUG, "Post fork ping.");
8014
8015 job_setup_exception_port(j, child_task);
8016
8017 SLIST_FOREACH(ms, &special_ports, special_port_sle) {
8018 if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
8019 // The TASK_ACCESS_PORT funny business is to work around 5325399.
8020 continue;
8021 }
8022
8023 errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
8024 if (errno) {
8025 if (errno == MACH_SEND_INVALID_DEST) {
8026 job_log(j, LOG_WARNING, "Task died before special ports could be set.");
8027 break;
8028 }
8029
8030 int desired_log_level = LOG_ERR;
8031 if (j->anonymous) {
8032 // 5338127
8033
8034 desired_log_level = LOG_WARNING;
8035
8036 if (ms->special_port_num == TASK_SEATBELT_PORT) {
8037 desired_log_level = LOG_DEBUG;
8038 }
8039 }
8040
8041 job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
8042 }
8043 }
8044
8045 /* MIG will not zero-initialize this pointer, so we must always do so. See
8046 * <rdar://problem/8562593>.
8047 */
8048 *asport = MACH_PORT_NULL;
8049 #if !TARGET_OS_EMBEDDED
8050 if (!j->anonymous) {
8051 /* XPC services will spawn into the root security session by default.
8052 * xpcproxy will switch them away if needed.
8053 */
8054 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
8055 job_log(j, LOG_DEBUG, "Returning session port: 0x%x", j->asport);
8056 *asport = j->asport;
8057 }
8058 }
8059 #endif
8060 (void)job_assumes_zero(j, launchd_mport_deallocate(child_task));
8061
8062 return 0;
8063 }
8064
8065 kern_return_t
8066 job_mig_reboot2(job_t j, uint64_t flags)
8067 {
8068 char who_started_the_reboot[2048] = "";
8069 struct proc_bsdshortinfo proc;
8070 struct ldcred *ldc = runtime_get_caller_creds();
8071 pid_t pid_to_log;
8072
8073 if (!j) {
8074 return BOOTSTRAP_NO_MEMORY;
8075 }
8076
8077 if (unlikely(!pid1_magic)) {
8078 return BOOTSTRAP_NOT_PRIVILEGED;
8079 }
8080
8081 #if !TARGET_OS_EMBEDDED
8082 if (unlikely(ldc->euid)) {
8083 #else
8084 if (unlikely(ldc->euid) && !j->embedded_god) {
8085 #endif
8086 return BOOTSTRAP_NOT_PRIVILEGED;
8087 }
8088
8089 for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
8090 size_t who_offset;
8091 if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
8092 if (errno != ESRCH) {
8093 (void)job_assumes_zero(j, errno);
8094 }
8095 return 1;
8096 }
8097
8098 if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
8099 job_log(j, LOG_WARNING, "A job which is its own parent started this reboot.");
8100 snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
8101 break;
8102 }
8103
8104 who_offset = strlen(who_started_the_reboot);
8105 snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
8106 " %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
8107 }
8108
8109 root_jobmgr->reboot_flags = (int)flags;
8110 job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
8111 launchd_shutdown();
8112
8113 return 0;
8114 }
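
/* Client-side sketch: the private reboot2() shim (reboot2.h) routes through
 * job_mig_reboot2() when talking to PID 1, so that launchd can run its
 * shutdown machinery before the kernel-level reboot:
 *
 *	if (reboot2(RB_HALT) != NULL) {
 *		// fall back to reboot(2) or report the failure
 *	}
 */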
8115
8116 kern_return_t
8117 job_mig_getsocket(job_t j, name_t spr)
8118 {
8119 if (!j) {
8120 return BOOTSTRAP_NO_MEMORY;
8121 }
8122
8123 if (j->deny_job_creation) {
8124 return BOOTSTRAP_NOT_PRIVILEGED;
8125 }
8126
8127 #if HAVE_SANDBOX
8128 struct ldcred *ldc = runtime_get_caller_creds();
8129 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8130 return BOOTSTRAP_NOT_PRIVILEGED;
8131 }
8132 #endif
8133
8134 ipc_server_init();
8135
8136 if (unlikely(!sockpath)) {
8137 return BOOTSTRAP_NO_MEMORY;
8138 }
8139
8140 strlcpy(spr, sockpath, sizeof(name_t)); // strlcpy, unlike strncpy, guarantees NUL termination
8141
8142 return BOOTSTRAP_SUCCESS;
8143 }
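
/* Client-side sketch: _vprocmgr_getsocket() from vproc_priv.h wraps
 * job_mig_getsocket() and yields the Unix domain socket path used for
 * launch(3) IPC:
 *
 *	name_t spr;
 *	if (_vprocmgr_getsocket(spr) == NULL) {
 *		printf("%s\n", spr);
 *	}
 */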
8144
8145 kern_return_t
8146 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8147 {
8148 if (!j) {
8149 return BOOTSTRAP_NO_MEMORY;
8150 }
8151
8152 if ((errno = err)) {
8153 job_log_error(j, pri, "%s", msg);
8154 } else {
8155 job_log(j, pri, "%s", msg);
8156 }
8157
8158 return 0;
8159 }
8160
8161 void
8162 job_setup_per_user_directory(job_t j, uid_t uid, const char *path)
8163 {
8164 struct stat sb;
8165
8166 bool created = false;
8167 int r = stat(path, &sb);
8168 if ((r == -1 && errno == ENOENT) || (r == 0 && !S_ISDIR(sb.st_mode))) {
8169 if (r == 0) {
8170 job_log(j, LOG_NOTICE, "File at location of per-user launchd directory is not a directory. Moving aside: %s", path);
8171
8172 char old[PATH_MAX];
8173 snprintf(old, sizeof(old), "%s.movedaside", path);
8174 (void)job_assumes_zero_p(j, rename(path, old));
8175 }
8176
8177 (void)job_assumes_zero_p(j, mkdir(path, S_IRWXU));
8178 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8179 created = true;
8180 }
8181
8182 if (!created && r == 0) { // sb is only valid if stat() succeeded
8183 if (sb.st_uid != uid) {
8184 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper user ownership. Repairing: %s", path);
8185 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8186 }
8187 if (sb.st_gid != 0) {
8188 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper group ownership. Repairing: %s", path);
8189 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8190 }
8191 if (sb.st_mode != (S_IRWXU | S_IFDIR)) {
8192 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper mode. Repairing: %s", path);
8193 (void)job_assumes_zero_p(j, chmod(path, S_IRWXU));
8194 }
8195 }
8196 }
8197
8198 void
8199 job_setup_per_user_directories(job_t j, uid_t uid, const char *label)
8200 {
8201 char path[PATH_MAX];
8202
8203 (void)snprintf(path, sizeof(path), LAUNCHD_DB_PREFIX "/%s", label);
8204 job_setup_per_user_directory(j, uid, path);
8205
8206 (void)snprintf(path, sizeof(path), LAUNCHD_LOG_PREFIX "/%s", label);
8207 job_setup_per_user_directory(j, uid, path);
8208 }
8209
8210 job_t
8211 jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
8212 {
8213 job_t ji = NULL;
8214 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8215 if (!ji->per_user) {
8216 continue;
8217 }
8218 if (ji->mach_uid != which_user) {
8219 continue;
8220 }
8221 if (SLIST_EMPTY(&ji->machservices)) {
8222 continue;
8223 }
8224 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8225 continue;
8226 }
8227 break;
8228 }
8229
8230 if (unlikely(ji == NULL)) {
8231 struct machservice *ms;
8232 char lbuf[1024];
8233
8234 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
8235
8236 snprintf(lbuf, sizeof(lbuf), "com.apple.launchd.peruser.%u", which_user);
8237
8238 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
8239
8240 if (ji != NULL) {
8241 auditinfo_addr_t auinfo = {
8242 .ai_termid = {
8243 .at_type = AU_IPv4
8244 },
8245 .ai_auid = which_user,
8246 .ai_asid = AU_ASSIGN_ASID,
8247 };
8248
8249 if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8250 job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8251 (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8252
8253 /* Kinda lame that we have to do this, but we can't create an
8254 * audit session without joining it.
8255 */
8256 (void)job_assumes(ji, audit_session_join(launchd_audit_port));
8257 ji->asid = auinfo.ai_asid;
8258 } else {
8259 job_log(ji, LOG_WARNING, "Could not set audit session!");
8260 job_remove(ji);
8261 return NULL;
8262 }
8263
8264 ji->mach_uid = which_user;
8265 ji->per_user = true;
8266 ji->enable_transactions = true;
8267 job_setup_per_user_directories(ji, which_user, lbuf);
8268
8269 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8270 job_remove(ji);
8271 ji = NULL;
8272 } else {
8273 ms->upfront = true;
8274 ms->per_user_hack = true;
8275 ms->hide = true;
8276
8277 ji = job_dispatch(ji, false);
8278 }
8279 }
8280 } else {
8281 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
8282 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8283 }
8284
8285 return ji;
8286 }
8287
8288 kern_return_t
8289 job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
8290 {
8291 struct ldcred *ldc = runtime_get_caller_creds();
8292 job_t jpu;
8293
8294 if (!j) {
8295 return BOOTSTRAP_NO_MEMORY;
8296 }
8297
8298 if (launchd_osinstaller) {
8299 return BOOTSTRAP_UNKNOWN_SERVICE;
8300 }
8301
8302 #if TARGET_OS_EMBEDDED
8303 // There is no need for per-user launchd's on embedded.
8304 job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
8305 return BOOTSTRAP_UNKNOWN_SERVICE;
8306 #endif
8307
8308 #if HAVE_SANDBOX
8309 if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
8310 return BOOTSTRAP_NOT_PRIVILEGED;
8311 }
8312 #endif
8313
8314 job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
8315
8316 if (unlikely(!pid1_magic)) {
8317 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
8318 return BOOTSTRAP_NOT_PRIVILEGED;
8319 }
8320
8321 if (ldc->euid || ldc->uid) {
8322 which_user = ldc->euid ?: ldc->uid;
8323 }
8324
8325 *up_cont = MACH_PORT_NULL;
8326
8327 jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);
8328
8329 return 0;
8330 }
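
/* Client-side sketch: bootstrap_look_up_per_user() from bootstrap_priv.h
 * reaches job_mig_lookup_per_user_context() to obtain the per-user launchd's
 * bootstrap port. UID 501 below is just an example; non-root callers are
 * pinned to their own UID by the routine above:
 *
 *	mach_port_t pu_port = MACH_PORT_NULL;
 *	kern_return_t kr = bootstrap_look_up_per_user(bootstrap_port, NULL, 501, &pu_port);
 */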
8331
8332 kern_return_t
8333 job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
8334 {
8335 bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
8336 bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
8337 struct ldcred *ldc = runtime_get_caller_creds();
8338 struct machservice *ms = NULL;
8339 job_t jo;
8340
8341 if (!j) {
8342 return BOOTSTRAP_NO_MEMORY;
8343 }
8344
8345 if (j->dedicated_instance) {
8346 struct machservice *msi = NULL;
8347 SLIST_FOREACH(msi, &j->machservices, sle) {
8348 if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
8349 uuid_copy(instance_id, j->instance_id);
8350 ms = msi;
8351 break;
8352 }
8353 }
8354 } else {
8355 ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
8356 }
8357
8358 if (strict) {
8359 if (likely(ms != NULL)) {
8360 if (ms->job != j) {
8361 return BOOTSTRAP_NOT_PRIVILEGED;
8362 } else if (ms->isActive) {
8363 return BOOTSTRAP_SERVICE_ACTIVE;
8364 }
8365 } else {
8366 return BOOTSTRAP_UNKNOWN_SERVICE;
8367 }
8368 } else if (ms == NULL) {
8369 if (job_assumes(j, !j->dedicated_instance)) {
8370 *serviceportp = MACH_PORT_NULL;
8371
8372 if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
8373 return BOOTSTRAP_NO_MEMORY;
8374 }
8375
8376 // Treat this like a legacy job.
8377 if (!j->legacy_mach_job) {
8378 ms->isActive = true;
8379 ms->recv = false;
8380 }
8381
8382 if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
8383 job_log(j, LOG_APPLEONLY, "Please add the following service to the configuration file for this job: %s", servicename);
8384 }
8385 } else {
8386 return BOOTSTRAP_UNKNOWN_SERVICE;
8387 }
8388 } else {
8389 if (unlikely((jo = machservice_job(ms)) != j)) {
8390 static pid_t last_warned_pid;
8391
8392 if (last_warned_pid != ldc->pid) {
8393 job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
8394 last_warned_pid = ldc->pid;
8395 }
8396
8397 return BOOTSTRAP_NOT_PRIVILEGED;
8398 }
8399 if (unlikely(machservice_active(ms))) {
8400 job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
8401 return BOOTSTRAP_SERVICE_ACTIVE;
8402 }
8403 }
8404
8405 job_checkin(j);
8406 machservice_request_notifications(ms);
8407
8408 job_log(j, LOG_INFO, "Check-in of service: %s", servicename);
8409
8410 *serviceportp = machservice_port(ms);
8411 return BOOTSTRAP_SUCCESS;
8412 }
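
/* Client-side sketch: the public bootstrap_check_in() from bootstrap.h lands
 * in job_mig_check_in2(). A daemon checks in to collect the receive right for
 * a MachServices entry declared in its launchd.plist (the service name below
 * is hypothetical):
 *
 *	mach_port_t sp = MACH_PORT_NULL;
 *	kern_return_t kr = bootstrap_check_in(bootstrap_port, "com.example.service", &sp);
 *	if (kr == BOOTSTRAP_SUCCESS) {
 *		// run the Mach message loop on sp
 *	}
 */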
8413
8414 kern_return_t
8415 job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
8416 {
8417 struct machservice *ms;
8418 struct ldcred *ldc = runtime_get_caller_creds();
8419
8420 if (!j) {
8421 return BOOTSTRAP_NO_MEMORY;
8422 }
8423
8424 if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
8425 job_log(j, LOG_APPLEONLY, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
8426 }
8427
8428 job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
8429
8430 // 5641783 for the embedded hack
8431 #if !TARGET_OS_EMBEDDED
8432 /*
8433 * From a per-user/session launchd's perspective, SecurityAgent (UID
8434 * 92) is a rogue application (not our UID, not root and not a child of
8435 * us). We'll have to reconcile this design friction at a later date.
8436 */
8437 if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
8438 if (pid1_magic) {
8439 return VPROC_ERR_TRY_PER_USER;
8440 } else {
8441 return BOOTSTRAP_NOT_PRIVILEGED;
8442 }
8443 }
8444 #endif
8445
8446 ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);
8447
8448 if (unlikely(ms)) {
8449 if (machservice_job(ms) != j) {
8450 return BOOTSTRAP_NOT_PRIVILEGED;
8451 }
8452 if (machservice_active(ms)) {
8453 job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
8454 return BOOTSTRAP_SERVICE_ACTIVE;
8455 }
8456 if (ms->recv && (serviceport != MACH_PORT_NULL)) {
8457 job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
8458 return BOOTSTRAP_NOT_PRIVILEGED;
8459 }
8460 job_checkin(j);
8461 machservice_delete(j, ms, false);
8462 }
8463
8464 if (likely(serviceport != MACH_PORT_NULL)) {
8465 if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
8466 machservice_request_notifications(ms);
8467 } else {
8468 return BOOTSTRAP_NO_MEMORY;
8469 }
8470 }
8471
8473 return BOOTSTRAP_SUCCESS;
8474 }
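
/* Client-side sketch: the deprecated bootstrap_register() from bootstrap.h
 * ends up in job_mig_register2(). The legacy pattern creates its own receive
 * right and hands launchd a send right, which is why check-in is preferred
 * (the service name below is hypothetical):
 *
 *	mach_port_t sp = MACH_PORT_NULL;
 *	(void)mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &sp);
 *	(void)mach_port_insert_right(mach_task_self(), sp, sp, MACH_MSG_TYPE_MAKE_SEND);
 *	kern_return_t kr = bootstrap_register(bootstrap_port, "com.example.legacy", sp);
 */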
8475
8476 kern_return_t
8477 job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
8478 {
8479 struct machservice *ms = NULL;
8480 struct ldcred *ldc = runtime_get_caller_creds();
8481 kern_return_t kr;
8482 bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
8483 bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
8484 bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
8485 bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;
8486
8487 if (!j) {
8488 return BOOTSTRAP_NO_MEMORY;
8489 }
8490
8491 bool xpc_req = (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN);
8492
8493 // 5641783 for the embedded hack
8494 #if !TARGET_OS_EMBEDDED
8495 if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
8496 return VPROC_ERR_TRY_PER_USER;
8497 }
8498 #endif
8499
8500 #if HAVE_SANDBOX
8501 /* We don't do sandbox checking for XPC domains because, by definition, all
8502 * the services within your domain should be accessible to you.
8503 */
8504 if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
8505 return BOOTSTRAP_NOT_PRIVILEGED;
8506 }
8507 #endif
8508
8509 if (per_pid_lookup) {
8510 ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
8511 } else {
8512 if (xpc_req) {
8513 // Requests from XPC domains stay local.
8514 ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
8515 } else {
8516 /* A strict lookup which is privileged won't even bother trying to
8517 * find a service if we're not hosting the root Mach bootstrap.
8518 */
8519 if (strict_lookup && privileged) {
8520 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8521 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
8522 }
8523 } else {
8524 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
8525 }
8526 }
8527 }
8528
8529 if (likely(ms)) {
8530 ms = ms->alias ? ms->alias : ms;
8531 if (unlikely(specific_instance && ms->job->multiple_instances)) {
8532 job_t ji = NULL;
8533 job_t instance = NULL;
8534 LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
8535 if (uuid_compare(instance_id, ji->instance_id) == 0) {
8536 instance = ji;
8537 break;
8538 }
8539 }
8540
8541 if (unlikely(instance == NULL)) {
8542 job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
8543 instance = job_new_subjob(ms->job, instance_id);
8544 if (job_assumes(j, instance != NULL)) {
8545 /* Disable this support for now. We only support having
8546 * multi-instance jobs within private XPC domains.
8547 */
8548 #if 0
8549 /* If the job is multi-instance, in a singleton XPC domain
8550 * and the request is not coming from within that singleton
8551 * domain, we need to alias the new job into the requesting
8552 * domain.
8553 */
8554 if (!j->mgr->xpc_singleton && xpc_req) {
8555 (void)job_assumes(instance, job_new_alias(j->mgr, instance));
8556 }
8557 #endif
8558 job_dispatch(instance, false);
8559 }
8560 }
8561
8562 ms = NULL;
8563 if (job_assumes(j, instance != NULL)) {
8564 struct machservice *msi = NULL;
8565 SLIST_FOREACH(msi, &instance->machservices, sle) {
8566 /* sizeof(servicename) would return the size of a pointer,
8567 * even though name_t is an array type, because arrays passed
8568 * as parameters in C decay to pointers. That is why the code
8569 * uses sizeof(name_t) instead.
8570 */
8571 if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
8572 ms = msi;
8573 break;
8574 }
8575 }
8576 }
8577 } else {
8578 if (machservice_hidden(ms) && !machservice_active(ms)) {
8579 ms = NULL;
8580 } else if (unlikely(ms->per_user_hack)) {
8581 ms = NULL;
8582 }
8583 }
8584 }
8585
8586 if (likely(ms)) {
8587 (void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
8588 job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
8589 *serviceportp = machservice_port(ms);
8590
8591 kr = BOOTSTRAP_SUCCESS;
8592 } else if (strict_lookup && !privileged) {
8593 /* Hack: We need to simulate XPC's desire not to establish a hierarchy.
8594 * So if XPC is doing the lookup, and it's not a privileged lookup, we
8595 * won't forward. But if it is a privileged lookup, then we must
8596 * forward.
8597 */
8598 return BOOTSTRAP_UNKNOWN_SERVICE;
8599 } else if (inherited_bootstrap_port != MACH_PORT_NULL) {
8600 // Requests from within an XPC domain don't get forwarded.
8601 job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
8602 /* Clients potentially check the audit token of the reply to verify that
8603 * the returned send right is trustworthy.
8604 */
8605 (void)job_assumes_zero(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags));
8606 return MIG_NO_REPLY;
8607 } else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
8608 /* 5240036 Should start background session when a lookup of CCacheServer
8609 * occurs
8610 *
8611 * This is a total hack. We sniff out loginwindow session, and attempt
8612 * to guess what it is up to. If we find a EUID that isn't root, we
8613 * force it over to the per-user context.
8614 */
8615 return VPROC_ERR_TRY_PER_USER;
8616 } else {
8617 job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
8618 kr = BOOTSTRAP_UNKNOWN_SERVICE;
8619 }
8620
8621 return kr;
8622 }
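
/* Client-side sketch: job_mig_look_up2() is the server side of
 * bootstrap_look_up() from bootstrap.h, by far the most common bootstrap
 * call (the service name below is hypothetical):
 *
 *	mach_port_t sp = MACH_PORT_NULL;
 *	kern_return_t kr = bootstrap_look_up(bootstrap_port, "com.example.service", &sp);
 *
 * On success, sp holds a send right to the named service.
 */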
8623
8624 kern_return_t
8625 job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
8626 {
8627 if (!j) {
8628 return BOOTSTRAP_NO_MEMORY;
8629 }
8630
8631 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
8632 jobmgr_t jm = j->mgr;
8633
8634 if (jobmgr_parent(jm)) {
8635 *parentport = jobmgr_parent(jm)->jm_port;
8636 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
8637 *parentport = jm->jm_port;
8638 } else {
8639 (void)job_assumes_zero(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp));
8640 // The previous routine moved the reply port, so we must return MIG_NO_REPLY now.
8641 return MIG_NO_REPLY;
8642 }
8643 return BOOTSTRAP_SUCCESS;
8644 }
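
/* Client-side sketch: bootstrap_parent() from bootstrap.h maps to
 * job_mig_parent(). Walking up the bootstrap hierarchy looks roughly like:
 *
 *	mach_port_t bp = bootstrap_port, parent = MACH_PORT_NULL;
 *	while (bootstrap_parent(bp, &parent) == BOOTSTRAP_SUCCESS && parent != bp) {
 *		bp = parent;
 *	}
 */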
8645
8646 kern_return_t
8647 job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
8648 {
8649 if (!j) {
8650 return BOOTSTRAP_NO_MEMORY;
8651 }
8652
8653 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8654 *rootbsp = root_jobmgr->jm_port;
8655 (void)job_assumes_zero(j, launchd_mport_make_send(root_jobmgr->jm_port));
8656 } else {
8657 *rootbsp = inherited_bootstrap_port;
8658 (void)job_assumes_zero(j, launchd_mport_copy_send(inherited_bootstrap_port));
8659 }
8660
8661 return BOOTSTRAP_SUCCESS;
8662 }
8663
8664 kern_return_t
8665 job_mig_info(job_t j, name_array_t *servicenamesp,
8666 unsigned int *servicenames_cnt, name_array_t *servicejobsp,
8667 unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp,
8668 unsigned int *serviceactives_cnt, uint64_t flags)
8669 {
8670 name_array_t service_names = NULL;
8671 name_array_t service_jobs = NULL;
8672 bootstrap_status_array_t service_actives = NULL;
8673 unsigned int cnt = 0, cnt2 = 0;
8674 jobmgr_t jm;
8675
8676 if (!j) {
8677 return BOOTSTRAP_NO_MEMORY;
8678 }
8679
8680 if (launchd_flat_mach_namespace) {
8681 if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
8682 jm = j->mgr;
8683 } else {
8684 jm = root_jobmgr;
8685 }
8686 } else {
8687 jm = j->mgr;
8688 }
8689
8690 unsigned int i = 0;
8691 struct machservice *msi = NULL;
8692 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8693 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
8694 cnt += !msi->per_pid ? 1 : 0;
8695 }
8696 }
8697
8698 if (cnt == 0) {
8699 goto out;
8700 }
8701
8702 mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
8703 if (!job_assumes(j, service_names != NULL)) {
8704 goto out_bad;
8705 }
8706
8707 mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
8708 if (!job_assumes(j, service_jobs != NULL)) {
8709 goto out_bad;
8710 }
8711
8712 mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
8713 if (!job_assumes(j, service_actives != NULL)) {
8714 goto out_bad;
8715 }
8716
8717 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8718 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
8719 if (!msi->per_pid) {
8720 strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
8721 msi = msi->alias ? msi->alias : msi;
8722 if (msi->job->mgr->shortdesc) {
8723 strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
8724 } else {
8725 strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
8726 }
8727 service_actives[cnt2] = machservice_status(msi);
8728 cnt2++;
8729 }
8730 }
8731 }
8732
8733 (void)job_assumes(j, cnt == cnt2);
8734
8735 out:
8736 *servicenamesp = service_names;
8737 *servicejobsp = service_jobs;
8738 *serviceactivesp = service_actives;
8739 *servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;
8740
8741 return BOOTSTRAP_SUCCESS;
8742
8743 out_bad:
8744 if (service_names) {
8745 mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
8746 }
8747 if (service_jobs) {
8748 mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
8749 }
8750 if (service_actives) {
8751 mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
8752 }
8753
8754 return BOOTSTRAP_NO_MEMORY;
8755 }
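
/* Client-side sketch: bootstrap_info() from bootstrap.h returns the three
 * parallel out-of-line arrays built by job_mig_info(). A caller can list the
 * namespace roughly like so (cnt and the arrays come from the bootstrap_info()
 * call):
 *
 *	unsigned int i;
 *	for (i = 0; i < cnt; i++) {
 *		printf("%c %s (%s)\n",
 *		    service_actives[i] == BOOTSTRAP_STATUS_ACTIVE ? 'A' : '-',
 *		    service_names[i], service_jobs[i]);
 *	}
 */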
8756
8757 kern_return_t
8758 job_mig_lookup_children(job_t j, mach_port_array_t *child_ports,
8759 mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names,
8760 mach_msg_type_number_t *child_names_cnt,
8761 bootstrap_property_array_t *child_properties,
8762 mach_msg_type_number_t *child_properties_cnt)
8763 {
8764 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
8765 if (!j) {
8766 return BOOTSTRAP_NO_MEMORY;
8767 }
8768
8769 struct ldcred *ldc = runtime_get_caller_creds();
8770
8771 /* Only allow root processes to look up children, even if we're in the per-user launchd.
8772 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
8773 * in a non-flat namespace.
8774 */
8775 if (ldc->euid != 0) {
8776 job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
8777 return BOOTSTRAP_NOT_PRIVILEGED;
8778 }
8779
8780 unsigned int cnt = 0;
8781
8782 jobmgr_t jmr = j->mgr;
8783 jobmgr_t jmi = NULL;
8784 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8785 cnt++;
8786 }
8787
8788 // Find our per-user launchds if we're PID 1.
8789 job_t ji = NULL;
8790 if (pid1_magic) {
8791 LIST_FOREACH(ji, &jmr->jobs, sle) {
8792 cnt += ji->per_user ? 1 : 0;
8793 }
8794 }
8795
8796 if (cnt == 0) {
8797 return BOOTSTRAP_NO_CHILDREN;
8798 }
8799
8800 mach_port_array_t _child_ports = NULL;
8801 mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
8802 if (!job_assumes(j, _child_ports != NULL)) {
8803 kr = BOOTSTRAP_NO_MEMORY;
8804 goto out_bad;
8805 }
8806
8807 name_array_t _child_names = NULL;
8808 mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
8809 if (!job_assumes(j, _child_names != NULL)) {
8810 kr = BOOTSTRAP_NO_MEMORY;
8811 goto out_bad;
8812 }
8813
8814 bootstrap_property_array_t _child_properties = NULL;
8815 mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
8816 if (!job_assumes(j, _child_properties != NULL)) {
8817 kr = BOOTSTRAP_NO_MEMORY;
8818 goto out_bad;
8819 }
8820
8821 unsigned int cnt2 = 0;
8822 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8823 if (jobmgr_assumes_zero(jmi, launchd_mport_make_send(jmi->jm_port)) == KERN_SUCCESS) {
8824 _child_ports[cnt2] = jmi->jm_port;
8825 } else {
8826 _child_ports[cnt2] = MACH_PORT_NULL;
8827 }
8828
8829 strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
8830 _child_properties[cnt2] = jmi->properties;
8831
8832 cnt2++;
8833 }
8834
8835 if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
8836 if (ji->per_user) {
8837 if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
8838 mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));
8839
8840 if (job_assumes_zero(ji, launchd_mport_copy_send(port)) == KERN_SUCCESS) {
8841 _child_ports[cnt2] = port;
8842 } else {
8843 _child_ports[cnt2] = MACH_PORT_NULL;
8844 }
8845 } else {
8846 _child_ports[cnt2] = MACH_PORT_NULL;
8847 }
8848
8849 strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
8850 _child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;
8851
8852 cnt2++;
8853 }
8854 }
8855
8856 *child_names_cnt = cnt;
8857 *child_ports_cnt = cnt;
8858 *child_properties_cnt = cnt;
8859
8860 *child_names = _child_names;
8861 *child_ports = _child_ports;
8862 *child_properties = _child_properties;
8863
8864 unsigned int i = 0;
8865 for (i = 0; i < cnt; i++) {
8866 job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
8867 }
8868
8869 return BOOTSTRAP_SUCCESS;
8870 out_bad:
8871 if (_child_ports) {
8872 mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
8873 }
8874
8875 if (_child_names) {
8876 mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
8877 }
8878
8879 if (_child_properties) {
8880 mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
8881 }
8882
8883 return kr;
8884 }
8885
8886 kern_return_t
8887 job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
8888 {
8889 struct ldcred *ldc = runtime_get_caller_creds();
8889 *managed = false; // MIG does not zero-initialize out-parameters, so never leave this unset
8890 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
8891 return BOOTSTRAP_NOT_PRIVILEGED;
8892 }
8893
8894 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
8895 * directly by launchd as agents.
8896 */
8897 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
8898 if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
8899 *managed = true;
8900 }
8901
8902 return BOOTSTRAP_SUCCESS;
8903 }
8904
8905 kern_return_t
8906 job_mig_port_for_label(job_t j, name_t label, mach_port_t *mp)
8907 {
8908 if (!j) {
8909 return BOOTSTRAP_NO_MEMORY;
8910 }
8911
8912 struct ldcred *ldc = runtime_get_caller_creds();
8913 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
8914
8915 #if HAVE_SANDBOX
8916 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8917 return BOOTSTRAP_NOT_PRIVILEGED;
8918 }
8919 #endif
8920
8921 mach_port_t _mp = MACH_PORT_NULL;
8922 if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
8923 job_t target_j = job_find(NULL, label);
8924 if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
8925 if (target_j->j_port == MACH_PORT_NULL) {
8926 (void)job_assumes(target_j, job_setup_machport(target_j) == true);
8927 }
8928
8929 _mp = target_j->j_port;
8930 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
8931 } else {
8932 kr = BOOTSTRAP_NO_MEMORY;
8933 }
8934 }
8935
8936 *mp = _mp;
8937 return kr;
8938 }
8939
8940 kern_return_t
8941 job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
8942 {
8943 #if TARGET_OS_EMBEDDED
8944 return KERN_SUCCESS;
8945 #endif
8946
8947 if (!j) {
8948 return BOOTSTRAP_NO_MEMORY;
8949 }
8950
8951 uuid_string_t uuid_str;
8952 uuid_unparse(uuid, uuid_str);
8953 job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);
8954
8955 job_t ji = NULL, jt = NULL;
8956 LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
8957 uuid_string_t uuid_str2;
8958 uuid_unparse(ji->expected_audit_uuid, uuid_str2);
8959
8960 if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
8961 uuid_clear(ji->expected_audit_uuid);
8962 if (asport != MACH_PORT_NULL) {
8963 job_log(ji, LOG_DEBUG, "Job should join session with port 0x%x", asport);
8964 (void)job_assumes_zero(j, launchd_mport_copy_send(asport));
8965 } else {
8966 job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
8967 }
8968
8969 ji->asport = asport;
8970 LIST_REMOVE(ji, needing_session_sle);
8971 job_dispatch(ji, false);
8972 }
8973 }
8974
8975 /* Each job that the session port was set for holds a reference. At the end of
8976 * the loop, there will be one extra reference belonging to this MiG protocol.
8977 * We need to release it so that the session goes away when all the jobs
8978 * referencing it are unloaded.
8979 */
8980 (void)job_assumes_zero(j, launchd_mport_deallocate(asport));
8981
8982 return KERN_SUCCESS;
8983 }
8984
8985 jobmgr_t
8986 jobmgr_find_by_name(jobmgr_t jm, const char *where)
8987 {
8988 jobmgr_t jmi, jmi2;
8989
8990 // NULL is only passed by our custom API for LaunchServices. In that case, we infer the session ourselves.
8991 if (where == NULL) {
8992 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
8993 where = VPROCMGR_SESSION_LOGINWINDOW;
8994 } else {
8995 where = VPROCMGR_SESSION_AQUA;
8996 }
8997 }
8998
8999 if (strcasecmp(jm->name, where) == 0) {
9000 return jm;
9001 }
9002
9003 if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
9004 jmi = root_jobmgr;
9005 goto jm_found;
9006 }
9007
9008 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9009 if (unlikely(jmi->shutting_down)) {
9010 continue;
9011 } else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
9012 continue;
9013 } else if (strcasecmp(jmi->name, where) == 0) {
9014 goto jm_found;
9015 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
9016 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
9017 if (strcasecmp(jmi2->name, where) == 0) {
9018 jmi = jmi2;
9019 goto jm_found;
9020 }
9021 }
9022 }
9023 }
9024
9025 jm_found:
9026 return jmi;
9027 }
9028
9029 kern_return_t
9030 job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
9031 {
9032 mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
9033 mach_port_array_t l2l_ports = NULL;
9034 mach_port_t reqport, rcvright;
9035 kern_return_t kr = 1;
9036 launch_data_t out_obj_array = NULL;
9037 struct ldcred *ldc = runtime_get_caller_creds();
9038 jobmgr_t jmr = NULL;
9039
9040 if (!j) {
9041 return BOOTSTRAP_NO_MEMORY;
9042 }
9043
9044 if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
9045 job_log(j, LOG_ERR, "Moving a session to ourselves is bogus.");
9046
9047 kr = BOOTSTRAP_NOT_PRIVILEGED;
9048 goto out;
9049 }
9050
9051 job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);
9052
9053 kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
9054 if (job_assumes_zero(j, kr) != 0) {
9055 goto out;
9056 }
9057
9058 if (launch_data_array_get_count(out_obj_array) != l2l_port_cnt) {
9059 osx_assert_zero(l2l_port_cnt);
9060 }
9061
9062 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
9063 kr = BOOTSTRAP_NO_MEMORY;
9064 goto out;
9065 }
9066
9067 jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;
9068
9069 /* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
9070 * processing an IPC request, we'll do this action before the new job manager can get any IPC
9071 * requests. This serialization is guaranteed since we are single-threaded in that respect.
9072 */
9073 if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
9074 // This is so awful.
9075 // Remove the job from its current job manager.
9076 LIST_REMOVE(j, sle);
9077 LIST_REMOVE(j, pid_hash_sle);
9078
9079 // Put the job into the target job manager.
9080 LIST_INSERT_HEAD(&jmr->jobs, j, sle);
9081 LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
9082
9083 j->mgr = jmr;
9084 job_set_global_on_demand(j, true);
9085
9086 if (!j->holds_ref) {
9087 job_log(j, LOG_PERF, "Job moved subset into: %s", j->mgr->name);
9088 j->holds_ref = true;
9089 runtime_add_ref();
9090 }
9091 }
9092
9093 for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
9094 launch_data_t tmp, obj_at_idx;
9095 struct machservice *ms;
9096 job_t j_for_service;
9097 const char *serv_name;
9098 pid_t target_pid;
9099 bool serv_perpid;
9100
9101 (void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
9102 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
9103 target_pid = (pid_t)launch_data_get_integer(tmp);
9104 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
9105 serv_perpid = launch_data_get_bool(tmp);
9106 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
9107 serv_name = launch_data_get_string(tmp);
9108
9109 j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);
9110
9111 if (unlikely(!j_for_service)) {
9112 // The PID probably exited
9113 (void)job_assumes_zero(j, launchd_mport_deallocate(l2l_ports[l2l_i]));
9114 continue;
9115 }
9116
9117 if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
9118 job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
9119 machservice_request_notifications(ms);
9120 }
9121 }
9122
9123 kr = 0;
9124
9125 out:
9126 if (out_obj_array) {
9127 launch_data_free(out_obj_array);
9128 }
9129
9130 if (l2l_ports) {
9131 mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
9132 }
9133
9134 if (kr == 0) {
9135 if (target_subset) {
9136 (void)job_assumes_zero(j, launchd_mport_deallocate(target_subset));
9137 }
9138 if (asport) {
9139 (void)job_assumes_zero(j, launchd_mport_deallocate(asport));
9140 }
9141 } else if (jmr) {
9142 jobmgr_shutdown(jmr);
9143 }
9144
9145 return kr;
9146 }
9147
9148 kern_return_t
9149 job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
9150 {
9151 if (!j) {
9152 return BOOTSTRAP_NO_MEMORY;
9153 }
9154
9155 job_t j2;
9156
9157 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9158 if (j->mgr->session_initialized) {
9159 job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
9160 kr = BOOTSTRAP_NOT_PRIVILEGED;
9161 } else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9162 jobmgr_t jmi;
9163
9164 /*
9165 * 5330262
9166 *
9167 * We're working around LoginWindow and the WindowServer.
9168 *
9169 * In practice, there is only one LoginWindow session. Unfortunately, for certain
9170 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
9171 * spawns a replacement loginwindow session before cleaning up the previous one.
9172 *
9173 * We're going to use the creation of a new LoginWindow context as a clue that the
9174 * previous LoginWindow context is on the way out and therefore we should just
9175 * kick-start the shutdown of it.
9176 */
9177
9178 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9179 if (unlikely(jmi->shutting_down)) {
9180 continue;
9181 } else if (strcasecmp(jmi->name, session_type) == 0) {
9182 jobmgr_shutdown(jmi);
9183 break;
9184 }
9185 }
9186 }
9187
9188 jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
9189 strcpy(j->mgr->name_init, session_type);
9190
9191 if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
9192 j2->asport = asport;
9193 (void)job_assumes(j, job_dispatch(j2, true));
9194 kr = BOOTSTRAP_SUCCESS;
9195 }
9196
9197 return kr;
9198 }
9199
9200 kern_return_t
9201 job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
9202 {
9203 struct ldcred *ldc = runtime_get_caller_creds();
9204 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9205 jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
9206 return BOOTSTRAP_NO_MEMORY;
9207 }
9208
9209 if (j->mgr->shutting_down) {
9210 return BOOTSTRAP_UNKNOWN_SERVICE;
9211 }
9212
9213 job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);
9214
9215 if (!job_assumes(j, pid1_magic == false)) {
9216 job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
9217 return BOOTSTRAP_NOT_PRIVILEGED;
9218 }
9219
9220 if (!j->anonymous) {
9221 job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
9222 return BOOTSTRAP_NOT_PRIVILEGED;
9223 }
9224
9225 jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
9226 if (target_jm == j->mgr) {
9227 job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
9228 (void)job_assumes_zero(j, launchd_mport_deallocate(asport));
9229 (void)job_assumes_zero(j, launchd_mport_deallocate(requestor_port));
9230 *new_bsport = target_jm->jm_port;
9231 return BOOTSTRAP_SUCCESS;
9232 }
9233
9234 if (!target_jm) {
9235 target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
9236 if (target_jm) {
9237 target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
9238 (void)job_assumes_zero(j, launchd_mport_deallocate(asport));
9239 }
9240 }
9241
9242 if (!job_assumes(j, target_jm != NULL)) {
9243 job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
9244 return BOOTSTRAP_NO_MEMORY;
9245 }
9246
9247 // Remove the job from its current job manager.
9248 LIST_REMOVE(j, sle);
9249 LIST_REMOVE(j, pid_hash_sle);
9250
9251 job_t ji = NULL, jit = NULL;
9252 LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
9253 if (ji == j) {
9254 LIST_REMOVE(ji, global_env_sle);
9255 break;
9256 }
9257 }
9258
9259 // Put the job into the target job manager.
9260 LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
9261 LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
9262
9263 if (ji) {
9264 LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
9265 }
9266
9267 // Move our Mach services over if we're not in a flat namespace.
9268 if (!launchd_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
9269 struct machservice *msi = NULL, *msit = NULL;
9270 SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
9271 LIST_REMOVE(msi, name_hash_sle);
9272 LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
9273 }
9274 }
9275
9276 j->mgr = target_jm;
9277
9278 if (!j->holds_ref) {
9279 /* Anonymous jobs that move between sessions are particularly interesting to us,
9280 * so we want to stay alive for as long as they do.
9281 * For example, login(1) calls into the PAM launchd module, which moves the process into
9282 * the StandardIO session by default. So we'll hold a reference on that job to prevent
9283 * ourselves from going away.
9284 */
9285 j->holds_ref = true;
9286 job_log(j, LOG_PERF, "Job switched into manager: %s", j->mgr->name);
9287 runtime_add_ref();
9288 }
9289
9290 *new_bsport = target_jm->jm_port;
9291
9292 return KERN_SUCCESS;
9293 }
9294
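/* A minimal client-side sketch of the move handled above. The exact SPI name is
 * an assumption here: vproc_priv.h historically declared
 * _vprocmgr_switch_to_session() for this, returning a vproc_err_t that is NULL
 * on success. Illustrative only; not compiled.
 */
#if 0
#include <stdbool.h>
#include <vproc.h>
#include <vproc_priv.h>

static bool
move_self_to_standardio(void)
{
	/* launchd services this via job_mig_switch_to_session(); only anonymous
	 * (non-launchd-managed) jobs are allowed to do this.
	 */
	return _vprocmgr_switch_to_session(VPROCMGR_SESSION_STANDARDIO, 0) == NULL;
}
#endif
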
9295 kern_return_t
9296 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
9297 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
9298 mach_port_array_t *portsp, unsigned int *ports_cnt)
9299 {
9300 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
9301 mach_port_array_t ports = NULL;
9302 unsigned int cnt = 0, cnt2 = 0;
9303 size_t packed_size;
9304 struct machservice *ms;
9305 jobmgr_t jm;
9306 job_t ji;
9307
9308 if (!j) {
9309 return BOOTSTRAP_NO_MEMORY;
9310 }
9311
9312 jm = j->mgr;
9313
9314 if (unlikely(!pid1_magic)) {
9315 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
9316 return BOOTSTRAP_NOT_PRIVILEGED;
9317 }
9318 if (unlikely(jobmgr_parent(jm) == NULL)) {
9319 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
9320 return BOOTSTRAP_NOT_PRIVILEGED;
9321 }
9322 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
9323 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
9324 return BOOTSTRAP_NOT_PRIVILEGED;
9325 }
9326 if (unlikely(!j->anonymous)) {
9327 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
9328 return BOOTSTRAP_NOT_PRIVILEGED;
9329 }
9330
9331 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per-session launchd.");
9332
9333 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
9334 if (!job_assumes(j, outdata_obj_array)) {
9335 goto out_bad;
9336 }
9337
9338 *outdataCnt = 20 * 1024 * 1024;
9339 mig_allocate(outdata, *outdataCnt);
9340 if (!job_assumes(j, *outdata != 0)) {
9341 goto out_bad; // avoid leaking outdata_obj_array
9342 }
9343
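	/* The payload is a launch_data array of dictionaries (service name, owner
	 * PID, per-PID flag), paired index-for-index with the ports[] array of the
	 * corresponding Mach service send rights built below.
	 */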
9344 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9345 if (!ji->anonymous) {
9346 continue;
9347 }
9348 SLIST_FOREACH(ms, &ji->machservices, sle) {
9349 cnt++;
9350 }
9351 }
9352
9353 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
9354 if (!job_assumes(j, ports != NULL)) {
9355 goto out_bad;
9356 }
9357
9358 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9359 if (!ji->anonymous) {
9360 continue;
9361 }
9362
9363 SLIST_FOREACH(ms, &ji->machservices, sle) {
9364 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
9365 (void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
9366 } else {
9367 goto out_bad;
9368 }
9369
9370 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
9371 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
9372 } else {
9373 goto out_bad;
9374 }
9375
9376 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
9377 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
9378 } else {
9379 goto out_bad;
9380 }
9381
9382 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
9383 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
9384 } else {
9385 goto out_bad;
9386 }
9387
9388 ports[cnt2] = machservice_port(ms);
9389
9390 // Increment the send right by one so we can shut down the jobmgr cleanly.
9391 (void)jobmgr_assumes_zero(jm, launchd_mport_copy_send(ports[cnt2]));
9392 cnt2++;
9393 }
9394 }
9395
9396 (void)job_assumes(j, cnt == cnt2);
9397
9398 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
9399 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
9400 if (!job_assumes(j, packed_size != 0)) {
9401 goto out_bad;
9402 }
9403
9404 launch_data_free(outdata_obj_array);
9405
9406 *portsp = ports;
9407 *ports_cnt = cnt;
9408
9409 *reqport = jm->req_port;
9410 *rcvright = jm->jm_port;
9411
9412 jm->req_port = 0;
9413 jm->jm_port = 0;
9414
9415 workaround_5477111 = j;
9416
9417 jobmgr_shutdown(jm);
9418
9419 return BOOTSTRAP_SUCCESS;
9420
9421 out_bad:
9422 if (outdata_obj_array) {
9423 launch_data_free(outdata_obj_array);
9424 }
9425 if (*outdata) {
9426 mig_deallocate(*outdata, *outdataCnt);
9427 }
9428 if (ports) {
9429 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
9430 }
9431
9432 return BOOTSTRAP_NO_MEMORY;
9433 }
9434
9435 kern_return_t
9436 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
9437 {
9438 int bsdepth = 0;
9439 jobmgr_t jmr;
9440
9441 if (!j) {
9442 return BOOTSTRAP_NO_MEMORY;
9443 }
9444 if (j->mgr->shutting_down) {
9445 return BOOTSTRAP_UNKNOWN_SERVICE;
9446 }
9447
9448 jmr = j->mgr;
9449
9450 while ((jmr = jobmgr_parent(jmr)) != NULL) {
9451 bsdepth++;
9452 }
9453
9454 // Since we use recursion, we impose an artificial depth limit on subsets.
9455 if (unlikely(bsdepth > 100)) {
9456 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
9457 return BOOTSTRAP_NO_MEMORY;
9458 }
9459
9460 char name[NAME_MAX];
9461 snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
9462
9463 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
9464 if (unlikely(requestorport == MACH_PORT_NULL)) {
9465 return BOOTSTRAP_NOT_PRIVILEGED;
9466 }
9467 return BOOTSTRAP_NO_MEMORY;
9468 }
9469
9470 *subsetportp = jmr->jm_port;
9471 jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
9472
9473 /* A job could create multiple subsets, so only add a reference the first time
9474 * it does; that way we don't have to keep a count.
9475 */
9476 if (j->anonymous && !j->holds_ref) {
9477 job_log(j, LOG_PERF, "Job created subset: %s", jmr->name);
9478 j->holds_ref = true;
9479 runtime_add_ref();
9480 }
9481
9482 job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
9483 return BOOTSTRAP_SUCCESS;
9484 }
9485
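/* A minimal client-side sketch of creating a subset, assuming the classic
 * bootstrap_subset() SPI from bootstrap_priv.h; passing the task port as the
 * requestor ties the subset's lifetime to the requesting task. Illustrative
 * only; not compiled.
 */
#if 0
#include <mach/mach.h>
#include <servers/bootstrap.h>

static kern_return_t
make_private_subset(mach_port_t *subset)
{
	/* launchd services this via job_mig_subset() above, which caps the
	 * nesting depth at 100.
	 */
	return bootstrap_subset(bootstrap_port, mach_task_self(), subset);
}
#endif
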
9486 job_t
9487 _xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
9488 {
9489 jobmgr_t where2put = NULL;
9490
9491 if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
9492 errno = EINVAL;
9493 return NULL;
9494 }
9495
9496 launch_data_t ldlabel = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL);
9497 if (!ldlabel || launch_data_get_type(ldlabel) != LAUNCH_DATA_STRING) {
9498 errno = EINVAL;
9499 return NULL;
9500 }
9501
9502 const char *label = launch_data_get_string(ldlabel);
9503 jobmgr_log(jm, LOG_DEBUG, "Importing service: %s", label);
9504
9505 launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
9506 if (destname) {
9507 bool supported_domain = false;
9508
9509 if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
9510 const char *str = launch_data_get_string(destname);
9511 if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
9512 where2put = _s_xpc_system_domain;
9513 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
9514 where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
9515 supported_domain = true;
9516 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
9517 where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
9518 } else {
9519 jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
9520 errno = EINVAL;
9521 }
9522 } else {
9523 jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
9524 errno = EINVAL;
9525 }
9526
9527 if (where2put && !supported_domain) {
9528 launch_data_t mi = NULL;
9529 if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
9530 if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
9531 jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
9532 where2put = NULL;
9533 errno = EINVAL;
9534 }
9535 }
9536 }
9537 } else {
9538 where2put = jm;
9539 }
9540
9541 job_t j = NULL;
9542 if (where2put) {
9543 /* Gross. If the service already exists in a singleton domain, then
9544 * jobmgr_import2() will return the existing job. If we then fail to alias
9545 * that job, we will normally want to remove it; but if we did not create
9546 * it in the first place, we must avoid removing it. So we check errno
9547 * against EEXIST in the success case, and if it was EEXIST, we do not
9548 * remove the original job in the event of a failed alias.
9549 *
9550 * This really needs to be re-thought, but I think it'll require a larger
9551 * evaluation of launchd's data structures. Right now, once a job is
9552 * imported into a singleton domain, it won't be removed until the system
9553 * shuts down, but that may not always be true. If it ever changes, we'll
9554 * have a problem because we'll have to account for all existing aliases
9555 * and clean them up somehow. Or just start ref-counting. I knew this
9556 * aliasing stuff would be trouble...
9557 *
9558 * <rdar://problem/10646503>
9559 */
9560 jobmgr_log(where2put, LOG_DEBUG, "Importing service...");
9561
9562 errno = 0;
9563 if ((j = jobmgr_import2(where2put, pload))) {
9564 bool created = (errno != EEXIST);
9565 j->xpc_service = true;
9566
9567 if (where2put->xpc_singleton) {
9568 /* If the service was destined for one of the global domains,
9569 * then we have to alias it into our local domain to reserve the
9570 * name.
9571 */
9572 job_t ja = NULL;
9573 if (!(ja = job_new_alias(jm, j))) {
9574 /* If we failed to alias the job because of a conflict over
9575 * the label, then we remove it from the global domain. We
9576 * don't want to risk having imported a malicious job into
9577 * one of the global domains.
9578 */
9579 if (errno != EEXIST) {
9580 job_log(j, LOG_ERR, "Failed to alias job into: %s: %d: %s", where2put->name, errno, strerror(errno));
9581 } else {
9582 errno = 0;
9583 }
9584
9585 if (created) {
9586 jobmgr_log(jm, LOG_WARNING, "Singleton service already existed in job-local namespace. Removing: %s", j->label);
9587 job_remove(j);
9588 }
9589
9590 j = NULL;
9591 } else {
9592 jobmgr_log(jm, LOG_DEBUG, "Aliased service into local domain: %s", j->label);
9593 (void)job_dispatch(j, false);
9594 ja->xpc_service = true;
9595 j = ja;
9596 }
9597 } else {
9598 (void)job_dispatch(j, false);
9599 }
9600 }
9601 } else {
9602 jobmgr_log(jm, LOG_DEBUG, "Could not find destination for service: %s", label);
9603 }
9604
9605 return j;
9606 }
9607
9608 int
9609 _xpc_domain_import_services(job_t j, launch_data_t services)
9610 {
9611 int error = EINVAL;
9612 if (launch_data_get_type(services) != LAUNCH_DATA_ARRAY) {
9613 return error;
9614 }
9615
9616 size_t i = 0;
9617 size_t c = launch_data_array_get_count(services);
9618 jobmgr_log(j->mgr, LOG_DEBUG, "Importing new services: %lu", c);
9619
9620 for (i = 0; i < c; i++) {
9621 jobmgr_log(j->mgr, LOG_DEBUG, "Importing service at index: %lu", i);
9622
9623 job_t nj = NULL;
9624 launch_data_t ploadi = launch_data_array_get_index(services, i);
9625 if (!(nj = _xpc_domain_import_service(j->mgr, ploadi))) {
9626 if (!j->mgr->session_initialized && errno) {
9627 /* Service import failures are only fatal if the domain is being
9628 * initialized. If we're extending the domain, we can run into
9629 * errors with services already existing, so we just ignore them
9630 * rather than halt the whole extension over an error with one
9631 * service.
9632 *
9633 * <rdar://problem/10842779>
9634 */
9635 jobmgr_log(j->mgr, LOG_ERR, "Failed to import service at index: %lu: %d: %s", i, errno, strerror(errno));
9636 error = errno;
9637 break;
9638 }
9639 } else {
9640 jobmgr_log(j->mgr, LOG_DEBUG, "Imported service: %s", nj->label);
9641 }
9642 }
9643
9644 if (i == c) {
9645 error = 0;
9646 }
9647
9648 return error;
9649 }
9650
9651 kern_return_t
9652 xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
9653 {
9654 if (unlikely(!pid1_magic)) {
9655 job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
9656 return BOOTSTRAP_NOT_PRIVILEGED;
9657 }
9658 if (!j || !MACH_PORT_VALID(reqport)) {
9659 return BOOTSTRAP_UNKNOWN_SERVICE;
9660 }
9661 if (root_jobmgr->shutting_down) {
9662 jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
9663 return BOOTSTRAP_NOT_PRIVILEGED;
9664 }
9665 if (!j->xpc_bootstrapper) {
9666 job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
9667 return BOOTSTRAP_NOT_PRIVILEGED;
9668 }
9669
9670 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9671 /* All XPC domains are children of the root job manager. What we're creating
9672 * here is really just a skeleton. By creating it, we're adding reqport to our
9673 * port set. It will have two messages on it. The first specifies the
9674 * environment of the originator. This is so we can cache it and hand it to
9675 * xpcproxy to bootstrap our services. The second is the set of jobs to be
9676 * bootstrapped into the new domain.
9677 */
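	/* Concretely: xpc_domain_set_environment() below consumes the first of
	 * those messages, caching the originator's ports and credentials on this
	 * skeleton, and xpc_domain_load_services() consumes the second, importing
	 * the services and marking the session initialized.
	 */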
9678 jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
9679 if (job_assumes(j, jm != NULL)) {
9680 jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
9681 jm->shortdesc = "private";
9682 kr = BOOTSTRAP_SUCCESS;
9683 }
9684
9685 return kr;
9686 }
9687
9688 kern_return_t
9689 xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
9690 {
9691 if (!j) {
9692 /* Due to the whacky nature of XPC service bootstrapping, we can end up
9693 * getting this message long after the requesting process has gone away.
9694 * See <rdar://problem/8593143>.
9695 */
9696 return BOOTSTRAP_UNKNOWN_SERVICE;
9697 }
9698
9699 jobmgr_t jm = j->mgr;
9700 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9701 return BOOTSTRAP_NOT_PRIVILEGED;
9702 }
9703
9704 if (jm->req_asport != MACH_PORT_NULL) {
9705 return BOOTSTRAP_NOT_PRIVILEGED;
9706 }
9707
9708 struct ldcred *ldc = runtime_get_caller_creds();
9709 struct proc_bsdshortinfo proc;
9710 if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
9711 if (errno != ESRCH) {
9712 (void)jobmgr_assumes_zero(jm, errno);
9713 }
9714
9715 jm->error = errno;
9716 jobmgr_remove(jm);
9717 return BOOTSTRAP_NO_MEMORY;
9718 }
9719
9720 #if !TARGET_OS_EMBEDDED
9721 if (jobmgr_assumes_zero(jm, audit_session_port(ldc->asid, &jm->req_asport)) != 0) {
9722 jm->error = EPERM;
9723 jobmgr_remove(jm);
9724 job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
9725 return BOOTSTRAP_NOT_PRIVILEGED;
9726 }
9727 #else
9728 jm->req_asport = MACH_PORT_DEAD;
9729 #endif
9730
9731 (void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s[%i]", proc.pbsi_comm, ldc->pid);
9732 strlcpy(jm->owner, proc.pbsi_comm, sizeof(jm->owner));
9733 jm->req_bsport = bsport;
9734 jm->req_excport = excport;
9735 jm->req_rport = rp;
9736 jm->req_ctx = ctx;
9737 jm->req_ctx_sz = ctx_sz;
9738 jm->req_pid = ldc->pid;
9739 jm->req_euid = ldc->euid;
9740 jm->req_egid = ldc->egid;
9741 jm->req_asid = ldc->asid;
9742
9743 return KERN_SUCCESS;
9744 }
9745
9746 kern_return_t
9747 xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
9748 {
9749 if (!j) {
9750 return BOOTSTRAP_UNKNOWN_SERVICE;
9751 }
9752
9753 job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
9754 if (!(rootj && rootj->xpc_bootstrapper)) {
9755 job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
9756 return BOOTSTRAP_NOT_PRIVILEGED;
9757 }
9758
9759 // This is just for XPC domains (for now).
9760 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9761 return BOOTSTRAP_NOT_PRIVILEGED;
9762 }
9763 if (j->mgr->session_initialized) {
9764 jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
9765 return BOOTSTRAP_NOT_PRIVILEGED;
9766 }
9767
9768 size_t offset = 0;
9769 launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
9770 if (!services) {
9771 return BOOTSTRAP_NO_MEMORY;
9772 }
9773
9774 int error = _xpc_domain_import_services(j, services);
9775 if (error) {
9776 j->mgr->error = error;
9777 jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
9778 jobmgr_remove(j->mgr);
9779 } else {
9780 j->mgr->session_initialized = true;
9781 (void)jobmgr_assumes_zero(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS));
9782 j->mgr->req_rport = MACH_PORT_NULL;
9783
9784 /* Returning a failure code will destroy the message, whereas returning
9785 * success will not, so we need to clean up here.
9786 */
9787 mig_deallocate(services_buff, services_sz);
9788 error = BOOTSTRAP_SUCCESS;
9789 }
9790
9791 return error;
9792 }
9793
9794 kern_return_t
9795 xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport,
9796 mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid,
9797 int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
9798 {
9799 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9800 return BOOTSTRAP_UNKNOWN_SERVICE;
9801 }
9802 jobmgr_t jm = j->mgr;
9803 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9804 return BOOTSTRAP_NOT_PRIVILEGED;
9805 }
9806
9807 if (jm->req_asport == MACH_PORT_NULL) {
9808 return BOOTSTRAP_NOT_PRIVILEGED;
9809 }
9810
9811 *bsport = jm->req_bsport;
9812 *sbsport = root_jobmgr->jm_port;
9813 *excport = jm->req_excport;
9814 *asport = jm->req_asport;
9815 *uid = jm->req_euid;
9816 *gid = jm->req_egid;
9817 *asid = jm->req_asid;
9818
9819 *ctx = jm->req_ctx;
9820 *ctx_sz = jm->req_ctx_sz;
9821
9822 return KERN_SUCCESS;
9823 }
9824
9825 kern_return_t
9826 xpc_domain_get_service_name(job_t j, event_name_t name)
9827 {
9828 if (!j) {
9829 return BOOTSTRAP_NO_MEMORY;
9830 }
9831 if (!j->xpc_service) {
9832 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
9833 return BOOTSTRAP_NOT_PRIVILEGED;
9834 }
9835
9836 struct machservice * ms = SLIST_FIRST(&j->machservices);
9837 if (!ms) {
9838 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name of job with no MachServices: %s", j->label);
9839 return BOOTSTRAP_UNKNOWN_SERVICE;
9840 }
9841
9842 (void)strlcpy(name, ms->name, sizeof(event_name_t));
9843 return BOOTSTRAP_SUCCESS;
9844 }
9845
9846 #if XPC_LPI_VERSION >= 20111216
9847 kern_return_t
9848 xpc_domain_add_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
9849 {
9850 if (!j) {
9851 return BOOTSTRAP_UNKNOWN_SERVICE;
9852 }
9853
9854 job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
9855 if (!(rootj && rootj->xpc_bootstrapper)) {
9856 job_log(j, LOG_ERR, "Attempt to add service to XPC domain by unprivileged job.");
9857 return BOOTSTRAP_NOT_PRIVILEGED;
9858 }
9859
9860 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9861 return BOOTSTRAP_NOT_PRIVILEGED;
9862 }
9863
9864 size_t offset = 0;
9865 launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
9866 if (!services) {
9867 return BOOTSTRAP_NO_MEMORY;
9868 }
9869
9870 int error = _xpc_domain_import_services(j, services);
9871 if (!error) {
9872 mig_deallocate(services_buff, services_sz);
9873 }
9874
9875 return error;
9876 }
9877 #endif
9878
9879 #pragma mark XPC Events
9880 int
9881 xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms)
9882 {
9883 int error = EXNOMEM;
9884 struct machservice *msi = NULL;
9885 SLIST_FOREACH(msi, &j->machservices, sle) {
9886 if (strcmp(stream, msi->name) == 0) {
9887 break;
9888 }
9889 }
9890
9891 if (!msi) {
9892 mach_port_t sp = MACH_PORT_NULL;
9893 msi = machservice_new(j, stream, &sp, false);
9894 if (!msi) {
9895 return EXNOMEM;
9896 }
9897
9898 job_log(j, LOG_DEBUG, "Creating new MachService for stream: %s", stream);
9899 /* Hack to keep this from being publicly accessible through
9900 * bootstrap_look_up().
9901 */
9902 if (!j->dedicated_instance) {
9903 LIST_REMOVE(msi, name_hash_sle);
9904 }
9905 msi->event_channel = true;
9906
9907 /* If we call job_dispatch() here before the audit session for the job
9908 * has been set, we'll end up not watching this service. But we also have
9909 * to take care not to watch the port if the job is active.
9910 *
9911 * See <rdar://problem/10357855>.
9912 */
9913 if (!j->currently_ignored) {
9914 machservice_watch(j, msi);
9915 }
9916
9917 error = 0;
9918 *ms = msi;
9919 } else if (!msi->event_channel) {
9920 job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
9921 error = EEXIST;
9922 } else {
9923 error = 0;
9924 *ms = msi;
9925 }
9926
9927 return error;
9928 }
9929
9930 int
9931 xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply)
9932 {
9933 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
9934 if (!stream) {
9935 return EXINVAL;
9936 }
9937
9938 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
9939 if (!token) {
9940 return EXINVAL;
9941 }
9942
9943 job_log(j, LOG_DEBUG, "Getting event name for stream/token: %s/0x%llx", stream, token);
9944
9945 int result = ESRCH;
9946 struct externalevent *event = externalevent_find(stream, token);
9947 if (event && j->event_monitor) {
9948 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
9949 xpc_dictionary_set_string(reply2, XPC_EVENT_ROUTINE_KEY_NAME, event->name);
9950 *reply = reply2;
9951
9952 job_log(j, LOG_DEBUG, "Found: %s", event->name);
9953 result = 0;
9954 }
9955
9956 return result;
9957 }
9958
9959 int
9960 xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply)
9961 {
9962 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
9963 if (!stream) {
9964 return EXINVAL;
9965 }
9966
9967 const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);
9968 if (!key) {
9969 return EXINVAL;
9970 }
9971
9972 xpc_object_t event = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_EVENT);
9973 if (event && xpc_get_type(event) != XPC_TYPE_DICTIONARY) {
9974 return EXINVAL;
9975 }
9976
9977 job_log(j, LOG_DEBUG, "%s event for stream/key: %s/%s", event ? "Setting" : "Removing", stream, key);
9978
9979 struct externalevent *eei = NULL;
9980 LIST_FOREACH(eei, &j->events, job_le) {
9981 /* If the event for the given key already exists for the job, we need to
9982 * remove the old one first.
9983 */
9984 if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
9985 job_log(j, LOG_DEBUG, "Event exists. Removing.");
9986 externalevent_delete(eei);
9987 break;
9988 }
9989 }
9990
9991 int result = EXNOMEM;
9992 if (event) {
9993 struct eventsystem *es = eventsystem_find(stream);
9994 if (!es) {
9995 job_log(j, LOG_DEBUG, "Creating stream.");
9996 es = eventsystem_new(stream);
9997 }
9998
9999 if (es) {
10000 job_log(j, LOG_DEBUG, "Adding event.");
10001 if (externalevent_new(j, es, key, event)) {
10002 job_log(j, LOG_DEBUG, "Added new event for key: %s", key);
10003 result = 0;
10004 } else {
10005 job_log(j, LOG_ERR, "Could not create event for key: %s", key);
10006 }
10007 } else {
10008 job_log(j, LOG_ERR, "Event stream could not be created: %s", stream);
10009 }
10010 } else {
10011 /* If the event was NULL, any existing event was already removed above, so just return success. */
10012 result = 0;
10013 }
10014
10015 if (result == 0) {
10016 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10017 *reply = reply2;
10018 }
10019
10020 return result;
10021 }
10022
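/* A sketch of the request shape this routine consumes, assembled with public
 * xpc(3) calls; the routine keys and the XPC_EVENT_SET opcode are the same
 * launchd-internal constants used in xpc_event_demux() below. Illustrative
 * only; not compiled.
 */
#if 0
static xpc_object_t
make_set_event_request(const char *stream, const char *key, xpc_object_t event)
{
	xpc_object_t req = xpc_dictionary_create(NULL, NULL, 0);
	xpc_dictionary_set_uint64(req, XPC_EVENT_ROUTINE_KEY_OP, XPC_EVENT_SET);
	xpc_dictionary_set_string(req, XPC_EVENT_ROUTINE_KEY_STREAM, stream);
	xpc_dictionary_set_string(req, XPC_EVENT_ROUTINE_KEY_NAME, key);
	if (event) {
		// A NULL event means "remove", per xpc_event_set_event() above.
		xpc_dictionary_set_value(req, XPC_EVENT_ROUTINE_KEY_EVENT, event);
	}
	return req;
}
#endif
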
10023 int
10024 xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply)
10025 {
10026 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10027 const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);
10028
10029 bool all_streams = (stream == NULL);
10030 bool all_events = (key == NULL || strcmp(key, "") == 0); // strcmp for libxpc compatibility
10031 xpc_object_t events = NULL;
10032
10033 if (all_streams && !all_events) {
10034 return EXINVAL;
10035 }
10036
10037 if (all_streams || all_events) {
10038 job_log(j, LOG_DEBUG, "Fetching all events%s%s", stream ? " for stream: " : "", stream ? stream : "");
10039 events = xpc_dictionary_create(NULL, NULL, 0);
10040 } else {
10041 job_log(j, LOG_DEBUG, "Fetching stream/key: %s/%s", stream, key);
10042 }
10043
10044 int result = ESRCH;
10045 struct externalevent *eei = NULL;
10046 LIST_FOREACH(eei, &j->events, job_le) {
10047 if (all_streams) {
10048 xpc_object_t sub = xpc_dictionary_get_value(events, eei->sys->name);
10049 if (sub == NULL) {
10050 sub = xpc_dictionary_create(NULL, NULL, 0);
10051 xpc_dictionary_set_value(events, eei->sys->name, sub);
10052 xpc_release(sub);
10053 }
10054 xpc_dictionary_set_value(sub, eei->name, eei->event);
10055 } else if (strcmp(eei->sys->name, stream) == 0) {
10056 if (all_events) {
10057 xpc_dictionary_set_value(events, eei->name, eei->event);
10058 } else if (strcmp(eei->name, key) == 0) {
10059 job_log(j, LOG_DEBUG, "Found event.");
10060 events = xpc_retain(eei->event);
10061 break;
10062 }
10063 }
10064 }
10065
10066 if (events) {
10067 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10068 xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENT, events);
10069 xpc_release(events);
10070
10071 *reply = reply2;
10072 result = 0;
10073 }
10074
10075 return result;
10076 }
10077
10078 int
10079 xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
10080 {
10081 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10082 if (!stream) {
10083 return EXINVAL;
10084 }
10085
10086 job_log(j, LOG_DEBUG, "Checking in stream: %s", stream);
10087
10088 struct machservice *ms = NULL;
10089 int error = xpc_event_find_channel(j, stream, &ms);
10090 if (error) {
10091 job_log(j, LOG_ERR, "Failed to check in: 0x%x: %s", error, xpc_strerror(error));
10092 } else if (ms->isActive) {
10093 job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
10094 error = EBUSY;
10095 } else {
10096 machservice_request_notifications(ms);
10097
10098 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10099 xpc_dictionary_set_mach_recv(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10100 *reply = reply2;
10101 error = 0;
10102 }
10103
10104 return error;
10105 }
10106
10107 int
10108 xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply)
10109 {
10110 if (!j->event_monitor) {
10111 return EPERM;
10112 }
10113
10114 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10115 if (!stream) {
10116 return EXINVAL;
10117 }
10118
10119 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10120 if (!token) {
10121 return EXINVAL;
10122 }
10123
10124 job_log(j, LOG_DEBUG, "Looking up channel for stream/token: %s/%llu", stream, token);
10125
10126 struct externalevent *ee = externalevent_find(stream, token);
10127 if (!ee) {
10128 return ESRCH;
10129 }
10130
10131 struct machservice *ms = NULL;
10132 int error = xpc_event_find_channel(ee->job, stream, &ms);
10133 if (!error) {
10134 job_log(j, LOG_DEBUG, "Found event channel port: 0x%x", ms->port);
10135 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10136 xpc_dictionary_set_mach_send(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10137 *reply = reply2;
10138 error = 0;
10139 } else {
10140 job_log(j, LOG_ERR, "Could not find event channel for stream/token: %s/%llu: 0x%x: %s", stream, token, error, xpc_strerror(error));
10141 }
10142
10143 return error;
10144 }
10145
10146 int
10147 xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
10148 {
10149 if (!j->event_monitor) {
10150 return EPERM;
10151 }
10152
10153 /* This indicates that the event monitor is now safe to signal. This state is
10154 * independent of whether this operation actually succeeds; we just need it
10155 * to ignore SIGUSR1.
10156 */
10157 j->event_monitor_ready2signal = true;
10158
10159 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10160 if (!stream) {
10161 return EXINVAL;
10162 }
10163
10164 job_log(j, LOG_DEBUG, "Provider checking in for stream: %s", stream);
10165
10166 xpc_object_t events = xpc_array_create(NULL, 0);
10167 struct eventsystem *es = eventsystem_find(stream);
10168 if (!es) {
10169 /* If we had to create the event stream, there were no events, so just
10170 * give back the empty array.
10171 */
10172 job_log(j, LOG_DEBUG, "Creating event stream.");
10173 es = eventsystem_new(stream);
10174 if (!job_assumes(j, es)) {
10175 xpc_release(events);
10176 return EXNOMEM;
10177 }
10178
10179 if (strcmp(stream, "com.apple.launchd.helper") == 0) {
10180 _launchd_support_system = es;
10181 }
10182 } else {
10183 job_log(j, LOG_DEBUG, "Filling event array.");
10184
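		/* Note the reply format: the array alternates each event's token and
		 * its payload, i.e. [id0, event0, id1, event1, ...].
		 */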
10185 struct externalevent *ei = NULL;
10186 LIST_FOREACH(ei, &es->events, sys_le) {
10187 xpc_array_set_uint64(events, XPC_ARRAY_APPEND, ei->id);
10188 xpc_array_append_value(events, ei->event);
10189 }
10190 }
10191
10192 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10193 xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENTS, events);
10194 xpc_release(events);
10195 *reply = reply2;
10196
10197 return 0;
10198 }
10199
10200 int
10201 xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply)
10202 {
10203 job_t other_j = NULL;
10204
10205 if (!j->event_monitor) {
10206 return EPERM;
10207 }
10208
10209 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10210 if (!stream) {
10211 return EXINVAL;
10212 }
10213
10214 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10215 if (!token) {
10216 return EXINVAL;
10217 }
10218
10219 bool state = false;
10220 xpc_object_t xstate = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_STATE);
10221 if (!xstate || xpc_get_type(xstate) != XPC_TYPE_BOOL) {
10222 return EXINVAL;
10223 } else {
10224 state = xpc_bool_get_value(xstate);
10225 }
10226
10227 job_log(j, LOG_DEBUG, "Setting event state to %s for stream/token: %s/%llu", state ? "true" : "false", stream, token);
10228
10229 struct externalevent *ei = externalevent_find(stream, token);
10230 if (!ei) {
10231 job_log(j, LOG_ERR, "Could not find stream/token: %s/%llu", stream, token);
10232 return ESRCH;
10233 }
10234
10235 other_j = ei->job;
10236 ei->state = state;
10237
10238 if (ei->internal) {
10239 job_log(ei->job, LOG_NOTICE, "Job should be able to exec(3) now.");
10240 ei->job->waiting4ok = false;
10241 externalevent_delete(ei);
10242 }
10243
10244 (void)job_dispatch(other_j, false);
10245
10246 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10247 *reply = reply2;
10248
10249 return 0;
10250 }
10251
10252 bool
10253 xpc_event_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
10254 {
10255 uint64_t op = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_OP);
10256 if (!op) {
10257 return false;
10258 }
10259
10260 audit_token_t token;
10261 xpc_dictionary_get_audit_token(request, &token);
10262 runtime_record_caller_creds(&token);
10263
10264 job_t j = job_mig_intran(p);
10265 if (!j || j->anonymous) {
10266 op = -1;
10267 }
10268
10269 job_log(j, LOG_DEBUG, "Incoming XPC event request: %llu", op);
10270
10271 int error = -1;
10272 switch (op) {
10273 case XPC_EVENT_GET_NAME:
10274 error = xpc_event_get_event_name(j, request, reply);
10275 break;
10276 case XPC_EVENT_SET:
10277 error = xpc_event_set_event(j, request, reply);
10278 break;
10279 case XPC_EVENT_COPY:
10280 error = xpc_event_copy_event(j, request, reply);
10281 break;
10282 case XPC_EVENT_CHECK_IN:
10283 error = xpc_event_channel_check_in(j, request, reply);
10284 break;
10285 case XPC_EVENT_LOOK_UP:
10286 error = xpc_event_channel_look_up(j, request, reply);
10287 break;
10288 case XPC_EVENT_PROVIDER_CHECK_IN:
10289 error = xpc_event_provider_check_in(j, request, reply);
10290 break;
10291 case XPC_EVENT_PROVIDER_SET_STATE:
10292 error = xpc_event_provider_set_state(j, request, reply);
10293 break;
10294 case -1:
10295 if (j) {
10296 job_log(j, LOG_ERR, "Unmanaged jobs may not make XPC Events requests.");
10297 }
10298 error = EPERM;
10299 break;
10300 default:
10301 job_log(j, LOG_ERR, "Bogus opcode.");
10302 error = EDOM;
10303 }
10304
10305 if (error) {
10306 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10307 xpc_dictionary_set_uint64(reply2, XPC_EVENT_ROUTINE_KEY_ERROR, error);
10308 *reply = reply2;
10309 }
10310
10311 return true;
10312 }
10313
10314 kern_return_t
10315 job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
10316 {
10317 struct ldcred *ldc = runtime_get_caller_creds();
10318 job_t otherj;
10319
10320 if (!j) {
10321 return BOOTSTRAP_NO_MEMORY;
10322 }
10323
10324 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
10325 return BOOTSTRAP_UNKNOWN_SERVICE;
10326 }
10327
10328 #if TARGET_OS_EMBEDDED
10329 bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
10330 #else
10331 bool allow_non_root_kickstart = false;
10332 #endif
10333
10334 if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
10335 return BOOTSTRAP_NOT_PRIVILEGED;
10336 }
10337
10338 #if HAVE_SANDBOX
10339 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10340 return BOOTSTRAP_NOT_PRIVILEGED;
10341 }
10342 #endif
10343
10344 if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
10345 return BOOTSTRAP_SERVICE_ACTIVE;
10346 }
10347
10348 otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
10349 otherj = job_dispatch(otherj, true);
10350
10351 if (!job_assumes(j, otherj && otherj->p)) {
10352 // <rdar://problem/6787083> Clear this flag if we failed to start the job.
10353 if (otherj) { otherj->stall_before_exec = false; }
10354 return BOOTSTRAP_NO_MEMORY;
10355 }
10356
10357 *out_pid = otherj->p;
10358
10359 return 0;
10360 }
10361
10362 kern_return_t
10363 job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
10364 {
10365 launch_data_t jobdata = NULL;
10366 size_t data_offset = 0;
10367 struct ldcred *ldc = runtime_get_caller_creds();
10368 job_t jr;
10369
10370 if (!j) {
10371 return BOOTSTRAP_NO_MEMORY;
10372 }
10373
10374 if (unlikely(j->deny_job_creation)) {
10375 return BOOTSTRAP_NOT_PRIVILEGED;
10376 }
10377
10378 #if HAVE_SANDBOX
10379 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10380 return BOOTSTRAP_NOT_PRIVILEGED;
10381 }
10382 #endif
10383
10384 if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
10385 job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
10386 return VPROC_ERR_TRY_PER_USER;
10387 }
10388
10389 if (!job_assumes(j, indataCnt != 0)) {
10390 return 1;
10391 }
10392
10393 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
10394 if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
10395 return 1;
10396 }
10397
10398 jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
10399 if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
10400 jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
10401 return 1;
10402 }
10403
10404 jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);
10405
10406 launch_data_t label = NULL;
10407 launch_data_t wait4debugger = NULL;
10408 if (!jr) {
10409 switch (errno) {
10410 case EEXIST:
10411 /* If EEXIST was returned, we know that there is a label string in
10412 * the dictionary. So we don't need to check the types here; that
10413 * has already been done.
10414 */
10415 label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
10416 jr = job_find(NULL, launch_data_get_string(label));
10417 if (job_assumes(j, jr != NULL) && !jr->p) {
10418 wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
10419 if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
10420 if (launch_data_get_bool(wait4debugger)) {
10421 /* If the job exists, we're going to kick-start it, but
10422 * we need to give the caller the opportunity to start
10423 * it suspended if it so desires. But this will only
10424 * take effect if the job isn't running.
10425 */
10426 jr->wait4debugger_oneshot = true;
10427 }
10428 }
10429 }
10430
10431 *outj = jr;
10432 return BOOTSTRAP_NAME_IN_USE;
10433 default:
10434 return BOOTSTRAP_NO_MEMORY;
10435 }
10436 }
10437
10438 if (pid1_magic) {
10439 jr->mach_uid = ldc->uid;
10440 }
10441
10442 // TODO: Consolidate the app and legacy_LS_job bits.
10443 jr->legacy_LS_job = true;
10444 jr->abandon_pg = true;
10445 jr->asport = asport;
10446 jr->app = true;
10447 uuid_clear(jr->expected_audit_uuid);
10448 jr = job_dispatch(jr, true);
10449
10450 if (!job_assumes(j, jr != NULL)) {
10451 job_remove(jr);
10452 return BOOTSTRAP_NO_MEMORY;
10453 }
10454
10455 if (!job_assumes(jr, jr->p)) {
10456 job_remove(jr);
10457 return BOOTSTRAP_NO_MEMORY;
10458 }
10459
10460 job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
10461 *outj = jr;
10462
10463 return BOOTSTRAP_SUCCESS;
10464 }
10465
10466 kern_return_t
10467 job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
10468 {
10469 job_t nj = NULL;
10470 kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
10471 if (likely(kr == KERN_SUCCESS)) {
10472 if (job_setup_exit_port(nj) != KERN_SUCCESS) {
10473 job_remove(nj);
10474 kr = BOOTSTRAP_NO_MEMORY;
10475 } else {
10476 /* Do not return until the job has called exec(3), thereby making it
10477 * safe for the caller to send it SIGCONT.
10478 *
10479 * <rdar://problem/9042798>
10480 */
10481 nj->spawn_reply_port = rp;
10482 kr = MIG_NO_REPLY;
10483 }
10484 } else if (kr == BOOTSTRAP_NAME_IN_USE) {
10485 bool was_running = nj->p;
10486 if (job_dispatch(nj, true)) {
10487 if (!was_running) {
10488 job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");
10489
10490 if (job_setup_exit_port(nj) == KERN_SUCCESS) {
10491 nj->spawn_reply_port = rp;
10492 kr = MIG_NO_REPLY;
10493 } else {
10494 kr = BOOTSTRAP_NO_MEMORY;
10495 }
10496 } else {
10497 *obsvr_port = MACH_PORT_NULL;
10498 *child_pid = nj->p;
10499 kr = KERN_SUCCESS;
10500 }
10501 } else {
10502 job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
10503 kr = BOOTSTRAP_UNKNOWN_SERVICE;
10504 }
10505 }
10506
10507 mig_deallocate(indata, indataCnt);
10508 return kr;
10509 }
10510
10511 launch_data_t
10512 job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport __attribute__((unused)))
10513 {
10514 launch_data_t reply = NULL;
10515
10516 errno = ENOTSUP;
10517 if (launch_data_get_type(request) == LAUNCH_DATA_STRING) {
10518 if (strcmp(launch_data_get_string(request), LAUNCH_KEY_CHECKIN) == 0) {
10519 reply = job_export(j);
10520 job_checkin(j);
10521 }
10522 }
10523
10524 return reply;
10525 }
10526
10527 #define LAUNCHD_MAX_LEGACY_FDS 128
10528 #define countof(x) (sizeof((x)) / sizeof((x)[0]))
10529
10530 kern_return_t
10531 job_mig_legacy_ipc_request(job_t j, vm_offset_t request,
10532 mach_msg_type_number_t requestCnt, mach_port_array_t request_fds,
10533 mach_msg_type_number_t request_fdsCnt, vm_offset_t *reply,
10534 mach_msg_type_number_t *replyCnt, mach_port_array_t *reply_fdps,
10535 mach_msg_type_number_t *reply_fdsCnt, mach_port_t asport)
10536 {
10537 if (!j) {
10538 return BOOTSTRAP_NO_MEMORY;
10539 }
10540
10541 /* TODO: Once we support actions other than checking in, we must check the
10542 * sandbox capabilities and EUID of the requestor.
10543 */
10544 size_t nout_fdps = 0;
10545 size_t nfds = request_fdsCnt / sizeof(request_fds[0]);
10546 if (nfds > LAUNCHD_MAX_LEGACY_FDS) {
10547 job_log(j, LOG_ERR, "Too many incoming descriptors: %lu", nfds);
10548 return BOOTSTRAP_NO_MEMORY;
10549 }
10550
10551 int in_fds[LAUNCHD_MAX_LEGACY_FDS];
10552 size_t i = 0;
10553 for (i = 0; i < nfds; i++) {
10554 in_fds[i] = fileport_makefd(request_fds[i]);
10555 if (in_fds[i] == -1) {
10556 job_log(j, LOG_ERR, "Bad descriptor passed in legacy IPC request at index: %lu", i);
10557 }
10558 }
10559
10560 // DON'T goto out_bad before this point.
10561 *reply = 0;
10562 *reply_fdps = NULL;
10563 launch_data_t ldreply = NULL;
10564
10565 size_t dataoff = 0;
10566 size_t fdoff = 0;
10567 launch_data_t ldrequest = launch_data_unpack((void *)request, requestCnt, in_fds, nfds, &dataoff, &fdoff);
10568 if (!ldrequest) {
10569 job_log(j, LOG_ERR, "Invalid legacy IPC request passed.");
10570 goto out_bad;
10571 }
10572
10573 ldreply = job_do_legacy_ipc_request(j, ldrequest, asport);
10574 if (!ldreply) {
10575 ldreply = launch_data_new_errno(errno);
10576 if (!ldreply) {
10577 goto out_bad;
10578 }
10579 }
10580
10581 *replyCnt = 10 * 1024 * 1024;
10582 mig_allocate(reply, *replyCnt);
10583 if (!*reply) {
10584 goto out_bad;
10585 }
10586
10587 int out_fds[LAUNCHD_MAX_LEGACY_FDS];
10588 size_t nout_fds = 0;
10589 size_t sz = launch_data_pack(ldreply, (void *)*reply, *replyCnt, out_fds, &nout_fds);
10590 if (!sz) {
10591 job_log(j, LOG_ERR, "Could not pack legacy IPC reply.");
10592 goto out_bad;
10593 }
10594
10595 if (nout_fds) {
10596 if (nout_fds > LAUNCHD_MAX_LEGACY_FDS) {
10597 job_log(j, LOG_ERR, "Too many outgoing descriptors: %lu", nout_fds);
10598 goto out_bad;
10599 }
10600
10601 *reply_fdsCnt = nout_fds * sizeof((*reply_fdps)[0]);
10602 mig_allocate((vm_address_t *)reply_fdps, *reply_fdsCnt);
10603 if (!*reply_fdps) {
10604 goto out_bad;
10605 }
10606
10607 for (i = 0; i < nout_fds; i++) {
10608 mach_port_t fp = MACH_PORT_NULL;
10609 /* Whatever. Worst case is that we insert MACH_PORT_NULL. Not a big
10610 * deal. Note, these get stuffed into an array whose disposition is
10611 * mach_port_move_send_t, so we don't have to worry about them after
10612 * returning.
10613 */
10614 if (fileport_makeport(out_fds[i], &fp) != 0) {
10615 job_log(j, LOG_ERR, "Could not pack response descriptor at index: %lu: %d: %s", i, errno, strerror(errno));
10616 }
10617 (*reply_fdps)[i] = fp;
10618 }
10619
10620 nout_fdps = nout_fds;
10621 } else {
10622 *reply_fdsCnt = 0;
10623 }
10624
10625 mig_deallocate(request, requestCnt);
10626 launch_data_free(ldreply);
10627 ldreply = NULL;
10628
10629 // Unused for now.
10630 (void)launchd_mport_deallocate(asport);
10631
10632 return BOOTSTRAP_SUCCESS;
10633
10634 out_bad:
10635 for (i = 0; i < nfds; i++) {
10636 (void)close(in_fds[i]);
10637 }
10638
10639 for (i = 0; i < nout_fds; i++) {
10640 (void)launchd_mport_deallocate((*reply_fdps)[i]);
10641 }
10642
10643 if (*reply) {
10644 mig_deallocate(*reply, *replyCnt);
10645 }
10646
10647 /* We should never hit this case: the last goto out_bad is taken when
10648 * allocating this array fails, so *reply_fdps is still NULL here.
10649 */
10650 if (*reply_fdps) {
10651 mig_deallocate((vm_address_t)*reply_fdps, *reply_fdsCnt);
10652 }
10653
10654 if (ldreply) {
10655 launch_data_free(ldreply);
10656 }
10657
10658 return BOOTSTRAP_NO_MEMORY;
10659 }
10660
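/* The client-side counterpart: the legacy check-in handled above is what
 * launch_msg(3) with LAUNCH_KEY_CHECKIN performs over this MIG route.
 * A minimal sketch (illustrative only; not compiled):
 */
#if 0
#include <launch.h>

static launch_data_t
legacy_checkin(void)
{
	launch_data_t req = launch_data_new_string(LAUNCH_KEY_CHECKIN);
	launch_data_t resp = launch_msg(req); // round-trips through job_mig_legacy_ipc_request()
	launch_data_free(req);
	return resp; // the job_export() dictionary, or NULL with errno set
}
#endif
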
10661 void
10662 jobmgr_init(bool sflag)
10663 {
10664 const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
10665 SLIST_INIT(&s_curious_jobs);
10666 LIST_INIT(&s_needing_sessions);
10667
10668 osx_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
10669 osx_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
10670 _s_xpc_system_domain->req_asid = launchd_audit_session;
10671 _s_xpc_system_domain->req_asport = launchd_audit_port;
10672 _s_xpc_system_domain->shortdesc = "system";
10673 if (pid1_magic) {
10674 root_jobmgr->monitor_shutdown = true;
10675 }
10676
10677 uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
10678 s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
10679 if (likely(s_no_hang_fd == -1)) {
10680 if (jobmgr_assumes_zero_p(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK))) != -1) {
10681 (void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr));
10682 }
10683 }
10684 s_no_hang_fd = _fd(s_no_hang_fd);
10685 }
10686
10687 size_t
10688 our_strhash(const char *s)
10689 {
10690 size_t c, r = 5381;
10691
10692 /* djb2
10693 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
10694 */
10695
10696 while ((c = *s++)) {
10697 r = ((r << 5) + r) + c; // hash*33 + c
10698 }
10699
10700 return r;
10701 }
10702
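/* A self-contained sketch of the hashing scheme (illustrative only; not
 * compiled): hash_label() and hash_ms() below simply reduce this djb2 value
 * modulo the table size.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

static size_t
djb2(const char *s)
{
	size_t c, r = 5381;
	while ((c = *s++)) {
		r = ((r << 5) + r) + c; // hash*33 + c
	}
	return r;
}

int
main(void)
{
	// 42 stands in for the real table size (LABEL_HASH_SIZE/MACHSERVICE_HASH_SIZE).
	printf("bucket: %zu\n", djb2("com.apple.example") % 42);
	return 0;
}
#endif
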
10703 size_t
10704 hash_label(const char *label)
10705 {
10706 return our_strhash(label) % LABEL_HASH_SIZE;
10707 }
10708
10709 size_t
10710 hash_ms(const char *msstr)
10711 {
10712 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
10713 }
10714
10715 bool
10716 waiting4removal_new(job_t j, mach_port_t rp)
10717 {
10718 struct waiting_for_removal *w4r;
10719
10720 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
10721 return false;
10722 }
10723
10724 w4r->reply_port = rp;
10725
10726 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
10727
10728 return true;
10729 }
10730
10731 void
10732 waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
10733 {
10734 (void)job_assumes_zero(j, job_mig_send_signal_reply(w4r->reply_port, 0));
10735
10736 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
10737
10738 free(w4r);
10739 }
10740
10741 size_t
10742 get_kern_max_proc(void)
10743 {
10744 int mib[] = { CTL_KERN, KERN_MAXPROC };
10745 int max = 100;
10746 size_t max_sz = sizeof(max);
10747
10748 (void)posix_assumes_zero(sysctl(mib, 2, &max, &max_sz, NULL, 0));
10749
10750 return max;
10751 }
10752
10753 // See rdar://problem/6271234
10754 void
10755 eliminate_double_reboot(void)
10756 {
10757 if (unlikely(!pid1_magic)) {
10758 return;
10759 }
10760
10761 struct stat sb;
10762 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
10763 int result = -1;
10764
10765 if (unlikely(stat(argv[1], &sb) != -1)) {
10766 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
10767
10768 pid_t p = 0;
10769 result = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ);
10770 if (result != 0) { // posix_spawnp(3) returns an errno value, not -1
10771 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script: %d: %s", result, strerror(result));
10772 goto out;
10773 }
10774
10775 int wstatus = 0;
10776 result = waitpid(p, &wstatus, 0);
10777 if (result == -1) {
10778 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to reap deferred install script: %d: %s", errno, strerror(errno));
10779 goto out;
10780 }
10781
10782 if (WIFEXITED(wstatus)) {
10783 if ((result = WEXITSTATUS(wstatus)) == 0) {
10784 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
10785 } else {
10786 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script failed with status: %d", WEXITSTATUS(wstatus));
10787 }
10788 } else {
10789 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Weirdness with install script: %d", wstatus);
10790 }
10791 }
10792 out:
10793 if (result == 0) {
10794 /* If the unlink(2) were to fail, it would most likely fail with
10795 * EBUSY. All the other failure cases for unlink(2) don't apply when
10796 * we're running under PID 1 and have verified that the file exists.
10797 * Outside of someone deliberately messing with us (like if
10798 * /etc/rc.deferred_install is actually a looping sym-link or a mount
10799 * point for a filesystem) and I/O errors, we should be good.
10800 */
10801 if (unlink(argv[1]) == -1) {
10802 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to remove deferred install script: %d: %s", errno, strerror(errno));
10803 }
10804 }
10805 }
10806
10807 void
10808 jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
10809 {
10810 job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
10811 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10812 j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
10813 job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
10814 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10815 j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
10816 job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
10817 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
10818 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
10819 * You can't set this in a plist.
10820 */
10821 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMACTIVE) == 0) {
10822 // Ignore.
10823 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
10824 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
10825 * complain about it.
10826 */
10827 } else {
10828 job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
10829 }
10830
10831 if (unlikely(!j->jetsam_properties)) {
10832 j->jetsam_properties = true;
10833 }
10834 }
10835
10836 #if TARGET_OS_EMBEDDED
10837 int
10838 launchd_set_jetsam_priorities(launch_data_t priorities)
10839 {
10840 kern_return_t result = 0;
10841
10842 if (launch_data_get_type(priorities) != LAUNCH_DATA_ARRAY) {
10843 return EINVAL;
10844 }
10845 if (!launchd_embedded_handofgod) {
10846 return EPERM;
10847 }
10848
10849 size_t npris = launch_data_array_get_count(priorities);
10850
10851 job_t ji = NULL;
10852 size_t i = 0;
10853 for (i = 0; i < npris; i++) {
10854 launch_data_t ldi = launch_data_array_get_index(priorities, i);
10855 if (launch_data_get_type(ldi) != LAUNCH_DATA_DICTIONARY) {
10856 continue;
10857 }
10858
10859 launch_data_t ldlabel = NULL;
10860 if (!(ldlabel = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMLABEL))) {
10861 continue;
10862 }
10863 const char *label = launch_data_get_string(ldlabel);
10864
10865 ji = job_find(root_jobmgr, label);
10866 if (!ji) {
10867 continue;
10868 }
10869
10870 launch_data_dict_iterate(ldi, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, ji);
10871
10872 launch_data_t frontmost = NULL;
10873 if ((frontmost = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMFRONTMOST)) && launch_data_get_type(frontmost) == LAUNCH_DATA_BOOL) {
10874 ji->jetsam_frontmost = launch_data_get_bool(frontmost);
10875 }
10876
10877 launch_data_t active = NULL;
10878 if ((active = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMACTIVE)) && launch_data_get_type(active) == LAUNCH_DATA_BOOL) {
10879 ji->jetsam_active = launch_data_get_bool(active);
10880 }
10881
10882 result = launchd_update_jetsam_list(ji);
10883
10884 result = result != 0 ? errno : 0;
10885 }
10886
10887 return result;
10888 }
10889
10890 int
10891 launchd_update_jetsam_list(job_t j)
10892 {
10893 memorystatus_priority_entry_t mpe;
10894 kern_return_t result;
10895
10896 mpe.pid = j->p;
10897 mpe.priority = j->jetsam_priority;
10898 mpe.flags = 0;
10899 mpe.flags |= j->jetsam_frontmost ? kMemorystatusFlagsFrontmost : 0;
10900 mpe.flags |= j->jetsam_active ? kMemorystatusFlagsActive : 0;
10901
10902 // ToDo - cache MIB if we keep this interface
10903 (void)posix_assumes_zero(result = sysctlbyname("kern.memorystatus_jetsam_change", NULL, NULL, &mpe, sizeof(memorystatus_priority_entry_t)));
10904 return result;
10905 }
10906 #endif