/* Apple launchd — src/core.c
 * (gitweb export header; blob beb8e2106a5a353399efd2d6c46a5b2e0bb827c1)
 */
1 /*
2 * @APPLE_APACHE_LICENSE_HEADER_START@
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * @APPLE_APACHE_LICENSE_HEADER_END@
17 */
18
19 #include "config.h"
20 #include "core.h"
21 #include "internal.h"
22 #include "helper.h"
23
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/boolean.h>
28 #include <mach/message.h>
29 #include <mach/notify.h>
30 #include <mach/mig_errors.h>
31 #include <mach/mach_traps.h>
32 #include <mach/mach_interface.h>
33 #include <mach/host_info.h>
34 #include <mach/mach_host.h>
35 #include <mach/exception.h>
36 #include <mach/host_reboot.h>
37 #include <sys/types.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
40 #include <sys/stat.h>
41 #include <sys/ucred.h>
42 #include <sys/fcntl.h>
43 #include <sys/un.h>
44 #include <sys/reboot.h>
45 #include <sys/wait.h>
46 #include <sys/sysctl.h>
47 #include <sys/sockio.h>
48 #include <sys/time.h>
49 #include <sys/resource.h>
50 #include <sys/ioctl.h>
51 #include <sys/mount.h>
52 #include <sys/pipe.h>
53 #include <sys/mman.h>
54 #include <sys/socket.h>
55 #include <sys/syscall.h>
56 #include <sys/kern_memorystatus.h>
57 #include <net/if.h>
58 #include <netinet/in.h>
59 #include <netinet/in_var.h>
60 #include <netinet6/nd6.h>
61 #include <bsm/libbsm.h>
62 #include <unistd.h>
63 #include <signal.h>
64 #include <errno.h>
65 #include <libgen.h>
66 #include <stdio.h>
67 #include <stdlib.h>
68 #include <stdarg.h>
69 #include <stdbool.h>
70 #include <paths.h>
71 #include <pwd.h>
72 #include <grp.h>
73 #include <ttyent.h>
74 #include <dlfcn.h>
75 #include <dirent.h>
76 #include <string.h>
77 #include <ctype.h>
78 #include <glob.h>
79 #include <System/sys/spawn.h>
80 #include <System/sys/spawn_internal.h>
81 #include <spawn.h>
82 #include <spawn_private.h>
83 #include <time.h>
84 #include <libinfo.h>
85 #include <assumes.h>
86 #include <xpc/launchd.h>
87
88 #include <libproc.h>
89 #include <System/sys/proc_info.h>
90 #include <malloc/malloc.h>
91 #include <pthread.h>
92 #include <libproc.h>
93 #if HAVE_SANDBOX
94 #define __APPLE_API_PRIVATE
95 #include <sandbox.h>
96 #endif
97 #if HAVE_QUARANTINE
98 #include <quarantine.h>
99 #endif
100 #if !TARGET_OS_EMBEDDED
101 extern int gL1CacheEnabled;
102 #endif
103
104 #include "launch.h"
105 #include "launch_priv.h"
106 #include "launch_internal.h"
107 #include "bootstrap.h"
108 #include "bootstrap_priv.h"
109 #include "vproc.h"
110 #include "vproc_internal.h"
111
112 #include "reboot2.h"
113
114 #include "launchd.h"
115 #include "runtime.h"
116 #include "ipc.h"
117 #include "job.h"
118 #include "jobServer.h"
119 #include "job_reply.h"
120 #include "job_forward.h"
121 #include "mach_excServer.h"
122
123 #define POSIX_SPAWN_IOS_INTERACTIVE 0
124
125 /* LAUNCHD_DEFAULT_EXIT_TIMEOUT
126 * If the job hasn't exited in the given number of seconds after sending
127 * it a SIGTERM, SIGKILL it. Can be overriden in the job plist.
128 */
129 #define LAUNCHD_MIN_JOB_RUN_TIME 10
130 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
131 #define LAUNCHD_SIGKILL_TIMER 4
132 #define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
133
134 #define SHUTDOWN_LOG_DIR "/var/log/shutdown"
135
136 #define TAKE_SUBSET_NAME "TakeSubsetName"
137 #define TAKE_SUBSET_PID "TakeSubsetPID"
138 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
139
140 #define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
141
142 #ifndef NOTE_EXIT_REPARENTED
143 #define NOTE_EXIT_REPARENTED 0x00080000
144 #endif
145
146 extern char **environ;
147
/* One client blocked waiting for a job's removal; reply_port is the Mach
 * reply port the answer is (presumably) sent on once the job is gone — see
 * waiting4removal_new()/waiting4removal_delete() below.
 */
struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle;
	mach_port_t reply_port;
};
152
153 static bool waiting4removal_new(job_t j, mach_port_t rp);
154 static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
155
/* A Mach service owned by a job. An entry is linked into the owning job's
 * list (sle), a name-keyed hash (name_hash_sle) and a port-keyed hash
 * (port_hash_sle); services occupying host special ports are additionally
 * linked into the global special_ports list (special_port_sle). The service
 * name is stored inline at the tail of the allocation.
 */
struct machservice {
	SLIST_ENTRY(machservice) sle;
	SLIST_ENTRY(machservice) special_port_sle;
	LIST_ENTRY(machservice) name_hash_sle;
	LIST_ENTRY(machservice) port_hash_sle;
	// When this entry is an alias, points at the original service
	// (see machservice_new_alias()).
	struct machservice *alias;
	job_t job;
	// Generation counter; presumably bumped by machservice_resetport() — TODO confirm.
	unsigned int gen_num;
	mach_port_name_t port;
	unsigned int
		isActive:1,
		reset:1,
		recv:1,
		hide:1,
		kUNCServer:1,
		per_user_hack:1,
		debug_on_close:1,
		per_pid:1,
		delete_on_destruction:1,
		drain_one_on_crash:1,
		drain_all_on_crash:1,
		upfront:1,
		event_channel:1,
		/* Don't let the size of this field get too small. It has to be large
		 * enough to represent the reasonable range of special port numbers.
		 */
		special_port_num:18;
	// Inline flexible tail: the service's bootstrap name.
	const char name[0];
};
185
// HACK: This should be per jobmgr_t
static SLIST_HEAD(, machservice) special_ports;

#define PORT_HASH_SIZE 32
// Mask when the table size is a power of two, modulo otherwise; the ternary
// is a compile-time constant so the untaken branch folds away.
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))

// Global port-keyed lookup table for all machservices.
static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
193
194 static void machservice_setup(launch_data_t obj, const char *key, void *context);
195 static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
196 static void machservice_resetport(job_t j, struct machservice *ms);
197 static void machservice_stamp_port(job_t j, struct machservice *ms);
198 static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
199 static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
200 static void machservice_ignore(job_t j, struct machservice *ms);
201 static void machservice_watch(job_t j, struct machservice *ms);
202 static void machservice_delete(job_t j, struct machservice *, bool port_died);
203 static void machservice_request_notifications(struct machservice *);
204 static mach_port_t machservice_port(struct machservice *);
205 static job_t machservice_job(struct machservice *);
206 static bool machservice_hidden(struct machservice *);
207 static bool machservice_active(struct machservice *);
208 static const char *machservice_name(struct machservice *);
209 static bootstrap_status_t machservice_status(struct machservice *);
210 void machservice_drain_port(struct machservice *);
211
/* A named group of file descriptors belonging to a job (presumably one entry
 * per key of the job's Sockets plist dictionary — see socketgroup_setup()).
 * fds holds fd_cnt descriptors; the group name is stored inline at the tail.
 */
struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;
	int *fds;
	unsigned int fd_cnt;
	// name_init aliases name so the otherwise-const inline string can be
	// written once at creation time without casting away const.
	union {
		const char name[0];
		char name_init[0];
	};
};
221
222 static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt);
223 static void socketgroup_delete(job_t j, struct socketgroup *sg);
224 static void socketgroup_watch(job_t j, struct socketgroup *sg);
225 static void socketgroup_ignore(job_t j, struct socketgroup *sg);
226 static void socketgroup_callback(job_t j);
227 static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
228 static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
229
/* One cron-style calendar trigger for a job: the time specification (when)
 * plus the next computed absolute fire time (when_next). Each entry is on
 * both the owning job's list (sle) and the global sorted_calendar_events
 * list (global_sle).
 */
struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle;
	SLIST_ENTRY(calendarinterval) sle;
	job_t job;
	struct tm when;
	time_t when_next;
};
237
238 static LIST_HEAD(, calendarinterval) sorted_calendar_events;
239
240 static bool calendarinterval_new(job_t j, struct tm *w);
241 static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
242 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
243 static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
244 static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
245 static void calendarinterval_callback(void);
246 static void calendarinterval_sanity_check(void);
247
/* A single environment variable. The key is stored inline at the tail of
 * the allocation; the value is heap-allocated separately. Items live on
 * either a job's env list or its global_env list (see envitem_new()).
 */
struct envitem {
	SLIST_ENTRY(envitem) sle;
	char *value;
	// key_init aliases key so the const inline string can be written once
	// at creation time without casting away const.
	union {
		const char key[0];
		char key_init[0];
	};
};
256
257 static bool envitem_new(job_t j, const char *k, const char *v, bool global);
258 static void envitem_delete(job_t j, struct envitem *ei, bool global);
259 static void envitem_setup(launch_data_t obj, const char *key, void *context);
260
/* A resource-limit request for a job: which RLIMIT_* to adjust (which) and
 * whether the soft and/or hard value in lim should be applied.
 */
struct limititem {
	SLIST_ENTRY(limititem) sle;
	struct rlimit lim;
	unsigned int setsoft:1, sethard:1, which:30;
};
266
267 static bool limititem_update(job_t j, int w, rlim_t r);
268 static void limititem_delete(job_t j, struct limititem *li);
269 static void limititem_setup(launch_data_t obj, const char *key, void *context);
270 #if HAVE_SANDBOX
271 static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
272 #endif
273
274 static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
275
/* Why a KeepAlive ("semaphore") criterion should start — or hold back — a
 * job; stored in struct semaphoreitem.why below.
 */
typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	CRASHED,
	DID_NOT_CRASH,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
} semaphore_reason_t;
288
/* One KeepAlive criterion: the reason (why) plus an inline string argument
 * stored at the tail of the allocation (presumably a path or another job's
 * label for the OTHER_JOB_* reasons — see semaphoreitem_new()).
 */
struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle;
	semaphore_reason_t why;

	// what_init aliases what so the const inline string can be written once
	// at creation time without casting away const.
	union {
		const char what[0];
		char what_init[0];
	};
};
298
/* Context passed while iterating a KeepAlive sub-dictionary: each boolean
 * entry maps to why_true or why_false depending on its value — see
 * semaphoreitem_setup_dict_iter().
 */
struct semaphoreitem_dict_iter_context {
	job_t j;
	semaphore_reason_t why_true;
	semaphore_reason_t why_false;
};
304
305 static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
306 static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
307 static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
308 static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
309 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
310
/* An externally-delivered event: linked into its event stream (sys/sys_le)
 * and into the interested job's list (job/job_le). state holds the event's
 * current truth value; wanted_state is presumably the value that should
 * trigger the job — TODO confirm against the event-monitor protocol. The
 * event name is stored inline at the tail of the allocation.
 */
struct externalevent {
	LIST_ENTRY(externalevent) sys_le;
	LIST_ENTRY(externalevent) job_le;
	struct eventsystem *sys;

	// Stream-unique identifier (compare eventsystem.curid).
	uint64_t id;
	job_t job;
	bool state;
	bool wanted_state;
	bool internal;
	// The event payload as an XPC object.
	xpc_object_t event;

	char name[0];
};
325
/* Context for iterating a job's external events within one event stream. */
struct externalevent_iter_ctx {
	job_t j;
	struct eventsystem *sys;
};
330
331 static bool externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event);
332 static void externalevent_delete(struct externalevent *ee);
333 static void externalevent_setup(launch_data_t obj, const char *key, void *context);
334 static struct externalevent *externalevent_find(const char *sysname, uint64_t id);
335
/* A named external event stream and the events currently registered on it.
 * curid is presumably the next id to assign to a new event (externalevents
 * carry a uint64_t id) — TODO confirm in eventsystem_new()/externalevent_new().
 * The stream name is stored inline at the tail of the allocation.
 */
struct eventsystem {
	LIST_ENTRY(eventsystem) global_le;
	LIST_HEAD(, externalevent) events;
	uint64_t curid;
	char name[0];
};
342
343 static struct eventsystem *eventsystem_new(const char *name);
344 static void eventsystem_delete(struct eventsystem *sys) __attribute__((unused));
345 static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
346 static struct eventsystem *eventsystem_find(const char *name);
347 static void eventsystem_ping(void);
348
#define ACTIVE_JOB_HASH_SIZE 32
// Mask when the table size is a power of two, modulo otherwise; the ternary
// is a compile-time constant so the untaken branch folds away.
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))

// 37 and 53 are prime — presumably chosen so the plain-modulo hashes below
// distribute well.
#define MACHSERVICE_HASH_SIZE 37

#define LABEL_HASH_SIZE 53
/* A job manager: one bootstrap namespace (or XPC domain) plus the jobs that
 * live in it. Managers form a tree via parentmgr/submgrs; the manager's own
 * name is stored inline at the tail of the allocation.
 */
struct jobmgr_s {
	// Kevent callback; first member, mirroring job_s's "MUST be first"
	// kq_callback convention — presumably dispatched through the same
	// mechanism (see jobmgr_callback()).
	kq_callback kqjobmgr_callback;
	LIST_ENTRY(jobmgr_s) xpc_le;
	SLIST_ENTRY(jobmgr_s) sle;
	SLIST_HEAD(, jobmgr_s) submgrs;
	LIST_HEAD(, job_s) jobs;

	/* For legacy reasons, we keep all job labels that are imported in the root
	 * job manager's label hash. If a job manager is an XPC domain, then it gets
	 * its own label hash that is separate from the "global" one stored in the
	 * root job manager.
	 */
	LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
	LIST_HEAD(, job_s) global_env_jobs;
	// The port this manager serves requests on — presumably its bootstrap
	// port; TODO confirm.
	mach_port_t jm_port;
	mach_port_t req_port;
	jobmgr_t parentmgr;
	int reboot_flags;
	time_t shutdown_time;
	unsigned int global_on_demand_cnt;
	unsigned int normal_active_cnt;
	unsigned int
		shutting_down:1,
		session_initialized:1,
		killed_stray_jobs:1,
		monitor_shutdown:1,
		shutdown_jobs_dirtied:1,
		shutdown_jobs_cleaned:1,
		xpc_singleton:1;
	uint32_t properties;
	// XPC-specific properties (describe the requestor that created the domain).
	char owner[MAXCOMLEN];
	char *shortdesc;
	mach_port_t req_bsport;
	mach_port_t req_excport;
	mach_port_t req_asport;
	pid_t req_pid;
	uid_t req_euid;
	gid_t req_egid;
	au_asid_t req_asid;
	vm_offset_t req_ctx;
	mach_msg_type_number_t req_ctx_sz;
	mach_port_t req_rport;
	kern_return_t error;
	// name_init aliases name so the const inline string can be written once
	// at creation time without casting away const.
	union {
		const char name[0];
		char name_init[0];
	};
};
406
407 // Global XPC domains.
408 static jobmgr_t _s_xpc_system_domain;
409 static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
410 static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
411
412 #define jobmgr_assumes(jm, e) osx_assumes_ctx(jobmgr_log_bug, jm, (e))
413 #define jobmgr_assumes_zero(jm, e) osx_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
414 #define jobmgr_assumes_zero_p(jm, e) posix_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
415
416 static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
417 static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
418 static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
419 static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
420 static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
421 static jobmgr_t jobmgr_parent(jobmgr_t jm);
422 static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
423 static bool jobmgr_label_test(jobmgr_t jm, const char *str);
424 static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
425 static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
426 static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
427 static void jobmgr_remove(jobmgr_t jm);
428 static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
429 static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
430 static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
431 static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
432 static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
433 static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
434 static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
435 static void job_export_all2(jobmgr_t jm, launch_data_t where);
436 static void jobmgr_callback(void *obj, struct kevent *kev);
437 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
438 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
439 static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
440 static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
441 static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
442 static void jobmgr_log_perf_statistics(jobmgr_t jm);
443 // static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
444 static bool jobmgr_log_bug(aslmsg asl_message, void *ctx, const char *message);
445
446 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
447 #define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
448 #define AUTO_PICK_XPC_LABEL (const char *)(~2)
449
/* List node recording a per-user launchd job that has been suspended;
 * jobs keep these on their suspended_perusers list.
 */
struct suspended_peruser {
	LIST_ENTRY(suspended_peruser) sle;
	job_t j;
};
454
/* Everything launchd tracks about one job — a loaded launchd.plist, a legacy
 * mach_init job, or an anonymous process it merely observes. The job's label
 * is stored inline at the tail of the allocation (label[0]).
 */
struct job_s {
	// MUST be first element of this structure.
	kq_callback kqjob_callback;
	// Membership in the owning jobmgr's jobs list, plus the various hashes
	// and auxiliary lists a job can appear on.
	LIST_ENTRY(job_s) sle;
	LIST_ENTRY(job_s) subjob_sle;
	LIST_ENTRY(job_s) needing_session_sle;
	LIST_ENTRY(job_s) jetsam_sle;
	LIST_ENTRY(job_s) pid_hash_sle;
	LIST_ENTRY(job_s) label_hash_sle;
	LIST_ENTRY(job_s) global_env_sle;
	SLIST_ENTRY(job_s) curious_jobs_sle;
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;
	LIST_HEAD(, job_s) subjobs;
	LIST_HEAD(, externalevent) events;
	// Per-job configuration lists imported from the plist.
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
	// When this job is an alias, the original job (see job_new_alias()).
	job_t alias;
	struct rusage ru;
	cpu_type_t *j_binpref;
	size_t j_binpref_cnt;
	mach_port_t j_port;
	mach_port_t exit_status_dest;
	mach_port_t exit_status_port;
	mach_port_t spawn_reply_port;
	uid_t mach_uid;
	jobmgr_t mgr;
	size_t argc;
	char **argv;
	char *prog;
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdinpath;
	char *stdoutpath;
	char *stderrpath;
	char *alt_exc_handler;
	unsigned int nruns;
	uint64_t trt;
#if HAVE_SANDBOX
	char *seatbelt_profile;
	uint64_t seatbelt_flags;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	// PID of the running process; 0/false when no process is running
	// (job_stop() bails on !j->p).
	pid_t p;
	int last_exit_status;
	int stdin_fd;
	int fork_fd;
	int nice;
	uint32_t pstype;
	int32_t jetsam_priority;
	int32_t jetsam_memlimit;
	int32_t main_thread_priority;
	uint32_t timeout;
	// Seconds between SIGTERM and the fallback SIGKILL; used as the
	// NOTE_SECONDS value of the exit-timeout EVFILT_TIMER (see job_stop()).
	uint32_t exit_timeout;
	// Opaque timestamp recorded when job_stop() signals the process.
	uint64_t sent_signal_time;
	uint64_t start_time;
	uint32_t min_run_time;
	uint32_t start_interval;
	uint32_t peruser_suspend_count;
	uuid_t instance_id;
	mode_t mask;
	pid_t tracing_pid;
	mach_port_t asport;
	// Only set for per-user launchd's.
	au_asid_t asid;
	uuid_t expected_audit_uuid;
	bool
		// man launchd.plist --> Debug
		debug:1,
		// man launchd.plist --> KeepAlive == false
		ondemand:1,
		// man launchd.plist --> SessionCreate
		session_create:1,
		// man launchd.plist --> LowPriorityIO
		low_pri_io:1,
		// man launchd.plist --> InitGroups
		no_init_groups:1,
		/* A legacy mach_init concept to make bootstrap_create_server/service()
		 * work
		 */
		priv_port_has_senders:1,
		// A hack during job importing
		importing_global_env:1,
		// A hack during job importing
		importing_hard_limits:1,
		// man launchd.plist --> Umask
		setmask:1,
		// A process that launchd knows about but doesn't manage.
		anonymous:1,
		// A legacy mach_init concept to detect sick jobs
		checkedin:1,
		// A job created via bootstrap_create_server()
		legacy_mach_job:1,
		// A job created via spawn_via_launchd()
		legacy_LS_job:1,
		// A legacy job that wants inetd compatible semantics
		inetcompat:1,
		// A twist on inetd compatibility
		inetcompat_wait:1,
		/* An event fired and the job should start, but not necessarily right
		 * away.
		 */
		start_pending:1,
		// man launchd.plist --> EnableGlobbing
		globargv:1,
		// man launchd.plist --> WaitForDebugger
		wait4debugger:1,
		// One-shot WaitForDebugger.
		wait4debugger_oneshot:1,
		// MachExceptionHandler == true
		internal_exc_handler:1,
		// A hack to support an option of spawn_via_launchd()
		stall_before_exec:1,
		/* man launchd.plist --> LaunchOnlyOnce.
		 *
		 * Note: <rdar://problem/5465184> Rename this to "HopefullyNeverExits".
		 */
		only_once:1,
		/* Make job_ignore() / job_watch() work. If these calls were balanced,
		 * then this wouldn't be necessary.
		 */
		currently_ignored:1,
		/* A job that forced all other jobs to be temporarily launch-on-
		 * demand
		 */
		forced_peers_to_demand_mode:1,
		// man launchd.plist --> Nice
		setnice:1,
		/* A job was asked to be unloaded/removed while running, we'll remove it
		 * after it exits.
		 */
		removal_pending:1,
		// job_kill() was called.
		sent_sigkill:1,
		// Enter the kernel debugger before killing a job.
		debug_before_kill:1,
		// A hack that launchd+launchctl use during jobmgr_t creation.
		weird_bootstrap:1,
		// man launchd.plist --> StartOnMount
		start_on_mount:1,
		// This job is a per-user launchd managed by the PID 1 launchd.
		per_user:1,
		// A job thoroughly confused launchd. We need to unload it ASAP.
		unload_at_mig_return:1,
		// man launchd.plist --> AbandonProcessGroup
		abandon_pg:1,
		/* During shutdown, do not send SIGTERM to stray processes in the
		 * process group of this job.
		 */
		ignore_pg_at_shutdown:1,
		/* Don't let this job create new 'job_t' objects in launchd. Has been
		 * seriously overloaded for the purposes of sandboxing.
		 */
		deny_job_creation:1,
		// man launchd.plist --> EnableTransactions
		enable_transactions:1,
		// The job was sent SIGKILL because it was clean.
		clean_kill:1,
		// The job has an OtherJobEnabled KeepAlive criterion.
		nosy:1,
		// The job exited due to a crash.
		crashed:1,
		// We've received NOTE_EXIT for the job and reaped it.
		reaped:1,
		// job_stop() was called.
		stopped:1,
		// The job is considered "frontmost" by Jetsam.
		jetsam_frontmost:1,
		/* The job is not frontmost, but it is considered "active" (i.e.
		 * backgrounded) by Jetsam.
		 */
		jetsam_active:1,
		/* The job is to be kept alive continuously, but it must first get an
		 * initial kick off.
		 */
		needs_kickoff:1,
		// The job is a bootstrapper.
		is_bootstrapper:1,
		// The job owns the console.
		has_console:1,
		/* The job runs as a non-root user on embedded but has select privileges
		 * of the root user. This is SpringBoard.
		 */
		embedded_god:1,
		// We got NOTE_EXEC for the job.
		did_exec:1,
		// The job is an XPC service, and XPC proxy successfully exec(3)ed.
		xpcproxy_did_exec:1,
		// The (anonymous) job called vprocmgr_switch_to_session().
		holds_ref:1,
		// The job has Jetsam limits in place.
		jetsam_properties:1,
		/* This job was created as the result of a look up of a service provided
		 * by a MultipleInstance job.
		 */
		dedicated_instance:1,
		// The job supports creating additional instances of itself.
		multiple_instances:1,
		/* The sub-job was already removed from the parent's list of
		 * sub-jobs.
		 */
		former_subjob:1,
		/* The job is responsible for monitoring external events for this
		 * launchd.
		 */
		event_monitor:1,
		// The event monitor job has retrieved the initial list of events.
		event_monitor_ready2signal:1,
		// A lame hack.
		removing:1,
		// Disable ASLR when launching this job.
		disable_aslr:1,
		// The job is an XPC Service.
		xpc_service:1,
		// The job is the Performance team's shutdown monitor.
		shutdown_monitor:1,
		// We should open a transaction for the job when shutdown begins.
		dirty_at_shutdown:1,
		/* The job was sent SIGKILL but did not exit in a timely fashion,
		 * indicating a kernel bug.
		 */
		workaround9359725:1,
		// The job is the XPC domain bootstrapper.
		xpc_bootstrapper:1,
		// The job is an app (on either iOS or OS X) and has different resource
		// limitations.
		app:1,
		// The job failed to exec(3) for reasons that may be transient, so we're
		// waiting for UserEventAgent to tell us when it's okay to try spawning
		// again (i.e. when the executable path appears, when the UID appears,
		// etc.).
		waiting4ok:1,
		// The job was implicitly reaped by the kernel.
		implicit_reap:1;

	// Inline flexible tail: the job's label.
	const char label[0];
};
703
704 static size_t hash_label(const char *label) __attribute__((pure));
705 static size_t hash_ms(const char *msstr) __attribute__((pure));
706 static SLIST_HEAD(, job_s) s_curious_jobs;
707
708 #define job_assumes(j, e) osx_assumes_ctx(job_log_bug, j, (e))
709 #define job_assumes_zero(j, e) osx_assumes_zero_ctx(job_log_bug, j, (e))
710 #define job_assumes_zero_p(j, e) posix_assumes_zero_ctx(job_log_bug, j, (e))
711
712 static void job_import_keys(launch_data_t obj, const char *key, void *context);
713 static void job_import_bool(job_t j, const char *key, bool value);
714 static void job_import_string(job_t j, const char *key, const char *value);
715 static void job_import_integer(job_t j, const char *key, long long value);
716 static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
717 static void job_import_array(job_t j, const char *key, launch_data_t value);
718 static void job_import_opaque(job_t j, const char *key, launch_data_t value);
719 static bool job_set_global_on_demand(job_t j, bool val);
720 static const char *job_active(job_t j);
721 static void job_watch(job_t j);
722 static void job_ignore(job_t j);
723 static void job_reap(job_t j);
724 static bool job_useless(job_t j);
725 static bool job_keepalive(job_t j);
726 static void job_dispatch_curious_jobs(job_t j);
727 static void job_start(job_t j);
728 static void job_start_child(job_t j) __attribute__((noreturn));
729 static void job_setup_attributes(job_t j);
730 static bool job_setup_machport(job_t j);
731 static kern_return_t job_setup_exit_port(job_t j);
732 static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
733 static void job_postfork_become_user(job_t j);
734 static void job_postfork_test_user(job_t j);
735 static void job_log_pids_with_weird_uids(job_t j);
736 static void job_setup_exception_port(job_t j, task_t target_task);
737 static void job_callback(void *obj, struct kevent *kev);
738 static void job_callback_proc(job_t j, struct kevent *kev);
739 static void job_callback_timer(job_t j, void *ident);
740 static void job_callback_read(job_t j, int ident);
741 static void job_log_stray_pg(job_t j);
742 static void job_log_children_without_exec(job_t j);
743 static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
744 static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
745 static job_t job_new_alias(jobmgr_t jm, job_t src);
746 static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
747 static job_t job_new_subjob(job_t j, uuid_t identifier);
748 static void job_kill(job_t j);
749 static void job_uncork_fork(job_t j);
750 static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
751 static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
752 static bool job_log_bug(aslmsg asl_message, void *ctx, const char *message);
753 static void job_log_perf_statistics(job_t j);
754 static void job_set_exception_port(job_t j, mach_port_t port);
755 static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
756 static void job_open_shutdown_transaction(job_t ji);
757 static void job_close_shutdown_transaction(job_t ji);
758 static launch_data_t job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport);
759 static void job_setup_per_user_directory(job_t j, uid_t uid, const char *path);
760 static void job_setup_per_user_directories(job_t j, uid_t uid, const char *label);
761
/* Maps launchd.plist resource-limit keys to their RLIMIT_* constants —
 * presumably consumed by limititem_setup()/limititem_update() above.
 */
static const struct {
	const char *key;
	int val;
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
776
777 static time_t cronemu(int mon, int mday, int hour, int min);
778 static time_t cronemu_wday(int wday, int hour, int min);
779 static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
780 static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
781 static bool cronemu_hour(struct tm *wtm, int hour, int min);
782 static bool cronemu_min(struct tm *wtm, int min);
783
784 // miscellaneous file local functions
785 static size_t get_kern_max_proc(void);
786 static char **mach_cmd2argv(const char *string);
787 static size_t our_strhash(const char *s) __attribute__((pure));
788
789 void eliminate_double_reboot(void);
790
791 #pragma mark XPC Domain Forward Declarations
792 static job_t _xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
793 static int _xpc_domain_import_services(job_t j, launch_data_t services);
794
795 #pragma mark XPC Event Forward Declarations
796 static int xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms);
797 static int xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply);
798 static int xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply);
799 static int xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply);
800 static int xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
801 static int xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply);
802 static int xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
803 static int xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply);
804
805 // file local globals
806 static job_t _launchd_embedded_god = NULL;
807 static size_t total_children;
808 static size_t total_anon_children;
809 static mach_port_t the_exception_server;
810 static job_t workaround_5477111;
811 static LIST_HEAD(, job_s) s_needing_sessions;
812 static LIST_HEAD(, eventsystem) _s_event_systems;
813 static struct eventsystem *_launchd_support_system;
814 static job_t _launchd_event_monitor;
815 static job_t _launchd_xpc_bootstrapper;
816 static job_t _launchd_shutdown_monitor;
817
818 mach_port_t launchd_audit_port = MACH_PORT_NULL;
819 #if !TARGET_OS_EMBEDDED
820 au_asid_t launchd_audit_session = AU_DEFAUDITSID;
821 #else
822 pid_t launchd_audit_session = 0;
823 #endif
824
825 static int s_no_hang_fd = -1;
826
827 // process wide globals
828 mach_port_t inherited_bootstrap_port;
829 jobmgr_t root_jobmgr;
830 bool launchd_shutdown_debugging = false;
831 bool launchd_verbose_boot = false;
832 bool launchd_embedded_handofgod = false;
833 bool launchd_runtime_busy_time = false;
834
835 void
836 job_ignore(job_t j)
837 {
838 struct socketgroup *sg;
839 struct machservice *ms;
840
841 if (j->currently_ignored) {
842 return;
843 }
844
845 job_log(j, LOG_DEBUG, "Ignoring...");
846
847 j->currently_ignored = true;
848
849 SLIST_FOREACH(sg, &j->sockets, sle) {
850 socketgroup_ignore(j, sg);
851 }
852
853 SLIST_FOREACH(ms, &j->machservices, sle) {
854 machservice_ignore(j, ms);
855 }
856 }
857
858 void
859 job_watch(job_t j)
860 {
861 struct socketgroup *sg;
862 struct machservice *ms;
863
864 if (!j->currently_ignored) {
865 return;
866 }
867
868 job_log(j, LOG_DEBUG, "Watching...");
869
870 j->currently_ignored = false;
871
872 SLIST_FOREACH(sg, &j->sockets, sle) {
873 socketgroup_watch(j, sg);
874 }
875
876 SLIST_FOREACH(ms, &j->machservices, sle) {
877 machservice_watch(j, ms);
878 }
879 }
880
void
job_stop(job_t j)
{
	int signo;

	// Nothing to stop: no process, already being stopped, or not ours.
	if (unlikely(!j->p || j->stopped || j->anonymous)) {
		return;
	}

#if TARGET_OS_EMBEDDED
	// Hand-of-god mode: only jobs owned by the same user as the "god" job
	// may be stopped; anything else is rejected.
	if (launchd_embedded_handofgod) {
		if (!_launchd_embedded_god) {
			errno = EINVAL;
			return;
		}
		if (!_launchd_embedded_god->username || !j->username
			|| strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	}
#endif

	j->sent_signal_time = runtime_get_opaque_time();

	job_log(j, LOG_DEBUG | LOG_CONSOLE, "Stopping job...");

	// Prefer proc_terminate(), which picks SIGKILL or SIGTERM for us; fall
	// back to a plain SIGTERM if that fails.
	int rc = proc_terminate(j->p, &signo);
	if (rc) {
		job_log(j, LOG_ERR | LOG_CONSOLE, "Could not terminate job: %d: %s", rc, strerror(rc));
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Using fallback option to terminate job...");
		rc = kill2(j->p, SIGTERM);
		if (rc) {
			job_log(j, LOG_ERR, "Could not signal job: %d: %s", rc, strerror(rc));
		} else {
			signo = SIGTERM;
		}
	}

	if (rc == 0) {
		switch (signo) {
		case SIGKILL:
			j->sent_sigkill = true;
			j->clean_kill = true;

			/* We cannot effectively simulate an exit for jobs during the course
			 * of a normal run. Even if we pretend that the job exited, we will
			 * still not have gotten the receive rights associated with the
			 * job's MachServices back, so we cannot safely respawn it.
			 */
			if (j->mgr->shutting_down) {
				(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j));
			}

			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sent job SIGKILL.");
			break;
		case SIGTERM:
			// Arm the exit-timeout timer so a hung job is escalated later.
			if (j->exit_timeout) {
				(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j));
			} else {
				job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
			}
			job_log(j, LOG_DEBUG, "Sent job SIGTERM.");
			break;
		default:
			job_log(j, LOG_ERR | LOG_CONSOLE, "Job was sent unexpected signal: %d: %s", signo, strsignal(signo));
			break;
		}
	}

	j->stopped = true;
}
959
960 launch_data_t
961 job_export(job_t j)
962 {
963 launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
964
965 if (r == NULL) {
966 return NULL;
967 }
968
969 if ((tmp = launch_data_new_string(j->label))) {
970 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
971 }
972 if ((tmp = launch_data_new_string(j->mgr->name))) {
973 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
974 }
975 if ((tmp = launch_data_new_bool(j->ondemand))) {
976 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
977 }
978 if ((tmp = launch_data_new_integer(j->last_exit_status))) {
979 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
980 }
981 if (j->p && (tmp = launch_data_new_integer(j->p))) {
982 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
983 }
984 if ((tmp = launch_data_new_integer(j->timeout))) {
985 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
986 }
987 if (j->prog && (tmp = launch_data_new_string(j->prog))) {
988 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
989 }
990 if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
991 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
992 }
993 if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
994 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
995 }
996 if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
997 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
998 }
999 if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
1000 size_t i;
1001
1002 for (i = 0; i < j->argc; i++) {
1003 if ((tmp2 = launch_data_new_string(j->argv[i]))) {
1004 launch_data_array_set_index(tmp, tmp2, i);
1005 }
1006 }
1007
1008 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
1009 }
1010
1011 if (j->enable_transactions && (tmp = launch_data_new_bool(true))) {
1012 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);
1013 }
1014
1015 if (j->session_create && (tmp = launch_data_new_bool(true))) {
1016 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
1017 }
1018
1019 if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
1020 if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
1021 launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
1022 }
1023 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
1024 }
1025
1026 if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
1027 struct socketgroup *sg;
1028 unsigned int i;
1029
1030 SLIST_FOREACH(sg, &j->sockets, sle) {
1031 if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
1032 for (i = 0; i < sg->fd_cnt; i++) {
1033 if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
1034 launch_data_array_set_index(tmp2, tmp3, i);
1035 }
1036 }
1037 launch_data_dict_insert(tmp, tmp2, sg->name);
1038 }
1039 }
1040
1041 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
1042 }
1043
1044 if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
1045 struct machservice *ms;
1046
1047 tmp3 = NULL;
1048
1049 SLIST_FOREACH(ms, &j->machservices, sle) {
1050 if (ms->per_pid) {
1051 if (tmp3 == NULL) {
1052 tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
1053 }
1054 if (tmp3) {
1055 tmp2 = launch_data_new_machport(MACH_PORT_NULL);
1056 launch_data_dict_insert(tmp3, tmp2, ms->name);
1057 }
1058 } else {
1059 tmp2 = launch_data_new_machport(MACH_PORT_NULL);
1060 launch_data_dict_insert(tmp, tmp2, ms->name);
1061 }
1062 }
1063
1064 launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);
1065
1066 if (tmp3) {
1067 launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
1068 }
1069 }
1070
1071 return r;
1072 }
1073
1074 static void
1075 jobmgr_log_active_jobs(jobmgr_t jm)
1076 {
1077 const char *why_active;
1078 jobmgr_t jmi;
1079 job_t ji;
1080
1081 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
1082 jobmgr_log_active_jobs(jmi);
1083 }
1084
1085 int level = LOG_DEBUG;
1086 if (pid1_magic) {
1087 level |= LOG_CONSOLE;
1088 }
1089
1090 LIST_FOREACH(ji, &jm->jobs, sle) {
1091 if ((why_active = job_active(ji))) {
1092 if (ji->p != 1) {
1093 job_log(ji, level, "%s", why_active);
1094
1095 uint32_t flags = 0;
1096 (void)proc_get_dirty(ji->p, &flags);
1097 if (!(flags & PROC_DIRTY_TRACKED)) {
1098 continue;
1099 }
1100
1101 char *dirty = "clean";
1102 if (flags & PROC_DIRTY_IS_DIRTY) {
1103 dirty = "dirty";
1104 }
1105
1106 char *idle_exit = "idle-exit unsupported";
1107 if (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT) {
1108 idle_exit = "idle-exit supported";
1109 }
1110
1111 job_log(ji, level, "Killability: %s/%s", dirty, idle_exit);
1112 }
1113 }
1114 }
1115 }
1116
1117 static void
1118 jobmgr_still_alive_with_check(jobmgr_t jm)
1119 {
1120 int level = LOG_DEBUG;
1121 if (pid1_magic) {
1122 level |= LOG_CONSOLE;
1123 }
1124
1125 jobmgr_log(jm, level, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
1126 jobmgr_log_active_jobs(jm);
1127 launchd_log_push();
1128 }
1129
/* Begin shutting down a job manager tree: mark it (and, recursively, its
 * submanagers) as shutting down, kick off the shutdown monitor on the root
 * PID-1 manager, and start garbage collection.
 */
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t sub, next_sub;

	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// asctime_r(3) appends a newline; chop it off.
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	jm->shutting_down = true;

	SLIST_FOREACH_SAFE(sub, &jm->submgrs, sle, next_sub) {
		jobmgr_shutdown(sub);
	}

	if (jm->parentmgr == NULL) {
		if (pid1_magic) {
			// Spawn the shutdown monitor.
			if (_launchd_shutdown_monitor && !_launchd_shutdown_monitor->p) {
				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
				job_dispatch(_launchd_shutdown_monitor, true);
			}
		}

		// Arm a 5-second repeating timer for liveness reporting.
		(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));
	}

	return jobmgr_do_garbage_collection(jm);
}
1172
/* Tear down a job manager: recursively remove submanagers, remove all jobs,
 * release the Mach ports it holds, and finally either unlink from the parent
 * manager, reboot the machine (root manager of the PID-1 launchd), or exit
 * (per-session launchd).
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!SLIST_EMPTY(&jm->submgrs)) {
		// Each recursive jobmgr_remove() unlinks the submanager from our
		// submgrs list, so this loop terminates.
		size_t cnt = 0;
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
			cnt++;
		}

		// NOTE(review): cnt is necessarily non-zero inside this branch, so
		// this always logs — presumably lingering submanagers at teardown
		// are treated as an anomaly worth flagging; confirm intent.
		(void)jobmgr_assumes_zero(jm, cnt);
	}

	// Remove every job. A managed job that still has a live PID here is
	// forcibly marked dead (p = 0) so job_remove() won't pend its removal
	// on a process exit that we will never see.
	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && ji->p != 0) {
			job_log(ji, LOG_ERR, "Job is still active at job manager teardown.");
			ji->p = 0;
		}
		job_remove(ji);
	}

	// Release the Mach ports this manager holds: requestor send rights are
	// deallocated, our own bootstrap receive right is closed.
	if (jm->req_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_port));
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_close_recv(jm->jm_port));
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_bsport));
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_excport));
	}
	if (MACH_PORT_VALID(jm->req_asport)) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_asport));
	}
	if (jm->req_rport) {
		// Wake up whoever asked for this manager, reporting jm->error.
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			(void)jobmgr_assumes_zero(jm, kr);
		}
	}
	if (jm->req_ctx) {
		// Free the request context vm_allocate'd on our behalf.
		(void)jobmgr_assumes_zero(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz));
	}

	// Format the teardown timestamp for the log messages below.
	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim the newline that asctime_r(3) appends.
	date[24] = 0;

	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		// Submanager: unlink from the parent, which keeps running.
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (pid1_magic) {
		// Root manager of the PID-1 launchd: userspace shutdown is complete,
		// so hand control back to the kernel. reboot(2) does not return.
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		launchd_closelog();
		(void)jobmgr_assumes_zero_p(jm, reboot(jm->reboot_flags));
	} else {
		// Per-session launchd: simply exit. exit(3) does not return.
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		launchd_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
1262
/* Remove a job from its manager and tear down all of its resources.
 *
 * If the job still has a running managed process, removal is deferred: the
 * job is marked removal_pending and job_stop() is sent instead, and this
 * function returns without freeing anything. Anonymous jobs with a live PID
 * are reaped immediately. On embedded hand-of-god configurations, removal
 * is permitted only for jobs owned by the "god" job's user (errno is set to
 * EPERM/EINVAL on rejection).
 */
void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

	if (j->alias) {
		/* HACK: Egregious code duplication. But as with machservice_delete(),
		 * job aliases can't (and shouldn't) have any complex behaviors
		 * associated with them.
		 */
		while ((ms = SLIST_FIRST(&j->machservices))) {
			machservice_delete(j, ms, false);
		}

		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, label_hash_sle);
		free(j);
		return;
	}

#if TARGET_OS_EMBEDDED
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!(_launchd_embedded_god->username && j->username)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	/* Do this BEFORE we check and see whether the job is still active. If we're
	 * a sub-job, we're being removed due to the parent job removing us.
	 * Therefore, the parent job will free itself after this call completes. So
	 * if we defer removing ourselves from the parent's list, we'll crash when
	 * we finally get around to it.
	 */
	if (j->dedicated_instance && !j->former_subjob) {
		LIST_REMOVE(j, subjob_sle);
		j->former_subjob = true;
	}

	if (unlikely(j->p)) {
		if (j->anonymous) {
			job_reap(j);
		} else {
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}

			return;
		}
	}

	if (!j->removing) {
		j->removing = true;
		job_dispatch_curious_jobs(j);
	}

	ipc_close_all_with_job(j);

	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	/* Close the fork fd if one is open. (Bug fix: this previously passed the
	 * descriptor through job_assumes_zero(), which logged a spurious error
	 * for any valid non-zero fd; a plain truthiness check — matching the
	 * stdin_fd check below — is what's intended.)
	 */
	if (j->fork_fd) {
		(void)posix_assumes_zero(runtime_close(j->fork_fd));
	}

	if (j->stdin_fd) {
		(void)posix_assumes_zero(runtime_close(j->stdin_fd));
	}

	if (j->j_port) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	}

	// Drain every auxiliary list hanging off the job.
	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	struct externalevent *eei = NULL;
	while ((eei = LIST_FIRST(&j->events))) {
		externalevent_delete(eei);
	}

	// Clear the file-local singleton pointers if this job held a role.
	if (j->event_monitor) {
		_launchd_event_monitor = NULL;
	}
	if (j->xpc_bootstrapper) {
		_launchd_xpc_bootstrapper = NULL;
	}

	// Free all owned strings and buffers.
	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}

	// Cancel outstanding timers and release remaining Mach rights.
	if (j->start_interval) {
		runtime_del_weak_ref();
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
	}
	if (j->exit_timeout) {
		/* If this fails, it just means the timer's already fired, so no need to
		 * wrap it in an assumes() macro.
		 */
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->asport != MACH_PORT_NULL) {
		(void)job_assumes_zero(j, launchd_mport_deallocate(j->asport));
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if (j->embedded_god) {
		_launchd_embedded_god = NULL;
	}
	if (j->shutdown_monitor) {
		_launchd_shutdown_monitor = NULL;
	}

	(void)kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	// Unlink from the manager's job list and the label hash.
	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	// Removing a job removes all of its dedicated-instance subjobs too.
	job_t ji = NULL;
	job_t jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
		job_remove(ji);
	}

	job_log(j, LOG_DEBUG, "Removed");

	// Poison the callback pointer to make use-after-free obvious.
	j->kqjob_callback = (kq_callback)0x8badf00d;
	free(j);
}
1474
1475 void
1476 socketgroup_setup(launch_data_t obj, const char *key, void *context)
1477 {
1478 launch_data_t tmp_oai;
1479 job_t j = context;
1480 size_t i, fd_cnt = 1;
1481 int *fds;
1482
1483 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1484 fd_cnt = launch_data_array_get_count(obj);
1485 }
1486
1487 fds = alloca(fd_cnt * sizeof(int));
1488
1489 for (i = 0; i < fd_cnt; i++) {
1490 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1491 tmp_oai = launch_data_array_get_index(obj, i);
1492 } else {
1493 tmp_oai = obj;
1494 }
1495
1496 fds[i] = launch_data_get_fd(tmp_oai);
1497 }
1498
1499 socketgroup_new(j, key, fds, fd_cnt);
1500
1501 ipc_revoke_fds(obj);
1502 }
1503
1504 bool
1505 job_set_global_on_demand(job_t j, bool val)
1506 {
1507 if (j->forced_peers_to_demand_mode && val) {
1508 return false;
1509 } else if (!j->forced_peers_to_demand_mode && !val) {
1510 return false;
1511 }
1512
1513 if ((j->forced_peers_to_demand_mode = val)) {
1514 j->mgr->global_on_demand_cnt++;
1515 } else {
1516 j->mgr->global_on_demand_cnt--;
1517 }
1518
1519 if (j->mgr->global_on_demand_cnt == 0) {
1520 jobmgr_dispatch_all(j->mgr, false);
1521 }
1522
1523 return true;
1524 }
1525
1526 bool
1527 job_setup_machport(job_t j)
1528 {
1529 if (job_assumes_zero(j, launchd_mport_create_recv(&j->j_port)) != KERN_SUCCESS) {
1530 goto out_bad;
1531 }
1532
1533 if (job_assumes_zero(j, runtime_add_mport(j->j_port, job_server)) != KERN_SUCCESS) {
1534 goto out_bad2;
1535 }
1536
1537 if (job_assumes_zero(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS)) != KERN_SUCCESS) {
1538 (void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
1539 goto out_bad;
1540 }
1541
1542 return true;
1543 out_bad2:
1544 (void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
1545 out_bad:
1546 return false;
1547 }
1548
1549 kern_return_t
1550 job_setup_exit_port(job_t j)
1551 {
1552 kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
1553 if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
1554 return MACH_PORT_NULL;
1555 }
1556
1557 struct mach_port_limits limits = {
1558 .mpl_qlimit = 1,
1559 };
1560 kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
1561 (void)job_assumes_zero(j, kr);
1562
1563 kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
1564 if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
1565 (void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
1566 j->exit_status_port = MACH_PORT_NULL;
1567 }
1568
1569 return kr;
1570 }
1571
1572 job_t
1573 job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
1574 {
1575 const char **argv = (const char **)mach_cmd2argv(cmd);
1576 job_t jr = NULL;
1577
1578 if (!argv) {
1579 goto out_bad;
1580 }
1581
1582 jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
1583 free(argv);
1584
1585 // Job creation can be denied during shutdown.
1586 if (unlikely(jr == NULL)) {
1587 goto out_bad;
1588 }
1589
1590 jr->mach_uid = uid;
1591 jr->ondemand = ond;
1592 jr->legacy_mach_job = true;
1593 jr->abandon_pg = true;
1594 jr->priv_port_has_senders = true; // the IPC that called us will make-send on this port
1595
1596 if (!job_setup_machport(jr)) {
1597 goto out_bad;
1598 }
1599
1600 job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");
1601
1602 return jr;
1603
1604 out_bad:
1605 if (jr) {
1606 job_remove(jr);
1607 }
1608 return NULL;
1609 }
1610
/* Create a job to track an already-running process that launchd did not
 * spawn itself ("anonymous" job). Used so external processes that talk to
 * us can be accounted for like any other job.
 *
 * Returns the new job, or NULL with errno set on failure.
 */
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	struct proc_bsdshortinfo proc;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (anonpid == 0) {
		errno = EINVAL;
		return NULL;
	}

	if (anonpid >= 100000) {
		/* The kernel current defines PID_MAX to be 99999, but that define isn't
		 * exported.
		 */
		launchd_syslog(LOG_WARNING, "Did PID_MAX change? Got request from PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	/* libproc returns the number of bytes written into the buffer upon success,
	 * zero on failure. I'd much rather it return -1 on failure, like sysctl(3).
	 */
	if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}
		return NULL;
	}

	if (proc.pbsi_comm[0] == '\0') {
		launchd_syslog(LOG_WARNING, "Blank command for PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	// Zombies are merely logged; creation proceeds anyway.
	if (unlikely(proc.pbsi_status == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
	}

	if (unlikely(proc.pbsi_flags & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
	}

	kp_euid = proc.pbsi_uid;
	kp_uid = proc.pbsi_ruid;
	kp_svuid = proc.pbsi_svuid;
	kp_egid = proc.pbsi_gid;
	kp_gid = proc.pbsi_rgid;
	kp_svgid = proc.pbsi_svgid;

	// Mixed effective/real/saved credentials are logged, not rejected.
	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
	}

	/* "Fix" for when the kernel turns the process tree into a weird, cyclic
	 * graph.
	 *
	 * See <rdar://problem/7264615> for the symptom and <rdar://problem/5020256>
	 * as to why this can happen.
	 */
	if ((pid_t)proc.pbsi_ppid == anonpid) {
		jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). Ignoring: %s", proc.pbsi_comm);
		errno = EINVAL;
		return NULL;
	}

	/* HACK: Normally, job_new() returns an error during shutdown, but anonymous
	 * jobs can pop up during shutdown and need to talk to us.
	 */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	// We only set requestor_pid for XPC domains.
	const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
	if ((jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL))) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		// Anonymous process reaping is messy.
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1)) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(jr, errno);
			}

			// Zombies interact weirdly with kevent(3).
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state)) {
			job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		// NOTE(review): jp is always NULL at this point (it is only assigned
		// in the switch below), so the trailing "%s%s" always expands to
		// nothing — confirm whether this log was meant to come later.
		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
	} else {
		(void)osx_assumes_zero(errno);
	}

	// Undo our hack from above.
	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	/* This is down here to prevent infinite recursion due to a process
	 * attaching to its parent through ptrace(3) -- causing a cycle in the
	 * process tree and thereby not making it a tree anymore. We need to make
	 * sure that the anonymous job has been added to the process list so that
	 * we'll find the tracing parent PID of the parent process, which is the
	 * child, when we go looking for it in jobmgr_find_by_pid().
	 *
	 * <rdar://problem/7264615>
	 */
	switch (proc.pbsi_ppid) {
	case 0:
		// The kernel.
		break;
	case 1:
		if (!pid1_magic) {
			break;
		}
		// Fall through.
	default:
		jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
		if (jobmgr_assumes(jm, jp != NULL)) {
			if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
				job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
			}
		}
		break;
	}

	return jr;
}
1755
1756 job_t
1757 job_new_subjob(job_t j, uuid_t identifier)
1758 {
1759 char label[0];
1760 uuid_string_t idstr;
1761 uuid_unparse(identifier, idstr);
1762 size_t label_sz = snprintf(label, 0, "%s.%s", j->label, idstr);
1763
1764 job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
1765 if (nj != NULL) {
1766 nj->kqjob_callback = job_callback;
1767 nj->mgr = j->mgr;
1768 nj->min_run_time = j->min_run_time;
1769 nj->timeout = j->timeout;
1770 nj->exit_timeout = j->exit_timeout;
1771
1772 snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
1773
1774 // Set all our simple Booleans that are applicable.
1775 nj->debug = j->debug;
1776 nj->ondemand = j->ondemand;
1777 nj->checkedin = true;
1778 nj->low_pri_io = j->low_pri_io;
1779 nj->setmask = j->setmask;
1780 nj->wait4debugger = j->wait4debugger;
1781 nj->internal_exc_handler = j->internal_exc_handler;
1782 nj->setnice = j->setnice;
1783 nj->abandon_pg = j->abandon_pg;
1784 nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
1785 nj->deny_job_creation = j->deny_job_creation;
1786 nj->enable_transactions = j->enable_transactions;
1787 nj->needs_kickoff = j->needs_kickoff;
1788 nj->currently_ignored = true;
1789 nj->dedicated_instance = true;
1790 nj->xpc_service = j->xpc_service;
1791 nj->xpc_bootstrapper = j->xpc_bootstrapper;
1792
1793 nj->mask = j->mask;
1794 uuid_copy(nj->instance_id, identifier);
1795
1796 // These jobs are purely on-demand Mach jobs.
1797 // {Hard | Soft}ResourceLimits are not supported.
1798 // JetsamPriority is not supported.
1799
1800 if (j->prog) {
1801 nj->prog = strdup(j->prog);
1802 }
1803 if (j->argv) {
1804 size_t sz = malloc_size(j->argv);
1805 nj->argv = (char **)malloc(sz);
1806 if (nj->argv != NULL) {
1807 // This is the start of our strings.
1808 char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
1809
1810 size_t i = 0;
1811 for (i = 0; i < j->argc; i++) {
1812 (void)strcpy(p, j->argv[i]);
1813 nj->argv[i] = p;
1814 p += (strlen(j->argv[i]) + 1);
1815 }
1816 nj->argv[i] = NULL;
1817 } else {
1818 (void)job_assumes_zero(nj, errno);
1819 }
1820
1821 nj->argc = j->argc;
1822 }
1823
1824 struct machservice *msi = NULL;
1825 SLIST_FOREACH(msi, &j->machservices, sle) {
1826 /* Only copy MachServices that were actually declared in the plist.
1827 * So skip over per-PID ones and ones that were created via
1828 * bootstrap_register().
1829 */
1830 if (msi->upfront) {
1831 mach_port_t mp = MACH_PORT_NULL;
1832 struct machservice *msj = machservice_new(nj, msi->name, &mp, msi->per_pid);
1833 if (msj != NULL) {
1834 msj->reset = msi->reset;
1835 msj->delete_on_destruction = msi->delete_on_destruction;
1836 msj->drain_one_on_crash = msi->drain_one_on_crash;
1837 msj->drain_all_on_crash = msi->drain_all_on_crash;
1838 } else {
1839 (void)job_assumes_zero(nj, errno);
1840 }
1841 }
1842 }
1843
1844 // We ignore global environment variables.
1845 struct envitem *ei = NULL;
1846 SLIST_FOREACH(ei, &j->env, sle) {
1847 if (envitem_new(nj, ei->key, ei->value, false)) {
1848 (void)job_assumes_zero(nj, errno);
1849 }
1850 }
1851 uuid_string_t val;
1852 uuid_unparse(identifier, val);
1853 if (envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false)) {
1854 (void)job_assumes_zero(nj, errno);
1855 }
1856
1857 if (j->rootdir) {
1858 nj->rootdir = strdup(j->rootdir);
1859 }
1860 if (j->workingdir) {
1861 nj->workingdir = strdup(j->workingdir);
1862 }
1863 if (j->username) {
1864 nj->username = strdup(j->username);
1865 }
1866 if (j->groupname) {
1867 nj->groupname = strdup(j->groupname);
1868 }
1869
1870 /* FIXME: We shouldn't redirect all the output from these jobs to the
1871 * same file. We should uniquify the file names. But this hasn't shown
1872 * to be a problem in practice.
1873 */
1874 if (j->stdinpath) {
1875 nj->stdinpath = strdup(j->stdinpath);
1876 }
1877 if (j->stdoutpath) {
1878 nj->stdoutpath = strdup(j->stdinpath);
1879 }
1880 if (j->stderrpath) {
1881 nj->stderrpath = strdup(j->stderrpath);
1882 }
1883 if (j->alt_exc_handler) {
1884 nj->alt_exc_handler = strdup(j->alt_exc_handler);
1885 }
1886 #if HAVE_SANDBOX
1887 if (j->seatbelt_profile) {
1888 nj->seatbelt_profile = strdup(j->seatbelt_profile);
1889 }
1890 #endif
1891
1892 #if HAVE_QUARANTINE
1893 if (j->quarantine_data) {
1894 nj->quarantine_data = strdup(j->quarantine_data);
1895 }
1896 nj->quarantine_data_sz = j->quarantine_data_sz;
1897 #endif
1898 if (j->j_binpref) {
1899 size_t sz = malloc_size(j->j_binpref);
1900 nj->j_binpref = (cpu_type_t *)malloc(sz);
1901 if (nj->j_binpref) {
1902 memcpy(&nj->j_binpref, &j->j_binpref, sz);
1903 } else {
1904 (void)job_assumes_zero(nj, errno);
1905 }
1906 }
1907
1908 if (j->asport != MACH_PORT_NULL) {
1909 (void)job_assumes_zero(nj, launchd_mport_copy_send(j->asport));
1910 nj->asport = j->asport;
1911 }
1912
1913 LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
1914
1915 jobmgr_t where2put = root_jobmgr;
1916 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
1917 where2put = j->mgr;
1918 }
1919 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
1920 LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
1921 } else {
1922 (void)osx_assumes_zero(errno);
1923 }
1924
1925 return nj;
1926 }
1927
/* Allocate and minimally initialize a new managed job in job manager `jm`.
 *
 * `label` may be a real label string or one of the sentinel pointers
 * AUTO_PICK_LEGACY_LABEL / AUTO_PICK_ANONYMOUS_LABEL / AUTO_PICK_XPC_LABEL,
 * in which case a label is synthesized below. At least one of `prog` or
 * `argv` must be non-NULL. Returns the new job (already inserted into the
 * manager's job list and label hash) or NULL with errno set.
 */
job_t
job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
{
	const char *const *argv_tmp = argv;
	char tmp_path[PATH_MAX];
	char auto_label[1000];
	const char *bn = NULL;
	char *co;
	size_t minlabel_len;
	size_t i, cc = 0;
	job_t j;

	/* The kqueue callback must be the first member of struct job_s: the
	 * kevent udata pointer is cast directly to a callback (see kq_callback
	 * usage elsewhere in this file).
	 */
	__OSX_COMPILETIME_ASSERT__(offsetof(struct job_s, kqjob_callback) == 0);

	if (unlikely(jm->shutting_down)) {
		errno = EINVAL;
		return NULL;
	}

	// A job with neither a program nor arguments is unrunnable.
	if (unlikely(prog == NULL && argv == NULL)) {
		errno = EINVAL;
		return NULL;
	}

	/* I'd really like to redo this someday. Anonymous jobs carry all the
	 * baggage of managed jobs with them, even though most of it is unused.
	 * Maybe when we have Objective-C objects in libSystem, there can be a base
	 * job type that anonymous and managed jobs inherit from...
	 */
	char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
	if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
		if (prog) {
			bn = prog;
		} else {
			// basename(3) may modify its argument, so work on a copy.
			strlcpy(tmp_path, argv[0], sizeof(tmp_path));
			// prog for auto labels is kp.kp_kproc.p_comm.
			bn = basename(tmp_path);
		}

		/* Placeholder hex string sized like a pointer; the real job address
		 * is substituted into the label after the allocation below.
		 */
		(void)snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
		label = auto_label;
		/* This is so we can do gross things later. See NOTE_EXEC for anonymous
		 * jobs.
		 */
		minlabel_len = strlen(label) + MAXCOMLEN;
	} else {
		if (label == AUTO_PICK_XPC_LABEL) {
			minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
		} else {
			minlabel_len = strlen(label);
		}
	}

	// The label is stored inline at the end of struct job_s (+1 for NUL).
	j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);

	if (!j) {
		(void)osx_assumes_zero(errno);
		return NULL;
	}

	if (unlikely(label == auto_label)) {
		// Rewrite the placeholder label with the job's actual address.
		(void)snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
	} else {
		(void)strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
	}

	// Defaults; most may be overridden later by job_import_keys().
	j->kqjob_callback = job_callback;
	j->mgr = jm;
	j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
	j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
	j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
	j->currently_ignored = true;
	j->ondemand = true;
	j->checkedin = true;
	j->jetsam_priority = DEFAULT_JETSAM_PRIORITY;
	j->jetsam_memlimit = -1;
	uuid_clear(j->expected_audit_uuid);
#if TARGET_OS_EMBEDDED
	/* Run embedded daemons as background by default. SpringBoard jobs are
	 * Interactive by default. Unfortunately, so many daemons have opted into
	 * this priority band that its usefulness is highly questionable.
	 *
	 * See <rdar://problem/9539873>.
	 */
	if (launchd_embedded_handofgod) {
		j->pstype = POSIX_SPAWN_IOS_INTERACTIVE;
		j->app = true;
	} else {
		j->pstype = POSIX_SPAWN_IOS_APPLE_DAEMON_START;
	}
#endif

	if (prog) {
		j->prog = strdup(prog);
		if (!j->prog) {
			(void)osx_assumes_zero(errno);
			goto out_bad;
		}
	}

	if (likely(argv)) {
		while (*argv_tmp++) {
			j->argc++;
		}

		for (i = 0; i < j->argc; i++) {
			cc += strlen(argv[i]) + 1;
		}

		/* Single allocation: the pointer vector followed by the string
		 * bodies, so one free() releases everything.
		 */
		j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
		if (!j->argv) {
			(void)job_assumes_zero(j, errno);
			goto out_bad;
		}

		// co walks the string region just past the pointer vector.
		co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));

		for (i = 0; i < j->argc; i++) {
			j->argv[i] = co;
			(void)strcpy(co, argv[i]);
			co += strlen(argv[i]) + 1;
		}
		j->argv[i] = NULL;
	}

	// Sssshhh... don't tell anyone.
	if (strcmp(j->label, "com.apple.WindowServer") == 0) {
		j->has_console = true;
	}

	LIST_INSERT_HEAD(&jm->jobs, j, sle);

	/* Labels normally hash into the root job manager's global table; XPC
	 * domains keep a local table instead.
	 */
	jobmgr_t where2put_label = root_jobmgr;
	if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
		where2put_label = j->mgr;
	}
	LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
	// NOTE(review): expected_audit_uuid was already cleared above; this repeat looks redundant.
	uuid_clear(j->expected_audit_uuid);

	job_log(j, LOG_DEBUG, "Conceived");

	return j;

out_bad:
	if (j->prog) {
		free(j->prog);
	}
	free(j);

	return NULL;
}
2079
2080 job_t
2081 job_new_alias(jobmgr_t jm, job_t src)
2082 {
2083 if (job_find(jm, src->label)) {
2084 errno = EEXIST;
2085 return NULL;
2086 }
2087
2088 job_t j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2089 if (!j) {
2090 (void)osx_assumes_zero(errno);
2091 return NULL;
2092 }
2093
2094 (void)strcpy((char *)j->label, src->label);
2095 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2096 LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2097 /* Bad jump address. The kqueue callback for aliases should never be
2098 * invoked.
2099 */
2100 j->kqjob_callback = (kq_callback)0xfa1afe1;
2101 j->alias = src;
2102 j->mgr = jm;
2103
2104 struct machservice *msi = NULL;
2105 SLIST_FOREACH(msi, &src->machservices, sle) {
2106 if (!machservice_new_alias(j, msi)) {
2107 jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2108 errno = EINVAL;
2109 job_remove(j);
2110 j = NULL;
2111 break;
2112 }
2113 }
2114
2115 if (j) {
2116 job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2117 }
2118
2119 return j;
2120 }
2121
2122 job_t
2123 job_import(launch_data_t pload)
2124 {
2125 job_t j = jobmgr_import2(root_jobmgr, pload);
2126
2127 if (unlikely(j == NULL)) {
2128 return NULL;
2129 }
2130
2131 /* Since jobs are effectively stalled until they get security sessions
2132 * assigned to them, we may wish to reconsider this behavior of calling the
2133 * job "enabled" as far as other jobs with the OtherJobEnabled KeepAlive
2134 * criterion set.
2135 */
2136 job_dispatch_curious_jobs(j);
2137 return job_dispatch(j, false);
2138 }
2139
2140 launch_data_t
2141 job_import_bulk(launch_data_t pload)
2142 {
2143 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
2144 job_t *ja;
2145 size_t i, c = launch_data_array_get_count(pload);
2146
2147 ja = alloca(c * sizeof(job_t));
2148
2149 for (i = 0; i < c; i++) {
2150 if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
2151 errno = 0;
2152 }
2153 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
2154 }
2155
2156 for (i = 0; i < c; i++) {
2157 if (likely(ja[i])) {
2158 job_dispatch_curious_jobs(ja[i]);
2159 job_dispatch(ja[i], false);
2160 }
2161 }
2162
2163 return resp;
2164 }
2165
2166 void
2167 job_import_bool(job_t j, const char *key, bool value)
2168 {
2169 bool found_key = false;
2170
2171 switch (key[0]) {
2172 case 'a':
2173 case 'A':
2174 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2175 j->abandon_pg = value;
2176 found_key = true;
2177 }
2178 break;
2179 case 'b':
2180 case 'B':
2181 if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2182 j->dirty_at_shutdown = value;
2183 found_key = true;
2184 }
2185 break;
2186 case 'k':
2187 case 'K':
2188 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2189 j->ondemand = !value;
2190 found_key = true;
2191 }
2192 break;
2193 case 'o':
2194 case 'O':
2195 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
2196 j->ondemand = value;
2197 found_key = true;
2198 }
2199 break;
2200 case 'd':
2201 case 'D':
2202 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
2203 j->debug = value;
2204 found_key = true;
2205 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
2206 (void)job_assumes(j, !value);
2207 found_key = true;
2208 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2209 j->disable_aslr = value;
2210 found_key = true;
2211 }
2212 break;
2213 case 'h':
2214 case 'H':
2215 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
2216 job_log(j, LOG_PERF, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
2217 j->dirty_at_shutdown = value;
2218 found_key = true;
2219 }
2220 break;
2221 case 's':
2222 case 'S':
2223 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
2224 j->session_create = value;
2225 found_key = true;
2226 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2227 j->start_on_mount = value;
2228 found_key = true;
2229 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
2230 // this only does something on Mac OS X 10.4 "Tiger"
2231 found_key = true;
2232 } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
2233 if (_launchd_shutdown_monitor) {
2234 job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2235 } else {
2236 j->shutdown_monitor = true;
2237 _launchd_shutdown_monitor = j;
2238 }
2239 found_key = true;
2240 }
2241 break;
2242 case 'l':
2243 case 'L':
2244 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
2245 j->low_pri_io = value;
2246 found_key = true;
2247 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2248 j->only_once = value;
2249 found_key = true;
2250 }
2251 break;
2252 case 'm':
2253 case 'M':
2254 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2255 j->internal_exc_handler = value;
2256 found_key = true;
2257 } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2258 j->multiple_instances = value;
2259 found_key = true;
2260 }
2261 break;
2262 case 'i':
2263 case 'I':
2264 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2265 if (getuid() != 0) {
2266 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2267 return;
2268 }
2269 j->no_init_groups = !value;
2270 found_key = true;
2271 } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
2272 j->ignore_pg_at_shutdown = value;
2273 found_key = true;
2274 }
2275 break;
2276 case 'r':
2277 case 'R':
2278 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2279 if (value) {
2280 // We don't want value == false to change j->start_pending
2281 j->start_pending = true;
2282 }
2283 found_key = true;
2284 }
2285 break;
2286 case 'e':
2287 case 'E':
2288 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
2289 j->globargv = value;
2290 found_key = true;
2291 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
2292 j->enable_transactions = value;
2293 found_key = true;
2294 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2295 j->debug_before_kill = value;
2296 found_key = true;
2297 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
2298 #if TARGET_OS_EMBEDDED
2299 if (!_launchd_embedded_god) {
2300 if ((j->embedded_god = value)) {
2301 _launchd_embedded_god = j;
2302 }
2303 } else {
2304 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2305 }
2306 #else
2307 job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2308 #endif
2309 found_key = true;
2310 } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
2311 if (!_launchd_event_monitor) {
2312 j->event_monitor = value;
2313 if (value) {
2314 _launchd_event_monitor = j;
2315 }
2316 } else {
2317 job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility from: %s", _launchd_event_monitor->label);
2318 }
2319 found_key = true;
2320 }
2321 break;
2322 case 'w':
2323 case 'W':
2324 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
2325 j->wait4debugger = value;
2326 found_key = true;
2327 }
2328 break;
2329 case 'x':
2330 case 'X':
2331 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
2332 if (pid1_magic) {
2333 if (_launchd_xpc_bootstrapper) {
2334 job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _launchd_xpc_bootstrapper->label);
2335 } else {
2336 _launchd_xpc_bootstrapper = j;
2337 j->xpc_bootstrapper = value;
2338 }
2339 } else {
2340 job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
2341 }
2342 }
2343 found_key = true;
2344 break;
2345 default:
2346 break;
2347 }
2348
2349 if (unlikely(!found_key)) {
2350 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2351 }
2352 }
2353
/* Import a string-valued launchd.plist key into job `j`.
 *
 * Most keys simply record a strdup'd copy into a field of struct job_s via
 * `where2put`; a few (Program, Label, LimitLoadTo*/From*, XPCDomain) are
 * handled elsewhere and return early. StandardInPath additionally opens the
 * file immediately and registers a kevent on it.
 */
void
job_import_string(job_t j, const char *key, const char *value)
{
	// When set, the matched key's value is strdup'd into this field at the end.
	char **where2put = NULL;

	switch (key[0]) {
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			where2put = &j->alt_exc_handler;
		}
		break;
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
			// Program is consumed by jobmgr_import2() before key iteration.
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0) {
			/* Each spawn type is only meaningful on one platform family;
			 * on the other platform the value is matched but ignored.
			 */
			if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
#if !TARGET_OS_EMBEDDED
				j->pstype = POSIX_SPAWN_OSX_TALAPP_START;
#endif
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_WIDGET) == 0) {
#if !TARGET_OS_EMBEDDED
				j->pstype = POSIX_SPAWN_OSX_DBCLIENT_START;
#endif
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_IOSAPP) == 0) {
#if TARGET_OS_EMBEDDED
				j->pstype = POSIX_SPAWN_IOS_APP_START;
#endif
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_INTERACTIVE) == 0) {
#if TARGET_OS_EMBEDDED
				j->pstype = POSIX_SPAWN_IOS_INTERACTIVE;
#endif
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_BACKGROUND) == 0) {
#if TARGET_OS_EMBEDDED
				j->pstype = POSIX_SPAWN_IOS_APPLE_DAEMON_START;
#endif
			} else if (strcasecmp(value, "Adaptive") == 0) {
				// Hack.
			} else {
				job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
			}
			return;
		}
		break;
	case 'l':
	case 'L':
		// All of these are handled during jobmgr_import2(), not here.
		if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			return;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
			// chroot is root-only.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			where2put = &j->rootdir;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
			where2put = &j->workingdir;
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "root") == 0) {
				// Running as root is the default; nothing to record.
				return;
			}
			where2put = &j->username;
		}
		break;
	case 'g':
	case 'G':
		if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "wheel") == 0) {
				// wheel is the default group; nothing to record.
				return;
			}
			where2put = &j->groupname;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
			where2put = &j->stdoutpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
			where2put = &j->stderrpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
			where2put = &j->stdinpath;
			// Open non-blocking so a FIFO with no writer can't hang launchd.
			j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
			if (job_assumes_zero_p(j, j->stdin_fd) != -1) {
				// open() should not block, but regular IO by the job should
				(void)job_assumes_zero_p(j, fcntl(j->stdin_fd, F_SETFL, 0));
				// XXX -- EV_CLEAR should make named pipes happy?
				(void)job_assumes_zero_p(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j));
			} else {
				j->stdin_fd = 0;
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
			where2put = &j->seatbelt_profile;
#endif
		}
		break;
	case 'X':
	case 'x':
		if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
			return;
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
		break;
	}

	if (likely(where2put)) {
		if (!(*where2put = strdup(value))) {
			(void)job_assumes_zero(j, errno);
		}
	} else {
		// See rdar://problem/5496612. These two are okay.
		if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) == 0
			|| strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) == 0) {
			job_log(j, LOG_APPLEONLY, "This key is no longer relevant and should be removed: %s", key);
		} else {
			job_log(j, LOG_WARNING, "Unknown key: %s", key);
		}
	}
}
2499
2500 void
2501 job_import_integer(job_t j, const char *key, long long value)
2502 {
2503 switch (key[0]) {
2504 case 'e':
2505 case 'E':
2506 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
2507 if (unlikely(value < 0)) {
2508 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2509 } else if (unlikely(value > UINT32_MAX)) {
2510 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2511 } else {
2512 j->exit_timeout = (typeof(j->exit_timeout)) value;
2513 }
2514 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
2515 j->main_thread_priority = value;
2516 }
2517 break;
2518 case 'j':
2519 case 'J':
2520 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
2521 job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2522
2523 launch_data_t pri = launch_data_new_integer(value);
2524 if (job_assumes(j, pri != NULL)) {
2525 jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2526 launch_data_free(pri);
2527 }
2528 }
2529 case 'n':
2530 case 'N':
2531 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
2532 if (unlikely(value < PRIO_MIN)) {
2533 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2534 } else if (unlikely(value > PRIO_MAX)) {
2535 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2536 } else {
2537 j->nice = (typeof(j->nice)) value;
2538 j->setnice = true;
2539 }
2540 }
2541 break;
2542 case 't':
2543 case 'T':
2544 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
2545 if (unlikely(value < 0)) {
2546 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2547 } else if (unlikely(value > UINT32_MAX)) {
2548 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2549 } else {
2550 j->timeout = (typeof(j->timeout)) value;
2551 }
2552 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2553 if (value < 0) {
2554 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2555 } else if (value > UINT32_MAX) {
2556 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2557 } else {
2558 j->min_run_time = (typeof(j->min_run_time)) value;
2559 }
2560 }
2561 break;
2562 case 'u':
2563 case 'U':
2564 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2565 j->mask = value;
2566 j->setmask = true;
2567 }
2568 break;
2569 case 's':
2570 case 'S':
2571 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
2572 if (unlikely(value <= 0)) {
2573 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2574 } else if (unlikely(value > UINT32_MAX)) {
2575 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2576 } else {
2577 runtime_add_weak_ref();
2578 j->start_interval = (typeof(j->start_interval)) value;
2579
2580 (void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
2581 }
2582 #if HAVE_SANDBOX
2583 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2584 j->seatbelt_flags = value;
2585 #endif
2586 }
2587
2588 break;
2589 default:
2590 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2591 break;
2592 }
2593 }
2594
2595 void
2596 job_import_opaque(job_t j __attribute__((unused)), const char *key, launch_data_t value __attribute__((unused)))
2597 {
2598 switch (key[0]) {
2599 case 'q':
2600 case 'Q':
2601 #if HAVE_QUARANTINE
2602 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2603 size_t tmpsz = launch_data_get_opaque_size(value);
2604
2605 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2606 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2607 j->quarantine_data_sz = tmpsz;
2608 }
2609 }
2610 #endif
2611 case 's':
2612 case 'S':
2613 if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
2614 size_t tmpsz = launch_data_get_opaque_size(value);
2615 if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
2616 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2617 }
2618 }
2619 break;
2620 default:
2621 break;
2622 }
2623 }
2624
2625 static void
2626 policy_setup(launch_data_t obj, const char *key, void *context)
2627 {
2628 job_t j = context;
2629 bool found_key = false;
2630
2631 switch (key[0]) {
2632 case 'd':
2633 case 'D':
2634 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2635 j->deny_job_creation = launch_data_get_bool(obj);
2636 found_key = true;
2637 }
2638 break;
2639 default:
2640 break;
2641 }
2642
2643 if (unlikely(!found_key)) {
2644 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2645 }
2646 }
2647
2648 void
2649 job_import_dictionary(job_t j, const char *key, launch_data_t value)
2650 {
2651 launch_data_t tmp;
2652
2653 switch (key[0]) {
2654 case 'p':
2655 case 'P':
2656 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2657 launch_data_dict_iterate(value, policy_setup, j);
2658 }
2659 break;
2660 case 'k':
2661 case 'K':
2662 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2663 launch_data_dict_iterate(value, semaphoreitem_setup, j);
2664 }
2665 break;
2666 case 'i':
2667 case 'I':
2668 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2669 j->inetcompat = true;
2670 j->abandon_pg = true;
2671 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2672 j->inetcompat_wait = launch_data_get_bool(tmp);
2673 }
2674 }
2675 break;
2676 case 'j':
2677 case 'J':
2678 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
2679 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2680 }
2681 case 'e':
2682 case 'E':
2683 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2684 launch_data_dict_iterate(value, envitem_setup, j);
2685 }
2686 break;
2687 case 'u':
2688 case 'U':
2689 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2690 j->importing_global_env = true;
2691 launch_data_dict_iterate(value, envitem_setup, j);
2692 j->importing_global_env = false;
2693 }
2694 break;
2695 case 's':
2696 case 'S':
2697 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2698 launch_data_dict_iterate(value, socketgroup_setup, j);
2699 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2700 calendarinterval_new_from_obj(j, value);
2701 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2702 launch_data_dict_iterate(value, limititem_setup, j);
2703 #if HAVE_SANDBOX
2704 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2705 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2706 #endif
2707 }
2708 break;
2709 case 'h':
2710 case 'H':
2711 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2712 j->importing_hard_limits = true;
2713 launch_data_dict_iterate(value, limititem_setup, j);
2714 j->importing_hard_limits = false;
2715 }
2716 break;
2717 case 'm':
2718 case 'M':
2719 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2720 launch_data_dict_iterate(value, machservice_setup, j);
2721 }
2722 break;
2723 case 'l':
2724 case 'L':
2725 if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2726 launch_data_dict_iterate(value, eventsystem_setup, j);
2727 } else {
2728 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2729 return;
2730 }
2731 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2732 return;
2733 }
2734 }
2735 break;
2736 default:
2737 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2738 break;
2739 }
2740 }
2741
2742 void
2743 job_import_array(job_t j, const char *key, launch_data_t value)
2744 {
2745 size_t i, value_cnt = launch_data_array_get_count(value);
2746
2747 switch (key[0]) {
2748 case 'p':
2749 case 'P':
2750 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
2751 return;
2752 }
2753 break;
2754 case 'l':
2755 case 'L':
2756 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2757 return;
2758 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2759 return;
2760 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2761 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2762 return;
2763 }
2764 break;
2765 case 'b':
2766 case 'B':
2767 if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
2768 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
2769 j->j_binpref_cnt = value_cnt;
2770 for (i = 0; i < value_cnt; i++) {
2771 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
2772 }
2773 }
2774 }
2775 break;
2776 case 's':
2777 case 'S':
2778 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2779 for (i = 0; i < value_cnt; i++) {
2780 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
2781 }
2782 }
2783 break;
2784 default:
2785 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
2786 break;
2787 }
2788 }
2789
2790 void
2791 job_import_keys(launch_data_t obj, const char *key, void *context)
2792 {
2793 job_t j = context;
2794 launch_data_type_t kind;
2795
2796 if (!obj) {
2797 launchd_syslog(LOG_ERR, "NULL object given to job_import_keys().");
2798 return;
2799 }
2800
2801 kind = launch_data_get_type(obj);
2802
2803 switch (kind) {
2804 case LAUNCH_DATA_BOOL:
2805 job_import_bool(j, key, launch_data_get_bool(obj));
2806 break;
2807 case LAUNCH_DATA_STRING:
2808 job_import_string(j, key, launch_data_get_string(obj));
2809 break;
2810 case LAUNCH_DATA_INTEGER:
2811 job_import_integer(j, key, launch_data_get_integer(obj));
2812 break;
2813 case LAUNCH_DATA_DICTIONARY:
2814 job_import_dictionary(j, key, obj);
2815 break;
2816 case LAUNCH_DATA_ARRAY:
2817 job_import_array(j, key, obj);
2818 break;
2819 case LAUNCH_DATA_OPAQUE:
2820 job_import_opaque(j, key, obj);
2821 break;
2822 default:
2823 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
2824 break;
2825 }
2826 }
2827
/* Validate a job plist and import it into job manager `jm` (or into the
 * session the plist requests via LimitLoadToSessionType).
 *
 * On success returns the new job, with errno set to ENEEDAUTH if the job is
 * stalled awaiting a security session. Returns NULL with errno set (EINVAL,
 * EPERM, EEXIST) on failure; note the EEXIST/xpc_singleton case returns the
 * *existing* job rather than NULL.
 */
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	if (!jobmgr_assumes(jm, pload != NULL)) {
		errno = EINVAL;
		return NULL;
	}

	// The payload must be a dictionary with a non-empty string Label.
	if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(label = launch_data_get_string(tmp)))) {
		errno = EINVAL;
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* Under "hand of god" mode, only jobs running as the same user as the
	 * privileged (god) job may be imported.
	 */
	if (unlikely(launchd_embedded_handofgod && _launchd_embedded_god)) {
		if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
			errno = EPERM;
			return NULL;
		}

		const char *username = NULL;
		if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
			username = launch_data_get_string(tmp);
		} else {
			errno = EPERM;
			return NULL;
		}

		if (!jobmgr_assumes(jm, _launchd_embedded_god->username != NULL && username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (unlikely(strcmp(_launchd_embedded_god->username, username) != 0)) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM))
		&& (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	int argc = 0;
	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		/* Stack-allocated vector of borrowed pointers into the payload; it
		 * only needs to live until job_new() copies the strings.
		 */
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		argv[i] = NULL;
		argc = i;
	}

	if (!prog && argc == 0) {
		jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
		errno = EINVAL;
		return NULL;
	}

	/* Find the requested session. You cannot load services into XPC domains in
	 * this manner.
	 */
	launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	if (session) {
		jobmgr_t jmt = NULL;
		if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
			jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
			if (!jmt) {
				jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
			} else {
				jm = jmt;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
		}

		if (!jmt) {
			errno = EINVAL;
			return NULL;
		}
	}

	/* For legacy reasons, we have a global hash of all labels in all job
	 * managers. So rather than make it a global, we store it in the root job
	 * manager. But for an XPC domain, we store a local hash of all services in
	 * the domain.
	 */
	jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
	if (unlikely((j = job_find(where2look, label)) != NULL)) {
		if (jm->xpc_singleton) {
			/* There can (and probably will be) multiple attempts to import the
			 * same XPC service from the same framework. This is okay. It's
			 * treated as a singleton, so just return the existing one so that
			 * it may be aliased into the requesting process' XPC domain.
			 */
			errno = EEXIST;
			return j;
		} else {
			/* If we're not a global XPC domain, then it's an error to try
			 * importing the same job/service multiple times.
			 */
			errno = EEXIST;
			return NULL;
		}
	} else if (unlikely(!jobmgr_label_test(where2look, label))) {
		errno = EINVAL;
		return NULL;
	}
	jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);

	if (likely(j = job_new(jm, label, prog, argv))) {
		// Populate the job from the remaining plist keys.
		launch_data_dict_iterate(pload, job_import_keys, j);
		if (!uuid_is_null(j->expected_audit_uuid)) {
			uuid_string_t uuid_str;
			uuid_unparse(j->expected_audit_uuid, uuid_str);
			job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
			LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
			// Deliberately left set so callers know the job is stalled.
			errno = ENEEDAUTH;
		} else {
			job_log(j, LOG_DEBUG, "No security session specified.");
			j->asport = MACH_PORT_NULL;
		}

		if (pid1_magic && !jm->parentmgr) {
			/* Workaround reentrancy in CF. We don't make this a global variable
			 * because we don't want per-user launchd's to inherit it. So we
			 * just set it for every job that we import into the System session.
			 *
			 * See <rdar://problem/9468837>.
			 */
			envitem_new(j, "__CF_USER_TEXT_ENCODING", "0x0:0:0", false);
		}

		if (j->event_monitor) {
			eventsystem_ping();
		}

#if TARGET_OS_EMBEDDED
		/* SpringBoard runs at Interactive priority.
		 *
		 * See <rdar://problem/9539873>.
		 */
		if (j->embedded_god) {
			j->pstype = POSIX_SPAWN_IOS_INTERACTIVE;
		}
#endif
	}

	return j;
}
3021
3022 bool
3023 jobmgr_label_test(jobmgr_t jm, const char *str)
3024 {
3025 const char *ptr;
3026
3027 if (str[0] == '\0') {
3028 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
3029 return false;
3030 }
3031
3032 for (ptr = str; *ptr; ptr++) {
3033 if (iscntrl(*ptr)) {
3034 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
3035 return false;
3036 }
3037 }
3038
3039 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0)
3040 || (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
3041 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
3042 return false;
3043 }
3044
3045 return true;
3046 }
3047
3048 job_t
3049 job_find(jobmgr_t jm, const char *label)
3050 {
3051 job_t ji;
3052
3053 if (!jm) {
3054 jm = root_jobmgr;
3055 }
3056
3057 LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
3058 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
3059 // 5351245 and 5488633 respectively
3060 continue;
3061 }
3062
3063 if (strcmp(ji->label, label) == 0) {
3064 return ji;
3065 }
3066 }
3067
3068 errno = ESRCH;
3069 return NULL;
3070 }
3071
3072 // Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid().
3073 job_t
3074 jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
3075 {
3076 job_t ji = NULL;
3077 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3078 if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay))) {
3079 return ji;
3080 }
3081 }
3082
3083 jobmgr_t jmi = NULL;
3084 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3085 if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
3086 break;
3087 }
3088 }
3089
3090 return ji;
3091 }
3092
3093 job_t
3094 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3095 {
3096 job_t ji;
3097
3098 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3099 if (ji->p == p) {
3100 return ji;
3101 }
3102 }
3103
3104 return create_anon ? job_new_anonymous(jm, p) : NULL;
3105 }
3106
/* Resolve a MIG request port to a job. Checked in order: (1) the manager's
 * own bootstrap port, resolved by caller PID (creating an anonymous job if
 * needed); (2) submanagers, recursively; (3) per-job ports in this manager.
 */
job_t
job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
{
	if (mport == jm->jm_port) {
		return jobmgr_find_by_pid(jm, upid, true);
	}

	jobmgr_t sub;
	SLIST_FOREACH(sub, &jm->submgrs, sle) {
		job_t found = job_mig_intran2(sub, mport, upid);
		if (found != NULL) {
			return found;
		}
	}

	job_t candidate;
	LIST_FOREACH(candidate, &jm->jobs, sle) {
		if (candidate->j_port == mport) {
			return candidate;
		}
	}

	return NULL;
}
3133
/* MIG intran routine: translate an incoming request port into the job_t
 * for the calling process. On failure, log diagnostic information about
 * the caller (which may have already exited).
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr = job_mig_intran2(root_jobmgr, p, ldc->pid);

	if (jr == NULL) {
		struct proc_bsdshortinfo proc;
		/* proc_pidinfo() returning 0 means it failed; inspect errno to
		 * distinguish "caller already gone" from a real error.
		 */
		if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno == ESRCH) {
				jobmgr_log(root_jobmgr, LOG_ERR, "%s[%i] disappeared out from under us (UID: %u EUID: %u)", proc.pbsi_comm, ldc->pid, ldc->uid, ldc->euid);
			} else {
				(void)jobmgr_assumes_zero(root_jobmgr, errno);
			}
		}
	}

	return jr;
}
3155
/* Map a Mach service receive right back to the job that owns it, or NULL
 * if no registered service holds the port.
 */
job_t
job_find_by_service_port(mach_port_t p)
{
	struct machservice *svc;

	LIST_FOREACH(svc, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (svc->port != p) {
			continue;
		}
		if (svc->recv) {
			return svc->job;
		}
	}

	return NULL;
}
3169
3170 void
3171 job_mig_destructor(job_t j)
3172 {
3173 /* The job can go invalid before this point.
3174 *
3175 * <rdar://problem/5477111>
3176 */
3177 if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
3178 job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
3179 job_remove(j);
3180 }
3181
3182 workaround_5477111 = NULL;
3183
3184 calendarinterval_sanity_check();
3185 }
3186
3187 void
3188 job_export_all2(jobmgr_t jm, launch_data_t where)
3189 {
3190 jobmgr_t jmi;
3191 job_t ji;
3192
3193 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3194 job_export_all2(jmi, where);
3195 }
3196
3197 LIST_FOREACH(ji, &jm->jobs, sle) {
3198 launch_data_t tmp;
3199
3200 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3201 launch_data_dict_insert(where, tmp, ji->label);
3202 }
3203 }
3204 }
3205
3206 launch_data_t
3207 job_export_all(void)
3208 {
3209 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3210
3211 if (resp != NULL) {
3212 job_export_all2(root_jobmgr, resp);
3213 } else {
3214 (void)osx_assumes_zero(errno);
3215 }
3216
3217 return resp;
3218 }
3219
/* Log any processes that remain in this (dead) job's process group other
 * than the job itself and PIDs 0/1. Diagnostic aid only; compiled-in but
 * gated on launchd_apple_internal at runtime.
 */
void
job_log_stray_pg(job_t j)
{
	pid_t *pids = NULL;
	// Worst case: every process on the system is in the group.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	if (job_assumes_zero_p(j, (kp_cnt = proc_listpgrppids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = pids[i];
		// Skip the job itself, the kernel (0), and launchd (1).
		if (p_i == j->p) {
			continue;
		} else if (p_i == 0 || p_i == 1) {
			continue;
		}

		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			// ESRCH just means the process exited between listing and query.
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		pid_t pp_i = proc.pbsi_ppid;
		const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
		const char *n = proc.pbsi_comm;

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
	}

out:
	free(pids);
}
3266
/* Collect a dead job: harvest its exit status and resource usage, kill
 * stray process-group members, notify waiters (spawn/exit-status ports),
 * fold accounting into the job's lifetime totals, and reset per-instance
 * state. After this returns, j->p is 0 and j->reaped is set.
 */
void
job_reap(job_t j)
{
	struct rusage ru;
	bool is_system_bootstrapper = ((j->is_bootstrapper && pid1_magic) && !j->mgr->parentmgr);

	/* Zero ru up front: on the implicit-reap path below, wait4() is never
	 * called, yet ru is still folded into j->ru near the bottom of this
	 * function. Without this, those statistics would read uninitialized
	 * stack memory.
	 */
	memset(&ru, 0, sizeof(ru));

	job_log(j, LOG_DEBUG, "Reaping");

	if (unlikely(j->weird_bootstrap)) {
		int64_t junk = 0;
		job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
	}

	if (j->fork_fd) {
		(void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
		j->fork_fd = 0;
	}

	if (j->anonymous) {
		// Anonymous jobs aren't ours to wait on; record a clean exit.
		j->last_exit_status = 0;
		memset(&ru, 0, sizeof(ru));
	} else {
		uint64_t rt = runtime_get_nanoseconds_since(j->start_time);
		j->trt += rt;

		job_log(j, LOG_PERF, "Last instance wall time: %06f", (double)rt / (double)NSEC_PER_SEC);
		j->nruns++;

		/* The job is dead. While the PID/PGID is still known to be valid, try
		 * to kill abandoned descendant processes.
		 */
		job_log_stray_pg(j);
		if (!j->abandon_pg) {
			if (unlikely(killpg2(j->p, SIGTERM) == -1 && errno != ESRCH)) {
				job_log(j, LOG_APPLEONLY, "Bug: 5487498");
			}
		}

		// r records whether wait4() actually ran and succeeded.
		int r = -1;
		if (!j->implicit_reap) {
			/* If the shutdown monitor has suspended a task and not resumed it
			 * resumed it before exiting, the kernel will not clean up after the
			 * shutdown monitor. It will, instead, leave the task suspended and
			 * not process any pending signals on the event loop for the task.
			 *
			 * There are a variety of other kernel bugs that could prevent a
			 * process from exiting, usually having to do with faulty hardware
			 * or talking to misbehaving drivers that mark a thread as
			 * uninterruptible and deadlock/hang before unmarking it as such. So
			 * we have to work around that too.
			 *
			 * See <rdar://problem/9284889&9359725>.
			 */
			if (j->workaround9359725) {
				job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
				j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
				memset(&ru, 0, sizeof(ru));
			} else if ((r = wait4(j->p, &j->last_exit_status, 0, &ru)) == -1) {
				job_log(j, LOG_ERR, "Reap failed. Assuming job exited: %d: %s", errno, strerror(errno));
				j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
				memset(&ru, 0, sizeof(ru));
			}
		}

		if (launchd_log_perf && r != -1) {
			job_log(j, LOG_PERF, "Last instance user time: %ld.%06u", ru.ru_utime.tv_sec, ru.ru_utime.tv_usec);
			job_log(j, LOG_PERF, "Last instance system time: %ld.%06u", ru.ru_stime.tv_sec, ru.ru_stime.tv_usec);
			job_log(j, LOG_PERF, "Last instance maximum resident size: %lu", ru.ru_maxrss);
			job_log(j, LOG_PERF, "Last instance integral shared memory size: %lu", ru.ru_ixrss);
			job_log(j, LOG_PERF, "Last instance integral unshared data size: %lu", ru.ru_idrss);
			job_log(j, LOG_PERF, "Last instance integral unshared stack size: %lu", ru.ru_isrss);
			job_log(j, LOG_PERF, "Last instance page reclaims: %lu", ru.ru_minflt);
			job_log(j, LOG_PERF, "Last instance page faults: %lu", ru.ru_majflt);
			job_log(j, LOG_PERF, "Last instance swaps: %lu", ru.ru_nswap);
			job_log(j, LOG_PERF, "Last instance input ops: %lu", ru.ru_inblock);
			job_log(j, LOG_PERF, "Last instance output ops: %lu", ru.ru_oublock);
			job_log(j, LOG_PERF, "Last instance messages sent: %lu", ru.ru_msgsnd);
			job_log(j, LOG_PERF, "Last instance messages received: %lu", ru.ru_msgrcv);
			job_log(j, LOG_PERF, "Last instance signals received: %lu", ru.ru_nsignals);
			job_log(j, LOG_PERF, "Last instance voluntary context switches: %lu", ru.ru_nvcsw);
			job_log(j, LOG_PERF, "Last instance involuntary context switches: %lu", ru.ru_nivcsw);
		}
	}

	if (j->exit_timeout) {
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}

	LIST_REMOVE(j, pid_hash_sle);

	if (j->sent_signal_time) {
		uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);

		td_sec = td / NSEC_PER_SEC;
		td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

		job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
	}

	// Accumulate this instance's resource usage into the job's totals.
	timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
	timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
	if (j->ru.ru_maxrss < ru.ru_maxrss) {
		j->ru.ru_maxrss = ru.ru_maxrss;
	}

	j->ru.ru_ixrss += ru.ru_ixrss;
	j->ru.ru_idrss += ru.ru_idrss;
	j->ru.ru_isrss += ru.ru_isrss;
	j->ru.ru_minflt += ru.ru_minflt;
	j->ru.ru_majflt += ru.ru_majflt;
	j->ru.ru_nswap += ru.ru_nswap;
	j->ru.ru_inblock += ru.ru_inblock;
	j->ru.ru_oublock += ru.ru_oublock;
	j->ru.ru_msgsnd += ru.ru_msgsnd;
	j->ru.ru_msgrcv += ru.ru_msgrcv;
	j->ru.ru_nsignals += ru.ru_nsignals;
	j->ru.ru_nvcsw += ru.ru_nvcsw;
	j->ru.ru_nivcsw += ru.ru_nivcsw;
	job_log_perf_statistics(j);

	int exit_status = WEXITSTATUS(j->last_exit_status);
	if (WIFEXITED(j->last_exit_status) && exit_status != 0) {
		if (!j->did_exec && _launchd_support_system) {
			// The process died before ever calling exec(3); the exit code
			// is the errno from the failed exec attempt.
			xpc_object_t event = NULL;
			switch (exit_status) {
			case ENOENT:
			case ENOTDIR:
			case ESRCH:
				job_log(j, LOG_NOTICE, "Job failed to exec(3). Setting up event to tell us when to try again: %d: %s", exit_status, strerror(exit_status));
				event = xpc_dictionary_create(NULL, NULL, 0);
				xpc_dictionary_set_string(event, "Executable", j->prog ? j->prog : j->argv[0]);
				if (j->mach_uid) {
					xpc_dictionary_set_uint64(event, "UID", j->mach_uid);
				} else if (j->username) {
					xpc_dictionary_set_string(event, "UserName", j->username);
				}

				if (j->groupname) {
					xpc_dictionary_set_string(event, "GroupName", j->groupname);
				}

				(void)externalevent_new(j, _launchd_support_system, j->label, event);
				xpc_release(event);

				j->waiting4ok = true;
				/* NOTE(review): no break here, so the known exec(3) failure
				 * cases above fall through and also log the "weird reason"
				 * message below. Preserved as-is; confirm intent before
				 * adding a break.
				 */
			default:
				job_log(j, LOG_NOTICE, "Job failed to exec(3) for weird reason: %d", exit_status);
			}
		} else {
			int level = LOG_INFO;
			if (exit_status != 0) {
				level = LOG_ERR;
			}

			job_log(j, level, "Exited with code: %d", exit_status);
		}
	}

	if (WIFSIGNALED(j->last_exit_status)) {
		int s = WTERMSIG(j->last_exit_status);
		if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
			job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
		} else if (!j->stopped && !j->clean_kill) {
			switch (s) {
			// Signals which indicate a crash.
			case SIGILL:
			case SIGABRT:
			case SIGFPE:
			case SIGBUS:
			case SIGSEGV:
			case SIGSYS:
			/* If the kernel has posted NOTE_EXIT and the signal sent to the process was
			 * SIGTRAP, assume that it's a crash.
			 */
			case SIGTRAP:
				j->crashed = true;
				job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
				break;
			default:
				job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
				break;
			}

			if (is_system_bootstrapper && j->crashed) {
				job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
			}
		}
	}

	j->reaped = true;

	// On crash or exec failure, drain and/or reset the job's Mach services.
	struct machservice *msi = NULL;
	if (j->crashed || !(j->did_exec || j->anonymous)) {
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
				machservice_drain_port(msi);
			}

			if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
				machservice_resetport(j, msi);
			}
		}
	}

	/* HACK: Essentially duplicating the logic directly above. But this has
	 * gotten really hairy, and I don't want to try consolidating it right now.
	 */
	if (j->xpc_service && !j->xpcproxy_did_exec) {
		job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
		SLIST_FOREACH(msi, &j->machservices, sle) {
			/* Drain the messages but do not reset the port. If xpcproxy could
			 * not exec(3), then we don't want to continue trying, since there
			 * is very likely a serious configuration error with the service.
			 *
			 * The above comment is weird. I originally said we should drain
			 * messages but not reset the port, but that's exactly what we do
			 * below, and I'm not sure which is the mistake, the comment or the
			 * actual behavior.
			 *
			 * Since it's always been this way, I'll assume that the comment is
			 * incorrect, but I'll leave it in place just to remind myself to
			 * actually look into it at some point.
			 *
			 * <rdar://problem/8986802>
			 */
			if (msi->upfront && job_assumes(j, !msi->isActive)) {
				machservice_resetport(j, msi);
			}
		}
	}

	// Forcibly resume any per-user launchds this job suspended and never resumed.
	struct suspended_peruser *spi = NULL;
	while ((spi = LIST_FIRST(&j->suspended_perusers))) {
		job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
		spi->j->peruser_suspend_count--;
		if (spi->j->peruser_suspend_count == 0) {
			job_dispatch(spi->j, false);
		}
		LIST_REMOVE(spi, sle);
		free(spi);
	}

	if (j->exit_status_dest) {
		errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
		if (errno && errno != MACH_SEND_INVALID_DEST) {
			(void)job_assumes_zero(j, errno);
		}

		j->exit_status_dest = MACH_PORT_NULL;
	}

	if (j->spawn_reply_port) {
		/* If the child never called exec(3), we must send a spawn() reply so
		 * that the requestor can get exit status from it. If we fail to send
		 * the reply for some reason, we have to deallocate the exit status port
		 * ourselves.
		 */
		kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
		if (kr) {
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			}

			(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
		}

		j->exit_status_port = MACH_PORT_NULL;
		j->spawn_reply_port = MACH_PORT_NULL;
	}

	if (j->anonymous) {
		total_anon_children--;
		if (j->holds_ref) {
			job_log(j, LOG_PERF, "Anonymous job exited holding reference.");
			runtime_del_ref();
		}
	} else {
		job_log(j, LOG_PERF, "Job exited.");
		runtime_del_ref();
		total_children--;
	}

	if (j->has_console) {
		launchd_wsp = 0;
	}

	if (j->shutdown_monitor) {
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
		_launchd_shutdown_monitor = NULL;
		j->shutdown_monitor = false;
	}

	if (!j->anonymous) {
		j->mgr->normal_active_cnt--;
	}
	// Reset per-instance state for the next run.
	j->sent_signal_time = 0;
	j->sent_sigkill = false;
	j->clean_kill = false;
	j->event_monitor_ready2signal = false;
	j->p = 0;
}
3568
3569 void
3570 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3571 {
3572 jobmgr_t jmi, jmn;
3573 job_t ji, jn;
3574
3575 if (jm->shutting_down) {
3576 return;
3577 }
3578
3579 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3580 jobmgr_dispatch_all(jmi, newmounthack);
3581 }
3582
3583 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3584 if (newmounthack && ji->start_on_mount) {
3585 ji->start_pending = true;
3586 }
3587
3588 job_dispatch(ji, false);
3589 }
3590 }
3591
/* Dispatch any jobs whose OtherJobEnabled/OtherJobDisabled semaphores
 * reference job j's label. Called when j's state changes so that
 * interested jobs can re-evaluate their keepalive criteria.
 */
void
job_dispatch_curious_jobs(job_t j)
{
	job_t ji = NULL, jt = NULL;
	// _SAFE iteration: dispatching ji may remove it from s_curious_jobs.
	SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
		struct semaphoreitem *si = NULL;
		SLIST_FOREACH(si, &ji->semaphores, sle) {
			// Only other-job semaphores are relevant here.
			if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
				continue;
			}

			if (strcmp(si->what, j->label) == 0) {
				job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);

				if (!ji->removing) {
					job_dispatch(ji, false);
				} else {
					job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
				}

				/* ji could be removed here, so don't do anything with it or its semaphores
				 * after this point.
				 */
				break;
			}
		}
	}
}
3620
/* Evaluate what to do with job j right now: start it (if kickstart or its
 * keepalive criteria say so), watch it, or remove it if useless. Returns j,
 * or NULL if the job was not dispatchable or was removed. WARNING: may
 * delete j; callers must not use a saved pointer after a NULL return.
 */
job_t
job_dispatch(job_t j, bool kickstart)
{
	// Don't dispatch a job if it has no audit session set.
	if (!uuid_is_null(j->expected_audit_uuid)) {
		job_log(j, LOG_DEBUG, "Job is still awaiting its audit session UUID. Not dispatching.");
		return NULL;
	}
	if (j->alias) {
		job_log(j, LOG_DEBUG, "Job is an alias. Not dispatching.");
		return NULL;
	}

	if (j->waiting4ok) {
		job_log(j, LOG_DEBUG, "Job cannot exec(3). Not dispatching.");
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	// Hand-of-god mode: only the god job's user may have jobs dispatched.
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!job_assumes(j, _launchd_embedded_god->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	/*
	 * The whole job removal logic needs to be consolidated. The fact that
	 * a job can be removed from just about anywhere makes it easy to have
	 * stale pointers left behind somewhere on the stack that might get
	 * used after the deallocation. In particular, during job iteration.
	 *
	 * This is a classic example. The act of dispatching a job may delete it.
	 */
	if (!job_active(j)) {
		if (job_useless(j)) {
			job_log(j, LOG_DEBUG, "Job is useless. Removing.");
			job_remove(j);
			return NULL;
		}
		if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
			job_log(j, LOG_DEBUG, "Per-user launchd is suspended. Not dispatching.");
			return NULL;
		}

		if (kickstart || job_keepalive(j)) {
			job_log(j, LOG_DEBUG, "%starting job", kickstart ? "Kicks" : "S");
			job_start(j);
		} else {
			job_log(j, LOG_DEBUG, "Watching job.");
			job_watch(j);

			/*
			 * 5455720
			 *
			 * Path checking and monitoring is really racy right now.
			 * We should clean this up post Leopard.
			 */
			if (job_keepalive(j)) {
				job_start(j);
			}
		}
	} else {
		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job: %s.", job_active(j));
	}

	return j;
}
3698
/* Send SIGKILL to the job's process and arm a one-shot timer so we notice
 * if the process still fails to exit. No-op for anonymous jobs or jobs
 * without a live PID.
 */
void
job_kill(job_t j)
{
	if (j->p == 0 || j->anonymous) {
		return;
	}

	(void)job_assumes_zero_p(j, kill2(j->p, SIGKILL));

	j->sent_sigkill = true;
	(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j));

	job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
}
3713
3714 void
3715 job_open_shutdown_transaction(job_t j)
3716 {
3717 int rv = proc_set_dirty(j->p, true);
3718 if (rv != 0) {
3719 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
3720 j->dirty_at_shutdown = false;
3721 }
3722 }
3723
3724 void
3725 job_close_shutdown_transaction(job_t j)
3726 {
3727 if (j->dirty_at_shutdown) {
3728 job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
3729 (void)job_assumes_zero(j, proc_set_dirty(j->p, false));
3730 j->dirty_at_shutdown = false;
3731 }
3732 }
3733
/* Log children of this job that were fork()ed but never exec()ed (P_EXEC
 * not set). Diagnostic aid gated on launchd_apple_internal; skipped for
 * anonymous and per-user jobs.
 */
void
job_log_children_without_exec(job_t j)
{
	pid_t *pids = NULL;
	// Worst case: every process on the system is a child.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal || j->anonymous || j->per_user) {
		return;
	}

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	if (job_assumes_zero_p(j, (kp_cnt = proc_listchildpids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			// ESRCH: the child exited between listing and query.
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}
		// P_EXEC set means the child did call exec(); nothing to report.
		if (proc.pbsi_flags & P_EXEC) {
			continue;
		}

		job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
	}

out:
	free(pids);
}
3770
/* Handle an EVFILT_PROC kevent for job j: NOTE_EXEC (program changed),
 * NOTE_FORK (child forked), and NOTE_EXIT (process died, reap it). May
 * remove j (anonymous jobs on exit), so callers must not reuse j after.
 */
void
job_callback_proc(job_t j, struct kevent *kev)
{
	bool program_changed = false;
	int fflags = kev->fflags;

	job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
	log_kevent_struct(LOG_DEBUG, kev, 0);

	if (fflags & NOTE_EXEC) {
		program_changed = true;

		if (j->anonymous) {
			// Anonymous jobs are relabeled after exec to track the new image.
			struct proc_bsdshortinfo proc;
			if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
				char newlabel[1000];

				snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);

				job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);

				// Re-hash under the new label.
				LIST_REMOVE(j, label_hash_sle);
				/* NOTE(review): strcpy() into j->label assumes the original
				 * label allocation is large enough for the new string —
				 * verify against how anonymous job labels are allocated.
				 */
				strcpy((char *)j->label, newlabel);

				jobmgr_t where2put = root_jobmgr;
				if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
					where2put = j->mgr;
				}
				LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
			} else if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
		} else {
			// Managed job exec()ed: deliver the pending spawn reply, if any.
			if (j->spawn_reply_port) {
				errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
				if (errno) {
					if (errno != MACH_SEND_INVALID_DEST) {
						(void)job_assumes_zero(j, errno);
					}
					(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
				}

				j->spawn_reply_port = MACH_PORT_NULL;
				j->exit_status_port = MACH_PORT_NULL;
			}

			if (j->xpc_service && j->did_exec) {
				j->xpcproxy_did_exec = true;
			}

			j->did_exec = true;
			job_log(j, LOG_DEBUG, "Program changed");
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
		job_log_children_without_exec(j);
	}

	if (fflags & NOTE_EXIT) {
		if (kev->data & NOTE_EXIT_REPARENTED) {
			// Kernel already reaped the child; status rides in the kevent.
			j->implicit_reap = true;
			j->last_exit_status = (kev->data & 0xffff);

			job_log(j, LOG_INFO, "Job was implicitly reaped by the kernel.");
		}

		job_reap(j);

		if (j->anonymous) {
			job_remove(j);
			j = NULL;
		} else {
			(void)job_dispatch(j, false);
		}
	}
}
3849
/* Handle an EVFILT_TIMER kevent for job j. The `ident` pointer identifies
 * which of the job's timers fired: respawn throttle (j itself), semaphore
 * re-check, StartInterval, or the exit/SIGKILL timeout.
 */
void
job_callback_timer(job_t j, void *ident)
{
	if (j == ident) {
		// Respawn-throttle timer: force a start attempt now.
		job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (!job_assumes(j, j->p != 0)) {
			return;
		}

		if (j->sent_sigkill) {
			// Process survived SIGKILL: simulate its exit.
			uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);

			td /= NSEC_PER_SEC;
			td -= j->clean_kill ? 0 : j->exit_timeout;

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
			j->workaround9359725 = true;

			if (launchd_trap_sigkill_bugs) {
				job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			/* We've simulated the exit, so we have to cancel the kevent for
			 * this job, otherwise we may get a kevent later down the road that
			 * has a stale context pointer (if we've removed the job). Or worse,
			 * it'll corrupt our data structures if the job still exists or the
			 * allocation was recycled.
			 *
			 * If the failing process had a tracer attached to it, we need to
			 * remove out NOTE_EXIT for that tracer too, otherwise the same
			 * thing might happen.
			 *
			 * Note that, if we're not shutting down, this will result in a
			 * zombie process just hanging around forever. But if the process
			 * didn't exit after receiving SIGKILL, odds are it would've just
			 * stuck around forever anyway.
			 *
			 * See <rdar://problem/9481630>.
			 */
			(void)kevent_mod((uintptr_t)j->p, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
			if (j->tracing_pid) {
				(void)kevent_mod((uintptr_t)j->tracing_pid, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
			}

			// Deliver a synthesized NOTE_EXIT so normal reaping runs.
			struct kevent bogus_exit;
			EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
			jobmgr_callback(j->mgr, &bogus_exit);
		} else {
			if (unlikely(j->debug_before_kill)) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
			job_kill(j);
		}
	} else {
		job_log(j, LOG_ERR, "Unrecognized job timer callback: %p", ident);
	}
}
3920
3921 void
3922 job_callback_read(job_t j, int ident)
3923 {
3924 if (ident == j->stdin_fd) {
3925 job_dispatch(j, true);
3926 } else {
3927 socketgroup_callback(j);
3928 }
3929 }
3930
/* Fan a process-exit kevent out across this manager tree: recurse into
 * submanagers first, then deliver the event to the job (if any) in this
 * manager that owns the exited PID.
 */
void
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
{
	jobmgr_t sub;

	SLIST_FOREACH(sub, &jm->submgrs, sle) {
		jobmgr_reap_bulk(sub, kev);
	}

	job_t owner = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false);
	if (owner != NULL) {
		kev->udata = owner;
		job_callback(owner, kev);
	}
}
3946
/* Top-level kevent demultiplexer for a job manager. Routes process exits,
 * control signals, filesystem mount/update events, timers, and vnode
 * events (console, autofs) to the appropriate handlers.
 */
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	jobmgr_t jm = obj;

#if TARGET_OS_EMBEDDED
	int flag2check = VQ_MOUNT;
#else
	int flag2check = VQ_UPDATE;
#endif

	switch (kev->filter) {
	case EVFILT_PROC:
		jobmgr_reap_bulk(jm, kev);
		root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:
			jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		case SIGUSR2:
			// Turn on all logging.
			launchd_log_perf = true;
			launchd_log_debug = true;
			launchd_log_shutdown = true;
			/* Hopefully /var is available by this point. If not, uh, oh well.
			 * It's just a debugging facility.
			 */
			return jobmgr_log_perf_statistics(jm);
		default:
			jobmgr_log(jm, LOG_ERR, "Unrecognized signal: %lu: %s", kev->ident, strsignal(kev->ident));
		}
		break;
	case EVFILT_FS:
		if (kev->fflags & flag2check) {
			// Mount-table update: check whether /var/log became writable.
			if (!launchd_var_available) {
				struct stat sb;
				if (stat("/var/log", &sb) == 0 && (sb.st_mode & S_IWUSR)) {
					launchd_var_available = true;
				}
			}
		} else if (kev->fflags & VQ_MOUNT) {
			// New filesystem mounted: re-dispatch jobs that start on mount.
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		// Timer identity is encoded in kev->ident (address of the owner).
		if (kev->ident == (uintptr_t)&sorted_calendar_events) {
			calendarinterval_callback();
		} else if (kev->ident == (uintptr_t)jm) {
			jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
			jobmgr_still_alive_with_check(jm);
		} else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
			jobmgr_do_garbage_collection(jm);
		} else if (kev->ident == (uintptr_t)&launchd_runtime_busy_time) {
			jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
			if (jobmgr_assumes_zero(jm, runtime_busy_cnt) == 0) {
				return launchd_shutdown();
			}
		}
		break;
	case EVFILT_VNODE:
		if (kev->ident == (uintptr_t)s_no_hang_fd) {
			// Watching for /dev/autofs_nowait to become available.
			int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
			if (unlikely(_no_hang_fd != -1)) {
				jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
				(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL));
				(void)jobmgr_assumes_zero_p(root_jobmgr, runtime_close(s_no_hang_fd));
				s_no_hang_fd = _fd(_no_hang_fd);
			}
		} else if (pid1_magic && launchd_console && kev->ident == (uintptr_t)fileno(launchd_console)) {
			// Console went away; reopen it.
			int cfd = -1;
			if (jobmgr_assumes_zero_p(jm, cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1) {
				_fd(cfd);
				if (!(launchd_console = fdopen(cfd, "w"))) {
					(void)jobmgr_assumes_zero(jm, errno);
					(void)close(cfd);
				}
			}
		}
		break;
	default:
		jobmgr_log(jm, LOG_ERR, "Unrecognized kevent filter: %hd", kev->filter);
	}
}
4035
/* Per-job kevent dispatcher: route the event to the handler matching its
 * filter (process, timer, read, or Mach port activity).
 */
void
job_callback(void *obj, struct kevent *kev)
{
	job_t j = obj;

	job_log(j, LOG_DEBUG, "Dispatching kevent callback.");

	switch (kev->filter) {
	case EVFILT_PROC:
		job_callback_proc(j, kev);
		break;
	case EVFILT_TIMER:
		job_callback_timer(j, (void *)kev->ident);
		break;
	case EVFILT_READ:
		job_callback_read(j, (int)kev->ident);
		break;
	case EVFILT_MACHPORT:
		(void)job_dispatch(j, true);
		break;
	default:
		job_log(j, LOG_ERR, "Unrecognized job callback filter: %hd", kev->filter);
		break;
	}
}
4056
/* Launch job `j`: enforce respawn throttling, create the IPC and
 * exec-synchronization socketpairs, fork, and either run the child-side
 * setup (case 0, which does not return normally) or record the new PID
 * and attach a proc kevent to it (parent path). No-op if the job is
 * already active or has no manager.
 */
void
job_start(job_t j)
{
	uint64_t td; // seconds since the job last started
	int spair[2]; // IPC socketpair, only created when `sipc` is true
	int execspair[2]; // stalls the child until the parent has attached a kevent
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXITSTATUS;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (unlikely(job_active(j))) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of the wall clock
	 * wherever possible.
	 */
	td = runtime_get_nanoseconds_since(j->start_time);
	td /= NSEC_PER_SEC;

	// Throttle rapid respawns: defer the start with a one-shot timer instead.
	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;

		/* We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */
		job_log(j, LOG_NOTICE, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j));
		job_ignore(j);
		return;
	}

	// Jobs advertising sockets or Mach services get an IPC channel to launchd.
	if (likely(!j->legacy_mach_job)) {
		sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_god;
	}

	if (sipc) {
		(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair));
	}

	(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair));

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		// fork failed: close everything and retry in one second.
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j));
		job_ignore(j);

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			(void)job_assumes_zero(j, runtime_close(spair[1]));
		}
		break;
	case 0:
		// Child side: block until the parent uncorks us, then exec.
		if (unlikely(_vproc_post_fork_ping())) {
			_exit(EXIT_FAILURE);
		}

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		// wait for our parent to say they've attached a kevent to us
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			// Advertise the child's end of the IPC socketpair via the environment.
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		// Parent side: reset per-run bookkeeping and start tracking the child.
		j->start_time = runtime_get_opaque_time();

		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->did_exec = false;
		j->xpcproxy_did_exec = false;
		j->checkedin = false;
		j->start_pending = false;
		j->reaped = false;
		j->crashed = false;
		j->stopped = false;
		j->workaround9359725 = false;
		j->implicit_reap = false;
		if (j->needs_kickoff) {
			j->needs_kickoff = false;

			if (SLIST_EMPTY(&j->semaphores)) {
				j->ondemand = false;
			}
		}

		if (j->has_console) {
			launchd_wsp = c;
		}

		job_log(j, LOG_PERF, "Job started.");
		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
		j->p = c;

		j->mgr->normal_active_cnt++;
		j->fork_fd = _fd(execspair[0]);
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[1]));
			ipc_open(_fd(spair[0]), j);
		}
		if (kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1) {
			job_ignore(j);
		} else {
			if (errno == ESRCH) {
				job_log(j, LOG_ERR, "Child was killed before we could attach a kevent.");
			} else {
				// Always false here; this logs the unexpected errno as a bug.
				(void)job_assumes(j, errno == ESRCH);
			}
			job_reap(j);

			/* If we have reaped this job within this same run loop pass, then
			 * it will be currently ignored. So if there's a failure to attach a
			 * kevent, we need to make sure that we watch the job so that we can
			 * respawn it.
			 *
			 * See <rdar://problem/10140809>.
			 */
			job_watch(j);
		}

		j->wait4debugger_oneshot = false;
		if (likely(!j->stall_before_exec)) {
			job_uncork_fork(j);
		}
		break;
	}
}
4202
/* Child-side exec path, run after fork(2) and the exec-sync handshake.
 * Builds the argv (optionally glob-expanded, optionally routed through
 * launchproxy for inetd-compatible jobs), configures posix_spawn
 * attributes (suspended-for-debugger, ASLR, jetsam, CPU monitor,
 * quarantine, sandbox), then execs via posix_spawn with
 * POSIX_SPAWN_SETEXEC. Never returns; _exit()s with errno on failure.
 */
void
job_start_child(job_t j)
{
	typeof(posix_spawn) *psf;
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC; // replace this process rather than spawn a new one
	size_t binpref_out_cnt = 0;
	size_t i;

	(void)job_assumes_zero(j, posix_spawnattr_init(&spattr));

	// Apply rlimits, uid/gid, chroot, working dir, stdio, env, etc.
	job_setup_attributes(j);

	if (unlikely(j->argv && j->globargv)) {
		// GLOB_DOOFFS reserves slot 0, filled below with the launchproxy path.
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (likely(j->argv)) {
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	// Non-inetd jobs skip the launchproxy slot reserved at argv[0].
	if (likely(!j->inetcompat)) {
		argv++;
	}

	if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
		if (!j->legacy_LS_job) {
			job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		}
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

#if !TARGET_OS_EMBEDDED
	if (unlikely(j->disable_aslr)) {
		spflags |= _POSIX_SPAWN_DISABLE_ASLR;
	}
#endif
	spflags |= j->pstype;

	(void)job_assumes_zero(j, posix_spawnattr_setflags(&spattr, spflags));
	if (unlikely(j->j_binpref_cnt)) {
		// Preferred CPU architectures (binary preferences) for fat binaries.
		(void)job_assumes_zero(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt));
		(void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

#if TARGET_OS_EMBEDDED
	/* Set jetsam attributes. POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY guards
	 * against a race which arises if, during spawn, an initial jetsam property
	 * update occurs before the values below are applied. In this case, the flag
	 * ensures that the subsequent change is ignored; the explicit update should
	 * be given priority.
	 */
	short flags = 0;
	if (j->jetsam_properties) {
		flags = POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY;
	}

	(void)job_assumes_zero(j, posix_spawnattr_setjetsam(&spattr, flags, j->jetsam_priority, j->jetsam_memlimit));
#endif

	if (!j->app) {
		// Arguments are (percentage, interval-seconds): 85% over 5 minutes.
		(void)job_assumes_zero(j, posix_spawnattr_setcpumonitor(&spattr, 85, 5 * 60));
	}

#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			if (job_assumes_zero(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
				(void)job_assumes_zero(j, qtn_proc_apply_to_self(qp));
			}
		}
	}
#endif

#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (job_assumes_zero_p(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf)) == -1) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}
#endif

	// Explicit Program key uses posix_spawn (exact path); otherwise PATH search.
	psf = j->prog ? posix_spawn : posix_spawnp;

	if (likely(!j->inetcompat)) {
		file2exec = j->prog ? j->prog : argv[0];
	}

	errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);

#if HAVE_SANDBOX
out_bad:
#endif
	// With POSIX_SPAWN_SETEXEC, reaching this point means the exec failed.
	_exit(errno);
}
4328
4329 void
4330 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
4331 {
4332 launch_data_t tmp;
4333 struct envitem *ei;
4334 job_t ji;
4335
4336 if (jm->parentmgr) {
4337 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4338 } else {
4339 char **tmpenviron = environ;
4340 for (; *tmpenviron; tmpenviron++) {
4341 char envkey[1024];
4342 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4343 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
4344 strncpy(envkey, *tmpenviron, sizeof(envkey));
4345 *(strchr(envkey, '=')) = '\0';
4346 launch_data_dict_insert(dict, s, envkey);
4347 }
4348 }
4349
4350 LIST_FOREACH(ji, &jm->jobs, sle) {
4351 SLIST_FOREACH(ei, &ji->global_env, sle) {
4352 if ((tmp = launch_data_new_string(ei->value))) {
4353 launch_data_dict_insert(dict, tmp, ei->key);
4354 }
4355 }
4356 }
4357 }
4358
4359 void
4360 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
4361 {
4362 struct envitem *ei;
4363 job_t ji;
4364
4365 if (jm->parentmgr) {
4366 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4367 }
4368
4369 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
4370 SLIST_FOREACH(ei, &ji->global_env, sle) {
4371 setenv(ei->key, ei->value, 1);
4372 }
4373 }
4374 }
4375
/* Diagnostic helper (Apple-internal builds only): scan every process on
 * the system and log any whose real, effective, or saved UID matches the
 * job's mach_uid — i.e. processes running under an account that no
 * longer exists. Called when a getpwuid() lookup for a job fails.
 */
void
job_log_pids_with_weird_uids(job_t j)
{
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	pid_t *pids = NULL;
	uid_t u = j->mach_uid;
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	pids = malloc(len);
	if (!job_assumes(j, pids != NULL)) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);

	/* libproc actually has some serious performance drawbacks when used over sysctl(3) in
	 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
	 * one kernel call, libproc requires that we get a list of PIDs we're interested in
	 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
	 * struct back in a single call for each one.
	 *
	 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
	 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
	 * libproc could go stale before we call proc_pidinfo().
	 *
	 * Note that proc_list*() APIs return the number of PIDs given back, not the number
	 * of bytes written to the buffer.
	 */
	if (job_assumes_zero_p(j, (kp_cnt = proc_listallpids(pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		/* We perhaps should not log a bug here if we get ESRCH back, due to the race
		 * detailed above.
		 */
		// proc_pidinfo() returns 0 on failure; the PID may have exited already.
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		uid_t i_euid = proc.pbsi_uid;
		uid_t i_uid = proc.pbsi_ruid;
		uid_t i_svuid = proc.pbsi_svuid;
		pid_t i_pid = pids[i];

		// Only report processes that actually run under the orphaned UID.
		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);

		// Temporarily disabled due to 5423935 and 4946119.
#if 0
		// Ask the accountless process to exit.
		(void)job_assumes_zero_p(j, kill2(i_pid, SIGTERM));
#endif
	}

out:
	free(pids);
}
4445
4446 static struct passwd *
4447 job_getpwnam(job_t j, const char *name)
4448 {
4449 /*
4450 * methodology for system daemons
4451 *
4452 * first lookup user record without any opendirectoryd interaction,
4453 * we don't know what interprocess dependencies might be in flight.
4454 * if that fails, we re-enable opendirectoryd interaction and
4455 * re-issue the lookup. We have to disable the libinfo L1 cache
4456 * otherwise libinfo will return the negative cache entry on the retry
4457 */
4458 #if !TARGET_OS_EMBEDDED
4459 struct passwd *pw = NULL;
4460
4461 if (pid1_magic && j->mgr == root_jobmgr) {
4462 // 1 == SEARCH_MODULE_FLAG_DISABLED
4463 si_search_module_set_flags("ds", 1);
4464 gL1CacheEnabled = false;
4465
4466 pw = getpwnam(name);
4467 si_search_module_set_flags("ds", 0);
4468 }
4469
4470 if (pw == NULL) {
4471 pw = getpwnam(name);
4472 }
4473
4474 return pw;
4475 #else
4476 #pragma unused (j)
4477 return getpwnam(name);
4478 #endif
4479 }
4480
4481 static struct group *
4482 job_getgrnam(job_t j, const char *name)
4483 {
4484 #if !TARGET_OS_EMBEDDED
4485 struct group *gr = NULL;
4486
4487 if (pid1_magic && j->mgr == root_jobmgr) {
4488 si_search_module_set_flags("ds", 1);
4489 gL1CacheEnabled = false;
4490
4491 gr = getgrnam(name);
4492
4493 si_search_module_set_flags("ds", 0);
4494 }
4495
4496 if (gr == NULL) {
4497 gr = getgrnam(name);
4498 }
4499
4500 return gr;
4501 #else
4502 #pragma unused (j)
4503 return getgrnam(name);
4504 #endif
4505 }
4506
4507 void
4508 job_postfork_test_user(job_t j)
4509 {
4510 // This function is all about 5201578
4511
4512 const char *home_env_var = getenv("HOME");
4513 const char *user_env_var = getenv("USER");
4514 const char *logname_env_var = getenv("LOGNAME");
4515 uid_t tmp_uid, local_uid = getuid();
4516 gid_t tmp_gid, local_gid = getgid();
4517 char shellpath[PATH_MAX];
4518 char homedir[PATH_MAX];
4519 char loginname[2000];
4520 struct passwd *pwe;
4521
4522
4523 if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
4524 && strcmp(user_env_var, logname_env_var) == 0)) {
4525 goto out_bad;
4526 }
4527
4528 if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
4529 job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
4530 goto out_bad;
4531 }
4532
4533 /*
4534 * We must copy the results of getpw*().
4535 *
4536 * Why? Because subsequent API calls may call getpw*() as a part of
4537 * their implementation. Since getpw*() returns a [now thread scoped]
4538 * global, we must therefore cache the results before continuing.
4539 */
4540
4541 tmp_uid = pwe->pw_uid;
4542 tmp_gid = pwe->pw_gid;
4543
4544 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4545 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4546 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4547
4548 if (strcmp(loginname, logname_env_var) != 0) {
4549 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
4550 goto out_bad;
4551 }
4552 if (strcmp(homedir, home_env_var) != 0) {
4553 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
4554 goto out_bad;
4555 }
4556 if (local_uid != tmp_uid) {
4557 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4558 'U', tmp_uid, local_uid);
4559 goto out_bad;
4560 }
4561 if (local_gid != tmp_gid) {
4562 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4563 'G', tmp_gid, local_gid);
4564 goto out_bad;
4565 }
4566
4567 return;
4568 out_bad:
4569 #if 0
4570 (void)job_assumes_zero_p(j, kill2(getppid(), SIGTERM));
4571 _exit(EXIT_FAILURE);
4572 #else
4573 job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
4574 #endif
4575 }
4576
/* Drop privileges to the job's configured user/group after fork.
 *
 * Only effective when running as root; otherwise delegates to
 * job_postfork_test_user() for a consistency check. Resolves the target
 * account via UserName or the Mach UID, then applies, in the required
 * order: setlogin(), setgid(), initgroups(), setuid(); finally exports
 * TMPDIR/SHELL/HOME/USER/LOGNAME. _exit()s on any failure.
 */
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	if (getuid() != 0) {
		return job_postfork_test_user(j);
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = job_getpwnam(j, j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(ESRCH);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_log_pids_with_weird_uids(j);
			_exit(ESRCH);
		}
	} else {
		// No user configured: stay root.
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	// An explicit GroupName overrides the user's primary group.
	if (j->groupname) {
		struct group *gre;

		if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(ESRCH);
		}

		desired_gid = gre->gr_gid;
	}

	if (job_assumes_zero_p(j, setlogin(loginname)) == -1) {
		_exit(EXIT_FAILURE);
	}

	if (job_assumes_zero_p(j, setgid(desired_gid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (likely(!j->no_init_groups)) {
#if 1
		if (job_assumes_zero_p(j, initgroups(loginname, desired_gid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#else
		/* Do our own little initgroups(). We do this to guarantee that we're
		 * always opted into dynamic group resolution in the kernel. initgroups(3)
		 * does not make this guarantee.
		 */
		int groups[NGROUPS], ngroups;

		// A failure here isn't fatal, and we'll still get data we can use.
		(void)job_assumes_zero_p(j, getgrouplist(j->username, desired_gid, groups, &ngroups));

		if (job_assumes_zero_p(j, syscall(SYS_initgroups, ngroups, groups, desired_uid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#endif
	}

	// setuid() last: after this we can no longer change gid or groups.
	if (job_assumes_zero_p(j, setuid(desired_uid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (likely(r > 0 && r < sizeof(tmpdirpath))) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	// overwrite == 0: job-specified environment values win.
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
4703
4704 void
4705 job_setup_attributes(job_t j)
4706 {
4707 struct limititem *li;
4708 struct envitem *ei;
4709
4710 if (unlikely(j->setnice)) {
4711 (void)job_assumes_zero_p(j, setpriority(PRIO_PROCESS, 0, j->nice));
4712 }
4713
4714 SLIST_FOREACH(li, &j->limits, sle) {
4715 struct rlimit rl;
4716
4717 if (job_assumes_zero_p(j, getrlimit(li->which, &rl) == -1)) {
4718 continue;
4719 }
4720
4721 if (li->sethard) {
4722 rl.rlim_max = li->lim.rlim_max;
4723 }
4724 if (li->setsoft) {
4725 rl.rlim_cur = li->lim.rlim_cur;
4726 }
4727
4728 if (setrlimit(li->which, &rl) == -1) {
4729 job_log_error(j, LOG_WARNING, "setrlimit()");
4730 }
4731 }
4732
4733 if (unlikely(!j->inetcompat && j->session_create)) {
4734 launchd_SessionCreate();
4735 }
4736
4737 if (unlikely(j->low_pri_io)) {
4738 (void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE));
4739 }
4740 if (unlikely(j->rootdir)) {
4741 (void)job_assumes_zero_p(j, chroot(j->rootdir));
4742 (void)job_assumes_zero_p(j, chdir("."));
4743 }
4744
4745 job_postfork_become_user(j);
4746
4747 if (unlikely(j->workingdir)) {
4748 if (chdir(j->workingdir) == -1) {
4749 if (errno == ENOENT || errno == ENOTDIR) {
4750 job_log(j, LOG_ERR, "Job specified non-existent working directory: %s", j->workingdir);
4751 } else {
4752 (void)job_assumes_zero(j, errno);
4753 }
4754 }
4755 }
4756
4757 if (unlikely(j->setmask)) {
4758 umask(j->mask);
4759 }
4760
4761 if (j->stdin_fd) {
4762 (void)job_assumes_zero_p(j, dup2(j->stdin_fd, STDIN_FILENO));
4763 } else {
4764 job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
4765 }
4766 job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
4767 job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
4768
4769 jobmgr_setup_env_from_other_jobs(j->mgr);
4770
4771 SLIST_FOREACH(ei, &j->env, sle) {
4772 setenv(ei->key, ei->value, 1);
4773 }
4774
4775 #if !TARGET_OS_EMBEDDED
4776 if (j->jetsam_properties) {
4777 (void)job_assumes_zero(j, proc_setpcontrol(PROC_SETPC_TERMINATE));
4778 }
4779 #endif
4780
4781 #if TARGET_OS_EMBEDDED
4782 if (j->main_thread_priority != 0) {
4783 struct sched_param params;
4784 bzero(&params, sizeof(params));
4785 params.sched_priority = j->main_thread_priority;
4786 (void)job_assumes_zero_p(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params));
4787 }
4788 #endif
4789
4790 /*
4791 * We'd like to call setsid() unconditionally, but we have reason to
4792 * believe that prevents launchd from being able to send signals to
4793 * setuid children. We'll settle for process-groups.
4794 */
4795 if (getppid() != 1) {
4796 (void)job_assumes_zero_p(j, setpgid(0, 0));
4797 } else {
4798 (void)job_assumes_zero_p(j, setsid());
4799 }
4800 }
4801
4802 void
4803 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
4804 {
4805 int fd;
4806
4807 if (!path) {
4808 return;
4809 }
4810
4811 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
4812 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
4813 return;
4814 }
4815
4816 (void)job_assumes_zero_p(j, dup2(fd, target_fd));
4817 (void)job_assumes_zero(j, runtime_close(fd));
4818 }
4819
/* Compute the next fire time for calendar interval `ci`, insert it into
 * the global list sorted by when_next, and (re)arm the single shared
 * absolute-time kevent to fire at the earliest entry's time.
 */
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	// cron-style evaluation: -1 fields are wildcards.
	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		// Weekday and day-of-month both set: whichever comes first wins.
		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	// Insert in ascending when_next order.
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		// ci must want to fire after every other timer, or there are no timers

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	// One shared kevent covers all calendar jobs; arm it for the soonest event.
	if (job_assumes_zero_p(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr)) != -1) {
		char time_string[100];
		size_t time_string_len;

		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		// Strip ctime_r's trailing newline before logging.
		if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
4875
4876 bool
4877 jobmgr_log_bug(aslmsg asl_message __attribute__((unused)), void *ctx, const char *message)
4878 {
4879 jobmgr_t jm = ctx;
4880 jobmgr_log(jm, LOG_ERR, "%s", message);
4881
4882 return true;
4883 }
4884
4885 bool
4886 job_log_bug(aslmsg asl_message __attribute__((unused)), void *ctx, const char *message)
4887 {
4888 job_t j = ctx;
4889 job_log(j, LOG_ERR, "%s", message);
4890
4891 return true;
4892 }
4893
/* Emit a LOG_PERF report for one job: cumulative rusage across all runs,
 * a live proc_pidinfo() snapshot for the current instance (if running),
 * and the job's advertised Mach services. No-op for anonymous jobs or
 * when performance logging is disabled.
 */
void
job_log_perf_statistics(job_t j)
{
	if (j->anonymous) {
		return;
	}
	if (!launchd_log_perf) {
		return;
	}

	job_log(j, LOG_PERF, "Job is currently %srunning.", j->p ? "" : "not ");
	job_log(j, LOG_PERF, "Number of runs: %u", j->nruns);
	// Cumulative statistics, accumulated at reap time across all past runs.
	if (j->nruns) {
		job_log(j, LOG_PERF, "Total runtime: %06f.", (double)j->trt / (double)NSEC_PER_SEC);
		job_log(j, LOG_PERF, "Total user time: %ld.%06u", j->ru.ru_utime.tv_sec, j->ru.ru_utime.tv_usec);
		job_log(j, LOG_PERF, "Total system time: %ld.%06u", j->ru.ru_stime.tv_sec, j->ru.ru_stime.tv_usec);
		job_log(j, LOG_PERF, "Largest maximum resident size: %lu", j->ru.ru_maxrss);
		job_log(j, LOG_PERF, "Total integral shared memory size: %lu", j->ru.ru_ixrss);
		job_log(j, LOG_PERF, "Total integral unshared data size: %lu", j->ru.ru_idrss);
		job_log(j, LOG_PERF, "Total integral unshared stack size: %lu", j->ru.ru_isrss);
		job_log(j, LOG_PERF, "Total page reclaims: %lu", j->ru.ru_minflt);
		job_log(j, LOG_PERF, "Total page faults: %lu", j->ru.ru_majflt);
		job_log(j, LOG_PERF, "Total swaps: %lu", j->ru.ru_nswap);
		job_log(j, LOG_PERF, "Total input ops: %lu", j->ru.ru_inblock);
		job_log(j, LOG_PERF, "Total output ops: %lu", j->ru.ru_oublock);
		job_log(j, LOG_PERF, "Total messages sent: %lu", j->ru.ru_msgsnd);
		job_log(j, LOG_PERF, "Total messages received: %lu", j->ru.ru_msgrcv);
		job_log(j, LOG_PERF, "Total signals received: %lu", j->ru.ru_nsignals);
		job_log(j, LOG_PERF, "Total voluntary context switches: %lu", j->ru.ru_nvcsw);
		job_log(j, LOG_PERF, "Total involuntary context switches: %lu", j->ru.ru_nivcsw);
	}

	// Live snapshot of the currently-running instance, if any.
	if (j->p) {
		uint64_t rt = runtime_get_nanoseconds_since(j->start_time);
		job_log(j, LOG_PERF, "Current instance wall time: %06f", (double)rt / (double)NSEC_PER_SEC);

		struct proc_taskinfo ti;
		int r = proc_pidinfo(j->p, PROC_PIDTASKINFO, 1, &ti, PROC_PIDTASKINFO_SIZE);
		if (r > 0) {
			job_log(j, LOG_PERF, "Current instance virtual size: %llu", ti.pti_virtual_size);
			job_log(j, LOG_PERF, "Current instance resident size: %llu", ti.pti_resident_size);
			job_log(j, LOG_PERF, "Current instance user time: %06f", (double)ti.pti_total_user / (double)NSEC_PER_SEC);
			job_log(j, LOG_PERF, "Current instance system time: %06f", (double)ti.pti_total_system / (double)NSEC_PER_SEC);
			job_log(j, LOG_PERF, "Current instance number of user threads: %llu", ti.pti_threads_user);
			job_log(j, LOG_PERF, "Current instance number of system threads: %llu", ti.pti_threads_system);
			job_log(j, LOG_PERF, "Current instance default thread policy: %d", ti.pti_policy);
			job_log(j, LOG_PERF, "Current instance number of page faults: %d", ti.pti_faults);
			job_log(j, LOG_PERF, "Current instance number of page-ins: %d", ti.pti_pageins);
			job_log(j, LOG_PERF, "Current instance number of COW faults: %d", ti.pti_cow_faults);
			job_log(j, LOG_PERF, "Current instance number of Mach syscalls: %d", ti.pti_syscalls_mach);
			job_log(j, LOG_PERF, "Current instance number of Unix syscalls: %d", ti.pti_syscalls_unix);
			job_log(j, LOG_PERF, "Current instance number of threads: %d", ti.pti_threadnum);
			job_log(j, LOG_PERF, "Current instance number of running threads: %d", ti.pti_numrunning);
			job_log(j, LOG_PERF, "Current instance task priority: %d", ti.pti_priority);
		} else {
			job_log(j, LOG_PERF, "proc_pidinfo(%d): %d: %s", j->p, errno, strerror(errno));
		}
	}

	if (!j->ondemand) {
		job_log(j, LOG_PERF, "Job is configured to always run.");
	}

	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (msi->upfront) {
			job_log(j, LOG_PERF, "Job advertises service in plist: %s", msi->name);
		} else if (!(msi->event_channel || msi->per_pid)) {
			job_log(j, LOG_PERF, "Job has dynamically registered service: %s", msi->name);
		} else if (msi->per_pid) {
			job_log(j, LOG_PERF, "Job advertises per-PID service: %s", msi->name);
		}
	}
}
4968
/* Core job logger: formats `msg` (optionally appending strerror(err))
 * and emits it via launchd_vsyslog() with attribution to job `j`.
 * A NULL job logs under placeholder labels. Honors the job's per-job
 * debug log mask for the duration of the call.
 */
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	const char *label2use = j ? j->label : "com.apple.launchd.job-unknown";
	const char *mgr2use = j ? j->mgr->name : "com.apple.launchd.jobmanager-unknown";
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	struct launchd_syslog_attr attr = {
		.from_name = launchd_label,
		.about_name = label2use,
		.session_name = mgr2use,
		.priority = pri,
		.from_uid = getuid(),
		.from_pid = getpid(),
		.about_pid = j ? j->p : 0,
	};

	/* Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(2), but before the exec*(3). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	// +200 leaves room for the "<label>: <errno>: <strerror>" decorations.
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s: %d: %s", msg, err, strerror(err));
#else
		snprintf(newmsg, newmsgsz, "(%s) %s: %d: %s", label2use, msg, err, strerror(err));
#endif
	} else {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s", msg);
#else
		snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
#endif
	}

	// Temporarily widen the log mask for jobs launched with Debug enabled.
	if (j && unlikely(j->debug)) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	launchd_vsyslog(&attr, newmsg, ap);

	if (j && unlikely(j->debug)) {
		setlogmask(oldmask);
	}
}
5023
5024 void
5025 job_log_error(job_t j, int pri, const char *msg, ...)
5026 {
5027 va_list ap;
5028
5029 va_start(ap, msg);
5030 job_logv(j, pri, errno, msg, ap);
5031 va_end(ap);
5032 }
5033
5034 void
5035 job_log(job_t j, int pri, const char *msg, ...)
5036 {
5037 va_list ap;
5038
5039 va_start(ap, msg);
5040 job_logv(j, pri, 0, msg, ap);
5041 va_end(ap);
5042 }
5043
#if 0
// Compiled out: currently unreferenced. Job-manager analogue of
// job_log_error(): forwards the current errno through jobmgr_logv().
void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}
#endif
5055
5056 void
5057 jobmgr_log_perf_statistics(jobmgr_t jm)
5058 {
5059 jobmgr_t jmi = NULL;
5060 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
5061 jobmgr_log_perf_statistics(jmi);
5062 }
5063
5064 if (jm->xpc_singleton) {
5065 jobmgr_log(jm, LOG_PERF, "XPC Singleton Domain: %s", jm->shortdesc);
5066 } else if (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
5067 jobmgr_log(jm, LOG_PERF, "XPC Private Domain: %s", jm->owner);
5068 } else if (jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) {
5069 jobmgr_log(jm, LOG_PERF, "Created via bootstrap_subset()");
5070 }
5071
5072 jobmgr_log(jm, LOG_PERF, "Jobs in job manager:");
5073
5074 job_t ji = NULL;
5075 LIST_FOREACH(ji, &jm->jobs, sle) {
5076 job_log_perf_statistics(ji);
5077 }
5078
5079 jobmgr_log(jm, LOG_PERF, "End of job list.");
5080 }
5081
5082 void
5083 jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
5084 {
5085 va_list ap;
5086
5087 va_start(ap, msg);
5088 jobmgr_logv(jm, pri, 0, msg, ap);
5089 va_end(ap);
5090 }
5091
/* Core job-manager logger: prefixes the message with the manager's name
 * and walks up the manager hierarchy, accumulating "parent: child: msg"
 * prefixes; the root manager finally emits via launchd_vsyslog().
 * A NULL manager is treated as the root job manager.
 */
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	if (!jm) {
		jm = root_jobmgr;
	}

	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	// Worst case every character is '%' and must be doubled.
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	// Escape '%' in the manager name: newmsg becomes a format string below.
	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		// Recurse with the prefixed message; `ap` is only consumed at the root.
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct launchd_syslog_attr attr = {
			.from_name = launchd_label,
			.about_name = launchd_label,
			.session_name = jm->name,
			.priority = pri,
			.from_uid = getuid(),
			.from_pid = getpid(),
			.about_pid = getpid(),
		};

		launchd_vsyslog(&attr, newmsg, ap);
	}
}
5138
// Context threaded through launch_data_dict_iterate() while parsing a
// StartCalendarInterval dictionary into a struct tm.
struct cal_dict_walk {
	job_t j; // job being imported; used for log attribution
	struct tm tmptm; // accumulated spec; tm_sec == -1 flags a parse error
};
5143
// Dictionary-iteration callback: validate one StartCalendarInterval
// key/value pair and store it into cdw->tmptm. Any invalid value sets
// tm_sec = -1, which the caller checks as an error sentinel.
void
calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
{
	struct cal_dict_walk *cdw = context;
	struct tm *tmptm = &cdw->tmptm;
	job_t j = cdw->j;
	int64_t val;

	if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
		// hack to let caller know something went wrong
		tmptm->tm_sec = -1;
		return;
	}

	val = launch_data_get_integer(obj);

	if (val < 0) {
		// Negative values are logged but (historically) not treated as
		// fatal: tm_sec is left alone and the field is simply not set.
		job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
		if (val > 59) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_min = (typeof(tmptm->tm_min)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
		if (val > 23) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
		if (val < 1 || val > 31) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
		// 0-7 accepted: cron convention where both 0 and 7 mean Sunday.
		if (val > 7) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
		if (val > 12) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
			tmptm->tm_mon -= 1; // 4798263 cron compatibility
		}
	}
}
5200
5201 bool
5202 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5203 {
5204 struct cal_dict_walk cdw;
5205
5206 cdw.j = j;
5207 memset(&cdw.tmptm, 0, sizeof(0));
5208
5209 cdw.tmptm.tm_min = -1;
5210 cdw.tmptm.tm_hour = -1;
5211 cdw.tmptm.tm_mday = -1;
5212 cdw.tmptm.tm_wday = -1;
5213 cdw.tmptm.tm_mon = -1;
5214
5215 if (!job_assumes(j, obj != NULL)) {
5216 return false;
5217 }
5218
5219 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5220 return false;
5221 }
5222
5223 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5224
5225 if (unlikely(cdw.tmptm.tm_sec == -1)) {
5226 return false;
5227 }
5228
5229 return calendarinterval_new(j, &cdw.tmptm);
5230 }
5231
5232 bool
5233 calendarinterval_new(job_t j, struct tm *w)
5234 {
5235 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5236
5237 if (!job_assumes(j, ci != NULL)) {
5238 return false;
5239 }
5240
5241 ci->when = *w;
5242 ci->job = j;
5243
5244 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
5245
5246 calendarinterval_setalarm(j, ci);
5247
5248 runtime_add_weak_ref();
5249
5250 return true;
5251 }
5252
// Tear down a calendar trigger: unlink it from both the per-job list
// and the global time-sorted list, then release it.
void
calendarinterval_delete(job_t j, struct calendarinterval *ci)
{
	SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
	LIST_REMOVE(ci, global_sle);

	free(ci);

	// Balance the weak reference taken in calendarinterval_new().
	runtime_del_weak_ref();
}
5263
5264 void
5265 calendarinterval_sanity_check(void)
5266 {
5267 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5268 time_t now = time(NULL);
5269
5270 if (unlikely(ci && (ci->when_next < now))) {
5271 (void)jobmgr_assumes_zero_p(root_jobmgr, raise(SIGUSR1));
5272 }
5273 }
5274
// Timer callback: fire every calendar trigger whose deadline has
// passed, re-arm it, and schedule its job for launch.
void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	// The global list is sorted by when_next, so we can stop at the
	// first entry still in the future.
	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		if (ci->when_next > now) {
			break;
		}

		// Unlink before re-arming; calendarinterval_setalarm()
		// presumably re-inserts the entry at its new sorted position
		// (the unlink-first ordering suggests so — confirm there).
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
5295
5296 bool
5297 socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt)
5298 {
5299 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5300
5301 if (!job_assumes(j, sg != NULL)) {
5302 return false;
5303 }
5304
5305 sg->fds = calloc(1, fd_cnt * sizeof(int));
5306 sg->fd_cnt = fd_cnt;
5307
5308 if (!job_assumes(j, sg->fds != NULL)) {
5309 free(sg);
5310 return false;
5311 }
5312
5313 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
5314 strcpy(sg->name_init, name);
5315
5316 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5317
5318 runtime_add_weak_ref();
5319
5320 return true;
5321 }
5322
// Close all descriptors in the socket group, unlink it from the job,
// and free it.
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		// 5480306
		if (job_assumes_zero(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
			&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			(void)job_assumes(j, unlink(sun->sun_path) != -1);
			// We might conditionally need to delete a directory here
		}
#endif
		(void)job_assumes_zero_p(j, runtime_close(sg->fds[i]));
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	// Balance the weak reference taken in socketgroup_new().
	runtime_del_weak_ref();
}
5351
5352 void
5353 socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
5354 {
5355 struct kevent kev[sg->fd_cnt];
5356 char buf[10000];
5357 unsigned int i, buf_off = 0;
5358
5359 for (i = 0; i < sg->fd_cnt; i++) {
5360 EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
5361 buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
5362 }
5363
5364 job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);
5365
5366 (void)job_assumes_zero_p(j, kevent_bulk_mod(kev, sg->fd_cnt));
5367
5368 for (i = 0; i < sg->fd_cnt; i++) {
5369 (void)job_assumes(j, kev[i].flags & EV_ERROR);
5370 errno = (typeof(errno)) kev[i].data;
5371 (void)job_assumes_zero(j, kev[i].data);
5372 }
5373 }
5374
5375 void
5376 socketgroup_ignore(job_t j, struct socketgroup *sg)
5377 {
5378 socketgroup_kevent_mod(j, sg, false);
5379 }
5380
5381 void
5382 socketgroup_watch(job_t j, struct socketgroup *sg)
5383 {
5384 socketgroup_kevent_mod(j, sg, true);
5385 }
5386
5387 void
5388 socketgroup_callback(job_t j)
5389 {
5390 job_dispatch(j, true);
5391 }
5392
5393 bool
5394 envitem_new(job_t j, const char *k, const char *v, bool global)
5395 {
5396 if (global && !launchd_allow_global_dyld_envvars) {
5397 if (strncmp("DYLD_", k, sizeof("DYLD_") - 1) == 0) {
5398 job_log(j, LOG_ERR, "Ignoring global environment variable submitted by job (variable=value): %s=%s", k, v);
5399 return false;
5400 }
5401 }
5402
5403 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
5404
5405 if (!job_assumes(j, ei != NULL)) {
5406 return false;
5407 }
5408
5409 strcpy(ei->key_init, k);
5410 ei->value = ei->key_init + strlen(k) + 1;
5411 strcpy(ei->value, v);
5412
5413 if (global) {
5414 if (SLIST_EMPTY(&j->global_env)) {
5415 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5416 }
5417 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5418 } else {
5419 SLIST_INSERT_HEAD(&j->env, ei, sle);
5420 }
5421
5422 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
5423
5424 return true;
5425 }
5426
5427 void
5428 envitem_delete(job_t j, struct envitem *ei, bool global)
5429 {
5430 if (global) {
5431 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
5432 if (SLIST_EMPTY(&j->global_env)) {
5433 LIST_REMOVE(j, global_env_sle);
5434 }
5435 } else {
5436 SLIST_REMOVE(&j->env, ei, envitem, sle);
5437 }
5438
5439 free(ei);
5440 }
5441
5442 void
5443 envitem_setup(launch_data_t obj, const char *key, void *context)
5444 {
5445 job_t j = context;
5446
5447 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5448 return;
5449 }
5450
5451 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5452 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
5453 } else {
5454 job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5455 }
5456 }
5457
// Set or update job j's resource limit for the RLIMIT_* selector w.
// Which half (soft vs hard) is written depends on whether we are
// currently importing HardResourceLimits.
bool
limititem_update(job_t j, int w, rlim_t r)
{
	struct limititem *li;

	SLIST_FOREACH(li, &j->limits, sle) {
		if (li->which == w) {
			break;
		}
	}

	// SLIST_FOREACH leaves li == NULL when no existing entry matched;
	// create one in that case.
	if (li == NULL) {
		li = calloc(1, sizeof(struct limititem));

		if (!job_assumes(j, li != NULL)) {
			return false;
		}

		SLIST_INSERT_HEAD(&j->limits, li, sle);

		li->which = w;
	}

	if (j->importing_hard_limits) {
		li->lim.rlim_max = r;
		li->sethard = true;
	} else {
		li->lim.rlim_cur = r;
		li->setsoft = true;
	}

	return true;
}
5491
5492 void
5493 limititem_delete(job_t j, struct limititem *li)
5494 {
5495 SLIST_REMOVE(&j->limits, li, limititem, sle);
5496
5497 free(li);
5498 }
5499
#if HAVE_SANDBOX
// Dictionary-iteration callback for SandboxFlags: accumulate sandbox
// flag bits on the job.
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	// Sandbox flags must be booleans; warn on anything else.
	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
	} else if (launch_data_get_bool(obj) && strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
5520
5521 void
5522 limititem_setup(launch_data_t obj, const char *key, void *context)
5523 {
5524 job_t j = context;
5525 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
5526 rlim_t rl;
5527
5528 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
5529 return;
5530 }
5531
5532 rl = launch_data_get_integer(obj);
5533
5534 for (i = 0; i < limits_cnt; i++) {
5535 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
5536 break;
5537 }
5538 }
5539
5540 if (i == limits_cnt) {
5541 return;
5542 }
5543
5544 limititem_update(j, launchd_keys2limits[i].val, rl);
5545 }
5546
// Decide whether an exited job should be garbage-collected (true) or
// kept around for relaunch (false).
bool
job_useless(job_t j)
{
	// start_time != 0 means the job has actually run at least once.
	if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
		if (j->legacy_LS_job && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->shutdown_monitor) {
		// The shutdown monitor stays until the very end of shutdown.
		return false;
	} else if (j->mgr->shutting_down && !j->mgr->parentmgr) {
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		if (total_children == 0 && !j->anonymous) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
		}
		return true;
	} else if (j->legacy_mach_job) {
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	} else {
		/* If the job's executable does not have any valid architectures (for
		 * example, if it's a PowerPC-only job), then we don't even bother
		 * trying to relaunch it, as we have no reasonable expectation that
		 * the situation will change.
		 *
		 * <rdar://problem/9106979>
		 */
		// NOTE(review): WEXITSTATUS is used without a WIFEXITED guard
		// here — presumably acceptable because EBADARCH only appears as
		// a normal exit status; confirm before changing.
		if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
			job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
			return true;
		}
	}

	return false;
}
5591
// Evaluate the job's KeepAlive criteria. Returns true when at least
// one criterion says the job should be running right now.
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
	bool is_not_kextd = (launchd_apple_internal || (strcmp(j->label, "com.apple.kextd") != 0));

	if (unlikely(j->mgr->shutting_down)) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
		return false;
	}

	if (unlikely(j->needs_kickoff)) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	// A queued message on any of the job's Mach services means a client
	// is waiting on it.
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}

	/* TODO: Coalesce external events and semaphore items, since they're basically
	 * the same thing.
	 */
	struct externalevent *ei = NULL;
	LIST_FOREACH(ei, &j->events, job_le) {
		if (ei->state == ei->wanted_state) {
			return true;
		}
	}

	// Each case pair below intentionally falls through: the first case
	// flips wanted_state to true, the second performs the comparison.
	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			/* FALLTHROUGH */
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			/* FALLTHROUGH */
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case CRASHED:
			wanted_state = true;
			/* FALLTHROUGH */
		case DID_NOT_CRASH:
			if (j->crashed == wanted_state) {
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(NULL, si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(NULL, si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		}
	}

	return false;
}
5705
// Return a human-readable reason the job is still considered active,
// or NULL when it is fully inactive and safe to remove/relaunch.
const char *
job_active(job_t j)
{
	if (j->p && j->shutdown_monitor) {
		return "Monitoring shutdown";
	}
	if (j->p) {
		return "PID is still valid";
	}

	if (j->priv_port_has_senders) {
		return "Privileged Port still has outstanding senders";
	}

	struct machservice *ms;
	SLIST_FOREACH(ms, &j->machservices, sle) {
		/* If we've simulated an exit, we mark the job as non-active, even
		 * though doing so will leave it in an unsafe state. We do this so that
		 * shutdown can proceed. See <rdar://problem/11126530>.
		 */
		if (!j->workaround9359725 && ms->recv && machservice_active(ms)) {
			job_log(j, LOG_INFO, "Mach service is still active: %s", ms->name);
			return "Mach service is still active";
		}
	}

	return NULL;
}
5734
5735 void
5736 machservice_watch(job_t j, struct machservice *ms)
5737 {
5738 if (ms->recv) {
5739 (void)job_assumes_zero(j, runtime_add_mport(ms->port, NULL));
5740 }
5741 }
5742
// Stop receiving on a service's port by pulling it out of our port set.
void
machservice_ignore(job_t j, struct machservice *ms)
{
	/* We only add ports whose receive rights we control into the port set, so
	 * don't attempt to remove the service from the port set if we didn't put
	 * it there in the first place. Otherwise, we could wind up trying to
	 * access a bogus index (like MACH_PORT_DEAD) or zeroing a valid one out.
	 *
	 * <rdar://problem/10898014>
	 */
	if (ms->recv) {
		(void)job_assumes_zero(j, runtime_remove_mport(ms->port));
	}
}
5757
// Destroy the service's current receive right and mint a fresh one,
// bumping the generation count. The port value changes, so the
// port-hash entry is unlinked first and re-inserted with the new port.
void
machservice_resetport(job_t j, struct machservice *ms)
{
	LIST_REMOVE(ms, port_hash_sle);
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	ms->gen_num++;
	(void)job_assumes_zero(j, launchd_mport_create_recv(&ms->port));
	(void)job_assumes_zero(j, launchd_mport_make_send(ms->port));
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
5770
// Tag the service's port with the leading bytes of the owning
// program's basename via mach_port_set_context(), presumably so the
// port's owner is identifiable in debugging tools — confirm intent.
void
machservice_stamp_port(job_t j, struct machservice *ms)
{
	mach_port_context_t ctx = 0;
	char *where2get = j->prog ? j->prog : j->argv[0];

	char *prog = NULL;
	if ((prog = strrchr(where2get, '/'))) {
		prog++;
	} else {
		prog = where2get;
	}

	// Intentionally may not NUL-terminate: ctx is a fixed-size integer
	// tag, not a C string.
	(void)strncpy((char *)&ctx, prog, sizeof(ctx));
#if __LITTLE_ENDIAN__
#if __LP64__
	// Byte-swap so the copied characters land in big-endian (string)
	// order within the integer context value.
	ctx = OSSwapBigToHostInt64(ctx);
#else
	ctx = OSSwapBigToHostInt32(ctx);
#endif
#endif

	(void)job_assumes_zero(j, mach_port_set_context(mach_task_self(), ms->port, ctx));
}
5795
// Create a MachService owned by job j. If *serviceport is MACH_PORT_NULL
// we create the receive right ourselves (and hand back a send right in
// *serviceport); otherwise we adopt the caller-supplied send right and
// mark the service active. Returns NULL on failure.
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	/* Don't create new MachServices for dead ports. This is primarily for
	 * clients who use bootstrap_register2(). They can pass in a send right, but
	 * then that port can immediately go dead. Hilarity ensues.
	 *
	 * <rdar://problem/10898014>
	 */
	if (*serviceport == MACH_PORT_DEAD) {
		return NULL;
	}

	// Single allocation: struct followed by the service name.
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
	if (!job_assumes(j, ms != NULL)) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->gen_num = 1;
	ms->per_pid = pid_local;

	if (likely(*serviceport == MACH_PORT_NULL)) {
		if (job_assumes_zero(j, launchd_mport_create_recv(&ms->port)) != KERN_SUCCESS) {
			goto out_bad;
		}

		if (job_assumes_zero(j, launchd_mport_make_send(ms->port)) != KERN_SUCCESS) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);

	// Decide which manager's namespace the name is registered in.
	jobmgr_t where2put = j->mgr;
	// XPC domains are separate from Mach bootstraps.
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		if (launchd_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2put = root_jobmgr;
		}
	}

	/* Don't allow MachServices added by multiple-instance jobs to be looked up
	 * by others. We could just do this with a simple bit, but then we'd have to
	 * uniquify the names ourselves to avoid collisions. This is just easier.
	 */
	if (!j->dedicated_instance) {
		LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	}
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	if (ms->recv) {
		machservice_stamp_port(j, ms);
	}

	job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);

	return ms;
out_bad2:
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
out_bad:
	free(ms);
	return NULL;
}
5866
5867 struct machservice *
5868 machservice_new_alias(job_t j, struct machservice *orig)
5869 {
5870 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
5871 if (job_assumes(j, ms != NULL)) {
5872 strcpy((char *)ms->name, orig->name);
5873 ms->alias = orig;
5874 ms->job = j;
5875
5876 LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
5877 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
5878 jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
5879 }
5880
5881 return ms;
5882 }
5883
5884 bootstrap_status_t
5885 machservice_status(struct machservice *ms)
5886 {
5887 ms = ms->alias ? ms->alias : ms;
5888 if (ms->isActive) {
5889 return BOOTSTRAP_STATUS_ACTIVE;
5890 } else if (ms->job->ondemand) {
5891 return BOOTSTRAP_STATUS_ON_DEMAND;
5892 } else {
5893 return BOOTSTRAP_STATUS_INACTIVE;
5894 }
5895 }
5896
// Install the crash/resource exception handler for target_task (or,
// for PID 1 with no task, for the whole host). The handler port is the
// global exception server unless the job names an alternate handler or
// asks for launchd's internal one.
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (unlikely(j->alt_exc_handler)) {
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (likely(ms)) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (unlikely(j->internal_exc_handler)) {
		exc_port = runtime_get_kernel_port();
	} else if (unlikely(!exc_port)) {
		// No exception server configured at all; nothing to install.
		return;
	}

	// Pick the thread-state flavor delivered with exceptions on this
	// architecture.
#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (likely(target_task)) {
		kern_return_t kr = task_set_exception_ports(target_task, EXC_MASK_CRASH | EXC_MASK_RESOURCE, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f);
		if (kr) {
			// MACH_SEND_INVALID_DEST just means the task already died;
			// anything else is unexpected.
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			} else {
				job_log(j, LOG_WARNING, "Task died before exception port could be set.");
			}
		}
	} else if (pid1_magic && the_exception_server) {
		mach_port_t mhp = mach_host_self();
		(void)job_assumes_zero(j, host_set_exception_ports(mhp, EXC_MASK_CRASH | EXC_MASK_RESOURCE, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f));
		(void)job_assumes_zero(j, launchd_mport_deallocate(mhp));
	}
}
5942
5943 void
5944 job_set_exception_port(job_t j, mach_port_t port)
5945 {
5946 if (unlikely(!the_exception_server)) {
5947 the_exception_server = port;
5948 job_setup_exception_port(j, 0);
5949 } else {
5950 job_log(j, LOG_WARNING, "The exception server is already claimed!");
5951 }
5952 }
5953
5954 void
5955 machservice_setup_options(launch_data_t obj, const char *key, void *context)
5956 {
5957 struct machservice *ms = context;
5958 mach_port_t mhp = mach_host_self();
5959 int which_port;
5960 bool b;
5961
5962 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
5963 return;
5964 }
5965
5966 switch (launch_data_get_type(obj)) {
5967 case LAUNCH_DATA_INTEGER:
5968 which_port = (int)launch_data_get_integer(obj); // XXX we should bound check this...
5969 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
5970 switch (which_port) {
5971 case TASK_KERNEL_PORT:
5972 case TASK_HOST_PORT:
5973 case TASK_NAME_PORT:
5974 case TASK_BOOTSTRAP_PORT:
5975 /* I find it a little odd that zero isn't reserved in the header.
5976 * Normally Mach is fairly good about this convention...
5977 */
5978 case 0:
5979 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
5980 break;
5981 default:
5982 ms->special_port_num = which_port;
5983 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
5984 break;
5985 }
5986 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
5987 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
5988 (void)job_assumes_zero(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)));
5989 } else {
5990 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
5991 }
5992 }
5993 case LAUNCH_DATA_BOOL:
5994 b = launch_data_get_bool(obj);
5995 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
5996 ms->debug_on_close = b;
5997 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
5998 ms->reset = b;
5999 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
6000 ms->hide = b;
6001 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
6002 job_set_exception_port(ms->job, ms->port);
6003 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
6004 ms->kUNCServer = b;
6005 (void)job_assumes_zero(ms->job, host_set_UNDServer(mhp, ms->port));
6006 }
6007 break;
6008 case LAUNCH_DATA_STRING:
6009 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
6010 const char *option = launch_data_get_string(obj);
6011 if (strcasecmp(option, "One") == 0) {
6012 ms->drain_one_on_crash = true;
6013 } else if (strcasecmp(option, "All") == 0) {
6014 ms->drain_all_on_crash = true;
6015 }
6016 }
6017 break;
6018 case LAUNCH_DATA_DICTIONARY:
6019 job_set_exception_port(ms->job, ms->port);
6020 break;
6021 default:
6022 break;
6023 }
6024
6025 (void)job_assumes_zero(ms->job, launchd_mport_deallocate(mhp));
6026 }
6027
6028 void
6029 machservice_setup(launch_data_t obj, const char *key, void *context)
6030 {
6031 job_t j = context;
6032 struct machservice *ms;
6033 mach_port_t p = MACH_PORT_NULL;
6034
6035 if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
6036 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
6037 return;
6038 }
6039
6040 if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
6041 return;
6042 }
6043
6044 ms->isActive = false;
6045 ms->upfront = true;
6046
6047 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
6048 launch_data_dict_iterate(obj, machservice_setup_options, ms);
6049 }
6050 }
6051
// One pass of shutdown garbage collection for a job manager tree.
// Recurses into submanagers, stops/removes jobs, and — once everything
// else is gone — removes the manager itself. Returns the manager, or
// NULL if it was removed.
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	jobmgr_t jmi = NULL, jmn = NULL;
	// SAFE variant: recursion may remove the submanager we're visiting.
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	if (!jm->shutting_down) {
		return jm;
	}

	if (SLIST_EMPTY(&jm->submgrs)) {
		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
		SLIST_FOREACH(jmi, &jm->submgrs, sle) {
			jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
		}
	}

	size_t actives = 0;
	job_t ji = NULL, jn = NULL;
	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
		if (ji->anonymous) {
			continue;
		}

		// Let the shutdown monitor be up until the very end.
		if (ji->shutdown_monitor) {
			continue;
		}

		/* On our first pass through, open a transaction for all the jobs that
		 * need to be dirty at shutdown. We'll close these transactions once the
		 * jobs that do not need to be dirty at shutdown have all exited.
		 */
		if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
			job_open_shutdown_transaction(ji);
		}

		const char *active = job_active(ji);
		if (!active) {
			job_remove(ji);
		} else {
			job_log(ji, LOG_DEBUG, "Job is active: %s", active);
			job_stop(ji);

			// dirty-at-shutdown jobs don't count as blockers yet;
			// they are cleaned in the second phase below.
			if (!ji->dirty_at_shutdown) {
				actives++;
			}

			if (ji->clean_kill) {
				job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
			} else {
				job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
			}
		}
	}

	jm->shutdown_jobs_dirtied = true;
	if (actives == 0) {
		if (!jm->shutdown_jobs_cleaned) {
			/* Once all normal jobs have exited, we clean the dirty-at-shutdown
			 * jobs and make them into normal jobs so that the above loop will
			 * handle them appropriately.
			 */
			LIST_FOREACH(ji, &jm->jobs, sle) {
				if (ji->anonymous) {
					continue;
				}

				if (!job_active(ji)) {
					continue;
				}

				if (ji->shutdown_monitor) {
					continue;
				}

				job_close_shutdown_transaction(ji);
				actives++;
			}

			jm->shutdown_jobs_cleaned = true;
		}

		if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
			/* We may be in a situation where the shutdown monitor is all that's
			 * left, in which case we want to stop it. Like dirty-at-shutdown
			 * jobs, we turn it back into a normal job so that the main loop
			 * treats it appropriately.
			 *
			 * See:
			 * <rdar://problem/10756306>
			 * <rdar://problem/11034971>
			 * <rdar://problem/11549541>
			 */
			if (jm->monitor_shutdown && _launchd_shutdown_monitor) {
				/* The rest of shutdown has completed, so we can kill the shutdown
				 * monitor now like it was any other job.
				 */
				_launchd_shutdown_monitor->shutdown_monitor = false;

				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
				job_stop(_launchd_shutdown_monitor);
				_launchd_shutdown_monitor = NULL;
			} else {
				jobmgr_log(jm, LOG_DEBUG, "Removing.");
				jobmgr_remove(jm);
				return NULL;
			}
		}
	}

	return jm;
}
6169
6170 void
6171 jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6172 {
6173 /* I maintain that stray processes should be at the mercy of launchd during
6174 * shutdown, but nevertheless, things like diskimages-helper can stick
6175 * around, and SIGKILLing them can result in data loss. So we send SIGTERM
6176 * to all the strays and don't wait for them to exit before moving on.
6177 *
6178 * See rdar://problem/6562592
6179 */
6180 size_t i = 0;
6181 for (i = 0; i < np; i++) {
6182 if (p[i] != 0) {
6183 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
6184 (void)jobmgr_assumes_zero_p(jm, kill2(p[i], SIGTERM));
6185 }
6186 }
6187 }
6188
6189 void
6190 jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
6191 {
6192 size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6193 pid_t *pids = NULL;
6194 int i = 0, kp_cnt = 0;
6195
6196 if (likely(jm->parentmgr || !pid1_magic)) {
6197 return;
6198 }
6199
6200 if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
6201 return;
6202 }
6203
6204 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6205
6206 if (jobmgr_assumes_zero_p(jm, (kp_cnt = proc_listallpids(pids, len))) == -1) {
6207 goto out;
6208 }
6209
6210 pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
6211 for (i = 0; i < kp_cnt; i++) {
6212 struct proc_bsdshortinfo proc;
6213 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6214 if (errno != ESRCH) {
6215 (void)jobmgr_assumes_zero(jm, errno);
6216 }
6217
6218 kp_skipped++;
6219 continue;
6220 }
6221
6222 pid_t p_i = pids[i];
6223 pid_t pp_i = proc.pbsi_ppid;
6224 pid_t pg_i = proc.pbsi_pgid;
6225 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6226 const char *n = proc.pbsi_comm;
6227
6228 if (unlikely(p_i == 0 || p_i == 1)) {
6229 kp_skipped++;
6230 continue;
6231 }
6232
6233 if (_launchd_shutdown_monitor && pp_i == _launchd_shutdown_monitor->p) {
6234 kp_skipped++;
6235 continue;
6236 }
6237
6238 // We might have some jobs hanging around that we've decided to shut down in spite of.
6239 job_t j = jobmgr_find_by_pid(jm, p_i, false);
6240 if (!j || (j && j->anonymous)) {
6241 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
6242
6243 int status = 0;
6244 if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
6245 if (jobmgr_assumes_zero(jm, waitpid(p_i, &status, WNOHANG)) == 0) {
6246 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6247 }
6248 kp_skipped++;
6249 } else {
6250 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6251 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6252 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6253 * hints to the kernel along the way, so that it could shutdown certain subsystems when
6254 * their userspace emissaries go away, before the call to reboot(2).
6255 */
6256 if (leader && leader->ignore_pg_at_shutdown) {
6257 kp_skipped++;
6258 } else {
6259 ps[i] = p_i;
6260 }
6261 }
6262 } else {
6263 kp_skipped++;
6264 }
6265 }
6266
6267 if ((kp_cnt - kp_skipped > 0) && kill_strays) {
6268 jobmgr_kill_stray_children(jm, ps, kp_cnt - kp_skipped);
6269 }
6270
6271 free(ps);
6272 out:
6273 free(pids);
6274 }
6275
6276 jobmgr_t
6277 jobmgr_parent(jobmgr_t jm)
6278 {
6279 return jm->parentmgr;
6280 }
6281
6282 void
6283 job_uncork_fork(job_t j)
6284 {
6285 pid_t c = j->p;
6286
6287 job_log(j, LOG_DEBUG, "Uncorking the fork().");
6288 /* this unblocks the child and avoids a race
6289 * between the above fork() and the kevent_mod() */
6290 (void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
6291 (void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
6292 j->fork_fd = 0;
6293 }
6294
/* Create a new job manager (bootstrap).
 *
 * jm: parent manager, or NULL for the root manager.
 * requestorport: port whose death tears the sub-bootstrap down (required
 *     when jm is non-NULL).
 * transfer_port: pre-existing bootstrap port to adopt, or MACH_PORT_NULL to
 *     create/check in for one.
 * sflag: passed through to the session bootstrapper ("-s" safe boot).
 * name: session name; NULL defers naming until the port index is known.
 * skip_init: suppress spawning the launchctl session bootstrapper.
 * asport: audit session port to associate with the manager/bootstrapper.
 *
 * Returns the new manager, or NULL on failure (partially-constructed state
 * is torn down via jobmgr_remove()).
 */
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
{
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	// The kevent callback must be the first member so a kqueue udata
	// pointer can be treated directly as a jobmgr_t.
	__OSX_COMPILETIME_ASSERT__(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	// Trailing storage holds the manager's name; reserve NAME_MAX+1 when
	// unnamed, since the name is rewritten from the port index below.
	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));

	if (!jobmgr_assumes(jm, jmr != NULL)) {
		return NULL;
	}

	if (jm == NULL) {
		root_jobmgr = jmr;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	// Watch the requestor port so the sub-bootstrap dies with its requestor.
	if (jm && jobmgr_assumes_zero(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME)) != KERN_SUCCESS) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		// Adopt a bootstrap port handed to us (sub-bootstrap transfer).
		(void)jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && !pid1_magic) {
		// Per-user launchd: check in with PID 1 for our bootstrap port.
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (jobmgr_assumes_zero(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port)) != 0) {
			goto out_bad;
		}

		if (trusted_fd) {
			// Close the trusted fd inherited from our parent (the dup()
			// probe confirms it is still a valid descriptor first).
			int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);

			if ((dfd = dup(lfd)) >= 0) {
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(dfd));
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(lfd));
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		// cut off the Libc cache, we don't want to deadlock against ourself
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		osx_assert_zero(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME));

		// We set this explicitly as we start each child
		osx_assert_zero(launchd_set_bport(MACH_PORT_NULL));
	} else if (jobmgr_assumes_zero(jmr, launchd_mport_create_recv(&jmr->jm_port)) != KERN_SUCCESS) {
		goto out_bad;
	}

	if (!name) {
		// Unnamed managers are named after their bootstrap port's index.
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	if (!jm) {
		// The root manager owns the global signal and filesystem kevents.
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr));
	}

	if (name && !skip_init) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		// Register the bootstrap port with the runtime unless the "weird
		// bootstrap" path will do so itself.
		if (jobmgr_assumes_zero(jmr, runtime_add_mport(jmr->jm_port, job_server)) != KERN_SUCCESS) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		bootstrapper->asport = asport;

		jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
		(void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	} else {
		jmr->req_asport = asport;
	}

	if (asport != MACH_PORT_NULL) {
		(void)jobmgr_assumes_zero(jmr, launchd_mport_copy_send(asport));
	}

	if (jmr->parentmgr) {
		runtime_add_weak_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
		if (jm == NULL) {
			root_jobmgr = NULL;
		}
	}
	return NULL;
}
6417
6418 jobmgr_t
6419 jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
6420 {
6421 jobmgr_t new = NULL;
6422
6423 /* These job managers are basically singletons, so we use the root Mach
6424 * bootstrap port as their requestor ports so they'll never go away.
6425 */
6426 mach_port_t req_port = root_jobmgr->jm_port;
6427 if (jobmgr_assumes_zero(jm, launchd_mport_make_send(req_port)) == KERN_SUCCESS) {
6428 new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
6429 if (new) {
6430 new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
6431 new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
6432 new->xpc_singleton = true;
6433 }
6434 }
6435
6436 return new;
6437 }
6438
6439 jobmgr_t
6440 jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
6441 {
6442 jobmgr_t jmi = NULL;
6443 LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
6444 if (jmi->req_euid == uid) {
6445 return jmi;
6446 }
6447 }
6448
6449 name_t name;
6450 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
6451 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6452 if (jobmgr_assumes(jm, jmi != NULL)) {
6453 /* We need to create a per-user launchd for this UID if there isn't one
6454 * already so we can grab the bootstrap port.
6455 */
6456 job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
6457 if (jobmgr_assumes(jmi, puj != NULL)) {
6458 (void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(puj->asport));
6459 (void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(jmi->req_bsport));
6460 jmi->shortdesc = "per-user";
6461 jmi->req_asport = puj->asport;
6462 jmi->req_asid = puj->asid;
6463 jmi->req_euid = uid;
6464 jmi->req_egid = -1;
6465
6466 LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
6467 } else {
6468 jobmgr_remove(jmi);
6469 }
6470 }
6471
6472 return jmi;
6473 }
6474
6475 jobmgr_t
6476 jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
6477 {
6478 jobmgr_t jmi = NULL;
6479 LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
6480 if (jmi->req_asid == asid) {
6481 return jmi;
6482 }
6483 }
6484
6485 name_t name;
6486 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
6487 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6488 if (jobmgr_assumes(jm, jmi != NULL)) {
6489 (void)jobmgr_assumes_zero(jmi, launchd_mport_make_send(root_jobmgr->jm_port));
6490 jmi->shortdesc = "per-session";
6491 jmi->req_bsport = root_jobmgr->jm_port;
6492 (void)jobmgr_assumes_zero(jmi, audit_session_port(asid, &jmi->req_asport));
6493 jmi->req_asid = asid;
6494 jmi->req_euid = -1;
6495 jmi->req_egid = -1;
6496
6497 LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
6498 } else {
6499 jobmgr_remove(jmi);
6500 }
6501
6502 return jmi;
6503 }
6504
/* Spawn the "launchctl bootstrap" helper job that populates a freshly
 * created session. sflag forwards launchctl's -s (safe boot) flag. Returns
 * the bootstrapper job (or NULL if job_new failed); always marks the
 * manager's session as initialized.
 */
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);

	if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
		// Sub-manager or non-PID-1 case: the bootstrapper uses the "weird
		// bootstrap" dance with its own dedicated Mach port.
		bootstrapper->is_bootstrapper = true;
		char buf[100];

		// <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs)
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
		bootstrapper->weird_bootstrap = true;
		(void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	} else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
#if TARGET_OS_EMBEDDED
		bootstrapper->pstype = POSIX_SPAWN_IOS_INTERACTIVE;
#endif
		bootstrapper->is_bootstrapper = true;
		if (jobmgr_assumes(jm, pid1_magic)) {
			// Have our system bootstrapper print out to the console.
			bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
			bootstrapper->stderrpath = strdup(_PATH_CONSOLE);

			if (launchd_console) {
				// Watch for console revocation so logging can recover.
				(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)fileno(launchd_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm));
			}
		}
	}

	jm->session_initialized = true;
	return bootstrapper;
}
6543
/* Purge every reference to a dead Mach port from this manager and its
 * sub-managers. Returns the (possibly replaced) manager, or NULL if the
 * manager itself was shut down because the port was its requestor port.
 */
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right to a
	 * port we already have open, the Mach kernel gives us the same port number
	 * back and increments a reference count associated with the port. This
	 * forces us, when discovering that a receive right at the other end
	 * has been deleted, to wander all of our objects to see what weird places
	 * clients might have handed us the same send right to use.
	 */

	if (jm == root_jobmgr) {
		if (port == inherited_bootstrap_port) {
			// Our parent bootstrap died: drop the reference and begin a
			// full shutdown of the root manager.
			(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(port));
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		// Delete every non-receive machservice that aliases this port.
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port && !ms->recv) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	// Recurse into sub-managers before checking our own requestor port.
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
		return jobmgr_shutdown(jm);
	}

	return jm;
}
6585
/* Resolve a Mach service name within a bootstrap.
 *
 * target_pid != 0 restricts the search to that process's per-PID services.
 * Otherwise the manager's own hash is searched (or the root manager's, for
 * flat namespaces), walking up through parent managers when check_parent is
 * set. Returns NULL when nothing matches.
 */
struct machservice *
jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
{
	struct machservice *ms;
	job_t target_j;

	jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);

	if (target_pid) {
		/* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
		 * bootstrap in other bootstraps.
		 */

		// Start in the given bootstrap.
		if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
			// If we fail, do a deep traversal.
			if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
				jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
				return NULL;
			}
		}

		SLIST_FOREACH(ms, &target_j->machservices, sle) {
			if (ms->per_pid && strcmp(name, ms->name) == 0) {
				return ms;
			}
		}

		job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
		return NULL;
	}

	jobmgr_t where2look = jm;
	// XPC domains are separate from Mach bootstraps.
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		// Flat namespace: all lookups resolve against the root manager,
		// unless this bootstrap was created as an explicit subset.
		if (launchd_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2look = root_jobmgr;
		}
	}

	LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
		if (!ms->per_pid && strcmp(name, ms->name) == 0) {
			return ms;
		}
	}

	if (jm->parentmgr == NULL || !check_parent) {
		return NULL;
	}

	// Recurse upward; per-PID lookups never propagate to parents.
	return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
}
6638
6639 mach_port_t
6640 machservice_port(struct machservice *ms)
6641 {
6642 return ms->port;
6643 }
6644
6645 job_t
6646 machservice_job(struct machservice *ms)
6647 {
6648 return ms->job;
6649 }
6650
6651 bool
6652 machservice_hidden(struct machservice *ms)
6653 {
6654 return ms->hide;
6655 }
6656
6657 bool
6658 machservice_active(struct machservice *ms)
6659 {
6660 return ms->isActive;
6661 }
6662
6663 const char *
6664 machservice_name(struct machservice *ms)
6665 {
6666 return ms->name;
6667 }
6668
/* Drain queued messages off a crashed job's service port so clients don't
 * wedge waiting on replies that will never come. Drains one message or
 * everything pending, per the service's drain_*_on_crash settings.
 */
void
machservice_drain_port(struct machservice *ms)
{
	bool drain_one = ms->drain_one_on_crash;
	bool drain_all = ms->drain_all_on_crash;

	if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
		return;
	}

	job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);

	char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
	char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
	mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
	mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;

	// Seeded with a value that is not MACH_MSG_SUCCESS so the loop runs.
	mach_msg_return_t mr = ~MACH_MSG_SUCCESS;

	do {
		/* This should be a direct check on the Mach service to see if it's an exception-handling
		 * port, and it will break things if ReportCrash or SafetyNet start advertising other
		 * Mach services. But for now, it should be okay.
		 */
		if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
			mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
		} else {
			mach_msg_options_t options = MACH_RCV_MSG |
				MACH_RCV_TIMEOUT ;

			// Zero timeout: only pull messages that are already queued.
			mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
			switch (mr) {
			case MACH_MSG_SUCCESS:
				// Destroy the message so any rights or out-of-line memory
				// it carried are released.
				mach_msg_destroy((mach_msg_header_t *)req_hdr);
				break;
			case MACH_RCV_TIMED_OUT:
				// Queue empty — terminates a drain_all loop.
				break;
			case MACH_RCV_TOO_LARGE:
				launchd_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
				break;
			default:
				break;
			}
		}
	} while (drain_all && mr != MACH_RCV_TIMED_OUT);
}
6715
/* Remove a Mach service from its job: closes/releases the port rights and
 * unlinks the service from every lookup list it sits on. port_died only
 * affects the log message.
 */
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	if (ms->alias) {
		/* HACK: Egregious code duplication. But dealing with aliases is a
		 * pretty simple affair since they can't and shouldn't have any complex
		 * behaviors associated with them.
		 */
		LIST_REMOVE(ms, name_hash_sle);
		SLIST_REMOVE(&j->machservices, ms, machservice, sle);
		free(ms);
		return;
	}

	if (unlikely(ms->debug_on_close)) {
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
	}

	// Only close the receive right if the service isn't still checked out.
	if (ms->recv && job_assumes(j, !machservice_active(ms))) {
		job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
		(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	}

	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	if (unlikely(ms->port == the_exception_server)) {
		the_exception_server = 0;
	}

	job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}
	SLIST_REMOVE(&j->machservices, ms, machservice, sle);

	// Dedicated-instance and event-channel services were never entered into
	// the name hash, so only unlink from it in the normal case.
	if (!(j->dedicated_instance || ms->event_channel)) {
		LIST_REMOVE(ms, name_hash_sle);
	}
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
6760
6761 void
6762 machservice_request_notifications(struct machservice *ms)
6763 {
6764 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
6765
6766 ms->isActive = true;
6767
6768 if (ms->recv) {
6769 which = MACH_NOTIFY_PORT_DESTROYED;
6770 job_checkin(ms->job);
6771 }
6772
6773 (void)job_assumes_zero(ms->job, launchd_mport_notify_req(ms->port, which));
6774 }
6775
6776 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
6777 #define END_OF(x) (&(x)[NELEM(x)])
6778
/* Split a command string into an argv-style vector. Supports double-quoted
 * arguments and backslash escapes. Returns a single malloc(3)'d allocation
 * (pointer table followed by string storage) that the caller frees with one
 * free(), or NULL for empty input or allocation failure. Input longer than
 * the internal scratch buffers is truncated rather than overflowed.
 */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;
	bool full = false;

	for (cp = string; *cp && !full;) {
		// isspace() requires an unsigned char value; plain char may be
		// negative, which is undefined behavior.
		while (isspace((unsigned char)*cp)) {
			cp++;
		}
		if (*cp == '\0') {
			// Fixed: trailing whitespace used to produce a spurious empty
			// final argument.
			break;
		}
		term = (*cp == '"') ? *cp++ : '\0';
		// Fixed: reserve one slot so the argv[nargs] = NULL store below can
		// never write past the end of the table.
		if (nargs < NELEM(argv) - 1) {
			argv[nargs++] = argp;
		}
		while (*cp && (term ? *cp != term : !isspace((unsigned char)*cp))) {
			if (*cp == '\\') {
				cp++;
			}
			if (argp >= END_OF(args) - 1) {
				// Fixed: out of scratch space — truncate and stop parsing
				// instead of writing past args[].
				full = true;
				break;
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		*argp++ = '\0';
	}
	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	// One allocation: (nargs + 1) pointers, then the copied strings. The
	// copies can only be shorter than the input (quotes/escapes removed),
	// so strlen(string) + 1 bounds the string storage.
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!argv_ret) {
		(void)osx_assumes_zero(errno);
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
6829
6830 void
6831 job_checkin(job_t j)
6832 {
6833 j->checkedin = true;
6834 }
6835
6836 bool job_is_god(job_t j)
6837 {
6838 return j->embedded_god;
6839 }
6840
/* Handle the return of a receive right that launchd vended to a job.
 * Returns false when no registered MachService owns the port; otherwise
 * optionally drains the port (crash case), resets/deletes the service per
 * its settings, and redispatches the owning job.
 */
bool
job_ack_port_destruction(mach_port_t p)
{
	struct machservice *ms;
	job_t j;

	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->recv && (ms->port == p)) {
			break;
		}
	}

	if (!ms) {
		launchd_syslog(LOG_WARNING, "Could not find MachService to match receive right: 0x%x", p);
		return false;
	}

	j = ms->job;

	jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);

	/* Without being the exception handler, NOTE_EXIT is our only way to tell if
	 * the job crashed, and we can't rely on NOTE_EXIT always being processed
	 * after all the job's receive rights have been returned.
	 *
	 * So when we get receive rights back, check to see if the job has been
	 * reaped yet. If not, then we add this service to a list of services to be
	 * drained on crash if it's requested that behavior. So, for a job with N
	 * receive rights all requesting that they be drained on crash, we can
	 * safely handle the following sequence of events.
	 *
	 * ReceiveRight0Returned
	 * ReceiveRight1Returned
	 * ReceiveRight2Returned
	 * NOTE_EXIT (reap, get exit status)
	 * ReceiveRight3Returned
	 * .
	 * .
	 * .
	 * ReceiveRight(N - 1)Returned
	 */
	if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
		if (j->crashed && j->reaped) {
			job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
			machservice_drain_port(ms);
		} else if (!(j->crashed || j->reaped)) {
			job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
		}
	}

	ms->isActive = false;
	if (ms->delete_on_destruction) {
		machservice_delete(j, ms, false);
	} else if (ms->reset) {
		machservice_resetport(j, ms);
	}

	machservice_stamp_port(j, ms);
	job_dispatch(j, false);

	// A returned receive right may have been the last thing keeping a
	// manager alive; give GC a chance to collect.
	root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);

	return true;
}
6905
6906 void
6907 job_ack_no_senders(job_t j)
6908 {
6909 j->priv_port_has_senders = false;
6910
6911 (void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
6912 j->j_port = 0;
6913
6914 job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");
6915
6916 job_dispatch(j, false);
6917 }
6918
6919 bool
6920 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
6921 {
6922 struct semaphoreitem *si;
6923 size_t alloc_sz = sizeof(struct semaphoreitem);
6924
6925 if (what) {
6926 alloc_sz += strlen(what) + 1;
6927 }
6928
6929 if (job_assumes(j, si = calloc(1, alloc_sz)) == NULL) {
6930 return false;
6931 }
6932
6933 si->why = why;
6934
6935 if (what) {
6936 strcpy(si->what_init, what);
6937 }
6938
6939 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
6940
6941 if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
6942 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
6943 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
6944 j->nosy = true;
6945 }
6946
6947 semaphoreitem_runtime_mod_ref(si, true);
6948
6949 return true;
6950 }
6951
6952 void
6953 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
6954 {
6955 /*
6956 * External events need to be tracked.
6957 * Internal events do NOT need to be tracked.
6958 */
6959
6960 switch (si->why) {
6961 case SUCCESSFUL_EXIT:
6962 case FAILED_EXIT:
6963 case OTHER_JOB_ENABLED:
6964 case OTHER_JOB_DISABLED:
6965 case OTHER_JOB_ACTIVE:
6966 case OTHER_JOB_INACTIVE:
6967 return;
6968 default:
6969 break;
6970 }
6971
6972 if (add) {
6973 runtime_add_weak_ref();
6974 } else {
6975 runtime_del_weak_ref();
6976 }
6977 }
6978
6979 void
6980 semaphoreitem_delete(job_t j, struct semaphoreitem *si)
6981 {
6982 semaphoreitem_runtime_mod_ref(si, false);
6983
6984 SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
6985
6986 // We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores.
6987 if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
6988 j->nosy = false;
6989 SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
6990 }
6991
6992 free(si);
6993 }
6994
6995 void
6996 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
6997 {
6998 struct semaphoreitem_dict_iter_context *sdic = context;
6999 semaphore_reason_t why;
7000
7001 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
7002
7003 semaphoreitem_new(sdic->j, why, key);
7004 }
7005
/* launch_data dictionary iterator for a job's KeepAlive dictionary.
 * Boolean entries become single semaphores; dictionary entries (OtherJob*)
 * are iterated again to create one semaphore per referenced job.
 */
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			// Exit-status semaphores imply the job should start at least once.
			j->start_pending = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
			j->needs_kickoff = launch_data_get_bool(obj);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
			why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
			semaphoreitem_new(j, why, NULL);
			j->start_pending = true;
		} else {
			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		// Sub-dictionary: each key is another job's label; the boolean
		// value selects which reason (true/false) the semaphore fires on.
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
			break;
		}

		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		job_log(j, LOG_ERR, "Unrecognized KeepAlive type: %u", launch_data_get_type(obj));
		break;
	}
}
7051
7052 bool
7053 externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event)
7054 {
7055 if (j->event_monitor) {
7056 job_log(j, LOG_ERR, "The event monitor job cannot use LaunchEvents or XPC Events.");
7057 return false;
7058 }
7059
7060 struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
7061 if (!ee) {
7062 return false;
7063 }
7064
7065 ee->event = xpc_retain(event);
7066 (void)strcpy(ee->name, evname);
7067 ee->job = j;
7068 ee->id = sys->curid;
7069 ee->sys = sys;
7070 ee->state = false;
7071 ee->wanted_state = true;
7072 sys->curid++;
7073
7074 if (sys == _launchd_support_system) {
7075 ee->internal = true;
7076 }
7077
7078 LIST_INSERT_HEAD(&j->events, ee, job_le);
7079 LIST_INSERT_HEAD(&sys->events, ee, sys_le);
7080
7081 job_log(j, LOG_DEBUG, "New event: %s/%s", sys->name, evname);
7082
7083 eventsystem_ping();
7084 return true;
7085 }
7086
7087 void
7088 externalevent_delete(struct externalevent *ee)
7089 {
7090 xpc_release(ee->event);
7091 LIST_REMOVE(ee, job_le);
7092 LIST_REMOVE(ee, sys_le);
7093
7094 free(ee);
7095
7096 eventsystem_ping();
7097 }
7098
7099 void
7100 externalevent_setup(launch_data_t obj, const char *key, void *context)
7101 {
7102 /* This method can ONLY be called on the job_import() path, as it assumes
7103 * the input is a launch_data_t.
7104 */
7105 struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
7106
7107 xpc_object_t xobj = ld2xpc(obj);
7108 if (xobj) {
7109 job_log(ctx->j, LOG_DEBUG, "Importing stream/event: %s/%s", ctx->sys->name, key);
7110 externalevent_new(ctx->j, ctx->sys, key, xobj);
7111 xpc_release(xobj);
7112 } else {
7113 job_log(ctx->j, LOG_ERR, "Could not import event for job: %s", key);
7114 }
7115 }
7116
7117 struct externalevent *
7118 externalevent_find(const char *sysname, uint64_t id)
7119 {
7120 struct externalevent *ei = NULL;
7121
7122 struct eventsystem *es = eventsystem_find(sysname);
7123 if (es != NULL) {
7124 LIST_FOREACH(ei, &es->events, sys_le) {
7125 if (ei->id == id) {
7126 break;
7127 }
7128 }
7129 } else {
7130 launchd_syslog(LOG_ERR, "Could not find event system: %s", sysname);
7131 }
7132
7133 return ei;
7134 }
7135
7136 struct eventsystem *
7137 eventsystem_new(const char *name)
7138 {
7139 struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
7140 if (es != NULL) {
7141 es->curid = 1;
7142 (void)strcpy(es->name, name);
7143 LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
7144 } else {
7145 (void)osx_assumes_zero(errno);
7146 }
7147
7148 return es;
7149 }
7150
7151 void
7152 eventsystem_delete(struct eventsystem *es)
7153 {
7154 struct externalevent *ei = NULL;
7155 while ((ei = LIST_FIRST(&es->events))) {
7156 externalevent_delete(ei);
7157 }
7158
7159 LIST_REMOVE(es, global_le);
7160
7161 free(es);
7162 }
7163
7164 void
7165 eventsystem_setup(launch_data_t obj, const char *key, void *context)
7166 {
7167 job_t j = (job_t)context;
7168 if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
7169 return;
7170 }
7171
7172 struct eventsystem *sys = eventsystem_find(key);
7173 if (unlikely(sys == NULL)) {
7174 sys = eventsystem_new(key);
7175 job_log(j, LOG_DEBUG, "New event system: %s", key);
7176 }
7177
7178 if (job_assumes(j, sys != NULL)) {
7179 struct externalevent_iter_ctx ctx = {
7180 .j = j,
7181 .sys = sys,
7182 };
7183
7184 job_log(j, LOG_DEBUG, "Importing events for stream: %s", key);
7185 launch_data_dict_iterate(obj, externalevent_setup, &ctx);
7186 }
7187 }
7188
7189 struct eventsystem *
7190 eventsystem_find(const char *name)
7191 {
7192 struct eventsystem *esi = NULL;
7193 LIST_FOREACH(esi, &_s_event_systems, global_le) {
7194 if (strcmp(name, esi->name) == 0) {
7195 break;
7196 }
7197 }
7198
7199 return esi;
7200 }
7201
7202 void
7203 eventsystem_ping(void)
7204 {
7205 if (!_launchd_event_monitor) {
7206 return;
7207 }
7208
7209 if (!_launchd_event_monitor->p) {
7210 (void)job_dispatch(_launchd_event_monitor, true);
7211 } else {
7212 if (_launchd_event_monitor->event_monitor_ready2signal) {
7213 (void)job_assumes_zero_p(_launchd_event_monitor, kill(_launchd_event_monitor->p, SIGUSR1));
7214 }
7215 }
7216 }
7217
7218 void
7219 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
7220 {
7221 jobmgr_t jmi, jmn;
7222 job_t ji, jn;
7223
7224
7225 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7226 jobmgr_dispatch_all_semaphores(jmi);
7227 }
7228
7229 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7230 if (!SLIST_EMPTY(&ji->semaphores)) {
7231 job_dispatch(ji, false);
7232 }
7233 }
7234 }
7235
time_t
cronemu(int mon, int mday, int hour, int min)
{
	/* Next wall-clock time matching the cron-style spec (-1 means
	 * wildcard), searching forward from the minute after now.
	 */
	time_t now = time(NULL);
	struct tm wtm = *localtime(&now);

	wtm.tm_isdst = -1;
	wtm.tm_sec = 0;
	wtm.tm_min++;

	while (!cronemu_mon(&wtm, mon, mday, hour, min)) {
		// No match this year: restart the search at January 1 of next year.
		wtm.tm_year++;
		wtm.tm_mon = 0;
		wtm.tm_mday = 1;
		wtm.tm_hour = 0;
		wtm.tm_min = 0;
		mktime(&wtm);
	}

	return mktime(&wtm);
}
7260
time_t
cronemu_wday(int wday, int hour, int min)
{
	/* Next time landing on the given weekday (7 is an alias for Sunday) at
	 * a matching hour/minute, searching forward from the minute after now.
	 */
	time_t now = time(NULL);
	struct tm wtm = *localtime(&now);

	wtm.tm_isdst = -1;
	wtm.tm_sec = 0;
	wtm.tm_min++;

	if (wday == 7) {
		wday = 0;
	}

	while (!(wtm.tm_wday == wday && cronemu_hour(&wtm, hour, min))) {
		// Try the next day from midnight.
		wtm.tm_mday++;
		wtm.tm_hour = 0;
		wtm.tm_min = 0;
		mktime(&wtm);
	}

	return mktime(&wtm);
}
7287
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	// Advance *wtm (never backward) so its month matches "mon" and the
	// remaining fields satisfy cronemu_mday(). Returns false when no match
	// exists within the current year. mon == -1 is a wildcard.
	if (mon == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			// If mktime() renormalized tm_mon, we carried into the next
			// year: report failure so the caller can bump tm_year.
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mon < wtm->tm_mon) {
		// The requested month has already passed this year.
		return false;
	}

	if (mon > wtm->tm_mon) {
		// Jump ahead to the first instant of the requested month.
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
7323
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	// Advance *wtm so its day-of-month matches "mday" and the remaining
	// fields satisfy cronemu_hour(). Returns false when the search would
	// spill into the next month. mday == -1 is a wildcard.
	if (mday == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			// mktime() renormalizing tm_mday means we carried into the
			// next month; let the caller advance tm_mon.
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mday < wtm->tm_mday) {
		return false;
	}

	if (mday > wtm->tm_mday) {
		// Jump to the first instant of the requested day.
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
7357
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	// Advance *wtm so its hour matches "hour" and the minute field
	// satisfies cronemu_min(). Returns false when the search would spill
	// into the next day. hour == -1 is a wildcard.
	if (hour == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_min(&workingtm, min)) {
			workingtm.tm_hour++;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_hour;
			mktime(&workingtm);
			// mktime() renormalizing tm_hour means we carried into the
			// next day; let the caller advance tm_mday.
			if (carrytest != workingtm.tm_hour) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (hour < wtm->tm_hour) {
		return false;
	}

	if (hour > wtm->tm_hour) {
		// Jump to the first minute of the requested hour.
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
7389
bool
cronemu_min(struct tm *wtm, int min)
{
	/* Match the minute field: -1 is a wildcard; a minute earlier than the
	 * working time cannot match (time only moves forward); a later minute
	 * advances the working time to it.
	 */
	if (min == -1) {
		return true;
	}

	if (min < wtm->tm_min) {
		return false;
	}

	if (min != wtm->tm_min) {
		wtm->tm_min = min;
	}

	return true;
}
7407
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	// MIG handler for bootstrap_create_server(): create a mach_init-style
	// on-demand server job running server_cmd on behalf of the caller, and
	// return the new job's bootstrap port in *server_portp.
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t js;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	// Consult the caller's sandbox profile about creating a job with this
	// binary (filtered on the executable path, argv[0]).
	const char **argv = (const char **)mach_cmd2argv(server_cmd);
	if (unlikely(argv == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
		free(argv);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	free(argv);
#endif

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

	if (pid1_magic) {
		// In the PID 1 instance, non-root callers are redirected to their
		// per-user launchd.
		if (ldc->euid || ldc->uid) {
			job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
			return VPROC_ERR_TRY_PER_USER;
		}
	} else {
		// A non-PID-1 launchd cannot switch UIDs; warn and run as ourselves.
		if (unlikely(server_uid != getuid())) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; // zero means "do nothing"
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (unlikely(js == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
7458
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	// MIG handler: deliver signal "sig" to the job named targetlabel, or
	// unload that job entirely when sig is VPROC_MAGIC_UNLOAD_SIGNAL (in
	// which case the MIG reply may be deferred until the job is gone).
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t otherj;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// The caller must be root or share our UID; on embedded, a "god" job
	// is exempt from this check.
	if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
#if TARGET_OS_EMBEDDED
		if (!j->embedded_god) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
#else
		return BOOTSTRAP_NOT_PRIVILEGED;
#endif
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// Embedded "god" jobs may only signal jobs owned by the same user.
	if (j->embedded_god) {
		if (j->username && otherj->username) {
			if (strcmp(j->username, otherj->username) != 0) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		// Capture "was running" before removal decides whether to block.
		bool do_block = otherj->p;

		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			// this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first...
			(void)job_assumes(otherj, waiting4removal_new(otherj, srp));
			// Defer the reply (sent via srp) until the job actually exits.
			return MIG_NO_REPLY;
		} else {
			return 0;
		}
	} else if (otherj->p) {
		(void)job_assumes_zero_p(j, kill2(otherj->p, sig));
	}

	return 0;
}
7524
7525 kern_return_t
7526 job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
7527 {
7528 struct ldcred *ldc = runtime_get_caller_creds();
7529
7530 if (!j) {
7531 return BOOTSTRAP_NO_MEMORY;
7532 }
7533
7534 if (!job_assumes(j, j->per_user)) {
7535 return BOOTSTRAP_NOT_PRIVILEGED;
7536 }
7537
7538 return launchd_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
7539 }
7540
7541 kern_return_t
7542 job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
7543 {
7544 struct ldcred *ldc = runtime_get_caller_creds();
7545
7546 if (!j) {
7547 return BOOTSTRAP_NO_MEMORY;
7548 }
7549
7550 if (unlikely(ldc->euid)) {
7551 return BOOTSTRAP_NOT_PRIVILEGED;
7552 }
7553
7554 return launchd_log_drain(srp, outval, outvalCnt);
7555 }
7556
kern_return_t
job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
	vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval,
	mach_msg_type_number_t *outvalCnt)
{
	// MIG handler backing vproc_swap_complex(): get and/or set a
	// launch_data-serialized value identified by the given keys. On
	// success the caller receives (and owns) the out-of-line reply buffer
	// in *outval/*outvalCnt; the incoming buffer is always deallocated
	// here. Returns 0 on success, 1 on failure.
	const char *action;
	launch_data_t input_obj = NULL, output_obj = NULL;
	size_t data_offset = 0;
	size_t packed_size;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Setting a value on a job other than the caller's own requires root
	// (or matching our UID).
	if (inkey && ldc->pid != j->p) {
		if (ldc->euid && ldc->euid != getuid()) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}

	// A simultaneous get+set must refer to the same key.
	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	// Pre-allocate a generous (20 MB) out-of-line reply buffer; the VM
	// pages are only materialized as they are touched by packing.
	*outvalCnt = 20 * 1024 * 1024;
	mig_allocate(outval, *outvalCnt);
	if (!job_assumes(j, *outval != 0)) {
		return 1;
	}

	/* Note to future maintainers: launch_data_unpack() does NOT return a heap
	 * object. The data is decoded in-place. So do not call launch_data_free()
	 * on input_obj.
	 */
	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
		goto out_bad;
	}

	char *store = NULL;
	switch (outkey) {
	case VPROC_GSK_ENVIRONMENT:
		// Export the aggregate environment of other jobs as a dictionary.
		if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
			goto out_bad;
		}
		jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ALLJOBS:
		// Snapshot of every job, with file descriptors stripped out.
		if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
			goto out_bad;
		}
		ipc_revoke_fds(output_obj);
		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_MGR_NAME:
		if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
			goto out_bad;
		}
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_JOB_OVERRIDES_DB:
		// Path to the persistent overrides database; "store" is heap-owned
		// here and freed on both paths.
		store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_DB, "overrides.plist");
		if (!store || !job_assumes(j, (output_obj = launch_data_new_string(store)) != NULL)) {
			free(store);
			goto out_bad;
		}

		free(store);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}

		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ZERO:
		// No "get" requested: release the reply buffer we pre-allocated.
		mig_deallocate(*outval, *outvalCnt);
		*outval = 0;
		*outvalCnt = 0;
		break;
	default:
		goto out_bad;
	}

	mig_deallocate(inval, invalCnt);
	return 0;

out_bad:
	mig_deallocate(inval, invalCnt);
	if (*outval) {
		// NOTE(review): *outval/*outvalCnt are deallocated but not zeroed
		// here — presumably MIG ignores out parameters on a non-zero
		// return; verify against the reply-message semantics.
		mig_deallocate(*outval, *outvalCnt);
	}
	if (output_obj) {
		launch_data_free(output_obj);
	}

	return 1;
}
7681
kern_return_t
job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
{
	// MIG handler backing vproc_swap_integer(): optionally reads the value
	// named by outkey into *outval, and/or applies inval to the value
	// named by inkey. A key of VPROC_GSK_ZERO means "no operation" for
	// that direction. Returns 0 on success, 1 (or a bootstrap error) on
	// failure.
	const char *action;
	kern_return_t kr = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	int oldmask;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Setting a value on a job other than the caller's own requires root
	// (or matching our UID).
	if (inkey && ldc->pid != j->p) {
		if (ldc->euid && ldc->euid != getuid()) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}

	// A simultaneous get+set must refer to the same key.
	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	// Phase 1: read the outgoing value, if one was requested.
	switch (outkey) {
	case VPROC_GSK_ABANDON_PROCESS_GROUP:
		*outval = j->abandon_pg;
		break;
	case VPROC_GSK_LAST_EXIT_STATUS:
		*outval = j->last_exit_status;
		break;
	case VPROC_GSK_MGR_UID:
		*outval = getuid();
		break;
	case VPROC_GSK_MGR_PID:
		*outval = getpid();
		break;
	case VPROC_GSK_IS_MANAGED:
		*outval = j->anonymous ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		*outval = !j->ondemand;
		break;
	case VPROC_GSK_START_INTERVAL:
		*outval = j->start_interval;
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		*outval = j->timeout;
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		*outval = j->exit_timeout;
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		// There is no read-only accessor for the log mask, so read it by
		// setting and immediately restoring it.
		oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
		*outval = oldmask;
		runtime_setlogmask(oldmask);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		// Likewise: umask() can only be read by writing it.
		oldmask = umask(0);
		*outval = oldmask;
		umask(oldmask);
		break;
	case VPROC_GSK_TRANSACTIONS_ENABLED:
		job_log(j, LOG_DEBUG, "Reading EnableTransactions value.");
		*outval = j->enable_transactions;
		break;
	case VPROC_GSK_WAITFORDEBUGGER:
		*outval = j->wait4debugger;
		break;
	case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
		*outval = j->embedded_god;
		break;
	case VPROC_GSK_ZERO:
		*outval = 0;
		break;
	default:
		kr = 1;
		break;
	}

	// Phase 2: apply the incoming value, if one was supplied.
	switch (inkey) {
	case VPROC_GSK_ABANDON_PROCESS_GROUP:
		j->abandon_pg = (bool)inval;
		break;
	case VPROC_GSK_GLOBAL_ON_DEMAND:
		job_log(j, LOG_DEBUG, "Job has set global on-demand mode to: %s", inval ? "true" : "false");
		kr = job_set_global_on_demand(j, inval);
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		j->ondemand = !inval;
		break;
	case VPROC_GSK_START_INTERVAL:
		if (inval > UINT32_MAX || inval < 0) {
			kr = 1;
		} else if (inval) {
			// A 0 -> nonzero transition holds launchd alive with a weak
			// reference; re-arming with a new interval does not add one.
			if (j->start_interval == 0) {
				runtime_add_weak_ref();
			}
			j->start_interval = (typeof(j->start_interval)) inval;
			(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
		} else if (j->start_interval) {
			// Nonzero -> 0: tear down the timer and drop the weak
			// reference taken above. (The redundant re-check of
			// j->start_interval inside this branch was removed.)
			(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
			runtime_del_weak_ref();
			j->start_interval = 0;
		}
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			j->timeout = (typeof(j->timeout)) inval;
		}
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			j->exit_timeout = (typeof(j->exit_timeout)) inval;
		}
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			runtime_setlogmask((int) inval);
		}
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		__OSX_COMPILETIME_ASSERT__(sizeof (mode_t) == 2);
		if (inval < 0 || inval > UINT16_MAX) {
			kr = 1;
			break;
		}
#if HAVE_SANDBOX
		if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
			kr = 1;
			break;
		}
#endif
		// BUGFIX: the umask() call previously lived inside the
		// HAVE_SANDBOX block, so builds without sandbox support silently
		// ignored the request while still reporting success.
		umask((mode_t) inval);
		break;
	case VPROC_GSK_TRANSACTIONS_ENABLED:
		/* No-op. */
		break;
	case VPROC_GSK_WEIRD_BOOTSTRAP:
		// A "weird bootstrap" job graduates to being served by the shared
		// job_server demuxer on its manager's port.
		if (job_assumes(j, j->weird_bootstrap)) {
			job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");

			mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_job_subsystem);

			if (job_mig_job_subsystem.maxsize > mxmsgsz) {
				mxmsgsz = job_mig_job_subsystem.maxsize;
			}

			(void)job_assumes_zero(j, runtime_add_mport(j->mgr->jm_port, job_server));
			j->weird_bootstrap = false;
		}
		break;
	case VPROC_GSK_WAITFORDEBUGGER:
		j->wait4debugger_oneshot = inval;
		break;
	case VPROC_GSK_PERUSER_SUSPEND:
		// PID 1, root-only: suspend the per-user launchd for UID inval.
		if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
			mach_port_t junk = MACH_PORT_NULL;
			job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
			if (job_assumes(j, jpu != NULL)) {
				struct suspended_peruser *spi = NULL;
				LIST_FOREACH(spi, &j->suspended_perusers, sle) {
					if ((int64_t)(spi->j->mach_uid) == inval) {
						job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
						break;
					}
				}

				if (spi == NULL) {
					job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
					spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
					if (job_assumes(j, spi != NULL)) {
						/* Stop listening for events.
						 *
						 * See <rdar://problem/9014146>.
						 */
						if (jpu->peruser_suspend_count == 0) {
							job_ignore(jpu);
						}

						spi->j = jpu;
						spi->j->peruser_suspend_count++;
						LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
						job_stop(spi->j);
						*outval = jpu->p;
					} else {
						kr = BOOTSTRAP_NO_MEMORY;
					}
				}
			}
		} else {
			kr = 1;
		}
		break;
	case VPROC_GSK_PERUSER_RESUME:
		// PID 1 only: undo a previous PERUSER_SUSPEND by this job.
		if (job_assumes(j, pid1_magic == true)) {
			struct suspended_peruser *spi = NULL, *spt = NULL;
			LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
				if ((int64_t)(spi->j->mach_uid) == inval) {
					spi->j->peruser_suspend_count--;
					LIST_REMOVE(spi, sle);
					job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
					break;
				}
			}

			if (!job_assumes(j, spi != NULL)) {
				job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
				kr = BOOTSTRAP_NOT_PRIVILEGED;
			} else if (spi->j->peruser_suspend_count == 0) {
				// Last suspender gone: watch and dispatch it again.
				job_watch(spi->j);
				job_dispatch(spi->j, false);
				free(spi);
			}
		} else {
			kr = 1;
		}
		break;
	case VPROC_GSK_ZERO:
		break;
	default:
		kr = 1;
		break;
	}

	return kr;
}
7925
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
{
	// MIG handler called by a freshly fork()ed child before exec: install
	// the exception port and the registered Mach special ports on the
	// child task, and hand back the audit session port (if applicable).
	struct machservice *ms;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	job_setup_exception_port(j, child_task);

	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			// The TASK_ACCESS_PORT funny business is to workaround 5325399.
			continue;
		}

		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
		if (errno) {
			if (errno == MACH_SEND_INVALID_DEST) {
				// The child is already gone; no point trying further ports.
				job_log(j, LOG_WARNING, "Task died before special ports could be set.");
				break;
			}

			// Downgrade the log severity for cases known to be benign.
			int desired_log_level = LOG_ERR;
			if (j->anonymous) {
				// 5338127

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* MIG will not zero-initialize this pointer, so we must always do so. See
	 * <rdar://problem/8562593>.
	 */
	*asport = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
	if (!j->anonymous) {
		/* XPC services will spawn into the root security session by default.
		 * xpcproxy will switch them away if needed.
		 */
		if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
			job_log(j, LOG_DEBUG, "Returning session port: 0x%x", j->asport);
			*asport = j->asport;
		}
	}
#endif
	// Drop the send right MIG gave us for the child task.
	(void)job_assumes_zero(j, launchd_mport_deallocate(child_task));

	return 0;
}
7986
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	// MIG handler for reboot2(): record who asked (by walking the caller's
	// process ancestry), stash the reboot flags, and begin shutdown. Only
	// valid in the PID 1 instance, and only for root callers (embedded
	// "god" jobs excepted).
	char who_started_the_reboot[2048] = "";
	struct proc_bsdshortinfo proc;
	struct ldcred *ldc = runtime_get_caller_creds();
	pid_t pid_to_log;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!pid1_magic)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Note: the braces of this "if" deliberately span the #if/#else so the
	// privilege condition differs per platform.
#if !TARGET_OS_EMBEDDED
	if (unlikely(ldc->euid)) {
#else
	if (unlikely(ldc->euid) && !j->embedded_god) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Walk up the process tree from the caller, appending "name[pid] ->"
	// for each ancestor until we reach PID 0.
	for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
		size_t who_offset;
		if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			return 1;
		}

		if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
			// Defend against a self-parented process cycling forever.
			job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
			snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
			break;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;
	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
	launchd_shutdown();

	return 0;
}
8037
8038 kern_return_t
8039 job_mig_getsocket(job_t j, name_t spr)
8040 {
8041 if (!j) {
8042 return BOOTSTRAP_NO_MEMORY;
8043 }
8044
8045 if (j->deny_job_creation) {
8046 return BOOTSTRAP_NOT_PRIVILEGED;
8047 }
8048
8049 #if HAVE_SANDBOX
8050 struct ldcred *ldc = runtime_get_caller_creds();
8051 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8052 return BOOTSTRAP_NOT_PRIVILEGED;
8053 }
8054 #endif
8055
8056 ipc_server_init();
8057
8058 if (unlikely(!sockpath)) {
8059 return BOOTSTRAP_NO_MEMORY;
8060 }
8061
8062 strncpy(spr, sockpath, sizeof(name_t));
8063
8064 return BOOTSTRAP_SUCCESS;
8065 }
8066
8067 kern_return_t
8068 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8069 {
8070 if (!j) {
8071 return BOOTSTRAP_NO_MEMORY;
8072 }
8073
8074 if ((errno = err)) {
8075 job_log_error(j, pri, "%s", msg);
8076 } else {
8077 job_log(j, pri, "%s", msg);
8078 }
8079
8080 return 0;
8081 }
8082
8083 void
8084 job_setup_per_user_directory(job_t j, uid_t uid, const char *path)
8085 {
8086 struct stat sb;
8087
8088 bool created = false;
8089 int r = stat(path, &sb);
8090 if ((r == -1 && errno == ENOENT) || (r == 0 && !S_ISDIR(sb.st_mode))) {
8091 if (r == 0) {
8092 job_log(j, LOG_NOTICE, "File at location of per-user launchd directory is not a directory. Moving aside: %s", path);
8093
8094 char old[PATH_MAX];
8095 snprintf(old, sizeof(old), "%s.movedaside", path);
8096 (void)job_assumes_zero_p(j, rename(path, old));
8097 }
8098
8099 (void)job_assumes_zero_p(j, mkdir(path, S_IRWXU));
8100 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8101 created = true;
8102 }
8103
8104 if (!created) {
8105 if (sb.st_uid != uid) {
8106 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper user ownership. Repairing: %s", path);
8107 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8108 }
8109 if (sb.st_gid != 0) {
8110 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper group ownership. Repairing: %s", path);
8111 (void)job_assumes_zero_p(j, chown(path, uid, 0));
8112 }
8113 if (sb.st_mode != (S_IRWXU | S_IFDIR)) {
8114 job_log(j, LOG_NOTICE, "Per-user launchd directory has improper mode. Repairing: %s", path);
8115 (void)job_assumes_zero_p(j, chmod(path, S_IRWXU));
8116 }
8117 }
8118 }
8119
8120 void
8121 job_setup_per_user_directories(job_t j, uid_t uid, const char *label)
8122 {
8123 char path[PATH_MAX];
8124
8125 (void)snprintf(path, sizeof(path), LAUNCHD_DB_PREFIX "/%s", label);
8126 job_setup_per_user_directory(j, uid, path);
8127
8128 (void)snprintf(path, sizeof(path), LAUNCHD_LOG_PREFIX "/%s", label);
8129 job_setup_per_user_directory(j, uid, path);
8130 }
8131
8132 job_t
8133 jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
8134 {
8135 job_t ji = NULL;
8136 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8137 if (!ji->per_user) {
8138 continue;
8139 }
8140 if (ji->mach_uid != which_user) {
8141 continue;
8142 }
8143 if (SLIST_EMPTY(&ji->machservices)) {
8144 continue;
8145 }
8146 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8147 continue;
8148 }
8149 break;
8150 }
8151
8152 if (unlikely(ji == NULL)) {
8153 struct machservice *ms;
8154 char lbuf[1024];
8155
8156 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
8157
8158 sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
8159
8160 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
8161
8162 if (ji != NULL) {
8163 auditinfo_addr_t auinfo = {
8164 .ai_termid = {
8165 .at_type = AU_IPv4
8166 },
8167 .ai_auid = which_user,
8168 .ai_asid = AU_ASSIGN_ASID,
8169 };
8170
8171 if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8172 job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8173 (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8174
8175 /* Kinda lame that we have to do this, but we can't create an
8176 * audit session without joining it.
8177 */
8178 (void)job_assumes(ji, audit_session_join(launchd_audit_port));
8179 ji->asid = auinfo.ai_asid;
8180 } else {
8181 job_log(ji, LOG_WARNING, "Could not set audit session!");
8182 job_remove(ji);
8183 return NULL;
8184 }
8185
8186 ji->mach_uid = which_user;
8187 ji->per_user = true;
8188 ji->enable_transactions = true;
8189 job_setup_per_user_directories(ji, which_user, lbuf);
8190
8191 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8192 job_remove(ji);
8193 ji = NULL;
8194 } else {
8195 ms->upfront = true;
8196 ms->per_user_hack = true;
8197 ms->hide = true;
8198
8199 ji = job_dispatch(ji, false);
8200 }
8201 }
8202 } else {
8203 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
8204 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8205 }
8206
8207 return ji;
8208 }
8209
kern_return_t
job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
{
	// MIG handler: look up (or spin up) the per-user launchd for
	// which_user and return its bootstrap port in *up_cont. Non-root
	// callers are pinned to their own UID regardless of the argument.
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jpu;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (launchd_osinstaller) {
		// No per-user launchds while the OS installer is running.
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// There is no need for per-user launchd's on embedded.
	job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
	return BOOTSTRAP_UNKNOWN_SERVICE;
#endif

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);

	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Non-root callers may only look up their own per-user launchd.
	if (ldc->euid || ldc->uid) {
		which_user = ldc->euid ?: ldc->uid;
	}

	*up_cont = MACH_PORT_NULL;

	jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);

	return 0;
}
8253
kern_return_t
job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
{
	// MIG handler for bootstrap_check_in(): hand the calling job the
	// receive right for one of its declared Mach services. In strict mode
	// the service must already exist and belong to the caller; otherwise a
	// missing service is created on the fly (legacy behavior).
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
	struct ldcred *ldc = runtime_get_caller_creds();
	struct machservice *ms = NULL;
	job_t jo;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->dedicated_instance) {
		// Dedicated sub-job instances check in against their own service
		// list and report their instance UUID back to the caller.
		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
				uuid_copy(instance_id, j->instance_id);
				ms = msi;
				break;
			}
		}
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
	}

	if (strict) {
		// Strict check-in: the service must exist, be owned by the caller,
		// and not already be checked in.
		if (likely(ms != NULL)) {
			if (ms->job != j) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			} else if (ms->isActive) {
				return BOOTSTRAP_SERVICE_ACTIVE;
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else if (ms == NULL) {
		// Legacy path: create the service on demand for the caller.
		if (job_assumes(j, !j->dedicated_instance)) {
			*serviceportp = MACH_PORT_NULL;

			if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
				return BOOTSTRAP_NO_MEMORY;
			}

			// Treat this like a legacy job.
			if (!j->legacy_mach_job) {
				ms->isActive = true;
				ms->recv = false;
			}

			if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
				job_log(j, LOG_APPLEONLY, "Please add the following service to the configuration file for this job: %s", servicename);
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else {
		// The service exists: verify ownership and that it is not already
		// active before handing over the receive right.
		if (unlikely((jo = machservice_job(ms)) != j)) {
			static pid_t last_warned_pid;

			// Rate-limit the hijack warning to once per offending PID.
			if (last_warned_pid != ldc->pid) {
				job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
				last_warned_pid = ldc->pid;
			}

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (unlikely(machservice_active(ms))) {
			job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
	}

	job_checkin(j);
	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
8335
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	// MIG handler for bootstrap_register(): (re)bind servicename to the
	// caller-supplied send right. A MACH_PORT_NULL serviceport simply
	// deletes any existing registration owned by the caller.
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
		job_log(j, LOG_APPLEONLY, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
	}

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
		if (pid1_magic) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);

	if (unlikely(ms)) {
		// An existing registration may only be replaced by its owner, must
		// not be active, and must not be a check-in-style (recv) service.
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		if (ms->recv && (serviceport != MACH_PORT_NULL)) {
			job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		job_checkin(j);
		machservice_delete(j, ms, false);
	}

	if (likely(serviceport != MACH_PORT_NULL)) {
		if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}


	return BOOTSTRAP_SUCCESS;
}
8397
/* MIG server routine backing bootstrap_look_up2(): resolve a Mach service name
 * to a send right for the caller.
 *
 * j            - caller's job (NULL if the caller couldn't be identified)
 * srp          - reply port; handed off when the request is forwarded upward
 * servicename  - name to resolve (fixed-size name_t)
 * serviceportp - out: send right for the service on success
 * target_pid   - PID qualifier for per-PID service lookups
 * instance_id  - UUID selecting a specific instance of a multi-instance job
 * flags        - BOOTSTRAP_PER_PID_SERVICE / _SPECIFIC_INSTANCE /
 *                _STRICT_LOOKUP / _PRIVILEGED_SERVER
 *
 * Returns BOOTSTRAP_SUCCESS, BOOTSTRAP_UNKNOWN_SERVICE,
 * BOOTSTRAP_NOT_PRIVILEGED, VPROC_ERR_TRY_PER_USER (client should retry in its
 * per-user bootstrap), or MIG_NO_REPLY when the request was forwarded and the
 * parent launchd will reply on srp instead.
 */
kern_return_t
job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
{
	struct machservice *ms = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr;
	bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
	bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
	bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Requests originating inside an XPC domain are handled specially below:
	// no sandbox check, no forwarding outside the domain.
	bool xpc_req = (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN);

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	/* Non-root anonymous clients talking to the root bootstrap of the system
	 * launchd are redirected to their per-user launchd.
	 */
	if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

#if HAVE_SANDBOX
	/* We don't do sandbox checking for XPC domains because, by definition, all
	 * the services within your domain should be accessible to you.
	 */
	if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (per_pid_lookup) {
		// Per-PID services are scoped to target_pid and never searched upward.
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		if (xpc_req) {
			// Requests from XPC domains stay local.
			ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
		} else {
			/* A strict lookup which is privileged won't even bother trying to
			 * find a service if we're not hosting the root Mach bootstrap.
			 */
			if (strict_lookup && privileged) {
				if (inherited_bootstrap_port == MACH_PORT_NULL) {
					ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
				}
			} else {
				ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
			}
		}
	}

	if (likely(ms)) {
		// Resolve alias entries to the machservice they point at.
		ms = ms->alias ? ms->alias : ms;
		if (unlikely(specific_instance && ms->job->multiple_instances)) {
			// The caller asked for a specific instance of a multi-instance
			// job: find (or lazily create) the subjob with that UUID.
			job_t ji = NULL;
			job_t instance = NULL;
			LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
				if (uuid_compare(instance_id, ji->instance_id) == 0) {
					instance = ji;
					break;
				}
			}

			if (unlikely(instance == NULL)) {
				job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
				instance = job_new_subjob(ms->job, instance_id);
				if (job_assumes(j, instance != NULL)) {
					/* Disable this support for now. We only support having
					 * multi-instance jobs within private XPC domains.
					 */
#if 0
					/* If the job is multi-instance, in a singleton XPC domain
					 * and the request is not coming from within that singleton
					 * domain, we need to alias the new job into the requesting
					 * domain.
					 */
					if (!j->mgr->xpc_singleton && xpc_req) {
						(void)job_assumes(instance, job_new_alias(j->mgr, instance));
					}
#endif
					job_dispatch(instance, false);
				}
			}

			// Re-resolve the name against the chosen instance's own services.
			ms = NULL;
			if (job_assumes(j, instance != NULL)) {
				struct machservice *msi = NULL;
				SLIST_FOREACH(msi, &instance->machservices, sle) {
					/* sizeof(servicename) will return the size of a pointer,
					 * even though it's an array type, because when passing
					 * arrays as parameters in C, they implicitly degrade to
					 * pointers.
					 */
					if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
						ms = msi;
						break;
					}
				}
			}
		} else {
			// Hidden services are only visible once active; per-user-hack
			// placeholder services are never handed out directly.
			if (machservice_hidden(ms) && !machservice_active(ms)) {
				ms = NULL;
			} else if (unlikely(ms->per_user_hack)) {
				ms = NULL;
			}
		}
	}

	if (likely(ms)) {
		(void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
		*serviceportp = machservice_port(ms);

		kr = BOOTSTRAP_SUCCESS;
	} else if (strict_lookup && !privileged) {
		/* Hack: We need to simulate XPC's desire not to establish a hierarchy.
		 * So if XPC is doing the lookup, and it's not a privileged lookup, we
		 * won't forward. But if it is a privileged lookup, then we must
		 * forward.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	} else if (inherited_bootstrap_port != MACH_PORT_NULL) {
		// Requests from within an XPC domain don't get forwarded.
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		/* Clients potentially check the audit token of the reply to verify that
		 * the returned send right is trustworthy.
		 */
		(void)job_assumes_zero(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags));
		// The forward consumed the reply port; the parent will answer.
		return MIG_NO_REPLY;
	} else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/* 5240036 Should start background session when a lookup of CCacheServer
		 * occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt
		 * to guess what it is up to. If we find a EUID that isn't root, we
		 * force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
8545
8546 kern_return_t
8547 job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
8548 {
8549 if (!j) {
8550 return BOOTSTRAP_NO_MEMORY;
8551 }
8552
8553 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
8554 jobmgr_t jm = j->mgr;
8555
8556 if (jobmgr_parent(jm)) {
8557 *parentport = jobmgr_parent(jm)->jm_port;
8558 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
8559 *parentport = jm->jm_port;
8560 } else {
8561 (void)job_assumes_zero(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp));
8562 // The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now
8563 return MIG_NO_REPLY;
8564 }
8565 return BOOTSTRAP_SUCCESS;
8566 }
8567
8568 kern_return_t
8569 job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
8570 {
8571 if (!j) {
8572 return BOOTSTRAP_NO_MEMORY;
8573 }
8574
8575 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8576 *rootbsp = root_jobmgr->jm_port;
8577 (void)job_assumes_zero(j, launchd_mport_make_send(root_jobmgr->jm_port));
8578 } else {
8579 *rootbsp = inherited_bootstrap_port;
8580 (void)job_assumes_zero(j, launchd_mport_copy_send(inherited_bootstrap_port));
8581 }
8582
8583 return BOOTSTRAP_SUCCESS;
8584 }
8585
8586 kern_return_t
8587 job_mig_info(job_t j, name_array_t *servicenamesp,
8588 unsigned int *servicenames_cnt, name_array_t *servicejobsp,
8589 unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp,
8590 unsigned int *serviceactives_cnt, uint64_t flags)
8591 {
8592 name_array_t service_names = NULL;
8593 name_array_t service_jobs = NULL;
8594 bootstrap_status_array_t service_actives = NULL;
8595 unsigned int cnt = 0, cnt2 = 0;
8596 jobmgr_t jm;
8597
8598 if (!j) {
8599 return BOOTSTRAP_NO_MEMORY;
8600 }
8601
8602 if (launchd_flat_mach_namespace) {
8603 if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
8604 jm = j->mgr;
8605 } else {
8606 jm = root_jobmgr;
8607 }
8608 } else {
8609 jm = j->mgr;
8610 }
8611
8612 unsigned int i = 0;
8613 struct machservice *msi = NULL;
8614 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8615 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
8616 cnt += !msi->per_pid ? 1 : 0;
8617 }
8618 }
8619
8620 if (cnt == 0) {
8621 goto out;
8622 }
8623
8624 mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
8625 if (!job_assumes(j, service_names != NULL)) {
8626 goto out_bad;
8627 }
8628
8629 mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
8630 if (!job_assumes(j, service_jobs != NULL)) {
8631 goto out_bad;
8632 }
8633
8634 mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
8635 if (!job_assumes(j, service_actives != NULL)) {
8636 goto out_bad;
8637 }
8638
8639 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8640 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
8641 if (!msi->per_pid) {
8642 strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
8643 msi = msi->alias ? msi->alias : msi;
8644 if (msi->job->mgr->shortdesc) {
8645 strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
8646 } else {
8647 strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
8648 }
8649 service_actives[cnt2] = machservice_status(msi);
8650 cnt2++;
8651 }
8652 }
8653 }
8654
8655 (void)job_assumes(j, cnt == cnt2);
8656
8657 out:
8658 *servicenamesp = service_names;
8659 *servicejobsp = service_jobs;
8660 *serviceactivesp = service_actives;
8661 *servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;
8662
8663 return BOOTSTRAP_SUCCESS;
8664
8665 out_bad:
8666 if (service_names) {
8667 mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
8668 }
8669 if (service_jobs) {
8670 mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
8671 }
8672 if (service_actives) {
8673 mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
8674 }
8675
8676 return BOOTSTRAP_NO_MEMORY;
8677 }
8678
8679 kern_return_t
8680 job_mig_lookup_children(job_t j, mach_port_array_t *child_ports,
8681 mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names,
8682 mach_msg_type_number_t *child_names_cnt,
8683 bootstrap_property_array_t *child_properties,
8684 mach_msg_type_number_t *child_properties_cnt)
8685 {
8686 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
8687 if (!j) {
8688 return BOOTSTRAP_NO_MEMORY;
8689 }
8690
8691 struct ldcred *ldc = runtime_get_caller_creds();
8692
8693 /* Only allow root processes to look up children, even if we're in the per-user launchd.
8694 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
8695 * in a non-flat namespace.
8696 */
8697 if (ldc->euid != 0) {
8698 job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
8699 return BOOTSTRAP_NOT_PRIVILEGED;
8700 }
8701
8702 unsigned int cnt = 0;
8703
8704 jobmgr_t jmr = j->mgr;
8705 jobmgr_t jmi = NULL;
8706 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8707 cnt++;
8708 }
8709
8710 // Find our per-user launchds if we're PID 1.
8711 job_t ji = NULL;
8712 if (pid1_magic) {
8713 LIST_FOREACH(ji, &jmr->jobs, sle) {
8714 cnt += ji->per_user ? 1 : 0;
8715 }
8716 }
8717
8718 if (cnt == 0) {
8719 return BOOTSTRAP_NO_CHILDREN;
8720 }
8721
8722 mach_port_array_t _child_ports = NULL;
8723 mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
8724 if (!job_assumes(j, _child_ports != NULL)) {
8725 kr = BOOTSTRAP_NO_MEMORY;
8726 goto out_bad;
8727 }
8728
8729 name_array_t _child_names = NULL;
8730 mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
8731 if (!job_assumes(j, _child_names != NULL)) {
8732 kr = BOOTSTRAP_NO_MEMORY;
8733 goto out_bad;
8734 }
8735
8736 bootstrap_property_array_t _child_properties = NULL;
8737 mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
8738 if (!job_assumes(j, _child_properties != NULL)) {
8739 kr = BOOTSTRAP_NO_MEMORY;
8740 goto out_bad;
8741 }
8742
8743 unsigned int cnt2 = 0;
8744 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8745 if (jobmgr_assumes_zero(jmi, launchd_mport_make_send(jmi->jm_port)) == KERN_SUCCESS) {
8746 _child_ports[cnt2] = jmi->jm_port;
8747 } else {
8748 _child_ports[cnt2] = MACH_PORT_NULL;
8749 }
8750
8751 strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
8752 _child_properties[cnt2] = jmi->properties;
8753
8754 cnt2++;
8755 }
8756
8757 if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
8758 if (ji->per_user) {
8759 if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
8760 mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));
8761
8762 if (job_assumes_zero(ji, launchd_mport_copy_send(port)) == KERN_SUCCESS) {
8763 _child_ports[cnt2] = port;
8764 } else {
8765 _child_ports[cnt2] = MACH_PORT_NULL;
8766 }
8767 } else {
8768 _child_ports[cnt2] = MACH_PORT_NULL;
8769 }
8770
8771 strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
8772 _child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;
8773
8774 cnt2++;
8775 }
8776 }
8777
8778 *child_names_cnt = cnt;
8779 *child_ports_cnt = cnt;
8780 *child_properties_cnt = cnt;
8781
8782 *child_names = _child_names;
8783 *child_ports = _child_ports;
8784 *child_properties = _child_properties;
8785
8786 unsigned int i = 0;
8787 for (i = 0; i < cnt; i++) {
8788 job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
8789 }
8790
8791 return BOOTSTRAP_SUCCESS;
8792 out_bad:
8793 if (_child_ports) {
8794 mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
8795 }
8796
8797 if (_child_names) {
8798 mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_ports[0]));
8799 }
8800
8801 if (_child_properties) {
8802 mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
8803 }
8804
8805 return kr;
8806 }
8807
8808 kern_return_t
8809 job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
8810 {
8811 struct ldcred *ldc = runtime_get_caller_creds();
8812 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
8813 return BOOTSTRAP_NOT_PRIVILEGED;
8814 }
8815
8816 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
8817 * directly by launchd as agents.
8818 */
8819 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
8820 if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
8821 *managed = true;
8822 }
8823
8824 return BOOTSTRAP_SUCCESS;
8825 }
8826
8827 kern_return_t
8828 job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
8829 {
8830 if (!j) {
8831 return BOOTSTRAP_NO_MEMORY;
8832 }
8833
8834 struct ldcred *ldc = runtime_get_caller_creds();
8835 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
8836
8837 #if HAVE_SANDBOX
8838 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8839 return BOOTSTRAP_NOT_PRIVILEGED;
8840 }
8841 #endif
8842
8843 mach_port_t _mp = MACH_PORT_NULL;
8844 if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
8845 job_t target_j = job_find(NULL, label);
8846 if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
8847 if (target_j->j_port == MACH_PORT_NULL) {
8848 (void)job_assumes(target_j, job_setup_machport(target_j) == true);
8849 }
8850
8851 _mp = target_j->j_port;
8852 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
8853 } else {
8854 kr = BOOTSTRAP_NO_MEMORY;
8855 }
8856 }
8857
8858 *mp = _mp;
8859 return kr;
8860 }
8861
/* MIG server routine: attach an audit-session port to every job that was
 * parked on s_needing_sessions waiting for the session identified by `uuid`.
 * Matching jobs get the port (with a copied send right per job), are removed
 * from the waiting list, and are dispatched (or, for the event monitor,
 * pinged). The extra send right held by this MiG request is released at the
 * end so the session can die with its last job.
 *
 * Fix: removed a dead per-iteration `uuid_unparse()` into a local buffer
 * (`uuid_str2`) that was never read — leftover debug scaffolding.
 */
kern_return_t
job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
{
#if TARGET_OS_EMBEDDED
	// NOTE(review): the embedded early-return does not deallocate asport —
	// presumably intentional for this platform hack; confirm before changing.
	return KERN_SUCCESS;
#endif

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	uuid_string_t uuid_str;
	uuid_unparse(uuid, uuid_str);
	job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);

	job_t ji = NULL, jt = NULL;
	LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
		if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
			uuid_clear(ji->expected_audit_uuid);
			if (asport != MACH_PORT_NULL) {
				job_log(ji, LOG_DEBUG, "Job should join session with port 0x%x", asport);
				// Each adopting job holds its own send right on the session.
				(void)job_assumes_zero(j, launchd_mport_copy_send(asport));
			} else {
				job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
			}

			ji->asport = asport;
			LIST_REMOVE(ji, needing_session_sle);

			if (ji->event_monitor) {
				eventsystem_ping();
			} else {
				job_dispatch(ji, false);
			}
		}
	}

	/* Each job that the session port was set for holds a reference. At the end of
	 * the loop, there will be one extra reference belonging to this MiG protocol.
	 * We need to release it so that the session goes away when all the jobs
	 * referencing it are unloaded.
	 */
	(void)job_assumes_zero(j, launchd_mport_deallocate(asport));

	return KERN_SUCCESS;
}
8911
8912 jobmgr_t
8913 jobmgr_find_by_name(jobmgr_t jm, const char *where)
8914 {
8915 jobmgr_t jmi, jmi2;
8916
8917 // NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic.
8918 if (where == NULL) {
8919 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
8920 where = VPROCMGR_SESSION_LOGINWINDOW;
8921 } else {
8922 where = VPROCMGR_SESSION_AQUA;
8923 }
8924 }
8925
8926 if (strcasecmp(jm->name, where) == 0) {
8927 return jm;
8928 }
8929
8930 if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
8931 jmi = root_jobmgr;
8932 goto jm_found;
8933 }
8934
8935 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
8936 if (unlikely(jmi->shutting_down)) {
8937 continue;
8938 } else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
8939 continue;
8940 } else if (strcasecmp(jmi->name, where) == 0) {
8941 goto jm_found;
8942 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
8943 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
8944 if (strcasecmp(jmi2->name, where) == 0) {
8945 jmi = jmi2;
8946 goto jm_found;
8947 }
8948 }
8949 }
8950 }
8951
8952 jm_found:
8953 return jmi;
8954 }
8955
/* MIG server routine: adopt ("move") a Mach sub-bootstrap owned by another
 * launchd into this one as a new job manager of the given session type.
 * Grabs the subset's request/receive rights and its service list from the
 * remote launchd, builds a new jobmgr around them, and re-imports each
 * service under the local anonymous job for its PID.
 *
 * On success (kr == 0) the target_subset and asport send rights held by this
 * request are released; on failure any partially-built manager is shut down.
 * Returns 0, a bootstrap error, or whatever _vproc_grab_subset() failed with.
 */
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	jobmgr_t jmr = NULL;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// If target_subset translates to one of our own managers, the caller is
	// trying to move a session into the launchd that already owns it.
	if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	// Ask the remote launchd for the subset's ports and its service manifest.
	kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
	if (job_assumes_zero(j, kr) != 0) {
		goto out;
	}

	// The manifest and the port array must be parallel.
	if (launch_data_array_get_count(out_obj_array) != l2l_port_cnt) {
		osx_assert_zero(l2l_port_cnt);
	}

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;

	/* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
	 * processing an IPC request, we'll do this action before the new job manager can get any IPC
	 * requests. This serialization is guaranteed since we are single-threaded in that respect.
	 */
	if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
		// This is so awful.
		// Remove the job from its current job manager.
		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, pid_hash_sle);

		// Put the job into the target job manager.
		LIST_INSERT_HEAD(&jmr->jobs, j, sle);
		LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

		j->mgr = jmr;
		job_set_global_on_demand(j, true);

		// Keep launchd alive while this migrated job exists.
		if (!j->holds_ref) {
			job_log(j, LOG_PERF, "Job moved subset into: %s", j->mgr->name);
			j->holds_ref = true;
			runtime_add_ref();
		}
	}

	// Re-create each transferred machservice under the local anonymous job
	// that represents its owning PID.
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		(void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (unlikely(!j_for_service)) {
			// The PID probably exited
			(void)job_assumes_zero(j, launchd_mport_deallocate(l2l_ports[l2l_i]));
			continue;
		}

		// machservice_new() consumes the port right on success.
		if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	if (kr == 0) {
		// Success: release the rights this request carried in.
		if (target_subset) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(target_subset));
		}
		if (asport) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	} else if (jmr) {
		// Failure after the manager was created: tear it down again.
		jobmgr_shutdown(jmr);
	}

	return kr;
}
9074
/* MIG server routine: initialize the caller's job manager as a named session
 * (e.g. Aqua, LoginWindow, StandardIO). The session's audit-session port is
 * recorded on the bookkeeping job returned by jobmgr_init_session(), which is
 * then dispatched. Re-initializing an already-initialized session fails with
 * BOOTSTRAP_NOT_PRIVILEGED.
 */
kern_return_t
job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_t j2;

	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (j->mgr->session_initialized) {
		job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
		kr = BOOTSTRAP_NOT_PRIVILEGED;
	} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		jobmgr_t jmi;

		/*
		 * 5330262
		 *
		 * We're working around LoginWindow and the WindowServer.
		 *
		 * In practice, there is only one LoginWindow session. Unfortunately, for certain
		 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
		 * spawns a replacement loginwindow session before cleaning up the previous one.
		 *
		 * We're going to use the creation of a new LoginWindow context as a clue that the
		 * previous LoginWindow context is on the way out and therefore we should just
		 * kick-start the shutdown of it.
		 */

		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if (unlikely(jmi->shutting_down)) {
				continue;
			} else if (strcasecmp(jmi->name, session_type) == 0) {
				jobmgr_shutdown(jmi);
				break;
			}
		}
	}

	jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
	// session_type is a fixed-size MIG name_t, same size as name_init.
	strcpy(j->mgr->name_init, session_type);

	if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
		// The bookkeeping job adopts the audit-session port for the session.
		j2->asport = asport;
		(void)job_assumes(j, job_dispatch(j2, true));
		kr = BOOTSTRAP_SUCCESS;
	}

	return kr;
}
9126
/* MIG server routine: move the calling (anonymous) job into another session's
 * job manager, creating an implicit-subset manager for the session if it does
 * not yet exist. Only permitted in per-user launchds (never PID 1) and only
 * for anonymous jobs; managed jobs must use LimitLoadToSessionType instead.
 * On success the caller's bootstrap port becomes the target session's port.
 */
kern_return_t
job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);

	if (!job_assumes(j, pid1_magic == false)) {
		job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!j->anonymous) {
		job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
	if (target_jm == j->mgr) {
		// Already there: just drop the rights this request carried in.
		job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
		(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		(void)job_assumes_zero(j, launchd_mport_deallocate(requestor_port));
		*new_bsport = target_jm->jm_port;
		return BOOTSTRAP_SUCCESS;
	}

	if (!target_jm) {
		// The session doesn't exist yet: create it as an implicit subset.
		// jobmgr_new() consumes requestor_port and adopts asport.
		target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
		if (target_jm) {
			target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	}

	if (!job_assumes(j, target_jm != NULL)) {
		job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
		return BOOTSTRAP_NO_MEMORY;
	}

	// Remove the job from it's current job manager.
	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, pid_hash_sle);

	// Remember whether the job was on its manager's global-environment list
	// so the membership can be carried over.
	job_t ji = NULL, jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
		if (ji == j) {
			LIST_REMOVE(ji, global_env_sle);
			break;
		}
	}

	// Put the job into the target job manager.
	LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
	LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

	if (ji) {
		LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
	}

	// Move our Mach services over if we're not in a flat namespace.
	if (!launchd_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
		struct machservice *msi = NULL, *msit = NULL;
		SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
			LIST_REMOVE(msi, name_hash_sle);
			LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}

	j->mgr = target_jm;

	if (!j->holds_ref) {
		/* Anonymous jobs which move around are particularly interesting to us, so we want to
		 * stick around while they're still around.
		 * For example, login calls into the PAM launchd module, which moves the process into
		 * the StandardIO session by default. So we'll hold a reference on that job to prevent
		 * ourselves from going away.
		 */
		j->holds_ref = true;
		job_log(j, LOG_PERF, "Job switched into manager: %s", j->mgr->name);
		runtime_add_ref();
	}

	*new_bsport = target_jm->jm_port;

	return KERN_SUCCESS;
}
9221
9222 kern_return_t
9223 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
9224 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
9225 mach_port_array_t *portsp, unsigned int *ports_cnt)
9226 {
9227 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
9228 mach_port_array_t ports = NULL;
9229 unsigned int cnt = 0, cnt2 = 0;
9230 size_t packed_size;
9231 struct machservice *ms;
9232 jobmgr_t jm;
9233 job_t ji;
9234
9235 if (!j) {
9236 return BOOTSTRAP_NO_MEMORY;
9237 }
9238
9239 jm = j->mgr;
9240
9241 if (unlikely(!pid1_magic)) {
9242 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
9243 return BOOTSTRAP_NOT_PRIVILEGED;
9244 }
9245 if (unlikely(jobmgr_parent(jm) == NULL)) {
9246 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
9247 return BOOTSTRAP_NOT_PRIVILEGED;
9248 }
9249 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
9250 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
9251 return BOOTSTRAP_NOT_PRIVILEGED;
9252 }
9253 if (unlikely(!j->anonymous)) {
9254 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
9255 return BOOTSTRAP_NOT_PRIVILEGED;
9256 }
9257
9258 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
9259
9260 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
9261 if (!job_assumes(j, outdata_obj_array)) {
9262 goto out_bad;
9263 }
9264
9265 *outdataCnt = 20 * 1024 * 1024;
9266 mig_allocate(outdata, *outdataCnt);
9267 if (!job_assumes(j, *outdata != 0)) {
9268 return 1;
9269 }
9270
9271 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9272 if (!ji->anonymous) {
9273 continue;
9274 }
9275 SLIST_FOREACH(ms, &ji->machservices, sle) {
9276 cnt++;
9277 }
9278 }
9279
9280 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
9281 if (!job_assumes(j, ports != NULL)) {
9282 goto out_bad;
9283 }
9284
9285 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9286 if (!ji->anonymous) {
9287 continue;
9288 }
9289
9290 SLIST_FOREACH(ms, &ji->machservices, sle) {
9291 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
9292 (void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
9293 } else {
9294 goto out_bad;
9295 }
9296
9297 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
9298 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
9299 } else {
9300 goto out_bad;
9301 }
9302
9303 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
9304 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
9305 } else {
9306 goto out_bad;
9307 }
9308
9309 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
9310 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
9311 } else {
9312 goto out_bad;
9313 }
9314
9315 ports[cnt2] = machservice_port(ms);
9316
9317 // Increment the send right by one so we can shutdown the jobmgr cleanly
9318 (void)jobmgr_assumes_zero(jm, launchd_mport_copy_send(ports[cnt2]));
9319 cnt2++;
9320 }
9321 }
9322
9323 (void)job_assumes(j, cnt == cnt2);
9324
9325 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
9326 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
9327 if (!job_assumes(j, packed_size != 0)) {
9328 goto out_bad;
9329 }
9330
9331 launch_data_free(outdata_obj_array);
9332
9333 *portsp = ports;
9334 *ports_cnt = cnt;
9335
9336 *reqport = jm->req_port;
9337 *rcvright = jm->jm_port;
9338
9339 jm->req_port = 0;
9340 jm->jm_port = 0;
9341
9342 workaround_5477111 = j;
9343
9344 jobmgr_shutdown(jm);
9345
9346 return BOOTSTRAP_SUCCESS;
9347
9348 out_bad:
9349 if (outdata_obj_array) {
9350 launch_data_free(outdata_obj_array);
9351 }
9352 if (*outdata) {
9353 mig_deallocate(*outdata, *outdataCnt);
9354 }
9355 if (ports) {
9356 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
9357 }
9358
9359 return BOOTSTRAP_NO_MEMORY;
9360 }
9361
9362 kern_return_t
9363 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
9364 {
9365 int bsdepth = 0;
9366 jobmgr_t jmr;
9367
9368 if (!j) {
9369 return BOOTSTRAP_NO_MEMORY;
9370 }
9371 if (j->mgr->shutting_down) {
9372 return BOOTSTRAP_UNKNOWN_SERVICE;
9373 }
9374
9375 jmr = j->mgr;
9376
9377 while ((jmr = jobmgr_parent(jmr)) != NULL) {
9378 bsdepth++;
9379 }
9380
9381 // Since we use recursion, we need an artificial depth for subsets
9382 if (unlikely(bsdepth > 100)) {
9383 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
9384 return BOOTSTRAP_NO_MEMORY;
9385 }
9386
9387 char name[NAME_MAX];
9388 snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
9389
9390 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
9391 if (unlikely(requestorport == MACH_PORT_NULL)) {
9392 return BOOTSTRAP_NOT_PRIVILEGED;
9393 }
9394 return BOOTSTRAP_NO_MEMORY;
9395 }
9396
9397 *subsetportp = jmr->jm_port;
9398 jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
9399
9400 /* A job could create multiple subsets, so only add a reference the first time
9401 * it does so we don't have to keep a count.
9402 */
9403 if (j->anonymous && !j->holds_ref) {
9404 job_log(j, LOG_PERF, "Job created subset: %s", jmr->name);
9405 j->holds_ref = true;
9406 runtime_add_ref();
9407 }
9408
9409 job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
9410 return BOOTSTRAP_SUCCESS;
9411 }
9412
/* Imports a single service dictionary `pload` into the appropriate domain.
 * The service may request redirection into a singleton domain (system,
 * per-user, or per-session) via LAUNCH_JOBKEY_XPCDOMAIN; otherwise it is
 * imported directly into `jm`. Returns the imported (or aliased) job, or
 * NULL with errno set on failure.
 */
job_t
_xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
{
	jobmgr_t where2put = NULL;

	if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
		errno = EINVAL;
		return NULL;
	}

	launch_data_t ldlabel = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL);
	if (!ldlabel || launch_data_get_type(ldlabel) != LAUNCH_DATA_STRING) {
		errno = EINVAL;
		return NULL;
	}

	const char *label = launch_data_get_string(ldlabel);
	jobmgr_log(jm, LOG_DEBUG, "Importing service: %s", label);

	// Resolve the optional destination-domain request.
	launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
	if (destname) {
		bool supported_domain = false;

		if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
			const char *str = launch_data_get_string(destname);
			if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
				where2put = _s_xpc_system_domain;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
				where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
				supported_domain = true;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
				// NOTE(review): only the per-user branch sets supported_domain,
				// so per-session (and system) targets still get the
				// multiple-instances rejection below — confirm intended.
				where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
			} else {
				jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
				errno = EINVAL;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
			errno = EINVAL;
		}

		// Multiple-instance services are only allowed in supported domains.
		if (where2put && !supported_domain) {
			launch_data_t mi = NULL;
			if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
				if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
					jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
					where2put = NULL;
					errno = EINVAL;
				}
			}
		}
	} else {
		// No redirection requested: import into the caller's domain.
		where2put = jm;
	}

	job_t j = NULL;
	if (where2put) {
		/* Gross. If the service already exists in a singleton domain, then
		 * jobmgr_import2() will return the existing job. But if we fail to alias
		 * this job, we will normally want to remove it. But if we did not create
		 * it in the first place, then we need to avoid removing it. So check
		 * errno against EEXIST in the success case and if it's EEXIST, then do
		 * not remove the original job in the event of a failed alias.
		 *
		 * This really needs to be re-thought, but I think it'll require a larger
		 * evaluation of launchd's data structures. Right now, once a job is
		 * imported into a singleton domain, it won't be removed until the system
		 * shuts down, but that may not always be true. If it ever changes, we'll
		 * have a problem because we'll have to account for all existing aliases
		 * and clean them up somehow. Or just start ref-counting. I knew this
		 * aliasing stuff would be trouble...
		 *
		 * <rdar://problem/10646503>
		 */
		jobmgr_log(where2put, LOG_DEBUG, "Importing service...");

		// errno is the side channel distinguishing "created" from "pre-existing".
		errno = 0;
		if ((j = jobmgr_import2(where2put, pload))) {
			bool created = (errno != EEXIST);
			j->xpc_service = true;

			if (where2put->xpc_singleton) {
				/* If the service was destined for one of the global domains,
				 * then we have to alias it into our local domain to reserve the
				 * name.
				 */
				job_t ja = NULL;
				if (!(ja = job_new_alias(jm, j))) {
					/* If we failed to alias the job because of a conflict over
					 * the label, then we remove it from the global domain. We
					 * don't want to risk having imported a malicious job into
					 * one of the global domains.
					 */
					if (errno != EEXIST) {
						job_log(j, LOG_ERR, "Failed to alias job into: %s: %d: %s", where2put->name, errno, strerror(errno));
					} else {
						errno = 0;
					}

					// Only remove the singleton job if this call created it.
					if (created) {
						jobmgr_log(jm, LOG_WARNING, "Singleton service already existed in job-local namespace. Removing: %s", j->label);
						job_remove(j);
					}

					j = NULL;
				} else {
					jobmgr_log(jm, LOG_DEBUG, "Aliased service into local domain: %s", j->label);
					(void)job_dispatch(j, false);
					ja->xpc_service = true;
					j = ja;
				}
			} else {
				(void)job_dispatch(j, false);
			}
		}
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Could not find destination for service: %s", label);
	}

	return j;
}
9534
9535 int
9536 _xpc_domain_import_services(job_t j, launch_data_t services)
9537 {
9538 int error = EINVAL;
9539 if (launch_data_get_type(services) != LAUNCH_DATA_ARRAY) {
9540 return error;
9541 }
9542
9543 size_t i = 0;
9544 size_t c = launch_data_array_get_count(services);
9545 jobmgr_log(j->mgr, LOG_DEBUG, "Importing new services: %lu", c);
9546
9547 for (i = 0; i < c; i++) {
9548 jobmgr_log(j->mgr, LOG_DEBUG, "Importing service at index: %lu", i);
9549
9550 job_t nj = NULL;
9551 launch_data_t ploadi = launch_data_array_get_index(services, i);
9552 if (!(nj = _xpc_domain_import_service(j->mgr, ploadi))) {
9553 if (!j->mgr->session_initialized && errno) {
9554 /* Service import failures are only fatal if the domain is being
9555 * initialized. If we're extending the domain, we can run into
9556 * errors with services already existing, so we just ignore them.
9557 * In the case of a domain extension, we don't want to halt the
9558 * operation if we run into an error with one service.
9559 *
9560 * <rdar://problem/10842779>
9561 */
9562 jobmgr_log(j->mgr, LOG_ERR, "Failed to import service at index: %lu: %d: %s", i, errno, strerror(errno));
9563 error = errno;
9564 break;
9565 }
9566 } else {
9567 jobmgr_log(j->mgr, LOG_DEBUG, "Imported service: %s", nj->label);
9568 }
9569 }
9570
9571 if (i == c) {
9572 error = 0;
9573 }
9574
9575 return error;
9576 }
9577
9578 kern_return_t
9579 xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
9580 {
9581 if (unlikely(!pid1_magic)) {
9582 job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
9583 return BOOTSTRAP_NOT_PRIVILEGED;
9584 }
9585 if (!j || !MACH_PORT_VALID(reqport)) {
9586 return BOOTSTRAP_UNKNOWN_SERVICE;
9587 }
9588 if (root_jobmgr->shutting_down) {
9589 jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
9590 return BOOTSTRAP_NOT_PRIVILEGED;
9591 }
9592 if (!j->xpc_bootstrapper) {
9593 job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
9594 return BOOTSTRAP_NOT_PRIVILEGED;
9595 }
9596
9597 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9598 /* All XPC domains are children of the root job manager. What we're creating
9599 * here is really just a skeleton. By creating it, we're adding reqp to our
9600 * port set. It will have two messages on it. The first specifies the
9601 * environment of the originator. This is so we can cache it and hand it to
9602 * xpcproxy to bootstrap our services. The second is the set of jobs that is
9603 * to be bootstrapped in.
9604 */
9605 jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
9606 if (job_assumes(j, jm != NULL)) {
9607 jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
9608 jm->shortdesc = "private";
9609 kr = BOOTSTRAP_SUCCESS;
9610 }
9611
9612 return kr;
9613 }
9614
/* MIG routine: caches the originating process's environment (bootstrap,
 * exception, and reply ports, plus an opaque context blob and credentials)
 * on an XPC domain skeleton created by xpc_domain_import2(). A non-NULL
 * req_asport acts as the "environment already set" sentinel, so this can
 * succeed at most once per domain.
 */
kern_return_t
xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
{
	if (!j) {
		/* Due to the whacky nature of XPC service bootstrapping, we can end up
		 * getting this message long after the requesting process has gone away.
		 * See <rdar://problem/8593143>.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jobmgr_t jm = j->mgr;
	// Only XPC domains accept an environment.
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Environment may only be set once (see sentinel note above).
	if (jm->req_asport != MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	struct proc_bsdshortinfo proc;
	// proc_pidinfo() returns 0 on failure; the requestor may already be gone.
	if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}

		// The skeleton is useless without its originator; tear it down.
		jm->error = errno;
		jobmgr_remove(jm);
		return BOOTSTRAP_NO_MEMORY;
	}

#if !TARGET_OS_EMBEDDED
	if (jobmgr_assumes_zero(jm, audit_session_port(ldc->asid, &jm->req_asport)) != 0) {
		jm->error = EPERM;
		jobmgr_remove(jm);
		job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#else
	// Embedded has no audit-session ports; mark the slot deliberately dead
	// so the "already set" sentinel still works.
	jm->req_asport = MACH_PORT_DEAD;
#endif

	// Name the domain after the requestor and cache its environment for
	// xpc_domain_check_in() to hand back later.
	(void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s[%i]", proc.pbsi_comm, ldc->pid);
	strlcpy(jm->owner, proc.pbsi_comm, sizeof(jm->owner));
	jm->req_bsport = bsport;
	jm->req_excport = excport;
	jm->req_rport = rp;
	jm->req_ctx = ctx;
	jm->req_ctx_sz = ctx_sz;
	jm->req_pid = ldc->pid;
	jm->req_euid = ldc->euid;
	jm->req_egid = ldc->egid;
	jm->req_asid = ldc->asid;

	return KERN_SUCCESS;
}
9672
/* MIG routine: one-time bootstrap of an XPC domain with its initial service
 * set. Only callable by the process whose job is marked xpc_bootstrapper,
 * and only before the domain is initialized. On failure, the whole domain
 * is destroyed; on success, the originator waiting on req_rport is woken.
 */
kern_return_t
xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	// The caller's PID must belong to the registered XPC bootstrapper job.
	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!(rootj && rootj->xpc_bootstrapper)) {
		job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// This is just for XPC domains (for now).
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (j->mgr->session_initialized) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!services) {
		return BOOTSTRAP_NO_MEMORY;
	}

	int error = _xpc_domain_import_services(j, services);
	if (error) {
		// A failed initial import invalidates the entire domain.
		j->mgr->error = error;
		jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
		jobmgr_remove(j->mgr);
	} else {
		j->mgr->session_initialized = true;
		// Wake the originator waiting on the reply port cached by
		// xpc_domain_set_environment(), then drop our reference to it.
		(void)jobmgr_assumes_zero(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS));
		j->mgr->req_rport = MACH_PORT_NULL;

		/* Returning a failure code will destroy the message, whereas returning
		 * success will not, so we need to clean up here.
		 */
		mig_deallocate(services_buff, services_sz);
		error = BOOTSTRAP_SUCCESS;
	}

	return error;
}
9720
9721 kern_return_t
9722 xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport,
9723 mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid,
9724 int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
9725 {
9726 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9727 return BOOTSTRAP_UNKNOWN_SERVICE;
9728 }
9729 jobmgr_t jm = j->mgr;
9730 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9731 return BOOTSTRAP_NOT_PRIVILEGED;
9732 }
9733
9734 if (jm->req_asport == MACH_PORT_NULL) {
9735 return BOOTSTRAP_NOT_PRIVILEGED;
9736 }
9737
9738 *bsport = jm->req_bsport;
9739 *sbsport = root_jobmgr->jm_port;
9740 *excport = jm->req_excport;
9741 *asport = jm->req_asport;
9742 *uid = jm->req_euid;
9743 *gid = jm->req_egid;
9744 *asid = jm->req_asid;
9745
9746 *ctx = jm->req_ctx;
9747 *ctx_sz = jm->req_ctx_sz;
9748
9749 return KERN_SUCCESS;
9750 }
9751
9752 kern_return_t
9753 xpc_domain_get_service_name(job_t j, event_name_t name)
9754 {
9755 if (!j) {
9756 return BOOTSTRAP_NO_MEMORY;
9757 }
9758 if (!j->xpc_service) {
9759 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
9760 return BOOTSTRAP_NOT_PRIVILEGED;
9761 }
9762
9763 struct machservice * ms = SLIST_FIRST(&j->machservices);
9764 if (!ms) {
9765 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name of job with no MachServices: %s", j->label);
9766 return BOOTSTRAP_UNKNOWN_SERVICE;
9767 }
9768
9769 (void)strlcpy(name, ms->name, sizeof(event_name_t));
9770 return BOOTSTRAP_SUCCESS;
9771 }
9772
9773 #if XPC_LPI_VERSION >= 20111216
9774 kern_return_t
9775 xpc_domain_add_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
9776 {
9777 if (!j) {
9778 return BOOTSTRAP_UNKNOWN_SERVICE;
9779 }
9780
9781 job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
9782 if (!(rootj && rootj->xpc_bootstrapper)) {
9783 job_log(j, LOG_ERR, "Attempt to add service to XPC domain by unprivileged job.");
9784 return BOOTSTRAP_NOT_PRIVILEGED;
9785 }
9786
9787 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9788 return BOOTSTRAP_NOT_PRIVILEGED;
9789 }
9790
9791 size_t offset = 0;
9792 launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
9793 if (!services) {
9794 return BOOTSTRAP_NO_MEMORY;
9795 }
9796
9797 int error = _xpc_domain_import_services(j, services);
9798 if (!error) {
9799 mig_deallocate(services_buff, services_sz);
9800 }
9801
9802 return error;
9803 }
9804 #endif
9805
9806 #pragma mark XPC Events
/* Finds — or lazily creates — the MachService used as job j's event channel
 * for `stream`. On success, returns 0 and stores the channel in *ms. Returns
 * EEXIST if the job already registered an ordinary MachService by that name,
 * or EXNOMEM if a new channel could not be allocated.
 */
int
xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms)
{
	int error = EXNOMEM;
	struct machservice *msi = NULL;
	// Look for an existing MachService with the channel's name.
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (strcmp(stream, msi->name) == 0) {
			break;
		}
	}

	if (!msi) {
		// No service by that name; create the channel now.
		mach_port_t sp = MACH_PORT_NULL;
		msi = machservice_new(j, stream, &sp, false);
		if (!msi) {
			return EXNOMEM;
		}

		job_log(j, LOG_DEBUG, "Creating new MachService for stream: %s", stream);
		/* Hack to keep this from being publicly accessible through
		 * bootstrap_look_up().
		 */
		if (!j->dedicated_instance) {
			LIST_REMOVE(msi, name_hash_sle);
		}
		msi->event_channel = true;

		/* If we call job_dispatch() here before the audit session for the job
		 * has been set, we'll end up not watching this service. But we also have
		 * to take care not to watch the port if the job is active.
		 *
		 * See <rdar://problem/10357855>.
		 */
		if (!j->currently_ignored) {
			machservice_watch(j, msi);
		}

		error = 0;
		*ms = msi;
	} else if (!msi->event_channel) {
		// Name collision with an ordinary (non-channel) MachService.
		job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
		error = EEXIST;
	} else {
		// Channel already exists; reuse it.
		error = 0;
		*ms = msi;
	}

	return error;
}
9856
9857 int
9858 xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply)
9859 {
9860 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
9861 if (!stream) {
9862 return EXINVAL;
9863 }
9864
9865 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
9866 if (!token) {
9867 return EXINVAL;
9868 }
9869
9870 job_log(j, LOG_DEBUG, "Getting event name for stream/token: %s/0x%llu", stream, token);
9871
9872 int result = ESRCH;
9873 struct externalevent *event = externalevent_find(stream, token);
9874 if (event && j->event_monitor) {
9875 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
9876 xpc_dictionary_set_string(reply2, XPC_EVENT_ROUTINE_KEY_NAME, event->name);
9877 *reply = reply2;
9878
9879 job_log(j, LOG_DEBUG, "Found: %s", event->name);
9880 result = 0;
9881 }
9882
9883 return result;
9884 }
9885
/* XPC events op: sets (event != NULL) or removes (event == NULL) the external
 * event registered by job j under (stream, key). Any existing event with the
 * same key is deleted first, so "set" is replace semantics. Returns 0 on
 * success with a reply dictionary in *reply, else EXINVAL/EXNOMEM.
 */
int
xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);
	if (!key) {
		return EXINVAL;
	}

	// The event payload is optional (NULL means "remove"), but if present it
	// must be a dictionary.
	xpc_object_t event = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_EVENT);
	if (event && xpc_get_type(event) != XPC_TYPE_DICTIONARY) {
		return EXINVAL;
	}

	job_log(j, LOG_DEBUG, "%s event for stream/key: %s/%s", event ? "Setting" : "Removing", stream, key);

	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		/* If the event for the given key already exists for the job, we need to
		 * remove the old one first.
		 */
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			job_log(j, LOG_DEBUG, "Event exists. Removing.");
			externalevent_delete(eei);
			break;
		}
	}

	int result = EXNOMEM;
	if (event) {
		// Find or lazily create the stream, then attach the new event to it.
		struct eventsystem *es = eventsystem_find(stream);
		if (!es) {
			job_log(j, LOG_DEBUG, "Creating stream.");
			es = eventsystem_new(stream);
		}

		if (es) {
			job_log(j, LOG_DEBUG, "Adding event.");
			if (externalevent_new(j, es, key, event)) {
				job_log(j, LOG_DEBUG, "Added new event for key: %s", key);
				result = 0;
			} else {
				job_log(j, LOG_ERR, "Could not create event for key: %s", key);
			}
		} else {
			job_log(j, LOG_ERR, "Event stream could not be created: %s", stream);
		}
	} else {
		/* If the event was NULL, then we just remove it and return. */
		result = 0;
	}

	// Only successful operations get a (empty) reply dictionary.
	if (result == 0) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		*reply = reply2;
	}

	return result;
}
9949
/* XPC events op: copies back the event(s) job j registered. Three shapes:
 *   - stream == NULL (all streams): reply is a dict of stream -> {key: event};
 *     a NULL/empty key is required in this case.
 *   - stream set, key NULL/empty: reply is a dict of {key: event} for that
 *     stream.
 *   - both set: reply is the single matching event.
 * Returns 0 with the result in *reply, ESRCH if nothing matched, EXINVAL for
 * the unsupported all-streams/single-key combination.
 */
int
xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);

	bool all_streams = (stream == NULL);
	bool all_events = (key == NULL || strcmp(key, "") == 0); // strcmp for libxpc compatibility
	xpc_object_t events = NULL;

	// Can't ask for one key across every stream.
	if (all_streams && !all_events) {
		return EXINVAL;
	}

	if (all_streams || all_events) {
		job_log(j, LOG_DEBUG, "Fetching all events%s%s", stream ? " for stream: " : "", stream ? stream : "");
		events = xpc_dictionary_create(NULL, NULL, 0);
	} else {
		job_log(j, LOG_DEBUG, "Fetching stream/key: %s/%s", stream, key);
	}

	int result = ESRCH;
	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		if (all_streams) {
			// Group events under a per-stream sub-dictionary, created on demand.
			xpc_object_t sub = xpc_dictionary_get_value(events, eei->sys->name);
			if (sub == NULL) {
				sub = xpc_dictionary_create(NULL, NULL, 0);
				xpc_dictionary_set_value(events, eei->sys->name, sub);
				xpc_release(sub);
			}
			xpc_dictionary_set_value(sub, eei->name, eei->event);
		} else if (strcmp(eei->sys->name, stream) == 0) {
			if (all_events) {
				xpc_dictionary_set_value(events, eei->name, eei->event);
			} else if (strcmp(eei->name, key) == 0) {
				// Single-event lookup: retain it and stop scanning.
				job_log(j, LOG_DEBUG, "Found event.");
				events = xpc_retain(eei->event);
				break;
			}
		}
	}

	if (events) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENT, events);
		xpc_release(events);

		*reply = reply2;
		result = 0;
	}

	return result;
}
10004
10005 int
10006 xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
10007 {
10008 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10009 if (!stream) {
10010 return EXINVAL;
10011 }
10012
10013 job_log(j, LOG_DEBUG, "Checking in stream: %s", stream);
10014
10015 struct machservice *ms = NULL;
10016 int error = xpc_event_find_channel(j, stream, &ms);
10017 if (error) {
10018 job_log(j, LOG_ERR, "Failed to check in: 0x%x: %s", error, xpc_strerror(error));
10019 } else if (ms->isActive) {
10020 job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
10021 error = EBUSY;
10022 } else {
10023 machservice_request_notifications(ms);
10024
10025 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10026 xpc_dictionary_set_mach_recv(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10027 *reply = reply2;
10028 error = 0;
10029 }
10030
10031 return error;
10032 }
10033
10034 int
10035 xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply)
10036 {
10037 if (!j->event_monitor) {
10038 return EPERM;
10039 }
10040
10041 const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10042 if (!stream) {
10043 return EXINVAL;
10044 }
10045
10046 uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10047 if (!token) {
10048 return EXINVAL;
10049 }
10050
10051 job_log(j, LOG_DEBUG, "Looking up channel for stream/token: %s/%llu", stream, token);
10052
10053 struct externalevent *ee = externalevent_find(stream, token);
10054 if (!ee) {
10055 return ESRCH;
10056 }
10057
10058 struct machservice *ms = NULL;
10059 int error = xpc_event_find_channel(ee->job, stream, &ms);
10060 if (!error) {
10061 job_log(j, LOG_DEBUG, "Found event channel port: 0x%x", ms->port);
10062 xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10063 xpc_dictionary_set_mach_send(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10064 *reply = reply2;
10065 error = 0;
10066 } else {
10067 job_log(j, LOG_ERR, "Could not find event channel for stream/token: %s/%llu: 0x%x: %s", stream, token, error, xpc_strerror(error));
10068 }
10069
10070 return error;
10071 }
10072
/* XPC events op: the event monitor (provider) checks in for `stream` and
 * receives the stream's current events as an array of alternating
 * (id, event) entries. Creates the stream if it does not exist yet.
 */
int
xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j->event_monitor) {
		return EPERM;
	}

	/* This indicates that the event monitor is now safe to signal. This state is
	 * independent of whether this operation actually succeeds; we just need it
	 * to ignore SIGUSR1.
	 */
	j->event_monitor_ready2signal = true;

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	job_log(j, LOG_DEBUG, "Provider checking in for stream: %s", stream);

	xpc_object_t events = xpc_array_create(NULL, 0);
	struct eventsystem *es = eventsystem_find(stream);
	if (!es) {
		/* If we had to create the event stream, there were no events, so just
		 * give back the empty array.
		 */
		job_log(j, LOG_DEBUG, "Creating event stream.");
		es = eventsystem_new(stream);
		if (!job_assumes(j, es)) {
			xpc_release(events);
			return EXNOMEM;
		}

		// Remember the special launchd-support stream for internal use.
		if (strcmp(stream, "com.apple.launchd.helper") == 0) {
			_launchd_support_system = es;
		}
	} else {
		job_log(j, LOG_DEBUG, "Filling event array.");

		// Pairs: each event's numeric id immediately precedes its payload.
		struct externalevent *ei = NULL;
		LIST_FOREACH(ei, &es->events, sys_le) {
			xpc_array_set_uint64(events, XPC_ARRAY_APPEND, ei->id);
			xpc_array_append_value(events, ei->event);
		}
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENTS, events);
	xpc_release(events);
	*reply = reply2;

	return 0;
}
10126
/* XPC events op: the event monitor reports the new firing state of the event
 * identified by (stream, token). The owning job is (re)dispatched so it can
 * react to the state change. Only the event monitor may call this.
 */
int
xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	job_t other_j = NULL;

	if (!j->event_monitor) {
		return EPERM;
	}

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
	if (!token) {
		return EXINVAL;
	}

	bool state = false;
	xpc_object_t xstate = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_STATE);
	if (!xstate || xpc_get_type(xstate) != XPC_TYPE_BOOL) {
		return EXINVAL;
	} else {
		state = xpc_bool_get_value(xstate);
	}

	job_log(j, LOG_DEBUG, "Setting event state to %s for stream/token: %s/%llu", state ? "true" : "false", stream, token);

	struct externalevent *ei = externalevent_find(stream, token);
	if (!ei) {
		job_log(j, LOG_ERR, "Could not find stream/token: %s/%llu", stream, token);
		return ESRCH;
	}

	// Save the owning job before the event may be deleted below.
	other_j = ei->job;
	ei->state = state;

	if (ei->internal) {
		// Internal events gate exec(2); once fired, the gate is removed.
		job_log(ei->job, LOG_NOTICE, "Job should be able to exec(3) now.");
		ei->job->waiting4ok = false;
		externalevent_delete(ei);
	}

	// `ei` may be gone here; dispatch via the saved job pointer.
	(void)job_dispatch(other_j, false);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
10178
/* Demultiplexes an incoming XPC Events request arriving on port `p` to the
 * appropriate handler. Returns false only when the message carries no opcode
 * (i.e. it is not an XPC Events request); otherwise true, with a reply — an
 * error reply if the operation failed.
 */
bool
xpc_event_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	// Record the sender's audit token so handlers can check credentials.
	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	job_t j = job_mig_intran(p);
	// Force the "unmanaged job" rejection path for unknown/anonymous senders.
	// (op is uint64_t, so -1 here becomes UINT64_MAX, matching `case -1:`.)
	if (!j || j->anonymous) {
		op = -1;
	}

	job_log(j, LOG_DEBUG, "Incoming XPC event request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_EVENT_GET_NAME:
		error = xpc_event_get_event_name(j, request, reply);
		break;
	case XPC_EVENT_SET:
		error = xpc_event_set_event(j, request, reply);
		break;
	case XPC_EVENT_COPY:
		error = xpc_event_copy_event(j, request, reply);
		break;
	case XPC_EVENT_CHECK_IN:
		error = xpc_event_channel_check_in(j, request, reply);
		break;
	case XPC_EVENT_LOOK_UP:
		error = xpc_event_channel_look_up(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_CHECK_IN:
		error = xpc_event_provider_check_in(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_SET_STATE:
		error = xpc_event_provider_set_state(j, request, reply);
		break;
	case -1:
		if (j) {
			job_log(j, LOG_ERR, "Unmanaged jobs may not make XPC Events requests.");
		}
		error = EPERM;
		break;
	default:
		job_log(j, LOG_ERR, "Bogus opcode.");
		error = EDOM;
	}

	// Handlers only fill *reply on success; synthesize an error reply here.
	if (error) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_uint64(reply2, XPC_EVENT_ROUTINE_KEY_ERROR, error);
		*reply = reply2;
	}

	return true;
}
10240
10241 kern_return_t
10242 job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
10243 {
10244 struct ldcred *ldc = runtime_get_caller_creds();
10245 job_t otherj;
10246
10247 if (!j) {
10248 return BOOTSTRAP_NO_MEMORY;
10249 }
10250
10251 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
10252 return BOOTSTRAP_UNKNOWN_SERVICE;
10253 }
10254
10255 #if TARGET_OS_EMBEDDED
10256 bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
10257 #else
10258 bool allow_non_root_kickstart = false;
10259 #endif
10260
10261 if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
10262 return BOOTSTRAP_NOT_PRIVILEGED;
10263 }
10264
10265 #if HAVE_SANDBOX
10266 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10267 return BOOTSTRAP_NOT_PRIVILEGED;
10268 }
10269 #endif
10270
10271 if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
10272 return BOOTSTRAP_SERVICE_ACTIVE;
10273 }
10274
10275 otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
10276 otherj = job_dispatch(otherj, true);
10277
10278 if (!job_assumes(j, otherj && otherj->p)) {
10279 // <rdar://problem/6787083> Clear this flag if we failed to start the job.
10280 otherj->stall_before_exec = false;
10281 return BOOTSTRAP_NO_MEMORY;
10282 }
10283
10284 *out_pid = otherj->p;
10285
10286 return 0;
10287 }
10288
/* Core of the legacy spawn API: unpacks a serialized job dictionary from
 * `indata`, imports it into the caller's Aqua session, and dispatches it.
 * On success, returns BOOTSTRAP_SUCCESS with the running job in *outj. If a
 * job with the same label already exists, returns BOOTSTRAP_NAME_IN_USE with
 * the existing job in *outj (which may have been flagged to wait for a
 * debugger). The caller (job_mig_spawn2) owns deallocating `indata`.
 */
kern_return_t
job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
{
	launch_data_t jobdata = NULL;
	size_t data_offset = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// In PID 1, non-root callers must be serviced by their per-user launchd.
	if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
	if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
		jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
		return 1;
	}

	// NOTE(review): target_jm cannot be NULL here (checked above), so the
	// `?:` fallback to j->mgr is dead — presumably kept for safety.
	jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);

	launch_data_t label = NULL;
	launch_data_t wait4debugger = NULL;
	if (!jr) {
		switch (errno) {
		case EEXIST:
			/* If EEXIST was returned, we know that there is a label string in
			 * the dictionary. So we don't need to check the types here; that
			 * has already been done.
			 */
			label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
			jr = job_find(NULL, launch_data_get_string(label));
			// NOTE(review): if job_find() races and returns NULL, *outj is set
			// to NULL below while still returning BOOTSTRAP_NAME_IN_USE —
			// callers must tolerate a NULL out-job.
			if (job_assumes(j, jr != NULL) && !jr->p) {
				wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
				if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
					if (launch_data_get_bool(wait4debugger)) {
						/* If the job exists, we're going to kick-start it, but
						 * we need to give the caller the opportunity to start
						 * it suspended if it so desires. But this will only
						 * take effect if the job isn't running.
						 */
						jr->wait4debugger_oneshot = true;
					}
				}
			}

			*outj = jr;
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	if (pid1_magic) {
		jr->mach_uid = ldc->uid;
	}

	// TODO: Consolidate the app and legacy_LS_job bits.
	jr->legacy_LS_job = true;
	jr->abandon_pg = true;
	jr->asport = asport;
	jr->app = true;
	uuid_clear(jr->expected_audit_uuid);
	jr = job_dispatch(jr, true);

	if (!job_assumes(j, jr != NULL)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
	*outj = jr;

	return BOOTSTRAP_SUCCESS;
}
10392
kern_return_t
job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
{
	/* MIG server routine: spawn (or kick-start) a job described by the
	 * serialized dictionary in `indata`.
	 *
	 * rp is the MIG reply port; when we need to defer the reply until the
	 * child has exec(3)'d, we stash it on the job and return MIG_NO_REPLY so
	 * the MIG layer does not send a reply now.
	 */
	job_t nj = NULL;
	kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
	if (likely(kr == KERN_SUCCESS)) {
		if (job_setup_exit_port(nj) != KERN_SUCCESS) {
			job_remove(nj);
			kr = BOOTSTRAP_NO_MEMORY;
		} else {
			/* Do not return until the job has called exec(3), thereby making it
			 * safe for the caller to send it SIGCONT.
			 *
			 * <rdar://problem/9042798>
			 */
			nj->spawn_reply_port = rp;
			kr = MIG_NO_REPLY;
		}
	} else if (kr == BOOTSTRAP_NAME_IN_USE) {
		/* The job already exists; spawn_internal handed it back in nj.
		 * Capture whether it was running BEFORE dispatch, since dispatch may
		 * start it.
		 */
		bool was_running = nj->p;
		if (job_dispatch(nj, true)) {
			if (!was_running) {
				job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");

				// Same deferred-reply dance as the fresh-spawn path above.
				if (job_setup_exit_port(nj) == KERN_SUCCESS) {
					nj->spawn_reply_port = rp;
					kr = MIG_NO_REPLY;
				} else {
					kr = BOOTSTRAP_NO_MEMORY;
				}
			} else {
				// Already running: reply immediately with its pid, no observer port.
				*obsvr_port = MACH_PORT_NULL;
				*child_pid = nj->p;
				kr = KERN_SUCCESS;
			}
		} else {
			job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
			kr = BOOTSTRAP_UNKNOWN_SERVICE;
		}
	}

	// We own the out-of-line request buffer; release it on every path.
	mig_deallocate(indata, indataCnt);
	return kr;
}
10437
10438 launch_data_t
10439 job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport __attribute__((unused)))
10440 {
10441 launch_data_t reply = NULL;
10442
10443 errno = ENOTSUP;
10444 if (launch_data_get_type(request) == LAUNCH_DATA_STRING) {
10445 if (strcmp(launch_data_get_string(request), LAUNCH_KEY_CHECKIN) == 0) {
10446 reply = job_export(j);
10447 job_checkin(j);
10448 }
10449 }
10450
10451 return reply;
10452 }
10453
10454 #define LAUNCHD_MAX_LEGACY_FDS 128
10455 #define countof(x) (sizeof((x)) / sizeof((x[0])))
10456
10457 kern_return_t
10458 job_mig_legacy_ipc_request(job_t j, vm_offset_t request,
10459 mach_msg_type_number_t requestCnt, mach_port_array_t request_fds,
10460 mach_msg_type_number_t request_fdsCnt, vm_offset_t *reply,
10461 mach_msg_type_number_t *replyCnt, mach_port_array_t *reply_fdps,
10462 mach_msg_type_number_t *reply_fdsCnt, mach_port_t asport)
10463 {
10464 if (!j) {
10465 return BOOTSTRAP_NO_MEMORY;
10466 }
10467
10468 /* TODO: Once we support actions other than checking in, we must check the
10469 * sandbox capabilities and EUID of the requestort.
10470 */
10471 size_t nout_fdps = 0;
10472 size_t nfds = request_fdsCnt / sizeof(request_fds[0]);
10473 if (nfds > LAUNCHD_MAX_LEGACY_FDS) {
10474 job_log(j, LOG_ERR, "Too many incoming descriptors: %lu", nfds);
10475 return BOOTSTRAP_NO_MEMORY;
10476 }
10477
10478 int in_fds[LAUNCHD_MAX_LEGACY_FDS];
10479 size_t i = 0;
10480 for (i = 0; i < nfds; i++) {
10481 in_fds[i] = fileport_makefd(request_fds[i]);
10482 if (in_fds[i] == -1) {
10483 job_log(j, LOG_ERR, "Bad descriptor passed in legacy IPC request at index: %lu", i);
10484 }
10485 }
10486
10487 // DON'T goto outbad before this point.
10488 *reply = 0;
10489 *reply_fdps = NULL;
10490 launch_data_t ldreply = NULL;
10491
10492 size_t dataoff = 0;
10493 size_t fdoff = 0;
10494 launch_data_t ldrequest = launch_data_unpack((void *)request, requestCnt, in_fds, nfds, &dataoff, &fdoff);
10495 if (!ldrequest) {
10496 job_log(j, LOG_ERR, "Invalid legacy IPC request passed.");
10497 goto out_bad;
10498 }
10499
10500 ldreply = job_do_legacy_ipc_request(j, ldrequest, asport);
10501 if (!ldreply) {
10502 ldreply = launch_data_new_errno(errno);
10503 if (!ldreply) {
10504 goto out_bad;
10505 }
10506 }
10507
10508 *replyCnt = 10 * 1024 * 1024;
10509 mig_allocate(reply, *replyCnt);
10510 if (!*reply) {
10511 goto out_bad;
10512 }
10513
10514 int out_fds[LAUNCHD_MAX_LEGACY_FDS];
10515 size_t nout_fds = 0;
10516 size_t sz = launch_data_pack(ldreply, (void *)*reply, *replyCnt, out_fds, &nout_fds);
10517 if (!sz) {
10518 job_log(j, LOG_ERR, "Could not pack legacy IPC reply.");
10519 goto out_bad;
10520 }
10521
10522 if (nout_fds) {
10523 if (nout_fds > 128) {
10524 job_log(j, LOG_ERR, "Too many outgoing descriptors: %lu", nout_fds);
10525 goto out_bad;
10526 }
10527
10528 *reply_fdsCnt = nout_fds * sizeof((*reply_fdps)[0]);
10529 mig_allocate((vm_address_t *)reply_fdps, *reply_fdsCnt);
10530 if (!*reply_fdps) {
10531 goto out_bad;
10532 }
10533
10534 for (i = 0; i < nout_fds; i++) {
10535 mach_port_t fp = MACH_PORT_NULL;
10536 /* Whatever. Worst case is that we insert MACH_PORT_NULL. Not a big
10537 * deal. Note, these get stuffed into an array whose disposition is
10538 * mach_port_move_send_t, so we don't have to worry about them after
10539 * returning.
10540 */
10541 if (fileport_makeport(out_fds[i], &fp) != 0) {
10542 job_log(j, LOG_ERR, "Could not pack response descriptor at index: %lu: %d: %s", i, errno, strerror(errno));
10543 }
10544 (*reply_fdps)[i] = fp;
10545 }
10546
10547 nout_fdps = nout_fds;
10548 } else {
10549 *reply_fdsCnt = 0;
10550 }
10551
10552 mig_deallocate(request, requestCnt);
10553 launch_data_free(ldreply);
10554 ldreply = NULL;
10555
10556 // Unused for now.
10557 (void)launchd_mport_deallocate(asport);
10558
10559 return BOOTSTRAP_SUCCESS;
10560
10561 out_bad:
10562 for (i = 0; i < nfds; i++) {
10563 (void)close(in_fds[i]);
10564 }
10565
10566 for (i = 0; i < nout_fds; i++) {
10567 (void)launchd_mport_deallocate((*reply_fdps)[i]);
10568 }
10569
10570 if (*reply) {
10571 mig_deallocate(*reply, *replyCnt);
10572 }
10573
10574 /* We should never hit this since the last goto out is in the case that
10575 * allocating this fails.
10576 */
10577 if (*reply_fdps) {
10578 mig_deallocate((vm_address_t)*reply_fdps, *reply_fdsCnt);
10579 }
10580
10581 if (ldreply) {
10582 launch_data_free(ldreply);
10583 }
10584
10585 return BOOTSTRAP_NO_MEMORY;
10586 }
10587
void
jobmgr_init(bool sflag)
{
	/* One-time setup of the job-manager hierarchy: creates the root job
	 * manager and the XPC "system" domain, and opens a descriptor used to
	 * keep /dev (autofs) from hanging shutdown-path stat(2)s.
	 *
	 * sflag: single-user-mode flag, forwarded to the root job manager.
	 */
	const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
	SLIST_INIT(&s_curious_jobs);
	LIST_INIT(&s_needing_sessions);

	// Both of these must exist for launchd to function; abort otherwise.
	osx_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
	osx_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
	_s_xpc_system_domain->req_asid = launchd_audit_session;
	_s_xpc_system_domain->req_asport = launchd_audit_port;
	_s_xpc_system_domain->shortdesc = "system";
	if (pid1_magic) {
		root_jobmgr->monitor_shutdown = true;
	}

	uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
	s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
	/* NOTE(review): the likely() on the == -1 branch reads backwards — it
	 * hints that /dev/autofs_nowait usually does NOT exist. Presumably true
	 * on systems without autofs loaded; confirm before "fixing".
	 */
	if (likely(s_no_hang_fd == -1)) {
		// Fall back to watching /dev itself for the autofs node appearing.
		if (jobmgr_assumes_zero_p(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK))) != -1) {
			(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr));
		}
	}
	// Mark the descriptor as one launchd keeps open across exec.
	s_no_hang_fd = _fd(s_no_hang_fd);
}
10613
size_t
our_strhash(const char *s)
{
	/* djb2
	 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
	 */
	size_t h = 5381;

	for (; *s != '\0'; s++) {
		// h = h * 33 + c (the shift-add form of the multiply)
		h = ((h << 5) + h) + (size_t)*s;
	}

	return h;
}
10629
10630 size_t
10631 hash_label(const char *label)
10632 {
10633 return our_strhash(label) % LABEL_HASH_SIZE;
10634 }
10635
10636 size_t
10637 hash_ms(const char *msstr)
10638 {
10639 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
10640 }
10641
10642 bool
10643 waiting4removal_new(job_t j, mach_port_t rp)
10644 {
10645 struct waiting_for_removal *w4r;
10646
10647 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
10648 return false;
10649 }
10650
10651 w4r->reply_port = rp;
10652
10653 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
10654
10655 return true;
10656 }
10657
10658 void
10659 waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
10660 {
10661 (void)job_assumes_zero(j, job_mig_send_signal_reply(w4r->reply_port, 0));
10662
10663 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
10664
10665 free(w4r);
10666 }
10667
size_t
get_kern_max_proc(void)
{
	/* Ask the kernel for its maximum process count. The default of 100 only
	 * survives if the sysctl fails to overwrite it.
	 */
	int mib[2] = { CTL_KERN, KERN_MAXPROC };
	int maxproc = 100;
	size_t len = sizeof(maxproc);

	(void)posix_assumes_zero(sysctl(mib, 2, &maxproc, &len, NULL, 0));

	return maxproc;
}
10679
10680 // See rdar://problem/6271234
10681 void
10682 eliminate_double_reboot(void)
10683 {
10684 if (unlikely(!pid1_magic)) {
10685 return;
10686 }
10687
10688 struct stat sb;
10689 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
10690 int result = -1;
10691
10692 if (unlikely(stat(argv[1], &sb) != -1)) {
10693 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
10694
10695 pid_t p = 0;
10696 result = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ);
10697 if (result == -1) {
10698 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script: %d: %s", result, strerror(result));
10699 goto out;
10700 }
10701
10702 int wstatus = 0;
10703 result = waitpid(p, &wstatus, 0);
10704 if (result == -1) {
10705 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to reap deferred install script: %d: %s", errno, strerror(errno));
10706 goto out;
10707 }
10708
10709 if (WIFEXITED(wstatus)) {
10710 if ((result = WEXITSTATUS(wstatus)) == 0) {
10711 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
10712 } else {
10713 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script failed with status: %d", WEXITSTATUS(wstatus));
10714 }
10715 } else {
10716 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Weirdness with install script: %d", wstatus);
10717 }
10718 }
10719 out:
10720 if (result == 0) {
10721 /* If the unlink(2) was to fail, it would be most likely fail with
10722 * EBUSY. All the other failure cases for unlink(2) don't apply when
10723 * we're running under PID 1 and have verified that the file exists.
10724 * Outside of someone deliberately messing with us (like if
10725 * /etc/rc.deferredinstall is actually a looping sym-link or a mount
10726 * point for a filesystem) and I/O errors, we should be good.
10727 */
10728 if (unlink(argv[1]) == -1) {
10729 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to remove deferred install script: %d: %s", errno, strerror(errno));
10730 }
10731 }
10732 }
10733
10734 void
10735 jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
10736 {
10737 job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
10738 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10739 j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
10740 job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
10741 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10742 j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
10743 job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
10744 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
10745 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
10746 * You can't set this in a plist.
10747 */
10748 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMACTIVE) == 0) {
10749 // Ignore.
10750 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
10751 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
10752 * complain about it.
10753 */
10754 } else {
10755 job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
10756 }
10757
10758 if (unlikely(!j->jetsam_properties)) {
10759 j->jetsam_properties = true;
10760 }
10761 }
10762
10763 #if TARGET_OS_EMBEDDED
10764 int
10765 launchd_set_jetsam_priorities(launch_data_t priorities)
10766 {
10767 kern_return_t result = 0;
10768
10769 if (launch_data_get_type(priorities) != LAUNCH_DATA_ARRAY) {
10770 return EINVAL;
10771 }
10772 if (!launchd_embedded_handofgod) {
10773 return EPERM;
10774 }
10775
10776 size_t npris = launch_data_array_get_count(priorities);
10777
10778 job_t ji = NULL;
10779 size_t i = 0;
10780 for (i = 0; i < npris; i++) {
10781 launch_data_t ldi = launch_data_array_get_index(priorities, i);
10782 if (launch_data_get_type(ldi) != LAUNCH_DATA_DICTIONARY) {
10783 continue;
10784 }
10785
10786 launch_data_t ldlabel = NULL;
10787 if (!(ldlabel = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMLABEL))) {
10788 continue;
10789 }
10790 const char *label = launch_data_get_string(ldlabel);
10791
10792 ji = job_find(root_jobmgr, label);
10793 if (!ji) {
10794 continue;
10795 }
10796
10797 launch_data_dict_iterate(ldi, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, ji);
10798
10799 launch_data_t frontmost = NULL;
10800 if ((frontmost = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMFRONTMOST)) && launch_data_get_type(frontmost) == LAUNCH_DATA_BOOL) {
10801 ji->jetsam_frontmost = launch_data_get_bool(frontmost);
10802 }
10803
10804 launch_data_t active = NULL;
10805 if ((active = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMACTIVE)) && launch_data_get_type(active) == LAUNCH_DATA_BOOL) {
10806 ji->jetsam_active = launch_data_get_bool(active);
10807 }
10808
10809 launchd_update_jetsam_list(ji);
10810
10811 result = result != 0 ? errno : 0;
10812 }
10813
10814 return result;
10815 }
10816
int
launchd_update_jetsam_list(job_t j)
{
	/* Push this job's Jetsam parameters (pid, priority, frontmost/active
	 * flags) into the kernel's memorystatus list. Returns the sysctl result.
	 */
	kern_return_t rc;
	memorystatus_priority_entry_t entry = {
		.pid = j->p,
		.priority = j->jetsam_priority,
		.flags = (j->jetsam_frontmost ? kMemorystatusFlagsFrontmost : 0)
			| (j->jetsam_active ? kMemorystatusFlagsActive : 0),
	};

	// ToDo - cache MIB if we keep this interface
	(void)posix_assumes_zero(rc = sysctlbyname("kern.memorystatus_jetsam_change", NULL, NULL, &entry, sizeof(memorystatus_priority_entry_t)));
	return rc;
}
10833 #endif