/*
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

static const char *const __rcs_file_version__ = "$Revision: 25247 $";

#include "config.h"
#include "launchd_core_logic.h"
#include "launch_internal.h"
#include "launchd_helper.h"

#include <TargetConditionals.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/boolean.h>
#include <mach/message.h>
#include <mach/notify.h>
#include <mach/mig_errors.h>
#include <mach/mach_traps.h>
#include <mach/mach_interface.h>
#include <mach/host_info.h>
#include <mach/mach_host.h>
#include <mach/exception.h>
#include <mach/host_reboot.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/ucred.h>
#include <sys/fcntl.h>
#include <sys/un.h>
#include <sys/reboot.h>
#include <sys/wait.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/pipe.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/nd6.h>
#include <bsm/libbsm.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <paths.h>
#include <pwd.h>
#include <grp.h>
#include <ttyent.h>
#include <dlfcn.h>
#include <dirent.h>
#include <string.h>
#include <ctype.h>
#include <glob.h>
#include <System/sys/spawn.h>
#include <spawn.h>
#include <time.h>
#include <libinfo.h>

#include <libproc.h>
#include <malloc/malloc.h>
#include <pthread.h>
#if HAVE_SANDBOX
#define __APPLE_API_PRIVATE
#include <sandbox.h>
#endif
#if HAVE_QUARANTINE
#include <quarantine.h>
#endif
#if TARGET_OS_EMBEDDED
#include <sys/kern_memorystatus.h>
#else
extern int gL1CacheEnabled;
/* To make my life easier. */
typedef struct jetsam_priority_entry {
	pid_t pid;
	uint32_t priority;
	uint32_t flags;
	int32_t hiwat_pages;
	int32_t hiwat_reserved1;
	int32_t hiwat_reserved2;
	int32_t hiwat_reserved3;
} jetsam_priority_entry_t;

enum {
	kJetsamFlagsFrontmost = (1 << 0),
	kJetsamFlagsKilled = (1 << 1)
};
#endif

#include "launch.h"
#include "launch_priv.h"
#include "launch_internal.h"
#include "bootstrap.h"
#include "bootstrap_priv.h"
#include "vproc.h"
#include "vproc_internal.h"

#include "reboot2.h"

#include "launchd.h"
#include "launchd_runtime.h"
#include "launchd_unix_ipc.h"
#include "protocol_vproc.h"
#include "protocol_vprocServer.h"
#include "protocol_job_reply.h"
#include "protocol_job_forward.h"
#include "mach_excServer.h"
#if !TARGET_OS_EMBEDDED
#include "domainServer.h"
#include "init.h"
#endif /* !TARGET_OS_EMBEDDED */
#include "eventsServer.h"

#ifndef POSIX_SPAWN_OSX_TALAPP_START
#define POSIX_SPAWN_OSX_TALAPP_START 0x0400
#endif

#ifndef POSIX_SPAWN_OSX_WIDGET_START
#define POSIX_SPAWN_OSX_WIDGET_START 0x0800
#endif

#ifndef POSIX_SPAWN_IOS_APP_START
#define POSIX_SPAWN_IOS_APP_START 0x1000
#endif

/* LAUNCHD_DEFAULT_EXIT_TIMEOUT
 * If the job hasn't exited in the given number of seconds after sending
 * it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
 */
#define LAUNCHD_MIN_JOB_RUN_TIME 10
#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
#define LAUNCHD_SIGKILL_TIMER 2
#define LAUNCHD_LOG_FAILED_EXEC_FREQ 10

#define SHUTDOWN_LOG_DIR "/var/log/shutdown"

#define TAKE_SUBSET_NAME "TakeSubsetName"
#define TAKE_SUBSET_PID "TakeSubsetPID"
#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"

#define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
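/* For nonzero v, v & (v - 1) clears the lowest set bit, so the expression is
 * zero exactly when v has a single bit set; the trailing "&& v" rules out
 * v == 0. E.g. IS_POWER_OF_TWO(32) is true, IS_POWER_OF_TWO(48) is false.
 */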

extern char **environ;

struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle;
	mach_port_t reply_port;
};

static bool waiting4removal_new(job_t j, mach_port_t rp);
static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);

struct machservice {
	SLIST_ENTRY(machservice) sle;
	SLIST_ENTRY(machservice) special_port_sle;
	LIST_ENTRY(machservice) name_hash_sle;
	LIST_ENTRY(machservice) port_hash_sle;
	struct machservice *alias;
	job_t job;
	unsigned int gen_num;
	mach_port_name_t port;
	unsigned int
		isActive :1,
		reset :1,
		recv :1,
		hide :1,
		kUNCServer :1,
		per_user_hack :1,
		debug_on_close :1,
		per_pid :1,
		delete_on_destruction :1,
		drain_one_on_crash :1,
		drain_all_on_crash :1,
		event_update_port :1, /* The job which owns this port is the event monitor. */
		upfront :1, /* This service was declared in the plist. */
		event_channel :1, /* The job is to receive events on this channel. */
		/* Don't let the size of this field get too small. It has to be large
		 * enough to represent the reasonable range of special port numbers.
		 */
		special_port_num :18;
	const char name[0];
};
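/* Note: the trailing name[0] member above is the pre-C99 flexible-array
 * idiom. The struct is allocated with extra space for the service name,
 * e.g. roughly calloc(1, sizeof(struct machservice) + strlen(name) + 1),
 * and the name is then copied into place after the fixed members.
 */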

static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */

#define PORT_HASH_SIZE 32
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
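/* Since PORT_HASH_SIZE is a compile-time constant, the ternary folds away:
 * with the power-of-two size of 32 this reduces to MACH_PORT_INDEX(x) & 31,
 * a mask instead of a modulo. ACTIVE_JOB_HASH below uses the same trick.
 */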

static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];

static void machservice_setup(launch_data_t obj, const char *key, void *context);
static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
static void machservice_resetport(job_t j, struct machservice *ms);
static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
static void machservice_ignore(job_t j, struct machservice *ms);
static void machservice_watch(job_t j, struct machservice *ms);
static void machservice_delete(job_t j, struct machservice *, bool port_died);
static void machservice_request_notifications(struct machservice *);
static mach_port_t machservice_port(struct machservice *);
static job_t machservice_job(struct machservice *);
static bool machservice_hidden(struct machservice *);
static bool machservice_active(struct machservice *);
static const char *machservice_name(struct machservice *);
static bootstrap_status_t machservice_status(struct machservice *);
void machservice_drain_port(struct machservice *);
static struct machservice *xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p);

struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;
	int *fds;
	unsigned int junkfds:1, fd_cnt:31;
	union {
		const char name[0];
		char name_init[0];
	};
};

static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds);
static void socketgroup_delete(job_t j, struct socketgroup *sg);
static void socketgroup_watch(job_t j, struct socketgroup *sg);
static void socketgroup_ignore(job_t j, struct socketgroup *sg);
static void socketgroup_callback(job_t j);
static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);

struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle;
	SLIST_ENTRY(calendarinterval) sle;
	job_t job;
	struct tm when;
	time_t when_next;
};

static LIST_HEAD(, calendarinterval) sorted_calendar_events;

static bool calendarinterval_new(job_t j, struct tm *w);
static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
static void calendarinterval_callback(void);
static void calendarinterval_sanity_check(void);

struct envitem {
	SLIST_ENTRY(envitem) sle;
	bool one_shot;
	char *value;
	union {
		const char key[0];
		char key_init[0];
	};
};

static bool envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot);
static void envitem_delete(job_t j, struct envitem *ei, bool global);
static void envitem_setup(launch_data_t obj, const char *key, void *context);
static void envitem_setup_one_shot(launch_data_t obj, const char *key, void *context);

struct limititem {
	SLIST_ENTRY(limititem) sle;
	struct rlimit lim;
	unsigned int setsoft:1, sethard:1, which:30;
};

static bool limititem_update(job_t j, int w, rlim_t r);
static void limititem_delete(job_t j, struct limititem *li);
static void limititem_setup(launch_data_t obj, const char *key, void *context);
#if HAVE_SANDBOX
static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
#endif

static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);

typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	CRASHED,
	DID_NOT_CRASH,
	PATH_EXISTS,
	PATH_MISSING,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
	PATH_CHANGES,
	DIR_NOT_EMPTY,
	// FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
} semaphore_reason_t;

struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle;
	semaphore_reason_t why;
	bool watching_parent;
	int fd;

	union {
		const char what[0];
		char what_init[0];
	};
};

struct semaphoreitem_dict_iter_context {
	job_t j;
	semaphore_reason_t why_true;
	semaphore_reason_t why_false;
};

static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_callback(job_t j, struct kevent *kev);
static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);

struct externalevent {
	LIST_ENTRY(externalevent) sys_le;
	LIST_ENTRY(externalevent) job_le;
	struct eventsystem *sys;

	uint64_t id;
	job_t job;
	bool state;
	bool wanted_state;
	launch_data_t event;

	char name[0];
};

struct externalevent_iter_ctx {
	job_t j;
	struct eventsystem *sys;
};

static bool externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event);
static void externalevent_delete(struct externalevent *ee);
static void externalevent_setup(launch_data_t obj, const char *key, void *context);
static struct externalevent *externalevent_find(const char *sysname, uint64_t id);

struct eventsystem {
	LIST_ENTRY(eventsystem) global_le;
	LIST_HEAD(, externalevent) events;
	uint64_t curid;
	bool has_updates;
	char name[0];
};

static struct eventsystem *eventsystem_new(const char *name);
static void eventsystem_delete(struct eventsystem *sys);
static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
static struct eventsystem *eventsystem_find(const char *name);
static void eventsystem_ping(void);

#define ACTIVE_JOB_HASH_SIZE 32
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))

#define MACHSERVICE_HASH_SIZE 37

#define LABEL_HASH_SIZE 53
struct jobmgr_s {
	kq_callback kqjobmgr_callback;
	LIST_ENTRY(jobmgr_s) xpc_le;
	SLIST_ENTRY(jobmgr_s) sle;
	SLIST_HEAD(, jobmgr_s) submgrs;
	LIST_HEAD(, job_s) jobs;
	LIST_HEAD(, job_s) jetsam_jobs;

	/* For legacy reasons, we keep all job labels that are imported in the
	 * root job manager's label hash. If a job manager is an XPC domain, then
	 * it gets its own label hash that is separate from the "global" one
	 * stored in the root job manager.
	 */
	LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
	LIST_HEAD(, job_s) global_env_jobs;
	mach_port_t jm_port;
	mach_port_t req_port;
	jobmgr_t parentmgr;
	int reboot_flags;
	time_t shutdown_time;
	unsigned int global_on_demand_cnt;
	unsigned int normal_active_cnt;
	unsigned int jetsam_jobs_cnt;
	unsigned int
		shutting_down :1,
		session_initialized :1,
		killed_stray_jobs :1,
		monitor_shutdown :1,
		shutdown_jobs_dirtied :1,
		shutdown_jobs_cleaned :1,
		xpc_singleton :1;
	uint32_t properties;
	/* XPC-specific properties. */
	char owner[MAXCOMLEN];
	char *shortdesc;
	mach_port_t req_bsport;
	mach_port_t req_excport;
	mach_port_t req_asport;
	pid_t req_pid;
	uid_t req_euid;
	gid_t req_egid;
	au_asid_t req_asid;
	vm_offset_t req_ctx;
	mach_msg_type_number_t req_ctx_sz;
	mach_port_t req_rport;
	kern_return_t error;
	union {
		const char name[0];
		char name_init[0];
	};
};

/* Global XPC domains. */
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static jobmgr_t _s_xpc_system_domain;
static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */

#define jobmgr_assumes(jm, e) \
	(unlikely(!(e)) ? jobmgr_log_bug(jm, __LINE__), false : true)
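/* Note the comma operator: on failure the macro logs the offending line via
 * jobmgr_log_bug() and then evaluates to false, so callers can write
 * "if (!jobmgr_assumes(jm, kr == KERN_SUCCESS)) { ... }" to log and branch
 * in one step. job_assumes() below follows the same pattern.
 */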

static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
static job_t xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
static jobmgr_t jobmgr_parent(jobmgr_t jm);
static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
static bool jobmgr_label_test(jobmgr_t jm, const char *str);
static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
static void jobmgr_remove(jobmgr_t jm);
static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
static void job_export_all2(jobmgr_t jm, launch_data_t where);
static void jobmgr_callback(void *obj, struct kevent *kev);
static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
/* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
static void jobmgr_log_bug(jobmgr_t jm, unsigned int line);

#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
#define AUTO_PICK_XPC_LABEL (const char *)(~2)
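/* These are sentinel pointer values, not real strings: ~0, ~1 and ~2 can
 * never be valid string addresses, so callers like job_new() can compare the
 * label argument against them to decide whether to synthesize a legacy,
 * anonymous, or XPC label instead of copying the caller's string.
 */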

struct suspended_peruser {
	LIST_ENTRY(suspended_peruser) sle;
	job_t j;
};

struct job_s {
	kq_callback kqjob_callback; /* MUST be first element of this structure for benefit of launchd's run loop. */
	LIST_ENTRY(job_s) sle;
	LIST_ENTRY(job_s) subjob_sle;
	LIST_ENTRY(job_s) needing_session_sle;
	LIST_ENTRY(job_s) jetsam_sle;
	LIST_ENTRY(job_s) pid_hash_sle;
	LIST_ENTRY(job_s) label_hash_sle;
	LIST_ENTRY(job_s) global_env_sle;
	SLIST_ENTRY(job_s) curious_jobs_sle;
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;
	LIST_HEAD(, job_s) subjobs;
	LIST_HEAD(, externalevent) events;
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
	job_t alias;
	struct rusage ru;
	cpu_type_t *j_binpref;
	size_t j_binpref_cnt;
	mach_port_t j_port;
	mach_port_t exit_status_dest;
	mach_port_t exit_status_port;
	mach_port_t spawn_reply_port;
	uid_t mach_uid;
	jobmgr_t mgr;
	size_t argc;
	char **argv;
	char *prog;
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdinpath;
	char *stdoutpath;
	char *stderrpath;
	char *alt_exc_handler;
	struct vproc_shmem_s *shmem;
	struct machservice *lastlookup;
	unsigned int lastlookup_gennum;
#if HAVE_SANDBOX
	char *seatbelt_profile;
	uint64_t seatbelt_flags;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p;
	int last_exit_status;
	int stdin_fd;
	int fork_fd;
	int log_redirect_fd;
	int nice;
	int stdout_err_fd;
	uint32_t pstype;
	int32_t jetsam_priority;
	int32_t jetsam_memlimit;
	int32_t jetsam_seq;
	int32_t main_thread_priority;
	uint32_t timeout;
	uint32_t exit_timeout;
	uint64_t sent_signal_time;
	uint64_t start_time;
	uint32_t min_run_time;
	uint32_t start_interval;
	uint32_t peruser_suspend_count; /* The number of jobs that have disabled this per-user launchd. */
	uuid_t instance_id;
	uint32_t fail_cnt;
#if 0
	/* someday ... */
	enum {
		J_TYPE_ANONYMOUS = 1,
		J_TYPE_LAUNCHSERVICES,
		J_TYPE_MACHINIT,
		J_TYPE_INETD,
	} j_type;
#endif
	bool
		debug :1, /* man launchd.plist --> Debug */
		ondemand :1, /* man launchd.plist --> KeepAlive == false */
		session_create :1, /* man launchd.plist --> SessionCreate */
		low_pri_io :1, /* man launchd.plist --> LowPriorityIO */
		no_init_groups :1, /* man launchd.plist --> InitGroups */
		priv_port_has_senders :1, /* a legacy mach_init concept to make bootstrap_create_server/service() work */
		importing_global_env :1, /* a hack during job importing */
		importing_hard_limits :1, /* a hack during job importing */
		setmask :1, /* man launchd.plist --> Umask */
		anonymous :1, /* a process that launchd knows about, but isn't managed by launchd */
		checkedin :1, /* a legacy mach_init concept to detect sick jobs */
		legacy_mach_job :1, /* a job created via bootstrap_create_server() */
		legacy_LS_job :1, /* a job created via spawn_via_launchd() */
		inetcompat :1, /* a legacy job that wants inetd compatible semantics */
		inetcompat_wait :1, /* a twist on inetd compatibility */
		start_pending :1, /* an event fired and the job should start, but not necessarily right away */
		globargv :1, /* man launchd.plist --> EnableGlobbing */
		wait4debugger :1, /* man launchd.plist --> WaitForDebugger */
		wait4debugger_oneshot :1, /* One-shot WaitForDebugger. */
		internal_exc_handler :1, /* MachExceptionHandler == true */
		stall_before_exec :1, /* a hack to support an option of spawn_via_launchd() */
		only_once :1, /* man launchd.plist --> LaunchOnlyOnce. Note: 5465184 Rename this to "HopefullyNeverExits" */
		currently_ignored :1, /* Make job_ignore() / job_watch() work. If these calls were balanced, then this wouldn't be necessary. */
		forced_peers_to_demand_mode :1, /* A job that forced all other jobs to be temporarily launch-on-demand */
		setnice :1, /* man launchd.plist --> Nice */
		removal_pending :1, /* a job was asked to be unloaded/removed while running, we'll remove it after it exits */
		sent_sigkill :1, /* job_kill() was called */
		debug_before_kill :1, /* enter the kernel debugger before killing a job */
		weird_bootstrap :1, /* a hack that launchd+launchctl use during jobmgr_t creation */
		start_on_mount :1, /* man launchd.plist --> StartOnMount */
		per_user :1, /* This job is a per-user launchd managed by the PID 1 launchd */
		unload_at_mig_return :1, /* A job thoroughly confused launchd. We need to unload it ASAP */
		abandon_pg :1, /* man launchd.plist --> AbandonProcessGroup */
		ignore_pg_at_shutdown :1, /* During shutdown, do not send SIGTERM to stray processes in the process group of this job. */
		poll_for_vfs_changes :1, /* a hack to work around the fact that kqueues don't work on all filesystems */
		deny_job_creation :1, /* Don't let this job create new 'job_t' objects in launchd */
		kill_via_shmem :1, /* man launchd.plist --> EnableTransactions */
		sent_kill_via_shmem :1, /* We need to 'kill_via_shmem' once-and-only-once */
		clean_kill :1, /* The job was sent SIGKILL because it was clean. */
		kill_after_sample :1, /* The job is to be killed after sampling. */
		reap_after_trace :1, /* The job exited before sample did, so we should reap it after sample is done. */
		nosy :1, /* The job has an OtherJobEnabled KeepAlive criterion. */
		crashed :1, /* The job is the default Mach exception handler, and it crashed. */
		reaped :1, /* We've received NOTE_EXIT for the job. */
		stopped :1, /* job_stop() was called. */
		jetsam_frontmost :1, /* The job is considered "frontmost" by Jetsam. */
		needs_kickoff :1, /* The job is to be kept alive continuously, but it must be initially kicked off. */
		is_bootstrapper :1, /* The job is a bootstrapper. */
		has_console :1, /* The job owns the console. */
		embedded_special_privileges :1, /* The job runs as a non-root user on embedded but has select privileges of the root user. */
		did_exec :1, /* The job exec(2)ed successfully. */
		xpcproxy_did_exec :1, /* The job is an XPC service, and XPC proxy successfully exec(3)ed. */
		holds_ref :1, /* The (anonymous) job called vprocmgr_switch_to_session(). */
		jetsam_properties :1, /* The job has Jetsam limits in place. */
		dedicated_instance :1, /* This job was created as the result of a lookup of a service provided by a per-lookup job. */
		multiple_instances :1, /* The job supports creating additional instances of itself. */
		former_subjob :1, /* The sub-job was already removed from the parent's list of sub-jobs. */
		event_monitor :1, /* The job is responsible for monitoring external events for this launchd. */
		removing :1, /* A lame hack. */
		disable_aslr :1, /* Disable ASLR when launching this job. */
		xpc_service :1, /* The job is an XPC Service. */
		shutdown_monitor :1, /* The job is the Performance team's shutdown monitor. */
		dirty_at_shutdown :1, /* We should open a transaction for the job when shutdown begins. */
		workaround9359725 :1; /* The job was sent SIGKILL but did not exit in a timely fashion, indicating a kernel bug. */

	mode_t mask;
	pid_t tracing_pid;
	mach_port_t asport;
	/* Only set for per-user launchd's. */
	au_asid_t asid;
	uuid_t expected_audit_uuid;
	const char label[0];
};

static size_t hash_label(const char *label) __attribute__((pure));
static size_t hash_ms(const char *msstr) __attribute__((pure));
static SLIST_HEAD(, job_s) s_curious_jobs;

#define job_assumes(j, e) \
	(unlikely(!(e)) ? job_log_bug(j, __LINE__), false : true)

static void job_import_keys(launch_data_t obj, const char *key, void *context);
static void job_import_bool(job_t j, const char *key, bool value);
static void job_import_string(job_t j, const char *key, const char *value);
static void job_import_integer(job_t j, const char *key, long long value);
static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
static void job_import_array(job_t j, const char *key, launch_data_t value);
static void job_import_opaque(job_t j, const char *key, launch_data_t value);
static bool job_set_global_on_demand(job_t j, bool val);
static const char *job_active(job_t j);
static void job_watch(job_t j);
static void job_ignore(job_t j);
static void job_cleanup_after_tracer(job_t j);
static void job_reap(job_t j);
static bool job_useless(job_t j);
static bool job_keepalive(job_t j);
static void job_dispatch_curious_jobs(job_t j);
static void job_start(job_t j);
static void job_start_child(job_t j) __attribute__((noreturn));
static void job_setup_attributes(job_t j);
static bool job_setup_machport(job_t j);
static kern_return_t job_setup_exit_port(job_t j);
static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
static void job_postfork_become_user(job_t j);
static void job_postfork_test_user(job_t j);
static void job_log_pids_with_weird_uids(job_t j);
static void job_setup_exception_port(job_t j, task_t target_task);
static void job_callback(void *obj, struct kevent *kev);
static void job_callback_proc(job_t j, struct kevent *kev);
static void job_callback_timer(job_t j, void *ident);
static void job_callback_read(job_t j, int ident);
static void job_log_stray_pg(job_t j);
static void job_log_children_without_exec(job_t j);
static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static job_t job_new_alias(jobmgr_t jm, job_t src);
#endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new_subjob(job_t j, uuid_t identifier);
static void job_kill(job_t j);
static void job_uncork_fork(job_t j);
static void job_log_stdouterr(job_t j);
static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static void job_log_bug(job_t j, unsigned int line);
static void job_log_stdouterr2(job_t j, const char *msg, ...);
static void job_set_exception_port(job_t j, mach_port_t port);
static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
static void job_open_shutdown_transaction(job_t ji);
static void job_close_shutdown_transaction(job_t ji);

static const struct {
	const char *key;
	int val;
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
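/* This table maps the SoftResourceLimits/HardResourceLimits plist sub-keys
 * onto setrlimit(2) resources; limititem_setup() walks it to translate,
 * e.g., a "NumberOfFiles" entry into an RLIMIT_NOFILE limititem.
 */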

static time_t cronemu(int mon, int mday, int hour, int min);
static time_t cronemu_wday(int wday, int hour, int min);
static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
static bool cronemu_hour(struct tm *wtm, int hour, int min);
static bool cronemu_min(struct tm *wtm, int min);

/* These functions are a total nightmare to get to through headers.
 * See rdar://problem/8223092.
 */
typedef __darwin_mach_port_t fileport_t;
#define FILEPORT_NULL ((fileport_t)0)
extern int fileport_makeport(int, fileport_t *);
extern int fileport_makefd(fileport_t);
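/* Rough usage sketch (not exercised at this point in the file): wrap a file
 * descriptor in a Mach port with fileport_makeport(fd, &fp), transfer the
 * send right in a Mach message, then recover a descriptor on the receiving
 * side with fileport_makefd(fp).
 */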

/* miscellaneous file local functions */
static size_t get_kern_max_proc(void);
static int dir_has_files(job_t j, const char *path);
static char **mach_cmd2argv(const char *string);
static size_t our_strhash(const char *s) __attribute__((pure));
static void extract_rcsid_substr(const char *i, char *o, size_t osz);

void eliminate_double_reboot(void);

/* file local globals */
static size_t total_children;
static size_t total_anon_children;
static mach_port_t the_exception_server;
static job_t workaround_5477111;
static LIST_HEAD(, job_s) s_needing_sessions;
static LIST_HEAD(, eventsystem) _s_event_systems;
static job_t _s_event_monitor;
static job_t _s_shutdown_monitor;
static mach_port_t _s_event_update_port;
mach_port_t g_audit_session_port = MACH_PORT_NULL;
static uint32_t s_jetsam_sequence_id;

#if !TARGET_OS_EMBEDDED
static job_t s_embedded_privileged_job = (job_t)&root_jobmgr;
au_asid_t g_audit_session = AU_DEFAUDITSID;
#else
static job_t s_embedded_privileged_job = NULL;
pid_t g_audit_session = 0;
#endif

static int s_no_hang_fd = -1;

/* process wide globals */
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool g_shutdown_debugging = false;
bool g_verbose_boot = false;
bool g_embedded_privileged_action = false;
bool g_runtime_busy_time = false;

void
job_ignore(job_t j)
{
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;

	if (j->currently_ignored) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring...");

	j->currently_ignored = true;

	if (j->poll_for_vfs_changes) {
		j->poll_for_vfs_changes = false;
		(void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}

	SLIST_FOREACH(sg, &j->sockets, sle) {
		socketgroup_ignore(j, sg);
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		machservice_ignore(j, ms);
	}

	SLIST_FOREACH(si, &j->semaphores, sle) {
		semaphoreitem_ignore(j, si);
	}
}

void
job_watch(job_t j)
{
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;

	if (!j->currently_ignored) {
		return;
	}

	job_log(j, LOG_DEBUG, "Watching...");

	j->currently_ignored = false;

	SLIST_FOREACH(sg, &j->sockets, sle) {
		socketgroup_watch(j, sg);
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		machservice_watch(j, ms);
	}

	SLIST_FOREACH(si, &j->semaphores, sle) {
		semaphoreitem_watch(j, si);
	}
}

void
job_stop(job_t j)
{
	char extralog[100];
	int32_t newval = 1;

	if (unlikely(!j->p || j->stopped || j->anonymous)) {
		return;
	}

#if TARGET_OS_EMBEDDED
	if (g_embedded_privileged_action && s_embedded_privileged_job) {
		if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return;
	}
#endif

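	/* EnableTransactions bookkeeping: the shared-memory transaction count is
	 * decremented once (and only once) here; newval ends up negative when the
	 * job has no outstanding transactions, in which case it is considered
	 * "clean" below and is SIGKILLed immediately instead of getting SIGTERM
	 * plus an exit-timeout grace period.
	 */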
	if (j->kill_via_shmem) {
		if (j->shmem) {
			if (!j->sent_kill_via_shmem) {
				j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
				newval = __sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
				j->sent_kill_via_shmem = true;
			} else {
				newval = j->shmem->vp_shmem_transaction_cnt;
			}
		} else {
			newval = -1;
		}
	}

	j->sent_signal_time = runtime_get_opaque_time();

	if (newval < 0) {
		j->clean_kill = true;
		job_kill(j);
	} else {
		(void)job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);

		if (j->exit_timeout) {
			(void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j) != -1);
		} else {
			job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
		}

		if (j->kill_via_shmem) {
			snprintf(extralog, sizeof(extralog), ": %d remaining transactions", newval + 1);
		} else {
			extralog[0] = '\0';
		}

		job_log(j, LOG_DEBUG, "Sent SIGTERM signal%s", extralog);
	}

	j->stopped = true;
}

launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}
	if ((tmp = launch_data_new_integer(j->last_exit_status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->kill_via_shmem && (tmp = launch_data_new_bool(true))) {
		int32_t tmp_cnt = -1;

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);

		if (j->shmem) {
			tmp_cnt = j->shmem->vp_shmem_transaction_cnt;
		}

		if (j->sent_kill_via_shmem) {
			tmp_cnt++;
		}

		if ((tmp = launch_data_new_integer(tmp_cnt))) {
			launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TRANSACTIONCOUNT);
		}
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if (sg->junkfds) {
				continue;
			}
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

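	/* The Mach ports in the dictionaries below are exported as MACH_PORT_NULL
	 * placeholders; only the service names carry information for the consumer
	 * of this snapshot.
	 */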
	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}

static void
jobmgr_log_active_jobs(jobmgr_t jm)
{
	const char *why_active;
	jobmgr_t jmi;
	job_t ji;

	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_active_jobs(jmi);
	}

	LIST_FOREACH(ji, &jm->jobs, sle) {
		if ((why_active = job_active(ji))) {
			if (ji->p != 1) {
				job_log(ji, LOG_DEBUG | LOG_CONSOLE, "%s", why_active);
			}
		}
	}
}

static void
jobmgr_still_alive_with_check(jobmgr_t jm)
{
	jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
	jobmgr_log_active_jobs(jm);
}

jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	/* Trim the newline that asctime_r(3) appends. */
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	jm->shutting_down = true;

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (jm->parentmgr == NULL && pid1_magic) {
		(void)jobmgr_assumes(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));

		/* Spawn the shutdown monitor. */
		if (_s_shutdown_monitor && !_s_shutdown_monitor->p) {
			job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
			job_dispatch(_s_shutdown_monitor, true);
		}
	}

	return jobmgr_do_garbage_collection(jm);
}

void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
		}
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && !job_assumes(ji, ji->p == 0)) {
			ji->p = 0;
		}
		job_remove(ji);
	}

	if (jm->req_port) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_bsport) == KERN_SUCCESS);
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_excport) == KERN_SUCCESS);
	}
	if (jm->req_asport) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_asport) == KERN_SUCCESS);
	}
#if !TARGET_OS_EMBEDDED
	if (jm->req_rport) {
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			errno = kr;
			(void)jobmgr_assumes(jm, kr == KERN_SUCCESS);
		}
	}
#endif /* !TARGET_OS_EMBEDDED */
	if (jm->req_ctx) {
		(void)jobmgr_assumes(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz) == KERN_SUCCESS);
	}

	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	date[24] = 0;

	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (pid1_magic) {
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		(void)jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		runtime_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}

void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

	if (j->alias) {
		/* HACK: Egregious code duplication. But as with machservice_delete(),
		 * job aliases can't (and shouldn't) have any complex behaviors
		 * associated with them.
		 */
		while ((ms = SLIST_FIRST(&j->machservices))) {
			machservice_delete(j, ms, false);
		}

		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, label_hash_sle);
		free(j);
		return;
	}

#if TARGET_OS_EMBEDDED
	if (g_embedded_privileged_action && s_embedded_privileged_job) {
		if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return;
	}
#endif

	/* Do this BEFORE we check and see whether the job is still active. If we're a
	 * sub-job, we're being removed due to the parent job removing us. Therefore, the
	 * parent job will free itself after this call completes. So if we defer removing
	 * ourselves from the parent's list, we'll crash when we finally get around to it.
	 */
	if (j->dedicated_instance && !j->former_subjob) {
		LIST_REMOVE(j, subjob_sle);
		j->former_subjob = true;
	}

	if (unlikely(j->p)) {
		if (j->anonymous) {
			job_reap(j);
		} else {
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}

			return;
		}
	}

	if (!j->removing) {
		j->removing = true;
		job_dispatch_curious_jobs(j);
	}

	ipc_close_all_with_job(j);

	job_log(j, LOG_INFO, "Total rusage: utime %ld.%06u stime %ld.%06u maxrss %lu ixrss %lu idrss %lu isrss %lu minflt %lu majflt %lu nswap %lu inblock %lu oublock %lu msgsnd %lu msgrcv %lu nsignals %lu nvcsw %lu nivcsw %lu",
			j->ru.ru_utime.tv_sec, j->ru.ru_utime.tv_usec,
			j->ru.ru_stime.tv_sec, j->ru.ru_stime.tv_usec,
			j->ru.ru_maxrss, j->ru.ru_ixrss, j->ru.ru_idrss, j->ru.ru_isrss,
			j->ru.ru_minflt, j->ru.ru_majflt,
			j->ru.ru_nswap, j->ru.ru_inblock, j->ru.ru_oublock,
			j->ru.ru_msgsnd, j->ru.ru_msgrcv,
			j->ru.ru_nsignals, j->ru.ru_nvcsw, j->ru.ru_nivcsw);

	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	if (!job_assumes(j, j->fork_fd == 0)) {
		(void)job_assumes(j, runtime_close(j->fork_fd) != -1);
	}

	if (j->stdin_fd) {
		(void)job_assumes(j, runtime_close(j->stdin_fd) != -1);
	}

	if (!job_assumes(j, j->log_redirect_fd == 0)) {
		(void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
	}

	if (j->j_port) {
		(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	}

	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	struct externalevent *eei = NULL;
	while ((eei = LIST_FIRST(&j->events))) {
		eventsystem_ping();
		externalevent_delete(eei);
	}

#if 0
	/* Event systems exist independently of an actual monitor job. They're
	 * created on-demand when a job has a LaunchEvents dictionary. So we
	 * really don't need to get rid of them.
	 */
	if (j->event_monitor) {
		struct eventsystem *esi = NULL;
		while ((esi = LIST_FIRST(&_s_event_systems))) {
			eventsystem_delete(esi);
		}
	}
#else
	if (false) {
		/* Make gcc happy. */
		eventsystem_delete(NULL);
	}
	if (j->event_monitor) {
		if (_s_event_update_port != MACH_PORT_NULL) {
			(void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
			_s_event_update_port = MACH_PORT_NULL;
		}
		_s_event_monitor = NULL;
	}
#endif

	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	if (j->start_interval) {
		runtime_del_weak_ref();
		(void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
	}
	if (j->poll_for_vfs_changes) {
		(void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}
	if (j->exit_timeout) {
		/* Not a big deal if this fails. It means that the timer's already been freed. */
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->jetsam_properties) {
		LIST_REMOVE(j, jetsam_sle);
		j->mgr->jetsam_jobs_cnt--;
	}
	if (j->asport != MACH_PORT_NULL) {
		(void)job_assumes(j, launchd_mport_deallocate(j->asport) == KERN_SUCCESS);
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if (j->embedded_special_privileges) {
		s_embedded_privileged_job = NULL;
	}
	if (j->shutdown_monitor) {
		_s_shutdown_monitor = NULL;
	}

	kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	job_t ji = NULL;
	job_t jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
		job_remove(ji);
	}

	job_log(j, LOG_DEBUG, "Removed");

	j->kqjob_callback = (kq_callback)0x8badf00d;
	free(j);
}

void
socketgroup_setup(launch_data_t obj, const char *key, void *context)
{
	launch_data_t tmp_oai;
	job_t j = context;
	size_t i, fd_cnt = 1;
	int *fds;

	if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
		fd_cnt = launch_data_array_get_count(obj);
	}

	fds = alloca(fd_cnt * sizeof(int));

	for (i = 0; i < fd_cnt; i++) {
		if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
			tmp_oai = launch_data_array_get_index(obj, i);
		} else {
			tmp_oai = obj;
		}

		fds[i] = launch_data_get_fd(tmp_oai);
	}

	socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);

	ipc_revoke_fds(obj);
}

bool
job_set_global_on_demand(job_t j, bool val)
{
	if (j->forced_peers_to_demand_mode && val) {
		return false;
	} else if (!j->forced_peers_to_demand_mode && !val) {
		return false;
	}

	if ((j->forced_peers_to_demand_mode = val)) {
		j->mgr->global_on_demand_cnt++;
	} else {
		j->mgr->global_on_demand_cnt--;
	}

	if (j->mgr->global_on_demand_cnt == 0) {
		jobmgr_dispatch_all(j->mgr, false);
	}

	return true;
}

bool
job_setup_machport(job_t j)
{
	mach_msg_size_t mxmsgsz;

	if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
		goto out_bad2;
	}

	if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
		(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
		goto out_bad;
	}

	return true;
out_bad2:
	(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
out_bad:
	return false;
}

kern_return_t
job_setup_exit_port(job_t j)
{
	kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
	if (!job_assumes(j, kr == KERN_SUCCESS)) {
		return MACH_PORT_NULL;
	}

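	/* The queue limit is set to 1, presumably because this port only ever
	 * needs to hold the single exit-status message for the job.
	 */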
	struct mach_port_limits limits = {
		.mpl_qlimit = 1,
	};
	kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
	(void)job_assumes(j, kr == KERN_SUCCESS);

	kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
	if (!job_assumes(j, kr == KERN_SUCCESS)) {
		(void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
		j->exit_status_port = MACH_PORT_NULL;
	}

	return kr;
}

job_t
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
{
	const char **argv = (const char **)mach_cmd2argv(cmd);
	job_t jr = NULL;

	if (!job_assumes(j, argv != NULL)) {
		goto out_bad;
	}

	jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);

	free(argv);

	/* jobs can easily be denied creation during shutdown */
	if (unlikely(jr == NULL)) {
		goto out_bad;
	}

	jr->mach_uid = uid;
	jr->ondemand = ond;
	jr->legacy_mach_job = true;
	jr->abandon_pg = true;
	jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */

	if (!job_setup_machport(jr)) {
		goto out_bad;
	}

	job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");

	return jr;

out_bad:
	if (jr) {
		job_remove(jr);
	}
	return NULL;
}

job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	struct proc_bsdshortinfo proc;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (!jobmgr_assumes(jm, anonpid != 0)) {
		errno = EINVAL;
		return NULL;
	}

	if (!jobmgr_assumes(jm, anonpid < 100000)) {
		/* The kernel currently defines PID_MAX to be 99999, but that define isn't exported. */
1609 errno = EINVAL;
1610 return NULL;
1611 }
1612
1613 /* libproc returns the number of bytes written into the buffer upon success,
1614 * zero on failure.
1615 */
1616 if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
1617 if (errno != ESRCH) {
1618 (void)jobmgr_assumes(jm, errno == 0);
1619 }
1620 return NULL;
1621 }
1622
1623 if (!jobmgr_assumes(jm, proc.pbsi_comm[0] != '\0')) {
1624 errno = EINVAL;
1625 return NULL;
1626 }
1627
1628 if (unlikely(proc.pbsi_status == SZOMB)) {
1629 jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
1630 }
1631
1632 if (unlikely(proc.pbsi_flags & P_SUGID)) {
1633 jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
1634 }
1635
1636 kp_euid = proc.pbsi_uid;
1637 kp_uid = proc.pbsi_ruid;
1638 kp_svuid = proc.pbsi_svuid;
1639 kp_egid = proc.pbsi_gid;
1640 kp_gid = proc.pbsi_rgid;
1641 kp_svgid = proc.pbsi_svgid;
1642
1643 if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
1644 jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
1645 kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
1646 }
1647
1648 /* "Fix" for a problem that shouldn't even exist.
1649 * See rdar://problem/7264615 for the symptom and rdar://problem/5020256
1650 * as to why this can happen.
1651 */
1652 if (!jobmgr_assumes(jm, (pid_t)proc.pbsi_ppid != anonpid)) {
1653 jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). It should find a different way to do whatever it's doing. Setting PPID to 0: %s", proc.pbsi_comm);
1654 errno = EINVAL;
1655 return NULL;
1656 }
1657
1658 /* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
1659 if (unlikely(shutdown_state = jm->shutting_down)) {
1660 jm->shutting_down = false;
1661 }
1662
1663 /* We only set requestor_pid for XPC domains. */
1664 const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
1665 if (jobmgr_assumes(jm, (jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL)) != NULL)) {
1666 u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;
1667
1668 total_anon_children++;
1669 jr->anonymous = true;
1670 jr->p = anonpid;
1671
1672 /* anonymous process reaping is messy */
1673 LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);
1674
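/* Note that root_jobmgr is the udata here, so EVFILT_PROC events for this
 * PID are routed to the root job manager's callback rather than the job's.
 */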
1675 if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1) && job_assumes(jr, errno == ESRCH)) {
1676 /* zombies are weird */
1677 job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
1678 jr->unload_at_mig_return = true;
1679 }
1680
1681 if (unlikely(shutdown_state)) {
1682 job_log(jr, LOG_SCOLDING, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
1683 }
1684
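/* jp is still NULL at this point; the parent lookup happens below (see the
 * rdar://problem/7264615 comment), so the label suffix is always empty here.
 */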
1685 job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
1686 }
1687
1688 if (unlikely(shutdown_state)) {
1689 jm->shutting_down = true;
1690 }
1691
1692 /* This is down here to mitigate the effects of rdar://problem/7264615, in which a process
1693 * attaches to its own parent. We need to make sure that the anonymous job has been added
1694 * to the process list so that, if it's used ptrace(3) to cause a cycle in the process
1695 * tree (thereby making it not a tree anymore), we'll find the tracing parent PID of the
1696 * parent process, which is the child, when we go looking for it in jobmgr_find_by_pid().
1697 */
1698 switch (proc.pbsi_ppid) {
1699 case 0:
1700 /* the kernel */
1701 break;
1702 case 1:
1703 if (!pid1_magic) {
1704 /* we cannot possibly find a parent job_t that is useful in this function */
1705 break;
1706 }
1707 /* fall through */
1708 default:
1709 jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
1710 if (jobmgr_assumes(jm, jp != NULL)) {
1711 if (!jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
1712 job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
1713 }
1714 }
1715 break;
1716 }
1717
1718 return jr;
1719 }
1720
1721 job_t
1722 job_new_subjob(job_t j, uuid_t identifier)
1723 {
1724 uuid_string_t idstr;
1725 uuid_unparse(identifier, idstr);
1726 /* snprintf(NULL, 0, ...) just computes the formatted length (C99). */
1727 size_t label_sz = snprintf(NULL, 0, "%s.%s", j->label, idstr);
1728
1729 job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
1730 if (launchd_assumes(nj != NULL)) {
1731 nj->kqjob_callback = job_callback;
1732 nj->mgr = j->mgr;
1733 nj->min_run_time = j->min_run_time;
1734 nj->timeout = j->timeout;
1735 nj->exit_timeout = j->exit_timeout;
1736
1737 snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
1738
1739 /* Set all our simple Booleans that are applicable. */
1740 nj->debug = j->debug;
1741 nj->ondemand = j->ondemand;
1742 nj->checkedin = true;
1743 nj->low_pri_io = j->low_pri_io;
1744 nj->setmask = j->setmask;
1745 nj->wait4debugger = j->wait4debugger;
1746 nj->internal_exc_handler = j->internal_exc_handler;
1747 nj->setnice = j->setnice;
1748 nj->abandon_pg = j->abandon_pg;
1749 nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
1750 nj->deny_job_creation = j->deny_job_creation;
1751 nj->kill_via_shmem = j->kill_via_shmem;
1752 nj->needs_kickoff = j->needs_kickoff;
1753 nj->currently_ignored = true;
1754 nj->dedicated_instance = true;
1755 nj->xpc_service = j->xpc_service;
1756
1757 nj->mask = j->mask;
1758 uuid_copy(nj->instance_id, identifier);
1759
1760 /* These jobs are purely on-demand Mach jobs. */
1761
1762 /* {Hard | Soft}ResourceLimits are not supported. */
1763
1764 struct machservice *msi = NULL;
1765 SLIST_FOREACH(msi, &j->machservices, sle) {
1766 /* Only copy MachServices that were actually declared in the plist.
1767 * So skip over per-PID ones and ones that were created via
1768 * bootstrap_register().
1769 */
1770 if (msi->upfront) {
1771 mach_port_t mp = MACH_PORT_NULL;
1772 struct machservice *msj = machservice_new(nj, msi->name, &mp, msi->per_pid);
1773 if (job_assumes(nj, msj != NULL)) {
1774 msj->reset = msi->reset;
1775 msj->delete_on_destruction = msi->delete_on_destruction;
1776 msj->drain_one_on_crash = msi->drain_one_on_crash;
1777 msj->drain_all_on_crash = msi->drain_all_on_crash;
1778 }
1779 }
1780 }
1781
1782 if (j->prog) {
1783 nj->prog = strdup(j->prog);
1784 }
1785 if (j->argv) {
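/* j->argv is a single allocation: the (argc + 1) pointer vector followed
 * immediately by the string bytes (see job_new()). malloc_size() recovers
 * the size of the whole block so it can be cloned in one shot.
 */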
1786 size_t sz = malloc_size(j->argv);
1787 nj->argv = (char **)malloc(sz);
1788 if (job_assumes(nj, nj->argv != NULL)) {
1789 /* This is the start of our strings. */
1790 char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
1791
1792 size_t i = 0;
1793 for (i = 0; i < j->argc; i++) {
1794 (void)strcpy(p, j->argv[i]);
1795 nj->argv[i] = p;
1796 p += (strlen(j->argv[i]) + 1);
1797 }
1798 nj->argv[i] = NULL;
1799 }
1800
1801 nj->argc = j->argc;
1802 }
1803
1804 /* We ignore global environment variables. */
1805 struct envitem *ei = NULL;
1806 SLIST_FOREACH(ei, &j->env, sle) {
1807 (void)job_assumes(nj, envitem_new(nj, ei->key, ei->value, false, false));
1808 }
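/* Also publish the instance UUID in the child's environment, presumably so
 * the spawned instance can tell which instance it is.
 */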
1809 uuid_string_t val;
1810 uuid_unparse(identifier, val);
1811 (void)job_assumes(nj, envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false, false));
1812
1813 if (j->rootdir) {
1814 nj->rootdir = strdup(j->rootdir);
1815 }
1816 if (j->workingdir) {
1817 nj->workingdir = strdup(j->workingdir);
1818 }
1819 if (j->username) {
1820 nj->username = strdup(j->username);
1821 }
1822 if (j->groupname) {
1823 nj->groupname = strdup(j->groupname);
1824 }
1825 /* FIXME: We shouldn't redirect all the output from these jobs to the same
1826 * file. We should uniquify the file names.
1827 */
1828 if (j->stdinpath) {
1829 nj->stdinpath = strdup(j->stdinpath);
1830 }
1831 if (j->stdoutpath) {
1832 nj->stdoutpath = strdup(j->stdoutpath);
1833 }
1834 if (j->stderrpath) {
1835 nj->stderrpath = strdup(j->stderrpath);
1836 }
1837 if (j->alt_exc_handler) {
1838 nj->alt_exc_handler = strdup(j->alt_exc_handler);
1839 }
1840 #if HAVE_SANDBOX
1841 if (j->seatbelt_profile) {
1842 nj->seatbelt_profile = strdup(j->seatbelt_profile);
1843 }
1844 #endif
1845
1846 #if HAVE_QUARANTINE
1847 if (j->quarantine_data) {
1848 nj->quarantine_data = strdup(j->quarantine_data);
1849 }
1850 nj->quarantine_data_sz = j->quarantine_data_sz;
1851 #endif
1852 if (j->j_binpref) {
1853 size_t sz = malloc_size(j->j_binpref);
1854 nj->j_binpref = (cpu_type_t *)malloc(sz);
1855 if (job_assumes(nj, nj->j_binpref)) {
1856 memcpy(nj->j_binpref, j->j_binpref, sz);
nj->j_binpref_cnt = j->j_binpref_cnt;
1857 }
1858 }
1859
1860 /* JetsamPriority is unsupported. */
1861
1862 if (j->asport != MACH_PORT_NULL) {
1863 (void)job_assumes(nj, launchd_mport_copy_send(j->asport) == KERN_SUCCESS);
1864 nj->asport = j->asport;
1865 }
1866
1867 LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
1868
1869 jobmgr_t where2put = root_jobmgr;
1870 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
1871 where2put = j->mgr;
1872 }
1873 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
1874 LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
1875 }
1876
1877 return nj;
1878 }
1879
1880 job_t
1881 job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
1882 {
1883 const char *const *argv_tmp = argv;
1884 char tmp_path[PATH_MAX];
1885 char auto_label[1000];
1886 const char *bn = NULL;
1887 char *co;
1888 size_t minlabel_len;
1889 size_t i, cc = 0;
1890 job_t j;
1891
1892 launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);
1893
1894 if (unlikely(jm->shutting_down)) {
1895 errno = EINVAL;
1896 return NULL;
1897 }
1898
1899 if (unlikely(prog == NULL && argv == NULL)) {
1900 errno = EINVAL;
1901 return NULL;
1902 }
1903
1904 const char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
1905 if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
1906 if (prog) {
1907 bn = prog;
1908 } else {
1909 strlcpy(tmp_path, argv[0], sizeof(tmp_path));
1910 bn = basename(tmp_path); /* prog for auto labels is kp.kp_kproc.p_comm */
1911 }
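/* The dummy address below is sized to match what %p will produce for this
 * pointer width; once the job is allocated, the label is re-rendered with
 * the real address (see the snprintf() into j->label further down).
 */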
1912 snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
1913 label = auto_label;
1914 /* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
1915 minlabel_len = strlen(label) + MAXCOMLEN;
1916 } else {
1917 if (label == AUTO_PICK_XPC_LABEL) {
1918 minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
1919 } else {
1920 minlabel_len = strlen(label);
1921 }
1922 }
1923
1924 j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);
1925
1926 if (!jobmgr_assumes(jm, j != NULL)) {
1927 return NULL;
1928 }
1929
1930 if (unlikely(label == auto_label)) {
1931 snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
1932 } else {
1933 strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
1934 }
1935 j->kqjob_callback = job_callback;
1936 j->mgr = jm;
1937 j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
1938 j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
1939 j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
1940 j->currently_ignored = true;
1941 j->ondemand = true;
1942 j->checkedin = true;
1943 j->jetsam_priority = -1;
1944 j->jetsam_memlimit = -1;
1945 j->jetsam_seq = -1;
1946 uuid_clear(j->expected_audit_uuid);
1947
1948 if (prog) {
1949 j->prog = strdup(prog);
1950 if (!job_assumes(j, j->prog != NULL)) {
1951 goto out_bad;
1952 }
1953 }
1954
1955 if (likely(argv)) {
1956 while (*argv_tmp++) {
1957 j->argc++;
1958 }
1959
1960 for (i = 0; i < j->argc; i++) {
1961 cc += strlen(argv[i]) + 1;
1962 }
1963
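/* One allocation holds both the (argc + 1) pointer vector and the
 * flattened argument strings; co walks the string region below.
 */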
1964 j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
1965
1966 if (!job_assumes(j, j->argv != NULL)) {
1967 goto out_bad;
1968 }
1969
1970 co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));
1971
1972 for (i = 0; i < j->argc; i++) {
1973 j->argv[i] = co;
1974 strcpy(co, argv[i]);
1975 co += strlen(argv[i]) + 1;
1976 }
1977 j->argv[i] = NULL;
1978 }
1979
1980 if (strcmp(j->label, "com.apple.WindowServer") == 0) {
1981 j->has_console = true;
1982 }
1983
1984 LIST_INSERT_HEAD(&jm->jobs, j, sle);
1985
1986 jobmgr_t where2put_label = root_jobmgr;
1987 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
1988 where2put_label = j->mgr;
1989 }
1990 LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
1991 uuid_clear(j->expected_audit_uuid);
1992
1993 job_log(j, LOG_DEBUG, "Conceived");
1994
1995 return j;
1996
1997 out_bad:
1998 if (j->prog) {
1999 free(j->prog);
2000 }
2001 free(j);
2002
2003 return NULL;
2004 }
2005
2006 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
2007 job_t
2008 job_new_alias(jobmgr_t jm, job_t src)
2009 {
2010 job_t j = NULL;
2011 if (job_find(jm, src->label)) {
2012 errno = EEXIST;
2013 } else {
2014 j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2015 if (jobmgr_assumes(jm, j != NULL)) {
2016 strcpy((char *)j->label, src->label);
2017 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2018 LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2019 /* Bad jump address. The kqueue callback for aliases should never be
2020 * invoked.
2021 */
2022 j->kqjob_callback = (kq_callback)0xfa1afe1;
2023 j->alias = src;
2024 j->mgr = jm;
2025
2026 struct machservice *msi = NULL;
2027 SLIST_FOREACH(msi, &src->machservices, sle) {
2028 if (!machservice_new_alias(j, msi)) {
2029 jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2030 errno = EINVAL;
2031 job_remove(j);
2032 j = NULL;
2033 break;
2034 }
2035 }
2036 }
2037
2038 if (j) {
2039 job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2040 }
2041 }
2042
2043 return j;
2044 }
2045 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
2046
2047 job_t
2048 job_import(launch_data_t pload)
2049 {
2050 job_t j = jobmgr_import2(root_jobmgr, pload);
2051
2052 if (unlikely(j == NULL)) {
2053 return NULL;
2054 }
2055
2056 /* Since jobs are effectively stalled until they get security sessions assigned
2057 * to them, we may wish to reconsider this behavior of calling the job "enabled"
2058 * as far as other jobs with the OtherJobEnabled KeepAlive criterion set.
2059 */
2060 job_dispatch_curious_jobs(j);
2061 return job_dispatch(j, false);
2062 }
2063
2064 launch_data_t
2065 job_import_bulk(launch_data_t pload)
2066 {
2067 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
2068 job_t *ja;
2069 size_t i, c = launch_data_array_get_count(pload);
2070
2071 ja = alloca(c * sizeof(job_t));
2072
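/* First pass: import everything, recording a per-job errno in the reply
 * array. Second pass: dispatch whatever imported successfully.
 */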
2073 for (i = 0; i < c; i++) {
2074 if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
2075 errno = 0;
2076 }
2077 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
2078 }
2079
2080 for (i = 0; i < c; i++) {
2081 if (likely(ja[i])) {
2082 job_dispatch_curious_jobs(ja[i]);
2083 job_dispatch(ja[i], false);
2084 }
2085 }
2086
2087 return resp;
2088 }
2089
2090 void
2091 job_import_bool(job_t j, const char *key, bool value)
2092 {
2093 bool found_key = false;
2094
2095 switch (key[0]) {
2096 case 'a':
2097 case 'A':
2098 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2099 j->abandon_pg = value;
2100 found_key = true;
2101 }
2102 break;
2103 case 'b':
2104 case 'B':
2105 if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2106 j->dirty_at_shutdown = value;
2107 found_key = true;
2108 }
2109 break;
2110 case 'k':
2111 case 'K':
2112 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
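/* KeepAlive is the logical inverse of the legacy OnDemand flag. */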
2113 j->ondemand = !value;
2114 found_key = true;
2115 }
2116 break;
2117 case 'o':
2118 case 'O':
2119 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
2120 j->ondemand = value;
2121 found_key = true;
2122 }
2123 break;
2124 case 'd':
2125 case 'D':
2126 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
2127 j->debug = value;
2128 found_key = true;
2129 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
2130 (void)job_assumes(j, !value);
2131 found_key = true;
2132 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2133 j->disable_aslr = value;
2134 found_key = true;
2135 }
2136 break;
2137 case 'h':
2138 case 'H':
2139 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
2140 job_log(j, LOG_INFO, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
2141 j->dirty_at_shutdown = value;
2142 found_key = true;
2143 }
2144 break;
2145 case 's':
2146 case 'S':
2147 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
2148 j->session_create = value;
2149 found_key = true;
2150 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2151 j->start_on_mount = value;
2152 found_key = true;
2153 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
2154 /* this only does something on Mac OS X 10.4 "Tiger" */
2155 found_key = true;
2156 } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
2157 if (_s_shutdown_monitor) {
2158 job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2159 } else {
2160 j->shutdown_monitor = true;
2161 _s_shutdown_monitor = j;
2162 }
2163 found_key = true;
2164 }
2165 break;
2166 case 'l':
2167 case 'L':
2168 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
2169 j->low_pri_io = value;
2170 found_key = true;
2171 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2172 j->only_once = value;
2173 found_key = true;
2174 }
2175 break;
2176 case 'm':
2177 case 'M':
2178 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2179 j->internal_exc_handler = value;
2180 found_key = true;
2181 } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2182 j->multiple_instances = value;
2183 found_key = true;
2184 }
2185 break;
2186 case 'i':
2187 case 'I':
2188 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2189 if (getuid() != 0) {
2190 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2191 return;
2192 }
2193 j->no_init_groups = !value;
2194 found_key = true;
2195 } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
2196 j->ignore_pg_at_shutdown = value;
2197 found_key = true;
2198 }
2199 break;
2200 case 'r':
2201 case 'R':
2202 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2203 if (value) {
2204 /* We don't want value == false to change j->start_pending */
2205 j->start_pending = true;
2206 }
2207 found_key = true;
2208 }
2209 break;
2210 case 'e':
2211 case 'E':
2212 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
2213 j->globargv = value;
2214 found_key = true;
2215 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
2216 j->kill_via_shmem = value;
2217 found_key = true;
2218 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2219 j->debug_before_kill = value;
2220 found_key = true;
2221 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
2222 if (!s_embedded_privileged_job) {
2223 j->embedded_special_privileges = value;
2224 s_embedded_privileged_job = j;
2225 } else {
2226 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2227 }
2228 found_key = true;
2229 } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
2230 if (job_assumes(j, _s_event_monitor == NULL)) {
2231 j->event_monitor = value;
2232 if (value) {
2233 _s_event_monitor = j;
2234 }
2235 } else {
2236 job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility!");
2237 }
2238 found_key = true;
2239 }
2240 break;
2241 case 'w':
2242 case 'W':
2243 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
2244 j->wait4debugger = value;
2245 found_key = true;
2246 }
2247 break;
2248 default:
2249 break;
2250 }
2251
2252 if (unlikely(!found_key)) {
2253 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2254 }
2255 }
2256
2257 void
2258 job_import_string(job_t j, const char *key, const char *value)
2259 {
2260 char **where2put = NULL;
2261
2262 switch (key[0]) {
2263 case 'm':
2264 case 'M':
2265 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2266 where2put = &j->alt_exc_handler;
2267 }
2268 break;
2269 case 'p':
2270 case 'P':
2271 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
2272 return;
2273 } else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0) {
2274 if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
2275 j->pstype = POSIX_SPAWN_OSX_TALAPP_START;
2276 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_WIDGET) == 0) {
2277 j->pstype = POSIX_SPAWN_OSX_WIDGET_START;
2278 }
2279 #if TARGET_OS_EMBEDDED
2280 else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_IOSAPP) == 0) {
2281 j->pstype = POSIX_SPAWN_IOS_APP_START;
2282 }
2283 #endif /* TARGET_OS_EMBEDDED */
2284 else {
2285 job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
2286 }
2287 return;
2288 }
2289 break;
2290 case 'l':
2291 case 'L':
2292 if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
2293 return;
2294 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2295 return;
2296 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2297 return;
2298 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2299 return;
2300 }
2301 break;
2302 case 'r':
2303 case 'R':
2304 if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
2305 if (getuid() != 0) {
2306 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2307 return;
2308 }
2309 where2put = &j->rootdir;
2310 }
2311 break;
2312 case 'w':
2313 case 'W':
2314 if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
2315 where2put = &j->workingdir;
2316 }
2317 break;
2318 case 'u':
2319 case 'U':
2320 if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
2321 if (getuid() != 0) {
2322 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2323 return;
2324 } else if (strcmp(value, "root") == 0) {
2325 return;
2326 }
2327 where2put = &j->username;
2328 }
2329 break;
2330 case 'g':
2331 case 'G':
2332 if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
2333 if (getuid() != 0) {
2334 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2335 return;
2336 } else if (strcmp(value, "wheel") == 0) {
2337 return;
2338 }
2339 where2put = &j->groupname;
2340 }
2341 break;
2342 case 's':
2343 case 'S':
2344 if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
2345 where2put = &j->stdoutpath;
2346 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
2347 where2put = &j->stderrpath;
2348 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
2349 where2put = &j->stdinpath;
2350 j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
2351 if (job_assumes(j, j->stdin_fd != -1)) {
2352 /* open() should not block, but regular IO by the job should */
2353 (void)job_assumes(j, fcntl(j->stdin_fd, F_SETFL, 0) != -1);
2354 /* XXX -- EV_CLEAR should make named pipes happy? */
2355 (void)job_assumes(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j) != -1);
2356 } else {
2357 j->stdin_fd = 0;
2358 }
2359 #if HAVE_SANDBOX
2360 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
2361 where2put = &j->seatbelt_profile;
2362 #endif
2363 }
2364 break;
2365 case 'X':
2366 case 'x':
2367 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
2368 return;
2369 }
2370 break;
2371 default:
2372 job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
2373 break;
2374 }
2375
2376 if (likely(where2put)) {
2377 (void)job_assumes(j, (*where2put = strdup(value)) != NULL);
2378 } else {
2379 /* See rdar://problem/5496612. These two are okay. */
2380 if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) != 0 && strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) != 0) {
2381 job_log(j, LOG_WARNING, "Unknown key: %s", key);
2382 }
2383 }
2384 }
2385
2386 void
2387 job_import_integer(job_t j, const char *key, long long value)
2388 {
2389 switch (key[0]) {
2390 case 'e':
2391 case 'E':
2392 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
2393 if (unlikely(value < 0)) {
2394 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2395 } else if (unlikely(value > UINT32_MAX)) {
2396 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2397 } else {
2398 j->exit_timeout = (typeof(j->exit_timeout)) value;
2399 }
2400 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
2401 j->main_thread_priority = value;
2402 }
2403 break;
2404 case 'j':
2405 case 'J':
2406 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
2407 job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2408
2409 launch_data_t pri = launch_data_new_integer(value);
2410 if (job_assumes(j, pri != NULL)) {
2411 jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2412 launch_data_free(pri);
2413 }
2414 }
break;
2415 case 'n':
2416 case 'N':
2417 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
2418 if (unlikely(value < PRIO_MIN)) {
2419 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2420 } else if (unlikely(value > PRIO_MAX)) {
2421 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2422 } else {
2423 j->nice = (typeof(j->nice)) value;
2424 j->setnice = true;
2425 }
2426 }
2427 break;
2428 case 't':
2429 case 'T':
2430 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
2431 if (unlikely(value < 0)) {
2432 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2433 } else if (unlikely(value > UINT32_MAX)) {
2434 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2435 } else {
2436 j->timeout = (typeof(j->timeout)) value;
2437 }
2438 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2439 if (value < 0) {
2440 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2441 } else if (value > UINT32_MAX) {
2442 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2443 } else {
2444 j->min_run_time = (typeof(j->min_run_time)) value;
2445 }
2446 }
2447 break;
2448 case 'u':
2449 case 'U':
2450 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2451 j->mask = value;
2452 j->setmask = true;
2453 }
2454 break;
2455 case 's':
2456 case 'S':
2457 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
2458 if (unlikely(value <= 0)) {
2459 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2460 } else if (unlikely(value > UINT32_MAX)) {
2461 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2462 } else {
2463 runtime_add_weak_ref();
2464 j->start_interval = (typeof(j->start_interval)) value;
2465
2466 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
2467 }
2468 #if HAVE_SANDBOX
2469 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2470 j->seatbelt_flags = value;
2471 #endif
2472 }
2473
2474 break;
2475 default:
2476 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2477 break;
2478 }
2479 }
2480
2481 void
2482 job_import_opaque(job_t j __attribute__((unused)),
2483 const char *key, launch_data_t value __attribute__((unused)))
2484 {
2485 switch (key[0]) {
2486 case 'q':
2487 case 'Q':
2488 #if HAVE_QUARANTINE
2489 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2490 size_t tmpsz = launch_data_get_opaque_size(value);
2491
2492 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2493 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2494 j->quarantine_data_sz = tmpsz;
2495 }
2496 }
2497 #endif
break;
2498 case 's':
2499 case 'S':
2500 if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
2501 size_t tmpsz = launch_data_get_opaque_size(value);
2502 if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
2503 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2504 }
2505 }
2506 break;
2507 default:
2508 break;
2509 }
2510 }
2511
2512 static void
2513 policy_setup(launch_data_t obj, const char *key, void *context)
2514 {
2515 job_t j = context;
2516 bool found_key = false;
2517
2518 switch (key[0]) {
2519 case 'd':
2520 case 'D':
2521 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2522 j->deny_job_creation = launch_data_get_bool(obj);
2523 found_key = true;
2524 }
2525 break;
2526 default:
2527 break;
2528 }
2529
2530 if (unlikely(!found_key)) {
2531 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2532 }
2533 }
2534
2535 void
2536 job_import_dictionary(job_t j, const char *key, launch_data_t value)
2537 {
2538 launch_data_t tmp;
2539
2540 switch (key[0]) {
2541 case 'p':
2542 case 'P':
2543 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2544 launch_data_dict_iterate(value, policy_setup, j);
2545 }
2546 break;
2547 case 'k':
2548 case 'K':
2549 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2550 launch_data_dict_iterate(value, semaphoreitem_setup, j);
2551 }
2552 break;
2553 case 'i':
2554 case 'I':
2555 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2556 j->inetcompat = true;
2557 j->abandon_pg = true;
2558 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2559 j->inetcompat_wait = launch_data_get_bool(tmp);
2560 }
2561 }
2562 break;
2563 case 'j':
2564 case 'J':
2565 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
2566 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2567 }
break;
2568 case 'e':
2569 case 'E':
2570 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2571 launch_data_dict_iterate(value, envitem_setup, j);
2572 }
2573 break;
2574 case 'u':
2575 case 'U':
2576 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2577 j->importing_global_env = true;
2578 launch_data_dict_iterate(value, envitem_setup, j);
2579 j->importing_global_env = false;
2580 }
2581 break;
2582 case 's':
2583 case 'S':
2584 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2585 launch_data_dict_iterate(value, socketgroup_setup, j);
2586 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2587 calendarinterval_new_from_obj(j, value);
2588 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2589 launch_data_dict_iterate(value, limititem_setup, j);
2590 #if HAVE_SANDBOX
2591 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2592 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2593 #endif
2594 }
2595 break;
2596 case 'h':
2597 case 'H':
2598 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2599 j->importing_hard_limits = true;
2600 launch_data_dict_iterate(value, limititem_setup, j);
2601 j->importing_hard_limits = false;
2602 }
2603 break;
2604 case 'm':
2605 case 'M':
2606 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2607 launch_data_dict_iterate(value, machservice_setup, j);
2608 }
2609 break;
2610 case 'l':
2611 case 'L':
2612 if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2613 launch_data_dict_iterate(value, eventsystem_setup, j);
2614 } else {
2615 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2616 return;
2617 }
2618 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2619 return;
2620 }
2621 }
2622 break;
2623 default:
2624 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2625 break;
2626 }
2627 }
2628
2629 void
2630 job_import_array(job_t j, const char *key, launch_data_t value)
2631 {
2632 size_t i, value_cnt = launch_data_array_get_count(value);
2633 const char *str;
2634
2635 switch (key[0]) {
2636 case 'p':
2637 case 'P':
2638 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
2639 return;
2640 }
2641 break;
2642 case 'l':
2643 case 'L':
2644 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2645 return;
2646 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2647 return;
2648 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2649 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2650 return;
2651 }
2652 break;
2653 case 'q':
2654 case 'Q':
2655 if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
2656 for (i = 0; i < value_cnt; i++) {
2657 str = launch_data_get_string(launch_data_array_get_index(value, i));
2658 if (job_assumes(j, str != NULL)) {
2659 semaphoreitem_new(j, DIR_NOT_EMPTY, str);
2660 }
2661 }
2662
2663 }
2664 break;
2665 case 'w':
2666 case 'W':
2667 if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
2668 for (i = 0; i < value_cnt; i++) {
2669 str = launch_data_get_string(launch_data_array_get_index(value, i));
2670 if (job_assumes(j, str != NULL)) {
2671 semaphoreitem_new(j, PATH_CHANGES, str);
2672 }
2673 }
2674 }
2675 break;
2676 case 'b':
2677 case 'B':
2678 if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
2679 socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
2680 } else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
2681 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
2682 j->j_binpref_cnt = value_cnt;
2683 for (i = 0; i < value_cnt; i++) {
2684 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
2685 }
2686 }
2687 }
2688 break;
2689 case 's':
2690 case 'S':
2691 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2692 for (i = 0; i < value_cnt; i++) {
2693 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
2694 }
2695 }
2696 break;
2697 default:
2698 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
2699 break;
2700 }
2701 }
2702
2703 void
2704 job_import_keys(launch_data_t obj, const char *key, void *context)
2705 {
2706 job_t j = context;
2707 launch_data_type_t kind;
2708
2709 if (!launchd_assumes(obj != NULL)) {
2710 return;
2711 }
2712
2713 kind = launch_data_get_type(obj);
2714
2715 switch (kind) {
2716 case LAUNCH_DATA_BOOL:
2717 job_import_bool(j, key, launch_data_get_bool(obj));
2718 break;
2719 case LAUNCH_DATA_STRING:
2720 job_import_string(j, key, launch_data_get_string(obj));
2721 break;
2722 case LAUNCH_DATA_INTEGER:
2723 job_import_integer(j, key, launch_data_get_integer(obj));
2724 break;
2725 case LAUNCH_DATA_DICTIONARY:
2726 job_import_dictionary(j, key, obj);
2727 break;
2728 case LAUNCH_DATA_ARRAY:
2729 job_import_array(j, key, obj);
2730 break;
2731 case LAUNCH_DATA_OPAQUE:
2732 job_import_opaque(j, key, obj);
2733 break;
2734 default:
2735 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
2736 break;
2737 }
2738 }
2739
2740 job_t
2741 jobmgr_import2(jobmgr_t jm, launch_data_t pload)
2742 {
2743 launch_data_t tmp, ldpa;
2744 const char *label = NULL, *prog = NULL;
2745 const char **argv = NULL;
2746 job_t j;
2747
2748 if (!jobmgr_assumes(jm, pload != NULL)) {
2749 errno = EINVAL;
2750 return NULL;
2751 }
2752
2753 if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
2754 errno = EINVAL;
2755 return NULL;
2756 }
2757
2758 if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
2759 errno = EINVAL;
2760 return NULL;
2761 }
2762
2763 if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
2764 errno = EINVAL;
2765 return NULL;
2766 }
2767
2768 if (unlikely(!(label = launch_data_get_string(tmp)))) {
2769 errno = EINVAL;
2770 return NULL;
2771 }
2772
2773 #if TARGET_OS_EMBEDDED
2774 if (unlikely(g_embedded_privileged_action && s_embedded_privileged_job)) {
2775 if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
2776 errno = EPERM;
2777 return NULL;
2778 }
2779
2780 const char *username = NULL;
2781 if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
2782 username = launch_data_get_string(tmp);
2783 } else {
2784 errno = EPERM;
2785 return NULL;
2786 }
2787
2788 if (!jobmgr_assumes(jm, s_embedded_privileged_job->username != NULL && username != NULL)) {
2789 errno = EPERM;
2790 return NULL;
2791 }
2792
2793 if (unlikely(strcmp(s_embedded_privileged_job->username, username) != 0)) {
2794 errno = EPERM;
2795 return NULL;
2796 }
2797 } else if (g_embedded_privileged_action) {
2798 errno = EINVAL;
2799 return NULL;
2800 }
2801 #endif
2802
2803 if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
2804 (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
2805 prog = launch_data_get_string(tmp);
2806 }
2807
2808 int argc = 0;
2809 if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
2810 size_t i, c;
2811
2812 if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
2813 errno = EINVAL;
2814 return NULL;
2815 }
2816
2817 c = launch_data_array_get_count(ldpa);
2818
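/* argv lives on the stack; job_new() deep-copies the strings, so nothing
 * here needs to outlive this function.
 */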
2819 argv = alloca((c + 1) * sizeof(char *));
2820
2821 for (i = 0; i < c; i++) {
2822 tmp = launch_data_array_get_index(ldpa, i);
2823
2824 if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
2825 errno = EINVAL;
2826 return NULL;
2827 }
2828
2829 argv[i] = launch_data_get_string(tmp);
2830 }
2831
2832 argv[i] = NULL;
2833 argc = i;
2834 }
2835
2836 if (!prog && argc == 0) {
2837 jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
2838 errno = EINVAL;
2839 return NULL;
2840 }
2841
2842 /* Find the requested session. You cannot load services into XPC domains in
2843 * this manner.
2844 */
2845 launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2846 if (session) {
2847 jobmgr_t jmt = NULL;
2848 if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
2849 jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
2850 if (!jmt) {
2851 jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
2852 } else {
2853 jm = jmt;
2854 }
2855 } else {
2856 jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
2857 }
2858
2859 if (!jmt) {
2860 errno = EINVAL;
2861 return NULL;
2862 }
2863 }
2864
2865 /* For legacy reasons, we have a global hash of all labels in all job
2866 * managers. So rather than make it a global, we store it in the root job
2867 * manager. But for an XPC domain, we store a local hash of all services in
2868 * the domain.
2869 */
2870 jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
2871 if (unlikely((j = job_find(where2look, label)) != NULL)) {
2872 if (jm->xpc_singleton) {
2873 /* There can be (and probably will be) multiple attempts to import the
2874 * same XPC service from the same framework. This is okay. It's
2875 * treated as a singleton, so just return the existing one so that
2876 * it may be aliased into the requesting process' XPC domain.
2877 */
2878 return j;
2879 } else {
2880 /* If we're not a global XPC domain, then it's an error to try
2881 * importing the same job/service multiple times.
2882 */
2883 errno = EEXIST;
2884 return NULL;
2885 }
2886 } else if (unlikely(!jobmgr_label_test(where2look, label))) {
2887 errno = EINVAL;
2888 return NULL;
2889 }
2890 jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);
2891
2892 if (likely(j = job_new(jm, label, prog, argv))) {
2893 launch_data_dict_iterate(pload, job_import_keys, j);
2894 if (!uuid_is_null(j->expected_audit_uuid)) {
2895 uuid_string_t uuid_str;
2896 uuid_unparse(j->expected_audit_uuid, uuid_str);
2897 job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
2898 LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
2899 errno = ENEEDAUTH;
2900 } else {
2901 job_log(j, LOG_DEBUG, "No security session specified.");
2902 j->asport = MACH_PORT_NULL;
2903 }
2904
2905 if (j->event_monitor) {
2906 if (job_assumes(j, LIST_FIRST(&j->events) == NULL)) {
2907 struct machservice *msi = NULL;
2908 SLIST_FOREACH(msi, &j->machservices, sle) {
2909 if (msi->event_update_port) {
2910 break;
2911 }
2912 }
2913
2914 if (job_assumes(j, msi != NULL)) {
2915 /* Create our send-once right so we can kick things off. */
2916 (void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
2917 if (!LIST_EMPTY(&_s_event_systems)) {
2918 eventsystem_ping();
2919 }
2920 }
2921 } else {
2922 job_log(j, LOG_ERR, "The event monitor job may not have a LaunchEvents dictionary.");
2923 job_remove(j);
2924 j = NULL;
2925 }
2926 }
2927 }
2928
2929 return j;
2930 }
2931
2932 bool
2933 jobmgr_label_test(jobmgr_t jm, const char *str)
2934 {
2935 char *endstr = NULL;
2936 const char *ptr;
2937
2938 if (str[0] == '\0') {
2939 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
2940 return false;
2941 }
2942
2943 for (ptr = str; *ptr; ptr++) {
2944 if (iscntrl(*ptr)) {
2945 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
2946 return false;
2947 }
2948 }
2949
2950 strtoll(str, &endstr, 0);
2951
2952 if (str != endstr) {
2953 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to begin with numbers: %s", str);
2954 return false;
2955 }
2956
2957 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
2958 (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
2959 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
2960 return false;
2961 }
2962
2963 return true;
2964 }
2965
2966 job_t
2967 job_find(jobmgr_t jm, const char *label)
2968 {
2969 job_t ji;
2970
2971 if (!jm) {
2972 jm = root_jobmgr;
2973 }
2974
2975 LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
2976 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
2977 continue; /* 5351245 and 5488633 respectively */
2978 }
2979
2980 if (strcmp(ji->label, label) == 0) {
2981 return ji;
2982 }
2983 }
2984
2985 errno = ESRCH;
2986 return NULL;
2987 }
2988
2989 /* Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid(). */
2990 job_t
2991 jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
2992 {
2993 job_t ji = NULL;
2994 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
2995 if (ji->p == p && (!ji->anonymous || anon_okay)) {
2996 return ji;
2997 }
2998 }
2999
3000 jobmgr_t jmi = NULL;
3001 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3002 if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
3003 break;
3004 }
3005 }
3006
3007 return ji;
3008 }
3009
3010 job_t
3011 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3012 {
3013 job_t ji;
3014
3015 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3016 if (ji->p == p) {
3017 return ji;
3018 }
3019 }
3020
3021 return create_anon ? job_new_anonymous(jm, p) : NULL;
3022 }
3023
3024 job_t
3025 job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
3026 {
3027 jobmgr_t jmi;
3028 job_t ji;
3029
3030 if (jm->jm_port == mport) {
3031 return jobmgr_find_by_pid(jm, upid, true);
3032 }
3033
3034 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3035 job_t jr;
3036
3037 if ((jr = job_mig_intran2(jmi, mport, upid))) {
3038 return jr;
3039 }
3040 }
3041
3042 LIST_FOREACH(ji, &jm->jobs, sle) {
3043 if (ji->j_port == mport) {
3044 return ji;
3045 }
3046 }
3047
3048 return NULL;
3049 }
3050
3051 job_t
3052 job_mig_intran(mach_port_t p)
3053 {
3054 struct ldcred *ldc = runtime_get_caller_creds();
3055 job_t jr;
3056
3057 jr = job_mig_intran2(root_jobmgr, p, ldc->pid);
3058
3059 if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
3060 struct proc_bsdshortinfo proc;
3061 if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3062 if (errno != ESRCH) {
3063 (void)jobmgr_assumes(root_jobmgr, errno == 0);
3064 } else {
3065 jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc->pid, ldc->uid, ldc->euid, p, proc.pbsi_comm);
3066 }
3067 }
3068 }
3069
3070 return jr;
3071 }
3072
3073 job_t
3074 job_find_by_service_port(mach_port_t p)
3075 {
3076 struct machservice *ms;
3077
3078 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
3079 if (ms->recv && (ms->port == p)) {
3080 return ms->job;
3081 }
3082 }
3083
3084 return NULL;
3085 }
3086
3087 void
3088 job_mig_destructor(job_t j)
3089 {
3090 /*
3091 * 5477111
3092 *
3093 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
3094 */
3095
3096 if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
3097 job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
3098 job_remove(j);
3099 }
3100
3101 workaround_5477111 = NULL;
3102
3103 calendarinterval_sanity_check();
3104 }
3105
3106 void
3107 job_export_all2(jobmgr_t jm, launch_data_t where)
3108 {
3109 jobmgr_t jmi;
3110 job_t ji;
3111
3112 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3113 job_export_all2(jmi, where);
3114 }
3115
3116 LIST_FOREACH(ji, &jm->jobs, sle) {
3117 launch_data_t tmp;
3118
3119 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3120 launch_data_dict_insert(where, tmp, ji->label);
3121 }
3122 }
3123 }
3124
3125 launch_data_t
3126 job_export_all(void)
3127 {
3128 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3129
3130 if (launchd_assumes(resp != NULL)) {
3131 job_export_all2(root_jobmgr, resp);
3132 }
3133
3134 return resp;
3135 }
3136
3137 void
3138 job_log_stray_pg(job_t j)
3139 {
3140 pid_t *pids = NULL;
3141 size_t len = sizeof(pid_t) * get_kern_max_proc();
3142 int i = 0, kp_cnt = 0;
3143
3144 if (!do_apple_internal_logging) {
3145 return;
3146 }
3147
3148 runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);
3149
3150 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
3151 return;
3152 }
3153 if (!job_assumes(j, (kp_cnt = proc_listpgrppids(j->p, pids, len)) != -1)) {
3154 goto out;
3155 }
3156
3157 for (i = 0; i < kp_cnt; i++) {
3158 pid_t p_i = pids[i];
3159 if (p_i == j->p) {
3160 continue;
3161 } else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
3162 continue;
3163 }
3164
3165 struct proc_bsdshortinfo proc;
3166 if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3167 if (errno != ESRCH) {
3168 (void)job_assumes(j, errno == 0);
3169 }
3170 continue;
3171 }
3172
3173 pid_t pp_i = proc.pbsi_ppid;
3174 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
3175 const char *n = proc.pbsi_comm;
3176
3177 job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
3178 }
3179
3180 out:
3181 free(pids);
3182 }
3183
3184 void
3185 job_reap(job_t j)
3186 {
3187 struct rusage ru;
3188 int status;
3189
3190 bool is_system_bootstrapper = j->is_bootstrapper && pid1_magic && !j->mgr->parentmgr;
3191
3192 job_log(j, LOG_DEBUG, "Reaping");
3193
3194 if (j->shmem) {
3195 (void)job_assumes(j, vm_deallocate(mach_task_self(), (vm_address_t)j->shmem, getpagesize()) == 0);
3196 j->shmem = NULL;
3197 }
3198
3199 if (unlikely(j->weird_bootstrap)) {
3200 int64_t junk = 0;
3201 job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
3202 }
3203
3204 if (j->log_redirect_fd && !j->legacy_LS_job) {
3205 job_log_stdouterr(j); /* one last chance */
3206
3207 if (j->log_redirect_fd) {
3208 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
3209 j->log_redirect_fd = 0;
3210 }
3211 }
3212
3213 if (j->fork_fd) {
3214 (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
3215 j->fork_fd = 0;
3216 }
3217
3218 if (j->anonymous) {
3219 status = 0;
3220 memset(&ru, 0, sizeof(ru));
3221 } else {
3222 /*
3223 * The job is dead. While the PID/PGID is still known to be
3224 * valid, try to kill abandoned descendant processes.
3225 */
3226 job_log_stray_pg(j);
3227 if (!j->abandon_pg) {
3228 if (unlikely(runtime_killpg(j->p, SIGTERM) == -1 && errno != ESRCH)) {
3229 #ifdef __LP64__
3230 job_log(j, LOG_APPLEONLY, "Bug: 5487498");
3231 #else
3232 (void)job_assumes(j, false);
3233 #endif
3234 }
3235 }
3236
3237 /* We have to work around one of two kernel bugs here. ptrace(3) may
3238 * have abducted the child away from us and reparented it to the tracing
3239 * process. If the process then exits, we still get NOTE_EXIT, but we
3240 * cannot reap it because the kernel may not have restored the true
3241 * parent/child relationship in time.
3242 *
3243 * See <rdar://problem/5020256>.
3244 *
3245 * The other bug is if the shutdown monitor has suspended a task and not
3246 * resumed it before exiting. In this case, the kernel will not clean up
3247 * after the shutdown monitor. It will, instead, leave the task
3248 * suspended and not process any pending signals on the event loop
3249 * for the task.
3250 *
3251 * There are a variety of other kernel bugs that could prevent a process
3252 * from exiting, usually having to do with faulty hardware or talking to
3253 * misbehaving drivers that mark a thread as uninterruptible and
3254 * deadlock/hang before unmarking it as such. So we have to work around
3255 * that too.
3256 *
3257 * See <rdar://problem/9284889&9359725>.
3258 */
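/* In both fallback paths below, W_EXITCODE(-1, SIGSEGV) fabricates a
 * wait(2)-style status so that the rest of the reaping logic can proceed
 * as though the child had been collected normally.
 */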
3259 if (j->workaround9359725) {
3260 job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
3261 status = W_EXITCODE(-1, SIGSEGV);
3262 memset(&ru, 0, sizeof(ru));
3263 } else if (wait4(j->p, &status, 0, &ru) == -1) {
3264 job_log(j, LOG_NOTICE, "Assuming job exited: <rdar://problem/5020256>: %d: %s", errno, strerror(errno));
3265 status = W_EXITCODE(-1, SIGSEGV);
3266 memset(&ru, 0, sizeof(ru));
3267 }
3268 }
3269
3270 if (j->exit_timeout) {
3271 kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
3272 }
3273
3274 LIST_REMOVE(j, pid_hash_sle);
3275
3276 if (j->sent_signal_time) {
3277 uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);
3278
3279 td_sec = td / NSEC_PER_SEC;
3280 td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;
3281
3282 job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
3283 }
3284
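/* Fold the child's final rusage into the job's running totals so that
 * usage accumulates across respawns of the same job.
 */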
3285 timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
3286 timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
3287 j->ru.ru_maxrss += ru.ru_maxrss;
3288 j->ru.ru_ixrss += ru.ru_ixrss;
3289 j->ru.ru_idrss += ru.ru_idrss;
3290 j->ru.ru_isrss += ru.ru_isrss;
3291 j->ru.ru_minflt += ru.ru_minflt;
3292 j->ru.ru_majflt += ru.ru_majflt;
3293 j->ru.ru_nswap += ru.ru_nswap;
3294 j->ru.ru_inblock += ru.ru_inblock;
3295 j->ru.ru_oublock += ru.ru_oublock;
3296 j->ru.ru_msgsnd += ru.ru_msgsnd;
3297 j->ru.ru_msgrcv += ru.ru_msgrcv;
3298 j->ru.ru_nsignals += ru.ru_nsignals;
3299 j->ru.ru_nvcsw += ru.ru_nvcsw;
3300 j->ru.ru_nivcsw += ru.ru_nivcsw;
3301
3302 if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
3303 int level = LOG_WARNING;
3304 if (!j->did_exec && (j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
3305 level = LOG_DEBUG;
3306 }
3307
3308 job_log(j, level, "Exited with code: %d", WEXITSTATUS(status));
3309 } else {
3310 j->fail_cnt = 0;
3311 }
3312
3313 if (WIFSIGNALED(status)) {
3314 int s = WTERMSIG(status);
3315 if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
3316 job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
3317 } else if (!j->stopped && !j->clean_kill) {
3318 switch (s) {
3319 /* Signals which indicate a crash. */
3320 case SIGILL:
3321 case SIGABRT:
3322 case SIGFPE:
3323 case SIGBUS:
3324 case SIGSEGV:
3325 case SIGSYS:
3326 /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
3327 * SIGTRAP, assume that it's a crash.
3328 */
3329 case SIGTRAP:
3330 j->crashed = true;
3331 job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
3332 break;
3333 default:
3334 job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
3335 break;
3336 }
3337
3338 if (is_system_bootstrapper && j->crashed) {
3339 job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
3340 }
3341 }
3342 }
3343
3344 j->reaped = true;
3345
3346 struct machservice *msi = NULL;
3347 if (j->crashed || !(j->did_exec || j->anonymous)) {
3348 SLIST_FOREACH(msi, &j->machservices, sle) {
3349 if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
3350 machservice_drain_port(msi);
3351 }
3352
3353 if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
3354 machservice_resetport(j, msi);
3355 }
3356 }
3357 }
3358
3359 /* HACK: Essentially duplicating the logic directly above. But this has
3360 * gotten really hairy, and I don't want to try consolidating it right now.
3361 */
3362 if (j->xpc_service && !j->xpcproxy_did_exec) {
3363 job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
3364 SLIST_FOREACH(msi, &j->machservices, sle) {
3365 /* Reset the port (as the log message above says). If xpcproxy could
3366 * not exec(3), then we don't want to continue trying, since there
3367 * is very likely a serious configuration error with the service.
3368 *
3369 * <rdar://problem/8986802>
3370 */
3371 machservice_resetport(j, msi);
3372 }
3373 }
3374
3375 struct suspended_peruser *spi = NULL;
3376 while ((spi = LIST_FIRST(&j->suspended_perusers))) {
3377 job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
3378 spi->j->peruser_suspend_count--;
3379 if (spi->j->peruser_suspend_count == 0) {
3380 job_dispatch(spi->j, false);
3381 }
3382 LIST_REMOVE(spi, sle);
3383 free(spi);
3384 }
3385
3386 j->last_exit_status = status;
3387
3388 if (j->exit_status_dest) {
3389 errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
3390 if (errno && errno != MACH_SEND_INVALID_DEST) {
3391 (void)job_assumes(j, errno == 0);
3392 }
3393
3394 j->exit_status_dest = MACH_PORT_NULL;
3395 }
3396
3397 if (j->spawn_reply_port) {
3398 /* If the child never called exec(3), we must send a spawn() reply so
3399 * that the requestor can get exit status from it. If we fail to send
3400 * the reply for some reason, we have to deallocate the exit status port
3401 * ourselves.
3402 */
3403 kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3404 if (kr) {
3405 if (kr != MACH_SEND_INVALID_DEST) {
3406 errno = kr;
3407 (void)job_assumes(j, errno == KERN_SUCCESS);
3408 }
3409
3410 (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
3411 }
3412
3413 j->exit_status_port = MACH_PORT_NULL;
3414 j->spawn_reply_port = MACH_PORT_NULL;
3415 }
3416
3417 if (j->anonymous) {
3418 total_anon_children--;
3419 if (j->holds_ref) {
3420 runtime_del_ref();
3421 }
3422 } else {
3423 runtime_del_ref();
3424 total_children--;
3425 }
3426
3427 if (j->has_console) {
3428 g_wsp = 0;
3429 }
3430
3431 if (j->shutdown_monitor) {
3432 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
3433 _s_shutdown_monitor = NULL;
3434 j->shutdown_monitor = false;
3435 }
3436
3437 if (j->event_monitor && !j->mgr->shutting_down) {
3438 msi = NULL;
3439 SLIST_FOREACH(msi, &j->machservices, sle) {
3440 if (msi->event_update_port) {
3441 break;
3442 }
3443 }
3444 /* Only do this if we've gotten the port-destroyed notification already.
3445 * If we haven't yet, the port destruction handler will do this.
3446 */
3447 if (job_assumes(j, msi != NULL) && !msi->isActive) {
3448 if (_s_event_update_port == MACH_PORT_NULL) {
3449 (void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
3450 }
3451 eventsystem_ping();
3452 }
3453 }
3454
3455 if (!j->anonymous) {
3456 j->mgr->normal_active_cnt--;
3457 }
3458 j->sent_signal_time = 0;
3459 j->sent_sigkill = false;
3460 j->clean_kill = false;
3461 j->sent_kill_via_shmem = false;
3462 j->lastlookup = NULL;
3463 j->lastlookup_gennum = 0;
3464 j->p = 0;
3465 }
3466
3467 void
3468 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3469 {
3470 jobmgr_t jmi, jmn;
3471 job_t ji, jn;
3472
3473 if (jm->shutting_down) {
3474 return;
3475 }
3476
3477 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3478 jobmgr_dispatch_all(jmi, newmounthack);
3479 }
3480
3481 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3482 if (newmounthack && ji->start_on_mount) {
3483 ji->start_pending = true;
3484 }
3485
3486 job_dispatch(ji, false);
3487 }
3488 }
3489
3490 void
3491 job_dispatch_curious_jobs(job_t j)
3492 {
3493 job_t ji = NULL, jt = NULL;
3494 SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
3495 struct semaphoreitem *si = NULL;
3496 SLIST_FOREACH(si, &ji->semaphores, sle) {
3497 if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
3498 continue;
3499 }
3500
3501 if (strcmp(si->what, j->label) == 0) {
3502 job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);
3503
3504 if (!ji->removing) {
3505 job_dispatch(ji, false);
3506 } else {
3507 job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
3508 }
3509
3510 /* ji could be removed here, so don't do anything with it or its semaphores
3511 * after this point.
3512 */
3513 break;
3514 }
3515 }
3516 }
3517 }
3518
3519 job_t
3520 job_dispatch(job_t j, bool kickstart)
3521 {
3522 /* Don't dispatch a job if it has no audit session set. */
3523 if (!uuid_is_null(j->expected_audit_uuid)) {
3524 return NULL;
3525 }
3526 if (j->alias) {
3527 j = j->alias;
3528 }
3529
3530 #if TARGET_OS_EMBEDDED
3531 if (g_embedded_privileged_action && s_embedded_privileged_job) {
3532 if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
3533 errno = EPERM;
3534 return NULL;
3535 }
3536
3537 if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
3538 errno = EPERM;
3539 return NULL;
3540 }
3541 } else if (g_embedded_privileged_action) {
3542 errno = EINVAL;
3543 return NULL;
3544 }
3545 #endif
3546
3547 /*
3548 * The whole job removal logic needs to be consolidated. The fact that
3549 * a job can be removed from just about anywhere makes it easy to have
3550 * stale pointers left behind somewhere on the stack that might get
3551 * used after the deallocation, particularly during job iteration.
3552 *
3553 * This is a classic example. The act of dispatching a job may delete it.
3554 */
3555 if (!job_active(j)) {
3556 if (job_useless(j)) {
3557 job_remove(j);
3558 return NULL;
3559 }
3560 if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
3561 return NULL;
3562 }
3563
3564 if (kickstart || job_keepalive(j)) {
3565 job_log(j, LOG_DEBUG, "Starting job (kickstart = %s)", kickstart ? "true" : "false");
3566 job_start(j);
3567 } else {
3568 job_log(j, LOG_DEBUG, "Watching job (kickstart = %s)", kickstart ? "true" : "false");
3569 job_watch(j);
3570
3571 /*
3572 * 5455720
3573 *
3574 * Path checking and monitoring is really racy right now.
3575 * We should clean this up post Leopard.
3576 */
3577 if (job_keepalive(j)) {
3578 job_start(j);
3579 }
3580 }
3581 } else {
3582 job_log(j, LOG_DEBUG, "Tried to dispatch an already active job (%s).", job_active(j));
3583 }
3584
3585 return j;
3586 }
3587
3588 void
3589 job_log_stdouterr2(job_t j, const char *msg, ...)
3590 {
3591 struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
3592 va_list ap;
3593
3594 va_start(ap, msg);
3595 runtime_vsyslog(&attr, msg, ap);
3596 va_end(ap);
3597 }
3598
3599 void
3600 job_log_stdouterr(job_t j)
3601 {
3602 char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
3603 bool close_log_redir = false;
3604 ssize_t rsz;
3605
3606 if (!job_assumes(j, buf != NULL)) {
3607 return;
3608 }
3609
3610 bufindex = buf;
3611
3612 rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);
3613
3614 if (unlikely(rsz == 0)) {
3615 job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
3616 close_log_redir = true;
3617 } else if (rsz == -1) {
3618 if (!job_assumes(j, errno == EAGAIN)) {
3619 close_log_redir = true;
3620 }
3621 } else {
3622 buf[rsz] = '\0';
3623
3624 while ((msg = strsep(&bufindex, "\n\r"))) {
3625 if (msg[0]) {
3626 job_log_stdouterr2(j, "%s", msg);
3627 }
3628 }
3629 }
3630
3631 free(buf);
3632
3633 if (unlikely(close_log_redir)) {
3634 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
3635 j->log_redirect_fd = 0;
3636 job_dispatch(j, false);
3637 }
3638 }
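
/* Illustrative sketch (not part of the original source): how strsep(3)
 * drives the loop above. It consumes the buffer in place and returns one
 * token per separator; a "\r\n" pair yields an empty token in between,
 * which the msg[0] check skips.
 */
#if 0
char line[] = "first\nsecond\r\nthird";
char *cursor = line, *tok;

while ((tok = strsep(&cursor, "\n\r")) != NULL) {
	if (tok[0]) {
		puts(tok); /* prints: first, second, third */
	}
}
#endif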
3639
3640 void
3641 job_kill(job_t j)
3642 {
3643 if (unlikely(!j->p || j->anonymous)) {
3644 return;
3645 }
3646
3647 (void)job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);
3648
3649 j->sent_sigkill = true;
3650 (void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j) != -1);
3651
3652 job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
3653 }
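
/* A minimal sketch (assumption: kevent_mod() wraps a kevent(2) changelist
 * call) of the one-shot timer pattern used above. kq_fd, some_token,
 * timeout_secs and ctx are hypothetical names.
 */
#if 0
struct kevent kev;

/* Fire once, timeout_secs from now; ctx comes back in kev.udata. */
EV_SET(&kev, (uintptr_t)&some_token, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, timeout_secs, ctx);
if (kevent(kq_fd, &kev, 1, NULL, 0, NULL) == -1) {
	/* arming failed; log and fall back */
}
#endif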
3654
3655 void
3656 job_open_shutdown_transaction(job_t j)
3657 {
3658 if (j->kill_via_shmem) {
3659 if (j->shmem) {
3660 job_log(j, LOG_DEBUG, "Opening shutdown transaction for job.");
3661 (void)__sync_add_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
3662 } else {
3663 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it has not set up shared memory. Treating normally.");
3664 j->dirty_at_shutdown = false;
3665 }
3666 } else {
3667 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
3668 j->dirty_at_shutdown = false;
3669 }
3670 }
3671
3672 void
3673 job_close_shutdown_transaction(job_t j)
3674 {
3675 if (j->dirty_at_shutdown) {
3676 job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
3677 if (__sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1) == -1) {
3678 job_log(j, LOG_DEBUG, "Job is now clean. Killing.");
3679 job_kill(j);
3680 }
3681 j->dirty_at_shutdown = false;
3682 }
3683 }
3684
3685 void
3686 job_log_children_without_exec(job_t j)
3687 {
3688 pid_t *pids = NULL;
3689 size_t len = sizeof(pid_t) * get_kern_max_proc();
3690 int i = 0, kp_cnt = 0;
3691
3692 if (!do_apple_internal_logging || j->anonymous || j->per_user) {
3693 return;
3694 }
3695
3696 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
3697 return;
3698 }
3699 if (!job_assumes(j, (kp_cnt = proc_listchildpids(j->p, pids, len)) != -1)) {
3700 goto out;
3701 }
3702
3703 for (i = 0; i < kp_cnt; i++) {
3704 struct proc_bsdshortinfo proc;
3705 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3706 if (errno != ESRCH) {
3707 job_assumes(j, errno == 0);
3708 }
3709 continue;
3710 }
3711 if (proc.pbsi_flags & P_EXEC) {
3712 continue;
3713 }
3714
3715 job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
3716 }
3717
3718 out:
3719 free(pids);
3720 }
3721
3722 void
3723 job_cleanup_after_tracer(job_t j)
3724 {
3725 j->tracing_pid = 0;
3726 if (j->reap_after_trace) {
3727 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Reaping job now that attached tracer is gone.");
3728 struct kevent kev;
3729 EV_SET(&kev, j->p, 0, 0, NOTE_EXIT, 0, 0);
3730
3731 /* Fake a kevent to keep our logic consistent. */
3732 job_callback_proc(j, &kev);
3733
3734 /* Normally, after getting a EVFILT_PROC event, we do garbage collection
3735 * on the root job manager. To make our fakery complete, we will do garbage
3736 * collection at the beginning of the next run loop cycle (after we're done
3737 * draining the current queue of kevents).
3738 */
3739 (void)job_assumes(j, kevent_mod((uintptr_t)&root_jobmgr->reboot_flags, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, root_jobmgr) != -1);
3740 }
3741 }
3742
3743 void
3744 job_callback_proc(job_t j, struct kevent *kev)
3745 {
3746 bool program_changed = false;
3747 int fflags = kev->fflags;
3748
3749 job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
3750 log_kevent_struct(LOG_DEBUG, kev, 0);
3751
3752 if (fflags & NOTE_EXIT) {
3753 if (j->p == (pid_t)kev->ident && !j->anonymous) {
3754 /* Note that the third argument to proc_pidinfo() is a magic argument for
3755 * PROC_PIDT_SHORTBSDINFO. Specifically, passing 1 means "don't fail on a zombie
3756 * PID".
3757 */
3758 struct proc_bsdshortinfo proc;
3759 if (job_assumes(j, proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0)) {
3760 if (!job_assumes(j, (pid_t)proc.pbsi_ppid == getpid())) {
3761 /* Someone has attached to the process with ptrace(). There's a race here.
3762 * If we determine that we are not the parent process and then fail to attach
3763 * a kevent to the parent PID (who is probably using ptrace()), we can take that as an
3764 * indication that the parent exited between sysctl(3) and kevent_mod(). The
3765 * reparenting of the PID should be atomic to us, so in that case, we reap the
3766 * job as normal.
3767 *
3768 * Otherwise, we wait for the death of the parent tracer and then reap, just as we
3769 * would if a job died while we were sampling it at shutdown.
3770 *
3771 * Note that we foolishly assume that in the process *tree* a node cannot be its
3772 * own parent. Apparently, that is not correct. If this is the case, we forsake
3773 * the process to its own devices. Let it reap itself.
3774 */
3775 if (!job_assumes(j, proc.pbsi_ppid != kev->ident)) {
3776 job_log(j, LOG_WARNING, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
3777 return;
3778 }
3779 if (job_assumes(j, kevent_mod(proc.pbsi_ppid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j) != -1)) {
3780 j->tracing_pid = proc.pbsi_ppid;
3781 j->reap_after_trace = true;
3782 return;
3783 }
3784 }
3785 }
3786 } else if (!j->anonymous) {
3787 if (j->tracing_pid == (pid_t)kev->ident) {
3788 job_cleanup_after_tracer(j);
3789
3790 return;
3791 } else if (j->tracing_pid && !j->reap_after_trace) {
3792 /* The job exited before our sample completed. */
3793 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job has exited. Will reap after tracing PID %i exits.", j->tracing_pid);
3794 j->reap_after_trace = true;
3795 return;
3796 }
3797 }
3798 }
3799
3800 if (fflags & NOTE_EXEC) {
3801 program_changed = true;
3802
3803 if (j->anonymous) {
3804 struct proc_bsdshortinfo proc;
3805 if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
3806 char newlabel[1000];
3807
3808 snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);
3809
3810 job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);
3811 j->lastlookup = NULL;
3812 j->lastlookup_gennum = 0;
3813
3814 LIST_REMOVE(j, label_hash_sle);
3815 strcpy((char *)j->label, newlabel);
3816
3817 jobmgr_t where2put = root_jobmgr;
3818 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
3819 where2put = j->mgr;
3820 }
3821 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
3822 } else if (errno != ESRCH) {
3823 job_assumes(j, errno == 0);
3824 }
3825 } else {
3826 if (j->spawn_reply_port) {
3827 errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3828 if (errno) {
3829 if (errno != MACH_SEND_INVALID_DEST) {
3830 (void)job_assumes(j, errno == KERN_SUCCESS);
3831 }
3832 (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
3833 }
3834
3835 j->spawn_reply_port = MACH_PORT_NULL;
3836 j->exit_status_port = MACH_PORT_NULL;
3837 }
3838
3839 if (j->xpc_service && j->did_exec) {
3840 j->xpcproxy_did_exec = true;
3841 }
3842
3843 j->did_exec = true;
3844 job_log(j, LOG_DEBUG, "Program changed");
3845 }
3846 }
3847
3848 if (fflags & NOTE_FORK) {
3849 job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
3850 job_log_children_without_exec(j);
3851 }
3852
3853 if (fflags & NOTE_EXIT) {
3854 job_reap(j);
3855
3856 if (j->anonymous) {
3857 job_remove(j);
3858 j = NULL;
3859 } else {
3860 j = job_dispatch(j, false);
3861 }
3862 }
3863 }
3864
3865 void
3866 job_callback_timer(job_t j, void *ident)
3867 {
3868 if (j == ident) {
3869 job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
3870 job_dispatch(j, true);
3871 } else if (&j->semaphores == ident) {
3872 job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
3873 job_dispatch(j, false);
3874 } else if (&j->start_interval == ident) {
3875 job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
3876 j->start_pending = true;
3877 job_dispatch(j, false);
3878 } else if (&j->exit_timeout == ident) {
3879 if (!job_assumes(j, j->p != 0)) {
3880 return;
3881 }
3882
3883 if (j->sent_sigkill) {
3884 uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);
3885
3886 td /= NSEC_PER_SEC;
3887 td -= j->clean_kill ? 0 : j->exit_timeout;
3888
3889 job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
3890 j->workaround9359725 = true;
3891
3892 if (g_trap_sigkill_bugs) {
3893 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
3894 (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
3895 }
3896
3897 /* We've simulated the exit, so we have to cancel the kevent for
3898 * this job, otherwise we may get a kevent later down the road that
3899 * has a stale context pointer (if we've removed the job). Or worse,
3900 * it'll corrupt our data structures if the job still exists or the
3901 * allocation was recycled.
3902 *
3903 * If the failing process had a tracer attached to it, we need to
3904 * remove our NOTE_EXIT for that tracer too, otherwise the same
3905 * thing might happen.
3906 *
3907 * Note that, if we're not shutting down, this will result in a
3908 * zombie process just hanging around forever. But if the process
3909 * didn't exit after receiving SIGKILL, odds are it would've just
3910 * stuck around forever anyway.
3911 *
3912 * See <rdar://problem/9481630>.
3913 */
3914 kevent_mod((uintptr_t)j->p, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
3915 if (j->tracing_pid) {
3916 kevent_mod((uintptr_t)j->tracing_pid, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
3917 }
3918
3919 struct kevent bogus_exit;
3920 EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
3921 jobmgr_callback(j->mgr, &bogus_exit);
3922 } else {
3923 if (unlikely(j->debug_before_kill)) {
3924 job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
3925 (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
3926 }
3927
3928 job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
3929 job_kill(j);
3930 }
3931 } else {
3932 (void)job_assumes(j, false);
3933 }
3934 }
3935
3936 void
3937 job_callback_read(job_t j, int ident)
3938 {
3939 if (ident == j->log_redirect_fd) {
3940 job_log_stdouterr(j);
3941 } else if (ident == j->stdin_fd) {
3942 job_dispatch(j, true);
3943 } else {
3944 socketgroup_callback(j);
3945 }
3946 }
3947
3948 void
3949 jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
3950 {
3951 jobmgr_t jmi;
3952 job_t j;
3953
3954 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3955 jobmgr_reap_bulk(jmi, kev);
3956 }
3957
3958 if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
3959 kev->udata = j;
3960 job_callback(j, kev);
3961 }
3962 }
3963
3964 void
3965 jobmgr_callback(void *obj, struct kevent *kev)
3966 {
3967 jobmgr_t jm = obj;
3968 job_t ji;
3969
3970 switch (kev->filter) {
3971 case EVFILT_PROC:
3972 jobmgr_reap_bulk(jm, kev);
3973 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
3974 break;
3975 case EVFILT_SIGNAL:
3976 switch (kev->ident) {
3977 case SIGTERM:
3978 jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
3979 return launchd_shutdown();
3980 case SIGUSR1:
3981 return calendarinterval_callback();
3982 case SIGUSR2:
3983 fake_shutdown_in_progress = true;
3984 runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
3985
3986 runtime_closelog(); /* HACK -- force 'start' time to be set */
3987
3988 if (pid1_magic) {
3989 int64_t now = runtime_get_wall_time();
3990
3991 jobmgr_log(jm, LOG_NOTICE, "Anticipatory shutdown began at: %lld.%06llu", now / USEC_PER_SEC, now % USEC_PER_SEC);
3992
3993 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
3994 if (ji->per_user && ji->p) {
3995 (void)job_assumes(ji, runtime_kill(ji->p, SIGUSR2) != -1);
3996 }
3997 }
3998 } else {
3999 jobmgr_log(jm, LOG_NOTICE, "Anticipatory per-user launchd shutdown");
4000 }
4001
4002 return;
4003 default:
4004 return (void)jobmgr_assumes(jm, false);
4005 }
4006 break;
4007 case EVFILT_FS:
4008 if (kev->fflags & VQ_MOUNT) {
4009 jobmgr_dispatch_all(jm, true);
4010 }
4011 jobmgr_dispatch_all_semaphores(jm);
4012 break;
4013 case EVFILT_TIMER:
4014 if (kev->ident == (uintptr_t)&sorted_calendar_events) {
4015 calendarinterval_callback();
4016 } else if (kev->ident == (uintptr_t)jm) {
4017 jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
4018 jobmgr_still_alive_with_check(jm);
4019 } else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
4020 jobmgr_do_garbage_collection(jm);
4021 } else if (kev->ident == (uintptr_t)&g_runtime_busy_time) {
4022 jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
4023 if (jobmgr_assumes(jm, runtime_busy_cnt == 0)) {
4024 return launchd_shutdown();
4025 }
4026 }
4027 break;
4028 case EVFILT_VNODE:
4029 if (kev->ident == (uintptr_t)s_no_hang_fd) {
4030 int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
4031 if (unlikely(_no_hang_fd != -1)) {
4032 jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
4033 (void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
4034 (void)jobmgr_assumes(root_jobmgr, runtime_close(s_no_hang_fd) != -1);
4035 s_no_hang_fd = _fd(_no_hang_fd);
4036 }
4037 } else if (pid1_magic && g_console && kev->ident == (uintptr_t)fileno(g_console)) {
4038 int cfd = -1;
4039 if (launchd_assumes((cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1)) {
4040 _fd(cfd);
4041 if (!launchd_assumes((g_console = fdopen(cfd, "w")) != NULL)) {
4042 close(cfd);
4043 }
4044 }
4045 }
4046 break;
4047 default:
4048 return (void)jobmgr_assumes(jm, false);
4049 }
4050 }
4051
4052 void
4053 job_callback(void *obj, struct kevent *kev)
4054 {
4055 job_t j = obj;
4056
4057 job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
4058
4059 switch (kev->filter) {
4060 case EVFILT_PROC:
4061 return job_callback_proc(j, kev);
4062 case EVFILT_TIMER:
4063 return job_callback_timer(j, (void *) kev->ident);
4064 case EVFILT_VNODE:
4065 return semaphoreitem_callback(j, kev);
4066 case EVFILT_READ:
4067 return job_callback_read(j, (int) kev->ident);
4068 case EVFILT_MACHPORT:
4069 return (void)job_dispatch(j, true);
4070 default:
4071 return (void)job_assumes(j, false);
4072 }
4073 }
4074
4075 void
4076 job_start(job_t j)
4077 {
4078 uint64_t td;
4079 int spair[2];
4080 int execspair[2];
4081 int oepair[2];
4082 char nbuf[64];
4083 pid_t c;
4084 bool sipc = false;
4085 u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC;
4086
4087 if (!job_assumes(j, j->mgr != NULL)) {
4088 return;
4089 }
4090
4091 if (unlikely(job_active(j))) {
4092 job_log(j, LOG_DEBUG, "Already started");
4093 return;
4094 }
4095
4096 /*
4097 * Some users adjust the wall-clock and then expect software to not notice.
4098 * Therefore, launchd must use an absolute clock instead of the wall clock
4099 * wherever possible.
4100 */
4101 td = runtime_get_nanoseconds_since(j->start_time);
4102 td /= NSEC_PER_SEC;
4103
4104 if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
4105 time_t respawn_delta = j->min_run_time - (uint32_t)td;
4106
4107 /*
4108 * We technically should ref-count throttled jobs to prevent idle exit,
4109 * but we're not directly tracking the 'throttled' state at the moment.
4110 */
4111 int level = LOG_WARNING;
4112 if (!j->did_exec && ((j->fail_cnt - 1) % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
4113 level = LOG_DEBUG;
4114 }
4115
4116 job_log(j, level, "Throttling respawn: Will start in %ld seconds", respawn_delta);
4117 (void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
4118 job_ignore(j);
4119 return;
4120 }
4121
4122 if (likely(!j->legacy_mach_job)) {
4123 sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_special_privileges;
4124 }
4125
4126 if (sipc) {
4127 (void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
4128 }
4129
4130 (void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);
4131
4132 if (likely(!j->legacy_mach_job) && job_assumes(j, pipe(oepair) != -1)) {
4133 j->log_redirect_fd = _fd(oepair[0]);
4134 (void)job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
4135 (void)job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
4136 }
4137
4138 switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
4139 case -1:
4140 job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
4141 (void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j) != -1);
4142 job_ignore(j);
4143
4144 (void)job_assumes(j, runtime_close(execspair[0]) == 0);
4145 (void)job_assumes(j, runtime_close(execspair[1]) == 0);
4146 if (sipc) {
4147 (void)job_assumes(j, runtime_close(spair[0]) == 0);
4148 (void)job_assumes(j, runtime_close(spair[1]) == 0);
4149 }
4150 if (likely(!j->legacy_mach_job)) {
4151 (void)job_assumes(j, runtime_close(oepair[0]) != -1);
4152 (void)job_assumes(j, runtime_close(oepair[1]) != -1);
4153 j->log_redirect_fd = 0;
4154 }
4155 break;
4156 case 0:
4157 if (unlikely(_vproc_post_fork_ping())) {
4158 _exit(EXIT_FAILURE);
4159 }
4160 if (!j->legacy_mach_job) {
4161 (void)job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
4162 (void)job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
4163 (void)job_assumes(j, runtime_close(oepair[1]) != -1);
4164 }
4165 (void)job_assumes(j, runtime_close(execspair[0]) == 0);
4166 /* wait for our parent to say they've attached a kevent to us */
4167 (void)read(_fd(execspair[1]), &c, sizeof(c));
4168
4169 if (sipc) {
4170 (void)job_assumes(j, runtime_close(spair[0]) == 0);
4171 snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
4172 setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
4173 }
4174 job_start_child(j);
4175 break;
4176 default:
4177 j->start_time = runtime_get_opaque_time();
4178
4179 job_log(j, LOG_DEBUG, "Started as PID: %u", c);
4180
4181 j->did_exec = false;
4182 j->xpcproxy_did_exec = false;
4183 j->checkedin = false;
4184 j->start_pending = false;
4185 j->reaped = false;
4186 j->crashed = false;
4187 j->stopped = false;
4188 if (j->needs_kickoff) {
4189 j->needs_kickoff = false;
4190
4191 if (SLIST_EMPTY(&j->semaphores)) {
4192 j->ondemand = false;
4193 }
4194 }
4195
4196 if (j->has_console) {
4197 g_wsp = c;
4198 }
4199
4200 runtime_add_ref();
4201 total_children++;
4202 LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
4203
4204 if (likely(!j->legacy_mach_job)) {
4205 (void)job_assumes(j, runtime_close(oepair[1]) != -1);
4206 }
4207 j->p = c;
4208
4209 j->mgr->normal_active_cnt++;
4210 j->fork_fd = _fd(execspair[0]);
4211 (void)job_assumes(j, runtime_close(execspair[1]) == 0);
4212 if (sipc) {
4213 (void)job_assumes(j, runtime_close(spair[1]) == 0);
4214 ipc_open(_fd(spair[0]), j);
4215 }
4216 if (job_assumes(j, kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1)) {
4217 job_ignore(j);
4218 } else {
4219 job_reap(j);
4220 }
4221
4222 j->wait4debugger_oneshot = false;
4223
4224 struct envitem *ei = NULL, *et = NULL;
4225 SLIST_FOREACH_SAFE(ei, &j->env, sle, et) {
4226 if (ei->one_shot) {
4227 SLIST_REMOVE(&j->env, ei, envitem, sle);
4228 }
4229 }
4230
4231 if (likely(!j->stall_before_exec)) {
4232 job_uncork_fork(j);
4233 }
4234 break;
4235 }
4236 }
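
/* Illustrative sketch (not in the original) of the execspair handshake
 * above: the child parks on read(2) of one end of a socketpair until the
 * parent, having attached its EVFILT_PROC kevent, closes the other end
 * (job_uncork_fork() plays the parent's part here).
 */
#if 0
int sp[2];
(void)socketpair(AF_UNIX, SOCK_STREAM, 0, sp);

pid_t pid = fork();
if (pid == 0) {
	char c;
	close(sp[0]);
	(void)read(sp[1], &c, sizeof(c)); /* blocks until the parent uncorks */
	/* safe to exec now: the parent is watching this PID */
} else if (pid > 0) {
	close(sp[1]);
	/* ... attach EVFILT_PROC/NOTE_EXIT for pid ... */
	close(sp[0]); /* uncork: the child's read() returns 0 */
}
#endif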
4237
4238 void
4239 job_start_child(job_t j)
4240 {
4241 typeof(posix_spawn) *psf;
4242 const char *file2exec = "/usr/libexec/launchproxy";
4243 const char **argv;
4244 posix_spawnattr_t spattr;
4245 int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
4246 glob_t g;
4247 short spflags = POSIX_SPAWN_SETEXEC;
4248 size_t binpref_out_cnt = 0;
4249 size_t i;
4250
4251 (void)job_assumes(j, posix_spawnattr_init(&spattr) == 0);
4252
4253 job_setup_attributes(j);
4254
4255 if (unlikely(j->argv && j->globargv)) {
4256 g.gl_offs = 1;
4257 for (i = 0; i < j->argc; i++) {
4258 if (i > 0) {
4259 gflags |= GLOB_APPEND;
4260 }
4261 if (glob(j->argv[i], gflags, NULL, &g) != 0) {
4262 job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
4263 exit(EXIT_FAILURE);
4264 }
4265 }
4266 g.gl_pathv[0] = (char *)file2exec;
4267 argv = (const char **)g.gl_pathv;
4268 } else if (likely(j->argv)) {
4269 argv = alloca((j->argc + 2) * sizeof(char *));
4270 argv[0] = file2exec;
4271 for (i = 0; i < j->argc; i++) {
4272 argv[i + 1] = j->argv[i];
4273 }
4274 argv[i + 1] = NULL;
4275 } else {
4276 argv = alloca(3 * sizeof(char *));
4277 argv[0] = file2exec;
4278 argv[1] = j->prog;
4279 argv[2] = NULL;
4280 }
4281
4282 if (likely(!j->inetcompat)) {
4283 argv++;
4284 }
4285
4286 if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
4287 if (!j->legacy_LS_job) {
4288 job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
4289 }
4290 spflags |= POSIX_SPAWN_START_SUSPENDED;
4291 }
4292
4293 if (unlikely(j->disable_aslr)) {
4294 spflags |= _POSIX_SPAWN_DISABLE_ASLR;
4295 }
4296 spflags |= j->pstype;
4297
4298 (void)job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);
4299
4300 if (unlikely(j->j_binpref_cnt)) {
4301 (void)job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
4302 (void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
4303 }
4304
4305 #if HAVE_QUARANTINE
4306 if (j->quarantine_data) {
4307 qtn_proc_t qp;
4308
4309 if (job_assumes(j, qp = qtn_proc_alloc())) {
4310 if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
4311 (void)job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
4312 }
4313 }
4314 }
4315 #endif
4316
4317 #if HAVE_SANDBOX
4318 if (j->seatbelt_profile) {
4319 char *seatbelt_err_buf = NULL;
4320
4321 if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
4322 if (seatbelt_err_buf) {
4323 job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
4324 }
4325 goto out_bad;
4326 }
4327 }
4328 #endif
4329
4330 psf = j->prog ? posix_spawn : posix_spawnp;
4331
4332 if (likely(!j->inetcompat)) {
4333 file2exec = j->prog ? j->prog : argv[0];
4334 }
4335
4336 errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);
4337 if (errno != EBADARCH) {
4338 int level = LOG_ERR;
4339 if ((j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
4340 level = LOG_DEBUG;
4341 }
4342 job_log_error(j, level, "posix_spawn(\"%s\", ...)", file2exec);
4343 errno = EXIT_FAILURE;
4344 }
4345
4346 #if HAVE_SANDBOX
4347 out_bad:
4348 #endif
4349 _exit(errno);
4350 }
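
/* Minimal sketch (illustrative only): POSIX_SPAWN_SETEXEC makes
 * posix_spawn(2) replace the calling image, like execve(2), instead of
 * creating a child. That is why a successful call above never returns and
 * the code can _exit() with whatever came back.
 */
#if 0
posix_spawnattr_t attr;
char *const argv_[] = { "/usr/bin/true", NULL };

(void)posix_spawnattr_init(&attr);
(void)posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
int rv = posix_spawn(NULL, argv_[0], NULL, &attr, argv_, environ);
/* reached only on failure; rv holds the errno-style result */
#endif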
4351
4352 void
4353 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
4354 {
4355 launch_data_t tmp;
4356 struct envitem *ei;
4357 job_t ji;
4358
4359 if (jm->parentmgr) {
4360 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4361 } else {
4362 char **tmpenviron = environ;
4363 for (; *tmpenviron; tmpenviron++) {
4364 char envkey[1024];
4365 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4366 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
4367 strlcpy(envkey, *tmpenviron, sizeof(envkey)); /* strncpy(3) would not NUL-terminate an over-long entry */
4368 char *eq = strchr(envkey, '='); /* '=' may have been truncated away */
4368 if (eq) {
4368 *eq = '\0';
4368 }
4369 launch_data_dict_insert(dict, s, envkey);
4370 }
4371 }
4372
4373 LIST_FOREACH(ji, &jm->jobs, sle) {
4374 SLIST_FOREACH(ei, &ji->global_env, sle) {
4375 if ((tmp = launch_data_new_string(ei->value))) {
4376 launch_data_dict_insert(dict, tmp, ei->key);
4377 }
4378 }
4379 }
4380 }
4381
4382 void
4383 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
4384 {
4385 struct envitem *ei;
4386 job_t ji;
4387
4388 if (jm->parentmgr) {
4389 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4390 }
4391
4392 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
4393 SLIST_FOREACH(ei, &ji->global_env, sle) {
4394 setenv(ei->key, ei->value, 1);
4395 }
4396 }
4397 }
4398
4399 void
4400 job_log_pids_with_weird_uids(job_t j)
4401 {
4402 size_t len = sizeof(pid_t) * get_kern_max_proc();
4403 pid_t *pids = NULL;
4404 uid_t u = j->mach_uid;
4405 int i = 0, kp_cnt = 0;
4406
4407 if (!do_apple_internal_logging) {
4408 return;
4409 }
4410
4411 pids = malloc(len);
4412 if (!job_assumes(j, pids != NULL)) {
4413 return;
4414 }
4415
4416 runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);
4417
4418 /* libproc actually has some serious performance drawbacks compared to sysctl(3) in
4419 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
4420 * one kernel call, libproc requires that we get a list of PIDs we're interested in
4421 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
4422 * struct back in a single call for each one.
4423 *
4424 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
4425 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
4426 * libproc could go stale before we call proc_pidinfo().
4427 *
4428 * Note that proc_list*() APIs return the number of PIDs given back, not the number
4429 * of bytes written to the buffer.
4430 */
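/* For contrast, a minimal sketch (not in the original) of the sysctl(3)
 * approach the comment above alludes to: one call hands back a consistent
 * snapshot of every kinfo_proc at once.
 */
#if 0
int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
size_t sz = 0;
struct kinfo_proc *kp = NULL;

if (sysctl(mib, 3, NULL, &sz, NULL, 0) == 0 && (kp = malloc(sz)) != NULL) {
	if (sysctl(mib, 3, kp, &sz, NULL, 0) == 0) {
		size_t nprocs = sz / sizeof(struct kinfo_proc);
		/* walk kp[0 .. nprocs-1], e.g. kp[i].kp_eproc.e_ucred.cr_uid */
	}
	free(kp);
}
#endif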
4431 if (!job_assumes(j, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
4432 goto out;
4433 }
4434
4435 for (i = 0; i < kp_cnt; i++) {
4436 struct proc_bsdshortinfo proc;
4437 /* We perhaps should not log a bug here if we get ESRCH back, due to the race
4438 * detailed above.
4439 */
4440 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
4441 if (errno != ESRCH) {
4442 job_assumes(j, errno == 0);
4443 }
4444 continue;
4445 }
4446
4447 uid_t i_euid = proc.pbsi_uid;
4448 uid_t i_uid = proc.pbsi_ruid;
4449 uid_t i_svuid = proc.pbsi_svuid;
4450 pid_t i_pid = pids[i];
4451
4452 if (i_euid != u && i_uid != u && i_svuid != u) {
4453 continue;
4454 }
4455
4456 job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);
4457
4458 /* Temporarily disabled due to 5423935 and 4946119. */
4459 #if 0
4460 /* Ask the accountless process to exit. */
4461 (void)job_assumes(j, runtime_kill(i_pid, SIGTERM) != -1);
4462 #endif
4463 }
4464
4465 out:
4466 free(pids);
4467 }
4468
4469 static struct passwd *
4470 job_getpwnam(job_t j, const char *name)
4471 {
4472 /*
4473 * Methodology for system daemons:
4474 *
4475 * First, look up the user record without any opendirectoryd
4476 * interaction; we don't know what interprocess dependencies might be
4477 * in flight. If that fails, we re-enable opendirectoryd interaction
4478 * and re-issue the lookup. We have to disable the libinfo L1 cache,
4479 * otherwise libinfo will return the negative cache entry on the retry.
4480 */
4481
4482 #if !TARGET_OS_EMBEDDED
4483 struct passwd *pw = NULL;
4484
4485 if (pid1_magic && j->mgr == root_jobmgr) {
4486 si_search_module_set_flags("ds", 1 /* SEARCH_MODULE_FLAG_DISABLED */);
4487 gL1CacheEnabled = false;
4488
4489 pw = getpwnam(name);
4490
4491 si_search_module_set_flags("ds", 0);
4492 }
4493
4494 if (pw == NULL) {
4495 pw = getpwnam(name);
4496 }
4497
4498 return pw;
4499 #else
4500 return getpwnam(name);
4501 #endif
4502 }
4503
4504 static struct group *
4505 job_getgrnam(job_t j, const char *name)
4506 {
4507 #if !TARGET_OS_EMBEDDED
4508 struct group *gr = NULL;
4509
4510 if (pid1_magic && j->mgr == root_jobmgr) {
4511 si_search_module_set_flags("ds", 1 /* SEARCH_MODULE_FLAG_DISABLED */);
4512 gL1CacheEnabled = false;
4513
4514 gr = getgrnam(name);
4515
4516 si_search_module_set_flags("ds", 0);
4517 }
4518
4519 if (gr == NULL) {
4520 gr = getgrnam(name);
4521 }
4522
4523 return gr;
4524 #else
4525 #pragma unused (j)
4526 return getgrnam(name);
4527 #endif
4528 }
4529
4530 void
4531 job_postfork_test_user(job_t j)
4532 {
4533 /* This function is all about 5201578 */
4534
4535 const char *home_env_var = getenv("HOME");
4536 const char *user_env_var = getenv("USER");
4537 const char *logname_env_var = getenv("LOGNAME");
4538 uid_t tmp_uid, local_uid = getuid();
4539 gid_t tmp_gid, local_gid = getgid();
4540 char shellpath[PATH_MAX];
4541 char homedir[PATH_MAX];
4542 char loginname[2000];
4543 struct passwd *pwe;
4544
4545
4546 if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
4547 && strcmp(user_env_var, logname_env_var) == 0)) {
4548 goto out_bad;
4549 }
4550
4551 if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
4552 job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
4553 goto out_bad;
4554 }
4555
4556 /*
4557 * We must copy the results of getpw*().
4558 *
4559 * Why? Because subsequent API calls may call getpw*() as a part of
4560 * their implementation. Since getpw*() returns a [now thread scoped]
4561 * global, we must therefore cache the results before continuing.
4562 */
4563
4564 tmp_uid = pwe->pw_uid;
4565 tmp_gid = pwe->pw_gid;
4566
4567 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4568 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4569 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4570
4571 if (strcmp(loginname, logname_env_var) != 0) {
4572 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
4573 goto out_bad;
4574 }
4575 if (strcmp(homedir, home_env_var) != 0) {
4576 job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
4577 goto out_bad;
4578 }
4579 if (local_uid != tmp_uid) {
4580 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4581 'U', tmp_uid, local_uid);
4582 goto out_bad;
4583 }
4584 if (local_gid != tmp_gid) {
4585 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4586 'G', tmp_gid, local_gid);
4587 goto out_bad;
4588 }
4589
4590 return;
4591 out_bad:
4592 #if 0
4593 (void)job_assumes(j, runtime_kill(getppid(), SIGTERM) != -1);
4594 _exit(EXIT_FAILURE);
4595 #else
4596 job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
4597 #endif
4598 }
4599
4600 void
4601 job_postfork_become_user(job_t j)
4602 {
4603 char loginname[2000];
4604 char tmpdirpath[PATH_MAX];
4605 char shellpath[PATH_MAX];
4606 char homedir[PATH_MAX];
4607 struct passwd *pwe;
4608 size_t r;
4609 gid_t desired_gid = -1;
4610 uid_t desired_uid = -1;
4611
4612 if (getuid() != 0) {
4613 return job_postfork_test_user(j);
4614 }
4615
4616 /*
4617 * I contend that having UID == 0 and GID != 0 is of dubious value.
4618 * Nevertheless, this used to work in Tiger. See: 5425348
4619 */
4620 if (j->groupname && !j->username) {
4621 j->username = "root";
4622 }
4623
4624 if (j->username) {
4625 if ((pwe = job_getpwnam(j, j->username)) == NULL) {
4626 job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
4627 _exit(EXIT_FAILURE);
4628 }
4629 } else if (j->mach_uid) {
4630 if ((pwe = getpwuid(j->mach_uid)) == NULL) {
4631 job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
4632 job_log_pids_with_weird_uids(j);
4633 _exit(EXIT_FAILURE);
4634 }
4635 } else {
4636 return;
4637 }
4638
4639 /*
4640 * We must copy the results of getpw*().
4641 *
4642 * Why? Because subsequent API calls may call getpw*() as a part of
4643 * their implementation. Since getpw*() returns a [now thread scoped]
4644 * global, we must therefore cache the results before continuing.
4645 */
4646
4647 desired_uid = pwe->pw_uid;
4648 desired_gid = pwe->pw_gid;
4649
4650 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4651 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4652 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4653
4654 if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
4655 job_log(j, LOG_ERR, "Expired account");
4656 _exit(EXIT_FAILURE);
4657 }
4658
4659
4660 if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
4661 job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
4662 } else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
4663 job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
4664 }
4665
4666 if (j->groupname) {
4667 struct group *gre;
4668
4669 if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
4670 job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
4671 _exit(EXIT_FAILURE);
4672 }
4673
4674 desired_gid = gre->gr_gid;
4675 }
4676
4677 if (!job_assumes(j, setlogin(loginname) != -1)) {
4678 _exit(EXIT_FAILURE);
4679 }
4680
4681 if (!job_assumes(j, setgid(desired_gid) != -1)) {
4682 _exit(EXIT_FAILURE);
4683 }
4684
4685 /*
4686 * The kernel team and the DirectoryServices team want initgroups()
4687 * called after setgid(). See 4616864 for more information.
4688 */
4689
4690 if (likely(!j->no_init_groups)) {
4691 #if 1
4692 if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
4693 _exit(EXIT_FAILURE);
4694 }
4695 #else
4696 /* Do our own little initgroups(). We do this to guarantee that we're
4697 * always opted into dynamic group resolution in the kernel. initgroups(3)
4698 * does not make this guarantee.
4699 */
4700 int groups[NGROUPS], ngroups;
4701
4702 /* A failure here isn't fatal, and we'll still get data we can use. */
4703 (void)job_assumes(j, getgrouplist(j->username, desired_gid, groups, &ngroups) != -1);
4704
4705 if (!job_assumes(j, syscall(SYS_initgroups, ngroups, groups, desired_uid) != -1)) {
4706 _exit(EXIT_FAILURE);
4707 }
4708 #endif
4709 }
4710
4711 if (!job_assumes(j, setuid(desired_uid) != -1)) {
4712 _exit(EXIT_FAILURE);
4713 }
4714
4715 r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));
4716
4717 if (likely(r > 0 && r < sizeof(tmpdirpath))) {
4718 setenv("TMPDIR", tmpdirpath, 0);
4719 }
4720
4721 setenv("SHELL", shellpath, 0);
4722 setenv("HOME", homedir, 0);
4723 setenv("USER", loginname, 0);
4724 setenv("LOGNAME", loginname, 0);
4725 }
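
/* A condensed sketch of the ordering above (pw is a hypothetical cached
 * copy of a passwd entry): each step still runs with root privilege until
 * the final setuid(), which is why setuid() must come last.
 */
#if 0
if (setlogin(pw->pw_name) == -1
		|| setgid(pw->pw_gid) == -1
		|| initgroups(pw->pw_name, pw->pw_gid) == -1
		|| setuid(pw->pw_uid) == -1) {
	_exit(EXIT_FAILURE);
}
#endif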
4726
4727 void
4728 job_setup_attributes(job_t j)
4729 {
4730 struct limititem *li;
4731 struct envitem *ei;
4732
4733 if (unlikely(j->setnice)) {
4734 (void)job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
4735 }
4736
4737 SLIST_FOREACH(li, &j->limits, sle) {
4738 struct rlimit rl;
4739
4740 if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
4741 continue;
4742 }
4743
4744 if (li->sethard) {
4745 rl.rlim_max = li->lim.rlim_max;
4746 }
4747 if (li->setsoft) {
4748 rl.rlim_cur = li->lim.rlim_cur;
4749 }
4750
4751 if (setrlimit(li->which, &rl) == -1) {
4752 job_log_error(j, LOG_WARNING, "setrlimit()");
4753 }
4754 }
4755
4756 if (unlikely(!j->inetcompat && j->session_create)) {
4757 launchd_SessionCreate();
4758 }
4759
4760 if (unlikely(j->low_pri_io)) {
4761 (void)job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
4762 }
4763 if (unlikely(j->rootdir)) {
4764 (void)job_assumes(j, chroot(j->rootdir) != -1);
4765 (void)job_assumes(j, chdir(".") != -1);
4766 }
4767
4768 job_postfork_become_user(j);
4769
4770 if (unlikely(j->workingdir)) {
4771 (void)job_assumes(j, chdir(j->workingdir) != -1);
4772 }
4773
4774 if (unlikely(j->setmask)) {
4775 umask(j->mask);
4776 }
4777
4778 if (j->stdin_fd) {
4779 (void)job_assumes(j, dup2(j->stdin_fd, STDIN_FILENO) != -1);
4780 } else {
4781 job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
4782 }
4783 job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
4784 job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
4785
4786 jobmgr_setup_env_from_other_jobs(j->mgr);
4787
4788 SLIST_FOREACH(ei, &j->env, sle) {
4789 setenv(ei->key, ei->value, 1);
4790 }
4791
4792 if (do_apple_internal_logging) {
4793 setenv(LAUNCHD_DO_APPLE_INTERNAL_LOGGING, "true", 1);
4794 }
4795
4796 #if !TARGET_OS_EMBEDDED
4797 if (j->jetsam_properties) {
4798 (void)job_assumes(j, proc_setpcontrol(PROC_SETPC_TERMINATE) == 0);
4799 }
4800 #endif
4801
4802 #if TARGET_OS_EMBEDDED
4803 if (j->main_thread_priority != 0) {
4804 struct sched_param params;
4805 bzero(&params, sizeof(params));
4806 params.sched_priority = j->main_thread_priority;
4807 (void)job_assumes(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params) != -1);
4808 }
4809 #endif
4810
4811 /*
4812 * We'd like to call setsid() unconditionally, but we have reason to
4813 * believe that prevents launchd from being able to send signals to
4814 * setuid children. We'll settle for process-groups.
4815 */
4816 if (getppid() != 1) {
4817 (void)job_assumes(j, setpgid(0, 0) != -1);
4818 } else {
4819 (void)job_assumes(j, setsid() != -1);
4820 }
4821 }
4822
4823 void
4824 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
4825 {
4826 int fd;
4827
4828 if (!path) {
4829 return;
4830 }
4831
4832 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
4833 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
4834 return;
4835 }
4836
4837 (void)job_assumes(j, dup2(fd, target_fd) != -1);
4838 (void)job_assumes(j, runtime_close(fd) == 0);
4839 }
4840
4841 int
4842 dir_has_files(job_t j, const char *path)
4843 {
4844 DIR *dd = opendir(path);
4845 struct dirent *de;
4846 int r = 0;
4847
4848 if (unlikely(!dd)) {
4849 return -1;
4850 }
4851
4852 while ((de = readdir(dd))) {
4853 if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
4854 r = 1;
4855 break;
4856 }
4857 }
4858
4859 (void)job_assumes(j, closedir(dd) == 0);
4860 return r;
4861 }
4862
4863 void
4864 calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
4865 {
4866 struct calendarinterval *ci_iter, *ci_prev = NULL;
4867 time_t later, head_later;
4868
4869 later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);
4870
4871 if (ci->when.tm_wday != -1) {
4872 time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);
4873
4874 if (ci->when.tm_mday == -1) {
4875 later = otherlater;
4876 } else {
4877 later = later < otherlater ? later : otherlater;
4878 }
4879 }
4880
4881 ci->when_next = later;
4882
4883 LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
4884 if (ci->when_next < ci_iter->when_next) {
4885 LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
4886 break;
4887 }
4888
4889 ci_prev = ci_iter;
4890 }
4891
4892 if (ci_iter == NULL) {
4893 /* ci must want to fire after every other timer, or there are no timers */
4894
4895 if (LIST_EMPTY(&sorted_calendar_events)) {
4896 LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
4897 } else {
4898 LIST_INSERT_AFTER(ci_prev, ci, global_sle);
4899 }
4900 }
4901
4902 head_later = LIST_FIRST(&sorted_calendar_events)->when_next;
4903
4904 if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
4905 char time_string[100];
4906 size_t time_string_len;
4907
4908 ctime_r(&later, time_string);
4909 time_string_len = strlen(time_string);
4910
4911 if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
4912 time_string[time_string_len - 1] = '\0';
4913 }
4914
4915 job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
4916 }
4917 }
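
/* Sketch (not in the original): with NOTE_ABSOLUTE|NOTE_SECONDS, the
 * timer's data field is a wall-clock time_t rather than an interval, so a
 * single kevent can be re-armed with whatever is at the head of the
 * sorted list. kq_fd, some_token and ctx are hypothetical.
 */
#if 0
struct kevent kev;
time_t when = time(NULL) + 60; /* fire at an absolute time, a minute out */

EV_SET(&kev, (uintptr_t)&some_token, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, when, ctx);
(void)kevent(kq_fd, &kev, 1, NULL, 0, NULL);
#endif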
4918
4919 void
4920 extract_rcsid_substr(const char *i, char *o, size_t osz)
4921 {
4922 char *rcs_rev_tmp = strchr(i, ' ');
4923
4924 if (!rcs_rev_tmp) {
4925 strlcpy(o, i, osz);
4926 } else {
4927 strlcpy(o, rcs_rev_tmp + 1, osz);
4928 rcs_rev_tmp = strchr(o, ' ');
4929 if (rcs_rev_tmp) {
4930 *rcs_rev_tmp = '\0';
4931 }
4932 }
4933 }
4934
4935 void
4936 jobmgr_log_bug(jobmgr_t jm, unsigned int line)
4937 {
4938 static const char *file;
4939 int saved_errno = errno;
4940 char buf[100];
4941
4942 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4943
4944 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
4945
4946 if (!file) {
4947 file = strrchr(__FILE__, '/');
4948 if (!file) {
4949 file = __FILE__;
4950 } else {
4951 file += 1;
4952 }
4953 }
4954
4955 /* the only time 'jm' should not be set is if setting up the first bootstrap fails for some reason */
4956 if (likely(jm)) {
4957 jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4958 } else {
4959 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4960 }
4961 }
4962
4963 void
4964 job_log_bug(job_t j, unsigned int line)
4965 {
4966 static const char *file;
4967 int saved_errno = errno;
4968 char buf[100];
4969
4970 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4971
4972 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
4973
4974 if (!file) {
4975 file = strrchr(__FILE__, '/');
4976 if (!file) {
4977 file = __FILE__;
4978 } else {
4979 file += 1;
4980 }
4981 }
4982
4983 if (likely(j)) {
4984 job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4985 } else {
4986 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4987 }
4988 }
4989
4990 void
4991 job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
4992 {
4993 const char *label2use = j ? j->label : "com.apple.launchd.NULL";
4994 const char *mgr2use = j ? j->mgr->name : "NULL";
4995 struct runtime_syslog_attr attr = { g_my_label, label2use, mgr2use, pri, getuid(), getpid(), j ? j->p : 0 };
4996 char *newmsg;
4997 int oldmask = 0;
4998 size_t newmsgsz;
4999
5000 /*
5001 * Hack: If bootstrap_port is set, we must be on the child side of a
5002 * fork(), but before the exec*(). Let's route the log message back to
5003 * launchd proper.
5004 */
5005 if (bootstrap_port) {
5006 return _vproc_logv(pri, err, msg, ap);
5007 }
5008
5009 newmsgsz = strlen(msg) + 200;
5010 newmsg = alloca(newmsgsz);
5011
5012 if (err) {
5013 #if !TARGET_OS_EMBEDDED
5014 snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
5015 #else
5016 snprintf(newmsg, newmsgsz, "(%s) %s: %s", label2use, msg, strerror(err));
5017 #endif
5018 } else {
5019 #if !TARGET_OS_EMBEDDED
5020 snprintf(newmsg, newmsgsz, "%s", msg);
5021 #else
5022 snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
5023 #endif
5024 }
5025
5026 if (j && unlikely(j->debug)) {
5027 oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
5028 }
5029
5030 runtime_vsyslog(&attr, newmsg, ap);
5031
5032 if (j && unlikely(j->debug)) {
5033 setlogmask(oldmask);
5034 }
5035 }
5036
5037 void
5038 job_log_error(job_t j, int pri, const char *msg, ...)
5039 {
5040 va_list ap;
5041
5042 va_start(ap, msg);
5043 job_logv(j, pri, errno, msg, ap);
5044 va_end(ap);
5045 }
5046
5047 void
5048 job_log(job_t j, int pri, const char *msg, ...)
5049 {
5050 va_list ap;
5051
5052 va_start(ap, msg);
5053 job_logv(j, pri, 0, msg, ap);
5054 va_end(ap);
5055 }
5056
5057 #if 0
5058 void
5059 jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
5060 {
5061 va_list ap;
5062
5063 va_start(ap, msg);
5064 jobmgr_logv(jm, pri, errno, msg, ap);
5065 va_end(ap);
5066 }
5067 #endif
5068
5069 void
5070 jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
5071 {
5072 va_list ap;
5073
5074 va_start(ap, msg);
5075 jobmgr_logv(jm, pri, 0, msg, ap);
5076 va_end(ap);
5077 }
5078
5079 void
5080 jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
5081 {
5082 char *newmsg;
5083 char *newname;
5084 size_t i, o, jmname_len = strlen(jm->name), newmsgsz;
5085
5086 newname = alloca((jmname_len + 1) * 2);
5087 newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
5088 newmsg = alloca(newmsgsz);
5089
5090 for (i = 0, o = 0; i < jmname_len; i++, o++) {
5091 if (jm->name[i] == '%') {
5092 newname[o] = '%';
5093 o++;
5094 }
5095 newname[o] = jm->name[i];
5096 }
5097 newname[o] = '\0';
5098
5099 if (err) {
5100 snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
5101 } else {
5102 snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
5103 }
5104
5105 if (jm->parentmgr) {
5106 jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
5107 } else {
5108 struct runtime_syslog_attr attr = { g_my_label, g_my_label, jm->name, pri, getuid(), getpid(), getpid() };
5109
5110 runtime_vsyslog(&attr, newmsg, ap);
5111 }
5112 }
5113
5114 void
5115 semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
5116 {
5117 if (si->fd != -1) {
5118 job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
5119 (void)job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
5120 }
5121 }
5122
5123 void
5124 semaphoreitem_watch(job_t j, struct semaphoreitem *si)
5125 {
5126 char *parentdir, tmp_path[PATH_MAX];
5127 int saved_errno = 0;
5128 int fflags = NOTE_DELETE|NOTE_RENAME;
5129
5130 switch (si->why) {
5131 case DIR_NOT_EMPTY:
5132 case PATH_CHANGES:
5133 fflags |= NOTE_ATTRIB|NOTE_LINK;
5134 /* fall through */
5135 case PATH_EXISTS:
5136 fflags |= NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
5137 /* fall through */
5138 case PATH_MISSING:
5139 break;
5140 default:
5141 return;
5142 }
5143
5144 /* dirname() may modify tmp_path */
5145 strlcpy(tmp_path, si->what, sizeof(tmp_path));
5146
5147 if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
5148 return;
5149 }
5150
5151 /* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
5152 do {
5153 if (si->fd == -1) {
5154 struct stat sb;
5155 if (stat(si->what, &sb) == 0) {
5156 /* If we're watching a character or block device, only watch the parent directory.
5157 * See rdar://problem/6489900 for the gory details. Basically, holding an open file
5158 * descriptor to a devnode could end up (a) blocking us on open(2) until someone else
5159 * open(2)s the file (like a character device that waits for a carrier signal) or
5160 * (b) preventing other processes from obtaining an exclusive lock on the file, even
5161 * though we're opening it with O_EVTONLY.
5162 *
5163 * The main point of contention is that O_EVTONLY doesn't actually mean "event only".
5164 * It means "Don't prevent unmounts of this descriptor's volume". We work around this
5165 * for dev nodes by only watching the parent directory and stat(2)ing our desired file
5166 * each time the parent changes to see if it appeared or disappeared.
5167 */
5168 if (S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode)) {
5169 si->fd = _fd(open(si->what, O_EVTONLY | O_NOCTTY | O_NONBLOCK));
5170 }
5171 }
5172
5173 if (si->fd == -1) {
5174 si->watching_parent = job_assumes(j, (si->fd = _fd(open(parentdir, O_EVTONLY | O_NOCTTY | O_NONBLOCK))) != -1);
5175 } else {
5176 si->watching_parent = false;
5177 }
5178 }
5179
5180 if (si->fd == -1) {
5181 return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", si->what);
5182 }
5183
5184 job_log(j, LOG_DEBUG, "Watching %svnode (%s): %d", si->watching_parent ? "parent ": "", si->what, si->fd);
5185
5186 if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
5187 saved_errno = errno;
5188 /*
5189 * The FD can be revoked between the open() and kevent().
5190 * This is similar to the inability for kevents to be
5191 * attached to short lived zombie processes after fork()
5192 * but before kevent().
5193 */
5194 (void)job_assumes(j, runtime_close(si->fd) == 0);
5195 si->fd = -1;
5196 }
5197 } while (unlikely((si->fd == -1) && (saved_errno == ENOENT)));
5198
5199 if (saved_errno == ENOTSUP) {
5200 /*
5201 * 3524219 NFS needs kqueue support
5202 * 4124079 VFS needs generic kqueue support
5203 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
5204 */
5205 job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);
5206
5207 if (!j->poll_for_vfs_changes) {
5208 j->poll_for_vfs_changes = true;
5209 (void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
5210 }
5211 }
5212 }
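
/* The basic EVFILT_VNODE watch that the retry loop above wraps, as an
 * illustrative sketch. O_EVTONLY only guarantees that the descriptor will
 * not block unmounts of its volume. kq_fd and ctx are hypothetical.
 */
#if 0
int fd = open("/some/path", O_EVTONLY|O_NOCTTY|O_NONBLOCK);
if (fd != -1) {
	struct kevent kev;
	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD, NOTE_DELETE|NOTE_WRITE|NOTE_RENAME, 0, ctx);
	(void)kevent(kq_fd, &kev, 1, NULL, 0, NULL);
}
#endif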
5213
5214 void
5215 semaphoreitem_callback(job_t j, struct kevent *kev)
5216 {
5217 char invalidation_reason[100] = "";
5218 struct semaphoreitem *si;
5219
5220 SLIST_FOREACH(si, &j->semaphores, sle) {
5221 switch (si->why) {
5222 case PATH_CHANGES:
5223 case PATH_EXISTS:
5224 case PATH_MISSING:
5225 case DIR_NOT_EMPTY:
5226 job_log(j, LOG_DEBUG, "P%s changed (%u): %s", si->watching_parent ? "arent path" : "ath", si->why, si->what);
5227 break;
5228 default:
5229 continue;
5230 }
5231
5232 if (si->fd == (int)kev->ident) {
5233 break;
5234 }
5235 }
5236
5237 if (!job_assumes(j, si != NULL)) {
5238 return;
5239 }
5240
5241 if (NOTE_DELETE & kev->fflags) {
5242 strcat(invalidation_reason, "deleted");
5243 }
5244
5245 if (NOTE_RENAME & kev->fflags) {
5246 if (invalidation_reason[0]) {
5247 strcat(invalidation_reason, "/renamed");
5248 } else {
5249 strcat(invalidation_reason, "renamed");
5250 }
5251 }
5252
5253 if (NOTE_REVOKE & kev->fflags) {
5254 if (invalidation_reason[0]) {
5255 strcat(invalidation_reason, "/revoked");
5256 } else {
5257 strcat(invalidation_reason, "revoked");
5258 }
5259 }
5260
5261 if (invalidation_reason[0]) {
5262 job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
5263 (void)job_assumes(j, runtime_close(si->fd) == 0);
5264 si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
5265 }
5266
5267 if (!si->watching_parent) {
5268 if (si->why == PATH_CHANGES) {
5269 j->start_pending = true;
5270 } else {
5271 semaphoreitem_watch(j, si);
5272 }
5273 } else { /* Something happened to the parent directory. See if our target file appeared. */
5274 if (!invalidation_reason[0]) {
5275 (void)job_assumes(j, runtime_close(si->fd) == 0);
5276 si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
5277 semaphoreitem_watch(j, si);
5278 }
5279 /* Need to think about what should happen if the parent directory goes invalid. */
5280 }
5281
5282 job_dispatch(j, false);
5283 }
5284
5285 struct cal_dict_walk {
5286 job_t j;
5287 struct tm tmptm;
5288 };
5289
5290 void
5291 calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
5292 {
5293 struct cal_dict_walk *cdw = context;
5294 struct tm *tmptm = &cdw->tmptm;
5295 job_t j = cdw->j;
5296 int64_t val;
5297
5298 if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
5299 /* hack to let caller know something went wrong */
5300 tmptm->tm_sec = -1;
5301 return;
5302 }
5303
5304 val = launch_data_get_integer(obj);
5305
5306 if (val < 0) {
5307 job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
5308 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
5309 if (val > 59) {
5310 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
5311 tmptm->tm_sec = -1;
5312 } else {
5313 tmptm->tm_min = (typeof(tmptm->tm_min)) val;
5314 }
5315 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
5316 if (val > 23) {
5317 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
5318 tmptm->tm_sec = -1;
5319 } else {
5320 tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
5321 }
5322 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
5323 if (val < 1 || val > 31) {
5324 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
5325 tmptm->tm_sec = -1;
5326 } else {
5327 tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
5328 }
5329 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
5330 if (val > 7) {
5331 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
5332 tmptm->tm_sec = -1;
5333 } else {
5334 tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
5335 }
5336 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
5337 if (val > 12) {
5338 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
5339 tmptm->tm_sec = -1;
5340 } else {
5341 tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
5342 tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
5343 }
5344 }
5345 }
5346
5347 bool
5348 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5349 {
5350 struct cal_dict_walk cdw;
5351
5352 cdw.j = j;
5353 memset(&cdw.tmptm, 0, sizeof(0));
5354
5355 cdw.tmptm.tm_min = -1;
5356 cdw.tmptm.tm_hour = -1;
5357 cdw.tmptm.tm_mday = -1;
5358 cdw.tmptm.tm_wday = -1;
5359 cdw.tmptm.tm_mon = -1;
5360
5361 if (!job_assumes(j, obj != NULL)) {
5362 return false;
5363 }
5364
5365 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5366 return false;
5367 }
5368
5369 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5370
5371 if (unlikely(cdw.tmptm.tm_sec == -1)) {
5372 return false;
5373 }
5374
5375 return calendarinterval_new(j, &cdw.tmptm);
5376 }
5377
5378 bool
5379 calendarinterval_new(job_t j, struct tm *w)
5380 {
5381 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5382
5383 if (!job_assumes(j, ci != NULL)) {
5384 return false;
5385 }
5386
5387 ci->when = *w;
5388 ci->job = j;
5389
5390 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
5391
5392 calendarinterval_setalarm(j, ci);
5393
5394 runtime_add_weak_ref();
5395
5396 return true;
5397 }
5398
5399 void
5400 calendarinterval_delete(job_t j, struct calendarinterval *ci)
5401 {
5402 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
5403 LIST_REMOVE(ci, global_sle);
5404
5405 free(ci);
5406
5407 runtime_del_weak_ref();
5408 }
5409
5410 void
5411 calendarinterval_sanity_check(void)
5412 {
5413 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5414 time_t now = time(NULL);
5415
5416 if (unlikely(ci && (ci->when_next < now))) {
5417 (void)jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
5418 }
5419 }
5420
5421 void
5422 calendarinterval_callback(void)
5423 {
5424 struct calendarinterval *ci, *ci_next;
5425 time_t now = time(NULL);
5426
5427 LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
5428 job_t j = ci->job;
5429
5430 if (ci->when_next > now) {
5431 break;
5432 }
5433
5434 LIST_REMOVE(ci, global_sle);
5435 calendarinterval_setalarm(j, ci);
5436
5437 j->start_pending = true;
5438 job_dispatch(j, false);
5439 }
5440 }
5441
5442 bool
5443 socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds)
5444 {
5445 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5446
5447 if (!job_assumes(j, sg != NULL)) {
5448 return false;
5449 }
5450
5451 sg->fds = calloc(1, fd_cnt * sizeof(int));
5452 sg->fd_cnt = fd_cnt;
5453 sg->junkfds = junkfds;
5454
5455 if (!job_assumes(j, sg->fds != NULL)) {
5456 free(sg);
5457 return false;
5458 }
5459
5460 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
5461 strcpy(sg->name_init, name);
5462
5463 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5464
5465 runtime_add_weak_ref();
5466
5467 return true;
5468 }
5469
5470 void
5471 socketgroup_delete(job_t j, struct socketgroup *sg)
5472 {
5473 unsigned int i;
5474
5475 for (i = 0; i < sg->fd_cnt; i++) {
5476 #if 0
5477 struct sockaddr_storage ss;
5478 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
5479 socklen_t ss_len = sizeof(ss);
5480
5481 /* 5480306 */
5482 if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
5483 && job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
5484 (void)job_assumes(j, unlink(sun->sun_path) != -1);
5485 /* We might conditionally need to delete a directory here */
5486 }
5487 #endif
5488 (void)job_assumes(j, runtime_close(sg->fds[i]) != -1);
5489 }
5490
5491 SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);
5492
5493 free(sg->fds);
5494 free(sg);
5495
5496 runtime_del_weak_ref();
5497 }
5498
5499 void
5500 socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
5501 {
5502 struct kevent kev[sg->fd_cnt];
5503 char buf[10000];
5504 unsigned int i, buf_off = 0;
5505
5506 if (unlikely(sg->junkfds)) {
5507 return;
5508 }
5509
5510 for (i = 0; i < sg->fd_cnt; i++) {
5511 EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
5512 buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
5513 }
5514
5515 job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);
5516
5517 (void)job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);
5518
5519 for (i = 0; i < sg->fd_cnt; i++) {
5520 (void)job_assumes(j, kev[i].flags & EV_ERROR);
5521 errno = (typeof(errno)) kev[i].data;
5522 (void)job_assumes(j, kev[i].data == 0);
5523 }
5524 }
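/* The error loop above depends on kevent_bulk_mod() (defined in the runtime)
 * registering the changelist with EV_RECEIPT-style semantics: every change
 * slot is echoed back with EV_ERROR set in flags and a per-entry status
 * (0 on success) in data. A minimal sketch of the same pattern with plain
 * kevent(2); the kq and fds names here are hypothetical:
 */
#if 0
static int
example_bulk_watch(int kq, int *fds, size_t fd_cnt)
{
	struct kevent kev[fd_cnt];
	size_t i;

	for (i = 0; i < fd_cnt; i++) {
		EV_SET(&kev[i], fds[i], EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
	}
	/* Same array as changelist and eventlist: each slot reports its own status. */
	if (kevent(kq, kev, (int)fd_cnt, kev, (int)fd_cnt, NULL) == -1) {
		return -1;
	}
	for (i = 0; i < fd_cnt; i++) {
		if ((kev[i].flags & EV_ERROR) && kev[i].data != 0) {
			errno = (int)kev[i].data;
			return -1;
		}
	}
	return 0;
}
#endif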
5525
5526 void
5527 socketgroup_ignore(job_t j, struct socketgroup *sg)
5528 {
5529 socketgroup_kevent_mod(j, sg, false);
5530 }
5531
5532 void
5533 socketgroup_watch(job_t j, struct socketgroup *sg)
5534 {
5535 socketgroup_kevent_mod(j, sg, true);
5536 }
5537
5538 void
5539 socketgroup_callback(job_t j)
5540 {
5541 job_dispatch(j, true);
5542 }
5543
5544 bool
5545 envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot)
5546 {
5547 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
5548
5549 if (!job_assumes(j, ei != NULL)) {
5550 return false;
5551 }
5552
5553 strcpy(ei->key_init, k);
5554 ei->value = ei->key_init + strlen(k) + 1;
5555 strcpy(ei->value, v);
5556 ei->one_shot = one_shot;
5557
5558 if (global) {
5559 if (SLIST_EMPTY(&j->global_env)) {
5560 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5561 }
5562 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5563 } else {
5564 SLIST_INSERT_HEAD(&j->env, ei, sle);
5565 }
5566
5567 job_log(j, LOG_DEBUG, "Added environment variable: %s=%s", k, v);
5568
5569 return true;
5570 }
5571
5572 void
5573 envitem_delete(job_t j, struct envitem *ei, bool global)
5574 {
5575 if (global) {
5576 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
5577 if (SLIST_EMPTY(&j->global_env)) {
5578 LIST_REMOVE(j, global_env_sle);
5579 }
5580 } else {
5581 SLIST_REMOVE(&j->env, ei, envitem, sle);
5582 }
5583
5584 free(ei);
5585 }
5586
5587 void
5588 envitem_setup(launch_data_t obj, const char *key, void *context)
5589 {
5590 job_t j = context;
5591
5592 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5593 return;
5594 }
5595
5596 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5597 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, false);
5598 } else {
5599 job_log(j, LOG_DEBUG, "Ignoring reserved environment variable: %s", key);
5600 }
5601 }
5602
5603 void
5604 envitem_setup_one_shot(launch_data_t obj, const char *key, void *context)
5605 {
5606 job_t j = context;
5607
5608 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5609 return;
5610 }
5611
5612 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5613 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, true);
5614 } else {
5615 job_log(j, LOG_DEBUG, "Ignoring reserved environment variable: %s", key);
5616 }
5617 }
5618
5619 bool
5620 limititem_update(job_t j, int w, rlim_t r)
5621 {
5622 struct limititem *li;
5623
5624 SLIST_FOREACH(li, &j->limits, sle) {
5625 if (li->which == w) {
5626 break;
5627 }
5628 }
5629
5630 if (li == NULL) {
5631 li = calloc(1, sizeof(struct limititem));
5632
5633 if (!job_assumes(j, li != NULL)) {
5634 return false;
5635 }
5636
5637 SLIST_INSERT_HEAD(&j->limits, li, sle);
5638
5639 li->which = w;
5640 }
5641
5642 if (j->importing_hard_limits) {
5643 li->lim.rlim_max = r;
5644 li->sethard = true;
5645 } else {
5646 li->lim.rlim_cur = r;
5647 li->setsoft = true;
5648 }
5649
5650 return true;
5651 }
5652
5653 void
5654 limititem_delete(job_t j, struct limititem *li)
5655 {
5656 SLIST_REMOVE(&j->limits, li, limititem, sle);
5657
5658 free(li);
5659 }
5660
5661 #if HAVE_SANDBOX
5662 void
5663 seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
5664 {
5665 job_t j = context;
5666
5667 if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
5668 job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
5669 return;
5670 }
5671
5672 if (launch_data_get_bool(obj) == false) {
5673 return;
5674 }
5675
5676 if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
5677 j->seatbelt_flags |= SANDBOX_NAMED;
5678 }
5679 }
5680 #endif
5681
5682 void
5683 limititem_setup(launch_data_t obj, const char *key, void *context)
5684 {
5685 job_t j = context;
5686 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
5687 rlim_t rl;
5688
5689 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
5690 return;
5691 }
5692
5693 rl = launch_data_get_integer(obj);
5694
5695 for (i = 0; i < limits_cnt; i++) {
5696 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
5697 break;
5698 }
5699 }
5700
5701 if (i == limits_cnt) {
5702 return;
5703 }
5704
5705 limititem_update(j, launchd_keys2limits[i].val, rl);
5706 }
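/* Hedged sketch (not the spawn path itself): each limititem accumulated
 * above ultimately maps onto a getrlimit()/setrlimit() pair in the child,
 * preserving whichever half (soft or hard) the job did not specify.
 */
#if 0
static void
example_apply_limit(const struct limititem *li)
{
	struct rlimit rl;

	if (getrlimit(li->which, &rl) == -1) {
		return;
	}
	if (li->setsoft) {
		rl.rlim_cur = li->lim.rlim_cur;
	}
	if (li->sethard) {
		rl.rlim_max = li->lim.rlim_max;
	}
	(void)setrlimit(li->which, &rl);
}
#endif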
5707
5708 bool
5709 job_useless(job_t j)
5710 {
5711 if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
5712 if (j->legacy_LS_job && j->j_port) {
5713 return false;
5714 }
5715 job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
5716 return true;
5717 } else if (j->removal_pending) {
5718 job_log(j, LOG_DEBUG, "Exited while removal was pending.");
5719 return true;
5720 } else if (j->shutdown_monitor) {
5721 return false;
5722 } else if (j->mgr->shutting_down) {
5723 job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
5724 if (total_children == 0 && !j->anonymous) {
5725 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
5726 }
5727 return true;
5728 } else if (j->legacy_mach_job) {
5729 if (SLIST_EMPTY(&j->machservices)) {
5730 job_log(j, LOG_INFO, "Garbage collecting");
5731 return true;
5732 } else if (!j->checkedin) {
5733 job_log(j, LOG_WARNING, "Failed to check-in!");
5734 return true;
5735 }
5736 } else {
5737 /* If the job's executable does not have any valid architectures (for
5738 * example, if it's a PowerPC-only job), then we don't even bother
5739 * trying to relaunch it, as we have no reasonable expectation that
5740 * the situation will change.
5741 *
5742 * <rdar://problem/9106979>
5743 */
5744 if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
5745 job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
5746 return true;
5747 }
5748 }
5749
5750 return false;
5751 }
5752
5753 bool
5754 job_keepalive(job_t j)
5755 {
5756 mach_msg_type_number_t statusCnt;
5757 mach_port_status_t status;
5758 struct semaphoreitem *si;
5759 struct machservice *ms;
5760 struct stat sb;
5761 bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
5762 bool is_not_kextd = (do_apple_internal_logging || (strcmp(j->label, "com.apple.kextd") != 0));
5763
5764 if (unlikely(j->mgr->shutting_down)) {
5765 return false;
5766 }
5767
5768 /*
5769 * 5066316
5770 *
5771 * We definitely need to revisit this after Leopard ships. Please see
5772 * launchctl.c for the other half of this hack.
5773 */
5774 if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
5775 return false;
5776 }
5777
5778 if (unlikely(j->needs_kickoff)) {
5779 job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
5780 return false;
5781 }
5782
5783 if (j->start_pending) {
5784 job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
5785 return true;
5786 }
5787
5788 if (!j->ondemand) {
5789 job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
5790 return true;
5791 }
5792
5793 SLIST_FOREACH(ms, &j->machservices, sle) {
5794 statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
5795 if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
5796 (mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
5797 continue;
5798 }
5799 if (status.mps_msgcount) {
5800 job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
5801 status.mps_msgcount, ms->name);
5802 return true;
5803 }
5804 }
5805
5806 /* TODO: Coalesce external events and semaphore items, since they're basically
5807 * the same thing.
5808 */
5809 struct externalevent *ei = NULL;
5810 LIST_FOREACH(ei, &j->events, job_le) {
5811 if (ei->state == ei->wanted_state) {
5812 return true;
5813 }
5814 }
5815
5816 SLIST_FOREACH(si, &j->semaphores, sle) {
5817 bool wanted_state = false;
5818 int qdir_file_cnt;
5819 job_t other_j;
5820
5821 switch (si->why) {
5822 case NETWORK_UP:
5823 wanted_state = true;
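/* FALLTHROUGH: each "true" case sets wanted_state and shares the check in the paired case below. */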
5824 case NETWORK_DOWN:
5825 if (network_up == wanted_state) {
5826 job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
5827 return true;
5828 }
5829 break;
5830 case SUCCESSFUL_EXIT:
5831 wanted_state = true;
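/* FALLTHROUGH */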
5832 case FAILED_EXIT:
5833 if (good_exit == wanted_state) {
5834 job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
5835 return true;
5836 }
5837 break;
5838 case CRASHED:
5839 wanted_state = true;
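/* FALLTHROUGH */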
5840 case DID_NOT_CRASH:
5841 if (j->crashed == wanted_state) {
5842 return true;
5843 }
5844 break;
5845 case OTHER_JOB_ENABLED:
5846 wanted_state = true;
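/* FALLTHROUGH */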
5847 case OTHER_JOB_DISABLED:
5848 if ((bool)job_find(NULL, si->what) == wanted_state) {
5849 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
5850 return true;
5851 }
5852 break;
5853 case OTHER_JOB_ACTIVE:
5854 wanted_state = true;
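/* FALLTHROUGH */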
5855 case OTHER_JOB_INACTIVE:
5856 if ((other_j = job_find(NULL, si->what))) {
5857 if ((bool)other_j->p == wanted_state) {
5858 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
5859 return true;
5860 }
5861 }
5862 break;
5863 case PATH_EXISTS:
5864 wanted_state = true;
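/* FALLTHROUGH */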
5865 case PATH_MISSING:
5866 if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
5867 job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
5868 return true;
5869 } else {
5870 if (wanted_state) { /* File is not there but we wish it was. */
5871 if (si->fd != -1 && !si->watching_parent) { /* Need to be watching the parent now. */
5872 (void)job_assumes(j, runtime_close(si->fd) == 0);
5873 si->fd = -1;
5874 semaphoreitem_watch(j, si);
5875 }
5876 } else { /* File is there but we wish it wasn't. */
5877 if (si->fd != -1 && si->watching_parent) { /* Need to watch the file now. */
5878 (void)job_assumes(j, runtime_close(si->fd) == 0);
5879 si->fd = -1;
5880 semaphoreitem_watch(j, si);
5881 }
5882 }
5883 }
5884 break;
5885 case PATH_CHANGES:
5886 break;
5887 case DIR_NOT_EMPTY:
5888 if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
5889 job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
5890 } else if (qdir_file_cnt > 0) {
5891 job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
5892 return true;
5893 }
5894 break;
5895 }
5896 }
5897
5898 return false;
5899 }
5900
5901 const char *
5902 job_active(job_t j)
5903 {
5904 struct machservice *ms;
5905 if (j->p && j->shutdown_monitor) {
5906 return "Monitoring shutdown";
5907 }
5908 if (j->p) {
5909 return "PID is still valid";
5910 }
5911
5912 if (j->mgr->shutting_down && j->log_redirect_fd) {
5913 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5914 j->log_redirect_fd = 0;
5915 }
5916
5917 if (j->log_redirect_fd) {
5918 if (job_assumes(j, j->legacy_LS_job)) {
5919 return "Standard out/error is still valid";
5920 } else {
5921 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5922 j->log_redirect_fd = 0;
5923 }
5924 }
5925
5926 if (j->priv_port_has_senders) {
5927 return "Privileged Port still has outstanding senders";
5928 }
5929
5930 SLIST_FOREACH(ms, &j->machservices, sle) {
5931 if (ms->recv && machservice_active(ms)) {
5932 return "Mach service is still active";
5933 }
5934 }
5935
5936 return NULL;
5937 }
5938
5939 void
5940 machservice_watch(job_t j, struct machservice *ms)
5941 {
5942 if (ms->recv) {
5943 (void)job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
5944 }
5945 }
5946
5947 void
5948 machservice_ignore(job_t j, struct machservice *ms)
5949 {
5950 (void)job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
5951 }
5952
5953 void
5954 machservice_resetport(job_t j, struct machservice *ms)
5955 {
5956 LIST_REMOVE(ms, port_hash_sle);
5957 (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
5958 (void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
5959 ms->gen_num++;
5960 (void)job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
5961 (void)job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
5962 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
5963 }
5964
5965 struct machservice *
5966 machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
5967 {
5968 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
5969
5970 if (!job_assumes(j, ms != NULL)) {
5971 return NULL;
5972 }
5973
5974 strcpy((char *)ms->name, name);
5975 ms->job = j;
5976 ms->gen_num = 1;
5977 ms->per_pid = pid_local;
5978
5979 if (likely(*serviceport == MACH_PORT_NULL)) {
5980 if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
5981 goto out_bad;
5982 }
5983
5984 if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
5985 goto out_bad2;
5986 }
5987 *serviceport = ms->port;
5988 ms->recv = true;
5989 } else {
5990 ms->port = *serviceport;
5991 ms->isActive = true;
5992 }
5993
5994 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
5995
5996 jobmgr_t where2put = j->mgr;
5997 /* XPC domains are separate from Mach bootstraps. */
5998 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
5999 if (g_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
6000 where2put = root_jobmgr;
6001 }
6002 }
6003
6004 /* Don't allow MachServices added by multiple-instance jobs to be looked up by others.
6005 * We could just do this with a simple bit, but then we'd have to uniquify the
6006 * names ourselves to avoid collisions. This is just easier.
6007 */
6008 if (!j->dedicated_instance) {
6009 LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
6010 }
6011 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
6012
6013 job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);
6014
6015 return ms;
6016 out_bad2:
6017 (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
6018 out_bad:
6019 free(ms);
6020 return NULL;
6021 }
6022
6023 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
6024 struct machservice *
6025 machservice_new_alias(job_t j, struct machservice *orig)
6026 {
6027 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
6028 if (job_assumes(j, ms != NULL)) {
6029 strcpy((char *)ms->name, orig->name);
6030 ms->alias = orig;
6031 ms->job = j;
6032
6033 LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
6034 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
6035 jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
6036 }
6037
6038 return ms;
6039 }
6040 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
6041
6042 bootstrap_status_t
6043 machservice_status(struct machservice *ms)
6044 {
6045 ms = ms->alias ? ms->alias : ms;
6046 if (ms->isActive) {
6047 return BOOTSTRAP_STATUS_ACTIVE;
6048 } else if (ms->job->ondemand) {
6049 return BOOTSTRAP_STATUS_ON_DEMAND;
6050 } else {
6051 return BOOTSTRAP_STATUS_INACTIVE;
6052 }
6053 }
6054
6055 void
6056 job_setup_exception_port(job_t j, task_t target_task)
6057 {
6058 struct machservice *ms;
6059 thread_state_flavor_t f = 0;
6060 mach_port_t exc_port = the_exception_server;
6061
6062 if (unlikely(j->alt_exc_handler)) {
6063 ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
6064 if (likely(ms)) {
6065 exc_port = machservice_port(ms);
6066 } else {
6067 job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
6068 }
6069 } else if (unlikely(j->internal_exc_handler)) {
6070 exc_port = runtime_get_kernel_port();
6071 } else if (unlikely(!exc_port)) {
6072 return;
6073 }
6074
6075 #if defined (__ppc__) || defined(__ppc64__)
6076 f = PPC_THREAD_STATE64;
6077 #elif defined(__i386__) || defined(__x86_64__)
6078 f = x86_THREAD_STATE;
6079 #elif defined(__arm__)
6080 f = ARM_THREAD_STATE;
6081 #else
6082 #error "unknown architecture"
6083 #endif
6084
6085 if (likely(target_task)) {
6086 (void)job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
6087 } else if (pid1_magic && the_exception_server) {
6088 mach_port_t mhp = mach_host_self();
6089 (void)job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
6090 (void)job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
6091 }
6092 }
6093
6094 void
6095 job_set_exception_port(job_t j, mach_port_t port)
6096 {
6097 if (unlikely(!the_exception_server)) {
6098 the_exception_server = port;
6099 job_setup_exception_port(j, 0);
6100 } else {
6101 job_log(j, LOG_WARNING, "The exception server is already claimed!");
6102 }
6103 }
6104
6105 void
6106 machservice_setup_options(launch_data_t obj, const char *key, void *context)
6107 {
6108 struct machservice *ms = context;
6109 mach_port_t mhp = mach_host_self();
6110 int which_port;
6111 bool b;
6112
6113 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
6114 return;
6115 }
6116
6117 switch (launch_data_get_type(obj)) {
6118 case LAUNCH_DATA_INTEGER:
6119 which_port = (int)launch_data_get_integer(obj); /* XXX we should bound check this... */
6120 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
6121 switch (which_port) {
6122 case TASK_KERNEL_PORT:
6123 case TASK_HOST_PORT:
6124 case TASK_NAME_PORT:
6125 case TASK_BOOTSTRAP_PORT:
6126 /* I find it a little odd that zero isn't reserved in the header.
6127 * Normally Mach is fairly good about this convention... */
6128 case 0:
6129 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
6130 break;
6131 default:
6132 ms->special_port_num = which_port;
6133 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
6134 break;
6135 }
6136 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
6137 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
6138 (void)job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
6139 } else {
6140 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
6141 }
6142 }
break; /* previously fell through into the boolean case */
6143 case LAUNCH_DATA_BOOL:
6144 b = launch_data_get_bool(obj);
6145 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
6146 ms->debug_on_close = b;
6147 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
6148 ms->reset = b;
6149 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
6150 ms->hide = b;
6151 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
6152 job_set_exception_port(ms->job, ms->port);
6153 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
6154 ms->kUNCServer = b;
6155 (void)job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
6156 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_PINGEVENTUPDATES) == 0) {
6157 ms->event_update_port = b;
6158 }
6159 break;
6160 case LAUNCH_DATA_STRING:
6161 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
6162 const char *option = launch_data_get_string(obj);
6163 if (strcasecmp(option, "One") == 0) {
6164 ms->drain_one_on_crash = true;
6165 } else if (strcasecmp(option, "All") == 0) {
6166 ms->drain_all_on_crash = true;
6167 }
6168 }
6169 break;
6170 case LAUNCH_DATA_DICTIONARY:
6171 job_set_exception_port(ms->job, ms->port);
6172 break;
6173 default:
6174 break;
6175 }
6176
6177 (void)job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
6178 }
6179
6180 void
6181 machservice_setup(launch_data_t obj, const char *key, void *context)
6182 {
6183 job_t j = context;
6184 struct machservice *ms;
6185 mach_port_t p = MACH_PORT_NULL;
6186
6187 if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
6188 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
6189 return;
6190 }
6191
6192 if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
6193 return;
6194 }
6195
6196 ms->isActive = false;
6197 ms->upfront = true;
6198
6199 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
6200 launch_data_dict_iterate(obj, machservice_setup_options, ms);
6201 }
6202 }
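/* For orientation, machservice_setup() and machservice_setup_options() above
 * are driven by plist fragments of roughly this shape (the service name is
 * hypothetical):
 *
 *	<key>MachServices</key>
 *	<dict>
 *		<key>com.example.service</key>
 *		<dict>
 *			<key>ResetAtClose</key>
 *			<true/>
 *			<key>HideUntilCheckIn</key>
 *			<true/>
 *		</dict>
 *	</dict>
 */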
6203
6204 jobmgr_t
6205 jobmgr_do_garbage_collection(jobmgr_t jm)
6206 {
6207 jobmgr_t jmi = NULL, jmn = NULL;
6208 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6209 jobmgr_do_garbage_collection(jmi);
6210 }
6211
6212 if (!jm->shutting_down) {
6213 return jm;
6214 }
6215
6216 if (SLIST_EMPTY(&jm->submgrs)) {
6217 jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
6218 } else {
6219 jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
6220 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
6221 jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
6222 }
6223 }
6224
6225 size_t actives = 0;
6226 job_t ji = NULL, jn = NULL;
6227 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
6228 if (ji->anonymous) {
6229 continue;
6230 }
6231
6232 /* Let the shutdown monitor be up until the very end. */
6233 if (ji->shutdown_monitor) {
6234 continue;
6235 }
6236
6237 /* On our first pass through, open a transaction for all the jobs that
6238 * need to be dirty at shutdown. We'll close these transactions once the
6239 * jobs that do not need to be dirty at shutdown have all exited.
6240 */
6241 if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
6242 job_open_shutdown_transaction(ji);
6243 }
6244
6245 const char *active = job_active(ji);
6246 if (!active) {
6247 job_remove(ji);
6248 } else {
6249 job_log(ji, LOG_DEBUG, "Job is active: %s", active);
6250 job_stop(ji);
6251
6252 if (ji->p && !ji->dirty_at_shutdown) {
6253 /* We really only care if the job has not yet been reaped.
6254 * There's no reason to delay shutdown if a Mach port has not
6255 * yet been sent back to us, for example. While we're shutting
6256 * all the "normal" jobs down, do not count the
6257 * dirty-at-shutdown jobs toward the total of actives.
6258 *
6259 * Note that there's a potential race here where we may not get
6260 * a port back in time, so that when we hit jobmgr_remove(), we
6261 * end up removing the job and then our attempt to close the
6262 * Mach port will fail. But at that point, the failure won't
6263 * even make it to the syslog, so not a big deal.
6264 */
6265 actives++;
6266 }
6267
6268 if (ji->clean_kill) {
6269 job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
6270 } else {
6271 job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
6272 }
6273 }
6274 }
6275
6276 jm->shutdown_jobs_dirtied = true;
6277 if (actives == 0) {
6278 if (!jm->shutdown_jobs_cleaned) {
6279 LIST_FOREACH(ji, &jm->jobs, sle) {
6280 if (!ji->anonymous) {
6281 job_close_shutdown_transaction(ji);
6282 actives++;
6283 }
6284 }
6285
6286 jm->shutdown_jobs_cleaned = true;
6287 } else if (jm->monitor_shutdown && _s_shutdown_monitor) {
6288 /* The rest of shutdown has completed, so we can kill the shutdown
6289 * monitor now like it was any other job.
6290 */
6291 _s_shutdown_monitor->shutdown_monitor = false;
6292 actives = 1;
6293
6294 job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
6295 job_stop(_s_shutdown_monitor);
6296 _s_shutdown_monitor = NULL;
6297 }
6298 }
6299
6300 jobmgr_t r = jm;
6301 if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
6302 jobmgr_log(jm, LOG_DEBUG, "Removing.");
6303 jobmgr_remove(jm);
6304 r = NULL;
6305 }
6306
6307 return r;
6308 }
6309
6310 void
6311 jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6312 {
6313 /* I maintain that stray processes should be at the mercy of launchd during shutdown,
6314 * but nevertheless, things like diskimages-helper can stick around, and SIGKILLing
6315 * them can result in data loss. So we send SIGTERM to all the strays and don't wait
6316 * for them to exit before moving on.
6317 *
6318 * See rdar://problem/6562592
6319 */
6320 size_t i = 0;
6321 for (i = 0; i < np; i++) {
6322 if (p[i] != 0) {
6323 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
6324 (void)jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
6325 }
6326 }
6327 }
6328
6329 void
6330 jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
6331 {
6332 size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6333 pid_t *pids = NULL;
6334 int i = 0, kp_cnt = 0;
6335
6336 if (likely(jm->parentmgr || !pid1_magic)) {
6337 return;
6338 }
6339
6340 if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
6341 return;
6342 }
6343
6344 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6345
6346 if (!jobmgr_assumes(jm, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
6347 goto out;
6348 }
6349
6350 pid_t *ps = (pid_t *)calloc(kp_cnt, sizeof(pid_t));
if (!jobmgr_assumes(jm, ps != NULL)) {
goto out;
}
6351 for (i = 0; i < kp_cnt; i++) {
6352 struct proc_bsdshortinfo proc;
6353 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6354 if (errno != ESRCH) {
6355 jobmgr_assumes(jm, errno == 0);
6356 }
6357
6358 kp_skipped++;
6359 continue;
6360 }
6361
6362 pid_t p_i = pids[i];
6363 pid_t pp_i = proc.pbsi_ppid;
6364 pid_t pg_i = proc.pbsi_pgid;
6365 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6366 const char *n = proc.pbsi_comm;
6367
6368 if (unlikely(p_i == 0 || p_i == 1)) {
6369 kp_skipped++;
6370 continue;
6371 }
6372
6373 if (_s_shutdown_monitor && pp_i == _s_shutdown_monitor->p) {
6374 kp_skipped++;
6375 continue;
6376 }
6377
6378 /* We might have some jobs hanging around that we've decided to shut down regardless. */
6379 job_t j = jobmgr_find_by_pid(jm, p_i, false);
6380 if (!j || j->anonymous) {
6381 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
6382
6383 int status = 0;
6384 if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
6385 if (jobmgr_assumes(jm, waitpid(p_i, &status, WNOHANG) == p_i)) { /* reaping a zombie returns its PID, not 0 */
6386 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6387 }
6388 kp_skipped++;
6389 } else {
6390 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6391 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6392 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6393 * hints to the kernel along the way, so that it could shut down certain subsystems when
6394 * their userspace emissaries go away, before the call to reboot(2).
6395 */
6396 if (leader && leader->ignore_pg_at_shutdown) {
6397 kp_skipped++;
6398 } else {
6399 ps[i] = p_i;
6400 }
6401 }
6402 } else {
6403 kp_skipped++;
6404 }
6405 }
6406
6407 if ((kp_cnt - kp_skipped > 0) && kill_strays) {
/* ps is sparsely populated across all kp_cnt slots, so pass the full
 * length; jobmgr_kill_stray_children() skips the zero entries. */
6408 jobmgr_kill_stray_children(jm, ps, kp_cnt);
6409 }
6410
6411 free(ps);
6412 out:
6413 free(pids);
6414 }
6415
6416 jobmgr_t
6417 jobmgr_parent(jobmgr_t jm)
6418 {
6419 return jm->parentmgr;
6420 }
6421
6422 void
6423 job_uncork_fork(job_t j)
6424 {
6425 pid_t c = j->p;
6426
6427 job_log(j, LOG_DEBUG, "Uncorking the fork().");
6428 /* this unblocks the child and avoids a race
6429 * between the above fork() and the kevent_mod() */
6430 (void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
6431 (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
6432 j->fork_fd = 0;
6433 }
6434
6435 jobmgr_t
6436 jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
6437 {
6438 mach_msg_size_t mxmsgsz;
6439 job_t bootstrapper = NULL;
6440 jobmgr_t jmr;
6441
6442 launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);
6443
6444 if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
6445 jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
6446 return NULL;
6447 }
6448
6449 jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));
6450
6451 if (!jobmgr_assumes(jm, jmr != NULL)) {
6452 return NULL;
6453 }
6454
6455 if (jm == NULL) {
6456 root_jobmgr = jmr;
6457 }
6458
6459 jmr->kqjobmgr_callback = jobmgr_callback;
6460 strcpy(jmr->name_init, name ? name : "Under construction");
6461
6462 jmr->req_port = requestorport;
6463
6464 if ((jmr->parentmgr = jm)) {
6465 SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
6466 }
6467
6468 if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
6469 goto out_bad;
6470 }
6471
6472 if (transfer_port != MACH_PORT_NULL) {
6473 (void)jobmgr_assumes(jmr, jm != NULL);
6474 jmr->jm_port = transfer_port;
6475 } else if (!jm && !pid1_magic) {
6476 char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
6477 name_t service_buf;
6478
6479 snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());
6480
6481 if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
6482 goto out_bad;
6483 }
6484
6485 if (trusted_fd) {
6486 int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);
6487
6488 if ((dfd = dup(lfd)) >= 0) {
6489 (void)jobmgr_assumes(jmr, runtime_close(dfd) != -1);
6490 (void)jobmgr_assumes(jmr, runtime_close(lfd) != -1);
6491 }
6492
6493 unsetenv(LAUNCHD_TRUSTED_FD_ENV);
6494 }
6495
6496 /* cut off the Libc cache, we don't want to deadlock against ourself */
6497 inherited_bootstrap_port = bootstrap_port;
6498 bootstrap_port = MACH_PORT_NULL;
6499 launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);
6500
6501 /* We set this explicitly as we start each child */
6502 launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
6503 } else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
6504 goto out_bad;
6505 }
6506
6507 if (!name) {
6508 sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
6509 }
6510
6511 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
6512 mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
6513 if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
6514 mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
6515 }
6516
6517 /* Total hacks. But the MIG server loop is too generic, and the more dynamic
6518 * parts of it haven't been tested, or if they have, it was a very long time
6519 * ago.
6520 */
6521 if (xpc_events_xpc_events_subsystem.maxsize > mxmsgsz) {
6522 mxmsgsz = xpc_events_xpc_events_subsystem.maxsize;
6523 }
6524 if (xpc_domain_xpc_domain_subsystem.maxsize > mxmsgsz) {
6525 mxmsgsz = xpc_domain_xpc_domain_subsystem.maxsize;
6526 }
6527
6528 if (!jm) {
6529 (void)jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
6530 (void)jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
6531 (void)jobmgr_assumes(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
6532 (void)jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
6533 }
6534
6535 if (name && !skip_init) {
6536 bootstrapper = jobmgr_init_session(jmr, name, sflag);
6537 }
6538
6539 if (!bootstrapper || !bootstrapper->weird_bootstrap) {
6540 if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
6541 goto out_bad;
6542 }
6543 }
6544
6545 jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");
6546
6547 if (bootstrapper) {
6548 bootstrapper->asport = asport;
6549
6550 jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
6551 (void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
6552 } else {
6553 jmr->req_asport = asport;
6554 }
6555
6556 if (asport != MACH_PORT_NULL) {
6557 (void)jobmgr_assumes(jmr, launchd_mport_copy_send(asport) == KERN_SUCCESS);
6558 }
6559
6560 if (jmr->parentmgr) {
6561 runtime_add_weak_ref();
6562 }
6563
6564 return jmr;
6565
6566 out_bad:
6567 if (jmr) {
6568 jobmgr_remove(jmr);
6569 if (jm == NULL) {
6570 root_jobmgr = NULL;
6571 }
6572 }
6573 return NULL;
6574 }
6575
6576 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
6577 jobmgr_t
6578 jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
6579 {
6580 jobmgr_t new = NULL;
6581
6582 /* These job managers are basically singletons, so we use the root Mach
6583 * bootstrap port as their requestor ports so they'll never go away.
6584 */
6585 mach_port_t req_port = root_jobmgr->jm_port;
6586 if (jobmgr_assumes(jm, launchd_mport_make_send(req_port) == KERN_SUCCESS)) {
6587 new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
6588 if (new) {
6589 new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
6590 new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
6591 new->xpc_singleton = true;
6592 }
6593 }
6594
6595 return new;
6596 }
6597
6598 jobmgr_t
6599 jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
6600 {
6601 jobmgr_t jmi = NULL;
6602 LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
6603 if (jmi->req_euid == uid) {
6604 return jmi;
6605 }
6606 }
6607
6608 name_t name;
6609 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
6610 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6611 if (jobmgr_assumes(jm, jmi != NULL)) {
6612 /* We need to create a per-user launchd for this UID if there isn't one
6613 * already so we can grab the bootstrap port.
6614 */
6615 job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
6616 if (jobmgr_assumes(jmi, puj != NULL)) {
6617 (void)jobmgr_assumes(jmi, launchd_mport_copy_send(puj->asport) == KERN_SUCCESS);
6618 (void)jobmgr_assumes(jmi, launchd_mport_copy_send(jmi->req_bsport) == KERN_SUCCESS);
6619 jmi->shortdesc = "per-user";
6620 jmi->req_asport = puj->asport;
6621 jmi->req_asid = puj->asid;
6622 jmi->req_euid = uid;
6623 jmi->req_egid = -1;
6624
6625 LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
6626 } else {
6627 jobmgr_remove(jmi);
6628 }
6629 }
6630
6631 return jmi;
6632 }
6633
6634 jobmgr_t
6635 jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
6636 {
6637 jobmgr_t jmi = NULL;
6638 LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
6639 if (jmi->req_asid == asid) {
6640 return jmi;
6641 }
6642 }
6643
6644 name_t name;
6645 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
6646 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6647 if (jobmgr_assumes(jm, jmi != NULL)) {
6648 (void)jobmgr_assumes(jmi, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
6649 jmi->shortdesc = "per-session";
6650 jmi->req_bsport = root_jobmgr->jm_port;
6651 (void)jobmgr_assumes(jmi, audit_session_port(asid, &jmi->req_asport) == 0);
6652 jmi->req_asid = asid;
6653 jmi->req_euid = -1;
6654 jmi->req_egid = -1;
6655
6656 LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
6657 } else {
6658 jobmgr_remove(jmi);
6659 }
6660
6661 return jmi;
6662 }
6663 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
6664
6665 job_t
6666 jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
6667 {
6668 const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
6669 char thelabel[1000];
6670 job_t bootstrapper;
6671
6672 snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
6673 bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
6674
6675 if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
6676 bootstrapper->is_bootstrapper = true;
6677 char buf[100];
6678
6679 /* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
6680 snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
6681 envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false, false);
6682 bootstrapper->weird_bootstrap = true;
6683 (void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
6684 } else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
6685 bootstrapper->is_bootstrapper = true;
6686 if (jobmgr_assumes(jm, pid1_magic)) {
6687 /* Have our system bootstrapper print out to the console. */
6688 bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
6689 bootstrapper->stderrpath = strdup(_PATH_CONSOLE);
6690
6691 if (g_console) {
6692 (void)jobmgr_assumes(jm, kevent_mod((uintptr_t)fileno(g_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
6693 }
6694 }
6695 }
6696
6697 jm->session_initialized = true;
6698 return bootstrapper;
6699 }
6700
6701 jobmgr_t
6702 jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
6703 {
6704 struct machservice *ms, *next_ms;
6705 jobmgr_t jmi, jmn;
6706
6707 /* Mach ports, unlike Unix descriptors, are reference counted. In other
6708 * words, when some program hands us a second or subsequent send right
6709 * to a port we already have open, the Mach kernel gives us the same
6710 * port number back and increments a reference count associated with
6711 * the port. This forces us, when discovering that a receive right at
6712 * the other end has been deleted, to wander all of our objects to see
6713 * what weird places clients might have handed us the same send right
6714 * to use. (A hedged sketch of this aliasing follows this function.)
6715 */
6716
6717 if (jm == root_jobmgr) {
6718 if (port == inherited_bootstrap_port) {
6719 (void)jobmgr_assumes(jm, launchd_mport_deallocate(port) == KERN_SUCCESS);
6720 inherited_bootstrap_port = MACH_PORT_NULL;
6721
6722 return jobmgr_shutdown(jm);
6723 }
6724
6725 LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
6726 if (ms->port == port && !ms->recv) {
6727 machservice_delete(ms->job, ms, true);
6728 }
6729 }
6730 }
6731
6732 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6733 jobmgr_delete_anything_with_port(jmi, port);
6734 }
6735
6736 if (jm->req_port == port) {
6737 jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
6738 return jobmgr_shutdown(jm);
6739 }
6740
6741 return jm;
6742 }
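/* Hedged sketch (not launchd code) of the send-right aliasing described in
 * the comment at the top of jobmgr_delete_anything_with_port(): handing a
 * task a second send right for a port it already holds does not create a
 * new port name; it bumps the user-reference count on the existing one,
 * which mach_port_mod_refs() can demonstrate directly.
 */
#if 0
static void
example_uref_aliasing(void)
{
	mach_port_t p = MACH_PORT_NULL;
	mach_port_urefs_t refs = 0;

	(void)mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &p);
	(void)mach_port_insert_right(mach_task_self(), p, p, MACH_MSG_TYPE_MAKE_SEND);
	/* A second copy of the same send right only increments the urefs... */
	(void)mach_port_mod_refs(mach_task_self(), p, MACH_PORT_RIGHT_SEND, 1);
	/* ...so refs comes back as 2 for the single name p. */
	(void)mach_port_get_refs(mach_task_self(), p, MACH_PORT_RIGHT_SEND, &refs);
}
#endif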
6743
6744 struct machservice *
6745 jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
6746 {
6747 struct machservice *ms;
6748 job_t target_j;
6749
6750 jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);
6751
6752 if (target_pid) {
6753 /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
6754 * bootstrap in other bootstraps.
6755 */
6756
6757 /* Start in the given bootstrap. */
6758 if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
6759 /* If we fail, do a deep traversal. */
6760 if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
6761 jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
6762 return NULL;
6763 }
6764 }
6765
6766 SLIST_FOREACH(ms, &target_j->machservices, sle) {
6767 if (ms->per_pid && strcmp(name, ms->name) == 0) {
6768 return ms;
6769 }
6770 }
6771
6772 job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
6773 return NULL;
6774 }
6775
6776 jobmgr_t where2look = jm;
6777 /* XPC domains are separate from Mach bootstraps. */
6778 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
6779 if (g_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
6780 where2look = root_jobmgr;
6781 }
6782 }
6783
6784 LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
6785 if (!ms->per_pid && strcmp(name, ms->name) == 0) {
6786 return ms;
6787 }
6788 }
6789
6790 if (jm->parentmgr == NULL || !check_parent) {
6791 return NULL;
6792 }
6793
6794 return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
6795 }
6796
6797 mach_port_t
6798 machservice_port(struct machservice *ms)
6799 {
6800 return ms->port;
6801 }
6802
6803 job_t
6804 machservice_job(struct machservice *ms)
6805 {
6806 return ms->job;
6807 }
6808
6809 bool
6810 machservice_hidden(struct machservice *ms)
6811 {
6812 return ms->hide;
6813 }
6814
6815 bool
6816 machservice_active(struct machservice *ms)
6817 {
6818 return ms->isActive;
6819 }
6820
6821 const char *
6822 machservice_name(struct machservice *ms)
6823 {
6824 return ms->name;
6825 }
6826
6827 void
6828 machservice_drain_port(struct machservice *ms)
6829 {
6830 bool drain_one = ms->drain_one_on_crash;
6831 bool drain_all = ms->drain_all_on_crash;
6832
6833 if (!job_assumes(ms->job, drain_one || drain_all)) {
6834 return;
6835 }
6836
6837 job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);
6838
6839 char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
6840 char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
6841 mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
6842 mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;
6843
6844 mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
6845
6846 do {
6847 /* This should be a direct check on the Mach service to see if it's an exception-handling
6848 * port, and it will break things if ReportCrash or SafetyNet start advertising other
6849 * Mach services. But for now, it should be okay.
6850 */
6851 if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
6852 mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
6853 } else {
6854 mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT;
6856
6857 mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
6858 switch (mr) {
6859 case MACH_MSG_SUCCESS:
6860 mach_msg_destroy((mach_msg_header_t *)req_hdr);
6861 break;
6862 case MACH_RCV_TIMED_OUT:
6863 break;
6864 case MACH_RCV_TOO_LARGE:
6865 runtime_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
6866 break;
6867 default:
6868 break;
6869 }
6870 }
6871 } while (drain_all && mr != MACH_RCV_TIMED_OUT);
6872 }
6873
6874 void
6875 machservice_delete(job_t j, struct machservice *ms, bool port_died)
6876 {
6877 if (ms->alias) {
6878 /* HACK: Egregious code duplication. But dealing with aliases is a
6879 * pretty simple affair since they can't and shouldn't have any complex
6880 * behaviors associated with them.
6881 */
6882 LIST_REMOVE(ms, name_hash_sle);
6883 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
6884 free(ms);
6885 return;
6886 }
6887
6888 if (unlikely(ms->debug_on_close)) {
6889 job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
6890 (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
6891 }
6892
6893 if (ms->recv && job_assumes(j, !machservice_active(ms))) {
6894 job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
6895 (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
6896 }
6897
6898 (void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
6899
6900 if (unlikely(ms->port == the_exception_server)) {
6901 the_exception_server = 0;
6902 }
6903
6904 job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);
6905
6906 if (ms->special_port_num) {
6907 SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
6908 }
6909 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
6910
6911 if (!(j->dedicated_instance || ms->event_channel)) {
6912 LIST_REMOVE(ms, name_hash_sle);
6913 }
6914 LIST_REMOVE(ms, port_hash_sle);
6915
6916 free(ms);
6917 }
6918
6919 void
6920 machservice_request_notifications(struct machservice *ms)
6921 {
6922 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
6923
6924 ms->isActive = true;
6925
6926 if (ms->recv) {
6927 which = MACH_NOTIFY_PORT_DESTROYED;
6928 job_checkin(ms->job);
6929 }
6930
6931 (void)job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
6932 }
6933
6934 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
6935 #define END_OF(x) (&(x)[NELEM(x)])
6936
6937 char **
6938 mach_cmd2argv(const char *string)
6939 {
6940 char *argv[100], args[1000];
6941 const char *cp;
6942 char *argp = args, term, **argv_ret, *co;
6943 unsigned int nargs = 0, i;
6944
6945 for (cp = string; *cp;) {
6946 while (isspace((unsigned char)*cp)) cp++; /* cast keeps isspace() defined for negative chars */
6948 term = (*cp == '"') ? *cp++ : '\0';
6949 if (nargs < NELEM(argv)) {
6950 argv[nargs++] = argp;
6951 }
6952 while (*cp && (term ? *cp != term : !isspace((unsigned char)*cp)) && argp < END_OF(args)) {
6953 if (*cp == '\\') {
6954 cp++;
6955 }
6956 *argp++ = *cp;
6957 if (*cp) {
6958 cp++;
6959 }
6960 }
if (term && *cp == term) {
cp++; /* consume the closing quote so it doesn't seed a spurious empty argument */
}
6961 *argp++ = '\0';
6962 }
6963 argv[nargs] = NULL;
6964
6965 if (nargs == 0) {
6966 return NULL;
6967 }
6968
6969 argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);
6970
6971 if (!launchd_assumes(argv_ret != NULL)) {
6972 return NULL;
6973 }
6974
6975 co = (char *)argv_ret + (nargs + 1) * sizeof(char *);
6976
6977 for (i = 0; i < nargs; i++) {
6978 strcpy(co, argv[i]);
6979 argv_ret[i] = co;
6980 co += strlen(argv[i]) + 1;
6981 }
6982 argv_ret[i] = NULL;
6983
6984 return argv_ret;
6985 }
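/* Usage sketch for mach_cmd2argv(): the vector and its strings live in one
 * allocation, so a single free() releases everything. The path here is
 * hypothetical.
 */
#if 0
static void
example_cmd2argv(void)
{
	char **argv = mach_cmd2argv("/usr/libexec/example -flag \"two words\"");
	size_t i;

	if (argv == NULL) {
		return;
	}
	for (i = 0; argv[i] != NULL; i++) {
		/* argv[0] == "/usr/libexec/example", argv[1] == "-flag", argv[2] == "two words" */
	}
	free(argv);
}
#endif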
6986
6987 void
6988 job_checkin(job_t j)
6989 {
6990 j->checkedin = true;
6991 }
6992
6993 bool job_is_god(job_t j)
6994 {
6995 return j->embedded_special_privileges;
6996 }
6997
6998 bool
6999 job_ack_port_destruction(mach_port_t p)
7000 {
7001 struct machservice *ms;
7002 job_t j;
7003
7004 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
7005 if (ms->recv && (ms->port == p)) {
7006 break;
7007 }
7008 }
7009
7010 if (!jobmgr_assumes(root_jobmgr, ms != NULL)) {
7011 return false;
7012 }
7013
7014 j = ms->job;
7015
7016 jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
7017
7018 /* Without being the exception handler, NOTE_EXIT is our only way to tell if the job
7019 * crashed, and we can't rely on NOTE_EXIT always being processed after all the job's
7020 * receive rights have been returned.
7021 *
7022 * So when we get receive rights back, check to see if the job has been reaped yet. If
7023 * not, then we add this service to a list of services to be drained on crash if it's
7024 * requested that behavior. So, for a job with N receive rights all requesting that they
7025 * be drained on crash, we can safely handle the following sequence of events.
7026 *
7027 * ReceiveRight0Returned
7028 * ReceiveRight1Returned
7029 * ReceiveRight2Returned
7030 * NOTE_EXIT (reap, get exit status)
7031 * ReceiveRight3Returned
7032 * .
7033 * .
7034 * .
7035 * ReceiveRight(N - 1)Returned
7036 */
7037
7038 if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
7039 if (j->crashed && j->reaped) {
7040 job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
7041 machservice_drain_port(ms);
7042 } else if (!(j->crashed || j->reaped)) {
7043 job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
7044 }
7045 }
7046
7047 /* If we get this notification after the job has been reaped, then we want to ping
7048 * the event port to keep things going.
7049 */
7050 if (ms->event_update_port && !j->p && job_assumes(j, j->event_monitor)) {
7051 if (_s_event_update_port == MACH_PORT_NULL) {
7052 (void)job_assumes(j, launchd_mport_make_send_once(ms->port, &_s_event_update_port) == KERN_SUCCESS);
7053 }
7054 eventsystem_ping();
7055 }
7056
7057 ms->isActive = false;
7058 if (ms->delete_on_destruction) {
7059 machservice_delete(j, ms, false);
7060 } else if (ms->reset) {
7061 machservice_resetport(j, ms);
7062 }
7063
7064 job_dispatch(j, false);
7065
7066 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
7067
7068 return true;
7069 }
7070
7071 void
7072 job_ack_no_senders(job_t j)
7073 {
7074 j->priv_port_has_senders = false;
7075
7076 (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
7077 j->j_port = 0;
7078
7079 job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");
7080
7081 job_dispatch(j, false);
7082 }
7083
7084 bool
7085 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
7086 {
7087 struct semaphoreitem *si;
7088 size_t alloc_sz = sizeof(struct semaphoreitem);
7089
7090 if (what) {
7091 alloc_sz += strlen(what) + 1;
7092 }
7093
7094 if (!job_assumes(j, si = calloc(1, alloc_sz))) {
7095 return false;
7096 }
7097
7098 si->fd = -1;
7099 si->why = why;
7100
7101 if (what) {
7102 strcpy(si->what_init, what);
7103 }
7104
7105 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
7106
7107 if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
7108 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
7109 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
7110 j->nosy = true;
7111 }
7112
7113 semaphoreitem_runtime_mod_ref(si, true);
7114
7115 return true;
7116 }
7117
7118 void
7119 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
7120 {
7121 /*
7122 * External events need to be tracked.
7123 * Internal events do NOT need to be tracked.
7124 */
7125
7126 switch (si->why) {
7127 case SUCCESSFUL_EXIT:
7128 case FAILED_EXIT:
7129 case OTHER_JOB_ENABLED:
7130 case OTHER_JOB_DISABLED:
7131 case OTHER_JOB_ACTIVE:
7132 case OTHER_JOB_INACTIVE:
7133 return;
7134 default:
7135 break;
7136 }
7137
7138 if (add) {
7139 runtime_add_weak_ref();
7140 } else {
7141 runtime_del_weak_ref();
7142 }
7143 }
7144
7145 void
7146 semaphoreitem_delete(job_t j, struct semaphoreitem *si)
7147 {
7148 semaphoreitem_runtime_mod_ref(si, false);
7149
7150 SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
7151
7152 if (si->fd != -1) {
7153 (void)job_assumes(j, runtime_close(si->fd) != -1);
7154 }
7155
7156 /* We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores. */
7157 if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
7158 j->nosy = false;
7159 SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
7160 }
7161
7162 free(si);
7163 }
7164
7165 void
7166 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
7167 {
7168 struct semaphoreitem_dict_iter_context *sdic = context;
7169 semaphore_reason_t why;
7170
7171 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
7172
7173 semaphoreitem_new(sdic->j, why, key);
7174 }
7175
7176 void
7177 semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
7178 {
7179 struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
7180 job_t j = context;
7181 semaphore_reason_t why;
7182
7183 switch (launch_data_get_type(obj)) {
7184 case LAUNCH_DATA_BOOL:
7185 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
7186 why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
7187 semaphoreitem_new(j, why, NULL);
7188 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
7189 why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
7190 semaphoreitem_new(j, why, NULL);
7191 j->start_pending = true;
7192 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
7193 j->needs_kickoff = launch_data_get_bool(obj);
7194 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
7195 why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
7196 semaphoreitem_new(j, why, NULL);
7197 j->start_pending = true;
7198 } else {
7199 (void)job_assumes(j, false);
7200 }
7201 break;
7202 case LAUNCH_DATA_DICTIONARY:
7203 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
7204 sdic.why_true = PATH_EXISTS;
7205 sdic.why_false = PATH_MISSING;
7206 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
7207 sdic.why_true = OTHER_JOB_ACTIVE;
7208 sdic.why_false = OTHER_JOB_INACTIVE;
7209 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
7210 sdic.why_true = OTHER_JOB_ENABLED;
7211 sdic.why_false = OTHER_JOB_DISABLED;
7212 } else {
7213 (void)job_assumes(j, false);
7214 break;
7215 }
7216
7217 launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
7218 break;
7219 default:
7220 (void)job_assumes(j, false);
7221 break;
7222 }
7223 }
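/* For orientation, semaphoreitem_setup() above is handed the values of a
 * KeepAlive dictionary, e.g. (the path is hypothetical):
 *
 *	<key>KeepAlive</key>
 *	<dict>
 *		<key>SuccessfulExit</key>
 *		<false/>
 *		<key>PathState</key>
 *		<dict>
 *			<key>/var/run/example.pid</key>
 *			<true/>
 *		</dict>
 *	</dict>
 *
 * Booleans map directly to semaphore reasons; sub-dictionaries are walked by
 * semaphoreitem_setup_dict_iter() with the why_true/why_false pair chosen
 * above.
 */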
7224
7225 bool
7226 externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event)
7227 {
7228 struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
7229 if (job_assumes(j, ee != NULL)) {
7230 ee->event = launch_data_copy(event);
7231 if (job_assumes(j, ee->event != NULL)) {
7232 strcpy(ee->name, evname);
7233 ee->job = j;
7234 ee->id = sys->curid;
7235 ee->sys = sys;
7236 ee->state = false;
7237 ee->wanted_state = true;
7238 sys->curid++;
7239
7240 LIST_INSERT_HEAD(&j->events, ee, job_le);
7241 LIST_INSERT_HEAD(&sys->events, ee, sys_le);
7242
7243 job_log(j, LOG_DEBUG, "New event: %s:%s", sys->name, evname);
7244 } else {
7245 free(ee);
7246 ee = NULL;
7247 }
7248 }
7249
7250 eventsystem_ping();
7251 return ee != NULL;
7252 }
7253
7254 void
7255 externalevent_delete(struct externalevent *ee)
7256 {
7257 launch_data_free(ee->event);
7258 LIST_REMOVE(ee, job_le);
7259 LIST_REMOVE(ee, sys_le);
7260
7261 free(ee);
7262
7263 eventsystem_ping();
7264 }
7265
7266 void
7267 externalevent_setup(launch_data_t obj, const char *key, void *context)
7268 {
7269 struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
7270 (void)job_assumes(ctx->j, externalevent_new(ctx->j, ctx->sys, (char *)key, obj));
7271 }
7272
7273 struct externalevent *
7274 externalevent_find(const char *sysname, uint64_t id)
7275 {
7276 struct externalevent *ei = NULL;
7277
7278 struct eventsystem *es = eventsystem_find(sysname);
7279 if (launchd_assumes(es != NULL)) {
7280 LIST_FOREACH(ei, &es->events, sys_le) {
7281 if (ei->id == id) {
7282 break;
7283 }
7284 }
7285 }
7286
7287 return ei;
7288 }
7289
7290 struct eventsystem *
7291 eventsystem_new(const char *name)
7292 {
7293 struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
7294 if (launchd_assumes(es != NULL)) {
7295 strcpy(es->name, name);
7296 LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
7297 }
7298
7299 return es;
7300 }
7301
7302 void
7303 eventsystem_delete(struct eventsystem *es)
7304 {
7305 struct externalevent *ei = NULL;
7306 while ((ei = LIST_FIRST(&es->events))) {
7307 externalevent_delete(ei);
7308 }
7309
7310 LIST_REMOVE(es, global_le);
7311
7312 free(es);
7313 }
7314
7315 void
7316 eventsystem_setup(launch_data_t obj, const char *key, void *context)
7317 {
7318 job_t j = (job_t)context;
7319 if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
7320 return;
7321 }
7322
7323 struct eventsystem *sys = eventsystem_find(key);
7324 if (unlikely(sys == NULL)) {
7325 sys = eventsystem_new(key);
7326 job_log(j, LOG_DEBUG, "New event system: %s", key);
7327 }
7328
7329 if (job_assumes(j, sys != NULL)) {
7330 struct externalevent_iter_ctx ctx = {
7331 .j = j,
7332 .sys = sys,
7333 };
7334 launch_data_dict_iterate(obj, externalevent_setup, &ctx);
7335 sys->has_updates = true;
7336 }
7337 }
7338
7339 struct eventsystem *
7340 eventsystem_find(const char *name)
7341 {
7342 struct eventsystem *esi = NULL;
7343 LIST_FOREACH(esi, &_s_event_systems, global_le) {
7344 if (strcmp(name, esi->name) == 0) {
7345 break;
7346 }
7347 }
7348
7349 return esi;
7350 }
7351
7352 void
7353 eventsystem_ping(void)
7354 {
7355 /* We don't wrap this in an assumes() macro because we could potentially
7356 * call this function many times before the helper job gets back to us
7357 * and gives us another send-once right. So if it's MACH_PORT_NULL, that
7358 * means that we've sent a ping, but the helper hasn't yet checked in to
7359 * get the new set of notifications.
7360 */
7361 if (_s_event_update_port != MACH_PORT_NULL) {
7362 kern_return_t kr = helper_downcall_ping(_s_event_update_port);
7363 if (kr != KERN_SUCCESS) {
7364 runtime_syslog(LOG_NOTICE, "helper_downcall_ping(): kr = 0x%x", kr);
7365 }
7366 _s_event_update_port = MACH_PORT_NULL;
7367 }
7368 }
7369
7370 void
7371 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
7372 {
7373 jobmgr_t jmi, jmn;
7374 job_t ji, jn;
7375
7377 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7378 jobmgr_dispatch_all_semaphores(jmi);
7379 }
7380
7381 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7382 if (!SLIST_EMPTY(&ji->semaphores)) {
7383 job_dispatch(ji, false);
7384 }
7385 }
7386 }
7387
7388 time_t
7389 cronemu(int mon, int mday, int hour, int min)
7390 {
7391 struct tm workingtm;
7392 time_t now;
7393
7394 now = time(NULL);
7395 workingtm = *localtime(&now);
7396
7397 workingtm.tm_isdst = -1;
7398 workingtm.tm_sec = 0;
7399 workingtm.tm_min++;
7400
7401 while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
7402 workingtm.tm_year++;
7403 workingtm.tm_mon = 0;
7404 workingtm.tm_mday = 1;
7405 workingtm.tm_hour = 0;
7406 workingtm.tm_min = 0;
7407 mktime(&workingtm);
7408 }
7409
7410 return mktime(&workingtm);
7411 }
7412
7413 time_t
7414 cronemu_wday(int wday, int hour, int min)
7415 {
7416 struct tm workingtm;
7417 time_t now;
7418
7419 now = time(NULL);
7420 workingtm = *localtime(&now);
7421
7422 workingtm.tm_isdst = -1;
7423 workingtm.tm_sec = 0;
7424 workingtm.tm_min++;
7425
7426 if (wday == 7) {
7427 wday = 0;
7428 }
7429
7430 while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
7431 workingtm.tm_mday++;
7432 workingtm.tm_hour = 0;
7433 workingtm.tm_min = 0;
7434 mktime(&workingtm);
7435 }
7436
7437 return mktime(&workingtm);
7438 }
7439
7440 bool
7441 cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
7442 {
7443 if (mon == -1) {
7444 struct tm workingtm = *wtm;
7445 int carrytest;
7446
7447 while (!cronemu_mday(&workingtm, mday, hour, min)) {
7448 workingtm.tm_mon++;
7449 workingtm.tm_mday = 1;
7450 workingtm.tm_hour = 0;
7451 workingtm.tm_min = 0;
7452 carrytest = workingtm.tm_mon;
7453 mktime(&workingtm);
7454 if (carrytest != workingtm.tm_mon) {
7455 return false;
7456 }
7457 }
7458 *wtm = workingtm;
7459 return true;
7460 }
7461
7462 if (mon < wtm->tm_mon) {
7463 return false;
7464 }
7465
7466 if (mon > wtm->tm_mon) {
7467 wtm->tm_mon = mon;
7468 wtm->tm_mday = 1;
7469 wtm->tm_hour = 0;
7470 wtm->tm_min = 0;
7471 }
7472
7473 return cronemu_mday(wtm, mday, hour, min);
7474 }
7475
7476 bool
7477 cronemu_mday(struct tm *wtm, int mday, int hour, int min)
7478 {
7479 if (mday == -1) {
7480 struct tm workingtm = *wtm;
7481 int carrytest;
7482
7483 while (!cronemu_hour(&workingtm, hour, min)) {
7484 workingtm.tm_mday++;
7485 workingtm.tm_hour = 0;
7486 workingtm.tm_min = 0;
7487 carrytest = workingtm.tm_mday;
7488 mktime(&workingtm);
7489 if (carrytest != workingtm.tm_mday) {
7490 return false;
7491 }
7492 }
7493 *wtm = workingtm;
7494 return true;
7495 }
7496
7497 if (mday < wtm->tm_mday) {
7498 return false;
7499 }
7500
7501 if (mday > wtm->tm_mday) {
7502 wtm->tm_mday = mday;
7503 wtm->tm_hour = 0;
7504 wtm->tm_min = 0;
7505 }
7506
7507 return cronemu_hour(wtm, hour, min);
7508 }
7509
7510 bool
7511 cronemu_hour(struct tm *wtm, int hour, int min)
7512 {
7513 if (hour == -1) {
7514 struct tm workingtm = *wtm;
7515 int carrytest;
7516
7517 while (!cronemu_min(&workingtm, min)) {
7518 workingtm.tm_hour++;
7519 workingtm.tm_min = 0;
7520 carrytest = workingtm.tm_hour;
7521 mktime(&workingtm);
7522 if (carrytest != workingtm.tm_hour) {
7523 return false;
7524 }
7525 }
7526 *wtm = workingtm;
7527 return true;
7528 }
7529
7530 if (hour < wtm->tm_hour) {
7531 return false;
7532 }
7533
7534 if (hour > wtm->tm_hour) {
7535 wtm->tm_hour = hour;
7536 wtm->tm_min = 0;
7537 }
7538
7539 return cronemu_min(wtm, min);
7540 }
7541
7542 bool
7543 cronemu_min(struct tm *wtm, int min)
7544 {
7545 if (min == -1) {
7546 return true;
7547 }
7548
7549 if (min < wtm->tm_min) {
7550 return false;
7551 }
7552
7553 if (min > wtm->tm_min) {
7554 wtm->tm_min = min;
7555 }
7556
7557 return true;
7558 }
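
/* [Editorial sketch] How the cronemu* family above composes: a field value
 * of -1 acts as a cron-style wildcard, and each level rolls the working
 * struct tm forward (letting mktime() normalize any carry) until every field
 * matches, returning false when the carry test shows the search overflowed.
 * Illustrative usage, assuming only the definitions above:
 */
static void
print_next_fire_times(void)
{
	/* next occurrence of 02:30 on the 1st of any month (mon == -1) */
	time_t next_monthly = cronemu(-1, 1, 2, 30);
	/* next Monday at 09:00 (tm_wday convention: 0 == Sunday) */
	time_t next_monday = cronemu_wday(1, 9, 0);

	printf("next monthly fire: %s", ctime(&next_monthly));
	printf("next Monday fire: %s", ctime(&next_monday));
}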
7559
7560 kern_return_t
7561 job_mig_setup_shmem(job_t j, mach_port_t *shmem_port)
7562 {
7563 memory_object_size_t size_of_page, size_of_page_orig;
7564 vm_address_t vm_addr;
7565 kern_return_t kr;
7566
7567 if (!launchd_assumes(j != NULL)) {
7568 return BOOTSTRAP_NO_MEMORY;
7569 }
7570
7571 if (unlikely(j->anonymous)) {
7572 job_log(j, LOG_DEBUG, "Anonymous job tried to setup shared memory");
7573 return BOOTSTRAP_NOT_PRIVILEGED;
7574 }
7575
7576 if (unlikely(j->shmem)) {
7577 job_log(j, LOG_ERR, "Tried to setup shared memory more than once");
7578 return BOOTSTRAP_NOT_PRIVILEGED;
7579 }
7580
7581 size_of_page_orig = size_of_page = getpagesize();
7582
7583 kr = vm_allocate(mach_task_self(), &vm_addr, size_of_page, true);
7584
7585 if (!job_assumes(j, kr == 0)) {
7586 return kr;
7587 }
7588
7589 j->shmem = (typeof(j->shmem))vm_addr;
7590 j->shmem->vp_shmem_standby_timeout = j->timeout;
7591
7592 kr = mach_make_memory_entry_64(mach_task_self(), &size_of_page,
7593 (memory_object_offset_t)vm_addr, VM_PROT_READ|VM_PROT_WRITE, shmem_port, 0);
7594
7595 if (job_assumes(j, kr == 0)) {
7596 (void)job_assumes(j, size_of_page == size_of_page_orig);
7597 }
7598
7599 /* no need to inherit this in child processes */
7600 (void)job_assumes(j, vm_inherit(mach_task_self(), (vm_address_t)j->shmem, size_of_page_orig, VM_INHERIT_NONE) == 0);
7601
7602 return kr;
7603 }
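
/* [Editorial sketch] The routine above hands back a memory-entry port; the
 * consumer's side of the contract is to map that port into its own address
 * space. A minimal sketch of that client step (error handling elided, names
 * invented; this is not code from launchd or libvproc):
 */
static void *
map_shmem_locally(mach_port_t shmem_port)
{
	vm_address_t addr = 0;
	kern_return_t kr = vm_map(mach_task_self(), &addr, getpagesize(), 0,
			VM_FLAGS_ANYWHERE, shmem_port, 0, FALSE /* map, don't copy */,
			VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE,
			VM_INHERIT_NONE);
	return (kr == KERN_SUCCESS) ? (void *)addr : NULL;
}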
7604
7605 kern_return_t
7606 job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
7607 {
7608 struct ldcred *ldc = runtime_get_caller_creds();
7609 job_t js;
7610
7611 if (!launchd_assumes(j != NULL)) {
7612 return BOOTSTRAP_NO_MEMORY;
7613 }
7614
7615 if (unlikely(j->deny_job_creation)) {
7616 return BOOTSTRAP_NOT_PRIVILEGED;
7617 }
7618
7619 #if HAVE_SANDBOX
7620 const char **argv = (const char **)mach_cmd2argv(server_cmd);
7621 if (unlikely(argv == NULL)) {
7622 return BOOTSTRAP_NO_MEMORY;
7623 }
7624 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
7625 free(argv);
7626 return BOOTSTRAP_NOT_PRIVILEGED;
7627 }
7628 free(argv);
7629 #endif
7630
7631 job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);
7632
7633 if (pid1_magic) {
7634 if (ldc->euid || ldc->uid) {
7635 job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
7636 return VPROC_ERR_TRY_PER_USER;
7637 }
7638 } else {
7639 if (unlikely(server_uid != getuid())) {
7640 job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
7641 server_cmd, getuid(), server_uid);
7642 }
7643 server_uid = 0; /* zero means "do nothing" */
7644 }
7645
7646 js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);
7647
7648 if (unlikely(js == NULL)) {
7649 return BOOTSTRAP_NO_MEMORY;
7650 }
7651
7652 *server_portp = js->j_port;
7653 return BOOTSTRAP_SUCCESS;
7654 }
7655
7656 kern_return_t
7657 job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
7658 {
7659 struct ldcred *ldc = runtime_get_caller_creds();
7660 job_t otherj;
7661
7662 if (!launchd_assumes(j != NULL)) {
7663 return BOOTSTRAP_NO_MEMORY;
7664 }
7665
7666 if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
7667 #if TARGET_OS_EMBEDDED
7668 if (!j->embedded_special_privileges) {
7669 return BOOTSTRAP_NOT_PRIVILEGED;
7670 }
7671 #else
7672 return BOOTSTRAP_NOT_PRIVILEGED;
7673 #endif
7674 }
7675
7676 #if HAVE_SANDBOX
7677 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
7678 return BOOTSTRAP_NOT_PRIVILEGED;
7679 }
7680 #endif
7681
7682 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
7683 return BOOTSTRAP_UNKNOWN_SERVICE;
7684 }
7685
7686 #if TARGET_OS_EMBEDDED
7687 if (j->embedded_special_privileges && strcmp(j->username, otherj->username) != 0) {
7688 return BOOTSTRAP_NOT_PRIVILEGED;
7689 }
7690 #endif
7691
7692 if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
7693 bool do_block = otherj->p;
7694
7695 if (otherj->anonymous) {
7696 return BOOTSTRAP_NOT_PRIVILEGED;
7697 }
7698
7699 job_remove(otherj);
7700
7701 if (do_block) {
7702 job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
7703 /* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
7704 (void)job_assumes(otherj, waiting4removal_new(otherj, srp));
7705 return MIG_NO_REPLY;
7706 } else {
7707 return 0;
7708 }
7709 } else if (sig == VPROC_MAGIC_TRYKILL_SIGNAL) {
7710 if (!j->kill_via_shmem) {
7711 return BOOTSTRAP_NOT_PRIVILEGED;
7712 }
7713
7714 if (!j->shmem) {
7715 j->sent_kill_via_shmem = true;
7716 (void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
7717 return 0;
7718 }
7719
7720 #if !TARGET_OS_EMBEDDED
7721 if (__sync_bool_compare_and_swap(&j->shmem->vp_shmem_transaction_cnt, 0, -1)) {
7722 j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
7723 j->sent_kill_via_shmem = true;
7724 (void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
7725 return 0;
7726 }
7727 #endif
7728 return BOOTSTRAP_NOT_PRIVILEGED;
7729 } else if (otherj->p) {
7730 (void)job_assumes(j, runtime_kill(otherj->p, sig) != -1);
7731 }
7732
7733 return 0;
7734 }
7735
7736 kern_return_t
7737 job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
7738 {
7739 struct ldcred *ldc = runtime_get_caller_creds();
7740
7741 if (!launchd_assumes(j != NULL)) {
7742 return BOOTSTRAP_NO_MEMORY;
7743 }
7744
7745 if (!job_assumes(j, j->per_user)) {
7746 return BOOTSTRAP_NOT_PRIVILEGED;
7747 }
7748
7749 return runtime_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
7750 }
7751
7752 kern_return_t
7753 job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
7754 {
7755 struct ldcred *ldc = runtime_get_caller_creds();
7756
7757 if (!launchd_assumes(j != NULL)) {
7758 return BOOTSTRAP_NO_MEMORY;
7759 }
7760
7761 if (unlikely(ldc->euid)) {
7762 return BOOTSTRAP_NOT_PRIVILEGED;
7763 }
7764
7765 return runtime_log_drain(srp, outval, outvalCnt);
7766 }
7767
7768 kern_return_t
7769 job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
7770 {
7771 const char *action;
7772 launch_data_t input_obj = NULL, output_obj = NULL;
7773 size_t data_offset = 0;
7774 size_t packed_size;
7775 struct ldcred *ldc = runtime_get_caller_creds();
7776
7777 if (!launchd_assumes(j != NULL)) {
7778 return BOOTSTRAP_NO_MEMORY;
7779 }
7780 if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
7781 return BOOTSTRAP_NOT_PRIVILEGED;
7782 }
7783 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
7784 return 1;
7785 }
7786
7787 if (inkey && outkey) {
7788 action = "Swapping";
7789 } else if (inkey) {
7790 action = "Setting";
7791 } else {
7792 action = "Getting";
7793 }
7794
7795 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
7796
7797 *outvalCnt = 20 * 1024 * 1024;
7798 mig_allocate(outval, *outvalCnt);
7799 if (!job_assumes(j, *outval != 0)) {
7800 return 1;
7801 }
7802
7803 /* Note to future maintainers: launch_data_unpack() does NOT return a heap object. The data
7804 * is decoded in-place. So do not call launch_data_free() on input_obj.
7805 */
7806 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
7807 if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
7808 goto out_bad;
7809 }
7810
7811 switch (outkey) {
7812 case VPROC_GSK_ENVIRONMENT:
7813 if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
7814 goto out_bad;
7815 }
7816 jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
7817 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
7818 if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
7819 goto out_bad;
7820 }
7821 launch_data_free(output_obj);
7822 break;
7823 case VPROC_GSK_ALLJOBS:
7824 if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
7825 goto out_bad;
7826 }
7827 ipc_revoke_fds(output_obj);
7828 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
7829 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7830 if (!job_assumes(j, packed_size != 0)) {
7831 goto out_bad;
7832 }
7833 launch_data_free(output_obj);
7834 break;
7835 case VPROC_GSK_MGR_NAME:
7836 if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
7837 goto out_bad;
7838 }
7839 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7840 if (!job_assumes(j, packed_size != 0)) {
7841 goto out_bad;
7842 }
7843
7844 launch_data_free(output_obj);
7845 break;
7846 case VPROC_GSK_JOB_OVERRIDES_DB:
7847 if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_OVERRIDES))) != NULL)) {
7848 goto out_bad;
7849 }
7850 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7851 if (!job_assumes(j, packed_size != 0)) {
7852 goto out_bad;
7853 }
7854
7855 launch_data_free(output_obj);
7856 break;
7857 case VPROC_GSK_JOB_CACHE_DB:
7858 if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_JOBCACHE))) != NULL)) {
7859 goto out_bad;
7860 }
7861 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7862 if (!job_assumes(j, packed_size != 0)) {
7863 goto out_bad;
7864 }
7865
7866 job_log(j, LOG_DEBUG, "Location of job cache database: %s", launch_data_get_string(output_obj));
7867
7868 launch_data_free(output_obj);
7869 break;
7870 case 0:
7871 mig_deallocate(*outval, *outvalCnt);
7872 *outval = 0;
7873 *outvalCnt = 0;
7874 break;
7875 default:
7876 goto out_bad;
7877 }
7878
7879 if (invalCnt) switch (inkey) {
7880 case VPROC_GSK_ENVIRONMENT:
7881 if (launch_data_get_type(input_obj) == LAUNCH_DATA_DICTIONARY) {
7882 if (j->p) {
7883 job_log(j, LOG_INFO, "Setting environment for a currently active job. This environment will take effect on the next invocation of the job.");
7884 }
7885 launch_data_dict_iterate(input_obj, envitem_setup_one_shot, j);
7886 }
7887 break;
7888 case 0:
7889 break;
7890 default:
7891 goto out_bad;
7892 }
7893
7894 mig_deallocate(inval, invalCnt);
7895 return 0;
7896
7897 out_bad:
7898 mig_deallocate(inval, invalCnt);
7899 if (*outval) {
7900 mig_deallocate(*outval, *outvalCnt);
7901 }
7902 if (output_obj) {
7903 launch_data_free(output_obj);
7904 }
7905
7906 return 1;
7907 }
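
/* [Editorial sketch] The pack/unpack round trip performed at each end of the
 * MIG call above, highlighting the in-place property noted in the comment:
 * launch_data_unpack() aliases the input buffer rather than allocating, so
 * only the sender's heap object is ever launch_data_free()'d. The buffer
 * size is illustrative; launch_data_pack()/unpack() are the same private
 * launchd APIs used above.
 */
static void
pack_unpack_round_trip(void)
{
	launch_data_t dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
	launch_data_dict_insert(dict, launch_data_new_string("value"), "key");

	size_t bufsz = 4096;
	void *buf = malloc(bufsz);
	size_t packed = launch_data_pack(dict, buf, bufsz, NULL, NULL);
	launch_data_free(dict); /* the heap object; safe to free once packed */

	if (packed != 0) {
		size_t data_offset = 0;
		launch_data_t view = launch_data_unpack(buf, packed, NULL, 0, &data_offset, NULL);
		/* 'view' points into buf: do NOT launch_data_free() it */
		(void)view;
	}
	free(buf); /* freeing the buffer is what invalidates 'view' */
}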
7908
7909 kern_return_t
7910 job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
7911 {
7912 const char *action;
7913 kern_return_t kr = 0;
7914 struct ldcred *ldc = runtime_get_caller_creds();
7915 int oldmask;
7916
7917 if (!launchd_assumes(j != NULL)) {
7918 return BOOTSTRAP_NO_MEMORY;
7919 }
7920
7921 if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
7922 return BOOTSTRAP_NOT_PRIVILEGED;
7923 }
7924
7925 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
7926 return 1;
7927 }
7928
7929 if (inkey && outkey) {
7930 action = "Swapping";
7931 } else if (inkey) {
7932 action = "Setting";
7933 } else {
7934 action = "Getting";
7935 }
7936
7937 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
7938
7939 switch (outkey) {
7940 case VPROC_GSK_ABANDON_PROCESS_GROUP:
7941 *outval = j->abandon_pg;
7942 break;
7943 case VPROC_GSK_LAST_EXIT_STATUS:
7944 *outval = j->last_exit_status;
7945 break;
7946 case VPROC_GSK_MGR_UID:
7947 *outval = getuid();
7948 break;
7949 case VPROC_GSK_MGR_PID:
7950 *outval = getpid();
7951 break;
7952 case VPROC_GSK_IS_MANAGED:
7953 *outval = j->anonymous ? 0 : 1;
7954 break;
7955 case VPROC_GSK_BASIC_KEEPALIVE:
7956 *outval = !j->ondemand;
7957 break;
7958 case VPROC_GSK_START_INTERVAL:
7959 *outval = j->start_interval;
7960 break;
7961 case VPROC_GSK_IDLE_TIMEOUT:
7962 *outval = j->timeout;
7963 break;
7964 case VPROC_GSK_EXIT_TIMEOUT:
7965 *outval = j->exit_timeout;
7966 break;
7967 case VPROC_GSK_GLOBAL_LOG_MASK:
7968 oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
7969 *outval = oldmask;
7970 runtime_setlogmask(oldmask);
7971 break;
7972 case VPROC_GSK_GLOBAL_UMASK:
7973 oldmask = umask(0);
7974 *outval = oldmask;
7975 umask(oldmask);
7976 break;
7977 case VPROC_GSK_TRANSACTIONS_ENABLED:
7978 job_log(j, LOG_DEBUG, "Reading transaction model status.");
7979 *outval = j->kill_via_shmem;
7980 break;
7981 case VPROC_GSK_WAITFORDEBUGGER:
7982 *outval = j->wait4debugger;
7983 break;
7984 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
7985 *outval = j->embedded_special_privileges;
7986 break;
7987 case 0:
7988 *outval = 0;
7989 break;
7990 default:
7991 kr = 1;
7992 break;
7993 }
7994
7995 switch (inkey) {
7996 case VPROC_GSK_ABANDON_PROCESS_GROUP:
7997 j->abandon_pg = (bool)inval;
7998 break;
7999 case VPROC_GSK_GLOBAL_ON_DEMAND:
8000 job_log(j, LOG_DEBUG, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
8001 kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
8002 break;
8003 case VPROC_GSK_BASIC_KEEPALIVE:
8004 j->ondemand = !inval;
8005 break;
8006 case VPROC_GSK_START_INTERVAL:
8007 if (inval > UINT32_MAX || inval < 0) {
8008 kr = 1;
8009 } else if (inval) {
8010 if (j->start_interval == 0) {
8011 runtime_add_weak_ref();
8012 }
8013 j->start_interval = (typeof(j->start_interval)) inval;
8014 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
8015 } else if (j->start_interval) {
8016 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
8017 if (j->start_interval != 0) {
8018 runtime_del_weak_ref();
8019 }
8020 j->start_interval = 0;
8021 }
8022 break;
8023 case VPROC_GSK_IDLE_TIMEOUT:
8024 if (inval < 0 || inval > UINT32_MAX) {
8025 kr = 1;
8026 } else {
8027 j->timeout = (typeof(j->timeout)) inval;
8028 }
8029 break;
8030 case VPROC_GSK_EXIT_TIMEOUT:
8031 if (inval < 0 || inval > UINT32_MAX) {
8032 kr = 1;
8033 } else {
8034 j->exit_timeout = (typeof(j->exit_timeout)) inval;
8035 }
8036 break;
8037 case VPROC_GSK_GLOBAL_LOG_MASK:
8038 if (inval < 0 || inval > UINT32_MAX) {
8039 kr = 1;
8040 } else {
8041 runtime_setlogmask((int) inval);
8042 }
8043 break;
8044 case VPROC_GSK_GLOBAL_UMASK:
8045 launchd_assert(sizeof (mode_t) == 2);
8046 if (inval < 0 || inval > UINT16_MAX) {
8047 kr = 1;
8048 } else {
8049 #if HAVE_SANDBOX
8050 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8051 kr = 1;
8052 } else {
8053 umask((mode_t) inval);
8054 }
#else
		/* Bug fix: without sandbox support, this branch previously compiled
		 * to an empty statement and the new mask was silently never applied.
		 */
		umask((mode_t) inval);
8055 #endif
8056 }
8057 break;
8058 case VPROC_GSK_TRANSACTIONS_ENABLED:
8059 if (!job_assumes(j, inval != 0)) {
8060 job_log(j, LOG_WARNING, "Attempt to unregister from transaction model. This is not supported.");
8061 kr = 1;
8062 } else {
8063 j->kill_via_shmem = (bool)inval;
8064 }
8065 break;
8066 case VPROC_GSK_WEIRD_BOOTSTRAP:
8067 if (job_assumes(j, j->weird_bootstrap)) {
8068 job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
8069
8070 mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
8071
8072 if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
8073 mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
8074 }
8075
8076 (void)job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
8077 j->weird_bootstrap = false;
8078 }
8079 break;
8080 case VPROC_GSK_WAITFORDEBUGGER:
8081 j->wait4debugger_oneshot = inval;
8082 break;
8083 case VPROC_GSK_PERUSER_SUSPEND:
8084 if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
8085 mach_port_t junk = MACH_PORT_NULL;
8086 job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
8087 if (job_assumes(j, jpu != NULL)) {
8088 struct suspended_peruser *spi = NULL;
8089 LIST_FOREACH(spi, &j->suspended_perusers, sle) {
8090 if ((int64_t)(spi->j->mach_uid) == inval) {
8091 job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
8092 break;
8093 }
8094 }
8095
8096 if (spi == NULL) {
8097 job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
8098 spi = (struct suspended_peruser *)calloc(1, sizeof(struct suspended_peruser));
8099 if (job_assumes(j, spi != NULL)) {
8100 /* Stop listening for events.
8101 *
8102 * See <rdar://problem/9014146>.
8103 */
8104 if (jpu->peruser_suspend_count == 0) {
8105 job_ignore(jpu);
8106 }
8107
8108 spi->j = jpu;
8109 spi->j->peruser_suspend_count++;
8110 LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
8111 job_stop(spi->j);
8112 *outval = jpu->p;
8113 } else {
8114 kr = BOOTSTRAP_NO_MEMORY;
8115 }
8116 }
8117 }
8118 } else {
8119 kr = 1;
8120 }
8121 break;
8122 case VPROC_GSK_PERUSER_RESUME:
8123 if (job_assumes(j, pid1_magic == true)) {
8124 struct suspended_peruser *spi = NULL, *spt = NULL;
8125 LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
8126 if ((int64_t)(spi->j->mach_uid) == inval) {
8127 spi->j->peruser_suspend_count--;
8128 LIST_REMOVE(spi, sle);
8129 job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
8130 break;
8131 }
8132 }
8133
8134 if (!job_assumes(j, spi != NULL)) {
8135 job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
8136 kr = BOOTSTRAP_NOT_PRIVILEGED;
8137 } else if (spi->j->peruser_suspend_count == 0) {
8138 job_watch(spi->j);
8139 job_dispatch(spi->j, false);
8140 free(spi);
8141 }
8142 } else {
8143 kr = 1;
8144 }
8145 break;
8146 case 0:
8147 break;
8148 default:
8149 kr = 1;
8150 break;
8151 }
8152
8153 return kr;
8154 }
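
/* [Editorial sketch] The VPROC_GSK_GLOBAL_LOG_MASK and VPROC_GSK_GLOBAL_UMASK
 * getters above use the "set, capture, restore" idiom: neither umask(2) nor
 * the log mask has a pure read interface, so the current value can only be
 * observed by writing a throwaway value and immediately writing the old one
 * back. (Note this is briefly racy in a multithreaded process.) Isolated:
 */
static mode_t
read_umask(void)
{
	mode_t old = umask(0);	/* umask() returns the previous mask */
	(void)umask(old);	/* restore it right away */
	return old;
}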
8155
8156 kern_return_t
8157 job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
8158 {
8159 struct machservice *ms;
8160
8161 if (!launchd_assumes(j != NULL)) {
8162 return BOOTSTRAP_NO_MEMORY;
8163 }
8164
8165 job_log(j, LOG_DEBUG, "Post fork ping.");
8166
8167 job_setup_exception_port(j, child_task);
8168
8169 SLIST_FOREACH(ms, &special_ports, special_port_sle) {
8170 if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
8171 /* The TASK_ACCESS_PORT funny business is to work around 5325399. */
8172 continue;
8173 }
8174
8175 errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
8176
8177 if (unlikely(errno)) {
8178 int desired_log_level = LOG_ERR;
8179
8180 if (j->anonymous) {
8181 /* 5338127 */
8182
8183 desired_log_level = LOG_WARNING;
8184
8185 if (ms->special_port_num == TASK_SEATBELT_PORT) {
8186 desired_log_level = LOG_DEBUG;
8187 }
8188 }
8189
8190 job_log(j, desired_log_level, "Could not set up Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
8191 }
8192 }
8193
8194 /* MIG will not zero-initialize this pointer, so we must always do so. See
8195 * <rdar://problem/8562593>.
8196 */
8197 *asport = MACH_PORT_NULL;
8198 #if !TARGET_OS_EMBEDDED
8199 if (!j->anonymous) {
8200 /* XPC services will spawn into the root security session by default.
8201 * xpcproxy will switch them away if needed.
8202 */
8203 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
8204 job_log(j, LOG_DEBUG, "Returning j->asport: %u", j->asport);
8205 *asport = j->asport;
8206 }
8207 }
8208 #endif
8209 (void)job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);
8210
8211 return 0;
8212 }
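
/* [Editorial sketch] The *asport zero-initialization above follows a general
 * MIG rule: server routines receive reply (out) parameters as uninitialized
 * storage, so every path that returns a reply must first assign them a
 * defined value, or the reply message carries stack garbage. In miniature
 * (hypothetical routine, not part of launchd's MIG subsystem):
 */
static kern_return_t
example_mig_routine(bool have_port, mach_port_t some_port, mach_port_t *out_port)
{
	*out_port = MACH_PORT_NULL; /* always define the reply value first */
	if (have_port) {
		*out_port = some_port;
	}
	return KERN_SUCCESS;
}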
8213
8214 kern_return_t
8215 job_mig_reboot2(job_t j, uint64_t flags)
8216 {
8217 char who_started_the_reboot[2048] = "";
8218 struct proc_bsdshortinfo proc;
8219 struct ldcred *ldc = runtime_get_caller_creds();
8220 pid_t pid_to_log;
8221
8222 if (!launchd_assumes(j != NULL)) {
8223 return BOOTSTRAP_NO_MEMORY;
8224 }
8225
8226 if (unlikely(!pid1_magic)) {
8227 return BOOTSTRAP_NOT_PRIVILEGED;
8228 }
8229
8230 #if !TARGET_OS_EMBEDDED
8231 if (unlikely(ldc->euid)) {
8232 #else
8233 if (unlikely(ldc->euid) && !j->embedded_special_privileges) {
8234 #endif
8235 return BOOTSTRAP_NOT_PRIVILEGED;
8236 }
8237
8238 for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
8239 size_t who_offset;
8240 if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
8241 if (errno != ESRCH) {
8242 (void)job_assumes(j, errno == 0);
8243 }
8244 return 1;
8245 }
8246
8247 if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
8248 job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
8249 snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
8250 break;
8251 }
8252
8253 who_offset = strlen(who_started_the_reboot);
8254 snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
8255 " %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
8256 }
8257
8258 root_jobmgr->reboot_flags = (int)flags;
8259 job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
8260 launchd_shutdown();
8261
8262 return 0;
8263 }
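
/* [Editorial sketch] The attribution loop above, reduced to its core: each
 * proc_pidinfo(PROC_PIDT_SHORTBSDINFO) call fills in the parent PID, which
 * becomes the next subject until PID 0 (the kernel) is reached. Names here
 * are illustrative.
 */
static void
print_ancestry(pid_t pid)
{
	struct proc_bsdshortinfo info;
	for (; pid; pid = (pid_t)info.pbsi_ppid) {
		if (proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 1, &info, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			break; /* the process vanished mid-walk */
		}
		printf("%s[%u]%s", info.pbsi_comm, pid, info.pbsi_ppid ? " -> " : "\n");
		if (pid == (pid_t)info.pbsi_ppid) {
			break; /* guard against a self-parented loop, as above */
		}
	}
}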
8264
8265 kern_return_t
8266 job_mig_getsocket(job_t j, name_t spr)
8267 {
8268 if (!launchd_assumes(j != NULL)) {
8269 return BOOTSTRAP_NO_MEMORY;
8270 }
8271
8272 if (j->deny_job_creation) {
8273 return BOOTSTRAP_NOT_PRIVILEGED;
8274 }
8275
8276 #if HAVE_SANDBOX
8277 struct ldcred *ldc = runtime_get_caller_creds();
8278 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8279 return BOOTSTRAP_NOT_PRIVILEGED;
8280 }
8281 #endif
8282
8283 ipc_server_init();
8284
8285 if (unlikely(!sockpath)) {
8286 return BOOTSTRAP_NO_MEMORY;
8287 }
8288
8289 strncpy(spr, sockpath, sizeof(name_t));
8290
8291 return BOOTSTRAP_SUCCESS;
8292 }
8293
8294 kern_return_t
8295 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8296 {
8297 if (!launchd_assumes(j != NULL)) {
8298 return BOOTSTRAP_NO_MEMORY;
8299 }
8300
8301 if ((errno = err)) {
8302 job_log_error(j, pri, "%s", msg);
8303 } else {
8304 job_log(j, pri, "%s", msg);
8305 }
8306
8307 return 0;
8308 }
8309
8310 job_t
8311 jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
8312 {
8313 job_t ji = NULL;
8314 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8315 if (!ji->per_user) {
8316 continue;
8317 }
8318 if (ji->mach_uid != which_user) {
8319 continue;
8320 }
8321 if (SLIST_EMPTY(&ji->machservices)) {
8322 continue;
8323 }
8324 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8325 continue;
8326 }
8327 break;
8328 }
8329
8330 if (unlikely(ji == NULL)) {
8331 struct machservice *ms;
8332 char lbuf[1024];
8333
8334 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
8335
8336 snprintf(lbuf, sizeof(lbuf), "com.apple.launchd.peruser.%u", which_user);
8337
8338 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
8339
8340 if (ji != NULL) {
8341 auditinfo_addr_t auinfo = {
8342 .ai_termid = { .at_type = AU_IPv4 },
8343 .ai_auid = which_user,
8344 .ai_asid = AU_ASSIGN_ASID,
8345 };
8346
8347 if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8348 job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8349 (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8350
8351 /* Kinda lame that we have to do this, but we can't create an
8352 * audit session without joining it.
8353 */
8354 (void)job_assumes(ji, audit_session_join(g_audit_session_port));
8355 ji->asid = auinfo.ai_asid;
8356 } else {
8357 job_log(ji, LOG_WARNING, "Could not set audit session!");
8358 job_remove(ji);
8359 return NULL;
8360 }
8361
8362 ji->mach_uid = which_user;
8363 ji->per_user = true;
8364 ji->kill_via_shmem = true;
8365
8366 struct stat sb;
8367 char pu_db[PATH_MAX];
8368 snprintf(pu_db, sizeof(pu_db), LAUNCHD_DB_PREFIX "/%s", lbuf);
8369
8370 bool created = false;
8371 int err = stat(pu_db, &sb);
8372 if ((err == -1 && errno == ENOENT) || (err == 0 && !S_ISDIR(sb.st_mode))) {
8373 if (err == 0) {
8374 char move_aside[PATH_MAX];
8375 snprintf(move_aside, sizeof(move_aside), LAUNCHD_DB_PREFIX "/%s.movedaside", lbuf);
8376
8377 (void)job_assumes(ji, rename(pu_db, move_aside) != -1);
8378 }
8379
8380 (void)job_assumes(ji, mkdir(pu_db, S_IRWXU) != -1);
8381 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
8382 created = true;
8383 }
8384
8385 if (!created) {
8386 if (!job_assumes(ji, sb.st_uid == which_user)) {
8387 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
8388 }
8389 if (!job_assumes(ji, sb.st_gid == 0)) {
8390 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
8391 }
8392 if (!job_assumes(ji, sb.st_mode == (S_IRWXU | S_IFDIR))) {
8393 (void)job_assumes(ji, chmod(pu_db, S_IRWXU) != -1);
8394 }
8395 }
8396
8397 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8398 job_remove(ji);
8399 ji = NULL;
8400 } else {
8401 ms->per_user_hack = true;
8402 ms->hide = true;
8403
8404 ji = job_dispatch(ji, false);
8405 }
8406 }
8407 } else {
8408 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
8409 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8410 }
8411
8412 return ji;
8413 }
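
/* [Editorial sketch] The audit-session dance above, isolated for clarity:
 * setaudit_addr(2) creates the new session (AU_ASSIGN_ASID lets the kernel
 * pick the ASID) but moves the caller into it as a side effect, hence the
 * audit_session_join() to hop back afterward. 'original_session' stands in
 * for a send right naming the session being returned to (g_audit_session_port
 * in the code above); the other names are illustrative.
 */
static bool
make_session_for_uid(uid_t uid, mach_port_t original_session, mach_port_t *new_session)
{
	auditinfo_addr_t auinfo = {
		.ai_termid = { .at_type = AU_IPv4 },
		.ai_auid = uid,
		.ai_asid = AU_ASSIGN_ASID,
	};

	if (setaudit_addr(&auinfo, sizeof(auinfo)) != 0) {
		return false;
	}

	*new_session = audit_session_self(); /* send right naming the new session */
	(void)audit_session_join(original_session); /* rejoin the original session */
	return true;
}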
8414
8415 kern_return_t
8416 job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
8417 {
8418 struct ldcred *ldc = runtime_get_caller_creds();
8419 job_t jpu;
8420
8421 #if TARGET_OS_EMBEDDED
8422 /* There is no need for per-user launchds on embedded. */
8423 job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
8424 return BOOTSTRAP_NOT_PRIVILEGED;
8425 #endif
8426
8427 #if HAVE_SANDBOX
8428 if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
8429 return BOOTSTRAP_NOT_PRIVILEGED;
8430 }
8431 #endif
8432
8433 if (!launchd_assumes(j != NULL)) {
8434 return BOOTSTRAP_NO_MEMORY;
8435 }
8436
8437 job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
8438
8439 if (unlikely(!pid1_magic)) {
8440 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
8441 return BOOTSTRAP_NOT_PRIVILEGED;
8442 }
8443
8444 if (ldc->euid || ldc->uid) {
8445 which_user = ldc->euid ?: ldc->uid;
8446 }
8447
8448 *up_cont = MACH_PORT_NULL;
8449
8450 jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);
8451
8452 return 0;
8453 }
8454
8455 kern_return_t
8456 job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
8457 {
8458 bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
8459 bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
8460 struct ldcred *ldc = runtime_get_caller_creds();
8461 struct machservice *ms = NULL;
8462 job_t jo;
8463
8464 if (!launchd_assumes(j != NULL)) {
8465 return BOOTSTRAP_NO_MEMORY;
8466 }
8467
8468 if (j->dedicated_instance) {
8469 struct machservice *msi = NULL;
8470 SLIST_FOREACH(msi, &j->machservices, sle) {
8471 if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
8472 uuid_copy(instance_id, j->instance_id);
8473 ms = msi;
8474 break;
8475 }
8476 }
8477 } else {
8478 ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
8479 }
8480
8481 if (strict) {
8482 if (likely(ms != NULL)) {
8483 if (ms->job != j) {
8484 return BOOTSTRAP_NOT_PRIVILEGED;
8485 } else if (ms->isActive) {
8486 return BOOTSTRAP_SERVICE_ACTIVE;
8487 }
8488 } else {
8489 return BOOTSTRAP_UNKNOWN_SERVICE;
8490 }
8491 } else if (ms == NULL) {
8492 if (job_assumes(j, !j->dedicated_instance)) {
8493 *serviceportp = MACH_PORT_NULL;
8494
8495 if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
8496 return BOOTSTRAP_NO_MEMORY;
8497 }
8498
8499 /* Treat this like a legacy job. */
8500 if (!j->legacy_mach_job) {
8501 ms->isActive = true;
8502 ms->recv = false;
8503 }
8504
8505 if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
8506 job_log(j, LOG_SCOLDING, "Please add the following service to the configuration file for this job: %s", servicename);
8507 }
8508 } else {
8509 return BOOTSTRAP_UNKNOWN_SERVICE;
8510 }
8511 } else {
8512 if (unlikely((jo = machservice_job(ms)) != j)) {
8513 static pid_t last_warned_pid;
8514
8515 if (last_warned_pid != ldc->pid) {
8516 job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
8517 last_warned_pid = ldc->pid;
8518 }
8519
8520 return BOOTSTRAP_NOT_PRIVILEGED;
8521 }
8522 if (unlikely(machservice_active(ms))) {
8523 job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
8524 return BOOTSTRAP_SERVICE_ACTIVE;
8525 }
8526 }
8527
8528 job_checkin(j);
8529 machservice_request_notifications(ms);
8530
8531 job_log(j, LOG_INFO, "Check-in of service: %s", servicename);
8532
8533 *serviceportp = machservice_port(ms);
8534 return BOOTSTRAP_SUCCESS;
8535 }
8536
8537 kern_return_t
8538 job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
8539 {
8540 struct machservice *ms;
8541 struct ldcred *ldc = runtime_get_caller_creds();
8542
8543 if (!launchd_assumes(j != NULL)) {
8544 return BOOTSTRAP_NO_MEMORY;
8545 }
8546
8547 if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
8548 job_log(j, LOG_SCOLDING, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
8549 }
8550
8551 job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
8552
8553 /* 5641783 for the embedded hack */
8554 #if !TARGET_OS_EMBEDDED
8555 /*
8556 * From a per-user/session launchd's perspective, SecurityAgent (UID
8557 * 92) is a rogue application (not our UID, not root and not a child of
8558 * us). We'll have to reconcile this design friction at a later date.
8559 */
8560 if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
8561 if (pid1_magic) {
8562 return VPROC_ERR_TRY_PER_USER;
8563 } else {
8564 return BOOTSTRAP_NOT_PRIVILEGED;
8565 }
8566 }
8567 #endif
8568
8569 ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);
8570
8571 if (unlikely(ms)) {
8572 if (machservice_job(ms) != j) {
8573 return BOOTSTRAP_NOT_PRIVILEGED;
8574 }
8575 if (machservice_active(ms)) {
8576 job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
8577 return BOOTSTRAP_SERVICE_ACTIVE;
8578 }
8579 if (ms->recv && (serviceport != MACH_PORT_NULL)) {
8580 job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
8581 return BOOTSTRAP_NOT_PRIVILEGED;
8582 }
8583 job_checkin(j);
8584 machservice_delete(j, ms, false);
8585 }
8586
8587 if (likely(serviceport != MACH_PORT_NULL)) {
8588 if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
8589 machservice_request_notifications(ms);
8590 } else {
8591 return BOOTSTRAP_NO_MEMORY;
8592 }
8593 }
8594
8596 return BOOTSTRAP_SUCCESS;
8597 }
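
/* [Editorial sketch] The client-side distinction the error path above is
 * policing: a job whose launchd.plist declares a MachServices entry should
 * claim the receive right with bootstrap_check_in(), while
 * bootstrap_register() only hands launchd a send right to a port the client
 * made itself (and is deprecated, per the log message above). Assumes a
 * client built against <servers/bootstrap.h>; names are illustrative.
 */
static mach_port_t
claim_declared_service(const char *name)
{
	mach_port_t port = MACH_PORT_NULL;
	/* correct path: check in for a service launchd already knows about */
	kern_return_t kr = bootstrap_check_in(bootstrap_port, name, &port);
	return (kr == BOOTSTRAP_SUCCESS) ? port : MACH_PORT_NULL;
}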
8598
8599 kern_return_t
8600 job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
8601 {
8602 struct machservice *ms = NULL;
8603 struct ldcred *ldc = runtime_get_caller_creds();
8604 kern_return_t kr;
8605 bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
8606 bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
8607 bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
8608 bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;
8609
8610 if (!launchd_assumes(j != NULL)) {
8611 return BOOTSTRAP_NO_MEMORY;
8612 }
8613
8614 bool xpc_req = j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN;
8615
8616 /* 5641783 for the embedded hack */
8617 #if !TARGET_OS_EMBEDDED
8618 if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
8619 return VPROC_ERR_TRY_PER_USER;
8620 }
8621 #endif
8622
8623 #if HAVE_SANDBOX
8624 /* We don't do sandbox checking for XPC domains because, by definition, all
8625 * the services within your domain should be accessible to you.
8626 */
8627 if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
8628 return BOOTSTRAP_NOT_PRIVILEGED;
8629 }
8630 #endif
8631
8632 if (per_pid_lookup) {
8633 ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
8634 } else {
8635 if (xpc_req) {
8636 /* Requests from XPC domains stay local. */
8637 ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
8638 } else {
8639 /* A strict lookup which is privileged won't even bother trying to
8640 * find a service if we're not hosting the root Mach bootstrap.
8641 */
8642 if (strict_lookup && privileged) {
8643 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8644 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
8645 }
8646 } else {
8647 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
8648 }
8649 }
8650 }
8651
8652 if (likely(ms)) {
8653 ms = ms->alias ? ms->alias : ms;
8654 if (unlikely(specific_instance && ms->job->multiple_instances)) {
8655 job_t ji = NULL;
8656 job_t instance = NULL;
8657 LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
8658 if (uuid_compare(instance_id, ji->instance_id) == 0) {
8659 instance = ji;
8660 break;
8661 }
8662 }
8663
8664 if (unlikely(instance == NULL)) {
8665 job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
8666 instance = job_new_subjob(ms->job, instance_id);
8667 if (job_assumes(j, instance != NULL)) {
8668 /* Disable this support for now. We only support having
8669 * multi-instance jobs within private XPC domains.
8670 */
8671 #if 0
8672 /* If the job is multi-instance, in a singleton XPC domain
8673 * and the request is not coming from within that singleton
8674 * domain, we need to alias the new job into the requesting
8675 * domain.
8676 */
8677 if (!j->mgr->xpc_singleton && xpc_req) {
8678 (void)job_assumes(instance, job_new_alias(j->mgr, instance));
8679 }
8680 #endif
8681 job_dispatch(instance, false);
8682 }
8683 }
8684
8685 ms = NULL;
8686 if (job_assumes(j, instance != NULL)) {
8687 struct machservice *msi = NULL;
8688 SLIST_FOREACH(msi, &instance->machservices, sle) {
8689 /* sizeof(servicename) will return the size of a pointer, even though it's
8690 * an array type, because when passing arrays as parameters in C, they
8691 * implicitly decay to pointers.
8692 */
8693 if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
8694 ms = msi;
8695 break;
8696 }
8697 }
8698 }
8699 } else {
8700 if (machservice_hidden(ms) && !machservice_active(ms)) {
8701 ms = NULL;
8702 } else if (unlikely(ms->per_user_hack)) {
8703 ms = NULL;
8704 }
8705 }
8706 }
8707
8708 if (likely(ms)) {
8709 (void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
8710 job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
8711
8712 if (unlikely(!per_pid_lookup && j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user)) {
8713 /* we need to think more about the per_pid_lookup logic before logging about repeated lookups */
8714 job_log(j, LOG_DEBUG, "Performance: Please fix the framework that talks to \"%s\" to cache the Mach port for service: %s", ms->job->label, servicename);
8715 }
8716
8717 j->lastlookup = ms;
8718 j->lastlookup_gennum = ms->gen_num;
8719
8720 *serviceportp = machservice_port(ms);
8721
8722 kr = BOOTSTRAP_SUCCESS;
8723 } else if (strict_lookup && !privileged) {
8724 /* Hack: We need to simulate XPC's desire not to establish a hierarchy. So if
8725 * XPC is doing the lookup, and it's not a privileged lookup, we won't forward.
8726 * But if it is a privileged lookup (that is, was looked up in XPC_DOMAIN_LOCAL_SYSTEM)
8727 * then we must forward.
8728 */
8729 return BOOTSTRAP_UNKNOWN_SERVICE;
8730 } else if (inherited_bootstrap_port != MACH_PORT_NULL) {
8731 /* Requests from within an XPC domain don't get forwarded. */
8732 job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
8733 /* Clients potentially check the audit token of the reply to verify that the returned send right is trustworthy. */
8734 (void)job_assumes(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags) == 0);
8735 /* The previous routine moved the reply port, so we are forced to return MIG_NO_REPLY now */
8736 return MIG_NO_REPLY;
8737 } else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
8738 /*
8739 * 5240036 Should start background session when a lookup of CCacheServer occurs
8740 *
8741 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
8742 * If we find a EUID that isn't root, we force it over to the per-user context.
8743 */
8744 return VPROC_ERR_TRY_PER_USER;
8745 } else {
8746 job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
8747 kr = BOOTSTRAP_UNKNOWN_SERVICE;
8748 }
8749
8750 return kr;
8751 }
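
/* [Editorial sketch] Backing up the comment above about array parameters:
 * inside a function, an array-typed parameter has decayed to a pointer, so
 * sizeof() yields the pointer size (4 or 8), not the declared array size.
 * That is why the code compares against sizeof(name_t) - 1 explicitly.
 * 'demo_name_t' is invented for illustration.
 */
typedef char demo_name_t[128];

static void
sizeof_decay_demo(demo_name_t servicename)
{
	/* 'servicename' is really a char *, so these two values differ: */
	size_t param_size = sizeof(servicename);	/* == sizeof(char *) */
	size_t type_size = sizeof(demo_name_t);	/* == 128 */
	printf("param: %zu, type: %zu\n", param_size, type_size);
}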
8752
8753 kern_return_t
8754 job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
8755 {
8756 if (!launchd_assumes(j != NULL)) {
8757 return BOOTSTRAP_NO_MEMORY;
8758 }
8759
8760 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
8761 jobmgr_t jm = j->mgr;
8762
8763 if (jobmgr_parent(jm)) {
8764 *parentport = jobmgr_parent(jm)->jm_port;
8765 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
8766 *parentport = jm->jm_port;
8767 } else {
8768 (void)job_assumes(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp) == 0);
8769 /* The previous routine moved the reply port, so we are forced to return MIG_NO_REPLY now */
8770 return MIG_NO_REPLY;
8771 }
8772 return BOOTSTRAP_SUCCESS;
8773 }
8774
8775 kern_return_t
8776 job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
8777 {
8778 if (!j) {
8779 return BOOTSTRAP_NO_MEMORY;
8780 }
8781
8782 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8783 *rootbsp = root_jobmgr->jm_port;
8784 (void)job_assumes(j, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
8785 } else {
8786 *rootbsp = inherited_bootstrap_port;
8787 (void)job_assumes(j, launchd_mport_copy_send(inherited_bootstrap_port) == KERN_SUCCESS);
8788 }
8789
8790 return BOOTSTRAP_SUCCESS;
8791 }
8792
8793 kern_return_t
8794 job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt, name_array_t *servicejobsp, unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt, uint64_t flags)
8795 {
8796 name_array_t service_names = NULL;
8797 name_array_t service_jobs = NULL;
8798 bootstrap_status_array_t service_actives = NULL;
8799 unsigned int cnt = 0, cnt2 = 0;
8800 jobmgr_t jm;
8801
8802 if (!launchd_assumes(j != NULL)) {
8803 return BOOTSTRAP_NO_MEMORY;
8804 }
8805
8806 if (g_flat_mach_namespace) {
8807 if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
8808 jm = j->mgr;
8809 } else {
8810 jm = root_jobmgr;
8811 }
8812 } else {
8813 jm = j->mgr;
8814 }
8815
8816 unsigned int i = 0;
8817 struct machservice *msi = NULL;
8818 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8819 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
8820 cnt += !msi->per_pid ? 1 : 0;
8821 }
8822 }
8823
8824 if (cnt == 0) {
8825 goto out;
8826 }
8827
8828 mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
8829 if (!job_assumes(j, service_names != NULL)) {
8830 goto out_bad;
8831 }
8832
8833 mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
8834 if (!job_assumes(j, service_jobs != NULL)) {
8835 goto out_bad;
8836 }
8837
8838 mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
8839 if (!job_assumes(j, service_actives != NULL)) {
8840 goto out_bad;
8841 }
8842
8843 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8844 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
8845 if (!msi->per_pid) {
8846 strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
8847 msi = msi->alias ? msi->alias : msi;
8848 if (msi->job->mgr->shortdesc) {
8849 strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
8850 } else {
8851 strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
8852 }
8853 service_actives[cnt2] = machservice_status(msi);
8854 cnt2++;
8855 }
8856 }
8857 }
8858
8859 (void)job_assumes(j, cnt == cnt2);
8860
8861 out:
8862 *servicenamesp = service_names;
8863 *servicejobsp = service_jobs;
8864 *serviceactivesp = service_actives;
8865 *servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;
8866
8867 return BOOTSTRAP_SUCCESS;
8868
8869 out_bad:
8870 if (service_names) {
8871 mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
8872 }
8873 if (service_jobs) {
8874 mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
8875 }
8876 if (service_actives) {
8877 mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
8878 }
8879
8880 return BOOTSTRAP_NO_MEMORY;
8881 }
8882
8883 kern_return_t
8884 job_mig_lookup_children(job_t j, mach_port_array_t *child_ports, mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names, mach_msg_type_number_t *child_names_cnt, bootstrap_property_array_t *child_properties, mach_msg_type_number_t *child_properties_cnt)
8885 {
8886 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
8887 if (!launchd_assumes(j != NULL)) {
8888 return BOOTSTRAP_NO_MEMORY;
8889 }
8890
8891 struct ldcred *ldc = runtime_get_caller_creds();
8892
8893 /* Only allow root processes to look up children, even if we're in the per-user launchd.
8894 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
8895 * in a non-flat namespace.
8896 */
8897 if (ldc->euid != 0) {
8898 job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
8899 return BOOTSTRAP_NOT_PRIVILEGED;
8900 }
8901
8902 unsigned int cnt = 0;
8903
8904 jobmgr_t jmr = j->mgr;
8905 jobmgr_t jmi = NULL;
8906 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8907 cnt++;
8908 }
8909
8910 /* Find our per-user launchds if we're PID 1. */
8911 job_t ji = NULL;
8912 if (pid1_magic) {
8913 LIST_FOREACH(ji, &jmr->jobs, sle) {
8914 cnt += ji->per_user ? 1 : 0;
8915 }
8916 }
8917
8918 if (cnt == 0) {
8919 return BOOTSTRAP_NO_CHILDREN;
8920 }
8921
	/* Bug fix: all three buffers are declared before the first allocation.
	 * The original declared each one immediately before its own allocation,
	 * so an early "goto out_bad" jumped over the later declarations and the
	 * cleanup code then tested pointers with indeterminate values.
	 */
8922 mach_port_array_t _child_ports = NULL;
name_array_t _child_names = NULL;
bootstrap_property_array_t _child_properties = NULL;

8923 mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
8924 if (!job_assumes(j, _child_ports != NULL)) {
8925 kr = BOOTSTRAP_NO_MEMORY;
8926 goto out_bad;
8927 }
8928
8930 mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
8931 if (!job_assumes(j, _child_names != NULL)) {
8932 kr = BOOTSTRAP_NO_MEMORY;
8933 goto out_bad;
8934 }
8935
8937 mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
8938 if (!job_assumes(j, _child_properties != NULL)) {
8939 kr = BOOTSTRAP_NO_MEMORY;
8940 goto out_bad;
8941 }
8942
8943 unsigned int cnt2 = 0;
8944 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8945 if (jobmgr_assumes(jmi, launchd_mport_make_send(jmi->jm_port) == KERN_SUCCESS)) {
8946 _child_ports[cnt2] = jmi->jm_port;
8947 } else {
8948 _child_ports[cnt2] = MACH_PORT_NULL;
8949 }
8950
8951 strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
8952 _child_properties[cnt2] = jmi->properties;
8953
8954 cnt2++;
8955 }
8956
8957 if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
8958 if (ji->per_user) {
8959 if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
8960 mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));
8961
8962 if (job_assumes(ji, launchd_mport_copy_send(port) == KERN_SUCCESS)) {
8963 _child_ports[cnt2] = port;
8964 } else {
8965 _child_ports[cnt2] = MACH_PORT_NULL;
8966 }
8967 } else {
8968 _child_ports[cnt2] = MACH_PORT_NULL;
8969 }
8970
8971 strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
8972 _child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;
8973
8974 cnt2++;
8975 }
8976 }
8977
8978 *child_names_cnt = cnt;
8979 *child_ports_cnt = cnt;
8980 *child_properties_cnt = cnt;
8981
8982 *child_names = _child_names;
8983 *child_ports = _child_ports;
8984 *child_properties = _child_properties;
8985
8986 unsigned int i = 0;
8987 for (i = 0; i < cnt; i++) {
8988 job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
8989 }
8990
8991 return BOOTSTRAP_SUCCESS;
8992 out_bad:
8993 if (_child_ports) {
8994 mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
8995 }
8996
8997 if (_child_names) {
8998 mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
8999 }
9000
9001 if (_child_properties) {
9002 mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
9003 }
9004
9005 return kr;
9006 }
9007
9008 kern_return_t
9009 job_mig_transaction_count_for_pid(job_t j, pid_t p, int32_t *cnt, boolean_t *condemned)
9010 {
9011 if (!j) {
9012 return BOOTSTRAP_NO_MEMORY;
9013 }
9014
9015 kern_return_t kr = KERN_FAILURE;
9016 struct ldcred *ldc = runtime_get_caller_creds();
9017 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
9018 return BOOTSTRAP_NOT_PRIVILEGED;
9019 }
9020
9021 job_t j_for_pid = jobmgr_find_by_pid_deep(j->mgr, p, false);
9022 if (j_for_pid) {
9023 if (j_for_pid->kill_via_shmem) {
9024 if (j_for_pid->shmem) {
9025 *cnt = j_for_pid->shmem->vp_shmem_transaction_cnt;
9026 *condemned = j_for_pid->shmem->vp_shmem_flags & VPROC_SHMEM_EXITING;
9027 *cnt += *condemned ? 1 : 0;
9028 } else {
9029 *cnt = 0;
9030 *condemned = false;
9031 }
9032
9033 kr = BOOTSTRAP_SUCCESS;
9034 } else {
9035 kr = BOOTSTRAP_NO_MEMORY;
9036 }
9037 } else {
9038 kr = BOOTSTRAP_UNKNOWN_SERVICE;
9039 }
9040
9041 return kr;
9042 }
9043
9044 kern_return_t
9045 job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
9046 {
9047 struct ldcred *ldc = runtime_get_caller_creds();
9048 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
9049 return BOOTSTRAP_NOT_PRIVILEGED;
9050 }
9051
	/* Bug fix: MIG does not zero-initialize reply parameters (see the
	 * *asport note in job_mig_post_fork_ping above), so define the negative
	 * answer before the lookup.
	 */
	*managed = false;

9052 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
9053 * directly by launchd as agents.
9054 */
9055 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
9056 if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
9057 *managed = true;
9058 }
9059
9060 return BOOTSTRAP_SUCCESS;
9061 }
9062
9063 kern_return_t
9064 job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
9065 {
9066 if (!j) {
9067 return BOOTSTRAP_NO_MEMORY;
9068 }
9069
9070 struct ldcred *ldc = runtime_get_caller_creds();
9071 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
9072
9073 #if HAVE_SANDBOX
9074 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
9075 return BOOTSTRAP_NOT_PRIVILEGED;
9076 }
9077 #endif
9078
9079 mach_port_t _mp = MACH_PORT_NULL;
9080 if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
9081 job_t target_j = job_find(NULL, label);
9082 if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
9083 if (target_j->j_port == MACH_PORT_NULL) {
9084 (void)job_assumes(target_j, job_setup_machport(target_j) == true);
9085 }
9086
9087 _mp = target_j->j_port;
9088 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
9089 } else {
9090 kr = BOOTSTRAP_NO_MEMORY;
9091 }
9092 }
9093
9094 *mp = _mp;
9095 return kr;
9096 }
9097
9098 #if !TARGET_OS_EMBEDDED
9099 kern_return_t
9100 job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
9101 {
9102 if (!j) {
9103 return BOOTSTRAP_NO_MEMORY;
9104 }
9105
9106 uuid_string_t uuid_str;
9107 uuid_unparse(uuid, uuid_str);
9108 job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);
9109
9110 job_t ji = NULL, jt = NULL;
9111 LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
9112 uuid_string_t uuid_str2;
9113 uuid_unparse(ji->expected_audit_uuid, uuid_str2);
9114
9115 if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
9116 uuid_clear(ji->expected_audit_uuid);
9117 if (asport != MACH_PORT_NULL) {
9118 job_log(ji, LOG_DEBUG, "Job should join session with port %u", asport);
9119 (void)job_assumes(j, launchd_mport_copy_send(asport) == KERN_SUCCESS);
9120 } else {
9121 job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
9122 }
9123
9124 ji->asport = asport;
9125 LIST_REMOVE(ji, needing_session_sle);
9126 job_dispatch(ji, false);
9127 }
9128 }
9129
9130 /* Each job that the session port was set for holds a reference. At the end of
9131 * the loop, there will be one extra reference belonging to this MiG protocol.
9132 * We need to release it so that the session goes away when all the jobs
9133 * referencing it are unloaded.
9134 */
9135 (void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
9136
9137 return KERN_SUCCESS;
9138 }
9139 #else
9140 kern_return_t
9141 job_mig_set_security_session(job_t j __attribute__((unused)), uuid_t uuid __attribute__((unused)), mach_port_t session __attribute__((unused)))
9142 {
9143 return KERN_SUCCESS;
9144 }
9145 #endif
9146
9147 jobmgr_t
9148 jobmgr_find_by_name(jobmgr_t jm, const char *where)
9149 {
9150 jobmgr_t jmi, jmi2;
9151
9152 /* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
9153 if (where == NULL) {
9154 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9155 where = VPROCMGR_SESSION_LOGINWINDOW;
9156 } else {
9157 where = VPROCMGR_SESSION_AQUA;
9158 }
9159 }
9160
9161 if (strcasecmp(jm->name, where) == 0) {
9162 return jm;
9163 }
9164
9165 if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
9166 jmi = root_jobmgr;
9167 goto jm_found;
9168 }
9169
9170 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9171 if (unlikely(jmi->shutting_down)) {
9172 continue;
9173 } else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
9174 continue;
9175 } else if (strcasecmp(jmi->name, where) == 0) {
9176 goto jm_found;
9177 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
9178 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
9179 if (strcasecmp(jmi2->name, where) == 0) {
9180 jmi = jmi2;
9181 goto jm_found;
9182 }
9183 }
9184 }
9185 }
9186
9187 jm_found:
9188 return jmi;
9189 }
9190
9191 kern_return_t
9192 job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
9193 {
9194 mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
9195 mach_port_array_t l2l_ports = NULL;
9196 mach_port_t reqport, rcvright;
9197 kern_return_t kr = 1;
9198 launch_data_t out_obj_array = NULL;
9199 struct ldcred *ldc = runtime_get_caller_creds();
9200 jobmgr_t jmr = NULL;
9201
9202 if (!launchd_assumes(j != NULL)) {
9203 return BOOTSTRAP_NO_MEMORY;
9204 }
9205
9206 if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
9207 job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");
9208
9209 kr = BOOTSTRAP_NOT_PRIVILEGED;
9210 goto out;
9211 }
9212
9213 job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);
9214
9215 kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
9216
9217 if (!job_assumes(j, kr == 0)) {
9218 goto out;
9219 }
9220
9221 launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);
9222
9223 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
9224 kr = BOOTSTRAP_NO_MEMORY;
9225 goto out;
9226 }
9227
9228 jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;
9229
9230 /* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
9231 * processing an IPC request, we'll do this action before the new job manager can get any IPC
9232 * requests. This serialization is guaranteed since we are single-threaded in that respect.
9233 */
9234 if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
9235 /* This is so awful. */
9236 /* Remove the job from its current job manager. */
9237 LIST_REMOVE(j, sle);
9238 LIST_REMOVE(j, pid_hash_sle);
9239
9240 /* Put the job into the target job manager. */
9241 LIST_INSERT_HEAD(&jmr->jobs, j, sle);
9242 LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
9243
9244 j->mgr = jmr;
9245 job_set_global_on_demand(j, true);
9246
9247 if (!j->holds_ref) {
9248 j->holds_ref = true;
9249 runtime_add_ref();
9250 }
9251 }
9252
9253 for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
9254 launch_data_t tmp, obj_at_idx;
9255 struct machservice *ms;
9256 job_t j_for_service;
9257 const char *serv_name;
9258 pid_t target_pid;
9259 bool serv_perpid;
9260
9261 (void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
9262 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
9263 target_pid = (pid_t)launch_data_get_integer(tmp);
9264 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
9265 serv_perpid = launch_data_get_bool(tmp);
9266 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
9267 serv_name = launch_data_get_string(tmp);
9268
9269 j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);
9270
9271 if (unlikely(!j_for_service)) {
9272 /* The PID probably exited */
9273 (void)job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
9274 continue;
9275 }
9276
9277 if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
9278 job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
9279 machservice_request_notifications(ms);
9280 }
9281 }
9282
9283 kr = 0;
9284
9285 out:
9286 if (out_obj_array) {
9287 launch_data_free(out_obj_array);
9288 }
9289
9290 if (l2l_ports) {
9291 mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
9292 }
9293
9294 if (kr == 0) {
9295 if (target_subset) {
9296 (void)job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
9297 }
9298 if (asport) {
9299 (void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
9300 }
9301 } else if (jmr) {
9302 jobmgr_shutdown(jmr);
9303 }
9304
9305 return kr;
9306 }
9307
9308 kern_return_t
9309 job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
9310 {
9311 if (!j) {
9312 return BOOTSTRAP_NO_MEMORY;
9313 }
9314
9315 job_t j2;
9316
9317 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9318 if (j->mgr->session_initialized) {
9319 job_log(j, LOG_ERR, "Tried to initialize an already-initialized session!");
9320 kr = BOOTSTRAP_NOT_PRIVILEGED;
9321 } else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9322 jobmgr_t jmi;
9323
9324 /*
9325 * 5330262
9326 *
9327 * We're working around LoginWindow and the WindowServer.
9328 *
9329 * In practice, there is only one LoginWindow session. Unfortunately, for certain
9330 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
9331 * spawns a replacement loginwindow session before cleaning up the previous one.
9332 *
9333 * We're going to use the creation of a new LoginWindow context as a clue that the
9334 * previous LoginWindow context is on the way out and therefore we should just
9335 * kick-start the shutdown of it.
9336 */
9337
9338 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9339 if (unlikely(jmi->shutting_down)) {
9340 continue;
9341 } else if (strcasecmp(jmi->name, session_type) == 0) {
9342 jobmgr_shutdown(jmi);
9343 break;
9344 }
9345 }
9346 }
9347
9348 jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
9349 strcpy(j->mgr->name_init, session_type);
9350
9351 if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
9352 j2->asport = asport;
9353 (void)job_assumes(j, job_dispatch(j2, true));
9354 kr = BOOTSTRAP_SUCCESS;
9355 }
9356
9357 return kr;
9358 }
9359
9360 kern_return_t
9361 job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
9362 {
9363 struct ldcred *ldc = runtime_get_caller_creds();
9364 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9365 jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
9366 return BOOTSTRAP_NO_MEMORY;
9367 }
9368
9369 if (j->mgr->shutting_down) {
9370 return BOOTSTRAP_UNKNOWN_SERVICE;
9371 }
9372
9373 job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);
9374
9375 if (!job_assumes(j, pid1_magic == false)) {
9376 job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
9377 return BOOTSTRAP_NOT_PRIVILEGED;
9378 }
9379
9380 if (!j->anonymous) {
9381 job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
9382 return BOOTSTRAP_NOT_PRIVILEGED;
9383 }
9384
9385 jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
9386 if (target_jm == j->mgr) {
9387 job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
9388 *new_bsport = target_jm->jm_port;
9389 return BOOTSTRAP_SUCCESS;
9390 }
9391
9392 if (!target_jm) {
9393 target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
9394 if (target_jm) {
9395 target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
9396 (void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
9397 }
9398 }
9399
9400 if (!job_assumes(j, target_jm != NULL)) {
9401 job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
9402 return BOOTSTRAP_NO_MEMORY;
9403 }
9404
9405 /* Remove the job from its current job manager. */
9406 LIST_REMOVE(j, sle);
9407 LIST_REMOVE(j, pid_hash_sle);
9408
9409 job_t ji = NULL, jit = NULL;
9410 LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
9411 if (ji == j) {
9412 LIST_REMOVE(ji, global_env_sle);
9413 break;
9414 }
9415 }
9416
9417 /* Put the job into the target job manager. */
9418 LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
9419 LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
9420
9421 if (ji) {
9422 LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
9423 }
9424
9425 /* Move our Mach services over if we're not in a flat namespace. */
9426 if (!g_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
9427 struct machservice *msi = NULL, *msit = NULL;
9428 SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
9429 LIST_REMOVE(msi, name_hash_sle);
9430 LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
9431 }
9432 }
9433
9434 j->mgr = target_jm;
9435
9436 if (!j->holds_ref) {
9437 /* Anonymous jobs which move around are particularly interesting to us, so we want to
9438 * stick around while they're still around.
9439 * For example, login calls into the PAM launchd module, which moves the process into
9440 * the StandardIO session by default. So we'll hold a reference on that job to prevent
9441 * ourselves from going away.
9442 */
9443 j->holds_ref = true;
9444 runtime_add_ref();
9445 }
9446
9447 *new_bsport = target_jm->jm_port;
9448
9449 return KERN_SUCCESS;
9450 }
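
/* The LIST_REMOVE()/LIST_INSERT_HEAD() pairing above is the standard
 * <sys/queue.h> idiom for re-parenting an element between lists without
 * copying it: the element carries its own linkage. A self-contained sketch
 * with hypothetical types (compiled out):
 */
#if 0
#include <sys/queue.h>

struct node {
	int value;
	LIST_ENTRY(node) link;
};

LIST_HEAD(node_list, node);

/* Unhook n from whatever list it is currently on and push it onto dst. */
static void
move_node(struct node_list *dst, struct node *n)
{
	LIST_REMOVE(n, link);
	LIST_INSERT_HEAD(dst, n, link);
}
#endif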
9451
9452 kern_return_t
9453 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
9454 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
9455 mach_port_array_t *portsp, unsigned int *ports_cnt)
9456 {
9457 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
9458 mach_port_array_t ports = NULL;
9459 unsigned int cnt = 0, cnt2 = 0;
9460 size_t packed_size;
9461 struct machservice *ms;
9462 jobmgr_t jm;
9463 job_t ji;
9464
9465 if (!launchd_assumes(j != NULL)) {
9466 return BOOTSTRAP_NO_MEMORY;
9467 }
9468
9469 jm = j->mgr;
9470
9471 if (unlikely(!pid1_magic)) {
9472 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
9473 return BOOTSTRAP_NOT_PRIVILEGED;
9474 }
9475 if (unlikely(jobmgr_parent(jm) == NULL)) {
9476 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
9477 return BOOTSTRAP_NOT_PRIVILEGED;
9478 }
9479 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
9480 job_log(j, LOG_ERR, "Cannot transfer an already set-up GUI session.");
9481 return BOOTSTRAP_NOT_PRIVILEGED;
9482 }
9483 if (unlikely(!j->anonymous)) {
9484 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
9485 return BOOTSTRAP_NOT_PRIVILEGED;
9486 }
9487
9488 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
9489
9490 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
9491 if (!job_assumes(j, outdata_obj_array)) {
9492 goto out_bad;
9493 }
9494
9495 *outdataCnt = 20 * 1024 * 1024;
9496 mig_allocate(outdata, *outdataCnt);
9497 if (!job_assumes(j, *outdata != 0)) {
9498 goto out_bad;
9499 }
9500
9501 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9502 if (!ji->anonymous) {
9503 continue;
9504 }
9505 SLIST_FOREACH(ms, &ji->machservices, sle) {
9506 cnt++;
9507 }
9508 }
9509
9510 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
9511 if (!job_assumes(j, ports != NULL)) {
9512 goto out_bad;
9513 }
9514
9515 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9516 if (!ji->anonymous) {
9517 continue;
9518 }
9519
9520 SLIST_FOREACH(ms, &ji->machservices, sle) {
9521 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
9522 (void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
9523 } else {
9524 goto out_bad;
9525 }
9526
9527 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
9528 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
9529 } else {
9530 goto out_bad;
9531 }
9532
9533 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
9534 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
9535 } else {
9536 goto out_bad;
9537 }
9538
9539 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
9540 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
9541 } else {
9542 goto out_bad;
9543 }
9544
9545 ports[cnt2] = machservice_port(ms);
9546
9547 /* Increment the send right by one so we can shut down the jobmgr cleanly. */
9548 (void)jobmgr_assumes(jm, (errno = launchd_mport_copy_send(ports[cnt2])) == KERN_SUCCESS);
9549 cnt2++;
9550 }
9551 }
9552
9553 (void)job_assumes(j, cnt == cnt2);
9554
9555 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
9556 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
9557 if (!job_assumes(j, packed_size != 0)) {
9558 goto out_bad;
9559 }
9560
9561 launch_data_free(outdata_obj_array);
9562
9563 *portsp = ports;
9564 *ports_cnt = cnt;
9565
9566 *reqport = jm->req_port;
9567 *rcvright = jm->jm_port;
9568
9569 jm->req_port = 0;
9570 jm->jm_port = 0;
9571
9572 workaround_5477111 = j;
9573
9574 jobmgr_shutdown(jm);
9575
9576 return BOOTSTRAP_SUCCESS;
9577
9578 out_bad:
9579 if (outdata_obj_array) {
9580 launch_data_free(outdata_obj_array);
9581 }
9582 if (*outdata) {
9583 mig_deallocate(*outdata, *outdataCnt);
9584 }
9585 if (ports) {
9586 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
9587 }
9588
9589 return BOOTSTRAP_NO_MEMORY;
9590 }
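
/* Sketch of the pack-into-a-MIG-buffer pattern used above (compiled out):
 * over-allocate, pack, and let the MIG reply consume the buffer.
 * launch_data_pack() returns 0 when the buffer is too small, so the guessed
 * size must be validated. pack_for_reply() is a hypothetical name.
 */
#if 0
static kern_return_t
pack_for_reply(launch_data_t obj, vm_offset_t *buf, mach_msg_type_number_t *buf_sz)
{
	*buf_sz = 10 * 1024; /* Big enough. */
	mig_allocate(buf, *buf_sz);
	if (*buf == 0) {
		return BOOTSTRAP_NO_MEMORY;
	}

	size_t packed = launch_data_pack(obj, (void *)*buf, *buf_sz, NULL, NULL);
	if (packed == 0) {
		mig_deallocate(*buf, *buf_sz);
		return BOOTSTRAP_NO_MEMORY;
	}

	return BOOTSTRAP_SUCCESS;
}
#endif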
9591
9592 kern_return_t
9593 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
9594 {
9595 int bsdepth = 0;
9596 jobmgr_t jmr;
9597
9598 if (!launchd_assumes(j != NULL)) {
9599 return BOOTSTRAP_NO_MEMORY;
9600 }
9601 if (j->mgr->shutting_down) {
9602 return BOOTSTRAP_UNKNOWN_SERVICE;
9603 }
9604
9605 jmr = j->mgr;
9606
9607 while ((jmr = jobmgr_parent(jmr)) != NULL) {
9608 bsdepth++;
9609 }
9610
9611 /* Since we use recursion, we need an artificial depth limit for subsets. */
9612 if (unlikely(bsdepth > 100)) {
9613 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Bootstrap depth %d exceeds the maximum of 100.", bsdepth);
9614 return BOOTSTRAP_NO_MEMORY;
9615 }
9616
9617 char name[NAME_MAX];
9618 snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
9619
9620 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
9621 if (unlikely(requestorport == MACH_PORT_NULL)) {
9622 return BOOTSTRAP_NOT_PRIVILEGED;
9623 }
9624 return BOOTSTRAP_NO_MEMORY;
9625 }
9626
9627 *subsetportp = jmr->jm_port;
9628 jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
9629
9630 /* A job could create multiple subsets, so only add a reference the first time
9631 * it does so we don't have to keep a count.
9632 */
9633 if (j->anonymous && !j->holds_ref) {
9634 j->holds_ref = true;
9635 runtime_add_ref();
9636 }
9637
9638 job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
9639 return BOOTSTRAP_SUCCESS;
9640 }
9641
9642 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
9643 job_t
9644 xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
9645 {
9646 jobmgr_t where2put = NULL;
9647
9648 launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
9649 if (destname) {
9650 if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
9651 const char *str = launch_data_get_string(destname);
9652 if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
9653 where2put = _s_xpc_system_domain;
9654 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
9655 where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
9656 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
9657 where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
9658 } else {
9659 jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
9660 errno = EINVAL;
9661 }
9662 } else {
9663 jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
9664 errno = EINVAL;
9665 }
9666
9667 if (where2put) {
9668 launch_data_t mi = NULL;
9669 if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
9670 if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
9671 jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
9672 where2put = NULL;
9673 errno = EINVAL;
9674 }
9675 }
9676 }
9677 } else {
9678 where2put = jm;
9679 }
9680
9681 job_t j = NULL;
9682 if (where2put) {
9683 jobmgr_log(where2put, LOG_DEBUG, "Importing service...");
9684 j = jobmgr_import2(where2put, pload);
9685 if (j) {
9686 j->xpc_service = true;
9687 if (where2put->xpc_singleton) {
9688 /* If the service was destined for one of the global domains,
9689 * then we have to alias it into our local domain to reserve the
9690 * name.
9691 */
9692 job_t ja = job_new_alias(jm, j);
9693 if (!ja) {
9694 /* If we failed to alias the job because of a conflict over
9695 * the label, then we remove it from the global domain. We
9696 * don't want to risk having imported a malicious job into
9697 * one of the global domains.
9698 */
9699 if (errno != EEXIST) {
9700 job_assumes(j, errno == 0);
9701 } else {
9702 job_log(j, LOG_ERR, "Failed to alias job into: %s", where2put->name);
9703 }
9704
9705 job_remove(j);
9706 } else {
9707 ja->xpc_service = true;
9708 j = ja;
9709 }
9710 }
9711 }
9712 }
9713
9714 return j;
9715 }
9716
9717 kern_return_t
9718 xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
9719 {
9720 if (unlikely(!pid1_magic)) {
9721 job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
9722 return BOOTSTRAP_NOT_PRIVILEGED;
9723 }
9724 if (!j || !MACH_PORT_VALID(reqport)) {
9725 return BOOTSTRAP_UNKNOWN_SERVICE;
9726 }
9727 if (root_jobmgr->shutting_down) {
9728 jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
9729 return BOOTSTRAP_NOT_PRIVILEGED;
9730 }
9731
9732 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9733 /* All XPC domains are children of the root job manager. What we're creating
9734 * here is really just a skeleton. By creating it, we're adding reqport to our
9735 * port set. It will have two messages on it. The first specifies the
9736 * environment of the originator. This is so we can cache it and hand it to
9737 * xpcproxy to bootstrap our services. The second is the set of jobs that is
9738 * to be bootstrapped in.
9739 */
9740 jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
9741 if (job_assumes(j, jm != NULL)) {
9742 jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
9743 jm->shortdesc = "private";
9744 kr = BOOTSTRAP_SUCCESS;
9745 }
9746
9747 return kr;
9748 }
9749
9750 kern_return_t
9751 xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
9752 {
9753 if (!j) {
9754 /* Due to the wacky nature of XPC service bootstrapping, we can end up
9755 * getting this message long after the requesting process has gone away.
9756 * See <rdar://problem/8593143>.
9757 */
9758 return BOOTSTRAP_UNKNOWN_SERVICE;
9759 }
9760
9761 jobmgr_t jm = j->mgr;
9762 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9763 return BOOTSTRAP_NOT_PRIVILEGED;
9764 }
9765
9766 if (jm->req_asport != MACH_PORT_NULL) {
9767 return BOOTSTRAP_NOT_PRIVILEGED;
9768 }
9769
9770 struct ldcred *ldc = runtime_get_caller_creds();
9771 struct proc_bsdshortinfo proc;
9772 if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
9773 if (errno != ESRCH) {
9774 jobmgr_assumes(jm, errno == 0);
9775 }
9776
9777 jm->error = errno;
9778 jobmgr_remove(jm);
9779 return BOOTSTRAP_NO_MEMORY;
9780 }
9781
9782 if (!jobmgr_assumes(jm, audit_session_port(ldc->asid, &jm->req_asport) == 0)) {
9783 jm->error = EPERM;
9784 jobmgr_remove(jm);
9785 job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
9786 return BOOTSTRAP_NOT_PRIVILEGED;
9787 }
9788
9789 (void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s[%i]", proc.pbsi_comm, ldc->pid);
9790 strlcpy(jm->owner, proc.pbsi_comm, sizeof(jm->owner));
9791 jm->req_bsport = bsport;
9792 jm->req_excport = excport;
9793 jm->req_rport = rp;
9794 jm->req_ctx = ctx;
9795 jm->req_ctx_sz = ctx_sz;
9796 jm->req_pid = ldc->pid;
9797 jm->req_euid = ldc->euid;
9798 jm->req_egid = ldc->egid;
9799 jm->req_asid = ldc->asid;
9800
9801 return KERN_SUCCESS;
9802 }
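
/* The PROC_PIDT_SHORTBSDINFO probe above both validates that the caller is
 * still alive and fetches its comm name in one shot. A standalone sketch
 * (compiled out; copy_comm_name() is a hypothetical name):
 */
#if 0
#include <libproc.h>
#include <string.h>

static int
copy_comm_name(pid_t pid, char *buf, size_t buf_sz)
{
	struct proc_bsdshortinfo proc;

	/* proc_pidinfo() returns the number of bytes filled in; 0 means failure
	 * (errno is ESRCH when the process is already gone).
	 */
	if (proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		return -1;
	}

	(void)strlcpy(buf, proc.pbsi_comm, buf_sz);
	return 0;
}
#endif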
9803
9804 kern_return_t
9805 xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
9806 {
9807 if (!j) {
9808 return BOOTSTRAP_UNKNOWN_SERVICE;
9809 }
9810
9811 /* This is just for XPC domains (for now). */
9812 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9813 return BOOTSTRAP_NOT_PRIVILEGED;
9814 }
9815 if (j->mgr->session_initialized) {
9816 jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
9817 return BOOTSTRAP_NOT_PRIVILEGED;
9818 }
9819
9820 size_t offset = 0;
9821 launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
9822 if (!jobmgr_assumes(j->mgr, services != NULL)) {
9823 return BOOTSTRAP_NO_MEMORY;
9824 }
9825
9826 size_t i = 0;
9827 size_t c = launch_data_array_get_count(services);
9828 for (i = 0; i < c; i++) {
9829 job_t nj = NULL;
9830 launch_data_t ploadi = launch_data_array_get_index(services, i);
9831 if (!(nj = xpc_domain_import_service(j->mgr, ploadi))) {
9832 /* If loading one job fails, just fail the whole thing. At this
9833 * point, xpchelper should receive the failure and then just refuse
9834 * to launch the application, since its XPC services could not be
9835 * fully bootstrapped.
9836 *
9837 * Take care to not reference the job or its manager after this
9838 * point.
9839 */
9840 if (errno == EINVAL) {
9841 jobmgr_log(j->mgr, LOG_ERR, "Service at index is not valid: %lu", i);
9842 } else if (errno == EEXIST) {
9843 /* If we get back EEXIST, we know that the payload was a
9844 * dictionary with a label. But, well, I guess it never hurts to
9845 * check.
9846 */
9847 char *label = "(bogus)";
9848 if (launch_data_get_type(ploadi) == LAUNCH_DATA_DICTIONARY) {
9849 launch_data_t llabel = launch_data_dict_lookup(ploadi, LAUNCH_JOBKEY_LABEL);
9850 if (llabel && launch_data_get_type(llabel) == LAUNCH_DATA_STRING) {
9851 label = (char *)launch_data_get_string(llabel);
9852 }
9853 }
9854 jobmgr_log(j->mgr, LOG_ERR, "Service name conflict: %s", label);
9855 }
9856
9857 j->mgr->error = errno;
9858 jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
9859 jobmgr_remove(j->mgr);
9860 break;
9861 } else {
9862 jobmgr_log(j->mgr, LOG_DEBUG, "Imported service %s", nj->label);
9863 job_dispatch(nj, false);
9864 }
9865 }
9866
9867 kern_return_t result = BOOTSTRAP_NO_MEMORY;
9868 if (i == c) {
9869 j->mgr->session_initialized = true;
9870 (void)jobmgr_assumes(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS) == KERN_SUCCESS);
9871 j->mgr->req_rport = MACH_PORT_NULL;
9872
9873 /* Returning a failure code will destroy the message, whereas returning
9874 * success will not, so we need to clean up here.
9875 */
9876 mig_deallocate(services_buff, services_sz);
9877 result = BOOTSTRAP_SUCCESS;
9878 }
9879
9880 return result;
9881 }
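
/* The mirror image of launch_data_pack(): unpack a MIG buffer and walk the
 * resulting array, as the loop above does. Compiled-out sketch with a
 * hypothetical name; the unpacked object can reference the buffer, so the
 * buffer must stay alive while the object is in use (which is why the code
 * above only deallocates the buffer once it is done).
 */
#if 0
static size_t
count_packed_services(vm_offset_t buff, mach_msg_type_number_t buff_sz)
{
	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)buff, buff_sz, NULL, 0, &offset, NULL);

	if (services == NULL || launch_data_get_type(services) != LAUNCH_DATA_ARRAY) {
		return 0;
	}

	return launch_data_array_get_count(services);
}
#endif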
9882
9883 kern_return_t
9884 xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport, mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid, int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
9885 {
9886 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9887 return BOOTSTRAP_UNKNOWN_SERVICE;
9888 }
9889 jobmgr_t jm = j->mgr;
9890 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9891 return BOOTSTRAP_NOT_PRIVILEGED;
9892 }
9893
9894 if (jm->req_asport == MACH_PORT_NULL) {
9895 return BOOTSTRAP_NOT_PRIVILEGED;
9896 }
9897
9898 *bsport = jm->req_bsport;
9899 *sbsport = root_jobmgr->jm_port;
9900 *excport = jm->req_excport;
9901 *asport = jm->req_asport;
9902 *uid = jm->req_euid;
9903 *gid = jm->req_egid;
9904 *asid = jm->req_asid;
9905
9906 *ctx = jm->req_ctx;
9907 *ctx_sz = jm->req_ctx_sz;
9908
9909 return KERN_SUCCESS;
9910 }
9911
9912 kern_return_t
9913 xpc_domain_get_service_name(job_t j, event_name_t name)
9914 {
9915 if (!j) {
9916 return BOOTSTRAP_NO_MEMORY;
9917 }
9918 if (!j->xpc_service) {
9919 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
9920 return BOOTSTRAP_NOT_PRIVILEGED;
9921 }
9922
9923 struct machservice *ms = SLIST_FIRST(&j->machservices);
9924 if (!ms) {
9925 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name of job with no machservices: %s", j->label);
9926 return BOOTSTRAP_UNKNOWN_SERVICE;
9927 }
9928
9929 (void)strlcpy(name, ms->name, sizeof(event_name_t));
9930 return BOOTSTRAP_SUCCESS;
9931 }
9932 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
9933
9934 kern_return_t
9935 xpc_events_get_channel_name(job_t j __attribute__((unused)), event_name_t stream __attribute__((unused)), uint64_t token __attribute__((unused)), event_name_t name __attribute__((unused)))
9936 {
9937 return KERN_FAILURE;
9938 }
9939
9940 kern_return_t
9941 xpc_events_get_event_name(job_t j, event_name_t stream, uint64_t token, event_name_t name)
9942 {
9943 struct externalevent *event = externalevent_find(stream, token);
9944 if (event && j && j->event_monitor) {
9945 (void)strlcpy(name, event->name, sizeof(event_name_t));
9946 } else {
9947 event = NULL;
9948 }
9949
9950 return event ? BOOTSTRAP_SUCCESS : BOOTSTRAP_UNKNOWN_SERVICE;
9951 }
9952
9953 kern_return_t
9954 xpc_events_set_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t event, mach_msg_type_number_t eventCnt)
9955 {
9956 if (j->anonymous) {
9957 return BOOTSTRAP_NOT_PRIVILEGED;
9958 }
9959
9960 struct externalevent *eei = NULL;
9961 LIST_FOREACH(eei, &j->events, job_le) {
9962 if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
9963 externalevent_delete(eei);
9964 eventsystem_ping();
9965 break;
9966 }
9967 }
9968
9969 bool success = false;
9970 struct eventsystem *es = eventsystem_find(stream);
9971 if (!es) {
9972 es = eventsystem_new(stream);
9973 (void)job_assumes(j, es != NULL);
9974 }
9975
9976 if (es) {
9977 size_t offset = 0;
9978 launch_data_t unpacked = launch_data_unpack((void *)event, eventCnt, NULL, 0, &offset, 0);
9979 if (unpacked && launch_data_get_type(unpacked) == LAUNCH_DATA_DICTIONARY) {
9980 success = externalevent_new(j, es, key, unpacked);
9981 }
9982 }
9983
9984 if (!success) {
9985 mig_deallocate(event, eventCnt);
9986 }
9987
9988 return KERN_SUCCESS;
9989 }
9990
9991 kern_return_t
9992 xpc_events_get_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t *event, mach_msg_type_number_t *eventCnt)
9993 {
9994 struct externalevent *eei = NULL;
9995 LIST_FOREACH(eei, &j->events, job_le) {
9996 if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
9997 /* Big enough. */
9998 *eventCnt = 10 * 1024;
9999 mig_allocate(event, *eventCnt);
10000
10001 size_t sz = launch_data_pack(eei->event, (void *)*event, *eventCnt, NULL, NULL);
10002 if (!job_assumes(j, sz != 0)) {
10003 mig_deallocate(*event, *eventCnt);
10004 return BOOTSTRAP_NO_MEMORY;
10005 }
10006
10007 return BOOTSTRAP_SUCCESS;
10008 }
10009 }
10010
10011 return BOOTSTRAP_UNKNOWN_SERVICE;
10012 }
10013
10014 struct machservice *
10015 xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p)
10016 {
10017 struct machservice *msi = NULL;
10018 SLIST_FOREACH(msi, &j->machservices, sle) {
10019 if (strcmp(stream, msi->name) == 0) {
10020 break;
10021 }
10022 }
10023
10024 if (!msi) {
10025 mach_port_t sp = MACH_PORT_NULL;
10026 msi = machservice_new(j, stream, &sp, false);
10027 if (job_assumes(j, msi)) {
10028 /* Hack to keep this from being publicly accessible through
10029 * bootstrap_look_up().
10030 */
10031 LIST_REMOVE(msi, name_hash_sle);
10032 msi->event_channel = true;
10033 *p = sp;
10034
10035 (void)job_dispatch(j, false);
10036 } else {
10037 errno = BOOTSTRAP_NO_MEMORY;
10038 }
10039 } else {
10040 if (!msi->event_channel) {
10041 job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
10042 msi = NULL;
10043 errno = BOOTSTRAP_NAME_IN_USE;
10044 } else {
10045 *p = msi->port;
10046 }
10047 }
10048
10049 return msi;
10050 }
10051
10052 kern_return_t
10053 xpc_events_channel_check_in(job_t j, event_name_t stream, uint64_t flags __attribute__((unused)), mach_port_t *p)
10054 {
10055 struct machservice *ms = xpc_events_find_channel(j, stream, p);
10056 if (ms) {
10057 if (ms->isActive) {
10058 job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
10059 *p = MACH_PORT_NULL;
10060 errno = BOOTSTRAP_SERVICE_ACTIVE;
10061 } else {
10062 job_checkin(j);
10063 machservice_request_notifications(ms);
10064 errno = BOOTSTRAP_SUCCESS;
10065 }
10066 }
10067
10068 return errno;
10069 }
10070
10071 kern_return_t
10072 xpc_events_channel_look_up(job_t j, event_name_t stream, event_token_t token, uint64_t flags __attribute__((unused)), mach_port_t *p)
10073 {
10074 if (!j->event_monitor) {
10075 return BOOTSTRAP_NOT_PRIVILEGED;
10076 }
10077
10078 struct externalevent *ee = externalevent_find(stream, token);
10079 if (!ee) {
10080 return BOOTSTRAP_UNKNOWN_SERVICE;
10081 }
10082
10083 struct machservice *ms = xpc_events_find_channel(ee->job, stream, p);
10084 if (ms) {
10085 errno = BOOTSTRAP_SUCCESS;
10086 }
10087
10088 return errno;
10089 }
10090
10091 kern_return_t
10092 job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
10093 {
10094 struct ldcred *ldc = runtime_get_caller_creds();
10095 job_t otherj;
10096
10097 if (!launchd_assumes(j != NULL)) {
10098 return BOOTSTRAP_NO_MEMORY;
10099 }
10100
10101 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
10102 return BOOTSTRAP_UNKNOWN_SERVICE;
10103 }
10104
10105 #if TARGET_OS_EMBEDDED
10106 bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
10107 #else
10108 bool allow_non_root_kickstart = false;
10109 #endif
10110
10111 if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
10112 return BOOTSTRAP_NOT_PRIVILEGED;
10113 }
10114
10115 #if HAVE_SANDBOX
10116 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10117 return BOOTSTRAP_NOT_PRIVILEGED;
10118 }
10119 #endif
10120
10121 if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
10122 return BOOTSTRAP_SERVICE_ACTIVE;
10123 }
10124
10125 otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
10126 otherj = job_dispatch(otherj, true);
10127
10128 if (!job_assumes(j, otherj && otherj->p)) {
10129 /* <rdar://problem/6787083> Clear this flag if we failed to start the job. */
10130 if (otherj) otherj->stall_before_exec = false;
10131 return BOOTSTRAP_NO_MEMORY;
10132 }
10133
10134 *out_pid = otherj->p;
10135
10136 return 0;
10137 }
10138
10139 kern_return_t
10140 job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
10141 {
10142 launch_data_t jobdata = NULL;
10143 size_t data_offset = 0;
10144 struct ldcred *ldc = runtime_get_caller_creds();
10145 job_t jr;
10146
10147 if (!launchd_assumes(j != NULL)) {
10148 return BOOTSTRAP_NO_MEMORY;
10149 }
10150
10151 if (unlikely(j->deny_job_creation)) {
10152 return BOOTSTRAP_NOT_PRIVILEGED;
10153 }
10154
10155 #if HAVE_SANDBOX
10156 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10157 return BOOTSTRAP_NOT_PRIVILEGED;
10158 }
10159 #endif
10160
10161 if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
10162 job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
10163 return VPROC_ERR_TRY_PER_USER;
10164 }
10165
10166 if (!job_assumes(j, indataCnt != 0)) {
10167 return 1;
10168 }
10169
10170 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
10171 if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
10172 return 1;
10173 }
10174
10175 jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
10176 if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
10177 jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
10178 return 1;
10179 }
10180
10181 jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);
10182
10183 launch_data_t label = NULL;
10184 launch_data_t wait4debugger = NULL;
10185 if (!jr) {
10186 switch (errno) {
10187 case EEXIST:
10188 /* If EEXIST was returned, we know that there is a label string in
10189 * the dictionary. So we don't need to check the types here; that
10190 * has already been done.
10191 */
10192 label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
10193 jr = job_find(NULL, launch_data_get_string(label));
10194 if (job_assumes(j, jr != NULL) && !jr->p) {
10195 wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
10196 if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
10197 if (launch_data_get_bool(wait4debugger)) {
10198 /* If the job exists, we're going to kick-start it, but
10199 * we need to give the caller the opportunity to start
10200 * it suspended if it so desires. But this will only
10201 * take effect if the job isn't running.
10202 */
10203 jr->wait4debugger_oneshot = true;
10204 }
10205 }
10206 }
10207
10208 *outj = jr;
10209 return BOOTSTRAP_NAME_IN_USE;
10210 default:
10211 return BOOTSTRAP_NO_MEMORY;
10212 }
10213 }
10214
10215 if (pid1_magic) {
10216 jr->mach_uid = ldc->uid;
10217 }
10218
10219 jr->legacy_LS_job = true;
10220 jr->abandon_pg = true;
10221 jr->asport = asport;
10222 uuid_clear(jr->expected_audit_uuid);
10223 jr = job_dispatch(jr, true);
10224
10225 if (!job_assumes(j, jr != NULL)) {
10226 job_remove(jr);
10227 return BOOTSTRAP_NO_MEMORY;
10228 }
10229
10230 if (!job_assumes(jr, jr->p)) {
10231 job_remove(jr);
10232 return BOOTSTRAP_NO_MEMORY;
10233 }
10234
10235 job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
10236 *outj = jr;
10237
10238 return BOOTSTRAP_SUCCESS;
10239 }
10240
10241 kern_return_t
10242 job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
10243 {
10244 job_t nj = NULL;
10245 kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
10246 if (likely(kr == KERN_SUCCESS)) {
10247 if (job_setup_exit_port(nj) != KERN_SUCCESS) {
10248 job_remove(nj);
10249 kr = BOOTSTRAP_NO_MEMORY;
10250 } else {
10251 /* Do not return until the job has called exec(3), thereby making it
10252 * safe for the caller to send it SIGCONT.
10253 *
10254 * <rdar://problem/9042798>
10255 */
10256 nj->spawn_reply_port = rp;
10257 kr = MIG_NO_REPLY;
10258 }
10259 } else if (kr == BOOTSTRAP_NAME_IN_USE) {
10260 bool was_running = nj->p;
10261 if (job_dispatch(nj, true)) {
10262 if (!was_running) {
10263 job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");
10264
10265 if (job_setup_exit_port(nj) == KERN_SUCCESS) {
10266 nj->spawn_reply_port = rp;
10267 kr = MIG_NO_REPLY;
10268 } else {
10269 kr = BOOTSTRAP_NO_MEMORY;
10270 }
10271 } else {
10272 *obsvr_port = MACH_PORT_NULL;
10273 *child_pid = nj->p;
10274 kr = KERN_SUCCESS;
10275 }
10276 } else {
10277 job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
10278 kr = BOOTSTRAP_UNKNOWN_SERVICE;
10279 }
10280 }
10281
10282 mig_deallocate(indata, indataCnt);
10283 return kr;
10284 }
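
/* The MIG_NO_REPLY dance above is the standard way to complete a MIG request
 * asynchronously: stash the reply port, return MIG_NO_REPLY so the demuxer
 * neither sends nor destroys a reply, and invoke the generated reply routine
 * later. Compiled-out sketch; the reply routine name and signature here are
 * hypothetical stand-ins for whatever MIG generates.
 */
#if 0
static void
notify_spawn_observer(job_t j)
{
	if (j->spawn_reply_port != MACH_PORT_NULL) {
		/* Sends the deferred reply and consumes the send-once right. */
		(void)job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, MACH_PORT_NULL);
		j->spawn_reply_port = MACH_PORT_NULL;
	}
}
#endif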
10285
10286 kern_return_t
10287 job_mig_event_source_check_in(job_t j, name_t name, mach_port_t ping_port, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt, uint64_t *tokens)
10288 {
10289 if (!j || !j->event_monitor) {
10290 return BOOTSTRAP_NOT_PRIVILEGED;
10291 }
10292
10293 /* Update our ping-port. One ping will force all the notification systems
10294 * to check in, so they'll all give us send-once rights. It doesn't really
10295 * matter which one we keep around. It's not the most efficient thing ever,
10296 * but keep in mind that, by doing this over one channel, we can do it over
10297 * the job's MachService. This means that we'll get it back when the job dies,
10298 * and we can create ourselves a send-once right if we didn't have one already,
10299 * and we can just keep the helper alive without it needing to bootstrap
10300 * communication.
10301 *
10302 * So we're trading efficiency for robustness. In this case, the checkins
10303 * should happen pretty infrequently, so it's pretty worth it.
10304 */
10305 if (_s_event_update_port != MACH_PORT_NULL) {
10306 (void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
10307 }
10308 _s_event_update_port = ping_port;
10309
10310 kern_return_t result = BOOTSTRAP_NO_MEMORY;
10311 launch_data_t arr = launch_data_alloc(LAUNCH_DATA_ARRAY);
10312 if (job_assumes(j, arr != NULL)) {
10313 struct eventsystem *es = eventsystem_find(name);
10314 if (unlikely(es == NULL)) {
10315 es = eventsystem_new(name);
10316 }
10317
10318 if (job_assumes(j, es != NULL)) {
10319 struct externalevent *ei = NULL;
10320 size_t i = 0;
10321 LIST_FOREACH(ei, &es->events, sys_le) {
10322 (void)job_assumes(j, launch_data_array_set_index(arr, ei->event, i));
10323 if (job_assumes(j, i < 1024)) {
10324 tokens[i] = ei->id;
10325 } else {
10326 break;
10327 }
10328 i++;
10329 }
10330
10331 /* Big enough. */
10332 *outvalCnt = 10 * 1024;
10333 mig_allocate(outval, *outvalCnt);
10334
10335 size_t sz = launch_data_pack(arr, (void *)*outval, *outvalCnt, NULL, NULL);
10336 if (job_assumes(j, sz != 0)) {
10337 result = BOOTSTRAP_SUCCESS;
10338 } else {
10339 mig_deallocate(*outval, *outvalCnt);
10340 }
10341 }
10342
10343 /* Total hack, but launch_data doesn't do ref-counting. */
10344 struct _launch_data *hack = (struct _launch_data *)arr;
10345 free(hack->_array);
10346 free(arr);
10347 }
10348
10349 return result;
10350 }
10351
10352 kern_return_t
10353 job_mig_event_set_state(job_t j, name_t name, uint64_t token, boolean_t state)
10354 {
10355 if (!j || !j->event_monitor) {
10356 return BOOTSTRAP_NOT_PRIVILEGED;
10357 }
10358
10359 struct externalevent *ei = externalevent_find(name, token);
10360 if (job_assumes(j, ei != NULL)) {
10361 ei->state = state;
10362 if (job_dispatch(ei->job, false) == NULL) {
10363 if (errno == EPERM) {
10364 return BOOTSTRAP_NOT_PRIVILEGED;
10365 }
10366 return BOOTSTRAP_NO_MEMORY;
10367 }
10368 } else {
10369 return BOOTSTRAP_NO_MEMORY;
10370 }
10371
10372 return BOOTSTRAP_SUCCESS;
10373 }
10374
10375 void
10376 jobmgr_init(bool sflag)
10377 {
10378 const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
10379 SLIST_INIT(&s_curious_jobs);
10380 LIST_INIT(&s_needing_sessions);
10381
10382 launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
10383 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
10384 launchd_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
10385 _s_xpc_system_domain->req_asid = g_audit_session;
10386 _s_xpc_system_domain->req_asport = g_audit_session_port;
10387 _s_xpc_system_domain->shortdesc = "system";
10388 #endif /* __LAUNCH_DISABLE_XPC_SUPPORT__ */
10389 if (pid1_magic) {
10390 root_jobmgr->monitor_shutdown = true;
10391 }
10392
10393 uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
10394 s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
10395 if (likely(s_no_hang_fd == -1)) {
10396 if (jobmgr_assumes(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK)) != -1)) {
10397 (void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr) != -1);
10398 }
10399 }
10400 s_no_hang_fd = _fd(s_no_hang_fd);
10401 }
10402
10403 size_t
10404 our_strhash(const char *s)
10405 {
10406 size_t c, r = 5381;
10407
10408 /* djb2
10409 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
10410 */
10411
10412 while ((c = *s++)) {
10413 r = ((r << 5) + r) + c; /* hash*33 + c */
10414 }
10415
10416 return r;
10417 }
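
/* Worked example of djb2 (compiled out): hash("ab") = (5381 * 33 + 'a') * 33
 * + 'b' = 177670 * 33 + 98 = 5863208.
 */
#if 0
#include <assert.h>

static void
djb2_self_test(void)
{
	assert(our_strhash("") == 5381);
	assert(our_strhash("ab") == 5863208);
}
#endif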
10418
10419 size_t
10420 hash_label(const char *label)
10421 {
10422 return our_strhash(label) % LABEL_HASH_SIZE;
10423 }
10424
10425 size_t
10426 hash_ms(const char *msstr)
10427 {
10428 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
10429 }
10430
10431 bool
10432 waiting4removal_new(job_t j, mach_port_t rp)
10433 {
10434 struct waiting_for_removal *w4r;
10435
10436 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
10437 return false;
10438 }
10439
10440 w4r->reply_port = rp;
10441
10442 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
10443
10444 return true;
10445 }
10446
10447 void
10448 waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
10449 {
10450 (void)job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);
10451
10452 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
10453
10454 free(w4r);
10455 }
10456
10457 size_t
10458 get_kern_max_proc(void)
10459 {
10460 int mib[] = { CTL_KERN, KERN_MAXPROC };
10461 int max = 100;
10462 size_t max_sz = sizeof(max);
10463
10464 (void)launchd_assumes(sysctl(mib, 2, &max, &max_sz, NULL, 0) != -1);
10465
10466 return max;
10467 }
10468
10469 /* See rdar://problem/6271234 */
10470 void
10471 eliminate_double_reboot(void)
10472 {
10473 if (unlikely(!pid1_magic)) {
10474 return;
10475 }
10476
10477 struct stat sb;
10478 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
10479 char *try_again = "Will try again at next boot.";
10480 int result = ~0;
10481
10482 if (unlikely(stat(argv[1], &sb) != -1)) {
10483 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
10484
10485 int wstatus;
10486 pid_t p;
10487
10488 (void)jobmgr_assumes(root_jobmgr, (errno = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ)) == 0);
10489
10490 if (errno) {
10491 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script! %s", try_again);
10492 goto out;
10493 }
10494
10495 if (!jobmgr_assumes(root_jobmgr, waitpid(p, &wstatus, 0) != -1)) {
10496 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't confirm that deferred install script exited successfully! %s", try_again);
10497 goto out;
10498 }
10499
10500 if (jobmgr_assumes(root_jobmgr, WIFEXITED(wstatus) != 0)) {
10501 if (jobmgr_assumes(root_jobmgr, (result = WEXITSTATUS(wstatus)) == EXIT_SUCCESS)) {
10502 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
10503 } else {
10504 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script exited with status %d. %s", WEXITSTATUS(wstatus), try_again);
10505 }
10506 } else {
10507 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Confirmed that deferred install script exited, but couldn't confirm that it was successful. %s", try_again);
10508 }
10509 }
10510 out:
10511 if (result == 0) {
10512 /* If the unlink(2) were to fail, it would most likely fail with EBUSY. All the other
10513 * failure cases for unlink(2) don't apply when we're running under PID 1 and have verified
10514 * that the file exists. Outside of someone deliberately messing with us (like if /etc/rc.deferred_install
10515 * is actually a looping sym-link or a mount point for a filesystem) and I/O errors, we should be good.
10516 */
10517 if (!jobmgr_assumes(root_jobmgr, unlink(argv[1]) != -1)) {
10518 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script couldn't be removed!");
10519 }
10520 }
10521 }
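
/* The spawn-and-wait pattern above in miniature (compiled out):
 * posix_spawnp() returns an error number directly rather than through errno,
 * and waitpid() plus WIFEXITED()/WEXITSTATUS() recovers the script's exit
 * status. run_script_and_wait() and its argument are hypothetical.
 */
#if 0
#include <errno.h>
#include <paths.h>
#include <spawn.h>
#include <sys/wait.h>

extern char **environ;

static int
run_script_and_wait(const char *script_path)
{
	const char *argv[] = { _PATH_BSHELL, script_path, NULL };
	pid_t p;
	int wstatus;

	int err = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ);
	if (err != 0) {
		return err;
	}
	if (waitpid(p, &wstatus, 0) == -1) {
		return errno;
	}
	return WIFEXITED(wstatus) ? WEXITSTATUS(wstatus) : -1;
}
#endif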
10522
10523 void
10524 jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
10525 {
10526 job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
10527 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10528 j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
10529 job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
10530 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10531 j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
10532 job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
10533 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
10534 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
10535 * You can't set this in a plist.
10536 */
10537 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
10538 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
10539 * complain about it.
10540 */
10541 } else {
10542 job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
10543 }
10544
10545 if (unlikely(!j->jetsam_properties)) {
10546 j->jetsam_properties = true;
10547 LIST_INSERT_HEAD(&j->mgr->jetsam_jobs, j, jetsam_sle);
10548 j->mgr->jetsam_jobs_cnt++;
10549 }
10550
10551 j->jetsam_seq = s_jetsam_sequence_id++;
10552 }
10553
10554 int
10555 launchd_set_jetsam_priorities(launch_data_t priorities)
10556 {
10557 if (!launchd_assumes(launch_data_get_type(priorities) == LAUNCH_DATA_ARRAY)) {
10558 return EINVAL;
10559 }
10560
10561 jobmgr_t jm = NULL;
10562 #if !TARGET_OS_EMBEDDED
10563 /* For testing. */
10564 jm = jobmgr_find_by_name(root_jobmgr, VPROCMGR_SESSION_AQUA);
10565 if (!launchd_assumes(jm != NULL)) {
10566 return EINVAL;
10567 }
10568 #else
10569 /* Since this is for embedded, we can assume that the root job manager holds the Jetsam jobs. */
10570 jm = root_jobmgr;
10571
10572 if (!g_embedded_privileged_action) {
10573 return EPERM;
10574 }
10575 #endif
10576
10577 size_t npris = launch_data_array_get_count(priorities);
10578
10579 job_t ji = NULL;
10580 size_t i = 0;
10581 for (i = 0; i < npris; i++) {
10582 launch_data_t ldi = launch_data_array_get_index(priorities, i);
10583 if (!launchd_assumes(launch_data_get_type(ldi) == LAUNCH_DATA_DICTIONARY)) {
10584 continue;
10585 }
10586
10587 launch_data_t label = NULL;
10588 if (!launchd_assumes(label = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMLABEL))) {
10589 continue;
10590 }
10591 const char *_label = launch_data_get_string(label);
10592
10593 ji = job_find(NULL, _label);
10594 if (!launchd_assumes(ji != NULL)) {
10595 continue;
10596 }
10597
10598 launch_data_dict_iterate(ldi, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, ji);
10599
10600 launch_data_t frontmost = NULL;
10601 if ((frontmost = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMFRONTMOST)) && launch_data_get_type(frontmost) == LAUNCH_DATA_BOOL) {
10602 ji->jetsam_frontmost = launch_data_get_bool(frontmost);
10603 }
10604 }
10605
10606 i = 0;
10607 job_t *jobs = (job_t *)calloc(jm->jetsam_jobs_cnt, sizeof(job_t));
10608 if (launchd_assumes(jobs != NULL)) {
10609 LIST_FOREACH(ji, &jm->jetsam_jobs, jetsam_sle) {
10610 if (ji->p) {
10611 jobs[i] = ji;
10612 i++;
10613 }
10614 }
10615 }
10616
10617 size_t totalpris = i;
10618
10619 int result = EINVAL;
10620
10621 /* It is conceivable that there could be no Jetsam jobs running. */
10622 if (totalpris > 0) {
10623 /* Yay blocks! */
10624 qsort_b((void *)jobs, totalpris, sizeof(job_t), ^ int (const void *lhs, const void *rhs) {
10625 job_t _lhs = *(job_t *)lhs;
10626 job_t _rhs = *(job_t *)rhs;
10627 /* Sort in descending order. (Priority correlates to the soonishness with which you will be killed.) */
10628 if (_lhs->jetsam_priority > _rhs->jetsam_priority) {
10629 return -1;
10630 } else if (_lhs->jetsam_priority < _rhs->jetsam_priority) {
10631 return 1;
10632 }
10633 /* Priority is equal, so sort by sequence ID to maintain LRU order */
10634 if ((int)(_lhs->jetsam_seq - _rhs->jetsam_seq) > 0) {
10635 return 1;
10636 } else if ((int)(_lhs->jetsam_seq - _rhs->jetsam_seq) < 0) {
10637 return -1;
10638 }
10639
10640 return 0;
10641 });
10642
10643 jetsam_priority_entry_t *jpris = (jetsam_priority_entry_t *)calloc(totalpris, sizeof(jetsam_priority_entry_t));
10644 if (!launchd_assumes(jpris != NULL)) {
10645 result = ENOMEM;
10646 } else {
10647 for (i = 0; i < totalpris; i++) {
10648 jpris[i].pid = jobs[i]->p; /* Subject to time-of-use vs. time-of-check, obviously. */
10649 jpris[i].flags |= jobs[i]->jetsam_frontmost ? kJetsamFlagsFrontmost : 0;
10650 jpris[i].hiwat_pages = jobs[i]->jetsam_memlimit;
10651 }
10652
10653 (void)launchd_assumes((result = sysctlbyname("kern.memorystatus_priority_list", NULL, NULL, &jpris[0], totalpris * sizeof(jetsam_priority_entry_t))) != -1);
10654 result = result != 0 ? errno : 0;
10655
10656 free(jpris);
10657 }
10658 }
10659
10660 if (jobs) {
10661 free(jobs);
10662 }
10663
10664 return result;
10665 }
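
/* qsort_b(3) with a block comparator, as used above: descending by priority,
 * ties broken by ascending sequence number so LRU order is preserved.
 * Compiled-out sketch with a hypothetical record type:
 */
#if 0
#include <stdlib.h>

struct pri_rec {
	int priority;
	unsigned int seq;
};

static void
sort_for_jetsam(struct pri_rec *recs, size_t cnt)
{
	qsort_b(recs, cnt, sizeof(recs[0]), ^ int (const void *lhs, const void *rhs) {
		const struct pri_rec *l = lhs;
		const struct pri_rec *r = rhs;
		if (l->priority != r->priority) {
			/* Higher priority sorts first (killed sooner). */
			return (l->priority > r->priority) ? -1 : 1;
		}
		/* Equal priority: smaller (older) sequence number first. */
		return (l->seq < r->seq) ? -1 : ((l->seq > r->seq) ? 1 : 0);
	});
}
#endif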