/*
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

static const char *const __rcs_file_version__ = "$Revision: 24984 $";

#include "config.h"
#include "launchd_core_logic.h"
#include "launch_internal.h"
#include "launchd_helper.h"

#include <TargetConditionals.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/boolean.h>
#include <mach/message.h>
#include <mach/notify.h>
#include <mach/mig_errors.h>
#include <mach/mach_traps.h>
#include <mach/mach_interface.h>
#include <mach/host_info.h>
#include <mach/mach_host.h>
#include <mach/exception.h>
#include <mach/host_reboot.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/ucred.h>
#include <sys/fcntl.h>
#include <sys/un.h>
#include <sys/reboot.h>
#include <sys/wait.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/pipe.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/nd6.h>
#include <bsm/libbsm.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <paths.h>
#include <pwd.h>
#include <grp.h>
#include <ttyent.h>
#include <dlfcn.h>
#include <dirent.h>
#include <string.h>
#include <ctype.h>
#include <glob.h>
#include <System/sys/spawn.h>
#include <spawn.h>
#include <time.h>

#include <libproc.h>
#include <malloc/malloc.h>
#include <pthread.h>
#if HAVE_SANDBOX
#define __APPLE_API_PRIVATE
#include <sandbox.h>
#endif
#if HAVE_QUARANTINE
#include <quarantine.h>
#endif
#if TARGET_OS_EMBEDDED
#include <sys/kern_memorystatus.h>
#else
/* To make my life easier. */
typedef struct jetsam_priority_entry {
	pid_t pid;
	uint32_t priority;
	uint32_t flags;
	int32_t hiwat_pages;
	int32_t hiwat_reserved1;
	int32_t hiwat_reserved2;
	int32_t hiwat_reserved3;
} jetsam_priority_entry_t;

enum {
	kJetsamFlagsFrontmost = (1 << 0),
	kJetsamFlagsKilled = (1 << 1)
};
#endif

#include "launch.h"
#include "launch_priv.h"
#include "launch_internal.h"
#include "bootstrap.h"
#include "bootstrap_priv.h"
#include "vproc.h"
#include "vproc_internal.h"

#include "reboot2.h"

#include "launchd.h"
#include "launchd_runtime.h"
#include "launchd_unix_ipc.h"
#include "protocol_vproc.h"
#include "protocol_vprocServer.h"
#include "protocol_job_reply.h"
#include "protocol_job_forward.h"
#include "mach_excServer.h"
#if !TARGET_OS_EMBEDDED
#include "domainServer.h"
#include "init.h"
#endif
#include "eventsServer.h"

#ifndef POSIX_SPAWN_OSX_TALAPP_START
#define POSIX_SPAWN_OSX_TALAPP_START 0x0400
#endif

#ifndef POSIX_SPAWN_OSX_WIDGET_START
#define POSIX_SPAWN_OSX_WIDGET_START 0x0800
#endif

#ifndef POSIX_SPAWN_IOS_APP_START
#define POSIX_SPAWN_IOS_APP_START 0x1000
#endif

/* LAUNCHD_DEFAULT_EXIT_TIMEOUT
 * If the job hasn't exited in the given number of seconds after sending
 * it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
 */
#define LAUNCHD_MIN_JOB_RUN_TIME 10
#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
#define LAUNCHD_SIGKILL_TIMER 2
#define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
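
/* Timeline sketch (inferred from the stop/kill paths below; hedged):
 * job_stop() sends SIGTERM at t = 0 and arms a one-shot EVFILT_TIMER; if
 * the job is still around at t = exit_timeout (LAUNCHD_DEFAULT_EXIT_TIMEOUT
 * seconds unless the plist overrides it), job_kill() sends SIGKILL.
 * LAUNCHD_SIGKILL_TIMER appears to bound how long launchd then waits for
 * the kernel to reap the job before flagging the workaround9359725 case.
 */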

#define SHUTDOWN_LOG_DIR "/var/log/shutdown"

#define TAKE_SUBSET_NAME "TakeSubsetName"
#define TAKE_SUBSET_PID "TakeSubsetPID"
#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"

#define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
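
/* Worked example (illustrative only, not used by launchd): v & (v - 1)
 * clears the lowest set bit of v, so it is zero exactly when v has at most
 * one bit set; the trailing "&& v" rejects v == 0.
 *
 *   IS_POWER_OF_TWO(32) -> !(0x20 & 0x1f) && 0x20 -> true
 *   IS_POWER_OF_TWO(37) -> !(0x25 & 0x24) && 0x25 -> false
 *   IS_POWER_OF_TWO(0)  -> !(0x00 & ~0u)  && 0x00 -> false
 */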

extern char **environ;

struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle;
	mach_port_t reply_port;
};

static bool waiting4removal_new(job_t j, mach_port_t rp);
static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);

struct machservice {
	SLIST_ENTRY(machservice) sle;
	SLIST_ENTRY(machservice) special_port_sle;
	LIST_ENTRY(machservice) name_hash_sle;
	LIST_ENTRY(machservice) port_hash_sle;
	struct machservice *alias;
	job_t job;
	unsigned int gen_num;
	mach_port_name_t port;
	unsigned int
		isActive :1,
		reset :1,
		recv :1,
		hide :1,
		kUNCServer :1,
		per_user_hack :1,
		debug_on_close :1,
		per_pid :1,
		delete_on_destruction :1,
		drain_one_on_crash :1,
		drain_all_on_crash :1,
		event_update_port :1, /* The job which owns this port is the event monitor. */
		upfront :1, /* This service was declared in the plist. */
		event_channel :1, /* The job is to receive events on this channel. */
		/* Don't let this field get too small. It has to be large enough
		 * to represent the reasonable range of special port numbers.
		 */
		special_port_num :18;
	const char name[0];
};

static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */

#define PORT_HASH_SIZE 32
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
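
/* Hedged note: because PORT_HASH_SIZE is a power of two, the ternary above
 * folds at compile time to the mask form, e.g. a port with
 * MACH_PORT_INDEX(x) == 0x1503 lands in bucket 0x1503 & 31 == 3. The modulo
 * arm only exists so the table size can be changed to a non-power-of-two
 * without touching the hash.
 */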

static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];

static void machservice_setup(launch_data_t obj, const char *key, void *context);
static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
static void machservice_resetport(job_t j, struct machservice *ms);
static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
#endif
static void machservice_ignore(job_t j, struct machservice *ms);
static void machservice_watch(job_t j, struct machservice *ms);
static void machservice_delete(job_t j, struct machservice *, bool port_died);
static void machservice_request_notifications(struct machservice *);
static mach_port_t machservice_port(struct machservice *);
static job_t machservice_job(struct machservice *);
static bool machservice_hidden(struct machservice *);
static bool machservice_active(struct machservice *);
static const char *machservice_name(struct machservice *);
static bootstrap_status_t machservice_status(struct machservice *);
void machservice_drain_port(struct machservice *);
static struct machservice *xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p);

struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;
	int *fds;
	unsigned int junkfds:1, fd_cnt:31;
	union {
		const char name[0];
		char name_init[0];
	};
};

static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds);
static void socketgroup_delete(job_t j, struct socketgroup *sg);
static void socketgroup_watch(job_t j, struct socketgroup *sg);
static void socketgroup_ignore(job_t j, struct socketgroup *sg);
static void socketgroup_callback(job_t j);
static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);

struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle;
	SLIST_ENTRY(calendarinterval) sle;
	job_t job;
	struct tm when;
	time_t when_next;
};

static LIST_HEAD(, calendarinterval) sorted_calendar_events;

static bool calendarinterval_new(job_t j, struct tm *w);
static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
static void calendarinterval_callback(void);
static void calendarinterval_sanity_check(void);

struct envitem {
	SLIST_ENTRY(envitem) sle;
	bool one_shot;
	char *value;
	union {
		const char key[0];
		char key_init[0];
	};
};

static bool envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot);
static void envitem_delete(job_t j, struct envitem *ei, bool global);
static void envitem_setup(launch_data_t obj, const char *key, void *context);
static void envitem_setup_one_shot(launch_data_t obj, const char *key, void *context);

struct limititem {
	SLIST_ENTRY(limititem) sle;
	struct rlimit lim;
	unsigned int setsoft:1, sethard:1, which:30;
};

static bool limititem_update(job_t j, int w, rlim_t r);
static void limititem_delete(job_t j, struct limititem *li);
static void limititem_setup(launch_data_t obj, const char *key, void *context);
#if HAVE_SANDBOX
static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
#endif

static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);

typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	CRASHED,
	DID_NOT_CRASH,
	PATH_EXISTS,
	PATH_MISSING,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
	PATH_CHANGES,
	DIR_NOT_EMPTY,
	// FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
} semaphore_reason_t;
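
/* Hedged illustration of where these reasons come from (see
 * launchd.plist(5)): a KeepAlive dictionary such as
 *
 *   <key>KeepAlive</key>
 *   <dict>
 *       <key>SuccessfulExit</key><true/>
 *       <key>PathState</key>
 *       <dict><key>/var/run/foo.pid</key><true/></dict>
 *   </dict>
 *
 * would yield one semaphoreitem with why == SUCCESSFUL_EXIT and one with
 * why == PATH_EXISTS (the path key is hypothetical); the _setup() walkers
 * below perform that translation.
 */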

struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle;
	semaphore_reason_t why;
	bool watching_parent;
	int fd;

	union {
		const char what[0];
		char what_init[0];
	};
};

struct semaphoreitem_dict_iter_context {
	job_t j;
	semaphore_reason_t why_true;
	semaphore_reason_t why_false;
};

static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_callback(job_t j, struct kevent *kev);
static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);

struct externalevent {
	LIST_ENTRY(externalevent) sys_le;
	LIST_ENTRY(externalevent) job_le;
	struct eventsystem *sys;

	uint64_t id;
	job_t job;
	bool state;
	bool wanted_state;
	launch_data_t event;

	char name[0];
};

struct externalevent_iter_ctx {
	job_t j;
	struct eventsystem *sys;
};

static bool externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event);
static void externalevent_delete(struct externalevent *ee);
static void externalevent_setup(launch_data_t obj, const char *key, void *context);
static struct externalevent *externalevent_find(const char *sysname, uint64_t id);

struct eventsystem {
	LIST_ENTRY(eventsystem) global_le;
	LIST_HEAD(, externalevent) events;
	uint64_t curid;
	bool has_updates;
	char name[0];
};

static struct eventsystem *eventsystem_new(const char *name);
static void eventsystem_delete(struct eventsystem *sys);
static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
static struct eventsystem *eventsystem_find(const char *name);
static void eventsystem_ping(void);

#define ACTIVE_JOB_HASH_SIZE 32
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))

#define MACHSERVICE_HASH_SIZE 37

#define LABEL_HASH_SIZE 53
struct jobmgr_s {
	kq_callback kqjobmgr_callback;
	LIST_ENTRY(jobmgr_s) xpc_le;
	SLIST_ENTRY(jobmgr_s) sle;
	SLIST_HEAD(, jobmgr_s) submgrs;
	LIST_HEAD(, job_s) jobs;
	LIST_HEAD(, job_s) jetsam_jobs;

	/* For legacy reasons, we keep all job labels that are imported in the
	 * root job manager's label hash. If a job manager is an XPC domain, then
	 * it gets its own label hash that is separate from the "global" one
	 * stored in the root job manager.
	 */
	LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
	LIST_HEAD(, job_s) global_env_jobs;
	mach_port_t jm_port;
	mach_port_t req_port;
	jobmgr_t parentmgr;
	int reboot_flags;
	time_t shutdown_time;
	unsigned int global_on_demand_cnt;
	unsigned int normal_active_cnt;
	unsigned int jetsam_jobs_cnt;
	unsigned int
		shutting_down :1,
		session_initialized :1,
		killed_stray_jobs :1,
		monitor_shutdown :1,
		shutdown_jobs_dirtied :1,
		shutdown_jobs_cleaned :1,
		xpc_singleton :1;
	uint32_t properties;
	/* XPC-specific properties. */
	char owner[MAXCOMLEN];
	char *shortdesc;
	mach_port_t req_bsport;
	mach_port_t req_excport;
	mach_port_t req_asport;
	pid_t req_pid;
	uid_t req_euid;
	gid_t req_egid;
	au_asid_t req_asid;
	vm_offset_t req_ctx;
	mach_msg_type_number_t req_ctx_sz;
	mach_port_t req_rport;
	kern_return_t error;
	union {
		const char name[0];
		char name_init[0];
	};
};

/* Global XPC domains. */
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static jobmgr_t _s_xpc_system_domain;
static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
#endif

#define jobmgr_assumes(jm, e) \
	(unlikely(!(e)) ? jobmgr_log_bug(jm, __LINE__), false : true)
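
/* Usage sketch: the assumes macros evaluate their expression exactly once,
 * log the failing line via *_log_bug() when it is false, and yield a bool,
 * so call sites read as ordinary control flow:
 *
 *   if (!jobmgr_assumes(jm, launchd_mport_deallocate(p) == KERN_SUCCESS)) {
 *       return;  // already logged; just unwind
 *   }
 *
 * The (void) casts sprinkled through this file silence "value computed is
 * not used" warnings when the result is deliberately ignored.
 */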

static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
static job_t xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
#endif
static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
static jobmgr_t jobmgr_parent(jobmgr_t jm);
static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
static bool jobmgr_label_test(jobmgr_t jm, const char *str);
static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
static void jobmgr_remove(jobmgr_t jm);
static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
static void job_export_all2(jobmgr_t jm, launch_data_t where);
static void jobmgr_callback(void *obj, struct kevent *kev);
static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
/* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
static void jobmgr_log_bug(jobmgr_t jm, unsigned int line);

#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
#define AUTO_PICK_XPC_LABEL (const char *)(~2)

struct suspended_peruser {
	LIST_ENTRY(suspended_peruser) sle;
	job_t j;
};

struct job_s {
	kq_callback kqjob_callback; /* MUST be first element of this structure for benefit of launchd's run loop. */
	LIST_ENTRY(job_s) sle;
	LIST_ENTRY(job_s) subjob_sle;
	LIST_ENTRY(job_s) needing_session_sle;
	LIST_ENTRY(job_s) jetsam_sle;
	LIST_ENTRY(job_s) pid_hash_sle;
	LIST_ENTRY(job_s) label_hash_sle;
	LIST_ENTRY(job_s) global_env_sle;
	SLIST_ENTRY(job_s) curious_jobs_sle;
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;
	LIST_HEAD(, job_s) subjobs;
	LIST_HEAD(, externalevent) events;
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
	job_t alias;
	struct rusage ru;
	cpu_type_t *j_binpref;
	size_t j_binpref_cnt;
	mach_port_t j_port;
	mach_port_t exit_status_dest;
	mach_port_t exit_status_port;
	mach_port_t spawn_reply_port;
	uid_t mach_uid;
	jobmgr_t mgr;
	size_t argc;
	char **argv;
	char *prog;
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdinpath;
	char *stdoutpath;
	char *stderrpath;
	char *alt_exc_handler;
	struct vproc_shmem_s *shmem;
	struct machservice *lastlookup;
	unsigned int lastlookup_gennum;
#if HAVE_SANDBOX
	char *seatbelt_profile;
	uint64_t seatbelt_flags;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p;
	int last_exit_status;
	int stdin_fd;
	int fork_fd;
	int log_redirect_fd;
	int nice;
	int stdout_err_fd;
	uint32_t pstype;
	int32_t jetsam_priority;
	int32_t jetsam_memlimit;
	int32_t jetsam_seq;
	int32_t main_thread_priority;
	uint32_t timeout;
	uint32_t exit_timeout;
	uint64_t sent_signal_time;
	uint64_t start_time;
	uint32_t min_run_time;
	uint32_t start_interval;
	uint32_t peruser_suspend_count; /* The number of jobs that have disabled this per-user launchd. */
	uuid_t instance_id;
	uint32_t fail_cnt;
#if 0
	/* someday ... */
	enum {
		J_TYPE_ANONYMOUS = 1,
		J_TYPE_LANCHSERVICES,
		J_TYPE_MACHINIT,
		J_TYPE_INETD,
	} j_type;
#endif
	bool
		debug :1, /* man launchd.plist --> Debug */
		ondemand :1, /* man launchd.plist --> KeepAlive == false */
		session_create :1, /* man launchd.plist --> SessionCreate */
		low_pri_io :1, /* man launchd.plist --> LowPriorityIO */
		no_init_groups :1, /* man launchd.plist --> InitGroups */
		priv_port_has_senders :1, /* a legacy mach_init concept to make bootstrap_create_server/service() work */
		importing_global_env :1, /* a hack during job importing */
		importing_hard_limits :1, /* a hack during job importing */
		setmask :1, /* man launchd.plist --> Umask */
		anonymous :1, /* a process that launchd knows about, but isn't managed by launchd */
		checkedin :1, /* a legacy mach_init concept to detect sick jobs */
		legacy_mach_job :1, /* a job created via bootstrap_create_server() */
		legacy_LS_job :1, /* a job created via spawn_via_launchd() */
		inetcompat :1, /* a legacy job that wants inetd compatible semantics */
		inetcompat_wait :1, /* a twist on inetd compatibility */
		start_pending :1, /* an event fired and the job should start, but not necessarily right away */
		globargv :1, /* man launchd.plist --> EnableGlobbing */
		wait4debugger :1, /* man launchd.plist --> WaitForDebugger */
		wait4debugger_oneshot :1, /* One-shot WaitForDebugger. */
		internal_exc_handler :1, /* MachExceptionHandler == true */
		stall_before_exec :1, /* a hack to support an option of spawn_via_launchd() */
		only_once :1, /* man launchd.plist --> LaunchOnlyOnce. Note: 5465184 Rename this to "HopefullyNeverExits" */
		currently_ignored :1, /* Make job_ignore() / job_watch() work. If these calls were balanced, then this wouldn't be necessary. */
		forced_peers_to_demand_mode :1, /* A job that forced all other jobs to be temporarily launch-on-demand */
		setnice :1, /* man launchd.plist --> Nice */
		removal_pending :1, /* a job was asked to be unloaded/removed while running, we'll remove it after it exits */
		sent_sigkill :1, /* job_kill() was called */
		debug_before_kill :1, /* enter the kernel debugger before killing a job */
		weird_bootstrap :1, /* a hack that launchd+launchctl use during jobmgr_t creation */
		start_on_mount :1, /* man launchd.plist --> StartOnMount */
		per_user :1, /* This job is a per-user launchd managed by the PID 1 launchd */
		unload_at_mig_return :1, /* The job thoroughly confused launchd; we need to unload it ASAP */
		abandon_pg :1, /* man launchd.plist --> AbandonProcessGroup */
		ignore_pg_at_shutdown :1, /* During shutdown, do not send SIGTERM to stray processes in the process group of this job. */
		poll_for_vfs_changes :1, /* a hack to work around the fact that kqueues don't work on all filesystems */
		deny_job_creation :1, /* Don't let this job create new 'job_t' objects in launchd */
		kill_via_shmem :1, /* man launchd.plist --> EnableTransactions */
		sent_kill_via_shmem :1, /* We need to 'kill_via_shmem' once-and-only-once */
		clean_kill :1, /* The job was sent SIGKILL because it was clean. */
		kill_after_sample :1, /* The job is to be killed after sampling. */
		reap_after_trace :1, /* The job exited before sample did, so we should reap it after sample is done. */
		nosy :1, /* The job has an OtherJobEnabled KeepAlive criterion. */
		crashed :1, /* The job is the default Mach exception handler, and it crashed. */
		reaped :1, /* We've received NOTE_EXIT for the job. */
		stopped :1, /* job_stop() was called. */
		jetsam_frontmost :1, /* The job is considered "frontmost" by Jetsam. */
		needs_kickoff :1, /* The job is to be kept alive continuously, but it must be initially kicked off. */
		is_bootstrapper :1, /* The job is a bootstrapper. */
		has_console :1, /* The job owns the console. */
		embedded_special_privileges :1, /* The job runs as a non-root user on embedded but has select privileges of the root user. */
		did_exec :1, /* The job exec(2)ed successfully. */
		xpcproxy_did_exec :1, /* The job is an XPC service, and XPC proxy successfully exec(3)ed. */
		holds_ref :1, /* The (anonymous) job called vprocmgr_switch_to_session(). */
		jetsam_properties :1, /* The job has Jetsam limits in place. */
		dedicated_instance :1, /* This job was created as the result of a look up of a service provided by a per-lookup job. */
		multiple_instances :1, /* The job supports creating additional instances of itself. */
		former_subjob :1, /* The sub-job was already removed from the parent's list of sub-jobs. */
		event_monitor :1, /* The job is responsible for monitoring external events for this launchd. */
		removing :1, /* A lame hack. */
		disable_aslr :1, /* Disable ASLR when launching this job. */
		xpc_service :1, /* The job is an XPC Service. */
		shutdown_monitor :1, /* The job is the Performance team's shutdown monitor. */
		dirty_at_shutdown :1, /* We should open a transaction for the job when shutdown begins. */
		workaround9359725 :1; /* The job was sent SIGKILL but did not exit in a timely fashion, indicating a kernel bug. */

	mode_t mask;
	pid_t tracing_pid;
	mach_port_t asport;
	/* Only set for per-user launchd's. */
	au_asid_t asid;
	uuid_t expected_audit_uuid;
	const char label[0];
};

static size_t hash_label(const char *label) __attribute__((pure));
static size_t hash_ms(const char *msstr) __attribute__((pure));
static SLIST_HEAD(, job_s) s_curious_jobs;

#define job_assumes(j, e) \
	(unlikely(!(e)) ? job_log_bug(j, __LINE__), false : true)

static void job_import_keys(launch_data_t obj, const char *key, void *context);
static void job_import_bool(job_t j, const char *key, bool value);
static void job_import_string(job_t j, const char *key, const char *value);
static void job_import_integer(job_t j, const char *key, long long value);
static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
static void job_import_array(job_t j, const char *key, launch_data_t value);
static void job_import_opaque(job_t j, const char *key, launch_data_t value);
static bool job_set_global_on_demand(job_t j, bool val);
static const char *job_active(job_t j);
static void job_watch(job_t j);
static void job_ignore(job_t j);
static void job_cleanup_after_tracer(job_t j);
static void job_reap(job_t j);
static bool job_useless(job_t j);
static bool job_keepalive(job_t j);
static void job_dispatch_curious_jobs(job_t j);
static void job_start(job_t j);
static void job_start_child(job_t j) __attribute__((noreturn));
static void job_setup_attributes(job_t j);
static bool job_setup_machport(job_t j);
static kern_return_t job_setup_exit_port(job_t j);
static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
static void job_postfork_become_user(job_t j);
static void job_postfork_test_user(job_t j);
static void job_log_pids_with_weird_uids(job_t j);
static void job_setup_exception_port(job_t j, task_t target_task);
static void job_callback(void *obj, struct kevent *kev);
static void job_callback_proc(job_t j, struct kevent *kev);
static void job_callback_timer(job_t j, void *ident);
static void job_callback_read(job_t j, int ident);
static void job_log_stray_pg(job_t j);
static void job_log_children_without_exec(job_t j);
static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
static job_t job_new_alias(jobmgr_t jm, job_t src);
#endif
static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new_subjob(job_t j, uuid_t identifier);
static void job_kill(job_t j);
static void job_uncork_fork(job_t j);
static void job_log_stdouterr(job_t j);
static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static void job_log_bug(job_t j, unsigned int line);
static void job_log_stdouterr2(job_t j, const char *msg, ...);
static void job_set_exception_port(job_t j, mach_port_t port);
static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
static void job_open_shutdown_transaction(job_t ji);
static void job_close_shutdown_transaction(job_t ji);

static const struct {
	const char *key;
	int val;
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
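
/* Hedged example of how this table is consumed: importing a plist with
 *
 *   <key>SoftResourceLimits</key>
 *   <dict><key>NumberOfFiles</key><integer>512</integer></dict>
 *
 * walks each dictionary key through limititem_setup(), which matches it
 * against launchd_keys2limits[] (LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE here)
 * and records a soft RLIMIT_NOFILE of 512 for the job;
 * job_setup_attributes() later applies it in the child with setrlimit(2).
 */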

static time_t cronemu(int mon, int mday, int hour, int min);
static time_t cronemu_wday(int wday, int hour, int min);
static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
static bool cronemu_hour(struct tm *wtm, int hour, int min);
static bool cronemu_min(struct tm *wtm, int min);

/* These functions are a total nightmare to get to through headers.
 * See rdar://problem/8223092.
 */
typedef __darwin_mach_port_t fileport_t;
#define FILEPORT_NULL ((fileport_t)0)
extern int fileport_makeport(int, fileport_t *);
extern int fileport_makefd(fileport_t);
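
/* Usage sketch (hedged; only the signatures declared above are assumed): a
 * fileport wraps a file descriptor in a Mach send right so it can cross
 * task boundaries without a UNIX-domain socket.
 *
 *   fileport_t fp = FILEPORT_NULL;
 *   if (fileport_makeport(somefd, &fp) == 0) {   // somefd is hypothetical
 *       // ship fp in a Mach message; the receiver recovers a descriptor:
 *       int theirfd = fileport_makefd(fp);
 *   }
 */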

/* miscellaneous file local functions */
static size_t get_kern_max_proc(void);
static int dir_has_files(job_t j, const char *path);
static char **mach_cmd2argv(const char *string);
static size_t our_strhash(const char *s) __attribute__((pure));
static void extract_rcsid_substr(const char *i, char *o, size_t osz);
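
/* For orientation (hedged, inferred from its call sites): mach_cmd2argv()
 * tokenizes a legacy mach_init command line into a NULL-terminated vector,
 * e.g. "/usr/libexec/foo -d" -> { "/usr/libexec/foo", "-d", NULL }. The
 * vector and its strings appear to live in one allocation, which is why
 * job_new_via_mach_init() releases it with a single free(argv).
 */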

void eliminate_double_reboot(void);

/* file local globals */
static size_t total_children;
static size_t total_anon_children;
static mach_port_t the_exception_server;
static job_t workaround_5477111;
static LIST_HEAD(, job_s) s_needing_sessions;
static LIST_HEAD(, eventsystem) _s_event_systems;
static job_t _s_event_monitor;
static job_t _s_shutdown_monitor;
static mach_port_t _s_event_update_port;
mach_port_t g_audit_session_port = MACH_PORT_NULL;
static uint32_t s_jetsam_sequence_id;

#if !TARGET_OS_EMBEDDED
static job_t s_embedded_privileged_job = (job_t)&root_jobmgr;
au_asid_t g_audit_session = AU_DEFAUDITSID;
#else
static job_t s_embedded_privileged_job = NULL;
pid_t g_audit_session = 0;
#endif

static int s_no_hang_fd = -1;

/* process wide globals */
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool g_shutdown_debugging = false;
bool g_verbose_boot = false;
bool g_embedded_privileged_action = false;
bool g_runtime_busy_time = false;

void
job_ignore(job_t j)
{
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;

	if (j->currently_ignored) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring...");

	j->currently_ignored = true;

	if (j->poll_for_vfs_changes) {
		j->poll_for_vfs_changes = false;
		(void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}

	SLIST_FOREACH(sg, &j->sockets, sle) {
		socketgroup_ignore(j, sg);
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		machservice_ignore(j, ms);
	}

	SLIST_FOREACH(si, &j->semaphores, sle) {
		semaphoreitem_ignore(j, si);
	}
}

void
job_watch(job_t j)
{
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;

	if (!j->currently_ignored) {
		return;
	}

	job_log(j, LOG_DEBUG, "Watching...");

	j->currently_ignored = false;

	SLIST_FOREACH(sg, &j->sockets, sle) {
		socketgroup_watch(j, sg);
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		machservice_watch(j, ms);
	}

	SLIST_FOREACH(si, &j->semaphores, sle) {
		semaphoreitem_watch(j, si);
	}
}

void
job_stop(job_t j)
{
	char extralog[100];
	int32_t newval = 1;

	if (unlikely(!j->p || j->stopped || j->anonymous)) {
		return;
	}

#if TARGET_OS_EMBEDDED
	if (g_embedded_privileged_action && s_embedded_privileged_job) {
		if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return;
	}
#endif

	if (j->kill_via_shmem) {
		if (j->shmem) {
			if (!j->sent_kill_via_shmem) {
				j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
				newval = __sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
				j->sent_kill_via_shmem = true;
			} else {
				newval = j->shmem->vp_shmem_transaction_cnt;
			}
		} else {
			newval = -1;
		}
	}
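
	/* Annotation (hedged): newval is the EnableTransactions accounting. A
	 * job with no open transactions decrements 0 -> -1 and is "clean", so
	 * it can be SIGKILLed immediately below; a job with, say, one open
	 * transaction decrements 1 -> 0 and instead gets SIGTERM plus the
	 * exit-timeout timer. Having no shmem page at all is treated as clean
	 * (newval = -1 above).
	 */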

	j->sent_signal_time = runtime_get_opaque_time();

	if (newval < 0) {
		j->clean_kill = true;
		job_kill(j);
	} else {
		(void)job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);

		if (j->exit_timeout) {
			(void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j) != -1);
		} else {
			job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
		}

		if (j->kill_via_shmem) {
			snprintf(extralog, sizeof(extralog), ": %d remaining transactions", newval + 1);
		} else {
			extralog[0] = '\0';
		}

		job_log(j, LOG_DEBUG, "Sent SIGTERM signal%s", extralog);
	}

	j->stopped = true;
}

launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}
	if ((tmp = launch_data_new_integer(j->last_exit_status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->kill_via_shmem && (tmp = launch_data_new_bool(true))) {
		int32_t tmp_cnt = -1;

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);

		if (j->shmem) {
			tmp_cnt = j->shmem->vp_shmem_transaction_cnt;
		}

		if (j->sent_kill_via_shmem) {
			tmp_cnt++;
		}

		if ((tmp = launch_data_new_integer(tmp_cnt))) {
			launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TRANSACTIONCOUNT);
		}
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if (sg->junkfds) {
				continue;
			}
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
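
/* For orientation (hedged): this is the shape of the dictionary handed back
 * to clients such as launchctl; a running job exports roughly
 *
 *   { "Label" = "com.example.foo"; "PID" = 123; "OnDemand" = true;
 *     "LastExitStatus" = 0; "ProgramArguments" = (...); ... }
 *
 * (the label is hypothetical). The label and job manager name
 * ("LimitLoadToSessionType") are always present; the rest is keyed off
 * what the job actually set.
 */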

static void
jobmgr_log_active_jobs(jobmgr_t jm)
{
	const char *why_active;
	jobmgr_t jmi;
	job_t ji;

	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_active_jobs(jmi);
	}

	LIST_FOREACH(ji, &jm->jobs, sle) {
		if ((why_active = job_active(ji))) {
			if (ji->p != 1) {
				job_log(ji, LOG_DEBUG | LOG_CONSOLE, "%s", why_active);
			}
		}
	}
}

static void
jobmgr_still_alive_with_check(jobmgr_t jm)
{
	jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
	jobmgr_log_active_jobs(jm);
}

jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	/* Trim the newline that asctime_r(3) puts there for some reason. */
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	jm->shutting_down = true;

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (jm->parentmgr == NULL && pid1_magic) {
		(void)jobmgr_assumes(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));

		/* Spawn the shutdown monitor. */
		if (_s_shutdown_monitor && !_s_shutdown_monitor->p) {
			job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
			job_dispatch(_s_shutdown_monitor, true);
		}
	}

	return jobmgr_do_garbage_collection(jm);
}

void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
		}
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && !job_assumes(ji, ji->p == 0)) {
			ji->p = 0;
		}
		job_remove(ji);
	}

	if (jm->req_port) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_bsport) == KERN_SUCCESS);
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_excport) == KERN_SUCCESS);
	}
	if (jm->req_asport) {
		(void)jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_asport) == KERN_SUCCESS);
	}
#if !TARGET_OS_EMBEDDED
	if (jm->req_rport) {
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			errno = kr;
			(void)jobmgr_assumes(jm, kr == KERN_SUCCESS);
		}
	}
#endif
	if (jm->req_ctx) {
		(void)jobmgr_assumes(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz) == KERN_SUCCESS);
	}

	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	date[24] = 0;

	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (pid1_magic) {
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		(void)jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		runtime_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}

void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

	if (j->alias) {
		/* HACK: Egregious code duplication. But as with machservice_delete(),
		 * job aliases can't (and shouldn't) have any complex behaviors
		 * associated with them.
		 */
		while ((ms = SLIST_FIRST(&j->machservices))) {
			machservice_delete(j, ms, false);
		}

		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, label_hash_sle);
		free(j);
		return;
	}

#if TARGET_OS_EMBEDDED
	if (g_embedded_privileged_action && s_embedded_privileged_job) {
		if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return;
	}
#endif

	/* Do this BEFORE we check and see whether the job is still active. If we're a
	 * sub-job, we're being removed due to the parent job removing us. Therefore, the
	 * parent job will free itself after this call completes. So if we defer removing
	 * ourselves from the parent's list, we'll crash when we finally get around to it.
	 */
	if (j->dedicated_instance && !j->former_subjob) {
		LIST_REMOVE(j, subjob_sle);
		j->former_subjob = true;
	}

	if (unlikely(j->p)) {
		if (j->anonymous) {
			job_reap(j);
		} else {
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}

			return;
		}
	}

	if (!j->removing) {
		j->removing = true;
		job_dispatch_curious_jobs(j);
	}

	ipc_close_all_with_job(j);

	job_log(j, LOG_INFO, "Total rusage: utime %ld.%06u stime %ld.%06u maxrss %lu ixrss %lu idrss %lu isrss %lu minflt %lu majflt %lu nswap %lu inblock %lu oublock %lu msgsnd %lu msgrcv %lu nsignals %lu nvcsw %lu nivcsw %lu",
			j->ru.ru_utime.tv_sec, j->ru.ru_utime.tv_usec,
			j->ru.ru_stime.tv_sec, j->ru.ru_stime.tv_usec,
			j->ru.ru_maxrss, j->ru.ru_ixrss, j->ru.ru_idrss, j->ru.ru_isrss,
			j->ru.ru_minflt, j->ru.ru_majflt,
			j->ru.ru_nswap, j->ru.ru_inblock, j->ru.ru_oublock,
			j->ru.ru_msgsnd, j->ru.ru_msgrcv,
			j->ru.ru_nsignals, j->ru.ru_nvcsw, j->ru.ru_nivcsw);

	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	if (!job_assumes(j, j->fork_fd == 0)) {
		(void)job_assumes(j, runtime_close(j->fork_fd) != -1);
	}

	if (j->stdin_fd) {
		(void)job_assumes(j, runtime_close(j->stdin_fd) != -1);
	}

	if (!job_assumes(j, j->log_redirect_fd == 0)) {
		(void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
	}

	if (j->j_port) {
		(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	}

	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	struct externalevent *eei = NULL;
	while ((eei = LIST_FIRST(&j->events))) {
		eventsystem_ping();
		externalevent_delete(eei);
	}

#if 0
	/* Event systems exist independently of an actual monitor job. They're
	 * created on-demand when a job has a LaunchEvents dictionary. So we
	 * really don't need to get rid of them.
	 */
	if (j->event_monitor) {
		struct eventsystem *esi = NULL;
		while ((esi = LIST_FIRST(&_s_event_systems))) {
			eventsystem_delete(esi);
		}
	}
#else
	if (false) {
		/* Make gcc happy. */
		eventsystem_delete(NULL);
	}
	if (j->event_monitor) {
		if (_s_event_update_port != MACH_PORT_NULL) {
			(void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
			_s_event_update_port = MACH_PORT_NULL;
		}
		_s_event_monitor = NULL;
	}
#endif

	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	if (j->start_interval) {
		runtime_del_weak_ref();
		(void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
	}
	if (j->poll_for_vfs_changes) {
		(void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}
	if (j->exit_timeout) {
		/* Not a big deal if this fails. It means that the timer's already been freed. */
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->jetsam_properties) {
		LIST_REMOVE(j, jetsam_sle);
		j->mgr->jetsam_jobs_cnt--;
	}
	if (j->asport != MACH_PORT_NULL) {
		(void)job_assumes(j, launchd_mport_deallocate(j->asport) == KERN_SUCCESS);
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if (j->embedded_special_privileges) {
		s_embedded_privileged_job = NULL;
	}
	if (j->shutdown_monitor) {
		_s_shutdown_monitor = NULL;
	}
	if (j->workaround9359725) {
		/* We may have forcibly removed this job by simulating an exit. If this
		 * is the case, we don't want to hear about these events anymore, lest
		 * we get a stale context pointer and crash trying to dereference it.
		 */
		kevent_mod((uintptr_t)j->p, EVFILT_PROC, EV_DELETE, 0, 0, NULL);
	}

	kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	job_t ji = NULL;
	job_t jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
		job_remove(ji);
	}

	job_log(j, LOG_DEBUG, "Removed");

	free(j);
}

void
socketgroup_setup(launch_data_t obj, const char *key, void *context)
{
	launch_data_t tmp_oai;
	job_t j = context;
	size_t i, fd_cnt = 1;
	int *fds;

	if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
		fd_cnt = launch_data_array_get_count(obj);
	}

	fds = alloca(fd_cnt * sizeof(int));

	for (i = 0; i < fd_cnt; i++) {
		if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
			tmp_oai = launch_data_array_get_index(obj, i);
		} else {
			tmp_oai = obj;
		}

		fds[i] = launch_data_get_fd(tmp_oai);
	}

	socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);

	ipc_revoke_fds(obj);
}

bool
job_set_global_on_demand(job_t j, bool val)
{
	if (j->forced_peers_to_demand_mode && val) {
		return false;
	} else if (!j->forced_peers_to_demand_mode && !val) {
		return false;
	}

	if ((j->forced_peers_to_demand_mode = val)) {
		j->mgr->global_on_demand_cnt++;
	} else {
		j->mgr->global_on_demand_cnt--;
	}

	if (j->mgr->global_on_demand_cnt == 0) {
		jobmgr_dispatch_all(j->mgr, false);
	}

	return true;
}
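
/* Worked example (hedged): if two jobs each call
 * job_set_global_on_demand(j, true), global_on_demand_cnt goes 0 -> 1 -> 2
 * and every peer stays launch-on-demand; as each releases it the count
 * falls back, and only the final 1 -> 0 transition re-dispatches the whole
 * job manager.
 */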

bool
job_setup_machport(job_t j)
{
	mach_msg_size_t mxmsgsz;

	if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
		goto out_bad2;
	}

	if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
		(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
		goto out_bad;
	}

	return true;
out_bad2:
	(void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
out_bad:
	return false;
}

kern_return_t
job_setup_exit_port(job_t j)
{
	kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
	if (!job_assumes(j, kr == KERN_SUCCESS)) {
		/* Propagate the failure code; returning MACH_PORT_NULL here would
		 * read as KERN_SUCCESS to the caller.
		 */
		return kr;
	}

	struct mach_port_limits limits = {
		.mpl_qlimit = 1,
	};
	kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
	(void)job_assumes(j, kr == KERN_SUCCESS);

	kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
	if (!job_assumes(j, kr == KERN_SUCCESS)) {
		(void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
		j->exit_status_port = MACH_PORT_NULL;
	}

	return kr;
}

job_t
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
{
	const char **argv = (const char **)mach_cmd2argv(cmd);
	job_t jr = NULL;

	if (!job_assumes(j, argv != NULL)) {
		goto out_bad;
	}

	jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);

	free(argv);

	/* jobs can easily be denied creation during shutdown */
	if (unlikely(jr == NULL)) {
		goto out_bad;
	}

	jr->mach_uid = uid;
	jr->ondemand = ond;
	jr->legacy_mach_job = true;
	jr->abandon_pg = true;
	jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */

	if (!job_setup_machport(jr)) {
		goto out_bad;
	}

	job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");

	return jr;

out_bad:
	if (jr) {
		job_remove(jr);
	}
	return NULL;
}

job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	struct proc_bsdshortinfo proc;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (!jobmgr_assumes(jm, anonpid != 0)) {
		errno = EINVAL;
		return NULL;
	}

	if (!jobmgr_assumes(jm, anonpid < 100000)) {
		/* The kernel currently defines PID_MAX to be 99999, but that define isn't exported. */
		errno = EINVAL;
		return NULL;
	}

	/* libproc returns the number of bytes written into the buffer upon success,
	 * zero on failure.
	 */
	if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes(jm, errno == 0);
		}
		return NULL;
	}

	if (!jobmgr_assumes(jm, proc.pbsi_comm[0] != '\0')) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(proc.pbsi_status == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
	}

	if (unlikely(proc.pbsi_flags & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
	}

	kp_euid = proc.pbsi_uid;
	kp_uid = proc.pbsi_ruid;
	kp_svuid = proc.pbsi_svuid;
	kp_egid = proc.pbsi_gid;
	kp_gid = proc.pbsi_rgid;
	kp_svgid = proc.pbsi_svgid;

	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
	}

	/* "Fix" for a problem that shouldn't even exist.
	 * See rdar://problem/7264615 for the symptom and rdar://problem/5020256
	 * as to why this can happen.
	 */
	if (!jobmgr_assumes(jm, (pid_t)proc.pbsi_ppid != anonpid)) {
		jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). It should find a different way to do whatever it's doing. Setting PPID to 0: %s", proc.pbsi_comm);
		errno = EINVAL;
		return NULL;
	}

	/* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	/* We only set requestor_pid for XPC domains. */
	const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
	if (jobmgr_assumes(jm, (jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL)) != NULL)) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		/* anonymous process reaping is messy */
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1) && job_assumes(jr, errno == ESRCH)) {
			/* zombies are weird */
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state)) {
			job_log(jr, LOG_SCOLDING, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
	}

	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	/* This is down here to mitigate the effects of rdar://problem/7264615, in which a process
	 * attaches to its own parent. We need to make sure that the anonymous job has been added
	 * to the process list so that, if it's used ptrace(3) to cause a cycle in the process
	 * tree (thereby making it not a tree anymore), we'll find the tracing parent PID of the
	 * parent process, which is the child, when we go looking for it in jobmgr_find_by_pid().
	 */
	switch (proc.pbsi_ppid) {
	case 0:
		/* the kernel */
		break;
	case 1:
		if (!pid1_magic) {
			/* we cannot possibly find a parent job_t that is useful in this function */
			break;
		}
		/* fall through */
	default:
		jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
		if (jobmgr_assumes(jm, jp != NULL)) {
			if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
				job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
			}
		}
		break;
	}

	return jr;
}
1724
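/* Editor's note: a standalone sketch of the libproc lookup job_new_anonymous()
 * starts with, assuming only the public proc_pidinfo() interface;
 * check_pid_sketch() is a hypothetical name. Compiled out. */
#if 0
#include <errno.h>
#include <stdio.h>
#include <libproc.h>
#include <sys/proc_info.h>

static int
check_pid_sketch(pid_t pid)
{
	struct proc_bsdshortinfo info;

	/* proc_pidinfo() returns the number of bytes copied out, or zero on
	 * failure with errno set (ESRCH once the process is gone). */
	if (proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 1, &info,
			PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		return (errno == ESRCH) ? 0 : -1;
	}

	printf("pid %d: comm=%s ppid=%u uid=%u\n", pid, info.pbsi_comm,
			info.pbsi_ppid, info.pbsi_uid);
	return 1;
}
#endif
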
1725 job_t
1726 job_new_subjob(job_t j, uuid_t identifier)
1727 {
1728 uuid_string_t idstr;
1729 uuid_unparse(identifier, idstr);
1730 /* snprintf(NULL, 0, ...) just computes the length the formatted label needs. */
1731 size_t label_sz = snprintf(NULL, 0, "%s.%s", j->label, idstr);
1732
1733 job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
1734 if (launchd_assumes(nj != NULL)) {
1735 nj->kqjob_callback = job_callback;
1736 nj->mgr = j->mgr;
1737 nj->min_run_time = j->min_run_time;
1738 nj->timeout = j->timeout;
1739 nj->exit_timeout = j->exit_timeout;
1740
1741 snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
1742
1743 /* Set all our simple Booleans that are applicable. */
1744 nj->debug = j->debug;
1745 nj->ondemand = j->ondemand;
1746 nj->checkedin = true;
1747 nj->low_pri_io = j->low_pri_io;
1748 nj->setmask = j->setmask;
1749 nj->wait4debugger = j->wait4debugger;
1750 nj->internal_exc_handler = j->internal_exc_handler;
1751 nj->setnice = j->setnice;
1752 nj->abandon_pg = j->abandon_pg;
1753 nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
1754 nj->deny_job_creation = j->deny_job_creation;
1755 nj->kill_via_shmem = j->kill_via_shmem;
1756 nj->needs_kickoff = j->needs_kickoff;
1757 nj->currently_ignored = true;
1758 nj->dedicated_instance = true;
1759 nj->xpc_service = j->xpc_service;
1760
1761 nj->mask = j->mask;
1762 uuid_copy(nj->instance_id, identifier);
1763
1764 /* These jobs are purely on-demand Mach jobs. */
1765
1766 /* {Hard | Soft}ResourceLimits are not supported. */
1767
1768 struct machservice *msi = NULL;
1769 SLIST_FOREACH(msi, &j->machservices, sle) {
1770 /* Only copy MachServices that were actually declared in the plist.
1771 * So skip over per-PID ones and ones that were created via
1772 * bootstrap_register().
1773 */
1774 if (msi->upfront) {
1775 mach_port_t mp = MACH_PORT_NULL;
1776 struct machservice *msj = machservice_new(nj, msi->name, &mp, msi->per_pid);
1777 if (job_assumes(nj, msj != NULL)) {
1778 msj->reset = msi->reset;
1779 msj->delete_on_destruction = msi->delete_on_destruction;
1780 msj->drain_one_on_crash = msi->drain_one_on_crash;
1781 msj->drain_all_on_crash = msi->drain_all_on_crash;
1782 }
1783 }
1784 }
1785
1786 if (j->prog) {
1787 nj->prog = strdup(j->prog);
1788 }
1789 if (j->argv) {
1790 size_t sz = malloc_size(j->argv);
1791 nj->argv = (char **)malloc(sz);
1792 if (job_assumes(nj, nj->argv != NULL)) {
1793 /* This is the start of our strings. */
1794 char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
1795
1796 size_t i = 0;
1797 for (i = 0; i < j->argc; i++) {
1798 (void)strcpy(p, j->argv[i]);
1799 nj->argv[i] = p;
1800 p += (strlen(j->argv[i]) + 1);
1801 }
1802 nj->argv[i] = NULL;
1803 }
1804
1805 nj->argc = j->argc;
1806 }
1807
1808 /* We ignore global environment variables. */
1809 struct envitem *ei = NULL;
1810 SLIST_FOREACH(ei, &j->env, sle) {
1811 (void)job_assumes(nj, envitem_new(nj, ei->key, ei->value, false, false));
1812 }
1813 uuid_string_t val;
1814 uuid_unparse(identifier, val);
1815 (void)job_assumes(nj, envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false, false));
1816
1817 if (j->rootdir) {
1818 nj->rootdir = strdup(j->rootdir);
1819 }
1820 if (j->workingdir) {
1821 nj->workingdir = strdup(j->workingdir);
1822 }
1823 if (j->username) {
1824 nj->username = strdup(j->username);
1825 }
1826 if (j->groupname) {
1827 nj->groupname = strdup(j->groupname);
1828 }
1829 /* FIXME: We shouldn't redirect all the output from these jobs to the same
1830 * file. We should uniquify the file names.
1831 */
1832 if (j->stdinpath) {
1833 nj->stdinpath = strdup(j->stdinpath);
1834 }
1835 if (j->stdoutpath) {
1836 nj->stdoutpath = strdup(j->stdoutpath);
1837 }
1838 if (j->stderrpath) {
1839 nj->stderrpath = strdup(j->stderrpath);
1840 }
1841 if (j->alt_exc_handler) {
1842 nj->alt_exc_handler = strdup(j->alt_exc_handler);
1843 }
1844 #if HAVE_SANDBOX
1845 if (j->seatbelt_profile) {
1846 nj->seatbelt_profile = strdup(j->seatbelt_profile);
1847 }
1848 #endif
1849
1850 #if HAVE_QUARANTINE
1851 if (j->quarantine_data) {
1852 nj->quarantine_data = strdup(j->quarantine_data);
1853 }
1854 nj->quarantine_data_sz = j->quarantine_data_sz;
1855 #endif
1856 if (j->j_binpref) {
1857 size_t sz = malloc_size(j->j_binpref);
1858 nj->j_binpref = (cpu_type_t *)malloc(sz);
1859 if (job_assumes(nj, nj->j_binpref)) {
1860 memcpy(nj->j_binpref, j->j_binpref, sz);
1861 }
1862 }
1863
1864 /* JetsamPriority is unsupported. */
1865
1866 if (j->asport != MACH_PORT_NULL) {
1867 (void)job_assumes(nj, launchd_mport_copy_send(j->asport) == KERN_SUCCESS);
1868 nj->asport = j->asport;
1869 }
1870
1871 LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
1872
1873 jobmgr_t where2put = root_jobmgr;
1874 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
1875 where2put = j->mgr;
1876 }
1877 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
1878 LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
1879 }
1880
1881 return nj;
1882 }
1883
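/* Editor's note: the size-then-format idiom job_new_subjob() uses for its
 * label, shown standalone under the assumption of a C99 snprintf();
 * make_sublabel_sketch() is a hypothetical helper. Compiled out. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <uuid/uuid.h>

static char *
make_sublabel_sketch(const char *parent_label, uuid_t instance)
{
	uuid_string_t idstr;
	uuid_unparse(instance, idstr);

	/* With a NULL buffer and zero size, snprintf() just reports how long
	 * the formatted string would be, so the caller can allocate exactly
	 * enough space. */
	int len = snprintf(NULL, 0, "%s.%s", parent_label, idstr);
	char *label = malloc(len + 1);
	if (label != NULL) {
		snprintf(label, len + 1, "%s.%s", parent_label, idstr);
	}
	return label;
}
#endif
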
1884 job_t
1885 job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
1886 {
1887 const char *const *argv_tmp = argv;
1888 char tmp_path[PATH_MAX];
1889 char auto_label[1000];
1890 const char *bn = NULL;
1891 char *co;
1892 size_t minlabel_len;
1893 size_t i, cc = 0;
1894 job_t j;
1895
1896 launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);
1897
1898 if (unlikely(jm->shutting_down)) {
1899 errno = EINVAL;
1900 return NULL;
1901 }
1902
1903 if (unlikely(prog == NULL && argv == NULL)) {
1904 errno = EINVAL;
1905 return NULL;
1906 }
1907
1908 const char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
1909 if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
1910 if (prog) {
1911 bn = prog;
1912 } else {
1913 strlcpy(tmp_path, argv[0], sizeof(tmp_path));
1914 bn = basename(tmp_path); /* prog for auto labels is kp.kp_kproc.p_comm */
1915 }
1916 snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
1917 label = auto_label;
1918 /* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
1919 minlabel_len = strlen(label) + MAXCOMLEN;
1920 } else {
1921 if (label == AUTO_PICK_XPC_LABEL) {
1922 minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
1923 } else {
1924 minlabel_len = strlen(label);
1925 }
1926 }
1927
1928 j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);
1929
1930 if (!jobmgr_assumes(jm, j != NULL)) {
1931 return NULL;
1932 }
1933
1934 if (unlikely(label == auto_label)) {
1935 snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
1936 } else {
1937 strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
1938 }
1939 j->kqjob_callback = job_callback;
1940 j->mgr = jm;
1941 j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
1942 j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
1943 j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
1944 j->currently_ignored = true;
1945 j->ondemand = true;
1946 j->checkedin = true;
1947 j->jetsam_priority = -1;
1948 j->jetsam_memlimit = -1;
1949 j->jetsam_seq = -1;
1950 uuid_clear(j->expected_audit_uuid);
1951
1952 if (prog) {
1953 j->prog = strdup(prog);
1954 if (!job_assumes(j, j->prog != NULL)) {
1955 goto out_bad;
1956 }
1957 }
1958
1959 if (likely(argv)) {
1960 while (*argv_tmp++) {
1961 j->argc++;
1962 }
1963
1964 for (i = 0; i < j->argc; i++) {
1965 cc += strlen(argv[i]) + 1;
1966 }
1967
1968 j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
1969
1970 if (!job_assumes(j, j->argv != NULL)) {
1971 goto out_bad;
1972 }
1973
1974 co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));
1975
1976 for (i = 0; i < j->argc; i++) {
1977 j->argv[i] = co;
1978 strcpy(co, argv[i]);
1979 co += strlen(argv[i]) + 1;
1980 }
1981 j->argv[i] = NULL;
1982 }
1983
1984 if (strcmp(j->label, "com.apple.WindowServer") == 0) {
1985 j->has_console = true;
1986 }
1987
1988 LIST_INSERT_HEAD(&jm->jobs, j, sle);
1989
1990 jobmgr_t where2put_label = root_jobmgr;
1991 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
1992 where2put_label = j->mgr;
1993 }
1994 LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
1995 uuid_clear(j->expected_audit_uuid);
1996
1997 job_log(j, LOG_DEBUG, "Conceived");
1998
1999 return j;
2000
2001 out_bad:
2002 if (j->prog) {
2003 free(j->prog);
2004 }
2005 free(j);
2006
2007 return NULL;
2008 }
2009
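/* Editor's note: the single-allocation argv layout job_new() builds, reduced
 * to a standalone deep copy; argv_pack_sketch() is a hypothetical name.
 * Compiled out. */
#if 0
#include <stdlib.h>
#include <string.h>

static char **
argv_pack_sketch(const char *const *argv, size_t argc)
{
	size_t i, bytes = 0;
	for (i = 0; i < argc; i++) {
		bytes += strlen(argv[i]) + 1;
	}

	/* The NULL-terminated pointer table and the string bytes share one
	 * malloc() block, so a single free() releases everything. */
	char **copy = malloc((argc + 1) * sizeof(char *) + bytes);
	if (copy == NULL) {
		return NULL;
	}

	char *co = (char *)copy + ((argc + 1) * sizeof(char *));
	for (i = 0; i < argc; i++) {
		copy[i] = co;
		strcpy(co, argv[i]);
		co += strlen(argv[i]) + 1;
	}
	copy[argc] = NULL;
	return copy;
}
#endif
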
2010 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
2011 job_t
2012 job_new_alias(jobmgr_t jm, job_t src)
2013 {
2014 job_t j = NULL;
2015 if (job_find(jm, src->label)) {
2016 errno = EEXIST;
2017 } else {
2018 j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2019 if (jobmgr_assumes(jm, j != NULL)) {
2020 strcpy((char *)j->label, src->label);
2021 LIST_INSERT_HEAD(&jm->jobs, j, sle);
2022 LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2023 /* Bad jump address. The kqueue callback for aliases should never be
2024 * invoked.
2025 */
2026 j->kqjob_callback = (kq_callback)0xfa1afe1;
2027 j->alias = src;
2028 j->mgr = jm;
2029
2030 struct machservice *msi = NULL;
2031 SLIST_FOREACH(msi, &src->machservices, sle) {
2032 if (!machservice_new_alias(j, msi)) {
2033 jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2034 errno = EINVAL;
2035 job_remove(j);
2036 j = NULL;
2037 break;
2038 }
2039 }
2040 }
2041
2042 if (j) {
2043 job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2044 }
2045 }
2046
2047 return j;
2048 }
2049 #endif
2050
2051 job_t
2052 job_import(launch_data_t pload)
2053 {
2054 job_t j = jobmgr_import2(root_jobmgr, pload);
2055
2056 if (unlikely(j == NULL)) {
2057 return NULL;
2058 }
2059
2060 /* Since jobs are effectively stalled until they get security sessions assigned
2061 * to them, we may wish to reconsider this behavior of calling the job "enabled"
2062 * as far as other jobs with the OtherJobEnabled KeepAlive criterion set.
2063 */
2064 job_dispatch_curious_jobs(j);
2065 return job_dispatch(j, false);
2066 }
2067
2068 launch_data_t
2069 job_import_bulk(launch_data_t pload)
2070 {
2071 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
2072 job_t *ja;
2073 size_t i, c = launch_data_array_get_count(pload);
2074
2075 ja = alloca(c * sizeof(job_t));
2076
2077 for (i = 0; i < c; i++) {
2078 if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
2079 errno = 0;
2080 }
2081 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
2082 }
2083
2084 for (i = 0; i < c; i++) {
2085 if (likely(ja[i])) {
2086 job_dispatch_curious_jobs(ja[i]);
2087 job_dispatch(ja[i], false);
2088 }
2089 }
2090
2091 return resp;
2092 }
2093
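/* Editor's note: a sketch of reading the per-job errno array that
 * job_import_bulk() returns, assuming only the public <launch.h> accessors;
 * report_bulk_errors_sketch() is a hypothetical name. Compiled out. */
#if 0
#include <stdio.h>
#include <launch.h>

static void
report_bulk_errors_sketch(launch_data_t resp)
{
	size_t i, c = launch_data_array_get_count(resp);
	for (i = 0; i < c; i++) {
		/* Each slot is a LAUNCH_DATA_ERRNO; zero means the job at that
		 * index imported cleanly. */
		launch_data_t e = launch_data_array_get_index(resp, i);
		printf("job %zu: errno %d\n", i, launch_data_get_errno(e));
	}
}
#endif
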
2094 void
2095 job_import_bool(job_t j, const char *key, bool value)
2096 {
2097 bool found_key = false;
2098
2099 switch (key[0]) {
2100 case 'a':
2101 case 'A':
2102 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2103 j->abandon_pg = value;
2104 found_key = true;
2105 }
2106 break;
2107 case 'b':
2108 case 'B':
2109 if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2110 j->dirty_at_shutdown = value;
2111 found_key = true;
2112 }
2113 break;
2114 case 'k':
2115 case 'K':
2116 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2117 j->ondemand = !value;
2118 found_key = true;
2119 }
2120 break;
2121 case 'o':
2122 case 'O':
2123 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
2124 j->ondemand = value;
2125 found_key = true;
2126 }
2127 break;
2128 case 'd':
2129 case 'D':
2130 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
2131 j->debug = value;
2132 found_key = true;
2133 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
2134 (void)job_assumes(j, !value);
2135 found_key = true;
2136 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2137 j->disable_aslr = value;
2138 found_key = true;
2139 }
2140 break;
2141 case 'h':
2142 case 'H':
2143 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
2144 job_log(j, LOG_INFO, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
2145 j->dirty_at_shutdown = value;
2146 found_key = true;
2147 }
2148 break;
2149 case 's':
2150 case 'S':
2151 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
2152 j->session_create = value;
2153 found_key = true;
2154 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2155 j->start_on_mount = value;
2156 found_key = true;
2157 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
2158 /* this only does something on Mac OS X 10.4 "Tiger" */
2159 found_key = true;
2160 } else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
2161 if (_s_shutdown_monitor) {
2162 job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2163 } else {
2164 j->shutdown_monitor = true;
2165 _s_shutdown_monitor = j;
2166 }
2167 found_key = true;
2168 }
2169 break;
2170 case 'l':
2171 case 'L':
2172 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
2173 j->low_pri_io = value;
2174 found_key = true;
2175 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2176 j->only_once = value;
2177 found_key = true;
2178 }
2179 break;
2180 case 'm':
2181 case 'M':
2182 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2183 j->internal_exc_handler = value;
2184 found_key = true;
2185 } else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2186 j->multiple_instances = value;
2187 found_key = true;
2188 }
2189 break;
2190 case 'i':
2191 case 'I':
2192 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2193 if (getuid() != 0) {
2194 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2195 return;
2196 }
2197 j->no_init_groups = !value;
2198 found_key = true;
2199 } else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
2200 j->ignore_pg_at_shutdown = value;
2201 found_key = true;
2202 }
2203 break;
2204 case 'r':
2205 case 'R':
2206 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2207 if (value) {
2208 /* We don't want value == false to change j->start_pending */
2209 j->start_pending = true;
2210 }
2211 found_key = true;
2212 }
2213 break;
2214 case 'e':
2215 case 'E':
2216 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
2217 j->globargv = value;
2218 found_key = true;
2219 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
2220 j->kill_via_shmem = value;
2221 found_key = true;
2222 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2223 j->debug_before_kill = value;
2224 found_key = true;
2225 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
2226 if (!s_embedded_privileged_job) {
2227 j->embedded_special_privileges = value;
2228 s_embedded_privileged_job = j;
2229 } else {
2230 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2231 }
2232 found_key = true;
2233 } else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
2234 if (job_assumes(j, _s_event_monitor == NULL)) {
2235 j->event_monitor = value;
2236 if (value) {
2237 _s_event_monitor = j;
2238 }
2239 } else {
2240 job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility!");
2241 }
2242 found_key = true;
2243 }
2244 break;
2245 case 'w':
2246 case 'W':
2247 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
2248 j->wait4debugger = value;
2249 found_key = true;
2250 }
2251 break;
2252 default:
2253 break;
2254 }
2255
2256 if (unlikely(!found_key)) {
2257 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2258 }
2259 }
2260
2261 void
2262 job_import_string(job_t j, const char *key, const char *value)
2263 {
2264 char **where2put = NULL;
2265
2266 switch (key[0]) {
2267 case 'm':
2268 case 'M':
2269 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2270 where2put = &j->alt_exc_handler;
2271 }
2272 break;
2273 case 'p':
2274 case 'P':
2275 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
2276 return;
2277 } else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0) {
2278 if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
2279 j->pstype = POSIX_SPAWN_OSX_TALAPP_START;
2280 } else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_WIDGET) == 0) {
2281 j->pstype = POSIX_SPAWN_OSX_WIDGET_START;
2282 }
2283 #if TARGET_OS_EMBEDDED
2284 else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_IOSAPP) == 0) {
2285 j->pstype = POSIX_SPAWN_IOS_APP_START;
2286 }
2287 #endif
2288 else {
2289 job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
2290 }
2291 return;
2292 }
2293 break;
2294 case 'l':
2295 case 'L':
2296 if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
2297 return;
2298 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2299 return;
2300 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2301 return;
2302 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2303 return;
2304 }
2305 break;
2306 case 'r':
2307 case 'R':
2308 if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
2309 if (getuid() != 0) {
2310 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2311 return;
2312 }
2313 where2put = &j->rootdir;
2314 }
2315 break;
2316 case 'w':
2317 case 'W':
2318 if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
2319 where2put = &j->workingdir;
2320 }
2321 break;
2322 case 'u':
2323 case 'U':
2324 if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
2325 if (getuid() != 0) {
2326 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2327 return;
2328 } else if (strcmp(value, "root") == 0) {
2329 return;
2330 }
2331 where2put = &j->username;
2332 }
2333 break;
2334 case 'g':
2335 case 'G':
2336 if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
2337 if (getuid() != 0) {
2338 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2339 return;
2340 } else if (strcmp(value, "wheel") == 0) {
2341 return;
2342 }
2343 where2put = &j->groupname;
2344 }
2345 break;
2346 case 's':
2347 case 'S':
2348 if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
2349 where2put = &j->stdoutpath;
2350 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
2351 where2put = &j->stderrpath;
2352 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
2353 where2put = &j->stdinpath;
2354 j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
2355 if (job_assumes(j, j->stdin_fd != -1)) {
2356 /* open() should not block, but regular IO by the job should */
2357 (void)job_assumes(j, fcntl(j->stdin_fd, F_SETFL, 0) != -1);
2358 /* XXX -- EV_CLEAR should make named pipes happy? */
2359 (void)job_assumes(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j) != -1);
2360 } else {
2361 j->stdin_fd = 0;
2362 }
2363 #if HAVE_SANDBOX
2364 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
2365 where2put = &j->seatbelt_profile;
2366 #endif
2367 }
2368 break;
2369 case 'X':
2370 case 'x':
2371 if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
2372 return;
2373 }
2374 break;
2375 default:
2376 job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
2377 break;
2378 }
2379
2380 if (likely(where2put)) {
2381 (void)job_assumes(j, (*where2put = strdup(value)) != NULL);
2382 } else {
2383 /* See rdar://problem/5496612. These two are okay. */
2384 if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) != 0 && strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) != 0) {
2385 job_log(j, LOG_WARNING, "Unknown key: %s", key);
2386 }
2387 }
2388 }
2389
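/* Editor's note: the open-nonblocking-then-restore idiom used above for
 * StandardInPath, standalone. O_NONBLOCK keeps open(2) from hanging on a FIFO
 * that has no writer yet; clearing the status flags afterwards restores normal
 * blocking reads for the job. open_stdin_sketch() is a hypothetical name.
 * Compiled out. */
#if 0
#include <fcntl.h>
#include <sys/stat.h>

static int
open_stdin_sketch(const char *path)
{
	int fd = open(path, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE);
	if (fd == -1) {
		return -1;
	}

	/* Drop O_NONBLOCK (and any other status flags) now that open() has
	 * returned; failure here is non-fatal, the descriptor still works. */
	(void)fcntl(fd, F_SETFL, 0);
	return fd;
}
#endif
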
2390 void
2391 job_import_integer(job_t j, const char *key, long long value)
2392 {
2393 switch (key[0]) {
2394 case 'e':
2395 case 'E':
2396 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
2397 if (unlikely(value < 0)) {
2398 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2399 } else if (unlikely(value > UINT32_MAX)) {
2400 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2401 } else {
2402 j->exit_timeout = (typeof(j->exit_timeout)) value;
2403 }
2404 } else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
2405 j->main_thread_priority = value;
2406 }
2407 break;
2408 case 'j':
2409 case 'J':
2410 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
2411 job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2412
2413 launch_data_t pri = launch_data_new_integer(value);
2414 if (job_assumes(j, pri != NULL)) {
2415 jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2416 launch_data_free(pri);
2417 }
2418 }
break;
2419 case 'n':
2420 case 'N':
2421 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
2422 if (unlikely(value < PRIO_MIN)) {
2423 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2424 } else if (unlikely(value > PRIO_MAX)) {
2425 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2426 } else {
2427 j->nice = (typeof(j->nice)) value;
2428 j->setnice = true;
2429 }
2430 }
2431 break;
2432 case 't':
2433 case 'T':
2434 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
2435 if (unlikely(value < 0)) {
2436 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2437 } else if (unlikely(value > UINT32_MAX)) {
2438 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2439 } else {
2440 j->timeout = (typeof(j->timeout)) value;
2441 }
2442 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2443 if (value < 0) {
2444 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2445 } else if (value > UINT32_MAX) {
2446 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2447 } else {
2448 j->min_run_time = (typeof(j->min_run_time)) value;
2449 }
2450 }
2451 break;
2452 case 'u':
2453 case 'U':
2454 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2455 j->mask = value;
2456 j->setmask = true;
2457 }
2458 break;
2459 case 's':
2460 case 'S':
2461 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
2462 if (unlikely(value <= 0)) {
2463 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2464 } else if (unlikely(value > UINT32_MAX)) {
2465 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2466 } else {
2467 runtime_add_weak_ref();
2468 j->start_interval = (typeof(j->start_interval)) value;
2469
2470 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
2471 }
2472 #if HAVE_SANDBOX
2473 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2474 j->seatbelt_flags = value;
2475 #endif
2476 }
2477
2478 break;
2479 default:
2480 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2481 break;
2482 }
2483 }
2484
2485 void
2486 job_import_opaque(job_t j, const char *key, launch_data_t value)
2488 {
2489 switch (key[0]) {
2490 case 'q':
2491 case 'Q':
2492 #if HAVE_QUARANTINE
2493 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2494 size_t tmpsz = launch_data_get_opaque_size(value);
2495
2496 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2497 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2498 j->quarantine_data_sz = tmpsz;
2499 }
2500 }
2501 #endif
break;
2502 case 's':
2503 case 'S':
2504 if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
2505 size_t tmpsz = launch_data_get_opaque_size(value);
2506 if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
2507 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2508 }
2509 }
2510 break;
2511 default:
2512 break;
2513 }
2514 }
2515
2516 static void
2517 policy_setup(launch_data_t obj, const char *key, void *context)
2518 {
2519 job_t j = context;
2520 bool found_key = false;
2521
2522 switch (key[0]) {
2523 case 'd':
2524 case 'D':
2525 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2526 j->deny_job_creation = launch_data_get_bool(obj);
2527 found_key = true;
2528 }
2529 break;
2530 default:
2531 break;
2532 }
2533
2534 if (unlikely(!found_key)) {
2535 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2536 }
2537 }
2538
2539 void
2540 job_import_dictionary(job_t j, const char *key, launch_data_t value)
2541 {
2542 launch_data_t tmp;
2543
2544 switch (key[0]) {
2545 case 'p':
2546 case 'P':
2547 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2548 launch_data_dict_iterate(value, policy_setup, j);
2549 }
2550 break;
2551 case 'k':
2552 case 'K':
2553 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2554 launch_data_dict_iterate(value, semaphoreitem_setup, j);
2555 }
2556 break;
2557 case 'i':
2558 case 'I':
2559 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2560 j->inetcompat = true;
2561 j->abandon_pg = true;
2562 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2563 j->inetcompat_wait = launch_data_get_bool(tmp);
2564 }
2565 }
2566 break;
2567 case 'j':
2568 case 'J':
2569 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
2570 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2571 }
break;
2572 case 'e':
2573 case 'E':
2574 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2575 launch_data_dict_iterate(value, envitem_setup, j);
2576 }
2577 break;
2578 case 'u':
2579 case 'U':
2580 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2581 j->importing_global_env = true;
2582 launch_data_dict_iterate(value, envitem_setup, j);
2583 j->importing_global_env = false;
2584 }
2585 break;
2586 case 's':
2587 case 'S':
2588 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2589 launch_data_dict_iterate(value, socketgroup_setup, j);
2590 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2591 calendarinterval_new_from_obj(j, value);
2592 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2593 launch_data_dict_iterate(value, limititem_setup, j);
2594 #if HAVE_SANDBOX
2595 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2596 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2597 #endif
2598 }
2599 break;
2600 case 'h':
2601 case 'H':
2602 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2603 j->importing_hard_limits = true;
2604 launch_data_dict_iterate(value, limititem_setup, j);
2605 j->importing_hard_limits = false;
2606 }
2607 break;
2608 case 'm':
2609 case 'M':
2610 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2611 launch_data_dict_iterate(value, machservice_setup, j);
2612 }
2613 break;
2614 case 'l':
2615 case 'L':
2616 if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2617 launch_data_dict_iterate(value, eventsystem_setup, j);
2618 } else {
2619 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2620 return;
2621 }
2622 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2623 return;
2624 }
2625 }
2626 break;
2627 default:
2628 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2629 break;
2630 }
2631 }
2632
2633 void
2634 job_import_array(job_t j, const char *key, launch_data_t value)
2635 {
2636 size_t i, value_cnt = launch_data_array_get_count(value);
2637 const char *str;
2638
2639 switch (key[0]) {
2640 case 'p':
2641 case 'P':
2642 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
2643 return;
2644 }
2645 break;
2646 case 'l':
2647 case 'L':
2648 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2649 return;
2650 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2651 return;
2652 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2653 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2654 return;
2655 }
2656 break;
2657 case 'q':
2658 case 'Q':
2659 if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
2660 for (i = 0; i < value_cnt; i++) {
2661 str = launch_data_get_string(launch_data_array_get_index(value, i));
2662 if (job_assumes(j, str != NULL)) {
2663 semaphoreitem_new(j, DIR_NOT_EMPTY, str);
2664 }
2665 }
2666
2667 }
2668 break;
2669 case 'w':
2670 case 'W':
2671 if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
2672 for (i = 0; i < value_cnt; i++) {
2673 str = launch_data_get_string(launch_data_array_get_index(value, i));
2674 if (job_assumes(j, str != NULL)) {
2675 semaphoreitem_new(j, PATH_CHANGES, str);
2676 }
2677 }
2678 }
2679 break;
2680 case 'b':
2681 case 'B':
2682 if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
2683 socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
2684 } else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
2685 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
2686 j->j_binpref_cnt = value_cnt;
2687 for (i = 0; i < value_cnt; i++) {
2688 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
2689 }
2690 }
2691 }
2692 break;
2693 case 's':
2694 case 'S':
2695 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2696 for (i = 0; i < value_cnt; i++) {
2697 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
2698 }
2699 }
2700 break;
2701 default:
2702 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
2703 break;
2704 }
2705 }
2706
2707 void
2708 job_import_keys(launch_data_t obj, const char *key, void *context)
2709 {
2710 job_t j = context;
2711 launch_data_type_t kind;
2712
2713 if (!launchd_assumes(obj != NULL)) {
2714 return;
2715 }
2716
2717 kind = launch_data_get_type(obj);
2718
2719 switch (kind) {
2720 case LAUNCH_DATA_BOOL:
2721 job_import_bool(j, key, launch_data_get_bool(obj));
2722 break;
2723 case LAUNCH_DATA_STRING:
2724 job_import_string(j, key, launch_data_get_string(obj));
2725 break;
2726 case LAUNCH_DATA_INTEGER:
2727 job_import_integer(j, key, launch_data_get_integer(obj));
2728 break;
2729 case LAUNCH_DATA_DICTIONARY:
2730 job_import_dictionary(j, key, obj);
2731 break;
2732 case LAUNCH_DATA_ARRAY:
2733 job_import_array(j, key, obj);
2734 break;
2735 case LAUNCH_DATA_OPAQUE:
2736 job_import_opaque(j, key, obj);
2737 break;
2738 default:
2739 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
2740 break;
2741 }
2742 }
2743
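/* Editor's note: the iterate-and-dispatch-on-type pattern job_import_keys()
 * implements, reduced to a counting callback over the public <launch.h> API;
 * count_key_sketch() is a hypothetical name. Compiled out. */
#if 0
#include <stdio.h>
#include <launch.h>

static void
count_key_sketch(launch_data_t obj, const char *key, void *context)
{
	size_t *n = context;
	(*n)++;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		printf("%s: bool %d\n", key, launch_data_get_bool(obj));
		break;
	case LAUNCH_DATA_STRING:
		printf("%s: string %s\n", key, launch_data_get_string(obj));
		break;
	default:
		printf("%s: other type\n", key);
		break;
	}
}

/* Usage: size_t n = 0; launch_data_dict_iterate(dict, count_key_sketch, &n); */
#endif
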
2744 job_t
2745 jobmgr_import2(jobmgr_t jm, launch_data_t pload)
2746 {
2747 launch_data_t tmp, ldpa;
2748 const char *label = NULL, *prog = NULL;
2749 const char **argv = NULL;
2750 job_t j;
2751
2752 if (!jobmgr_assumes(jm, pload != NULL)) {
2753 errno = EINVAL;
2754 return NULL;
2755 }
2756
2757 if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
2758 errno = EINVAL;
2759 return NULL;
2760 }
2761
2762 if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
2763 errno = EINVAL;
2764 return NULL;
2765 }
2766
2767 if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
2768 errno = EINVAL;
2769 return NULL;
2770 }
2771
2772 if (unlikely(!(label = launch_data_get_string(tmp)))) {
2773 errno = EINVAL;
2774 return NULL;
2775 }
2776
2777 #if TARGET_OS_EMBEDDED
2778 if (unlikely(g_embedded_privileged_action && s_embedded_privileged_job)) {
2779 if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
2780 errno = EPERM;
2781 return NULL;
2782 }
2783
2784 const char *username = NULL;
2785 if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
2786 username = launch_data_get_string(tmp);
2787 } else {
2788 errno = EPERM;
2789 return NULL;
2790 }
2791
2792 if (!jobmgr_assumes(jm, s_embedded_privileged_job->username != NULL && username != NULL)) {
2793 errno = EPERM;
2794 return NULL;
2795 }
2796
2797 if (unlikely(strcmp(s_embedded_privileged_job->username, username) != 0)) {
2798 errno = EPERM;
2799 return NULL;
2800 }
2801 } else if (g_embedded_privileged_action) {
2802 errno = EINVAL;
2803 return NULL;
2804 }
2805 #endif
2806
2807 if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
2808 (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
2809 prog = launch_data_get_string(tmp);
2810 }
2811
2812 int argc = 0;
2813 if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
2814 size_t i, c;
2815
2816 if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
2817 errno = EINVAL;
2818 return NULL;
2819 }
2820
2821 c = launch_data_array_get_count(ldpa);
2822
2823 argv = alloca((c + 1) * sizeof(char *));
2824
2825 for (i = 0; i < c; i++) {
2826 tmp = launch_data_array_get_index(ldpa, i);
2827
2828 if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
2829 errno = EINVAL;
2830 return NULL;
2831 }
2832
2833 argv[i] = launch_data_get_string(tmp);
2834 }
2835
2836 argv[i] = NULL;
2837 argc = i;
2838 }
2839
2840 if (!prog && argc == 0) {
2841 jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
2842 errno = EINVAL;
2843 return NULL;
2844 }
2845
2846 /* Find the requested session. You cannot load services into XPC domains in
2847 * this manner.
2848 */
2849 launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2850 if (session) {
2851 jobmgr_t jmt = NULL;
2852 if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
2853 jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
2854 if (!jmt) {
2855 jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
2856 } else {
2857 jm = jmt;
2858 }
2859 } else {
2860 jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
2861 }
2862
2863 if (!jmt) {
2864 errno = EINVAL;
2865 return NULL;
2866 }
2867 }
2868
2869 /* For legacy reasons, we have a global hash of all labels in all job
2870 * managers. So rather than make it a global, we store it in the root job
2871 * manager. But for an XPC domain, we store a local hash of all services in
2872 * the domain.
2873 */
2874 jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
2875 if (unlikely((j = job_find(where2look, label)) != NULL)) {
2876 if (jm->xpc_singleton) {
2877 /* There can (and probably will) be multiple attempts to import the
2878 * same XPC service from the same framework. This is okay. It's
2879 * treated as a singleton, so just return the existing one so that
2880 * it may be aliased into the requesting process' XPC domain.
2881 */
2882 return j;
2883 } else {
2884 /* If we're not a global XPC domain, then it's an error to try
2885 * importing the same job/service multiple times.
2886 */
2887 errno = EEXIST;
2888 return NULL;
2889 }
2890 } else if (unlikely(!jobmgr_label_test(where2look, label))) {
2891 errno = EINVAL;
2892 return NULL;
2893 }
2894 jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);
2895
2896 if (likely(j = job_new(jm, label, prog, argv))) {
2897 launch_data_dict_iterate(pload, job_import_keys, j);
2898 if (!uuid_is_null(j->expected_audit_uuid)) {
2899 uuid_string_t uuid_str;
2900 uuid_unparse(j->expected_audit_uuid, uuid_str);
2901 job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
2902 LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
2903 errno = ENEEDAUTH;
2904 } else {
2905 job_log(j, LOG_DEBUG, "No security session specified.");
2906 j->asport = MACH_PORT_NULL;
2907 }
2908
2909 if (j->event_monitor) {
2910 if (job_assumes(j, LIST_FIRST(&j->events) == NULL)) {
2911 struct machservice *msi = NULL;
2912 SLIST_FOREACH(msi, &j->machservices, sle) {
2913 if (msi->event_update_port) {
2914 break;
2915 }
2916 }
2917
2918 if (job_assumes(j, msi != NULL)) {
2919 /* Create our send-once right so we can kick things off. */
2920 (void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
2921 if (!LIST_EMPTY(&_s_event_systems)) {
2922 eventsystem_ping();
2923 }
2924 }
2925 } else {
2926 job_log(j, LOG_ERR, "The event monitor job may not have a LaunchEvents dictionary.");
2927 job_remove(j);
2928 j = NULL;
2929 }
2930 }
2931 }
2932
2933 return j;
2934 }
2935
2936 bool
2937 jobmgr_label_test(jobmgr_t jm, const char *str)
2938 {
2939 char *endstr = NULL;
2940 const char *ptr;
2941
2942 if (str[0] == '\0') {
2943 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
2944 return false;
2945 }
2946
2947 for (ptr = str; *ptr; ptr++) {
2948 if (iscntrl(*ptr)) {
2949 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
2950 return false;
2951 }
2952 }
2953
2954 strtoll(str, &endstr, 0);
2955
2956 if (str != endstr) {
2957 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to begin with numbers: %s", str);
2958 return false;
2959 }
2960
2961 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
2962 (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
2963 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
2964 return false;
2965 }
2966
2967 return true;
2968 }
2969
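/* Editor's note: the strtoll() endptr trick jobmgr_label_test() uses to reject
 * labels that begin with a number, standalone: if strtoll() consumed any
 * characters, endstr moves past the start of the string.
 * label_starts_with_number_sketch() is a hypothetical name. Compiled out. */
#if 0
#include <stdbool.h>
#include <stdlib.h>

static bool
label_starts_with_number_sketch(const char *str)
{
	char *endstr = NULL;
	(void)strtoll(str, &endstr, 0);
	return str != endstr; /* true for "123.foo", false for "com.example.foo" */
}
#endif
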
2970 job_t
2971 job_find(jobmgr_t jm, const char *label)
2972 {
2973 job_t ji;
2974
2975 if (!jm) {
2976 jm = root_jobmgr;
2977 }
2978
2979 LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
2980 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
2981 continue; /* 5351245 and 5488633 respectively */
2982 }
2983
2984 if (strcmp(ji->label, label) == 0) {
2985 return ji;
2986 }
2987 }
2988
2989 errno = ESRCH;
2990 return NULL;
2991 }
2992
2993 /* Should try to consolidate with job_mig_intran2() and jobmgr_find_by_pid(). */
2994 job_t
2995 jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
2996 {
2997 job_t ji = NULL;
2998 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
2999 if (ji->p == p && (!ji->anonymous || anon_okay)) {
3000 return ji;
3001 }
3002 }
3003
3004 jobmgr_t jmi = NULL;
3005 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3006 if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
3007 break;
3008 }
3009 }
3010
3011 return ji;
3012 }
3013
3014 job_t
3015 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3016 {
3017 job_t ji;
3018
3019 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3020 if (ji->p == p) {
3021 return ji;
3022 }
3023 }
3024
3025 return create_anon ? job_new_anonymous(jm, p) : NULL;
3026 }
3027
3028 job_t
3029 job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
3030 {
3031 jobmgr_t jmi;
3032 job_t ji;
3033
3034 if (jm->jm_port == mport) {
3035 return jobmgr_find_by_pid(jm, upid, true);
3036 }
3037
3038 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3039 job_t jr;
3040
3041 if ((jr = job_mig_intran2(jmi, mport, upid))) {
3042 return jr;
3043 }
3044 }
3045
3046 LIST_FOREACH(ji, &jm->jobs, sle) {
3047 if (ji->j_port == mport) {
3048 return ji;
3049 }
3050 }
3051
3052 return NULL;
3053 }
3054
3055 job_t
3056 job_mig_intran(mach_port_t p)
3057 {
3058 struct ldcred *ldc = runtime_get_caller_creds();
3059 job_t jr;
3060
3061 jr = job_mig_intran2(root_jobmgr, p, ldc->pid);
3062
3063 if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
3064 struct proc_bsdshortinfo proc;
3065 if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3066 if (errno != ESRCH) {
3067 (void)jobmgr_assumes(root_jobmgr, errno == 0);
3068 } else {
3069 jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc->pid, ldc->uid, ldc->euid, p, proc.pbsi_comm);
3070 }
3071 }
3072 }
3073
3074 return jr;
3075 }
3076
3077 job_t
3078 job_find_by_service_port(mach_port_t p)
3079 {
3080 struct machservice *ms;
3081
3082 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
3083 if (ms->recv && (ms->port == p)) {
3084 return ms->job;
3085 }
3086 }
3087
3088 return NULL;
3089 }
3090
3091 void
3092 job_mig_destructor(job_t j)
3093 {
3094 /*
3095 * 5477111
3096 *
3097 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
3098 */
3099
3100 if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
3101 job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
3102 job_remove(j);
3103 }
3104
3105 workaround_5477111 = NULL;
3106
3107 calendarinterval_sanity_check();
3108 }
3109
3110 void
3111 job_export_all2(jobmgr_t jm, launch_data_t where)
3112 {
3113 jobmgr_t jmi;
3114 job_t ji;
3115
3116 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3117 job_export_all2(jmi, where);
3118 }
3119
3120 LIST_FOREACH(ji, &jm->jobs, sle) {
3121 launch_data_t tmp;
3122
3123 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3124 launch_data_dict_insert(where, tmp, ji->label);
3125 }
3126 }
3127 }
3128
3129 launch_data_t
3130 job_export_all(void)
3131 {
3132 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3133
3134 if (launchd_assumes(resp != NULL)) {
3135 job_export_all2(root_jobmgr, resp);
3136 }
3137
3138 return resp;
3139 }
3140
3141 void
3142 job_log_stray_pg(job_t j)
3143 {
3144 pid_t *pids = NULL;
3145 size_t len = sizeof(pid_t) * get_kern_max_proc();
3146 int i = 0, kp_cnt = 0;
3147
3148 if (!do_apple_internal_logging) {
3149 return;
3150 }
3151
3152 runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);
3153
3154 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
3155 return;
3156 }
3157 if (!job_assumes(j, (kp_cnt = proc_listpgrppids(j->p, pids, len)) != -1)) {
3158 goto out;
3159 }
3160
3161 for (i = 0; i < kp_cnt; i++) {
3162 pid_t p_i = pids[i];
3163 if (p_i == j->p) {
3164 continue;
3165 } else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
3166 continue;
3167 }
3168
3169 struct proc_bsdshortinfo proc;
3170 if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3171 if (errno != ESRCH) {
3172 (void)job_assumes(j, errno == 0);
3173 }
3174 continue;
3175 }
3176
3177 pid_t pp_i = proc.pbsi_ppid;
3178 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
3179 const char *n = proc.pbsi_comm;
3180
3181 job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
3182 }
3183
3184 out:
3185 free(pids);
3186 }
3187
3188 void
3189 job_reap(job_t j)
3190 {
3191 struct rusage ru;
3192 int status;
3193
3194 bool is_system_bootstrapper = j->is_bootstrapper && pid1_magic && !j->mgr->parentmgr;
3195
3196 job_log(j, LOG_DEBUG, "Reaping");
3197
3198 if (j->shmem) {
3199 (void)job_assumes(j, vm_deallocate(mach_task_self(), (vm_address_t)j->shmem, getpagesize()) == 0);
3200 j->shmem = NULL;
3201 }
3202
3203 if (unlikely(j->weird_bootstrap)) {
3204 int64_t junk = 0;
3205 job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
3206 }
3207
3208 if (j->log_redirect_fd && !j->legacy_LS_job) {
3209 job_log_stdouterr(j); /* one last chance */
3210
3211 if (j->log_redirect_fd) {
3212 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
3213 j->log_redirect_fd = 0;
3214 }
3215 }
3216
3217 if (j->fork_fd) {
3218 (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
3219 j->fork_fd = 0;
3220 }
3221
3222 if (j->anonymous) {
3223 status = 0;
3224 memset(&ru, 0, sizeof(ru));
3225 } else {
3226 /*
3227 * The job is dead. While the PID/PGID is still known to be
3228 * valid, try to kill abandoned descendant processes.
3229 */
3230 job_log_stray_pg(j);
3231 if (!j->abandon_pg) {
3232 if (unlikely(runtime_killpg(j->p, SIGTERM) == -1 && errno != ESRCH)) {
3233 #ifdef __LP64__
3234 job_log(j, LOG_APPLEONLY, "Bug: 5487498");
3235 #else
3236 (void)job_assumes(j, false);
3237 #endif
3238 }
3239 }
3240
3241 /* We have to work around one of two kernel bugs here. ptrace(3) may
3242 * have abducted the child away from us and reparented it to the tracing
3243 * process. If the process then exits, we still get NOTE_EXIT, but we
3244 * cannot reap it because the kernel may not have restored the true
3245 * parent/child relationship in time.
3246 *
3247 * See <rdar://problem/5020256>.
3248 *
3249 * The other bug is if the shutdown monitor has suspended a task and not
3250 * resumed it before exiting. In this case, the kernel will not clean up
3251 * after the shutdown monitor. It will, instead, leave the task
3252 * suspended and not process any pending signals on the event loop
3253 * for the task.
3254 *
3255 * There are a variety of other kernel bugs that could prevent a process
3256 * from exiting, usually having to do with faulty hardware or talking to
3257 * misbehaving drivers that mark a thread as uninterruptible and
3258 * deadlock/hang before unmarking it as such. So we have to work around
3259 * that too.
3260 *
3261 * See <rdar://problem/9284889&9359725>.
3262 */
3263 if (j->workaround9359725) {
3264 job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
3265 status = W_EXITCODE(-1, SIGSEGV);
3266 memset(&ru, 0, sizeof(ru));
3267 } else if (wait4(j->p, &status, 0, &ru) == -1) {
3268 job_log(j, LOG_NOTICE, "Assuming job exited: <rdar://problem/5020256>: %d: %s", errno, strerror(errno));
3269 status = W_EXITCODE(-1, SIGSEGV);
3270 memset(&ru, 0, sizeof(ru));
3271 }
3272 }
3273
3274 if (j->exit_timeout) {
3275 kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
3276 }
3277
3278 LIST_REMOVE(j, pid_hash_sle);
3279
3280 if (j->sent_signal_time) {
3281 uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);
3282
3283 td_sec = td / NSEC_PER_SEC;
3284 td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;
3285
3286 job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
3287 }
3288
3289 timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
3290 timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
3291 j->ru.ru_maxrss += ru.ru_maxrss;
3292 j->ru.ru_ixrss += ru.ru_ixrss;
3293 j->ru.ru_idrss += ru.ru_idrss;
3294 j->ru.ru_isrss += ru.ru_isrss;
3295 j->ru.ru_minflt += ru.ru_minflt;
3296 j->ru.ru_majflt += ru.ru_majflt;
3297 j->ru.ru_nswap += ru.ru_nswap;
3298 j->ru.ru_inblock += ru.ru_inblock;
3299 j->ru.ru_oublock += ru.ru_oublock;
3300 j->ru.ru_msgsnd += ru.ru_msgsnd;
3301 j->ru.ru_msgrcv += ru.ru_msgrcv;
3302 j->ru.ru_nsignals += ru.ru_nsignals;
3303 j->ru.ru_nvcsw += ru.ru_nvcsw;
3304 j->ru.ru_nivcsw += ru.ru_nivcsw;
3305
3306 if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
3307 int level = LOG_WARNING;
3308 if (!j->did_exec && (j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
3309 level = LOG_DEBUG;
3310 }
3311
3312 job_log(j, level, "Exited with code: %d", WEXITSTATUS(status));
3313 } else {
3314 j->fail_cnt = 0;
3315 }
3316
3317 if (WIFSIGNALED(status)) {
3318 int s = WTERMSIG(status);
3319 if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
3320 job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
3321 } else if (!j->stopped && !j->clean_kill) {
3322 switch (s) {
3323 /* Signals which indicate a crash. */
3324 case SIGILL:
3325 case SIGABRT:
3326 case SIGFPE:
3327 case SIGBUS:
3328 case SIGSEGV:
3329 case SIGSYS:
3330 /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
3331 * SIGTRAP, assume that it's a crash.
3332 */
3333 case SIGTRAP:
3334 j->crashed = true;
3335 job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
3336 break;
3337 default:
3338 job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
3339 break;
3340 }
3341
3342 if (is_system_bootstrapper && j->crashed) {
3343 job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
3344 }
3345 }
3346 }
3347
3348 j->reaped = true;
3349
3350 struct machservice *msi = NULL;
3351 if (j->crashed || !(j->did_exec || j->anonymous)) {
3352 SLIST_FOREACH(msi, &j->machservices, sle) {
3353 if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
3354 machservice_drain_port(msi);
3355 }
3356
3357 if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
3358 machservice_resetport(j, msi);
3359 }
3360 }
3361 }
3362
3363 /* HACK: Essentially duplicating the logic directly above. But this has
3364 * gotten really hairy, and I don't want to try consolidating it right now.
3365 */
3366 if (j->xpc_service && !j->xpcproxy_did_exec) {
3367 job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
3368 SLIST_FOREACH(msi, &j->machservices, sle) {
3369 /* Reset the port, discarding any queued messages. If xpcproxy could
3370 * not exec(3), then we don't want to continue trying, since there
3371 * is very likely a serious configuration error with the service.
3372 *
3373 * <rdar://problem/8986802>
3374 */
3375 machservice_resetport(j, msi);
3376 }
3377 }
3378
3379 struct suspended_peruser *spi = NULL;
3380 while ((spi = LIST_FIRST(&j->suspended_perusers))) {
3381 job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
3382 spi->j->peruser_suspend_count--;
3383 if (spi->j->peruser_suspend_count == 0) {
3384 job_dispatch(spi->j, false);
3385 }
3386 LIST_REMOVE(spi, sle);
3387 free(spi);
3388 }
3389
3390 j->last_exit_status = status;
3391
3392 if (j->exit_status_dest) {
3393 errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
3394 if (errno && errno != MACH_SEND_INVALID_DEST) {
3395 (void)job_assumes(j, errno == 0);
3396 }
3397
3398 j->exit_status_dest = MACH_PORT_NULL;
3399 }
3400
3401 if (j->spawn_reply_port) {
3402 /* If the child never called exec(3), we must send a spawn() reply so
3403 * that the requestor can get exit status from it. If we fail to send
3404 * the reply for some reason, we have to deallocate the exit status port
3405 * ourselves.
3406 */
3407 kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3408 if (kr) {
3409 if (kr != MACH_SEND_INVALID_DEST) {
3410 errno = kr;
3411 (void)job_assumes(j, errno == KERN_SUCCESS);
3412 }
3413
3414 (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
3415 }
3416
3417 j->exit_status_port = MACH_PORT_NULL;
3418 j->spawn_reply_port = MACH_PORT_NULL;
3419 }
3420
3421 if (j->anonymous) {
3422 total_anon_children--;
3423 if (j->holds_ref) {
3424 runtime_del_ref();
3425 }
3426 } else {
3427 runtime_del_ref();
3428 total_children--;
3429 }
3430
3431 if (j->has_console) {
3432 g_wsp = 0;
3433 }
3434
3435 if (j->shutdown_monitor) {
3436 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
3437 _s_shutdown_monitor = NULL;
3438 j->shutdown_monitor = false;
3439 }
3440
3441 if (j->event_monitor && !j->mgr->shutting_down) {
3442 msi = NULL;
3443 SLIST_FOREACH(msi, &j->machservices, sle) {
3444 if (msi->event_update_port) {
3445 break;
3446 }
3447 }
3448 /* Only do this if we've gotten the port-destroyed notification already.
3449 * If we haven't yet, the port destruction handler will do this.
3450 */
3451 if (job_assumes(j, msi != NULL) && !msi->isActive) {
3452 if (_s_event_update_port == MACH_PORT_NULL) {
3453 (void)job_assumes(j, launchd_mport_make_send_once(msi->port, &_s_event_update_port) == KERN_SUCCESS);
3454 }
3455 eventsystem_ping();
3456 }
3457 }
3458
3459 if (!j->anonymous) {
3460 j->mgr->normal_active_cnt--;
3461 }
3462 j->sent_signal_time = 0;
3463 j->sent_sigkill = false;
3464 j->clean_kill = false;
3465 j->sent_kill_via_shmem = false;
3466 j->lastlookup = NULL;
3467 j->lastlookup_gennum = 0;
3468 j->p = 0;
3469 }
3470
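/* Editor's note: decoding a wait4(2) status word the way job_reap() does,
 * with the standard <sys/wait.h> macros; describe_status_sketch() is a
 * hypothetical name. Compiled out. */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>

static void
describe_status_sketch(int status)
{
	if (WIFEXITED(status)) {
		printf("exited with code %d\n", WEXITSTATUS(status));
	} else if (WIFSIGNALED(status)) {
		/* Per the logic above, SIGILL/SIGABRT/SIGFPE/SIGBUS/SIGSEGV/
		 * SIGSYS (and SIGTRAP on NOTE_EXIT) are treated as crashes. */
		printf("killed by signal: %s\n", strsignal(WTERMSIG(status)));
	}
}
#endif
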
3471 void
3472 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3473 {
3474 jobmgr_t jmi, jmn;
3475 job_t ji, jn;
3476
3477 if (jm->shutting_down) {
3478 return;
3479 }
3480
3481 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3482 jobmgr_dispatch_all(jmi, newmounthack);
3483 }
3484
3485 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3486 if (newmounthack && ji->start_on_mount) {
3487 ji->start_pending = true;
3488 }
3489
3490 job_dispatch(ji, false);
3491 }
3492 }
3493
3494 void
3495 job_dispatch_curious_jobs(job_t j)
3496 {
3497 job_t ji = NULL, jt = NULL;
3498 SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
3499 struct semaphoreitem *si = NULL;
3500 SLIST_FOREACH(si, &ji->semaphores, sle) {
3501 if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
3502 continue;
3503 }
3504
3505 if (strcmp(si->what, j->label) == 0) {
3506 job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);
3507
3508 if (!ji->removing) {
3509 job_dispatch(ji, false);
3510 } else {
3511 job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
3512 }
3513
3514 /* ji could be removed here, so don't do anything with it or its semaphores
3515 * after this point.
3516 */
3517 break;
3518 }
3519 }
3520 }
3521 }
3522
3523 job_t
3524 job_dispatch(job_t j, bool kickstart)
3525 {
3526 /* Don't dispatch a job if it has no audit session set. */
3527 if (!uuid_is_null(j->expected_audit_uuid)) {
3528 return NULL;
3529 }
3530 if (j->alias) {
3531 j = j->alias;
3532 }
3533
3534 #if TARGET_OS_EMBEDDED
3535 if (g_embedded_privileged_action && s_embedded_privileged_job) {
3536 if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
3537 errno = EPERM;
3538 return NULL;
3539 }
3540
3541 if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
3542 errno = EPERM;
3543 return NULL;
3544 }
3545 } else if (g_embedded_privileged_action) {
3546 errno = EINVAL;
3547 return NULL;
3548 }
3549 #endif
3550
3551 /*
3552 * The whole job removal logic needs to be consolidated. The fact that
3553 * a job can be removed from just about anywhere makes it easy to have
3554 * stale pointers left behind somewhere on the stack that might get
3555 * used after the deallocation, particularly during job iteration.
3556 *
3557 * This is a classic example. The act of dispatching a job may delete it.
3558 */
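/* An illustrative sketch (not launchd code) of the hazard described above:
 * when a loop body can remove the current element, the _SAFE iteration
 * variants from <sys/queue.h> must be used so the successor pointer is
 * captured before the element may be freed. This is the same pattern
 * jobmgr_dispatch_all() uses.
 */
#if 0
job_t ji = NULL, jn = NULL;
LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
	/* job_dispatch() may call job_remove(), freeing ji; jn was saved
	 * before the body ran, so iteration remains valid. */
	(void)job_dispatch(ji, false);
}
#endif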
3559 if (!job_active(j)) {
3560 if (job_useless(j)) {
3561 job_remove(j);
3562 return NULL;
3563 }
3564 if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
3565 return NULL;
3566 }
3567
3568 if (kickstart || job_keepalive(j)) {
3569 job_log(j, LOG_DEBUG, "Starting job (kickstart = %s)", kickstart ? "true" : "false");
3570 job_start(j);
3571 } else {
3572 job_log(j, LOG_DEBUG, "Watching job (kickstart = %s)", kickstart ? "true" : "false");
3573 job_watch(j);
3574
3575 /*
3576 * 5455720
3577 *
3578 * Path checking and monitoring is really racy right now.
3579 * We should clean this up post Leopard.
3580 */
3581 if (job_keepalive(j)) {
3582 job_start(j);
3583 }
3584 }
3585 } else {
3586 job_log(j, LOG_DEBUG, "Tried to dispatch an already active job (%s).", job_active(j));
3587 }
3588
3589 return j;
3590 }
3591
3592 void
3593 job_log_stdouterr2(job_t j, const char *msg, ...)
3594 {
3595 struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
3596 va_list ap;
3597
3598 va_start(ap, msg);
3599 runtime_vsyslog(&attr, msg, ap);
3600 va_end(ap);
3601 }
3602
3603 void
3604 job_log_stdouterr(job_t j)
3605 {
3606 char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
3607 bool close_log_redir = false;
3608 ssize_t rsz;
3609
3610 if (!job_assumes(j, buf != NULL)) {
3611 return;
3612 }
3613
3614 bufindex = buf;
3615
3616 rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);
3617
3618 if (unlikely(rsz == 0)) {
3619 job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
3620 close_log_redir = true;
3621 } else if (rsz == -1) {
3622 if (!job_assumes(j, errno == EAGAIN)) {
3623 close_log_redir = true;
3624 }
3625 } else {
3626 buf[rsz] = '\0';
3627
3628 while ((msg = strsep(&bufindex, "\n\r"))) {
3629 if (msg[0]) {
3630 job_log_stdouterr2(j, "%s", msg);
3631 }
3632 }
3633 }
3634
3635 free(buf);
3636
3637 if (unlikely(close_log_redir)) {
3638 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
3639 j->log_redirect_fd = 0;
3640 job_dispatch(j, false);
3641 }
3642 }
3643
3644 void
3645 job_kill(job_t j)
3646 {
3647 if (unlikely(!j->p || j->anonymous)) {
3648 return;
3649 }
3650
3651 (void)job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);
3652
3653 j->sent_sigkill = true;
3654 (void)job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j) != -1);
3655
3656 job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
3657 }
3658
3659 void
3660 job_open_shutdown_transaction(job_t j)
3661 {
3662 if (j->kill_via_shmem) {
3663 if (j->shmem) {
3664 job_log(j, LOG_DEBUG, "Opening shutdown transaction for job.");
3665 (void)__sync_add_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
3666 } else {
3667 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it has not set up shared memory. Treating normally.");
3668 j->dirty_at_shutdown = false;
3669 }
3670 } else {
3671 job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
3672 j->dirty_at_shutdown = false;
3673 }
3674 }
3675
3676 void
3677 job_close_shutdown_transaction(job_t j)
3678 {
3679 if (j->dirty_at_shutdown) {
3680 job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
3681 if (__sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1) == -1) {
3682 job_log(j, LOG_DEBUG, "Job is now clean. Killing.");
3683 job_kill(j);
3684 }
3685 j->dirty_at_shutdown = false;
3686 }
3687 }
3688
3689 void
3690 job_log_children_without_exec(job_t j)
3691 {
3692 pid_t *pids = NULL;
3693 size_t len = sizeof(pid_t) * get_kern_max_proc();
3694 int i = 0, kp_cnt = 0;
3695
3696 if (!do_apple_internal_logging || j->anonymous || j->per_user) {
3697 return;
3698 }
3699
3700 if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
3701 return;
3702 }
3703 if (!job_assumes(j, (kp_cnt = proc_listchildpids(j->p, pids, len)) != -1)) {
3704 goto out;
3705 }
3706
3707 for (i = 0; i < kp_cnt; i++) {
3708 struct proc_bsdshortinfo proc;
3709 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
3710 if (errno != ESRCH) {
3711 (void)job_assumes(j, errno == 0);
3712 }
3713 continue;
3714 }
3715 if (proc.pbsi_flags & P_EXEC) {
3716 continue;
3717 }
3718
3719 job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
3720 }
3721
3722 out:
3723 free(pids);
3724 }
3725
3726 void
3727 job_cleanup_after_tracer(job_t j)
3728 {
3729 j->tracing_pid = 0;
3730 if (j->reap_after_trace) {
3731 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Reaping job now that attached tracer is gone.");
3732 struct kevent kev;
3733 EV_SET(&kev, j->p, 0, 0, NOTE_EXIT, 0, 0);
3734
3735 /* Fake a kevent to keep our logic consistent. */
3736 job_callback_proc(j, &kev);
3737
3738 /* Normally, after getting a EVFILT_PROC event, we do garbage collection
3739 * on the root job manager. To make our fakery complete, we will do garbage
3740 * collection at the beginning of the next run loop cycle (after we're done
3741 * draining the current queue of kevents).
3742 */
3743 (void)job_assumes(j, kevent_mod((uintptr_t)&root_jobmgr->reboot_flags, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, root_jobmgr) != -1);
3744 }
3745 }
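/* The 1-nanosecond EV_ONESHOT timer above is a deferral idiom: the timer
 * event is not delivered until the current batch of kevents has been
 * drained, so an effectively-immediate oneshot timer schedules work for
 * the next pass through the run loop. A minimal sketch with hypothetical
 * identifiers (kq is the process's kqueue descriptor):
 */
#if 0
static int deferral_token;
struct kevent kev;
EV_SET(&kev, (uintptr_t)&deferral_token, EVFILT_TIMER,
	EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, NULL);
(void)kevent(kq, &kev, 1, NULL, 0, NULL);
/* The EVFILT_TIMER event for &deferral_token arrives on a later kevent()
 * call, after the currently queued events have been processed. */
#endif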
3746
3747 void
3748 job_callback_proc(job_t j, struct kevent *kev)
3749 {
3750 bool program_changed = false;
3751 int fflags = kev->fflags;
3752
3753 job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
3754 log_kevent_struct(LOG_DEBUG, kev, 0);
3755
3756 if (fflags & NOTE_EXIT) {
3757 if (j->p == (pid_t)kev->ident && !j->anonymous) {
3758 /* Note that the third argument to proc_pidinfo() is a magic argument for
3759 * PROC_PIDT_SHORTBSDINFO. Specifically, passing 1 means "don't fail on a zombie
3760 * PID".
3761 */
3762 struct proc_bsdshortinfo proc;
3763 if (job_assumes(j, proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0)) {
3764 if (!job_assumes(j, (pid_t)proc.pbsi_ppid == getpid())) {
3765 /* Someone has attached to the process with ptrace(). There's a race here.
3766 * If we determine that we are not the parent process and then fail to attach
3767 * a kevent to the parent PID (who is probably using ptrace()), we can take that as an
3768 * indication that the parent exited between sysctl(3) and kevent_mod(). The
3769 * reparenting of the PID should be atomic to us, so in that case, we reap the
3770 * job as normal.
3771 *
3772 * Otherwise, we wait for the death of the parent tracer and then reap, just as we
3773 * would if a job died while we were sampling it at shutdown.
3774 *
3775 * Note that we foolishly assume that in the process *tree* a node cannot be its
3776 * own parent. Apparently, that is not correct. If this is the case, we forsake
3777 * the process to its own devices. Let it reap itself.
3778 */
3779 if (!job_assumes(j, proc.pbsi_ppid != kev->ident)) {
3780 job_log(j, LOG_WARNING, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
3781 return;
3782 }
3783 if (job_assumes(j, kevent_mod(proc.pbsi_ppid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j) != -1)) {
3784 j->tracing_pid = proc.pbsi_ppid;
3785 j->reap_after_trace = true;
3786 return;
3787 }
3788 }
3789 }
3790 } else if (!j->anonymous) {
3791 if (j->tracing_pid == (pid_t)kev->ident) {
3792 job_cleanup_after_tracer(j);
3793
3794 return;
3795 } else if (j->tracing_pid && !j->reap_after_trace) {
3796 /* The job exited before our sample completed. */
3797 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job has exited. Will reap after tracing PID %i exits.", j->tracing_pid);
3798 j->reap_after_trace = true;
3799 return;
3800 }
3801 }
3802 }
3803
3804 if (fflags & NOTE_EXEC) {
3805 program_changed = true;
3806
3807 if (j->anonymous) {
3808 struct proc_bsdshortinfo proc;
3809 if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
3810 char newlabel[1000];
3811
3812 snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);
3813
3814 job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);
3815 j->lastlookup = NULL;
3816 j->lastlookup_gennum = 0;
3817
3818 LIST_REMOVE(j, label_hash_sle);
3819 strcpy((char *)j->label, newlabel);
3820
3821 jobmgr_t where2put = root_jobmgr;
3822 if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
3823 where2put = j->mgr;
3824 }
3825 LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
3826 } else if (errno != ESRCH) {
3827 (void)job_assumes(j, errno == 0);
3828 }
3829 } else {
3830 if (j->spawn_reply_port) {
3831 errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3832 if (errno) {
3833 if (errno != MACH_SEND_INVALID_DEST) {
3834 (void)job_assumes(j, errno == KERN_SUCCESS);
3835 }
3836 (void)job_assumes(j, launchd_mport_close_recv(j->exit_status_port) == KERN_SUCCESS);
3837 }
3838
3839 j->spawn_reply_port = MACH_PORT_NULL;
3840 j->exit_status_port = MACH_PORT_NULL;
3841 }
3842
3843 if (j->xpc_service && j->did_exec) {
3844 j->xpcproxy_did_exec = true;
3845 }
3846
3847 j->did_exec = true;
3848 job_log(j, LOG_DEBUG, "Program changed");
3849 }
3850 }
3851
3852 if (fflags & NOTE_FORK) {
3853 job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
3854 job_log_children_without_exec(j);
3855 }
3856
3857 if (fflags & NOTE_EXIT) {
3858 job_reap(j);
3859
3860 if (j->anonymous) {
3861 job_remove(j);
3862 j = NULL;
3863 } else {
3864 j = job_dispatch(j, false);
3865 }
3866 }
3867 }
3868
3869 void
3870 job_callback_timer(job_t j, void *ident)
3871 {
3872 if (j == ident) {
3873 job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
3874 job_dispatch(j, true);
3875 } else if (&j->semaphores == ident) {
3876 job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
3877 job_dispatch(j, false);
3878 } else if (&j->start_interval == ident) {
3879 job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
3880 j->start_pending = true;
3881 job_dispatch(j, false);
3882 } else if (&j->exit_timeout == ident) {
3883 if (!job_assumes(j, j->p != 0)) {
3884 return;
3885 }
3886
3887 if (j->sent_sigkill) {
3888 uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);
3889
3890 td /= NSEC_PER_SEC;
3891 td -= j->clean_kill ? 0 : j->exit_timeout;
3892
3893 job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
3894 j->workaround9359725 = true;
3895
3896 if (g_trap_sigkill_bugs) {
3897 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
3898 (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
3899 }
3900
3901 struct kevent bogus_exit;
3902 EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
3903 jobmgr_callback(j->mgr, &bogus_exit);
3904 } else {
3905 if (unlikely(j->debug_before_kill)) {
3906 job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
3907 (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
3908 }
3909
3910 job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
3911 job_kill(j);
3912 }
3913 } else {
3914 (void)job_assumes(j, false);
3915 }
3916 }
3917
3918 void
3919 job_callback_read(job_t j, int ident)
3920 {
3921 if (ident == j->log_redirect_fd) {
3922 job_log_stdouterr(j);
3923 } else if (ident == j->stdin_fd) {
3924 job_dispatch(j, true);
3925 } else {
3926 socketgroup_callback(j);
3927 }
3928 }
3929
3930 void
3931 jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
3932 {
3933 jobmgr_t jmi;
3934 job_t j;
3935
3936 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3937 jobmgr_reap_bulk(jmi, kev);
3938 }
3939
3940 if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
3941 kev->udata = j;
3942 job_callback(j, kev);
3943 }
3944 }
3945
3946 void
3947 jobmgr_callback(void *obj, struct kevent *kev)
3948 {
3949 jobmgr_t jm = obj;
3950 job_t ji;
3951
3952 switch (kev->filter) {
3953 case EVFILT_PROC:
3954 jobmgr_reap_bulk(jm, kev);
3955 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
3956 break;
3957 case EVFILT_SIGNAL:
3958 switch (kev->ident) {
3959 case SIGTERM:
3960 jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
3961 return launchd_shutdown();
3962 case SIGUSR1:
3963 return calendarinterval_callback();
3964 case SIGUSR2:
3965 fake_shutdown_in_progress = true;
3966 runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
3967
3968 runtime_closelog(); /* HACK -- force 'start' time to be set */
3969
3970 if (pid1_magic) {
3971 int64_t now = runtime_get_wall_time();
3972
3973 jobmgr_log(jm, LOG_NOTICE, "Anticipatory shutdown began at: %lld.%06llu", now / USEC_PER_SEC, now % USEC_PER_SEC);
3974
3975 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
3976 if (ji->per_user && ji->p) {
3977 (void)job_assumes(ji, runtime_kill(ji->p, SIGUSR2) != -1);
3978 }
3979 }
3980 } else {
3981 jobmgr_log(jm, LOG_NOTICE, "Anticipatory per-user launchd shutdown");
3982 }
3983
3984 return;
3985 default:
3986 return (void)jobmgr_assumes(jm, false);
3987 }
3988 break;
3989 case EVFILT_FS:
3990 if (kev->fflags & VQ_MOUNT) {
3991 jobmgr_dispatch_all(jm, true);
3992 }
3993 jobmgr_dispatch_all_semaphores(jm);
3994 break;
3995 case EVFILT_TIMER:
3996 if (kev->ident == (uintptr_t)&sorted_calendar_events) {
3997 calendarinterval_callback();
3998 } else if (kev->ident == (uintptr_t)jm) {
3999 jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
4000 jobmgr_still_alive_with_check(jm);
4001 } else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
4002 jobmgr_do_garbage_collection(jm);
4003 } else if (kev->ident == (uintptr_t)&g_runtime_busy_time) {
4004 jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
4005 if (jobmgr_assumes(jm, runtime_busy_cnt == 0)) {
4006 return launchd_shutdown();
4007 }
4008 }
4009 break;
4010 case EVFILT_VNODE:
4011 if (kev->ident == (uintptr_t)s_no_hang_fd) {
4012 int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
4013 if (unlikely(_no_hang_fd != -1)) {
4014 jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
4015 (void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
4016 (void)jobmgr_assumes(root_jobmgr, runtime_close(s_no_hang_fd) != -1);
4017 s_no_hang_fd = _fd(_no_hang_fd);
4018 }
4019 } else if (pid1_magic && g_console && kev->ident == (uintptr_t)fileno(g_console)) {
4020 int cfd = -1;
4021 if (launchd_assumes((cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1)) {
4022 _fd(cfd);
4023 if (!launchd_assumes((g_console = fdopen(cfd, "w")) != NULL)) {
4024 close(cfd);
4025 }
4026 }
4027 }
4028 break;
4029 default:
4030 return (void)jobmgr_assumes(jm, false);
4031 }
4032 }
4033
4034 void
4035 job_callback(void *obj, struct kevent *kev)
4036 {
4037 job_t j = obj;
4038
4039 job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
4040
4041 switch (kev->filter) {
4042 case EVFILT_PROC:
4043 return job_callback_proc(j, kev);
4044 case EVFILT_TIMER:
4045 return job_callback_timer(j, (void *) kev->ident);
4046 case EVFILT_VNODE:
4047 return semaphoreitem_callback(j, kev);
4048 case EVFILT_READ:
4049 return job_callback_read(j, (int) kev->ident);
4050 case EVFILT_MACHPORT:
4051 return (void)job_dispatch(j, true);
4052 default:
4053 return (void)job_assumes(j, false);
4054 }
4055 }
4056
4057 void
4058 job_start(job_t j)
4059 {
4060 uint64_t td;
4061 int spair[2];
4062 int execspair[2];
4063 int oepair[2];
4064 char nbuf[64];
4065 pid_t c;
4066 bool sipc = false;
4067 u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC;
4068
4069 if (!job_assumes(j, j->mgr != NULL)) {
4070 return;
4071 }
4072
4073 if (unlikely(job_active(j))) {
4074 job_log(j, LOG_DEBUG, "Already started");
4075 return;
4076 }
4077
4078 /*
4079 * Some users adjust the wall-clock and then expect software to not notice.
4080 * Therefore, launchd must use an absolute clock instead of the wall clock
4081 * wherever possible.
4082 */
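/* A sketch of the absolute-clock approach using the Mach APIs directly
 * (illustrative only; launchd's runtime_get_opaque_time() and
 * runtime_get_nanoseconds_since() wrappers are defined elsewhere):
 */
#if 0
#include <mach/mach_time.h>

uint64_t start = mach_absolute_time(); /* ticks; unaffected by settimeofday() */
/* ... time passes ... */
mach_timebase_info_data_t tb;
(void)mach_timebase_info(&tb);
uint64_t elapsed_ns = (mach_absolute_time() - start) * tb.numer / tb.denom;
#endif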
4083 td = runtime_get_nanoseconds_since(j->start_time);
4084 td /= NSEC_PER_SEC;
4085
4086 if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
4087 time_t respawn_delta = j->min_run_time - (uint32_t)td;
4088
4089 /*
4090 * We technically should ref-count throttled jobs to prevent idle exit,
4091 * but we're not directly tracking the 'throttled' state at the moment.
4092 */
4093 int level = LOG_WARNING;
4094 if (!j->did_exec && ((j->fail_cnt - 1) % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
4095 level = LOG_DEBUG;
4096 }
4097
4098 job_log(j, level, "Throttling respawn: Will start in %ld seconds", respawn_delta);
4099 (void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
4100 job_ignore(j);
4101 return;
4102 }
4103
4104 if (likely(!j->legacy_mach_job)) {
4105 sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_special_privileges;
4106 }
4107
4108 if (sipc) {
4109 (void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
4110 }
4111
4112 (void)job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);
4113
4114 if (likely(!j->legacy_mach_job) && job_assumes(j, pipe(oepair) != -1)) {
4115 j->log_redirect_fd = _fd(oepair[0]);
4116 (void)job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
4117 (void)job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
4118 }
4119
4120 switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
4121 case -1:
4122 job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
4123 (void)job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j) != -1);
4124 job_ignore(j);
4125
4126 (void)job_assumes(j, runtime_close(execspair[0]) == 0);
4127 (void)job_assumes(j, runtime_close(execspair[1]) == 0);
4128 if (sipc) {
4129 (void)job_assumes(j, runtime_close(spair[0]) == 0);
4130 (void)job_assumes(j, runtime_close(spair[1]) == 0);
4131 }
4132 if (likely(!j->legacy_mach_job)) {
4133 (void)job_assumes(j, runtime_close(oepair[0]) != -1);
4134 (void)job_assumes(j, runtime_close(oepair[1]) != -1);
4135 j->log_redirect_fd = 0;
4136 }
4137 break;
4138 case 0:
4139 if (unlikely(_vproc_post_fork_ping())) {
4140 _exit(EXIT_FAILURE);
4141 }
4142 if (!j->legacy_mach_job) {
4143 (void)job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
4144 (void)job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
4145 (void)job_assumes(j, runtime_close(oepair[1]) != -1);
4146 }
4147 (void)job_assumes(j, runtime_close(execspair[0]) == 0);
4148 /* wait for our parent to say they've attached a kevent to us */
4149 read(_fd(execspair[1]), &c, sizeof(c));
4150
4151 if (sipc) {
4152 (void)job_assumes(j, runtime_close(spair[0]) == 0);
4153 snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
4154 setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
4155 }
4156 job_start_child(j);
4157 break;
4158 default:
4159 j->start_time = runtime_get_opaque_time();
4160
4161 job_log(j, LOG_DEBUG, "Started as PID: %u", c);
4162
4163 j->did_exec = false;
4164 j->xpcproxy_did_exec = false;
4165 j->checkedin = false;
4166 j->start_pending = false;
4167 j->reaped = false;
4168 j->crashed = false;
4169 j->stopped = false;
4170 if (j->needs_kickoff) {
4171 j->needs_kickoff = false;
4172
4173 if (SLIST_EMPTY(&j->semaphores)) {
4174 j->ondemand = false;
4175 }
4176 }
4177
4178 if (j->has_console) {
4179 g_wsp = c;
4180 }
4181
4182 runtime_add_ref();
4183 total_children++;
4184 LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
4185
4186 if (likely(!j->legacy_mach_job)) {
4187 (void)job_assumes(j, runtime_close(oepair[1]) != -1);
4188 }
4189 j->p = c;
4190
4191 j->mgr->normal_active_cnt++;
4192 j->fork_fd = _fd(execspair[0]);
4193 (void)job_assumes(j, runtime_close(execspair[1]) == 0);
4194 if (sipc) {
4195 (void)job_assumes(j, runtime_close(spair[1]) == 0);
4196 ipc_open(_fd(spair[0]), j);
4197 }
4198 if (job_assumes(j, kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1)) {
4199 job_ignore(j);
4200 } else {
4201 job_reap(j);
4202 }
4203
4204 j->wait4debugger_oneshot = false;
4205
4206 struct envitem *ei = NULL, *et = NULL;
4207 SLIST_FOREACH_SAFE(ei, &j->env, sle, et) {
4208 if (ei->one_shot) {
4209 SLIST_REMOVE(&j->env, ei, envitem, sle);
4210 }
4211 }
4212
4213 if (likely(!j->stall_before_exec)) {
4214 job_uncork_fork(j);
4215 }
4216 break;
4217 }
4218 }
4219
4220 void
4221 job_start_child(job_t j)
4222 {
4223 typeof(posix_spawn) *psf;
4224 const char *file2exec = "/usr/libexec/launchproxy";
4225 const char **argv;
4226 posix_spawnattr_t spattr;
4227 int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
4228 glob_t g;
4229 short spflags = POSIX_SPAWN_SETEXEC;
4230 size_t binpref_out_cnt = 0;
4231 size_t i;
4232
4233 (void)job_assumes(j, posix_spawnattr_init(&spattr) == 0);
4234
4235 job_setup_attributes(j);
4236
4237 if (unlikely(j->argv && j->globargv)) {
4238 g.gl_offs = 1;
4239 for (i = 0; i < j->argc; i++) {
4240 if (i > 0) {
4241 gflags |= GLOB_APPEND;
4242 }
4243 if (glob(j->argv[i], gflags, NULL, &g) != 0) {
4244 job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
4245 exit(EXIT_FAILURE);
4246 }
4247 }
4248 g.gl_pathv[0] = (char *)file2exec;
4249 argv = (const char **)g.gl_pathv;
4250 } else if (likely(j->argv)) {
4251 argv = alloca((j->argc + 2) * sizeof(char *));
4252 argv[0] = file2exec;
4253 for (i = 0; i < j->argc; i++) {
4254 argv[i + 1] = j->argv[i];
4255 }
4256 argv[i + 1] = NULL;
4257 } else {
4258 argv = alloca(3 * sizeof(char *));
4259 argv[0] = file2exec;
4260 argv[1] = j->prog;
4261 argv[2] = NULL;
4262 }
4263
4264 if (likely(!j->inetcompat)) {
4265 argv++;
4266 }
4267
4268 if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
4269 if (!j->legacy_LS_job) {
4270 job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
4271 }
4272 spflags |= POSIX_SPAWN_START_SUSPENDED;
4273 }
4274
4275 if (unlikely(j->disable_aslr)) {
4276 spflags |= _POSIX_SPAWN_DISABLE_ASLR;
4277 }
4278 spflags |= j->pstype;
4279
4280 (void)job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);
4281
4282 if (unlikely(j->j_binpref_cnt)) {
4283 (void)job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
4284 (void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
4285 }
4286
4287 #if HAVE_QUARANTINE
4288 if (j->quarantine_data) {
4289 qtn_proc_t qp;
4290
4291 if (job_assumes(j, qp = qtn_proc_alloc())) {
4292 if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
4293 (void)job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
4294 }
4295 }
4296 }
4297 #endif
4298
4299 #if HAVE_SANDBOX
4300 if (j->seatbelt_profile) {
4301 char *seatbelt_err_buf = NULL;
4302
4303 if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
4304 if (seatbelt_err_buf) {
4305 job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
4306 }
4307 goto out_bad;
4308 }
4309 }
4310 #endif
4311
4312 psf = j->prog ? posix_spawn : posix_spawnp;
4313
4314 if (likely(!j->inetcompat)) {
4315 file2exec = j->prog ? j->prog : argv[0];
4316 }
4317
4318 errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);
4319 if (errno != EBADARCH) {
4320 int level = LOG_ERR;
4321 if ((j->fail_cnt++ % LAUNCHD_LOG_FAILED_EXEC_FREQ) != 0) {
4322 level = LOG_DEBUG;
4323 }
4324 job_log_error(j, level, "posix_spawn(\"%s\", ...)", file2exec);
4325 errno = EXIT_FAILURE;
4326 }
4327
4328 #if HAVE_SANDBOX
4329 out_bad:
4330 #endif
4331 _exit(errno);
4332 }
4333
4334 void
4335 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
4336 {
4337 launch_data_t tmp;
4338 struct envitem *ei;
4339 job_t ji;
4340
4341 if (jm->parentmgr) {
4342 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4343 } else {
4344 char **tmpenviron = environ;
4345 for (; *tmpenviron; tmpenviron++) {
4346 char envkey[1024];
4347 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4348 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
4349 strlcpy(envkey, *tmpenviron, sizeof(envkey)); /* strlcpy guarantees NUL termination */
4350 *(strchr(envkey, '=')) = '\0';
4351 launch_data_dict_insert(dict, s, envkey);
4352 }
4353 }
4354
4355 LIST_FOREACH(ji, &jm->jobs, sle) {
4356 SLIST_FOREACH(ei, &ji->global_env, sle) {
4357 if ((tmp = launch_data_new_string(ei->value))) {
4358 launch_data_dict_insert(dict, tmp, ei->key);
4359 }
4360 }
4361 }
4362 }
4363
4364 void
4365 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
4366 {
4367 struct envitem *ei;
4368 job_t ji;
4369
4370 if (jm->parentmgr) {
4371 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4372 }
4373
4374 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
4375 SLIST_FOREACH(ei, &ji->global_env, sle) {
4376 setenv(ei->key, ei->value, 1);
4377 }
4378 }
4379 }
4380
4381 void
4382 job_log_pids_with_weird_uids(job_t j)
4383 {
4384 size_t len = sizeof(pid_t) * get_kern_max_proc();
4385 pid_t *pids = NULL;
4386 uid_t u = j->mach_uid;
4387 int i = 0, kp_cnt = 0;
4388
4389 if (!do_apple_internal_logging) {
4390 return;
4391 }
4392
4393 pids = malloc(len);
4394 if (!job_assumes(j, pids != NULL)) {
4395 return;
4396 }
4397
4398 runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);
4399
4400 /* libproc actually has some serious performance drawbacks compared with sysctl(3) in
4401 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
4402 * one kernel call, libproc requires that we get a list of PIDs we're interested in
4403 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
4404 * struct back in a single call for each one.
4405 *
4406 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
4407 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
4408 * libproc could go stale before we call proc_pidinfo().
4409 *
4410 * Note that proc_list*() APIs return the number of PIDs given back, not the number
4411 * of bytes written to the buffer.
4412 */
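/* For contrast, the sysctl(3) approach described above fetches the whole
 * process table in one snapshot (illustrative sketch, not launchd code):
 */
#if 0
#include <sys/sysctl.h>

int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL, 0 };
size_t buf_len = 0;
/* First call sizes the buffer; the second fills it in one kernel call. */
if (sysctl(mib, 4, NULL, &buf_len, NULL, 0) == 0) {
	struct kinfo_proc *kp = malloc(buf_len);
	if (kp && sysctl(mib, 4, kp, &buf_len, NULL, 0) == 0) {
		size_t nprocs = buf_len / sizeof(struct kinfo_proc);
		/* ... examine kp[0 .. nprocs - 1] ... */
	}
	free(kp);
}
#endif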
4413 if (!job_assumes(j, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
4414 goto out;
4415 }
4416
4417 for (i = 0; i < kp_cnt; i++) {
4418 struct proc_bsdshortinfo proc;
4419 /* We perhaps should not log a bug here if we get ESRCH back, due to the race
4420 * detailed above.
4421 */
4422 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
4423 if (errno != ESRCH) {
4424 (void)job_assumes(j, errno == 0);
4425 }
4426 continue;
4427 }
4428
4429 uid_t i_euid = proc.pbsi_uid;
4430 uid_t i_uid = proc.pbsi_ruid;
4431 uid_t i_svuid = proc.pbsi_svuid;
4432 pid_t i_pid = pids[i];
4433
4434 if (i_euid != u && i_uid != u && i_svuid != u) {
4435 continue;
4436 }
4437
4438 job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);
4439
4440 /* Temporarily disabled due to 5423935 and 4946119. */
4441 #if 0
4442 /* Ask the accountless process to exit. */
4443 (void)job_assumes(j, runtime_kill(i_pid, SIGTERM) != -1);
4444 #endif
4445 }
4446
4447 out:
4448 free(pids);
4449 }
4450
4451 void
4452 job_postfork_test_user(job_t j)
4453 {
4454 /* This function is all about 5201578 */
4455
4456 const char *home_env_var = getenv("HOME");
4457 const char *user_env_var = getenv("USER");
4458 const char *logname_env_var = getenv("LOGNAME");
4459 uid_t tmp_uid, local_uid = getuid();
4460 gid_t tmp_gid, local_gid = getgid();
4461 char shellpath[PATH_MAX];
4462 char homedir[PATH_MAX];
4463 char loginname[2000];
4464 struct passwd *pwe;
4465
4466
4467 if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
4468 && strcmp(user_env_var, logname_env_var) == 0)) {
4469 goto out_bad;
4470 }
4471
4472 if ((pwe = getpwnam(user_env_var)) == NULL) {
4473 job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
4474 goto out_bad;
4475 }
4476
4477 /*
4478 * We must copy the results of getpw*().
4479 *
4480 * Why? Because subsequent API calls may call getpw*() as a part of
4481 * their implementation. Since getpw*() returns a [now thread scoped]
4482 * global, we must therefore cache the results before continuing.
4483 */
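/* The reentrant getpw*_r() variants avoid the shared-buffer problem by
 * writing into caller-provided storage (illustrative sketch only):
 */
#if 0
struct passwd pw_storage, *pw_result = NULL;
char pw_buf[4096];
if (getpwnam_r(user_env_var, &pw_storage, pw_buf, sizeof(pw_buf), &pw_result) == 0
		&& pw_result != NULL) {
	/* pw_storage/pw_buf belong to us; no later getpw*() call can clobber them. */
}
#endif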
4484
4485 tmp_uid = pwe->pw_uid;
4486 tmp_gid = pwe->pw_gid;
4487
4488 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4489 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4490 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4491
4492 if (strcmp(loginname, logname_env_var) != 0) {
4493 job_log(j, LOG_ERR, "The %s environment variable changed out from under us!", "USER");
4494 goto out_bad;
4495 }
4496 if (strcmp(homedir, home_env_var) != 0) {
4497 job_log(j, LOG_ERR, "The %s environment variable changed out from under us!", "HOME");
4498 goto out_bad;
4499 }
4500 if (local_uid != tmp_uid) {
4501 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4502 'U', tmp_uid, local_uid);
4503 goto out_bad;
4504 }
4505 if (local_gid != tmp_gid) {
4506 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
4507 'G', tmp_gid, local_gid);
4508 goto out_bad;
4509 }
4510
4511 return;
4512 out_bad:
4513 #if 0
4514 (void)job_assumes(j, runtime_kill(getppid(), SIGTERM) != -1);
4515 _exit(EXIT_FAILURE);
4516 #else
4517 job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
4518 #endif
4519 }
4520
4521 void
4522 job_postfork_become_user(job_t j)
4523 {
4524 char loginname[2000];
4525 char tmpdirpath[PATH_MAX];
4526 char shellpath[PATH_MAX];
4527 char homedir[PATH_MAX];
4528 struct passwd *pwe;
4529 size_t r;
4530 gid_t desired_gid = -1;
4531 uid_t desired_uid = -1;
4532
4533 if (getuid() != 0) {
4534 return job_postfork_test_user(j);
4535 }
4536
4537 /*
4538 * I contend that having UID == 0 and GID != 0 is of dubious value.
4539 * Nevertheless, this used to work in Tiger. See: 5425348
4540 */
4541 if (j->groupname && !j->username) {
4542 j->username = "root";
4543 }
4544
4545 if (j->username) {
4546 if ((pwe = getpwnam(j->username)) == NULL) {
4547 job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
4548 _exit(EXIT_FAILURE);
4549 }
4550 } else if (j->mach_uid) {
4551 if ((pwe = getpwuid(j->mach_uid)) == NULL) {
4552 job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
4553 job_log_pids_with_weird_uids(j);
4554 _exit(EXIT_FAILURE);
4555 }
4556 } else {
4557 return;
4558 }
4559
4560 /*
4561 * We must copy the results of getpw*().
4562 *
4563 * Why? Because subsequent API calls may call getpw*() as a part of
4564 * their implementation. Since getpw*() returns a [now thread scoped]
4565 * global, we must therefore cache the results before continuing.
4566 */
4567
4568 desired_uid = pwe->pw_uid;
4569 desired_gid = pwe->pw_gid;
4570
4571 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4572 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4573 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4574
4575 if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
4576 job_log(j, LOG_ERR, "Expired account");
4577 _exit(EXIT_FAILURE);
4578 }
4579
4580
4581 if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
4582 job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
4583 } else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
4584 job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
4585 }
4586
4587 if (j->groupname) {
4588 struct group *gre;
4589
4590 if (unlikely((gre = getgrnam(j->groupname)) == NULL)) {
4591 job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
4592 _exit(EXIT_FAILURE);
4593 }
4594
4595 desired_gid = gre->gr_gid;
4596 }
4597
4598 if (!job_assumes(j, setlogin(loginname) != -1)) {
4599 _exit(EXIT_FAILURE);
4600 }
4601
4602 if (!job_assumes(j, setgid(desired_gid) != -1)) {
4603 _exit(EXIT_FAILURE);
4604 }
4605
4606 /*
4607 * The kernel team and the DirectoryServices team want initgroups()
4608 * called after setgid(). See 4616864 for more information.
4609 */
4610
4611 if (likely(!j->no_init_groups)) {
4612 #if 1
4613 if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
4614 _exit(EXIT_FAILURE);
4615 }
4616 #else
4617 /* Do our own little initgroups(). We do this to guarantee that we're
4618 * always opted into dynamic group resolution in the kernel. initgroups(3)
4619 * does not make this guarantee.
4620 */
4621 int groups[NGROUPS], ngroups;
4622
4623 /* A failure here isn't fatal, and we'll still get data we can use. */
4624 (void)job_assumes(j, getgrouplist(j->username, desired_gid, groups, &ngroups) != -1);
4625
4626 if (!job_assumes(j, syscall(SYS_initgroups, ngroups, groups, desired_uid) != -1)) {
4627 _exit(EXIT_FAILURE);
4628 }
4629 #endif
4630 }
4631
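/* Note that setuid() must come last of the identity changes: once the
 * process gives up root, the setlogin()/setgid()/initgroups() calls
 * above would no longer be permitted. */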
4632 if (!job_assumes(j, setuid(desired_uid) != -1)) {
4633 _exit(EXIT_FAILURE);
4634 }
4635
4636 r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));
4637
4638 if (likely(r > 0 && r < sizeof(tmpdirpath))) {
4639 setenv("TMPDIR", tmpdirpath, 0);
4640 }
4641
4642 setenv("SHELL", shellpath, 0);
4643 setenv("HOME", homedir, 0);
4644 setenv("USER", loginname, 0);
4645 setenv("LOGNAME", loginname, 0);
4646 }
4647
4648 void
4649 job_setup_attributes(job_t j)
4650 {
4651 struct limititem *li;
4652 struct envitem *ei;
4653
4654 if (unlikely(j->setnice)) {
4655 (void)job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
4656 }
4657
4658 SLIST_FOREACH(li, &j->limits, sle) {
4659 struct rlimit rl;
4660
4661 if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
4662 continue;
4663 }
4664
4665 if (li->sethard) {
4666 rl.rlim_max = li->lim.rlim_max;
4667 }
4668 if (li->setsoft) {
4669 rl.rlim_cur = li->lim.rlim_cur;
4670 }
4671
4672 if (setrlimit(li->which, &rl) == -1) {
4673 job_log_error(j, LOG_WARNING, "setrlimit()");
4674 }
4675 }
4676
4677 if (unlikely(!j->inetcompat && j->session_create)) {
4678 launchd_SessionCreate();
4679 }
4680
4681 if (unlikely(j->low_pri_io)) {
4682 (void)job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
4683 }
4684 if (unlikely(j->rootdir)) {
4685 (void)job_assumes(j, chroot(j->rootdir) != -1);
4686 (void)job_assumes(j, chdir(".") != -1);
4687 }
4688
4689 job_postfork_become_user(j);
4690
4691 if (unlikely(j->workingdir)) {
4692 (void)job_assumes(j, chdir(j->workingdir) != -1);
4693 }
4694
4695 if (unlikely(j->setmask)) {
4696 umask(j->mask);
4697 }
4698
4699 if (j->stdin_fd) {
4700 (void)job_assumes(j, dup2(j->stdin_fd, STDIN_FILENO) != -1);
4701 } else {
4702 job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
4703 }
4704 job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
4705 job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
4706
4707 jobmgr_setup_env_from_other_jobs(j->mgr);
4708
4709 SLIST_FOREACH(ei, &j->env, sle) {
4710 setenv(ei->key, ei->value, 1);
4711 }
4712
4713 if (do_apple_internal_logging) {
4714 setenv(LAUNCHD_DO_APPLE_INTERNAL_LOGGING, "true", 1);
4715 }
4716
4717 #if !TARGET_OS_EMBEDDED
4718 if (j->jetsam_properties) {
4719 (void)job_assumes(j, proc_setpcontrol(PROC_SETPC_TERMINATE) == 0);
4720 }
4721 #endif
4722
4723 #if TARGET_OS_EMBEDDED
4724 if (j->main_thread_priority != 0) {
4725 struct sched_param params;
4726 bzero(&params, sizeof(params));
4727 params.sched_priority = j->main_thread_priority;
4728 (void)job_assumes(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params) != -1);
4729 }
4730 #endif
4731
4732 /*
4733 * We'd like to call setsid() unconditionally, but we have reason to
4734 * believe that prevents launchd from being able to send signals to
4735 * setuid children. We'll settle for process-groups.
4736 */
4737 if (getppid() != 1) {
4738 (void)job_assumes(j, setpgid(0, 0) != -1);
4739 } else {
4740 (void)job_assumes(j, setsid() != -1);
4741 }
4742 }
4743
4744 void
4745 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
4746 {
4747 int fd;
4748
4749 if (!path) {
4750 return;
4751 }
4752
4753 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
4754 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
4755 return;
4756 }
4757
4758 (void)job_assumes(j, dup2(fd, target_fd) != -1);
4759 (void)job_assumes(j, runtime_close(fd) == 0);
4760 }
4761
4762 int
4763 dir_has_files(job_t j, const char *path)
4764 {
4765 DIR *dd = opendir(path);
4766 struct dirent *de;
4767 bool r = false;
4768
4769 if (unlikely(!dd)) {
4770 return -1;
4771 }
4772
4773 while ((de = readdir(dd))) {
4774 if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
4775 r = 1;
4776 break;
4777 }
4778 }
4779
4780 (void)job_assumes(j, closedir(dd) == 0);
4781 return r;
4782 }
4783
4784 void
4785 calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
4786 {
4787 struct calendarinterval *ci_iter, *ci_prev = NULL;
4788 time_t later, head_later;
4789
4790 later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);
4791
4792 if (ci->when.tm_wday != -1) {
4793 time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);
4794
4795 if (ci->when.tm_mday == -1) {
4796 later = otherlater;
4797 } else {
4798 later = later < otherlater ? later : otherlater;
4799 }
4800 }
4801
4802 ci->when_next = later;
4803
4804 LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
4805 if (ci->when_next < ci_iter->when_next) {
4806 LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
4807 break;
4808 }
4809
4810 ci_prev = ci_iter;
4811 }
4812
4813 if (ci_iter == NULL) {
4814 /* ci must want to fire after every other timer, or there are no timers */
4815
4816 if (LIST_EMPTY(&sorted_calendar_events)) {
4817 LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
4818 } else {
4819 LIST_INSERT_AFTER(ci_prev, ci, global_sle);
4820 }
4821 }
4822
4823 head_later = LIST_FIRST(&sorted_calendar_events)->when_next;
4824
4825 if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
4826 char time_string[100];
4827 size_t time_string_len;
4828
4829 ctime_r(&later, time_string);
4830 time_string_len = strlen(time_string);
4831
4832 if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
4833 time_string[time_string_len - 1] = '\0';
4834 }
4835
4836 job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
4837 }
4838 }
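/* The shared timer armed above uses an absolute deadline. With
 * NOTE_ABSOLUTE | NOTE_SECONDS, the kevent data field is interpreted as a
 * wall-clock time_t at which to fire, not a relative interval. A minimal
 * sketch with hypothetical identifiers (kq is the process's kqueue):
 */
#if 0
static int alarm_token;
struct kevent kev;
time_t deadline = time(NULL) + 60; /* fire one minute from now */
EV_SET(&kev, (uintptr_t)&alarm_token, EVFILT_TIMER, EV_ADD,
	NOTE_ABSOLUTE | NOTE_SECONDS, deadline, NULL);
(void)kevent(kq, &kev, 1, NULL, 0, NULL);
#endif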
4839
4840 void
4841 extract_rcsid_substr(const char *i, char *o, size_t osz)
4842 {
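/* e.g. "$Revision: 12345 $" -> "12345": copy everything after the first
 * space, then truncate at the next space. */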
4843 char *rcs_rev_tmp = strchr(i, ' ');
4844
4845 if (!rcs_rev_tmp) {
4846 strlcpy(o, i, osz);
4847 } else {
4848 strlcpy(o, rcs_rev_tmp + 1, osz);
4849 rcs_rev_tmp = strchr(o, ' ');
4850 if (rcs_rev_tmp) {
4851 *rcs_rev_tmp = '\0';
4852 }
4853 }
4854 }
4855
4856 void
4857 jobmgr_log_bug(jobmgr_t jm, unsigned int line)
4858 {
4859 static const char *file;
4860 int saved_errno = errno;
4861 char buf[100];
4862
4863 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4864
4865 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
4866
4867 if (!file) {
4868 file = strrchr(__FILE__, '/');
4869 if (!file) {
4870 file = __FILE__;
4871 } else {
4872 file += 1;
4873 }
4874 }
4875
4876 /* the only time 'jm' should not be set is if setting up the first bootstrap fails for some reason */
4877 if (likely(jm)) {
4878 jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4879 } else {
4880 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4881 }
4882 }
4883
4884 void
4885 job_log_bug(job_t j, unsigned int line)
4886 {
4887 static const char *file;
4888 int saved_errno = errno;
4889 char buf[100];
4890
4891 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4892
4893 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
4894
4895 if (!file) {
4896 file = strrchr(__FILE__, '/');
4897 if (!file) {
4898 file = __FILE__;
4899 } else {
4900 file += 1;
4901 }
4902 }
4903
4904 if (likely(j)) {
4905 job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4906 } else {
4907 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4908 }
4909 }
4910
4911 void
4912 job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
4913 {
4914 const char *label2use = j ? j->label : "com.apple.launchd.NULL";
4915 const char *mgr2use = j ? j->mgr->name : "NULL";
4916 struct runtime_syslog_attr attr = { g_my_label, label2use, mgr2use, pri, getuid(), getpid(), j ? j->p : 0 };
4917 char *newmsg;
4918 int oldmask = 0;
4919 size_t newmsgsz;
4920
4921 /*
4922 * Hack: If bootstrap_port is set, we must be on the child side of a
4923 * fork(), but before the exec*(). Let's route the log message back to
4924 * launchd proper.
4925 */
4926 if (bootstrap_port) {
4927 return _vproc_logv(pri, err, msg, ap);
4928 }
4929
4930 newmsgsz = strlen(msg) + 200;
4931 newmsg = alloca(newmsgsz);
4932
4933 if (err) {
4934 #if !TARGET_OS_EMBEDDED
4935 snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
4936 #else
4937 snprintf(newmsg, newmsgsz, "(%s) %s: %s", label2use, msg, strerror(err));
4938 #endif
4939 } else {
4940 #if !TARGET_OS_EMBEDDED
4941 snprintf(newmsg, newmsgsz, "%s", msg);
4942 #else
4943 snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
4944 #endif
4945 }
4946
4947 if (j && unlikely(j->debug)) {
4948 oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
4949 }
4950
4951 runtime_vsyslog(&attr, newmsg, ap);
4952
4953 if (j && unlikely(j->debug)) {
4954 setlogmask(oldmask);
4955 }
4956 }
4957
4958 void
4959 job_log_error(job_t j, int pri, const char *msg, ...)
4960 {
4961 va_list ap;
4962
4963 va_start(ap, msg);
4964 job_logv(j, pri, errno, msg, ap);
4965 va_end(ap);
4966 }
4967
4968 void
4969 job_log(job_t j, int pri, const char *msg, ...)
4970 {
4971 va_list ap;
4972
4973 va_start(ap, msg);
4974 job_logv(j, pri, 0, msg, ap);
4975 va_end(ap);
4976 }
4977
4978 #if 0
4979 void
4980 jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
4981 {
4982 va_list ap;
4983
4984 va_start(ap, msg);
4985 jobmgr_logv(jm, pri, errno, msg, ap);
4986 va_end(ap);
4987 }
4988 #endif
4989
4990 void
4991 jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
4992 {
4993 va_list ap;
4994
4995 va_start(ap, msg);
4996 jobmgr_logv(jm, pri, 0, msg, ap);
4997 va_end(ap);
4998 }
4999
5000 void
5001 jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
5002 {
5003 char *newmsg;
5004 char *newname;
5005 size_t i, o, jmname_len = strlen(jm->name), newmsgsz;
5006
5007 newname = alloca((jmname_len + 1) * 2);
5008 newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
5009 newmsg = alloca(newmsgsz);
5010
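/* newmsg is used below as a printf-style format string for the recursive
 * jobmgr_logv() call, so any literal '%' in the manager's name must be
 * doubled to survive the next round of formatting. */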
5011 for (i = 0, o = 0; i < jmname_len; i++, o++) {
5012 if (jm->name[i] == '%') {
5013 newname[o] = '%';
5014 o++;
5015 }
5016 newname[o] = jm->name[i];
5017 }
5018 newname[o] = '\0';
5019
5020 if (err) {
5021 snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
5022 } else {
5023 snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
5024 }
5025
5026 if (jm->parentmgr) {
5027 jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
5028 } else {
5029 struct runtime_syslog_attr attr = { g_my_label, g_my_label, jm->name, pri, getuid(), getpid(), getpid() };
5030
5031 runtime_vsyslog(&attr, newmsg, ap);
5032 }
5033 }
5034
5035 void
5036 semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
5037 {
5038 if (si->fd != -1) {
5039 job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
5040 (void)job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
5041 }
5042 }
5043
5044 void
5045 semaphoreitem_watch(job_t j, struct semaphoreitem *si)
5046 {
5047 char *parentdir, tmp_path[PATH_MAX];
5048 int saved_errno = 0;
5049 int fflags = NOTE_DELETE|NOTE_RENAME;
5050
5051 switch (si->why) {
5052 case DIR_NOT_EMPTY:
5053 case PATH_CHANGES:
5054 fflags |= NOTE_ATTRIB|NOTE_LINK;
5055 /* fall through */
5056 case PATH_EXISTS:
5057 fflags |= NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
5058 /* fall through */
5059 case PATH_MISSING:
5060 break;
5061 default:
5062 return;
5063 }
5064
5065 /* dirname() may modify tmp_path */
5066 strlcpy(tmp_path, si->what, sizeof(tmp_path));
5067
5068 if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
5069 return;
5070 }
5071
5072 /* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
5073 do {
5074 if (si->fd == -1) {
5075 struct stat sb;
5076 if (stat(si->what, &sb) == 0) {
5077 /* If we're watching a character or block device, only watch the parent directory.
5078 * See rdar://problem/6489900 for the gory details. Basically, holding an open file
5079 * descriptor to a devnode could end up (a) blocking us on open(2) until someone else
5080 * open(2)s the file (like a character device that waits for a carrier signal) or
5081 * (b) preventing other processes from obtaining an exclusive lock on the file, even
5082 * though we're opening it with O_EVTONLY.
5083 *
5084 * The main point of contention is that O_EVTONLY doesn't actually mean "event only".
5085 * It means "Don't prevent unmounts of this descriptor's volume". We work around this
5086 * for dev nodes by only watching the parent directory and stat(2)ing our desired file
5087 * each time the parent changes to see if it appeared or disappeared.
5088 */
5089 if (S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode)) {
5090 si->fd = _fd(open(si->what, O_EVTONLY | O_NOCTTY | O_NONBLOCK));
5091 }
5092 }
5093
5094 if (si->fd == -1) {
5095 si->watching_parent = job_assumes(j, (si->fd = _fd(open(parentdir, O_EVTONLY | O_NOCTTY | O_NONBLOCK))) != -1);
5096 } else {
5097 si->watching_parent = false;
5098 }
5099 }
5100
5101 if (si->fd == -1) {
5102 return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", si->what);
5103 }
5104
5105 job_log(j, LOG_DEBUG, "Watching %svnode (%s): %d", si->watching_parent ? "parent ": "", si->what, si->fd);
5106
5107 if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
5108 saved_errno = errno;
5109 /*
5110 * The FD can be revoked between the open() and kevent().
5111 * This is similar to the inability for kevents to be
5112 * attached to short lived zombie processes after fork()
5113 * but before kevent().
5114 */
5115 (void)job_assumes(j, runtime_close(si->fd) == 0);
5116 si->fd = -1;
5117 }
5118 } while (unlikely((si->fd == -1) && (saved_errno == ENOENT)));
5119
5120 if (saved_errno == ENOTSUP) {
5121 /*
5122 * 3524219 NFS needs kqueue support
5123 * 4124079 VFS needs generic kqueue support
5124 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
5125 */
5126 job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);
5127
5128 if (!j->poll_for_vfs_changes) {
5129 j->poll_for_vfs_changes = true;
5130 (void)job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
5131 }
5132 }
5133 }
5134
5135 void
5136 semaphoreitem_callback(job_t j, struct kevent *kev)
5137 {
5138 char invalidation_reason[100] = "";
5139 struct semaphoreitem *si;
5140
5141 SLIST_FOREACH(si, &j->semaphores, sle) {
5142 switch (si->why) {
5143 case PATH_CHANGES:
5144 case PATH_EXISTS:
5145 case PATH_MISSING:
5146 case DIR_NOT_EMPTY:
5147 job_log(j, LOG_DEBUG, "P%s changed (%u): %s", si->watching_parent ? "arent path" : "ath", si->why, si->what);
5148 break;
5149 default:
5150 continue;
5151 }
5152
5153 if (si->fd == (int)kev->ident) {
5154 break;
5155 }
5156 }
5157
5158 if (!job_assumes(j, si != NULL)) {
5159 return;
5160 }
5161
5162 if (NOTE_DELETE & kev->fflags) {
5163 strcat(invalidation_reason, "deleted");
5164 }
5165
5166 if (NOTE_RENAME & kev->fflags) {
5167 if (invalidation_reason[0]) {
5168 strcat(invalidation_reason, "/renamed");
5169 } else {
5170 strcat(invalidation_reason, "renamed");
5171 }
5172 }
5173
5174 if (NOTE_REVOKE & kev->fflags) {
5175 if (invalidation_reason[0]) {
5176 strcat(invalidation_reason, "/revoked");
5177 } else {
5178 strcat(invalidation_reason, "revoked");
5179 }
5180 }
5181
5182 if (invalidation_reason[0]) {
5183 job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
5184 (void)job_assumes(j, runtime_close(si->fd) == 0);
5185 si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
5186 }
5187
5188 if (!si->watching_parent) {
5189 if (si->why == PATH_CHANGES) {
5190 j->start_pending = true;
5191 } else {
5192 semaphoreitem_watch(j, si);
5193 }
5194 } else { /* Something happened to the parent directory. See if our target file appeared. */
5195 if (!invalidation_reason[0]) {
5196 (void)job_assumes(j, runtime_close(si->fd) == 0);
5197 si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
5198 semaphoreitem_watch(j, si);
5199 }
5200 /* Need to think about what should happen if the parent directory goes invalid. */
5201 }
5202
5203 job_dispatch(j, false);
5204 }
5205
5206 struct cal_dict_walk {
5207 job_t j;
5208 struct tm tmptm;
5209 };
5210
5211 void
5212 calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
5213 {
5214 struct cal_dict_walk *cdw = context;
5215 struct tm *tmptm = &cdw->tmptm;
5216 job_t j = cdw->j;
5217 int64_t val;
5218
5219 if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
5220 /* hack to let caller know something went wrong */
5221 tmptm->tm_sec = -1;
5222 return;
5223 }
5224
5225 val = launch_data_get_integer(obj);
5226
5227 if (val < 0) {
5228 job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
5229 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
5230 if (val > 59) {
5231 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
5232 tmptm->tm_sec = -1;
5233 } else {
5234 tmptm->tm_min = (typeof(tmptm->tm_min)) val;
5235 }
5236 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
5237 if (val > 23) {
5238 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
5239 tmptm->tm_sec = -1;
5240 } else {
5241 tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
5242 }
5243 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
5244 if (val < 1 || val > 31) {
5245 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
5246 tmptm->tm_sec = -1;
5247 } else {
5248 tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
5249 }
5250 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
5251 if (val > 7) {
5252 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
5253 tmptm->tm_sec = -1;
5254 } else {
5255 tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
5256 }
5257 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
5258 if (val > 12) {
5259 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
5260 tmptm->tm_sec = -1;
5261 } else {
5262 tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
5263 tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
5264 }
5265 }
5266 }
5267
5268 bool
5269 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5270 {
5271 struct cal_dict_walk cdw;
5272
5273 cdw.j = j;
5274 memset(&cdw.tmptm, 0, sizeof(cdw.tmptm));
5275
5276 cdw.tmptm.tm_min = -1;
5277 cdw.tmptm.tm_hour = -1;
5278 cdw.tmptm.tm_mday = -1;
5279 cdw.tmptm.tm_wday = -1;
5280 cdw.tmptm.tm_mon = -1;
5281
5282 if (!job_assumes(j, obj != NULL)) {
5283 return false;
5284 }
5285
5286 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5287 return false;
5288 }
5289
5290 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5291
5292 if (unlikely(cdw.tmptm.tm_sec == -1)) {
5293 return false;
5294 }
5295
5296 return calendarinterval_new(j, &cdw.tmptm);
5297 }
5298
5299 bool
5300 calendarinterval_new(job_t j, struct tm *w)
5301 {
5302 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5303
5304 if (!job_assumes(j, ci != NULL)) {
5305 return false;
5306 }
5307
5308 ci->when = *w;
5309 ci->job = j;
5310
5311 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
5312
5313 calendarinterval_setalarm(j, ci);
5314
5315 runtime_add_weak_ref();
5316
5317 return true;
5318 }
5319
5320 void
5321 calendarinterval_delete(job_t j, struct calendarinterval *ci)
5322 {
5323 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
5324 LIST_REMOVE(ci, global_sle);
5325
5326 free(ci);
5327
5328 runtime_del_weak_ref();
5329 }
5330
5331 void
5332 calendarinterval_sanity_check(void)
5333 {
5334 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5335 time_t now = time(NULL);
5336
5337 if (unlikely(ci && (ci->when_next < now))) {
5338 (void)jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
5339 }
5340 }
5341
5342 void
5343 calendarinterval_callback(void)
5344 {
5345 struct calendarinterval *ci, *ci_next;
5346 time_t now = time(NULL);
5347
5348 LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
5349 job_t j = ci->job;
5350
5351 if (ci->when_next > now) {
5352 break;
5353 }
5354
5355 LIST_REMOVE(ci, global_sle);
5356 calendarinterval_setalarm(j, ci);
5357
5358 j->start_pending = true;
5359 job_dispatch(j, false);
5360 }
5361 }
5362
5363 bool
5364 socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds)
5365 {
5366 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5367
5368 if (!job_assumes(j, sg != NULL)) {
5369 return false;
5370 }
5371
5372 sg->fds = calloc(1, fd_cnt * sizeof(int));
5373 sg->fd_cnt = fd_cnt;
5374 sg->junkfds = junkfds;
5375
5376 if (!job_assumes(j, sg->fds != NULL)) {
5377 free(sg);
5378 return false;
5379 }
5380
5381 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
5382 strcpy(sg->name_init, name);
5383
5384 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5385
5386 runtime_add_weak_ref();
5387
5388 return true;
5389 }
5390
5391 void
5392 socketgroup_delete(job_t j, struct socketgroup *sg)
5393 {
5394 unsigned int i;
5395
5396 for (i = 0; i < sg->fd_cnt; i++) {
5397 #if 0
5398 struct sockaddr_storage ss;
5399 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
5400 socklen_t ss_len = sizeof(ss);
5401
5402 /* 5480306 */
5403 if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
5404 && job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
5405 (void)job_assumes(j, unlink(sun->sun_path) != -1);
5406 /* We might conditionally need to delete a directory here */
5407 }
5408 #endif
5409 (void)job_assumes(j, runtime_close(sg->fds[i]) != -1);
5410 }
5411
5412 SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);
5413
5414 free(sg->fds);
5415 free(sg);
5416
5417 runtime_del_weak_ref();
5418 }
5419
5420 void
5421 socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
5422 {
5423 struct kevent kev[sg->fd_cnt];
5424 char buf[10000];
5425 unsigned int i, buf_off = 0;
5426
5427 if (unlikely(sg->junkfds)) {
5428 return;
5429 }
5430
5431 for (i = 0; i < sg->fd_cnt; i++) {
5432 EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
5433 buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
5434 }
5435
5436 job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);
5437
5438 (void)job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);
5439
5440 for (i = 0; i < sg->fd_cnt; i++) {
5441 (void)job_assumes(j, kev[i].flags & EV_ERROR);
5442 errno = (typeof(errno)) kev[i].data;
5443 (void)job_assumes(j, kev[i].data == 0);
5444 }
5445 }
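/* The per-entry EV_ERROR check above matches kqueue's EV_RECEIPT protocol,
 * under which kevent(2) hands every submitted change back with EV_ERROR
 * set and a per-entry status (0 on success) in the data field; presumably
 * this is what kevent_bulk_mod() arranges. An illustrative sketch with
 * hypothetical identifiers (kq, fd0, fd1):
 */
#if 0
struct kevent changes[2];
EV_SET(&changes[0], fd0, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
EV_SET(&changes[1], fd1, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
int n = kevent(kq, changes, 2, changes, 2, NULL);
for (int i = 0; i < n; i++) {
	if ((changes[i].flags & EV_ERROR) && changes[i].data != 0) {
		/* changes[i].data holds the errno-style failure for this entry */
	}
}
#endif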
5446
5447 void
5448 socketgroup_ignore(job_t j, struct socketgroup *sg)
5449 {
5450 socketgroup_kevent_mod(j, sg, false);
5451 }
5452
5453 void
5454 socketgroup_watch(job_t j, struct socketgroup *sg)
5455 {
5456 socketgroup_kevent_mod(j, sg, true);
5457 }
5458
5459 void
5460 socketgroup_callback(job_t j)
5461 {
5462 job_dispatch(j, true);
5463 }
5464
5465 bool
5466 envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot)
5467 {
5468 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
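/* One allocation holds the struct followed by "key\0value\0"; ei->value
 * is pointed into that same block below, so a single free() suffices. */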
5469
5470 if (!job_assumes(j, ei != NULL)) {
5471 return false;
5472 }
5473
5474 strcpy(ei->key_init, k);
5475 ei->value = ei->key_init + strlen(k) + 1;
5476 strcpy(ei->value, v);
5477 ei->one_shot = one_shot;
5478
5479 if (global) {
5480 if (SLIST_EMPTY(&j->global_env)) {
5481 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5482 }
5483 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5484 } else {
5485 SLIST_INSERT_HEAD(&j->env, ei, sle);
5486 }
5487
5488 job_log(j, LOG_DEBUG, "Added environment variable: %s=%s", k, v);
5489
5490 return true;
5491 }
5492
5493 void
5494 envitem_delete(job_t j, struct envitem *ei, bool global)
5495 {
5496 if (global) {
5497 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
5498 if (SLIST_EMPTY(&j->global_env)) {
5499 LIST_REMOVE(j, global_env_sle);
5500 }
5501 } else {
5502 SLIST_REMOVE(&j->env, ei, envitem, sle);
5503 }
5504
5505 free(ei);
5506 }
5507
5508 void
5509 envitem_setup(launch_data_t obj, const char *key, void *context)
5510 {
5511 job_t j = context;
5512
5513 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5514 return;
5515 }
5516
5517 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5518 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, false);
5519 } else {
5520 job_log(j, LOG_DEBUG, "Ignoring reserved environment variable: %s", key);
5521 }
5522 }
5523
5524 void
5525 envitem_setup_one_shot(launch_data_t obj, const char *key, void *context)
5526 {
5527 job_t j = context;
5528
5529 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5530 return;
5531 }
5532
5533 if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5534 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, true);
5535 } else {
5536 job_log(j, LOG_DEBUG, "Ignoring reserved environment variable: %s", key);
5537 }
5538 }
5539
5540 bool
5541 limititem_update(job_t j, int w, rlim_t r)
5542 {
5543 struct limititem *li;
5544
5545 SLIST_FOREACH(li, &j->limits, sle) {
5546 if (li->which == w) {
5547 break;
5548 }
5549 }
5550
5551 if (li == NULL) {
5552 li = calloc(1, sizeof(struct limititem));
5553
5554 if (!job_assumes(j, li != NULL)) {
5555 return false;
5556 }
5557
5558 SLIST_INSERT_HEAD(&j->limits, li, sle);
5559
5560 li->which = w;
5561 }
5562
5563 if (j->importing_hard_limits) {
5564 li->lim.rlim_max = r;
5565 li->sethard = true;
5566 } else {
5567 li->lim.rlim_cur = r;
5568 li->setsoft = true;
5569 }
5570
5571 return true;
5572 }
5573
5574 void
5575 limititem_delete(job_t j, struct limititem *li)
5576 {
5577 SLIST_REMOVE(&j->limits, li, limititem, sle);
5578
5579 free(li);
5580 }
5581
5582 #if HAVE_SANDBOX
5583 void
5584 seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
5585 {
5586 job_t j = context;
5587
5588 if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
5589 job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
5590 return;
5591 }
5592
5593 if (launch_data_get_bool(obj) == false) {
5594 return;
5595 }
5596
5597 if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
5598 j->seatbelt_flags |= SANDBOX_NAMED;
5599 }
5600 }
5601 #endif
5602
5603 void
5604 limititem_setup(launch_data_t obj, const char *key, void *context)
5605 {
5606 job_t j = context;
5607 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
5608 rlim_t rl;
5609
5610 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
5611 return;
5612 }
5613
5614 rl = launch_data_get_integer(obj);
5615
5616 for (i = 0; i < limits_cnt; i++) {
5617 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
5618 break;
5619 }
5620 }
5621
5622 if (i == limits_cnt) {
5623 return;
5624 }
5625
5626 limititem_update(j, launchd_keys2limits[i].val, rl);
5627 }
5628
5629 bool
5630 job_useless(job_t j)
5631 {
5632 if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
5633 if (j->legacy_LS_job && j->j_port) {
5634 return false;
5635 }
5636 job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
5637 return true;
5638 } else if (j->removal_pending) {
5639 job_log(j, LOG_DEBUG, "Exited while removal was pending.");
5640 return true;
5641 } else if (j->shutdown_monitor) {
5642 return false;
5643 } else if (j->mgr->shutting_down) {
5644 job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
5645 if (total_children == 0 && !j->anonymous) {
5646 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
5647 }
5648 return true;
5649 } else if (j->legacy_mach_job) {
5650 if (SLIST_EMPTY(&j->machservices)) {
5651 job_log(j, LOG_INFO, "Garbage collecting");
5652 return true;
5653 } else if (!j->checkedin) {
5654 job_log(j, LOG_WARNING, "Failed to check-in!");
5655 return true;
5656 }
5657 } else {
5658 /* If the job's executable does not have any valid architectures (for
5659 * example, if it's a PowerPC-only job), then we don't even bother
5660 * trying to relaunch it, as we have no reasonable expectation that
5661 * the situation will change.
5662 *
5663 * <rdar://problem/9106979>
5664 */
5665 if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
5666 job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
5667 return true;
5668 }
5669 }
5670
5671 return false;
5672 }
5673
5674 bool
5675 job_keepalive(job_t j)
5676 {
5677 mach_msg_type_number_t statusCnt;
5678 mach_port_status_t status;
5679 struct semaphoreitem *si;
5680 struct machservice *ms;
5681 struct stat sb;
5682 bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
5683 bool is_not_kextd = (do_apple_internal_logging || (strcmp(j->label, "com.apple.kextd") != 0));
5684
5685 if (unlikely(j->mgr->shutting_down)) {
5686 return false;
5687 }
5688
5689 /*
5690 * 5066316
5691 *
5692 * We definitely need to revisit this after Leopard ships. Please see
5693 * launchctl.c for the other half of this hack.
5694 */
5695 if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
5696 return false;
5697 }
5698
5699 if (unlikely(j->needs_kickoff)) {
5700 job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
5701 return false;
5702 }
5703
5704 if (j->start_pending) {
5705 job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
5706 return true;
5707 }
5708
5709 if (!j->ondemand) {
5710 job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
5711 return true;
5712 }
5713
5714 SLIST_FOREACH(ms, &j->machservices, sle) {
5715 statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
5716 if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
5717 (mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
5718 continue;
5719 }
5720 if (status.mps_msgcount) {
5721 job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
5722 status.mps_msgcount, ms->name);
5723 return true;
5724 }
5725 }
5726
5727 /* TODO: Coalesce external events and semaphore items, since they're basically
5728 * the same thing.
5729 */
5730 struct externalevent *ei = NULL;
5731 LIST_FOREACH(ei, &j->events, job_le) {
5732 if (ei->state == ei->wanted_state) {
5733 return true;
5734 }
5735 }
5736
5737 SLIST_FOREACH(si, &j->semaphores, sle) {
5738 bool wanted_state = false;
5739 int qdir_file_cnt;
5740 job_t other_j;
5741
5742 switch (si->why) {
5743 case NETWORK_UP:
5744 wanted_state = true; /* FALLTHROUGH: the paired case below tests wanted_state */
5745 case NETWORK_DOWN:
5746 if (network_up == wanted_state) {
5747 job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
5748 return true;
5749 }
5750 break;
5751 case SUCCESSFUL_EXIT:
5752 wanted_state = true; /* FALLTHROUGH */
5753 case FAILED_EXIT:
5754 if (good_exit == wanted_state) {
5755 job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
5756 return true;
5757 }
5758 break;
5759 case CRASHED:
5760 wanted_state = true; /* FALLTHROUGH */
5761 case DID_NOT_CRASH:
5762 if (j->crashed == wanted_state) {
5763 return true;
5764 }
5765 break;
5766 case OTHER_JOB_ENABLED:
5767 wanted_state = true; /* FALLTHROUGH */
5768 case OTHER_JOB_DISABLED:
5769 if ((bool)job_find(NULL, si->what) == wanted_state) {
5770 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
5771 return true;
5772 }
5773 break;
5774 case OTHER_JOB_ACTIVE:
5775 wanted_state = true; /* FALLTHROUGH */
5776 case OTHER_JOB_INACTIVE:
5777 if ((other_j = job_find(NULL, si->what))) {
5778 if ((bool)other_j->p == wanted_state) {
5779 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
5780 return true;
5781 }
5782 }
5783 break;
5784 case PATH_EXISTS:
5785 wanted_state = true; /* FALLTHROUGH */
5786 case PATH_MISSING:
5787 if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
5788 job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
5789 return true;
5790 } else {
5791 if (wanted_state) { /* File is not there but we wish it was. */
5792 if (si->fd != -1 && !si->watching_parent) { /* Need to be watching the parent now. */
5793 (void)job_assumes(j, runtime_close(si->fd) == 0);
5794 si->fd = -1;
5795 semaphoreitem_watch(j, si);
5796 }
5797 } else { /* File is there but we wish it wasn't. */
5798 if (si->fd != -1 && si->watching_parent) { /* Need to watch the file now. */
5799 (void)job_assumes(j, runtime_close(si->fd) == 0);
5800 si->fd = -1;
5801 semaphoreitem_watch(j, si);
5802 }
5803 }
5804 }
5805 break;
5806 case PATH_CHANGES:
5807 break;
5808 case DIR_NOT_EMPTY:
5809 if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
5810 job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
5811 } else if (qdir_file_cnt > 0) {
5812 job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
5813 return true;
5814 }
5815 break;
5816 }
5817 }
5818
5819 return false;
5820 }
5821
5822 const char *
5823 job_active(job_t j)
5824 {
5825 struct machservice *ms;
5826 if (j->p && j->shutdown_monitor) {
5827 return "Monitoring shutdown";
5828 }
5829 if (j->p) {
5830 return "PID is still valid";
5831 }
5832
5833 if (j->mgr->shutting_down && j->log_redirect_fd) {
5834 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5835 j->log_redirect_fd = 0;
5836 }
5837
5838 if (j->log_redirect_fd) {
5839 if (job_assumes(j, j->legacy_LS_job)) {
5840 return "Standard out/error is still valid";
5841 } else {
5842 (void)job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5843 j->log_redirect_fd = 0;
5844 }
5845 }
5846
5847 if (j->priv_port_has_senders) {
5848 return "Privileged Port still has outstanding senders";
5849 }
5850
5851 SLIST_FOREACH(ms, &j->machservices, sle) {
5852 if (ms->recv && machservice_active(ms)) {
5853 return "Mach service is still active";
5854 }
5855 }
5856
5857 return NULL;
5858 }
5859
5860 void
5861 machservice_watch(job_t j, struct machservice *ms)
5862 {
5863 if (ms->recv) {
5864 (void)job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
5865 }
5866 }
5867
5868 void
5869 machservice_ignore(job_t j, struct machservice *ms)
5870 {
5871 (void)job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
5872 }
5873
5874 void
5875 machservice_resetport(job_t j, struct machservice *ms)
5876 {
5877 LIST_REMOVE(ms, port_hash_sle);
5878 (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
5879 (void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
5880 ms->gen_num++;
5881 (void)job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
5882 (void)job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
5883 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
5884 }
5885
5886 struct machservice *
5887 machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
5888 {
5889 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
5890
5891 if (!job_assumes(j, ms != NULL)) {
5892 return NULL;
5893 }
5894
5895 strcpy((char *)ms->name, name);
5896 ms->job = j;
5897 ms->gen_num = 1;
5898 ms->per_pid = pid_local;
5899
5900 if (likely(*serviceport == MACH_PORT_NULL)) {
5901 if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
5902 goto out_bad;
5903 }
5904
5905 if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
5906 goto out_bad2;
5907 }
5908 *serviceport = ms->port;
5909 ms->recv = true;
5910 } else {
5911 ms->port = *serviceport;
5912 ms->isActive = true;
5913 }
5914
5915 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
5916
5917 jobmgr_t where2put = j->mgr;
5918 /* XPC domains are separate from Mach bootstraps. */
5919 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
5920 if (g_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
5921 where2put = root_jobmgr;
5922 }
5923 }
5924
5925 /* Don't allow MachServices added by multiple-instance jobs to be looked up by others.
5926 * We could just do this with a simple bit, but then we'd have to uniquify the
5927 * names ourselves to avoid collisions. This is just easier.
5928 */
5929 if (!j->dedicated_instance) {
5930 LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
5931 }
5932 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
5933
5934 job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);
5935
5936 return ms;
5937 out_bad2:
5938 (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
5939 out_bad:
5940 free(ms);
5941 return NULL;
5942 }
5943
5944 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
5945 struct machservice *
5946 machservice_new_alias(job_t j, struct machservice *orig)
5947 {
5948 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
5949 if (job_assumes(j, ms != NULL)) {
5950 strcpy((char *)ms->name, orig->name);
5951 ms->alias = orig;
5952 ms->job = j;
5953
5954 LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
5955 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
5956 jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
5957 }
5958
5959 return ms;
5960 }
5961 #endif
5962
5963 bootstrap_status_t
5964 machservice_status(struct machservice *ms)
5965 {
5966 ms = ms->alias ? ms->alias : ms;
5967 if (ms->isActive) {
5968 return BOOTSTRAP_STATUS_ACTIVE;
5969 } else if (ms->job->ondemand) {
5970 return BOOTSTRAP_STATUS_ON_DEMAND;
5971 } else {
5972 return BOOTSTRAP_STATUS_INACTIVE;
5973 }
5974 }
5975
5976 void
5977 job_setup_exception_port(job_t j, task_t target_task)
5978 {
5979 struct machservice *ms;
5980 thread_state_flavor_t f = 0;
5981 mach_port_t exc_port = the_exception_server;
5982
5983 if (unlikely(j->alt_exc_handler)) {
5984 ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
5985 if (likely(ms)) {
5986 exc_port = machservice_port(ms);
5987 } else {
5988 job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
5989 }
5990 } else if (unlikely(j->internal_exc_handler)) {
5991 exc_port = runtime_get_kernel_port();
5992 } else if (unlikely(!exc_port)) {
5993 return;
5994 }
5995
5996 #if defined (__ppc__) || defined(__ppc64__)
5997 f = PPC_THREAD_STATE64;
5998 #elif defined(__i386__) || defined(__x86_64__)
5999 f = x86_THREAD_STATE;
6000 #elif defined(__arm__)
6001 f = ARM_THREAD_STATE;
6002 #else
6003 #error "unknown architecture"
6004 #endif
6005
6006 if (likely(target_task)) {
6007 (void)job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
6008 } else if (pid1_magic && the_exception_server) {
6009 mach_port_t mhp = mach_host_self();
6010 (void)job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
6011 (void)job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
6012 }
6013 }
6014
6015 void
6016 job_set_exception_port(job_t j, mach_port_t port)
6017 {
6018 if (unlikely(!the_exception_server)) {
6019 the_exception_server = port;
6020 job_setup_exception_port(j, 0);
6021 } else {
6022 job_log(j, LOG_WARNING, "The exception server is already claimed!");
6023 }
6024 }
6025
6026 void
6027 machservice_setup_options(launch_data_t obj, const char *key, void *context)
6028 {
6029 struct machservice *ms = context;
6030 mach_port_t mhp = mach_host_self();
6031 int which_port;
6032 bool b;
6033
6034 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
6035 return;
6036 }
6037
6038 switch (launch_data_get_type(obj)) {
6039 case LAUNCH_DATA_INTEGER:
6040 which_port = (int)launch_data_get_integer(obj); /* XXX we should bound check this... */
6041 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
6042 switch (which_port) {
6043 case TASK_KERNEL_PORT:
6044 case TASK_HOST_PORT:
6045 case TASK_NAME_PORT:
6046 case TASK_BOOTSTRAP_PORT:
6047 /* I find it a little odd that zero isn't reserved in the header.
6048 * Normally Mach is fairly good about this convention... */
6049 case 0:
6050 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
6051 break;
6052 default:
6053 ms->special_port_num = which_port;
6054 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
6055 break;
6056 }
6057 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
6058 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
6059 (void)job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
6060 } else {
6061 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
6062 }
6063 }
/* The original code had no break here, so integer-typed values fell
 * through into the boolean handling below; that appears unintentional. */
break;
6064 case LAUNCH_DATA_BOOL:
6065 b = launch_data_get_bool(obj);
6066 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
6067 ms->debug_on_close = b;
6068 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
6069 ms->reset = b;
6070 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
6071 ms->hide = b;
6072 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
6073 job_set_exception_port(ms->job, ms->port);
6074 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
6075 ms->kUNCServer = b;
6076 (void)job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
6077 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_PINGEVENTUPDATES) == 0) {
6078 ms->event_update_port = b;
6079 }
6080 break;
6081 case LAUNCH_DATA_STRING:
6082 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
6083 const char *option = launch_data_get_string(obj);
6084 if (strcasecmp(option, "One") == 0) {
6085 ms->drain_one_on_crash = true;
6086 } else if (strcasecmp(option, "All") == 0) {
6087 ms->drain_all_on_crash = true;
6088 }
6089 }
6090 break;
6091 case LAUNCH_DATA_DICTIONARY:
6092 job_set_exception_port(ms->job, ms->port);
6093 break;
6094 default:
6095 break;
6096 }
6097
6098 (void)job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
6099 }
6100
6101 void
6102 machservice_setup(launch_data_t obj, const char *key, void *context)
6103 {
6104 job_t j = context;
6105 struct machservice *ms;
6106 mach_port_t p = MACH_PORT_NULL;
6107
6108 if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
6109 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
6110 return;
6111 }
6112
6113 if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
6114 return;
6115 }
6116
6117 ms->isActive = false;
6118 ms->upfront = true;
6119
6120 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
6121 launch_data_dict_iterate(obj, machservice_setup_options, ms);
6122 }
6123 }
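/* Editor's note: for reference, a MachServices plist entry exercising the
 * options handled above might look like this (illustrative fragment only;
 * the service name is hypothetical):
 *
 *	<key>MachServices</key>
 *	<dict>
 *		<key>com.example.service</key>
 *		<dict>
 *			<key>HideUntilCheckIn</key>
 *			<true/>
 *			<key>ResetAtClose</key>
 *			<true/>
 *		</dict>
 *	</dict>
 *
 * The boolean keys land in the LAUNCH_DATA_BOOL arm of
 * machservice_setup_options() and set ms->hide and ms->reset respectively. */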
6124
6125 jobmgr_t
6126 jobmgr_do_garbage_collection(jobmgr_t jm)
6127 {
6128 jobmgr_t jmi = NULL, jmn = NULL;
6129 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6130 jobmgr_do_garbage_collection(jmi);
6131 }
6132
6133 if (!jm->shutting_down) {
6134 return jm;
6135 }
6136
6137 if (SLIST_EMPTY(&jm->submgrs)) {
6138 jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
6139 } else {
6140 jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
6141 }
6142
6143 size_t actives = 0;
6144 job_t ji = NULL, jn = NULL;
6145 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
6146 if (ji->anonymous) {
6147 continue;
6148 }
6149
6150 /* Let the shutdown monitor be up until the very end. */
6151 if (ji->shutdown_monitor) {
6152 continue;
6153 }
6154
6155 /* On our first pass through, open a transaction for all the jobs that
6156 * need to be dirty at shutdown. We'll close these transactions once the
6157 * jobs that do not need to be dirty at shutdown have all exited.
6158 */
6159 if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
6160 job_open_shutdown_transaction(ji);
6161 }
6162
6163 const char *active = job_active(ji);
6164 if (!active) {
6165 job_remove(ji);
6166 } else {
6167 job_log(ji, LOG_DEBUG, "Job is active: %s", active);
6168 job_stop(ji);
6169
6170 if (ji->p && !ji->dirty_at_shutdown) {
6171 /* We really only care if the job has not yet been reaped.
6172 * There's no reason to delay shutdown if a Mach port has not
6173 * yet been sent back to us, for example. While we're shutting
6174 * all the "normal" jobs down, do not count the
6175 * dirty-at-shutdown jobs toward the total of actives.
6176 *
6177 * Note that there's a potential race here where we may not get
6178 * a port back in time, so that when we hit jobmgr_remove(), we
6179 * end up removing the job and then our attempt to close the
6180 * Mach port will fail. But at that point, the failure won't
6181 * even make it to the syslog, so not a big deal.
6182 */
6183 actives++;
6184 }
6185
6186 if (ji->clean_kill) {
6187 job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
6188 } else {
6189 job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
6190 }
6191 }
6192 }
6193
6194 jm->shutdown_jobs_dirtied = true;
6195 if (actives == 0) {
6196 if (!jm->shutdown_jobs_cleaned) {
6197 LIST_FOREACH(ji, &jm->jobs, sle) {
6198 if (!ji->anonymous) {
6199 job_close_shutdown_transaction(ji);
6200 actives++;
6201 }
6202 }
6203
6204 jm->shutdown_jobs_cleaned = true;
6205 } else if (jm->monitor_shutdown && _s_shutdown_monitor) {
6206 /* The rest of shutdown has completed, so we can kill the shutdown
6207 * monitor now like it was any other job.
6208 */
6209 _s_shutdown_monitor->shutdown_monitor = false;
6210 actives = 1;
6211
6212 job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
6213 job_stop(_s_shutdown_monitor);
6214 _s_shutdown_monitor = NULL;
6215 }
6216 }
6217
6218 jobmgr_t r = jm;
6219 if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
6220 jobmgr_log(jm, LOG_DEBUG, "Removing.");
6221 jobmgr_remove(jm);
6222 r = NULL;
6223 }
6224
6225 return r;
6226 }
6227
6228 void
6229 jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6230 {
6231 /* I maintain that stray processes should be at the mercy of launchd during shutdown,
6232 * but nevertheless, things like diskimages-helper can stick around, and SIGKILLing
6233 * them can result in data loss. So we send SIGTERM to all the strays and don't wait
6234 * for them to exit before moving on.
6235 *
6236 * See rdar://problem/6562592
6237 */
6238 size_t i = 0;
6239 for (i = 0; i < np; i++) {
6240 if (p[i] != 0) {
6241 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
6242 (void)jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
6243 }
6244 }
6245 }
6246
6247 void
6248 jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
6249 {
6250 size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6251 pid_t *pids = NULL;
6252 int i = 0, kp_cnt = 0;
6253
6254 if (likely(jm->parentmgr || !pid1_magic)) {
6255 return;
6256 }
6257
6258 if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
6259 return;
6260 }
6261
6262 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6263
6264 if (!jobmgr_assumes(jm, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
6265 goto out;
6266 }
6267
6268 pid_t *ps = (pid_t *)calloc(kp_cnt, sizeof(pid_t));
if (!jobmgr_assumes(jm, ps != NULL)) {
	goto out;
}
6269 for (i = 0; i < kp_cnt; i++) {
6270 struct proc_bsdshortinfo proc;
6271 if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6272 if (errno != ESRCH) {
6273 (void)jobmgr_assumes(jm, errno == 0);
6274 }
6275
6276 kp_skipped++;
6277 continue;
6278 }
6279
6280 pid_t p_i = pids[i];
6281 pid_t pp_i = proc.pbsi_ppid;
6282 pid_t pg_i = proc.pbsi_pgid;
6283 const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6284 const char *n = proc.pbsi_comm;
6285
6286 if (unlikely(p_i == 0 || p_i == 1)) {
6287 kp_skipped++;
6288 continue;
6289 }
6290
6291 if (_s_shutdown_monitor && pp_i == _s_shutdown_monitor->p) {
6292 kp_skipped++;
6293 continue;
6294 }
6295
6296 /* We might have some jobs hanging around that we've decided to shut down in spite of. */
6297 job_t j = jobmgr_find_by_pid(jm, p_i, false);
6298 if (!j || j->anonymous) {
6299 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
6300
6301 int status = 0;
6302 if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
6303 if (jobmgr_assumes(jm, waitpid(p_i, &status, WNOHANG) == p_i)) { /* waitpid(2) returns the pid when it reaps a zombie */
6304 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6305 }
6306 kp_skipped++;
6307 } else {
6308 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6309 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6310 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6311 * hints to the kernel along the way, so that it could shut down certain subsystems when
6312 * their userspace emissaries go away, before the call to reboot(2).
6313 */
6314 if (leader && leader->ignore_pg_at_shutdown) {
6315 kp_skipped++;
6316 } else {
6317 ps[i] = p_i;
6318 }
6319 }
6320 } else {
6321 kp_skipped++;
6322 }
6323 }
6324
6325 if ((kp_cnt - kp_skipped > 0) && kill_strays) {
/* ps is sparse (indexed by position in the full pid list), so pass the
 * full count; jobmgr_kill_stray_children() skips the zero entries. */
6326 jobmgr_kill_stray_children(jm, ps, kp_cnt);
6327 }
6328
6329 free(ps);
6330 out:
6331 free(pids);
6332 }
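/* Editor's note: a minimal sketch (not launchd code) of the libproc pattern
 * used above. proc_listallpids(3) takes a buffer size in *bytes* and returns
 * the number of PIDs written, which is why len above is computed as
 * sizeof(pid_t) times the maximum process count. The cap below is an
 * assumption for the sketch; launchd asks the kernel via get_kern_max_proc(). */
#if 0
#include <libproc.h>
#include <stdio.h>
#include <stdlib.h>

static void
list_pids_example(void)
{
	int max_pids = 4096; /* hypothetical cap */
	pid_t *pids = malloc(sizeof(pid_t) * max_pids);

	if (pids != NULL) {
		int i, cnt = proc_listallpids(pids, (int)(sizeof(pid_t) * max_pids));

		for (i = 0; i < cnt; i++) {
			struct proc_bsdshortinfo proc;
			if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
				printf("%d: %s (ppid %u)\n", pids[i], proc.pbsi_comm, proc.pbsi_ppid);
			}
		}

		free(pids);
	}
}
#endif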
6333
6334 jobmgr_t
6335 jobmgr_parent(jobmgr_t jm)
6336 {
6337 return jm->parentmgr;
6338 }
6339
6340 void
6341 job_uncork_fork(job_t j)
6342 {
6343 pid_t c = j->p;
6344
6345 job_log(j, LOG_DEBUG, "Uncorking the fork().");
6346 /* This write unblocks the child and avoids a race
6347 * between the earlier fork() and the kevent_mod(). */
6348 (void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
6349 (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
6350 j->fork_fd = 0;
6351 }
6352
6353 jobmgr_t
6354 jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
6355 {
6356 mach_msg_size_t mxmsgsz;
6357 job_t bootstrapper = NULL;
6358 jobmgr_t jmr;
6359
6360 launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);
6361
6362 if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
6363 jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
6364 return NULL;
6365 }
6366
6367 jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));
6368
6369 if (!jobmgr_assumes(jm, jmr != NULL)) {
6370 return NULL;
6371 }
6372
6373 if (jm == NULL) {
6374 root_jobmgr = jmr;
6375 }
6376
6377 jmr->kqjobmgr_callback = jobmgr_callback;
6378 strcpy(jmr->name_init, name ? name : "Under construction");
6379
6380 jmr->req_port = requestorport;
6381
6382 if ((jmr->parentmgr = jm)) {
6383 SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
6384 }
6385
6386 if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
6387 goto out_bad;
6388 }
6389
6390 if (transfer_port != MACH_PORT_NULL) {
6391 (void)jobmgr_assumes(jmr, jm != NULL);
6392 jmr->jm_port = transfer_port;
6393 } else if (!jm && !pid1_magic) {
6394 char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
6395 name_t service_buf;
6396
6397 snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());
6398
6399 if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
6400 goto out_bad;
6401 }
6402
6403 if (trusted_fd) {
6404 int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);
6405
6406 if ((dfd = dup(lfd)) >= 0) {
6407 (void)jobmgr_assumes(jmr, runtime_close(dfd) != -1);
6408 (void)jobmgr_assumes(jmr, runtime_close(lfd) != -1);
6409 }
6410
6411 unsetenv(LAUNCHD_TRUSTED_FD_ENV);
6412 }
6413
6414 /* Cut off the Libc cache; we don't want to deadlock against ourselves. */
6415 inherited_bootstrap_port = bootstrap_port;
6416 bootstrap_port = MACH_PORT_NULL;
6417 launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);
6418
6419 /* We set this explicitly as we start each child */
6420 launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
6421 } else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
6422 goto out_bad;
6423 }
6424
6425 if (!name) {
6426 sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
6427 }
6428
6429 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
6430 mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
6431 if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
6432 mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
6433 }
6434
6435 /* Total hacks. But the MIG server loop is too generic, and the more dynamic
6436 * parts of it haven't been tested, or if they have, it was a very long time
6437 * ago.
6438 */
6439 if (xpc_events_xpc_events_subsystem.maxsize > mxmsgsz) {
6440 mxmsgsz = xpc_events_xpc_events_subsystem.maxsize;
6441 }
6442 if (xpc_domain_xpc_domain_subsystem.maxsize > mxmsgsz) {
6443 mxmsgsz = xpc_domain_xpc_domain_subsystem.maxsize;
6444 }
6445
6446 if (!jm) {
6447 (void)jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
6448 (void)jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
6449 (void)jobmgr_assumes(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
6450 (void)jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
6451 }
6452
6453 if (name && !skip_init) {
6454 bootstrapper = jobmgr_init_session(jmr, name, sflag);
6455 }
6456
6457 if (!bootstrapper || !bootstrapper->weird_bootstrap) {
6458 if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
6459 goto out_bad;
6460 }
6461 }
6462
6463 jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");
6464
6465 if (bootstrapper) {
6466 bootstrapper->asport = asport;
6467
6468 jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
6469 (void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
6470 } else {
6471 jmr->req_asport = asport;
6472 }
6473
6474 if (asport != MACH_PORT_NULL) {
6475 (void)jobmgr_assumes(jmr, launchd_mport_copy_send(asport) == KERN_SUCCESS);
6476 }
6477
6478 if (jmr->parentmgr) {
6479 runtime_add_weak_ref();
6480 }
6481
6482 return jmr;
6483
6484 out_bad:
6485 if (jmr) {
6486 jobmgr_remove(jmr);
6487 if (jm == NULL) {
6488 root_jobmgr = NULL;
6489 }
6490 }
6491 return NULL;
6492 }
6493
6494 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
6495 jobmgr_t
6496 jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
6497 {
6498 jobmgr_t new = NULL;
6499
6500 /* These job managers are basically singletons, so we use the root Mach
6501 * bootstrap port as their requestor ports so they'll never go away.
6502 */
6503 mach_port_t req_port = root_jobmgr->jm_port;
6504 if (jobmgr_assumes(jm, launchd_mport_make_send(req_port) == KERN_SUCCESS)) {
6505 new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
6506 if (new) {
6507 new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
6508 new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
6509 new->xpc_singleton = true;
6510 }
6511 }
6512
6513 return new;
6514 }
6515
6516 jobmgr_t
6517 jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
6518 {
6519 jobmgr_t jmi = NULL;
6520 LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
6521 if (jmi->req_euid == uid) {
6522 return jmi;
6523 }
6524 }
6525
6526 name_t name;
6527 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
6528 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6529 if (jobmgr_assumes(jm, jmi != NULL)) {
6530 /* We need to create a per-user launchd for this UID if there isn't one
6531 * already so we can grab the bootstrap port.
6532 */
6533 job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
6534 if (jobmgr_assumes(jmi, puj != NULL)) {
6535 (void)jobmgr_assumes(jmi, launchd_mport_copy_send(puj->asport) == KERN_SUCCESS);
6536 (void)jobmgr_assumes(jmi, launchd_mport_copy_send(jmi->req_bsport) == KERN_SUCCESS);
6537 jmi->shortdesc = "per-user";
6538 jmi->req_asport = puj->asport;
6539 jmi->req_asid = puj->asid;
6540 jmi->req_euid = uid;
6541 jmi->req_egid = -1;
6542
6543 LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
6544 } else {
6545 jobmgr_remove(jmi);
6546 }
6547 }
6548
6549 return jmi;
6550 }
6551
6552 jobmgr_t
6553 jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
6554 {
6555 jobmgr_t jmi = NULL;
6556 LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
6557 if (jmi->req_asid == asid) {
6558 return jmi;
6559 }
6560 }
6561
6562 name_t name;
6563 (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
6564 jmi = jobmgr_new_xpc_singleton_domain(jm, name);
6565 if (jobmgr_assumes(jm, jmi != NULL)) {
6566 (void)jobmgr_assumes(jmi, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
6567 jmi->shortdesc = "per-session";
6568 jmi->req_bsport = root_jobmgr->jm_port;
6569 (void)jobmgr_assumes(jmi, audit_session_port(asid, &jmi->req_asport) == 0);
6570 jmi->req_asid = asid;
6571 jmi->req_euid = -1;
6572 jmi->req_egid = -1;
6573
6574 LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
6575 } else {
6576 jobmgr_remove(jmi);
6577 }
6578
6579 return jmi;
6580 }
6581 #endif
6582
6583 job_t
6584 jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
6585 {
6586 const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
6587 char thelabel[1000];
6588 job_t bootstrapper;
6589
6590 snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
6591 bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
6592
6593 if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
6594 bootstrapper->is_bootstrapper = true;
6595 char buf[100];
6596
6597 /* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
6598 snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
6599 envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false, false);
6600 bootstrapper->weird_bootstrap = true;
6601 (void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
6602 } else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
6603 bootstrapper->is_bootstrapper = true;
6604 if (jobmgr_assumes(jm, pid1_magic)) {
6605 /* Have our system bootstrapper print out to the console. */
6606 bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
6607 bootstrapper->stderrpath = strdup(_PATH_CONSOLE);
6608
6609 if (g_console) {
6610 (void)jobmgr_assumes(jm, kevent_mod((uintptr_t)fileno(g_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
6611 }
6612 }
6613 }
6614
6615 jm->session_initialized = true;
6616 return bootstrapper;
6617 }
6618
6619 jobmgr_t
6620 jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
6621 {
6622 struct machservice *ms, *next_ms;
6623 jobmgr_t jmi, jmn;
6624
6625 /* Mach ports, unlike Unix descriptors, are reference counted. In other
6626 * words, when some program hands us a second or subsequent send right
6627 * to a port we already have open, the Mach kernel gives us the same
6628 * port number back and increments a reference count associated with
6629 * the port. This forces us, when discovering that a receive right at
6630 * the other end has been deleted, to wander all of our objects to see
6631 * what weird places clients might have handed us the same send right
6632 * to use. (See the illustrative sketch after this function.)
6633 */
6634
6635 if (jm == root_jobmgr) {
6636 if (port == inherited_bootstrap_port) {
6637 (void)jobmgr_assumes(jm, launchd_mport_deallocate(port) == KERN_SUCCESS);
6638 inherited_bootstrap_port = MACH_PORT_NULL;
6639
6640 return jobmgr_shutdown(jm);
6641 }
6642
6643 LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
6644 if (ms->port == port && !ms->recv) {
6645 machservice_delete(ms->job, ms, true);
6646 }
6647 }
6648 }
6649
6650 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6651 jobmgr_delete_anything_with_port(jmi, port);
6652 }
6653
6654 if (jm->req_port == port) {
6655 jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
6656 return jobmgr_shutdown(jm);
6657 }
6658
6659 return jm;
6660 }
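/* Editor's note: a minimal sketch (not launchd code) of the send-right
 * reference counting described in the comment inside the function above:
 * taking another reference on a send right we already hold raises its uref
 * count under the same name, so dropping one reference does not invalidate
 * the name. */
#if 0
#include <mach/mach.h>
#include <stdio.h>

static void
send_right_refcount_example(mach_port_t port)
{
	mach_port_urefs_t refs = 0;

	/* Take an extra user reference on the send right... */
	if (mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1) != KERN_SUCCESS) {
		return;
	}

	(void)mach_port_get_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, &refs);
	printf("urefs now: %u\n", refs); /* at least 2 */

	/* ...then drop one; the name remains valid because a reference is left. */
	(void)mach_port_deallocate(mach_task_self(), port);
	(void)mach_port_get_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, &refs);
	printf("urefs after deallocate: %u\n", refs);
}
#endif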
6661
6662 struct machservice *
6663 jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
6664 {
6665 struct machservice *ms;
6666 job_t target_j;
6667
6668 jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);
6669
6670 if (target_pid) {
6671 /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
6672 * bootstrap in other bootstraps.
6673 */
6674
6675 /* Start in the given bootstrap. */
6676 if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
6677 /* If we fail, do a deep traversal. */
6678 if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
6679 jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
6680 return NULL;
6681 }
6682 }
6683
6684 SLIST_FOREACH(ms, &target_j->machservices, sle) {
6685 if (ms->per_pid && strcmp(name, ms->name) == 0) {
6686 return ms;
6687 }
6688 }
6689
6690 job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
6691 return NULL;
6692 }
6693
6694 jobmgr_t where2look = jm;
6695 /* XPC domains are separate from Mach bootstraps. */
6696 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
6697 if (g_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
6698 where2look = root_jobmgr;
6699 }
6700 }
6701
6702 LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
6703 if (!ms->per_pid && strcmp(name, ms->name) == 0) {
6704 return ms;
6705 }
6706 }
6707
6708 if (jm->parentmgr == NULL || !check_parent) {
6709 return NULL;
6710 }
6711
6712 return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
6713 }
6714
6715 mach_port_t
6716 machservice_port(struct machservice *ms)
6717 {
6718 return ms->port;
6719 }
6720
6721 job_t
6722 machservice_job(struct machservice *ms)
6723 {
6724 return ms->job;
6725 }
6726
6727 bool
6728 machservice_hidden(struct machservice *ms)
6729 {
6730 return ms->hide;
6731 }
6732
6733 bool
6734 machservice_active(struct machservice *ms)
6735 {
6736 return ms->isActive;
6737 }
6738
6739 const char *
6740 machservice_name(struct machservice *ms)
6741 {
6742 return ms->name;
6743 }
6744
6745 void
6746 machservice_drain_port(struct machservice *ms)
6747 {
6748 bool drain_one = ms->drain_one_on_crash;
6749 bool drain_all = ms->drain_all_on_crash;
6750
6751 if (!job_assumes(ms->job, drain_one || drain_all)) {
6752 return;
6753 }
6754
6755 job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);
6756
6757 char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
6758 char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
6759 mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
6760 mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;
6761
6762 mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
6763
6764 do {
6765 /* This should be a direct check on the Mach service to see if it's an exception-handling
6766 * port, and it will break things if ReportCrash or SafetyNet start advertising other
6767 * Mach services. But for now, it should be okay.
6768 */
6769 if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
6770 mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
6771 } else {
6772 mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT;
6774
6775 mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
6776 switch (mr) {
6777 case MACH_MSG_SUCCESS:
6778 mach_msg_destroy((mach_msg_header_t *)req_hdr);
6779 break;
6780 case MACH_RCV_TIMED_OUT:
6781 break;
6782 case MACH_RCV_TOO_LARGE:
6783 runtime_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
6784 break;
6785 default:
6786 break;
6787 }
6788 }
6789 } while (drain_all && mr != MACH_RCV_TIMED_OUT);
6790 }
6791
6792 void
6793 machservice_delete(job_t j, struct machservice *ms, bool port_died)
6794 {
6795 if (ms->alias) {
6796 /* HACK: Egregious code duplication. But dealing with aliases is a
6797 * pretty simple affair since they can't and shouldn't have any complex
6798 * behaviors associated with them.
6799 */
6800 LIST_REMOVE(ms, name_hash_sle);
6801 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
6802 free(ms);
6803 return;
6804 }
6805
6806 if (unlikely(ms->debug_on_close)) {
6807 job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
6808 (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
6809 }
6810
6811 if (ms->recv && job_assumes(j, !machservice_active(ms))) {
6812 job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
6813 (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
6814 }
6815
6816 (void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
6817
6818 if (unlikely(ms->port == the_exception_server)) {
6819 the_exception_server = 0;
6820 }
6821
6822 job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);
6823
6824 if (ms->special_port_num) {
6825 SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
6826 }
6827 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
6828
6829 if (!(j->dedicated_instance || ms->event_channel)) {
6830 LIST_REMOVE(ms, name_hash_sle);
6831 }
6832 LIST_REMOVE(ms, port_hash_sle);
6833
6834 free(ms);
6835 }
6836
6837 void
6838 machservice_request_notifications(struct machservice *ms)
6839 {
6840 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
6841
6842 ms->isActive = true;
6843
6844 if (ms->recv) {
6845 which = MACH_NOTIFY_PORT_DESTROYED;
6846 job_checkin(ms->job);
6847 }
6848
6849 (void)job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
6850 }
6851
6852 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
6853 #define END_OF(x) (&(x)[NELEM(x)])
6854
6855 char **
6856 mach_cmd2argv(const char *string)
6857 {
6858 char *argv[100], args[1000];
6859 const char *cp;
6860 char *argp = args, term, **argv_ret, *co;
6861 unsigned int nargs = 0, i;
6862
6863 for (cp = string; *cp;) {
6864 while (isspace(*cp))
6865 cp++;
6866 term = (*cp == '"') ? *cp++ : '\0';
6867 if (nargs < NELEM(argv) - 1) { /* reserve the last slot for the NULL terminator */
6868 argv[nargs++] = argp;
6869 }
6870 while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args)) {
6871 if (*cp == '\\') {
6872 cp++;
6873 }
6874 *argp++ = *cp;
6875 if (*cp) {
6876 cp++;
6877 }
6878 }
6879 *argp++ = '\0';
6880 }
6881 argv[nargs] = NULL;
6882
6883 if (nargs == 0) {
6884 return NULL;
6885 }
6886
6887 argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);
6888
6889 if (!launchd_assumes(argv_ret != NULL)) {
6890 return NULL;
6891 }
6892
6893 co = (char *)argv_ret + (nargs + 1) * sizeof(char *);
6894
6895 for (i = 0; i < nargs; i++) {
6896 strcpy(co, argv[i]);
6897 argv_ret[i] = co;
6898 co += strlen(argv[i]) + 1;
6899 }
6900 argv_ret[i] = NULL;
6901
6902 return argv_ret;
6903 }
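/* Editor's note: a usage sketch (not launchd code) for mach_cmd2argv().
 * Tokens are split on whitespace, a leading double quote starts a quoted
 * token, and backslash escapes the next character. The vector and its
 * strings live in a single allocation, so one free() releases everything.
 * The command string below is hypothetical. */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void
cmd2argv_example(void)
{
	char **argv = mach_cmd2argv("/bin/launchctl bootstrap -S Aqua");

	if (argv != NULL) {
		size_t i;
		for (i = 0; argv[i] != NULL; i++) {
			printf("argv[%zu] = %s\n", i, argv[i]);
		}
		/* argv[0] = /bin/launchctl, argv[1] = bootstrap, argv[2] = -S, argv[3] = Aqua */
		free(argv);
	}
}
#endif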
6904
6905 void
6906 job_checkin(job_t j)
6907 {
6908 j->checkedin = true;
6909 }
6910
6911 bool job_is_god(job_t j)
6912 {
6913 return j->embedded_special_privileges;
6914 }
6915
6916 bool
6917 job_ack_port_destruction(mach_port_t p)
6918 {
6919 struct machservice *ms;
6920 job_t j;
6921
6922 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
6923 if (ms->recv && (ms->port == p)) {
6924 break;
6925 }
6926 }
6927
6928 if (!jobmgr_assumes(root_jobmgr, ms != NULL)) {
6929 return false;
6930 }
6931
6932 j = ms->job;
6933
6934 jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
6935
6936 /* Without being the exception handler, NOTE_EXIT is our only way to tell if the job
6937 * crashed, and we can't rely on NOTE_EXIT always being processed after all the job's
6938 * receive rights have been returned.
6939 *
6940 * So when we get receive rights back, check to see if the job has been reaped yet. If
6941 * not, then we add this service to a list of services to be drained on crash, if that
6942 * behavior was requested. So, for a job with N receive rights all requesting that they
6943 * be drained on crash, we can safely handle the following sequence of events.
6944 *
6945 * ReceiveRight0Returned
6946 * ReceiveRight1Returned
6947 * ReceiveRight2Returned
6948 * NOTE_EXIT (reap, get exit status)
6949 * ReceiveRight3Returned
6950 * .
6951 * .
6952 * .
6953 * ReceiveRight(N - 1)Returned
6954 */
6955
6956 if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
6957 if (j->crashed && j->reaped) {
6958 job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
6959 machservice_drain_port(ms);
6960 } else if (!(j->crashed || j->reaped)) {
6961 job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
6962 }
6963 }
6964
6965 /* If we get this notification after the job has been reaped, then we want to ping
6966 * the event port to keep things going.
6967 */
6968 if (ms->event_update_port && !j->p && job_assumes(j, j->event_monitor)) {
6969 if (_s_event_update_port == MACH_PORT_NULL) {
6970 (void)job_assumes(j, launchd_mport_make_send_once(ms->port, &_s_event_update_port) == KERN_SUCCESS);
6971 }
6972 eventsystem_ping();
6973 }
6974
6975 ms->isActive = false;
6976 if (ms->delete_on_destruction) {
6977 machservice_delete(j, ms, false);
6978 } else if (ms->reset) {
6979 machservice_resetport(j, ms);
6980 }
6981
6982 job_dispatch(j, false);
6983
6984 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
6985
6986 return true;
6987 }
6988
6989 void
6990 job_ack_no_senders(job_t j)
6991 {
6992 j->priv_port_has_senders = false;
6993
6994 (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
6995 j->j_port = 0;
6996
6997 job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");
6998
6999 job_dispatch(j, false);
7000 }
7001
7002 bool
7003 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
7004 {
7005 struct semaphoreitem *si;
7006 size_t alloc_sz = sizeof(struct semaphoreitem);
7007
7008 if (what) {
7009 alloc_sz += strlen(what) + 1;
7010 }
7011
7012 if (!job_assumes(j, si = calloc(1, alloc_sz))) {
7013 return false;
7014 }
7015
7016 si->fd = -1;
7017 si->why = why;
7018
7019 if (what) {
7020 strcpy(si->what_init, what);
7021 }
7022
7023 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
7024
7025 if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
7026 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
7027 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
7028 j->nosy = true;
7029 }
7030
7031 semaphoreitem_runtime_mod_ref(si, true);
7032
7033 return true;
7034 }
7035
7036 void
7037 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
7038 {
7039 /*
7040 * External events need to be tracked.
7041 * Internal events do NOT need to be tracked.
7042 */
7043
7044 switch (si->why) {
7045 case SUCCESSFUL_EXIT:
7046 case FAILED_EXIT:
7047 case OTHER_JOB_ENABLED:
7048 case OTHER_JOB_DISABLED:
7049 case OTHER_JOB_ACTIVE:
7050 case OTHER_JOB_INACTIVE:
7051 return;
7052 default:
7053 break;
7054 }
7055
7056 if (add) {
7057 runtime_add_weak_ref();
7058 } else {
7059 runtime_del_weak_ref();
7060 }
7061 }
7062
7063 void
7064 semaphoreitem_delete(job_t j, struct semaphoreitem *si)
7065 {
7066 semaphoreitem_runtime_mod_ref(si, false);
7067
7068 SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
7069
7070 if (si->fd != -1) {
7071 (void)job_assumes(j, runtime_close(si->fd) != -1);
7072 }
7073
7074 /* We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores. */
7075 if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
7076 j->nosy = false;
7077 SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
7078 }
7079
7080 free(si);
7081 }
7082
7083 void
7084 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
7085 {
7086 struct semaphoreitem_dict_iter_context *sdic = context;
7087 semaphore_reason_t why;
7088
7089 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
7090
7091 semaphoreitem_new(sdic->j, why, key);
7092 }
7093
7094 void
7095 semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
7096 {
7097 struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
7098 job_t j = context;
7099 semaphore_reason_t why;
7100
7101 switch (launch_data_get_type(obj)) {
7102 case LAUNCH_DATA_BOOL:
7103 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
7104 why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
7105 semaphoreitem_new(j, why, NULL);
7106 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
7107 why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
7108 semaphoreitem_new(j, why, NULL);
7109 j->start_pending = true;
7110 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
7111 j->needs_kickoff = launch_data_get_bool(obj);
7112 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
7113 why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
7114 semaphoreitem_new(j, why, NULL);
7115 j->start_pending = true;
7116 } else {
7117 (void)job_assumes(j, false);
7118 }
7119 break;
7120 case LAUNCH_DATA_DICTIONARY:
7121 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
7122 sdic.why_true = PATH_EXISTS;
7123 sdic.why_false = PATH_MISSING;
7124 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
7125 sdic.why_true = OTHER_JOB_ACTIVE;
7126 sdic.why_false = OTHER_JOB_INACTIVE;
7127 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
7128 sdic.why_true = OTHER_JOB_ENABLED;
7129 sdic.why_false = OTHER_JOB_DISABLED;
7130 } else {
7131 (void)job_assumes(j, false);
7132 break;
7133 }
7134
7135 launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
7136 break;
7137 default:
7138 (void)job_assumes(j, false);
7139 break;
7140 }
7141 }
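/* Editor's note: for reference, a KeepAlive dictionary exercising the cases
 * above might look like this in a job's plist (illustrative fragment; the
 * path is hypothetical):
 *
 *	<key>KeepAlive</key>
 *	<dict>
 *		<key>SuccessfulExit</key>
 *		<false/>
 *		<key>PathState</key>
 *		<dict>
 *			<key>/var/run/example.pid</key>
 *			<true/>
 *		</dict>
 *	</dict>
 *
 * SuccessfulExit=false maps to FAILED_EXIT (restart only after a failed
 * exit), and the PathState entry maps to PATH_EXISTS for that path. */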
7142
7143 bool
7144 externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event)
7145 {
7146 struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
7147 if (job_assumes(j, ee != NULL)) {
7148 ee->event = launch_data_copy(event);
7149 if (job_assumes(j, ee->event != NULL)) {
7150 strcpy(ee->name, evname);
7151 ee->job = j;
7152 ee->id = sys->curid;
7153 ee->sys = sys;
7154 ee->state = false;
7155 ee->wanted_state = true;
7156 sys->curid++;
7157
7158 LIST_INSERT_HEAD(&j->events, ee, job_le);
7159 LIST_INSERT_HEAD(&sys->events, ee, sys_le);
7160
7161 job_log(j, LOG_DEBUG, "New event: %s:%s", sys->name, evname);
7162 } else {
7163 free(ee);
7164 ee = NULL;
7165 }
7166 }
7167
7168 eventsystem_ping();
7169 return ee != NULL;
7170 }
7171
7172 void
7173 externalevent_delete(struct externalevent *ee)
7174 {
7175 launch_data_free(ee->event);
7176 LIST_REMOVE(ee, job_le);
7177 LIST_REMOVE(ee, sys_le);
7178
7179 free(ee);
7180
7181 eventsystem_ping();
7182 }
7183
7184 void
7185 externalevent_setup(launch_data_t obj, const char *key, void *context)
7186 {
7187 struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
7188 (void)job_assumes(ctx->j, externalevent_new(ctx->j, ctx->sys, (char *)key, obj));
7189 }
7190
7191 struct externalevent *
7192 externalevent_find(const char *sysname, uint64_t id)
7193 {
7194 struct externalevent *ei = NULL;
7195
7196 struct eventsystem *es = eventsystem_find(sysname);
7197 if (launchd_assumes(es != NULL)) {
7198 LIST_FOREACH(ei, &es->events, sys_le) {
7199 if (ei->id == id) {
7200 break;
7201 }
7202 }
7203 }
7204
7205 return ei;
7206 }
7207
7208 struct eventsystem *
7209 eventsystem_new(const char *name)
7210 {
7211 struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
7212 if (launchd_assumes(es != NULL)) {
7213 strcpy(es->name, name);
7214 LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
7215 }
7216
7217 return es;
7218 }
7219
7220 void
7221 eventsystem_delete(struct eventsystem *es)
7222 {
7223 struct externalevent *ei = NULL;
7224 while ((ei = LIST_FIRST(&es->events))) {
7225 externalevent_delete(ei);
7226 }
7227
7228 LIST_REMOVE(es, global_le);
7229
7230 free(es);
7231 }
7232
7233 void
7234 eventsystem_setup(launch_data_t obj, const char *key, void *context)
7235 {
7236 job_t j = (job_t)context;
7237 if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
7238 return;
7239 }
7240
7241 struct eventsystem *sys = eventsystem_find(key);
7242 if (unlikely(sys == NULL)) {
7243 sys = eventsystem_new(key);
7244 job_log(j, LOG_DEBUG, "New event system: %s", key);
7245 }
7246
7247 if (job_assumes(j, sys != NULL)) {
7248 struct externalevent_iter_ctx ctx = {
7249 .j = j,
7250 .sys = sys,
7251 };
7252 launch_data_dict_iterate(obj, externalevent_setup, &ctx);
7253 sys->has_updates = true;
7254 }
7255 }
7256
7257 struct eventsystem *
7258 eventsystem_find(const char *name)
7259 {
7260 struct eventsystem *esi = NULL;
7261 LIST_FOREACH(esi, &_s_event_systems, global_le) {
7262 if (strcmp(name, esi->name) == 0) {
7263 break;
7264 }
7265 }
7266
7267 return esi;
7268 }
7269
7270 void
7271 eventsystem_ping(void)
7272 {
7273 /* We don't wrap this in an assumes() macro because we could potentially
7274 * call this function many times before the helper job gets back to us
7275 * and gives us another send-once right. So if it's MACH_PORT_NULL, that
7276 * means that we've sent a ping, but the helper hasn't yet checked in to
7277 * get the new set of notifications.
7278 */
7279 if (_s_event_update_port != MACH_PORT_NULL) {
7280 kern_return_t kr = helper_downcall_ping(_s_event_update_port);
7281 if (kr != KERN_SUCCESS) {
7282 runtime_syslog(LOG_NOTICE, "helper_downcall_ping(): kr = 0x%x", kr);
7283 }
7284 _s_event_update_port = MACH_PORT_NULL;
7285 }
7286 }
7287
7288 void
7289 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
7290 {
7291 jobmgr_t jmi, jmn;
7292 job_t ji, jn;
7293
7294
7295 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7296 jobmgr_dispatch_all_semaphores(jmi);
7297 }
7298
7299 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7300 if (!SLIST_EMPTY(&ji->semaphores)) {
7301 job_dispatch(ji, false);
7302 }
7303 }
7304 }
7305
7306 time_t
7307 cronemu(int mon, int mday, int hour, int min)
7308 {
7309 struct tm workingtm;
7310 time_t now;
7311
7312 now = time(NULL);
7313 workingtm = *localtime(&now);
7314
7315 workingtm.tm_isdst = -1;
7316 workingtm.tm_sec = 0;
7317 workingtm.tm_min++;
7318
7319 while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
7320 workingtm.tm_year++;
7321 workingtm.tm_mon = 0;
7322 workingtm.tm_mday = 1;
7323 workingtm.tm_hour = 0;
7324 workingtm.tm_min = 0;
7325 mktime(&workingtm);
7326 }
7327
7328 return mktime(&workingtm);
7329 }
7330
7331 time_t
7332 cronemu_wday(int wday, int hour, int min)
7333 {
7334 struct tm workingtm;
7335 time_t now;
7336
7337 now = time(NULL);
7338 workingtm = *localtime(&now);
7339
7340 workingtm.tm_isdst = -1;
7341 workingtm.tm_sec = 0;
7342 workingtm.tm_min++;
7343
7344 if (wday == 7) {
7345 wday = 0;
7346 }
7347
7348 while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
7349 workingtm.tm_mday++;
7350 workingtm.tm_hour = 0;
7351 workingtm.tm_min = 0;
7352 mktime(&workingtm);
7353 }
7354
7355 return mktime(&workingtm);
7356 }
7357
7358 bool
7359 cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
7360 {
7361 if (mon == -1) {
7362 struct tm workingtm = *wtm;
7363 int carrytest;
7364
7365 while (!cronemu_mday(&workingtm, mday, hour, min)) {
7366 workingtm.tm_mon++;
7367 workingtm.tm_mday = 1;
7368 workingtm.tm_hour = 0;
7369 workingtm.tm_min = 0;
7370 carrytest = workingtm.tm_mon;
7371 mktime(&workingtm);
7372 if (carrytest != workingtm.tm_mon) {
7373 return false;
7374 }
7375 }
7376 *wtm = workingtm;
7377 return true;
7378 }
7379
7380 if (mon < wtm->tm_mon) {
7381 return false;
7382 }
7383
7384 if (mon > wtm->tm_mon) {
7385 wtm->tm_mon = mon;
7386 wtm->tm_mday = 1;
7387 wtm->tm_hour = 0;
7388 wtm->tm_min = 0;
7389 }
7390
7391 return cronemu_mday(wtm, mday, hour, min);
7392 }
7393
7394 bool
7395 cronemu_mday(struct tm *wtm, int mday, int hour, int min)
7396 {
7397 if (mday == -1) {
7398 struct tm workingtm = *wtm;
7399 int carrytest;
7400
7401 while (!cronemu_hour(&workingtm, hour, min)) {
7402 workingtm.tm_mday++;
7403 workingtm.tm_hour = 0;
7404 workingtm.tm_min = 0;
7405 carrytest = workingtm.tm_mday;
7406 mktime(&workingtm);
7407 if (carrytest != workingtm.tm_mday) {
7408 return false;
7409 }
7410 }
7411 *wtm = workingtm;
7412 return true;
7413 }
7414
7415 if (mday < wtm->tm_mday) {
7416 return false;
7417 }
7418
7419 if (mday > wtm->tm_mday) {
7420 wtm->tm_mday = mday;
7421 wtm->tm_hour = 0;
7422 wtm->tm_min = 0;
7423 }
7424
7425 return cronemu_hour(wtm, hour, min);
7426 }
7427
7428 bool
7429 cronemu_hour(struct tm *wtm, int hour, int min)
7430 {
7431 if (hour == -1) {
7432 struct tm workingtm = *wtm;
7433 int carrytest;
7434
7435 while (!cronemu_min(&workingtm, min)) {
7436 workingtm.tm_hour++;
7437 workingtm.tm_min = 0;
7438 carrytest = workingtm.tm_hour;
7439 mktime(&workingtm);
7440 if (carrytest != workingtm.tm_hour) {
7441 return false;
7442 }
7443 }
7444 *wtm = workingtm;
7445 return true;
7446 }
7447
7448 if (hour < wtm->tm_hour) {
7449 return false;
7450 }
7451
7452 if (hour > wtm->tm_hour) {
7453 wtm->tm_hour = hour;
7454 wtm->tm_min = 0;
7455 }
7456
7457 return cronemu_min(wtm, min);
7458 }
7459
7460 bool
7461 cronemu_min(struct tm *wtm, int min)
7462 {
7463 if (min == -1) {
7464 return true;
7465 }
7466
7467 if (min < wtm->tm_min) {
7468 return false;
7469 }
7470
7471 if (min > wtm->tm_min) {
7472 wtm->tm_min = min;
7473 }
7474
7475 return true;
7476 }
7477
7478 kern_return_t
7479 job_mig_setup_shmem(job_t j, mach_port_t *shmem_port)
7480 {
7481 memory_object_size_t size_of_page, size_of_page_orig;
7482 vm_address_t vm_addr;
7483 kern_return_t kr;
7484
7485 if (!launchd_assumes(j != NULL)) {
7486 return BOOTSTRAP_NO_MEMORY;
7487 }
7488
7489 if (unlikely(j->anonymous)) {
7490 job_log(j, LOG_DEBUG, "Anonymous job tried to setup shared memory");
7491 return BOOTSTRAP_NOT_PRIVILEGED;
7492 }
7493
7494 if (unlikely(j->shmem)) {
7495 job_log(j, LOG_ERR, "Tried to setup shared memory more than once");
7496 return BOOTSTRAP_NOT_PRIVILEGED;
7497 }
7498
7499 size_of_page_orig = size_of_page = getpagesize();
7500
7501 kr = vm_allocate(mach_task_self(), &vm_addr, size_of_page, true);
7502
7503 if (!job_assumes(j, kr == 0)) {
7504 return kr;
7505 }
7506
7507 j->shmem = (typeof(j->shmem))vm_addr;
7508 j->shmem->vp_shmem_standby_timeout = j->timeout;
7509
7510 kr = mach_make_memory_entry_64(mach_task_self(), &size_of_page,
7511 (memory_object_offset_t)vm_addr, VM_PROT_READ|VM_PROT_WRITE, shmem_port, 0);
7512
7513 if (job_assumes(j, kr == 0)) {
7514 (void)job_assumes(j, size_of_page == size_of_page_orig);
7515 }
7516
7517 /* no need to inherit this in child processes */
7518 (void)job_assumes(j, vm_inherit(mach_task_self(), (vm_address_t)j->shmem, size_of_page_orig, VM_INHERIT_NONE) == 0);
7519
7520 return kr;
7521 }
7522
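/* Editor's sketch (hypothetical client side, not original code): a job that
 * receives 'shmem_port' from the routine above would map it with vm_map(),
 * e.g.:
 *
 *     vm_address_t addr = 0;
 *     kern_return_t kr = vm_map(mach_task_self(), &addr, getpagesize(), 0,
 *         VM_FLAGS_ANYWHERE, shmem_port, 0, false,
 *         VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE,
 *         VM_INHERIT_NONE);
 *
 * after which both sides share the same physical page.
 */
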
7523 kern_return_t
7524 job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
7525 {
7526 struct ldcred *ldc = runtime_get_caller_creds();
7527 job_t js;
7528
7529 if (!launchd_assumes(j != NULL)) {
7530 return BOOTSTRAP_NO_MEMORY;
7531 }
7532
7533 if (unlikely(j->deny_job_creation)) {
7534 return BOOTSTRAP_NOT_PRIVILEGED;
7535 }
7536
7537 #if HAVE_SANDBOX
7538 const char **argv = (const char **)mach_cmd2argv(server_cmd);
7539 if (unlikely(argv == NULL)) {
7540 return BOOTSTRAP_NO_MEMORY;
7541 }
7542 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
7543 free(argv);
7544 return BOOTSTRAP_NOT_PRIVILEGED;
7545 }
7546 free(argv);
7547 #endif
7548
7549 job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);
7550
7551 if (pid1_magic) {
7552 if (ldc->euid || ldc->uid) {
7553 job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
7554 return VPROC_ERR_TRY_PER_USER;
7555 }
7556 } else {
7557 if (unlikely(server_uid != getuid())) {
7558 job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
7559 server_cmd, getuid(), server_uid);
7560 }
7561 server_uid = 0; /* zero means "do nothing" */
7562 }
7563
7564 js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);
7565
7566 if (unlikely(js == NULL)) {
7567 return BOOTSTRAP_NO_MEMORY;
7568 }
7569
7570 *server_portp = js->j_port;
7571 return BOOTSTRAP_SUCCESS;
7572 }
7573
7574 kern_return_t
7575 job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
7576 {
7577 struct ldcred *ldc = runtime_get_caller_creds();
7578 job_t otherj;
7579
7580 if (!launchd_assumes(j != NULL)) {
7581 return BOOTSTRAP_NO_MEMORY;
7582 }
7583
7584 if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
7585 #if TARGET_OS_EMBEDDED
7586 if (!j->embedded_special_privileges) {
7587 return BOOTSTRAP_NOT_PRIVILEGED;
7588 }
7589 #else
7590 return BOOTSTRAP_NOT_PRIVILEGED;
7591 #endif
7592 }
7593
7594 #if HAVE_SANDBOX
7595 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
7596 return BOOTSTRAP_NOT_PRIVILEGED;
7597 }
7598 #endif
7599
7600 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
7601 return BOOTSTRAP_UNKNOWN_SERVICE;
7602 }
7603
7604 #if TARGET_OS_EMBEDDED
7605 if (j->embedded_special_privileges && strcmp(j->username, otherj->username) != 0) {
7606 return BOOTSTRAP_NOT_PRIVILEGED;
7607 }
7608 #endif
7609
7610 if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
7611 bool do_block = otherj->p;
7612
7613 if (otherj->anonymous) {
7614 return BOOTSTRAP_NOT_PRIVILEGED;
7615 }
7616
7617 job_remove(otherj);
7618
7619 if (do_block) {
7620 job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
7621 /* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
7622 (void)job_assumes(otherj, waiting4removal_new(otherj, srp));
7623 return MIG_NO_REPLY;
7624 } else {
7625 return 0;
7626 }
7627 } else if (sig == VPROC_MAGIC_TRYKILL_SIGNAL) {
7628 if (!j->kill_via_shmem) {
7629 return BOOTSTRAP_NOT_PRIVILEGED;
7630 }
7631
7632 if (!j->shmem) {
7633 j->sent_kill_via_shmem = true;
7634 (void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
7635 return 0;
7636 }
7637
7638 #if !TARGET_OS_EMBEDDED
7639 if (__sync_bool_compare_and_swap(&j->shmem->vp_shmem_transaction_cnt, 0, -1)) {
7640 j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
7641 j->sent_kill_via_shmem = true;
7642 (void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
7643 return 0;
7644 }
7645 #endif
7646 return BOOTSTRAP_NOT_PRIVILEGED;
7647 } else if (otherj->p) {
7648 (void)job_assumes(j, runtime_kill(otherj->p, sig) != -1);
7649 }
7650
7651 return 0;
7652 }
7653
7654 kern_return_t
7655 job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
7656 {
7657 struct ldcred *ldc = runtime_get_caller_creds();
7658
7659 if (!launchd_assumes(j != NULL)) {
7660 return BOOTSTRAP_NO_MEMORY;
7661 }
7662
7663 if (!job_assumes(j, j->per_user)) {
7664 return BOOTSTRAP_NOT_PRIVILEGED;
7665 }
7666
7667 return runtime_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
7668 }
7669
7670 kern_return_t
7671 job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
7672 {
7673 struct ldcred *ldc = runtime_get_caller_creds();
7674
7675 if (!launchd_assumes(j != NULL)) {
7676 return BOOTSTRAP_NO_MEMORY;
7677 }
7678
7679 if (unlikely(ldc->euid)) {
7680 return BOOTSTRAP_NOT_PRIVILEGED;
7681 }
7682
7683 return runtime_log_drain(srp, outval, outvalCnt);
7684 }
7685
7686 kern_return_t
7687 job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
7688 {
7689 const char *action;
7690 launch_data_t input_obj = NULL, output_obj = NULL;
7691 size_t data_offset = 0;
7692 size_t packed_size;
7693 struct ldcred *ldc = runtime_get_caller_creds();
7694
7695 if (!launchd_assumes(j != NULL)) {
7696 return BOOTSTRAP_NO_MEMORY;
7697 }
7698 if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
7699 return BOOTSTRAP_NOT_PRIVILEGED;
7700 }
7701 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
7702 return 1;
7703 }
7704
7705 if (inkey && outkey) {
7706 action = "Swapping";
7707 } else if (inkey) {
7708 action = "Setting";
7709 } else {
7710 action = "Getting";
7711 }
7712
7713 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
7714
7715 *outvalCnt = 20 * 1024 * 1024;
7716 mig_allocate(outval, *outvalCnt);
7717 if (!job_assumes(j, *outval != 0)) {
7718 return 1;
7719 }
7720
7721 /* Note to future maintainers: launch_data_unpack() does NOT return a heap object. The data
7722 * is decoded in-place. So do not call launch_data_free() on input_obj.
7723 */
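/* Editor's illustration (hypothetical names): because unpacking is in-place,
 *
 *     size_t off = 0;
 *     launch_data_t view = launch_data_unpack(buf, len, NULL, 0, &off, NULL);
 *     // 'view' aliases 'buf'; it dies with 'buf' and must not be freed.
 */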
7724 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
7725 if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
7726 goto out_bad;
7727 }
7728
7729 switch (outkey) {
7730 case VPROC_GSK_ENVIRONMENT:
7731 if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
7732 goto out_bad;
7733 }
7734 jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
7735 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
7736 if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
7737 goto out_bad;
7738 }
7739 launch_data_free(output_obj);
7740 break;
7741 case VPROC_GSK_ALLJOBS:
7742 if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
7743 goto out_bad;
7744 }
7745 ipc_revoke_fds(output_obj);
7746 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
7747 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7748 if (!job_assumes(j, packed_size != 0)) {
7749 goto out_bad;
7750 }
7751 launch_data_free(output_obj);
7752 break;
7753 case VPROC_GSK_MGR_NAME:
7754 if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
7755 goto out_bad;
7756 }
7757 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7758 if (!job_assumes(j, packed_size != 0)) {
7759 goto out_bad;
7760 }
7761
7762 launch_data_free(output_obj);
7763 break;
7764 case VPROC_GSK_JOB_OVERRIDES_DB:
7765 if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_OVERRIDES))) != NULL)) {
7766 goto out_bad;
7767 }
7768 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7769 if (!job_assumes(j, packed_size != 0)) {
7770 goto out_bad;
7771 }
7772
7773 launch_data_free(output_obj);
7774 break;
7775 case VPROC_GSK_JOB_CACHE_DB:
7776 if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_JOBCACHE))) != NULL)) {
7777 goto out_bad;
7778 }
7779 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
7780 if (!job_assumes(j, packed_size != 0)) {
7781 goto out_bad;
7782 }
7783
7784 job_log(j, LOG_DEBUG, "Location of job cache database: %s", launch_data_get_string(output_obj));
7785
7786 launch_data_free(output_obj);
7787 break;
7788 case 0:
7789 mig_deallocate(*outval, *outvalCnt);
7790 *outval = 0;
7791 *outvalCnt = 0;
7792 break;
7793 default:
7794 goto out_bad;
7795 }
7796
7797 if (invalCnt) switch (inkey) {
7798 case VPROC_GSK_ENVIRONMENT:
7799 if (launch_data_get_type(input_obj) == LAUNCH_DATA_DICTIONARY) {
7800 if (j->p) {
7801 job_log(j, LOG_INFO, "Setting environment for a currently active job. This environment will take effect on the next invocation of the job.");
7802 }
7803 launch_data_dict_iterate(input_obj, envitem_setup_one_shot, j);
7804 }
7805 break;
7806 case 0:
7807 break;
7808 default:
7809 goto out_bad;
7810 }
7811
7812 mig_deallocate(inval, invalCnt);
7813 return 0;
7814
7815 out_bad:
7816 mig_deallocate(inval, invalCnt);
7817 if (*outval) {
7818 mig_deallocate(*outval, *outvalCnt);
7819 }
7820 if (output_obj) {
7821 launch_data_free(output_obj);
7822 }
7823
7824 return 1;
7825 }
7826
7827 kern_return_t
7828 job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
7829 {
7830 const char *action;
7831 kern_return_t kr = 0;
7832 struct ldcred *ldc = runtime_get_caller_creds();
7833 int oldmask;
7834
7835 if (!launchd_assumes(j != NULL)) {
7836 return BOOTSTRAP_NO_MEMORY;
7837 }
7838
7839 if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
7840 return BOOTSTRAP_NOT_PRIVILEGED;
7841 }
7842
7843 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
7844 return 1;
7845 }
7846
7847 if (inkey && outkey) {
7848 action = "Swapping";
7849 } else if (inkey) {
7850 action = "Setting";
7851 } else {
7852 action = "Getting";
7853 }
7854
7855 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
7856
7857 switch (outkey) {
7858 case VPROC_GSK_ABANDON_PROCESS_GROUP:
7859 *outval = j->abandon_pg;
7860 break;
7861 case VPROC_GSK_LAST_EXIT_STATUS:
7862 *outval = j->last_exit_status;
7863 break;
7864 case VPROC_GSK_MGR_UID:
7865 *outval = getuid();
7866 break;
7867 case VPROC_GSK_MGR_PID:
7868 *outval = getpid();
7869 break;
7870 case VPROC_GSK_IS_MANAGED:
7871 *outval = j->anonymous ? 0 : 1;
7872 break;
7873 case VPROC_GSK_BASIC_KEEPALIVE:
7874 *outval = !j->ondemand;
7875 break;
7876 case VPROC_GSK_START_INTERVAL:
7877 *outval = j->start_interval;
7878 break;
7879 case VPROC_GSK_IDLE_TIMEOUT:
7880 *outval = j->timeout;
7881 break;
7882 case VPROC_GSK_EXIT_TIMEOUT:
7883 *outval = j->exit_timeout;
7884 break;
7885 case VPROC_GSK_GLOBAL_LOG_MASK:
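/* Read-by-swap idiom (editor's descriptive comment): setting the mask
 * returns the previous value, which is captured and then restored. The
 * VPROC_GSK_GLOBAL_UMASK case below uses the same trick with umask().
 */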
7886 oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
7887 *outval = oldmask;
7888 runtime_setlogmask(oldmask);
7889 break;
7890 case VPROC_GSK_GLOBAL_UMASK:
7891 oldmask = umask(0);
7892 *outval = oldmask;
7893 umask(oldmask);
7894 break;
7895 case VPROC_GSK_TRANSACTIONS_ENABLED:
7896 job_log(j, LOG_DEBUG, "Reading transaction model status.");
7897 *outval = j->kill_via_shmem;
7898 break;
7899 case VPROC_GSK_WAITFORDEBUGGER:
7900 *outval = j->wait4debugger;
7901 break;
7902 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
7903 *outval = j->embedded_special_privileges;
7904 break;
7905 case 0:
7906 *outval = 0;
7907 break;
7908 default:
7909 kr = 1;
7910 break;
7911 }
7912
7913 switch (inkey) {
7914 case VPROC_GSK_ABANDON_PROCESS_GROUP:
7915 j->abandon_pg = (bool)inval;
7916 break;
7917 case VPROC_GSK_GLOBAL_ON_DEMAND:
7918 job_log(j, LOG_DEBUG, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
7919 kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
7920 break;
7921 case VPROC_GSK_BASIC_KEEPALIVE:
7922 j->ondemand = !inval;
7923 break;
7924 case VPROC_GSK_START_INTERVAL:
7925 if (inval > UINT32_MAX || inval < 0) {
7926 kr = 1;
7927 } else if (inval) {
7928 if (j->start_interval == 0) {
7929 runtime_add_weak_ref();
7930 }
7931 j->start_interval = (typeof(j->start_interval)) inval;
7932 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
7933 } else if (j->start_interval) {
7934 (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
7935 if (j->start_interval != 0) {
7936 runtime_del_weak_ref();
7937 }
7938 j->start_interval = 0;
7939 }
7940 break;
7941 case VPROC_GSK_IDLE_TIMEOUT:
7942 if (inval < 0 || inval > UINT32_MAX) {
7943 kr = 1;
7944 } else {
7945 j->timeout = (typeof(j->timeout)) inval;
7946 }
7947 break;
7948 case VPROC_GSK_EXIT_TIMEOUT:
7949 if (inval < 0 || inval > UINT32_MAX) {
7950 kr = 1;
7951 } else {
7952 j->exit_timeout = (typeof(j->exit_timeout)) inval;
7953 }
7954 break;
7955 case VPROC_GSK_GLOBAL_LOG_MASK:
7956 if (inval < 0 || inval > UINT32_MAX) {
7957 kr = 1;
7958 } else {
7959 runtime_setlogmask((int) inval);
7960 }
7961 break;
7962 case VPROC_GSK_GLOBAL_UMASK:
7963 launchd_assert(sizeof (mode_t) == 2);
7964 if (inval < 0 || inval > UINT16_MAX) {
7965 kr = 1;
7966 } else {
7967 #if HAVE_SANDBOX
7968 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
7969 kr = 1;
7970 } else {
7971 umask((mode_t) inval);
7972 }
#else
/* No sandbox check to apply here; honor the request directly. */
umask((mode_t) inval);
7973 #endif
7974 }
7975 break;
7976 case VPROC_GSK_TRANSACTIONS_ENABLED:
7977 if (!job_assumes(j, inval != 0)) {
7978 job_log(j, LOG_WARNING, "Attempt to unregister from transaction model. This is not supported.");
7979 kr = 1;
7980 } else {
7981 j->kill_via_shmem = (bool)inval;
7982 }
7983 break;
7984 case VPROC_GSK_WEIRD_BOOTSTRAP:
7985 if (job_assumes(j, j->weird_bootstrap)) {
7986 job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
7987
7988 mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
7989
7990 if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
7991 mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
7992 }
7993
7994 (void)job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
7995 j->weird_bootstrap = false;
7996 }
7997 break;
7998 case VPROC_GSK_WAITFORDEBUGGER:
7999 j->wait4debugger_oneshot = inval;
8000 break;
8001 case VPROC_GSK_PERUSER_SUSPEND:
8002 if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
8003 mach_port_t junk = MACH_PORT_NULL;
8004 job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
8005 if (job_assumes(j, jpu != NULL)) {
8006 struct suspended_peruser *spi = NULL;
8007 LIST_FOREACH(spi, &j->suspended_perusers, sle) {
8008 if ((int64_t)(spi->j->mach_uid) == inval) {
8009 job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
8010 break;
8011 }
8012 }
8013
8014 if (spi == NULL) {
8015 job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
8016 spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
8017 if (job_assumes(j, spi != NULL)) {
8018 /* Stop listening for events.
8019 *
8020 * See <rdar://problem/9014146>.
8021 */
8022 if (jpu->peruser_suspend_count == 0) {
8023 job_ignore(jpu);
8024 }
8025
8026 spi->j = jpu;
8027 spi->j->peruser_suspend_count++;
8028 LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
8029 job_stop(spi->j);
8030 *outval = jpu->p;
8031 } else {
8032 kr = BOOTSTRAP_NO_MEMORY;
8033 }
8034 }
8035 }
8036 } else {
8037 kr = 1;
8038 }
8039 break;
8040 case VPROC_GSK_PERUSER_RESUME:
8041 if (job_assumes(j, pid1_magic == true)) {
8042 struct suspended_peruser *spi = NULL, *spt = NULL;
8043 LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
8044 if ((int64_t)(spi->j->mach_uid) == inval) {
8045 spi->j->peruser_suspend_count--;
8046 LIST_REMOVE(spi, sle);
8047 job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
8048 break;
8049 }
8050 }
8051
8052 if (!job_assumes(j, spi != NULL)) {
8053 job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
8054 kr = BOOTSTRAP_NOT_PRIVILEGED;
8055 } else if (spi->j->peruser_suspend_count == 0) {
8056 job_watch(spi->j);
8057 job_dispatch(spi->j, false);
8058 free(spi);
8059 }
8060 } else {
8061 kr = 1;
8062 }
8063 break;
8064 case 0:
8065 break;
8066 default:
8067 kr = 1;
8068 break;
8069 }
8070
8071 return kr;
8072 }
8073
8074 kern_return_t
8075 job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
8076 {
8077 struct machservice *ms;
8078
8079 if (!launchd_assumes(j != NULL)) {
8080 return BOOTSTRAP_NO_MEMORY;
8081 }
8082
8083 job_log(j, LOG_DEBUG, "Post fork ping.");
8084
8085 job_setup_exception_port(j, child_task);
8086
8087 SLIST_FOREACH(ms, &special_ports, special_port_sle) {
8088 if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
8089 /* The TASK_ACCESS_PORT funny business is to work around 5325399. */
8090 continue;
8091 }
8092
8093 errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
8094
8095 if (unlikely(errno)) {
8096 int desired_log_level = LOG_ERR;
8097
8098 if (j->anonymous) {
8099 /* 5338127 */
8100
8101 desired_log_level = LOG_WARNING;
8102
8103 if (ms->special_port_num == TASK_SEATBELT_PORT) {
8104 desired_log_level = LOG_DEBUG;
8105 }
8106 }
8107
8108 job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
8109 }
8110 }
8111
8112 /* MIG will not zero-initialize this pointer, so we must always do so. See
8113 * <rdar://problem/8562593>.
8114 */
8115 *asport = MACH_PORT_NULL;
8116 #if !TARGET_OS_EMBEDDED
8117 if (!j->anonymous) {
8118 /* XPC services will spawn into the root security session by default.
8119 * xpcproxy will switch them away if needed.
8120 */
8121 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
8122 job_log(j, LOG_DEBUG, "Returning j->asport: %u", j->asport);
8123 *asport = j->asport;
8124 }
8125 }
8126 #endif
8127 (void)job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);
8128
8129 return 0;
8130 }
8131
8132 kern_return_t
8133 job_mig_reboot2(job_t j, uint64_t flags)
8134 {
8135 char who_started_the_reboot[2048] = "";
8136 struct proc_bsdshortinfo proc;
8137 struct ldcred *ldc = runtime_get_caller_creds();
8138 pid_t pid_to_log;
8139
8140 if (!launchd_assumes(j != NULL)) {
8141 return BOOTSTRAP_NO_MEMORY;
8142 }
8143
8144 if (unlikely(!pid1_magic)) {
8145 return BOOTSTRAP_NOT_PRIVILEGED;
8146 }
8147
8148 #if !TARGET_OS_EMBEDDED
8149 if (unlikely(ldc->euid)) {
8150 #else
8151 if (unlikely(ldc->euid) && !j->embedded_special_privileges) {
8152 #endif
8153 return BOOTSTRAP_NOT_PRIVILEGED;
8154 }
8155
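/* Walk the parent-PID chain and append "name[pid] ->" for each ancestor so
 * the log records who initiated the reboot (editor's descriptive comment).
 */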
8156 for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
8157 size_t who_offset;
8158 if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
8159 if (errno != ESRCH) {
8160 (void)job_assumes(j, errno == 0);
8161 }
8162 return 1;
8163 }
8164
8165 if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
8166 job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
8167 snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
8168 break;
8169 }
8170
8171 who_offset = strlen(who_started_the_reboot);
8172 snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
8173 " %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
8174 }
8175
8176 root_jobmgr->reboot_flags = (int)flags;
8177 job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
8178 launchd_shutdown();
8179
8180 return 0;
8181 }
8182
8183 kern_return_t
8184 job_mig_getsocket(job_t j, name_t spr)
8185 {
8186 if (!launchd_assumes(j != NULL)) {
8187 return BOOTSTRAP_NO_MEMORY;
8188 }
8189
8190 if (j->deny_job_creation) {
8191 return BOOTSTRAP_NOT_PRIVILEGED;
8192 }
8193
8194 #if HAVE_SANDBOX
8195 struct ldcred *ldc = runtime_get_caller_creds();
8196 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8197 return BOOTSTRAP_NOT_PRIVILEGED;
8198 }
8199 #endif
8200
8201 ipc_server_init();
8202
8203 if (unlikely(!sockpath)) {
8204 return BOOTSTRAP_NO_MEMORY;
8205 }
8206
8207 strncpy(spr, sockpath, sizeof(name_t));
8208
8209 return BOOTSTRAP_SUCCESS;
8210 }
8211
8212 kern_return_t
8213 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8214 {
8215 if (!launchd_assumes(j != NULL)) {
8216 return BOOTSTRAP_NO_MEMORY;
8217 }
8218
8219 if ((errno = err)) {
8220 job_log_error(j, pri, "%s", msg);
8221 } else {
8222 job_log(j, pri, "%s", msg);
8223 }
8224
8225 return 0;
8226 }
8227
8228 job_t
8229 jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
8230 {
8231 job_t ji = NULL;
8232 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8233 if (!ji->per_user) {
8234 continue;
8235 }
8236 if (ji->mach_uid != which_user) {
8237 continue;
8238 }
8239 if (SLIST_EMPTY(&ji->machservices)) {
8240 continue;
8241 }
8242 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8243 continue;
8244 }
8245 break;
8246 }
8247
8248 if (unlikely(ji == NULL)) {
8249 struct machservice *ms;
8250 char lbuf[1024];
8251
8252 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
8253
8254 snprintf(lbuf, sizeof(lbuf), "com.apple.launchd.peruser.%u", which_user);
8255
8256 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
8257
8258 if (ji != NULL) {
8259 auditinfo_addr_t auinfo = {
8260 .ai_termid = { .at_type = AU_IPv4 },
8261 .ai_auid = which_user,
8262 .ai_asid = AU_ASSIGN_ASID,
8263 };
8264
8265 if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8266 job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8267 (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8268
8269 /* Kinda lame that we have to do this, but we can't create an
8270 * audit session without joining it.
8271 */
8272 (void)job_assumes(ji, audit_session_join(g_audit_session_port));
8273 ji->asid = auinfo.ai_asid;
8274 } else {
8275 job_log(ji, LOG_WARNING, "Could not set audit session!");
8276 job_remove(ji);
8277 return NULL;
8278 }
8279
8280 ji->mach_uid = which_user;
8281 ji->per_user = true;
8282 ji->kill_via_shmem = true;
8283
8284 struct stat sb;
8285 char pu_db[PATH_MAX];
8286 snprintf(pu_db, sizeof(pu_db), LAUNCHD_DB_PREFIX "/%s", lbuf);
8287
8288 bool created = false;
8289 int err = stat(pu_db, &sb);
8290 if ((err == -1 && errno == ENOENT) || (err == 0 && !S_ISDIR(sb.st_mode))) {
8291 if (err == 0) {
8292 char move_aside[PATH_MAX];
8293 snprintf(move_aside, sizeof(move_aside), LAUNCHD_DB_PREFIX "/%s.movedaside", lbuf);
8294
8295 (void)job_assumes(ji, rename(pu_db, move_aside) != -1);
8296 }
8297
8298 (void)job_assumes(ji, mkdir(pu_db, S_IRWXU) != -1);
8299 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
8300 created = true;
8301 }
8302
/* Only inspect 'sb' when the stat() above actually succeeded. */
8303 if (!created && err == 0) {
8304 if (!job_assumes(ji, sb.st_uid == which_user)) {
8305 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
8306 }
8307 if (!job_assumes(ji, sb.st_gid == 0)) {
8308 (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
8309 }
8310 if (!job_assumes(ji, sb.st_mode == (S_IRWXU | S_IFDIR))) {
8311 (void)job_assumes(ji, chmod(pu_db, S_IRWXU) != -1);
8312 }
8313 }
8314
8315 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8316 job_remove(ji);
8317 ji = NULL;
8318 } else {
8319 ms->per_user_hack = true;
8320 ms->hide = true;
8321
8322 ji = job_dispatch(ji, false);
8323 }
8324 }
8325 } else {
8326 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
8327 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8328 }
8329
8330 return ji;
8331 }
8332
8333 kern_return_t
8334 job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
8335 {
8336 struct ldcred *ldc = runtime_get_caller_creds();
8337 job_t jpu;
8338
8339 #if TARGET_OS_EMBEDDED
8340 /* There is no need for per-user launchds on embedded. */
8341 job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
8342 return BOOTSTRAP_NOT_PRIVILEGED;
8343 #endif
8344
8345 #if HAVE_SANDBOX
8346 if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
8347 return BOOTSTRAP_NOT_PRIVILEGED;
8348 }
8349 #endif
8350
8351 if (!launchd_assumes(j != NULL)) {
8352 return BOOTSTRAP_NO_MEMORY;
8353 }
8354
8355 job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
8356
8357 if (unlikely(!pid1_magic)) {
8358 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
8359 return BOOTSTRAP_NOT_PRIVILEGED;
8360 }
8361
8362 if (ldc->euid || ldc->uid) {
8363 which_user = ldc->euid ?: ldc->uid;
8364 }
8365
8366 *up_cont = MACH_PORT_NULL;
8367
8368 jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);
8369
8370 return 0;
8371 }
8372
8373 kern_return_t
8374 job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
8375 {
8376 bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
8377 bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
8378 struct ldcred *ldc = runtime_get_caller_creds();
8379 struct machservice *ms = NULL;
8380 job_t jo;
8381
8382 if (!launchd_assumes(j != NULL)) {
8383 return BOOTSTRAP_NO_MEMORY;
8384 }
8385
8386 if (j->dedicated_instance) {
8387 struct machservice *msi = NULL;
8388 SLIST_FOREACH(msi, &j->machservices, sle) {
8389 if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
8390 uuid_copy(instance_id, j->instance_id);
8391 ms = msi;
8392 break;
8393 }
8394 }
8395 } else {
8396 ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
8397 }
8398
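/* Editor's summary of the branches below: a strict check-in
 * (BOOTSTRAP_STRICT_CHECKIN) only returns services the job already declared
 * and owns; a non-strict check-in of an unknown name creates the service on
 * the fly for legacy jobs, with a scolding for configurable ones.
 */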
8399 if (strict) {
8400 if (likely(ms != NULL)) {
8401 if (ms->job != j) {
8402 return BOOTSTRAP_NOT_PRIVILEGED;
8403 } else if (ms->isActive) {
8404 return BOOTSTRAP_SERVICE_ACTIVE;
8405 }
8406 } else {
8407 return BOOTSTRAP_UNKNOWN_SERVICE;
8408 }
8409 } else if (ms == NULL) {
8410 if (job_assumes(j, !j->dedicated_instance)) {
8411 *serviceportp = MACH_PORT_NULL;
8412
8413 if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
8414 return BOOTSTRAP_NO_MEMORY;
8415 }
8416
8417 /* Treat this like a legacy job. */
8418 if (!j->legacy_mach_job) {
8419 ms->isActive = true;
8420 ms->recv = false;
8421 }
8422
8423 if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
8424 job_log(j, LOG_SCOLDING, "Please add the following service to the configuration file for this job: %s", servicename);
8425 }
8426 } else {
8427 return BOOTSTRAP_UNKNOWN_SERVICE;
8428 }
8429 } else {
8430 if (unlikely((jo = machservice_job(ms)) != j)) {
8431 static pid_t last_warned_pid;
8432
8433 if (last_warned_pid != ldc->pid) {
8434 job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
8435 last_warned_pid = ldc->pid;
8436 }
8437
8438 return BOOTSTRAP_NOT_PRIVILEGED;
8439 }
8440 if (unlikely(machservice_active(ms))) {
8441 job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
8442 return BOOTSTRAP_SERVICE_ACTIVE;
8443 }
8444 }
8445
8446 job_checkin(j);
8447 machservice_request_notifications(ms);
8448
8449 job_log(j, LOG_INFO, "Check-in of service: %s", servicename);
8450
8451 *serviceportp = machservice_port(ms);
8452 return BOOTSTRAP_SUCCESS;
8453 }
8454
8455 kern_return_t
8456 job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
8457 {
8458 struct machservice *ms;
8459 struct ldcred *ldc = runtime_get_caller_creds();
8460
8461 if (!launchd_assumes(j != NULL)) {
8462 return BOOTSTRAP_NO_MEMORY;
8463 }
8464
8465 if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
8466 job_log(j, LOG_SCOLDING, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
8467 }
8468
8469 job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
8470
8471 /* 5641783 for the embedded hack */
8472 #if !TARGET_OS_EMBEDDED
8473 /*
8474 * From a per-user/session launchd's perspective, SecurityAgent (UID
8475 * 92) is a rogue application (not our UID, not root and not a child of
8476 * us). We'll have to reconcile this design friction at a later date.
8477 */
8478 if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
8479 if (pid1_magic) {
8480 return VPROC_ERR_TRY_PER_USER;
8481 } else {
8482 return BOOTSTRAP_NOT_PRIVILEGED;
8483 }
8484 }
8485 #endif
8486
8487 ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);
8488
8489 if (unlikely(ms)) {
8490 if (machservice_job(ms) != j) {
8491 return BOOTSTRAP_NOT_PRIVILEGED;
8492 }
8493 if (machservice_active(ms)) {
8494 job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
8495 return BOOTSTRAP_SERVICE_ACTIVE;
8496 }
8497 if (ms->recv && (serviceport != MACH_PORT_NULL)) {
8498 job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
8499 return BOOTSTRAP_NOT_PRIVILEGED;
8500 }
8501 job_checkin(j);
8502 machservice_delete(j, ms, false);
8503 }
8504
8505 if (likely(serviceport != MACH_PORT_NULL)) {
8506 if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
8507 machservice_request_notifications(ms);
8508 } else {
8509 return BOOTSTRAP_NO_MEMORY;
8510 }
8511 }
8512
8514 return BOOTSTRAP_SUCCESS;
8515 }
8516
8517 kern_return_t
8518 job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
8519 {
8520 struct machservice *ms = NULL;
8521 struct ldcred *ldc = runtime_get_caller_creds();
8522 kern_return_t kr;
8523 bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
8524 bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
8525 bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
8526 bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;
8527
8528 if (!launchd_assumes(j != NULL)) {
8529 return BOOTSTRAP_NO_MEMORY;
8530 }
8531
8532 bool xpc_req = j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN;
8533
8534 /* 5641783 for the embedded hack */
8535 #if !TARGET_OS_EMBEDDED
8536 if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
8537 return VPROC_ERR_TRY_PER_USER;
8538 }
8539 #endif
8540
8541 #if HAVE_SANDBOX
8542 /* We don't do sandbox checking for XPC domains because, by definition, all
8543 * the services within your domain should be accessible to you.
8544 */
8545 if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
8546 return BOOTSTRAP_NOT_PRIVILEGED;
8547 }
8548 #endif
8549
8550 if (per_pid_lookup) {
8551 ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
8552 } else {
8553 if (xpc_req) {
8554 /* Requests from XPC domains stay local. */
8555 ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
8556 } else {
8557 /* A strict lookup which is privileged won't even bother trying to
8558 * find a service if we're not hosting the root Mach bootstrap.
8559 */
8560 if (strict_lookup && privileged) {
8561 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8562 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
8563 }
8564 } else {
8565 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
8566 }
8567 }
8568 }
8569
8570 if (likely(ms)) {
8571 ms = ms->alias ? ms->alias : ms;
8572 if (unlikely(specific_instance && ms->job->multiple_instances)) {
8573 job_t ji = NULL;
8574 job_t instance = NULL;
8575 LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
8576 if (uuid_compare(instance_id, ji->instance_id) == 0) {
8577 instance = ji;
8578 break;
8579 }
8580 }
8581
8582 if (unlikely(instance == NULL)) {
8583 job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
8584 instance = job_new_subjob(ms->job, instance_id);
8585 if (job_assumes(j, instance != NULL)) {
8586 /* Disable this support for now. We only support having
8587 * multi-instance jobs within private XPC domains.
8588 */
8589 #if 0
8590 /* If the job is multi-instance, in a singleton XPC domain
8591 * and the request is not coming from within that singleton
8592 * domain, we need to alias the new job into the requesting
8593 * domain.
8594 */
8595 if (!j->mgr->xpc_singleton && xpc_req) {
8596 (void)job_assumes(instance, job_new_alias(j->mgr, instance));
8597 }
8598 #endif
8599 job_dispatch(instance, false);
8600 }
8601 }
8602
8603 ms = NULL;
8604 if (job_assumes(j, instance != NULL)) {
8605 struct machservice *msi = NULL;
8606 SLIST_FOREACH(msi, &instance->machservices, sle) {
8607 /* sizeof(servicename) will return the size of a pointer, even though it's
8608 * an array type, because when passing arrays as parameters in C, they
8609 * implicitly decay to pointers.
8610 */
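/* Editor's illustration (assuming name_t is a char array typedef): an array
 * parameter is adjusted to a pointer, so here sizeof(servicename) would be
 * sizeof(char *) rather than the array size; hence sizeof(name_t) - 1 below.
 */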
8611 if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
8612 ms = msi;
8613 break;
8614 }
8615 }
8616 }
8617 } else {
8618 if (machservice_hidden(ms) && !machservice_active(ms)) {
8619 ms = NULL;
8620 } else if (unlikely(ms->per_user_hack)) {
8621 ms = NULL;
8622 }
8623 }
8624 }
8625
8626 if (likely(ms)) {
8627 (void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
8628 job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
8629
8630 if (unlikely(!per_pid_lookup && j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user)) {
8631 /* we need to think more about the per_pid_lookup logic before logging about repeated lookups */
8632 job_log(j, LOG_DEBUG, "Performance: Please fix the framework that talks to \"%s\" to cache the Mach port for service: %s", ms->job->label, servicename);
8633 }
8634
8635 j->lastlookup = ms;
8636 j->lastlookup_gennum = ms->gen_num;
8637
8638 *serviceportp = machservice_port(ms);
8639
8640 kr = BOOTSTRAP_SUCCESS;
8641 } else if (strict_lookup && !privileged) {
8642 /* Hack: We need to simulate XPC's desire not to establish a hierarchy. So if
8643 * XPC is doing the lookup, and it's not a privileged lookup, we won't forward.
8644 * But if it is a privileged lookup (that is, was looked up in XPC_DOMAIN_LOCAL_SYSTEM)
8645 * then we must forward.
8646 */
8647 return BOOTSTRAP_UNKNOWN_SERVICE;
8648 } else if (inherited_bootstrap_port != MACH_PORT_NULL) {
8649 /* Requests from within an XPC domain don't get forwarded. */
8650 job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
8651 /* Clients potentially check the audit token of the reply to verify that the returned send right is trustworthy. */
8652 (void)job_assumes(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags) == 0);
8653 /* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
8654 return MIG_NO_REPLY;
8655 } else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
8656 /*
8657 * 5240036 Should start background session when a lookup of CCacheServer occurs
8658 *
8659 * This is a total hack. We sniff out the loginwindow session and attempt to guess what it is up to.
8660 * If we find an EUID that isn't root, we force the lookup over to the per-user context.
8661 */
8662 return VPROC_ERR_TRY_PER_USER;
8663 } else {
8664 job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
8665 kr = BOOTSTRAP_UNKNOWN_SERVICE;
8666 }
8667
8668 return kr;
8669 }
8670
8671 kern_return_t
8672 job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
8673 {
8674 if (!launchd_assumes(j != NULL)) {
8675 return BOOTSTRAP_NO_MEMORY;
8676 }
8677
8678 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
8679 jobmgr_t jm = j->mgr;
8680
8681 if (jobmgr_parent(jm)) {
8682 *parentport = jobmgr_parent(jm)->jm_port;
8683 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
8684 *parentport = jm->jm_port;
8685 } else {
8686 (void)job_assumes(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp) == 0);
8687 /* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
8688 return MIG_NO_REPLY;
8689 }
8690 return BOOTSTRAP_SUCCESS;
8691 }
8692
8693 kern_return_t
8694 job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
8695 {
8696 if (inherited_bootstrap_port == MACH_PORT_NULL) {
8697 *rootbsp = root_jobmgr->jm_port;
8698 (void)job_assumes(j, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
8699 } else {
8700 *rootbsp = inherited_bootstrap_port;
8701 (void)job_assumes(j, launchd_mport_copy_send(inherited_bootstrap_port) == KERN_SUCCESS);
8702 }
8703
8704 return BOOTSTRAP_SUCCESS;
8705 }
8706
8707 kern_return_t
8708 job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt, name_array_t *servicejobsp, unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt, uint64_t flags)
8709 {
8710 name_array_t service_names = NULL;
8711 name_array_t service_jobs = NULL;
8712 bootstrap_status_array_t service_actives = NULL;
8713 unsigned int cnt = 0, cnt2 = 0;
8714 jobmgr_t jm;
8715
8716 if (!launchd_assumes(j != NULL)) {
8717 return BOOTSTRAP_NO_MEMORY;
8718 }
8719
8720 if (g_flat_mach_namespace) {
8721 if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
8722 jm = j->mgr;
8723 } else {
8724 jm = root_jobmgr;
8725 }
8726 } else {
8727 jm = j->mgr;
8728 }
8729
8730 unsigned int i = 0;
8731 struct machservice *msi = NULL;
8732 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8733 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
8734 cnt += !msi->per_pid ? 1 : 0;
8735 }
8736 }
8737
8738 if (cnt == 0) {
8739 goto out;
8740 }
8741
8742 mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
8743 if (!job_assumes(j, service_names != NULL)) {
8744 goto out_bad;
8745 }
8746
8747 mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
8748 if (!job_assumes(j, service_jobs != NULL)) {
8749 goto out_bad;
8750 }
8751
8752 mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
8753 if (!job_assumes(j, service_actives != NULL)) {
8754 goto out_bad;
8755 }
8756
8757 for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
8758 LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
8759 if (!msi->per_pid) {
8760 strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
8761 msi = msi->alias ? msi->alias : msi;
8762 if (msi->job->mgr->shortdesc) {
8763 strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
8764 } else {
8765 strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
8766 }
8767 service_actives[cnt2] = machservice_status(msi);
8768 cnt2++;
8769 }
8770 }
8771 }
8772
8773 (void)job_assumes(j, cnt == cnt2);
8774
8775 out:
8776 *servicenamesp = service_names;
8777 *servicejobsp = service_jobs;
8778 *serviceactivesp = service_actives;
8779 *servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;
8780
8781 return BOOTSTRAP_SUCCESS;
8782
8783 out_bad:
8784 if (service_names) {
8785 mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
8786 }
8787 if (service_jobs) {
8788 mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
8789 }
8790 if (service_actives) {
8791 mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
8792 }
8793
8794 return BOOTSTRAP_NO_MEMORY;
8795 }
8796
8797 kern_return_t
8798 job_mig_lookup_children(job_t j, mach_port_array_t *child_ports, mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names, mach_msg_type_number_t *child_names_cnt, bootstrap_property_array_t *child_properties,mach_msg_type_number_t *child_properties_cnt)
8799 {
8800 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
8801 if (!launchd_assumes(j != NULL)) {
8802 return BOOTSTRAP_NO_MEMORY;
8803 }
8804
8805 struct ldcred *ldc = runtime_get_caller_creds();
8806
8807 /* Only allow root processes to look up children, even if we're in the per-user launchd.
8808 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
8809 * in a non-flat namespace.
8810 */
8811 if (ldc->euid != 0) {
8812 job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
8813 return BOOTSTRAP_NOT_PRIVILEGED;
8814 }
8815
8816 unsigned int cnt = 0;
8817
8818 jobmgr_t jmr = j->mgr;
8819 jobmgr_t jmi = NULL;
8820 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8821 cnt++;
8822 }
8823
8824 /* Find our per-user launchds if we're PID 1. */
8825 job_t ji = NULL;
8826 if (pid1_magic) {
8827 LIST_FOREACH(ji, &jmr->jobs, sle) {
8828 cnt += ji->per_user ? 1 : 0;
8829 }
8830 }
8831
8832 if (cnt == 0) {
8833 return BOOTSTRAP_NO_CHILDREN;
8834 }
8835
8836 mach_port_array_t _child_ports = NULL;
8837 mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
8838 if (!job_assumes(j, _child_ports != NULL)) {
8839 kr = BOOTSTRAP_NO_MEMORY;
8840 goto out_bad;
8841 }
8842
8843 name_array_t _child_names = NULL;
8844 mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
8845 if (!job_assumes(j, _child_names != NULL)) {
8846 kr = BOOTSTRAP_NO_MEMORY;
8847 goto out_bad;
8848 }
8849
8850 bootstrap_property_array_t _child_properties = NULL;
8851 mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
8852 if (!job_assumes(j, _child_properties != NULL)) {
8853 kr = BOOTSTRAP_NO_MEMORY;
8854 goto out_bad;
8855 }
8856
8857 unsigned int cnt2 = 0;
8858 SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
8859 if (jobmgr_assumes(jmi, launchd_mport_make_send(jmi->jm_port) == KERN_SUCCESS)) {
8860 _child_ports[cnt2] = jmi->jm_port;
8861 } else {
8862 _child_ports[cnt2] = MACH_PORT_NULL;
8863 }
8864
8865 strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
8866 _child_properties[cnt2] = jmi->properties;
8867
8868 cnt2++;
8869 }
8870
8871 if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
8872 if (ji->per_user) {
8873 if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
8874 mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));
8875
8876 if (job_assumes(ji, launchd_mport_copy_send(port) == KERN_SUCCESS)) {
8877 _child_ports[cnt2] = port;
8878 } else {
8879 _child_ports[cnt2] = MACH_PORT_NULL;
8880 }
8881 } else {
8882 _child_ports[cnt2] = MACH_PORT_NULL;
8883 }
8884
8885 strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
8886 _child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;
8887
8888 cnt2++;
8889 }
8890 }
8891
8892 *child_names_cnt = cnt;
8893 *child_ports_cnt = cnt;
8894 *child_properties_cnt = cnt;
8895
8896 *child_names = _child_names;
8897 *child_ports = _child_ports;
8898 *child_properties = _child_properties;
8899
8900 unsigned int i = 0;
8901 for (i = 0; i < cnt; i++) {
8902 job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
8903 }
8904
8905 return BOOTSTRAP_SUCCESS;
8906 out_bad:
8907 if (_child_ports) {
8908 mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
8909 }
8910
8911 if (_child_names) {
8912 mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
8913 }
8914
8915 if (_child_properties) {
8916 mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
8917 }
8918
8919 return kr;
8920 }
8921
8922 kern_return_t
8923 job_mig_transaction_count_for_pid(job_t j, pid_t p, int32_t *cnt, boolean_t *condemned)
8924 {
8925 kern_return_t kr = KERN_FAILURE;
8926 struct ldcred *ldc = runtime_get_caller_creds();
8927 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
8928 return BOOTSTRAP_NOT_PRIVILEGED;
8929 }
8930
8931 job_t j_for_pid = jobmgr_find_by_pid_deep(j->mgr, p, false);
8932 if (j_for_pid) {
8933 if (j_for_pid->kill_via_shmem) {
8934 if (j_for_pid->shmem) {
8935 *cnt = j_for_pid->shmem->vp_shmem_transaction_cnt;
8936 *condemned = j_for_pid->shmem->vp_shmem_flags & VPROC_SHMEM_EXITING;
8937 *cnt += *condemned ? 1 : 0;
8938 } else {
8939 *cnt = 0;
8940 *condemned = false;
8941 }
8942
8943 kr = BOOTSTRAP_SUCCESS;
8944 } else {
8945 kr = BOOTSTRAP_NO_MEMORY;
8946 }
8947 } else {
8948 kr = BOOTSTRAP_UNKNOWN_SERVICE;
8949 }
8950
8951 return kr;
8952 }
8953
8954 kern_return_t
8955 job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
8956 {
8957 struct ldcred *ldc = runtime_get_caller_creds();
8958 if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
8959 return BOOTSTRAP_NOT_PRIVILEGED;
8960 }
8961
8962 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
8963 * directly by launchd as agents. MIG will not zero-initialize the reply
8964 * out-parameter (see job_mig_post_fork_ping()), so default it explicitly. */
*managed = false;
8965 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
8966 if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
8967 *managed = true;
8968 }
8969
8970 return BOOTSTRAP_SUCCESS;
8971 }
8972
8973 kern_return_t
8974 job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
8975 {
8976 struct ldcred *ldc = runtime_get_caller_creds();
8977 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
8978
8979 #if HAVE_SANDBOX
8980 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8981 return BOOTSTRAP_NOT_PRIVILEGED;
8982 }
8983 #endif
8984
8985 mach_port_t _mp = MACH_PORT_NULL;
8986 if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
8987 job_t target_j = job_find(NULL, label);
8988 if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
8989 if (target_j->j_port == MACH_PORT_NULL) {
8990 (void)job_assumes(target_j, job_setup_machport(target_j) == true);
8991 }
8992
8993 _mp = target_j->j_port;
8994 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
8995 } else {
8996 kr = BOOTSTRAP_NO_MEMORY;
8997 }
8998 }
8999
9000 *mp = _mp;
9001 return kr;
9002 }
9003
9004 #if !TARGET_OS_EMBEDDED
9005 kern_return_t
9006 job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
9007 {
9008 uuid_string_t uuid_str;
9009 uuid_unparse(uuid, uuid_str);
9010 job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);
9011
9012 job_t ji = NULL, jt = NULL;
9013 LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
9014 uuid_string_t uuid_str2;
9015 uuid_unparse(ji->expected_audit_uuid, uuid_str2);
9016
9017 if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
9018 uuid_clear(ji->expected_audit_uuid);
9019 if (asport != MACH_PORT_NULL ) {
9020 job_log(ji, LOG_DEBUG, "Job should join session with port %u", asport);
9021 (void)job_assumes(j, launchd_mport_copy_send(asport) == KERN_SUCCESS);
9022 } else {
9023 job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
9024 }
9025
9026 ji->asport = asport;
9027 LIST_REMOVE(ji, needing_session_sle);
9028 job_dispatch(ji, false);
9029 }
9030 }
9031
9032 /* Each job that the session port was set for holds a reference. At the end of
9033 * the loop, there will be one extra reference belonging to this MiG protocol.
9034 * We need to release it so that the session goes away when all the jobs
9035 * referencing it are unloaded.
9036 */
9037 (void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
9038
9039 return KERN_SUCCESS;
9040 }
9041 #else
9042 kern_return_t
9043 job_mig_set_security_session(job_t j __attribute__((unused)), uuid_t uuid __attribute__((unused)), mach_port_t session __attribute__((unused)))
9044 {
9045 return KERN_SUCCESS;
9046 }
9047 #endif
9048
9049 jobmgr_t
9050 jobmgr_find_by_name(jobmgr_t jm, const char *where)
9051 {
9052 jobmgr_t jmi, jmi2;
9053
9054 /* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
9055 if (where == NULL) {
9056 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9057 where = VPROCMGR_SESSION_LOGINWINDOW;
9058 } else {
9059 where = VPROCMGR_SESSION_AQUA;
9060 }
9061 }
9062
9063 if (strcasecmp(jm->name, where) == 0) {
9064 return jm;
9065 }
9066
9067 if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
9068 jmi = root_jobmgr;
9069 goto jm_found;
9070 }
9071
9072 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9073 if (unlikely(jmi->shutting_down)) {
9074 continue;
9075 } else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
9076 continue;
9077 } else if (strcasecmp(jmi->name, where) == 0) {
9078 goto jm_found;
9079 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
9080 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
9081 if (strcasecmp(jmi2->name, where) == 0) {
9082 jmi = jmi2;
9083 goto jm_found;
9084 }
9085 }
9086 }
9087 }
9088
9089 jm_found:
9090 return jmi;
9091 }
9092
9093 kern_return_t
9094 job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
9095 {
9096 mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
9097 mach_port_array_t l2l_ports = NULL;
9098 mach_port_t reqport, rcvright;
9099 kern_return_t kr = 1;
9100 launch_data_t out_obj_array = NULL;
9101 struct ldcred *ldc = runtime_get_caller_creds();
9102 jobmgr_t jmr = NULL;
9103
9104 if (!launchd_assumes(j != NULL)) {
9105 return BOOTSTRAP_NO_MEMORY;
9106 }
9107
9108 if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
9109 job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");
9110
9111 kr = BOOTSTRAP_NOT_PRIVILEGED;
9112 goto out;
9113 }
9114
9115 job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);
9116
9117 kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
9118
9119 if (!job_assumes(j, kr == 0)) {
9120 goto out;
9121 }
9122
9123 launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);
9124
9125 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
9126 kr = BOOTSTRAP_NO_MEMORY;
9127 goto out;
9128 }
9129
9130 jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;
9131
9132 /* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
9133 * processing an IPC request, we'll do this action before the new job manager can get any IPC
9134 * requests. This serialization is guaranteed since we are single-threaded in that respect.
9135 */
9136 if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
9137 /* This is so awful. */
9138 /* Remove the job from its current job manager. */
9139 LIST_REMOVE(j, sle);
9140 LIST_REMOVE(j, pid_hash_sle);
9141
9142 /* Put the job into the target job manager. */
9143 LIST_INSERT_HEAD(&jmr->jobs, j, sle);
9144 LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
9145
9146 j->mgr = jmr;
9147 job_set_global_on_demand(j, true);
9148
9149 if (!j->holds_ref) {
9150 j->holds_ref = true;
9151 runtime_add_ref();
9152 }
9153 }
9154
9155 for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
9156 launch_data_t tmp, obj_at_idx;
9157 struct machservice *ms;
9158 job_t j_for_service;
9159 const char *serv_name;
9160 pid_t target_pid;
9161 bool serv_perpid;
9162
9163 (void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
9164 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
9165 target_pid = (pid_t)launch_data_get_integer(tmp);
9166 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
9167 serv_perpid = launch_data_get_bool(tmp);
9168 (void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
9169 serv_name = launch_data_get_string(tmp);
9170
9171 j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);
9172
9173 if (unlikely(!j_for_service)) {
9174 /* The PID probably exited */
9175 (void)job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
9176 continue;
9177 }
9178
9179 if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
9180 job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
9181 machservice_request_notifications(ms);
9182 }
9183 }
9184
9185 kr = 0;
9186
9187 out:
9188 if (out_obj_array) {
9189 launch_data_free(out_obj_array);
9190 }
9191
9192 if (l2l_ports) {
9193 mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
9194 }
9195
9196 if (kr == 0) {
9197 if (target_subset) {
9198 (void)job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
9199 }
9200 if (asport) {
9201 (void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
9202 }
9203 } else if (jmr) {
9204 jobmgr_shutdown(jmr);
9205 }
9206
9207 return kr;
9208 }
9209
9210 kern_return_t
9211 job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
9212 {
9213 job_t j2;
9214
9215 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9216 if (j->mgr->session_initialized) {
9217 job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
9218 kr = BOOTSTRAP_NOT_PRIVILEGED;
9219 } else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9220 jobmgr_t jmi;
9221
9222 /*
9223 * 5330262
9224 *
9225 * We're working around LoginWindow and the WindowServer.
9226 *
9227 * In practice, there is only one LoginWindow session. Unfortunately, for certain
9228 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
9229 * spawns a replacement loginwindow session before cleaning up the previous one.
9230 *
9231 * We're going to use the creation of a new LoginWindow context as a clue that the
9232 * previous LoginWindow context is on the way out and therefore we should just
9233 * kick-start the shutdown of it.
9234 */
9235
9236 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9237 if (unlikely(jmi->shutting_down)) {
9238 continue;
9239 } else if (strcasecmp(jmi->name, session_type) == 0) {
9240 jobmgr_shutdown(jmi);
9241 break;
9242 }
9243 }
9244 }
9245
9246 jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
9247 strcpy(j->mgr->name_init, session_type);
9248
9249 if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
9250 j2->asport = asport;
9251 (void)job_assumes(j, job_dispatch(j2, true));
9252 kr = BOOTSTRAP_SUCCESS;
9253 }
9254
9255 return kr;
9256 }
9257
9258 kern_return_t
9259 job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
9260 {
9261 struct ldcred *ldc = runtime_get_caller_creds();
9262 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9263 jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
9264 return BOOTSTRAP_NO_MEMORY;
9265 }
9266
9267 job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);
9268
9269 if (!job_assumes(j, pid1_magic == false)) {
9270 job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
9271 return BOOTSTRAP_NOT_PRIVILEGED;
9272 }
9273
9274 if (!j->anonymous) {
9275 job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
9276 return BOOTSTRAP_NOT_PRIVILEGED;
9277 }
9278
9279 jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
9280 if (target_jm == j->mgr) {
9281 job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
9282 *new_bsport = target_jm->jm_port;
9283 return BOOTSTRAP_SUCCESS;
9284 }
9285
9286 if (!target_jm) {
9287 target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
9288 if (target_jm) {
9289 target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
9290 (void)job_assumes(j, launchd_mport_deallocate(asport) == KERN_SUCCESS);
9291 }
9292 }
9293
9294 if (!job_assumes(j, target_jm != NULL)) {
9295 job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
9296 return BOOTSTRAP_NO_MEMORY;
9297 }
9298
9299 /* Remove the job from its current job manager. */
9300 LIST_REMOVE(j, sle);
9301 LIST_REMOVE(j, pid_hash_sle);
9302
9303 job_t ji = NULL, jit = NULL;
9304 LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
9305 if (ji == j) {
9306 LIST_REMOVE(ji, global_env_sle);
9307 break;
9308 }
9309 }
9310
9311 /* Put the job into the target job manager. */
9312 LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
9313 LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
9314
9315 if (ji) {
9316 LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
9317 }
9318
9319 /* Move our Mach services over if we're not in a flat namespace. */
9320 if (!g_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
9321 struct machservice *msi = NULL, *msit = NULL;
9322 SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
9323 LIST_REMOVE(msi, name_hash_sle);
9324 LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
9325 }
9326 }
9327
9328 j->mgr = target_jm;
9329
9330 if (!j->holds_ref) {
9331 /* Anonymous jobs which move around are particularly interesting to us, so we want to
9332 * stick around while they're still around.
9333 * For example, login calls into the PAM launchd module, which moves the process into
9334 * the StandardIO session by default. So we'll hold a reference on that job to prevent
9335 * ourselves from going away.
9336 */
9337 j->holds_ref = true;
9338 runtime_add_ref();
9339 }
9340
9341 *new_bsport = target_jm->jm_port;
9342
9343 return KERN_SUCCESS;
9344 }
9345
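/* The block above moves a job between managers purely by relinking
 * <sys/queue.h> list entries. A minimal, self-contained sketch of that
 * relink pattern (hypothetical types, not launchd's):
 *
 *	struct node { int v; LIST_ENTRY(node) sle; };
 *	LIST_HEAD(nodehead, node) src = LIST_HEAD_INITIALIZER(src);
 *	struct nodehead dst = LIST_HEAD_INITIALIZER(dst);
 *
 *	struct node n = { .v = 42 };
 *	LIST_INSERT_HEAD(&src, &n, sle);
 *	LIST_REMOVE(&n, sle);			// unlink from src...
 *	LIST_INSERT_HEAD(&dst, &n, sle);	// ...and relink into dst
 *
 * Note that LIST_REMOVE() takes only the element, not the head, which is why
 * job_mig_switch_to_session() can unlink j without naming j->mgr's lists.
 */
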
9346 kern_return_t
9347 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
9348 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
9349 mach_port_array_t *portsp, unsigned int *ports_cnt)
9350 {
9351 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
9352 mach_port_array_t ports = NULL;
9353 unsigned int cnt = 0, cnt2 = 0;
9354 size_t packed_size;
9355 struct machservice *ms;
9356 jobmgr_t jm;
9357 job_t ji;
9358
9359 if (!launchd_assumes(j != NULL)) {
9360 return BOOTSTRAP_NO_MEMORY;
9361 }
9362
9363 jm = j->mgr;
9364
9365 if (unlikely(!pid1_magic)) {
9366 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
9367 return BOOTSTRAP_NOT_PRIVILEGED;
9368 }
9369 if (unlikely(jobmgr_parent(jm) == NULL)) {
9370 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
9371 return BOOTSTRAP_NOT_PRIVILEGED;
9372 }
9373 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
9374 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
9375 return BOOTSTRAP_NOT_PRIVILEGED;
9376 }
9377 if (unlikely(!j->anonymous)) {
9378 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
9379 return BOOTSTRAP_NOT_PRIVILEGED;
9380 }
9381
9382 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
9383
9384 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
9385 if (!job_assumes(j, outdata_obj_array)) {
9386 goto out_bad;
9387 }
9388
9389 *outdataCnt = 20 * 1024 * 1024;
9390 mig_allocate(outdata, *outdataCnt);
9391 if (!job_assumes(j, *outdata != 0)) {
9392 goto out_bad;
9393 }
9394
9395 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9396 if (!ji->anonymous) {
9397 continue;
9398 }
9399 SLIST_FOREACH(ms, &ji->machservices, sle) {
9400 cnt++;
9401 }
9402 }
9403
9404 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
9405 if (!job_assumes(j, ports != NULL)) {
9406 goto out_bad;
9407 }
9408
9409 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9410 if (!ji->anonymous) {
9411 continue;
9412 }
9413
9414 SLIST_FOREACH(ms, &ji->machservices, sle) {
9415 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
9416 (void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
9417 } else {
9418 goto out_bad;
9419 }
9420
9421 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
9422 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
9423 } else {
9424 goto out_bad;
9425 }
9426
9427 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
9428 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
9429 } else {
9430 goto out_bad;
9431 }
9432
9433 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
9434 (void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
9435 } else {
9436 goto out_bad;
9437 }
9438
9439 ports[cnt2] = machservice_port(ms);
9440
9441 /* Increment the send-right count by one so we can shut down the jobmgr cleanly. */
9442 (void)jobmgr_assumes(jm, (errno = launchd_mport_copy_send(ports[cnt2])) == KERN_SUCCESS);
9443 cnt2++;
9444 }
9445 }
9446
9447 (void)job_assumes(j, cnt == cnt2);
9448
9449 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
9450 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
9451 if (!job_assumes(j, packed_size != 0)) {
9452 goto out_bad;
9453 }
9454
9455 launch_data_free(outdata_obj_array);
9456
9457 *portsp = ports;
9458 *ports_cnt = cnt;
9459
9460 *reqport = jm->req_port;
9461 *rcvright = jm->jm_port;
9462
9463 jm->req_port = 0;
9464 jm->jm_port = 0;
9465
9466 workaround_5477111 = j;
9467
9468 jobmgr_shutdown(jm);
9469
9470 return BOOTSTRAP_SUCCESS;
9471
9472 out_bad:
9473 if (outdata_obj_array) {
9474 launch_data_free(outdata_obj_array);
9475 }
9476 if (*outdata) {
9477 mig_deallocate(*outdata, *outdataCnt);
9478 }
9479 if (ports) {
9480 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
9481 }
9482
9483 return BOOTSTRAP_NO_MEMORY;
9484 }
9485
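/* job_mig_take_subset() serializes its reply with launch_data_pack() into a
 * MIG-allocated buffer; the peer reverses it with launch_data_unpack(). A
 * minimal round-trip sketch using only calls already used in this file (the
 * buffer size and label are arbitrary assumptions):
 *
 *	char buf[1024];
 *	size_t offset = 0;
 *
 *	launch_data_t dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
 *	launch_data_dict_insert(dict, launch_data_new_string("com.example.fake"), LAUNCH_JOBKEY_LABEL);
 *
 *	size_t sz = launch_data_pack(dict, buf, sizeof(buf), NULL, NULL);	// 0 means failure
 *	launch_data_t back = launch_data_unpack(buf, sz, NULL, 0, &offset, NULL);
 *
 *	launch_data_free(dict);
 *
 * As elsewhere in this file, the unpacked object lives inside the buffer
 * itself, so the buffer (not 'back') is what eventually gets freed.
 */
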
9486 kern_return_t
9487 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
9488 {
9489 int bsdepth = 0;
9490 jobmgr_t jmr;
9491
9492 if (!launchd_assumes(j != NULL)) {
9493 return BOOTSTRAP_NO_MEMORY;
9494 }
9495
9496 jmr = j->mgr;
9497
9498 while ((jmr = jobmgr_parent(jmr)) != NULL) {
9499 bsdepth++;
9500 }
9501
9502 /* Since we use recursion, we need an artificial depth limit for subsets. */
9503 if (unlikely(bsdepth > 100)) {
9504 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Bootstrap depth %d exceeds the maximum of 100.", bsdepth);
9505 return BOOTSTRAP_NO_MEMORY;
9506 }
9507
9508 char name[NAME_MAX];
9509 snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
9510
9511 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
9512 if (unlikely(requestorport == MACH_PORT_NULL)) {
9513 return BOOTSTRAP_NOT_PRIVILEGED;
9514 }
9515 return BOOTSTRAP_NO_MEMORY;
9516 }
9517
9518 *subsetportp = jmr->jm_port;
9519 jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
9520
9521 /* A job could create multiple subsets, so only add a reference the first time
9522 * it does so, so that we don't have to keep a count.
9523 */
9524 if (j->anonymous && !j->holds_ref) {
9525 j->holds_ref = true;
9526 runtime_add_ref();
9527 }
9528
9529 job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
9530 return BOOTSTRAP_SUCCESS;
9531 }
9532
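/* For reference, the client side of job_mig_subset() is the public
 * bootstrap_subset() routine from bootstrap.h. A minimal sketch of creating
 * a subset of the caller's own bootstrap (error handling elided):
 *
 *	mach_port_t subset = MACH_PORT_NULL;
 *	kern_return_t kr = bootstrap_subset(bootstrap_port, mach_task_self(), &subset);
 *	if (kr == BOOTSTRAP_SUCCESS) {
 *		// Services registered via 'subset' go away when the requestor
 *		// port (here, the caller's task port) dies.
 *	}
 */
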
9533 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
9534 job_t
9535 xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
9536 {
9537 jobmgr_t where2put = NULL;
9538
9539 launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
9540 if (destname) {
9541 if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
9542 const char *str = launch_data_get_string(destname);
9543 if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
9544 where2put = _s_xpc_system_domain;
9545 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
9546 where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
9547 } else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
9548 where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
9549 } else {
9550 jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
9551 errno = EINVAL;
9552 }
9553 } else {
9554 jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
9555 errno = EINVAL;
9556 }
9557
9558 if (where2put) {
9559 launch_data_t mi = NULL;
9560 if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
9561 if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
9562 jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
9563 where2put = NULL;
9564 errno = EINVAL;
9565 }
9566 }
9567 }
9568 } else {
9569 where2put = jm;
9570 }
9571
9572 job_t j = NULL;
9573 if (where2put) {
9574 jobmgr_log(where2put, LOG_DEBUG, "Importing service...");
9575 j = jobmgr_import2(where2put, pload);
9576 if (j) {
9577 j->xpc_service = true;
9578 if (where2put->xpc_singleton) {
9579 /* If the service was destined for one of the global domains,
9580 * then we have to alias it into our local domain to reserve the
9581 * name.
9582 */
9583 job_t ja = job_new_alias(jm, j);
9584 if (!ja) {
9585 /* If we failed to alias the job because of a conflict over
9586 * the label, then we remove it from the global domain. We
9587 * don't want to risk having imported a malicious job into
9588 * one of the global domains.
9589 */
9590 if (errno != EEXIST) {
9591 job_assumes(j, errno == 0);
9592 } else {
9593 job_log(j, LOG_ERR, "Failed to alias job into: %s", where2put->name);
9594 }
9595
9596 job_remove(j);
9597 } else {
9598 ja->xpc_service = true;
9599 j = ja;
9600 }
9601 }
9602 }
9603 }
9604
9605 return j;
9606 }
9607
9608 kern_return_t
9609 xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
9610 {
9611 if (unlikely(!pid1_magic)) {
9612 job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
9613 return BOOTSTRAP_NOT_PRIVILEGED;
9614 }
9615 if (!MACH_PORT_VALID(reqport)) {
9616 return BOOTSTRAP_UNKNOWN_SERVICE;
9617 }
9618
9619 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
9620 /* All XPC domains are children of the root job manager. What we're creating
9621 * here is really just a skeleton. By creating it, we're adding reqport to our
9622 * port set. It will have two messages on it. The first specifies the
9623 * environment of the originator. This is so we can cache it and hand it to
9624 * xpcproxy to bootstrap our services. The second is the set of jobs that is
9625 * to be bootstrapped in.
9626 */
9627 jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
9628 if (job_assumes(j, jm != NULL)) {
9629 jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
9630 jm->shortdesc = "private";
9631 kr = BOOTSTRAP_SUCCESS;
9632 }
9633
9634 return kr;
9635 }
9636
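/* Taken together, the routines in this block form a three-step handshake for
 * bringing up an XPC domain (a summary of this file's logic, not of any
 * public API):
 *
 *	1. xpc_domain_import2()         creates the skeleton job manager
 *	2. xpc_domain_set_environment() caches the originator's context in it
 *	                                (ports, PID, euid/egid, ASID)
 *	3. xpc_domain_load_services()   imports the job dictionaries and marks
 *	                                the session initialized
 *
 * xpc_domain_check_in() later hands the cached environment back out when the
 * domain's services are bootstrapped via xpcproxy.
 */
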
9637 kern_return_t
9638 xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
9639 {
9640 if (!j) {
9641 /* Due to the whacky nature of XPC service bootstrapping, we can end up
9642 * getting this message long after the requesting process has gone away.
9643 * See <rdar://problem/8593143>.
9644 */
9645 return BOOTSTRAP_UNKNOWN_SERVICE;
9646 }
9647
9648 jobmgr_t jm = j->mgr;
9649 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9650 return BOOTSTRAP_NOT_PRIVILEGED;
9651 }
9652
9653 if (jm->req_asport != MACH_PORT_NULL) {
9654 return BOOTSTRAP_NOT_PRIVILEGED;
9655 }
9656
9657 struct ldcred *ldc = runtime_get_caller_creds();
9658 struct proc_bsdshortinfo proc;
9659 if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
9660 if (errno != ESRCH) {
9661 jobmgr_assumes(jm, errno == 0);
9662 }
9663
9664 jm->error = errno;
9665 jobmgr_remove(jm);
9666 return BOOTSTRAP_NO_MEMORY;
9667 }
9668
9669 if (!jobmgr_assumes(jm, audit_session_port(ldc->asid, &jm->req_asport) == 0)) {
9670 jm->error = EPERM;
9671 jobmgr_remove(jm);
9672 job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
9673 return BOOTSTRAP_NOT_PRIVILEGED;
9674 }
9675
9676 (void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s[%i]", proc.pbsi_comm, ldc->pid);
9677 strlcpy(jm->owner, proc.pbsi_comm, sizeof(jm->owner));
9678 jm->req_bsport = bsport;
9679 jm->req_excport = excport;
9680 jm->req_rport = rp;
9681 jm->req_ctx = ctx;
9682 jm->req_ctx_sz = ctx_sz;
9683 jm->req_pid = ldc->pid;
9684 jm->req_euid = ldc->euid;
9685 jm->req_egid = ldc->egid;
9686 jm->req_asid = ldc->asid;
9687
9688 return KERN_SUCCESS;
9689 }
9690
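/* A note on the proc_pidinfo() idiom above: PROC_PIDT_SHORTBSDINFO fills in a
 * struct proc_bsdshortinfo, and a return of zero indicates failure, with
 * errno as the reason (ESRCH once the caller has exited). Sketch of the same
 * lookup in isolation ('pid' is a hypothetical variable):
 *
 *	struct proc_bsdshortinfo info;
 *	if (proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 1, &info, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
 *		// lookup failed; consult errno
 *	} else {
 *		// info.pbsi_comm holds the (truncated) process name
 *	}
 */
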
9691 kern_return_t
9692 xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
9693 {
9694 if (!j) {
9695 return BOOTSTRAP_UNKNOWN_SERVICE;
9696 }
9697
9698 /* This is just for XPC domains (for now). */
9699 if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9700 return BOOTSTRAP_NOT_PRIVILEGED;
9701 }
9702 if (j->mgr->session_initialized) {
9703 jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
9704 return BOOTSTRAP_NOT_PRIVILEGED;
9705 }
9706
9707 size_t offset = 0;
9708 launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
9709 if (!jobmgr_assumes(j->mgr, services != NULL)) {
9710 return BOOTSTRAP_NO_MEMORY;
9711 }
9712
9713 size_t i = 0;
9714 size_t c = launch_data_array_get_count(services);
9715 for (i = 0; i < c; i++) {
9716 job_t nj = NULL;
9717 launch_data_t ploadi = launch_data_array_get_index(services, i);
9718 if (!(nj = xpc_domain_import_service(j->mgr, ploadi))) {
9719 /* If loading one job fails, just fail the whole thing. At this
9720 * point, xpchelper should receive the failure and then just refuse
9721 * to launch the application, since its XPC services could not be
9722 * fully bootstrapped.
9723 *
9724 * Take care to not reference the job or its manager after this
9725 * point.
9726 */
9727 if (errno == EINVAL) {
9728 jobmgr_log(j->mgr, LOG_ERR, "Service at index is not valid: %lu", i);
9729 } else if (errno == EEXIST) {
9730 /* If we get back EEXIST, we know that the payload was a
9731 * dictionary with a label. But, well, I guess it never hurts to
9732 * check.
9733 */
9734 char *label = "(bogus)";
9735 if (launch_data_get_type(ploadi) == LAUNCH_DATA_DICTIONARY) {
9736 launch_data_t llabel = launch_data_dict_lookup(ploadi, LAUNCH_JOBKEY_LABEL);
9737 if (llabel && launch_data_get_type(llabel) == LAUNCH_DATA_STRING) {
9738 label = (char *)launch_data_get_string(llabel);
9739 }
9740 }
9741 jobmgr_log(j->mgr, LOG_ERR, "Service name conflict: %s", label);
9742 }
9743
9744 j->mgr->error = errno;
9745 jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
9746 jobmgr_remove(j->mgr);
9747 break;
9748 } else {
9749 jobmgr_log(j->mgr, LOG_DEBUG, "Imported service %s", nj->label);
9750 job_dispatch(nj, false);
9751 }
9752 }
9753
9754 kern_return_t result = BOOTSTRAP_NO_MEMORY;
9755 if (i == c) {
9756 j->mgr->session_initialized = true;
9757 (void)jobmgr_assumes(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS) == KERN_SUCCESS);
9758 j->mgr->req_rport = MACH_PORT_NULL;
9759
9760 /* Returning a failure code will destroy the message, whereas returning
9761 * success will not, so we need to clean up here.
9762 */
9763 mig_deallocate(services_buff, services_sz);
9764 result = BOOTSTRAP_SUCCESS;
9765 }
9766
9767 return result;
9768 }
9769
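/* The asymmetric cleanup above follows the MIG convention for out-of-line
 * data: when a server routine returns an error, the request message (and its
 * OOL buffers) is destroyed for us; when it returns success, the memory is
 * ours to release. Hence the explicit
 *
 *	mig_deallocate(services_buff, services_sz);
 *
 * on the success path only.
 */
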
9770 kern_return_t
9771 xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport, mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid, int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
9772 {
9773 if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
9774 return BOOTSTRAP_UNKNOWN_SERVICE;
9775 }
9776 jobmgr_t jm = j->mgr;
9777 if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
9778 return BOOTSTRAP_NOT_PRIVILEGED;
9779 }
9780
9781 if (jm->req_asport == MACH_PORT_NULL) {
9782 return BOOTSTRAP_NOT_PRIVILEGED;
9783 }
9784
9785 *bsport = jm->req_bsport;
9786 *sbsport = root_jobmgr->jm_port;
9787 *excport = jm->req_excport;
9788 *asport = jm->req_asport;
9789 *uid = jm->req_euid;
9790 *gid = jm->req_egid;
9791 *asid = jm->req_asid;
9792
9793 *ctx = jm->req_ctx;
9794 *ctx_sz = jm->req_ctx_sz;
9795
9796 return KERN_SUCCESS;
9797 }
9798
9799 kern_return_t
9800 xpc_domain_get_service_name(job_t j, event_name_t name)
9801 {
9802 if (!j) {
9803 return BOOTSTRAP_NO_MEMORY;
9804 }
9805 if (!j->xpc_service) {
9806 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
9807 return BOOTSTRAP_NOT_PRIVILEGED;
9808 }
9809
9810 struct machservice *ms = SLIST_FIRST(&j->machservices);
9811 if (!ms) {
9812 jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name of job with no machservices: %s", j->label);
9813 return BOOTSTRAP_UNKNOWN_SERVICE;
9814 }
9815
9816 (void)strlcpy(name, ms->name, sizeof(event_name_t));
9817 return BOOTSTRAP_SUCCESS;
9818 }
9819 #endif
9820
9821 kern_return_t
9822 xpc_events_get_channel_name(job_t j __attribute__((unused)), event_name_t stream __attribute__((unused)), uint64_t token __attribute__((unused)), event_name_t name __attribute__((unused)))
9823 {
9824 return KERN_FAILURE;
9825 }
9826
9827 kern_return_t
9828 xpc_events_get_event_name(job_t j, event_name_t stream, uint64_t token, event_name_t name)
9829 {
9830 struct externalevent *event = externalevent_find(stream, token);
9831 if (event && j->event_monitor) {
9832 (void)strlcpy(name, event->name, sizeof(event_name_t));
9833 } else {
9834 event = NULL;
9835 }
9836
9837 return event ? BOOTSTRAP_SUCCESS : BOOTSTRAP_UNKNOWN_SERVICE;
9838 }
9839
9840 kern_return_t
9841 xpc_events_set_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t event, mach_msg_type_number_t eventCnt)
9842 {
9843 if (j->anonymous) {
9844 return BOOTSTRAP_NOT_PRIVILEGED;
9845 }
9846
9847 struct externalevent *eei = NULL;
9848 LIST_FOREACH(eei, &j->events, job_le) {
9849 if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
9850 externalevent_delete(eei);
9851 eventsystem_ping();
9852 break;
9853 }
9854 }
9855
9856 bool success = false;
9857 struct eventsystem *es = eventsystem_find(stream);
9858 if (!es) {
9859 es = eventsystem_new(stream);
9860 (void)job_assumes(j, es != NULL);
9861 }
9862
9863 if (es) {
9864 size_t offset = 0;
9865 launch_data_t unpacked = launch_data_unpack((void *)event, eventCnt, NULL, 0, &offset, NULL);
9866 if (unpacked && launch_data_get_type(unpacked) == LAUNCH_DATA_DICTIONARY) {
9867 success = externalevent_new(j, es, key, unpacked);
9868 }
9869 }
9870
9871 if (!success) {
9872 mig_deallocate(event, eventCnt);
9873 }
9874
9875 return KERN_SUCCESS;
9876 }
9877
9878 kern_return_t
9879 xpc_events_get_event(job_t j, event_name_t stream, event_name_t key, vm_offset_t *event, mach_msg_type_number_t *eventCnt)
9880 {
9881 struct externalevent *eei = NULL;
9882 LIST_FOREACH(eei, &j->events, job_le) {
9883 if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
9884 /* Big enough. */
9885 *eventCnt = 10 * 1024;
9886 mig_allocate(event, *eventCnt);
9887
9888 size_t sz = launch_data_pack(eei->event, (void *)*event, *eventCnt, NULL, NULL);
9889 if (!job_assumes(j, sz != 0)) {
9890 mig_deallocate(*event, *eventCnt);
9891 return BOOTSTRAP_NO_MEMORY;
9892 }
9893
9894 return BOOTSTRAP_SUCCESS;
9895 }
9896 }
9897
9898 return BOOTSTRAP_UNKNOWN_SERVICE;
9899 }
9900
9901 struct machservice *
9902 xpc_events_find_channel(job_t j, event_name_t stream, mach_port_t *p)
9903 {
9904 struct machservice *msi = NULL;
9905 SLIST_FOREACH(msi, &j->machservices, sle) {
9906 if (strcmp(stream, msi->name) == 0) {
9907 break;
9908 }
9909 }
9910
9911 if (!msi) {
9912 mach_port_t sp = MACH_PORT_NULL;
9913 msi = machservice_new(j, stream, &sp, false);
9914 if (job_assumes(j, msi)) {
9915 /* Hack to keep this from being publicly accessible through
9916 * bootstrap_look_up().
9917 */
9918 LIST_REMOVE(msi, name_hash_sle);
9919 msi->event_channel = true;
9920 *p = sp;
9921
9922 machservice_watch(j, msi);
9923 } else {
9924 errno = BOOTSTRAP_NO_MEMORY;
9925 }
9926 } else {
9927 if (!msi->event_channel) {
9928 job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
9929 msi = NULL;
9930 errno = BOOTSTRAP_NAME_IN_USE;
9931 } else {
9932 *p = msi->port;
9933 }
9934 }
9935
9936 return msi;
9937 }
9938
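/* The LIST_REMOVE() above works because bootstrap look-ups resolve names
 * through the per-manager ms_hash table (see the relinking in
 * job_mig_switch_to_session()): a machservice unlinked from its name-hash
 * bucket still exists on the job's machservices list and still has a port,
 * but can no longer be found by name. That is the entire "privacy" mechanism
 * for event channels.
 */
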
9939 kern_return_t
9940 xpc_events_channel_check_in(job_t j, event_name_t stream, uint64_t flags __attribute__((unused)), mach_port_t *p)
9941 {
9942 struct machservice *ms = xpc_events_find_channel(j, stream, p);
9943 if (ms) {
9944 if (ms->isActive) {
9945 job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
9946 *p = MACH_PORT_NULL;
9947 errno = BOOTSTRAP_SERVICE_ACTIVE;
9948 } else {
9949 job_checkin(j);
9950 machservice_request_notifications(ms);
9951 errno = BOOTSTRAP_SUCCESS;
9952 }
9953 }
9954
9955 return errno;
9956 }
9957
9958 kern_return_t
9959 xpc_events_channel_look_up(job_t j, event_name_t stream, event_token_t token, uint64_t flags __attribute__((unused)), mach_port_t *p)
9960 {
9961 if (!j->event_monitor) {
9962 return BOOTSTRAP_NOT_PRIVILEGED;
9963 }
9964
9965 struct externalevent *ee = externalevent_find(stream, token);
9966 if (!ee) {
9967 return BOOTSTRAP_UNKNOWN_SERVICE;
9968 }
9969
9970 struct machservice *ms = xpc_events_find_channel(ee->job, stream, p);
9971 if (ms) {
9972 errno = BOOTSTRAP_SUCCESS;
9973 }
9974
9975 return errno;
9976 }
9977
9978 kern_return_t
9979 job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
9980 {
9981 struct ldcred *ldc = runtime_get_caller_creds();
9982 job_t otherj;
9983
9984 if (!launchd_assumes(j != NULL)) {
9985 return BOOTSTRAP_NO_MEMORY;
9986 }
9987
9988 if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
9989 return BOOTSTRAP_UNKNOWN_SERVICE;
9990 }
9991
9992 #if TARGET_OS_EMBEDDED
9993 bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
9994 #else
9995 bool allow_non_root_kickstart = false;
9996 #endif
9997
9998 if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
9999 return BOOTSTRAP_NOT_PRIVILEGED;
10000 }
10001
10002 #if HAVE_SANDBOX
10003 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10004 return BOOTSTRAP_NOT_PRIVILEGED;
10005 }
10006 #endif
10007
10008 if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
10009 return BOOTSTRAP_SERVICE_ACTIVE;
10010 }
10011
10012 otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
10013 otherj = job_dispatch(otherj, true);
10014
10015 if (!job_assumes(j, otherj && otherj->p)) {
10016 /* <rdar://problem/6787083> Clear this flag if we failed to start the job. (Guard against job_dispatch() having returned NULL.) */
10017 if (otherj) { otherj->stall_before_exec = false; }
10018 return BOOTSTRAP_NO_MEMORY;
10019 }
10020
10021 *out_pid = otherj->p;
10022
10023 return 0;
10024 }
10025
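/* A note on the sandbox_check() calls in this file: per the usage here, a
 * positive return means the caller's sandbox profile denies the named
 * operation, and SANDBOX_FILTER_NONE means no extra filter argument is
 * supplied. The guard reduces to:
 *
 *	if (sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0) {
 *		return BOOTSTRAP_NOT_PRIVILEGED;	// denied by sandbox
 *	}
 */
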
10026 kern_return_t
10027 job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
10028 {
10029 launch_data_t jobdata = NULL;
10030 size_t data_offset = 0;
10031 struct ldcred *ldc = runtime_get_caller_creds();
10032 job_t jr;
10033
10034 if (!launchd_assumes(j != NULL)) {
10035 return BOOTSTRAP_NO_MEMORY;
10036 }
10037
10038 if (unlikely(j->deny_job_creation)) {
10039 return BOOTSTRAP_NOT_PRIVILEGED;
10040 }
10041
10042 #if HAVE_SANDBOX
10043 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
10044 return BOOTSTRAP_NOT_PRIVILEGED;
10045 }
10046 #endif
10047
10048 if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
10049 job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
10050 return VPROC_ERR_TRY_PER_USER;
10051 }
10052
10053 if (!job_assumes(j, indataCnt != 0)) {
10054 return 1;
10055 }
10056
10057 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
10058 if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
10059 return 1;
10060 }
10061
10062 jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
10063 if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
10064 jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
10065 return 1;
10066 }
10067
10068 jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);
10069
10070 launch_data_t label = NULL;
10071 launch_data_t wait4debugger = NULL;
10072 if (!jr) {
10073 switch (errno) {
10074 case EEXIST:
10075 /* If EEXIST was returned, we know that there is a label string in
10076 * the dictionary. So we don't need to check the types here; that
10077 * has already been done.
10078 */
10079 label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
10080 jr = job_find(NULL, launch_data_get_string(label));
10081 if (job_assumes(j, jr != NULL) && !jr->p) {
10082 wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
10083 if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
10084 if (launch_data_get_bool(wait4debugger)) {
10085 /* If the job exists, we're going to kick-start it, but
10086 * we need to give the caller the opportunity to start
10087 * it suspended if it so desires. But this will only
10088 * take effect if the job isn't running.
10089 */
10090 jr->wait4debugger_oneshot = true;
10091 }
10092 }
10093 }
10094
10095 *outj = jr;
10096 return BOOTSTRAP_NAME_IN_USE;
10097 default:
10098 return BOOTSTRAP_NO_MEMORY;
10099 }
10100 }
10101
10102 if (pid1_magic) {
10103 jr->mach_uid = ldc->uid;
10104 }
10105
10106 jr->legacy_LS_job = true;
10107 jr->abandon_pg = true;
10108 jr->asport = asport;
10109 uuid_clear(jr->expected_audit_uuid);
10110 jr = job_dispatch(jr, true);
10111
10112 if (!job_assumes(j, jr != NULL)) {
10113 /* jr is NULL here, so there is nothing left to remove. */
10114 return BOOTSTRAP_NO_MEMORY;
10115 }
10116
10117 if (!job_assumes(jr, jr->p)) {
10118 job_remove(jr);
10119 return BOOTSTRAP_NO_MEMORY;
10120 }
10121
10122 job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
10123 *outj = jr;
10124
10125 return BOOTSTRAP_SUCCESS;
10126 }
10127
10128 kern_return_t
10129 job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
10130 {
10131 job_t nj = NULL;
10132 kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
10133 if (likely(kr == KERN_SUCCESS)) {
10134 if (job_setup_exit_port(nj) != KERN_SUCCESS) {
10135 job_remove(nj);
10136 kr = BOOTSTRAP_NO_MEMORY;
10137 } else {
10138 /* Do not return until the job has called exec(3), thereby making it
10139 * safe for the caller to send it SIGCONT.
10140 *
10141 * <rdar://problem/9042798>
10142 */
10143 nj->spawn_reply_port = rp;
10144 kr = MIG_NO_REPLY;
10145 }
10146 } else if (kr == BOOTSTRAP_NAME_IN_USE) {
10147 bool was_running = nj->p;
10148 if (job_dispatch(nj, true)) {
10149 if (!was_running) {
10150 job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");
10151
10152 if (job_setup_exit_port(nj) == KERN_SUCCESS) {
10153 nj->spawn_reply_port = rp;
10154 kr = MIG_NO_REPLY;
10155 } else {
10156 kr = BOOTSTRAP_NO_MEMORY;
10157 }
10158 } else {
10159 *obsvr_port = MACH_PORT_NULL;
10160 *child_pid = nj->p;
10161 kr = KERN_SUCCESS;
10162 }
10163 } else {
10164 job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
10165 kr = BOOTSTRAP_UNKNOWN_SERVICE;
10166 }
10167 }
10168
10169 mig_deallocate(indata, indataCnt);
10170 return kr;
10171 }
10172
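/* MIG_NO_REPLY, as used above, tells the MIG demux not to send a reply
 * automatically; launchd stashes the reply port in nj->spawn_reply_port and
 * answers later, once the child has called exec(3). The general shape of the
 * deferred-reply pattern (a sketch, not launchd's exact reply path):
 *
 *	// in the server routine:
 *	nj->spawn_reply_port = rp;	// keep the reply right for later
 *	return MIG_NO_REPLY;		// suppress the automatic reply
 *
 *	// later, when the awaited event fires, build and send the reply
 *	// message to the saved port by hand.
 */
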
10173 kern_return_t
10174 job_mig_event_source_check_in(job_t j, name_t name, mach_port_t ping_port, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt, uint64_t *tokens)
10175 {
10176 if (!j || !j->event_monitor) {
10177 return BOOTSTRAP_NOT_PRIVILEGED;
10178 }
10179
10180 /* Update our ping-port. One ping will force all the notification systems
10181 * to check in, so they'll all give us send-once rights, and it doesn't
10182 * really matter which one we keep around. This isn't the most efficient
10183 * scheme, but by doing the exchange over a single channel (the job's
10184 * MachService), we get the port back when the job dies, we can create a
10185 * send-once right for ourselves if we didn't have one already, and we can
10186 * keep the helper alive without it needing to bootstrap communication
10187 * itself.
10188 *
10189 * So we're trading efficiency for robustness. In this case, check-ins
10190 * should happen pretty infrequently, so it's well worth it.
10191 */
10192 if (_s_event_update_port != MACH_PORT_NULL) {
10193 (void)job_assumes(j, launchd_mport_deallocate(_s_event_update_port) == KERN_SUCCESS);
10194 }
10195 _s_event_update_port = ping_port;
10196
10197 kern_return_t result = BOOTSTRAP_NO_MEMORY;
10198 launch_data_t arr = launch_data_alloc(LAUNCH_DATA_ARRAY);
10199 if (job_assumes(j, arr != NULL)) {
10200 struct eventsystem *es = eventsystem_find(name);
10201 if (unlikely(es == NULL)) {
10202 es = eventsystem_new(name);
10203 }
10204
10205 if (job_assumes(j, es != NULL)) {
10206 struct externalevent *ei = NULL;
10207 size_t i = 0;
10208 LIST_FOREACH(ei, &es->events, sys_le) {
10209 (void)job_assumes(j, launch_data_array_set_index(arr, ei->event, i));
10210 if (job_assumes(j, i < 1024)) {
10211 tokens[i] = ei->id;
10212 } else {
10213 break;
10214 }
10215 i++;
10216 }
10217
10218 /* Big enough. */
10219 *outvalCnt = 10 * 1024;
10220 mig_allocate(outval, *outvalCnt);
10221
10222 size_t sz = launch_data_pack(arr, (void *)*outval, *outvalCnt, NULL, NULL);
10223 if (job_assumes(j, sz != 0)) {
10224 result = BOOTSTRAP_SUCCESS;
10225 } else {
10226 mig_deallocate(*outval, *outvalCnt);
10227 }
10228 }
10229
10230 /* Total hack, but launch_data doesn't do ref-counting. */
10231 struct _launch_data *hack = (struct _launch_data *)arr;
10232 free(hack->_array);
10233 free(arr);
10234 }
10235
10236 return result;
10237 }
10238
10239 kern_return_t
10240 job_mig_event_set_state(job_t j, name_t name, uint64_t token, boolean_t state)
10241 {
10242 if (!j->event_monitor) {
10243 return BOOTSTRAP_NOT_PRIVILEGED;
10244 }
10245
10246 struct externalevent *ei = externalevent_find(name, token);
10247 if (job_assumes(j, ei != NULL)) {
10248 ei->state = state;
10249 if (job_dispatch(ei->job, false) == NULL) {
10250 if (errno == EPERM) {
10251 return BOOTSTRAP_NOT_PRIVILEGED;
10252 }
10253 return BOOTSTRAP_NO_MEMORY;
10254 }
10255 } else {
10256 return BOOTSTRAP_NO_MEMORY;
10257 }
10258
10259 return BOOTSTRAP_SUCCESS;
10260 }
10261
10262 void
10263 jobmgr_init(bool sflag)
10264 {
10265 const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
10266 SLIST_INIT(&s_curious_jobs);
10267 LIST_INIT(&s_needing_sessions);
10268
10269 launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
10270 #ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
10271 launchd_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
10272 _s_xpc_system_domain->req_asid = g_audit_session;
10273 _s_xpc_system_domain->req_asport = g_audit_session_port;
10274 _s_xpc_system_domain->shortdesc = "system";
10275 #endif
10276 if (pid1_magic) {
10277 root_jobmgr->monitor_shutdown = true;
10278 }
10279
10280 uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
10281 s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
10282 if (likely(s_no_hang_fd == -1)) {
10283 if (jobmgr_assumes(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK)) != -1)) {
10284 (void)jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr) != -1);
10285 }
10286 }
10287 s_no_hang_fd = _fd(s_no_hang_fd);
10288 }
10289
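/* The fallback above watches /dev itself with EVFILT_VNODE; O_EVTONLY opens
 * the descriptor for event notification only, so it doesn't pin the volume
 * the way a normal open would. kevent_mod() is launchd's internal wrapper;
 * with raw kqueue(2) the equivalent registration would look like this
 * (a sketch, error handling elided):
 *
 *	struct kevent ke;
 *	int kq = kqueue();
 *	int fd = open("/dev", O_EVTONLY | O_NONBLOCK);
 *	EV_SET(&ke, fd, EVFILT_VNODE, EV_ADD,
 *	    NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE, 0, NULL);
 *	(void)kevent(kq, &ke, 1, NULL, 0, NULL);
 */
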
10290 size_t
10291 our_strhash(const char *s)
10292 {
10293 size_t c, r = 5381;
10294
10295 /* djb2
10296 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
10297 */
10298
10299 while ((c = *s++)) {
10300 r = ((r << 5) + r) + c; /* hash*33 + c */
10301 }
10302
10303 return r;
10304 }
10305
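/* Worked example of the djb2 recurrence, starting from the 5381 seed (note
 * that ((r << 5) + r) is just r * 33):
 *
 *	our_strhash("ab"):
 *	    r = 5381
 *	    r = 5381 * 33 + 'a' (97)   = 177670
 *	    r = 177670 * 33 + 'b' (98) = 5863208
 */
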
10306 size_t
10307 hash_label(const char *label)
10308 {
10309 return our_strhash(label) % LABEL_HASH_SIZE;
10310 }
10311
10312 size_t
10313 hash_ms(const char *msstr)
10314 {
10315 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
10316 }
10317
10318 bool
10319 waiting4removal_new(job_t j, mach_port_t rp)
10320 {
10321 struct waiting_for_removal *w4r;
10322
10323 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
10324 return false;
10325 }
10326
10327 w4r->reply_port = rp;
10328
10329 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
10330
10331 return true;
10332 }
10333
10334 void
10335 waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
10336 {
10337 (void)job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);
10338
10339 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
10340
10341 free(w4r);
10342 }
10343
10344 size_t
10345 get_kern_max_proc(void)
10346 {
10347 int mib[] = { CTL_KERN, KERN_MAXPROC };
10348 int max = 100;
10349 size_t max_sz = sizeof(max);
10350
10351 (void)launchd_assumes(sysctl(mib, 2, &max, &max_sz, NULL, 0) != -1);
10352
10353 return max;
10354 }
10355
10356 /* See rdar://problem/6271234 */
10357 void
10358 eliminate_double_reboot(void)
10359 {
10360 if (unlikely(!pid1_magic)) {
10361 return;
10362 }
10363
10364 struct stat sb;
10365 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
10366 const char *try_again = "Will try again at next boot.";
10367 int result = ~0;
10368
10369 if (unlikely(stat(argv[1], &sb) != -1)) {
10370 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
10371
10372 int wstatus;
10373 pid_t p;
10374
10375 (void)jobmgr_assumes(root_jobmgr, (errno = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ)) == 0);
10376
10377 if (errno) {
10378 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script! %s", try_again);
10379 goto out;
10380 }
10381
10382 if (!jobmgr_assumes(root_jobmgr, waitpid(p, &wstatus, 0) != -1)) {
10383 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't confirm that deferred install script exited successfully! %s", try_again);
10384 goto out;
10385 }
10386
10387 if (jobmgr_assumes(root_jobmgr, WIFEXITED(wstatus) != 0)) {
10388 if (jobmgr_assumes(root_jobmgr, (result = WEXITSTATUS(wstatus)) == EXIT_SUCCESS)) {
10389 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
10390 } else {
10391 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script exited with status %d. %s", WEXITSTATUS(wstatus), try_again);
10392 }
10393 } else {
10394 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Confirmed that deferred install script exited, but couldn't confirm that it was successful. %s", try_again);
10395 }
10396 }
10397 out:
10398 if (result == 0) {
10399 /* If the unlink(2) were to fail, it would most likely fail with EBUSY. All the other
10400 * failure cases for unlink(2) don't apply when we're running as PID 1 and have verified
10401 * that the file exists. Outside of someone deliberately messing with us (like if /etc/rc.deferred_install
10402 * is actually a looping symlink or a mount point for a filesystem) and I/O errors, we should be good.
10403 */
10404 if (!jobmgr_assumes(root_jobmgr, unlink(argv[1]) != -1)) {
10405 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script couldn't be removed!");
10406 }
10407 }
10408 }
10409
10410 void
10411 jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
10412 {
10413 job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
10414 if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10415 j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
10416 job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
10417 } else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
10418 j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
10419 job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
10420 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
10421 /* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
10422 * You can't set this in a plist.
10423 */
10424 } else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
10425 /* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
10426 * complain about it.
10427 */
10428 } else {
10429 job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
10430 }
10431
10432 if (unlikely(!j->jetsam_properties)) {
10433 j->jetsam_properties = true;
10434 LIST_INSERT_HEAD(&j->mgr->jetsam_jobs, j, jetsam_sle);
10435 j->mgr->jetsam_jobs_cnt++;
10436 }
10437
10438 j->jetsam_seq = s_jetsam_sequence_id++;
10439 }
10440
10441 int
10442 launchd_set_jetsam_priorities(launch_data_t priorities)
10443 {
10444 if (!launchd_assumes(launch_data_get_type(priorities) == LAUNCH_DATA_ARRAY)) {
10445 return EINVAL;
10446 }
10447
10448 jobmgr_t jm = NULL;
10449 #if !TARGET_OS_EMBEDDED
10450 /* For testing. */
10451 jm = jobmgr_find_by_name(root_jobmgr, VPROCMGR_SESSION_AQUA);
10452 if (!launchd_assumes(jm != NULL)) {
10453 return EINVAL;
10454 }
10455 #else
10456 /* Since this is for embedded, we can assume that the root job manager holds the Jetsam jobs. */
10457 jm = root_jobmgr;
10458
10459 if (!g_embedded_privileged_action) {
10460 return EPERM;
10461 }
10462 #endif
10463
10464 size_t npris = launch_data_array_get_count(priorities);
10465
10466 job_t ji = NULL;
10467 size_t i = 0;
10468 for (i = 0; i < npris; i++) {
10469 launch_data_t ldi = launch_data_array_get_index(priorities, i);
10470 if (!launchd_assumes(launch_data_get_type(ldi) == LAUNCH_DATA_DICTIONARY)) {
10471 continue;
10472 }
10473
10474 launch_data_t label = NULL;
10475 if (!launchd_assumes(label = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMLABEL))) {
10476 continue;
10477 }
10478 const char *_label = launch_data_get_string(label);
10479
10480 ji = job_find(NULL, _label);
10481 if (!launchd_assumes(ji != NULL)) {
10482 continue;
10483 }
10484
10485 launch_data_dict_iterate(ldi, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, ji);
10486
10487 launch_data_t frontmost = NULL;
10488 if ((frontmost = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMFRONTMOST)) && launch_data_get_type(frontmost) == LAUNCH_DATA_BOOL) {
10489 ji->jetsam_frontmost = launch_data_get_bool(frontmost);
10490 }
10491 }
10492
10493 i = 0;
10494 job_t *jobs = (job_t *)calloc(jm->jetsam_jobs_cnt, sizeof(job_t));
10495 if (launchd_assumes(jobs != NULL)) {
10496 LIST_FOREACH(ji, &jm->jetsam_jobs, jetsam_sle) {
10497 if (ji->p) {
10498 jobs[i] = ji;
10499 i++;
10500 }
10501 }
10502 }
10503
10504 size_t totalpris = i;
10505
10506 int result = EINVAL;
10507
10508 /* It is conceivable that there could be no Jetsam jobs running. */
10509 if (totalpris > 0) {
10510 /* Yay blocks! */
10511 qsort_b((void *)jobs, totalpris, sizeof(job_t), ^ int (const void *lhs, const void *rhs) {
10512 job_t _lhs = *(job_t *)lhs;
10513 job_t _rhs = *(job_t *)rhs;
10514 /* Sort in descending order. (Priority correlates to the soonishness with which you will be killed.) */
10515 if (_lhs->jetsam_priority > _rhs->jetsam_priority) {
10516 return -1;
10517 } else if (_lhs->jetsam_priority < _rhs->jetsam_priority) {
10518 return 1;
10519 }
10520 /* Priority is equal, so sort by sequence ID to maintain LRU order */
10521 if ((int)(_lhs->jetsam_seq - _rhs->jetsam_seq) > 0) {
10522 return 1;
10523 } else if ((int)(_lhs->jetsam_seq - _rhs->jetsam_seq) < 0) {
10524 return -1;
10525 }
10526
10527 return 0;
10528 });
10529
10530 jetsam_priority_entry_t *jpris = (jetsam_priority_entry_t *)calloc(totalpris, sizeof(jetsam_priority_entry_t));
10531 if (!launchd_assumes(jpris != NULL)) {
10532 result = ENOMEM;
10533 } else {
10534 for (i = 0; i < totalpris; i++) {
10535 jpris[i].pid = jobs[i]->p; /* Subject to time-of-check vs. time-of-use races, obviously. */
10536 jpris[i].flags |= jobs[i]->jetsam_frontmost ? kJetsamFlagsFrontmost : 0;
10537 jpris[i].hiwat_pages = jobs[i]->jetsam_memlimit;
10538 }
10539
10540 (void)launchd_assumes((result = sysctlbyname("kern.memorystatus_priority_list", NULL, NULL, &jpris[0], totalpris * sizeof(jetsam_priority_entry_t))) != -1);
10541 result = result != 0 ? errno : 0;
10542
10543 free(jpris);
10544 }
10545 }
10546
10547 if (jobs) {
10548 free(jobs);
10549 }
10550
10551 return result;
10552 }
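
/* Worked example of the comparator above on three hypothetical jobs:
 *
 *	(priority 7, seq 12), (priority 2, seq 3), (priority 2, seq 9)
 *
 * Descending priority puts the priority-7 job first; the two priority-2 jobs
 * tie and fall back to ascending sequence ID, preserving LRU order:
 *
 *	(7, 12), (2, 3), (2, 9)
 *
 * Since priority correlates with how soon a job is killed, the kernel's
 * kern.memorystatus_priority_list consumer sees the most-killable job first.
 */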