/*
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

static const char *const __rcs_file_version__ = "$Revision: 23932 $";

#include "config.h"
#include "launchd_core_logic.h"

#include <TargetConditionals.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/boolean.h>
#include <mach/message.h>
#include <mach/notify.h>
#include <mach/mig_errors.h>
#include <mach/mach_traps.h>
#include <mach/mach_interface.h>
#include <mach/host_info.h>
#include <mach/mach_host.h>
#include <mach/exception.h>
#include <mach/host_reboot.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/ucred.h>
#include <sys/fcntl.h>
#include <sys/un.h>
#include <sys/reboot.h>
#include <sys/wait.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/pipe.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/nd6.h>
#include <bsm/libbsm.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <paths.h>
#include <pwd.h>
#include <grp.h>
#include <ttyent.h>
#include <dlfcn.h>
#include <dirent.h>
#include <string.h>
#include <ctype.h>
#include <glob.h>
#include <spawn.h>
#include <libproc.h>
#include <malloc/malloc.h>
#include <pthread.h>
#if HAVE_SANDBOX
#define __APPLE_API_PRIVATE
#include <sandbox.h>
#endif
#if HAVE_QUARANTINE
#include <quarantine.h>
#endif
#if TARGET_OS_EMBEDDED
#include <sys/kern_memorystatus.h>
#else
/* To make my life easier. */
typedef struct jetsam_priority_entry {
	pid_t pid;
	uint32_t flags;
} jetsam_priority_entry_t;

enum {
	kJetsamFlagsFrontmost = (1 << 0),
	kJetsamFlagsKilled = (1 << 1)
};
#endif

#include "launch.h"
#include "launch_priv.h"
#include "launch_internal.h"
#include "bootstrap.h"
#include "bootstrap_priv.h"
#include "vproc.h"
#include "vproc_internal.h"

#include "reboot2.h"

#include "launchd.h"
#include "launchd_runtime.h"
#include "launchd_unix_ipc.h"
#include "protocol_vproc.h"
#include "protocol_vprocServer.h"
#include "protocol_job_reply.h"
#include "protocol_job_forward.h"
#include "mach_excServer.h"

/*
 * LAUNCHD_SAMPLE_TIMEOUT
 *   If the job hasn't exited in the given number of seconds after sending
 *   it a SIGTERM, start sampling it.
 * LAUNCHD_DEFAULT_EXIT_TIMEOUT
 *   If the job hasn't exited in the given number of seconds after sending
 *   it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
 */
#define LAUNCHD_MIN_JOB_RUN_TIME 10
#define LAUNCHD_SAMPLE_TIMEOUT 2
#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
#define LAUNCHD_SIGKILL_TIMER 5
#define LAUNCHD_CLEAN_KILL_TIMER 1
#define LAUNCHD_JETSAM_PRIORITY_UNSET 0xdead1eebabell
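/*
 * Taken together with job_stop() below: SIGTERM goes out at t = 0; when
 * Apple-internal logging is on and LAUNCHD_SAMPLE_TIMEOUT (2s) is shorter
 * than the job's exit timeout, the job is sampled first; and once the exit
 * timeout itself (LAUNCHD_DEFAULT_EXIT_TIMEOUT, 20s, unless the plist
 * overrides it) fires, the job is sent SIGKILL.
 */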

#define SHUTDOWN_LOG_DIR "/var/log/shutdown"

#define TAKE_SUBSET_NAME "TakeSubsetName"
#define TAKE_SUBSET_PID "TakeSubsetPID"
#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"

#define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)

extern char **environ;

struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle;
	mach_port_t reply_port;
};

static bool waiting4removal_new(job_t j, mach_port_t rp);
static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);

struct waiting_for_exit {
	LIST_ENTRY(waiting_for_exit) sle;
	mach_port_t rp;
	bool legacy;
};

static bool waiting4exit_new(job_t j, mach_port_t rp, bool legacy);
static void waiting4exit_delete(job_t j, struct waiting_for_exit *w4e);

struct machservice {
	SLIST_ENTRY(machservice) sle;
	SLIST_ENTRY(machservice) special_port_sle;
	LIST_ENTRY(machservice) name_hash_sle;
	LIST_ENTRY(machservice) port_hash_sle;
	job_t job;
	unsigned int gen_num;
	mach_port_name_t port;
	unsigned int isActive:1,
		reset:1,
		recv:1,
		hide:1,
		kUNCServer:1,
		per_user_hack:1,
		debug_on_close:1,
		per_pid:1,
		delete_on_destruction:1,
		drain_one_on_crash:1,
		drain_all_on_crash:1,
		/* Don't let the size of this field get too small. It has to be
		 * large enough to represent the reasonable range of special
		 * port numbers.
		 */
		special_port_num:20;

	const char name[0];
};
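/*
 * The zero-length "name[0]" member above is the pre-C99 flexible-array
 * idiom: the service name is stored inline, immediately past the struct,
 * in the same allocation. socketgroup, envitem, semaphoreitem, jobmgr_s
 * and job_s below all end with the same kind of inline string (job_new()
 * shows the matching calloc of sizeof(struct job_s) plus the label length).
 */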

static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */

#define PORT_HASH_SIZE 32
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
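/*
 * With a power-of-two table size (32 here), "x & (size - 1)" picks the same
 * bucket as "x % size" but with a single AND; the IS_POWER_OF_TWO branch is
 * decided at compile time and folded away. For example, a port index of 37
 * lands in bucket 37 & 31 == 5. ACTIVE_JOB_HASH below plays the same trick,
 * whereas MACHSERVICE_HASH_SIZE (37) is not a power of two, so plain modulo
 * is used for service names.
 */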

static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];

static void machservice_setup(launch_data_t obj, const char *key, void *context);
static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
static void machservice_resetport(job_t j, struct machservice *ms);
static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
static void machservice_ignore(job_t j, struct machservice *ms);
static void machservice_watch(job_t j, struct machservice *ms);
static void machservice_delete(job_t j, struct machservice *, bool port_died);
static void machservice_request_notifications(struct machservice *);
static mach_port_t machservice_port(struct machservice *);
static job_t machservice_job(struct machservice *);
static bool machservice_hidden(struct machservice *);
static bool machservice_active(struct machservice *);
static const char *machservice_name(struct machservice *);
static bootstrap_status_t machservice_status(struct machservice *);
void machservice_drain_port(struct machservice *);

struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;
	int *fds;
	unsigned int junkfds:1, fd_cnt:31;
	union {
		const char name[0];
		char name_init[0];
	};
};

static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds);
static void socketgroup_delete(job_t j, struct socketgroup *sg);
static void socketgroup_watch(job_t j, struct socketgroup *sg);
static void socketgroup_ignore(job_t j, struct socketgroup *sg);
static void socketgroup_callback(job_t j);
static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);

struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle;
	SLIST_ENTRY(calendarinterval) sle;
	job_t job;
	struct tm when;
	time_t when_next;
};

static LIST_HEAD(, calendarinterval) sorted_calendar_events;

static bool calendarinterval_new(job_t j, struct tm *w);
static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
static void calendarinterval_callback(void);
static void calendarinterval_sanity_check(void);

struct envitem {
	SLIST_ENTRY(envitem) sle;
	bool one_shot;
	char *value;
	union {
		const char key[0];
		char key_init[0];
	};
};

static bool envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot);
static void envitem_delete(job_t j, struct envitem *ei, bool global);
static void envitem_setup(launch_data_t obj, const char *key, void *context);
static void envitem_setup_one_shot(launch_data_t obj, const char *key, void *context);

struct limititem {
	SLIST_ENTRY(limititem) sle;
	struct rlimit lim;
	unsigned int setsoft:1, sethard:1, which:30;
};

static bool limititem_update(job_t j, int w, rlim_t r);
static void limititem_delete(job_t j, struct limititem *li);
static void limititem_setup(launch_data_t obj, const char *key, void *context);
#if HAVE_SANDBOX
static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
#endif

typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	PATH_EXISTS,
	PATH_MISSING,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
	PATH_CHANGES,
	DIR_NOT_EMPTY,
	// FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
} semaphore_reason_t;
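/*
 * Roughly how these map back to a job's plist: NETWORK_UP/NETWORK_DOWN back
 * the KeepAlive NetworkState criterion, SUCCESSFUL_EXIT/FAILED_EXIT back
 * SuccessfulExit, PATH_EXISTS/PATH_MISSING back PathState, the OTHER_JOB_*
 * values back OtherJobEnabled/OtherJobActive, and PATH_CHANGES/DIR_NOT_EMPTY
 * implement WatchPaths and QueueDirectories. The why_true/why_false pair in
 * semaphoreitem_dict_iter_context below is how a single boolean plist key
 * selects between the two polarities.
 */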

struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle;
	semaphore_reason_t why;
	bool watching_parent;
	int fd;
	union {
		const char what[0];
		char what_init[0];
	};
};

struct semaphoreitem_dict_iter_context {
	job_t j;
	semaphore_reason_t why_true;
	semaphore_reason_t why_false;
};

static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
static void semaphoreitem_callback(job_t j, struct kevent *kev);
static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);

#define ACTIVE_JOB_HASH_SIZE 32
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))

#define MACHSERVICE_HASH_SIZE 37

enum {
	JOBMGR_PHASE_HOPEFULLY_EXITS_FIRST,
	JOBMGR_PHASE_NORMAL,
	JOBMGR_PHASE_HOPEFULLY_EXITS_LAST,
	JOBMGR_PHASE_LAST,
};

static char *s_phases[JOBMGR_PHASE_LAST + 1] = {
	"HopefullyExitsFirst",
	"Normal",
	"HopefullyExitsLast",
	"Finalized",
};

struct jobmgr_s {
	kq_callback kqjobmgr_callback;
	SLIST_ENTRY(jobmgr_s) sle;
	SLIST_HEAD(, jobmgr_s) submgrs;
	LIST_HEAD(, job_s) jobs;
	LIST_HEAD(, job_s) jetsam_jobs;
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
	LIST_HEAD(, job_s) global_env_jobs;
	STAILQ_HEAD(, job_s) pending_samples;
	mach_port_t jm_port;
	mach_port_t req_port;
	mach_port_t init_audit_session;
	jobmgr_t parentmgr;
	int reboot_flags;
	int shutdown_phase;
	unsigned int global_on_demand_cnt;
	unsigned int hopefully_first_cnt;
	unsigned int normal_active_cnt;
	unsigned int jetsam_jobs_cnt;
	unsigned int shutting_down:1,
		session_initialized:1,
		killed_hopefully_first_jobs:1,
		killed_normal_jobs:1,
		killed_hopefully_last_jobs:1,
		killed_stray_jobs:1;
	char sample_log_file[PATH_MAX];
	uint32_t properties;
	union {
		const char name[0];
		char name_init[0];
	};
};

#define jobmgr_assumes(jm, e) \
	(unlikely(!(e)) ? jobmgr_log_bug(jm, __LINE__), false : true)
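/*
 * jobmgr_assumes() and its sibling job_assumes() below wrap expressions that
 * ought to be true: if the expression fails, the comma operator logs the
 * offending line via jobmgr_log_bug()/job_log_bug() and the macro evaluates
 * to false, so callers can write "if (!jobmgr_assumes(jm, expr)) goto out;"
 * instead of crashing on a broken invariant.
 */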

static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t session_port);
static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
static jobmgr_t jobmgr_parent(jobmgr_t jm);
static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
static bool jobmgr_label_test(jobmgr_t jm, const char *str);
static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
static void jobmgr_remove(jobmgr_t jm);
static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
static void jobmgr_dequeue_next_sample(jobmgr_t jm);
static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, bool dispatch, mach_port_t *mp);
static void job_export_all2(jobmgr_t jm, launch_data_t where);
static void jobmgr_callback(void *obj, struct kevent *kev);
static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
/* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
static void jobmgr_log_bug(jobmgr_t jm, unsigned int line);

#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)

struct suspended_peruser {
	LIST_ENTRY(suspended_peruser) sle;
	job_t j;
};

struct job_s {
	kq_callback kqjob_callback; /* MUST be first element of this structure for benefit of launchd's run loop. */
	LIST_ENTRY(job_s) sle;
	LIST_ENTRY(job_s) needing_session_sle;
	LIST_ENTRY(job_s) jetsam_sle;
	LIST_ENTRY(job_s) pid_hash_sle;
	LIST_ENTRY(job_s) label_hash_sle;
	LIST_ENTRY(job_s) global_env_sle;
	STAILQ_ENTRY(job_s) pending_samples_sle;
	SLIST_ENTRY(job_s) curious_jobs_sle;
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
	struct rusage ru;
	cpu_type_t *j_binpref;
	size_t j_binpref_cnt;
	mach_port_t j_port;
	mach_port_t wait_reply_port; /* we probably should switch to a list of waiters */
	uid_t mach_uid;
	jobmgr_t mgr;
	size_t argc;
	char **argv;
	char *prog;
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdinpath;
	char *stdoutpath;
	char *stderrpath;
	char *alt_exc_handler;
	struct vproc_shmem_s *shmem;
	struct machservice *lastlookup;
	unsigned int lastlookup_gennum;
#if HAVE_SANDBOX
	char *seatbelt_profile;
	uint64_t seatbelt_flags;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p;
	int last_exit_status;
	int stdin_fd;
	int fork_fd;
	int log_redirect_fd;
	int nice;
	int stdout_err_fd;
	long long jetsam_priority;
	long long main_thread_priority;
	uint32_t timeout;
	uint32_t exit_timeout;
	uint64_t sent_signal_time;
	uint64_t start_time;
	uint32_t min_run_time;
	uint32_t start_interval;
	uint32_t peruser_suspend_count; /* The number of jobs that have disabled this per-user launchd. */
#if 0
	/* someday ... */
	enum {
		J_TYPE_ANONYMOUS = 1,
		J_TYPE_LANCHSERVICES,
		J_TYPE_MACHINIT,
		J_TYPE_INETD,
	} j_type;
#endif
	bool debug:1, /* man launchd.plist --> Debug */
		ondemand:1, /* man launchd.plist --> KeepAlive == false */
		session_create:1, /* man launchd.plist --> SessionCreate */
		low_pri_io:1, /* man launchd.plist --> LowPriorityIO */
		no_init_groups:1, /* man launchd.plist --> InitGroups */
		priv_port_has_senders:1, /* a legacy mach_init concept to make bootstrap_create_server/service() work */
		importing_global_env:1, /* a hack during job importing */
		importing_hard_limits:1, /* a hack during job importing */
		setmask:1, /* man launchd.plist --> Umask */
		anonymous:1, /* a process that launchd knows about, but isn't managed by launchd */
		checkedin:1, /* a legacy mach_init concept to detect sick jobs */
		legacy_mach_job:1, /* a job created via bootstrap_create_server() */
		legacy_LS_job:1, /* a job created via spawn_via_launchd() */
		inetcompat:1, /* a legacy job that wants inetd compatible semantics */
		inetcompat_wait:1, /* a twist on inetd compatibility */
		start_pending:1, /* an event fired and the job should start, but not necessarily right away */
		globargv:1, /* man launchd.plist --> EnableGlobbing */
		wait4debugger:1, /* man launchd.plist --> WaitForDebugger */
		wait4debugger_oneshot:1, /* One-shot WaitForDebugger. */
		internal_exc_handler:1, /* MachExceptionHandler == true */
		stall_before_exec:1, /* a hack to support an option of spawn_via_launchd() */
		only_once:1, /* man launchd.plist --> LaunchOnlyOnce. Note: 5465184 Rename this to "HopefullyNeverExits" */
		currently_ignored:1, /* Make job_ignore() / job_watch() work. If these calls were balanced, then this wouldn't be necessary. */
		forced_peers_to_demand_mode:1, /* A job that forced all other jobs to be temporarily launch-on-demand */
		setnice:1, /* man launchd.plist --> Nice */
		hopefully_exits_last:1, /* man launchd.plist --> HopefullyExitsLast */
		removal_pending:1, /* a job was asked to be unloaded/removed while running, we'll remove it after it exits */
		sent_sigkill:1, /* job_kill() was called */
		sampling_complete:1, /* job_force_sampletool() was called (or is disabled) */
		debug_before_kill:1, /* enter the kernel debugger before killing a job */
		weird_bootstrap:1, /* a hack that launchd+launchctl use during jobmgr_t creation */
		start_on_mount:1, /* man launchd.plist --> StartOnMount */
		per_user:1, /* This job is a per-user launchd managed by the PID 1 launchd */
		hopefully_exits_first:1, /* man launchd.plist --> HopefullyExitsFirst */
		deny_unknown_mslookups:1, /* A flag for changing the behavior of bootstrap_look_up() */
		unload_at_mig_return:1, /* A job thoroughly confused launchd. We need to unload it ASAP */
		abandon_pg:1, /* man launchd.plist --> AbandonProcessGroup */
		ignore_pg_at_shutdown:1, /* During shutdown, do not send SIGTERM to stray processes in the process group of this job. */
		poll_for_vfs_changes:1, /* a hack to work around the fact that kqueues don't work on all filesystems */
		deny_job_creation:1, /* Don't let this job create new 'job_t' objects in launchd */
		kill_via_shmem:1, /* man launchd.plist --> EnableTransactions */
		sent_kill_via_shmem:1, /* We need to 'kill_via_shmem' once-and-only-once */
		clean_kill:1, /* The job was sent SIGKILL because it was clean. */
		pending_sample:1, /* This job needs to be sampled for some reason. */
		kill_after_sample:1, /* The job is to be killed after sampling. */
		is_being_sampled:1, /* We've spawned a sample tool to sample the job. */
		reap_after_trace:1, /* The job exited before sample did, so we should reap it after sample is done. */
		nosy:1, /* The job has an OtherJobEnabled KeepAlive criterion. */
		crashed:1, /* The job is the default Mach exception handler, and it crashed. */
		reaped:1, /* We've received NOTE_EXIT for the job. */
		stopped:1, /* job_stop() was called. */
		jetsam_frontmost:1, /* The job is considered "frontmost" by Jetsam. */
		needs_kickoff:1, /* The job is to be kept alive continuously, but it must be initially kicked off. */
		is_bootstrapper:1, /* The job is a bootstrapper. */
		has_console:1, /* The job owns the console. */
		clean_exit_timer_expired:1, /* The job was clean, received SIGKILL and failed to exit after LAUNCHD_CLEAN_KILL_TIMER seconds. */
		embedded_special_privileges:1, /* The job runs as a non-root user on embedded but has select privileges of the root user. */
		did_exec:1, /* The job exec(2)ed successfully. */
		migratory:1; /* The (anonymous) job called vprocmgr_switch_to_session(). */
	mode_t mask;
	pid_t tracing_pid;
	mach_port_t audit_session;
	uuid_t expected_audit_uuid;
	const char label[0];
};

#define LABEL_HASH_SIZE 53

static LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
static size_t hash_label(const char *label) __attribute__((pure));
static size_t hash_ms(const char *msstr) __attribute__((pure));
static SLIST_HEAD(, job_s) s_curious_jobs;

#define job_assumes(j, e) \
	(unlikely(!(e)) ? job_log_bug(j, __LINE__), false : true)

static void job_import_keys(launch_data_t obj, const char *key, void *context);
static void job_import_bool(job_t j, const char *key, bool value);
static void job_import_string(job_t j, const char *key, const char *value);
static void job_import_integer(job_t j, const char *key, long long value);
static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
static void job_import_array(job_t j, const char *key, launch_data_t value);
static void job_import_opaque(job_t j, const char *key, launch_data_t value);
static bool job_set_global_on_demand(job_t j, bool val);
static const char *job_active(job_t j);
static void job_watch(job_t j);
static void job_ignore(job_t j);
static void job_cleanup_after_tracer(job_t j);
static void job_reap(job_t j);
static bool job_useless(job_t j);
static bool job_keepalive(job_t j);
static void job_dispatch_curious_jobs(job_t j);
static void job_start(job_t j);
static void job_start_child(job_t j) __attribute__((noreturn));
static void job_setup_attributes(job_t j);
static bool job_setup_machport(job_t j);
static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
static void job_postfork_become_user(job_t j);
static void job_postfork_test_user(job_t j);
static void job_log_pids_with_weird_uids(job_t j);
static void job_setup_exception_port(job_t j, task_t target_task);
static void job_callback(void *obj, struct kevent *kev);
static void job_callback_proc(job_t j, struct kevent *kev);
static void job_callback_timer(job_t j, void *ident);
static void job_callback_read(job_t j, int ident);
static void job_log_stray_pg(job_t j);
static void job_log_children_without_exec(job_t j);
static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
static void job_kill(job_t j);
static void job_uncork_fork(job_t j);
static void job_log_stdouterr(job_t j);
static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
static void job_log_bug(job_t j, unsigned int line);
static void job_log_stdouterr2(job_t j, const char *msg, ...);
static void job_set_exception_port(job_t j, mach_port_t port);
static kern_return_t job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus);

static const struct {
	const char *key;
	int val;
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
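/*
 * Maps the sub-keys of a job's HardResourceLimits/SoftResourceLimits
 * dictionaries onto the corresponding setrlimit(2) resources;
 * limititem_setup() is the dictionary-walk callback that consults this
 * table while a job is being imported.
 */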

static time_t cronemu(int mon, int mday, int hour, int min);
static time_t cronemu_wday(int wday, int hour, int min);
static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
static bool cronemu_hour(struct tm *wtm, int hour, int min);
static bool cronemu_min(struct tm *wtm, int min);
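/*
 * The cronemu_*() helpers compute the next wall-clock time matching a
 * StartCalendarInterval specification, treating month/day/hour/minute
 * fields the job didn't specify as wildcards, much as cron does;
 * calendarinterval_setalarm() then arms a timer for that moment.
 */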

/* miscellaneous file local functions */
static size_t get_kern_max_proc(void);
static int dir_has_files(job_t j, const char *path);
static char **mach_cmd2argv(const char *string);
static size_t our_strhash(const char *s) __attribute__((pure));
static void extract_rcsid_substr(const char *i, char *o, size_t osz);
static void simulate_pid1_crash(void);
static pid_t basic_spawn(job_t j, void (*what_to_do)(job_t));
static void take_sample(job_t j);

void eliminate_double_reboot(void);

/* For Jetsam. */
static void jetsam_priority_from_job(job_t j, bool front, jetsam_priority_entry_t *jp);
static int job_cmp(const job_t *lhs, const job_t *rhs);
int launchd_set_jetsam_priorities(launch_data_t priorities);

/* file local globals */
static size_t total_children;
static size_t total_anon_children;
static mach_port_t the_exception_server;
static job_t workaround_5477111;
static LIST_HEAD(, job_s) s_needing_sessions;
mach_port_t g_audit_session_port = MACH_PORT_NULL;

#if !TARGET_OS_EMBEDDED
static job_t s_embedded_privileged_job = (job_t)&root_jobmgr;
au_asid_t g_audit_session = AU_DEFAUDITSID;
#else
static job_t s_embedded_privileged_job = NULL;
pid_t g_audit_session = 0;
#endif

static int s_no_hang_fd = -1;

/* process wide globals */
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool g_shutdown_debugging = false;
bool g_verbose_boot = false;
bool g_embedded_privileged_action = false;

void
job_ignore(job_t j)
{
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;

	if (j->currently_ignored) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring...");

	j->currently_ignored = true;

	if (j->poll_for_vfs_changes) {
		j->poll_for_vfs_changes = false;
		job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}

	SLIST_FOREACH(sg, &j->sockets, sle) {
		socketgroup_ignore(j, sg);
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		machservice_ignore(j, ms);
	}

	SLIST_FOREACH(si, &j->semaphores, sle) {
		semaphoreitem_ignore(j, si);
	}
}

void
job_watch(job_t j)
{
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;

	if (!j->currently_ignored) {
		return;
	}

	job_log(j, LOG_DEBUG, "Watching...");

	j->currently_ignored = false;

	SLIST_FOREACH(sg, &j->sockets, sle) {
		socketgroup_watch(j, sg);
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		machservice_watch(j, ms);
	}

	SLIST_FOREACH(si, &j->semaphores, sle) {
		semaphoreitem_watch(j, si);
	}
}

void
job_stop(job_t j)
{
	char extralog[100];
	int32_t newval = 1;

	if (unlikely(!j->p || j->anonymous)) {
		return;
	}

#if !TARGET_OS_EMBEDDED
	if (j->kill_via_shmem && !g_force_old_kill_path) {
		if (j->shmem) {
			if (!j->sent_kill_via_shmem) {
				j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
				newval = __sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
				j->sent_kill_via_shmem = true;
			} else {
				newval = j->shmem->vp_shmem_transaction_cnt;
			}
		} else {
			newval = -1;
		}
	} else if (j->kill_via_shmem) {
		job_log(j, LOG_DEBUG, "Stopping transactional job the old-fashioned way.");
	}
#endif

#if TARGET_OS_EMBEDDED
	if (g_embedded_privileged_action && s_embedded_privileged_job) {
		if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return;
	}
#endif

	j->sent_signal_time = runtime_get_opaque_time();

	if (newval < 0) {
		j->clean_kill = true;
		job_kill(j);
	} else {
		/*
		 * If sampling is enabled and SAMPLE_TIMEOUT is earlier than the job exit_timeout,
		 * then set a timer for SAMPLE_TIMEOUT seconds after killing
		 */
		unsigned int exit_timeout = j->exit_timeout;
		bool do_sample = do_apple_internal_logging;
		unsigned int timeout = exit_timeout;

		if (do_sample && (!exit_timeout || (LAUNCHD_SAMPLE_TIMEOUT < exit_timeout))) {
			timeout = LAUNCHD_SAMPLE_TIMEOUT;
		}

		job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);

		if (timeout) {
			j->sampling_complete = !do_sample;
			job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
				EV_ADD|EV_ONESHOT, NOTE_SECONDS, timeout, j) != -1);
		}

		if (!exit_timeout) {
			job_log(j, LOG_DEBUG, "This job has an infinite exit timeout");
		}

		if (j->kill_via_shmem) {
			snprintf(extralog, sizeof(extralog), ": %d remaining transactions", newval + 1);
		} else {
			extralog[0] = '\0';
		}

		job_log(j, LOG_DEBUG, "Sent SIGTERM signal%s", extralog);
	}

	j->stopped = true;
}
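/*
 * On the shmem path above: a transactional job (EnableTransactions) shares
 * a transaction counter with launchd via vproc shared memory. The first
 * stop marks the region VPROC_SHMEM_EXITING and decrements the counter; a
 * result below zero means no transactions were outstanding, so the job is
 * "clean" and job_kill() takes it down immediately instead of going through
 * SIGTERM plus the exit-timeout timer.
 */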

launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}
	if ((tmp = launch_data_new_integer(j->last_exit_status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->kill_via_shmem && (tmp = launch_data_new_bool(true))) {
		int32_t tmp_cnt = -1;

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);

		if (j->shmem) {
			tmp_cnt = j->shmem->vp_shmem_transaction_cnt;
		}

		if (j->sent_kill_via_shmem) {
			tmp_cnt++;
		}

		if ((tmp = launch_data_new_integer(tmp_cnt))) {
			launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TRANSACTIONCOUNT);
		}
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if (sg->junkfds) {
				continue;
			}
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}

static void
jobmgr_log_active_jobs(jobmgr_t jm)
{
	const char *why_active;
	jobmgr_t jmi;
	job_t ji;

	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_active_jobs(jmi);
	}

	LIST_FOREACH(ji, &jm->jobs, sle) {
		if ((why_active = job_active(ji))) {
			job_log(ji, LOG_DEBUG | LOG_CONSOLE, "%s", why_active);
		}
	}
}

static void
jobmgr_still_alive_with_check(jobmgr_t jm)
{
	jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Still alive with %lu/%lu (normal/anonymous) children. In %s phase of shutdown.", total_children, total_anon_children, s_phases[jm->shutdown_phase]);
	jobmgr_log_active_jobs(jm);
}

jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutting_down = true;

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (jm->parentmgr == NULL && pid1_magic) {
		jobmgr_assumes(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));
#if !TARGET_OS_EMBEDDED
		/* Kill the update thread. */
		jobmgr_assumes(jm, __sync_sub_and_fetch(&g_sync_frequency, 30) == 0);
#endif
	}

	return jobmgr_do_garbage_collection(jm);
}

void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
		}
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && ji->p) {
			job_log(ji, LOG_WARNING | LOG_CONSOLE, "Job has overstayed its welcome. Forcing removal.");
			ji->p = 0;
		}
		job_remove(ji);
	}

	if (jm->req_port) {
		jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
	}

	if (jm->jm_port) {
		jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (pid1_magic) {
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		runtime_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}

void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

#if TARGET_OS_EMBEDDED
	if (g_embedded_privileged_action && s_embedded_privileged_job) {
		if (!job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, s_embedded_privileged_job->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (g_embedded_privileged_action) {
		errno = EINVAL;
		return;
	}
#endif

	if (unlikely(j->p)) {
		if (j->anonymous) {
			job_reap(j);
		} else {
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}
			return;
		}
	}

	job_dispatch_curious_jobs(j);

	ipc_close_all_with_job(j);

	job_log(j, LOG_INFO, "Total rusage: utime %ld.%06u stime %ld.%06u maxrss %lu ixrss %lu idrss %lu isrss %lu minflt %lu majflt %lu nswap %lu inblock %lu oublock %lu msgsnd %lu msgrcv %lu nsignals %lu nvcsw %lu nivcsw %lu",
			j->ru.ru_utime.tv_sec, j->ru.ru_utime.tv_usec,
			j->ru.ru_stime.tv_sec, j->ru.ru_stime.tv_usec,
			j->ru.ru_maxrss, j->ru.ru_ixrss, j->ru.ru_idrss, j->ru.ru_isrss,
			j->ru.ru_minflt, j->ru.ru_majflt,
			j->ru.ru_nswap, j->ru.ru_inblock, j->ru.ru_oublock,
			j->ru.ru_msgsnd, j->ru.ru_msgrcv,
			j->ru.ru_nsignals, j->ru.ru_nvcsw, j->ru.ru_nivcsw);

	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	if (!job_assumes(j, j->fork_fd == 0)) {
		job_assumes(j, runtime_close(j->fork_fd) != -1);
	}

	if (j->stdin_fd) {
		job_assumes(j, runtime_close(j->stdin_fd) != -1);
	}

	if (!job_assumes(j, j->log_redirect_fd == 0)) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
	}

	if (j->j_port) {
		job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	}

	if (!job_assumes(j, j->wait_reply_port == MACH_PORT_NULL)) {
		job_assumes(j, launchd_mport_deallocate(j->wait_reply_port) == KERN_SUCCESS);
	}

	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	if (j->start_interval) {
		runtime_del_weak_ref();
		job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
	}
	if (j->poll_for_vfs_changes) {
		job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}
	if (j->exit_timeout) {
		/* Not a big deal if this fails. It means that the timer's already been freed. */
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->jetsam_priority != LAUNCHD_JETSAM_PRIORITY_UNSET) {
		LIST_REMOVE(j, jetsam_sle);
		j->mgr->jetsam_jobs_cnt--;
	}
	if (j->audit_session != MACH_PORT_NULL) {
		job_assumes(j, mach_port_deallocate(mach_task_self(), j->audit_session) == KERN_SUCCESS);
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if (j->embedded_special_privileges) {
		s_embedded_privileged_job = NULL;
	}

	kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	job_log(j, LOG_DEBUG, "Removed");

	free(j);
}

void
socketgroup_setup(launch_data_t obj, const char *key, void *context)
{
	launch_data_t tmp_oai;
	job_t j = context;
	size_t i, fd_cnt = 1;
	int *fds;

	if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
		fd_cnt = launch_data_array_get_count(obj);
	}

	fds = alloca(fd_cnt * sizeof(int));

	for (i = 0; i < fd_cnt; i++) {
		if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
			tmp_oai = launch_data_array_get_index(obj, i);
		} else {
			tmp_oai = obj;
		}

		fds[i] = launch_data_get_fd(tmp_oai);
	}

	socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);

	ipc_revoke_fds(obj);
}

bool
job_set_global_on_demand(job_t j, bool val)
{
	if (j->forced_peers_to_demand_mode && val) {
		return false;
	} else if (!j->forced_peers_to_demand_mode && !val) {
		return false;
	}

	if ((j->forced_peers_to_demand_mode = val)) {
		j->mgr->global_on_demand_cnt++;
	} else {
		j->mgr->global_on_demand_cnt--;
	}

	if (j->mgr->global_on_demand_cnt == 0) {
		jobmgr_dispatch_all(j->mgr, false);
	}

	return true;
}

bool
job_setup_machport(job_t j)
{
	mach_msg_size_t mxmsgsz;

	if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
		goto out_bad2;
	}

	if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
		job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
		goto out_bad;
	}

	return true;
out_bad2:
	job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
out_bad:
	return false;
}

job_t
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
{
	const char **argv = (const char **)mach_cmd2argv(cmd);
	job_t jr = NULL;

	if (!job_assumes(j, argv != NULL)) {
		goto out_bad;
	}

	jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);

	free(argv);

	/* jobs can easily be denied creation during shutdown */
	if (unlikely(jr == NULL)) {
		goto out_bad;
	}

	jr->mach_uid = uid;
	jr->ondemand = ond;
	jr->legacy_mach_job = true;
	jr->abandon_pg = true;
	jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */

	if (!job_setup_machport(jr)) {
		goto out_bad;
	}

	job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");

	return jr;

out_bad:
	if (jr) {
		job_remove(jr);
	}
	return NULL;
}

kern_return_t
job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus)
{
	if (j->p) {
		j->wait_reply_port = srp;
		return MIG_NO_REPLY;
	}

	*waitstatus = j->last_exit_status;

	return 0;
}

job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, anonpid };
	struct kinfo_proc kp;
	size_t len = sizeof(kp);
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (!jobmgr_assumes(jm, anonpid != 0)) {
		errno = EINVAL;
		return NULL;
	}

	if (!jobmgr_assumes(jm, anonpid < 100000)) {
		/* The kernel currently defines PID_MAX to be 99999, but that define isn't exported */
		errno = EINVAL;
		return NULL;
	}

	if (!jobmgr_assumes(jm, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
		return NULL;
	}

	if (unlikely(len != sizeof(kp))) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for nonexistent PID: %u", anonpid);
		errno = ESRCH;
		return NULL;
	}

	if (!jobmgr_assumes(jm, kp.kp_proc.p_comm[0] != '\0')) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(kp.kp_proc.p_stat == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, kp.kp_proc.p_comm);
	}

	if (unlikely(kp.kp_proc.p_flag & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, kp.kp_proc.p_comm);
	}

	kp_euid = kp.kp_eproc.e_ucred.cr_uid;
	kp_uid = kp.kp_eproc.e_pcred.p_ruid;
	kp_svuid = kp.kp_eproc.e_pcred.p_svuid;
	kp_egid = kp.kp_eproc.e_ucred.cr_gid;
	kp_gid = kp.kp_eproc.e_pcred.p_rgid;
	kp_svgid = kp.kp_eproc.e_pcred.p_svgid;

	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, kp.kp_proc.p_comm);
	}

	switch (kp.kp_eproc.e_ppid) {
	case 0:
		/* the kernel */
		break;
	case 1:
		if (!pid1_magic) {
			/* we cannot possibly find a parent job_t that is useful in this function */
			break;
		}
		/* fall through */
	default:
		jp = jobmgr_find_by_pid(jm, kp.kp_eproc.e_ppid, true);
		jobmgr_assumes(jm, jp != NULL);
		break;
	}

	if (jp && !jp->anonymous && unlikely(!(kp.kp_proc.p_flag & P_EXEC))) {
		job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u",
				kp.kp_proc.p_pid);
	}

	/* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	if (jobmgr_assumes(jm, (jr = job_new(jm, AUTO_PICK_ANONYMOUS_LABEL, kp.kp_proc.p_comm, NULL)) != NULL)) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT|NOTE_REAP;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		/* anonymous process reaping is messy */
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1) && job_assumes(jr, errno == ESRCH)) {
			/* zombies are weird */
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state && jm->hopefully_first_cnt == 0)) {
			job_log(jr, LOG_SCOLDING, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, kp.kp_eproc.e_ppid, jp ? ": " : "", jp ? jp->label : "");
	}

	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	return jr;
}

job_t
job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
{
	const char *const *argv_tmp = argv;
	char tmp_path[PATH_MAX];
	char auto_label[1000];
	const char *bn = NULL;
	char *co;
	size_t minlabel_len;
	size_t i, cc = 0;
	job_t j;

	launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);

	if (unlikely(jm->shutting_down)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(prog == NULL && argv == NULL)) {
		errno = EINVAL;
		return NULL;
	}

	char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
	if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
		if (prog) {
			bn = prog;
		} else {
			strlcpy(tmp_path, argv[0], sizeof(tmp_path));
			bn = basename(tmp_path); /* prog for auto labels is kp.kp_proc.p_comm */
		}
		snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
		label = auto_label;
		/* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
		minlabel_len = strlen(label) + MAXCOMLEN;
	} else {
		minlabel_len = strlen(label);
	}
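	/*
	 * The "0xdeadbeeffeedface"/"0xbabecafe" placeholder above only reserves
	 * space: it is exactly as wide as a %p rendering of a pointer on a
	 * 64-bit or 32-bit architecture, respectively. Once the job has been
	 * allocated, the label is rewritten below as "%p.%s.%s" with the job's
	 * own address, and the extra MAXCOMLEN bytes leave room for the
	 * NOTE_EXEC path to later append the exec'd command name (the "gross
	 * things" the comment above alludes to).
	 */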
1514
1515 j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);
1516
1517 if (!jobmgr_assumes(jm, j != NULL)) {
1518 return NULL;
1519 }
1520
1521 if (unlikely(label == auto_label)) {
1522 snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
1523 } else {
1524 strcpy((char *)j->label, label);
1525 }
1526 j->kqjob_callback = job_callback;
1527 j->mgr = jm;
1528 j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
1529 j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
1530 j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
1531 j->currently_ignored = true;
1532 j->ondemand = true;
1533 j->checkedin = true;
1534 j->jetsam_priority = LAUNCHD_JETSAM_PRIORITY_UNSET;
1535 uuid_clear(j->expected_audit_uuid);
1536
1537 if (prog) {
1538 j->prog = strdup(prog);
1539 if (!job_assumes(j, j->prog != NULL)) {
1540 goto out_bad;
1541 }
1542 }
1543
1544 if (likely(argv)) {
1545 while (*argv_tmp++) {
1546 j->argc++;
1547 }
1548
1549 for (i = 0; i < j->argc; i++) {
1550 cc += strlen(argv[i]) + 1;
1551 }
1552
1553 j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
1554
1555 if (!job_assumes(j, j->argv != NULL)) {
1556 goto out_bad;
1557 }
1558
1559 co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));
1560
1561 for (i = 0; i < j->argc; i++) {
1562 j->argv[i] = co;
1563 strcpy(co, argv[i]);
1564 co += strlen(argv[i]) + 1;
1565 }
1566 j->argv[i] = NULL;
1567 }
1568
1569 if( strcmp(j->label, "com.apple.WindowServer") == 0 ) {
1570 j->has_console = true;
1571 }
1572
1573 LIST_INSERT_HEAD(&jm->jobs, j, sle);
1574 LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);
1575 uuid_clear(j->expected_audit_uuid);
1576
1577 job_log(j, LOG_DEBUG, "Conceived");
1578
1579 return j;
1580
1581 out_bad:
1582 if (j->prog) {
1583 free(j->prog);
1584 }
1585 free(j);
1586
1587 return NULL;
1588 }
1589
1590 job_t
1591 job_import(launch_data_t pload)
1592 {
1593 job_t j = jobmgr_import2(root_jobmgr, pload);
1594
1595 if (unlikely(j == NULL)) {
1596 return NULL;
1597 }
1598
1599 /* Since jobs are effectively stalled until they get security sessions assigned
1600 * to them, we may wish to reconsider this behavior of calling the job "enabled"
1601 * as far as other jobs with the OtherJobEnabled KeepAlive criterion set.
1602 */
1603 job_dispatch_curious_jobs(j);
1604 return job_dispatch(j, false);
1605 }
1606
1607 launch_data_t
1608 job_import_bulk(launch_data_t pload)
1609 {
1610 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
1611 job_t *ja;
1612 size_t i, c = launch_data_array_get_count(pload);
1613
1614 ja = alloca(c * sizeof(job_t));
1615
1616 for (i = 0; i < c; i++) {
1617 if( (likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH ) {
1618 errno = 0;
1619 }
1620 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
1621 }
1622
1623 for (i = 0; i < c; i++) {
1624 if (likely(ja[i])) {
1625 job_dispatch_curious_jobs(ja[i]);
1626 job_dispatch(ja[i], false);
1627 }
1628 }
1629
1630 return resp;
1631 }
1632
1633 void
1634 job_import_bool(job_t j, const char *key, bool value)
1635 {
1636 bool found_key = false;
1637
1638 switch (key[0]) {
1639 case 'a':
1640 case 'A':
1641 if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
1642 j->abandon_pg = value;
1643 found_key = true;
1644 }
1645 break;
1646 case 'k':
1647 case 'K':
1648 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
1649 j->ondemand = !value;
1650 found_key = true;
1651 }
1652 break;
1653 case 'o':
1654 case 'O':
1655 if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
1656 j->ondemand = value;
1657 found_key = true;
1658 }
1659 break;
1660 case 'd':
1661 case 'D':
1662 if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
1663 j->debug = value;
1664 found_key = true;
1665 } else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
1666 job_assumes(j, !value);
1667 found_key = true;
1668 }
1669 break;
1670 case 'h':
1671 case 'H':
1672 if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
1673 j->hopefully_exits_last = value;
1674 found_key = true;
1675 } else if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSFIRST) == 0) {
1676 j->hopefully_exits_first = value;
1677 found_key = true;
1678 }
1679 break;
1680 case 's':
1681 case 'S':
1682 if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
1683 j->session_create = value;
1684 found_key = true;
1685 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
1686 j->start_on_mount = value;
1687 found_key = true;
1688 } else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
1689 /* this only does something on Mac OS X 10.4 "Tiger" */
1690 found_key = true;
1691 }
1692 break;
1693 case 'l':
1694 case 'L':
1695 if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
1696 j->low_pri_io = value;
1697 found_key = true;
1698 } else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
1699 j->only_once = value;
1700 found_key = true;
1701 }
1702 break;
1703 case 'm':
1704 case 'M':
1705 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
1706 j->internal_exc_handler = value;
1707 found_key = true;
1708 }
1709 break;
1710 case 'i':
1711 case 'I':
1712 if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
1713 if (getuid() != 0) {
1714 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
1715 return;
1716 }
1717 j->no_init_groups = !value;
1718 found_key = true;
1719 } else if( strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0 ) {
1720 j->ignore_pg_at_shutdown = value;
1721 found_key = true;
1722 }
1723 break;
1724 case 'r':
1725 case 'R':
1726 if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
1727 if (value) {
1728 /* We don't want value == false to change j->start_pending */
1729 j->start_pending = true;
1730 }
1731 found_key = true;
1732 }
1733 break;
1734 case 'e':
1735 case 'E':
1736 if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
1737 j->globargv = value;
1738 found_key = true;
1739 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
1740 j->kill_via_shmem = value;
1741 found_key = true;
1742 } else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
1743 j->debug_before_kill = value;
1744 found_key = true;
1745 } else if( strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0 ) {
1746 if( !s_embedded_privileged_job ) {
1747 j->embedded_special_privileges = value;
1748 s_embedded_privileged_job = j;
1749 } else {
1750 job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
1751 }
1752 found_key = true;
1753 }
1754 break;
1755 case 'w':
1756 case 'W':
1757 if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
1758 j->wait4debugger = value;
1759 found_key = true;
1760 }
1761 break;
1762 default:
1763 break;
1764 }
1765
1766 if (unlikely(!found_key)) {
1767 job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
1768 }
1769 }
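
/* Note the inversion in the 'k' case above: a plist that says "KeepAlive = true"
 * clears j->ondemand, i.e. launchd keeps the job running rather than launching it
 * on demand. For example, a hypothetical plist fragment:
 *
 *	<key>KeepAlive</key>
 *	<true/>
 *
 * yields j->ondemand == false, whereas the legacy "OnDemand" key sets j->ondemand
 * directly.
 */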
1770
1771 void
1772 job_import_string(job_t j, const char *key, const char *value)
1773 {
1774 char **where2put = NULL;
1775
1776 switch (key[0]) {
1777 case 'm':
1778 case 'M':
1779 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
1780 where2put = &j->alt_exc_handler;
1781 }
1782 break;
1783 case 'p':
1784 case 'P':
1785 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
1786 return;
1787 }
1788 break;
1789 case 'l':
1790 case 'L':
1791 if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
1792 return;
1793 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
1794 return;
1795 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
1796 return;
1797 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
1798 return;
1799 }
1800 break;
1801 case 'r':
1802 case 'R':
1803 if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
1804 if (getuid() != 0) {
1805 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
1806 return;
1807 }
1808 where2put = &j->rootdir;
1809 }
1810 break;
1811 case 'w':
1812 case 'W':
1813 if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
1814 where2put = &j->workingdir;
1815 }
1816 break;
1817 case 'u':
1818 case 'U':
1819 if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
1820 if (getuid() != 0) {
1821 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
1822 return;
1823 } else if (strcmp(value, "root") == 0) {
1824 return;
1825 }
1826 where2put = &j->username;
1827 }
1828 break;
1829 case 'g':
1830 case 'G':
1831 if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
1832 if (getuid() != 0) {
1833 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
1834 return;
1835 } else if (strcmp(value, "wheel") == 0) {
1836 return;
1837 }
1838 where2put = &j->groupname;
1839 }
1840 break;
1841 case 's':
1842 case 'S':
1843 if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
1844 where2put = &j->stdoutpath;
1845 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
1846 where2put = &j->stderrpath;
1847 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
1848 where2put = &j->stdinpath;
1849 j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
1850 if (job_assumes(j, j->stdin_fd != -1)) {
1851 /* open() should not block, but regular IO by the job should */
1852 job_assumes(j, fcntl(j->stdin_fd, F_SETFL, 0) != -1);
1853 /* XXX -- EV_CLEAR should make named pipes happy? */
1854 job_assumes(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j) != -1);
1855 } else {
1856 j->stdin_fd = 0;
1857 }
1858 #if HAVE_SANDBOX
1859 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
1860 where2put = &j->seatbelt_profile;
1861 #endif
1862 }
1863 break;
1864 default:
1865 job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
1866 break;
1867 }
1868
1869 if (likely(where2put)) {
1870 job_assumes(j, (*where2put = strdup(value)) != NULL);
1871 } else {
1872 /* See rdar://problem/5496612. These two are okay. */
1873 if( strcmp(key, "SHAuthorizationRight") != 0 && strcmp(key, "ServiceDescription") != 0 ) {
1874 job_log(j, LOG_WARNING, "Unknown key: %s", key);
1875 }
1876 }
1877 }
1878
1879 void
1880 job_import_integer(job_t j, const char *key, long long value)
1881 {
1882 switch (key[0]) {
1883 case 'e':
1884 case 'E':
1885 if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
1886 if (unlikely(value < 0)) {
1887 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
1888 } else if (unlikely(value > UINT32_MAX)) {
1889 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
1890 } else {
1891 j->exit_timeout = (typeof(j->exit_timeout)) value;
1892 }
1893 } else if( strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0 ) {
1894 j->main_thread_priority = value;
1895 }
1896 break;
1897 case 'j':
1898 case 'J':
1899 if( strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 ) {
1900 job_log(j, LOG_DEBUG, "Importing job with priority: %lld", value);
1901 j->jetsam_priority = (typeof(j->jetsam_priority))value;
1902 LIST_INSERT_HEAD(&j->mgr->jetsam_jobs, j, jetsam_sle);
1903 j->mgr->jetsam_jobs_cnt++;
1904 }
1905 break;
1906 case 'n':
1907 case 'N':
1908 if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
1909 if (unlikely(value < PRIO_MIN)) {
1910 job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
1911 } else if (unlikely(value > PRIO_MAX)) {
1912 job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
1913 } else {
1914 j->nice = (typeof(j->nice)) value;
1915 j->setnice = true;
1916 }
1917 }
1918 break;
1919 case 't':
1920 case 'T':
1921 if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
1922 if (unlikely(value < 0)) {
1923 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
1924 } else if (unlikely(value > UINT32_MAX)) {
1925 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
1926 } else {
1927 j->timeout = (typeof(j->timeout)) value;
1928 }
1929 } else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
1930 if (value < 0) {
1931 job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
1932 } else if (value > UINT32_MAX) {
1933 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
1934 } else {
1935 j->min_run_time = (typeof(j->min_run_time)) value;
1936 }
1937 }
1938 break;
1939 case 'u':
1940 case 'U':
1941 if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
1942 j->mask = value;
1943 j->setmask = true;
1944 }
1945 break;
1946 case 's':
1947 case 'S':
1948 if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
1949 if (unlikely(value <= 0)) {
1950 job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
1951 } else if (unlikely(value > UINT32_MAX)) {
1952 job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
1953 } else {
1954 runtime_add_weak_ref();
1955 j->start_interval = (typeof(j->start_interval)) value;
1956
1957 job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
1958 }
1959 #if HAVE_SANDBOX
1960 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
1961 j->seatbelt_flags = value;
1962 #endif
1963 }
1964
1965 break;
1966 default:
1967 job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
1968 break;
1969 }
1970 }
1971
1972 void
1973 job_import_opaque(job_t j, const char *key, launch_data_t value)
1975 {
1976 switch (key[0]) {
1977 case 'q':
1978 case 'Q':
1979 #if HAVE_QUARANTINE
1980 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
1981 size_t tmpsz = launch_data_get_opaque_size(value);
1982
1983 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
1984 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
1985 j->quarantine_data_sz = tmpsz;
1986 }
1987 }
1988 #endif
break;
1989 case 's':
1990 case 'S':
1991 if( strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0 ) {
1992 size_t tmpsz = launch_data_get_opaque_size(value);
1993 if( job_assumes(j, tmpsz == sizeof(uuid_t)) ) {
1994 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
1995 }
1996 }
1997 break;
1998 default:
1999 break;
2000 }
2001 }
2002
2003 static void
2004 policy_setup(launch_data_t obj, const char *key, void *context)
2005 {
2006 job_t j = context;
2007 bool found_key = false;
2008
2009 switch (key[0]) {
2010 case 'd':
2011 case 'D':
2012 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2013 j->deny_job_creation = launch_data_get_bool(obj);
2014 found_key = true;
2015 }
2016 break;
2017 default:
2018 break;
2019 }
2020
2021 if (unlikely(!found_key)) {
2022 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2023 }
2024 }
2025
2026 void
2027 job_import_dictionary(job_t j, const char *key, launch_data_t value)
2028 {
2029 launch_data_t tmp;
2030
2031 switch (key[0]) {
2032 case 'p':
2033 case 'P':
2034 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2035 launch_data_dict_iterate(value, policy_setup, j);
2036 }
2037 break;
2038 case 'k':
2039 case 'K':
2040 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2041 launch_data_dict_iterate(value, semaphoreitem_setup, j);
2042 }
2043 break;
2044 case 'i':
2045 case 'I':
2046 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2047 j->inetcompat = true;
2048 j->abandon_pg = true;
2049 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2050 j->inetcompat_wait = launch_data_get_bool(tmp);
2051 }
2052 }
2053 break;
2054 case 'e':
2055 case 'E':
2056 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2057 launch_data_dict_iterate(value, envitem_setup, j);
2058 }
2059 break;
2060 case 'u':
2061 case 'U':
2062 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2063 j->importing_global_env = true;
2064 launch_data_dict_iterate(value, envitem_setup, j);
2065 j->importing_global_env = false;
2066 }
2067 break;
2068 case 's':
2069 case 'S':
2070 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2071 launch_data_dict_iterate(value, socketgroup_setup, j);
2072 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2073 calendarinterval_new_from_obj(j, value);
2074 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2075 launch_data_dict_iterate(value, limititem_setup, j);
2076 #if HAVE_SANDBOX
2077 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2078 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2079 #endif
2080 }
2081 break;
2082 case 'h':
2083 case 'H':
2084 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2085 j->importing_hard_limits = true;
2086 launch_data_dict_iterate(value, limititem_setup, j);
2087 j->importing_hard_limits = false;
2088 }
2089 break;
2090 case 'm':
2091 case 'M':
2092 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2093 launch_data_dict_iterate(value, machservice_setup, j);
2094 }
2095 break;
2096 default:
2097 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2098 break;
2099 }
2100 }
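
/* Illustrative only: a hypothetical inetdCompatibility fragment such as
 *
 *	<key>inetdCompatibility</key>
 *	<dict>
 *		<key>Wait</key>
 *		<false/>
 *	</dict>
 *
 * takes the 'i' branch above, setting j->inetcompat (and forcing abandon_pg), with
 * j->inetcompat_wait mirroring the "Wait" sub-key.
 */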
2101
2102 void
2103 job_import_array(job_t j, const char *key, launch_data_t value)
2104 {
2105 size_t i, value_cnt = launch_data_array_get_count(value);
2106 const char *str;
2107
2108 switch (key[0]) {
2109 case 'p':
2110 case 'P':
2111 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
2112 return;
2113 }
2114 break;
2115 case 'l':
2116 case 'L':
2117 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2118 return;
2119 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2120 return;
2121 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2122 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2123 return;
2124 }
2125 break;
2126 case 'q':
2127 case 'Q':
2128 if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
2129 for (i = 0; i < value_cnt; i++) {
2130 str = launch_data_get_string(launch_data_array_get_index(value, i));
2131 if (job_assumes(j, str != NULL)) {
2132 semaphoreitem_new(j, DIR_NOT_EMPTY, str);
2133 }
2134 }
2135
2136 }
2137 break;
2138 case 'w':
2139 case 'W':
2140 if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
2141 for (i = 0; i < value_cnt; i++) {
2142 str = launch_data_get_string(launch_data_array_get_index(value, i));
2143 if (job_assumes(j, str != NULL)) {
2144 semaphoreitem_new(j, PATH_CHANGES, str);
2145 }
2146 }
2147 }
2148 break;
2149 case 'b':
2150 case 'B':
2151 if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
2152 socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
2153 } else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
2154 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
2155 j->j_binpref_cnt = value_cnt;
2156 for (i = 0; i < value_cnt; i++) {
2157 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
2158 }
2159 }
2160 }
2161 break;
2162 case 's':
2163 case 'S':
2164 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2165 for (i = 0; i < value_cnt; i++) {
2166 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
2167 }
2168 }
2169 break;
2170 default:
2171 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
2172 break;
2173 }
2174 }
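
/* Illustrative only: the array keys above map one-to-one onto semaphore items. A
 * hypothetical plist fragment such as
 *
 *	<key>WatchPaths</key>
 *	<array>
 *		<string>/etc/example.conf</string>
 *	</array>
 *
 * becomes one PATH_CHANGES semaphoreitem per string, and QueueDirectories entries
 * likewise become DIR_NOT_EMPTY items.
 */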
2175
2176 void
2177 job_import_keys(launch_data_t obj, const char *key, void *context)
2178 {
2179 job_t j = context;
2180 launch_data_type_t kind;
2181
2182 if (!launchd_assumes(obj != NULL)) {
2183 return;
2184 }
2185
2186 kind = launch_data_get_type(obj);
2187
2188 switch (kind) {
2189 case LAUNCH_DATA_BOOL:
2190 job_import_bool(j, key, launch_data_get_bool(obj));
2191 break;
2192 case LAUNCH_DATA_STRING:
2193 job_import_string(j, key, launch_data_get_string(obj));
2194 break;
2195 case LAUNCH_DATA_INTEGER:
2196 job_import_integer(j, key, launch_data_get_integer(obj));
2197 break;
2198 case LAUNCH_DATA_DICTIONARY:
2199 job_import_dictionary(j, key, obj);
2200 break;
2201 case LAUNCH_DATA_ARRAY:
2202 job_import_array(j, key, obj);
2203 break;
2204 case LAUNCH_DATA_OPAQUE:
2205 job_import_opaque(j, key, obj);
2206 break;
2207 default:
2208 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
2209 break;
2210 }
2211 }
2212
2213 job_t
2214 jobmgr_import2(jobmgr_t jm, launch_data_t pload)
2215 {
2216 launch_data_t tmp, ldpa;
2217 const char *label = NULL, *prog = NULL;
2218 const char **argv = NULL;
2219 job_t j;
2220
2221 if (!jobmgr_assumes(jm, pload != NULL)) {
2222 errno = EINVAL;
2223 return NULL;
2224 }
2225
2226 if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
2227 errno = EINVAL;
2228 return NULL;
2229 }
2230
2231 if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
2232 errno = EINVAL;
2233 return NULL;
2234 }
2235
2236 if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
2237 errno = EINVAL;
2238 return NULL;
2239 }
2240
2241 if (unlikely(!(label = launch_data_get_string(tmp)))) {
2242 errno = EINVAL;
2243 return NULL;
2244 }
2245
2246 #if TARGET_OS_EMBEDDED
2247 if( unlikely(g_embedded_privileged_action && s_embedded_privileged_job) ) {
2248 if( unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME))) ) {
2249 errno = EPERM;
2250 return NULL;
2251 }
2252
2253 const char *username = NULL;
2254 if( likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING) ) {
2255 username = launch_data_get_string(tmp);
2256 } else {
2257 errno = EPERM;
2258 return NULL;
2259 }
2260
2261 if( !jobmgr_assumes(jm, s_embedded_privileged_job->username != NULL && username != NULL) ) {
2262 errno = EPERM;
2263 return NULL;
2264 }
2265
2266 if( unlikely(strcmp(s_embedded_privileged_job->username, username) != 0) ) {
2267 errno = EPERM;
2268 return NULL;
2269 }
2270 } else if( g_embedded_privileged_action ) {
2271 errno = EINVAL;
2272 return NULL;
2273 }
2274 #endif
2275
2276 if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
2277 (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
2278 prog = launch_data_get_string(tmp);
2279 }
2280
2281 if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
2282 size_t i, c;
2283
2284 if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
2285 errno = EINVAL;
2286 return NULL;
2287 }
2288
2289 c = launch_data_array_get_count(ldpa);
2290
2291 argv = alloca((c + 1) * sizeof(char *));
2292
2293 for (i = 0; i < c; i++) {
2294 tmp = launch_data_array_get_index(ldpa, i);
2295
2296 if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
2297 errno = EINVAL;
2298 return NULL;
2299 }
2300
2301 argv[i] = launch_data_get_string(tmp);
2302 }
2303
2304 argv[i] = NULL;
2305 }
2306
2307 /* Hack to make sure the proper job manager is set the whole way through. */
2308 launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2309 if( session ) {
2310 jm = jobmgr_find_by_name(jm, launch_data_get_string(session)) ?: jm;
2311 }
2312
2313 jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);
2314
2315 if (unlikely((j = job_find(label)) != NULL)) {
2316 errno = EEXIST;
2317 return NULL;
2318 } else if (unlikely(!jobmgr_label_test(jm, label))) {
2319 errno = EINVAL;
2320 return NULL;
2321 }
2322
2323 if (likely(j = job_new(jm, label, prog, argv))) {
2324 launch_data_dict_iterate(pload, job_import_keys, j);
2325 if( !uuid_is_null(j->expected_audit_uuid) ) {
2326 uuid_string_t uuid_str;
2327 uuid_unparse(j->expected_audit_uuid, uuid_str);
2328 job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
2329 LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
2330 errno = ENEEDAUTH;
2331 } else {
2332 job_log(j, LOG_DEBUG, "No security session specified.");
2333 j->audit_session = MACH_PORT_NULL;
2334 }
2335 }
2336
2337 return j;
2338 }
2339
2340 bool
2341 jobmgr_label_test(jobmgr_t jm, const char *str)
2342 {
2343 char *endstr = NULL;
2344 const char *ptr;
2345
2346 if (str[0] == '\0') {
2347 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
2348 return false;
2349 }
2350
2351 for (ptr = str; *ptr; ptr++) {
2352 if (iscntrl(*ptr)) {
2353 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
2354 return false;
2355 }
2356 }
2357
2358 strtoll(str, &endstr, 0);
2359
2360 if (str != endstr) {
2361 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to begin with numbers: %s", str);
2362 return false;
2363 }
2364
2365 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
2366 (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
2367 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
2368 return false;
2369 }
2370
2371 return true;
2372 }
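
/* For reference, labels rejected by jobmgr_label_test() include (examples are
 * hypothetical): "" (empty), "123.example" (begins with a number), anything
 * containing ASCII control characters, and anything carrying the reserved
 * prefixes "com.apple.launchd" or "com.apple.launchctl".
 */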
2373
2374 job_t
2375 job_find(const char *label)
2376 {
2377 job_t ji;
2378
2379 LIST_FOREACH(ji, &label_hash[hash_label(label)], label_hash_sle) {
2380 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
2381 continue; /* 5351245 and 5488633 respectively */
2382 }
2383
2384 if (strcmp(ji->label, label) == 0) {
2385 return ji;
2386 }
2387 }
2388
2389 errno = ESRCH;
2390 return NULL;
2391 }
2392
2393 /* Should try to consolidate this with job_mig_intran2() and jobmgr_find_by_pid(). */
2394 job_t
2395 jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
2396 {
2397 job_t ji = NULL;
2398 LIST_FOREACH( ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle ) {
2399 if (ji->p == p && (!ji->anonymous || anon_okay)) {
2400 return ji;
2401 }
2402 }
2403
2404 jobmgr_t jmi = NULL;
2405 SLIST_FOREACH( jmi, &jm->submgrs, sle ) {
2406 if( (ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay)) ) {
2407 break;
2408 }
2409 }
2410
2411 return ji;
2412 }
2413
2414 job_t
2415 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
2416 {
2417 job_t ji;
2418
2419 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
2420 if (ji->p == p) {
2421 return ji;
2422 }
2423 }
2424
2425 return create_anon ? job_new_anonymous(jm, p) : NULL;
2426 }
2427
2428 job_t
2429 job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
2430 {
2431 jobmgr_t jmi;
2432 job_t ji;
2433
2434 if (jm->jm_port == mport) {
2435 return jobmgr_find_by_pid(jm, upid, true);
2436 }
2437
2438 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
2439 job_t jr;
2440
2441 if ((jr = job_mig_intran2(jmi, mport, upid))) {
2442 return jr;
2443 }
2444 }
2445
2446 LIST_FOREACH(ji, &jm->jobs, sle) {
2447 if (ji->j_port == mport) {
2448 return ji;
2449 }
2450 }
2451
2452 return NULL;
2453 }
2454
2455 job_t
2456 job_mig_intran(mach_port_t p)
2457 {
2458 struct ldcred *ldc = runtime_get_caller_creds();
2459 job_t jr;
2460
2461 jr = job_mig_intran2(root_jobmgr, p, ldc->pid);
2462
2463 if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
2464 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 };
2465 struct kinfo_proc kp;
2466 size_t len = sizeof(kp);
2467
2468 mib[3] = ldc->pid;
2469
2470 if (jobmgr_assumes(root_jobmgr, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)
2471 && jobmgr_assumes(root_jobmgr, len == sizeof(kp))) {
2472 jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc->pid, ldc->uid, ldc->euid, p, kp.kp_proc.p_comm);
2473 }
2474 }
2475
2476 return jr;
2477 }
2478
2479 job_t
2480 job_find_by_service_port(mach_port_t p)
2481 {
2482 struct machservice *ms;
2483
2484 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
2485 if (ms->recv && (ms->port == p)) {
2486 return ms->job;
2487 }
2488 }
2489
2490 return NULL;
2491 }
2492
2493 void
2494 job_mig_destructor(job_t j)
2495 {
2496 /*
2497 * 5477111
2498 *
2499 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
2500 */
2501
2502 if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
2503 job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
2504 job_remove(j);
2505 }
2506
2507 workaround_5477111 = NULL;
2508
2509 calendarinterval_sanity_check();
2510 }
2511
2512 void
2513 job_export_all2(jobmgr_t jm, launch_data_t where)
2514 {
2515 jobmgr_t jmi;
2516 job_t ji;
2517
2518 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
2519 job_export_all2(jmi, where);
2520 }
2521
2522 LIST_FOREACH(ji, &jm->jobs, sle) {
2523 launch_data_t tmp;
2524
2525 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
2526 launch_data_dict_insert(where, tmp, ji->label);
2527 }
2528 }
2529 }
2530
2531 launch_data_t
2532 job_export_all(void)
2533 {
2534 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
2535
2536 if (launchd_assumes(resp != NULL)) {
2537 job_export_all2(root_jobmgr, resp);
2538 }
2539
2540 return resp;
2541 }
2542
2543 void
2544 job_log_stray_pg(job_t j)
2545 {
2546 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PGRP, j->p };
2547 size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
2548 struct kinfo_proc *kp;
2549
2550 if (!do_apple_internal_logging) {
2551 return;
2552 }
2553
2554 runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);
2555
2556 if (!job_assumes(j, (kp = malloc(len)) != NULL)) {
2557 return;
2558 }
2559 if (!job_assumes(j, sysctl(mib, 4, kp, &len, NULL, 0) != -1)) {
2560 goto out;
2561 }
2562
2563 kp_cnt = len / sizeof(struct kinfo_proc);
2564
2565 for (i = 0; i < kp_cnt; i++) {
2566 pid_t p_i = kp[i].kp_proc.p_pid;
2567 pid_t pp_i = kp[i].kp_eproc.e_ppid;
2568 const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
2569 const char *n = kp[i].kp_proc.p_comm;
2570
2571 if (p_i == j->p) {
2572 continue;
2573 } else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
2574 continue;
2575 }
2576
2577 job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u %s", z, p_i, pp_i, n);
2578 }
2579
2580 out:
2581 free(kp);
2582 }
2583
2584 void
2585 job_reap(job_t j)
2586 {
2587 struct rusage ru;
2588 int status;
2589
2590 bool is_system_bootstrapper = j->is_bootstrapper && pid1_magic && !j->mgr->parentmgr;
2591
2592 job_log(j, LOG_DEBUG, "Reaping");
2593
2594 if (j->shmem) {
2595 job_assumes(j, vm_deallocate(mach_task_self(), (vm_address_t)j->shmem, getpagesize()) == 0);
2596 j->shmem = NULL;
2597 }
2598
2599 if (unlikely(j->weird_bootstrap)) {
2600 int64_t junk = 0;
2601 job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
2602 }
2603
2604 if (j->log_redirect_fd && !j->legacy_LS_job) {
2605 job_log_stdouterr(j); /* one last chance */
2606
2607 if (j->log_redirect_fd) {
2608 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
2609 j->log_redirect_fd = 0;
2610 }
2611 }
2612
2613 if (j->fork_fd) {
2614 job_assumes(j, runtime_close(j->fork_fd) != -1);
2615 j->fork_fd = 0;
2616 }
2617
2618 if (j->anonymous) {
2619 status = 0;
2620 memset(&ru, 0, sizeof(ru));
2621 } else {
2622 /*
2623 * The job is dead. While the PID/PGID is still known to be
2624 * valid, try to kill abandoned descendant processes.
2625 */
2626 job_log_stray_pg(j);
2627 if (!j->abandon_pg) {
2628 if (unlikely(runtime_killpg(j->p, SIGTERM) == -1 && errno != ESRCH)) {
2629 #ifdef __LP64__
2630 job_log(j, LOG_APPLEONLY, "Bug: 5487498");
2631 #else
2632 job_assumes(j, false);
2633 #endif
2634 }
2635 }
2636
2637 /*
2638 * 5020256
2639 *
2640 * The current implementation of ptrace() causes the traced process to
2641 * be abducted away from the true parent and adopted by the tracer.
2642 *
2643 * Once the tracing process relinquishes control, the kernel then
2644 * restores the true parent/child relationship.
2645 *
2646 * Unfortunately, the wait*() family of APIs is unaware of the temporary
2647 * data structure changes, and it returns an error if reality hasn't
2648 * been restored by the time it is called.
2649 */
2650 if (!job_assumes(j, wait4(j->p, &status, 0, &ru) != -1)) {
2651 job_log(j, LOG_NOTICE, "Working around 5020256. Assuming the job crashed.");
2652
2653 status = W_EXITCODE(0, SIGSEGV);
2654 memset(&ru, 0, sizeof(ru));
2655 }
2656 }
2657
2658 if (j->exit_timeout) {
2659 kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
2660 }
2661
2662 LIST_REMOVE(j, pid_hash_sle);
2663
2664 if (j->wait_reply_port) {
2665 job_log(j, LOG_DEBUG, "MPM wait reply being sent");
2666 job_assumes(j, job_mig_wait_reply(j->wait_reply_port, 0, status) == 0);
2667 j->wait_reply_port = MACH_PORT_NULL;
2668 }
2669
2670 if( j->pending_sample ) {
2671 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job exited before we could sample it.");
2672 STAILQ_REMOVE(&j->mgr->pending_samples, j, job_s, pending_samples_sle);
2673 j->pending_sample = false;
2674 }
2675
2676 if (j->sent_signal_time) {
2677 uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);
2678
2679 td_sec = td / NSEC_PER_SEC;
2680 td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;
2681
2682 job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
2683 }
2684
2685 timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
2686 timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
2687 j->ru.ru_maxrss += ru.ru_maxrss;
2688 j->ru.ru_ixrss += ru.ru_ixrss;
2689 j->ru.ru_idrss += ru.ru_idrss;
2690 j->ru.ru_isrss += ru.ru_isrss;
2691 j->ru.ru_minflt += ru.ru_minflt;
2692 j->ru.ru_majflt += ru.ru_majflt;
2693 j->ru.ru_nswap += ru.ru_nswap;
2694 j->ru.ru_inblock += ru.ru_inblock;
2695 j->ru.ru_oublock += ru.ru_oublock;
2696 j->ru.ru_msgsnd += ru.ru_msgsnd;
2697 j->ru.ru_msgrcv += ru.ru_msgrcv;
2698 j->ru.ru_nsignals += ru.ru_nsignals;
2699 j->ru.ru_nvcsw += ru.ru_nvcsw;
2700 j->ru.ru_nivcsw += ru.ru_nivcsw;
2701
2702 if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
2703 job_log(j, LOG_WARNING, "Exited with exit code: %d", WEXITSTATUS(status));
2704 }
2705
2706 if (WIFSIGNALED(status)) {
2707 int s = WTERMSIG(status);
2708 if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
2709 job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
2710 } else if( !j->stopped && !j->clean_kill ) {
2711 switch( s ) {
2712 /* Signals which indicate a crash. */
2713 case SIGILL :
2714 case SIGABRT :
2715 case SIGFPE :
2716 case SIGBUS :
2717 case SIGSEGV :
2718 case SIGSYS :
2719 /* If the kernel has posted NOTE_EXIT and the signal sent to the process was
2720 * SIGTRAP, assume that it's a crash.
2721 */
2722 case SIGTRAP :
2723 j->crashed = true;
2724 job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
2725 break;
2726 default :
2727 job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
2728 break;
2729 }
2730
2731 if( is_system_bootstrapper && j->crashed ) {
2732 job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
2733 }
2734 }
2735 }
2736
2737 j->reaped = true;
2738
2739 struct machservice *msi = NULL;
2740 if( j->crashed || !(j->did_exec || j->anonymous) ) {
2741 SLIST_FOREACH( msi, &j->machservices, sle ) {
2742 if( j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash) ) {
2743 machservice_drain_port(msi);
2744 }
2745
2746 if( !j->did_exec && msi->reset && job_assumes(j, !msi->isActive) ) {
2747 machservice_resetport(j, msi);
2748 }
2749 }
2750 }
2751
2752 struct suspended_peruser *spi = NULL;
2753 while( (spi = LIST_FIRST(&j->suspended_perusers)) ) {
2754 job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
2755 spi->j->peruser_suspend_count--;
2756 if( spi->j->peruser_suspend_count == 0 ) {
2757 job_dispatch(spi->j, false);
2758 }
2759 LIST_REMOVE(spi, sle);
2760 free(spi);
2761 }
2762
2763 struct waiting_for_exit *w4e = NULL;
2764 while( (w4e = LIST_FIRST(&j->exit_watchers)) ) {
2765 waiting4exit_delete(j, w4e);
2766 }
2767
2768 if (j->anonymous) {
2769 total_anon_children--;
2770 if( j->migratory ) {
2771 runtime_del_ref();
2772 }
2773 } else {
2774 runtime_del_ref();
2775 total_children--;
2776 }
2777
2778 if( j->has_console ) {
2779 g_wsp = 0;
2780 }
2781
2782 if (j->hopefully_exits_first) {
2783 j->mgr->hopefully_first_cnt--;
2784 } else if (!j->anonymous && !j->hopefully_exits_last) {
2785 j->mgr->normal_active_cnt--;
2786 }
2787 j->last_exit_status = status;
2788 j->sent_signal_time = 0;
2789 j->sent_sigkill = false;
2790 j->clean_kill = false;
2791 j->sampling_complete = false;
2792 j->sent_kill_via_shmem = false;
2793 j->lastlookup = NULL;
2794 j->lastlookup_gennum = 0;
2795 j->p = 0;
2796
2797 /*
2798 * We need to someday evaluate other jobs and find those who wish to track the
2799 * active/inactive state of this job. The current job_dispatch() logic makes
2800 * this messy, given that jobs can be deleted at dispatch.
2801 */
2802 }
2803
2804 void
2805 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
2806 {
2807 jobmgr_t jmi, jmn;
2808 job_t ji, jn;
2809
2810 if (jm->shutting_down) {
2811 return;
2812 }
2813
2814 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
2815 jobmgr_dispatch_all(jmi, newmounthack);
2816 }
2817
2818 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
2819 if (newmounthack && ji->start_on_mount) {
2820 ji->start_pending = true;
2821 }
2822
2823 job_dispatch(ji, false);
2824 }
2825 }
2826
2827 pid_t
2828 basic_spawn(job_t j, void (*what_to_do)(job_t))
2829 {
2830 pid_t p = 0;
2831 thread_state_flavor_t f = 0;
2832 #if defined (__ppc__) || defined(__ppc64__)
2833 f = PPC_THREAD_STATE64;
2834 #elif defined(__i386__) || defined(__x86_64__)
2835 f = x86_THREAD_STATE;
2836 #elif defined(__arm__)
2837 f = ARM_THREAD_STATE;
2838 #else
2839 #error "unknown architecture"
2840 #endif
2841
2842 int execpair[2] = { 0, 0 };
2843 job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execpair) != -1);
2844
2845 switch( (p = fork()) ) {
2846 case 0 :
2847 job_assumes(j, runtime_close(execpair[0]) != -1);
2848 /* Wait for the parent to attach a kevent. */
2849 read(_fd(execpair[1]), &p, sizeof(p));
2850 what_to_do(j);
2851 _exit(EXIT_FAILURE);
2852 case -1 :
2853 job_assumes(j, runtime_close(execpair[0]) != -1);
2854 job_assumes(j, runtime_close(execpair[1]) != -1);
2855 execpair[0] = -1;
2856 execpair[1] = -1;
2857 job_log(j, LOG_NOTICE | LOG_CONSOLE, "fork(2) failed: %d", errno);
2858 break;
2859 default :
2860 job_assumes(j, runtime_close(execpair[1]) != -1);
2861 execpair[1] = -1;
2862 break;
2863 }
2864
2865 int r = -1;
2866 if( p != -1 ) {
2867 /* Let us know when the sample is done. EV_ONESHOT is implicit if we're just interested in NOTE_EXIT. */
2868 if( job_assumes(j, (r = kevent_mod(p, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j)) != -1) ) {
2869 if( !job_assumes(j, write(execpair[0], &p, sizeof(p)) == sizeof(p)) ) {
2870 job_assumes(j, kevent_mod(p, EVFILT_PROC, EV_DELETE, 0, 0, NULL) != -1);
2871 job_assumes(j, runtime_kill(p, SIGKILL) != -1);
2872 r = -1;
2873 p = -1;
2874 }
2875 } else {
2876 job_assumes(j, runtime_kill(p, SIGKILL) != -1);
2877 }
2878
2879 int status = 0;
2880 if( r == -1 ) {
2881 job_assumes(j, waitpid(p, &status, WNOHANG) != -1);
2882 }
2883 }
2884
2885 if( execpair[0] != -1 ) {
2886 job_assumes(j, runtime_close(execpair[0]) != -1);
2887 }
2888
2889 if( execpair[1] != -1 ) {
2890 job_assumes(j, runtime_close(execpair[1]) != -1);
2891 }
2892
2893 return p;
2894 }
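
/* The socketpair above is a rendezvous: the child blocks in read(2) until the
 * parent has attached its NOTE_EXIT kevent, closing the race where the child could
 * exit before the parent is watching it. A sketch of typical usage (take_sample is
 * the real callee used below; the error check is illustrative):
 */
#if 0
pid_t sp = basic_spawn(j, take_sample);
if (sp == -1) {
	job_log(j, LOG_ERR, "Spawn failed; nothing to wait for.");
}
#endif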
2895
2896 void
2897 take_sample(job_t j)
2898 {
2899 char pidstr[32];
2900 snprintf(pidstr, sizeof(pidstr), "%u", j->p);
2901 #if !TARGET_OS_EMBEDDED
2902 /* -nodsyms so sample doesn't try to use Spotlight to find dsym files after mds has gone away. */
2903 char *sample_args[] = { "/usr/bin/sample", pidstr, "1", "-unsupportedShowArch", "-mayDie", "-nodsyms", "-file", j->mgr->sample_log_file, NULL };
2904 #else
2905 char *sample_args[] = { "/usr/bin/sample", pidstr, "1", "-unsupportedShowArch", "-mayDie", "-file", j->mgr->sample_log_file, NULL };
2906 #endif
2907
2908 execve(sample_args[0], sample_args, environ);
2909 _exit(EXIT_FAILURE);
2910 }
2911
2912 void
2913 jobmgr_dequeue_next_sample(jobmgr_t jm)
2914 {
2915 if( STAILQ_EMPTY(&jm->pending_samples) ) {
2916 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sample queue is empty.");
2917 return;
2918 }
2919
2920 /* Dequeue the next in line. */
2921 job_t j = STAILQ_FIRST(&jm->pending_samples);
2922 if( j->is_being_sampled ) {
2923 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sampling is in progress. Not dequeuing next job.");
2924 return;
2925 }
2926
2927 if( !job_assumes(j, !j->sampling_complete) ) {
2928 return;
2929 }
2930
2931 if (!job_assumes(j, do_apple_internal_logging)) {
2932 return;
2933 }
2934
2935 if (!job_assumes(j, mkdir(SHUTDOWN_LOG_DIR, S_IRWXU) != -1 || errno == EEXIST)) {
2936 return;
2937 }
2938
2939 char pidstr[32];
2940 snprintf(pidstr, sizeof(pidstr), "%u", j->p);
2941 snprintf(j->mgr->sample_log_file, sizeof(j->mgr->sample_log_file), SHUTDOWN_LOG_DIR "/%s-%u.sample.txt", j->label, j->p);
2942
2943 if (job_assumes(j, unlink(jm->sample_log_file) != -1 || errno == ENOENT)) {
2944 pid_t sp = basic_spawn(j, take_sample);
2945
2946 if( sp == -1 ) {
2947 job_log(j, LOG_ERR | LOG_CONSOLE, "Sampling for job failed!");
2948 STAILQ_REMOVE(&jm->pending_samples, j, job_s, pending_samples_sle);
2949 j->sampling_complete = true;
2950 jobmgr_dequeue_next_sample(jm);
2951 } else {
2952 j->tracing_pid = sp;
2953 j->is_being_sampled = true;
2954 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sampling job (sample PID: %i, file: %s).", sp, j->mgr->sample_log_file);
2955 }
2956 } else {
2957 STAILQ_REMOVE(&jm->pending_samples, j, job_s, pending_samples_sle);
2958 j->sampling_complete = true;
2959 }
2960
2961 j->pending_sample = false;
2962 }
2963
2964 void
2965 job_dispatch_curious_jobs(job_t j)
2966 {
2967 job_t ji = NULL, jt = NULL;
2968 SLIST_FOREACH_SAFE( ji, &s_curious_jobs, curious_jobs_sle, jt ) {
2969 struct semaphoreitem *si = NULL;
2970 SLIST_FOREACH( si, &ji->semaphores, sle ) {
2971 if( !(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) ) {
2972 continue;
2973 }
2974
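/* N.B. Bounding strncmp(3) by strlen(j->label) makes this a prefix match: any
 * si->what that merely begins with j->label will also fire, not just an exact
 * label match.
 */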
2975 if( strncmp(si->what, j->label, strlen(j->label)) == 0 ) {
2976 job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);
2977
2978 job_dispatch(ji, false);
2979 /* ji could be removed here, so don't do anything with it or its semaphores
2980 * after this point.
2981 */
2982 break;
2983 }
2984 }
2985 }
2986 }
2987
2988 job_t
2989 job_dispatch(job_t j, bool kickstart)
2990 {
2991 /* Don't dispatch a job if it has no audit session set. */
2992 if( !uuid_is_null(j->expected_audit_uuid) ) {
2993 return NULL;
2994 }
2995
2996 #if TARGET_OS_EMBEDDED
2997 if( g_embedded_privileged_action && s_embedded_privileged_job ) {
2998 if( !job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL) ) {
2999 errno = EPERM;
3000 return NULL;
3001 }
3002
3003 if( strcmp(j->username, s_embedded_privileged_job->username) != 0 ) {
3004 errno = EPERM;
3005 return NULL;
3006 }
3007 } else if( g_embedded_privileged_action ) {
3008 errno = EINVAL;
3009 return NULL;
3010 }
3011 #endif
3012
3013 /*
3014 * The whole job removal logic needs to be consolidated. The fact that
3015 * a job can be removed from just about anywhere makes it easy to have
3016 * stale pointers left behind somewhere on the stack that might get
3017 * used after the deallocation. In particular, during job iteration.
3018 *
3019 * This is a classic example. The act of dispatching a job may delete it.
3020 */
3021 if (!job_active(j)) {
3022 if (job_useless(j)) {
3023 job_remove(j);
3024 return NULL;
3025 }
3026 if( unlikely(j->per_user && j->peruser_suspend_count > 0) ) {
3027 return NULL;
3028 }
3029
3030 if (kickstart || job_keepalive(j)) {
3031 job_log(j, LOG_DEBUG, "Starting job (kickstart = %s)", kickstart ? "true" : "false");
3032 job_start(j);
3033 } else {
3034 job_log(j, LOG_DEBUG, "Watching job (kickstart = %s)", kickstart ? "true" : "false");
3035 job_watch(j);
3036
3037 /*
3038 * 5455720
3039 *
3040 * Path checking and monitoring is really racy right now.
3041 * We should clean this up post Leopard.
3042 */
3043 if (job_keepalive(j)) {
3044 job_start(j);
3045 }
3046 }
3047 } else {
3048 job_log(j, LOG_DEBUG, "Tried to dispatch an already active job (%s).", job_active(j));
3049 }
3050
3051 return j;
3052 }
3053
3054 void
3055 job_log_stdouterr2(job_t j, const char *msg, ...)
3056 {
3057 struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
3058 va_list ap;
3059
3060 va_start(ap, msg);
3061 runtime_vsyslog(&attr, msg, ap);
3062 va_end(ap);
3063 }
3064
3065 void
3066 job_log_stdouterr(job_t j)
3067 {
3068 char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
3069 bool close_log_redir = false;
3070 ssize_t rsz;
3071
3072 if (!job_assumes(j, buf != NULL)) {
3073 return;
3074 }
3075
3076 bufindex = buf;
3077
3078 rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);
3079
3080 if (unlikely(rsz == 0)) {
3081 job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
3082 close_log_redir = true;
3083 } else if (rsz == -1) {
3084 if( !job_assumes(j, errno == EAGAIN) ) {
3085 close_log_redir = true;
3086 }
3087 } else {
3088 buf[rsz] = '\0';
3089
3090 while ((msg = strsep(&bufindex, "\n\r"))) {
3091 if (msg[0]) {
3092 job_log_stdouterr2(j, "%s", msg);
3093 }
3094 }
3095 }
3096
3097 free(buf);
3098
3099 if (unlikely(close_log_redir)) {
3100 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
3101 j->log_redirect_fd = 0;
3102 job_dispatch(j, false);
3103 }
3104 }
3105
3106 void
3107 job_kill(job_t j)
3108 {
3109 if (unlikely(!j->p || j->anonymous)) {
3110 return;
3111 }
3112
3113 job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);
3114
3115 j->sent_sigkill = true;
3116
3117 intptr_t timer = j->clean_kill ? LAUNCHD_CLEAN_KILL_TIMER : LAUNCHD_SIGKILL_TIMER;
3118 job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, timer, j) != -1);
3119
3120 job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
3121 }
3122
3123 void
3124 job_log_children_without_exec(job_t j)
3125 {
3126 /* <rdar://problem/5701343> ER: Add a KERN_PROC_PPID sysctl */
3127 #ifdef KERN_PROC_PPID
3128 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PPID, j->p };
3129 #else
3130 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
3131 #endif
3132 size_t mib_sz = sizeof(mib) / sizeof(mib[0]);
3133 size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
3134 struct kinfo_proc *kp;
3135
3136 if (!do_apple_internal_logging || j->anonymous || j->per_user) {
3137 return;
3138 }
3139
3140 if (!job_assumes(j, (kp = malloc(len)) != NULL)) {
3141 return;
3142 }
3143 if (!job_assumes(j, sysctl(mib, (u_int) mib_sz, kp, &len, NULL, 0) != -1)) {
3144 goto out;
3145 }
3146
3147 kp_cnt = len / sizeof(struct kinfo_proc);
3148
3149 for (i = 0; i < kp_cnt; i++) {
3150 #ifndef KERN_PROC_PPID
3151 if (kp[i].kp_eproc.e_ppid != j->p) {
3152 continue;
3153 }
3154 #endif
3155 if (kp[i].kp_proc.p_flag & P_EXEC) {
3156 continue;
3157 }
3158
3159 job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u",
3160 kp[i].kp_proc.p_pid);
3161 }
3162
3163 out:
3164 free(kp);
3165 }
3166
3167 void
3168 job_cleanup_after_tracer(job_t j)
3169 {
3170 jobmgr_t jm = NULL;
3171 if( j->is_being_sampled ) {
3172 int wstatus = 0;
3173 job_log(j, LOG_DEBUG | LOG_CONSOLE, "sample[%i] finished with job.", j->tracing_pid);
3174 if( job_assumes(j, waitpid(j->tracing_pid, &wstatus, 0) != -1) ) {
3175 job_assumes(j, WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0);
3176 }
3177 STAILQ_REMOVE(&j->mgr->pending_samples, j, job_s, pending_samples_sle);
3178
3179 if( j->kill_after_sample ) {
3180 if (unlikely(j->debug_before_kill)) {
3181 job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
3182 job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
3183 }
3184
3185 job_log(j, LOG_NOTICE, "Killing...");
3186 job_kill(j);
3187 }
3188 j->sampling_complete = true;
3189 j->is_being_sampled = false;
3190 jm = j->mgr;
3191 }
3192
3193 j->tracing_pid = 0;
3194 if( j->reap_after_trace ) {
3195 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Reaping job now that attached tracer is gone.");
3196 struct kevent kev;
3197 EV_SET(&kev, j->p, 0, 0, NOTE_EXIT, 0, 0);
3198
3199 /* Fake a kevent to keep our logic consistent. */
3200 job_callback_proc(j, &kev);
3201
3202 /* Normally, after getting an EVFILT_PROC event, we do garbage collection
3203 * on the root job manager. To make our fakery complete, we will do garbage
3204 * collection at the beginning of the next run loop cycle (after we're done
3205 * draining the current queue of kevents).
3206 */
3207 job_assumes(j, kevent_mod((uintptr_t)&root_jobmgr->reboot_flags, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, root_jobmgr) != -1);
3208 }
3209
3210 if( jm ) {
3211 jobmgr_dequeue_next_sample(jm);
3212 }
3213 }
3214
3215 void
3216 job_callback_proc(job_t j, struct kevent *kev)
3217 {
3218 bool program_changed = false;
3219 int fflags = kev->fflags;
3220
3221 job_log(j, LOG_DEBUG, "EVFILT_PROC event for job:");
3222 log_kevent_struct(LOG_DEBUG, kev, 0);
3223
3224 if( fflags & NOTE_EXIT ) {
3225 if( j->p == (pid_t)kev->ident && !j->anonymous && !j->is_being_sampled ) {
3226 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
3227 struct kinfo_proc kp;
3228 size_t len = sizeof(kp);
3229
3230 /* Sometimes, the kernel says it succeeded but really didn't. */
3231 if( job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) && len == sizeof(kp) ) {
3232 if( !job_assumes(j, kp.kp_eproc.e_ppid == getpid()) ) {
3233 /* Someone has attached to the process with ptrace(). There's a race here.
3234 * If we determine that we are not the parent process and then fail to attach
3235 * a kevent to the parent PID (who is probably using ptrace()), we can take that as an
3236 * indication that the parent exited between sysctl(3) and kevent_mod(). The
3237 * reparenting of the PID should be atomic to us, so in that case, we reap the
3238 * job as normal.
3239 *
3240 * Otherwise, we wait for the death of the parent tracer and then reap, just as we
3241 * would if a job died while we were sampling it at shutdown.
3242 */
3243 if( job_assumes(j, kevent_mod(kp.kp_eproc.e_ppid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j) != -1) ) {
3244 j->tracing_pid = kp.kp_eproc.e_ppid;
3245 j->reap_after_trace = true;
3246 return;
3247 }
3248 }
3249 }
3250 } else if( !j->anonymous ) {
3251 if( j->tracing_pid == (pid_t)kev->ident ) {
3252 job_cleanup_after_tracer(j);
3253
3254 return;
3255 } else if( j->tracing_pid && !j->reap_after_trace ) {
3256 /* The job exited before our sample completed. */
3257 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job has exited. Will reap after tracing PID %i exits.", j->tracing_pid);
3258 j->reap_after_trace = true;
3259 return;
3260 }
3261 }
3262 }
3263
3264 if (fflags & NOTE_EXEC) {
3265 program_changed = true;
3266
3267 if (j->anonymous) {
3268 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
3269 struct kinfo_proc kp;
3270 size_t len = sizeof(kp);
3271
3272 /* Sometimes, the kernel says it succeeded but really didn't. */
3273 if (job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) && len == sizeof(kp)) {
3274 char newlabel[1000];
3275
3276 snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, kp.kp_proc.p_comm);
3277
3278 job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);
3279 j->lastlookup = NULL;
3280 j->lastlookup_gennum = 0;
3281
3282 LIST_REMOVE(j, label_hash_sle);
3283 strcpy((char *)j->label, newlabel);
3284 LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);
3285 }
3286 } else {
3287 j->did_exec = true;
3288 job_log(j, LOG_DEBUG, "Program changed");
3289 }
3290 }
3291
3292 if (fflags & NOTE_FORK) {
3293 job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
3294 job_log_children_without_exec(j);
3295 }
3296
3297 if (fflags & NOTE_EXIT) {
3298 job_reap(j);
3299
3300 if( !j->anonymous ) {
3301 j = job_dispatch(j, false);
3302 } else {
3303 job_remove(j);
3304 j = NULL;
3305 }
3306 }
3307
3308 if (j && (fflags & NOTE_REAP)) {
3309 job_assumes(j, j->p == 0);
3310 }
3311 }
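
/* A summary of the tracer bookkeeping above: is_being_sampled means our own
 * sample(1) child is attached; tracing_pid with reap_after_trace set means the job
 * died while traced and will be reaped (via a faked NOTE_EXIT) once the tracer
 * goes away; job_cleanup_after_tracer() handles both cases.
 */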
3312
3313 void
3314 job_callback_timer(job_t j, void *ident)
3315 {
3316 if (j == ident) {
3317 job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
3318 job_dispatch(j, true);
3319 } else if (&j->semaphores == ident) {
3320 job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
3321 job_dispatch(j, false);
3322 } else if (&j->start_interval == ident) {
3323 job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
3324 j->start_pending = true;
3325 job_dispatch(j, false);
3326 } else if (&j->exit_timeout == ident) {
3327 if( !job_assumes(j, j->p != 0) ) {
3328 return;
3329 }
3330
3331 if( j->clean_kill ) {
3332 job_log(j, LOG_ERR | LOG_CONSOLE, "Clean job failed to exit %u seconds after receiving SIGKILL.", LAUNCHD_CLEAN_KILL_TIMER);
3333 job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
3334 j->clean_exit_timer_expired = true;
3335
3336 jobmgr_do_garbage_collection(j->mgr);
3337 return;
3338 }
3339
3340 /*
3341 * This block might be executed up to 3 times for a given (slow) job
3342 * - once for the SAMPLE_TIMEOUT timer, at which point sampling is triggered
3343 * - once for the exit_timeout timer, at which point:
3344 * - sampling is performed if not triggered previously
3345 * - SIGKILL is being sent to the job
3346 * - once for the SIGKILL_TIMER timer, at which point we log an issue
3347 * with the long SIGKILL
3348 */
3349
3350 if( j->per_user ) {
3351 /* Don't sample per-user launchd's. */
3352 j->sampling_complete = true;
3353 }
3354 bool was_is_or_will_be_sampled = ( j->sampling_complete || j->is_being_sampled || j->pending_sample );
3355 bool should_enqueue = ( !was_is_or_will_be_sampled && do_apple_internal_logging );
3356
3357 if (j->sent_sigkill) {
3358 uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);
3359
3360 td /= NSEC_PER_SEC;
3361 td -= j->clean_kill ? 0 : j->exit_timeout;
3362
3363 job_log(j, LOG_WARNING | LOG_CONSOLE, "Did not die after sending SIGKILL %llu seconds ago...", td);
3364 } else if( should_enqueue && (!j->exit_timeout || (LAUNCHD_SAMPLE_TIMEOUT < j->exit_timeout)) ) {
3365 /* This should work even if the job changes its exit_timeout midstream */
3366 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Sampling timeout elapsed (%u seconds). Scheduling a sample...", LAUNCHD_SAMPLE_TIMEOUT);
3367 if (j->exit_timeout) {
3368 unsigned int ttk = (j->exit_timeout - LAUNCHD_SAMPLE_TIMEOUT);
3369 job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
3370 EV_ADD|EV_ONESHOT, NOTE_SECONDS, ttk, j) != -1);
3371 job_log(j, LOG_NOTICE | LOG_CONSOLE, "Scheduled new exit timeout for %u seconds later", ttk);
3372 }
3373
3374 STAILQ_INSERT_TAIL(&j->mgr->pending_samples, j, pending_samples_sle);
3375 j->pending_sample = true;
3376 jobmgr_dequeue_next_sample(j->mgr);
3377 } else {
3378 if( do_apple_internal_logging && !j->sampling_complete ) {
3379 if( j->is_being_sampled || j->pending_sample ) {
3380 char pidstr[24] = { 0 };
3381 snprintf(pidstr, sizeof(pidstr), "[%i] ", j->tracing_pid);
3382
3383 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Will kill after sample%shas completed.", j->exit_timeout, j->tracing_pid ? pidstr : " ");
3384 j->kill_after_sample = true;
3385 } else {
3386 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Will sample and then kill.", j->exit_timeout);
3387
3388 STAILQ_INSERT_TAIL(&j->mgr->pending_samples, j, pending_samples_sle);
3389 j->pending_sample = true;
3390 }
3391
3392 jobmgr_dequeue_next_sample(j->mgr);
3393 } else {
3394 if (unlikely(j->debug_before_kill)) {
3395 job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
3396 job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
3397 }
3398 job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
3399 job_kill(j);
3400 jobmgr_do_garbage_collection(j->mgr);
3401 }
3402 }
3403 } else {
3404 job_assumes(j, false);
3405 }
3406 }
3407
3408 void
3409 job_callback_read(job_t j, int ident)
3410 {
3411 if (ident == j->log_redirect_fd) {
3412 job_log_stdouterr(j);
3413 } else if (ident == j->stdin_fd) {
3414 job_dispatch(j, true);
3415 } else {
3416 socketgroup_callback(j);
3417 }
3418 }
3419
3420 void
3421 jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
3422 {
3423 jobmgr_t jmi;
3424 job_t j;
3425
3426 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3427 jobmgr_reap_bulk(jmi, kev);
3428 }
3429
3430 if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
3431 kev->udata = j;
3432 job_callback(j, kev);
3433 }
3434 }
3435
3436 void
3437 jobmgr_callback(void *obj, struct kevent *kev)
3438 {
3439 jobmgr_t jm = obj;
3440 job_t ji;
3441
3442 switch (kev->filter) {
3443 case EVFILT_PROC:
3444 jobmgr_reap_bulk(jm, kev);
3445 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
3446 break;
3447 case EVFILT_SIGNAL:
3448 switch (kev->ident) {
3449 case SIGTERM:
3450 return launchd_shutdown();
3451 case SIGUSR1:
3452 return calendarinterval_callback();
3453 case SIGUSR2:
3454 fake_shutdown_in_progress = true;
3455 runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
3456
3457 runtime_closelog(); /* HACK -- force 'start' time to be set */
3458
3459 if (pid1_magic) {
3460 int64_t now = runtime_get_wall_time();
3461
3462 jobmgr_log(jm, LOG_NOTICE, "Anticipatory shutdown began at: %lld.%06llu", now / USEC_PER_SEC, now % USEC_PER_SEC);
3463
3464 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
3465 if (ji->per_user && ji->p) {
3466 job_assumes(ji, runtime_kill(ji->p, SIGUSR2) != -1);
3467 }
3468 }
3469 } else {
3470 jobmgr_log(jm, LOG_NOTICE, "Anticipatory per-user launchd shutdown");
3471 }
3472
3473 return;
3474 default:
3475 return (void)jobmgr_assumes(jm, false);
3476 }
3477 break;
3478 case EVFILT_FS:
3479 if (kev->fflags & VQ_MOUNT) {
3480 jobmgr_dispatch_all(jm, true);
3481 }
3482 jobmgr_dispatch_all_semaphores(jm);
3483 break;
3484 case EVFILT_TIMER:
3485 if( kev->ident == (uintptr_t)&sorted_calendar_events ) {
3486 calendarinterval_callback();
3487 } else if( kev->ident == (uintptr_t)jm ) {
3488 jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
3489 jobmgr_still_alive_with_check(jm);
3490 } else if( kev->ident == (uintptr_t)&jm->reboot_flags ) {
3491 jobmgr_do_garbage_collection(jm);
3492 }
3493 break;
3494 case EVFILT_VNODE:
3495 if( kev->ident == (uintptr_t)s_no_hang_fd ) {
3496 int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
3497 if( unlikely(_no_hang_fd != -1) ) {
3498 jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
3499 jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
3500 jobmgr_assumes(root_jobmgr, runtime_close(s_no_hang_fd) != -1);
3501 s_no_hang_fd = _fd(_no_hang_fd);
3502 }
3503 } else if( pid1_magic && g_console && kev->ident == (uintptr_t)fileno(g_console) ) {
3504 int cfd = -1;
3505 if( launchd_assumes((cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1) ) {
3506 _fd(cfd);
3507 if( !launchd_assumes((g_console = fdopen(cfd, "w")) != NULL) ) {
3508 close(cfd);
3509 }
3510 }
3511 }
3512 break;
3513 default:
3514 return (void)jobmgr_assumes(jm, false);
3515 }
3516 }
3517
3518 void
3519 job_callback(void *obj, struct kevent *kev)
3520 {
3521 job_t j = obj;
3522
3523 job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
3524
3525 switch (kev->filter) {
3526 case EVFILT_PROC:
3527 return job_callback_proc(j, kev);
3528 case EVFILT_TIMER:
3529 return job_callback_timer(j, (void *) kev->ident);
3530 case EVFILT_VNODE:
3531 return semaphoreitem_callback(j, kev);
3532 case EVFILT_READ:
3533 return job_callback_read(j, (int) kev->ident);
3534 case EVFILT_MACHPORT:
3535 return (void)job_dispatch(j, true);
3536 default:
3537 return (void)job_assumes(j, false);
3538 }
3539 }
3540
3541 void
3542 job_start(job_t j)
3543 {
3544 uint64_t td;
3545 int spair[2];
3546 int execspair[2];
3547 int oepair[2];
3548 char nbuf[64];
3549 pid_t c;
3550 bool sipc = false;
3551 u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_REAP;
3552
3553 if (!job_assumes(j, j->mgr != NULL)) {
3554 return;
3555 }
3556
3557 if (unlikely(job_active(j))) {
3558 job_log(j, LOG_DEBUG, "Already started");
3559 return;
3560 }
3561
3562 /*
3563 * Some users adjust the wall-clock and then expect software to not notice.
3564 * Therefore, launchd must use an absolute clock instead of the wall clock
3565 * wherever possible.
3566 */
3567 td = runtime_get_nanoseconds_since(j->start_time);
3568 td /= NSEC_PER_SEC;
3569
3570 if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
3571 time_t respawn_delta = j->min_run_time - (uint32_t)td;
3572
3573 /*
3574 * We technically should ref-count throttled jobs to prevent idle exit,
3575 * but we're not directly tracking the 'throttled' state at the moment.
3576 */
3577
3578 job_log(j, LOG_WARNING, "Throttling respawn: Will start in %ld seconds", respawn_delta);
3579 job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
3580 job_ignore(j);
3581 return;
3582 }
3583
3584 if (likely(!j->legacy_mach_job)) {
3585 sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_special_privileges;
3586 }
3587
3588 if( sipc ) {
3589 job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
3590 }
3591
3592 job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);
3593
3594 if (likely(!j->legacy_mach_job) && job_assumes(j, pipe(oepair) != -1)) {
3595 j->log_redirect_fd = _fd(oepair[0]);
3596 job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
3597 job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
3598 }
3599
3600 switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
3601 case -1:
3602 job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
3603 job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j) != -1);
3604 job_ignore(j);
3605
3606 job_assumes(j, runtime_close(execspair[0]) == 0);
3607 job_assumes(j, runtime_close(execspair[1]) == 0);
3608 if (sipc) {
3609 job_assumes(j, runtime_close(spair[0]) == 0);
3610 job_assumes(j, runtime_close(spair[1]) == 0);
3611 }
3612 if (likely(!j->legacy_mach_job)) {
3613 job_assumes(j, runtime_close(oepair[0]) != -1);
3614 job_assumes(j, runtime_close(oepair[1]) != -1);
3615 j->log_redirect_fd = 0;
3616 }
3617 break;
3618 case 0:
3619 if (unlikely(_vproc_post_fork_ping())) {
3620 _exit(EXIT_FAILURE);
3621 }
3622 if (!j->legacy_mach_job) {
3623 job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
3624 job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
3625 job_assumes(j, runtime_close(oepair[1]) != -1);
3626 }
3627 job_assumes(j, runtime_close(execspair[0]) == 0);
3628 /* wait for our parent to say they've attached a kevent to us */
3629 read(_fd(execspair[1]), &c, sizeof(c));
3630
3631 if (sipc) {
3632 job_assumes(j, runtime_close(spair[0]) == 0);
3633 snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
3634 setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
3635 }
3636 job_start_child(j);
3637 break;
3638 default:
3639 j->start_time = runtime_get_opaque_time();
3640
3641 job_log(j, LOG_DEBUG, "Started as PID: %u", c);
3642
3643 j->did_exec = false;
3644 j->checkedin = false;
3645 j->start_pending = false;
3646 j->reaped = false;
3647 j->crashed = false;
3648 j->stopped = false;
3649 if( j->needs_kickoff ) {
3650 j->needs_kickoff = false;
3651
3652 if( SLIST_EMPTY(&j->semaphores) ) {
3653 j->ondemand = false;
3654 }
3655 }
3656
3657 if( j->has_console ) {
3658 g_wsp = c;
3659 }
3660
3661 runtime_add_ref();
3662 total_children++;
3663 LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
3664
3665 if (likely(!j->legacy_mach_job)) {
3666 job_assumes(j, runtime_close(oepair[1]) != -1);
3667 }
3668 j->p = c;
3669 if (unlikely(j->hopefully_exits_first)) {
3670 j->mgr->hopefully_first_cnt++;
3671 } else if (likely(!j->hopefully_exits_last)) {
3672 j->mgr->normal_active_cnt++;
3673 }
3674 j->fork_fd = _fd(execspair[0]);
3675 job_assumes(j, runtime_close(execspair[1]) == 0);
3676 if (sipc) {
3677 job_assumes(j, runtime_close(spair[1]) == 0);
3678 ipc_open(_fd(spair[0]), j);
3679 }
3680 if (job_assumes(j, kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1)) {
3681 job_ignore(j);
3682 } else {
3683 job_reap(j);
3684 }
3685
3686 j->wait4debugger_oneshot = false;
3687
3688 struct envitem *ei = NULL, *et = NULL;
3689 SLIST_FOREACH_SAFE( ei, &j->env, sle, et ) {
3690 if( ei->one_shot ) {
3691 SLIST_REMOVE(&j->env, ei, envitem, sle);
3692 }
3693 }
3694
3695 if (likely(!j->stall_before_exec)) {
3696 job_uncork_fork(j);
3697 }
3698 break;
3699 }
3700 }
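/*
 * A minimal, self-contained sketch (editorial illustration, not launchd
 * code) of the uncorking pattern used above: the child stalls on a pipe
 * read until the parent has attached its EVFILT_PROC kevent, closing the
 * race where a short-lived child could exit before the parent is
 * watching it. launchd itself uses a socketpair and runtime_fork().
 */
#if 0
#include <sys/event.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct kevent kev;
	int fds[2];
	int kq = kqueue();
	char go = 0;

	if (kq == -1 || pipe(fds) == -1) {
		exit(EXIT_FAILURE);
	}

	pid_t p = fork();
	if (p == -1) {
		exit(EXIT_FAILURE);
	}
	if (p == 0) {
		/* Child: block until the parent says it is watching us. */
		(void)read(fds[0], &go, sizeof(go));
		_exit(EXIT_SUCCESS);
	}

	/* Parent: register interest in the child's exit, then uncork it. */
	EV_SET(&kev, p, EVFILT_PROC, EV_ADD|EV_ONESHOT, NOTE_EXIT, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1) {
		exit(EXIT_FAILURE);
	}
	(void)write(fds[1], &go, sizeof(go));

	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1) {
		printf("child %d exited\n", (int)kev.ident);
	}
	(void)waitpid(p, NULL, 0);
	return 0;
}
#endif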
3701
3702 void
3703 job_start_child(job_t j)
3704 {
3705 typeof(posix_spawn) *psf;
3706 const char *file2exec = "/usr/libexec/launchproxy";
3707 const char **argv;
3708 posix_spawnattr_t spattr;
3709 int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
3710 glob_t g;
3711 short spflags = POSIX_SPAWN_SETEXEC;
3712 size_t binpref_out_cnt = 0;
3713 size_t i;
3714
3715 job_assumes(j, posix_spawnattr_init(&spattr) == 0);
3716
3717 job_setup_attributes(j);
3718
3719 if (unlikely(j->argv && j->globargv)) {
3720 g.gl_offs = 1;
3721 for (i = 0; i < j->argc; i++) {
3722 if (i > 0) {
3723 gflags |= GLOB_APPEND;
3724 }
3725 if (glob(j->argv[i], gflags, NULL, &g) != 0) {
3726 job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
3727 exit(EXIT_FAILURE);
3728 }
3729 }
3730 g.gl_pathv[0] = (char *)file2exec;
3731 argv = (const char **)g.gl_pathv;
3732 } else if (likely(j->argv)) {
3733 argv = alloca((j->argc + 2) * sizeof(char *));
3734 argv[0] = file2exec;
3735 for (i = 0; i < j->argc; i++) {
3736 argv[i + 1] = j->argv[i];
3737 }
3738 argv[i + 1] = NULL;
3739 } else {
3740 argv = alloca(3 * sizeof(char *));
3741 argv[0] = file2exec;
3742 argv[1] = j->prog;
3743 argv[2] = NULL;
3744 }
3745
3746 if (likely(!j->inetcompat)) {
3747 argv++;
3748 }
3749
3750 if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
3751 job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
3752 spflags |= POSIX_SPAWN_START_SUSPENDED;
3753 }
3754
3755 job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);
3756
3757 if (unlikely(j->j_binpref_cnt)) {
3758 job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
3759 job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
3760 }
3761
3762 #if HAVE_QUARANTINE
3763 if (j->quarantine_data) {
3764 qtn_proc_t qp;
3765
3766 if (job_assumes(j, qp = qtn_proc_alloc())) {
3767 if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
3768 job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
3769 }
3770 }
3771 }
3772 #endif
3773
3774 #if HAVE_SANDBOX
3775 if (j->seatbelt_profile) {
3776 char *seatbelt_err_buf = NULL;
3777
3778 if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
3779 if (seatbelt_err_buf) {
3780 job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
3781 }
3782 goto out_bad;
3783 }
3784 }
3785 #endif
3786
3787 psf = j->prog ? posix_spawn : posix_spawnp;
3788
3789 if (likely(!j->inetcompat)) {
3790 file2exec = j->prog ? j->prog : argv[0];
3791 }
3792
3793 errno = psf(NULL, file2exec, NULL, &spattr, (char *const*)argv, environ);
3794 job_log_error(j, LOG_ERR, "posix_spawn(\"%s\", ...)", file2exec);
3795
3796 #if HAVE_SANDBOX
3797 out_bad:
3798 #endif
3799 _exit(EXIT_FAILURE);
3800 }
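/*
 * Sketch of the POSIX_SPAWN_SETEXEC idiom used above (an Apple
 * extension): with that flag set, posix_spawn(2) replaces the calling
 * process the way exec(2) does instead of creating a new one, and only
 * returns on failure. Hypothetical helper, for illustration only.
 */
#if 0
#include <errno.h>
#include <spawn.h>
#include <stdlib.h>
#include <unistd.h>

extern char **environ;

static void
exec_via_spawn(const char *path, char *const argv[])
{
	posix_spawnattr_t attr;

	posix_spawnattr_init(&attr);
	posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);

	/* posix_spawn() returns the error number directly rather than
	 * setting errno, hence the assignment. */
	errno = posix_spawn(NULL, path, NULL, &attr, argv, environ);
	_exit(EXIT_FAILURE); /* only reached if the spawn failed */
}
#endif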
3801
3802 void
3803 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
3804 {
3805 launch_data_t tmp;
3806 struct envitem *ei;
3807 job_t ji;
3808
3809 if (jm->parentmgr) {
3810 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
3811 } else {
3812 char **tmpenviron = environ;
3813 for (; *tmpenviron; tmpenviron++) {
3814 char envkey[1024];
3815 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
3816 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
3817 strlcpy(envkey, *tmpenviron, sizeof(envkey)); /* strlcpy(), unlike strncpy(), guarantees NUL termination */
3818 *(strchr(envkey, '=')) = '\0';
3819 launch_data_dict_insert(dict, s, envkey);
3820 }
3821 }
3822
3823 LIST_FOREACH(ji, &jm->jobs, sle) {
3824 SLIST_FOREACH(ei, &ji->global_env, sle) {
3825 if ((tmp = launch_data_new_string(ei->value))) {
3826 launch_data_dict_insert(dict, tmp, ei->key);
3827 }
3828 }
3829 }
3830 }
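/*
 * The loop above assumes every environ entry contains '=' and that the
 * key fits in envkey. A defensive sketch of the same split, as a
 * hypothetical helper (names are illustrative):
 */
#if 0
static bool
split_env_entry(const char *entry, char *key, size_t keysz, const char **value)
{
	const char *eq = strchr(entry, '=');

	if (!eq || (size_t)(eq - entry) >= keysz) {
		return false; /* malformed entry, or key too long for the buffer */
	}
	memcpy(key, entry, eq - entry);
	key[eq - entry] = '\0';
	*value = eq + 1;
	return true;
}
#endif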
3831
3832 void
3833 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
3834 {
3835 struct envitem *ei;
3836 job_t ji;
3837
3838 if (jm->parentmgr) {
3839 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
3840 }
3841
3842 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
3843 SLIST_FOREACH(ei, &ji->global_env, sle) {
3844 setenv(ei->key, ei->value, 1);
3845 }
3846 }
3847 }
3848
3849 void
3850 job_log_pids_with_weird_uids(job_t j)
3851 {
3852 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
3853 size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
3854 struct kinfo_proc *kp;
3855 uid_t u = j->mach_uid;
3856
3857 if (!do_apple_internal_logging) {
3858 return;
3859 }
3860
3861 kp = malloc(len);
3862
3863 if (!job_assumes(j, kp != NULL)) {
3864 return;
3865 }
3866
3867 runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);
3868
3869 if (!job_assumes(j, sysctl(mib, 3, kp, &len, NULL, 0) != -1)) {
3870 goto out;
3871 }
3872
3873 kp_cnt = len / sizeof(struct kinfo_proc);
3874
3875 for (i = 0; i < kp_cnt; i++) {
3876 uid_t i_euid = kp[i].kp_eproc.e_ucred.cr_uid;
3877 uid_t i_uid = kp[i].kp_eproc.e_pcred.p_ruid;
3878 uid_t i_svuid = kp[i].kp_eproc.e_pcred.p_svuid;
3879 pid_t i_pid = kp[i].kp_proc.p_pid;
3880
3881 if (i_euid != u && i_uid != u && i_svuid != u) {
3882 continue;
3883 }
3884
3885 job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u",
3886 i_pid, kp[i].kp_proc.p_comm, i_uid, i_euid, i_svuid);
3887
3888 /* Temporarily disabled due to 5423935 and 4946119. */
3889 #if 0
3890 /* Ask the accountless process to exit. */
3891 job_assumes(j, runtime_kill(i_pid, SIGTERM) != -1);
3892 #endif
3893 }
3894
3895 out:
3896 free(kp);
3897 }
3898
3899 void
3900 job_postfork_test_user(job_t j)
3901 {
3902 /* This function is all about 5201578 */
3903
3904 const char *home_env_var = getenv("HOME");
3905 const char *user_env_var = getenv("USER");
3906 const char *logname_env_var = getenv("LOGNAME");
3907 uid_t tmp_uid, local_uid = getuid();
3908 gid_t tmp_gid, local_gid = getgid();
3909 char shellpath[PATH_MAX];
3910 char homedir[PATH_MAX];
3911 char loginname[2000];
3912 struct passwd *pwe;
3913
3914
3915 if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
3916 && strcmp(user_env_var, logname_env_var) == 0)) {
3917 goto out_bad;
3918 }
3919
3920 if ((pwe = getpwnam(user_env_var)) == NULL) {
3921 job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
3922 goto out_bad;
3923 }
3924
3925 /*
3926 * We must copy the results of getpw*().
3927 *
3928 * Why? Because subsequent API calls may call getpw*() as a part of
3929 * their implementation. Since getpw*() returns a [now thread scoped]
3930 * global, we must therefore cache the results before continuing.
3931 */
3932
3933 tmp_uid = pwe->pw_uid;
3934 tmp_gid = pwe->pw_gid;
3935
3936 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
3937 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
3938 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
3939
3940 if (strcmp(loginname, logname_env_var) != 0) {
3941 job_log(j, LOG_ERR, "The %s environment variable changed out from under us!", "USER");
3942 goto out_bad;
3943 }
3944 if (strcmp(homedir, home_env_var) != 0) {
3945 job_log(j, LOG_ERR, "The %s environment variable changed out from under us!", "HOME");
3946 goto out_bad;
3947 }
3948 if (local_uid != tmp_uid) {
3949 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
3950 'U', tmp_uid, local_uid);
3951 goto out_bad;
3952 }
3953 if (local_gid != tmp_gid) {
3954 job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
3955 'G', tmp_gid, local_gid);
3956 goto out_bad;
3957 }
3958
3959 return;
3960 out_bad:
3961 #if 0
3962 job_assumes(j, runtime_kill(getppid(), SIGTERM) != -1);
3963 _exit(EXIT_FAILURE);
3964 #else
3965 job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
3966 #endif
3967 }
3968
3969 void
3970 job_postfork_become_user(job_t j)
3971 {
3972 char loginname[2000];
3973 char tmpdirpath[PATH_MAX];
3974 char shellpath[PATH_MAX];
3975 char homedir[PATH_MAX];
3976 struct passwd *pwe;
3977 size_t r;
3978 gid_t desired_gid = -1;
3979 uid_t desired_uid = -1;
3980
3981 if (getuid() != 0) {
3982 return job_postfork_test_user(j);
3983 }
3984
3985 /*
3986 * I contend that having UID == 0 and GID != 0 is of dubious value.
3987 * Nevertheless, this used to work in Tiger. See: 5425348
3988 */
3989 if (j->groupname && !j->username) {
3990 j->username = "root";
3991 }
3992
3993 if (j->username) {
3994 if ((pwe = getpwnam(j->username)) == NULL) {
3995 job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
3996 _exit(EXIT_FAILURE);
3997 }
3998 } else if (j->mach_uid) {
3999 if ((pwe = getpwuid(j->mach_uid)) == NULL) {
4000 job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
4001 job_log_pids_with_weird_uids(j);
4002 _exit(EXIT_FAILURE);
4003 }
4004 } else {
4005 return;
4006 }
4007
4008 /*
4009 * We must copy the results of getpw*().
4010 *
4011 * Why? Because subsequent API calls may call getpw*() as a part of
4012 * their implementation. Since getpw*() returns a [now thread scoped]
4013 * global, we must therefore cache the results before continuing.
4014 */
4015
4016 desired_uid = pwe->pw_uid;
4017 desired_gid = pwe->pw_gid;
4018
4019 strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
4020 strlcpy(loginname, pwe->pw_name, sizeof(loginname));
4021 strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
4022
4023 if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
4024 job_log(j, LOG_ERR, "Expired account");
4025 _exit(EXIT_FAILURE);
4026 }
4027
4028
4029 if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
4030 job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
4031 } else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
4032 job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
4033 }
4034
4035 if (j->groupname) {
4036 struct group *gre;
4037
4038 if (unlikely((gre = getgrnam(j->groupname)) == NULL)) {
4039 job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
4040 _exit(EXIT_FAILURE);
4041 }
4042
4043 desired_gid = gre->gr_gid;
4044 }
4045
4046 if (!job_assumes(j, setlogin(loginname) != -1)) {
4047 _exit(EXIT_FAILURE);
4048 }
4049
4050 if (!job_assumes(j, setgid(desired_gid) != -1)) {
4051 _exit(EXIT_FAILURE);
4052 }
4053
4054 /*
4055 * The kernel team and the DirectoryServices team want initgroups()
4056 * called after setgid(). See 4616864 for more information.
4057 */
4058
4059 if (likely(!j->no_init_groups)) {
4060 #if 1
4061 if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
4062 _exit(EXIT_FAILURE);
4063 }
4064 #else
4065 /* Do our own little initgroups(). We do this to guarantee that we're
4066 * always opted into dynamic group resolution in the kernel. initgroups(3)
4067 * does not make this guarantee.
4068 */
4069 int groups[NGROUPS], ngroups;
4070
4071 /* A failure here isn't fatal, and we'll still get data we can use. */
4072 job_assumes(j, getgrouplist(j->username, desired_gid, groups, &ngroups) != -1);
4073
4074 if( !job_assumes(j, syscall(SYS_initgroups, ngroups, groups, desired_uid) != -1) ) {
4075 _exit(EXIT_FAILURE);
4076 }
4077 #endif
4078 }
4079
4080 if (!job_assumes(j, setuid(desired_uid) != -1)) {
4081 _exit(EXIT_FAILURE);
4082 }
4083
4084 r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));
4085
4086 if (likely(r > 0 && r < sizeof(tmpdirpath))) {
4087 setenv("TMPDIR", tmpdirpath, 0);
4088 }
4089
4090 setenv("SHELL", shellpath, 0);
4091 setenv("HOME", homedir, 0);
4092 setenv("USER", loginname, 0);
4093 setenv("LOGNAME", loginname, 0);
4094 }
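/*
 * The ordering above matters: setlogin(), then setgid(), then
 * initgroups(), then setuid() last, because once the UID is dropped the
 * process no longer has the privilege to change its GID or group list.
 * A condensed sketch of just that ordering (hypothetical helper):
 */
#if 0
static void
drop_privileges(const struct passwd *pw)
{
	if (setlogin(pw->pw_name) == -1 ||
	    setgid(pw->pw_gid) == -1 ||
	    initgroups(pw->pw_name, pw->pw_gid) == -1 ||
	    setuid(pw->pw_uid) == -1) {
		_exit(EXIT_FAILURE);
	}
}
#endif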
4095
4096 void
4097 job_setup_attributes(job_t j)
4098 {
4099 struct limititem *li;
4100 struct envitem *ei;
4101
4102 if (unlikely(j->setnice)) {
4103 job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
4104 }
4105
4106 SLIST_FOREACH(li, &j->limits, sle) {
4107 struct rlimit rl;
4108
4109 if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
4110 continue;
4111 }
4112
4113 if (li->sethard) {
4114 rl.rlim_max = li->lim.rlim_max;
4115 }
4116 if (li->setsoft) {
4117 rl.rlim_cur = li->lim.rlim_cur;
4118 }
4119
4120 if (setrlimit(li->which, &rl) == -1) {
4121 job_log_error(j, LOG_WARNING, "setrlimit()");
4122 }
4123 }
4124
4125 #if !TARGET_OS_EMBEDDED
4126 if( unlikely(j->per_user) ) {
4127 auditinfo_addr_t auinfo = {
4128 .ai_termid = { .at_type = AU_IPv4 },
4129 .ai_auid = j->mach_uid,
4130 .ai_asid = AU_ASSIGN_ASID,
4131 };
4132 (void)au_user_mask(j->username, &auinfo.ai_mask);
4133
4134 if( !launchd_assumes(setaudit_addr(&auinfo, sizeof(auinfo)) != -1) ) {
4135 runtime_syslog(LOG_WARNING, "Could not set audit session! (errno = %d)", errno);
4136 _exit(EXIT_FAILURE);
4137 } else {
4138 job_log(j, LOG_DEBUG, "Created new security session for per-user launchd.");
4139 }
4140 }
4141 #endif
4142
4143 if (unlikely(!j->inetcompat && j->session_create)) {
4144 launchd_SessionCreate();
4145 }
4146
4147 if (unlikely(j->low_pri_io)) {
4148 job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
4149 }
4150 if (unlikely(j->rootdir)) {
4151 job_assumes(j, chroot(j->rootdir) != -1);
4152 job_assumes(j, chdir(".") != -1);
4153 }
4154
4155 job_postfork_become_user(j);
4156
4157 if (unlikely(j->workingdir)) {
4158 job_assumes(j, chdir(j->workingdir) != -1);
4159 }
4160
4161 if (unlikely(j->setmask)) {
4162 umask(j->mask);
4163 }
4164
4165 if (j->stdin_fd) {
4166 job_assumes(j, dup2(j->stdin_fd, STDIN_FILENO) != -1);
4167 } else {
4168 job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
4169 }
4170 job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
4171 job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
4172
4173 jobmgr_setup_env_from_other_jobs(j->mgr);
4174
4175 SLIST_FOREACH(ei, &j->env, sle) {
4176 setenv(ei->key, ei->value, 1);
4177 }
4178
4179 if( do_apple_internal_logging ) {
4180 setenv(LAUNCHD_DO_APPLE_INTERNAL_LOGGING, "true", 1);
4181 }
4182
4183 #if !TARGET_OS_EMBEDDED
4184 if( j->jetsam_priority != LAUNCHD_JETSAM_PRIORITY_UNSET ) {
4185 job_assumes(j, proc_setpcontrol(PROC_SETPC_TERMINATE) == 0);
4186 }
4187 #endif
4188
4189 #if TARGET_OS_EMBEDDED
4190 if( j->main_thread_priority != 0 ) {
4191 struct sched_param params;
4192 bzero(&params, sizeof(params));
4193 params.sched_priority = j->main_thread_priority;
4194 job_assumes(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params) != -1);
4195 }
4196 #endif
4197
4198 /*
4199 * We'd like to call setsid() unconditionally, but we have reason to
4200 * believe that prevents launchd from being able to send signals to
4201 * setuid children. We'll settle for process-groups.
4202 */
4203 if (getppid() != 1) {
4204 job_assumes(j, setpgid(0, 0) != -1);
4205 } else {
4206 job_assumes(j, setsid() != -1);
4207 }
4208 }
4209
4210 void
4211 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
4212 {
4213 int fd;
4214
4215 if (!path) {
4216 return;
4217 }
4218
4219 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
4220 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
4221 return;
4222 }
4223
4224 job_assumes(j, dup2(fd, target_fd) != -1);
4225 job_assumes(j, runtime_close(fd) == 0);
4226 }
4227
4228 int
4229 dir_has_files(job_t j, const char *path)
4230 {
4231 DIR *dd = opendir(path);
4232 struct dirent *de;
4233 bool r = false;
4234
4235 if (unlikely(!dd)) {
4236 return -1;
4237 }
4238
4239 while ((de = readdir(dd))) {
4240 if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
4241 r = true;
4242 break;
4243 }
4244 }
4245
4246 job_assumes(j, closedir(dd) == 0);
4247 return r;
4248 }
4249
4250 void
4251 calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
4252 {
4253 struct calendarinterval *ci_iter, *ci_prev = NULL;
4254 time_t later, head_later;
4255
4256 later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);
4257
4258 if (ci->when.tm_wday != -1) {
4259 time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);
4260
4261 if (ci->when.tm_mday == -1) {
4262 later = otherlater;
4263 } else {
4264 later = later < otherlater ? later : otherlater;
4265 }
4266 }
4267
4268 ci->when_next = later;
4269
4270 LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
4271 if (ci->when_next < ci_iter->when_next) {
4272 LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
4273 break;
4274 }
4275
4276 ci_prev = ci_iter;
4277 }
4278
4279 if (ci_iter == NULL) {
4280 /* ci must want to fire after every other timer, or there are no timers */
4281
4282 if (LIST_EMPTY(&sorted_calendar_events)) {
4283 LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
4284 } else {
4285 LIST_INSERT_AFTER(ci_prev, ci, global_sle);
4286 }
4287 }
4288
4289 head_later = LIST_FIRST(&sorted_calendar_events)->when_next;
4290
4291 if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
4292 char time_string[100];
4293 size_t time_string_len;
4294
4295 ctime_r(&later, time_string);
4296 time_string_len = strlen(time_string);
4297
4298 if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
4299 time_string[time_string_len - 1] = '\0';
4300 }
4301
4302 job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
4303 }
4304 }
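/*
 * Sketch of the absolute-time timer armed above: with
 * NOTE_ABSOLUTE|NOTE_SECONDS, the data field of an EVFILT_TIMER kevent
 * is interpreted as a wall-clock time_t deadline rather than a relative
 * interval. 'kq' is an assumed, already-created kqueue.
 */
#if 0
struct kevent kev;
time_t deadline = time(NULL) + 60; /* fire one minute from now */

EV_SET(&kev, (uintptr_t)&kev, EVFILT_TIMER, EV_ADD|EV_ONESHOT,
       NOTE_ABSOLUTE|NOTE_SECONDS, deadline, NULL);
(void)kevent(kq, &kev, 1, NULL, 0, NULL);
#endif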
4305
4306 void
4307 extract_rcsid_substr(const char *i, char *o, size_t osz)
4308 {
4309 char *rcs_rev_tmp = strchr(i, ' ');
4310
4311 if (!rcs_rev_tmp) {
4312 strlcpy(o, i, osz);
4313 } else {
4314 strlcpy(o, rcs_rev_tmp + 1, osz);
4315 rcs_rev_tmp = strchr(o, ' ');
4316 if (rcs_rev_tmp) {
4317 *rcs_rev_tmp = '\0';
4318 }
4319 }
4320 }
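/*
 * Usage example for the helper above: given an RCS keyword string such
 * as "$Revision: 23932 $", it extracts the middle token; strings
 * containing no space are copied through unchanged.
 */
#if 0
char buf[100];
extract_rcsid_substr("$Revision: 23932 $", buf, sizeof(buf)); /* buf now holds "23932" */
#endif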
4321
4322 void
4323 jobmgr_log_bug(jobmgr_t jm, unsigned int line)
4324 {
4325 static const char *file;
4326 int saved_errno = errno;
4327 char buf[100];
4328
4329 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4330
4331 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
4332
4333 if (!file) {
4334 file = strrchr(__FILE__, '/');
4335 if (!file) {
4336 file = __FILE__;
4337 } else {
4338 file += 1;
4339 }
4340 }
4341
4342 /* the only time 'jm' should not be set is if setting up the first bootstrap fails for some reason */
4343 if (likely(jm)) {
4344 jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4345 } else {
4346 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4347 }
4348 }
4349
4350 void
4351 job_log_bug(job_t j, unsigned int line)
4352 {
4353 static const char *file;
4354 int saved_errno = errno;
4355 char buf[100];
4356
4357 runtime_ktrace1(RTKT_LAUNCHD_BUG);
4358
4359 extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));
4360
4361 if (!file) {
4362 file = strrchr(__FILE__, '/');
4363 if (!file) {
4364 file = __FILE__;
4365 } else {
4366 file += 1;
4367 }
4368 }
4369
4370 /* I cannot think of any reason why 'j' should ever be NULL, nor have I ever seen the case in the wild */
4371 if (likely(j)) {
4372 job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4373 } else {
4374 runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
4375 }
4376 }
4377
4378 void
4379 job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
4380 {
4381 const char *label2use = j ? j->label : "com.apple.launchd.NULL";
4382 const char *mgr2use = j ? j->mgr->name : "NULL";
4383 struct runtime_syslog_attr attr = { g_my_label, label2use, mgr2use, pri, getuid(), getpid(), j ? j->p : 0 };
4384 char *newmsg;
4385 int oldmask = 0;
4386 size_t newmsgsz;
4387
4388 /*
4389 * Hack: If bootstrap_port is set, we must be on the child side of a
4390 * fork(), but before the exec*(). Let's route the log message back to
4391 * launchd proper.
4392 */
4393 if (bootstrap_port) {
4394 return _vproc_logv(pri, err, msg, ap);
4395 }
4396
4397 newmsgsz = strlen(msg) + 200;
4398 newmsg = alloca(newmsgsz);
4399
4400 if (err) {
4401 #if !TARGET_OS_EMBEDDED
4402 snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
4403 #else
4404 snprintf(newmsg, newmsgsz, "(%s) %s: %s", label2use, msg, strerror(err));
4405 #endif
4406 } else {
4407 #if !TARGET_OS_EMBEDDED
4408 snprintf(newmsg, newmsgsz, "%s", msg);
4409 #else
4410 snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
4411 #endif
4412 }
4413
4414 if( j && unlikely(j->debug) ) {
4415 oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
4416 }
4417
4418 runtime_vsyslog(&attr, newmsg, ap);
4419
4420 if( j && unlikely(j->debug) ) {
4421 setlogmask(oldmask);
4422 }
4423 }
4424
4425 void
4426 job_log_error(job_t j, int pri, const char *msg, ...)
4427 {
4428 va_list ap;
4429
4430 va_start(ap, msg);
4431 job_logv(j, pri, errno, msg, ap);
4432 va_end(ap);
4433 }
4434
4435 void
4436 job_log(job_t j, int pri, const char *msg, ...)
4437 {
4438 va_list ap;
4439
4440 va_start(ap, msg);
4441 job_logv(j, pri, 0, msg, ap);
4442 va_end(ap);
4443 }
4444
4445 #if 0
4446 void
4447 jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
4448 {
4449 va_list ap;
4450
4451 va_start(ap, msg);
4452 jobmgr_logv(jm, pri, errno, msg, ap);
4453 va_end(ap);
4454 }
4455 #endif
4456
4457 void
4458 jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
4459 {
4460 va_list ap;
4461
4462 va_start(ap, msg);
4463 jobmgr_logv(jm, pri, 0, msg, ap);
4464 va_end(ap);
4465 }
4466
4467 void
4468 jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
4469 {
4470 char *newmsg;
4471 char *newname;
4472 size_t i, o, jmname_len = strlen(jm->name), newmsgsz;
4473
4474 newname = alloca((jmname_len + 1) * 2);
4475 newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
4476 newmsg = alloca(newmsgsz);
4477
4478 for (i = 0, o = 0; i < jmname_len; i++, o++) {
4479 if (jm->name[i] == '%') {
4480 newname[o] = '%';
4481 o++;
4482 }
4483 newname[o] = jm->name[i];
4484 }
4485 newname[o] = '\0';
4486
4487 if (err) {
4488 snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
4489 } else {
4490 snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
4491 }
4492
4493 if (jm->parentmgr) {
4494 jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
4495 } else {
4496 struct runtime_syslog_attr attr = { g_my_label, g_my_label, jm->name, pri, getuid(), getpid(), getpid() };
4497
4498 runtime_vsyslog(&attr, newmsg, ap);
4499 }
4500 }
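/*
 * The doubling loop above exists because newmsg is later consumed as a
 * printf-style format string by runtime_vsyslog(), so any literal '%'
 * in the manager name must be escaped as "%%". The same transform as a
 * standalone sketch (hypothetical helper):
 */
#if 0
static void
escape_percents(const char *in, char *out) /* out must hold 2x strlen(in) + 1 */
{
	for (; *in; in++) {
		if (*in == '%') {
			*out++ = '%';
		}
		*out++ = *in;
	}
	*out = '\0';
}
#endif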
4501
4502 void
4503 semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
4504 {
4505 if (si->fd != -1) {
4506 job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
4507 job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
4508 }
4509 }
4510
4511 void
4512 semaphoreitem_watch(job_t j, struct semaphoreitem *si)
4513 {
4514 char *parentdir, tmp_path[PATH_MAX];
4515 int saved_errno = 0;
4516 int fflags = NOTE_DELETE|NOTE_RENAME;
4517
4518 switch (si->why) {
4519 case DIR_NOT_EMPTY:
4520 case PATH_CHANGES:
4521 fflags |= NOTE_ATTRIB|NOTE_LINK;
4522 /* fall through */
4523 case PATH_EXISTS:
4524 fflags |= NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
4525 /* fall through */
4526 case PATH_MISSING:
4527 break;
4528 default:
4529 return;
4530 }
4531
4532 /* dirname() may modify tmp_path */
4533 strlcpy(tmp_path, si->what, sizeof(tmp_path));
4534
4535 if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
4536 return;
4537 }
4538
4539 /* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
4540 do {
4541 if (si->fd == -1) {
4542 struct stat sb;
4543 if( stat(si->what, &sb) == 0 ) {
4544 /* If we're watching a character or block device, only watch the parent directory.
4545 * See rdar://problem/6489900 for the gory details. Basically, holding an open file
4546 * descriptor to a devnode could end up (a) blocking us on open(2) until someone else
4547 * open(2)s the file (like a character device that waits for a carrier signal) or
4548 * (b) preventing other processes from obtaining an exclusive lock on the file, even
4549 * though we're opening it with O_EVTONLY.
4550 *
4551 * The main point of contention is that O_EVTONLY doesn't actually mean "event only".
4552 * It means "Don't prevent unmounts of this descriptor's volume". We work around this
4553 * for dev nodes by only watching the parent directory and stat(2)ing our desired file
4554 * each time the parent changes to see if it appeared or disappeared.
4555 */
4556 if( S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode) ) {
4557 si->fd = _fd(open(si->what, O_EVTONLY | O_NOCTTY | O_NONBLOCK));
4558 }
4559 }
4560
4561 if( si->fd == -1 ) {
4562 si->watching_parent = job_assumes(j, (si->fd = _fd(open(parentdir, O_EVTONLY | O_NOCTTY | O_NONBLOCK))) != -1);
4563 } else {
4564 si->watching_parent = false;
4565 }
4566 }
4567
4568 if (si->fd == -1) {
4569 return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", si->what);
4570 }
4571
4572 job_log(j, LOG_DEBUG, "Watching %svnode (%s): %d", si->watching_parent ? "parent ": "", si->what, si->fd);
4573
4574 if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
4575 saved_errno = errno;
4576 /*
4577 * The FD can be revoked between the open() and kevent().
4578 * This is similar to the inability for kevents to be
4579 * attached to short lived zombie processes after fork()
4580 * but before kevent().
4581 */
4582 job_assumes(j, runtime_close(si->fd) == 0);
4583 si->fd = -1;
4584 }
4585 } while (unlikely((si->fd == -1) && (saved_errno == ENOENT)));
4586
4587 if (saved_errno == ENOTSUP) {
4588 /*
4589 * 3524219 NFS needs kqueue support
4590 * 4124079 VFS needs generic kqueue support
4591 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
4592 */
4593 job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);
4594
4595 if (!j->poll_for_vfs_changes) {
4596 j->poll_for_vfs_changes = true;
4597 job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
4598 }
4599 }
4600 }
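/*
 * Condensed sketch of the devnode workaround described above: never
 * hold an O_EVTONLY descriptor on a character or block device; fall
 * back to watching the parent directory and stat(2)ing the target on
 * each event. 'path' and 'parentdir' are assumed inputs.
 */
#if 0
struct stat sb;
int fd = -1;
bool watching_parent = false;

if (stat(path, &sb) == 0 && (S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode))) {
	fd = open(path, O_EVTONLY|O_NOCTTY|O_NONBLOCK);
}
if (fd == -1) {
	fd = open(parentdir, O_EVTONLY|O_NOCTTY|O_NONBLOCK);
	watching_parent = (fd != -1);
}
#endif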
4601
4602 void
4603 semaphoreitem_callback(job_t j, struct kevent *kev)
4604 {
4605 char invalidation_reason[100] = "";
4606 struct semaphoreitem *si;
4607
4608 SLIST_FOREACH(si, &j->semaphores, sle) {
4609 switch (si->why) {
4610 case PATH_CHANGES:
4611 case PATH_EXISTS:
4612 case PATH_MISSING:
4613 case DIR_NOT_EMPTY:
4614 job_log(j, LOG_DEBUG, "P%s changed (%u): %s", si->watching_parent ? "arent path" : "ath", si->why, si->what);
4615 break;
4616 default:
4617 continue;
4618 }
4619
4620 if (si->fd == (int)kev->ident) {
4621 break;
4622 }
4623 }
4624
4625 if (!job_assumes(j, si != NULL)) {
4626 return;
4627 }
4628
4629 if (NOTE_DELETE & kev->fflags) {
4630 strcat(invalidation_reason, "deleted");
4631 }
4632
4633 if (NOTE_RENAME & kev->fflags) {
4634 if (invalidation_reason[0]) {
4635 strcat(invalidation_reason, "/renamed");
4636 } else {
4637 strcat(invalidation_reason, "renamed");
4638 }
4639 }
4640
4641 if (NOTE_REVOKE & kev->fflags) {
4642 if (invalidation_reason[0]) {
4643 strcat(invalidation_reason, "/revoked");
4644 } else {
4645 strcat(invalidation_reason, "revoked");
4646 }
4647 }
4648
4649 if (invalidation_reason[0]) {
4650 job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
4651 job_assumes(j, runtime_close(si->fd) == 0);
4652 si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
4653 }
4654
4655 if( !si->watching_parent ) {
4656 if (si->why == PATH_CHANGES) {
4657 j->start_pending = true;
4658 } else {
4659 semaphoreitem_watch(j, si);
4660 }
4661 } else { /* Something happened to the parent directory. See if our target file appeared. */
4662 if( !invalidation_reason[0] ) {
4663 job_assumes(j, runtime_close(si->fd) == 0);
4664 si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
4665 semaphoreitem_watch(j, si);
4666 }
4667 /* Need to think about what should happen if the parent directory goes invalid. */
4668 }
4669
4670 job_dispatch(j, false);
4671 }
4672
4673 struct cal_dict_walk {
4674 job_t j;
4675 struct tm tmptm;
4676 };
4677
4678 void
4679 calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
4680 {
4681 struct cal_dict_walk *cdw = context;
4682 struct tm *tmptm = &cdw->tmptm;
4683 job_t j = cdw->j;
4684 int64_t val;
4685
4686 if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
4687 /* hack to let caller know something went wrong */
4688 tmptm->tm_sec = -1;
4689 return;
4690 }
4691
4692 val = launch_data_get_integer(obj);
4693
4694 if (val < 0) {
4695 job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
4696 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
4697 if( val > 59 ) {
4698 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
4699 tmptm->tm_sec = -1;
4700 } else {
4701 tmptm->tm_min = (typeof(tmptm->tm_min)) val;
4702 }
4703 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
4704 if( val > 23 ) {
4705 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
4706 tmptm->tm_sec = -1;
4707 } else {
4708 tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
4709 }
4710 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
4711 if( val < 1 || val > 31 ) {
4712 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
4713 tmptm->tm_sec = -1;
4714 } else {
4715 tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
4716 }
4717 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
4718 if( val > 7 ) {
4719 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
4720 tmptm->tm_sec = -1;
4721 } else {
4722 tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
4723 }
4724 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
4725 if( val > 12 ) {
4726 job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
4727 tmptm->tm_sec = -1;
4728 } else {
4729 tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
4730 tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
4731 }
4732 }
4733 }
4734
4735 bool
4736 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
4737 {
4738 struct cal_dict_walk cdw;
4739
4740 cdw.j = j;
4741 memset(&cdw.tmptm, 0, sizeof(cdw.tmptm)); /* sizeof(0) would zero only sizeof(int) bytes, leaving most of the struct tm uninitialized */
4742
4743 cdw.tmptm.tm_min = -1;
4744 cdw.tmptm.tm_hour = -1;
4745 cdw.tmptm.tm_mday = -1;
4746 cdw.tmptm.tm_wday = -1;
4747 cdw.tmptm.tm_mon = -1;
4748
4749 if (!job_assumes(j, obj != NULL)) {
4750 return false;
4751 }
4752
4753 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
4754 return false;
4755 }
4756
4757 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
4758
4759 if (unlikely(cdw.tmptm.tm_sec == -1)) {
4760 return false;
4761 }
4762
4763 return calendarinterval_new(j, &cdw.tmptm);
4764 }
4765
4766 bool
4767 calendarinterval_new(job_t j, struct tm *w)
4768 {
4769 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
4770
4771 if (!job_assumes(j, ci != NULL)) {
4772 return false;
4773 }
4774
4775 ci->when = *w;
4776 ci->job = j;
4777
4778 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
4779
4780 calendarinterval_setalarm(j, ci);
4781
4782 runtime_add_weak_ref();
4783
4784 return true;
4785 }
4786
4787 void
4788 calendarinterval_delete(job_t j, struct calendarinterval *ci)
4789 {
4790 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
4791 LIST_REMOVE(ci, global_sle);
4792
4793 free(ci);
4794
4795 runtime_del_weak_ref();
4796 }
4797
4798 void
4799 calendarinterval_sanity_check(void)
4800 {
4801 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
4802 time_t now = time(NULL);
4803
4804 if (unlikely(ci && (ci->when_next < now))) {
4805 jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
4806 }
4807 }
4808
4809 void
4810 calendarinterval_callback(void)
4811 {
4812 struct calendarinterval *ci, *ci_next;
4813 time_t now = time(NULL);
4814
4815 LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
4816 job_t j = ci->job;
4817
4818 if (ci->when_next > now) {
4819 break;
4820 }
4821
4822 LIST_REMOVE(ci, global_sle);
4823 calendarinterval_setalarm(j, ci);
4824
4825 j->start_pending = true;
4826 job_dispatch(j, false);
4827 }
4828 }
4829
4830 bool
4831 socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds)
4832 {
4833 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
4834
4835 if (!job_assumes(j, sg != NULL)) {
4836 return false;
4837 }
4838
4839 sg->fds = calloc(1, fd_cnt * sizeof(int));
4840 sg->fd_cnt = fd_cnt;
4841 sg->junkfds = junkfds;
4842
4843 if (!job_assumes(j, sg->fds != NULL)) {
4844 free(sg);
4845 return false;
4846 }
4847
4848 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
4849 strcpy(sg->name_init, name);
4850
4851 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
4852
4853 runtime_add_weak_ref();
4854
4855 return true;
4856 }
4857
4858 void
4859 socketgroup_delete(job_t j, struct socketgroup *sg)
4860 {
4861 unsigned int i;
4862
4863 for (i = 0; i < sg->fd_cnt; i++) {
4864 #if 0
4865 struct sockaddr_storage ss;
4866 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
4867 socklen_t ss_len = sizeof(ss);
4868
4869 /* 5480306 */
4870 if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
4871 && job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
4872 job_assumes(j, unlink(sun->sun_path) != -1);
4873 /* We might conditionally need to delete a directory here */
4874 }
4875 #endif
4876 job_assumes(j, runtime_close(sg->fds[i]) != -1);
4877 }
4878
4879 SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);
4880
4881 free(sg->fds);
4882 free(sg);
4883
4884 runtime_del_weak_ref();
4885 }
4886
4887 void
4888 socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
4889 {
4890 struct kevent kev[sg->fd_cnt];
4891 char buf[10000];
4892 unsigned int i, buf_off = 0;
4893
4894 if (unlikely(sg->junkfds)) {
4895 return;
4896 }
4897
4898 for (i = 0; i < sg->fd_cnt; i++) {
4899 EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
4900 buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
4901 }
4902
4903 job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);
4904
4905 job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);
4906
4907 for (i = 0; i < sg->fd_cnt; i++) {
4908 job_assumes(j, kev[i].flags & EV_ERROR);
4909 errno = (typeof(errno)) kev[i].data;
4910 job_assumes(j, kev[i].data == 0);
4911 }
4912 }
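/*
 * The per-event error check above presumes kevent_bulk_mod() registers
 * changes with EV_RECEIPT semantics: each change is echoed back with
 * EV_ERROR set and the per-event status in data (0 on success). Sketch
 * with plain kevent(2); 'kq', 'fd0', and 'fd1' are assumed.
 */
#if 0
struct kevent kev[2];
int i, n;

EV_SET(&kev[0], fd0, EVFILT_READ, EV_ADD|EV_RECEIPT, 0, 0, NULL);
EV_SET(&kev[1], fd1, EVFILT_READ, EV_ADD|EV_RECEIPT, 0, 0, NULL);

n = kevent(kq, kev, 2, kev, 2, NULL);
for (i = 0; i < n; i++) {
	if ((kev[i].flags & EV_ERROR) && kev[i].data != 0) {
		errno = (int)kev[i].data; /* registering kev[i] failed */
	}
}
#endif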
4913
4914 void
4915 socketgroup_ignore(job_t j, struct socketgroup *sg)
4916 {
4917 socketgroup_kevent_mod(j, sg, false);
4918 }
4919
4920 void
4921 socketgroup_watch(job_t j, struct socketgroup *sg)
4922 {
4923 socketgroup_kevent_mod(j, sg, true);
4924 }
4925
4926 void
4927 socketgroup_callback(job_t j)
4928 {
4929 job_dispatch(j, true);
4930 }
4931
4932 bool
4933 envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot)
4934 {
4935 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
4936
4937 if (!job_assumes(j, ei != NULL)) {
4938 return false;
4939 }
4940
4941 strcpy(ei->key_init, k);
4942 ei->value = ei->key_init + strlen(k) + 1;
4943 strcpy(ei->value, v);
4944 ei->one_shot = one_shot;
4945
4946 if (global) {
4947 if (SLIST_EMPTY(&j->global_env)) {
4948 LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
4949 }
4950 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
4951 } else {
4952 SLIST_INSERT_HEAD(&j->env, ei, sle);
4953 }
4954
4955 job_log(j, LOG_DEBUG, "Added environment variable: %s=%s", k, v);
4956
4957 return true;
4958 }
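/*
 * Layout note for the allocation above: one calloc() holds the struct
 * followed by "key\0value\0" (key_init being a trailing char array), so
 * a single free() releases the whole item. An equivalent sketch:
 */
#if 0
size_t ksz = strlen(k) + 1, vsz = strlen(v) + 1;
struct envitem *ei = calloc(1, sizeof(struct envitem) + ksz + vsz);

if (ei != NULL) {
	memcpy(ei->key_init, k, ksz);
	ei->value = ei->key_init + ksz;
	memcpy(ei->value, v, vsz);
}
#endif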
4959
4960 void
4961 envitem_delete(job_t j, struct envitem *ei, bool global)
4962 {
4963 if (global) {
4964 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
4965 if (SLIST_EMPTY(&j->global_env)) {
4966 LIST_REMOVE(j, global_env_sle);
4967 }
4968 } else {
4969 SLIST_REMOVE(&j->env, ei, envitem, sle);
4970 }
4971
4972 free(ei);
4973 }
4974
4975 void
4976 envitem_setup(launch_data_t obj, const char *key, void *context)
4977 {
4978 job_t j = context;
4979
4980 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
4981 return;
4982 }
4983
4984 if( strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0 ) {
4985 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, false);
4986 } else {
4987 job_log(j, LOG_DEBUG, "Ignoring reserved environment variable: %s", key);
4988 }
4989 }
4990
4991 void
4992 envitem_setup_one_shot(launch_data_t obj, const char *key, void *context)
4993 {
4994 job_t j = context;
4995
4996 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
4997 return;
4998 }
4999
5000 if( strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0 ) {
5001 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, true);
5002 } else {
5003 job_log(j, LOG_DEBUG, "Ignoring reserved environment variable: %s", key);
5004 }
5005 }
5006
5007 bool
5008 limititem_update(job_t j, int w, rlim_t r)
5009 {
5010 struct limititem *li;
5011
5012 SLIST_FOREACH(li, &j->limits, sle) {
5013 if (li->which == w) {
5014 break;
5015 }
5016 }
5017
5018 if (li == NULL) {
5019 li = calloc(1, sizeof(struct limititem));
5020
5021 if (!job_assumes(j, li != NULL)) {
5022 return false;
5023 }
5024
5025 SLIST_INSERT_HEAD(&j->limits, li, sle);
5026
5027 li->which = w;
5028 }
5029
5030 if (j->importing_hard_limits) {
5031 li->lim.rlim_max = r;
5032 li->sethard = true;
5033 } else {
5034 li->lim.rlim_cur = r;
5035 li->setsoft = true;
5036 }
5037
5038 return true;
5039 }
5040
5041 void
5042 limititem_delete(job_t j, struct limititem *li)
5043 {
5044 SLIST_REMOVE(&j->limits, li, limititem, sle);
5045
5046 free(li);
5047 }
5048
5049 #if HAVE_SANDBOX
5050 void
5051 seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
5052 {
5053 job_t j = context;
5054
5055 if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
5056 job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
5057 return;
5058 }
5059
5060 if (launch_data_get_bool(obj) == false) {
5061 return;
5062 }
5063
5064 if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
5065 j->seatbelt_flags |= SANDBOX_NAMED;
5066 }
5067 }
5068 #endif
5069
5070 void
5071 limititem_setup(launch_data_t obj, const char *key, void *context)
5072 {
5073 job_t j = context;
5074 size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
5075 rlim_t rl;
5076
5077 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
5078 return;
5079 }
5080
5081 rl = launch_data_get_integer(obj);
5082
5083 for (i = 0; i < limits_cnt; i++) {
5084 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
5085 break;
5086 }
5087 }
5088
5089 if (i == limits_cnt) {
5090 return;
5091 }
5092
5093 limititem_update(j, launchd_keys2limits[i].val, rl);
5094 }
5095
5096 bool
5097 job_useless(job_t j)
5098 {
5099 if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
5100 if (j->legacy_LS_job && j->j_port) {
5101 return false;
5102 }
5103 job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
5104 return true;
5105 } else if (j->removal_pending) {
5106 job_log(j, LOG_DEBUG, "Exited while removal was pending.");
5107 return true;
5108 } else if (j->mgr->shutting_down && (j->hopefully_exits_first || j->mgr->hopefully_first_cnt == 0)) {
5109 job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
5110 if( total_children == 0 && !j->anonymous ) {
5111 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last (non-anonymous) to exit during %s shutdown.", (pid1_magic && j->mgr == root_jobmgr) ? "system" : "job manager");
5112 } else if( total_anon_children == 0 && j->anonymous ) {
5113 job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last (anonymous) to exit during %s shutdown.", (pid1_magic && j->mgr == root_jobmgr) ? "system" : "job manager");
5114 }
5115 return true;
5116 } else if (j->legacy_mach_job) {
5117 if (SLIST_EMPTY(&j->machservices)) {
5118 job_log(j, LOG_INFO, "Garbage collecting");
5119 return true;
5120 } else if (!j->checkedin) {
5121 job_log(j, LOG_WARNING, "Failed to check-in!");
5122 return true;
5123 }
5124 }
5125
5126 return false;
5127 }
5128
5129 bool
5130 job_keepalive(job_t j)
5131 {
5132 mach_msg_type_number_t statusCnt;
5133 mach_port_status_t status;
5134 struct semaphoreitem *si;
5135 struct machservice *ms;
5136 struct stat sb;
5137 bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
5138 bool is_not_kextd = (do_apple_internal_logging || (strcmp(j->label, "com.apple.kextd") != 0));
5139
5140 if (unlikely(j->mgr->shutting_down)) {
5141 return false;
5142 }
5143
5144 /*
5145 * 5066316
5146 *
5147 * We definitely need to revisit this after Leopard ships. Please see
5148 * launchctl.c for the other half of this hack.
5149 */
5150 if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
5151 return false;
5152 }
5153
5154 if( unlikely(j->needs_kickoff) ) {
5155 job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
5156 return false;
5157 }
5158
5159 if (j->start_pending) {
5160 job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
5161 return true;
5162 }
5163
5164 if (!j->ondemand) {
5165 job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
5166 return true;
5167 }
5168
5169 SLIST_FOREACH(ms, &j->machservices, sle) {
5170 statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
5171 if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
5172 (mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
5173 continue;
5174 }
5175 if (status.mps_msgcount) {
5176 job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
5177 status.mps_msgcount, ms->name);
5178 return true;
5179 }
5180 }
5181
5182
5183 SLIST_FOREACH(si, &j->semaphores, sle) {
5184 bool wanted_state = false;
5185 int qdir_file_cnt;
5186 job_t other_j;
5187
5188 switch (si->why) {
5189 case NETWORK_UP:
5190 wanted_state = true; /* fall through */
5191 case NETWORK_DOWN:
5192 if (network_up == wanted_state) {
5193 job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
5194 return true;
5195 }
5196 break;
5197 case SUCCESSFUL_EXIT:
5198 wanted_state = true; /* fall through */
5199 case FAILED_EXIT:
5200 if (good_exit == wanted_state) {
5201 job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
5202 return true;
5203 }
5204 break;
5205 case OTHER_JOB_ENABLED:
5206 wanted_state = true; /* fall through */
5207 case OTHER_JOB_DISABLED:
5208 if ((bool)job_find(si->what) == wanted_state) {
5209 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
5210 return true;
5211 }
5212 break;
5213 case OTHER_JOB_ACTIVE:
5214 wanted_state = true; /* fall through */
5215 case OTHER_JOB_INACTIVE:
5216 if ((other_j = job_find(si->what))) {
5217 if ((bool)other_j->p == wanted_state) {
5218 job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
5219 return true;
5220 }
5221 }
5222 break;
5223 case PATH_EXISTS:
5224 wanted_state = true; /* fall through */
5225 case PATH_MISSING:
5226 if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
5227 job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
5228 return true;
5229 } else {
5230 if( wanted_state ) { /* File is not there but we wish it was. */
5231 if( si->fd != -1 && !si->watching_parent ) { /* Need to be watching the parent now. */
5232 job_assumes(j, runtime_close(si->fd) == 0);
5233 si->fd = -1;
5234 semaphoreitem_watch(j, si);
5235 }
5236 } else { /* File is there but we wish it wasn't. */
5237 if( si->fd != -1 && si->watching_parent ) { /* Need to watch the file now. */
5238 job_assumes(j, runtime_close(si->fd) == 0);
5239 si->fd = -1;
5240 semaphoreitem_watch(j, si);
5241 }
5242 }
5243 }
5244 break;
5245 case PATH_CHANGES:
5246 break;
5247 case DIR_NOT_EMPTY:
5248 if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
5249 job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
5250 } else if (qdir_file_cnt > 0) {
5251 job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
5252 return true;
5253 }
5254 break;
5255 }
5256 }
5257
5258 return false;
5259 }
5260
5261 const char *
5262 job_active(job_t j)
5263 {
5264 struct machservice *ms;
5265 if (j->p) {
5266 return "PID is still valid";
5267 }
5268
5269 if (j->mgr->shutting_down && j->log_redirect_fd) {
5270 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5271 j->log_redirect_fd = 0;
5272 }
5273
5274 if (j->log_redirect_fd) {
5275 if (job_assumes(j, j->legacy_LS_job)) {
5276 return "Standard out/error is still valid";
5277 } else {
5278 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5279 j->log_redirect_fd = 0;
5280 }
5281 }
5282
5283 if (j->priv_port_has_senders) {
5284 return "Privileged Port still has outstanding senders";
5285 }
5286
5287 SLIST_FOREACH(ms, &j->machservices, sle) {
5288 if (ms->recv && machservice_active(ms)) {
5289 return "Mach service is still active";
5290 }
5291 }
5292
5293 return NULL;
5294 }
5295
5296 void
5297 machservice_watch(job_t j, struct machservice *ms)
5298 {
5299 if (ms->recv) {
5300 job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
5301 }
5302 }
5303
5304 void
5305 machservice_ignore(job_t j, struct machservice *ms)
5306 {
5307 job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
5308 }
5309
5310 void
5311 machservice_resetport(job_t j, struct machservice *ms)
5312 {
5313 LIST_REMOVE(ms, port_hash_sle);
5314 job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
5315 job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
5316 ms->gen_num++;
5317 job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
5318 job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
5319 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
5320 }
5321
5322 struct machservice *
5323 machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
5324 {
5325 struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
5326
5327 if (!job_assumes(j, ms != NULL)) {
5328 return NULL;
5329 }
5330
5331 strcpy((char *)ms->name, name);
5332 ms->job = j;
5333 ms->gen_num = 1;
5334 ms->per_pid = pid_local;
5335
5336 if (likely(*serviceport == MACH_PORT_NULL)) {
5337 if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
5338 goto out_bad;
5339 }
5340
5341 if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
5342 goto out_bad2;
5343 }
5344 *serviceport = ms->port;
5345 ms->recv = true;
5346 } else {
5347 ms->port = *serviceport;
5348 ms->isActive = true;
5349 }
5350
5351 SLIST_INSERT_HEAD(&j->machservices, ms, sle);
5352
5353 jobmgr_t jm_to_insert = j->mgr;
5354 if( g_flat_mach_namespace ) {
5355 jm_to_insert = (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? j->mgr : root_jobmgr;
5356 }
5357
5358 LIST_INSERT_HEAD(&jm_to_insert->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
5359 LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
5360
5361 job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);
5362
5363 return ms;
5364 out_bad2:
5365 job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
5366 out_bad:
5367 free(ms);
5368 return NULL;
5369 }
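/*
 * For orientation: the launchd_mport_create_recv()/make_send() pair
 * used above presumably reduces to the standard Mach calls below (an
 * assumed equivalence, shown for illustration only).
 */
#if 0
mach_port_t port = MACH_PORT_NULL;
kern_return_t kr;

kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
if (kr == KERN_SUCCESS) {
	/* Add a send right so the port can be handed out to clients. */
	kr = mach_port_insert_right(mach_task_self(), port, port,
	    MACH_MSG_TYPE_MAKE_SEND);
}
#endif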
5370
5371 bootstrap_status_t
5372 machservice_status(struct machservice *ms)
5373 {
5374 if (ms->isActive) {
5375 return BOOTSTRAP_STATUS_ACTIVE;
5376 } else if (ms->job->ondemand) {
5377 return BOOTSTRAP_STATUS_ON_DEMAND;
5378 } else {
5379 return BOOTSTRAP_STATUS_INACTIVE;
5380 }
5381 }
5382
5383 void
5384 job_setup_exception_port(job_t j, task_t target_task)
5385 {
5386 struct machservice *ms;
5387 thread_state_flavor_t f = 0;
5388 mach_port_t exc_port = the_exception_server;
5389
5390 if (unlikely(j->alt_exc_handler)) {
5391 ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
5392 if (likely(ms)) {
5393 exc_port = machservice_port(ms);
5394 } else {
5395 job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
5396 }
5397 } else if (unlikely(j->internal_exc_handler)) {
5398 exc_port = runtime_get_kernel_port();
5399 } else if (unlikely(!exc_port)) {
5400 return;
5401 }
5402
5403 #if defined (__ppc__) || defined(__ppc64__)
5404 f = PPC_THREAD_STATE64;
5405 #elif defined(__i386__) || defined(__x86_64__)
5406 f = x86_THREAD_STATE;
5407 #elif defined(__arm__)
5408 f = ARM_THREAD_STATE;
5409 #else
5410 #error "unknown architecture"
5411 #endif
5412
5413 if (likely(target_task)) {
5414 job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
5415 } else if (pid1_magic && the_exception_server) {
5416 mach_port_t mhp = mach_host_self();
5417 job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
5418 job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
5419 }
5420 }
5421
5422 void
5423 job_set_exception_port(job_t j, mach_port_t port)
5424 {
5425 if (unlikely(!the_exception_server)) {
5426 the_exception_server = port;
5427 job_setup_exception_port(j, 0);
5428 } else {
5429 job_log(j, LOG_WARNING, "The exception server is already claimed!");
5430 }
5431 }
5432
5433 void
5434 machservice_setup_options(launch_data_t obj, const char *key, void *context)
5435 {
5436 struct machservice *ms = context;
5437 mach_port_t mhp = mach_host_self();
5438 int which_port;
5439 bool b;
5440
5441 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
5442 return;
5443 }
5444
5445 switch (launch_data_get_type(obj)) {
5446 case LAUNCH_DATA_INTEGER:
5447 which_port = (int)launch_data_get_integer(obj); /* XXX we should bound check this... */
5448 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
5449 switch (which_port) {
5450 case TASK_KERNEL_PORT:
5451 case TASK_HOST_PORT:
5452 case TASK_NAME_PORT:
5453 case TASK_BOOTSTRAP_PORT:
5454 /* I find it a little odd that zero isn't reserved in the header.
5455 * Normally Mach is fairly good about this convention... */
5456 case 0:
5457 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
5458 break;
5459 default:
5460 ms->special_port_num = which_port;
5461 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
5462 break;
5463 }
5464 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
5465 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
5466 job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
5467 } else {
5468 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
5469 }
5470 }
break; /* do not fall through from the integer case into the boolean handling */
5471 case LAUNCH_DATA_BOOL:
5472 b = launch_data_get_bool(obj);
5473 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
5474 ms->debug_on_close = b;
5475 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
5476 ms->reset = b;
5477 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
5478 ms->hide = b;
5479 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
5480 job_set_exception_port(ms->job, ms->port);
5481 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
5482 ms->kUNCServer = b;
5483 job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
5484 }
5485 break;
5486 case LAUNCH_DATA_STRING:
5487 if( strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0 ) {
5488 const char *option = launch_data_get_string(obj);
5489 if( strcasecmp(option, "One") == 0 ) {
5490 ms->drain_one_on_crash = true;
5491 } else if( strcasecmp(option, "All") == 0 ) {
5492 ms->drain_all_on_crash = true;
5493 }
5494 }
5495 break;
5496 case LAUNCH_DATA_DICTIONARY:
5497 job_set_exception_port(ms->job, ms->port);
5498 break;
5499 default:
5500 break;
5501 }
5502
5503 job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
5504 }
5505
5506 void
5507 machservice_setup(launch_data_t obj, const char *key, void *context)
5508 {
5509 job_t j = context;
5510 struct machservice *ms;
5511 mach_port_t p = MACH_PORT_NULL;
5512
5513 if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
5514 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
5515 return;
5516 }
5517
5518 if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
5519 return;
5520 }
5521
5522 ms->isActive = false;
5523
5524 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
5525 launch_data_dict_iterate(obj, machservice_setup_options, ms);
5526 }
5527 }
5528
5529 jobmgr_t
5530 jobmgr_do_garbage_collection(jobmgr_t jm)
5531 {
5532 jobmgr_t jmi = NULL, jmn = NULL;
5533 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
5534 jobmgr_do_garbage_collection(jmi);
5535 }
5536
5537 if( !jm->shutting_down ) {
5538 return jm;
5539 }
5540
5541 if( SLIST_EMPTY(&jm->submgrs) ) {
5542 jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
5543 } else {
5544 jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
5545 }
5546
5547 int phase = -1;
5548 for( phase = jm->shutdown_phase; phase < JOBMGR_PHASE_LAST; phase++ ) {
5549 if( phase == JOBMGR_PHASE_HOPEFULLY_EXITS_LAST ) {
5550 if( jm == root_jobmgr ) {
5551 simulate_pid1_crash();
5552 }
5553
5554 if( jm == root_jobmgr && pid1_magic && !jm->killed_stray_jobs ) {
5555 jobmgr_log_stray_children(jm, true);
5556 jm->killed_stray_jobs = true;
5557 }
5558 }
5559
5560 uint32_t unkilled_cnt = 0;
5561 job_t ji = NULL, jn = NULL;
5562 LIST_FOREACH_SAFE( ji, &jm->jobs, sle, jn ) {
5563 if( phase == JOBMGR_PHASE_HOPEFULLY_EXITS_FIRST && !ji->hopefully_exits_first ) {
5564 continue;
5565 } else if( phase == JOBMGR_PHASE_NORMAL ) {
5566 if( ji->migratory ) {
5567 /* If we're shutting down, release the hold migratory jobs
5568 * have on us.
5569 */
5570 job_remove(ji);
5571 }
5572
5573 if( ji->hopefully_exits_first || ji->hopefully_exits_last ) {
5574 continue;
5575 }
5576 } else if( phase == JOBMGR_PHASE_HOPEFULLY_EXITS_LAST && !ji->hopefully_exits_last ) {
5577 continue;
5578 }
5579
5580 if( ji->anonymous ) {
5581 continue;
5582 }
5583
5584 const char *active = job_active(ji);
5585 if( !active ) {
5586 job_log(ji, LOG_DEBUG, "Job is inactive. Removing.");
5587 job_remove(ji);
5588 } else {
5589 if( ji->p ) {
5590 if( !ji->stopped ) {
5591 job_log(ji, LOG_DEBUG, "Stopping job.");
5592 job_stop(ji);
5593 unkilled_cnt++;
5594 } else {
5595 if( ji->clean_kill ) {
5596 job_log(ji, LOG_DEBUG, "Job was clean and sent SIGKILL.");
5597 if( !ji->clean_exit_timer_expired ) {
5598 /* Give jobs that were clean and sent SIGKILL 1 second to exit after receipt. */
5599 unkilled_cnt++;
5600 } else {
5601 job_log(ji, LOG_ERR, "Job was clean, killed and has not exited after 1 second. Moving on.");
5602 }
5603 } else {
5604 job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
5605 unkilled_cnt += !ji->sent_sigkill;
5606 }
5607 }
5608 } else {
5609 job_log(ji, LOG_DEBUG, "Job is active: %s", active);
5610 }
5611 }
5612 } /* LIST_FOREACH_SAFE */
5613
5614 if( unkilled_cnt == 0 ) {
5615 jobmgr_log(jm, LOG_DEBUG, "Done with the %s bucket, advancing.", s_phases[jm->shutdown_phase]);
5616 jm->shutdown_phase++;
5617 } else {
5618 jobmgr_log(jm, LOG_DEBUG, "Still %u unkilled job%s in %s bucket.", unkilled_cnt, unkilled_cnt > 1 ? "s" : "", s_phases[jm->shutdown_phase]);
5619 phase = JOBMGR_PHASE_LAST;
5620 }
5621 } /* for */
5622
5623 jobmgr_t r = jm;
5624 if( jm->shutdown_phase > JOBMGR_PHASE_HOPEFULLY_EXITS_LAST && SLIST_EMPTY(&jm->submgrs) ) {
5625 jobmgr_log(jm, LOG_DEBUG, "Removing.");
5626 jobmgr_log_stray_children(jm, false);
5627 jobmgr_remove(jm);
5628 r = NULL;
5629 }
5630
5631 return r;
5632 }
5633
5634 void
5635 jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
5636 {
5637 #if 1
5638 /* I maintain that stray processes should be at the mercy of launchd during shutdown,
5639 * but nevertheless, things like diskimages-helper can stick around, and SIGKILLing
5640 * them can result in data loss. So we send SIGTERM to all the strays and don't wait
5641 * for them to exit before moving on.
5642 *
5643 * See rdar://problem/6562592
5644 */
5645 size_t i = 0;
5646 for( i = 0; i < np; i++ ) {
5647 if( p[i] != 0 ) {
5648 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
5649 jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
5650 }
5651 }
5652 #else
5653 struct timespec tts = { 2, 0 }; /* Wait 2 seconds for stray children to die after being SIGTERM'ed. */
5654 struct timespec kts = { 1, 0 }; /* Wait 1 second for stray children to die after being SIGKILL'ed. */
5655 uint64_t start, end, nanosec;
5656 struct kevent kev;
5657 int r, kq = kqueue();
5658
5659 if (!jobmgr_assumes(jm, kq != -1)) {
5660 return;
5661 }
5662
5663 start = runtime_get_opaque_time();
5664 size_t i = 0, n2t = 0;
5665 for( i = 0; i < np; i++ ) {
5666 if( p[i] != 0 ) {
5667 EV_SET(&kev, p[i], EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, 0);
5668
5669 if( jobmgr_assumes(jm, kevent(kq, &kev, 1, NULL, 0, NULL) != -1) ) {
5670 jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
5671 n2t++;
5672 } else {
5673 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Disregarding PID %u and continuing.", p[i]);
5674 p[i] = 0;
5675 }
5676 }
5677 }
5678
5679 while( n2t > 0 && (r = kevent(kq, NULL, 0, &kev, 1, &tts)) ) {
5680 int status = 0;
5681 waitpid((pid_t)kev.ident, &status, WNOHANG);
5682
5683 end = runtime_get_opaque_time();
5684 nanosec = runtime_opaque_time_to_nano(end - start);
5685 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "PID %u died after %llu nanoseconds.", (pid_t)kev.ident, nanosec);
5686
5687 for( i = 0; i < np; i++ ) {
5688 p[i] = ( p[i] == (pid_t)kev.ident ) ? 0 : p[i];
5689 }
5690 }
5691
5692 size_t n2k = 0;
5693 for( i = 0; i < np; i++ ) {
5694 if( p[i] != 0 ) {
5695 jobmgr_assumes(jm, runtime_kill(p[i], SIGKILL) != -1);
5696 n2k++;
5697 }
5698 }
5699
5700 while( n2k > 0 && (r = kevent(kq, NULL, 0, &kev, 1, &kts)) ) {
5701 int status = 0;
5702 waitpid((pid_t)kev.ident, &status, WNOHANG);
5703
5704 end = runtime_get_opaque_time();
5705 nanosec = runtime_opaque_time_to_nano(end - start);
5706 jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "PID %u was killed and died after %llu nanoseconds.", (pid_t)kev.ident, nanosec);
5707
5708 for( i = 0; i < np; i++ ) {
5709 p[i] = ( p[i] == (pid_t)kev.ident ) ? 0 : p[i];
5710 }
5711 }
5712
5713 for( i = 0; i < np; i++ ) {
5714 if( p[i] != 0 ) {
5715 jobmgr_log(jm, LOG_NOTICE | LOG_CONSOLE, "PID %u did not die after being SIGKILL'ed 1 second ago.", p[i]);
5716 }
5717 }
5718 #endif
5719 }
5720
5721 void
5722 jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
5723 {
5724 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
5725 size_t i, kp_cnt = 0, kp_skipped = 0, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
5726 struct kinfo_proc *kp;
5727
5728 if (likely(jm->parentmgr || !pid1_magic)) {
5729 return;
5730 }
5731
5732 if (!jobmgr_assumes(jm, (kp = malloc(len)) != NULL)) {
5733 return;
5734 }
5735
5736 runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
5737
5738 if (!jobmgr_assumes(jm, sysctl(mib, 3, kp, &len, NULL, 0) != -1)) {
5739 goto out;
5740 }
5741
5742 kp_cnt = len / sizeof(struct kinfo_proc);
5743 pid_t *ps = (pid_t *)calloc(kp_cnt, sizeof(pid_t));
if (!jobmgr_assumes(jm, ps != NULL)) {
goto out;
}
5744
5745 for (i = 0; i < kp_cnt; i++) {
5746 pid_t p_i = kp[i].kp_proc.p_pid;
5747 pid_t pp_i = kp[i].kp_eproc.e_ppid;
5748 pid_t pg_i = kp[i].kp_eproc.e_pgid;
5749 const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
5750 const char *n = kp[i].kp_proc.p_comm;
5751
5752 if (unlikely(p_i == 0 || p_i == 1)) {
5753 kp_skipped++;
5754 continue;
5755 }
5756
5757 /* Some jobs may still be hanging around; we've decided to shut down in spite of them. */
5758 job_t j = jobmgr_find_by_pid(jm, p_i, false);
5759 if( !j || j->anonymous ) {
5760 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
5761
5762 int status = 0;
5763 if( pp_i == getpid() && !jobmgr_assumes(jm, kp[i].kp_proc.p_stat != SZOMB) ) {
5764 if( jobmgr_assumes(jm, waitpid(p_i, &status, WNOHANG) == 0) ) {
5765 jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
5766 }
5767 kp_skipped++;
5768 } else {
5769 job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
5770 /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
5771 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
5772 * hints to the kernel along the way, so that it could shutdown certain subsystems when
5773 * their userspace emissaries go away, before the call to reboot(2).
5774 */
5775 if( leader && leader->ignore_pg_at_shutdown ) {
5776 kp_skipped++;
5777 } else {
5778 ps[i] = p_i;
5779 }
5780 }
5781 } else {
5782 kp_skipped++;
5783 }
5784 }
5785
5786 if( (kp_cnt - kp_skipped > 0) && kill_strays ) {
5787 jobmgr_kill_stray_children(jm, ps, kp_cnt);
5788 }
5789
5790 free(ps);
5791 out:
5792 free(kp);
5793 }
5794
5795 jobmgr_t
5796 jobmgr_parent(jobmgr_t jm)
5797 {
5798 return jm->parentmgr;
5799 }
5800
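/* The child side of this handshake is assumed (it is not shown in this file) to
 * block in read(2) on the other end of fork_fd until the parent has registered
 * its kevent, roughly:
 *
 *	pid_t ignored;
 *	read(fork_fd, &ignored, sizeof(ignored));	// parks until the parent uncorks us
 */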
5801 void
5802 job_uncork_fork(job_t j)
5803 {
5804 pid_t c = j->p;
5805
5806 job_log(j, LOG_DEBUG, "Uncorking the fork().");
5807 /* this unblocks the child and avoids a race
5808 * between the above fork() and the kevent_mod() */
5809 job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
5810 job_assumes(j, runtime_close(j->fork_fd) != -1);
5811 j->fork_fd = 0;
5812 }
5813
5814 jobmgr_t
5815 jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t session_port)
5816 {
5817 mach_msg_size_t mxmsgsz;
5818 job_t bootstrapper = NULL;
5819 jobmgr_t jmr;
5820
5821 launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);
5822
5823 if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
5824 jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
5825 return NULL;
5826 }
5827
5828 jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));
5829
5830 if (!jobmgr_assumes(jm, jmr != NULL)) {
5831 return NULL;
5832 }
5833
5834 jmr->kqjobmgr_callback = jobmgr_callback;
5835 strcpy(jmr->name_init, name ? name : "Under construction");
5836
5837 jmr->req_port = requestorport;
5838
5839 if ((jmr->parentmgr = jm)) {
5840 SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
5841 }
5842
5843 if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
5844 goto out_bad;
5845 }
5846
5847 if (transfer_port != MACH_PORT_NULL) {
5848 jobmgr_assumes(jmr, jm != NULL);
5849 jmr->jm_port = transfer_port;
5850 } else if (!jm && !pid1_magic) {
5851 char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
5852 name_t service_buf;
5853
5854 snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());
5855
5856 if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
5857 goto out_bad;
5858 }
5859
5860 if (trusted_fd) {
5861 int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);
5862
5863 if ((dfd = dup(lfd)) >= 0) {
5864 jobmgr_assumes(jmr, runtime_close(dfd) != -1);
5865 jobmgr_assumes(jmr, runtime_close(lfd) != -1);
5866 }
5867
5868 unsetenv(LAUNCHD_TRUSTED_FD_ENV);
5869 }
5870
5871 /* cut off the Libc cache, we don't want to deadlock against ourselves */
5872 inherited_bootstrap_port = bootstrap_port;
5873 bootstrap_port = MACH_PORT_NULL;
5874 launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);
5875
5876 /* We set this explicitly as we start each child */
5877 launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
5878 } else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
5879 goto out_bad;
5880 }
5881
5882 if (!name) {
5883 sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
5884 }
5885
5886 /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
5887 mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
5888 if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
5889 mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
5890 }
5891
5892 if (!jm) {
5893 jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
5894 jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
5895 jobmgr_assumes(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
5896 jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
5897 }
5898
5899 if (name && !no_init) {
5900 bootstrapper = jobmgr_init_session(jmr, name, sflag);
5901 }
5902
5903 if (!bootstrapper || !bootstrapper->weird_bootstrap) {
5904 if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
5905 goto out_bad;
5906 }
5907 }
5908
5909 STAILQ_INIT(&jmr->pending_samples);
5910
5911 jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");
5912
5913 if (bootstrapper) {
5914 bootstrapper->audit_session = session_port;
5915 if( session_port != MACH_PORT_NULL ) {
5916 mach_port_mod_refs(mach_task_self(), session_port, MACH_PORT_RIGHT_SEND, 1);
5917 }
5918
5919 jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", session_port);
5920 jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
5921 }
5922
5923 if (jmr->parentmgr) {
5924 runtime_add_weak_ref();
5925 }
5926
5927 return jmr;
5928
5929 out_bad:
5930 if (jmr) {
5931 jobmgr_remove(jmr);
5932 }
5933 return NULL;
5934 }
5935
5936 job_t
5937 jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
5938 {
5939 const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
5940 char thelabel[1000];
5941 job_t bootstrapper;
5942
5943 snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
5944 bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
5945
5946 if( jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic) ) {
5947 bootstrapper->is_bootstrapper = true;
5948 char buf[100];
5949
5950 /* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
5951 snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
5952 envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false, false);
5953 bootstrapper->weird_bootstrap = true;
5954 jobmgr_assumes(jm, job_setup_machport(bootstrapper));
5955 } else if( bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0 ) {
5956 bootstrapper->is_bootstrapper = true;
5957 if( jobmgr_assumes(jm, pid1_magic) ) {
5958 /* Have our system bootstrapper print out to the console. */
5959 bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
5960 bootstrapper->stderrpath = strdup(_PATH_CONSOLE);
5961
5962 if( g_console ) {
5963 jobmgr_assumes(jm, kevent_mod((uintptr_t)fileno(g_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
5964 }
5965 }
5966 }
5967
5968 jm->session_initialized = true;
5969
5970 return bootstrapper;
5971 }
5972
5973 jobmgr_t
5974 jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
5975 {
5976 struct machservice *ms, *next_ms;
5977 jobmgr_t jmi, jmn;
5978
5979 /* Mach ports, unlike Unix descriptors, are reference counted. In other
5980 * words, when some program hands us a second or subsequent send right
5981 * to a port we already have open, the Mach kernel gives us the same
5982 port number back and increments a reference count associated with
5983 * the port. This forces us, when discovering that a receive right at
5984 * the other end has been deleted, to wander all of our objects to see
5985 * what weird places clients might have handed us the same send right
5986 * to use.
5987 */
5988
5989 if (jm == root_jobmgr) {
5990 if (port == inherited_bootstrap_port) {
5991 jobmgr_assumes(jm, launchd_mport_deallocate(port) == KERN_SUCCESS);
5992 inherited_bootstrap_port = MACH_PORT_NULL;
5993
5994 return jobmgr_shutdown(jm);
5995 }
5996
5997 LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
5998 if (ms->port == port && !ms->recv) {
5999 machservice_delete(ms->job, ms, true);
6000 }
6001 }
6002 }
6003
6004 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6005 jobmgr_delete_anything_with_port(jmi, port);
6006 }
6007
6008 if (jm->req_port == port) {
6009 jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
6010 return jobmgr_shutdown(jm);
6011 }
6012
6013 return jm;
6014 }
6015
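/* Lookup order, as implemented below: (1) for per-PID services, find the job
 * that owns target_pid (falling back to a deep traversal from the root) and
 * scan its machservices; (2) otherwise search the name hash of either the root
 * manager (flat namespace) or this manager; (3) finally recurse into the
 * parent manager when check_parent is set. */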
6016 struct machservice *
6017 jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
6018 {
6019 struct machservice *ms;
6020 job_t target_j;
6021
6022 jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);
6023
6024 if (target_pid) {
6025 /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
6026 * bootstrap in other bootstraps.
6027 */
6028
6029 /* Start in the given bootstrap. */
6030 if( unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL) ) {
6031 /* If we fail, do a deep traversal. */
6032 if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
6033 jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
6034 return NULL;
6035 }
6036 }
6037
6038 SLIST_FOREACH(ms, &target_j->machservices, sle) {
6039 if (ms->per_pid && strcmp(name, ms->name) == 0) {
6040 return ms;
6041 }
6042 }
6043
6044 job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
6045 return NULL;
6046 }
6047
6048 jobmgr_t jm_to_search = ( g_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ) ? root_jobmgr : jm;
6049 LIST_FOREACH(ms, &jm_to_search->ms_hash[hash_ms(name)], name_hash_sle) {
6050 if (!ms->per_pid && strcmp(name, ms->name) == 0) {
6051 return ms;
6052 }
6053 }
6054
6055 if (jm->parentmgr == NULL || !check_parent) {
6056 return NULL;
6057 }
6058
6059 return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
6060 }
6061
6062 mach_port_t
6063 machservice_port(struct machservice *ms)
6064 {
6065 return ms->port;
6066 }
6067
6068 job_t
6069 machservice_job(struct machservice *ms)
6070 {
6071 return ms->job;
6072 }
6073
6074 bool
6075 machservice_hidden(struct machservice *ms)
6076 {
6077 return ms->hide;
6078 }
6079
6080 bool
6081 machservice_active(struct machservice *ms)
6082 {
6083 return ms->isActive;
6084 }
6085
6086 const char *
6087 machservice_name(struct machservice *ms)
6088 {
6089 return ms->name;
6090 }
6091
6092 void
6093 machservice_drain_port(struct machservice *ms)
6094 {
6095 bool drain_one = ms->drain_one_on_crash;
6096 bool drain_all = ms->drain_all_on_crash;
6097
6098 if( !job_assumes(ms->job, drain_one || drain_all) ) {
6099 return;
6100 }
6101
6102 job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);
6103
6104 char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
6105 char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
6106 mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
6107 mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;
6108
6109 mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
6110
6111 do {
6112 /* This should be a direct check on the Mach service to see if it's an exception-handling
6113 * port, and it will break things if ReportCrash or SafetyNet start advertising other
6114 * Mach services. But for now, it should be okay.
6115 */
6116 if( ms->job->alt_exc_handler || ms->job->internal_exc_handler ) {
6117 mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
6118 } else {
6119 mach_msg_options_t options = MACH_RCV_MSG |
6120 MACH_RCV_TIMEOUT;
6121
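/* A timeout argument of 0 together with MACH_RCV_TIMEOUT (below) makes
 * mach_msg() a non-blocking poll: each pass either destroys one queued
 * message or returns MACH_RCV_TIMED_OUT, which ends the drain-all loop. */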
6122 mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
6123 switch( mr ) {
6124 case MACH_MSG_SUCCESS :
6125 mach_msg_destroy((mach_msg_header_t *)req_hdr);
6126 break;
6127 case MACH_RCV_TIMED_OUT :
6128 break;
6129 case MACH_RCV_TOO_LARGE :
6130 runtime_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
6131 break;
6132 default :
6133 break;
6134 }
6135 }
6136 } while( drain_all && mr != MACH_RCV_TIMED_OUT );
6137 }
6138
6139 void
6140 machservice_delete(job_t j, struct machservice *ms, bool port_died)
6141 {
6142 if (unlikely(ms->debug_on_close)) {
6143 job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
6144 job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
6145 }
6146
6147 if (ms->recv && job_assumes(j, !machservice_active(ms))) {
6148 job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
6149 job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
6150 }
6151
6152 job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
6153
6154 if (unlikely(ms->port == the_exception_server)) {
6155 the_exception_server = 0;
6156 }
6157
6158 job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);
6159
6160 if (ms->special_port_num) {
6161 SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
6162 }
6163
6164 SLIST_REMOVE(&j->machservices, ms, machservice, sle);
6165 LIST_REMOVE(ms, name_hash_sle);
6166 LIST_REMOVE(ms, port_hash_sle);
6167
6168 free(ms);
6169 }
6170
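/* The notification choice below follows Mach semantics: when we hold the
 * receive right (ms->recv), MACH_NOTIFY_PORT_DESTROYED hands the receive right
 * back to us if the checked-in job dies; for ports we only hold a send right
 * to, MACH_NOTIFY_DEAD_NAME tells us the receive right was destroyed
 * elsewhere. */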
6171 void
6172 machservice_request_notifications(struct machservice *ms)
6173 {
6174 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
6175
6176 ms->isActive = true;
6177
6178 if (ms->recv) {
6179 which = MACH_NOTIFY_PORT_DESTROYED;
6180 job_checkin(ms->job);
6181 }
6182
6183 job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
6184 }
6185
6186 #define NELEM(x) (sizeof(x)/sizeof(x[0]))
6187 #define END_OF(x) (&(x)[NELEM(x)])
6188
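/* mach_cmd2argv() tokenizes on whitespace, honors double-quoted arguments, and
 * lets backslash escape the next character. An illustrative call (not from the
 * original source):
 *
 *	mach_cmd2argv("/usr/libexec/foo -d \"an arg\"")
 *
 * yields { "/usr/libexec/foo", "-d", "an arg", NULL }, with the strings packed
 * into the same heap block as the vector so one free() releases everything. */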
6189 char **
6190 mach_cmd2argv(const char *string)
6191 {
6192 char *argv[100], args[1000];
6193 const char *cp;
6194 char *argp = args, term, **argv_ret, *co;
6195 unsigned int nargs = 0, i;
6196
6197 for (cp = string; *cp;) {
6198 while (isspace(*cp))
6199 cp++;
6200 term = (*cp == '"') ? *cp++ : '\0';
6201 if (nargs < NELEM(argv)) {
6202 argv[nargs++] = argp;
6203 }
6204 while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args)) {
6205 if (*cp == '\\') {
6206 cp++;
6207 }
6208 *argp++ = *cp;
6209 if (*cp) {
6210 cp++;
6211 }
6212 }
6213 *argp++ = '\0';
6214 }
6215 argv[nargs] = NULL;
6216
6217 if (nargs == 0) {
6218 return NULL;
6219 }
6220
6221 argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);
6222
6223 if (!launchd_assumes(argv_ret != NULL)) {
6224 return NULL;
6225 }
6226
6227 co = (char *)argv_ret + (nargs + 1) * sizeof(char *);
6228
6229 for (i = 0; i < nargs; i++) {
6230 strcpy(co, argv[i]);
6231 argv_ret[i] = co;
6232 co += strlen(argv[i]) + 1;
6233 }
6234 argv_ret[i] = NULL;
6235
6236 return argv_ret;
6237 }
6238
6239 void
6240 job_checkin(job_t j)
6241 {
6242 j->checkedin = true;
6243 }
6244
6245 bool
job_is_god(job_t j)
6246 {
6247 return j->embedded_special_privileges;
6248 }
6249
6250 bool
6251 job_ack_port_destruction(mach_port_t p)
6252 {
6253 struct machservice *ms;
6254 job_t j;
6255
6256 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
6257 if (ms->recv && (ms->port == p)) {
6258 break;
6259 }
6260 }
6261
6262 if (!jobmgr_assumes(root_jobmgr, ms != NULL)) {
6263 return false;
6264 }
6265
6266 j = ms->job;
6267
6268 jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
6269
6270 /* Without being the exception handler, NOTE_EXIT is our only way to tell if the job
6271 * crashed, and we can't rely on NOTE_EXIT always being processed after all the job's
6272 * receive rights have been returned.
6273 *
6274 * So when we get receive rights back, check to see if the job has been reaped yet. If
6275 * not, then we add this service to a list of services to be drained on crash if it's
6276 * requested that behavior. So, for a job with N receive rights all requesting that they
6277 * be drained on crash, we can safely handle the following sequence of events.
6278 *
6279 * ReceiveRight0Returned
6280 * ReceiveRight1Returned
6281 * ReceiveRight2Returned
6282 * NOTE_EXIT (reap, get exit status)
6283 * ReceiveRight3Returned
6284 * .
6285 * .
6286 * .
6287 * ReceiveRight(N - 1)Returned
6288 */
6289
6290 if( ms->drain_one_on_crash || ms->drain_all_on_crash ) {
6291 if( j->crashed && j->reaped ) {
6292 job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
6293 machservice_drain_port(ms);
6294 } else if( !(j->crashed || j->reaped) ) {
6295 job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
6296 }
6297 }
6298
6299 ms->isActive = false;
6300 if (ms->delete_on_destruction) {
6301 machservice_delete(j, ms, false);
6302 } else if (ms->reset) {
6303 machservice_resetport(j, ms);
6304 }
6305
6306 job_dispatch(j, false);
6307
6308 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
6309
6310 return true;
6311 }
6312
6313 void
6314 job_ack_no_senders(job_t j)
6315 {
6316 j->priv_port_has_senders = false;
6317
6318 job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
6319 j->j_port = 0;
6320
6321 job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");
6322
6323 job_dispatch(j, false);
6324 }
6325
6326 bool
6327 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
6328 {
6329 struct semaphoreitem *si;
6330 size_t alloc_sz = sizeof(struct semaphoreitem);
6331
6332 if (what) {
6333 alloc_sz += strlen(what) + 1;
6334 }
6335
6336 if (!job_assumes(j, si = calloc(1, alloc_sz))) {
6337 return false;
6338 }
6339
6340 si->fd = -1;
6341 si->why = why;
6342
6343 if (what) {
6344 strcpy(si->what_init, what);
6345 }
6346
6347 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
6348
6349 if( (why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy ) {
6350 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
6351 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
6352 j->nosy = true;
6353 }
6354
6355 semaphoreitem_runtime_mod_ref(si, true);
6356
6357 return true;
6358 }
6359
6360 void
6361 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
6362 {
6363 /*
6364 * External events need to be tracked.
6365 * Internal events do NOT need to be tracked.
6366 */
6367
6368 switch (si->why) {
6369 case SUCCESSFUL_EXIT:
6370 case FAILED_EXIT:
6371 case OTHER_JOB_ENABLED:
6372 case OTHER_JOB_DISABLED:
6373 case OTHER_JOB_ACTIVE:
6374 case OTHER_JOB_INACTIVE:
6375 return;
6376 default:
6377 break;
6378 }
6379
6380 if (add) {
6381 runtime_add_weak_ref();
6382 } else {
6383 runtime_del_weak_ref();
6384 }
6385 }
6386
6387 void
6388 semaphoreitem_delete(job_t j, struct semaphoreitem *si)
6389 {
6390 semaphoreitem_runtime_mod_ref(si, false);
6391
6392 SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
6393
6394 if (si->fd != -1) {
6395 job_assumes(j, runtime_close(si->fd) != -1);
6396 }
6397
6398 /* We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores. */
6399 if( (si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy ) {
6400 j->nosy = false;
6401 SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
6402 }
6403
6404 free(si);
6405 }
6406
6407 void
6408 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
6409 {
6410 struct semaphoreitem_dict_iter_context *sdic = context;
6411 semaphore_reason_t why;
6412
6413 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
6414
6415 semaphoreitem_new(sdic->j, why, key);
6416 }
6417
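/* This maps the KeepAlive sub-dictionaries onto semaphore items. A hypothetical
 * plist stanza (key spellings per the LAUNCH_JOBKEY_KEEPALIVE_* constants in
 * launch.h) might look like:
 *
 *	<key>KeepAlive</key>
 *	<dict>
 *		<key>SuccessfulExit</key><false/>
 *		<key>PathState</key>
 *		<dict><key>/var/run/foo.pid</key><true/></dict>
 *	</dict>
 *
 * where each boolean's sense selects between the why_true/why_false reasons. */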
6418 void
6419 semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
6420 {
6421 struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
6422 job_t j = context;
6423 semaphore_reason_t why;
6424
6425 switch (launch_data_get_type(obj)) {
6426 case LAUNCH_DATA_BOOL:
6427 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
6428 why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
6429 semaphoreitem_new(j, why, NULL);
6430 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
6431 why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
6432 semaphoreitem_new(j, why, NULL);
6433 j->start_pending = true;
6434 } else if( strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0 ) {
6435 j->needs_kickoff = launch_data_get_bool(obj);
6436 } else {
6437 job_assumes(j, false);
6438 }
6439 break;
6440 case LAUNCH_DATA_DICTIONARY:
6441 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
6442 sdic.why_true = PATH_EXISTS;
6443 sdic.why_false = PATH_MISSING;
6444 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
6445 sdic.why_true = OTHER_JOB_ACTIVE;
6446 sdic.why_false = OTHER_JOB_INACTIVE;
6447 } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
6448 sdic.why_true = OTHER_JOB_ENABLED;
6449 sdic.why_false = OTHER_JOB_DISABLED;
6450 } else {
6451 job_assumes(j, false);
6452 break;
6453 }
6454
6455 launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
6456 break;
6457 default:
6458 job_assumes(j, false);
6459 break;
6460 }
6461 }
6462
6463 void
6464 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
6465 {
6466 jobmgr_t jmi, jmn;
6467 job_t ji, jn;
6468
6470 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6471 jobmgr_dispatch_all_semaphores(jmi);
6472 }
6473
6474 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
6475 if (!SLIST_EMPTY(&ji->semaphores)) {
6476 job_dispatch(ji, false);
6477 }
6478 }
6479 }
6480
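/* Notes on the cronemu_* family below: a field of -1 means "wildcard", as in
 * crontab(5). Each level pins its own field, zeroes the finer-grained ones, and
 * lets mktime(3) normalize the result; the "carrytest" pattern detects when an
 * increment overflowed into the next coarser unit (e.g. tm_mday wrapping into a
 * new month), which tells the caller to advance that coarser unit and retry. */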
6481 time_t
6482 cronemu(int mon, int mday, int hour, int min)
6483 {
6484 struct tm workingtm;
6485 time_t now;
6486
6487 now = time(NULL);
6488 workingtm = *localtime(&now);
6489
6490 workingtm.tm_isdst = -1;
6491 workingtm.tm_sec = 0;
6492 workingtm.tm_min++;
6493
6494 while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
6495 workingtm.tm_year++;
6496 workingtm.tm_mon = 0;
6497 workingtm.tm_mday = 1;
6498 workingtm.tm_hour = 0;
6499 workingtm.tm_min = 0;
6500 mktime(&workingtm);
6501 }
6502
6503 return mktime(&workingtm);
6504 }
6505
6506 time_t
6507 cronemu_wday(int wday, int hour, int min)
6508 {
6509 struct tm workingtm;
6510 time_t now;
6511
6512 now = time(NULL);
6513 workingtm = *localtime(&now);
6514
6515 workingtm.tm_isdst = -1;
6516 workingtm.tm_sec = 0;
6517 workingtm.tm_min++;
6518
6519 if (wday == 7) {
6520 wday = 0;
6521 }
6522
6523 while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
6524 workingtm.tm_mday++;
6525 workingtm.tm_hour = 0;
6526 workingtm.tm_min = 0;
6527 mktime(&workingtm);
6528 }
6529
6530 return mktime(&workingtm);
6531 }
6532
6533 bool
6534 cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
6535 {
6536 if (mon == -1) {
6537 struct tm workingtm = *wtm;
6538 int carrytest;
6539
6540 while (!cronemu_mday(&workingtm, mday, hour, min)) {
6541 workingtm.tm_mon++;
6542 workingtm.tm_mday = 1;
6543 workingtm.tm_hour = 0;
6544 workingtm.tm_min = 0;
6545 carrytest = workingtm.tm_mon;
6546 mktime(&workingtm);
6547 if (carrytest != workingtm.tm_mon) {
6548 return false;
6549 }
6550 }
6551 *wtm = workingtm;
6552 return true;
6553 }
6554
6555 if (mon < wtm->tm_mon) {
6556 return false;
6557 }
6558
6559 if (mon > wtm->tm_mon) {
6560 wtm->tm_mon = mon;
6561 wtm->tm_mday = 1;
6562 wtm->tm_hour = 0;
6563 wtm->tm_min = 0;
6564 }
6565
6566 return cronemu_mday(wtm, mday, hour, min);
6567 }
6568
6569 bool
6570 cronemu_mday(struct tm *wtm, int mday, int hour, int min)
6571 {
6572 if (mday == -1) {
6573 struct tm workingtm = *wtm;
6574 int carrytest;
6575
6576 while (!cronemu_hour(&workingtm, hour, min)) {
6577 workingtm.tm_mday++;
6578 workingtm.tm_hour = 0;
6579 workingtm.tm_min = 0;
6580 carrytest = workingtm.tm_mday;
6581 mktime(&workingtm);
6582 if (carrytest != workingtm.tm_mday) {
6583 return false;
6584 }
6585 }
6586 *wtm = workingtm;
6587 return true;
6588 }
6589
6590 if (mday < wtm->tm_mday) {
6591 return false;
6592 }
6593
6594 if (mday > wtm->tm_mday) {
6595 wtm->tm_mday = mday;
6596 wtm->tm_hour = 0;
6597 wtm->tm_min = 0;
6598 }
6599
6600 return cronemu_hour(wtm, hour, min);
6601 }
6602
6603 bool
6604 cronemu_hour(struct tm *wtm, int hour, int min)
6605 {
6606 if (hour == -1) {
6607 struct tm workingtm = *wtm;
6608 int carrytest;
6609
6610 while (!cronemu_min(&workingtm, min)) {
6611 workingtm.tm_hour++;
6612 workingtm.tm_min = 0;
6613 carrytest = workingtm.tm_hour;
6614 mktime(&workingtm);
6615 if (carrytest != workingtm.tm_hour) {
6616 return false;
6617 }
6618 }
6619 *wtm = workingtm;
6620 return true;
6621 }
6622
6623 if (hour < wtm->tm_hour) {
6624 return false;
6625 }
6626
6627 if (hour > wtm->tm_hour) {
6628 wtm->tm_hour = hour;
6629 wtm->tm_min = 0;
6630 }
6631
6632 return cronemu_min(wtm, min);
6633 }
6634
6635 bool
6636 cronemu_min(struct tm *wtm, int min)
6637 {
6638 if (min == -1) {
6639 return true;
6640 }
6641
6642 if (min < wtm->tm_min) {
6643 return false;
6644 }
6645
6646 if (min > wtm->tm_min) {
6647 wtm->tm_min = min;
6648 }
6649
6650 return true;
6651 }
6652
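/* The named memory entry returned below is presumably mapped by the client
 * along these lines (an illustrative sketch, not code from this project):
 *
 *	vm_address_t addr = 0;
 *	kern_return_t kr = vm_map(mach_task_self(), &addr, getpagesize(), 0,
 *	    VM_FLAGS_ANYWHERE, shmem_port, 0, FALSE,
 *	    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE);
 */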
6653 kern_return_t
6654 job_mig_setup_shmem(job_t j, mach_port_t *shmem_port)
6655 {
6656 memory_object_size_t size_of_page, size_of_page_orig;
6657 vm_address_t vm_addr;
6658 kern_return_t kr;
6659
6660 if (!launchd_assumes(j != NULL)) {
6661 return BOOTSTRAP_NO_MEMORY;
6662 }
6663
6664 if (unlikely(j->anonymous)) {
6665 job_log(j, LOG_DEBUG, "Anonymous job tried to setup shared memory");
6666 return BOOTSTRAP_NOT_PRIVILEGED;
6667 }
6668
6669 if (unlikely(j->shmem)) {
6670 job_log(j, LOG_ERR, "Tried to setup shared memory more than once");
6671 return BOOTSTRAP_NOT_PRIVILEGED;
6672 }
6673
6674 size_of_page_orig = size_of_page = getpagesize();
6675
6676 kr = vm_allocate(mach_task_self(), &vm_addr, size_of_page, true);
6677
6678 if (!job_assumes(j, kr == 0)) {
6679 return kr;
6680 }
6681
6682 j->shmem = (typeof(j->shmem))vm_addr;
6683 j->shmem->vp_shmem_standby_timeout = j->timeout;
6684
6685 kr = mach_make_memory_entry_64(mach_task_self(), &size_of_page,
6686 (memory_object_offset_t)vm_addr, VM_PROT_READ|VM_PROT_WRITE, shmem_port, 0);
6687
6688 if (job_assumes(j, kr == 0)) {
6689 job_assumes(j, size_of_page == size_of_page_orig);
6690 }
6691
6692 /* no need to inherit this in child processes */
6693 job_assumes(j, vm_inherit(mach_task_self(), (vm_address_t)j->shmem, size_of_page_orig, VM_INHERIT_NONE) == 0);
6694
6695 return kr;
6696 }
6697
6698 kern_return_t
6699 job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
6700 {
6701 struct ldcred *ldc = runtime_get_caller_creds();
6702 job_t js;
6703
6704 if (!launchd_assumes(j != NULL)) {
6705 return BOOTSTRAP_NO_MEMORY;
6706 }
6707
6708 if (unlikely(j->deny_job_creation)) {
6709 return BOOTSTRAP_NOT_PRIVILEGED;
6710 }
6711
6712 #if HAVE_SANDBOX
6713 const char **argv = (const char **)mach_cmd2argv(server_cmd);
6714 if (unlikely(argv == NULL)) {
6715 return BOOTSTRAP_NO_MEMORY;
6716 }
6717 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
6718 free(argv);
6719 return BOOTSTRAP_NOT_PRIVILEGED;
6720 }
6721 free(argv);
6722 #endif
6723
6724 job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);
6725
6726 if (pid1_magic) {
6727 if (ldc->euid || ldc->uid) {
6728 job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
6729 return VPROC_ERR_TRY_PER_USER;
6730 }
6731 } else {
6732 if (unlikely(server_uid != getuid())) {
6733 job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
6734 server_cmd, getuid(), server_uid);
6735 }
6736 server_uid = 0; /* zero means "do nothing" */
6737 }
6738
6739 js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);
6740
6741 if (unlikely(js == NULL)) {
6742 return BOOTSTRAP_NO_MEMORY;
6743 }
6744
6745 *server_portp = js->j_port;
6746 return BOOTSTRAP_SUCCESS;
6747 }
6748
6749 kern_return_t
6750 job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
6751 {
6752 struct ldcred *ldc = runtime_get_caller_creds();
6753 job_t otherj;
6754
6755 if (!launchd_assumes(j != NULL)) {
6756 return BOOTSTRAP_NO_MEMORY;
6757 }
6758
6763 if( unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation ) {
6764 #if TARGET_OS_EMBEDDED
6765 if( !j->embedded_special_privileges ) {
6766 return BOOTSTRAP_NOT_PRIVILEGED;
6767 }
6768 #else
6769 return BOOTSTRAP_NOT_PRIVILEGED;
6770 #endif
6771 }
6772
6773 #if HAVE_SANDBOX
6774 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
6775 return BOOTSTRAP_NOT_PRIVILEGED;
6776 }
6777 #endif
6778
6779 if (unlikely(!(otherj = job_find(targetlabel)))) {
6780 return BOOTSTRAP_UNKNOWN_SERVICE;
6781 }
6782
6783 #if TARGET_OS_EMBEDDED
6784 if( j->embedded_special_privileges && strcmp(j->username, otherj->username) != 0 ) {
6785 return BOOTSTRAP_NOT_PRIVILEGED;
6786 }
6787 #endif
6788
6789 if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
6790 bool do_block = otherj->p;
6791
6792 if (otherj->anonymous) {
6793 return BOOTSTRAP_NOT_PRIVILEGED;
6794 }
6795
6796 job_remove(otherj);
6797
6798 if (do_block) {
6799 job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
6800 /* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
6801 job_assumes(otherj, waiting4removal_new(otherj, srp));
6802 return MIG_NO_REPLY;
6803 } else {
6804 return 0;
6805 }
6806 } else if (sig == VPROC_MAGIC_TRYKILL_SIGNAL) {
6807 if (!j->kill_via_shmem) {
6808 return BOOTSTRAP_NOT_PRIVILEGED;
6809 }
6810
6811 if (!j->shmem) {
6812 j->sent_kill_via_shmem = true;
6813 job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
6814 return 0;
6815 }
6816 #if !TARGET_OS_EMBEDDED
6817 if (__sync_bool_compare_and_swap(&j->shmem->vp_shmem_transaction_cnt, 0, -1)) {
6818 j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
6819 j->sent_kill_via_shmem = true;
6820 job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
6821 return 0;
6822 }
6823 #endif
6824 return BOOTSTRAP_NOT_PRIVILEGED;
6825 } else if (otherj->p) {
6826 job_assumes(j, runtime_kill(otherj->p, sig) != -1);
6827 }
6828
6829 return 0;
6830 }
6831
6832 kern_return_t
6833 job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
6834 {
6835 struct ldcred *ldc = runtime_get_caller_creds();
6836
6837 if (!launchd_assumes(j != NULL)) {
6838 return BOOTSTRAP_NO_MEMORY;
6839 }
6840
6841 if (!job_assumes(j, j->per_user)) {
6842 return BOOTSTRAP_NOT_PRIVILEGED;
6843 }
6844
6845 return runtime_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
6846 }
6847
6848 kern_return_t
6849 job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
6850 {
6851 struct ldcred *ldc = runtime_get_caller_creds();
6852
6853 if (!launchd_assumes(j != NULL)) {
6854 return BOOTSTRAP_NO_MEMORY;
6855 }
6856
6857 if (unlikely(ldc->euid)) {
6858 return BOOTSTRAP_NOT_PRIVILEGED;
6859 }
6860
6861 return runtime_log_drain(srp, outval, outvalCnt);
6862 }
6863
6864 kern_return_t
6865 job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
6866 vm_offset_t inval, mach_msg_type_number_t invalCnt,
6867 vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
6868 {
6869 const char *action;
6870 launch_data_t input_obj = NULL, output_obj = NULL;
6871 size_t data_offset = 0;
6872 size_t packed_size;
6873 struct ldcred *ldc = runtime_get_caller_creds();
6874
6875 if (!launchd_assumes(j != NULL)) {
6876 return BOOTSTRAP_NO_MEMORY;
6877 }
6878
6879 if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
6880 return BOOTSTRAP_NOT_PRIVILEGED;
6881 }
6882
6883 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
6884 return 1;
6885 }
6886
6887 if (inkey && outkey) {
6888 action = "Swapping";
6889 } else if (inkey) {
6890 action = "Setting";
6891 } else {
6892 action = "Getting";
6893 }
6894
6895 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
6896
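/* The fixed 20MB below appears to be a generous upper bound for any packed
 * reply. mig_allocate() vends zero-fill pages that are only touched as
 * launch_data_pack() writes them, so the over-allocation is cheap, and every
 * exit path either transfers the region out-of-line or deallocates it. */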
6897 *outvalCnt = 20 * 1024 * 1024;
6898 mig_allocate(outval, *outvalCnt);
6899 if (!job_assumes(j, *outval != 0)) {
6900 return 1;
6901 }
6902
6903 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
6904 if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
6905 goto out_bad;
6906 }
6907
6908 switch (outkey) {
6909 case VPROC_GSK_ENVIRONMENT:
6910 if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
6911 goto out_bad;
6912 }
6913 jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
6914 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
6915 if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
6916 goto out_bad;
6917 }
6918 launch_data_free(output_obj);
6919 break;
6920 case VPROC_GSK_ALLJOBS:
6921 if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
6922 goto out_bad;
6923 }
6924 ipc_revoke_fds(output_obj);
6925 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
6926 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
6927 if (!job_assumes(j, packed_size != 0)) {
6928 goto out_bad;
6929 }
6930 launch_data_free(output_obj);
6931 break;
6932 case VPROC_GSK_MGR_NAME:
6933 if( !job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL) ) {
6934 goto out_bad;
6935 }
6936 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
6937 if (!job_assumes(j, packed_size != 0)) {
6938 goto out_bad;
6939 }
6940
6941 launch_data_free(output_obj);
6942 break;
6943 case VPROC_GSK_JOB_OVERRIDES_DB:
6944 if( !job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_OVERRIDES))) != NULL) ) {
6945 goto out_bad;
6946 }
6947 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
6948 if (!job_assumes(j, packed_size != 0)) {
6949 goto out_bad;
6950 }
6951
6952 launch_data_free(output_obj);
6953 break;
6954 case VPROC_GSK_JOB_CACHE_DB:
6955 if( !job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_JOBCACHE))) != NULL) ) {
6956 goto out_bad;
6957 }
6958 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
6959 if (!job_assumes(j, packed_size != 0)) {
6960 goto out_bad;
6961 }
6962
6963 job_log(j, LOG_DEBUG, "Location of job cache database: %s", launch_data_get_string(output_obj));
6964
6965 launch_data_free(output_obj);
6966 break;
6967 case 0:
6968 mig_deallocate(*outval, *outvalCnt);
6969 *outval = 0;
6970 *outvalCnt = 0;
6971 break;
6972 default:
6973 goto out_bad;
6974 }
6975
6976 if (invalCnt) switch (inkey) {
6977 case VPROC_GSK_ENVIRONMENT:
6978 if( launch_data_get_type(input_obj) == LAUNCH_DATA_DICTIONARY ) {
6979 if( j->p ) {
6980 job_log(j, LOG_INFO, "Setting environment for a currently active job. This environment will take effect on the next invocation of the job.");
6981 }
6982 launch_data_dict_iterate(input_obj, envitem_setup_one_shot, j);
6983 }
6984 break;
6985 case 0:
6986 break;
6987 default:
6988 goto out_bad;
6989 }
6990
6991 mig_deallocate(inval, invalCnt);
6992
6993 return 0;
6994
6995 out_bad:
6996 if (*outval) {
6997 mig_deallocate(*outval, *outvalCnt);
6998 }
6999 return 1;
7000 }
7001
7002 kern_return_t
7003 job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
7004 {
7005 const char *action;
7006 kern_return_t kr = 0;
7007 struct ldcred *ldc = runtime_get_caller_creds();
7008 int oldmask;
7009
7010 if (!launchd_assumes(j != NULL)) {
7011 return BOOTSTRAP_NO_MEMORY;
7012 }
7013
7014 if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
7015 return BOOTSTRAP_NOT_PRIVILEGED;
7016 }
7017
7018 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
7019 return 1;
7020 }
7021
7022 if (inkey && outkey) {
7023 action = "Swapping";
7024 } else if (inkey) {
7025 action = "Setting";
7026 } else {
7027 action = "Getting";
7028 }
7029
7030 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
7031
7032 switch (outkey) {
7033 case VPROC_GSK_ABANDON_PROCESS_GROUP:
7034 *outval = j->abandon_pg;
7035 break;
7036 case VPROC_GSK_LAST_EXIT_STATUS:
7037 *outval = j->last_exit_status;
7038 break;
7039 case VPROC_GSK_MGR_UID:
7040 *outval = getuid();
7041 break;
7042 case VPROC_GSK_MGR_PID:
7043 *outval = getpid();
7044 break;
7045 case VPROC_GSK_IS_MANAGED:
7046 *outval = j->anonymous ? 0 : 1;
7047 break;
7048 case VPROC_GSK_BASIC_KEEPALIVE:
7049 *outval = !j->ondemand;
7050 break;
7051 case VPROC_GSK_START_INTERVAL:
7052 *outval = j->start_interval;
7053 break;
7054 case VPROC_GSK_IDLE_TIMEOUT:
7055 *outval = j->timeout;
7056 break;
7057 case VPROC_GSK_EXIT_TIMEOUT:
7058 *outval = j->exit_timeout;
7059 break;
7060 case VPROC_GSK_GLOBAL_LOG_MASK:
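/* setlogmask(3)-style calls return the previous mask, so the set-then-restore
 * pair below reads the current mask without leaving it changed; the
 * VPROC_GSK_GLOBAL_UMASK case uses the same read-by-swap trick with umask(2). */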
7061 oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
7062 *outval = oldmask;
7063 runtime_setlogmask(oldmask);
7064 break;
7065 case VPROC_GSK_GLOBAL_UMASK:
7066 oldmask = umask(0);
7067 *outval = oldmask;
7068 umask(oldmask);
7069 break;
7070 case VPROC_GSK_TRANSACTIONS_ENABLED:
7071 job_log(j, LOG_DEBUG, "Reading transaction model status.");
7072 *outval = j->kill_via_shmem;
7073 break;
7074 case VPROC_GSK_WAITFORDEBUGGER:
7075 *outval = j->wait4debugger;
7076 break;
7077 case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
7078 *outval = j->embedded_special_privileges;
7079 break;
7080 case 0:
7081 *outval = 0;
7082 break;
7083 default:
7084 kr = 1;
7085 break;
7086 }
7087
7088 switch (inkey) {
7089 case VPROC_GSK_ABANDON_PROCESS_GROUP:
7090 j->abandon_pg = (bool)inval;
7091 break;
7092 case VPROC_GSK_GLOBAL_ON_DEMAND:
7093 job_log(j, LOG_DEBUG, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
7094 kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
7095 break;
7096 case VPROC_GSK_BASIC_KEEPALIVE:
7097 j->ondemand = !inval;
7098 break;
7099 case VPROC_GSK_START_INTERVAL:
7100 if (inval > UINT32_MAX || inval < 0) {
7101 kr = 1;
7102 } else if (inval) {
7103 if (j->start_interval == 0) {
7104 runtime_add_weak_ref();
7105 }
7106 j->start_interval = (typeof(j->start_interval)) inval;
7107 job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
7108 } else if (j->start_interval) {
7109 job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
7110 runtime_del_weak_ref();
7113 j->start_interval = 0;
7114 }
7115 break;
7116 case VPROC_GSK_IDLE_TIMEOUT:
7117 if (inval < 0 || inval > UINT32_MAX) {
7118 kr = 1;
7119 } else {
7120 j->timeout = (typeof(j->timeout)) inval;
7121 }
7122 break;
7123 case VPROC_GSK_EXIT_TIMEOUT:
7124 if (inval < 0 || inval > UINT32_MAX) {
7125 kr = 1;
7126 } else {
7127 j->exit_timeout = (typeof(j->exit_timeout)) inval;
7128 }
7129 break;
7130 case VPROC_GSK_GLOBAL_LOG_MASK:
7131 if (inval < 0 || inval > UINT32_MAX) {
7132 kr = 1;
7133 } else {
7134 runtime_setlogmask((int) inval);
7135 }
7136 break;
7137 case VPROC_GSK_GLOBAL_UMASK:
7138 launchd_assert(sizeof (mode_t) == 2);
7139 if (inval < 0 || inval > UINT16_MAX) {
7140 kr = 1;
7141 } else {
7142 umask((mode_t) inval);
7143 }
7144 break;
7145 case VPROC_GSK_TRANSACTIONS_ENABLED:
7146 if( !job_assumes(j, inval != 0) ) {
7147 job_log(j, LOG_WARNING, "Attempt to unregister from transaction model. This is not supported.");
7148 kr = 1;
7149 } else {
7150 job_log(j, LOG_DEBUG, "Now participating in transaction model.");
7151 j->kill_via_shmem = (bool)inval;
7152 job_log(j, LOG_DEBUG, "j->kill_via_shmem = %s", j->kill_via_shmem ? "true" : "false");
7153 }
7154 break;
7155 case VPROC_GSK_WEIRD_BOOTSTRAP:
7156 if( job_assumes(j, j->weird_bootstrap) ) {
7157 job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
7158
7159 mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
7160
7161 if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
7162 mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
7163 }
7164
7165 job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
7166 j->weird_bootstrap = false;
7167 }
7168 break;
7169 case VPROC_GSK_WAITFORDEBUGGER:
7170 j->wait4debugger_oneshot = inval;
7171 break;
7172 case VPROC_GSK_PERUSER_SUSPEND:
7173 if( job_assumes(j, pid1_magic && ldc->euid == 0) ) {
7174 mach_port_t junk = MACH_PORT_NULL;
7175 job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, false, &junk);
7176 if( job_assumes(j, jpu != NULL) ) {
7177 struct suspended_peruser *spi = NULL;
7178 LIST_FOREACH( spi, &j->suspended_perusers, sle ) {
7179 if( (int64_t)(spi->j->mach_uid) == inval ) {
7180 job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
7181 break;
7182 }
7183 }
7184
7185 if( spi == NULL ) {
7186 job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
7187 spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
7188 if( job_assumes(j, spi != NULL) ) {
7189 spi->j = jpu;
7190 spi->j->peruser_suspend_count++;
7191 LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
7192 job_stop(spi->j);
7193 } else {
7194 kr = BOOTSTRAP_NO_MEMORY;
7195 }
7196 }
7197 }
7198 } else {
7199 kr = 1;
7200 }
7201 break;
7202 case VPROC_GSK_PERUSER_RESUME:
7203 if( job_assumes(j, pid1_magic) ) {
7204 struct suspended_peruser *spi = NULL, *spt = NULL;
7205 LIST_FOREACH_SAFE( spi, &j->suspended_perusers, sle, spt ) {
7206 if( (int64_t)(spi->j->mach_uid) == inval ) {
7207 spi->j->peruser_suspend_count--;
7208 LIST_REMOVE(spi, sle);
7209 job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
7210 break;
7211 }
7212 }
7213
7214 if( !job_assumes(j, spi != NULL) ) {
7215 job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
7216 kr = BOOTSTRAP_NOT_PRIVILEGED;
7217 } else if( spi->j->peruser_suspend_count == 0 ) {
7218 job_dispatch(spi->j, false);
7219 free(spi);
7220 }
7221 } else {
7222 kr = 1;
7223 }
7224 break;
7225 case 0:
7226 break;
7227 default:
7228 kr = 1;
7229 break;
7230 }
7231
7232 return kr;
7233 }
7234
7235 kern_return_t
7236 job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *audit_session)
7237 {
7238 struct machservice *ms;
7239
7240 if (!launchd_assumes(j != NULL)) {
7241 return BOOTSTRAP_NO_MEMORY;
7242 }
7243
7244 job_log(j, LOG_DEBUG, "Post fork ping.");
7245
7246 job_setup_exception_port(j, child_task);
7247
7248 SLIST_FOREACH(ms, &special_ports, special_port_sle) {
7249 if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
7250 /* The TASK_ACCESS_PORT funny business is to work around 5325399. */
7251 continue;
7252 }
7253
7254 errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
7255
7256 if (unlikely(errno)) {
7257 int desired_log_level = LOG_ERR;
7258
7259 if (j->anonymous) {
7260 /* 5338127 */
7261
7262 desired_log_level = LOG_WARNING;
7263
7264 if (ms->special_port_num == TASK_SEATBELT_PORT) {
7265 desired_log_level = LOG_DEBUG;
7266 }
7267 }
7268
7269 job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
7270 }
7271 }
7272
7273 mach_port_t _session = MACH_PORT_NULL;
7274 #if !TARGET_OS_EMBEDDED
7275 if( !j->anonymous && !j->per_user ) {
7276 job_log(j, LOG_DEBUG, "Returning session port %u", j->audit_session);
7277 _session = j->audit_session;
7278 }
7279 #endif
7280 *audit_session = _session;
7281 job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);
7282
7283 return 0;
7284 }
7285
7286 kern_return_t
7287 job_mig_reboot2(job_t j, uint64_t flags)
7288 {
7289 char who_started_the_reboot[2048] = "";
7290 struct kinfo_proc kp;
7291 struct ldcred *ldc = runtime_get_caller_creds();
7292 pid_t pid_to_log;
7293
7294 if (!launchd_assumes(j != NULL)) {
7295 return BOOTSTRAP_NO_MEMORY;
7296 }
7297
7298 if (unlikely(!pid1_magic)) {
7299 return BOOTSTRAP_NOT_PRIVILEGED;
7300 }
7301
7302 #if !TARGET_OS_EMBEDDED
7303 if (unlikely(ldc->euid)) {
7304 #else
7305 if( unlikely(ldc->euid) && !j->embedded_special_privileges ) {
7306 #endif
7307 return BOOTSTRAP_NOT_PRIVILEGED;
7308 }
7309
7310 for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = kp.kp_eproc.e_ppid) {
7311 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid_to_log };
7312 size_t who_offset, len = sizeof(kp);
7313
7314 if (!job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
7315 return 1;
7316 }
7317
7318 who_offset = strlen(who_started_the_reboot);
7319 snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
7320 " %s[%u]%s", kp.kp_proc.p_comm, pid_to_log, kp.kp_eproc.e_ppid ? " ->" : "");
7321 }
7322
7323 root_jobmgr->reboot_flags = (int)flags;
7324
7325 launchd_shutdown();
7326
7327 job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
7328
7329 return 0;
7330 }
7331
7332 kern_return_t
7333 job_mig_getsocket(job_t j, name_t spr)
7334 {
7335 if (!launchd_assumes(j != NULL)) {
7336 return BOOTSTRAP_NO_MEMORY;
7337 }
7338
7339 if( j->deny_job_creation ) {
7340 return BOOTSTRAP_NOT_PRIVILEGED;
7341 }
7342
7343 ipc_server_init();
7344
7345 if (unlikely(!sockpath)) {
7346 return BOOTSTRAP_NO_MEMORY;
7347 }
7348
7349 strncpy(spr, sockpath, sizeof(name_t));
7350
7351 return BOOTSTRAP_SUCCESS;
7352 }
7353
7354 kern_return_t
7355 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
7356 {
7357 if (!launchd_assumes(j != NULL)) {
7358 return BOOTSTRAP_NO_MEMORY;
7359 }
7360
7361 if ((errno = err)) {
7362 job_log_error(j, pri, "%s", msg);
7363 } else {
7364 job_log(j, pri, "%s", msg);
7365 }
7366
7367 return 0;
7368 }
7369
7370 job_t
7371 jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, bool dispatch, mach_port_t *mp)
7372 {
7373 job_t ji = NULL;
7374 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
7375 if (!ji->per_user) {
7376 continue;
7377 }
7378 if (ji->mach_uid != which_user) {
7379 continue;
7380 }
7381 if (SLIST_EMPTY(&ji->machservices)) {
7382 continue;
7383 }
7384 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
7385 continue;
7386 }
7387 break;
7388 }
7389
7390 if( unlikely(ji == NULL) ) {
7391 struct machservice *ms;
7392 char lbuf[1024];
7393
7394 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
7395
7396 snprintf(lbuf, sizeof(lbuf), "com.apple.launchd.peruser.%u", which_user);
7397
7398 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
7399
7400 if( ji != NULL ) {
7401 ji->mach_uid = which_user;
7402 ji->per_user = true;
7403 ji->kill_via_shmem = true;
7404
7405 struct stat sb;
7406 char pu_db[PATH_MAX];
7407 snprintf(pu_db, sizeof(pu_db), LAUNCHD_DB_PREFIX "/%s", lbuf);
7408
7409 bool created = false;
7410 int err = stat(pu_db, &sb);
7411 if( (err == -1 && errno == ENOENT) || (err == 0 && !S_ISDIR(sb.st_mode)) ) {
7412 if( err == 0 ) {
7413 char move_aside[PATH_MAX];
7414 snprintf(move_aside, sizeof(move_aside), LAUNCHD_DB_PREFIX "/%s.movedaside", lbuf);
7415
7416 job_assumes(ji, rename(pu_db, move_aside) != -1);
7417 }
7418
7419 job_assumes(ji, mkdir(pu_db, S_IRWXU) != -1);
7420 job_assumes(ji, chown(pu_db, which_user, 0) != -1);
7421 created = true;
7422 }
7423
7424 if( !created && err == 0 ) { /* sb is only valid when stat(2) succeeded */
7425 if( !job_assumes(ji, sb.st_uid == which_user) ) {
7426 job_assumes(ji, chown(pu_db, which_user, 0) != -1);
7427 }
7428 if( !job_assumes(ji, sb.st_gid == 0) ) {
7429 job_assumes(ji, chown(pu_db, which_user, 0) != -1);
7430 }
7431 if( !job_assumes(ji, sb.st_mode == (S_IRWXU | S_IFDIR)) ) {
7432 job_assumes(ji, chmod(pu_db, S_IRWXU) != -1);
7433 }
7434 }
7435
7436 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
7437 job_remove(ji);
7438 ji = NULL;
7439 } else {
7440 ms->per_user_hack = true;
7441 ms->hide = true;
7442
7443 ji = dispatch ? job_dispatch(ji, false) : ji;
7444 }
7445 }
7446 } else {
7447 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
7448 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
7449 }
7450
7451 return ji;
7452 }
7453
7454 kern_return_t
7455 job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
7456 {
7457 struct ldcred *ldc = runtime_get_caller_creds();
7458 job_t jpu;
7459
7460 #if TARGET_OS_EMBEDDED
7461 /* There is no need for per-user launchds on embedded. */
7462 job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
7463 return BOOTSTRAP_NOT_PRIVILEGED;
7464 #endif
7465
7466 #if HAVE_SANDBOX
7467 if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
7468 return BOOTSTRAP_NOT_PRIVILEGED;
7469 }
7470 #endif
7471
7472 if (!launchd_assumes(j != NULL)) {
7473 return BOOTSTRAP_NO_MEMORY;
7474 }
7475
7476 job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
7477
7478 if (unlikely(!pid1_magic)) {
7479 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
7480 return BOOTSTRAP_NOT_PRIVILEGED;
7481 }
7482
7483 if (ldc->euid || ldc->uid) {
7484 which_user = ldc->euid ?: ldc->uid;
7485 }
7486
7487 *up_cont = MACH_PORT_NULL;
7488
7489 jpu = jobmgr_lookup_per_user_context_internal(j, which_user, true, up_cont);
7490
7491 return 0;
7492 }
7493
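/* A daemon typically reaches this entry point through the client-side check-in
 * routine, roughly (illustrative, with a hypothetical service name):
 *
 *	mach_port_t sp = MACH_PORT_NULL;
 *	kern_return_t kr = bootstrap_check_in(bootstrap_port,
 *	    "com.example.service", &sp);
 *
 * which hands the job the receive right launchd has been holding for it. */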
7494 kern_return_t
7495 job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uint64_t flags)
7496 {
7497 bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
7498 struct ldcred *ldc = runtime_get_caller_creds();
7499 struct machservice *ms;
7500 job_t jo;
7501
7502 if (!launchd_assumes(j != NULL)) {
7503 return BOOTSTRAP_NO_MEMORY;
7504 }
7505
7506 ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
7507
7508 if (ms == NULL) {
7509 *serviceportp = MACH_PORT_NULL;
7510
7511 if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
7512 return BOOTSTRAP_NO_MEMORY;
7513 }
7514
7515 /* Treat this like a legacy job. */
7516 if( !j->legacy_mach_job ) {
7517 ms->isActive = true;
7518 ms->recv = false;
7519 }
7520
7521 if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
7522 job_log(j, LOG_SCOLDING, "Please add the following service to the configuration file for this job: %s", servicename);
7523 }
7524 } else {
7525 if (unlikely((jo = machservice_job(ms)) != j)) {
7526 static pid_t last_warned_pid;
7527
7528 if (last_warned_pid != ldc->pid) {
7529 job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
7530 last_warned_pid = ldc->pid;
7531 }
7532
7533 return BOOTSTRAP_NOT_PRIVILEGED;
7534 }
7535 if (unlikely(machservice_active(ms))) {
7536 job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
7537 return BOOTSTRAP_SERVICE_ACTIVE;
7538 }
7539 }
7540
7541 job_checkin(j);
7542 machservice_request_notifications(ms);
7543
7544 job_log(j, LOG_INFO, "Check-in of service: %s", servicename);
7545
7546 *serviceportp = machservice_port(ms);
7547 return BOOTSTRAP_SUCCESS;
7548 }
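
/*
 * Client-side sketch (illustrative; the service name is hypothetical): a
 * daemon that declared "com.example.service" in its plist reaches the
 * routine above via bootstrap_check_in() to claim its receive right:
 *
 *	mach_port_t svc_port = MACH_PORT_NULL;
 *	kern_return_t kr = bootstrap_check_in(bootstrap_port,
 *	    "com.example.service", &svc_port);
 *	if (kr == BOOTSTRAP_SUCCESS) {
 *		// svc_port is the service's receive right; a second check-in
 *		// while the service is active gets BOOTSTRAP_SERVICE_ACTIVE.
 *	}
 */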
7549
7550 kern_return_t
7551 job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
7552 {
7553 struct machservice *ms;
7554 struct ldcred *ldc = runtime_get_caller_creds();
7555
7556 if (!launchd_assumes(j != NULL)) {
7557 return BOOTSTRAP_NO_MEMORY;
7558 }
7559
7560 if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
7561 job_log(j, LOG_SCOLDING, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
7562 }
7563
7564 job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
7565
7566 /* 5641783 for the embedded hack */
7567 #if !TARGET_OS_EMBEDDED
7568 /*
7569 * From a per-user/session launchd's perspective, SecurityAgent (UID
7570 * 92) is a rogue application (not our UID, not root and not a child of
7571 * us). We'll have to reconcile this design friction at a later date.
7572 */
7573 if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
7574 if (pid1_magic) {
7575 return VPROC_ERR_TRY_PER_USER;
7576 } else {
7577 return BOOTSTRAP_NOT_PRIVILEGED;
7578 }
7579 }
7580 #endif
7581
7582 ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);
7583
7584 if (unlikely(ms)) {
7585 if (machservice_job(ms) != j) {
7586 return BOOTSTRAP_NOT_PRIVILEGED;
7587 }
7588 if (machservice_active(ms)) {
7589 job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
7590 return BOOTSTRAP_SERVICE_ACTIVE;
7591 }
7592 if (ms->recv && (serviceport != MACH_PORT_NULL)) {
7593 job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
7594 return BOOTSTRAP_NOT_PRIVILEGED;
7595 }
7596 job_checkin(j);
7597 machservice_delete(j, ms, false);
7598 }
7599
7600 if (likely(serviceport != MACH_PORT_NULL)) {
7601 if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
7602 machservice_request_notifications(ms);
7603 } else {
7604 return BOOTSTRAP_NO_MEMORY;
7605 }
7606 }
7607
7609 return BOOTSTRAP_SUCCESS;
7610 }
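
/*
 * Client-side sketch (illustrative; the name is hypothetical) of the
 * deprecated path into the routine above. Unlike check-in, the client
 * allocates its own receive right and hands launchd a send right:
 *
 *	mach_port_t my_port = MACH_PORT_NULL;
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &my_port);
 *	mach_port_insert_right(mach_task_self(), my_port, my_port,
 *	    MACH_MSG_TYPE_MAKE_SEND);
 *	kern_return_t kr = bootstrap_register(bootstrap_port,
 *	    "com.example.legacy-service", my_port);
 *
 * A receive right obtained via check-in is held by launchd and survives a
 * crash of the job; a self-allocated one dies with the process, which is
 * one reason the routine above scolds non-legacy jobs.
 */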
7611
7612 kern_return_t
7613 job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uint64_t flags)
7614 {
7615 struct machservice *ms;
7616 struct ldcred *ldc = runtime_get_caller_creds();
7617 kern_return_t kr;
7618 bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
7619
7620 if (!launchd_assumes(j != NULL)) {
7621 return BOOTSTRAP_NO_MEMORY;
7622 }
7623
7624 /* 5641783 for the embedded hack */
7625 #if !TARGET_OS_EMBEDDED
7626 if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
7627 return VPROC_ERR_TRY_PER_USER;
7628 }
7629 #endif
7630
7631 #if HAVE_SANDBOX
7632 if (unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
7633 return BOOTSTRAP_NOT_PRIVILEGED;
7634 }
7635 #endif
7636
7637 if (per_pid_lookup) {
7638 ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
7639 } else {
7640 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
7641 }
7642
7643 if (likely(ms)) {
7644 if (machservice_hidden(ms) && !machservice_active(ms)) {
7645 ms = NULL;
7646 } else if (unlikely(ms->per_user_hack)) {
7647 ms = NULL;
7648 }
7649 }
7650
7651 if (likely(ms)) {
7652 job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
7653 job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
7654
7655 if (unlikely(!per_pid_lookup && j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user)) {
7656 /* We need to think more about the per_pid_lookup logic before logging about repeated lookups. */
7657 job_log(j, LOG_DEBUG, "Performance: Please fix the framework that talks to \"%s\" to cache the Mach port for service: %s", ms->job->label, servicename);
7658 }
7659
7660 j->lastlookup = ms;
7661 j->lastlookup_gennum = ms->gen_num;
7662
7663 *serviceportp = machservice_port(ms);
7664
7665 kr = BOOTSTRAP_SUCCESS;
7666 } else if (!per_pid_lookup && (inherited_bootstrap_port != MACH_PORT_NULL)) {
7667 job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
7668 /* Clients potentially check the audit token of the reply to verify that the returned send right is trustworthy. */
7669 job_assumes(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, 0, 0) == 0);
7670 /* The previous routine moved the reply port, so we're forced to return MIG_NO_REPLY now. */
7671 return MIG_NO_REPLY;
7672 } else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
7673 /*
7674 * 5240036 Should start background session when a lookup of CCacheServer occurs
7675 *
7676 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
7677 * If we find an EUID that isn't root, we force it over to the per-user context.
7678 */
7679 return VPROC_ERR_TRY_PER_USER;
7680 } else {
7681 job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
7682 kr = BOOTSTRAP_UNKNOWN_SERVICE;
7683 }
7684
7685 return kr;
7686 }
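
/*
 * Client-side sketch (illustrative; names are hypothetical): lookups reach
 * the routine above via bootstrap_look_up(). Per the "Performance" log
 * message above, callers should cache the returned send right instead of
 * re-resolving the name for every message:
 *
 *	static mach_port_t cached_port = MACH_PORT_NULL;
 *
 *	if (cached_port == MACH_PORT_NULL) {
 *		kern_return_t kr = bootstrap_look_up(bootstrap_port,
 *		    "com.example.service", &cached_port);
 *		if (kr != BOOTSTRAP_SUCCESS) {
 *			// e.g. BOOTSTRAP_UNKNOWN_SERVICE
 *		}
 *	}
 *	// ...reuse cached_port for subsequent messages...
 */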
7687
7688 kern_return_t
7689 job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
7690 {
7691 if (!launchd_assumes(j != NULL)) {
7692 return BOOTSTRAP_NO_MEMORY;
7693 }
7694
7695 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
7696 jobmgr_t jm = j->mgr;
7697
7698 if (jobmgr_parent(jm)) {
7699 *parentport = jobmgr_parent(jm)->jm_port;
7700 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
7701 *parentport = jm->jm_port;
7702 } else {
7703 job_assumes(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp) == 0);
7704 /* The previous routine moved the reply port, so we're forced to return MIG_NO_REPLY now. */
7705 return MIG_NO_REPLY;
7706 }
7707 return BOOTSTRAP_SUCCESS;
7708 }
7709
7710 kern_return_t
7711 job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt,
7712 name_array_t *servicejobsp, unsigned int *servicejobs_cnt,
7713 bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt,
7714 uint64_t flags)
7715 {
7716 name_array_t service_names = NULL;
7717 name_array_t service_jobs = NULL;
7718 bootstrap_status_array_t service_actives = NULL;
7719 unsigned int cnt = 0, cnt2 = 0;
7720 jobmgr_t jm;
7721
7722 if (!launchd_assumes(j != NULL)) {
7723 return BOOTSTRAP_NO_MEMORY;
7724 }
7725
7726 if( g_flat_mach_namespace ) {
7727 if( (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL) ) {
7728 jm = j->mgr;
7729 } else {
7730 jm = root_jobmgr;
7731 }
7732 } else {
7733 jm = j->mgr;
7734 }
7735
7736 unsigned int i = 0;
7737 struct machservice *msi = NULL;
7738 for( i = 0; i < MACHSERVICE_HASH_SIZE; i++ ) {
7739 LIST_FOREACH( msi, &jm->ms_hash[i], name_hash_sle ) {
7740 cnt += !msi->per_pid ? 1 : 0;
7741 }
7742 }
7743
7744 if (cnt == 0) {
7745 goto out;
7746 }
7747
7748 mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
7749 if (!job_assumes(j, service_names != NULL)) {
7750 goto out_bad;
7751 }
7752
7753 mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
7754 if (!job_assumes(j, service_jobs != NULL)) {
7755 goto out_bad;
7756 }
7757
7758 mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
7759 if (!job_assumes(j, service_actives != NULL)) {
7760 goto out_bad;
7761 }
7762
7763 for( i = 0; i < MACHSERVICE_HASH_SIZE; i++ ) {
7764 LIST_FOREACH( msi, &jm->ms_hash[i], name_hash_sle ) {
7765 if( !msi->per_pid ) {
7766 strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
7767 strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
7768 service_actives[cnt2] = machservice_status(msi);
7769 cnt2++;
7770 }
7771 }
7772 }
7773
7774 job_assumes(j, cnt == cnt2);
7775
7776 out:
7777 *servicenamesp = service_names;
7778 *servicejobsp = service_jobs;
7779 *serviceactivesp = service_actives;
7780 *servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;
7781
7782 return BOOTSTRAP_SUCCESS;
7783
7784 out_bad:
7785 if (service_names) {
7786 mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
7787 }
7788 if (service_jobs) {
7789 mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
7790 }
7791 if (service_actives) {
7792 mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
7793 }
7794
7795 return BOOTSTRAP_NO_MEMORY;
7796 }
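
/*
 * A note on the allocation pattern above (standard MIG convention, as used
 * here): reply buffers come from mig_allocate(), and on a successful reply
 * ownership moves out-of-line to the caller, so only the failure path calls
 * mig_deallocate(). Shape of the idiom:
 *
 *	vm_address_t buf = 0;
 *	mig_allocate(&buf, len);
 *	if (buf == 0) {
 *		return BOOTSTRAP_NO_MEMORY;	// nothing to release yet
 *	}
 *	// ...fill buf; mig_deallocate(buf, len) only on a later error;
 *	// on success the buffer rides out in the reply message.
 */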
7797
7798 kern_return_t
7799 job_mig_lookup_children(job_t j, mach_port_array_t *child_ports, mach_msg_type_number_t *child_ports_cnt,
7800 name_array_t *child_names, mach_msg_type_number_t *child_names_cnt,
7801 bootstrap_property_array_t *child_properties, mach_msg_type_number_t *child_properties_cnt)
7802 {
7803 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
7804 if( !launchd_assumes(j != NULL) ) {
7805 return BOOTSTRAP_NO_MEMORY;
7806 }
7807
7808 struct ldcred *ldc = runtime_get_caller_creds();
7809
7810 /* Only allow root processes to look up children, even if we're in the per-user launchd.
7811 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
7812 * in a non-flat namespace.
7813 */
7814 if( ldc->euid != 0 ) {
7815 job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
7816 return BOOTSTRAP_NOT_PRIVILEGED;
7817 }
7818
7819 unsigned int cnt = 0;
7820
7821 jobmgr_t jmr = j->mgr;
7822 jobmgr_t jmi = NULL;
7823 SLIST_FOREACH( jmi, &jmr->submgrs, sle ) {
7824 cnt++;
7825 }
7826
7827 /* Find our per-user launchds if we're PID 1. */
7828 job_t ji = NULL;
7829 if( pid1_magic ) {
7830 LIST_FOREACH( ji, &jmr->jobs, sle ) {
7831 cnt += ji->per_user ? 1 : 0;
7832 }
7833 }
7834
7835 if( cnt == 0 ) {
7836 return BOOTSTRAP_NO_CHILDREN;
7837 }
7838
7839 mach_port_array_t _child_ports = NULL;
7840 mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
7841 if( !job_assumes(j, _child_ports != NULL) ) {
7842 kr = BOOTSTRAP_NO_MEMORY;
7843 goto out_bad;
7844 }
7845
7846 name_array_t _child_names = NULL;
7847 mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
7848 if( !job_assumes(j, _child_names != NULL) ) {
7849 kr = BOOTSTRAP_NO_MEMORY;
7850 goto out_bad;
7851 }
7852
7853 bootstrap_property_array_t _child_properties = NULL;
7854 mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
7855 if( !job_assumes(j, _child_properties != NULL) ) {
7856 kr = BOOTSTRAP_NO_MEMORY;
7857 goto out_bad;
7858 }
7859
7860 unsigned int cnt2 = 0;
7861 SLIST_FOREACH( jmi, &jmr->submgrs, sle ) {
7862 if( jobmgr_assumes(jmi, launchd_mport_make_send(jmi->jm_port) == KERN_SUCCESS) ) {
7863 _child_ports[cnt2] = jmi->jm_port;
7864 } else {
7865 _child_ports[cnt2] = MACH_PORT_NULL;
7866 }
7867
7868 strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
7869 _child_properties[cnt2] = jmi->properties;
7870
7871 cnt2++;
7872 }
7873
7874 if( pid1_magic ) LIST_FOREACH( ji, &jmr->jobs, sle ) {
7875 if( ji->per_user ) {
7876 if( job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true) ) {
7877 mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));
7878
7879 if( job_assumes(ji, launchd_mport_copy_send(port) == KERN_SUCCESS) ) {
7880 _child_ports[cnt2] = port;
7881 } else {
7882 _child_ports[cnt2] = MACH_PORT_NULL;
7883 }
7884 } else {
7885 _child_ports[cnt2] = MACH_PORT_NULL;
7886 }
7887
7888 strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
7889 _child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;
7890
7891 cnt2++;
7892 }
7893 }
7894
7895 *child_names_cnt = cnt;
7896 *child_ports_cnt = cnt;
7897 *child_properties_cnt = cnt;
7898
7899 *child_names = _child_names;
7900 *child_ports = _child_ports;
7901 *child_properties = _child_properties;
7902
7903 unsigned int i = 0;
7904 for( i = 0; i < cnt; i++ ) {
7905 job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
7906 }
7907
7908 return BOOTSTRAP_SUCCESS;
7909 out_bad:
7910 if( _child_ports ) {
7911 mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
7912 }
7913
7914 if( _child_names ) {
7915 mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
7916 }
7917
7918 if( _child_properties ) {
7919 mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
7920 }
7921
7922 return kr;
7923 }
7924
7925 kern_return_t
7926 job_mig_transaction_count_for_pid(job_t j, pid_t p, int32_t *cnt, boolean_t *condemned)
7927 {
7928 kern_return_t kr = KERN_FAILURE;
if( !launchd_assumes(j != NULL) ) {
return BOOTSTRAP_NO_MEMORY;
}
7929 struct ldcred *ldc = runtime_get_caller_creds();
7930 if( (ldc->euid != geteuid()) && (ldc->euid != 0) ) {
7931 return BOOTSTRAP_NOT_PRIVILEGED;
7932 }
7933
7934 job_t j_for_pid = jobmgr_find_by_pid_deep(j->mgr, p, false);
7935 if( j_for_pid ) {
7936 if( j_for_pid->kill_via_shmem ) {
7937 if( j_for_pid->shmem ) {
7938 *cnt = j_for_pid->shmem->vp_shmem_transaction_cnt;
7939 *condemned = j_for_pid->shmem->vp_shmem_flags & VPROC_SHMEM_EXITING;
7940 *cnt += *condemned ? 1 : 0;
7941 } else {
7942 *cnt = 0;
7943 *condemned = false;
7944 }
7945
7946 kr = BOOTSTRAP_SUCCESS;
7947 } else {
7948 kr = BOOTSTRAP_NO_MEMORY;
7949 }
7950 } else {
7951 kr = BOOTSTRAP_UNKNOWN_SERVICE;
7952 }
7953
7954 return kr;
7955 }
7956
7957 kern_return_t
7958 job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
7959 {
7960 struct ldcred *ldc = runtime_get_caller_creds();
7961 if( (ldc->euid != geteuid()) && (ldc->euid != 0) ) {
7962 return BOOTSTRAP_NOT_PRIVILEGED;
7963 }
7964
7965 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
7966 * directly by launchd as agents.
7967 */
*managed = false; /* MIG does not pre-initialize out parameters, so default to false. */
7968 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
7969 if( j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job ) {
7970 *managed = true;
7971 }
7972
7973 return BOOTSTRAP_SUCCESS;
7974 }
7975
7976 kern_return_t
7977 job_mig_port_for_label(job_t j, name_t label, mach_port_t *mp)
7978 {
if( !launchd_assumes(j != NULL) ) {
return BOOTSTRAP_NO_MEMORY;
}
7979 struct ldcred *ldc = runtime_get_caller_creds();
7980 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
7981
7982 mach_port_t _mp = MACH_PORT_NULL;
7983 if( !j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid()) ) {
7984 job_t target_j = job_find(label);
7985 if( jobmgr_assumes(root_jobmgr, target_j != NULL) ) {
7986 if( target_j->j_port == MACH_PORT_NULL ) {
7987 job_assumes(target_j, job_setup_machport(target_j) == true);
7988 }
7989
7990 _mp = target_j->j_port;
7991 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
7992 } else {
7993 kr = BOOTSTRAP_NO_MEMORY;
7994 }
7995 }
7996
7997 *mp = _mp;
7998 return kr;
7999 }
8000
8001 #if !TARGET_OS_EMBEDDED
8002 kern_return_t
8003 job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t session)
8004 {
8005 uuid_string_t uuid_str;
8006 uuid_unparse(uuid, uuid_str);
8007 job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", session, uuid_str);
8008
8009 job_t ji = NULL, jt = NULL;
8010 LIST_FOREACH_SAFE( ji, &s_needing_sessions, sle, jt ) {
8013
8014 if( uuid_compare(uuid, ji->expected_audit_uuid) == 0 ) {
8015 uuid_clear(ji->expected_audit_uuid);
8016 if( session != MACH_PORT_NULL ) {
8017 job_log(ji, LOG_DEBUG, "Job should join session with port %u", session);
8018 mach_port_mod_refs(mach_task_self(), session, MACH_PORT_RIGHT_SEND, 1);
8019 } else {
8020 job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
8021 }
8022
8023 ji->audit_session = session;
8024 LIST_REMOVE(ji, needing_session_sle);
8025 job_dispatch(ji, false);
8026 }
8027 }
8028
8029 /* Each job that the session port was set for holds a reference. At the end of
8030 * the loop, there will be one extra reference belonging to this MiG protocol.
8031 * We need to release it so that the session goes away when all the jobs
8032 * referencing it are unloaded.
8033 */
8034 mach_port_deallocate(mach_task_self(), session);
8035
8036 return KERN_SUCCESS;
8037 }
8038 #else
8039 kern_return_t
8040 job_mig_set_security_session(job_t j __attribute__((unused)), uuid_t uuid __attribute__((unused)), mach_port_t session __attribute__((unused)))
8041 {
8042 return KERN_SUCCESS;
8043 }
8044 #endif
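
/*
 * Sketch of the handshake the routine above completes (illustrative): a job
 * submitted with an audit-session UUID parks on s_needing_sessions until the
 * session creator calls back with the same UUID plus the session port. The
 * UUID itself is ordinary libuuid material on the client side:
 *
 *	uuid_t session_uuid;
 *	uuid_generate(session_uuid);
 *	// Submit the job with session_uuid as its expected audit UUID, then
 *	// hand launchd the (session_uuid, session port) pair; the routine
 *	// matches on uuid_compare() == 0 and dispatches the parked job.
 */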
8045
8046 jobmgr_t
8047 jobmgr_find_by_name(jobmgr_t jm, const char *where)
8048 {
8049 jobmgr_t jmi, jmi2;
8050
8051 /* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
8052 if (where == NULL) {
8053 if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
8054 where = VPROCMGR_SESSION_LOGINWINDOW;
8055 } else {
8056 where = VPROCMGR_SESSION_AQUA;
8057 }
8058 }
8059
8060 if (strcasecmp(jm->name, where) == 0) {
8061 return jm;
8062 }
8063
8064 if( strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic ) {
8065 jmi = root_jobmgr;
8066 goto jm_found;
8067 }
8068
8069 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
8070 if (unlikely(jmi->shutting_down)) {
8071 continue;
8072 } else if (strcasecmp(jmi->name, where) == 0) {
8073 goto jm_found;
8074 } else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
8075 SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
8076 if (strcasecmp(jmi2->name, where) == 0) {
8077 jmi = jmi2;
8078 goto jm_found;
8079 }
8080 }
8081 }
8082 }
8083
8084 jm_found:
8085 return jmi;
8086 }
8087
8088 kern_return_t
8089 job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t audit_session, uint64_t flags)
8090 {
8091 mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
8092 mach_port_array_t l2l_ports = NULL;
8093 mach_port_t reqport, rcvright;
8094 kern_return_t kr = 1;
8095 launch_data_t out_obj_array = NULL;
8096 struct ldcred *ldc = runtime_get_caller_creds();
8097 jobmgr_t jmr = NULL;
8098
8099 if (!launchd_assumes(j != NULL)) {
8100 return BOOTSTRAP_NO_MEMORY;
8101 }
8102
8103 if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
8104 job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");
8105
8106 kr = BOOTSTRAP_NOT_PRIVILEGED;
8107 goto out;
8108 }
8109
8110 job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);
8111
8112 kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
8113
8114 if (!job_assumes(j, kr == 0)) {
8115 goto out;
8116 }
8117
8118 launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);
8119
8120 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, audit_session)) != NULL)) {
8121 kr = BOOTSTRAP_NO_MEMORY;
8122 goto out;
8123 }
8124
8125 jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;
8126
8127 /* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
8128 * processing an IPC request, we'll do this action before the new job manager can get any IPC
8129 * requests. This serialization is guaranteed since we are single-threaded in that respect.
8130 */
8131 if( flags & LAUNCH_GLOBAL_ON_DEMAND ) {
8132 /* This is so awful. */
8133 /* Remove the job from its current job manager. */
8134 LIST_REMOVE(j, sle);
8135 LIST_REMOVE(j, pid_hash_sle);
8136
8137 /* Put the job into the target job manager. */
8138 LIST_INSERT_HEAD(&jmr->jobs, j, sle);
8139 LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
8140
8141 j->mgr = jmr;
8142 job_set_global_on_demand(j, true);
8143 }
8144
8145 for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
8146 launch_data_t tmp, obj_at_idx;
8147 struct machservice *ms;
8148 job_t j_for_service;
8149 const char *serv_name;
8150 pid_t target_pid;
8151 bool serv_perpid;
8152
8153 job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
8154 job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
8155 target_pid = (pid_t)launch_data_get_integer(tmp);
8156 job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
8157 serv_perpid = launch_data_get_bool(tmp);
8158 job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
8159 serv_name = launch_data_get_string(tmp);
8160
8161 j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);
8162
8163 if (unlikely(!j_for_service)) {
8164 /* The PID probably exited */
8165 job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
8166 continue;
8167 }
8168
8169 if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
8170 job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
8171 machservice_request_notifications(ms);
8172 }
8173 }
8174
8175 kr = 0;
8176
8177 out:
8178 if (out_obj_array) {
8179 launch_data_free(out_obj_array);
8180 }
8181
8182 if (l2l_ports) {
8183 mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
8184 }
8185
8186 if (kr == 0) {
8187 if (target_subset) {
8188 job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
8189 }
8190 } else if (jmr) {
8191 jobmgr_shutdown(jmr);
8192 }
8193
8194 return kr;
8195 }
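
/*
 * Relationship sketch: job_mig_move_subset() above is the receiving half of
 * a bootstrap transfer. _vproc_grab_subset() is a MIG call into the donor
 * launchd, where it lands in job_mig_take_subset() below; the donor packs
 * each anonymous job's service name, PID, and per-PID flag (the
 * TAKE_SUBSET_* keys) plus the ports, and this side replays them into the
 * new job manager via machservice_new().
 */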
8196
8197 kern_return_t
8198 job_mig_init_session(job_t j, name_t session_type, mach_port_t audit_session)
8199 {
8200 job_t j2;
8201
8202 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
8203 if (j->mgr->session_initialized) {
8204 job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
8205 kr = BOOTSTRAP_NOT_PRIVILEGED;
8206 } else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
8207 jobmgr_t jmi;
8208
8209 /*
8210 * 5330262
8211 *
8212 * We're working around LoginWindow and the WindowServer.
8213 *
8214 * In practice, there is only one LoginWindow session. Unfortunately, for certain
8215 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
8216 * spawns a replacement loginwindow session before cleaning up the previous one.
8217 *
8218 * We're going to use the creation of a new LoginWindow context as a clue that the
8219 * previous LoginWindow context is on the way out and therefore we should just
8220 * kick-start the shutdown of it.
8221 */
8222
8223 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
8224 if (unlikely(jmi->shutting_down)) {
8225 continue;
8226 } else if (strcasecmp(jmi->name, session_type) == 0) {
8227 jobmgr_shutdown(jmi);
8228 break;
8229 }
8230 }
8231 }
8232
8233 jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
8234 strcpy(j->mgr->name_init, session_type);
8235
8236 if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
8237 j2->audit_session = audit_session;
8238 job_assumes(j, job_dispatch(j2, true));
8239 kr = BOOTSTRAP_SUCCESS;
8240 }
8241
8242 return kr;
8243 }
8244
8245 kern_return_t
8246 job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t audit_session, mach_port_t *new_bsport)
8247 {
8248 job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);
8249
8250 if( !job_assumes(j, pid1_magic == false) ) {
8251 job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
8252 return BOOTSTRAP_NOT_PRIVILEGED;
8253 }
8254
8255 if( !j->anonymous ) {
8256 job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
8257 return BOOTSTRAP_NOT_PRIVILEGED;
8258 }
8259
8260 jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
8261 if( target_jm == j->mgr ) {
8262 job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
8263 *new_bsport = target_jm->jm_port;
8264 return BOOTSTRAP_SUCCESS;
8265 }
8266
8267 if( !target_jm ) {
8268 target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, audit_session);
8269 if( !target_jm ) {
8270 mach_port_deallocate(mach_task_self(), audit_session);
8271 } else {
8272 target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
8273 }
8274 }
8275
8276 if( !job_assumes(j, target_jm != NULL) ) {
8277 job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
8278 return BOOTSTRAP_NO_MEMORY;
8279 }
8280
8281 /* Remove the job from its current job manager. */
8282 LIST_REMOVE(j, sle);
8283 LIST_REMOVE(j, pid_hash_sle);
8284
8285 job_t ji = NULL, jit = NULL;
8286 LIST_FOREACH_SAFE( ji, &j->mgr->global_env_jobs, global_env_sle, jit ) {
8287 if( ji == j ) {
8288 LIST_REMOVE(ji, global_env_sle);
8289 break;
8290 }
8291 }
8292
8293 /* Put the job into the target job manager. */
8294 LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
8295 LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);
8296
8297 if( ji ) {
8298 LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
8299 }
8300
8301 /* Move our Mach services over if we're not in a flat namespace. */
8302 if( !g_flat_mach_namespace && !SLIST_EMPTY(&j->machservices) ) {
8303 struct machservice *msi = NULL, *msit = NULL;
8304 SLIST_FOREACH_SAFE( msi, &j->machservices, sle, msit ) {
8305 LIST_REMOVE(msi, name_hash_sle);
8306 LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
8307 }
8308 }
8309
8310 j->mgr = target_jm;
8311 j->migratory = true;
8312 *new_bsport = target_jm->jm_port;
8313
8314 /* Anonymous jobs which move around are particularly interesting to us, so we want to
8315 * stick around while they're still around.
8316 * For example, login calls into the PAM launchd module, which moves the process into
8317 * the StandardIO session by default. So we'll hold a reference on that job to prevent
8318 * ourselves from going away.
8319 */
8320 runtime_add_ref();
8321
8322 return KERN_SUCCESS;
8323 }
8324
8325 kern_return_t
8326 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
8327 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
8328 mach_port_array_t *portsp, unsigned int *ports_cnt)
8329 {
8330 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
8331 mach_port_array_t ports = NULL;
8332 unsigned int cnt = 0, cnt2 = 0;
8333 size_t packed_size;
8334 struct machservice *ms;
8335 jobmgr_t jm;
8336 job_t ji;
8337
8338 if (!launchd_assumes(j != NULL)) {
8339 return BOOTSTRAP_NO_MEMORY;
8340 }
8341
8342 jm = j->mgr;
8343
8344 if (unlikely(!pid1_magic)) {
8345 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
8346 return BOOTSTRAP_NOT_PRIVILEGED;
8347 }
8348 if (unlikely(jobmgr_parent(jm) == NULL)) {
8349 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
8350 return BOOTSTRAP_NOT_PRIVILEGED;
8351 }
8352 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
8353 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
8354 return BOOTSTRAP_NOT_PRIVILEGED;
8355 }
8356 if (unlikely(!j->anonymous)) {
8357 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
8358 return BOOTSTRAP_NOT_PRIVILEGED;
8359 }
8360
8361 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
8362
8363 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
8364 if (!job_assumes(j, outdata_obj_array)) {
8365 goto out_bad;
8366 }
8367
8368 *outdataCnt = 20 * 1024 * 1024; /* Fixed 20 MB upper bound; launch_data_pack() below packs into this buffer. */
8369 mig_allocate(outdata, *outdataCnt);
8370 if (!job_assumes(j, *outdata != 0)) {
8371 goto out_bad; /* Frees outdata_obj_array; *outdata is 0 here, so out_bad skips its deallocation. */
8372 }
8373
8374 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
8375 if (!ji->anonymous) {
8376 continue;
8377 }
8378 SLIST_FOREACH(ms, &ji->machservices, sle) {
8379 cnt++;
8380 }
8381 }
8382
8383 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
8384 if (!job_assumes(j, ports != NULL)) {
8385 goto out_bad;
8386 }
8387
8388 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
8389 if (!ji->anonymous) {
8390 continue;
8391 }
8392
8393 SLIST_FOREACH(ms, &ji->machservices, sle) {
8394 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
8395 job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
8396 } else {
8397 goto out_bad;
8398 }
8399
8400 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
8401 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
8402 } else {
8403 goto out_bad;
8404 }
8405
8406 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
8407 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
8408 } else {
8409 goto out_bad;
8410 }
8411
8412 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
8413 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
8414 } else {
8415 goto out_bad;
8416 }
8417
8418 ports[cnt2] = machservice_port(ms);
8419
8420 /* Increment the send right count by one so that we can shut down the jobmgr cleanly. */
8421 jobmgr_assumes(jm, (errno = mach_port_mod_refs(mach_task_self(), ports[cnt2], MACH_PORT_RIGHT_SEND, 1)) == 0);
8422 cnt2++;
8423 }
8424 }
8425
8426 job_assumes(j, cnt == cnt2);
8427
8428 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
8429 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
8430 if (!job_assumes(j, packed_size != 0)) {
8431 goto out_bad;
8432 }
8433
8434 launch_data_free(outdata_obj_array);
8435
8436 *portsp = ports;
8437 *ports_cnt = cnt;
8438
8439 *reqport = jm->req_port;
8440 *rcvright = jm->jm_port;
8441
8442 jm->req_port = 0;
8443 jm->jm_port = 0;
8444
8445 workaround_5477111 = j;
8446
8447 jobmgr_shutdown(jm);
8448
8449 return BOOTSTRAP_SUCCESS;
8450
8451 out_bad:
8452 if (outdata_obj_array) {
8453 launch_data_free(outdata_obj_array);
8454 }
8455 if (*outdata) {
8456 mig_deallocate(*outdata, *outdataCnt);
8457 }
8458 if (ports) {
8459 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
8460 }
8461
8462 return BOOTSTRAP_NO_MEMORY;
8463 }
8464
8465 kern_return_t
8466 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
8467 {
8468 int bsdepth = 0;
8469 jobmgr_t jmr;
8470
8471 if (!launchd_assumes(j != NULL)) {
8472 return BOOTSTRAP_NO_MEMORY;
8473 }
8474
8475 jmr = j->mgr;
8476
8477 while ((jmr = jobmgr_parent(jmr)) != NULL) {
8478 bsdepth++;
8479 }
8480
8481 /* Since we use recursion, we need an artificial depth for subsets */
8482 if (unlikely(bsdepth > 100)) {
8483 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
8484 return BOOTSTRAP_NO_MEMORY;
8485 }
8486
8487 char name[NAME_MAX];
8488 snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
8489
8490 if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->audit_session)) != NULL)) {
8491 if (unlikely(requestorport == MACH_PORT_NULL)) {
8492 return BOOTSTRAP_NOT_PRIVILEGED;
8493 }
8494 return BOOTSTRAP_NO_MEMORY;
8495 }
8496
8497 *subsetportp = jmr->jm_port;
8498 jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
8499
8500 job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
8501 return BOOTSTRAP_SUCCESS;
8502 }
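
/*
 * Client-side sketch (illustrative): subsets are created through
 * bootstrap_subset(). The requestor port is what ties the subset's
 * lifetime to the caller:
 *
 *	mach_port_t subset_port = MACH_PORT_NULL;
 *	kern_return_t kr = bootstrap_subset(bootstrap_port, mach_task_self(),
 *	    &subset_port);
 *	// On success, subset_port names a child bootstrap; registrations in
 *	// it shadow the parent's and go away when the requestor port dies.
 */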
8503
8504 kern_return_t
8505 job_mig_embedded_wait(job_t j, name_t targetlabel, integer_t *waitstatus)
8506 {
8507 job_t otherj;
8508
8509 if (!launchd_assumes(j != NULL)) {
8510 return BOOTSTRAP_NO_MEMORY;
8511 }
8512
8513 if (unlikely(!(otherj = job_find(targetlabel)))) {
8514 return BOOTSTRAP_UNKNOWN_SERVICE;
8515 }
8516
8517 *waitstatus = otherj->last_exit_status; /* Report the target job's status, not the caller's. */
8518
8519 return 0;
8520 }
8521
8522 kern_return_t
8523 job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, mach_port_t *out_name_port, mach_port_t *obsrvr_port, unsigned int flags)
8524 {
8525 struct ldcred *ldc = runtime_get_caller_creds();
8526 job_t otherj;
8527
8528 if (!launchd_assumes(j != NULL)) {
8529 return BOOTSTRAP_NO_MEMORY;
8530 }
8531
8532 if (unlikely(!(otherj = job_find(targetlabel)))) {
8533 return BOOTSTRAP_UNKNOWN_SERVICE;
8534 }
8535
8536 #if TARGET_OS_EMBEDDED
8537 bool allow_non_root_kickstart = j->username && otherj->username && ( strcmp(j->username, otherj->username) == 0 );
8538 #else
8539 bool allow_non_root_kickstart = false;
8540 #endif
8541
8542 if( ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart ) {
8543 return BOOTSTRAP_NOT_PRIVILEGED;
8544 }
8545
8546 if( otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC) ) {
8547 return BOOTSTRAP_SERVICE_ACTIVE;
8548 }
8549
8550 otherj->stall_before_exec = ( flags & VPROCFLAG_STALL_JOB_EXEC );
8551 otherj = job_dispatch(otherj, true);
8552
8553 if (!job_assumes(j, otherj && otherj->p)) {
8554 /* <rdar://problem/6787083> Clear this flag if we failed to start the job. */
8555 if (otherj) otherj->stall_before_exec = false; /* job_dispatch() may have returned NULL. */
8556 return BOOTSTRAP_NO_MEMORY;
8557 }
8558
8559 /* If any of the following steps fail, we return an error to the client.
8560 * The problem is that, if the client has requested the job be stalled before
8561 * exec(2), the client won't be able to uncork the fork(2), leaving the job
8562 * forever stalled until the client tries again and we successfully start
8563 * the job.
8564 *
8565 * See <rdar://problem/6787083> for more about the implications.
8566 *
8567 * Fortunately, these next actions should pretty much never fail. In the
8568 * future, we should look at cleaning up after these failures if the job
8569 * was started in a stalled state.
8570 */
8571
8572 kern_return_t kr = task_name_for_pid(mach_task_self(), otherj->p, out_name_port);
8573 if (!job_assumes(j, kr == 0)) {
8574 return kr;
8575 }
8576
8577 if (!job_setup_machport(otherj)) {
8578 return BOOTSTRAP_NO_MEMORY;
8579 }
8580
8581 *obsrvr_port = otherj->j_port;
8582 *out_pid = otherj->p;
8583
8584 return 0;
8585 }
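
/*
 * Sketch of the stall/uncork protocol implemented by the routine above and
 * job_mig_uncork_fork() below (sequence only; the client-side wrappers live
 * in libvproc and are not shown):
 *
 *	1. Kickstart the target with VPROCFLAG_STALL_JOB_EXEC; the child is
 *	   forked but parks before exec(2) (stall_before_exec).
 *	2. Use *out_pid / *out_name_port to attach a debugger or otherwise
 *	   prepare the stalled child.
 *	3. Call job_mig_uncork_fork(), which clears the stall and lets the
 *	   child proceed to exec(2).
 */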
8586
8587 kern_return_t
8588 job_mig_wait(job_t j, mach_port_t srp, integer_t *waitstatus)
8589 {
8590 #if 0
8591 if (!launchd_assumes(j != NULL)) {
8592 return BOOTSTRAP_NO_MEMORY;
8593 }
8594 return job_handle_mpm_wait(j, srp, waitstatus);
8595 #else
8596 if( false ) {
8597 /* To make the compiler happy. */
8598 job_handle_mpm_wait(NULL, MACH_PORT_NULL, NULL);
8599 }
8600 struct ldcred *ldc = runtime_get_caller_creds();
8601 job_t calling_j = job_mig_intran2(j->mgr, MACH_PORT_NULL, ldc->pid);
8602
8603 return job_mig_wait2(calling_j, j, srp, waitstatus, true);
8604 #endif
8605 }
8606
8607 kern_return_t
8608 job_mig_wait2(job_t j, job_t target_j, mach_port_t srp, integer_t *status, boolean_t legacy)
8609 {
8610 if( !launchd_assumes(j != NULL) ) {
8611 return BOOTSTRAP_NO_MEMORY;
8612 }
8613 if( !launchd_assumes(target_j != NULL) ) {
8614 return BOOTSTRAP_NO_MEMORY;
8615 }
8616 if( !launchd_assumes(status != NULL) ) {
8617 return BOOTSTRAP_NO_MEMORY;
8618 }
8619
8620 if( target_j->p == 0 ) {
8621 *status = target_j->last_exit_status;
8622 return BOOTSTRAP_SUCCESS;
8623 }
8624
8625 if( !job_assumes(j, waiting4exit_new(target_j, srp, legacy) == true) ) {
8626 return BOOTSTRAP_NO_MEMORY;
8627 }
8628
8629 return MIG_NO_REPLY;
8630 }
8631
8632 kern_return_t
8633 job_mig_uncork_fork(job_t j)
8634 {
8635 if (!launchd_assumes(j != NULL)) {
8636 return BOOTSTRAP_NO_MEMORY;
8637 }
8638
8639 if (unlikely(!j->stall_before_exec)) {
8640 job_log(j, LOG_WARNING, "Attempt to uncork a job that isn't in the middle of a fork().");
8641 return 1;
8642 }
8643
8644 job_uncork_fork(j);
8645 j->stall_before_exec = false;
8646 return 0;
8647 }
8648
8649 kern_return_t
8650 job_mig_spawn(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t audit_session, pid_t *child_pid, mach_port_t *obsvr_port)
8651 {
8652 launch_data_t input_obj = NULL;
8653 size_t data_offset = 0;
8654 struct ldcred *ldc = runtime_get_caller_creds();
8655 job_t jr;
8656
8657 if (!launchd_assumes(j != NULL)) {
8658 return BOOTSTRAP_NO_MEMORY;
8659 }
8660
8661 if (unlikely(j->deny_job_creation)) {
8662 return BOOTSTRAP_NOT_PRIVILEGED;
8663 }
8664
8665 #if HAVE_SANDBOX
8666 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8667 return BOOTSTRAP_NOT_PRIVILEGED;
8668 }
8669 #endif
8670
8671 if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
8672 job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
8673 return VPROC_ERR_TRY_PER_USER;
8674 }
8675
8676 if (!job_assumes(j, indataCnt != 0)) {
8677 return 1;
8678 }
8679
8680 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
8681 if (!job_assumes(j, (input_obj = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
8682 return 1;
8683 }
8684
8685 jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
8686 if( !jobmgr_assumes(j->mgr, target_jm != NULL) ) {
8687 jobmgr_log(j->mgr, LOG_NOTICE, "%s() can't find its session!", __func__);
8688 return 1;
8689 }
8690
8691 jr = jobmgr_import2(target_jm ?: j->mgr, input_obj);
8692
8693 if (!job_assumes(j, jr != NULL)) {
8694 switch (errno) {
8695 case EEXIST:
8696 return BOOTSTRAP_NAME_IN_USE;
8697 default:
8698 return BOOTSTRAP_NO_MEMORY;
8699 }
8700 }
8701
8702 if (pid1_magic) {
8703 jr->mach_uid = ldc->uid;
8704 }
8705
8706 jr->legacy_LS_job = true;
8707 jr->abandon_pg = true;
8708 jr->stall_before_exec = jr->wait4debugger;
8709 jr->wait4debugger = false;
8710 jr->audit_session = audit_session;
8711 uuid_clear(jr->expected_audit_uuid);
8712
8713 jr = job_dispatch(jr, true);
8714
8715 if (!job_assumes(j, jr != NULL)) {
8716 return BOOTSTRAP_NO_MEMORY;
8717 }
8718
8719 if (!job_assumes(jr, jr->p)) {
8720 job_remove(jr);
8721 return BOOTSTRAP_NO_MEMORY;
8722 }
8723
8724 if (!job_setup_machport(jr)) {
8725 job_remove(jr);
8726 return BOOTSTRAP_NO_MEMORY;
8727 }
8728
8729 job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
8730
8731 *child_pid = jr->p;
8732 *obsvr_port = jr->j_port;
8733
8734 mig_deallocate(indata, indataCnt);
8735
8736 return BOOTSTRAP_SUCCESS;
8737 }
8738
8739 void
8740 jobmgr_init(bool sflag)
8741 {
8742 const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
8743 SLIST_INIT(&s_curious_jobs);
8744 LIST_INIT(&s_needing_sessions);
8745
8746 launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
8747
8748 uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
8749 s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
8750 if( likely(s_no_hang_fd == -1) ) {
8751 if( jobmgr_assumes(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK)) != -1) ) {
8752 jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr) != -1);
8753 }
8754 }
8755 s_no_hang_fd = _fd(s_no_hang_fd);
8756 }
8757
8758 size_t
8759 our_strhash(const char *s)
8760 {
8761 size_t c, r = 5381;
8762
8763 /* djb2
8764 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
8765 */
8766
8767 while ((c = *s++)) {
8768 r = ((r << 5) + r) + c; /* hash*33 + c */
8769 }
8770
8771 return r;
8772 }
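
/*
 * Worked example (illustrative): hashing "ab" with djb2:
 *
 *	r = 5381
 *	r = 5381 * 33 + 'a' (97)   = 177670
 *	r = 177670 * 33 + 'b' (98) = 5863208
 *
 * The (r << 5) + r form is simply a strength-reduced r * 33.
 */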
8773
8774 size_t
8775 hash_label(const char *label)
8776 {
8777 return our_strhash(label) % LABEL_HASH_SIZE;
8778 }
8779
8780 size_t
8781 hash_ms(const char *msstr)
8782 {
8783 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
8784 }
8785
8786 bool
8787 waiting4removal_new(job_t j, mach_port_t rp)
8788 {
8789 struct waiting_for_removal *w4r;
8790
8791 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
8792 return false;
8793 }
8794
8795 w4r->reply_port = rp;
8796
8797 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
8798
8799 return true;
8800 }
8801
8802 void
8803 waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
8804 {
8805 job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);
8806
8807 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
8808
8809 free(w4r);
8810 }
8811
8812 bool
8813 waiting4exit_new(job_t j, mach_port_t rp, bool legacy)
8814 {
8815 struct waiting_for_exit *w4e = NULL;
8816 if( !job_assumes(j, (w4e = malloc(sizeof(struct waiting_for_exit))) != NULL) ) {
8817 return false;
8818 }
8819
8820 w4e->rp = rp;
8821 w4e->legacy = legacy;
8822 LIST_INSERT_HEAD(&j->exit_watchers, w4e, sle);
8823
8824 return true;
8825 }
8826
8827 void
8828 waiting4exit_delete(job_t j, struct waiting_for_exit *w4e)
8829 {
8830 if( !w4e->legacy ) {
8831 job_assumes(j, job_mig_wait2_reply(w4e->rp, KERN_SUCCESS, j->last_exit_status, false) == KERN_SUCCESS);
8832 } else {
8833 job_assumes(j, job_mig_wait_reply(w4e->rp, KERN_SUCCESS, j->last_exit_status) == KERN_SUCCESS);
8834 }
8835
8836 LIST_REMOVE(w4e, sle);
8837
8838 free(w4e);
8839 }
8840
8841 size_t
8842 get_kern_max_proc(void)
8843 {
8844 int mib[] = { CTL_KERN, KERN_MAXPROC };
8845 int max = 100;
8846 size_t max_sz = sizeof(max);
8847
8848 launchd_assumes(sysctl(mib, 2, &max, &max_sz, NULL, 0) != -1);
8849
8850 return max;
8851 }
8852
8853 /* See rdar://problem/6271234 */
8854 void
8855 eliminate_double_reboot(void)
8856 {
8857 if( unlikely(!pid1_magic) ) {
8858 return;
8859 }
8860
8861 struct stat sb;
8862 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
8863 const char *try_again = "Will try again at next boot.";
8864 int result = ~0;
8865
8866 if( unlikely(stat(argv[1], &sb) != -1) ) {
8867 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
8868
8869 int wstatus;
8870 pid_t p;
8871
8872 jobmgr_assumes(root_jobmgr, (errno = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ)) == 0);
8873
8874 if (errno) {
8875 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script! %s", try_again);
8876 goto out;
8877 }
8878
8879 if( !jobmgr_assumes(root_jobmgr, waitpid(p, &wstatus, 0) != -1) ) {
8880 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't confirm that deferred install script exited successfully! %s", try_again);
8881 goto out;
8882 }
8883
8884 if( jobmgr_assumes(root_jobmgr, WIFEXITED(wstatus) != 0) ) {
8885 if( jobmgr_assumes(root_jobmgr, (result = WEXITSTATUS(wstatus)) == EXIT_SUCCESS) ) {
8886 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
8887 } else {
8888 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script exited with status %d. %s", WEXITSTATUS(wstatus), try_again);
8889 }
8890 } else {
8891 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Confirmed that deferred install script exited, but couldn't confirm that it was successful. %s", try_again);
8892 }
8893 }
8894 out:
8895 if( result == 0 ) {
8896 /* If the unlink(2) were to fail, it would most likely fail with EBUSY. All the other
8897 * failure cases for unlink(2) don't apply when we're running under PID 1 and have verified
8898 * that the file exists. Outside of someone deliberately messing with us (like if /etc/rc.deferred_install
8899 * is actually a looping sym-link or a mount point for a filesystem) and I/O errors, we should be good.
8900 */
8901 if( !jobmgr_assumes(root_jobmgr, unlink(argv[1]) != -1) ) {
8902 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script couldn't be removed!");
8903 }
8904 }
8905 }
8906
8907 static void
8908 simulate_pid1_crash(void)
8909 {
8910 if( pid1_magic && g_simulate_pid1_crash ) {
8911 runtime_syslog(LOG_EMERG | LOG_CONSOLE, "About to simulate a crash.");
8912 raise(SIGSEGV);
8913 }
8914 }
8915
8916 static void
8917 jetsam_priority_from_job(job_t j, bool front, jetsam_priority_entry_t *jp)
8918 {
8919 jp->pid = j->p;
8920 jp->flags |= front ? kJetsamFlagsFrontmost : 0;
8921 }
8922
8923 static int
8924 job_cmp(const job_t *lhs, const job_t *rhs)
8925 {
8926 job_t _lhs = *lhs;
8927 job_t _rhs = *rhs;
8928 /* Sort in descending order. (Priority correlates to the soonishness with which you will be killed.) */
8929 if( _lhs->jetsam_priority > _rhs->jetsam_priority ) {
8930 return -1;
8931 } else if( _lhs->jetsam_priority < _rhs->jetsam_priority ) {
8932 return 1;
8933 }
8934
8935 return 0;
8936 }
8937
8938 int
8939 launchd_set_jetsam_priorities(launch_data_t priorities)
8940 {
8941 if( !launchd_assumes(launch_data_get_type(priorities) == LAUNCH_DATA_ARRAY) ) {
8942 return EINVAL;
8943 }
8944
8945 jobmgr_t jm = NULL;
8946 #if !TARGET_OS_EMBEDDED
8947 /* For testing. */
8948 jm = jobmgr_find_by_name(root_jobmgr, VPROCMGR_SESSION_AQUA);
8949 if( !launchd_assumes(jm != NULL) ) {
8950 return EINVAL;
8951 }
8952 #else
8953 /* Since this is for embedded, we can assume that the root job manager holds the Jetsam jobs. */
8954 jm = root_jobmgr;
8955
8956 if( !g_embedded_privileged_action ) {
8957 return EPERM;
8958 }
8959 #endif
8960
8961 size_t npris = launch_data_array_get_count(priorities);
8962
8963 job_t ji = NULL;
8964 size_t i = 0;
8965 for( i = 0; i < npris; i++ ) {
8966 launch_data_t ldi = launch_data_array_get_index(priorities, i);
8967 if( !launchd_assumes(launch_data_get_type(ldi) == LAUNCH_DATA_DICTIONARY) ) {
8968 continue;
8969 }
8970
8971 launch_data_t label = NULL;
8972 if( !launchd_assumes(label = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMLABEL)) ) {
8973 continue;
8974 }
8975 const char *_label = launch_data_get_string(label);
8976
8977 ji = job_find(_label);
8978 if( !launchd_assumes(ji != NULL) ) {
8979 continue;
8980 }
8981
8982 launch_data_t pri;
8983 long long _pri = 0;
8984 if( !launchd_assumes(pri = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMPRIORITY)) ) {
8985 continue;
8986 }
8987 _pri = launch_data_get_integer(pri);
8988
8989 if( ji->jetsam_priority == LAUNCHD_JETSAM_PRIORITY_UNSET ) {
8990 LIST_INSERT_HEAD(&ji->mgr->jetsam_jobs, ji, jetsam_sle);
8991 ji->mgr->jetsam_jobs_cnt++;
8992 }
8993 ji->jetsam_priority = _pri;
8994
8995 launch_data_t frontmost = NULL;
8996 if( !(frontmost = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMFRONTMOST)) ) {
8997 ji->jetsam_frontmost = false;
8998 continue;
8999 }
9000 ji->jetsam_frontmost = launch_data_get_bool(frontmost);
9001 }
9002
9003 i = 0;
9004 job_t *jobs = (job_t *)calloc(jm->jetsam_jobs_cnt, sizeof(job_t));
if( !launchd_assumes(jobs != NULL) ) {
return ENOMEM;
}
9005 LIST_FOREACH( ji, &jm->jetsam_jobs, jetsam_sle ) {
9006 if( ji->p ) {
9007 jobs[i] = ji;
9008 i++;
9009 }
9010 }
9011 size_t totalpris = i;
9012
9013 int result = EINVAL;
9014 if( launchd_assumes(totalpris > 0) ) {
9015 qsort((void *)jobs, totalpris, sizeof(job_t), (int (*)(const void *, const void *))job_cmp);
9016
9017 jetsam_priority_entry_t *jpris = (jetsam_priority_entry_t *)calloc(totalpris, sizeof(jetsam_priority_entry_t));
9018 if( !launchd_assumes(jpris != NULL) ) {
9019 result = ENOMEM;
9020 } else {
9021 for( i = 0; i < totalpris; i++ ) {
9022 jetsam_priority_from_job(jobs[i], jobs[i]->jetsam_frontmost, &jpris[i]);
9023 }
9024
9025 int _result = 0;
9026 launchd_assumes((_result = sysctlbyname("kern.memorystatus_priority_list", NULL, NULL, &jpris[0], totalpris * sizeof(jetsam_priority_entry_t))) != -1);
9027 result = _result != 0 ? errno : 0;
9028
9029 free(jpris);
9030 }
9031 }
9032 free(jobs);
9033
9034 return result;
9035 }
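
/*
 * Sketch of the input this function expects (illustrative; the label and
 * priority are hypothetical): an array of dictionaries keyed by the
 * LAUNCH_KEY_JETSAM* constants used above:
 *
 *	launch_data_t pris = launch_data_alloc(LAUNCH_DATA_ARRAY);
 *	launch_data_t entry = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
 *	launch_data_dict_insert(entry, launch_data_new_string("com.example.app"),
 *	    LAUNCH_KEY_JETSAMLABEL);
 *	launch_data_dict_insert(entry, launch_data_new_integer(5),
 *	    LAUNCH_KEY_JETSAMPRIORITY);
 *	launch_data_dict_insert(entry, launch_data_new_bool(true),
 *	    LAUNCH_KEY_JETSAMFRONTMOST);
 *	launch_data_array_set_index(pris, entry, 0);
 *	// ...then: launchd_set_jetsam_priorities(pris);
 */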