]> git.saurik.com Git - apple/launchd.git/blob - launchd/src/launchd_core_logic.c
58f8bd8c4d9f0a24bab8421e4aff785bea8357b7
[apple/launchd.git] / launchd / src / launchd_core_logic.c
1 /*
2 * @APPLE_APACHE_LICENSE_HEADER_START@
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * @APPLE_APACHE_LICENSE_HEADER_END@
17 */
18
19 static const char *const __rcs_file_version__ = "$Revision: 24003 $";
20
21 #include "config.h"
22 #include "launchd_core_logic.h"
23
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/boolean.h>
28 #include <mach/message.h>
29 #include <mach/notify.h>
30 #include <mach/mig_errors.h>
31 #include <mach/mach_traps.h>
32 #include <mach/mach_interface.h>
33 #include <mach/host_info.h>
34 #include <mach/mach_host.h>
35 #include <mach/exception.h>
36 #include <mach/host_reboot.h>
37 #include <sys/types.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
40 #include <sys/stat.h>
41 #include <sys/ucred.h>
42 #include <sys/fcntl.h>
43 #include <sys/un.h>
44 #include <sys/reboot.h>
45 #include <sys/wait.h>
46 #include <sys/sysctl.h>
47 #include <sys/sockio.h>
48 #include <sys/time.h>
49 #include <sys/resource.h>
50 #include <sys/ioctl.h>
51 #include <sys/mount.h>
52 #include <sys/pipe.h>
53 #include <sys/mman.h>
54 #include <sys/socket.h>
55 #include <sys/syscall.h>
56 #include <net/if.h>
57 #include <netinet/in.h>
58 #include <netinet/in_var.h>
59 #include <netinet6/nd6.h>
60 #include <bsm/libbsm.h>
61 #include <unistd.h>
62 #include <signal.h>
63 #include <errno.h>
64 #include <libgen.h>
65 #include <stdio.h>
66 #include <stdlib.h>
67 #include <stdarg.h>
68 #include <stdbool.h>
69 #include <paths.h>
70 #include <pwd.h>
71 #include <grp.h>
72 #include <ttyent.h>
73 #include <dlfcn.h>
74 #include <dirent.h>
75 #include <string.h>
76 #include <ctype.h>
77 #include <glob.h>
78 #include <spawn.h>
79 #include <libproc.h>
80 #include <malloc/malloc.h>
81 #include <pthread.h>
82 #if HAVE_SANDBOX
83 #define __APPLE_API_PRIVATE
84 #include <sandbox.h>
85 #endif
86 #if HAVE_QUARANTINE
87 #include <quarantine.h>
88 #endif
89 #if TARGET_OS_EMBEDDED
90 #include <sys/kern_memorystatus.h>
91 #else
92 /* To make my life easier. */
93 typedef struct jetsam_priority_entry {
94 pid_t pid;
95 uint32_t flags;
96 int32_t hiwat_pages;
97 int32_t hiwat_reserved1;
98 int32_t hiwat_reserved2;
99 int32_t hiwat_reserved3;
100 } jetsam_priority_entry_t;
101
102 enum {
103 kJetsamFlagsFrontmost = (1 << 0),
104 kJetsamFlagsKilled = (1 << 1)
105 };
106 #endif
107
108 #include "launch.h"
109 #include "launch_priv.h"
110 #include "launch_internal.h"
111 #include "bootstrap.h"
112 #include "bootstrap_priv.h"
113 #include "vproc.h"
114 #include "vproc_internal.h"
115
116 #include "reboot2.h"
117
118 #include "launchd.h"
119 #include "launchd_runtime.h"
120 #include "launchd_unix_ipc.h"
121 #include "protocol_vproc.h"
122 #include "protocol_vprocServer.h"
123 #include "protocol_job_reply.h"
124 #include "protocol_job_forward.h"
125 #include "mach_excServer.h"
126
127 /*
128 * LAUNCHD_SAMPLE_TIMEOUT
129 * If the job hasn't exited in the given number of seconds after sending
130 * it a SIGTERM, start sampling it.
131 * LAUNCHD_DEFAULT_EXIT_TIMEOUT
132 * If the job hasn't exited in the given number of seconds after sending
133 * it a SIGTERM, SIGKILL it. Can be overriden in the job plist.
134 */
135 #define LAUNCHD_MIN_JOB_RUN_TIME 10
136 #define LAUNCHD_SAMPLE_TIMEOUT 2
137 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
138 #define LAUNCHD_SIGKILL_TIMER 5
139 #define LAUNCHD_CLEAN_KILL_TIMER 1
140
141 #define SHUTDOWN_LOG_DIR "/var/log/shutdown"
142
143 #define TAKE_SUBSET_NAME "TakeSubsetName"
144 #define TAKE_SUBSET_PID "TakeSubsetPID"
145 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
146
/* True iff v is a non-zero power of two.  Every use of the argument is
 * parenthesized so expression arguments (e.g. `a + b`, `1 << n`) expand
 * correctly; note v is still evaluated three times, so avoid side effects. */
#define IS_POWER_OF_TWO(v)	(!((v) & ((v) - 1)) && (v))
148
149 extern char **environ;
150
151 struct waiting_for_removal {
152 SLIST_ENTRY(waiting_for_removal) sle;
153 mach_port_t reply_port;
154 };
155
156 static bool waiting4removal_new(job_t j, mach_port_t rp);
157 static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
158
159 struct waiting_for_exit {
160 LIST_ENTRY(waiting_for_exit) sle;
161 mach_port_t rp;
162 bool legacy;
163 };
164
165 static bool waiting4exit_new(job_t j, mach_port_t rp, bool legacy);
166 static void waiting4exit_delete(job_t j, struct waiting_for_exit *w4e);
167
/*
 * A Mach service advertised within a bootstrap context.  One instance exists
 * per registered service name; each lives simultaneously on its owning job's
 * list, the global special_ports list, and the name/port hash buckets below.
 * The service name is allocated inline past the end of the struct.
 */
struct machservice {
	SLIST_ENTRY(machservice) sle;			/* linkage: owning job's machservices list */
	SLIST_ENTRY(machservice) special_port_sle;	/* linkage: global special_ports list */
	LIST_ENTRY(machservice) name_hash_sle;		/* linkage: jobmgr ms_hash bucket (hashed by name) */
	LIST_ENTRY(machservice) port_hash_sle;		/* linkage: global port_hash bucket (hashed by port) */
	job_t job;					/* the job that owns this service */
	unsigned int gen_num;				/* generation counter; presumably bumped by machservice_resetport() -- verify */
	mach_port_name_t port;				/* the service's Mach port */
	unsigned int isActive:1,			/* the service is currently checked out / in use -- verify exact semantics */
		reset:1,				/* NOTE(review): presumably recreate the port on job exit (see machservice_resetport) -- confirm */
		recv:1,					/* launchd holds the receive right -- verify */
		hide:1,					/* hidden from lookups (see machservice_hidden) */
		kUNCServer:1,
		per_user_hack:1,
		debug_on_close:1,
		per_pid:1,				/* registered per-PID rather than per-bootstrap (see pid_local arg of machservice_new) */
		delete_on_destruction:1,
		drain_one_on_crash:1,			/* see machservice_drain_port / drain_all_on_crash */
		drain_all_on_crash:1,
		/* Don't let the size of this field to get too small. It has to be large enough
		 * to represent the reasonable range of special port numbers.
		 */
		special_port_num:20;

	const char name[0];				/* service name, stored inline */
};
194
195 static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */
196
197 #define PORT_HASH_SIZE 32
198 #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
199
200 static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
201
202 static void machservice_setup(launch_data_t obj, const char *key, void *context);
203 static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
204 static void machservice_resetport(job_t j, struct machservice *ms);
205 static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
206 static void machservice_ignore(job_t j, struct machservice *ms);
207 static void machservice_watch(job_t j, struct machservice *ms);
208 static void machservice_delete(job_t j, struct machservice *, bool port_died);
209 static void machservice_request_notifications(struct machservice *);
210 static mach_port_t machservice_port(struct machservice *);
211 static job_t machservice_job(struct machservice *);
212 static bool machservice_hidden(struct machservice *);
213 static bool machservice_active(struct machservice *);
214 static const char *machservice_name(struct machservice *);
215 static bootstrap_status_t machservice_status(struct machservice *);
216 void machservice_drain_port(struct machservice *);
217
/*
 * A named group of file descriptors from the job's Sockets dictionary.
 * The group name is allocated inline past the end of the struct.
 */
struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;		/* linkage: owning job's sockets list */
	int *fds;				/* array of descriptors in this group */
	unsigned int junkfds:1, fd_cnt:31;	/* fd_cnt: entries in fds; junkfds: presumably fds to be discarded rather than watched -- verify */
	union {
		const char name[0];		/* group name, read-only view */
		char name_init[0];		/* writable alias used only during initialization */
	};
};
227
228 static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds);
229 static void socketgroup_delete(job_t j, struct socketgroup *sg);
230 static void socketgroup_watch(job_t j, struct socketgroup *sg);
231 static void socketgroup_ignore(job_t j, struct socketgroup *sg);
232 static void socketgroup_callback(job_t j);
233 static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
234 static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
235
/*
 * One StartCalendarInterval entry: a cron-like time specification plus the
 * next computed absolute fire time.
 */
struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle;	/* linkage: sorted_calendar_events (presumably ordered by when_next -- verify) */
	SLIST_ENTRY(calendarinterval) sle;		/* linkage: owning job's cal_intervals list */
	job_t job;					/* the job to start when the interval fires */
	struct tm when;					/* the (wildcard-capable) time spec from the plist */
	time_t when_next;				/* next absolute time this interval fires */
};
243
244 static LIST_HEAD(, calendarinterval) sorted_calendar_events;
245
246 static bool calendarinterval_new(job_t j, struct tm *w);
247 static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
248 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
249 static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
250 static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
251 static void calendarinterval_callback(void);
252 static void calendarinterval_sanity_check(void);
253
/*
 * One environment variable for a job (or the global environment).
 * The key is allocated inline past the end of the struct.
 */
struct envitem {
	SLIST_ENTRY(envitem) sle;	/* linkage: job's env or global_env list */
	bool one_shot;			/* presumably applied only once (see envitem_setup_one_shot) -- verify */
	char *value;			/* the variable's value (heap-allocated) */
	union {
		const char key[0];	/* variable name, read-only view */
		char key_init[0];	/* writable alias used only during initialization */
	};
};
263
264 static bool envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot);
265 static void envitem_delete(job_t j, struct envitem *ei, bool global);
266 static void envitem_setup(launch_data_t obj, const char *key, void *context);
267 static void envitem_setup_one_shot(launch_data_t obj, const char *key, void *context);
268
/*
 * One resource-limit request imported from the job's plist
 * (Soft/HardResourceLimits).
 */
struct limititem {
	SLIST_ENTRY(limititem) sle;			/* linkage: owning job's limits list */
	struct rlimit lim;				/* limit values to apply */
	unsigned int setsoft:1, sethard:1, which:30;	/* which: RLIMIT_* selector; setsoft/sethard: which halves were specified */
};
274
275 static bool limititem_update(job_t j, int w, rlim_t r);
276 static void limititem_delete(job_t j, struct limititem *li);
277 static void limititem_setup(launch_data_t obj, const char *key, void *context);
278 #if HAVE_SANDBOX
279 static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
280 #endif
281
282 static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
283
/*
 * The kinds of KeepAlive / start-on-demand criteria a job may carry
 * (see struct semaphoreitem below).
 */
typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	PATH_EXISTS,
	PATH_MISSING,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
	PATH_CHANGES,
	DIR_NOT_EMPTY,
	// FILESYSTEMTYPE_IS_MOUNTED,	/* for nfsiod, but maybe others */
} semaphore_reason_t;
299
/*
 * One KeepAlive criterion.  The subject (a path or another job's label,
 * depending on `why`) is allocated inline past the end of the struct.
 */
struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle;	/* linkage: owning job's semaphores list */
	semaphore_reason_t why;		/* which condition this item tracks */
	bool watching_parent;		/* for path-based items: presumably watching the parent directory when the path itself is absent -- verify */
	int fd;				/* fd used for path-based kqueue watches -- verify lifetime/sentinel */
	union {
		const char what[0];	/* path or job label, read-only view */
		char what_init[0];	/* writable alias used only during initialization */
	};
};
310
/*
 * Context for iterating a KeepAlive sub-dictionary during job import:
 * presumably a true value in the dictionary maps to why_true and a false
 * value to why_false (see semaphoreitem_setup_dict_iter) -- verify.
 */
struct semaphoreitem_dict_iter_context {
	job_t j;			/* job being imported */
	semaphore_reason_t why_true;	/* reason recorded for boolean-true entries */
	semaphore_reason_t why_false;	/* reason recorded for boolean-false entries */
};
316
317 static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
318 static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
319 static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
320 static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
321 static void semaphoreitem_callback(job_t j, struct kevent *kev);
322 static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
323 static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
324 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
325
#define ACTIVE_JOB_HASH_SIZE	32
/* Bucket index for the active-job (PID) hash.  The argument is parenthesized
 * so expression arguments expand correctly; the power-of-two branch is
 * selected at compile time. */
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? ((x) & (ACTIVE_JOB_HASH_SIZE - 1)) : ((x) % ACTIVE_JOB_HASH_SIZE))
328
329 #define MACHSERVICE_HASH_SIZE 37
330
/* Shutdown proceeds through these phases in order; see shutdown_phase in
 * struct jobmgr_s. */
enum {
	JOBMGR_PHASE_HOPEFULLY_EXITS_FIRST,
	JOBMGR_PHASE_NORMAL,
	JOBMGR_PHASE_HOPEFULLY_EXITS_LAST,
	JOBMGR_PHASE_LAST,
};

/* Human-readable phase names, indexed by the constants above (the final
 * entry names the JOBMGR_PHASE_LAST slot). */
static char *s_phases[JOBMGR_PHASE_LAST + 1] = {
	"HopefullyExitsFirst",
	"Normal",
	"HopefullyExitsLast",
	"Finalized",
};
344
/*
 * A job manager: the representation of a bootstrap context/session.
 * Managers form a tree rooted at root_jobmgr; each owns the jobs created
 * within its context.  The manager's name is allocated inline past the end
 * of the struct.
 */
struct jobmgr_s {
	kq_callback kqjobmgr_callback;		/* kevent dispatch callback; NOTE(review): like job_s, presumably must remain the first member -- confirm */
	SLIST_ENTRY(jobmgr_s) sle;		/* linkage: parent manager's submgrs list */
	SLIST_HEAD(, jobmgr_s) submgrs;		/* child job managers */
	LIST_HEAD(, job_s) jobs;		/* all jobs owned by this manager */
	LIST_HEAD(, job_s) jetsam_jobs;		/* jobs carrying Jetsam properties */
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];	/* running jobs, hashed via ACTIVE_JOB_HASH */
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];	/* Mach services, hashed by name */
	LIST_HEAD(, job_s) global_env_jobs;	/* jobs contributing global environment variables */
	STAILQ_HEAD(, job_s) pending_samples;	/* jobs queued for sampling (see jobmgr_dequeue_next_sample) */
	mach_port_t jm_port;			/* this context's bootstrap port -- verify */
	mach_port_t req_port;			/* port of the requestor that created this manager -- verify */
	mach_port_t init_audit_session;
	jobmgr_t parentmgr;			/* parent in the manager tree; presumably NULL for root_jobmgr -- verify */
	int reboot_flags;
	int shutdown_phase;			/* current JOBMGR_PHASE_* value during shutdown */
	unsigned int global_on_demand_cnt;	/* count of jobs holding peers in on-demand mode -- verify */
	unsigned int hopefully_first_cnt;
	unsigned int normal_active_cnt;
	unsigned int jetsam_jobs_cnt;
	unsigned int shutting_down:1,		/* shutdown of this context has begun */
		session_initialized:1,
		killed_hopefully_first_jobs:1,	/* per-phase progress flags used during shutdown */
		killed_normal_jobs:1,
		killed_hopefully_last_jobs:1,
		killed_stray_jobs:1;
	char sample_log_file[PATH_MAX];		/* presumably where sample output is written (see SHUTDOWN_LOG_DIR) -- verify */
	uint32_t properties;
	union {
		const char name[0];		/* manager/session name, read-only view */
		char name_init[0];		/* writable alias used only during initialization */
	};
};
378
379 #define jobmgr_assumes(jm, e) \
380 (unlikely(!(e)) ? jobmgr_log_bug(jm, __LINE__), false : true)
381
382 static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t session_port);
383 static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
384 static jobmgr_t jobmgr_parent(jobmgr_t jm);
385 static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
386 static bool jobmgr_label_test(jobmgr_t jm, const char *str);
387 static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
388 static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
389 static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
390 static void jobmgr_remove(jobmgr_t jm);
391 static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
392 static void jobmgr_dequeue_next_sample(jobmgr_t jm);
393 static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
394 static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
395 static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
396 static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
397 static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
398 static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, bool dispatch, mach_port_t *mp);
399 static void job_export_all2(jobmgr_t jm, launch_data_t where);
400 static void jobmgr_callback(void *obj, struct kevent *kev);
401 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
402 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
403 static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
404 static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
405 static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
406 /* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
407 static void jobmgr_log_bug(jobmgr_t jm, unsigned int line);
408
409 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
410 #define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
411
412 struct suspended_peruser {
413 LIST_ENTRY(suspended_peruser) sle;
414 job_t j;
415 };
416
/*
 * The representation of a single job -- launchd's unit of work: everything
 * imported from the job's plist plus its runtime state.  The unique label is
 * allocated inline past the end of the struct.
 */
struct job_s {
	kq_callback kqjob_callback;	/* MUST be first element of this structure for benefit of launchd's run loop. */
	LIST_ENTRY(job_s) sle;				/* linkage: owning jobmgr's jobs list */
	LIST_ENTRY(job_s) needing_session_sle;		/* linkage: s_needing_sessions */
	LIST_ENTRY(job_s) jetsam_sle;			/* linkage: jobmgr's jetsam_jobs */
	LIST_ENTRY(job_s) pid_hash_sle;			/* linkage: jobmgr's active_jobs bucket */
	LIST_ENTRY(job_s) label_hash_sle;		/* linkage: global label_hash bucket */
	LIST_ENTRY(job_s) global_env_sle;		/* linkage: jobmgr's global_env_jobs */
	STAILQ_ENTRY(job_s) pending_samples_sle;	/* linkage: jobmgr's pending_samples queue */
	SLIST_ENTRY(job_s) curious_jobs_sle;		/* linkage: s_curious_jobs */
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;	/* clients waiting for this job to exit */
	SLIST_HEAD(, socketgroup) sockets;		/* plist: Sockets */
	SLIST_HEAD(, calendarinterval) cal_intervals;	/* plist: StartCalendarInterval */
	SLIST_HEAD(, envitem) global_env;		/* environment entries this job exports globally */
	SLIST_HEAD(, envitem) env;			/* plist: EnvironmentVariables */
	SLIST_HEAD(, limititem) limits;			/* plist: resource limits */
	SLIST_HEAD(, machservice) machservices;		/* plist: MachServices */
	SLIST_HEAD(, semaphoreitem) semaphores;		/* plist: KeepAlive criteria */
	SLIST_HEAD(, waiting_for_removal) removal_watchers;	/* clients waiting for this job's removal */
	struct rusage ru;				/* resource usage; presumably of the last exited instance -- verify */
	cpu_type_t *j_binpref;				/* preferred binary architectures -- verify source key */
	size_t j_binpref_cnt;				/* entries in j_binpref */
	mach_port_t j_port;
	mach_port_t wait_reply_port;	/* we probably should switch to a list of waiters */
	uid_t mach_uid;
	jobmgr_t mgr;					/* the manager that owns this job */
	size_t argc;					/* entries in argv */
	char **argv;					/* plist: ProgramArguments */
	char *prog;					/* plist: Program */
	char *rootdir;					/* plist: RootDirectory -- verify key */
	char *workingdir;				/* plist: WorkingDirectory -- verify key */
	char *username;					/* plist: UserName -- verify key */
	char *groupname;				/* plist: GroupName -- verify key */
	char *stdinpath;				/* plist: StandardInPath */
	char *stdoutpath;				/* plist: StandardOutPath */
	char *stderrpath;				/* plist: StandardErrorPath */
	char *alt_exc_handler;
	struct vproc_shmem_s *shmem;			/* shared page used for transaction counting (see job_stop) */
	struct machservice *lastlookup;			/* cache of the most recent service lookup */
	unsigned int lastlookup_gennum;			/* gen_num of the cached lookup */
#if HAVE_SANDBOX
	char *seatbelt_profile;				/* sandbox profile name -- verify source key */
	uint64_t seatbelt_flags;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p;					/* PID when running; 0 otherwise (see job_stop's guard) */
	int last_exit_status;				/* status of the last exit (exported as LastExitStatus) */
	int stdin_fd;
	int fork_fd;
	int log_redirect_fd;
	int nice;					/* plist: Nice (meaningful only when setnice is set) */
	int stdout_err_fd;
	int32_t jetsam_priority;
	int32_t jetsam_memlimit;
	int32_t main_thread_priority;
	uint32_t timeout;				/* exported as LAUNCH_JOBKEY_TIMEOUT */
	uint32_t exit_timeout;				/* seconds allowed between SIGTERM and SIGKILL (see job_stop) */
	uint64_t sent_signal_time;			/* opaque timestamp of the last job_stop() */
	uint64_t start_time;
	uint32_t min_run_time;				/* minimum run time; presumably defaults to LAUNCHD_MIN_JOB_RUN_TIME -- verify */
	uint32_t start_interval;			/* plist: StartInterval -- verify key */
	uint32_t peruser_suspend_count; /* The number of jobs that have disabled this per-user launchd. */
#if 0
	/* someday ... */
	enum {
		J_TYPE_ANONYMOUS = 1,
		J_TYPE_LANCHSERVICES,
		J_TYPE_MACHINIT,
		J_TYPE_INETD,
	} j_type;
#endif
	bool debug:1,	/* man launchd.plist --> Debug */
		ondemand:1,	/* man launchd.plist --> KeepAlive == false */
		session_create:1,	/* man launchd.plist --> SessionCreate */
		low_pri_io:1,	/* man launchd.plist --> LowPriorityIO */
		no_init_groups:1,	/* man launchd.plist --> InitGroups */
		priv_port_has_senders:1,	/* a legacy mach_init concept to make bootstrap_create_server/service() work */
		importing_global_env:1,	/* a hack during job importing */
		importing_hard_limits:1,	/* a hack during job importing */
		setmask:1,	/* man launchd.plist --> Umask */
		anonymous:1,	/* a process that launchd knows about, but isn't managed by launchd */
		checkedin:1,	/* a legacy mach_init concept to detect sick jobs */
		legacy_mach_job:1,	/* a job created via bootstrap_create_server() */
		legacy_LS_job:1,	/* a job created via spawn_via_launchd() */
		inetcompat:1,	/* a legacy job that wants inetd compatible semantics */
		inetcompat_wait:1,	/* a twist on inetd compatibility */
		start_pending:1,	/* an event fired and the job should start, but not necessarily right away */
		globargv:1,	/* man launchd.plist --> EnableGlobbing */
		wait4debugger:1,	/* man launchd.plist --> WaitForDebugger */
		wait4debugger_oneshot:1,	/* One-shot WaitForDebugger. */
		internal_exc_handler:1,	/* MachExceptionHandler == true */
		stall_before_exec:1,	/* a hack to support an option of spawn_via_launchd() */
		only_once:1,	/* man launchd.plist --> LaunchOnlyOnce. Note: 5465184 Rename this to "HopefullyNeverExits" */
		currently_ignored:1,	/* Make job_ignore() / job_watch() work. If these calls were balanced, then this wouldn't be necessarily. */
		forced_peers_to_demand_mode:1,	/* A job that forced all other jobs to be temporarily launch-on-demand */
		setnice:1,	/* man launchd.plist --> Nice */
		hopefully_exits_last:1,	/* man launchd.plist --> HopefullyExitsLast */
		removal_pending:1,	/* a job was asked to be unloaded/removed while running, we'll remove it after it exits */
		sent_sigkill:1,	/* job_kill() was called */
		sampling_complete:1,	/* job_force_sampletool() was called (or is disabled) */
		debug_before_kill:1,	/* enter the kernel debugger before killing a job */
		weird_bootstrap:1,	/* a hack that launchd+launchctl use during jobmgr_t creation */
		start_on_mount:1,	/* man launchd.plist --> StartOnMount */
		per_user:1,	/* This job is a per-user launchd managed by the PID 1 launchd */
		hopefully_exits_first:1,	/* man launchd.plist --> HopefullyExitsFirst */
		deny_unknown_mslookups:1,	/* A flag for changing the behavior of bootstrap_look_up() */
		unload_at_mig_return:1,	/* A job thoroughly confused launchd. We need to unload it ASAP */
		abandon_pg:1,	/* man launchd.plist --> AbandonProcessGroup */
		ignore_pg_at_shutdown:1,	/* During shutdown, do not send SIGTERM to stray processes in the process group of this job. */
		poll_for_vfs_changes:1,	/* a hack to work around the fact that kqueues don't work on all filesystems */
		deny_job_creation:1,	/* Don't let this job create new 'job_t' objects in launchd */
		kill_via_shmem:1,	/* man launchd.plist --> EnableTransactions */
		sent_kill_via_shmem:1,	/* We need to 'kill_via_shmem' once-and-only-once */
		clean_kill:1,	/* The job was sent SIGKILL because it was clean. */
		pending_sample:1,	/* This job needs to be sampled for some reason. */
		kill_after_sample:1,	/* The job is to be killed after sampling. */
		is_being_sampled:1,	/* We've spawned a sample tool to sample the job. */
		reap_after_trace:1,	/* The job exited before sample did, so we should reap it after sample is done. */
		nosy:1,	/* The job has an OtherJobEnabled KeepAlive criterion. */
		crashed:1,	/* The job is the default Mach exception handler, and it crashed. */
		reaped:1,	/* We've received NOTE_EXIT for the job. */
		stopped:1,	/* job_stop() was called. */
		jetsam_frontmost:1,	/* The job is considered "frontmost" by Jetsam. */
		needs_kickoff:1,	/* The job is to be kept alive continuously, but it must be initially kicked off. */
		is_bootstrapper:1,	/* The job is a bootstrapper. */
		has_console:1,	/* The job owns the console. */
		clean_exit_timer_expired:1,	/* The job was clean, received SIGKILL and failed to exit after LAUNCHD_CLEAN_KILL_TIMER seconds. */
		embedded_special_privileges:1,	/* The job runs as a non-root user on embedded but has select privileges of the root user. */
		did_exec:1,	/* The job exec(2)ed successfully. */
		holds_ref:1,	/* The (anonymous) job called vprocmgr_switch_to_session(). */
		jetsam_properties:1;	/* The job has Jetsam limits in place. */
	mode_t mask;					/* plist: Umask (meaningful only when setmask is set) */
	pid_t tracing_pid;				/* PID of the tool sampling/tracing this job -- verify */
	mach_port_t audit_session;
	uuid_t expected_audit_uuid;
	const char label[0];				/* the job's unique label, stored inline */
};
558
559 #define LABEL_HASH_SIZE 53
560
561 static LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
562 static size_t hash_label(const char *label) __attribute__((pure));
563 static size_t hash_ms(const char *msstr) __attribute__((pure));
564 static SLIST_HEAD(, job_s) s_curious_jobs;
565
566 #define job_assumes(j, e) \
567 (unlikely(!(e)) ? job_log_bug(j, __LINE__), false : true)
568
569 static void job_import_keys(launch_data_t obj, const char *key, void *context);
570 static void job_import_bool(job_t j, const char *key, bool value);
571 static void job_import_string(job_t j, const char *key, const char *value);
572 static void job_import_integer(job_t j, const char *key, long long value);
573 static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
574 static void job_import_array(job_t j, const char *key, launch_data_t value);
575 static void job_import_opaque(job_t j, const char *key, launch_data_t value);
576 static bool job_set_global_on_demand(job_t j, bool val);
577 static const char *job_active(job_t j);
578 static void job_watch(job_t j);
579 static void job_ignore(job_t j);
580 static void job_cleanup_after_tracer(job_t j);
581 static void job_reap(job_t j);
582 static bool job_useless(job_t j);
583 static bool job_keepalive(job_t j);
584 static void job_dispatch_curious_jobs(job_t j);
585 static void job_start(job_t j);
586 static void job_start_child(job_t j) __attribute__((noreturn));
587 static void job_setup_attributes(job_t j);
588 static bool job_setup_machport(job_t j);
589 static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
590 static void job_postfork_become_user(job_t j);
591 static void job_postfork_test_user(job_t j);
592 static void job_log_pids_with_weird_uids(job_t j);
593 static void job_setup_exception_port(job_t j, task_t target_task);
594 static void job_callback(void *obj, struct kevent *kev);
595 static void job_callback_proc(job_t j, struct kevent *kev);
596 static void job_callback_timer(job_t j, void *ident);
597 static void job_callback_read(job_t j, int ident);
598 static void job_log_stray_pg(job_t j);
599 static void job_log_children_without_exec(job_t j);
600 static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
601 static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
602 static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
603 static void job_kill(job_t j);
604 static void job_uncork_fork(job_t j);
605 static void job_log_stdouterr(job_t j);
606 static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
607 static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
608 static void job_log_bug(job_t j, unsigned int line);
609 static void job_log_stdouterr2(job_t j, const char *msg, ...);
610 static void job_set_exception_port(job_t j, mach_port_t port);
611 static kern_return_t job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus);
612
613 static const struct {
614 const char *key;
615 int val;
616 } launchd_keys2limits[] = {
617 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
618 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
619 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
620 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
621 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
622 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
623 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
624 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
625 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
626 };
627
628 static time_t cronemu(int mon, int mday, int hour, int min);
629 static time_t cronemu_wday(int wday, int hour, int min);
630 static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
631 static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
632 static bool cronemu_hour(struct tm *wtm, int hour, int min);
633 static bool cronemu_min(struct tm *wtm, int min);
634
635 /* miscellaneous file local functions */
636 static size_t get_kern_max_proc(void);
637 static int dir_has_files(job_t j, const char *path);
638 static char **mach_cmd2argv(const char *string);
639 static size_t our_strhash(const char *s) __attribute__((pure));
640 static void extract_rcsid_substr(const char *i, char *o, size_t osz);
641 static void simulate_pid1_crash(void);
642 static pid_t basic_spawn(job_t j, void (*what_to_do)(job_t));
643 static void take_sample(job_t j);
644
645 void eliminate_double_reboot(void);
646
647 /* file local globals */
648 static size_t total_children;
649 static size_t total_anon_children;
650 static mach_port_t the_exception_server;
651 static job_t workaround_5477111;
652 static LIST_HEAD(, job_s) s_needing_sessions;
653 mach_port_t g_audit_session_port = MACH_PORT_NULL;
654
655 #if !TARGET_OS_EMBEDDED
656 static job_t s_embedded_privileged_job = (job_t)&root_jobmgr;
657 au_asid_t g_audit_session = AU_DEFAUDITSID;
658 #else
659 static job_t s_embedded_privileged_job = NULL;
660 pid_t g_audit_session = 0;
661 #endif
662
663 static int s_no_hang_fd = -1;
664
665 /* process wide globals */
666 mach_port_t inherited_bootstrap_port;
667 jobmgr_t root_jobmgr;
668 bool g_shutdown_debugging = false;
669 bool g_verbose_boot = false;
670 bool g_embedded_privileged_action = false;
671 bool g_runtime_busy_time = false;
672
673 void
674 job_ignore(job_t j)
675 {
676 struct semaphoreitem *si;
677 struct socketgroup *sg;
678 struct machservice *ms;
679
680 if (j->currently_ignored) {
681 return;
682 }
683
684 job_log(j, LOG_DEBUG, "Ignoring...");
685
686 j->currently_ignored = true;
687
688 if (j->poll_for_vfs_changes) {
689 j->poll_for_vfs_changes = false;
690 job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
691 }
692
693 SLIST_FOREACH(sg, &j->sockets, sle) {
694 socketgroup_ignore(j, sg);
695 }
696
697 SLIST_FOREACH(ms, &j->machservices, sle) {
698 machservice_ignore(j, ms);
699 }
700
701 SLIST_FOREACH(si, &j->semaphores, sle) {
702 semaphoreitem_ignore(j, si);
703 }
704 }
705
706 void
707 job_watch(job_t j)
708 {
709 struct semaphoreitem *si;
710 struct socketgroup *sg;
711 struct machservice *ms;
712
713 if (!j->currently_ignored) {
714 return;
715 }
716
717 job_log(j, LOG_DEBUG, "Watching...");
718
719 j->currently_ignored = false;
720
721 SLIST_FOREACH(sg, &j->sockets, sle) {
722 socketgroup_watch(j, sg);
723 }
724
725 SLIST_FOREACH(ms, &j->machservices, sle) {
726 machservice_watch(j, ms);
727 }
728
729 SLIST_FOREACH(si, &j->semaphores, sle) {
730 semaphoreitem_watch(j, si);
731 }
732 }
733
/*
 * Ask a running job to stop.  Transactional jobs (EnableTransactions) are
 * told to exit via their shared-memory page and are SIGKILLed immediately
 * only when they have no outstanding transactions; everything else gets
 * SIGTERM plus a kevent timer that escalates after the exit timeout (and
 * optionally triggers sampling first).  No-op for anonymous or non-running
 * jobs.  On embedded, may refuse and set errno (EPERM/EINVAL) instead.
 */
void
job_stop(job_t j)
{
	char extralog[100];
	int32_t newval = 1;

	/* Nothing to signal: not running, or an anonymous process launchd
	 * merely knows about but doesn't manage. */
	if (unlikely(!j->p || j->anonymous)) {
		return;
	}

#if !TARGET_OS_EMBEDDED
	/* Transactional path: mark the shared page as exiting and decrement the
	 * transaction count (once only; sent_kill_via_shmem latches).  A
	 * resulting newval < 0 means the job is "clean" -- no outstanding
	 * transactions -- and is SIGKILLed directly below. */
	if (j->kill_via_shmem && !g_force_old_kill_path) {
		if (j->shmem) {
			if (!j->sent_kill_via_shmem) {
				j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
				newval = __sync_sub_and_fetch(&j->shmem->vp_shmem_transaction_cnt, 1);
				j->sent_kill_via_shmem = true;
			} else {
				/* Already told it once; just re-read the count. */
				newval = j->shmem->vp_shmem_transaction_cnt;
			}
		} else {
			/* Transactional job with no shared page attached: treat as clean. */
			newval = -1;
		}
	} else if( j->kill_via_shmem ) {
		job_log(j, LOG_DEBUG, "Stopping transactional job the old-fashioned way.");
	}
#endif

#if TARGET_OS_EMBEDDED
	/* On embedded, a privileged action may only stop jobs running as the
	 * same user as the designated privileged job. */
	if( g_embedded_privileged_action && s_embedded_privileged_job ) {
		if( !job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL) ) {
			errno = EPERM;
			return;
		}

		if( strcmp(j->username, s_embedded_privileged_job->username) != 0 ) {
			errno = EPERM;
			return;
		}
	} else if( g_embedded_privileged_action ) {
		errno = EINVAL;
		return;
	}
#endif

	j->sent_signal_time = runtime_get_opaque_time();

	if (newval < 0) {
		/* Clean transactional job: skip SIGTERM and kill it outright. */
		j->clean_kill = true;
		job_kill(j);
	} else {
		/*
		 * If sampling is enabled and SAMPLE_TIMEOUT is earlier than the job exit_timeout,
		 * then set a timer for SAMPLE_TIMEOUT seconds after killing
		 */
		unsigned int exit_timeout = j->exit_timeout;
		bool do_sample = do_apple_internal_logging;
		unsigned int timeout = exit_timeout;

		if (do_sample && (!exit_timeout || (LAUNCHD_SAMPLE_TIMEOUT < exit_timeout))) {
			timeout = LAUNCHD_SAMPLE_TIMEOUT;
		}

		job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);

		/* Arm a one-shot timer keyed off &j->exit_timeout; its expiry
		 * drives sampling and/or SIGKILL escalation elsewhere. */
		if (timeout) {
			j->sampling_complete = !do_sample;
			job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
				EV_ADD|EV_ONESHOT, NOTE_SECONDS, timeout, j) != -1);
		}

		if (!exit_timeout) {
			job_log(j, LOG_DEBUG, "This job has an infinite exit timeout");
		}

		if (j->kill_via_shmem) {
			/* newval was post-decrement; +1 reports the count as seen by the job. */
			snprintf(extralog, sizeof(extralog), ": %d remaining transactions", newval + 1);
		} else {
			extralog[0] = '\0';
		}

		job_log(j, LOG_DEBUG, "Sent SIGTERM signal%s", extralog);
	}

	j->stopped = true;
}
820
/*
 * Serialize a job into a launch_data dictionary for export to clients
 * (e.g. launchctl's "list" output). Returns NULL only when the top-level
 * dictionary cannot be allocated; any individual key whose value fails
 * to allocate is silently omitted from the result. The caller owns the
 * returned dictionary.
 */
launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}
	if ((tmp = launch_data_new_integer(j->last_exit_status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}
	/* PID is only reported while the job has a running process. */
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->kill_via_shmem && (tmp = launch_data_new_bool(true))) {
		/* -1 means "transactions enabled but no shared memory region yet". */
		int32_t tmp_cnt = -1;

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);

		if (j->shmem) {
			tmp_cnt = j->shmem->vp_shmem_transaction_cnt;
		}

		/* A pending shmem-kill counts as one outstanding transaction. */
		if (j->sent_kill_via_shmem) {
			tmp_cnt++;
		}

		if ((tmp = launch_data_new_integer(tmp_cnt))) {
			launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TRANSACTIONCOUNT);
		}
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if (sg->junkfds) {
				continue;
			}
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		/* tmp3 lazily becomes the dictionary of per-PID services. */
		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					/* Only the service names are exported; the values are
					 * placeholder null ports, not the real rights. */
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
951
952 static void
953 jobmgr_log_active_jobs(jobmgr_t jm)
954 {
955 const char *why_active;
956 jobmgr_t jmi;
957 job_t ji;
958
959 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
960 jobmgr_log_active_jobs(jmi);
961 }
962
963 LIST_FOREACH(ji, &jm->jobs, sle) {
964 if( (why_active = job_active(ji)) ) {
965 job_log(ji, LOG_DEBUG | LOG_CONSOLE, "%s", why_active);
966 }
967 }
968 }
969
/*
 * Periodic shutdown progress report: log the live child counts and the
 * current shutdown phase, then log why each remaining job is still
 * active. total_children/total_anon_children and s_phases are
 * file-scope state maintained elsewhere in this file.
 */
static void
jobmgr_still_alive_with_check(jobmgr_t jm)
{
	jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Still alive with %lu/%lu (normal/anonymous) children. In %s phase of shutdown.", total_children, total_anon_children, s_phases[jm->shutdown_phase]);
	jobmgr_log_active_jobs(jm);
}
976
/*
 * Begin shutting down a job manager: mark it as shutting down, recurse
 * into its submanagers, and — for the root manager when running as
 * PID 1 — arm a 5-second repeating timer used to log shutdown progress.
 * Returns the result of an immediate garbage-collection pass.
 */
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutting_down = true;

	/* SAFE variant: a submanager may remove itself from the list while
	 * shutting down. */
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (jm->parentmgr == NULL && pid1_magic) {
		/* Compare against -1 like every other kevent_mod() call site so
		 * that a successful registration (which does not return -1) is
		 * not logged as an assumption failure. */
		jobmgr_assumes(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm) != -1);
#if !TARGET_OS_EMBEDDED
		/* Kill the update thread. */
		jobmgr_assumes(jm, __sync_sub_and_fetch(&g_sync_frequency, 30) == 0);
#endif
	}

	return jobmgr_do_garbage_collection(jm);
}
999
/*
 * Tear down a job manager: drain any remaining submanagers and jobs,
 * release its Mach ports, and detach from its parent. Removing the root
 * manager is terminal: under PID 1 it reboots the machine; in a
 * per-session launchd it exits the process. Only in the parent-manager
 * case does control return to the caller with jm freed.
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	/* Submanagers should already be gone by now; drain any stragglers. */
	if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
		}
	}

	while( (ji = LIST_FIRST(&jm->jobs)) ) {
		if( !ji->anonymous && ji->p ) {
			job_log(ji, LOG_WARNING | LOG_CONSOLE, "Job has overstayed its welcome. Forcing removal.");
			/* Clearing ji->p makes job_remove() tear the job down now
			 * instead of pending removal until the process exits. */
			ji->p = 0;
		}
		job_remove(ji);
	}

	if (jm->req_port) {
		jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
	}

	if (jm->jm_port) {
		jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (pid1_magic) {
		/* Root manager under PID 1: last one out turns off the lights. */
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
	} else {
		/* Root manager of a non-PID-1 launchd: just exit. */
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		runtime_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
1046
/*
 * Remove a job from its manager and free everything it owns.
 *
 * If the job still has a running non-anonymous process, removal is
 * deferred: the job is stopped, removal_pending is set, and this
 * function returns; removal is re-attempted when the process exits.
 * On embedded, privileged-action requests may only remove jobs owned by
 * the same user as the designated privileged job (errno EPERM/EINVAL on
 * refusal).
 */
void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

#if TARGET_OS_EMBEDDED
	/* Gate privileged removals: the requester must share a username with
	 * the job holding the embedded privilege dispensation. */
	if( g_embedded_privileged_action && s_embedded_privileged_job ) {
		if( !job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL) ) {
			errno = EPERM;
			return;
		}

		if( strcmp(j->username, s_embedded_privileged_job->username) != 0 ) {
			errno = EPERM;
			return;
		}
	} else if( g_embedded_privileged_action ) {
		errno = EINVAL;
		return;
	}
#endif

	if (unlikely(j->p)) {
		if (j->anonymous) {
			/* Anonymous jobs can be reaped synchronously. */
			job_reap(j);
		} else {
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}
			return;
		}
	}

	job_dispatch_curious_jobs(j);

	ipc_close_all_with_job(j);

	job_log(j, LOG_INFO, "Total rusage: utime %ld.%06u stime %ld.%06u maxrss %lu ixrss %lu idrss %lu isrss %lu minflt %lu majflt %lu nswap %lu inblock %lu oublock %lu msgsnd %lu msgrcv %lu nsignals %lu nvcsw %lu nivcsw %lu",
			j->ru.ru_utime.tv_sec, j->ru.ru_utime.tv_usec,
			j->ru.ru_stime.tv_sec, j->ru.ru_stime.tv_usec,
			j->ru.ru_maxrss, j->ru.ru_ixrss, j->ru.ru_idrss, j->ru.ru_isrss,
			j->ru.ru_minflt, j->ru.ru_majflt,
			j->ru.ru_nswap, j->ru.ru_inblock, j->ru.ru_oublock,
			j->ru.ru_msgsnd, j->ru.ru_msgrcv,
			j->ru.ru_nsignals, j->ru.ru_nvcsw, j->ru.ru_nivcsw);

	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	/* These descriptors are expected to be closed already; the inverted
	 * job_assumes() logs a complaint and then closes them anyway. */
	if (!job_assumes(j, j->fork_fd == 0)) {
		job_assumes(j, runtime_close(j->fork_fd) != -1);
	}

	if (j->stdin_fd) {
		job_assumes(j, runtime_close(j->stdin_fd) != -1);
	}

	if (!job_assumes(j, j->log_redirect_fd == 0)) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
	}

	if (j->j_port) {
		job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	}

	if (!job_assumes(j, j->wait_reply_port == MACH_PORT_NULL)) {
		job_assumes(j, launchd_mport_deallocate(j->wait_reply_port) == KERN_SUCCESS);
	}

	/* Drain every per-job list; each *_delete() unlinks the head. */
	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		/* argv is a single allocation (pointer vector + strings). */
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	if (j->start_interval) {
		runtime_del_weak_ref();
		job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
	}
	if (j->poll_for_vfs_changes) {
		job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}
	if( j->exit_timeout ) {
		/* Not a big deal if this fails. It means that the timer's already been freed. */
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if( j->jetsam_properties ) {
		LIST_REMOVE(j, jetsam_sle);
		j->mgr->jetsam_jobs_cnt--;
	}
	if( j->audit_session != MACH_PORT_NULL ) {
		job_assumes(j, mach_port_deallocate(mach_task_self(), j->audit_session) == KERN_SUCCESS);
	}
	if( !uuid_is_null(j->expected_audit_uuid) ) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if( j->embedded_special_privileges ) {
		/* This job held the embedded privilege dispensation; release it. */
		s_embedded_privileged_job = NULL;
	}

	/* Best-effort: delete any timer keyed on the job pointer itself. */
	kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	job_log(j, LOG_DEBUG, "Removed");

	free(j);
}
1228
1229 void
1230 socketgroup_setup(launch_data_t obj, const char *key, void *context)
1231 {
1232 launch_data_t tmp_oai;
1233 job_t j = context;
1234 size_t i, fd_cnt = 1;
1235 int *fds;
1236
1237 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1238 fd_cnt = launch_data_array_get_count(obj);
1239 }
1240
1241 fds = alloca(fd_cnt * sizeof(int));
1242
1243 for (i = 0; i < fd_cnt; i++) {
1244 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1245 tmp_oai = launch_data_array_get_index(obj, i);
1246 } else {
1247 tmp_oai = obj;
1248 }
1249
1250 fds[i] = launch_data_get_fd(tmp_oai);
1251 }
1252
1253 socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);
1254
1255 ipc_revoke_fds(obj);
1256 }
1257
1258 bool
1259 job_set_global_on_demand(job_t j, bool val)
1260 {
1261 if (j->forced_peers_to_demand_mode && val) {
1262 return false;
1263 } else if (!j->forced_peers_to_demand_mode && !val) {
1264 return false;
1265 }
1266
1267 if ((j->forced_peers_to_demand_mode = val)) {
1268 j->mgr->global_on_demand_cnt++;
1269 } else {
1270 j->mgr->global_on_demand_cnt--;
1271 }
1272
1273 if (j->mgr->global_on_demand_cnt == 0) {
1274 jobmgr_dispatch_all(j->mgr, false);
1275 }
1276
1277 return true;
1278 }
1279
/*
 * Create and register the job's Mach receive right (j->j_port):
 * allocate the receive right, add it to the runtime's port set with the
 * vproc MIG demux, and request a no-senders notification so launchd is
 * told when the last sender disappears. Returns false on any failure,
 * with the partially-created port cleaned up via the goto ladder.
 */
bool
job_setup_machport(job_t j)
{
	mach_msg_size_t mxmsgsz;

	if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
		goto out_bad2;
	}

	if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
		/* Close the receive right here (not via out_bad2) because it has
		 * already been added to the port set at this point. */
		job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
		goto out_bad;
	}

	return true;
out_bad2:
	job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
out_bad:
	return false;
}
1310
1311 job_t
1312 job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
1313 {
1314 const char **argv = (const char **)mach_cmd2argv(cmd);
1315 job_t jr = NULL;
1316
1317 if (!job_assumes(j, argv != NULL)) {
1318 goto out_bad;
1319 }
1320
1321 jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
1322
1323 free(argv);
1324
1325 /* jobs can easily be denied creation during shutdown */
1326 if (unlikely(jr == NULL)) {
1327 goto out_bad;
1328 }
1329
1330 jr->mach_uid = uid;
1331 jr->ondemand = ond;
1332 jr->legacy_mach_job = true;
1333 jr->abandon_pg = true;
1334 jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */
1335
1336 if (!job_setup_machport(jr)) {
1337 goto out_bad;
1338 }
1339
1340 job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");
1341
1342 return jr;
1343
1344 out_bad:
1345 if (jr) {
1346 job_remove(jr);
1347 }
1348 return NULL;
1349 }
1350
/*
 * MIG handler for mpm_wait(): if the job has no running process, reply
 * immediately with the last recorded exit status. Otherwise stash the
 * caller's reply port and defer the reply until the job exits.
 */
kern_return_t
job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus)
{
	if (j->p == 0) {
		*waitstatus = j->last_exit_status;
		return 0;
	}

	/* Still running: hold the reply port; the reply is sent at exit. */
	j->wait_reply_port = srp;
	return MIG_NO_REPLY;
}
1363
/*
 * Create a job_t to track a process launchd did not spawn (e.g. one
 * that contacted launchd over IPC). The PID and its credentials are
 * validated via sysctl(KERN_PROC) before the job is created, and a
 * proc kevent is registered so launchd observes the process's
 * exec/fork/exit/reap events. Unlike regular jobs, anonymous jobs may
 * be created even while the manager is shutting down.
 */
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, anonpid };
	struct kinfo_proc kp;
	size_t len = sizeof(kp);
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (!jobmgr_assumes(jm, anonpid != 0)) {
		errno = EINVAL;
		return NULL;
	}

	if (!jobmgr_assumes(jm, anonpid < 100000)) {
		/* The kernel current defines PID_MAX to be 99999, but that define isn't exported */
		errno = EINVAL;
		return NULL;
	}

	if (!jobmgr_assumes(jm, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
		return NULL;
	}

	if (unlikely(len != sizeof(kp))) {
		/* A short result means the kernel has no such process. */
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for nonexistent PID: %u", anonpid);
		errno = ESRCH;
		return NULL;
	}

	if (!jobmgr_assumes(jm, kp.kp_proc.p_comm[0] != '\0')) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(kp.kp_proc.p_stat == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, kp.kp_proc.p_comm);
	}

	if (unlikely(kp.kp_proc.p_flag & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, kp.kp_proc.p_comm);
	}

	kp_euid = kp.kp_eproc.e_ucred.cr_uid;
	kp_uid = kp.kp_eproc.e_pcred.p_ruid;
	kp_svuid = kp.kp_eproc.e_pcred.p_svuid;
	kp_egid = kp.kp_eproc.e_ucred.cr_gid;
	kp_gid = kp.kp_eproc.e_pcred.p_rgid;
	kp_svgid = kp.kp_eproc.e_pcred.p_svgid;

	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, kp.kp_proc.p_comm);
	}

	/* "Fix" for a problem that shouldn't even exist.
	 * See rdar://problem/7264615 for the symptom and rdar://problem/5020256
	 * as to why this can happen.
	 */
	if( !jobmgr_assumes(jm, kp.kp_eproc.e_ppid != anonpid) ) {
		jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). It should find a different way to do whatever it's doing. Setting PPID to 0: %s", kp.kp_proc.p_comm);
		errno = EINVAL;
		return NULL;
	}

	/* NOTE(review): jp is always NULL at this point — it is only
	 * assigned in the switch at the bottom of this function (which was
	 * moved down as part of the rdar://7264615 mitigation), so this
	 * branch appears unreachable. Confirm whether this *fork() warning
	 * was meant to survive that move. */
	if (jp && !jp->anonymous && unlikely(!(kp.kp_proc.p_flag & P_EXEC))) {
		job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u",
				kp.kp_proc.p_pid);
	}

	/* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	if (jobmgr_assumes(jm, (jr = job_new(jm, AUTO_PICK_ANONYMOUS_LABEL, kp.kp_proc.p_comm, NULL)) != NULL)) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT|NOTE_REAP;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		/* anonymous process reaping is messy */
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1) && job_assumes(jr, errno == ESRCH)) {
			/* zombies are weird */
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state && jm->hopefully_first_cnt == 0)) {
			job_log(jr, LOG_SCOLDING, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		/* NOTE(review): jp is also still NULL here, so the PPID suffix is
		 * never appended — confirm intent. */
		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, kp.kp_eproc.e_ppid, jp ? ": " : "", jp ? jp->label : "");
	}

	/* Restore the shutdown flag temporarily cleared above. */
	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	/* This is down here to mitigate the effects of rdar://problem/7264615, in which a process
	 * attaches to its own parent. We need to make sure that the anonymous job has been added
	 * to the process list so that, if it's used ptrace(3) to cause a cycle in the process
	 * tree (thereby making it not a tree anymore), we'll find the tracing parent PID of the
	 * parent process, which is the child, when we go looking for it in jobmgr_find_by_pid().
	 */
	switch (kp.kp_eproc.e_ppid) {
	case 0:
		/* the kernel */
		break;
	case 1:
		if (!pid1_magic) {
			/* we cannot possibly find a parent job_t that is useful in this function */
			break;
		}
		/* fall through */
	default:
		jp = jobmgr_find_by_pid(jm, kp.kp_eproc.e_ppid, true);
		jobmgr_assumes(jm, jp != NULL);
		break;
	}

	return jr;
}
1492
1493 job_t
1494 job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
1495 {
1496 const char *const *argv_tmp = argv;
1497 char tmp_path[PATH_MAX];
1498 char auto_label[1000];
1499 const char *bn = NULL;
1500 char *co;
1501 size_t minlabel_len;
1502 size_t i, cc = 0;
1503 job_t j;
1504
1505 launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);
1506
1507 if (unlikely(jm->shutting_down)) {
1508 errno = EINVAL;
1509 return NULL;
1510 }
1511
1512 if (unlikely(prog == NULL && argv == NULL)) {
1513 errno = EINVAL;
1514 return NULL;
1515 }
1516
1517 char *anon_or_legacy = ( label == AUTO_PICK_ANONYMOUS_LABEL ) ? "anonymous" : "mach_init";
1518 if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
1519 if (prog) {
1520 bn = prog;
1521 } else {
1522 strlcpy(tmp_path, argv[0], sizeof(tmp_path));
1523 bn = basename(tmp_path); /* prog for auto labels is kp.kp_kproc.p_comm */
1524 }
1525 snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
1526 label = auto_label;
1527 /* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
1528 minlabel_len = strlen(label) + MAXCOMLEN;
1529 } else {
1530 minlabel_len = strlen(label);
1531 }
1532
1533 j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);
1534
1535 if (!jobmgr_assumes(jm, j != NULL)) {
1536 return NULL;
1537 }
1538
1539 if (unlikely(label == auto_label)) {
1540 snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
1541 } else {
1542 strcpy((char *)j->label, label);
1543 }
1544 j->kqjob_callback = job_callback;
1545 j->mgr = jm;
1546 j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
1547 j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
1548 j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
1549 j->currently_ignored = true;
1550 j->ondemand = true;
1551 j->checkedin = true;
1552 j->jetsam_priority = -1;
1553 j->jetsam_memlimit = -1;
1554 uuid_clear(j->expected_audit_uuid);
1555
1556 if (prog) {
1557 j->prog = strdup(prog);
1558 if (!job_assumes(j, j->prog != NULL)) {
1559 goto out_bad;
1560 }
1561 }
1562
1563 if (likely(argv)) {
1564 while (*argv_tmp++) {
1565 j->argc++;
1566 }
1567
1568 for (i = 0; i < j->argc; i++) {
1569 cc += strlen(argv[i]) + 1;
1570 }
1571
1572 j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
1573
1574 if (!job_assumes(j, j->argv != NULL)) {
1575 goto out_bad;
1576 }
1577
1578 co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));
1579
1580 for (i = 0; i < j->argc; i++) {
1581 j->argv[i] = co;
1582 strcpy(co, argv[i]);
1583 co += strlen(argv[i]) + 1;
1584 }
1585 j->argv[i] = NULL;
1586 }
1587
1588 if( strcmp(j->label, "com.apple.WindowServer") == 0 ) {
1589 j->has_console = true;
1590 }
1591
1592 LIST_INSERT_HEAD(&jm->jobs, j, sle);
1593 LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);
1594 uuid_clear(j->expected_audit_uuid);
1595
1596 job_log(j, LOG_DEBUG, "Conceived");
1597
1598 return j;
1599
1600 out_bad:
1601 if (j->prog) {
1602 free(j->prog);
1603 }
1604 free(j);
1605
1606 return NULL;
1607 }
1608
1609 job_t
1610 job_import(launch_data_t pload)
1611 {
1612 job_t j = jobmgr_import2(root_jobmgr, pload);
1613
1614 if (unlikely(j == NULL)) {
1615 return NULL;
1616 }
1617
1618 /* Since jobs are effectively stalled until they get security sessions assigned
1619 * to them, we may wish to reconsider this behavior of calling the job "enabled"
1620 * as far as other jobs with the OtherJobEnabled KeepAlive criterion set.
1621 */
1622 job_dispatch_curious_jobs(j);
1623 return job_dispatch(j, false);
1624 }
1625
/*
 * Import an array of job plists. Returns a parallel array of errno
 * values, one per input job. Dispatch is deferred to a second pass so
 * every job exists before any of them runs, letting inter-job
 * references resolve regardless of the order in the input array.
 */
launch_data_t
job_import_bulk(launch_data_t pload)
{
	launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
	job_t *ja;
	size_t i, c = launch_data_array_get_count(pload);

	ja = alloca(c * sizeof(job_t));

	for (i = 0; i < c; i++) {
		/* On success errno is reset to 0 for the response — except
		 * ENEEDAUTH, which the caller must still see even though the
		 * import itself succeeded. */
		if( (likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH ) {
			errno = 0;
		}
		launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
	}

	/* Second pass: dispatch everything that imported successfully. */
	for (i = 0; i < c; i++) {
		if (likely(ja[i])) {
			job_dispatch_curious_jobs(ja[i]);
			job_dispatch(ja[i], false);
		}
	}

	return resp;
}
1651
/*
 * Apply one boolean-valued key from an imported job plist to "j".
 * Keys are pre-dispatched on their first character to keep the common
 * case to a single strcasecmp(). Unknown keys are logged and ignored.
 */
void
job_import_bool(job_t j, const char *key, bool value)
{
	bool found_key = false;

	switch (key[0]) {
	case 'a':
	case 'A':
		if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
			j->abandon_pg = value;
			found_key = true;
		}
		break;
	case 'k':
	case 'K':
		/* KeepAlive is the inverse of on-demand. */
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
			j->ondemand = !value;
			found_key = true;
		}
		break;
	case 'o':
	case 'O':
		if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
			j->ondemand = value;
			found_key = true;
		}
		break;
	case 'd':
	case 'D':
		if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
			j->debug = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
			/* The import path expects Disabled to be false by the time it
			 * reaches here; a true value is logged as a broken assumption. */
			job_assumes(j, !value);
			found_key = true;
		}
		break;
	case 'h':
	case 'H':
		if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
			j->hopefully_exits_last = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSFIRST) == 0) {
			j->hopefully_exits_first = value;
			found_key = true;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
			j->session_create = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
			j->start_on_mount = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
			/* this only does something on Mac OS X 10.4 "Tiger" */
			found_key = true;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
			j->low_pri_io = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
			j->only_once = value;
			found_key = true;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			j->internal_exc_handler = value;
			found_key = true;
		}
		break;
	case 'i':
	case 'I':
		if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
			/* Group initialization requires root; ignore otherwise. */
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			j->no_init_groups = !value;
			found_key = true;
		} else if( strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0 ) {
			j->ignore_pg_at_shutdown = value;
			found_key = true;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
			if (value) {
				/* We don't want value == false to change j->start_pending */
				j->start_pending = true;
			}
			found_key = true;
		}
		break;
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
			j->globargv = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
			j->kill_via_shmem = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
			j->debug_before_kill = value;
			found_key = true;
		} else if( strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0 ) {
			/* Only one job at a time may hold the embedded privilege
			 * dispensation (tracked in s_embedded_privileged_job). */
			if( !s_embedded_privileged_job ) {
				j->embedded_special_privileges = value;
				s_embedded_privileged_job = j;
			} else {
				job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
			}
			found_key = true;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
			j->wait4debugger = value;
			found_key = true;
		}
		break;
	default:
		break;
	}

	if (unlikely(!found_key)) {
		job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
	}
}
1789
/*
 * Apply one string-valued key from an imported job plist to "j".
 * Most keys are simply strdup()'d into the matching job field via
 * "where2put"; several keys are handled elsewhere in the import path
 * and return early; root-only keys are ignored with a warning when
 * launchd is not running as root. StandardInPath additionally opens the
 * file and registers a read kevent as a side effect.
 */
void
job_import_string(job_t j, const char *key, const char *value)
{
	char **where2put = NULL;

	switch (key[0]) {
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			where2put = &j->alt_exc_handler;
		}
		break;
	case 'p':
	case 'P':
		/* Program is consumed when the job is created (see job_new()). */
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
			return;
		}
		break;
	case 'l':
	case 'L':
		/* Label and the LimitLoad* keys are processed before this point. */
		if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			return;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
			/* chroot()ing requires root; ignore otherwise. */
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			where2put = &j->rootdir;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
			where2put = &j->workingdir;
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "root") == 0) {
				/* Running as root is the default; nothing to record. */
				return;
			}
			where2put = &j->username;
		}
		break;
	case 'g':
	case 'G':
		if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "wheel") == 0) {
				/* wheel is the default group; nothing to record. */
				return;
			}
			where2put = &j->groupname;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
			where2put = &j->stdoutpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
			where2put = &j->stderrpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
			where2put = &j->stdinpath;
			/* Opened non-blocking so a FIFO with no writer cannot hang
			 * launchd during import. */
			j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
			if (job_assumes(j, j->stdin_fd != -1)) {
				/* open() should not block, but regular IO by the job should */
				job_assumes(j, fcntl(j->stdin_fd, F_SETFL, 0) != -1);
				/* XXX -- EV_CLEAR should make named pipes happy? */
				job_assumes(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j) != -1);
			} else {
				j->stdin_fd = 0;
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
			where2put = &j->seatbelt_profile;
#endif
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
		break;
	}

	if (likely(where2put)) {
		job_assumes(j, (*where2put = strdup(value)) != NULL);
	} else {
		/* See rdar://problem/5496612. These two are okay. */
		if( strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) != 0 && strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) != 0 ) {
			job_log(j, LOG_WARNING, "Unknown key: %s", key);
		}
	}
}
1897
/*
 * Import a single integer-valued key from a job's property list into 'j'.
 *
 * Out-of-range values are logged and ignored rather than silently
 * truncated. The switch on key[0] is only a fast pre-filter; the
 * strcasecmp() calls do the real matching.
 */
void
job_import_integer(job_t j, const char *key, long long value)
{
	switch (key[0]) {
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
			if (unlikely(value < 0)) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
			} else if (unlikely(value > UINT32_MAX)) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
			} else {
				j->exit_timeout = (typeof(j->exit_timeout)) value;
			}
		} else if( strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0 ) {
			j->main_thread_priority = value;
		}
		break;
	case 'j':
	case 'J':
		if( strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 ) {
			job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");

			/* Wrap the bare integer so the JetsamProperties code path can
			 * handle both the legacy and the dictionary form uniformly. */
			launch_data_t pri = launch_data_new_integer(value);
			if( job_assumes(j, pri != NULL) ) {
				jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
				launch_data_free(pri);
			}
		}
		/* Bug fix: this case previously fell through into 'n'/'N'. The
		 * fall-through happened to be harmless (no 'n' key can start with
		 * 'j'), but it was unintentional; terminate the case explicitly. */
		break;
	case 'n':
	case 'N':
		if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
			if (unlikely(value < PRIO_MIN)) {
				job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
			} else if (unlikely(value > PRIO_MAX)) {
				job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
			} else {
				j->nice = (typeof(j->nice)) value;
				j->setnice = true;
			}
		}
		break;
	case 't':
	case 'T':
		if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
			if (unlikely(value < 0)) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
			} else if (unlikely(value > UINT32_MAX)) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
			} else {
				j->timeout = (typeof(j->timeout)) value;
			}
		} else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
			} else {
				j->min_run_time = (typeof(j->min_run_time)) value;
			}
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
			j->mask = value;
			j->setmask = true;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
			if (unlikely(value <= 0)) {
				job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
			} else if (unlikely(value > UINT32_MAX)) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
			} else {
				/* An interval timer keeps launchd alive; take a weak
				 * reference and arm a repeating per-second kevent timer. */
				runtime_add_weak_ref();
				j->start_interval = (typeof(j->start_interval)) value;

				job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
			j->seatbelt_flags = value;
#endif
		}

		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
		break;
	}
}
1992
1993 void
1994 job_import_opaque(job_t j __attribute__((unused)),
1995 const char *key, launch_data_t value __attribute__((unused)))
1996 {
1997 switch (key[0]) {
1998 case 'q':
1999 case 'Q':
2000 #if HAVE_QUARANTINE
2001 if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2002 size_t tmpsz = launch_data_get_opaque_size(value);
2003
2004 if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2005 memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2006 j->quarantine_data_sz = tmpsz;
2007 }
2008 }
2009 #endif
2010 case 's':
2011 case 'S':
2012 if( strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0 ) {
2013 size_t tmpsz = launch_data_get_opaque_size(value);
2014 if( job_assumes(j, tmpsz == sizeof(uuid_t)) ) {
2015 memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2016 }
2017 }
2018 break;
2019 default:
2020 break;
2021 }
2022 }
2023
2024 static void
2025 policy_setup(launch_data_t obj, const char *key, void *context)
2026 {
2027 job_t j = context;
2028 bool found_key = false;
2029
2030 switch (key[0]) {
2031 case 'd':
2032 case 'D':
2033 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2034 j->deny_job_creation = launch_data_get_bool(obj);
2035 found_key = true;
2036 }
2037 break;
2038 default:
2039 break;
2040 }
2041
2042 if (unlikely(!found_key)) {
2043 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2044 }
2045 }
2046
2047 void
2048 job_import_dictionary(job_t j, const char *key, launch_data_t value)
2049 {
2050 launch_data_t tmp;
2051
2052 switch (key[0]) {
2053 case 'p':
2054 case 'P':
2055 if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2056 launch_data_dict_iterate(value, policy_setup, j);
2057 }
2058 break;
2059 case 'k':
2060 case 'K':
2061 if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2062 launch_data_dict_iterate(value, semaphoreitem_setup, j);
2063 }
2064 break;
2065 case 'i':
2066 case 'I':
2067 if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2068 j->inetcompat = true;
2069 j->abandon_pg = true;
2070 if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2071 j->inetcompat_wait = launch_data_get_bool(tmp);
2072 }
2073 }
2074 break;
2075 case 'j':
2076 case 'J':
2077 if( strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0 ) {
2078 launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2079 }
2080 case 'e':
2081 case 'E':
2082 if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2083 launch_data_dict_iterate(value, envitem_setup, j);
2084 }
2085 break;
2086 case 'u':
2087 case 'U':
2088 if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2089 j->importing_global_env = true;
2090 launch_data_dict_iterate(value, envitem_setup, j);
2091 j->importing_global_env = false;
2092 }
2093 break;
2094 case 's':
2095 case 'S':
2096 if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2097 launch_data_dict_iterate(value, socketgroup_setup, j);
2098 } else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2099 calendarinterval_new_from_obj(j, value);
2100 } else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2101 launch_data_dict_iterate(value, limititem_setup, j);
2102 #if HAVE_SANDBOX
2103 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2104 launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2105 #endif
2106 }
2107 break;
2108 case 'h':
2109 case 'H':
2110 if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2111 j->importing_hard_limits = true;
2112 launch_data_dict_iterate(value, limititem_setup, j);
2113 j->importing_hard_limits = false;
2114 }
2115 break;
2116 case 'm':
2117 case 'M':
2118 if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2119 launch_data_dict_iterate(value, machservice_setup, j);
2120 }
2121 break;
2122 default:
2123 job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2124 break;
2125 }
2126 }
2127
2128 void
2129 job_import_array(job_t j, const char *key, launch_data_t value)
2130 {
2131 size_t i, value_cnt = launch_data_array_get_count(value);
2132 const char *str;
2133
2134 switch (key[0]) {
2135 case 'p':
2136 case 'P':
2137 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
2138 return;
2139 }
2140 break;
2141 case 'l':
2142 case 'L':
2143 if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
2144 return;
2145 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
2146 return;
2147 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
2148 job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
2149 return;
2150 }
2151 break;
2152 case 'q':
2153 case 'Q':
2154 if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
2155 for (i = 0; i < value_cnt; i++) {
2156 str = launch_data_get_string(launch_data_array_get_index(value, i));
2157 if (job_assumes(j, str != NULL)) {
2158 semaphoreitem_new(j, DIR_NOT_EMPTY, str);
2159 }
2160 }
2161
2162 }
2163 break;
2164 case 'w':
2165 case 'W':
2166 if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
2167 for (i = 0; i < value_cnt; i++) {
2168 str = launch_data_get_string(launch_data_array_get_index(value, i));
2169 if (job_assumes(j, str != NULL)) {
2170 semaphoreitem_new(j, PATH_CHANGES, str);
2171 }
2172 }
2173 }
2174 break;
2175 case 'b':
2176 case 'B':
2177 if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
2178 socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
2179 } else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
2180 if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
2181 j->j_binpref_cnt = value_cnt;
2182 for (i = 0; i < value_cnt; i++) {
2183 j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
2184 }
2185 }
2186 }
2187 break;
2188 case 's':
2189 case 'S':
2190 if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2191 for (i = 0; i < value_cnt; i++) {
2192 calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
2193 }
2194 }
2195 break;
2196 default:
2197 job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
2198 break;
2199 }
2200 }
2201
2202 void
2203 job_import_keys(launch_data_t obj, const char *key, void *context)
2204 {
2205 job_t j = context;
2206 launch_data_type_t kind;
2207
2208 if (!launchd_assumes(obj != NULL)) {
2209 return;
2210 }
2211
2212 kind = launch_data_get_type(obj);
2213
2214 switch (kind) {
2215 case LAUNCH_DATA_BOOL:
2216 job_import_bool(j, key, launch_data_get_bool(obj));
2217 break;
2218 case LAUNCH_DATA_STRING:
2219 job_import_string(j, key, launch_data_get_string(obj));
2220 break;
2221 case LAUNCH_DATA_INTEGER:
2222 job_import_integer(j, key, launch_data_get_integer(obj));
2223 break;
2224 case LAUNCH_DATA_DICTIONARY:
2225 job_import_dictionary(j, key, obj);
2226 break;
2227 case LAUNCH_DATA_ARRAY:
2228 job_import_array(j, key, obj);
2229 break;
2230 case LAUNCH_DATA_OPAQUE:
2231 job_import_opaque(j, key, obj);
2232 break;
2233 default:
2234 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
2235 break;
2236 }
2237 }
2238
/*
 * Validate a job plist and create the corresponding job in manager 'jm'.
 *
 * Returns the new job, or NULL with errno set (EINVAL for malformed
 * payloads, EEXIST for duplicate labels, EPERM for embedded privilege
 * violations, ENEEDAUTH -- reported via errno only -- when the job must
 * wait for its audit session).
 */
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	if (!jobmgr_assumes(jm, pload != NULL)) {
		errno = EINVAL;
		return NULL;
	}

	/* The payload must be a dictionary with a non-empty string Label. */
	if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(label = launch_data_get_string(tmp)))) {
		errno = EINVAL;
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* On embedded, a privileged action may only import jobs whose UserName
	 * matches the username of the privileged job performing the action. */
	if( unlikely(g_embedded_privileged_action && s_embedded_privileged_job) ) {
		if( unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME))) ) {
			errno = EPERM;
			return NULL;
		}

		const char *username = NULL;
		if( likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING) ) {
			username = launch_data_get_string(tmp);
		} else {
			errno = EPERM;
			return NULL;
		}

		if( !jobmgr_assumes(jm, s_embedded_privileged_job->username != NULL && username != NULL) ) {
			errno = EPERM;
			return NULL;
		}

		if( unlikely(strcmp(s_embedded_privileged_job->username, username) != 0) ) {
			errno = EPERM;
			return NULL;
		}
	} else if( g_embedded_privileged_action ) {
		errno = EINVAL;
		return NULL;
	}
#endif

	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
	    (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		/* argv lives on this stack frame; job_new() is expected to copy
		 * what it needs before we return. */
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		argv[i] = NULL;
	}

	/* Hack to make sure the proper job manager is set the whole way through. */
	launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	if( session ) {
		jm = jobmgr_find_by_name(jm, launch_data_get_string(session)) ?: jm;
	}

	jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);

	/* Reject duplicate labels and labels that fail the syntax checks. */
	if (unlikely((j = job_find(label)) != NULL)) {
		errno = EEXIST;
		return NULL;
	} else if (unlikely(!jobmgr_label_test(jm, label))) {
		errno = EINVAL;
		return NULL;
	}

	if (likely(j = job_new(jm, label, prog, argv))) {
		launch_data_dict_iterate(pload, job_import_keys, j);
		if( !uuid_is_null(j->expected_audit_uuid) ) {
			/* The job cannot be dispatched until its security session
			 * arrives; park it on the needing-sessions list. */
			uuid_string_t uuid_str;
			uuid_unparse(j->expected_audit_uuid, uuid_str);
			job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
			LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
			errno = ENEEDAUTH;
		} else {
			job_log(j, LOG_DEBUG, "No security session specified.");
			j->audit_session = MACH_PORT_NULL;
		}
	}

	return j;
}
2365
2366 bool
2367 jobmgr_label_test(jobmgr_t jm, const char *str)
2368 {
2369 char *endstr = NULL;
2370 const char *ptr;
2371
2372 if (str[0] == '\0') {
2373 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
2374 return false;
2375 }
2376
2377 for (ptr = str; *ptr; ptr++) {
2378 if (iscntrl(*ptr)) {
2379 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
2380 return false;
2381 }
2382 }
2383
2384 strtoll(str, &endstr, 0);
2385
2386 if (str != endstr) {
2387 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to begin with numbers: %s", str);
2388 return false;
2389 }
2390
2391 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
2392 (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
2393 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
2394 return false;
2395 }
2396
2397 return true;
2398 }
2399
2400 job_t
2401 job_find(const char *label)
2402 {
2403 job_t ji;
2404
2405 LIST_FOREACH(ji, &label_hash[hash_label(label)], label_hash_sle) {
2406 if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
2407 continue; /* 5351245 and 5488633 respectively */
2408 }
2409
2410 if (strcmp(ji->label, label) == 0) {
2411 return ji;
2412 }
2413 }
2414
2415 errno = ESRCH;
2416 return NULL;
2417 }
2418
2419 /* Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid(). */
2420 job_t
2421 jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
2422 {
2423 job_t ji = NULL;
2424 LIST_FOREACH( ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle ) {
2425 if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay)) ) {
2426 return ji;
2427 }
2428 }
2429
2430 jobmgr_t jmi = NULL;
2431 SLIST_FOREACH( jmi, &jm->submgrs, sle ) {
2432 if( (ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay)) ) {
2433 break;
2434 }
2435 }
2436
2437 return ji;
2438 }
2439
2440 job_t
2441 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
2442 {
2443 job_t ji;
2444
2445 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
2446 if (ji->p == p) {
2447 return ji;
2448 }
2449 }
2450
2451 return create_anon ? job_new_anonymous(jm, p) : NULL;
2452 }
2453
/*
 * Resolve a MIG request port to a job, searching 'jm' and its
 * submanagers. A manager's own port maps to the caller's job (created
 * anonymously if needed); otherwise we match per-job ports.
 */
job_t
job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
{
	if (jm->jm_port == mport) {
		return jobmgr_find_by_pid(jm, upid, true);
	}

	jobmgr_t sub;
	SLIST_FOREACH(sub, &jm->submgrs, sle) {
		job_t found = job_mig_intran2(sub, mport, upid);
		if (found) {
			return found;
		}
	}

	job_t ji;
	LIST_FOREACH(ji, &jm->jobs, sle) {
		if (ji->j_port == mport) {
			return ji;
		}
	}

	return NULL;
}
2480
/*
 * MIG intran routine: translate the request port of an incoming MIG
 * message into the job_t it belongs to, using the caller's audited PID.
 * On failure, log enough about the caller to debug the confusion.
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	jr = job_mig_intran2(root_jobmgr, p, ldc->pid);

	if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
		/* Fetch the caller's kinfo_proc so the log line can include its
		 * process name alongside PID/UID/EUID and the port. */
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		mib[3] = ldc->pid;

		if (jobmgr_assumes(root_jobmgr, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)
				&& jobmgr_assumes(root_jobmgr, len == sizeof(kp))) {
			jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc->pid, ldc->uid, ldc->euid, p, kp.kp_proc.p_comm);
		}
	}

	return jr;
}
2504
/*
 * Map a Mach service port back to the job that registered it. Only
 * receive-right services are considered.
 */
job_t
job_find_by_service_port(mach_port_t p)
{
	struct machservice *svc;

	LIST_FOREACH(svc, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (!svc->recv) {
			continue;
		}

		if (svc->port == p) {
			return svc->job;
		}
	}

	return NULL;
}
2518
/*
 * MIG destructor hook, run after every MIG routine returns.
 *
 * WARNING: do not dereference 'j' unconditionally here.
 */
void
job_mig_destructor(job_t j)
{
	/*
	 * 5477111
	 *
	 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
	 */

	/* workaround_5477111 marks a job already torn down during the MIG
	 * call; only a pointer-compare is safe against it. */
	if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
		job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
		job_remove(j);
	}

	workaround_5477111 = NULL;

	calendarinterval_sanity_check();
}
2537
2538 void
2539 job_export_all2(jobmgr_t jm, launch_data_t where)
2540 {
2541 jobmgr_t jmi;
2542 job_t ji;
2543
2544 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
2545 job_export_all2(jmi, where);
2546 }
2547
2548 LIST_FOREACH(ji, &jm->jobs, sle) {
2549 launch_data_t tmp;
2550
2551 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
2552 launch_data_dict_insert(where, tmp, ji->label);
2553 }
2554 }
2555 }
2556
2557 launch_data_t
2558 job_export_all(void)
2559 {
2560 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
2561
2562 if (launchd_assumes(resp != NULL)) {
2563 job_export_all2(root_jobmgr, resp);
2564 }
2565
2566 return resp;
2567 }
2568
/*
 * Log (Apple-internal builds only) any processes still sharing the dead
 * job's process group -- evidence of abandoned descendants.
 */
void
job_log_stray_pg(job_t j)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PGRP, j->p };
	/* Worst-case buffer: one kinfo_proc per possible process. sysctl()
	 * shrinks 'len' to the bytes actually returned. */
	size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;

	if (!do_apple_internal_logging) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);

	if (!job_assumes(j, (kp = malloc(len)) != NULL)) {
		return;
	}
	if (!job_assumes(j, sysctl(mib, 4, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = kp[i].kp_proc.p_pid;
		pid_t pp_i = kp[i].kp_eproc.e_ppid;
		const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
		const char *n = kp[i].kp_proc.p_comm;

		/* The dead job itself is expected; PIDs 0 and 1 never belong in
		 * a job's process group. */
		if (p_i == j->p) {
			continue;
		} else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
			continue;
		}

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u %s", z, p_i, pp_i, n);
	}

out:
	free(kp);
}
2609
/*
 * Reap a dead job: collect its exit status and rusage, clean up every
 * per-process resource (shared memory, redirected fds, kevents, wait
 * replies, pending samples), accumulate resource usage, log how it
 * died, and reset the per-run state so the job can be respawned.
 *
 * The statement order below is load-bearing; several steps depend on
 * j->p still being valid and are sequenced accordingly.
 */
void
job_reap(job_t j)
{
	struct rusage ru;
	int status;

	bool is_system_bootstrapper = j->is_bootstrapper && pid1_magic && !j->mgr->parentmgr;

	job_log(j, LOG_DEBUG, "Reaping");

	if (j->shmem) {
		job_assumes(j, vm_deallocate(mach_task_self(), (vm_address_t)j->shmem, getpagesize()) == 0);
		j->shmem = NULL;
	}

	if (unlikely(j->weird_bootstrap)) {
		int64_t junk = 0;
		job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
	}

	/* Drain anything still buffered in the stdout/stderr redirect pipe
	 * before closing it. */
	if (j->log_redirect_fd && !j->legacy_LS_job) {
		job_log_stdouterr(j); /* one last chance */

		if (j->log_redirect_fd) {
			job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
			j->log_redirect_fd = 0;
		}
	}

	if (j->fork_fd) {
		job_assumes(j, runtime_close(j->fork_fd) != -1);
		j->fork_fd = 0;
	}

	if (j->anonymous) {
		/* Anonymous jobs are not our children; there is nothing to
		 * wait4() on. */
		status = 0;
		memset(&ru, 0, sizeof(ru));
	} else {
		/*
		 * The job is dead. While the PID/PGID is still known to be
		 * valid, try to kill abandoned descendant processes.
		 */
		job_log_stray_pg(j);
		if (!j->abandon_pg) {
			if (unlikely(runtime_killpg(j->p, SIGTERM) == -1 && errno != ESRCH)) {
#ifdef __LP64__
				job_log(j, LOG_APPLEONLY, "Bug: 5487498");
#else
				job_assumes(j, false);
#endif
			}
		}

		/*
		 * 5020256
		 *
		 * The current implementation of ptrace() causes the traced process to
		 * be abducted away from the true parent and adopted by the tracer.
		 *
		 * Once the tracing process relinquishes control, the kernel then
		 * restores the true parent/child relationship.
		 *
		 * Unfortunately, the wait*() family of APIs is unaware of the temporarily
		 * data structures changes, and they return an error if reality hasn't
		 * been restored by the time they are called.
		 */
		if (!job_assumes(j, wait4(j->p, &status, 0, &ru) != -1)) {
			job_log(j, LOG_NOTICE, "Working around 5020256. Assuming the job crashed.");

			status = W_EXITCODE(0, SIGSEGV);
			memset(&ru, 0, sizeof(ru));
		}
	}

	if (j->exit_timeout) {
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}

	LIST_REMOVE(j, pid_hash_sle);

	/* Satisfy any MPM waiter blocked on this job's exit status. */
	if (j->wait_reply_port) {
		job_log(j, LOG_DEBUG, "MPM wait reply being sent");
		job_assumes(j, job_mig_wait_reply(j->wait_reply_port, 0, status) == 0);
		j->wait_reply_port = MACH_PORT_NULL;
	}

	if( j->pending_sample ) {
		job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job exited before we could sample it.");
		STAILQ_REMOVE(&j->mgr->pending_samples, j, job_s, pending_samples_sle);
		j->pending_sample = false;
	}

	if (j->sent_signal_time) {
		uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);

		td_sec = td / NSEC_PER_SEC;
		td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

		job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
	}

	/* Accumulate resource usage across runs of this job. */
	timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
	timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
	j->ru.ru_maxrss += ru.ru_maxrss;
	j->ru.ru_ixrss += ru.ru_ixrss;
	j->ru.ru_idrss += ru.ru_idrss;
	j->ru.ru_isrss += ru.ru_isrss;
	j->ru.ru_minflt += ru.ru_minflt;
	j->ru.ru_majflt += ru.ru_majflt;
	j->ru.ru_nswap += ru.ru_nswap;
	j->ru.ru_inblock += ru.ru_inblock;
	j->ru.ru_oublock += ru.ru_oublock;
	j->ru.ru_msgsnd += ru.ru_msgsnd;
	j->ru.ru_msgrcv += ru.ru_msgrcv;
	j->ru.ru_nsignals += ru.ru_nsignals;
	j->ru.ru_nvcsw += ru.ru_nvcsw;
	j->ru.ru_nivcsw += ru.ru_nivcsw;

	if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
		job_log(j, LOG_WARNING, "Exited with exit code: %d", WEXITSTATUS(status));
	}

	if (WIFSIGNALED(status)) {
		int s = WTERMSIG(status);
		/* SIGKILL/SIGTERM on an un-stopped job is a normal shutdown;
		 * anything else (unless we cleanly killed it) is abnormal. */
		if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
			job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
		} else if( !j->stopped && !j->clean_kill ) {
			switch( s ) {
				/* Signals which indicate a crash. */
				case SIGILL	:
				case SIGABRT	:
				case SIGFPE	:
				case SIGBUS	:
				case SIGSEGV	:
				case SIGSYS	:
				/* If the kernel has posted NOTE_EXIT and the signal sent to the process was
				 * SIGTRAP, assume that it's a crash.
				 */
				case SIGTRAP	:
					j->crashed = true;
					job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
					break;
				default	:
					job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
					break;
			}

			if( is_system_bootstrapper && j->crashed ) {
				job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
			}
		}
	}

	j->reaped = true;

	/* Drain and/or reset Mach service ports depending on whether the job
	 * crashed or died before exec. */
	struct machservice *msi = NULL;
	if( j->crashed || !(j->did_exec || j->anonymous) ) {
		SLIST_FOREACH( msi, &j->machservices, sle ) {
			if( j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash) ) {
				machservice_drain_port(msi);
			}

			if( !j->did_exec && msi->reset && job_assumes(j, !msi->isActive) ) {
				machservice_resetport(j, msi);
			}
		}
	}

	/* Forcibly resume any per-user launchds this job had suspended. */
	struct suspended_peruser *spi = NULL;
	while( (spi = LIST_FIRST(&j->suspended_perusers)) ) {
		job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
		spi->j->peruser_suspend_count--;
		if( spi->j->peruser_suspend_count == 0 ) {
			job_dispatch(spi->j, false);
		}
		LIST_REMOVE(spi, sle);
		free(spi);
	}

	struct waiting_for_exit *w4e = NULL;
	while( (w4e = LIST_FIRST(&j->exit_watchers)) ) {
		waiting4exit_delete(j, w4e);
	}

	/* Bookkeeping: child counts and the runtime reference that kept
	 * launchd alive for this job. */
	if (j->anonymous) {
		total_anon_children--;
		if( j->holds_ref ) {
			runtime_del_ref();
		}
	} else {
		runtime_del_ref();
		total_children--;
	}

	if( j->has_console ) {
		g_wsp = 0;
	}

	if (j->hopefully_exits_first) {
		j->mgr->hopefully_first_cnt--;
	} else if (!j->anonymous && !j->hopefully_exits_last) {
		j->mgr->normal_active_cnt--;
	}
	/* Reset per-run state so the job can be started again. */
	j->last_exit_status = status;
	j->sent_signal_time = 0;
	j->sent_sigkill = false;
	j->clean_kill = false;
	j->sampling_complete = false;
	j->sent_kill_via_shmem = false;
	j->lastlookup = NULL;
	j->lastlookup_gennum = 0;
	j->p = 0;

	/*
	 * We need to someday evaluate other jobs and find those who wish to track the
	 * active/inactive state of this job. The current job_dispatch() logic makes
	 * this messy, given that jobs can be deleted at dispatch.
	 */
}
2829
2830 void
2831 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
2832 {
2833 jobmgr_t jmi, jmn;
2834 job_t ji, jn;
2835
2836 if (jm->shutting_down) {
2837 return;
2838 }
2839
2840 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
2841 jobmgr_dispatch_all(jmi, newmounthack);
2842 }
2843
2844 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
2845 if (newmounthack && ji->start_on_mount) {
2846 ji->start_pending = true;
2847 }
2848
2849 job_dispatch(ji, false);
2850 }
2851 }
2852
/*
 * Fork a child that runs 'what_to_do(j)' (expected to exec or _exit).
 * A socketpair handshake makes the child block until the parent has
 * attached a NOTE_EXIT kevent, so the exit can never be missed.
 *
 * Returns the child's PID, or -1 on failure.
 */
pid_t
basic_spawn(job_t j, void (*what_to_do)(job_t))
{
	pid_t p = 0;
	thread_state_flavor_t f = 0;
#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	int execpair[2] = { 0, 0 };
	job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execpair) != -1);

	switch( (p = fork()) ) {
	case 0 :
		job_assumes(j, runtime_close(execpair[0]) != -1);
		/* Wait for the parent to attach a kevent. */
		read(_fd(execpair[1]), &p, sizeof(p));
		what_to_do(j);
		_exit(EXIT_FAILURE);
	case -1 :
		job_assumes(j, runtime_close(execpair[0]) != -1);
		job_assumes(j, runtime_close(execpair[1]) != -1);
		execpair[0] = -1;
		execpair[1] = -1;
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "fork(2) failed: %d", errno);
		break;
	default :
		job_assumes(j, runtime_close(execpair[1]) != -1);
		execpair[1] = -1;
		break;
	}

	int r = -1;
	if( p != -1 ) {
		/* Let us know when sample is done. ONESHOT is implicit if we're just interested in NOTE_EXIT. */
		if( job_assumes(j, (r = kevent_mod(p, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j)) != -1) ) {
			/* Release the child; if the handshake write fails, kill and
			 * de-register it so nothing is left dangling. */
			if( !job_assumes(j, write(execpair[0], &p, sizeof(p)) == sizeof(p)) ) {
				job_assumes(j, kevent_mod(p, EVFILT_PROC, EV_DELETE, 0, 0, NULL) != -1);
				job_assumes(j, runtime_kill(p, SIGKILL) != -1);
				r = -1;
				p = -1;
			}
		} else {
			job_assumes(j, runtime_kill(p, SIGKILL) != -1);
		}

		int status = 0;
		if( r == -1 ) {
			job_assumes(j, waitpid(p, &status, WNOHANG) != -1);
		}
	}

	if( execpair[0] != -1 ) {
		job_assumes(j, runtime_close(execpair[0]) != -1);
	}

	if( execpair[1] != -1 ) {
		/* Bug fix: this branch previously closed execpair[0] again,
		 * leaking execpair[1] and risking a double-close. */
		job_assumes(j, runtime_close(execpair[1]) != -1);
	}

	return p;
}
2921
/*
 * Child-side helper for basic_spawn(): exec /usr/bin/sample against the
 * job's PID, writing the result to the manager's sample log file.
 * Never returns; _exit()s if the execve fails.
 */
void
take_sample(job_t j)
{
	char pidstr[32];
	snprintf(pidstr, sizeof(pidstr), "%u", j->p);
#if !TARGET_OS_EMBEDDED
	/* -nodsyms so sample doesn't try to use Spotlight to find dsym files after mds has gone away. */
	char *sample_args[] = { "/usr/bin/sample", pidstr, "1", "-unsupportedShowArch", "-mayDie", "-nodsyms", "-file", j->mgr->sample_log_file, NULL };
#else
	char *sample_args[] = { "/usr/bin/sample", pidstr, "1", "-unsupportedShowArch", "-mayDie", "-file", j->mgr->sample_log_file, NULL };
#endif

	execve(sample_args[0], sample_args, environ);
	_exit(EXIT_FAILURE);
}
2937
/*
 * Start sampling the next queued job in 'jm', if any. Only one sample
 * runs at a time; on spawn failure the job is marked complete and the
 * function recurses to try the next candidate.
 */
void
jobmgr_dequeue_next_sample(jobmgr_t jm)
{
	if( STAILQ_EMPTY(&jm->pending_samples) ) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sample queue is empty.");
		return;
	}

	/* Dequeue the next in line. */
	job_t j = STAILQ_FIRST(&jm->pending_samples);
	if( j->is_being_sampled ) {
		job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sampling is in progress. Not dequeuing next job.");
		return;
	}

	if( !job_assumes(j, !j->sampling_complete) ) {
		return;
	}

	/* Sampling is an Apple-internal diagnostic only. */
	if (!job_assumes(j, do_apple_internal_logging)) {
		return;
	}

	if (!job_assumes(j, mkdir(SHUTDOWN_LOG_DIR, S_IRWXU) != -1 || errno == EEXIST)) {
		return;
	}

	char pidstr[32];
	snprintf(pidstr, sizeof(pidstr), "%u", j->p);
	snprintf(j->mgr->sample_log_file, sizeof(j->mgr->sample_log_file), SHUTDOWN_LOG_DIR "/%s-%u.sample.txt", j->label, j->p);

	if (job_assumes(j, unlink(jm->sample_log_file) != -1 || errno == ENOENT)) {
		/* take_sample() runs in the spawned child and execs sample(1). */
		pid_t sp = basic_spawn(j, take_sample);

		if( sp == -1 ) {
			job_log(j, LOG_ERR | LOG_CONSOLE, "Sampling for job failed!");
			STAILQ_REMOVE(&jm->pending_samples, j, job_s, pending_samples_sle);
			j->sampling_complete = true;
			/* Try the next queued job instead. */
			jobmgr_dequeue_next_sample(jm);
		} else {
			j->tracing_pid = sp;
			j->is_being_sampled = true;
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sampling job (sample PID: %i, file: %s).", sp, j->mgr->sample_log_file);
		}
	} else {
		STAILQ_REMOVE(&jm->pending_samples, j, job_s, pending_samples_sle);
		j->sampling_complete = true;
	}

	j->pending_sample = false;
}
2989
2990 void
2991 job_dispatch_curious_jobs(job_t j)
2992 {
2993 job_t ji = NULL, jt = NULL;
2994 SLIST_FOREACH_SAFE( ji, &s_curious_jobs, curious_jobs_sle, jt ) {
2995 struct semaphoreitem *si = NULL;
2996 SLIST_FOREACH( si, &ji->semaphores, sle ) {
2997 if( !(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) ) {
2998 continue;
2999 }
3000
3001 if( strncmp(si->what, j->label, strlen(j->label)) == 0 ) {
3002 job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);
3003
3004 job_dispatch(ji, false);
3005 /* ji could be removed here, so don't do anything with it or its semaphores
3006 * after this point.
3007 */
3008 break;
3009 }
3010 }
3011 }
3012 }
3013
/*
 * Decide what to do with job 'j': remove it if useless, start it if
 * kickstarted or its keepalive criteria demand it, otherwise (re)arm
 * its watches.
 *
 * Returns 'j', or NULL if the job was removed or may not be dispatched
 * (no audit session yet, embedded privilege mismatch, suspended
 * per-user launchd). Callers must treat a NULL return as "j may be
 * gone".
 */
job_t
job_dispatch(job_t j, bool kickstart)
{
	/* Don't dispatch a job if it has no audit session set. */
	if( !uuid_is_null(j->expected_audit_uuid) ) {
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* Mirror of the privilege check in jobmgr_import2(): a privileged
	 * action may only touch jobs owned by the same username. */
	if( g_embedded_privileged_action && s_embedded_privileged_job ) {
		if( !job_assumes(j, s_embedded_privileged_job->username != NULL && j->username != NULL) ) {
			errno = EPERM;
			return NULL;
		}

		if( strcmp(j->username, s_embedded_privileged_job->username) != 0 ) {
			errno = EPERM;
			return NULL;
		}
	} else if( g_embedded_privileged_action ) {
		errno = EINVAL;
		return NULL;
	}
#endif

	/*
	 * The whole job removal logic needs to be consolidated. The fact that
	 * a job can be removed from just about anywhere makes it easy to have
	 * stale pointers left behind somewhere on the stack that might get
	 * used after the deallocation. In particular, during job iteration.
	 *
	 * This is a classic example. The act of dispatching a job may delete it.
	 */
	if (!job_active(j)) {
		if (job_useless(j)) {
			job_remove(j);
			return NULL;
		}
		if( unlikely(j->per_user && j->peruser_suspend_count > 0) ) {
			return NULL;
		}

		if (kickstart || job_keepalive(j)) {
			job_log(j, LOG_DEBUG, "Starting job (kickstart = %s)", kickstart ? "true" : "false");
			job_start(j);
		} else {
			job_log(j, LOG_DEBUG, "Watching job (kickstart = %s)", kickstart ? "true" : "false");
			job_watch(j);

			/*
			 * 5455720
			 *
			 * Path checking and monitoring is really racy right now.
			 * We should clean this up post Leopard.
			 */
			if (job_keepalive(j)) {
				job_start(j);
			}
		}
	} else {
		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job (%s).", job_active(j));
	}

	return j;
}
3079
3080 void
3081 job_log_stdouterr2(job_t j, const char *msg, ...)
3082 {
3083 struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
3084 va_list ap;
3085
3086 va_start(ap, msg);
3087 runtime_vsyslog(&attr, msg, ap);
3088 va_end(ap);
3089 }
3090
3091 void
3092 job_log_stdouterr(job_t j)
3093 {
3094 char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
3095 bool close_log_redir = false;
3096 ssize_t rsz;
3097
3098 if (!job_assumes(j, buf != NULL)) {
3099 return;
3100 }
3101
3102 bufindex = buf;
3103
3104 rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);
3105
3106 if (unlikely(rsz == 0)) {
3107 job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
3108 close_log_redir = true;
3109 } else if (rsz == -1) {
3110 if( !job_assumes(j, errno == EAGAIN) ) {
3111 close_log_redir = true;
3112 }
3113 } else {
3114 buf[rsz] = '\0';
3115
3116 while ((msg = strsep(&bufindex, "\n\r"))) {
3117 if (msg[0]) {
3118 job_log_stdouterr2(j, "%s", msg);
3119 }
3120 }
3121 }
3122
3123 free(buf);
3124
3125 if (unlikely(close_log_redir)) {
3126 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
3127 j->log_redirect_fd = 0;
3128 job_dispatch(j, false);
3129 }
3130 }
3131
void
job_kill(job_t j)
{
	/* Forcibly terminate the job with SIGKILL and arm a timer so we notice
	 * (and log) if the process still refuses to die. Anonymous jobs and
	 * jobs without a live PID are left untouched.
	 */
	if (unlikely(!j->p || j->anonymous)) {
		return;
	}

	job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);

	j->sent_sigkill = true;

	/* Clean kills get a shorter grace period than regular exit timeouts. */
	intptr_t grace = j->clean_kill ? LAUNCHD_CLEAN_KILL_TIMER : LAUNCHD_SIGKILL_TIMER;
	job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, grace, j) != -1);

	job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
}
3148
void
job_log_children_without_exec(job_t j)
{
	/* Apple-internal diagnostic: log every child of this job that fork()ed
	 * but never exec()ed (P_EXEC not set), nudging developers toward
	 * posix_spawn*(). Skipped for anonymous and per-user jobs.
	 */
	/* <rdar://problem/5701343> ER: Add a KERN_PROC_PPID sysctl */
#ifdef KERN_PROC_PPID
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PPID, j->p };
#else
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
#endif
	size_t mib_sz = sizeof(mib) / sizeof(mib[0]);
	/* Size the buffer for the theoretical maximum number of processes. */
	size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;

	if (!do_apple_internal_logging || j->anonymous || j->per_user) {
		return;
	}

	if (!job_assumes(j, (kp = malloc(len)) != NULL)) {
		return;
	}
	if (!job_assumes(j, sysctl(mib, (u_int) mib_sz, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	/* sysctl() updated len to the bytes actually returned. */
	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
#ifndef KERN_PROC_PPID
		/* Without the PPID sysctl we fetched every process; filter here. */
		if (kp[i].kp_eproc.e_ppid != j->p) {
			continue;
		}
#endif
		if (kp[i].kp_proc.p_flag & P_EXEC) {
			continue;
		}

		job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u",
				kp[i].kp_proc.p_pid);
	}

out:
	free(kp);
}
3192
void
job_cleanup_after_tracer(job_t j)
{
	/* Called once a process that was tracing/sampling this job has exited.
	 * Reaps the sample tool (if we spawned one), optionally kills the job
	 * whose sampling delayed its death, and — if the job itself died while
	 * being traced — synthesizes the NOTE_EXIT kevent that was deferred.
	 */
	jobmgr_t jm = NULL;
	if( j->is_being_sampled ) {
		int wstatus = 0;
		job_log(j, LOG_DEBUG | LOG_CONSOLE, "sample[%i] finished with job.", j->tracing_pid);
		/* Reap the sample tool; it is expected to have exited cleanly. */
		if( job_assumes(j, waitpid(j->tracing_pid, &wstatus, 0) != -1) ) {
			job_assumes(j, WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0);
		}
		STAILQ_REMOVE(&j->mgr->pending_samples, j, job_s, pending_samples_sle);

		/* The exit timeout fired while the sample was in progress; now that
		 * the sample is done, deliver the deferred kill.
		 */
		if( j->kill_after_sample ) {
			if (unlikely(j->debug_before_kill)) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
				job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
			}

			job_log(j, LOG_NOTICE, "Killing...");
			job_kill(j);
		}
		j->sampling_complete = true;
		j->is_being_sampled = false;
		jm = j->mgr;
	}

	j->tracing_pid = 0;
	if( j->reap_after_trace ) {
		job_log(j, LOG_DEBUG | LOG_CONSOLE, "Reaping job now that attached tracer is gone.");
		struct kevent kev;
		EV_SET(&kev, j->p, 0, 0, NOTE_EXIT, 0, 0);

		/* Fake a kevent to keep our logic consistent. */
		job_callback_proc(j, &kev);

		/* Normally, after getting a EVFILT_PROC event, we do garbage collection
		 * on the root job manager. To make our fakery complete, we will do garbage
		 * collection at the beginning of the next run loop cycle (after we're done
		 * draining the current queue of kevents).
		 */
		job_assumes(j, kevent_mod((uintptr_t)&root_jobmgr->reboot_flags, EVFILT_TIMER, EV_ADD | EV_ONESHOT, NOTE_NSECONDS, 1, root_jobmgr) != -1);
	}

	if( jm ) {
		/* Sampling is serialized per manager; kick off the next queued one. */
		jobmgr_dequeue_next_sample(jm);
	}
}
3240
void
job_callback_proc(job_t j, struct kevent *kev)
{
	/* Handle an EVFILT_PROC kevent (NOTE_EXIT/NOTE_EXEC/NOTE_FORK/NOTE_REAP)
	 * for this job, including the awkward cases where the job is being
	 * traced (ptrace()/sample) and its reaping must be deferred until the
	 * tracer goes away.
	 */
	bool program_changed = false;
	int fflags = kev->fflags;

	job_log(j, LOG_DEBUG, "EVFILT_PROC event for job:");
	log_kevent_struct(LOG_DEBUG, kev, 0);

	if( fflags & NOTE_EXIT ) {
		if( j->p == (pid_t)kev->ident && !j->anonymous && !j->is_being_sampled ) {
			int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
			struct kinfo_proc kp;
			size_t len = sizeof(kp);

			/* Sometimes, the kernel says it succeeded but really didn't. */
			if( job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) && len == sizeof(kp) ) {
				if( !job_assumes(j, kp.kp_eproc.e_ppid == getpid()) ) {
					/* Someone has attached to the process with ptrace(). There's a race here.
					 * If we determine that we are not the parent process and then fail to attach
					 * a kevent to the parent PID (who is probably using ptrace()), we can take that as an
					 * indication that the parent exited between sysctl(3) and kevent_mod(). The
					 * reparenting of the PID should be atomic to us, so in that case, we reap the
					 * job as normal.
					 *
					 * Otherwise, we wait for the death of the parent tracer and then reap, just as we
					 * would if a job died while we were sampling it at shutdown.
					 *
					 * Note that we foolishly assume that in the process *tree* a node cannot be its
					 * own parent. Apparently, that is not correct. If this is the case, we forsake
					 * the process to its own devices. Let it reap itself.
					 */
					if( !job_assumes(j, kp.kp_eproc.e_ppid != (pid_t)kev->ident) ) {
						job_log(j, LOG_WARNING, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
						return;
					}
					if( job_assumes(j, kevent_mod(kp.kp_eproc.e_ppid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j) != -1) ) {
						j->tracing_pid = kp.kp_eproc.e_ppid;
						j->reap_after_trace = true;
						return;
					}
				}
			}
		} else if( !j->anonymous ) {
			if( j->tracing_pid == (pid_t)kev->ident ) {
				/* The tracer (not the job) exited; clean up and maybe reap. */
				job_cleanup_after_tracer(j);

				return;
			} else if( j->tracing_pid && !j->reap_after_trace ) {
				/* The job exited before our sample completed. */
				job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job has exited. Will reap after tracing PID %i exits.", j->tracing_pid);
				j->reap_after_trace = true;
				return;
			}
		}
	}

	if (fflags & NOTE_EXEC) {
		program_changed = true;

		if (j->anonymous) {
			int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
			struct kinfo_proc kp;
			size_t len = sizeof(kp);

			/* Sometimes, the kernel says it succeeded but really didn't. */
			if (job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) && len == sizeof(kp)) {
				char newlabel[1000];

				snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, kp.kp_proc.p_comm);

				job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);
				j->lastlookup = NULL;
				j->lastlookup_gennum = 0;

				/* Re-hash the job under its new label.
				 * NOTE(review): this strcpy() assumes j->label's allocation can
				 * hold up to ~1000 bytes — presumably guaranteed by how anonymous
				 * job labels are sized at creation; verify before relying on it.
				 */
				LIST_REMOVE(j, label_hash_sle);
				strcpy((char *)j->label, newlabel);
				LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);
			}
		} else {
			j->did_exec = true;
			job_log(j, LOG_DEBUG, "Program changed");
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
		job_log_children_without_exec(j);
	}

	if (fflags & NOTE_EXIT) {
		job_reap(j);

		/* job_dispatch()/job_remove() may invalidate j; j is NULL after this
		 * unless the dispatch kept the job alive.
		 */
		if( !j->anonymous ) {
			j = job_dispatch(j, false);
		} else {
			job_remove(j);
			j = NULL;
		}
	}

	if (j && (fflags & NOTE_REAP)) {
		job_assumes(j, j->p == 0);
	}
}
3346
void
job_callback_timer(job_t j, void *ident)
{
	/* Handle an EVFILT_TIMER kevent for this job. The timer's ident tells us
	 * which timer fired: the job itself (respawn throttle), its semaphores,
	 * its StartInterval, or its exit timeout. The exit-timeout branch may run
	 * up to three times for a slow job (sample trigger, SIGKILL, and a
	 * post-SIGKILL "still not dead" complaint) — see the comment inline.
	 */
	if (j == ident) {
		job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if( !job_assumes(j, j->p != 0) ) {
			return;
		}

		if( j->clean_kill ) {
			job_log(j, LOG_ERR | LOG_CONSOLE, "Clean job failed to exit %u second after receiving SIGKILL.", LAUNCHD_CLEAN_KILL_TIMER);
			/* Fix: the original omitted the "!= -1" comparison, so a failing
			 * kevent_mod() (which returns -1, a truthy value) could never trip
			 * this assumption. Every other kevent_mod() call site checks != -1.
			 */
			job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
			j->clean_exit_timer_expired = true;

			jobmgr_do_garbage_collection(j->mgr);
			return;
		}

		/*
		 * This block might be executed up to 3 times for a given (slow) job
		 * - once for the SAMPLE_TIMEOUT timer, at which point sampling is triggered
		 * - once for the exit_timeout timer, at which point:
		 * - sampling is performed if not triggered previously
		 * - SIGKILL is being sent to the job
		 * - once for the SIGKILL_TIMER timer, at which point we log an issue
		 * with the long SIGKILL
		 */

		if( j->per_user ) {
			/* Don't sample per-user launchd's. */
			j->sampling_complete = true;
		}
		bool was_is_or_will_be_sampled = ( j->sampling_complete || j->is_being_sampled || j->pending_sample );
		bool should_enqueue = ( !was_is_or_will_be_sampled && do_apple_internal_logging );

		if (j->sent_sigkill) {
			/* SIGKILL was already sent and the job is STILL alive. */
			uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);

			td /= NSEC_PER_SEC;
			td -= j->clean_kill ? 0 : j->exit_timeout;

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Did not die after sending SIGKILL %llu seconds ago...", td);
		} else if( should_enqueue && (!j->exit_timeout || (LAUNCHD_SAMPLE_TIMEOUT < j->exit_timeout)) ) {
			/* This should work even if the job changes its exit_timeout midstream */
			job_log(j, LOG_NOTICE | LOG_CONSOLE, "Sampling timeout elapsed (%u seconds). Scheduling a sample...", LAUNCHD_SAMPLE_TIMEOUT);
			if (j->exit_timeout) {
				/* Re-arm the timer for the remainder of the exit timeout. */
				unsigned int ttk = (j->exit_timeout - LAUNCHD_SAMPLE_TIMEOUT);
				job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
							EV_ADD|EV_ONESHOT, NOTE_SECONDS, ttk, j) != -1);
				job_log(j, LOG_NOTICE | LOG_CONSOLE, "Scheduled new exit timeout for %u seconds later", ttk);
			}

			STAILQ_INSERT_TAIL(&j->mgr->pending_samples, j, pending_samples_sle);
			j->pending_sample = true;
			jobmgr_dequeue_next_sample(j->mgr);
		} else {
			if( do_apple_internal_logging && !j->sampling_complete ) {
				/* Exit timeout elapsed, but a sample is running or queued:
				 * defer the kill until the sample completes.
				 */
				if( j->is_being_sampled || j->pending_sample ) {
					char pidstr[24] = { 0 };
					snprintf(pidstr, sizeof(pidstr), "[%i] ", j->tracing_pid);

					job_log(j, LOG_DEBUG | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Will kill after sample%shas completed.", j->exit_timeout, j->tracing_pid ? pidstr : " ");
					j->kill_after_sample = true;
				} else {
					job_log(j, LOG_DEBUG | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Will sample and then kill.", j->exit_timeout);

					STAILQ_INSERT_TAIL(&j->mgr->pending_samples, j, pending_samples_sle);
					j->pending_sample = true;
				}

				jobmgr_dequeue_next_sample(j->mgr);
			} else {
				if (unlikely(j->debug_before_kill)) {
					job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
					job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
				}
				job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
				job_kill(j);
				jobmgr_do_garbage_collection(j->mgr);
			}
		}
	} else {
		/* Unknown timer ident — a programming error. */
		job_assumes(j, false);
	}
}
3441
3442 void
3443 job_callback_read(job_t j, int ident)
3444 {
3445 if (ident == j->log_redirect_fd) {
3446 job_log_stdouterr(j);
3447 } else if (ident == j->stdin_fd) {
3448 job_dispatch(j, true);
3449 } else {
3450 socketgroup_callback(j);
3451 }
3452 }
3453
void
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
{
	/* Recursively deliver a process kevent: descend into every sub-manager
	 * first, then hand the event to the matching job (if any) in this one.
	 */
	jobmgr_t submgr;
	SLIST_FOREACH(submgr, &jm->submgrs, sle) {
		jobmgr_reap_bulk(submgr, kev);
	}

	job_t found = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false);
	if (found != NULL) {
		kev->udata = found;
		job_callback(found, kev);
	}
}
3469
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	/* Top-level kevent handler for events targeted at a job manager rather
	 * than an individual job: bulk process reaping, shutdown signals,
	 * filesystem mounts, manager-scoped timers, and a couple of special
	 * vnodes (/dev/autofs_nowait, the console).
	 */
	jobmgr_t jm = obj;
	job_t ji;

	switch (kev->filter) {
	case EVFILT_PROC:
		jobmgr_reap_bulk(jm, kev);
		root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:
			jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		case SIGUSR2:
			/* SIGUSR2 simulates shutdown for testing: crank up logging and
			 * forward the signal to every per-user launchd.
			 */
			fake_shutdown_in_progress = true;
			runtime_setlogmask(LOG_UPTO(LOG_DEBUG));

			runtime_closelog(); /* HACK -- force 'start' time to be set */

			if (pid1_magic) {
				int64_t now = runtime_get_wall_time();

				jobmgr_log(jm, LOG_NOTICE, "Anticipatory shutdown began at: %lld.%06llu", now / USEC_PER_SEC, now % USEC_PER_SEC);

				LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
					if (ji->per_user && ji->p) {
						job_assumes(ji, runtime_kill(ji->p, SIGUSR2) != -1);
					}
				}
			} else {
				jobmgr_log(jm, LOG_NOTICE, "Anticipatory per-user launchd shutdown");
			}

			return;
		default:
			return (void)jobmgr_assumes(jm, false);
		}
		break;
	case EVFILT_FS:
		/* A new mount may satisfy path-based job criteria. */
		if (kev->fflags & VQ_MOUNT) {
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		/* The ident distinguishes the several manager-scoped timers. */
		if( kev->ident == (uintptr_t)&sorted_calendar_events ) {
			calendarinterval_callback();
		} else if( kev->ident == (uintptr_t)jm ) {
			jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
			jobmgr_still_alive_with_check(jm);
		} else if( kev->ident == (uintptr_t)&jm->reboot_flags ) {
			jobmgr_do_garbage_collection(jm);
		} else if( kev->ident == (uintptr_t)&g_runtime_busy_time ) {
			jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
			if( jobmgr_assumes(jm, runtime_busy_cnt == 0) ) {
				return launchd_shutdown();
			}
		}
		break;
	case EVFILT_VNODE:
		if( kev->ident == (uintptr_t)s_no_hang_fd ) {
			/* autofs is now usable; swap the watch fd for the real device. */
			int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
			if( unlikely(_no_hang_fd != -1) ) {
				jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
				jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
				jobmgr_assumes(root_jobmgr, runtime_close(s_no_hang_fd) != -1);
				s_no_hang_fd = _fd(_no_hang_fd);
			}
		} else if( pid1_magic && g_console && kev->ident == (uintptr_t)fileno(g_console) ) {
			/* The console went away (e.g. was revoked); reopen it. */
			int cfd = -1;
			if( launchd_assumes((cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1) ) {
				_fd(cfd);
				if( !launchd_assumes((g_console = fdopen(cfd, "w")) != NULL) ) {
					close(cfd);
				}
			}
		}
		break;
	default:
		return (void)jobmgr_assumes(jm, false);
	}
}
3557
void
job_callback(void *obj, struct kevent *kev)
{
	/* Top-level kevent demultiplexer for per-job events: route the event to
	 * the handler matching its filter type.
	 */
	job_t j = obj;

	job_log(j, LOG_DEBUG, "Dispatching kevent callback.");

	if (kev->filter == EVFILT_PROC) {
		job_callback_proc(j, kev);
	} else if (kev->filter == EVFILT_TIMER) {
		job_callback_timer(j, (void *)kev->ident);
	} else if (kev->filter == EVFILT_VNODE) {
		semaphoreitem_callback(j, kev);
	} else if (kev->filter == EVFILT_READ) {
		job_callback_read(j, (int)kev->ident);
	} else if (kev->filter == EVFILT_MACHPORT) {
		(void)job_dispatch(j, true);
	} else {
		(void)job_assumes(j, false);
	}
}
3580
void
job_start(job_t j)
{
	/* Launch the job's process: enforce the respawn throttle, set up the
	 * IPC/exec-sync socketpairs and the stdout/stderr pipe, fork, and do the
	 * parent-side bookkeeping (PID hash, kevent registration, fd handoff).
	 * The child side ends in job_start_child(), which never returns.
	 */
	uint64_t td;
	int spair[2];
	int execspair[2];
	int oepair[2];
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_REAP;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (unlikely(job_active(j))) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of the wall clock
	 * wherever possible.
	 */
	td = runtime_get_nanoseconds_since(j->start_time);
	td /= NSEC_PER_SEC;

	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;

		/*
		 * We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */

		job_log(j, LOG_WARNING, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
		job_ignore(j);
		return;
	}

	/* sipc: does this job need the launchd check-in IPC socket? */
	if (likely(!j->legacy_mach_job)) {
		sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_special_privileges;
	}

	if( sipc ) {
		job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
	}

	/* execspair synchronizes the child's exec with the parent's kevent setup. */
	job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);

	/* oepair captures the child's stdout/stderr for forwarding to syslog. */
	if (likely(!j->legacy_mach_job) && job_assumes(j, pipe(oepair) != -1)) {
		j->log_redirect_fd = _fd(oepair[0]);
		job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
		job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
	}

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		/* fork() failed: undo the fd setup and retry in one second. */
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j) != -1);
		job_ignore(j);

		job_assumes(j, runtime_close(execspair[0]) == 0);
		job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			job_assumes(j, runtime_close(spair[0]) == 0);
			job_assumes(j, runtime_close(spair[1]) == 0);
		}
		if (likely(!j->legacy_mach_job)) {
			job_assumes(j, runtime_close(oepair[0]) != -1);
			job_assumes(j, runtime_close(oepair[1]) != -1);
			j->log_redirect_fd = 0;
		}
		break;
	case 0:
		/* Child: wire up stdout/stderr, wait for the parent's go-ahead, then exec. */
		if (unlikely(_vproc_post_fork_ping())) {
			_exit(EXIT_FAILURE);
		}
		if (!j->legacy_mach_job) {
			job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
			job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
			job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		job_assumes(j, runtime_close(execspair[0]) == 0);
		/* wait for our parent to say they've attached a kevent to us */
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			job_assumes(j, runtime_close(spair[0]) == 0);
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		/* Parent: record the child and register for its process events. */
		j->start_time = runtime_get_opaque_time();

		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->did_exec = false;
		j->checkedin = false;
		j->start_pending = false;
		j->reaped = false;
		j->crashed = false;
		j->stopped = false;
		if( j->needs_kickoff ) {
			j->needs_kickoff = false;

			/* A kickoff-only job with no semaphores runs continuously. */
			if( SLIST_EMPTY(&j->semaphores) ) {
				j->ondemand = false;
			}
		}

		if( j->has_console ) {
			g_wsp = c;
		}

		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);

		if (likely(!j->legacy_mach_job)) {
			job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		j->p = c;
		if (unlikely(j->hopefully_exits_first)) {
			j->mgr->hopefully_first_cnt++;
		} else if (likely(!j->hopefully_exits_last)) {
			j->mgr->normal_active_cnt++;
		}
		j->fork_fd = _fd(execspair[0]);
		job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			job_assumes(j, runtime_close(spair[1]) == 0);
			ipc_open(_fd(spair[0]), j);
		}
		if (job_assumes(j, kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1)) {
			job_ignore(j);
		} else {
			/* Couldn't watch the child; reap it immediately. */
			job_reap(j);
		}

		j->wait4debugger_oneshot = false;

		/* One-shot environment entries are consumed by this launch. */
		struct envitem *ei = NULL, *et = NULL;
		SLIST_FOREACH_SAFE( ei, &j->env, sle, et ) {
			if( ei->one_shot ) {
				SLIST_REMOVE(&j->env, ei, envitem, sle);
			}
		}

		if (likely(!j->stall_before_exec)) {
			job_uncork_fork(j);
		}
		break;
	}
}
3741
void
job_start_child(job_t j)
{
	/* Child-process side of job_start(): build argv (optionally globbing),
	 * apply quarantine/sandbox restrictions, and exec the program via
	 * posix_spawn with POSIX_SPAWN_SETEXEC (exec-in-place). Never returns;
	 * any failure path calls _exit().
	 */
	typeof(posix_spawn) *psf;
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC;
	size_t binpref_out_cnt = 0;
	size_t i;

	job_assumes(j, posix_spawnattr_init(&spattr) == 0);

	job_setup_attributes(j);

	/* argv[0] is reserved (GLOB_DOOFFS / explicit slot) for launchproxy in
	 * the inetd-compat case; it is skipped below otherwise.
	 */
	if (unlikely(j->argv && j->globargv)) {
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (likely(j->argv)) {
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	/* Non-inetd jobs don't go through launchproxy; drop its argv slot. */
	if (likely(!j->inetcompat)) {
		argv++;
	}

	if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
		job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

	job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);

	/* Preferred CPU/binary architecture ordering, if configured. */
	if (unlikely(j->j_binpref_cnt)) {
		job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
		job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
				job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
			}
		}
	}
#endif

#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}
#endif

	/* Explicit Program path uses posix_spawn; bare argv searches PATH. */
	psf = j->prog ? posix_spawn : posix_spawnp;

	if (likely(!j->inetcompat)) {
		file2exec = j->prog ? j->prog : argv[0];
	}

	/* With POSIX_SPAWN_SETEXEC this only returns on failure. */
	errno = psf(NULL, file2exec, NULL, &spattr, (char *const*)argv, environ);
	job_log_error(j, LOG_ERR, "posix_spawn(\"%s\", ...)", file2exec);

#if HAVE_SANDBOX
out_bad:
#endif
	_exit(EXIT_FAILURE);
}
3841
3842 void
3843 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
3844 {
3845 launch_data_t tmp;
3846 struct envitem *ei;
3847 job_t ji;
3848
3849 if (jm->parentmgr) {
3850 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
3851 } else {
3852 char **tmpenviron = environ;
3853 for (; *tmpenviron; tmpenviron++) {
3854 char envkey[1024];
3855 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
3856 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
3857 strncpy(envkey, *tmpenviron, sizeof(envkey));
3858 *(strchr(envkey, '=')) = '\0';
3859 launch_data_dict_insert(dict, s, envkey);
3860 }
3861 }
3862
3863 LIST_FOREACH(ji, &jm->jobs, sle) {
3864 SLIST_FOREACH(ei, &ji->global_env, sle) {
3865 if ((tmp = launch_data_new_string(ei->value))) {
3866 launch_data_dict_insert(dict, tmp, ei->key);
3867 }
3868 }
3869 }
3870 }
3871
3872 void
3873 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
3874 {
3875 struct envitem *ei;
3876 job_t ji;
3877
3878 if (jm->parentmgr) {
3879 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
3880 }
3881
3882 LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
3883 SLIST_FOREACH(ei, &ji->global_env, sle) {
3884 setenv(ei->key, ei->value, 1);
3885 }
3886 }
3887 }
3888
void
job_log_pids_with_weird_uids(job_t j)
{
	/* Apple-internal diagnostic: scan the whole process table and log every
	 * process whose real, effective, or saved UID matches this job's
	 * mach_uid — i.e. processes running under a UID with no backing account.
	 */
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	size_t len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *procs;
	uid_t target = j->mach_uid;
	size_t idx, nprocs;

	if (!do_apple_internal_logging) {
		return;
	}

	procs = malloc(len);

	if (!job_assumes(j, procs != NULL)) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, target, 0);

	if (!job_assumes(j, sysctl(mib, 3, procs, &len, NULL, 0) != -1)) {
		goto out;
	}

	/* sysctl() rewrote len to the number of bytes actually returned. */
	nprocs = len / sizeof(struct kinfo_proc);

	for (idx = 0; idx < nprocs; idx++) {
		uid_t euid = procs[idx].kp_eproc.e_ucred.cr_uid;
		uid_t ruid = procs[idx].kp_eproc.e_pcred.p_ruid;
		uid_t svuid = procs[idx].kp_eproc.e_pcred.p_svuid;
		pid_t pid = procs[idx].kp_proc.p_pid;

		if (euid != target && ruid != target && svuid != target) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u",
				pid, procs[idx].kp_proc.p_comm, ruid, euid, svuid);

		/* Temporarily disabled due to 5423935 and 4946119. */
#if 0
		/* Ask the accountless process to exit. */
		job_assumes(j, runtime_kill(pid, SIGTERM) != -1);
#endif
	}

out:
	free(procs);
}
3938
void
job_postfork_test_user(job_t j)
{
	/* This function is all about 5201578 */

	/* Post-fork, pre-exec sanity check for the non-root case: verify that
	 * the inherited HOME/USER/LOGNAME environment variables still agree
	 * with the passwd database and with our actual UID/GID. Mismatches are
	 * logged; the punitive kill/exit path is compiled out below.
	 */
	const char *home_env_var = getenv("HOME");
	const char *user_env_var = getenv("USER");
	const char *logname_env_var = getenv("LOGNAME");
	uid_t tmp_uid, local_uid = getuid();
	gid_t tmp_gid, local_gid = getgid();
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	char loginname[2000];
	struct passwd *pwe;


	if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
				&& strcmp(user_env_var, logname_env_var) == 0)) {
		goto out_bad;
	}

	if ((pwe = getpwnam(user_env_var)) == NULL) {
		job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
		goto out_bad;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	tmp_uid = pwe->pw_uid;
	tmp_gid = pwe->pw_gid;

	/* NOTE: shellpath is copied for symmetry with the checks below but is
	 * not consulted in this function.
	 */
	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	if (strcmp(loginname, logname_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
		goto out_bad;
	}
	if (strcmp(homedir, home_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
		goto out_bad;
	}
	if (local_uid != tmp_uid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'U', tmp_uid, local_uid);
		goto out_bad;
	}
	if (local_gid != tmp_gid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'G', tmp_gid, local_gid);
		goto out_bad;
	}

	return;
out_bad:
#if 0
	job_assumes(j, runtime_kill(getppid(), SIGTERM) != -1);
	_exit(EXIT_FAILURE);
#else
	job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
#endif
}
4008
/*
 * Drop privileges in the forked child before exec().
 *
 * Resolves the target account (by UserName, or by the UID a Mach service was
 * registered under), then applies setlogin() -> setgid() -> initgroups() ->
 * setuid() in that order, and finally seeds the conventional account
 * environment variables (TMPDIR/SHELL/HOME/USER/LOGNAME) without clobbering
 * any values the job already set. Any failure to become the requested user
 * is fatal to the child (_exit).
 */
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	/* Not root: we cannot change identity; just sanity-check the environment. */
	if (getuid() != 0) {
		return job_postfork_test_user(j);
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = getpwnam(j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(EXIT_FAILURE);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_log_pids_with_weird_uids(j);
			_exit(EXIT_FAILURE);
		}
	} else {
		/* No user requested: stay as root. */
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	/* Refuse to run jobs for accounts whose password entry has expired. */
	if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	/* Warn (but continue) when the configured name/UID maps to a different account. */
	if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	/* An explicit GroupName overrides the account's primary group. */
	if (j->groupname) {
		struct group *gre;

		if (unlikely((gre = getgrnam(j->groupname)) == NULL)) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(EXIT_FAILURE);
		}

		desired_gid = gre->gr_gid;
	}

	if (!job_assumes(j, setlogin(loginname) != -1)) {
		_exit(EXIT_FAILURE);
	}

	if (!job_assumes(j, setgid(desired_gid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (likely(!j->no_init_groups)) {
#if 1
		if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
			_exit(EXIT_FAILURE);
		}
#else
		/* Do our own little initgroups(). We do this to guarantee that we're
		 * always opted into dynamic group resolution in the kernel. initgroups(3)
		 * does not make this guarantee.
		 */
		int groups[NGROUPS], ngroups;

		/* A failure here isn't fatal, and we'll still get data we can use. */
		job_assumes(j, getgrouplist(j->username, desired_gid, groups, &ngroups) != -1);

		if( !job_assumes(j, syscall(SYS_initgroups, ngroups, groups, desired_uid) != -1) ) {
			_exit(EXIT_FAILURE);
		}
#endif
	}

	/* setuid() last: after this we can no longer regain privileges. */
	if (!job_assumes(j, setuid(desired_uid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (likely(r > 0 && r < sizeof(tmpdirpath))) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	/* Final argument 0: do not overwrite values the job configured itself. */
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
4135
/*
 * Apply all per-job process attributes in the forked child before exec():
 * nice value, resource limits, audit session (per-user launchd), security
 * session, I/O policy, chroot, user identity, working directory, umask,
 * stdio redirection, environment, and process group / session membership.
 * Ordering here is deliberate (e.g. chroot before dropping root,
 * job_postfork_become_user() before chdir to the working directory).
 */
void
job_setup_attributes(job_t j)
{
	struct limititem *li;
	struct envitem *ei;

	if (unlikely(j->setnice)) {
		job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
	}

	/* Merge each configured soft/hard limit into the inherited rlimits. */
	SLIST_FOREACH(li, &j->limits, sle) {
		struct rlimit rl;

		if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
			continue;
		}

		if (li->sethard) {
			rl.rlim_max = li->lim.rlim_max;
		}
		if (li->setsoft) {
			rl.rlim_cur = li->lim.rlim_cur;
		}

		if (setrlimit(li->which, &rl) == -1) {
			job_log_error(j, LOG_WARNING, "setrlimit()");
		}
	}

#if !TARGET_OS_EMBEDDED
	/* Per-user launchds get their own audit session keyed by the user's UID. */
	if( unlikely(j->per_user) ) {
		auditinfo_addr_t auinfo = {
			.ai_termid = { .at_type = AU_IPv4 },
			.ai_auid = j->mach_uid,
			.ai_asid = AU_ASSIGN_ASID,
		};
		(void)au_user_mask(j->username, &auinfo.ai_mask);

		if( !launchd_assumes(setaudit_addr(&auinfo, sizeof(auinfo)) != -1) ) {
			runtime_syslog(LOG_WARNING, "Could not set audit session! (errno = %d)", errno);
			_exit(EXIT_FAILURE);
		} else {
			job_log(j, LOG_DEBUG, "Created new security session for per-user launchd.");
		}
	}
#endif

	if (unlikely(!j->inetcompat && j->session_create)) {
		launchd_SessionCreate();
	}

	if (unlikely(j->low_pri_io)) {
		job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
	}
	/* chroot while we still have the privileges to do so. */
	if (unlikely(j->rootdir)) {
		job_assumes(j, chroot(j->rootdir) != -1);
		job_assumes(j, chdir(".") != -1);
	}

	job_postfork_become_user(j);

	/* WorkingDirectory is resolved as the target user, after any chroot. */
	if (unlikely(j->workingdir)) {
		job_assumes(j, chdir(j->workingdir) != -1);
	}

	if (unlikely(j->setmask)) {
		umask(j->mask);
	}

	/* Wire up stdio: an inherited fd wins over a configured path. */
	if (j->stdin_fd) {
		job_assumes(j, dup2(j->stdin_fd, STDIN_FILENO) != -1);
	} else {
		job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
	}
	job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
	job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);

	jobmgr_setup_env_from_other_jobs(j->mgr);

	SLIST_FOREACH(ei, &j->env, sle) {
		setenv(ei->key, ei->value, 1);
	}

	if( do_apple_internal_logging ) {
		setenv(LAUNCHD_DO_APPLE_INTERNAL_LOGGING, "true", 1);
	}

#if !TARGET_OS_EMBEDDED
	if( j->jetsam_properties ) {
		job_assumes(j, proc_setpcontrol(PROC_SETPC_TERMINATE) == 0);
	}
#endif

#if TARGET_OS_EMBEDDED
	if( j->main_thread_priority != 0 ) {
		struct sched_param params;
		bzero(&params, sizeof(params));
		params.sched_priority = j->main_thread_priority;
		job_assumes(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params) != -1);
	}
#endif

	/*
	 * We'd like to call setsid() unconditionally, but we have reason to
	 * believe that prevents launchd from being able to send signals to
	 * setuid children. We'll settle for process-groups.
	 */
	if (getppid() != 1) {
		job_assumes(j, setpgid(0, 0) != -1);
	} else {
		job_assumes(j, setsid() != -1);
	}
}
4249
4250 void
4251 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
4252 {
4253 int fd;
4254
4255 if (!path) {
4256 return;
4257 }
4258
4259 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
4260 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
4261 return;
4262 }
4263
4264 job_assumes(j, dup2(fd, target_fd) != -1);
4265 job_assumes(j, runtime_close(fd) == 0);
4266 }
4267
4268 int
4269 dir_has_files(job_t j, const char *path)
4270 {
4271 DIR *dd = opendir(path);
4272 struct dirent *de;
4273 bool r = 0;
4274
4275 if (unlikely(!dd)) {
4276 return -1;
4277 }
4278
4279 while ((de = readdir(dd))) {
4280 if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
4281 r = 1;
4282 break;
4283 }
4284 }
4285
4286 job_assumes(j, closedir(dd) == 0);
4287 return r;
4288 }
4289
/*
 * Compute the next fire time for a calendar interval, insert it into the
 * globally sorted list of calendar events, and re-arm the single shared
 * absolute-time kevent timer to the earliest pending event.
 */
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	/* cron semantics: when both weekday and month-day are given, whichever
	 * matches sooner wins. */
	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	/* Keep sorted_calendar_events ordered by when_next (earliest first). */
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		/* ci must want to fire after every other timer, or there are no timers */

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	/* One shared timer for all calendar jobs, armed to the earliest event. */
	if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
		char time_string[100];
		size_t time_string_len;

		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		/* Strip ctime_r()'s trailing newline before logging. */
		if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
4345
/*
 * Pull the revision number out of an RCS tag such as "$Revision: 24003 $".
 * Copies the token following the first space of 'i' into 'o' (at most osz
 * bytes, always NUL-terminated when osz > 0) and truncates it at the next
 * space. If 'i' contains no space at all, it is copied verbatim.
 */
void
extract_rcsid_substr(const char *i, char *o, size_t osz)
{
	const char *after_space = strchr(i, ' ');
	char *trailer;

	if (after_space == NULL) {
		snprintf(o, osz, "%s", i);
		return;
	}

	snprintf(o, osz, "%s", after_space + 1);

	trailer = strchr(o, ' ');
	if (trailer != NULL) {
		*trailer = '\0';
	}
}
4361
/*
 * Record an internal consistency failure (failed jobmgr_assumes()) for a
 * job manager: emits a ktrace point and logs file, line, file revision and
 * the errno in effect at the call site.
 */
void
jobmgr_log_bug(jobmgr_t jm, unsigned int line)
{
	/* Basename of __FILE__, computed once and cached across calls. */
	static const char *file;
	/* Preserve the caller's errno before any call below can clobber it. */
	int saved_errno = errno;
	char buf[100];

	runtime_ktrace1(RTKT_LAUNCHD_BUG);

	extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));

	if (!file) {
		file = strrchr(__FILE__, '/');
		if (!file) {
			file = __FILE__;
		} else {
			file += 1;
		}
	}

	/* the only time 'jm' should not be set is if setting up the first bootstrap fails for some reason */
	if (likely(jm)) {
		jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
	} else {
		runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
	}
}
4389
/*
 * Record an internal consistency failure (failed job_assumes()) for a job.
 * Mirrors jobmgr_log_bug(): ktrace point plus a log line carrying file,
 * line, file revision and the errno in effect at the call site.
 */
void
job_log_bug(job_t j, unsigned int line)
{
	/* Basename of __FILE__, computed once and cached across calls. */
	static const char *file;
	/* Preserve the caller's errno before any call below can clobber it. */
	int saved_errno = errno;
	char buf[100];

	runtime_ktrace1(RTKT_LAUNCHD_BUG);

	extract_rcsid_substr(__rcs_file_version__, buf, sizeof(buf));

	if (!file) {
		file = strrchr(__FILE__, '/');
		if (!file) {
			file = __FILE__;
		} else {
			file += 1;
		}
	}

	/* I cannot think of any reason why 'j' should ever be NULL, nor have I ever seen the case in the wild */
	if (likely(j)) {
		job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
	} else {
		runtime_syslog(LOG_NOTICE, "Bug: %s:%u (%s):%u", file, line, buf, saved_errno);
	}
}
4417
/*
 * Core formatter for job-scoped log messages.
 * Prefixes the message with strerror(err) when err is non-zero, tags it
 * with the job's label/manager/pid, and hands it to the runtime syslog.
 * When j->debug is set, the log mask is temporarily opened up to LOG_DEBUG
 * for this one message.
 */
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	const char *label2use = j ? j->label : "com.apple.launchd.NULL";
	const char *mgr2use = j ? j->mgr->name : "NULL";
	struct runtime_syslog_attr attr = { g_my_label, label2use, mgr2use, pri, getuid(), getpid(), j ? j->p : 0 };
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	/*
	 * Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(), but before the exec*(). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	/* 200 bytes of slack covers the label prefix and strerror() text. */
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
#else
		snprintf(newmsg, newmsgsz, "(%s) %s: %s", label2use, msg, strerror(err));
#endif
	} else {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s", msg);
#else
		snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
#endif
	}

	if( j && unlikely(j->debug) ) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	runtime_vsyslog(&attr, newmsg, ap);

	/* Restore the caller's log mask after a debug-forced message. */
	if( j && unlikely(j->debug) ) {
		setlogmask(oldmask);
	}
}
4464
/* Variadic wrapper: log a job-scoped message with the current errno appended. */
void
job_log_error(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, errno, msg, ap);
	va_end(ap);
}
4474
/* Variadic wrapper: log a job-scoped message with no errno decoration. */
void
job_log(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, 0, msg, ap);
	va_end(ap);
}
4484
#if 0
/* Currently unused (compiled out): jobmgr counterpart of job_log_error(),
 * logging a manager-scoped message with the current errno appended. */
void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}
#endif
4496
/* Variadic wrapper: log a job-manager-scoped message with no errno decoration. */
void
jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, 0, msg, ap);
	va_end(ap);
}
4506
/*
 * Core formatter for job-manager-scoped log messages.
 * Prepends the manager's name (with any '%' doubled so the name survives
 * being re-used as part of a format string), then walks up the manager
 * hierarchy so nested managers produce a "parent: child: msg" chain; only
 * the root manager actually emits to syslog, consuming 'ap' exactly once.
 */
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	/* Worst case every character is '%' and gets doubled: 2*len + NUL. */
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		/* Recurse upward; err was already folded into newmsg above. */
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct runtime_syslog_attr attr = { g_my_label, g_my_label, jm->name, pri, getuid(), getpid(), getpid() };

		runtime_vsyslog(&attr, newmsg, ap);
	}
}
4541
/*
 * Stop watching a path-based KeepAlive semaphore: delete its vnode kevent,
 * if one is currently registered.
 */
void
semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
{
	if (si->fd == -1) {
		/* No descriptor, hence nothing registered with the kqueue. */
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
	job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
}
4550
/*
 * Arm a vnode kevent for a path-based KeepAlive semaphore.
 * Chooses the event mask from the semaphore type, opens the target path
 * (or its parent directory when the target is missing or is a device
 * node), and registers the descriptor with the kqueue. Falls back to a
 * 3-second polling timer on filesystems without kqueue support.
 */
void
semaphoreitem_watch(job_t j, struct semaphoreitem *si)
{
	char *parentdir, tmp_path[PATH_MAX];
	int saved_errno = 0;
	int fflags = NOTE_DELETE|NOTE_RENAME;

	/* Broaden the event mask progressively: each case adds to the ones below it. */
	switch (si->why) {
	case DIR_NOT_EMPTY:
	case PATH_CHANGES:
		fflags |= NOTE_ATTRIB|NOTE_LINK;
		/* fall through */
	case PATH_EXISTS:
		fflags |= NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
		/* fall through */
	case PATH_MISSING:
		break;
	default:
		return;
	}

	/* dirname() may modify tmp_path */
	strlcpy(tmp_path, si->what, sizeof(tmp_path));

	if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
		return;
	}

	/* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
	do {
		if (si->fd == -1) {
			struct stat sb;
			if( stat(si->what, &sb) == 0 ) {
				/* If we're watching a character or block device, only watch the parent directory.
				 * See rdar://problem/6489900 for the gory details. Basically, holding an open file
				 * descriptor to a devnode could end up (a) blocking us on open(2) until someone else
				 * open(2)s the file (like a character device that waits for a carrier signal) or
				 * (b) preventing other processes from obtaining an exclusive lock on the file, even
				 * though we're opening it with O_EVTONLY.
				 *
				 * The main point of contention is that O_EVTONLY doesn't actually mean "event only".
				 * It means "Don't prevent unmounts of this descriptor's volume". We work around this
				 * for dev nodes by only watching the parent directory and stat(2)ing our desired file
				 * each time the parent changes to see if it appeared or disappeared.
				 */
				if( S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode) ) {
					si->fd = _fd(open(si->what, O_EVTONLY | O_NOCTTY | O_NONBLOCK));
				}
			}

			/* Target missing or not a regular file/directory: watch the parent instead. */
			if( si->fd == -1 ) {
				si->watching_parent = job_assumes(j, (si->fd = _fd(open(parentdir, O_EVTONLY | O_NOCTTY | O_NONBLOCK))) != -1);
			} else {
				si->watching_parent = false;
			}
		}

		if (si->fd == -1) {
			return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", si->what);
		}

		job_log(j, LOG_DEBUG, "Watching %svnode (%s): %d", si->watching_parent ? "parent ": "", si->what, si->fd);

		if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
			saved_errno = errno;
			/*
			 * The FD can be revoked between the open() and kevent().
			 * This is similar to the inability for kevents to be
			 * attached to short lived zombie processes after fork()
			 * but before kevent().
			 */
			job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1;
		}
	} while (unlikely((si->fd == -1) && (saved_errno == ENOENT)));

	if (saved_errno == ENOTSUP) {
		/*
		 * 3524219 NFS needs kqueue support
		 * 4124079 VFS needs generic kqueue support
		 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
		 */
		job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);

		if (!j->poll_for_vfs_changes) {
			j->poll_for_vfs_changes = true;
			job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
		}
	}
}
4641
/*
 * Handle a vnode kevent for one of a job's path-based KeepAlive semaphores.
 * Finds the semaphore matching the event's descriptor, closes and re-arms
 * the watch if the vnode was invalidated (deleted/renamed/revoked), and
 * marks the job for dispatch.
 */
void
semaphoreitem_callback(job_t j, struct kevent *kev)
{
	char invalidation_reason[100] = "";
	struct semaphoreitem *si;

	/* Locate the path semaphore whose descriptor fired this event. */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		switch (si->why) {
		case PATH_CHANGES:
		case PATH_EXISTS:
		case PATH_MISSING:
		case DIR_NOT_EMPTY:
			job_log(j, LOG_DEBUG, "P%s changed (%u): %s", si->watching_parent ? "arent path" : "ath", si->why, si->what);
			break;
		default:
			continue;
		}

		if (si->fd == (int)kev->ident) {
			break;
		}
	}

	if (!job_assumes(j, si != NULL)) {
		return;
	}

	/* Build a human-readable "deleted/renamed/revoked" summary; any of these
	 * means the open descriptor no longer tracks the path. The 100-byte
	 * buffer comfortably fits the longest possible combination. */
	if (NOTE_DELETE & kev->fflags) {
		strcat(invalidation_reason, "deleted");
	}

	if (NOTE_RENAME & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/renamed");
		} else {
			strcat(invalidation_reason, "renamed");
		}
	}

	if (NOTE_REVOKE & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/revoked");
		} else {
			strcat(invalidation_reason, "revoked");
		}
	}

	if (invalidation_reason[0]) {
		job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
		job_assumes(j, runtime_close(si->fd) == 0);
		si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
	}

	if( !si->watching_parent ) {
		if (si->why == PATH_CHANGES) {
			j->start_pending = true;
		} else {
			semaphoreitem_watch(j, si);
		}
	} else { /* Something happened to the parent directory. See if our target file appeared. */
		if( !invalidation_reason[0] ) {
			job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
			semaphoreitem_watch(j, si);
		}
		/* Need to think about what should happen if the parent directory goes invalid. */
	}

	job_dispatch(j, false);
}
4712
/* Context threaded through launch_data_dict_iterate() while parsing a
 * StartCalendarInterval dictionary: the job being configured plus the
 * struct tm being filled in (tm_sec == -1 flags a parse error). */
struct cal_dict_walk {
	job_t j;
	struct tm tmptm;
};
4717
/*
 * Dictionary-iteration callback: validate one StartCalendarInterval key
 * (Minute/Hour/Day/Weekday/Month) and store it into the struct tm.
 * Out-of-range or non-integer values set tm_sec to -1, which the caller
 * checks as the error flag.
 */
void
calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
{
	struct cal_dict_walk *cdw = context;
	struct tm *tmptm = &cdw->tmptm;
	job_t j = cdw->j;
	int64_t val;

	if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
		/* hack to let caller know something went wrong */
		tmptm->tm_sec = -1;
		return;
	}

	val = launch_data_get_integer(obj);

	if (val < 0) {
		job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
		if( val > 59 ) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_min = (typeof(tmptm->tm_min)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
		if( val > 23 ) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
		if( val < 1 || val > 31 ) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
		/* 0-7: both 0 and 7 conventionally mean Sunday, as in cron. */
		if( val > 7 ) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
		if( val > 12 ) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
			tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
		}
	}
}
4774
4775 bool
4776 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
4777 {
4778 struct cal_dict_walk cdw;
4779
4780 cdw.j = j;
4781 memset(&cdw.tmptm, 0, sizeof(0));
4782
4783 cdw.tmptm.tm_min = -1;
4784 cdw.tmptm.tm_hour = -1;
4785 cdw.tmptm.tm_mday = -1;
4786 cdw.tmptm.tm_wday = -1;
4787 cdw.tmptm.tm_mon = -1;
4788
4789 if (!job_assumes(j, obj != NULL)) {
4790 return false;
4791 }
4792
4793 if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
4794 return false;
4795 }
4796
4797 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
4798
4799 if (unlikely(cdw.tmptm.tm_sec == -1)) {
4800 return false;
4801 }
4802
4803 return calendarinterval_new(j, &cdw.tmptm);
4804 }
4805
/*
 * Allocate and register a calendar interval for a job: record the wildcard
 * pattern, link it onto the job's list, schedule its first firing, and take
 * a weak runtime reference so launchd stays alive to service it.
 * Returns false only on allocation failure.
 */
bool
calendarinterval_new(job_t j, struct tm *w)
{
	struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));

	if (!job_assumes(j, ci != NULL)) {
		return false;
	}

	ci->when = *w;
	ci->job = j;

	SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);

	/* Computes when_next and inserts ci into the global sorted event list. */
	calendarinterval_setalarm(j, ci);

	runtime_add_weak_ref();

	return true;
}
4826
/* Unlink a calendar interval from both the job's list and the global sorted
 * event list, free it, and drop the weak reference taken at creation. */
void
calendarinterval_delete(job_t j, struct calendarinterval *ci)
{
	SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
	LIST_REMOVE(ci, global_sle);

	free(ci);

	runtime_del_weak_ref();
}
4837
/*
 * Watchdog for the shared calendar timer: if the earliest scheduled event
 * is already in the past (e.g. after a clock jump or a missed kevent),
 * raise SIGUSR1 so the pending events get serviced.
 */
void
calendarinterval_sanity_check(void)
{
	/* The list is sorted, so the head is the earliest pending event. */
	struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
	time_t now = time(NULL);

	if (unlikely(ci && (ci->when_next < now))) {
		jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
	}
}
4848
/*
 * Shared calendar timer fired: start every job whose calendar event is due,
 * rescheduling each event for its next occurrence as we go.
 */
void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	/* _SAFE variant: calendarinterval_setalarm() re-links ci into the list. */
	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		/* Sorted list: the first event in the future ends the scan. */
		if (ci->when_next > now) {
			break;
		}

		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
4869
/*
 * Record a named group of listening sockets for a job. Copies the fd array
 * and the name (into the struct's trailing name storage), links the group
 * onto the job, and takes a weak runtime reference.
 * Returns false only on allocation failure.
 */
bool
socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt, bool junkfds)
{
	/* Single allocation sized to hold the struct plus the name string. */
	struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);

	if (!job_assumes(j, sg != NULL)) {
		return false;
	}

	sg->fds = calloc(1, fd_cnt * sizeof(int));
	sg->fd_cnt = fd_cnt;
	sg->junkfds = junkfds;

	if (!job_assumes(j, sg->fds != NULL)) {
		free(sg);
		return false;
	}

	memcpy(sg->fds, fds, fd_cnt * sizeof(int));
	strcpy(sg->name_init, name);

	SLIST_INSERT_HEAD(&j->sockets, sg, sle);

	runtime_add_weak_ref();

	return true;
}
4897
/*
 * Tear down a socket group: close every descriptor, unlink the group from
 * the job, free its storage, and drop the weak reference taken at creation.
 */
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		/* Disabled: unlinking AF_UNIX socket paths on teardown. */
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		/* 5480306 */
		if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			job_assumes(j, unlink(sun->sun_path) != -1);
			/* We might conditionally need to delete a directory here */
		}
#endif
		job_assumes(j, runtime_close(sg->fds[i]) != -1);
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	runtime_del_weak_ref();
}
4926
/*
 * Add or delete EVFILT_READ kevents for every descriptor in a socket group
 * in one bulk kqueue operation, logging the full fd list. Groups marked
 * junkfds are never registered.
 */
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	struct kevent kev[sg->fd_cnt];
	char buf[10000];
	unsigned int i, buf_off = 0;

	if (unlikely(sg->junkfds)) {
		return;
	}

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);

	/* Bulk mod uses EV_RECEIPT-style replies: each entry comes back with
	 * EV_ERROR set and its per-fd result code in kev[i].data (0 == success). */
	for (i = 0; i < sg->fd_cnt; i++) {
		job_assumes(j, kev[i].flags & EV_ERROR);
		errno = (typeof(errno)) kev[i].data;
		job_assumes(j, kev[i].data == 0);
	}
}
4953
/* Stop watching a socket group's descriptors for incoming connections. */
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, false);
}
4959
/* Begin watching a socket group's descriptors for incoming connections. */
void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, true);
}
4965
/* A watched socket became readable: dispatch (launch) the job on demand. */
void
socketgroup_callback(job_t j)
{
	job_dispatch(j, true);
}
4971
/*
 * Record one environment variable for a job. Key and value are stored in a
 * single allocation (value immediately after the key's NUL in the struct's
 * trailing storage). Global entries additionally register the job on the
 * manager's global-environment list. Returns false only on allocation
 * failure.
 */
bool
envitem_new(job_t j, const char *k, const char *v, bool global, bool one_shot)
{
	struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);

	if (!job_assumes(j, ei != NULL)) {
		return false;
	}

	strcpy(ei->key_init, k);
	ei->value = ei->key_init + strlen(k) + 1;
	strcpy(ei->value, v);
	ei->one_shot = one_shot;

	if (global) {
		/* First global entry: put this job on the manager's global-env list. */
		if (SLIST_EMPTY(&j->global_env)) {
			LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
		}
		SLIST_INSERT_HEAD(&j->global_env, ei, sle);
	} else {
		SLIST_INSERT_HEAD(&j->env, ei, sle);
	}

	job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);

	return true;
}
4999
/*
 * Remove and free one environment entry. When the last global entry goes
 * away, the job is also taken off the manager's global-environment list.
 */
void
envitem_delete(job_t j, struct envitem *ei, bool global)
{
	if (global) {
		SLIST_REMOVE(&j->global_env, ei, envitem, sle);
		if (SLIST_EMPTY(&j->global_env)) {
			LIST_REMOVE(j, global_env_sle);
		}
	} else {
		SLIST_REMOVE(&j->env, ei, envitem, sle);
	}

	free(ei);
}
5014
/*
 * Dictionary-iteration callback for EnvironmentVariables: add each string
 * value as a (non-one-shot) environment entry, skipping keys that collide
 * with launchd's reserved trusted-fd variable prefix.
 */
void
envitem_setup(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
		return;
	}

	/* Prefix match (length excludes the NUL): reserved names are ignored. */
	if( strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0 ) {
		envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, false);
	} else {
		job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
	}
}
5030
/*
 * Same as envitem_setup(), but the entries are marked one-shot (applied to
 * the next launch only). Reserved trusted-fd variable names are ignored.
 */
void
envitem_setup_one_shot(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
		return;
	}

	/* Prefix match (length excludes the NUL): reserved names are ignored. */
	if( strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0 ) {
		envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env, true);
	} else {
		job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
	}
}
5046
/*
 * Record a resource-limit value for a job, creating the limit entry for
 * resource 'w' on first use. Whether the value lands in the hard or soft
 * slot depends on whether a HardResourceLimits or SoftResourceLimits
 * dictionary is currently being imported. Returns false only on
 * allocation failure.
 */
bool
limititem_update(job_t j, int w, rlim_t r)
{
	struct limititem *li;

	/* Reuse an existing entry for this resource, if any. */
	SLIST_FOREACH(li, &j->limits, sle) {
		if (li->which == w) {
			break;
		}
	}

	if (li == NULL) {
		li = calloc(1, sizeof(struct limititem));

		if (!job_assumes(j, li != NULL)) {
			return false;
		}

		SLIST_INSERT_HEAD(&j->limits, li, sle);

		li->which = w;
	}

	if (j->importing_hard_limits) {
		li->lim.rlim_max = r;
		li->sethard = true;
	} else {
		li->lim.rlim_cur = r;
		li->setsoft = true;
	}

	return true;
}
5080
/* Unlink and free one resource-limit entry from the job's limit list. */
void
limititem_delete(job_t j, struct limititem *li)
{
	SLIST_REMOVE(&j->limits, li, limititem, sle);

	free(li);
}
5088
#if HAVE_SANDBOX
/*
 * Dictionary-iteration callback for SandboxFlags: OR the corresponding
 * sandbox flag into the job's seatbelt flags for each boolean-true key.
 */
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	/* false just means "flag not requested" — nothing to do. */
	if (launch_data_get_bool(obj) == false) {
		return;
	}

	if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
5109
/*
 * Dictionary-iteration callback for {Hard,Soft}ResourceLimits: map the key
 * to an RLIMIT_* constant via the launchd_keys2limits table and record the
 * integer value. Unknown keys and non-integer values are ignored.
 */
void
limititem_setup(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;
	size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
	rlim_t rl;

	if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
		return;
	}

	rl = launch_data_get_integer(obj);

	/* Linear search of the key/limit translation table. */
	for (i = 0; i < limits_cnt; i++) {
		if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
			break;
		}
	}

	if (i == limits_cnt) {
		return;
	}

	limititem_update(j, launchd_keys2limits[i].val, rl);
}
5135
/*
 * Decide whether an exited job should be garbage collected rather than
 * restarted: one-shot jobs that already ran, jobs pending removal, jobs
 * exiting during shutdown, and legacy Mach jobs with no services or that
 * never checked in. Returns true when the job should be reaped.
 */
bool
job_useless(job_t j)
{
	/* start_time != 0 means the job has been launched at least once. */
	if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
		if (j->legacy_LS_job && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->mgr->shutting_down && (j->hopefully_exits_first || j->mgr->hopefully_first_cnt == 0)) {
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		if( total_children == 0 && !j->anonymous ) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last (non-anonymous) to exit during %s shutdown.", (pid1_magic && j->mgr == root_jobmgr) ? "system" : "job manager");
		} else if( total_anon_children == 0 && j->anonymous ) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last (anonymous) to exit during %s shutdown.", (pid1_magic && j->mgr == root_jobmgr) ? "system" : "job manager");
		}
		return true;
	} else if (j->legacy_mach_job) {
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	}

	return false;
}
5168
/*
 * Evaluate a job's KeepAlive criteria.  Returns true if any criterion
 * currently demands that the (on-demand) job be running.
 */
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	struct stat sb;
	/* Whether the previous instance exited via exit(0). */
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
	bool is_not_kextd = (do_apple_internal_logging || (strcmp(j->label, "com.apple.kextd") != 0));

	/* Nothing is kept alive once the job manager is shutting down. */
	if (unlikely(j->mgr->shutting_down)) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
		return false;
	}

	if( unlikely(j->needs_kickoff) ) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	/* A queued message on any of the job's Mach service ports demands a launch. */
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}

	/*
	 * Walk the KeepAlive semaphore items.  Each "positive" case below
	 * (NETWORK_UP, SUCCESSFUL_EXIT, ...) sets wanted_state = true and then
	 * deliberately falls through to its "negative" twin, which performs the
	 * shared comparison.
	 */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		int qdir_file_cnt;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			/* FALLTHROUGH */
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			/* FALLTHROUGH */
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		case PATH_EXISTS:
			wanted_state = true;
			/* FALLTHROUGH */
		case PATH_MISSING:
			if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
				return true;
			} else {
				/* Condition not satisfied: make sure the kevent watch is
				 * aimed at the right inode (parent directory vs. the file
				 * itself) so we notice when the state flips. */
				if( wanted_state ) { /* File is not there but we wish it was. */
					if( si->fd != -1 && !si->watching_parent ) { /* Need to be watching the parent now. */
						job_assumes(j, runtime_close(si->fd) == 0);
						si->fd = -1;
						semaphoreitem_watch(j, si);
					}
				} else { /* File is there but we wish it wasn't. */
					if( si->fd != -1 && si->watching_parent ) { /* Need to watch the file now. */
						job_assumes(j, runtime_close(si->fd) == 0);
						si->fd = -1;
						semaphoreitem_watch(j, si);
					}
				}
			}
			break;
		case PATH_CHANGES:
			/* Nothing to check synchronously for PATH_CHANGES. */
			break;
		case DIR_NOT_EMPTY:
			if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
				job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
			} else if (qdir_file_cnt > 0) {
				job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
				return true;
			}
			break;
		}
	}

	return false;
}
5300
5301 const char *
5302 job_active(job_t j)
5303 {
5304 struct machservice *ms;
5305 if (j->p) {
5306 return "PID is still valid";
5307 }
5308
5309 if (j->mgr->shutting_down && j->log_redirect_fd) {
5310 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5311 j->log_redirect_fd = 0;
5312 }
5313
5314 if (j->log_redirect_fd) {
5315 if (job_assumes(j, j->legacy_LS_job)) {
5316 return "Standard out/error is still valid";
5317 } else {
5318 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
5319 j->log_redirect_fd = 0;
5320 }
5321 }
5322
5323 if (j->priv_port_has_senders) {
5324 return "Privileged Port still has outstanding senders";
5325 }
5326
5327 SLIST_FOREACH(ms, &j->machservices, sle) {
5328 if (ms->recv && machservice_active(ms)) {
5329 return "Mach service is still active";
5330 }
5331 }
5332
5333 return NULL;
5334 }
5335
5336 void
5337 machservice_watch(job_t j, struct machservice *ms)
5338 {
5339 if (ms->recv) {
5340 job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
5341 }
5342 }
5343
/*
 * Stop monitoring a Mach service's port; the inverse of machservice_watch().
 */
void
machservice_ignore(job_t j, struct machservice *ms)
{
	job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
}
5349
/*
 * Destroy and recreate a Mach service's receive right, bumping the
 * generation number so stale references can be distinguished.
 */
void
machservice_resetport(job_t j, struct machservice *ms)
{
	/* The port value is about to change, so pull the entry from the
	 * port-keyed hash before destroying the old right. */
	LIST_REMOVE(ms, port_hash_sle);
	job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
	ms->gen_num++;
	job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
	job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
	/* Re-hash under the new port name. */
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
5361
/*
 * Allocate and register a Mach service named `name' for job `j'.
 *
 * If *serviceport is MACH_PORT_NULL, a new receive right (plus a send right)
 * is created and returned through *serviceport, and launchd owns the receive
 * side.  Otherwise the caller-supplied port is adopted and the service is
 * considered already active.  `pid_local' marks a per-PID service.
 *
 * Returns the new machservice, or NULL on allocation/port failure.
 */
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	/* The name is stored in the trailing space allocated past the struct. */
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);

	if (!job_assumes(j, ms != NULL)) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->gen_num = 1;
	ms->per_pid = pid_local;

	if (likely(*serviceport == MACH_PORT_NULL)) {
		if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
			goto out_bad;
		}

		if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);

	/* With a flat namespace, services hash into the root manager unless
	 * this manager is an explicit subset. */
	jobmgr_t jm_to_insert = j->mgr;
	if( g_flat_mach_namespace ) {
		jm_to_insert = (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? j->mgr : root_jobmgr;
	}

	LIST_INSERT_HEAD(&jm_to_insert->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);

	return ms;
out_bad2:
	job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
out_bad:
	free(ms);
	return NULL;
}
5410
5411 bootstrap_status_t
5412 machservice_status(struct machservice *ms)
5413 {
5414 if (ms->isActive) {
5415 return BOOTSTRAP_STATUS_ACTIVE;
5416 } else if (ms->job->ondemand) {
5417 return BOOTSTRAP_STATUS_ON_DEMAND;
5418 } else {
5419 return BOOTSTRAP_STATUS_INACTIVE;
5420 }
5421 }
5422
/*
 * Install the crash-exception handler port on a task (or, for the PID-1
 * launchd when no target task is given, on the host).  The port used is, in
 * order of preference: the job's alternate handler service, launchd's own
 * kernel port (internal handler), or the global exception server.
 */
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (unlikely(j->alt_exc_handler)) {
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (likely(ms)) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (unlikely(j->internal_exc_handler)) {
		exc_port = runtime_get_kernel_port();
	} else if (unlikely(!exc_port)) {
		/* No exception server has been claimed; nothing to install. */
		return;
	}

	/* Select the thread-state flavor for the build architecture. */
#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (likely(target_task)) {
		job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
	} else if (pid1_magic && the_exception_server) {
		mach_port_t mhp = mach_host_self();
		job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
		job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
	}
}
5461
5462 void
5463 job_set_exception_port(job_t j, mach_port_t port)
5464 {
5465 if (unlikely(!the_exception_server)) {
5466 the_exception_server = port;
5467 job_setup_exception_port(j, 0);
5468 } else {
5469 job_log(j, LOG_WARNING, "The exception server is already claimed!");
5470 }
5471 }
5472
/*
 * launch_data_dict_iterate() callback applied to each key of a MachServices
 * sub-dictionary; configures per-service options on `context' (the
 * machservice being built).
 */
void
machservice_setup_options(launch_data_t obj, const char *key, void *context)
{
	struct machservice *ms = context;
	mach_port_t mhp = mach_host_self();
	int which_port;
	bool b;

	if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
		return;
	}

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_INTEGER:
		which_port = (int)launch_data_get_integer(obj); /* XXX we should bound check this... */
		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
			switch (which_port) {
			case TASK_KERNEL_PORT:
			case TASK_HOST_PORT:
			case TASK_NAME_PORT:
			case TASK_BOOTSTRAP_PORT:
			/* I find it a little odd that zero isn't reserved in the header.
			 * Normally Mach is fairly good about this convention... */
			case 0:
				job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
				break;
			default:
				ms->special_port_num = which_port;
				SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
				break;
			}
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
			if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
				job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
			} else {
				job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
			}
		}
		/* NOTE(review): there is no break here, so integer values fall
		 * through into the boolean handling below.  This looks like a
		 * missing break -- confirm whether the fall-through is intentional
		 * before changing it. */
	case LAUNCH_DATA_BOOL:
		b = launch_data_get_bool(obj);
		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
			ms->debug_on_close = b;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
			ms->reset = b;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
			ms->hide = b;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
			job_set_exception_port(ms->job, ms->port);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
			ms->kUNCServer = b;
			job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
		}
		break;
	case LAUNCH_DATA_STRING:
		if( strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0 ) {
			const char *option = launch_data_get_string(obj);
			if( strcasecmp(option, "One") == 0 ) {
				ms->drain_one_on_crash = true;
			} else if( strcasecmp(option, "All") == 0 ) {
				ms->drain_all_on_crash = true;
			}
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		job_set_exception_port(ms->job, ms->port);
		break;
	default:
		break;
	}

	job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
}
5545
5546 void
5547 machservice_setup(launch_data_t obj, const char *key, void *context)
5548 {
5549 job_t j = context;
5550 struct machservice *ms;
5551 mach_port_t p = MACH_PORT_NULL;
5552
5553 if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
5554 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
5555 return;
5556 }
5557
5558 if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
5559 return;
5560 }
5561
5562 ms->isActive = false;
5563
5564 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
5565 launch_data_dict_iterate(obj, machservice_setup_options, ms);
5566 }
5567 }
5568
/*
 * Drive one pass of shutdown garbage collection for a job manager (and,
 * recursively, its submanagers).  Jobs are stopped/removed in phases:
 * "hopefully exits first" jobs, then normal jobs, then "hopefully exits
 * last" jobs.  Returns jm if it still exists afterwards, or NULL if the
 * manager was removed.
 */
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	jobmgr_t jmi = NULL, jmn = NULL;
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	if( !jm->shutting_down ) {
		return jm;
	}

	if( SLIST_EMPTY(&jm->submgrs) ) {
		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
	}

	int phase = -1;
	for( phase = jm->shutdown_phase; phase < JOBMGR_PHASE_LAST; phase++ ) {
		if( phase == JOBMGR_PHASE_HOPEFULLY_EXITS_LAST ) {
			if( jm == root_jobmgr ) {
				simulate_pid1_crash();
			}

			/* Before taking down the hopefully-exits-last jobs, deal with
			 * remaining stray processes (once, root manager of PID 1 only). */
			if( jm == root_jobmgr && pid1_magic && !jm->killed_stray_jobs ) {
				jobmgr_log_stray_children(jm, true);
				jm->killed_stray_jobs = true;
			}
		}

		uint32_t unkilled_cnt = 0;
		job_t ji = NULL, jn = NULL;
		LIST_FOREACH_SAFE( ji, &jm->jobs, sle, jn ) {
			/* Skip jobs that do not belong to the current phase. */
			if( phase == JOBMGR_PHASE_HOPEFULLY_EXITS_FIRST && !ji->hopefully_exits_first ) {
				continue;
			} else if( phase == JOBMGR_PHASE_NORMAL ) {
				if( ji->holds_ref ) {
					/* If we're shutting down, release the hold holds_ref jobs
					 * have on us.
					 */
					job_remove(ji);
				}

				if( ji->hopefully_exits_first || ji->hopefully_exits_last ) {
					continue;
				}
			} else if( phase == JOBMGR_PHASE_HOPEFULLY_EXITS_LAST && !ji->hopefully_exits_last ) {
				continue;
			}

			if( ji->anonymous ) {
				continue;
			}

			const char *active = job_active(ji);
			if( !active ) {
				job_log(ji, LOG_DEBUG, "Job is inactive. Removing.");
				job_remove(ji);
			} else {
				if( ji->p ) {
					if( !ji->stopped ) {
						job_log(ji, LOG_DEBUG, "Stopping job.");
						job_stop(ji);
						unkilled_cnt++;
					} else {
						if( ji->clean_kill ) {
							job_log(ji, LOG_DEBUG, "Job was clean and sent SIGKILL.");
							if( !ji->clean_exit_timer_expired ) {
								/* Give jobs that were clean and sent SIGKILL 1 second to exit after receipt. */
								unkilled_cnt++;
							} else {
								job_log(ji, LOG_ERR, "Job was clean, killed and has not exited after 1 second. Moving on.");
							}
						} else {
							job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
							unkilled_cnt += !ji->sent_sigkill;
						}
					}
				} else {
					job_log(ji, LOG_DEBUG, "Job is active: %s", active);
				}
			}
		} /* LIST_FOREACH_SAFE */

		if( unkilled_cnt == 0 ) {
			/* Everything in this phase is dead or gone; advance to the next. */
			jobmgr_log(jm, LOG_DEBUG, "Done with the %s bucket, advancing.", s_phases[jm->shutdown_phase]);
			jm->shutdown_phase++;
		} else {
			/* Jobs in this phase are still dying; stop advancing for now. */
			jobmgr_log(jm, LOG_DEBUG, "Still %u unkilled job%s in %s bucket.", unkilled_cnt, unkilled_cnt > 1 ? "s" : "", s_phases[jm->shutdown_phase]);
			phase = JOBMGR_PHASE_LAST;
		}
	} /* for */

	jobmgr_t r = jm;
	if( jm->shutdown_phase > JOBMGR_PHASE_HOPEFULLY_EXITS_LAST && SLIST_EMPTY(&jm->submgrs) ) {
		/* All phases complete and no submanagers remain: remove this manager. */
		jobmgr_log(jm, LOG_DEBUG, "Removing.");
		jobmgr_log_stray_children(jm, false);
		jobmgr_remove(jm);
		r = NULL;
	}

	return r;
}
5673
/*
 * Deal with processes that survived job shutdown.  `p' is an array of np
 * PIDs; zero entries are ignored.  The compiled-in (#if 1) policy just
 * SIGTERMs each stray and moves on; the disabled alternative below waited
 * for SIGTERM'ed processes to exit and then escalated to SIGKILL.
 */
void
jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
{
#if 1
	/* I maintain that stray processes should be at the mercy of launchd during shutdown,
	 * but nevertheless, things like diskimages-helper can stick around, and SIGKILLing
	 * them can result in data loss. So we send SIGTERM to all the strays and don't wait
	 * for them to exit before moving on.
	 *
	 * See rdar://problem/6562592
	 */
	size_t i = 0;
	for( i = 0; i < np; i++ ) {
		if( p[i] != 0 ) {
			jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
			jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
		}
	}
#else
	/* Disabled: the previous wait-then-SIGKILL implementation, kept for reference. */
	struct timespec tts = { 2, 0 }; /* Wait 2 seconds for stray children to die after being SIGTERM'ed. */
	struct timespec kts = { 1, 0 }; /* Wait 1 second for stray children to die after being SIGKILL'ed. */
	uint64_t start, end, nanosec;
	struct kevent kev;
	int r, kq = kqueue();

	if (!jobmgr_assumes(jm, kq != -1)) {
		return;
	}

	start = runtime_get_opaque_time();
	size_t i = 0, n2t = 0;
	for( i = 0; i < np; i++ ) {
		if( p[i] != 0 ) {
			EV_SET(&kev, p[i], EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, 0);

			if( jobmgr_assumes(jm, kevent(kq, &kev, 1, NULL, 0, NULL) != -1) ) {
				jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
				n2t++;
			} else {
				jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Disregarding PID %u and continuing.", p[i]);
				p[i] = 0;
			}
		}
	}

	while( n2t > 0 && (r = kevent(kq, NULL, 0, &kev, 1, &tts)) ) {
		int status = 0;
		waitpid((pid_t)kev.ident, &status, WNOHANG);

		end = runtime_get_opaque_time();
		nanosec = runtime_opaque_time_to_nano(end - start);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "PID %u died after %llu nanoseconds.", (pid_t)kev.ident, nanosec);

		for( i = 0; i < np; i++ ) {
			p[i] = ( p[i] == (pid_t)kev.ident ) ? 0 : p[i];
		}
	}

	size_t n2k = 0;
	for( i = 0; i < np; i++ ) {
		if( p[i] != 0 ) {
			jobmgr_assumes(jm, runtime_kill(p[i], SIGKILL) != -1);
			n2k++;
		}
	}

	while( n2k > 0 && (r = kevent(kq, NULL, 0, &kev, 1, &kts)) ) {
		int status = 0;
		waitpid((pid_t)kev.ident, &status, WNOHANG);

		end = runtime_get_opaque_time();
		nanosec = runtime_opaque_time_to_nano(end - start);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "PID %u was killed and died after %llu nanoseconds.", (pid_t)kev.ident, nanosec);

		for( i = 0; i < np; i++ ) {
			p[i] = ( p[i] == (pid_t)kev.ident ) ? 0 : p[i];
		}
	}

	for( i = 0; i < np; i++ ) {
		if( p[i] != 0 ) {
			jobmgr_log(jm, LOG_NOTICE | LOG_CONSOLE, "PID %u did not die after being SIGKILL'ed 1 second ago.", p[i]);
		}
	}
#endif
}
5760
/*
 * Enumerate all processes at shutdown, log any that are not launchd jobs
 * (strays), and optionally hand them to jobmgr_kill_stray_children().
 * Only runs in the root job manager of the PID-1 launchd.
 */
void
jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	size_t i, kp_cnt = 0, kp_skipped = 0, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;

	/* Only the root manager of the PID-1 launchd polices strays. */
	if (likely(jm->parentmgr || !pid1_magic)) {
		return;
	}

	if (!jobmgr_assumes(jm, (kp = malloc(len)) != NULL)) {
		return;
	}

	runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);

	if (!jobmgr_assumes(jm, sysctl(mib, 3, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	kp_cnt = len / sizeof(struct kinfo_proc);
	/* Fix: the result of calloc() was previously used unchecked; bail out
	 * gracefully if the allocation fails.  (Also use the conventional
	 * calloc(count, size) argument order.) */
	pid_t *ps = (pid_t *)calloc(kp_cnt, sizeof(pid_t));
	if (!jobmgr_assumes(jm, ps != NULL)) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = kp[i].kp_proc.p_pid;
		pid_t pp_i = kp[i].kp_eproc.e_ppid;
		pid_t pg_i = kp[i].kp_eproc.e_pgid;
		const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
		const char *n = kp[i].kp_proc.p_comm;

		/* The kernel (PID 0) and launchd itself (PID 1) are never strays. */
		if (unlikely(p_i == 0 || p_i == 1)) {
			kp_skipped++;
			continue;
		}

		/* We might have some jobs hanging around that we've decided to shut down in spite of. */
		job_t j = jobmgr_find_by_pid(jm, p_i, false);
		if( !j || j->anonymous ) {
			jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);

			int status = 0;
			if( pp_i == getpid() && !jobmgr_assumes(jm, kp[i].kp_proc.p_stat != SZOMB) ) {
				/* A zombie child of ours: just reap it instead of signaling. */
				if( jobmgr_assumes(jm, waitpid(p_i, &status, WNOHANG) == 0) ) {
					jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
				}
				kp_skipped++;
			} else {
				job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
				/* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
				 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
				 * hints to the kernel along the way, so that it could shutdown certain subsystems when
				 * their userspace emissaries go away, before the call to reboot(2).
				 */
				if( leader && leader->ignore_pg_at_shutdown ) {
					kp_skipped++;
				} else {
					ps[i] = p_i;
				}
			}
		} else {
			kp_skipped++;
		}
	}

	if( (kp_cnt - kp_skipped > 0) && kill_strays ) {
		jobmgr_kill_stray_children(jm, ps, kp_cnt);
	}

	free(ps);
out:
	free(kp);
}
5834
/* Accessor: the parent job manager, or NULL for the root manager. */
jobmgr_t
jobmgr_parent(jobmgr_t jm)
{
	return jm->parentmgr;
}
5840
/*
 * Release a forked child blocked on the fork handshake by writing its PID
 * down the fork pipe, then close our end of the pipe.
 */
void
job_uncork_fork(job_t j)
{
	pid_t c = j->p;

	job_log(j, LOG_DEBUG, "Uncorking the fork().");
	/* this unblocks the child and avoids a race
	 * between the above fork() and the kevent_mod() */
	job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
	job_assumes(j, runtime_close(j->fork_fd) != -1);
	j->fork_fd = 0;
}
5853
/*
 * Create and initialize a job manager (bootstrap).
 *
 * `jm' is the parent manager (NULL for a root), `requestorport' is the port
 * whose death tears this manager down, and `transfer_port' is an existing
 * bootstrap port to adopt (MACH_PORT_NULL to create one).  When `name' is
 * given and `no_init' is false, a launchctl bootstrapper job is created for
 * the named session, with `session_port' as its audit session.  `sflag'
 * passes -s (safe boot) to the bootstrapper.  Returns the new manager or
 * NULL on failure.
 */
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t session_port)
{
	mach_msg_size_t mxmsgsz;
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	/* The kqueue callback machinery relies on the callback being first in the struct. */
	launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	/* The manager's name lives in the trailing space allocated past the struct. */
	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));

	if (!jobmgr_assumes(jm, jmr != NULL)) {
		return NULL;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	/* Ask for a dead-name notification so we notice when the requestor dies. */
	if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && !pid1_magic) {
		/* A per-user launchd: check in with the PID-1 launchd for our
		 * bootstrap port and dispose of the trusted-fd hand-off. */
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);

			/* dup() succeeding proves lfd is a live descriptor worth closing. */
			if ((dfd = dup(lfd)) >= 0) {
				jobmgr_assumes(jmr, runtime_close(dfd) != -1);
				jobmgr_assumes(jmr, runtime_close(lfd) != -1);
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		/* cut off the Libc cache, we don't want to deadlock against ourself */
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);

		/* We set this explicitly as we start each child */
		launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
	} else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (!name) {
		/* No name was supplied; fall back to the port index. */
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!jm) {
		/* The root manager fields the shutdown signals and filesystem events. */
		jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		jobmgr_assumes(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
	}

	if (name && !no_init) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
			goto out_bad;
		}
	}

	STAILQ_INIT(&jmr->pending_samples);

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		bootstrapper->audit_session = session_port;
		if( session_port != MACH_PORT_NULL ) {
			/* Take our own send-right reference on the audit session port. */
			mach_port_mod_refs(mach_task_self(), session_port, MACH_PORT_RIGHT_SEND, 1);
		}

		jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", session_port);
		jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	}

	if (jmr->parentmgr) {
		runtime_add_weak_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
	}
	return NULL;
}
5975
/*
 * Create the per-session bootstrapper job ("/bin/launchctl bootstrap -S
 * <session_type>") for a job manager and mark the session initialized.
 * Returns the bootstrapper job (possibly NULL if job_new() failed).
 */
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);

	if( jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic) ) {
		/* Non-PID-1 (or sub-manager) case: the "weird bootstrap". */
		bootstrapper->is_bootstrapper = true;
		char buf[100];

		/* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false, false);
		bootstrapper->weird_bootstrap = true;
		jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	} else if( bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0 ) {
		bootstrapper->is_bootstrapper = true;
		if( jobmgr_assumes(jm, pid1_magic) ) {
			/* Have our system bootstrapper print out to the console. */
			bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
			bootstrapper->stderrpath = strdup(_PATH_CONSOLE);

			if( g_console ) {
				/* Watch the console vnode for revocation (NOTE_REVOKE). */
				jobmgr_assumes(jm, kevent_mod((uintptr_t)fileno(g_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
			}
		}
	}

	jm->session_initialized = true;

	return bootstrapper;
}
6012
/*
 * Purge every reference to `port' from this manager and its submanagers:
 * delete send-right machservices backed by it, and shut down any manager
 * whose requestor port it was.  Returns jm, or the result of shutting jm
 * down (possibly NULL).
 */
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right
	 * to a port we already have open, the Mach kernel gives us the same
	 * port number back and increments an reference count associated with
	 * the port. This forces us, when discovering that a receive right at
	 * the other end has been deleted, to wander all of our objects to see
	 * what weird places clients might have handed us the same send right
	 * to use.
	 */

	if (jm == root_jobmgr) {
		if (port == inherited_bootstrap_port) {
			jobmgr_assumes(jm, launchd_mport_deallocate(port) == KERN_SUCCESS);
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		/* The port hash is global, so it is only swept from the root. */
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port && !ms->recv) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
		return jobmgr_shutdown(jm);
	}

	return jm;
}
6055
/*
 * Resolve a Mach service name within a job manager.  With a nonzero
 * `target_pid', only per-PID services registered by that PID are considered.
 * Otherwise the (possibly flat) namespace is searched, optionally walking up
 * through parent managers when `check_parent' is set.  Returns NULL when the
 * service is not found.
 */
struct machservice *
jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
{
	struct machservice *ms;
	job_t target_j;

	jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);

	if (target_pid) {
		/* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
		 * bootstrap in other bootstraps.
		 */

		/* Start in the given bootstrap. */
		if( unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL) ) {
			/* If we fail, do a deep traversal. */
			if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
				jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
				return NULL;
			}
		}

		SLIST_FOREACH(ms, &target_j->machservices, sle) {
			if (ms->per_pid && strcmp(name, ms->name) == 0) {
				return ms;
			}
		}

		job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
		return NULL;
	}

	/* In a flat namespace, all non-subset lookups go through the root manager. */
	jobmgr_t jm_to_search = ( g_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ) ? root_jobmgr : jm;
	LIST_FOREACH(ms, &jm_to_search->ms_hash[hash_ms(name)], name_hash_sle) {
		if (!ms->per_pid && strcmp(name, ms->name) == 0) {
			return ms;
		}
	}

	if (jm->parentmgr == NULL || !check_parent) {
		return NULL;
	}

	/* Not found here; ascend to the parent bootstrap. */
	return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
}
6101
/* Accessor: the Mach port backing this service. */
mach_port_t
machservice_port(struct machservice *ms)
{
	return ms->port;
}
6107
/* Accessor: the job that owns this service. */
job_t
machservice_job(struct machservice *ms)
{
	return ms->job;
}
6113
/* Accessor: whether the service is hidden until its job checks in. */
bool
machservice_hidden(struct machservice *ms)
{
	return ms->hide;
}
6119
/* Accessor: whether the service is currently active. */
bool
machservice_active(struct machservice *ms)
{
	return ms->isActive;
}
6125
/* Accessor: the service's registered name. */
const char *
machservice_name(struct machservice *ms)
{
	return ms->name;
}
6131
/*
 * Drain queued messages from a Mach service port after its job crashed,
 * honoring the service's DrainMessagesOnCrash setting ("One" vs. "All").
 * Exception-handler services are drained via launchd_exc_runtime_once();
 * everything else is received with a zero timeout and destroyed.
 */
void
machservice_drain_port(struct machservice *ms)
{
	bool drain_one = ms->drain_one_on_crash;
	bool drain_all = ms->drain_all_on_crash;

	/* Callers should only ask us to drain a service so configured. */
	if( !job_assumes(ms->job, (drain_one || drain_all) == true) ) {
		return;
	}

	job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);

	/* Buffers sized from the Mach exception subsystem's message unions. */
	char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
	char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
	mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
	mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;

	mach_msg_return_t mr = ~MACH_MSG_SUCCESS;

	do {
		/* This should be a direct check on the Mach service to see if it's an exception-handling
		 * port, and it will break things if ReportCrash or SafetyNet start advertising other
		 * Mach services. But for now, it should be okay.
		 */
		if( ms->job->alt_exc_handler || ms->job->internal_exc_handler ) {
			mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
		} else {
			mach_msg_options_t options = MACH_RCV_MSG |
										 MACH_RCV_TIMEOUT ;

			mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
			switch( mr ) {
				case MACH_MSG_SUCCESS	:
					/* Got a message; release any rights/memory it carried. */
					mach_msg_destroy((mach_msg_header_t *)req_hdr);
					break;
				case MACH_RCV_TIMED_OUT	:
					/* Queue is empty; the loop condition below terminates. */
					break;
				case MACH_RCV_TOO_LARGE	:
					runtime_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
					break;
				default					:
					break;
			}
		}
	} while( drain_all && mr != MACH_RCV_TIMED_OUT );
}
6178
/*
 * Unregister a Mach service and release its port rights.  `port_died'
 * only affects the log message.
 */
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	if (unlikely(ms->debug_on_close)) {
		/* EnterKernelDebuggerOnClose was set for this service. */
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
	}

	if (ms->recv && job_assumes(j, !machservice_active(ms))) {
		job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
		job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	}

	job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);

	if (unlikely(ms->port == the_exception_server)) {
		/* This service had claimed the global exception server; release it. */
		the_exception_server = 0;
	}

	job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}

	/* Unlink from the owning job's list and both global hashes. */
	SLIST_REMOVE(&j->machservices, ms, machservice, sle);
	LIST_REMOVE(ms, name_hash_sle);
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
6210
6211 void
6212 machservice_request_notifications(struct machservice *ms)
6213 {
6214 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
6215
6216 ms->isActive = true;
6217
6218 if (ms->recv) {
6219 which = MACH_NOTIFY_PORT_DESTROYED;
6220 job_checkin(ms->job);
6221 }
6222
6223 job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
6224 }
6225
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
#define END_OF(x) (&(x)[NELEM(x)])

/* Split a mach_init-style command line into an argv vector.
 * Supports double-quoted arguments and backslash escapes.
 *
 * Returns NULL on empty input or allocation failure; otherwise a
 * single malloc'd block holding the NULL-terminated pointer vector
 * followed by the copied strings -- the caller frees the whole thing
 * with one free().
 *
 * Fixes over the previous version:
 *  - reserve one argv slot for the NULL terminator (previously 100
 *    arguments overflowed argv[] by one pointer);
 *  - stop parsing when the character buffer is exhausted (previously
 *    the loop spun forever while writing '\0' past the end of args[]);
 *  - consume the closing quote of a quoted argument;
 *  - don't emit a bogus empty argument for trailing whitespace. */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		while (isspace(*cp))
			cp++;
		if (*cp == '\0') {
			break;	/* trailing whitespace: no more arguments */
		}
		term = (*cp == '"') ? *cp++ : '\0';
		if (nargs >= NELEM(argv) - 1 || argp >= END_OF(args)) {
			break;	/* out of slots or storage: stop rather than overflow */
		}
		argv[nargs++] = argp;
		/* Copy until the terminator; keep one byte free for the '\0'. */
		while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args) - 1) {
			if (*cp == '\\') {
				cp++;
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		if (term && *cp == term) {
			cp++;	/* consume the closing quote */
		}
		*argp++ = '\0';
	}
	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	/* One allocation: pointer vector, then string storage. The copied
	 * strings can never exceed the input (quotes/escapes only shrink). */
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!launchd_assumes(argv_ret != NULL)) {
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
6278
/* Record that the job has checked in with launchd. */
void
job_checkin(job_t j)
{
	j->checkedin = true;
}
6284
/* Returns the job's embedded_special_privileges flag (embedded-platform
 * root-equivalent privileges). */
bool job_is_god(job_t j)
{
	return j->embedded_special_privileges;
}
6289
/* Handle a port-destroyed notification: a receive right we vended to a
 * job has come back to us. Finds the matching service, optionally
 * drains the port if the owning job crashed, then marks the service
 * inactive and re-dispatches the job.
 * Returns false if no receiving service matches port p. */
bool
job_ack_port_destruction(mach_port_t p)
{
	struct machservice *ms;
	job_t j;

	/* Look up the service that holds p as a receive right. */
	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->recv && (ms->port == p)) {
			break;
		}
	}

	if (!jobmgr_assumes(root_jobmgr, ms != NULL)) {
		return false;
	}

	j = ms->job;

	jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);

	/* Without being the exception handler, NOTE_EXIT is our only way to tell if the job
	 * crashed, and we can't rely on NOTE_EXIT always being processed after all the job's
	 * receive rights have been returned.
	 *
	 * So when we get receive rights back, check to see if the job has been reaped yet. If
	 * not, then we add this service to a list of services to be drained on crash if it's
	 * requested that behavior. So, for a job with N receive rights all requesting that they
	 * be drained on crash, we can safely handle the following sequence of events.
	 *
	 * ReceiveRight0Returned
	 * ReceiveRight1Returned
	 * ReceiveRight2Returned
	 * NOTE_EXIT (reap, get exit status)
	 * ReceiveRight3Returned
	 * .
	 * .
	 * .
	 * ReceiveRight(N - 1)Returned
	 */

	if( ms->drain_one_on_crash || ms->drain_all_on_crash ) {
		if( j->crashed && j->reaped ) {
			/* Crash already confirmed: drain immediately. */
			job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
			machservice_drain_port(ms);
		} else if( !(j->crashed || j->reaped) ) {
			/* Exit status unknown: drain is deferred (see comment above). */
			job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
		}
	}

	ms->isActive = false;
	if (ms->delete_on_destruction) {
		machservice_delete(j, ms, false);
	} else if (ms->reset) {
		machservice_resetport(j, ms);
	}

	job_dispatch(j, false);

	root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);

	return true;
}
6352
/* Handle a no-senders notification on the job's privileged bootstrap
 * port: close our receive right, clear the port, and re-dispatch the
 * job. */
void
job_ack_no_senders(job_t j)
{
	j->priv_port_has_senders = false;

	job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	j->j_port = 0;

	job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");

	job_dispatch(j, false);
}
6365
6366 bool
6367 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
6368 {
6369 struct semaphoreitem *si;
6370 size_t alloc_sz = sizeof(struct semaphoreitem);
6371
6372 if (what) {
6373 alloc_sz += strlen(what) + 1;
6374 }
6375
6376 if (!job_assumes(j, si = calloc(1, alloc_sz))) {
6377 return false;
6378 }
6379
6380 si->fd = -1;
6381 si->why = why;
6382
6383 if (what) {
6384 strcpy(si->what_init, what);
6385 }
6386
6387 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
6388
6389 if( (why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy ) {
6390 job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
6391 SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
6392 j->nosy = true;
6393 }
6394
6395 semaphoreitem_runtime_mod_ref(si, true);
6396
6397 return true;
6398 }
6399
6400 void
6401 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
6402 {
6403 /*
6404 * External events need to be tracked.
6405 * Internal events do NOT need to be tracked.
6406 */
6407
6408 switch (si->why) {
6409 case SUCCESSFUL_EXIT:
6410 case FAILED_EXIT:
6411 case OTHER_JOB_ENABLED:
6412 case OTHER_JOB_DISABLED:
6413 case OTHER_JOB_ACTIVE:
6414 case OTHER_JOB_INACTIVE:
6415 return;
6416 default:
6417 break;
6418 }
6419
6420 if (add) {
6421 runtime_add_weak_ref();
6422 } else {
6423 runtime_del_weak_ref();
6424 }
6425 }
6426
/* Unlink and free one KeepAlive condition: drop the weak runtime ref,
 * remove it from the job's list, close any watch fd, and take the job
 * off the "curious jobs" list if this was its other-job watch. */
void
semaphoreitem_delete(job_t j, struct semaphoreitem *si)
{
	semaphoreitem_runtime_mod_ref(si, false);

	SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);

	if (si->fd != -1) {
		job_assumes(j, runtime_close(si->fd) != -1);
	}

	/* We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores. */
	if( (si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy ) {
		j->nosy = false;
		SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
	}

	free(si);
}
6446
6447 void
6448 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
6449 {
6450 struct semaphoreitem_dict_iter_context *sdic = context;
6451 semaphore_reason_t why;
6452
6453 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
6454
6455 semaphoreitem_new(sdic->j, why, key);
6456 }
6457
/* Dictionary-iteration callback for the KeepAlive job key: translate
 * each sub-key into semaphore items on job j. Boolean sub-keys map to
 * a single reason; dictionary sub-keys map each entry via
 * semaphoreitem_setup_dict_iter(). Unknown keys/types are flagged via
 * job_assumes. */
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			/* Exit-status conditions imply an initial launch. */
			j->start_pending = true;
		} else if( strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0 ) {
			j->needs_kickoff = launch_data_get_bool(obj);
		} else {
			job_assumes(j, false);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
			sdic.why_true = PATH_EXISTS;
			sdic.why_false = PATH_MISSING;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			job_assumes(j, false);
			break;
		}

		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		job_assumes(j, false);
		break;
	}
}
6502
6503 void
6504 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
6505 {
6506 jobmgr_t jmi, jmn;
6507 job_t ji, jn;
6508
6509
6510 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
6511 jobmgr_dispatch_all_semaphores(jmi);
6512 }
6513
6514 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
6515 if (!SLIST_EMPTY(&ji->semaphores)) {
6516 job_dispatch(ji, false);
6517 }
6518 }
6519 }
6520
/* Compute the earliest time strictly after "now" that matches the
 * crontab-style spec (-1 in any field is a wildcard). If no match is
 * found within the current year, restart the scan at January 1st of
 * the following year. */
time_t
cronemu(int mon, int mday, int hour, int min)
{
	time_t now = time(NULL);
	struct tm next = *localtime(&now);

	next.tm_isdst = -1;	/* let mktime() decide DST */
	next.tm_sec = 0;
	next.tm_min++;		/* never fire within the current minute */

	while (!cronemu_mon(&next, mon, mday, hour, min)) {
		next.tm_year++;
		next.tm_mon = 0;
		next.tm_mday = 1;
		next.tm_hour = 0;
		next.tm_min = 0;
		mktime(&next);	/* normalize after the manual carry */
	}

	return mktime(&next);
}
6545
/* Compute the earliest time strictly after "now" falling on weekday
 * `wday` and matching the hour/minute spec (-1 = wildcard). Walks
 * forward one day at a time. */
time_t
cronemu_wday(int wday, int hour, int min)
{
	time_t now = time(NULL);
	struct tm next = *localtime(&now);

	next.tm_isdst = -1;	/* let mktime() decide DST */
	next.tm_sec = 0;
	next.tm_min++;		/* never fire within the current minute */

	/* crontab convention: both 0 and 7 mean Sunday. */
	if (wday == 7) {
		wday = 0;
	}

	while (!(next.tm_wday == wday && cronemu_hour(&next, hour, min))) {
		next.tm_mday++;
		next.tm_hour = 0;
		next.tm_min = 0;
		mktime(&next);	/* normalize and recompute tm_wday */
	}

	return mktime(&next);
}
6572
/* Advance *wtm to the next time matching the month/day/hour/minute
 * spec (-1 = wildcard). Returns false if the match would fall outside
 * the current year (caller then advances the year and retries). */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		/* Wildcard month: scan forward month by month until the rest
		 * of the spec can be satisfied. */
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mon;
			/* mktime() normalizes out-of-range fields; if tm_mon
			 * changed, the increment carried into the next year. */
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mon < wtm->tm_mon) {
		/* Requested month has already passed this year. */
		return false;
	}

	if (mon > wtm->tm_mon) {
		/* Jump to the start of the requested month. */
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
6608
/* Advance *wtm to the next time matching the day/hour/minute spec
 * (-1 = wildcard). Returns false if the match would carry into the
 * next month (caller then advances the month and retries). */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		/* Wildcard day: walk forward one day at a time. */
		struct tm scan = *wtm;
		int day_before;

		while (!cronemu_hour(&scan, hour, min)) {
			scan.tm_mday++;
			scan.tm_hour = 0;
			scan.tm_min = 0;
			day_before = scan.tm_mday;
			mktime(&scan);	/* normalize */
			if (day_before != scan.tm_mday) {
				return false;	/* carried into the next month */
			}
		}
		*wtm = scan;
		return true;
	}

	if (mday < wtm->tm_mday) {
		return false;	/* requested day already passed this month */
	}

	if (mday > wtm->tm_mday) {
		/* Jump to the start of the requested day. */
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
6642
/* Advance *wtm to the next time matching the hour/minute spec
 * (-1 = wildcard). Returns false if the match would carry into the
 * next day (caller then advances the day and retries). */
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		/* Wildcard hour: walk forward one hour at a time. */
		struct tm scan = *wtm;
		int hour_before;

		while (!cronemu_min(&scan, min)) {
			scan.tm_hour++;
			scan.tm_min = 0;
			hour_before = scan.tm_hour;
			mktime(&scan);	/* normalize */
			if (hour_before != scan.tm_hour) {
				return false;	/* carried into the next day */
			}
		}
		*wtm = scan;
		return true;
	}

	if (hour < wtm->tm_hour) {
		return false;	/* requested hour already passed today */
	}

	if (hour > wtm->tm_hour) {
		/* Jump to the start of the requested hour. */
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
6674
/* Advance *wtm to the requested minute (-1 = wildcard).
 * Returns false if that minute already passed within the current hour. */
bool
cronemu_min(struct tm *wtm, int min)
{
	if (min == -1) {
		return true;	/* wildcard: any minute matches */
	}

	if (min < wtm->tm_min) {
		return false;	/* already past the requested minute */
	}

	if (min > wtm->tm_min) {
		wtm->tm_min = min;
	}

	return true;
}
6692
/* MIG routine: allocate one page of shared memory for the job's
 * transaction bookkeeping and return a memory-entry port for it.
 * Refused for anonymous jobs and jobs that already have shmem. */
kern_return_t
job_mig_setup_shmem(job_t j, mach_port_t *shmem_port)
{
	memory_object_size_t size_of_page, size_of_page_orig;
	vm_address_t vm_addr;
	kern_return_t kr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->anonymous)) {
		job_log(j, LOG_DEBUG, "Anonymous job tried to setup shared memory");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (unlikely(j->shmem)) {
		job_log(j, LOG_ERR, "Tried to setup shared memory more than once");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Keep the original size: mach_make_memory_entry_64() may round
	 * size_of_page, and we sanity-check that below. */
	size_of_page_orig = size_of_page = getpagesize();

	kr = vm_allocate(mach_task_self(), &vm_addr, size_of_page, true);

	if (!job_assumes(j, kr == 0)) {
		return kr;
	}

	j->shmem = (typeof(j->shmem))vm_addr;
	j->shmem->vp_shmem_standby_timeout = j->timeout;

	kr = mach_make_memory_entry_64(mach_task_self(), &size_of_page,
			(memory_object_offset_t)vm_addr, VM_PROT_READ|VM_PROT_WRITE, shmem_port, 0);

	if (job_assumes(j, kr == 0)) {
		job_assumes(j, size_of_page == size_of_page_orig);
	}

	/* no need to inherit this in child processes */
	job_assumes(j, vm_inherit(mach_task_self(), (vm_address_t)j->shmem, size_of_page_orig, VM_INHERIT_NONE) == 0);

	return kr;
}
6737
/* MIG routine: create an on-demand mach_init-style server job from a
 * command line. Subject to sandbox "job-creation" checks; in the PID-1
 * launchd, non-root callers are redirected to their per-user launchd. */
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t js;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	/* mach_cmd2argv() returns one malloc block (pointers + strings),
	 * so the single free(argv) below releases everything. */
	const char **argv = (const char **)mach_cmd2argv(server_cmd);
	if (unlikely(argv == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
		free(argv);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	free(argv);
#endif

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

	if (pid1_magic) {
		/* PID-1 launchd only creates servers for root callers. */
		if (ldc->euid || ldc->uid) {
			job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
			return VPROC_ERR_TRY_PER_USER;
		}
	} else {
		if (unlikely(server_uid != getuid())) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; /* zero means "do nothing" */
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (unlikely(js == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
6788
6789 kern_return_t
6790 job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
6791 {
6792 struct ldcred *ldc = runtime_get_caller_creds();
6793 job_t otherj;
6794
6795 if (!launchd_assumes(j != NULL)) {
6796 return BOOTSTRAP_NO_MEMORY;
6797 }
6798
6799 if( unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation ) {
6800
6801 }
6802
6803 if( unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation ) {
6804 #if TARGET_OS_EMBEDDED
6805 if( !j->embedded_special_privileges ) {
6806 return BOOTSTRAP_NOT_PRIVILEGED;
6807 }
6808 #else
6809 return BOOTSTRAP_NOT_PRIVILEGED;
6810 #endif
6811 }
6812
6813 #if HAVE_SANDBOX
6814 if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
6815 return BOOTSTRAP_NOT_PRIVILEGED;
6816 }
6817 #endif
6818
6819 if (unlikely(!(otherj = job_find(targetlabel)))) {
6820 return BOOTSTRAP_UNKNOWN_SERVICE;
6821 }
6822
6823 #if TARGET_OS_EMBEDDED
6824 if( j->embedded_special_privileges && strcmp(j->username, otherj->username) != 0 ) {
6825 return BOOTSTRAP_NOT_PRIVILEGED;
6826 }
6827 #endif
6828
6829 if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
6830 bool do_block = otherj->p;
6831
6832 if (otherj->anonymous) {
6833 return BOOTSTRAP_NOT_PRIVILEGED;
6834 }
6835
6836 job_remove(otherj);
6837
6838 if (do_block) {
6839 job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
6840 /* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
6841 job_assumes(otherj, waiting4removal_new(otherj, srp));
6842 return MIG_NO_REPLY;
6843 } else {
6844 return 0;
6845 }
6846 } else if (sig == VPROC_MAGIC_TRYKILL_SIGNAL) {
6847 if (!j->kill_via_shmem) {
6848 return BOOTSTRAP_NOT_PRIVILEGED;
6849 }
6850
6851 if (!j->shmem) {
6852 j->sent_kill_via_shmem = true;
6853 job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
6854 return 0;
6855 }
6856 #if !TARGET_OS_EMBEDDED
6857 if (__sync_bool_compare_and_swap(&j->shmem->vp_shmem_transaction_cnt, 0, -1)) {
6858 j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
6859 j->sent_kill_via_shmem = true;
6860 job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
6861 return 0;
6862 }
6863 #endif
6864 return BOOTSTRAP_NOT_PRIVILEGED;
6865 } else if (otherj->p) {
6866 job_assumes(j, runtime_kill(otherj->p, sig) != -1);
6867 }
6868
6869 return 0;
6870 }
6871
/* MIG routine: accept a batch of log messages from a per-user launchd
 * and forward them into our log. Only per-user launchd jobs may call
 * this. */
kern_return_t
job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(j, j->per_user)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return runtime_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
}
6887
/* MIG routine: hand the accumulated log buffer to the caller.
 * Root only. */
kern_return_t
job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(ldc->euid)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return runtime_log_drain(srp, outval, outvalCnt);
}
6903
6904 kern_return_t
6905 job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
6906 vm_offset_t inval, mach_msg_type_number_t invalCnt,
6907 vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
6908 {
6909 const char *action;
6910 launch_data_t input_obj = NULL, output_obj = NULL;
6911 size_t data_offset = 0;
6912 size_t packed_size;
6913 struct ldcred *ldc = runtime_get_caller_creds();
6914
6915 if (!launchd_assumes(j != NULL)) {
6916 return BOOTSTRAP_NO_MEMORY;
6917 }
6918
6919 if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
6920 return BOOTSTRAP_NOT_PRIVILEGED;
6921 }
6922
6923 if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
6924 return 1;
6925 }
6926
6927 if (inkey && outkey) {
6928 action = "Swapping";
6929 } else if (inkey) {
6930 action = "Setting";
6931 } else {
6932 action = "Getting";
6933 }
6934
6935 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
6936
6937 *outvalCnt = 20 * 1024 * 1024;
6938 mig_allocate(outval, *outvalCnt);
6939 if (!job_assumes(j, *outval != 0)) {
6940 return 1;
6941 }
6942
6943 runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
6944 if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
6945 goto out_bad;
6946 }
6947
6948 switch (outkey) {
6949 case VPROC_GSK_ENVIRONMENT:
6950 if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
6951 goto out_bad;
6952 }
6953 jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
6954 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
6955 if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
6956 goto out_bad;
6957 }
6958 launch_data_free(output_obj);
6959 break;
6960 case VPROC_GSK_ALLJOBS:
6961 if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
6962 goto out_bad;
6963 }
6964 ipc_revoke_fds(output_obj);
6965 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
6966 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
6967 if (!job_assumes(j, packed_size != 0)) {
6968 goto out_bad;
6969 }
6970 launch_data_free(output_obj);
6971 break;
6972 case VPROC_GSK_MGR_NAME:
6973 if( !job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL) ) {
6974 goto out_bad;
6975 }
6976 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
6977 if (!job_assumes(j, packed_size != 0)) {
6978 goto out_bad;
6979 }
6980
6981 launch_data_free(output_obj);
6982 break;
6983 case VPROC_GSK_JOB_OVERRIDES_DB:
6984 if( !job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_OVERRIDES))) != NULL) ) {
6985 goto out_bad;
6986 }
6987 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
6988 if (!job_assumes(j, packed_size != 0)) {
6989 goto out_bad;
6990 }
6991
6992 launch_data_free(output_obj);
6993 break;
6994 case VPROC_GSK_JOB_CACHE_DB:
6995 if( !job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_JOBCACHE))) != NULL) ) {
6996 goto out_bad;
6997 }
6998 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
6999 if (!job_assumes(j, packed_size != 0)) {
7000 goto out_bad;
7001 }
7002
7003 job_log(j, LOG_DEBUG, "Location of job cache database: %s", launch_data_get_string(output_obj));
7004
7005 launch_data_free(output_obj);
7006 break;
7007 case 0:
7008 mig_deallocate(*outval, *outvalCnt);
7009 *outval = 0;
7010 *outvalCnt = 0;
7011 break;
7012 default:
7013 goto out_bad;
7014 }
7015
7016 if (invalCnt) switch (inkey) {
7017 case VPROC_GSK_ENVIRONMENT:
7018 if( launch_data_get_type(input_obj) == LAUNCH_DATA_DICTIONARY ) {
7019 if( j->p ) {
7020 job_log(j, LOG_INFO, "Setting environment for a currently active job. This environment will take effect on the next invocation of the job.");
7021 }
7022 launch_data_dict_iterate(input_obj, envitem_setup_one_shot, j);
7023 }
7024 break;
7025 case 0:
7026 break;
7027 default:
7028 goto out_bad;
7029 }
7030
7031 mig_deallocate(inval, invalCnt);
7032
7033 return 0;
7034
7035 out_bad:
7036 if (*outval) {
7037 mig_deallocate(*outval, *outvalCnt);
7038 }
7039 return 1;
7040 }
7041
/* MIG routine: get and/or set a scalar per-job (or global) value.
 * outkey selects the value written to *outval; inkey selects the value
 * applied from inval. A combined get+set must use the same key.
 * Returns 0 on success, 1 (or a bootstrap error) on failure. */
kern_return_t
job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
{
	const char *action;
	kern_return_t kr = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	int oldmask;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Setting values requires root or the owning UID. */
	if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	/* ---- "get" half ---- */
	switch (outkey) {
	case VPROC_GSK_ABANDON_PROCESS_GROUP:
		*outval = j->abandon_pg;
		break;
	case VPROC_GSK_LAST_EXIT_STATUS:
		*outval = j->last_exit_status;
		break;
	case VPROC_GSK_MGR_UID:
		*outval = getuid();
		break;
	case VPROC_GSK_MGR_PID:
		*outval = getpid();
		break;
	case VPROC_GSK_IS_MANAGED:
		*outval = j->anonymous ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		*outval = !j->ondemand;
		break;
	case VPROC_GSK_START_INTERVAL:
		*outval = j->start_interval;
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		*outval = j->timeout;
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		*outval = j->exit_timeout;
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		/* There is no "peek" API: read the mask by setting and
		 * immediately restoring it. */
		oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
		*outval = oldmask;
		runtime_setlogmask(oldmask);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		/* Same set-and-restore trick for umask(2). */
		oldmask = umask(0);
		*outval = oldmask;
		umask(oldmask);
		break;
	case VPROC_GSK_TRANSACTIONS_ENABLED:
		job_log(j, LOG_DEBUG, "Reading transaction model status.");
		*outval = j->kill_via_shmem;
		break;
	case VPROC_GSK_WAITFORDEBUGGER:
		*outval = j->wait4debugger;
		break;
	case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
		*outval = j->embedded_special_privileges;
		break;
	case 0:
		*outval = 0;
		break;
	default:
		kr = 1;
		break;
	}

	/* ---- "set" half ---- */
	switch (inkey) {
	case VPROC_GSK_ABANDON_PROCESS_GROUP:
		j->abandon_pg = (bool)inval;
		break;
	case VPROC_GSK_GLOBAL_ON_DEMAND:
		job_log(j, LOG_DEBUG, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
		kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		j->ondemand = !inval;
		break;
	case VPROC_GSK_START_INTERVAL:
		if (inval > UINT32_MAX || inval < 0) {
			kr = 1;
		} else if (inval) {
			/* First interval set takes a weak ref so a pending timer
			 * keeps launchd alive. */
			if (j->start_interval == 0) {
				runtime_add_weak_ref();
			}
			j->start_interval = (typeof(j->start_interval)) inval;
			job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
		} else if (j->start_interval) {
			job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
			/* NOTE: this inner check is redundant (always true inside
			 * this else-if branch), but harmless. */
			if (j->start_interval != 0) {
				runtime_del_weak_ref();
			}
			j->start_interval = 0;
		}
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			j->timeout = (typeof(j->timeout)) inval;
		}
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			j->exit_timeout = (typeof(j->exit_timeout)) inval;
		}
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		if (inval < 0 || inval > UINT32_MAX) {
			kr = 1;
		} else {
			runtime_setlogmask((int) inval);
		}
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		launchd_assert(sizeof (mode_t) == 2);
		if (inval < 0 || inval > UINT16_MAX) {
			kr = 1;
		} else {
			umask((mode_t) inval);
		}
		break;
	case VPROC_GSK_TRANSACTIONS_ENABLED:
		/* Opting in to the transaction model is one-way. */
		if( !job_assumes(j, inval != 0) ) {
			job_log(j, LOG_WARNING, "Attempt to unregister from transaction model. This is not supported.");
			kr = 1;
		} else {
			job_log(j, LOG_DEBUG, "Now participating in transaction model.");
			j->kill_via_shmem = (bool)inval;
			job_log(j, LOG_DEBUG, "j->kill_via_shmem = %s", j->kill_via_shmem ? "true" : "false");
		}
		break;
	case VPROC_GSK_WEIRD_BOOTSTRAP:
		if( job_assumes(j, j->weird_bootstrap) ) {
			job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");

			mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);

			if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
				mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
			}

			/* Start serving the full vproc protocol on the manager port. */
			job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
			j->weird_bootstrap = false;
		}
		break;
	case VPROC_GSK_WAITFORDEBUGGER:
		/* NOTE(review): the getter above reads wait4debugger, but the
		 * setter writes the separate oneshot flag -- looks intentional
		 * (applies to the next launch only), but worth confirming. */
		j->wait4debugger_oneshot = inval;
		break;
	case VPROC_GSK_PERUSER_SUSPEND:
		/* PID-1, root only: stop the per-user launchd for UID inval,
		 * tracking the suspension so it can be resumed later. */
		if( job_assumes(j, pid1_magic && ldc->euid == 0) ) {
			mach_port_t junk = MACH_PORT_NULL;
			job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, false, &junk);
			if( job_assumes(j, jpu != NULL) ) {
				struct suspended_peruser *spi = NULL;
				LIST_FOREACH( spi, &j->suspended_perusers, sle ) {
					if( (int64_t)(spi->j->mach_uid) == inval ) {
						job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
						break;
					}
				}

				if( spi == NULL ) {
					job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
					spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
					if( job_assumes(j, spi != NULL) ) {
						spi->j = jpu;
						spi->j->peruser_suspend_count++;
						LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
						job_stop(spi->j);
					} else {
						kr = BOOTSTRAP_NO_MEMORY;
					}
				}
			}
		} else {
			kr = 1;
		}
		break;
	case VPROC_GSK_PERUSER_RESUME:
		/* PID-1 only: undo a previous PERUSER_SUSPEND by this job. */
		if( job_assumes(j, pid1_magic == true) ) {
			struct suspended_peruser *spi = NULL, *spt = NULL;
			LIST_FOREACH_SAFE( spi, &j->suspended_perusers, sle, spt ) {
				if( (int64_t)(spi->j->mach_uid) == inval ) {
					spi->j->peruser_suspend_count--;
					LIST_REMOVE(spi, sle);
					job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
					break;
				}
			}

			if( !job_assumes(j, spi != NULL) ) {
				job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
				kr = BOOTSTRAP_NOT_PRIVILEGED;
			} else if( spi->j->peruser_suspend_count == 0 ) {
				/* Last suspender gone: re-dispatch the per-user launchd. */
				job_dispatch(spi->j, false);
				free(spi);
			}
		} else {
			kr = 1;
		}
		break;
	case 0:
		break;
	default:
		kr = 1;
		break;
	}

	return kr;
}
7274
/* MIG routine: called by a freshly forked child. Installs the job's
 * exception port and the registered host special ports on the child
 * task, and returns the audit session port (where applicable). */
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *audit_session)
{
	struct machservice *ms;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	job_setup_exception_port(j, child_task);

	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			/* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
			continue;
		}

		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);

		if (unlikely(errno)) {
			int desired_log_level = LOG_ERR;

			if (j->anonymous) {
				/* 5338127 */

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* Only managed, non-per-user jobs get the audit session port. */
	mach_port_t _session = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
	if( !j->anonymous && !j->per_user ) {
		job_log(j, LOG_DEBUG, "Returning session port %u", j->audit_session);
		_session = j->audit_session;
	}
#endif
	*audit_session = _session;
	job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);

	return 0;
}
7325
/* MIG routine: initiate a reboot/shutdown (PID-1 only). Walks the
 * caller's process ancestry to log who asked for the reboot, stores
 * the reboot flags, and kicks off launchd_shutdown(). */
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct kinfo_proc kp;
	struct ldcred *ldc = runtime_get_caller_creds();
	pid_t pid_to_log;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!pid1_magic)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* NOTE: the permission check is split across the preprocessor: on
	 * embedded, specially-privileged non-root jobs are also allowed.
	 * The braces only balance after preprocessing -- do not reformat. */
#if !TARGET_OS_EMBEDDED
	if (unlikely(ldc->euid)) {
#else
	if( unlikely(ldc->euid) && !j->embedded_special_privileges ) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Build " name[pid] -> name[ppid] -> ..." up the process tree. */
	for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = kp.kp_eproc.e_ppid) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid_to_log };
		size_t who_offset, len = sizeof(kp);

		if (!job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
			return 1;
		}

		/* Guard against a cycle (process that is its own parent). */
		if( !job_assumes(j, pid_to_log != kp.kp_eproc.e_ppid) ) {
			job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
			snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", kp.kp_proc.p_comm, pid_to_log, kp.kp_proc.p_comm, pid_to_log, kp.kp_proc.p_comm, pid_to_log);
			break;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", kp.kp_proc.p_comm, pid_to_log, kp.kp_eproc.e_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;

	launchd_shutdown();

	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);

	return 0;
}
7377
7378 kern_return_t
7379 job_mig_getsocket(job_t j, name_t spr)
7380 {
7381 if (!launchd_assumes(j != NULL)) {
7382 return BOOTSTRAP_NO_MEMORY;
7383 }
7384
7385 if( j->deny_job_creation ) {
7386 return BOOTSTRAP_NOT_PRIVILEGED;
7387 }
7388
7389 ipc_server_init();
7390
7391 if (unlikely(!sockpath)) {
7392 return BOOTSTRAP_NO_MEMORY;
7393 }
7394
7395 strncpy(spr, sockpath, sizeof(name_t));
7396
7397 return BOOTSTRAP_SUCCESS;
7398 }
7399
7400 kern_return_t
7401 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
7402 {
7403 if (!launchd_assumes(j != NULL)) {
7404 return BOOTSTRAP_NO_MEMORY;
7405 }
7406
7407 if ((errno = err)) {
7408 job_log_error(j, pri, "%s", msg);
7409 } else {
7410 job_log(j, pri, "%s", msg);
7411 }
7412
7413 return 0;
7414 }
7415
7416 job_t
7417 jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, bool dispatch, mach_port_t *mp)
7418 {
7419 job_t ji = NULL;
7420 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
7421 if (!ji->per_user) {
7422 continue;
7423 }
7424 if (ji->mach_uid != which_user) {
7425 continue;
7426 }
7427 if (SLIST_EMPTY(&ji->machservices)) {
7428 continue;
7429 }
7430 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
7431 continue;
7432 }
7433 break;
7434 }
7435
7436 if( unlikely(ji == NULL) ) {
7437 struct machservice *ms;
7438 char lbuf[1024];
7439
7440 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
7441
7442 sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
7443
7444 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
7445
7446 if( ji != NULL ) {
7447 ji->mach_uid = which_user;
7448 ji->per_user = true;
7449 ji->kill_via_shmem = true;
7450
7451 struct stat sb;
7452 char pu_db[PATH_MAX];
7453 snprintf(pu_db, sizeof(pu_db), LAUNCHD_DB_PREFIX "/%s", lbuf);
7454
7455 bool created = false;
7456 int err = stat(pu_db, &sb);
7457 if( (err == -1 && errno == ENOENT) || (err == 0 && !S_ISDIR(sb.st_mode)) ) {
7458 if( err == 0 ) {
7459 char move_aside[PATH_MAX];
7460 snprintf(move_aside, sizeof(move_aside), LAUNCHD_DB_PREFIX "/%s.movedaside", lbuf);
7461
7462 job_assumes(ji, rename(pu_db, move_aside) != -1);
7463 }
7464
7465 job_assumes(ji, mkdir(pu_db, S_IRWXU) != -1);
7466 job_assumes(ji, chown(pu_db, which_user, 0) != -1);
7467 created = true;
7468 }
7469
7470 if( !created ) {
7471 if( !job_assumes(ji, sb.st_uid == which_user) ) {
7472 job_assumes(ji, chown(pu_db, which_user, 0) != -1);
7473 }
7474 if( !job_assumes(ji, sb.st_gid == 0) ) {
7475 job_assumes(ji, chown(pu_db, which_user, 0) != -1);
7476 }
7477 if( !job_assumes(ji, sb.st_mode == (S_IRWXU | S_IFDIR)) ) {
7478 job_assumes(ji, chmod(pu_db, S_IRWXU) != -1);
7479 }
7480 }
7481
7482 if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
7483 job_remove(ji);
7484 ji = NULL;
7485 } else {
7486 ms->per_user_hack = true;
7487 ms->hide = true;
7488
7489 ji = dispatch ? job_dispatch(ji, false) : ji;
7490 }
7491 }
7492 } else {
7493 *mp = machservice_port(SLIST_FIRST(&ji->machservices));
7494 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
7495 }
7496
7497 return ji;
7498 }
7499
7500 kern_return_t
7501 job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
7502 {
7503 struct ldcred *ldc = runtime_get_caller_creds();
7504 job_t jpu;
7505
7506 #if TARGET_OS_EMBEDDED
7507 /* There is no need for per-user launchd's on embedded. */
7508 job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
7509 return BOOTSTRAP_NOT_PRIVILEGED;
7510 #endif
7511
7512 #if HAVE_SANDBOX
7513 if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
7514 return BOOTSTRAP_NOT_PRIVILEGED;
7515 }
7516 #endif
7517
7518 if (!launchd_assumes(j != NULL)) {
7519 return BOOTSTRAP_NO_MEMORY;
7520 }
7521
7522 job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
7523
7524 if (unlikely(!pid1_magic)) {
7525 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
7526 return BOOTSTRAP_NOT_PRIVILEGED;
7527 }
7528
7529 if (ldc->euid || ldc->uid) {
7530 which_user = ldc->euid ?: ldc->uid;
7531 }
7532
7533 *up_cont = MACH_PORT_NULL;
7534
7535 jpu = jobmgr_lookup_per_user_context_internal(j, which_user, true, up_cont);
7536
7537 return 0;
7538 }
7539
/* MIG handler: bootstrap_check_in(). Hands the receive right for the named
 * service to its owning job. If the service does not exist yet, it is
 * created on the fly and treated as a legacy registration.
 */
kern_return_t
job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uint64_t flags)
{
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
	struct ldcred *ldc = runtime_get_caller_creds();
	struct machservice *ms;
	job_t jo;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);

	if (ms == NULL) {
		/* Unknown service: create it now, owned by the caller. */
		*serviceportp = MACH_PORT_NULL;

		if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
			return BOOTSTRAP_NO_MEMORY;
		}

		/* Treat this like a legacy job. */
		if( !j->legacy_mach_job ) {
			ms->isActive = true;
			ms->recv = false;
		}

		if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
			job_log(j, LOG_SCOLDING, "Please add the following service to the configuration file for this job: %s", servicename);
		}
	} else {
		/* Known service: only its owning job may check it in, and only
		 * while it is not already active. */
		if (unlikely((jo = machservice_job(ms)) != j)) {
			static pid_t last_warned_pid;

			/* Rate-limit the hijack warning to one per offending PID. */
			if (last_warned_pid != ldc->pid) {
				job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
				last_warned_pid = ldc->pid;
			}

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (unlikely(machservice_active(ms))) {
			job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
	}

	job_checkin(j);
	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
7595
/* MIG handler: bootstrap_register(). Registers (or replaces) a send right
 * for a named service owned by the calling job. A MACH_PORT_NULL service
 * port deregisters the name.
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
		job_log(j, LOG_SCOLDING, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
	}

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
		if (pid1_magic) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);

	if (unlikely(ms)) {
		/* The name exists: only the owning job may re-register it, and a
		 * receive-right-holding service must use check-in instead. */
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		if (ms->recv && (serviceport != MACH_PORT_NULL)) {
			job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		/* Tear down the old registration before installing the new port. */
		job_checkin(j);
		machservice_delete(j, ms, false);
	}

	/* A NULL service port means "deregister only"; otherwise install the
	 * caller-supplied send right under the name. */
	if (likely(serviceport != MACH_PORT_NULL)) {
		if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}


	return BOOTSTRAP_SUCCESS;
}
7657
/* MIG handler: bootstrap_look_up(). Resolves a service name to a send
 * right, possibly forwarding the request up the bootstrap chain (in which
 * case the reply port is moved and MIG_NO_REPLY is returned).
 */
kern_return_t
job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr;
	bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	/* Non-root callers hitting the root bootstrap are punted to their
	 * per-user launchd. */
	if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	/* Per-PID lookups stay in this manager; global lookups may traverse
	 * parent namespaces (third argument). */
	if (per_pid_lookup) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
	}

	/* Hidden-and-inactive services and per-user placeholder services are
	 * invisible to ordinary lookups. */
	if (likely(ms)) {
		if (machservice_hidden(ms) && !machservice_active(ms)) {
			ms = NULL;
		} else if (unlikely(ms->per_user_hack)) {
			ms = NULL;
		}
	}

	if (likely(ms)) {
		job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);

		if (unlikely(!per_pid_lookup && j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user)) {
			/* we need to think more about the per_pid_lookup logic before logging about repeated lookups */
			job_log(j, LOG_DEBUG, "Performance: Please fix the framework that talks to \"%s\" to cache the Mach port for service: %s", ms->job->label, servicename);
		}

		/* Remember this lookup so repeated identical lookups can be
		 * flagged above. */
		j->lastlookup = ms;
		j->lastlookup_gennum = ms->gen_num;

		*serviceportp = machservice_port(ms);

		kr = BOOTSTRAP_SUCCESS;
	} else if (!per_pid_lookup && (inherited_bootstrap_port != MACH_PORT_NULL)) {
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		/* Clients potentially check the audit token of the reply to verify that the returned send right is trustworthy. */
		job_assumes(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, 0, 0) == 0);
		/* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
		return MIG_NO_REPLY;
	} else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/*
		 * 5240036 Should start background session when a lookup of CCacheServer occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
		 * If we find a EUID that isn't root, we force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
7733
7734 kern_return_t
7735 job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
7736 {
7737 if (!launchd_assumes(j != NULL)) {
7738 return BOOTSTRAP_NO_MEMORY;
7739 }
7740
7741 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
7742 jobmgr_t jm = j->mgr;
7743
7744 if (jobmgr_parent(jm)) {
7745 *parentport = jobmgr_parent(jm)->jm_port;
7746 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
7747 *parentport = jm->jm_port;
7748 } else {
7749 job_assumes(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp) == 0);
7750 /* The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now */
7751 return MIG_NO_REPLY;
7752 }
7753 return BOOTSTRAP_SUCCESS;
7754 }
7755
/* MIG handler: bootstrap_info(). Returns three parallel MIG-allocated
 * arrays (names, owning-job labels, active status) describing every
 * non-per-PID machservice visible to the caller's namespace. The caller
 * (MIG) takes ownership of the out-of-line buffers on success.
 */
kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt,
		name_array_t *servicejobsp, unsigned int *servicejobs_cnt,
		bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt,
		uint64_t flags)
{
	name_array_t service_names = NULL;
	name_array_t service_jobs = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	jobmgr_t jm;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* In a flat namespace, report the root manager's services unless this
	 * manager is an explicit subset or the caller forces a local view. */
	if( g_flat_mach_namespace ) {
		if( (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL) ) {
			jm = j->mgr;
		} else {
			jm = root_jobmgr;
		}
	} else {
		jm = j->mgr;
	}

	/* First pass: count the qualifying services so the buffers can be
	 * sized exactly. */
	unsigned int i = 0;
	struct machservice *msi = NULL;
	for( i = 0; i < MACHSERVICE_HASH_SIZE; i++ ) {
		LIST_FOREACH( msi, &jm->ms_hash[i], name_hash_sle ) {
			cnt += !msi->per_pid ? 1 : 0;
		}
	}

	/* No services: reply success with empty (NULL) arrays. */
	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!job_assumes(j, service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
	if (!job_assumes(j, service_jobs != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!job_assumes(j, service_actives != NULL)) {
		goto out_bad;
	}

	/* Second pass: fill the three arrays in the same iteration order. */
	for( i = 0; i < MACHSERVICE_HASH_SIZE; i++ ) {
		LIST_FOREACH( msi, &jm->ms_hash[i], name_hash_sle ) {
			if( !msi->per_pid ) {
				strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
				strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
				service_actives[cnt2] = machservice_status(msi);
				cnt2++;
			}
		}
	}

	job_assumes(j, cnt == cnt2);

out:
	*servicenamesp = service_names;
	*servicejobsp = service_jobs;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	/* Allocation failed part-way: release whatever was allocated. */
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_jobs) {
		mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
7843
7844 kern_return_t
7845 job_mig_lookup_children(job_t j, mach_port_array_t *child_ports, mach_msg_type_number_t *child_ports_cnt,
7846 name_array_t *child_names, mach_msg_type_number_t *child_names_cnt,
7847 bootstrap_property_array_t *child_properties, mach_msg_type_number_t *child_properties_cnt)
7848 {
7849 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
7850 if( !launchd_assumes(j != NULL) ) {
7851 return BOOTSTRAP_NO_MEMORY;
7852 }
7853
7854 struct ldcred *ldc = runtime_get_caller_creds();
7855
7856 /* Only allow root processes to look up children, even if we're in the per-user launchd.
7857 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
7858 * in a non-flat namespace.
7859 */
7860 if( ldc->euid != 0 ) {
7861 job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
7862 return BOOTSTRAP_NOT_PRIVILEGED;
7863 }
7864
7865 unsigned int cnt = 0;
7866
7867 jobmgr_t jmr = j->mgr;
7868 jobmgr_t jmi = NULL;
7869 SLIST_FOREACH( jmi, &jmr->submgrs, sle ) {
7870 cnt++;
7871 }
7872
7873 /* Find our per-user launchds if we're PID 1. */
7874 job_t ji = NULL;
7875 if( pid1_magic ) {
7876 LIST_FOREACH( ji, &jmr->jobs, sle ) {
7877 cnt += ji->per_user ? 1 : 0;
7878 }
7879 }
7880
7881 if( cnt == 0 ) {
7882 return BOOTSTRAP_NO_CHILDREN;
7883 }
7884
7885 mach_port_array_t _child_ports = NULL;
7886 mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
7887 if( !job_assumes(j, _child_ports != NULL) ) {
7888 kr = BOOTSTRAP_NO_MEMORY;
7889 goto out_bad;
7890 }
7891
7892 name_array_t _child_names = NULL;
7893 mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
7894 if( !job_assumes(j, _child_names != NULL) ) {
7895 kr = BOOTSTRAP_NO_MEMORY;
7896 goto out_bad;
7897 }
7898
7899 bootstrap_property_array_t _child_properties = NULL;
7900 mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
7901 if( !job_assumes(j, _child_properties != NULL) ) {
7902 kr = BOOTSTRAP_NO_MEMORY;
7903 goto out_bad;
7904 }
7905
7906 unsigned int cnt2 = 0;
7907 SLIST_FOREACH( jmi, &jmr->submgrs, sle ) {
7908 if( jobmgr_assumes(jmi, launchd_mport_make_send(jmi->jm_port) == KERN_SUCCESS) ) {
7909 _child_ports[cnt2] = jmi->jm_port;
7910 } else {
7911 _child_ports[cnt2] = MACH_PORT_NULL;
7912 }
7913
7914 strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
7915 _child_properties[cnt2] = jmi->properties;
7916
7917 cnt2++;
7918 }
7919
7920 if( pid1_magic ) LIST_FOREACH( ji, &jmr->jobs, sle ) {
7921 if( ji->per_user ) {
7922 if( job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true) ) {
7923 mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));
7924
7925 if( job_assumes(ji, launchd_mport_copy_send(port) == KERN_SUCCESS) ) {
7926 _child_ports[cnt2] = port;
7927 } else {
7928 _child_ports[cnt2] = MACH_PORT_NULL;
7929 }
7930 } else {
7931 _child_ports[cnt2] = MACH_PORT_NULL;
7932 }
7933
7934 strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
7935 _child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;
7936
7937 cnt2++;
7938 }
7939 }
7940
7941 *child_names_cnt = cnt;
7942 *child_ports_cnt = cnt;
7943 *child_properties_cnt = cnt;
7944
7945 *child_names = _child_names;
7946 *child_ports = _child_ports;
7947 *child_properties = _child_properties;
7948
7949 unsigned int i = 0;
7950 for( i = 0; i < cnt; i++ ) {
7951 job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
7952 }
7953
7954 return BOOTSTRAP_SUCCESS;
7955 out_bad:
7956 if( _child_ports ) {
7957 mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
7958 }
7959
7960 if( _child_names ) {
7961 mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_ports[0]));
7962 }
7963
7964 if( _child_properties ) {
7965 mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
7966 }
7967
7968 return kr;
7969 }
7970
7971 kern_return_t
7972 job_mig_transaction_count_for_pid(job_t j, pid_t p, int32_t *cnt, boolean_t *condemned)
7973 {
7974 kern_return_t kr = KERN_FAILURE;
7975 struct ldcred *ldc = runtime_get_caller_creds();
7976 if( (ldc->euid != geteuid()) && (ldc->euid != 0) ) {
7977 return BOOTSTRAP_NOT_PRIVILEGED;
7978 }
7979
7980 job_t j_for_pid = jobmgr_find_by_pid_deep(j->mgr, p, false);
7981 if( j_for_pid ) {
7982 if( j_for_pid->kill_via_shmem ) {
7983 if( j_for_pid->shmem ) {
7984 *cnt = j_for_pid->shmem->vp_shmem_transaction_cnt;
7985 *condemned = j_for_pid->shmem->vp_shmem_flags & VPROC_SHMEM_EXITING;
7986 *cnt += *condemned ? 1 : 0;
7987 } else {
7988 *cnt = 0;
7989 *condemned = false;
7990 }
7991
7992 kr = BOOTSTRAP_SUCCESS;
7993 } else {
7994 kr = BOOTSTRAP_NO_MEMORY;
7995 }
7996 } else {
7997 kr = BOOTSTRAP_UNKNOWN_SERVICE;
7998 }
7999
8000 return kr;
8001 }
8002
8003 kern_return_t
8004 job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
8005 {
8006 struct ldcred *ldc = runtime_get_caller_creds();
8007 if( (ldc->euid != geteuid()) && (ldc->euid != 0) ) {
8008 return BOOTSTRAP_NOT_PRIVILEGED;
8009 }
8010
8011 /* This is so loginwindow doesn't try to quit GUI apps that have been launched
8012 * directly by launchd as agents.
8013 */
8014 job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
8015 if( j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job ) {
8016 *managed = true;
8017 }
8018
8019 return BOOTSTRAP_SUCCESS;
8020 }
8021
8022 kern_return_t
8023 job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
8024 {
8025 struct ldcred *ldc = runtime_get_caller_creds();
8026 kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
8027
8028 mach_port_t _mp = MACH_PORT_NULL;
8029 if( !j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid()) ) {
8030 job_t target_j = job_find(label);
8031 if( jobmgr_assumes(root_jobmgr, target_j != NULL) ) {
8032 if( target_j->j_port == MACH_PORT_NULL ) {
8033 job_assumes(target_j, job_setup_machport(target_j) == true);
8034 }
8035
8036 _mp = target_j->j_port;
8037 kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
8038 } else {
8039 kr = BOOTSTRAP_NO_MEMORY;
8040 }
8041 }
8042
8043 *mp = _mp;
8044 return kr;
8045 }
8046
#if !TARGET_OS_EMBEDDED
/* MIG handler: associate an audit-session port with every job parked on
 * s_needing_sessions waiting for this UUID, then dispatch those jobs. Each
 * matched job takes its own send-right reference on the session; the extra
 * MIG reference is dropped at the end.
 */
kern_return_t
job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t session)
{
	uuid_string_t uuid_str;
	uuid_unparse(uuid, uuid_str);
	job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", session, uuid_str);

	job_t ji = NULL, jt = NULL;
	/* NOTE(review): the list is traversed via the `sle` link but entries
	 * are removed via `needing_session_sle` — verify these refer to the
	 * same linkage for s_needing_sessions. */
	LIST_FOREACH_SAFE( ji, &s_needing_sessions, sle, jt ) {
		uuid_string_t uuid_str2;
		uuid_unparse(ji->expected_audit_uuid, uuid_str2);

		if( uuid_compare(uuid, ji->expected_audit_uuid) == 0 ) {
			uuid_clear(ji->expected_audit_uuid);
			if( session != MACH_PORT_NULL ) {
				job_log(ji, LOG_DEBUG, "Job should join session with port %u", session);
				/* Take a send-right reference on behalf of this job. */
				mach_port_mod_refs(mach_task_self(), session, MACH_PORT_RIGHT_SEND, 1);
			} else {
				job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
			}

			ji->audit_session = session;
			LIST_REMOVE(ji, needing_session_sle);
			job_dispatch(ji, false);
		}
	}

	/* Each job that the session port was set for holds a reference. At the end of
	 * the loop, there will be one extra reference belonging to this MiG protocol.
	 * We need to release it so that the session goes away when all the jobs
	 * referencing it are unloaded.
	 */
	mach_port_deallocate(mach_task_self(), session);

	return KERN_SUCCESS;
}
#else
/* Embedded builds do not manage audit sessions; accept and ignore. */
kern_return_t
job_mig_set_security_session(job_t j __attribute__((unused)), uuid_t uuid __attribute__((unused)), mach_port_t session __attribute__((unused)))
{
	return KERN_SUCCESS;
}
#endif
8091
/* Find a job manager (session) by name, searching this manager, the root's
 * direct sub-managers, and (for PID 1) the Background session's children.
 * A NULL `where` is the LaunchServices convenience: it means "Aqua", or
 * "LoginWindow" if that is the current session. Returns NULL when nothing
 * matches (the SLIST_FOREACH leaves jmi NULL on exhaustion).
 */
jobmgr_t
jobmgr_find_by_name(jobmgr_t jm, const char *where)
{
	jobmgr_t jmi, jmi2;

	/* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
	if (where == NULL) {
		if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
			where = VPROCMGR_SESSION_LOGINWINDOW;
		} else {
			where = VPROCMGR_SESSION_AQUA;
		}
	}

	if (strcasecmp(jm->name, where) == 0) {
		return jm;
	}

	/* In a non-PID-1 launchd, "Background" is the root manager itself. */
	if( strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic ) {
		jmi = root_jobmgr;
		goto jm_found;
	}

	SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
		if (unlikely(jmi->shutting_down)) {
			continue;
		} else if (strcasecmp(jmi->name, where) == 0) {
			goto jm_found;
		} else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
			/* PID 1 nests per-user sessions under Background; search one
			 * level deeper. */
			SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
				if (strcasecmp(jmi2->name, where) == 0) {
					jmi = jmi2;
					goto jm_found;
				}
			}
		}
	}

jm_found:
	return jmi;
}
8133
/* MIG handler: absorb another launchd's bootstrap subset into a new child
 * job manager here. Grabs the remote subset's ports and service list via
 * _vproc_grab_subset(), builds a new manager, optionally moves the calling
 * job into it (LAUNCH_GLOBAL_ON_DEMAND), and re-registers each transferred
 * service against the process that owned it.
 */
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t audit_session, uint64_t flags)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	jobmgr_t jmr = NULL;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* If the target port resolves to one of our own managers, the caller
	 * is trying to move a session into itself. */
	if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	/* Pull the subset's request port, receive right, service descriptions
	 * and service ports out of the remote launchd. */
	kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);

	if (!job_assumes(j, kr == 0)) {
		goto out;
	}

	/* The descriptor array and the port array must be parallel. */
	launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, audit_session)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;

	/* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
	 * processing an IPC request, we'll do this action before the new job manager can get any IPC
	 * requests. This serialization is guaranteed since we are single-threaded in that respect.
	 */
	if( flags & LAUNCH_GLOBAL_ON_DEMAND ) {
		/* This is so awful. */
		/* Remove the job from its current job manager. */
		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, pid_hash_sle);

		/* Put the job into the target job manager. */
		LIST_INSERT_HEAD(&jmr->jobs, j, sle);
		LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

		j->mgr = jmr;
		job_set_global_on_demand(j, true);

		/* Keep launchd alive while this moved job is alive. */
		if( !j->holds_ref ) {
			j->holds_ref = true;
			runtime_add_ref();
		}
	}

	/* Re-create each transferred service under the job that owns its PID
	 * in the new manager. */
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (unlikely(!j_for_service)) {
			/* The PID probably exited */
			job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
			continue;
		}

		if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	/* On success we own target_subset and must drop it; on failure the new
	 * manager (if any) is torn down instead. */
	if (kr == 0) {
		if (target_subset) {
			job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
		}
	} else if (jmr) {
		jobmgr_shutdown(jmr);
	}

	return kr;
}
8247
8248 kern_return_t
8249 job_mig_init_session(job_t j, name_t session_type, mach_port_t audit_session)
8250 {
8251 job_t j2;
8252
8253 kern_return_t kr = BOOTSTRAP_NO_MEMORY;
8254 if (j->mgr->session_initialized) {
8255 job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
8256 kr = BOOTSTRAP_NOT_PRIVILEGED;
8257 } else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
8258 jobmgr_t jmi;
8259
8260 /*
8261 * 5330262
8262 *
8263 * We're working around LoginWindow and the WindowServer.
8264 *
8265 * In practice, there is only one LoginWindow session. Unfortunately, for certain
8266 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
8267 * spawns a replacement loginwindow session before cleaning up the previous one.
8268 *
8269 * We're going to use the creation of a new LoginWindow context as a clue that the
8270 * previous LoginWindow context is on the way out and therefore we should just
8271 * kick-start the shutdown of it.
8272 */
8273
8274 SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
8275 if (unlikely(jmi->shutting_down)) {
8276 continue;
8277 } else if (strcasecmp(jmi->name, session_type) == 0) {
8278 jobmgr_shutdown(jmi);
8279 break;
8280 }
8281 }
8282 }
8283
8284 jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
8285 strcpy(j->mgr->name_init, session_type);
8286
8287 if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
8288 j2->audit_session = audit_session;
8289 job_assumes(j, job_dispatch(j2, true));
8290 kr = BOOTSTRAP_SUCCESS;
8291 }
8292
8293 return kr;
8294 }
8295
/* MIG handler: move an anonymous job into another session's job manager,
 * creating that manager as an implicit subset if needed. Not permitted in
 * the system (PID 1) bootstrap. Returns the destination manager's port.
 */
kern_return_t
job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t audit_session, mach_port_t *new_bsport)
{
	job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);

	if( !job_assumes(j, pid1_magic == false) ) {
		job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if( !j->anonymous ) {
		job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
	if( target_jm == j->mgr ) {
		job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
		*new_bsport = target_jm->jm_port;
		return BOOTSTRAP_SUCCESS;
	}

	/* No such session yet: create it as an implicit subset of ours. */
	if( !target_jm ) {
		target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, audit_session);
		if( !target_jm ) {
			/* Creation failed; drop the session reference we were given. */
			mach_port_deallocate(mach_task_self(), audit_session);
		} else {
			target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
		}
	}

	if( !job_assumes(j, target_jm != NULL) ) {
		job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Remove the job from it's current job manager. */
	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, pid_hash_sle);

	/* Remember whether the job was on the old manager's global-env list so
	 * the membership can be re-established below (ji non-NULL iff found). */
	job_t ji = NULL, jit = NULL;
	LIST_FOREACH_SAFE( ji, &j->mgr->global_env_jobs, global_env_sle, jit ) {
		if( ji == j ) {
			LIST_REMOVE(ji, global_env_sle);
			break;
		}
	}

	/* Put the job into the target job manager. */
	LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
	LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

	if( ji ) {
		LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
	}

	/* Move our Mach services over if we're not in a flat namespace. */
	if( !g_flat_mach_namespace && !SLIST_EMPTY(&j->machservices) ) {
		struct machservice *msi = NULL, *msit = NULL;
		SLIST_FOREACH_SAFE( msi, &j->machservices, sle, msit ) {
			LIST_REMOVE(msi, name_hash_sle);
			LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}

	j->mgr = target_jm;

	if( !j->holds_ref ) {
		/* Anonymous jobs which move around are particularly interesting to us, so we want to
		 * stick around while they're still around.
		 * For example, login calls into the PAM launchd module, which moves the process into
		 * the StandardIO session by default. So we'll hold a reference on that job to prevent
		 * ourselves from going away.
		 */
		j->holds_ref = true;
		runtime_add_ref();
	}

	*new_bsport = target_jm->jm_port;

	return KERN_SUCCESS;
}
8378
8379 kern_return_t
8380 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
8381 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
8382 mach_port_array_t *portsp, unsigned int *ports_cnt)
8383 {
8384 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
8385 mach_port_array_t ports = NULL;
8386 unsigned int cnt = 0, cnt2 = 0;
8387 size_t packed_size;
8388 struct machservice *ms;
8389 jobmgr_t jm;
8390 job_t ji;
8391
8392 if (!launchd_assumes(j != NULL)) {
8393 return BOOTSTRAP_NO_MEMORY;
8394 }
8395
8396 jm = j->mgr;
8397
8398 if (unlikely(!pid1_magic)) {
8399 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
8400 return BOOTSTRAP_NOT_PRIVILEGED;
8401 }
8402 if (unlikely(jobmgr_parent(jm) == NULL)) {
8403 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
8404 return BOOTSTRAP_NOT_PRIVILEGED;
8405 }
8406 if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
8407 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
8408 return BOOTSTRAP_NOT_PRIVILEGED;
8409 }
8410 if (unlikely(!j->anonymous)) {
8411 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
8412 return BOOTSTRAP_NOT_PRIVILEGED;
8413 }
8414
8415 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
8416
8417 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
8418 if (!job_assumes(j, outdata_obj_array)) {
8419 goto out_bad;
8420 }
8421
8422 *outdataCnt = 20 * 1024 * 1024;
8423 mig_allocate(outdata, *outdataCnt);
8424 if (!job_assumes(j, *outdata != 0)) {
8425 return 1;
8426 }
8427
8428 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
8429 if (!ji->anonymous) {
8430 continue;
8431 }
8432 SLIST_FOREACH(ms, &ji->machservices, sle) {
8433 cnt++;
8434 }
8435 }
8436
8437 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
8438 if (!job_assumes(j, ports != NULL)) {
8439 goto out_bad;
8440 }
8441
8442 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
8443 if (!ji->anonymous) {
8444 continue;
8445 }
8446
8447 SLIST_FOREACH(ms, &ji->machservices, sle) {
8448 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
8449 job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
8450 } else {
8451 goto out_bad;
8452 }
8453
8454 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
8455 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
8456 } else {
8457 goto out_bad;
8458 }
8459
8460 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
8461 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
8462 } else {
8463 goto out_bad;
8464 }
8465
8466 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
8467 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
8468 } else {
8469 goto out_bad;
8470 }
8471
8472 ports[cnt2] = machservice_port(ms);
8473
8474 /* Increment the send right by one so we can shutdown the jobmgr cleanly */
8475 jobmgr_assumes(jm, (errno = mach_port_mod_refs(mach_task_self(), ports[cnt2], MACH_PORT_RIGHT_SEND, 1)) == 0);
8476 cnt2++;
8477 }
8478 }
8479
8480 job_assumes(j, cnt == cnt2);
8481
8482 runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
8483 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
8484 if (!job_assumes(j, packed_size != 0)) {
8485 goto out_bad;
8486 }
8487
8488 launch_data_free(outdata_obj_array);
8489
8490 *portsp = ports;
8491 *ports_cnt = cnt;
8492
8493 *reqport = jm->req_port;
8494 *rcvright = jm->jm_port;
8495
8496 jm->req_port = 0;
8497 jm->jm_port = 0;
8498
8499 workaround_5477111 = j;
8500
8501 jobmgr_shutdown(jm);
8502
8503 return BOOTSTRAP_SUCCESS;
8504
8505 out_bad:
8506 if (outdata_obj_array) {
8507 launch_data_free(outdata_obj_array);
8508 }
8509 if (*outdata) {
8510 mig_deallocate(*outdata, *outdataCnt);
8511 }
8512 if (ports) {
8513 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
8514 }
8515
8516 return BOOTSTRAP_NO_MEMORY;
8517 }
8518
kern_return_t
job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
{
	/* MIG handler: create a new Mach bootstrap subset beneath the caller's
	 * job manager and return its bootstrap port in *subsetportp.
	 */
	int bsdepth = 0;
	jobmgr_t jmr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	jmr = j->mgr;

	/* Count how many ancestors the caller's manager already has. */
	while ((jmr = jobmgr_parent(jmr)) != NULL) {
		bsdepth++;
	}

	/* Since we use recursion, we need an artificial depth for subsets */
	if (unlikely(bsdepth > 100)) {
		job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Name the subset after the requesting job and port, for debugging. */
	char name[NAME_MAX];
	snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->audit_session)) != NULL)) {
		if (unlikely(requestorport == MACH_PORT_NULL)) {
			/* A null requestor port is the caller's fault, not an allocation failure. */
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		return BOOTSTRAP_NO_MEMORY;
	}

	*subsetportp = jmr->jm_port;
	jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;

	/* A job could create multiple subsets, so only add a reference the first time
	 * it does so we don't have to keep a count.
	 */
	if( j->anonymous && !j->holds_ref ) {
		j->holds_ref = true;
		runtime_add_ref();
	}

	job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
	return BOOTSTRAP_SUCCESS;
}
8565
8566 kern_return_t
8567 job_mig_embedded_wait(job_t j, name_t targetlabel, integer_t *waitstatus)
8568 {
8569 job_t otherj;
8570
8571 if (!launchd_assumes(j != NULL)) {
8572 return BOOTSTRAP_NO_MEMORY;
8573 }
8574
8575 if (unlikely(!(otherj = job_find(targetlabel)))) {
8576 return BOOTSTRAP_UNKNOWN_SERVICE;
8577 }
8578
8579 *waitstatus = j->last_exit_status;
8580
8581 return 0;
8582 }
8583
8584 kern_return_t
8585 job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, mach_port_t *out_name_port, mach_port_t *obsrvr_port, unsigned int flags)
8586 {
8587 struct ldcred *ldc = runtime_get_caller_creds();
8588 job_t otherj;
8589
8590 if (!launchd_assumes(j != NULL)) {
8591 return BOOTSTRAP_NO_MEMORY;
8592 }
8593
8594 if (unlikely(!(otherj = job_find(targetlabel)))) {
8595 return BOOTSTRAP_UNKNOWN_SERVICE;
8596 }
8597
8598 #if TARGET_OS_EMBEDDED
8599 bool allow_non_root_kickstart = j->username && otherj->username && ( strcmp(j->username, otherj->username) == 0 );
8600 #else
8601 bool allow_non_root_kickstart = false;
8602 #endif
8603
8604 if( ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart ) {
8605 return BOOTSTRAP_NOT_PRIVILEGED;
8606 }
8607
8608 if( otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC) ) {
8609 return BOOTSTRAP_SERVICE_ACTIVE;
8610 }
8611
8612 otherj->stall_before_exec = ( flags & VPROCFLAG_STALL_JOB_EXEC );
8613 otherj = job_dispatch(otherj, true);
8614
8615 if (!job_assumes(j, otherj && otherj->p)) {
8616 /* <rdar://problem/6787083> Clear this flag if we failed to start the job. */
8617 otherj->stall_before_exec = false;
8618 return BOOTSTRAP_NO_MEMORY;
8619 }
8620
8621 /* If any of these proceeding steps fail, we return an error to the client.
8622 * the problem is that, if the client has requested the job be stalled before
8623 * exec(2), the client won't be able to uncork the fork(2), leaving the job
8624 * forever stalled until the client tries again and we successfully start
8625 * the job.
8626 *
8627 * See <rdar://problem/6787083> for more about the implications.
8628 *
8629 * Fortunately, these next actions should pretty much never fail. In the
8630 * future, we should look at cleaning up after these failures if the job
8631 * was started in a stalled state.
8632 */
8633
8634 kern_return_t kr = task_name_for_pid(mach_task_self(), otherj->p, out_name_port);
8635 if (!job_assumes(j, kr == 0)) {
8636 return kr;
8637 }
8638
8639 if (!job_setup_machport(otherj)) {
8640 return BOOTSTRAP_NO_MEMORY;
8641 }
8642
8643 *obsrvr_port = otherj->j_port;
8644 *out_pid = otherj->p;
8645
8646 return 0;
8647 }
8648
8649 kern_return_t
8650 job_mig_wait(job_t j, mach_port_t srp, integer_t *waitstatus)
8651 {
8652 #if 0
8653 if (!launchd_assumes(j != NULL)) {
8654 return BOOTSTRAP_NO_MEMORY;
8655 }
8656 return job_handle_mpm_wait(j, srp, waitstatus);
8657 #else
8658 if( false ) {
8659 /* To make the compiler happy. */
8660 job_handle_mpm_wait(NULL, MACH_PORT_NULL, NULL);
8661 }
8662 struct ldcred *ldc = runtime_get_caller_creds();
8663 job_t calling_j = jobmgr_find_by_pid(j->mgr, ldc->pid, true);
8664
8665 return job_mig_wait2(calling_j, j, srp, waitstatus, true);
8666 #endif
8667 }
8668
kern_return_t
job_mig_wait2(job_t j, job_t target_j, mach_port_t srp, integer_t *status, boolean_t legacy)
{
	/* MIG handler: wait for target_j to exit and deliver its exit status on
	 * reply port srp. Answers immediately with the last recorded status when
	 * the target isn't running (or the legacy special case below applies);
	 * otherwise parks srp on the target's exit-watcher list and defers the
	 * reply (MIG_NO_REPLY), which waiting4exit_delete() later fulfills.
	 */
	if( !launchd_assumes(j != NULL) ) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if( !launchd_assumes(target_j != NULL) ) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if( !launchd_assumes(status != NULL) ) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* See rdar://problem/7084138 for why we do the second part of this check.
	 * Basically, since Finder, Dock and SystemUIServer are now real launchd
	 * jobs, they don't get removed after exiting, like legacy LaunchServices
	 * jobs do. So there's a race. coreservicesd came in asking for the exit
	 * status after we'd relaunched Finder, so Finder's PID isn't 0.
	 *
	 * So we check to make sure the target job isn't a LaunchServices job and
	 * that the request is coming through the legacy path (mpm_wait()). If so,
	 * we return the last exit status, regardless of the current PID value.
	 */
	if( target_j->p == 0 || (!target_j->legacy_LS_job && legacy) ) {
		*status = target_j->last_exit_status;
		return BOOTSTRAP_SUCCESS;
	}

	if( !job_assumes(j, waiting4exit_new(target_j, srp, legacy) == true) ) {
		return BOOTSTRAP_NO_MEMORY;
	}

	return MIG_NO_REPLY;
}
8703
kern_return_t
job_mig_uncork_fork(job_t j)
{
	/* MIG handler: release a job stalled between fork(2) and exec(2)
	 * (see VPROCFLAG_STALL_JOB_EXEC in job_mig_kickstart / job_mig_spawn).
	 */
	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!j->stall_before_exec)) {
		job_log(j, LOG_WARNING, "Attempt to uncork a job that isn't in the middle of a fork().");
		return 1;
	}

	job_uncork_fork(j);
	j->stall_before_exec = false;
	return 0;
}
8720
kern_return_t
job_mig_spawn(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t audit_session, pid_t *child_pid, mach_port_t *obsvr_port)
{
	/* MIG handler: spawn a transient (legacy LaunchServices-style) job from a
	 * packed launch_data job dictionary in indata. On success, returns the
	 * child's PID and an observer port. The new job is dispatched immediately
	 * and stalls before exec(2) if it requested wait-for-debugger.
	 */
	launch_data_t input_obj = NULL;
	size_t data_offset = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	/* In PID 1, non-root callers are redirected to their per-user launchd. */
	if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (!job_assumes(j, (input_obj = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
	if( !jobmgr_assumes(j->mgr, target_jm != NULL) ) {
		jobmgr_log(j->mgr, LOG_NOTICE, "%s() can't find its session!", __func__);
		return 1;
	}

	/* target_jm is non-NULL here, so the ?: fallback is never taken. */
	jr = jobmgr_import2(target_jm ?: j->mgr, input_obj);

	if (!job_assumes(j, jr != NULL)) {
		switch (errno) {
		case EEXIST:
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	if (pid1_magic) {
		jr->mach_uid = ldc->uid;
	}

	/* Mark the job as legacy LaunchServices: it abandons its process group,
	 * and a wait-for-debugger request becomes a stall-before-exec so the
	 * caller can attach, then uncork the fork. */
	jr->legacy_LS_job = true;
	jr->abandon_pg = true;
	jr->stall_before_exec = jr->wait4debugger;
	jr->wait4debugger = false;
	jr->audit_session = audit_session;
	uuid_clear(jr->expected_audit_uuid);

	jr = job_dispatch(jr, true);

	if (!job_assumes(j, jr != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_setup_machport(jr)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);

	*child_pid = jr->p;
	*obsvr_port = jr->j_port;

	/* Success: release the out-of-line input buffer.
	 * NOTE(review): the error paths above return without deallocating indata —
	 * presumably the MIG dispatcher cleans up on failure; confirm against the
	 * generated server stub. */
	mig_deallocate(indata, indataCnt);

	return BOOTSTRAP_SUCCESS;
}
8810
void
jobmgr_init(bool sflag)
{
	/* Bootstrap the job-manager hierarchy: create the root job manager
	 * (System session under PID 1, Background otherwise) and set up the
	 * "no hang" descriptor watched via kqueue. sflag is forwarded to
	 * jobmgr_new() (single-user mode flag from launchd's command line).
	 */
	const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
	SLIST_INIT(&s_curious_jobs);
	LIST_INIT(&s_needing_sessions);

	launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);

	uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
	s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
	/* NOTE(review): the likely() hint asserts /dev/autofs_nowait is expected
	 * to be absent; only the /dev fallback registers a vnode kevent — confirm
	 * both are intentional. */
	if( likely(s_no_hang_fd == -1) ) {
		if( jobmgr_assumes(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK)) != -1) ) {
			jobmgr_assumes(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr) != -1);
		}
	}
	s_no_hang_fd = _fd(s_no_hang_fd);
}
8829
size_t
our_strhash(const char *s)
{
	/* djb2 string hash, first reported by Dan Bernstein in comp.lang.c:
	 * h = h * 33 + c, seeded with 5381.
	 */
	size_t h = 5381;

	for (; *s != '\0'; s++) {
		h = ((h << 5) + h) + (size_t)*s; /* h * 33 + current byte */
	}

	return h;
}
8845
size_t
hash_label(const char *label)
{
	/* Bucket index into the per-manager job-label hash table. */
	return our_strhash(label) % LABEL_HASH_SIZE;
}
8851
size_t
hash_ms(const char *msstr)
{
	/* Bucket index into the per-manager Mach-service-name hash table. */
	return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
}
8857
bool
waiting4removal_new(job_t j, mach_port_t rp)
{
	/* Record reply port rp as waiting for job j's removal; the deferred MIG
	 * reply is sent later by waiting4removal_delete(). Returns false on
	 * allocation failure.
	 */
	struct waiting_for_removal *w4r;

	if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
		return false;
	}

	w4r->reply_port = rp;

	SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);

	return true;
}
8873
void
waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
{
	/* Fulfill the deferred send_signal reply (status 0), unlink the watcher
	 * from j's removal list, and free it.
	 */
	job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);

	SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);

	free(w4r);
}
8883
bool
waiting4exit_new(job_t j, mach_port_t rp, bool legacy)
{
	/* Record reply port rp as waiting for job j to exit. legacy selects which
	 * MIG reply routine waiting4exit_delete() will use later. Returns false
	 * on allocation failure.
	 */
	struct waiting_for_exit *w4e = NULL;
	if( !job_assumes(j, (w4e = malloc(sizeof(struct waiting_for_exit))) != NULL) ) {
		return false;
	}

	w4e->rp = rp;
	w4e->legacy = legacy;
	LIST_INSERT_HEAD(&j->exit_watchers, w4e, sle);

	return true;
}
8898
void
waiting4exit_delete(job_t j, struct waiting_for_exit *w4e)
{
	/* Fulfill the deferred wait with j's last exit status — via the modern
	 * wait2 reply or the legacy mpm_wait reply — then unlink and free the
	 * watcher.
	 */
	if( !w4e->legacy ) {
		job_assumes(j, job_mig_wait2_reply(w4e->rp, KERN_SUCCESS, j->last_exit_status, false) == KERN_SUCCESS);
	} else {
		job_assumes(j, job_mig_wait_reply(w4e->rp, KERN_SUCCESS, j->last_exit_status) == KERN_SUCCESS);
	}

	LIST_REMOVE(w4e, sle);

	free(w4e);
}
8912
size_t
get_kern_max_proc(void)
{
	/* Query kern.maxproc via sysctl(2); returns the fallback of 100 if the
	 * call fails (max is left untouched on error).
	 */
	int mib[] = { CTL_KERN, KERN_MAXPROC };
	int max = 100;
	size_t max_sz = sizeof(max);

	launchd_assumes(sysctl(mib, 2, &max, &max_sz, NULL, 0) != -1);

	return max;
}
8924
8925 /* See rdar://problem/6271234 */
8926 void
8927 eliminate_double_reboot(void)
8928 {
8929 if( unlikely(!pid1_magic) ) {
8930 return;
8931 }
8932
8933 struct stat sb;
8934 const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
8935 char *try_again = "Will try again at next boot.";
8936 int result = ~0;
8937
8938 if( unlikely(stat(argv[1], &sb) != -1) ) {
8939 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
8940
8941 int wstatus;
8942 pid_t p;
8943
8944 jobmgr_assumes(root_jobmgr, (errno = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ)) == 0);
8945
8946 if (errno) {
8947 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script! %s", try_again);
8948 goto out;
8949 }
8950
8951 if( !jobmgr_assumes(root_jobmgr, waitpid(p, &wstatus, 0) != -1) ) {
8952 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't confirm that deferred install script exited successfully! %s", try_again);
8953 goto out;
8954 }
8955
8956 if( jobmgr_assumes(root_jobmgr, WIFEXITED(wstatus) != 0) ) {
8957 if( jobmgr_assumes(root_jobmgr, (result = WEXITSTATUS(wstatus)) == EXIT_SUCCESS) ) {
8958 jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
8959 } else {
8960 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script exited with status %d. %s", WEXITSTATUS(wstatus), try_again);
8961 }
8962 } else {
8963 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Confirmed that deferred install script exited, but couldn't confirm that it was successful. %s", try_again);
8964 }
8965 }
8966 out:
8967 if( result == 0 ) {
8968 /* If the unlink(2) was to fail, it would be most likely fail with EBUSY. All the other
8969 * failure cases for unlink(2) don't apply when we're running under PID 1 and have verified
8970 * that the file exists. Outside of someone deliberately messing with us (like if /etc/rc.deferredinstall
8971 * is actually a looping sym-link or a mount point for a filesystem) and I/O errors, we should be good.
8972 */
8973 if( !jobmgr_assumes(root_jobmgr, unlink(argv[1]) != -1) ) {
8974 jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script couldn't be removed!");
8975 }
8976 }
8977 }
8978
8979 static void
8980 simulate_pid1_crash(void)
8981 {
8982 if( pid1_magic && g_simulate_pid1_crash ) {
8983 runtime_syslog(LOG_EMERG | LOG_CONSOLE, "About to simulate a crash.");
8984 raise(SIGSEGV);
8985 }
8986 }
8987
void
jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
{
	/* Dictionary-iteration callback: apply one Jetsam key/value pair to job j,
	 * and put the job on its manager's jetsam list the first time any Jetsam
	 * property is processed for it.
	 */
	job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
	if( strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER ) {
		j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);
		job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
	} else if( strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER ) {
		j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
		job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
	} else if( strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0 ) {
		/* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
		 * You can't set this in a plist.
		 */
	} else if( strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0 ) {
		/* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
		 * complain about it.
		 */
	} else {
		job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
	}

	/* NOTE(review): this runs even when the key was unknown, so iterating any
	 * dictionary marks the job as having Jetsam properties — confirm intended. */
	if( unlikely(!j->jetsam_properties) ) {
		j->jetsam_properties = true;
		LIST_INSERT_HEAD(&j->mgr->jetsam_jobs, j, jetsam_sle);
		j->mgr->jetsam_jobs_cnt++;
	}
}
9016
int
launchd_set_jetsam_priorities(launch_data_t priorities)
{
	/* Apply an array of Jetsam property dictionaries (one per job label) to
	 * the matching jobs, then push a priority-sorted list of the running
	 * Jetsam jobs' PIDs to the kernel through the
	 * kern.memorystatus_priority_list sysctl. Returns 0 or an errno value.
	 */
	if( !launchd_assumes(launch_data_get_type(priorities) == LAUNCH_DATA_ARRAY) ) {
		return EINVAL;
	}

	jobmgr_t jm = NULL;
#if !TARGET_OS_EMBEDDED
	/* For testing. */
	jm = jobmgr_find_by_name(root_jobmgr, VPROCMGR_SESSION_AQUA);
	if( !launchd_assumes(jm != NULL) ) {
		return EINVAL;
	}
#else
	/* Since this is for embedded, we can assume that the root job manager holds the Jetsam jobs. */
	jm = root_jobmgr;

	if( !g_embedded_privileged_action ) {
		return EPERM;
	}
#endif

	size_t npris = launch_data_array_get_count(priorities);

	/* Pass 1: apply each dictionary's properties to the job it names. */
	job_t ji = NULL;
	size_t i = 0;
	for( i = 0; i < npris; i++ ) {
		launch_data_t ldi = launch_data_array_get_index(priorities, i);
		if( !launchd_assumes(launch_data_get_type(ldi) == LAUNCH_DATA_DICTIONARY) ) {
			continue;
		}

		launch_data_t label = NULL;
		if( !launchd_assumes(label = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMLABEL)) ) {
			continue;
		}
		const char *_label = launch_data_get_string(label);

		ji = job_find(_label);
		if( !launchd_assumes(ji != NULL) ) {
			continue;
		}

		launch_data_dict_iterate(ldi, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, ji);

		launch_data_t frontmost = NULL;
		if( (frontmost = launch_data_dict_lookup(ldi, LAUNCH_KEY_JETSAMFRONTMOST)) && launch_data_get_type(frontmost) == LAUNCH_DATA_BOOL ) {
			ji->jetsam_frontmost = launch_data_get_bool(frontmost);
		}
	}

	/* Pass 2: collect the currently-running Jetsam jobs (ji->p != 0). */
	i = 0;
	job_t *jobs = (job_t *)calloc(jm->jetsam_jobs_cnt, sizeof(job_t));
	if( launchd_assumes(jobs != NULL) ) {
		LIST_FOREACH( ji, &jm->jetsam_jobs, jetsam_sle ) {
			if( ji->p ) {
				jobs[i] = ji;
				i++;
			}
		}
	}

	size_t totalpris = i;

	int result = EINVAL;

	/* It is conceivable that there could be no Jetsam jobs running. */
	if( totalpris > 0 ) {
		/* Yay blocks! */
		qsort_b((void *)jobs, totalpris, sizeof(job_t), ^ int (const void *lhs, const void *rhs) {
			job_t _lhs = *(job_t *)lhs;
			job_t _rhs = *(job_t *)rhs;
			/* Sort in descending order. (Priority correlates to the soonishness with which you will be killed.) */
			if( _lhs->jetsam_priority > _rhs->jetsam_priority ) {
				return -1;
			} else if( _lhs->jetsam_priority < _rhs->jetsam_priority ) {
				return 1;
			}

			return 0;
		});

		jetsam_priority_entry_t *jpris = (jetsam_priority_entry_t *)calloc(totalpris, sizeof(jetsam_priority_entry_t));
		if( !launchd_assumes(jpris != NULL) ) {
			result = ENOMEM;
		} else {
			/* Flatten the sorted jobs into the kernel's entry format. */
			for( i = 0; i < totalpris; i++ ) {
				jpris[i].pid = jobs[i]->p; /* Subject to time-of-use vs. time-of-check, obviously. */
				jpris[i].flags |= jobs[i]->jetsam_frontmost ? kJetsamFlagsFrontmost : 0;
				jpris[i].hiwat_pages = jobs[i]->jetsam_memlimit;
			}

			launchd_assumes((result = sysctlbyname("kern.memorystatus_priority_list", NULL, NULL, &jpris[0], totalpris * sizeof(jetsam_priority_entry_t))) != -1);
			result = result != 0 ? errno : 0;

			free(jpris);
		}
	}

	if( jobs ) {
		free(jobs);
	}

	return result;
}