]> git.saurik.com Git - apple/launchd.git/blob - launchd/src/launchd_core_logic.c
ea6f608137057572c8662cfc2569297670bdca44
[apple/launchd.git] / launchd / src / launchd_core_logic.c
1 /*
2 * @APPLE_APACHE_LICENSE_HEADER_START@
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * @APPLE_APACHE_LICENSE_HEADER_END@
17 */
18
19 static const char *const __rcs_file_version__ = "$Revision: 23585 $";
20
21 #include "config.h"
22 #include "launchd_core_logic.h"
23
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/mach_time.h>
28 #include <mach/boolean.h>
29 #include <mach/message.h>
30 #include <mach/notify.h>
31 #include <mach/mig_errors.h>
32 #include <mach/mach_traps.h>
33 #include <mach/mach_interface.h>
34 #include <mach/host_info.h>
35 #include <mach/mach_host.h>
36 #include <mach/exception.h>
37 #include <mach/host_reboot.h>
38 #include <sys/types.h>
39 #include <sys/queue.h>
40 #include <sys/event.h>
41 #include <sys/stat.h>
42 #include <sys/ucred.h>
43 #include <sys/fcntl.h>
44 #include <sys/un.h>
45 #include <sys/reboot.h>
46 #include <sys/wait.h>
47 #include <sys/sysctl.h>
48 #include <sys/sockio.h>
49 #include <sys/time.h>
50 #include <sys/resource.h>
51 #include <sys/ioctl.h>
52 #include <sys/mount.h>
53 #include <sys/pipe.h>
54 #include <net/if.h>
55 #include <netinet/in.h>
56 #include <netinet/in_var.h>
57 #include <netinet6/nd6.h>
58 #include <bsm/libbsm.h>
59 #include <unistd.h>
60 #include <signal.h>
61 #include <errno.h>
62 #include <libgen.h>
63 #include <stdio.h>
64 #include <stdlib.h>
65 #include <stdarg.h>
66 #include <stdbool.h>
67 #include <paths.h>
68 #include <pwd.h>
69 #include <grp.h>
70 #include <ttyent.h>
71 #include <dlfcn.h>
72 #include <dirent.h>
73 #include <string.h>
74 #include <ctype.h>
75 #include <glob.h>
76 #include <spawn.h>
77 #if HAVE_SANDBOX
78 #include <sandbox.h>
79 #endif
80 #if HAVE_QUARANTINE
81 #include <quarantine.h>
82 #endif
83
84 #include "liblaunch_public.h"
85 #include "liblaunch_private.h"
86 #include "liblaunch_internal.h"
87 #include "libbootstrap_public.h"
88 #include "libbootstrap_private.h"
89 #include "libvproc_public.h"
90 #include "libvproc_internal.h"
91
92 #include "reboot2.h"
93
94 #include "launchd.h"
95 #include "launchd_runtime.h"
96 #include "launchd_unix_ipc.h"
97 #include "protocol_vproc.h"
98 #include "protocol_vprocServer.h"
99 #include "job_reply.h"
100
101 #define LAUNCHD_MIN_JOB_RUN_TIME 10
102 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
103 #define LAUNCHD_SIGKILL_TIMER 5
104
105
106 #define TAKE_SUBSET_NAME "TakeSubsetName"
107 #define TAKE_SUBSET_PID "TakeSubsetPID"
108 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
109
110 #define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
111
112 extern char **environ;
113
/* A client that asked to be told when a job is finally removed; the
 * reply_port is answered once removal completes. */
struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle;	/* linkage in the job's removal_watchers list */
	mach_port_t reply_port;			/* Mach reply port to answer on removal */
};
118
119 static bool waiting4removal_new(job_t j, mach_port_t rp);
120 static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
121
/* One Mach-service lookup policy entry attached to a job: whether the
 * named service may (allow) or may not be looked up by the job. */
struct mspolicy {
	SLIST_ENTRY(mspolicy) sle;		/* linkage in the job's mspolicies list */
	unsigned int allow:1, per_pid:1;	/* allow vs. deny; per-PID service namespace */
	const char name[0];			/* service name, stored inline past the struct */
};
127
128 static bool mspolicy_new(job_t j, const char *name, bool allow, bool pid_local, bool skip_check);
129 static bool mspolicy_copy(job_t j_to, job_t j_from);
130 static void mspolicy_setup(launch_data_t obj, const char *key, void *context);
131 static bool mspolicy_check(job_t j, const char *name, bool pid_local);
132 static void mspolicy_delete(job_t j, struct mspolicy *msp);
133
/* A Mach service registered by a job: launchd holds the receive right
 * and hands out send rights to clients that look the name up. */
struct machservice {
	SLIST_ENTRY(machservice) sle;			/* linkage in the owning job's machservices list */
	SLIST_ENTRY(machservice) special_port_sle;	/* linkage in the global special_ports list */
	LIST_ENTRY(machservice) name_hash_sle;		/* bucket linkage in the jobmgr's ms_hash */
	LIST_ENTRY(machservice) port_hash_sle;		/* bucket linkage in the global port_hash */
	job_t job;					/* owning job */
	uint64_t bad_perf_cnt;				/* NOTE(review): presumably counts poorly-performing lookups — confirm */
	unsigned int gen_num;				/* generation, presumably bumped when the port is reset — confirm */
	mach_port_name_t port;				/* the service's Mach port */
	unsigned int isActive:1, reset:1, recv:1, hide:1, kUNCServer:1, per_user_hack:1, debug_on_close:1, per_pid:1, special_port_num:10;
	const char name[0];				/* service name, stored inline past the struct */
};
146
147 static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */
148
149 #define PORT_HASH_SIZE 32
150 #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
151
152 static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
153
154 static void machservice_setup(launch_data_t obj, const char *key, void *context);
155 static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
156 static void machservice_resetport(job_t j, struct machservice *ms);
157 static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
158 static void machservice_ignore(job_t j, struct machservice *ms);
159 static void machservice_watch(job_t j, struct machservice *ms);
160 static void machservice_delete(job_t j, struct machservice *, bool port_died);
161 static void machservice_request_notifications(struct machservice *);
162 static mach_port_t machservice_port(struct machservice *);
163 static job_t machservice_job(struct machservice *);
164 static bool machservice_hidden(struct machservice *);
165 static bool machservice_active(struct machservice *);
166 static const char *machservice_name(struct machservice *);
167 static bootstrap_status_t machservice_status(struct machservice *);
168
/* A named group of listening file descriptors owned by a job
 * (e.g. the "Sockets" dictionary from a launchd plist). */
struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;		/* linkage in the job's sockets list */
	int *fds;				/* heap array of fd_cnt descriptors */
	unsigned int junkfds:1, fd_cnt:31;	/* junkfds: fds not to be exported (Bonjour hack) */
	union {
		const char name[0];		/* read-only view of the inline name */
		char name_init[0];		/* writable alias used during initialization */
	};
};
178
179 static bool socketgroup_new(job_t j, const char *name, int *fds, unsigned int fd_cnt, bool junkfds);
180 static void socketgroup_delete(job_t j, struct socketgroup *sg);
181 static void socketgroup_watch(job_t j, struct socketgroup *sg);
182 static void socketgroup_ignore(job_t j, struct socketgroup *sg);
183 static void socketgroup_callback(job_t j);
184 static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
185 static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
186
/* One cron-style StartCalendarInterval entry for a job. */
struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle;	/* linkage in sorted_calendar_events */
	SLIST_ENTRY(calendarinterval) sle;		/* linkage in the job's cal_intervals list */
	job_t job;					/* owning job */
	struct tm when;					/* the cron-like match pattern */
	time_t when_next;				/* next absolute fire time */
};
194
195 static LIST_HEAD(, calendarinterval) sorted_calendar_events;
196
197 static bool calendarinterval_new(job_t j, struct tm *w);
198 static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
199 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
200 static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
201 static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
202 static void calendarinterval_callback(void);
203 static void calendarinterval_sanity_check(void);
204
/* One environment variable (key/value) attached to a job. */
struct envitem {
	SLIST_ENTRY(envitem) sle;	/* linkage in the job's env or global_env list */
	char *value;			/* heap-allocated value string */
	union {
		const char key[0];	/* read-only view of the inline key */
		char key_init[0];	/* writable alias used during initialization */
	};
};
213
214 static bool envitem_new(job_t j, const char *k, const char *v, bool global);
215 static void envitem_delete(job_t j, struct envitem *ei, bool global);
216 static void envitem_setup(launch_data_t obj, const char *key, void *context);
217
/* One resource limit (setrlimit) requested for a job. */
struct limititem {
	SLIST_ENTRY(limititem) sle;			/* linkage in the job's limits list */
	struct rlimit lim;				/* soft/hard values to apply */
	unsigned int setsoft:1, sethard:1, which:30;	/* which: RLIMIT_* selector */
};
223
224 static bool limititem_update(job_t j, int w, rlim_t r);
225 static void limititem_delete(job_t j, struct limititem *li);
226 static void limititem_setup(launch_data_t obj, const char *key, void *context);
227 #if HAVE_SANDBOX
228 static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
229 #endif
230
/* Reasons a KeepAlive/semaphore condition can hold a job alive or demand
 * a start; values start at 1 so zero never reads as a valid reason. */
typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	PATH_EXISTS,
	PATH_MISSING,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
	PATH_CHANGES,
	DIR_NOT_EMPTY,
	// FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
} semaphore_reason_t;
246
/* One KeepAlive condition on a job; for path-based reasons, fd holds the
 * watched descriptor (or -1 when not open — TODO confirm sentinel). */
struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle;	/* linkage in the job's semaphores list */
	semaphore_reason_t why;		/* which condition this entry encodes */
	int fd;				/* descriptor used for vnode watching, when applicable */
	union {
		const char what[0];	/* read-only view of the inline path/label */
		char what_init[0];	/* writable alias used during initialization */
	};
};
256
/* Context handed through launch_data dictionary iteration when building
 * semaphoreitems: which reason to record for true vs. false values. */
struct semaphoreitem_dict_iter_context {
	job_t j;			/* job receiving the new semaphoreitems */
	semaphore_reason_t why_true;	/* reason used when the dict value is true */
	semaphore_reason_t why_false;	/* reason used when the dict value is false */
};
262
263 static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
264 static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
265 static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
266 static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
267 static void semaphoreitem_callback(job_t j, struct kevent *kev);
268 static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
269 static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
270 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
271
272 #define ACTIVE_JOB_HASH_SIZE 32
273 #define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))
274 #define MACHSERVICE_HASH_SIZE 37
275
/* A job manager: one bootstrap namespace (the root manager plus any
 * per-session/sub-bootstrap children), owning a set of jobs and the
 * Mach port clients use to reach it. */
struct jobmgr_s {
	kq_callback kqjobmgr_callback;			/* kevent dispatch target for this manager */
	SLIST_ENTRY(jobmgr_s) sle;			/* linkage in the parent's submgrs list */
	SLIST_HEAD(, jobmgr_s) submgrs;			/* child bootstrap namespaces */
	LIST_HEAD(, job_s) jobs;			/* all jobs owned by this manager */
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];	/* running jobs hashed by PID */
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];	/* services hashed by name */
	mach_port_t jm_port;				/* receive right clients send bootstrap requests to */
	mach_port_t req_port;				/* port of whoever requested this namespace */
	jobmgr_t parentmgr;				/* NULL for the root manager */
	int reboot_flags;				/* flags eventually passed to reboot(2) */
	unsigned int global_on_demand_cnt;		/* jobs that forced peers into on-demand mode */
	unsigned int hopefully_first_cnt;		/* jobs flagged hopefully_exits_first */
	unsigned int normal_active_cnt;			/* NOTE(review): presumably active non-special jobs — confirm */
	unsigned int sent_stop_to_normal_jobs:1, sent_stop_to_hopefully_last_jobs:1, shutting_down:1, session_initialized:1;
	union {
		const char name[0];			/* read-only view of the inline session name */
		char name_init[0];			/* writable alias used during initialization */
	};
};
296
297 #define jobmgr_assumes(jm, e) \
298 (__builtin_expect(!(e), 0) ? jobmgr_log_bug(jm, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
299
300 static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name);
301 static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
302 static jobmgr_t jobmgr_parent(jobmgr_t jm);
303 static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
304 static bool jobmgr_label_test(jobmgr_t jm, const char *str);
305 static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
306 static void jobmgr_log_stray_children(jobmgr_t jm);
307 static void jobmgr_remove(jobmgr_t jm);
308 static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
309 static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
310 static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
311 static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
312 static void job_export_all2(jobmgr_t jm, launch_data_t where);
313 static void jobmgr_callback(void *obj, struct kevent *kev);
314 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
315 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
316 static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
317 static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
318 static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
319 /* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
320 static void jobmgr_log_bug(jobmgr_t jm, const char *rcs_rev, const char *path, unsigned int line, const char *test);
321
322 #define DO_RUSAGE_SUMMATION 0
323
324 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
325
/* The central per-job record: everything launchd knows about one job,
 * from its plist-derived configuration to its live runtime state.
 * The variable-length label is allocated inline at the end. */
struct job_s {
	kq_callback kqjob_callback;	/* kevent dispatch target for this job */
	LIST_ENTRY(job_s) sle;		/* linkage in the owning jobmgr's jobs list */
	LIST_ENTRY(job_s) pid_hash_sle;	/* bucket linkage in active_jobs (by PID) */
	LIST_ENTRY(job_s) label_hash_sle;	/* bucket linkage in label_hash */
	/* Per-job configuration items parsed from the plist / IPC. */
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, mspolicy) mspolicies;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
#if DO_RUSAGE_SUMMATION
	struct rusage ru;		/* accumulated child resource usage (compiled out by default) */
#endif
	cpu_type_t *j_binpref;		/* preferred binary architectures, j_binpref_cnt entries */
	size_t j_binpref_cnt;
	mach_port_t j_port;		/* the job's private bootstrap port */
	mach_port_t wait_reply_port; /* we probably should switch to a list of waiters */
	uid_t mach_uid;			/* UID for legacy mach_init-style jobs */
	jobmgr_t mgr;			/* owning job manager */
	char **argv;			/* exec argument vector (argc entries) */
	char *prog;			/* explicit Program path, if any */
	char *rootdir;			/* chroot target, if any */
	char *workingdir;		/* chdir target, if any */
	char *username;			/* run-as user name, if any */
	char *groupname;		/* run-as group name, if any */
	char *stdoutpath;		/* stdout redirection path, if any */
	char *stderrpath;		/* stderr redirection path, if any */
	char *alt_exc_handler;		/* alternate exception handler job label, if any */
	struct machservice *lastlookup;		/* one-entry lookup cache */
	unsigned int lastlookup_gennum;		/* generation the cached lookup was made at */
#if HAVE_SANDBOX
	char *seatbelt_profile;		/* sandbox profile name */
	uint64_t seatbelt_flags;	/* sandbox_init() flags */
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;		/* opaque quarantine blob, quarantine_data_sz bytes */
	size_t quarantine_data_sz;
#endif
	pid_t p;			/* child PID; 0 when not running */
	int argc;
	int last_exit_status;		/* wait(2)-style status of the last run */
	int forkfd;			/* pipe fd used for the fork/exec stall handshake */
	int log_redirect_fd;		/* fd capturing the child's stdout/stderr for logging */
	int nice;			/* nice(2) value, applied when setnice is set */
	unsigned int timeout;		/* idle/IPC timeout in seconds — confirm units against callers */
	unsigned int exit_timeout;	/* seconds to wait after SIGTERM before escalating */
	int stdout_err_fd;
	uint64_t sent_sigterm_time;	/* mach_absolute_time() when SIGTERM was sent */
	uint64_t start_time;		/* mach_absolute_time() of the last start */
	uint32_t min_run_time;		/* minimum respawn interval, seconds */
	uint32_t start_interval;	/* StartInterval period, seconds; 0 if unused */
	/* Bitfields are split around `mask` — preserve the declaration order. */
	unsigned int checkedin:1, anonymous:1, debug:1, inetcompat:1, inetcompat_wait:1,
			ondemand:1, session_create:1, low_pri_io:1, no_init_groups:1, priv_port_has_senders:1,
			importing_global_env:1, importing_hard_limits:1, setmask:1, legacy_mach_job:1, start_pending:1;
	mode_t mask;			/* umask, applied when setmask is set */
	unsigned int globargv:1, wait4debugger:1, unload_at_exit:1, stall_before_exec:1, only_once:1,
			currently_ignored:1, forced_peers_to_demand_mode:1, setnice:1, hopefully_exits_last:1, removal_pending:1,
			wait4pipe_eof:1, sent_sigkill:1, debug_before_kill:1, weird_bootstrap:1, start_on_mount:1,
			per_user:1, hopefully_exits_first:1, deny_unknown_mslookups:1, unload_at_mig_return:1, abandon_pg:1,
			poll_for_vfs_changes:1, internal_exc_handler:1, deny_job_creation:1;
	const char label[0];		/* unique job label, allocated inline */
};
392
393 #define LABEL_HASH_SIZE 53
394
395 static LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
396 static size_t hash_label(const char *label) __attribute__((pure));
397 static size_t hash_ms(const char *msstr) __attribute__((pure));
398
399
400 #define job_assumes(j, e) \
401 (__builtin_expect(!(e), 0) ? job_log_bug(j, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
402
403 static void job_import_keys(launch_data_t obj, const char *key, void *context);
404 static void job_import_bool(job_t j, const char *key, bool value);
405 static void job_import_string(job_t j, const char *key, const char *value);
406 static void job_import_integer(job_t j, const char *key, long long value);
407 static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
408 static void job_import_array(job_t j, const char *key, launch_data_t value);
409 static void job_import_opaque(job_t j, const char *key, launch_data_t value);
410 static bool job_set_global_on_demand(job_t j, bool val);
411 static const char *job_active(job_t j);
412 static void job_watch(job_t j);
413 static void job_ignore(job_t j);
414 static void job_reap(job_t j);
415 static bool job_useless(job_t j);
416 static bool job_keepalive(job_t j);
417 static void job_start(job_t j);
418 static void job_start_child(job_t j) __attribute__((noreturn));
419 static void job_setup_attributes(job_t j);
420 static bool job_setup_machport(job_t j);
421 static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
422 static void job_postfork_become_user(job_t j);
423 static void job_find_and_blame_pids_with_weird_uids(job_t j);
424 static void job_force_sampletool(job_t j);
425 static void job_setup_exception_port(job_t j, task_t target_task);
426 static void job_reparent_hack(job_t j, const char *where);
427 static void job_callback(void *obj, struct kevent *kev);
428 static void job_callback_proc(job_t j, int flags, int fflags);
429 static void job_callback_timer(job_t j, void *ident);
430 static void job_callback_read(job_t j, int ident);
431 static void job_log_stray_pg(job_t j);
432 static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid);
433 static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv);
434 static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond);
435 static const char *job_prog(job_t j);
436 static jobmgr_t job_get_bs(job_t j);
437 static void job_kill(job_t j);
438 static void job_uncork_fork(job_t j);
439 static void job_log_stdouterr(job_t j);
440 static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
441 static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
442 static void job_log_bug(job_t j, const char *rcs_rev, const char *path, unsigned int line, const char *test);
443 static void job_log_stdouterr2(job_t j, const char *msg, ...);
444 static void job_set_exeception_port(job_t j, mach_port_t port);
445 static kern_return_t job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus);
446
447
448
/* Maps launchd plist resource-limit key names to the corresponding
 * setrlimit(2) RLIMIT_* selector. */
static const struct {
	const char *key;	/* LAUNCH_JOBKEY_RESOURCELIMIT_* plist key */
	int val;		/* matching RLIMIT_* constant */
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
463
464 static time_t cronemu(int mon, int mday, int hour, int min);
465 static time_t cronemu_wday(int wday, int hour, int min);
466 static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
467 static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
468 static bool cronemu_hour(struct tm *wtm, int hour, int min);
469 static bool cronemu_min(struct tm *wtm, int min);
470
471 /* miscellaneous file local functions */
472 static void ensure_root_bkgd_setup(void);
473 static int dir_has_files(job_t j, const char *path);
474 static char **mach_cmd2argv(const char *string);
475 static size_t our_strhash(const char *s) __attribute__((pure));
476 static void extract_rcsid_substr(const char *i, char *o, size_t osz);
477 static void do_first_per_user_launchd_hack(void);
478 static size_t get_kern_max_proc(void);
479 static void do_file_init(void) __attribute__((constructor));
480
481 /* file local globals */
482 static bool do_apple_internal_magic;
483 static size_t total_children;
484 static size_t total_anon_children;
485 static mach_port_t the_exception_server;
486 static bool did_first_per_user_launchd_BootCache_hack;
487 #define JOB_BOOTCACHE_HACK_CHECK(j) (j->per_user && !did_first_per_user_launchd_BootCache_hack && (j->mach_uid >= 500) && (j->mach_uid != (uid_t)-2))
488 static jobmgr_t background_jobmgr;
489 static job_t workaround_5477111;
490 static mach_timebase_info_data_t tbi;
491
492 /* process wide globals */
493 mach_port_t inherited_bootstrap_port;
494 jobmgr_t root_jobmgr;
495
496
497 void
498 job_ignore(job_t j)
499 {
500 struct semaphoreitem *si;
501 struct socketgroup *sg;
502 struct machservice *ms;
503
504 if (j->currently_ignored) {
505 return;
506 }
507
508 job_log(j, LOG_DEBUG, "Ignoring...");
509
510 j->currently_ignored = true;
511
512 if (j->poll_for_vfs_changes) {
513 j->poll_for_vfs_changes = false;
514 job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
515 }
516
517 SLIST_FOREACH(sg, &j->sockets, sle) {
518 socketgroup_ignore(j, sg);
519 }
520
521 SLIST_FOREACH(ms, &j->machservices, sle) {
522 machservice_ignore(j, ms);
523 }
524
525 SLIST_FOREACH(si, &j->semaphores, sle) {
526 semaphoreitem_ignore(j, si);
527 }
528 }
529
530 void
531 job_watch(job_t j)
532 {
533 struct semaphoreitem *si;
534 struct socketgroup *sg;
535 struct machservice *ms;
536
537 if (!j->currently_ignored) {
538 return;
539 }
540
541 job_log(j, LOG_DEBUG, "Watching...");
542
543 j->currently_ignored = false;
544
545 SLIST_FOREACH(sg, &j->sockets, sle) {
546 socketgroup_watch(j, sg);
547 }
548
549 SLIST_FOREACH(ms, &j->machservices, sle) {
550 machservice_watch(j, ms);
551 }
552
553 SLIST_FOREACH(si, &j->semaphores, sle) {
554 semaphoreitem_watch(j, si);
555 }
556 }
557
558 void
559 job_stop(job_t j)
560 {
561 if (!j->p || j->anonymous) {
562 return;
563 }
564
565 job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);
566 j->sent_sigterm_time = mach_absolute_time();
567
568 if (j->exit_timeout) {
569 job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
570 EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j) != -1);
571 }
572
573 job_log(j, LOG_DEBUG, "Sent SIGTERM signal");
574 }
575
/*
 * Serialize job j into a newly allocated launch_data dictionary for
 * export to clients (label, session type, on-demand flag, PID, paths,
 * argv, inetd-compat settings, socket fds and Mach service names).
 * Mach services are exported as MACH_PORT_NULL placeholders, not real
 * port rights. Returns NULL only if the top-level dictionary cannot be
 * allocated; individual entries that fail to allocate are silently
 * omitted. Caller owns the returned object.
 */
launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}
	if ((tmp = launch_data_new_integer(j->last_exit_status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}
	/* PID only makes sense while the job is actually running. */
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (j->argv && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		int i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			/* junkfds groups (Bonjour hack) are deliberately not exported. */
			if (sg->junkfds) {
				continue;
			}
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		/* tmp3 lazily becomes the per-PID services sub-dictionary. */
		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					/* Placeholder port — real rights are never exported here. */
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
685
686 static void
687 jobmgr_log_active_jobs(jobmgr_t jm)
688 {
689 const char *why_active;
690 jobmgr_t jmi;
691 job_t ji;
692
693 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
694 jobmgr_log_active_jobs(jmi);
695 }
696
697 LIST_FOREACH(ji, &jm->jobs, sle) {
698 why_active = job_active(ji);
699
700 job_log(ji, LOG_DEBUG, "%s", why_active ? why_active : "Inactive");
701 }
702
703 }
704
705 static void
706 still_alive_with_check(void)
707 {
708 jobmgr_log(root_jobmgr, LOG_NOTICE, "Still alive with %lu/%lu children", total_children, total_anon_children);
709
710 jobmgr_log_active_jobs(root_jobmgr);
711
712 runtime_closelog(); /* hack to flush logs */
713 }
714
715 jobmgr_t
716 jobmgr_shutdown(jobmgr_t jm)
717 {
718 jobmgr_t jmi, jmn;
719 job_t ji;
720
721 jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));
722
723 jm->shutting_down = true;
724
725 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
726 jobmgr_shutdown(jmi);
727 }
728
729 if (jm->hopefully_first_cnt) {
730 LIST_FOREACH(ji, &jm->jobs, sle) {
731 if (ji->p && ji->hopefully_exits_first) {
732 job_stop(ji);
733 }
734 }
735 }
736
737 if (debug_shutdown_hangs && jm->parentmgr == NULL && getpid() == 1) {
738 runtime_set_timeout(still_alive_with_check, 5);
739 }
740
741 return jobmgr_do_garbage_collection(jm);
742 }
743
/*
 * Tear down a job manager. Recursively removes any (unexpected) surviving
 * sub-managers and remaining jobs, releases its Mach ports, unlinks it from
 * its parent — or, if it IS the root: reboots the machine when we are PID 1,
 * otherwise exits the process. Frees jm; it must not be used afterward.
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removed job manager");

	/* Sub-managers should already be gone; drain them if the invariant broke. */
	if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
		}
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		/* We should only have anonymous jobs left */
		job_assumes(ji, ji->anonymous);
		job_remove(ji);
	}

	if (jm->req_port) {
		jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
	}

	if (jm->jm_port) {
		jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
	}

	if (jm == background_jobmgr) {
		background_jobmgr = NULL;
	}

	if (jm->parentmgr) {
		/* Ordinary case: drop the runtime ref and unlink from the parent. */
		runtime_del_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (getpid() == 1) {
		/* Root manager inside PID 1: removing it means rebooting the system. */
		jobmgr_log(jm, LOG_DEBUG, "About to call: reboot(%s)", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
		runtime_closelog();
	} else {
		/* Root manager of a per-user launchd: just exit. */
		runtime_closelog();
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
792
793 void
794 job_remove(job_t j)
795 {
796 struct waiting_for_removal *w4r;
797 struct calendarinterval *ci;
798 struct semaphoreitem *si;
799 struct socketgroup *sg;
800 struct machservice *ms;
801 struct limititem *li;
802 struct mspolicy *msp;
803 struct envitem *ei;
804
805 if (j->p && j->anonymous) {
806 job_reap(j);
807 } else if (j->p) {
808 job_log(j, LOG_DEBUG, "Removal pended until the job exits");
809
810 if (!j->removal_pending) {
811 j->removal_pending = true;
812 job_stop(j);
813 }
814
815 return;
816 }
817
818 ipc_close_all_with_job(j);
819
820 if (j->forced_peers_to_demand_mode) {
821 job_set_global_on_demand(j, false);
822 }
823
824 if (!job_assumes(j, j->forkfd == 0)) {
825 job_assumes(j, runtime_close(j->forkfd) != -1);
826 }
827
828 if (!job_assumes(j, j->log_redirect_fd == 0)) {
829 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
830 }
831
832 if (j->j_port) {
833 job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
834 }
835
836 if (!job_assumes(j, j->wait_reply_port == MACH_PORT_NULL)) {
837 job_assumes(j, launchd_mport_deallocate(j->wait_reply_port) == KERN_SUCCESS);
838 }
839
840 while ((msp = SLIST_FIRST(&j->mspolicies))) {
841 mspolicy_delete(j, msp);
842 }
843 while ((sg = SLIST_FIRST(&j->sockets))) {
844 socketgroup_delete(j, sg);
845 }
846 while ((ci = SLIST_FIRST(&j->cal_intervals))) {
847 calendarinterval_delete(j, ci);
848 }
849 while ((ei = SLIST_FIRST(&j->env))) {
850 envitem_delete(j, ei, false);
851 }
852 while ((ei = SLIST_FIRST(&j->global_env))) {
853 envitem_delete(j, ei, true);
854 }
855 while ((li = SLIST_FIRST(&j->limits))) {
856 limititem_delete(j, li);
857 }
858 while ((ms = SLIST_FIRST(&j->machservices))) {
859 machservice_delete(j, ms, false);
860 }
861 while ((si = SLIST_FIRST(&j->semaphores))) {
862 semaphoreitem_delete(j, si);
863 }
864 while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
865 waiting4removal_delete(j, w4r);
866 }
867
868 if (j->prog) {
869 free(j->prog);
870 }
871 if (j->argv) {
872 free(j->argv);
873 }
874 if (j->rootdir) {
875 free(j->rootdir);
876 }
877 if (j->workingdir) {
878 free(j->workingdir);
879 }
880 if (j->username) {
881 free(j->username);
882 }
883 if (j->groupname) {
884 free(j->groupname);
885 }
886 if (j->stdoutpath) {
887 free(j->stdoutpath);
888 }
889 if (j->stderrpath) {
890 free(j->stderrpath);
891 }
892 if (j->alt_exc_handler) {
893 free(j->alt_exc_handler);
894 }
895 #if HAVE_SANDBOX
896 if (j->seatbelt_profile) {
897 free(j->seatbelt_profile);
898 }
899 #endif
900 #if HAVE_QUARANTINE
901 if (j->quarantine_data) {
902 free(j->quarantine_data);
903 }
904 #endif
905 if (j->j_binpref) {
906 free(j->j_binpref);
907 }
908 if (j->start_interval) {
909 runtime_del_ref();
910 job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
911 }
912 if (j->poll_for_vfs_changes) {
913 job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
914 }
915
916 kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
917
918 LIST_REMOVE(j, sle);
919 LIST_REMOVE(j, label_hash_sle);
920
921 job_log(j, LOG_DEBUG, "Removed");
922
923 free(j);
924 }
925
926 void
927 socketgroup_setup(launch_data_t obj, const char *key, void *context)
928 {
929 launch_data_t tmp_oai;
930 job_t j = context;
931 unsigned int i, fd_cnt = 1;
932 int *fds;
933
934 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
935 fd_cnt = launch_data_array_get_count(obj);
936 }
937
938 fds = alloca(fd_cnt * sizeof(int));
939
940 for (i = 0; i < fd_cnt; i++) {
941 if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
942 tmp_oai = launch_data_array_get_index(obj, i);
943 } else {
944 tmp_oai = obj;
945 }
946
947 fds[i] = launch_data_get_fd(tmp_oai);
948 }
949
950 socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);
951
952 ipc_revoke_fds(obj);
953 }
954
955 bool
956 job_set_global_on_demand(job_t j, bool val)
957 {
958 if (j->forced_peers_to_demand_mode && val) {
959 return false;
960 } else if (!j->forced_peers_to_demand_mode && !val) {
961 return false;
962 }
963
964 if ((j->forced_peers_to_demand_mode = val)) {
965 j->mgr->global_on_demand_cnt++;
966 } else {
967 j->mgr->global_on_demand_cnt--;
968 }
969
970 if (j->mgr->global_on_demand_cnt == 0) {
971 jobmgr_dispatch_all(j->mgr, false);
972 }
973
974 return true;
975 }
976
bool
job_setup_machport(job_t j)
{
	/* Create the job's bootstrap receive right (j->j_port), register it
	 * with launchd's runtime so the vproc MIG subsystem services it, and
	 * request a MACH_NOTIFY_NO_SENDERS notification on it.
	 *
	 * Returns false with the port torn down if any step fails.
	 */
	mach_msg_size_t mxmsgsz;

	if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
		goto out_bad2;
	}

	if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
		/* Close the receive right before reporting failure. */
		job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
		goto out_bad;
	}

	return true;
out_bad2:
	job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
out_bad:
	return false;
}
1007
1008 job_t
1009 job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
1010 {
1011 const char **argv = (const char **)mach_cmd2argv(cmd);
1012 job_t jr = NULL;
1013
1014 if (!job_assumes(j, argv != NULL)) {
1015 goto out_bad;
1016 }
1017
1018 jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
1019
1020 free(argv);
1021
1022 /* jobs can easily be denied creation during shutdown */
1023 if (!jr) {
1024 goto out_bad;
1025 }
1026
1027 jr->mach_uid = uid;
1028 jr->ondemand = ond;
1029 jr->legacy_mach_job = true;
1030 jr->abandon_pg = true;
1031 jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */
1032
1033 if (!job_setup_machport(jr)) {
1034 goto out_bad;
1035 }
1036
1037 job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");
1038
1039 return jr;
1040
1041 out_bad:
1042 if (jr) {
1043 job_remove(jr);
1044 }
1045 return NULL;
1046 }
1047
1048 kern_return_t
1049 job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus)
1050 {
1051 if (j->p) {
1052 j->wait_reply_port = srp;
1053 return MIG_NO_REPLY;
1054 }
1055
1056 *waitstatus = j->last_exit_status;
1057
1058 return 0;
1059 }
1060
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	/* Wrap an already-running process that launchd did not spawn in a
	 * job_t so it can be tracked like any other job.
	 *
	 * The process is looked up via sysctl() to get its name, state and
	 * parent; a parent job is located (or created anonymously) when that
	 * makes sense. Returns the new job, or NULL if the PID is invalid,
	 * nonexistent, or job creation fails.
	 */
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, anonpid };
	struct kinfo_proc kp;
	size_t len = sizeof(kp);
	const char *zombie = NULL;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;

	if (!jobmgr_assumes(jm, anonpid != 0)) {
		return NULL;
	}

	if (!jobmgr_assumes(jm, anonpid < 100000)) {
		/* The kernel current defines PID_MAX to be 99999, but that define isn't exported */
		return NULL;
	}

	if (!jobmgr_assumes(jm, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
		return NULL;
	}

	/* A short read means the kernel had no record of this PID. */
	if (len != sizeof(kp)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for nonexistent PID: %u", anonpid);
		return NULL;
	}

	if (!jobmgr_assumes(jm, kp.kp_proc.p_comm[0] != '\0')) {
		return NULL;
	}

	/* Zombies are still wrapped, but labeled as such. */
	if (kp.kp_proc.p_stat == SZOMB) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID: %u", anonpid);
		zombie = "zombie";
	}

	/* Find (or create) a job for the parent process, when one can exist. */
	switch (kp.kp_eproc.e_ppid) {
	case 0:
		/* the kernel */
		break;
	case 1:
		if (getpid() != 1) {
			/* we cannot possibly find a parent job_t that is useful in this function */
			break;
		}
		/* fall through */
	default:
		jp = jobmgr_find_by_pid(jm, kp.kp_eproc.e_ppid, true);
		jobmgr_assumes(jm, jp != NULL);
		break;
	}

	/* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
	if ((shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	if (jobmgr_assumes(jm, (jr = job_new(jm, AUTO_PICK_LEGACY_LABEL, zombie ? zombie : kp.kp_proc.p_comm, NULL)) != NULL)) {
		u_int proc_fflags = NOTE_EXEC|NOTE_EXIT /* |NOTE_REAP */;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		/* anonymous process reaping is messy */
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1 && job_assumes(jr, errno == ESRCH)) {
			/* zombies are weird */
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (jp) {
			job_assumes(jr, mspolicy_copy(jr, jp));
		}

		if (shutdown_state && jm->hopefully_first_cnt == 0) {
			job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time");
		}

		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, kp.kp_eproc.e_ppid, jp ? ": " : "", jp ? jp->label : "");
	}

	/* Restore the shutdown flag that was temporarily cleared above. */
	if (shutdown_state) {
		jm->shutting_down = true;
	}

	return jr;
}
1152
job_t
job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
{
	/* Allocate and initialize a new job under manager 'jm'.
	 *
	 * Memory layout: the label is stored in a flexible region carved out
	 * of the same calloc() as the job struct itself, and argv is one
	 * malloc() holding the pointer array followed by the string data.
	 *
	 * Passing AUTO_PICK_LEGACY_LABEL synthesizes a unique label from the
	 * job's own address plus the program's basename. Returns NULL with
	 * errno set on shutdown, bad arguments, or allocation failure.
	 */
	const char *const *argv_tmp = argv;
	char auto_label[1000];
	const char *bn = NULL;
	char *co;
	size_t minlabel_len;
	int i, cc = 0;
	job_t j;

	/* The kevent callback must be the struct's first member (asserted). */
	launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);

	if (jm->shutting_down) {
		errno = EINVAL;
		return NULL;
	}

	if (prog == NULL && argv == NULL) {
		errno = EINVAL;
		return NULL;
	}

	if (label == AUTO_PICK_LEGACY_LABEL) {
		bn = prog ? prog : basename((char *)argv[0]); /* prog for auto labels is kp.kp_kproc.p_comm */
		/* Pointer-width placeholder; replaced with the real %p below. */
		snprintf(auto_label, sizeof(auto_label), "%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", bn);
		label = auto_label;
		/* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
		minlabel_len = strlen(label) + MAXCOMLEN;
	} else {
		minlabel_len = strlen(label);
	}

	/* One allocation: struct plus space for the label (and slack above). */
	j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);

	if (!jobmgr_assumes(jm, j != NULL)) {
		return NULL;
	}

	if (label == auto_label) {
		/* Now that the job's address is known, bake it into the label. */
		snprintf((char *)j->label, strlen(label) + 1, "%p.%s", j, bn);
	} else {
		strcpy((char *)j->label, label);
	}
	j->kqjob_callback = job_callback;
	j->mgr = jm;
	j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
	j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
	j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
	j->currently_ignored = true;
	j->ondemand = true;
	j->checkedin = true;

	if (prog) {
		j->prog = strdup(prog);
		if (!job_assumes(j, j->prog != NULL)) {
			goto out_bad;
		}
	}

	if (argv) {
		/* Count the arguments, then total up their string lengths. */
		while (*argv_tmp++)
			j->argc++;

		for (i = 0; i < j->argc; i++) {
			cc += strlen(argv[i]) + 1;
		}

		/* One allocation: (argc + 1) pointers followed by the strings. */
		j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);

		if (!job_assumes(j, j->argv != NULL)) {
			goto out_bad;
		}

		/* 'co' walks the string area just past the pointer array. */
		co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));

		for (i = 0; i < j->argc; i++) {
			j->argv[i] = co;
			strcpy(co, argv[i]);
			co += strlen(argv[i]) + 1;
		}
		j->argv[i] = NULL;
	}

	LIST_INSERT_HEAD(&jm->jobs, j, sle);
	LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);

	job_log(j, LOG_DEBUG, "Conceived");

	return j;

out_bad:
	if (j->prog) {
		free(j->prog);
	}
	free(j);

	return NULL;
}
1252
1253 job_t
1254 job_import(launch_data_t pload)
1255 {
1256 job_t j = jobmgr_import2(root_jobmgr, pload);
1257
1258 if (j == NULL) {
1259 return NULL;
1260 }
1261
1262 return job_dispatch(j, false);
1263 }
1264
1265 launch_data_t
1266 job_import_bulk(launch_data_t pload)
1267 {
1268 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
1269 job_t *ja;
1270 size_t i, c = launch_data_array_get_count(pload);
1271
1272 ja = alloca(c * sizeof(job_t ));
1273
1274 for (i = 0; i < c; i++) {
1275 if ((ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) {
1276 errno = 0;
1277 }
1278 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
1279 }
1280
1281 for (i = 0; i < c; i++) {
1282 if (ja[i] == NULL) {
1283 continue;
1284 }
1285 job_dispatch(ja[i], false);
1286 }
1287
1288 return resp;
1289 }
1290
void
job_import_bool(job_t j, const char *key, bool value)
{
	/* Import a boolean-valued key from a job's property list. Keys are
	 * dispatched on their first character, then matched case-insensitively;
	 * unrecognized keys are logged and ignored. */
	bool found_key = false;

	switch (key[0]) {
	case 'a':
	case 'A':
		if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
			j->abandon_pg = value;
			found_key = true;
		}
		break;
	case 'k':
	case 'K':
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
			/* KeepAlive == true means the job is NOT on-demand. */
			j->ondemand = !value;
			found_key = true;
		}
		break;
	case 'o':
	case 'O':
		if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
			j->ondemand = value;
			found_key = true;
		}
		break;
	case 'd':
	case 'D':
		if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
			j->debug = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
			/* A true value here is unexpected; job_assumes() logs it. */
			job_assumes(j, !value);
			found_key = true;
		}
		break;
	case 'h':
	case 'H':
		if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
			j->hopefully_exits_last = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSFIRST) == 0) {
			j->hopefully_exits_first = value;
			found_key = true;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
			j->session_create = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
			j->start_on_mount = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
			/* this only does something on Mac OS X 10.4 "Tiger" */
			found_key = true;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
			j->low_pri_io = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
			j->only_once = value;
			found_key = true;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			j->internal_exc_handler = value;
			found_key = true;
		}
		break;
	case 'i':
	case 'I':
		if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
			/* Credential switching is root-only, so ignore otherwise. */
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			j->no_init_groups = !value;
			found_key = true;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
			if (value) {
				/* We don't want value == false to change j->start_pending */
				j->start_pending = true;
			}
			found_key = true;
		}
		break;
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
			j->globargv = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
			j->debug_before_kill = value;
			found_key = true;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
			j->wait4debugger = value;
			found_key = true;
		}
		break;
	default:
		break;
	}

	if (!found_key) {
		job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
	}
}
1414
1415 void
1416 job_import_string(job_t j, const char *key, const char *value)
1417 {
1418 char **where2put = NULL;
1419
1420 switch (key[0]) {
1421 case 'm':
1422 case 'M':
1423 if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
1424 where2put = &j->alt_exc_handler;
1425 }
1426 break;
1427 case 'p':
1428 case 'P':
1429 if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
1430 return;
1431 }
1432 break;
1433 case 'l':
1434 case 'L':
1435 if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
1436 return;
1437 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
1438 return;
1439 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
1440 return;
1441 } else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
1442 job_reparent_hack(j, value);
1443 return;
1444 }
1445 break;
1446 case 'r':
1447 case 'R':
1448 if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
1449 if (getuid() != 0) {
1450 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
1451 return;
1452 }
1453 where2put = &j->rootdir;
1454 }
1455 break;
1456 case 'w':
1457 case 'W':
1458 if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
1459 where2put = &j->workingdir;
1460 }
1461 break;
1462 case 'u':
1463 case 'U':
1464 if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
1465 if (getuid() != 0) {
1466 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
1467 return;
1468 } else if (strcmp(value, "root") == 0) {
1469 return;
1470 }
1471 where2put = &j->username;
1472 }
1473 break;
1474 case 'g':
1475 case 'G':
1476 if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
1477 if (getuid() != 0) {
1478 job_log(j, LOG_WARNING, "Ignored this key: %s", key);
1479 return;
1480 } else if (strcmp(value, "wheel") == 0) {
1481 return;
1482 }
1483 where2put = &j->groupname;
1484 }
1485 break;
1486 case 's':
1487 case 'S':
1488 if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
1489 where2put = &j->stdoutpath;
1490 } else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
1491 where2put = &j->stderrpath;
1492 #if HAVE_SANDBOX
1493 } else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
1494 where2put = &j->seatbelt_profile;
1495 #endif
1496 }
1497 break;
1498 default:
1499 job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
1500 break;
1501 }
1502
1503 if (where2put) {
1504 job_assumes(j, (*where2put = strdup(value)) != NULL);
1505 } else {
1506 job_log(j, LOG_WARNING, "Unknown key: %s", key);
1507 }
1508 }
1509
void
job_import_integer(job_t j, const char *key, long long value)
{
	/* Import an integer-valued plist key. Values destined for unsigned
	 * 32-bit fields are range-checked and ignored (with a warning) when
	 * negative or larger than UINT32_MAX. */
	switch (key[0]) {
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
			} else {
				j->exit_timeout = value;
			}
		}
		break;
	case 'n':
	case 'N':
		if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
			j->nice = value;
			j->setnice = true;
		}
		break;
	case 't':
	case 'T':
		if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
			} else {
				j->timeout = value;
			}
		} else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
			} else {
				j->min_run_time = value;
			}
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
			j->mask = value;
			j->setmask = true;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
			if (value <= 0) {
				job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
			} else {
				/* Paired with the runtime_del_ref() in job_remove();
				 * a periodic start timer is armed for the job. */
				runtime_add_ref();
				j->start_interval = value;

				job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, value, j) != -1);
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
			j->seatbelt_flags = value;
#endif
		}

		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
		break;
	}
}
1585
void
job_import_opaque(job_t j, const char *key, launch_data_t value)
{
	/* Import an opaque (raw bytes) plist value. Only the quarantine-data
	 * key is recognized, and only when quarantine support is compiled in;
	 * all other opaque keys are silently ignored. */
	switch (key[0]) {
	case 'q':
	case 'Q':
#if HAVE_QUARANTINE
		if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
			size_t tmpsz = launch_data_get_opaque_size(value);

			/* Take a private copy of the opaque bytes. */
			if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
				memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
				j->quarantine_data_sz = tmpsz;
			}
		}
#endif
		break;
	default:
		break;
	}
}
1607
1608 static void
1609 policy_setup(launch_data_t obj, const char *key, void *context)
1610 {
1611 job_t j = context;
1612 bool found_key = false;
1613
1614 switch (key[0]) {
1615 case 'd':
1616 case 'D':
1617 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
1618 j->deny_job_creation = launch_data_get_bool(obj);
1619 found_key = true;
1620 }
1621 break;
1622 default:
1623 break;
1624 }
1625
1626 if (unlikely(!found_key)) {
1627 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
1628 }
1629 }
1630
void
job_import_dictionary(job_t j, const char *key, launch_data_t value)
{
	/* Import a dictionary-valued plist key by iterating its sub-entries
	 * with the per-key setup callback that understands them. */
	launch_data_t tmp;

	switch (key[0]) {
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
			launch_data_dict_iterate(value, policy_setup, j);
		}
		break;
	case 'k':
	case 'K':
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
			/* Dictionary form of KeepAlive: per-condition entries. */
			launch_data_dict_iterate(value, semaphoreitem_setup, j);
		}
		break;
	case 'i':
	case 'I':
		if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
			j->inetcompat = true;
			j->abandon_pg = true;
			if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
				j->inetcompat_wait = launch_data_get_bool(tmp);
			}
		}
		break;
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
			launch_data_dict_iterate(value, envitem_setup, j);
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
			/* importing_global_env is set for the duration of the iteration. */
			j->importing_global_env = true;
			launch_data_dict_iterate(value, envitem_setup, j);
			j->importing_global_env = false;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
			launch_data_dict_iterate(value, socketgroup_setup, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
			calendarinterval_new_from_obj(j, value);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
			launch_data_dict_iterate(value, limititem_setup, j);
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
			launch_data_dict_iterate(value, seatbelt_setup_flags, j);
#endif
		}
		break;
	case 'h':
	case 'H':
		if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
			/* importing_hard_limits is set for the duration of the iteration. */
			j->importing_hard_limits = true;
			launch_data_dict_iterate(value, limititem_setup, j);
			j->importing_hard_limits = false;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
			launch_data_dict_iterate(value, machservice_setup, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICELOOKUPPOLICIES) == 0) {
			launch_data_dict_iterate(value, mspolicy_setup, j);
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
		break;
	}
}
1708
void
job_import_array(job_t j, const char *key, launch_data_t value)
{
	/* Import an array-valued plist key. Some keys are extracted earlier
	 * (ProgramArguments in jobmgr_import2()) or intentionally ignored
	 * here (the LimitLoad variants), so they just return. */
	size_t i, value_cnt = launch_data_array_get_count(value);
	const char *str;

	switch (key[0]) {
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
			return;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
			return;
		}
		break;
	case 'q':
	case 'Q':
		if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
			/* Each directory becomes a DIR_NOT_EMPTY semaphore item. */
			for (i = 0; i < value_cnt; i++) {
				str = launch_data_get_string(launch_data_array_get_index(value, i));
				if (job_assumes(j, str != NULL)) {
					semaphoreitem_new(j, DIR_NOT_EMPTY, str);
				}
			}

		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
			/* Each path becomes a PATH_CHANGES semaphore item. */
			for (i = 0; i < value_cnt; i++) {
				str = launch_data_get_string(launch_data_array_get_index(value, i));
				if (job_assumes(j, str != NULL)) {
					semaphoreitem_new(j, PATH_CHANGES, str);
				}
			}
		}
		break;
	case 'b':
	case 'B':
		if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
			/* Bonjour fds reuse the socket-group machinery. */
			socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
			if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
				j->j_binpref_cnt = value_cnt;
				for (i = 0; i < value_cnt; i++) {
					j->j_binpref[i] = launch_data_get_integer(launch_data_array_get_index(value, i));
				}
			}
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
			for (i = 0; i < value_cnt; i++) {
				calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
			}
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
		break;
	}
}
1782
1783 void
1784 job_import_keys(launch_data_t obj, const char *key, void *context)
1785 {
1786 job_t j = context;
1787 launch_data_type_t kind;
1788
1789 if (obj == NULL) {
1790 return;
1791 }
1792
1793 kind = launch_data_get_type(obj);
1794
1795 switch (kind) {
1796 case LAUNCH_DATA_BOOL:
1797 job_import_bool(j, key, launch_data_get_bool(obj));
1798 break;
1799 case LAUNCH_DATA_STRING:
1800 job_import_string(j, key, launch_data_get_string(obj));
1801 break;
1802 case LAUNCH_DATA_INTEGER:
1803 job_import_integer(j, key, launch_data_get_integer(obj));
1804 break;
1805 case LAUNCH_DATA_DICTIONARY:
1806 job_import_dictionary(j, key, obj);
1807 break;
1808 case LAUNCH_DATA_ARRAY:
1809 job_import_array(j, key, obj);
1810 break;
1811 case LAUNCH_DATA_OPAQUE:
1812 job_import_opaque(j, key, obj);
1813 break;
1814 default:
1815 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
1816 break;
1817 }
1818 }
1819
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	/* Validate a job plist and create a job from it.
	 *
	 * The payload must be a dictionary with a string Label. Program and
	 * ProgramArguments are extracted up front so job_new() gets them;
	 * all remaining keys are imported via job_import_keys(). Returns
	 * NULL with errno set to EINVAL (malformed/reserved) or EEXIST
	 * (duplicate label) on rejection.
	 */
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	if (pload == NULL) {
		errno = EINVAL;
		return NULL;
	}

	if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
		errno = EINVAL;
		return NULL;
	}

	if (!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL))) {
		errno = EINVAL;
		return NULL;
	}

	if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
		errno = EINVAL;
		return NULL;
	}

	if (!(label = launch_data_get_string(tmp))) {
		errno = EINVAL;
		return NULL;
	}

	/* Program is optional; a non-string value is simply ignored. */
	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
			(launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		/* Stack-allocated pointer array; job_new() copies the strings. */
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		argv[i] = NULL;
	}

	/* Reject duplicate labels and labels that fail validation. */
	if ((j = job_find(label)) != NULL) {
		errno = EEXIST;
		return NULL;
	} else if (!jobmgr_label_test(jm, label)) {
		errno = EINVAL;
		return NULL;
	}

	if ((j = job_new(jm, label, prog, argv))) {
		launch_data_dict_iterate(pload, job_import_keys, j);
	}

	return j;
}
1898
1899 bool
1900 jobmgr_label_test(jobmgr_t jm, const char *str)
1901 {
1902 char *endstr = NULL;
1903 const char *ptr;
1904
1905 if (str[0] == '\0') {
1906 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
1907 return false;
1908 }
1909
1910 for (ptr = str; *ptr; ptr++) {
1911 if (iscntrl(*ptr)) {
1912 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
1913 return false;
1914 }
1915 }
1916
1917 strtoll(str, &endstr, 0);
1918
1919 if (str != endstr) {
1920 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to begin with numbers: %s", str);
1921 return false;
1922 }
1923
1924 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
1925 (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
1926 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
1927 return false;
1928 }
1929
1930 return true;
1931 }
1932
1933 job_t
1934 job_find(const char *label)
1935 {
1936 job_t ji;
1937
1938 LIST_FOREACH(ji, &label_hash[hash_label(label)], label_hash_sle) {
1939 if (ji->removal_pending) {
1940 continue; /* 5351245 */
1941 } else if (ji->mgr->shutting_down) {
1942 continue; /* 5488633 */
1943 }
1944
1945 if (strcmp(ji->label, label) == 0) {
1946 return ji;
1947 }
1948 }
1949
1950 errno = ESRCH;
1951 return NULL;
1952 }
1953
1954 job_t
1955 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
1956 {
1957 job_t ji = NULL;
1958
1959 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
1960 if (ji->p == p) {
1961 break;
1962 }
1963 }
1964
1965 if (ji) {
1966 return ji;
1967 } else if (create_anon) {
1968 return job_new_anonymous(jm, p);
1969 } else {
1970 return NULL;
1971 }
1972 }
1973
1974 job_t
1975 job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
1976 {
1977 jobmgr_t jmi;
1978 job_t ji;
1979
1980 if (jm->jm_port == mport) {
1981 jobmgr_assumes(jm, (ji = jobmgr_find_by_pid(jm, upid, true)) != NULL);
1982 return ji;
1983 }
1984
1985 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
1986 job_t jr;
1987
1988 if ((jr = job_mig_intran2(jmi, mport, upid))) {
1989 return jr;
1990 }
1991 }
1992
1993 LIST_FOREACH(ji, &jm->jobs, sle) {
1994 if (ji->j_port == mport) {
1995 return ji;
1996 }
1997 }
1998
1999 return NULL;
2000 }
2001
job_t
job_mig_intran(mach_port_t p)
{
	/* MIG intran routine: translate the port an inbound request arrived
	 * on into the job_t it belongs to, using the caller's audit creds.
	 * On failure, the caller's identity (PID/UID/EUID plus process name
	 * looked up via sysctl) is logged to aid debugging. */
	struct ldcred ldc;
	job_t jr;

	runtime_get_caller_creds(&ldc);

	jr = job_mig_intran2(root_jobmgr, p, ldc.pid);

	if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		mib[3] = ldc.pid;

		if (jobmgr_assumes(root_jobmgr, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) && jobmgr_assumes(root_jobmgr, len == sizeof(kp))) {
			jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc.pid, ldc.uid, ldc.euid, p, kp.kp_proc.p_comm);
		}
	}

	return jr;
}
2026
2027 job_t
2028 job_find_by_service_port(mach_port_t p)
2029 {
2030 struct machservice *ms;
2031
2032 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
2033 if (ms->recv && (ms->port == p)) {
2034 return ms->job;
2035 }
2036 }
2037
2038 return NULL;
2039 }
2040
void
job_mig_destructor(job_t j)
{
	/*
	 * 5477111
	 *
	 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
	 */

	/* Honor a deferred unload requested while the MIG request was in
	 * flight (unload_at_mig_return is set in job_new_anonymous()). */
	if (j && j != workaround_5477111 && j->unload_at_mig_return) {
		job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
		job_remove(j);
	}

	workaround_5477111 = NULL;

	calendarinterval_sanity_check();
}
2059
2060 void
2061 job_export_all2(jobmgr_t jm, launch_data_t where)
2062 {
2063 jobmgr_t jmi;
2064 job_t ji;
2065
2066 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
2067 job_export_all2(jmi, where);
2068 }
2069
2070 LIST_FOREACH(ji, &jm->jobs, sle) {
2071 launch_data_t tmp;
2072
2073 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
2074 launch_data_dict_insert(where, tmp, ji->label);
2075 }
2076 }
2077 }
2078
2079 launch_data_t
2080 job_export_all(void)
2081 {
2082 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
2083
2084 if (launchd_assumes(resp != NULL)) {
2085 job_export_all2(root_jobmgr, resp);
2086 }
2087
2088 return resp;
2089 }
2090
void
job_log_stray_pg(job_t j)
{
	/* Log every process still in the dead job's process group. The
	 * buffer is sized for the kernel's maximum process count so the
	 * KERN_PROC_PGRP sysctl cannot come up short. */
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PGRP, j->p };
	size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;

#if TARGET_OS_EMBEDDED
	if (!do_apple_internal_magic) {
		return;
	}
#endif

	if (!job_assumes(j, (kp = malloc(len)) != NULL)) {
		return;
	}
	if (!job_assumes(j, sysctl(mib, 4, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	/* sysctl() shrank len to the number of bytes actually returned. */
	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = kp[i].kp_proc.p_pid;
		pid_t pp_i = kp[i].kp_eproc.e_ppid;
		const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
		const char *n = kp[i].kp_proc.p_comm;

		/* Skip the dead job itself; PIDs 0 and 1 should never appear. */
		if (p_i == j->p) {
			continue;
		} else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
			continue;
		}

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u %s", z, p_i, pp_i, n);
	}

out:
	free(kp);
}
2131
/*
 * Reap a job whose process has exited: collect its exit status and rusage,
 * tear down per-process file descriptors and ports, update child-count
 * bookkeeping, and record the last exit status. The job object itself is
 * NOT freed here; callers decide whether to remove or redispatch it.
 */
void
job_reap(job_t j)
{
	struct rusage ru;
	int status;

	job_log(j, LOG_DEBUG, "Reaping");

	if (j->weird_bootstrap) {
		/* Re-register the full vproc MIG subsystem on the manager's port
		 * now that the special bootstrap job has exited. */
		mach_msg_size_t mxmsgsz = sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);

		if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
			mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
		}

		job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
		j->weird_bootstrap = false;
	}

	/* Close the stdout/stderr redirection pipe unless we are waiting for EOF on it. */
	if (j->log_redirect_fd && !j->wait4pipe_eof) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
	}

	if (j->forkfd) {
		job_assumes(j, runtime_close(j->forkfd) != -1);
		j->forkfd = 0;
	}

	if (j->anonymous) {
		/* Anonymous jobs aren't waited on; synthesize a clean status. */
		status = 0;
		memset(&ru, 0, sizeof(ru));
	} else {
		/*
		 * The job is dead. While the PID/PGID is still known to be
		 * valid, try to kill abandoned descendant processes.
		 */
		job_log_stray_pg(j);
		if (!j->abandon_pg) {
			job_assumes(j, runtime_killpg(j->p, SIGTERM) != -1 || errno == ESRCH);
		}

		/*
		 * 5020256
		 *
		 * The current implementation of ptrace() causes the traced process to
		 * be abducted away from the true parent and adopted by the tracer.
		 *
		 * Once the tracing process relinquishes control, the kernel then
		 * restores the true parent/child relationship.
		 *
		 * Unfortunately, the wait*() family of APIs is unaware of the temporarily
		 * data structures changes, and they return an error if reality hasn't
		 * been restored by the time they are called.
		 */
		if (!job_assumes(j, wait4(j->p, &status, 0, &ru) != -1)) {
			job_log(j, LOG_NOTICE, "Working around 5020256. Assuming the job crashed.");

			status = W_EXITCODE(0, SIGSEGV);
			memset(&ru, 0, sizeof(ru));
		}
	}

	/* The process is gone; the SIGKILL escalation timer is no longer needed. */
	if (j->exit_timeout) {
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}

	if (j->anonymous) {
		total_anon_children--;
	} else {
		runtime_del_ref();
		total_children--;
	}
	LIST_REMOVE(j, pid_hash_sle);

	if (j->wait_reply_port) {
		job_log(j, LOG_DEBUG, "MPM wait reply being sent");
		job_assumes(j, job_mig_wait_reply(j->wait_reply_port, 0, status) == 0);
		j->wait_reply_port = MACH_PORT_NULL;
	}

	if (j->sent_sigterm_time) {
		/* Convert mach absolute time to nanoseconds via the timebase info. */
		uint64_t td_sec, td_usec, td = (mach_absolute_time() - j->sent_sigterm_time) * tbi.numer / tbi.denom;

		td_sec = td / NSEC_PER_SEC;
		td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

		job_log(j, LOG_INFO, "Exited %lld.%06lld seconds after %s was sent",
				td_sec, td_usec, signal_to_C_name(j->sent_sigkill ? SIGKILL : SIGTERM));
	}

#if DO_RUSAGE_SUMMATION
	/* Accumulate the child's resource usage into the job's running totals. */
	timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
	timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
	j->ru.ru_maxrss += ru.ru_maxrss;
	j->ru.ru_ixrss += ru.ru_ixrss;
	j->ru.ru_idrss += ru.ru_idrss;
	j->ru.ru_isrss += ru.ru_isrss;
	j->ru.ru_minflt += ru.ru_minflt;
	j->ru.ru_majflt += ru.ru_majflt;
	j->ru.ru_nswap += ru.ru_nswap;
	j->ru.ru_inblock += ru.ru_inblock;
	j->ru.ru_oublock += ru.ru_oublock;
	j->ru.ru_msgsnd += ru.ru_msgsnd;
	j->ru.ru_msgrcv += ru.ru_msgrcv;
	j->ru.ru_nsignals += ru.ru_nsignals;
	j->ru.ru_nvcsw += ru.ru_nvcsw;
	j->ru.ru_nivcsw += ru.ru_nivcsw;
#endif

	if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
		job_log(j, LOG_WARNING, "Exited with exit code: %d", WEXITSTATUS(status));
	}

	if (WIFSIGNALED(status)) {
		int s = WTERMSIG(status);
		/* SIGTERM/SIGKILL are how launchd itself stops jobs, so log quietly. */
		if (SIGKILL == s || SIGTERM == s) {
			job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
		} else {
			job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
		}
	}

	if (j->hopefully_exits_first) {
		j->mgr->hopefully_first_cnt--;
	} else if (!j->anonymous && !j->hopefully_exits_last) {
		j->mgr->normal_active_cnt--;
	}
	j->last_exit_status = status;
	j->sent_sigkill = false;
	j->p = 0;

	/*
	 * We need to someday evaluate other jobs and find those who wish to track the
	 * active/inactive state of this job. The current job_dispatch() logic makes
	 * this messy, given that jobs can be deleted at dispatch.
	 */
}
2270
2271 void
2272 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
2273 {
2274 jobmgr_t jmi, jmn;
2275 job_t ji, jn;
2276
2277 if (jm->shutting_down) {
2278 return;
2279 }
2280
2281 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
2282 jobmgr_dispatch_all(jmi, newmounthack);
2283 }
2284
2285 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
2286 if (newmounthack && ji->start_on_mount) {
2287 ji->start_pending = true;
2288 }
2289
2290 job_dispatch(ji, false);
2291 }
2292 }
2293
2294 job_t
2295 job_dispatch(job_t j, bool kickstart)
2296 {
2297 /*
2298 * The whole job removal logic needs to be consolidated. The fact that
2299 * a job can be removed from just about anywhere makes it easy to have
2300 * stale pointers left behind somewhere on the stack that might get
2301 * used after the deallocation. In particular, during job iteration.
2302 *
2303 * This is a classic example. The act of dispatching a job may delete it.
2304 */
2305 if (!job_active(j)) {
2306 if (job_useless(j)) {
2307 job_remove(j);
2308 return NULL;
2309 } else if (kickstart || job_keepalive(j)) {
2310 job_start(j);
2311 } else {
2312 job_watch(j);
2313
2314 /*
2315 * 5455720
2316 *
2317 * Path checking and monitoring is really racy right now.
2318 * We should clean this up post Leopard.
2319 */
2320 if (job_keepalive(j)) {
2321 job_start(j);
2322 }
2323 }
2324 } else {
2325 job_log(j, LOG_DEBUG, "Tried to dispatch an already active job.");
2326 }
2327
2328 return j;
2329 }
2330
/*
 * Emit one line captured from the job's stdout/stderr pipe into syslog,
 * attributed to the job's label (and PID) rather than to launchd itself.
 */
void
job_log_stdouterr2(job_t j, const char *msg, ...)
{
	struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
	va_list ap;

	va_start(ap, msg);
	runtime_vsyslog(&attr, msg, ap);
	va_end(ap);
}
2341
/*
 * Drain the job's stdout/stderr redirection pipe: read up to BIG_PIPE_SIZE
 * bytes, split on newlines/carriage returns, and log each non-empty line.
 * On EOF or read error the pipe is closed and the job is redispatched
 * (relevant when the job is waiting for pipe EOF).
 *
 * NOTE(review): a line that straddles two reads is logged as two separate
 * lines; the buffer is not carried over between calls.
 */
void
job_log_stdouterr(job_t j)
{
	char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
	bool close_log_redir = false;
	ssize_t rsz;

	if (!job_assumes(j, buf != NULL)) {
		return;
	}

	bufindex = buf;

	rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);

	if (rsz == 0) {
		job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
		close_log_redir = true;
	} else if (!job_assumes(j, rsz != -1)) {
		close_log_redir = true;
	} else {
		/* +1 byte was reserved in the malloc above for this terminator. */
		buf[rsz] = '\0';

		while ((msg = strsep(&bufindex, "\n\r"))) {
			if (msg[0]) {
				job_log_stdouterr2(j, "%s", msg);
			}
		}
	}

	free(buf);

	if (close_log_redir) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
		job_dispatch(j, false);
	}
}
2380
/*
 * Forcibly kill the job's process with SIGKILL and arm a timer so that
 * job_callback_timer() can report if the process still hasn't died after
 * LAUNCHD_SIGKILL_TIMER seconds. No-op for anonymous or not-running jobs.
 */
void
job_kill(job_t j)
{
	if (!j->p || j->anonymous) {
		return;
	}

	job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);

	j->sent_sigkill = true;

	job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
				EV_ADD, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j) != -1);

	job_log(j, LOG_DEBUG, "Sent SIGKILL signal.");
}
2397
/*
 * Handle an EVFILT_PROC kevent for this job. On exec of an anonymous job,
 * rename its label to match the new program image. On exit, reap it and
 * either remove it (anonymous) or redispatch it.
 *
 * "flags" are the kevent flags and "fflags" the filter-specific flags
 * (NOTE_EXEC/NOTE_FORK/NOTE_EXIT).
 */
void
job_callback_proc(job_t j, int flags, int fflags)
{
	if ((fflags & NOTE_EXEC) && j->anonymous) {
		/* Look up the new process image name so the label can track it. */
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		if (job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
			char newlabel[1000];

			snprintf(newlabel, sizeof(newlabel), "%p.%s", j, kp.kp_proc.p_comm);

			job_log(j, LOG_DEBUG, "Program changed. Updating the label to: %s", newlabel);

			/* Re-hash under the new label. NOTE(review): the strcpy below
			 * assumes the original label allocation is large enough to hold
			 * the new "%p.%s" label — the allocation is not visible here;
			 * verify against where anonymous-job labels are created. */
			LIST_REMOVE(j, label_hash_sle);
			strcpy((char *)j->label, newlabel);
			LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "Called fork()");
	}

	if (fflags & NOTE_EXIT) {
		job_reap(j);

		if (j->anonymous) {
			job_remove(j);
			j = NULL;
		} else {
			/* job_dispatch() may remove the job; j becomes NULL in that case. */
			j = job_dispatch(j, false);
		}
	}

	/* NOTE_REAP sanity checking is disabled for now while we try and diagnose 5289559 */
#if 0
	if (j && (fflags & NOTE_REAP)) {
		job_assumes(j, flags & EV_ONESHOT);
		job_assumes(j, flags & EV_EOF);

		job_assumes(j, j->p == 0);
	}
#endif
}
2444
/*
 * Handle an EVFILT_TIMER kevent for this job. The timer's identity is the
 * address it was registered with: the job itself (respawn throttle expired),
 * its semaphore list, its start interval, or its exit timeout (the job did
 * not exit after SIGTERM, so escalate).
 */
void
job_callback_timer(job_t j, void *ident)
{
	if (j == ident) {
		/* Respawn-throttle timer: force a start. */
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (j->sent_sigkill) {
			/* Even SIGKILL hasn't worked; just report how long we've waited. */
			uint64_t td = (mach_absolute_time() - j->sent_sigterm_time) * tbi.numer / tbi.denom;

			td /= NSEC_PER_SEC;
			td -= j->exit_timeout;

			job_log(j, LOG_ERR, "Did not die after sending SIGKILL %llu seconds ago...", td);
		} else {
			/* Capture a sample of the stuck process before killing it. */
			job_force_sampletool(j);
			if (j->debug_before_kill) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger.");
				job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
			}
			job_log(j, LOG_WARNING, "Exit timeout elapsed (%u seconds). Killing.", j->exit_timeout);
			job_kill(j);
		}
	} else {
		job_assumes(j, false);
	}
}
2476
2477 void
2478 job_callback_read(job_t j, int ident)
2479 {
2480 if (ident == j->log_redirect_fd) {
2481 job_log_stdouterr(j);
2482 } else {
2483 socketgroup_callback(j);
2484 }
2485 }
2486
/*
 * Deliver a process kevent to whichever job (in this manager or any
 * submanager) owns the PID in kev->ident. Recurses depth-first so every
 * manager gets a chance to match the PID.
 */
void
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
{
	jobmgr_t jmi;
	job_t j;

	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_reap_bulk(jmi, kev);
	}

	if ((j = jobmgr_find_by_pid(jm, kev->ident, false))) {
		kev->udata = j;
		job_callback(j, kev);
	}
}
2502
/*
 * Kevent callback for events registered against a job manager rather than
 * an individual job: process exits (bulk reap + GC), signals directed at
 * launchd, filesystem mount/unmount events, and the calendar timer.
 */
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	jobmgr_t jm = obj;

	switch (kev->filter) {
	case EVFILT_PROC:
		jobmgr_reap_bulk(jm, kev);
		if (launchd_assumes(root_jobmgr != NULL)) {
			/* Garbage collection may replace the root job manager. */
			root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		}
		break;
	case EVFILT_SIGNAL:
		/* For EVFILT_SIGNAL, kev->ident is the signal number. */
		switch (kev->ident) {
		case SIGTERM:
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		default:
			return (void)jobmgr_assumes(jm, false);
		}
		break;
	case EVFILT_FS:
		if (kev->fflags & VQ_MOUNT) {
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		/* The only manager-level timer is the shared calendar-event timer. */
		if (jobmgr_assumes(jm, kev->ident == (uintptr_t)&sorted_calendar_events)) {
			calendarinterval_callback();
		}
		break;
	default:
		return (void)jobmgr_assumes(jm, false);
	}
}
2540
/*
 * Kevent callback for events registered against an individual job:
 * demultiplex by filter to the appropriate per-job handler.
 */
void
job_callback(void *obj, struct kevent *kev)
{
	job_t j = obj;

	job_log(j, LOG_DEBUG, "Dispatching kevent callback.");

	switch (kev->filter) {
	case EVFILT_PROC:
		return job_callback_proc(j, kev->flags, kev->fflags);
	case EVFILT_TIMER:
		/* Timers are keyed by the address they were registered with. */
		return job_callback_timer(j, (void *)kev->ident);
	case EVFILT_VNODE:
		return semaphoreitem_callback(j, kev);
	case EVFILT_READ:
		return job_callback_read(j, kev->ident);
	case EVFILT_MACHPORT:
		/* A Mach port demand-started the job. */
		return (void)job_dispatch(j, true);
	default:
		return (void)job_assumes(j, false);
	}
}
2563
/*
 * Start a job's process: enforce the respawn throttle, set up the IPC and
 * exec-synchronization socketpairs and the stdout/stderr pipe, fork, and
 * then perform the parent- and child-side bookkeeping. The child half
 * hands off to job_start_child(), which execs and never returns.
 */
void
job_start(job_t j)
{
	uint64_t td, tnow = mach_absolute_time();
	int spair[2];
	int execspair[2];
	int oepair[2];
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = /* NOTE_EXEC|NOTE_FORK| */ NOTE_EXIT /* |NOTE_REAP */;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (job_active(j)) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	job_assumes(j, tnow > j->start_time);

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of gettimeofday()
	 * or time() wherever possible.
	 */
	td = (tnow - j->start_time) * tbi.numer / tbi.denom;
	td /= NSEC_PER_SEC;

	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
		/* Respawning too fast: defer the start via a one-shot timer. */
		time_t respawn_delta = j->min_run_time - (uint32_t)td;

		/*
		 * We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */

		job_log(j, LOG_WARNING, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
		job_ignore(j);
		return;
	}

	j->sent_sigterm_time = 0;

	if (!j->legacy_mach_job) {
		/* Only jobs with sockets or Mach services need the IPC socketpair. */
		sipc = (!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices));
	}

	j->checkedin = false;

	if (sipc) {
		job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
	}

	job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);

	/* Pipe for capturing the child's stdout/stderr into the log. */
	if (!j->legacy_mach_job && job_assumes(j, pipe(oepair) != -1)) {
		j->log_redirect_fd = _fd(oepair[0]);
		job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
		job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
	}

	j->start_time = tnow;

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		/* Fork failed: unwind all the descriptors set up above. */
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		job_assumes(j, runtime_close(execspair[0]) == 0);
		job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			job_assumes(j, runtime_close(spair[0]) == 0);
			job_assumes(j, runtime_close(spair[1]) == 0);
		}
		if (!j->legacy_mach_job) {
			job_assumes(j, runtime_close(oepair[0]) != -1);
			job_assumes(j, runtime_close(oepair[1]) != -1);
			j->log_redirect_fd = 0;
		}
		break;
	case 0:
		/* Child side. */
		if (_vproc_post_fork_ping()) {
			_exit(EXIT_FAILURE);
		}
		if (!j->legacy_mach_job) {
			/* Route the child's stdout/stderr into the logging pipe. */
			job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
			job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
			job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		job_assumes(j, runtime_close(execspair[0]) == 0);
		/* wait for our parent to say they've attached a kevent to us */
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			job_assumes(j, runtime_close(spair[0]) == 0);
			/* Advertise the IPC fd to the child via the environment. */
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		/* Parent side. */
		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->start_pending = false;

		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);

		if (JOB_BOOTCACHE_HACK_CHECK(j)) {
			did_first_per_user_launchd_BootCache_hack = true;
		}

		if (!j->legacy_mach_job) {
			job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		j->p = c;
		if (j->hopefully_exits_first) {
			j->mgr->hopefully_first_cnt++;
		} else if (!j->hopefully_exits_last) {
			j->mgr->normal_active_cnt++;
		}
		j->forkfd = _fd(execspair[0]);
		job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			job_assumes(j, runtime_close(spair[1]) == 0);
			ipc_open(_fd(spair[0]), j);
		}
		/* If we can't watch the process, reap it immediately rather than leak it. */
		if (job_assumes(j, kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1)) {
			job_ignore(j);
		} else {
			job_reap(j);
		}

		if (!j->stall_before_exec) {
			job_uncork_fork(j);
		}
		break;
	}
}
2706
/*
 * Run "BootCacheControl tag" once, synchronously, before the first per-user
 * launchd starts. Uses vfork(), so the child does nothing between the fork
 * and execve()/_exit() (the vfork contract forbids anything else).
 */
void
do_first_per_user_launchd_hack(void)
{
	char *bcct_tool[] = { "/usr/sbin/BootCacheControl", "tag", NULL };
	int dummystatus;
	pid_t bcp;

	if (launchd_assumes((bcp = vfork()) != -1)) {
		if (bcp == 0) {
			execve(bcct_tool[0], bcct_tool, environ);
			_exit(EXIT_FAILURE);
		} else {
			launchd_assumes(waitpid(bcp, &dummystatus, 0) != -1);
		}
	}
}
2723
/*
 * Child-side half of job_start(): build the argv (optionally glob-expanded),
 * apply job attributes, quarantine and sandbox settings, then exec via
 * posix_spawn with POSIX_SPAWN_SETEXEC. This function never returns; on any
 * failure it _exit()s.
 */
void
job_start_child(job_t j)
{
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	pid_t junk_pid;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC;
	size_t binpref_out_cnt = 0;
	int i;

	if (JOB_BOOTCACHE_HACK_CHECK(j)) {
		do_first_per_user_launchd_hack();
	}

	job_assumes(j, posix_spawnattr_init(&spattr) == 0);

	job_setup_attributes(j);

	if (j->argv && j->globargv) {
		/* Glob-expand each configured argument, reserving slot 0
		 * (GLOB_DOOFFS + gl_offs = 1) for the launchproxy path. */
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (j->argv) {
		/* Copy the argv verbatim, with launchproxy prepended in slot 0. */
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	/* Only inetd-compatible jobs are run through launchproxy; otherwise
	 * skip past the launchproxy slot. */
	if (!j->inetcompat) {
		argv++;
	}

	if (j->wait4debugger) {
		job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

	job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);

	if (j->j_binpref_cnt) {
		/* Apply the preferred CPU-architecture ordering for fat binaries. */
		job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
		job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
				job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
			}
		}
	}
#endif

#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}
#endif

	/* POSIX_SPAWN_SETEXEC means a successful spawn replaces this process;
	 * reaching the log line below implies the spawn failed. */
	if (j->prog) {
		errno = posix_spawn(&junk_pid, j->inetcompat ? file2exec : j->prog, NULL, &spattr, (char *const*)argv, environ);
		job_log_error(j, LOG_ERR, "posix_spawn(\"%s\", ...)", j->prog);
	} else {
		errno = posix_spawnp(&junk_pid, j->inetcompat ? file2exec : argv[0], NULL, &spattr, (char *const*)argv, environ);
		job_log_error(j, LOG_ERR, "posix_spawnp(\"%s\", ...)", argv[0]);
	}

out_bad:
	_exit(EXIT_FAILURE);
}
2824
2825 void
2826 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
2827 {
2828 launch_data_t tmp;
2829 struct envitem *ei;
2830 job_t ji;
2831
2832 if (jm->parentmgr) {
2833 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
2834 } else {
2835 char **tmpenviron = environ;
2836 for (; *tmpenviron; tmpenviron++) {
2837 char envkey[1024];
2838 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
2839 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
2840 strncpy(envkey, *tmpenviron, sizeof(envkey));
2841 *(strchr(envkey, '=')) = '\0';
2842 launch_data_dict_insert(dict, s, envkey);
2843 }
2844 }
2845
2846 LIST_FOREACH(ji, &jm->jobs, sle) {
2847 SLIST_FOREACH(ei, &ji->global_env, sle) {
2848 if ((tmp = launch_data_new_string(ei->value))) {
2849 launch_data_dict_insert(dict, tmp, ei->key);
2850 }
2851 }
2852 }
2853 }
2854
2855 void
2856 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
2857 {
2858 struct envitem *ei;
2859 job_t ji;
2860
2861 if (jm->parentmgr) {
2862 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
2863 }
2864
2865 LIST_FOREACH(ji, &jm->jobs, sle) {
2866 SLIST_FOREACH(ei, &ji->global_env, sle) {
2867 setenv(ei->key, ei->value, 1);
2868 }
2869 }
2870 }
2871
/*
 * Diagnostic helper: when a job's configured UID has no passwd entry,
 * scan the process table and log every process whose real, effective, or
 * saved UID matches that accountless UID, to help identify the culprit.
 */
void
job_find_and_blame_pids_with_weird_uids(job_t j)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;
	uid_t u = j->mach_uid;

#if TARGET_OS_EMBEDDED
	if (!do_apple_internal_magic) {
		return;
	}
#endif
	kp = malloc(len);

	if (!job_assumes(j, kp != NULL)) {
		return;
	}
	if (!job_assumes(j, sysctl(mib, 3, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	/* sysctl() updated len to the number of bytes actually returned. */
	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		uid_t i_euid = kp[i].kp_eproc.e_ucred.cr_uid;
		uid_t i_uid = kp[i].kp_eproc.e_pcred.p_ruid;
		uid_t i_svuid = kp[i].kp_eproc.e_pcred.p_svuid;
		pid_t i_pid = kp[i].kp_proc.p_pid;

		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u",
				i_pid, kp[i].kp_proc.p_comm, i_uid, i_euid, i_svuid);

		/* Temporarily disabled due to 5423935 and 4946119. */
#if 0
		/* Ask the accountless process to exit. */
		job_assumes(j, runtime_kill(i_pid, SIGTERM) != -1);
#endif
	}

out:
	free(kp);
}
2919
/*
 * In the forked child, drop privileges to the job's configured user/group.
 * The order is deliberate and must not change: setlogin(), setgid(),
 * initgroups(), then setuid() last (anything after setuid() can no longer
 * raise privileges). Also exports SHELL/HOME/USER/LOGNAME/TMPDIR for the
 * target account. Exits the child on any failure. No-op unless running
 * as root.
 */
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	if (getuid() != 0) {
		return;
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = getpwnam(j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(EXIT_FAILURE);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_find_and_blame_pids_with_weird_uids(j);
			_exit(EXIT_FAILURE);
		}
	} else {
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	if (pwe->pw_expire && time(NULL) >= pwe->pw_expire) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	if (j->username && strcmp(j->username, loginname) != 0) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (j->mach_uid && (j->mach_uid != desired_uid)) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	if (j->groupname) {
		/* An explicit group overrides the user's primary group. */
		struct group *gre;

		if ((gre = getgrnam(j->groupname)) == NULL) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(EXIT_FAILURE);
		}

		desired_gid = gre->gr_gid;
	}

	if (!job_assumes(j, setlogin(loginname) != -1)) {
		_exit(EXIT_FAILURE);
	}

	if (!job_assumes(j, setgid(desired_gid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (!j->no_init_groups) {
		if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
			_exit(EXIT_FAILURE);
		}
	}

	if (!job_assumes(j, setuid(desired_uid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (r > 0 && r < sizeof(tmpdirpath)) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	/* overwrite=0: don't clobber values the job's plist already set. */
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
3031
/*
 * In the forked child, apply the job's configured execution attributes:
 * nice value, resource limits, session, I/O policy, chroot, user identity,
 * working directory, umask, stdout/stderr files, and environment. The
 * ordering matters: chroot before dropping privileges, user change before
 * chdir to the (possibly user-relative) working directory.
 */
void
job_setup_attributes(job_t j)
{
	struct limititem *li;
	struct envitem *ei;

	if (j->setnice) {
		job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
	}

	SLIST_FOREACH(li, &j->limits, sle) {
		struct rlimit rl;

		if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
			continue;
		}

		/* Only overwrite the half (soft/hard) the job configured. */
		if (li->sethard) {
			rl.rlim_max = li->lim.rlim_max;
		}
		if (li->setsoft) {
			rl.rlim_cur = li->lim.rlim_cur;
		}

		if (setrlimit(li->which, &rl) == -1) {
			job_log_error(j, LOG_WARNING, "setrlimit()");
		}
	}

	if (!j->inetcompat && j->session_create) {
		launchd_SessionCreate();
	}

	if (j->low_pri_io) {
		job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
	}
	if (j->rootdir) {
		job_assumes(j, chroot(j->rootdir) != -1);
		job_assumes(j, chdir(".") != -1);
	}

	job_postfork_become_user(j);

	if (j->workingdir) {
		job_assumes(j, chdir(j->workingdir) != -1);
	}

	if (j->setmask) {
		umask(j->mask);
	}

	job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_APPEND|O_CREAT);
	job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_APPEND|O_CREAT);

	jobmgr_setup_env_from_other_jobs(j->mgr);

	SLIST_FOREACH(ei, &j->env, sle) {
		setenv(ei->key, ei->value, 1);
	}

	/*
	 * We'd like to call setsid() unconditionally, but we have reason to
	 * believe that prevents launchd from being able to send signals to
	 * setuid children. We'll settle for process-groups.
	 */
	if (getppid() != 1) {
		job_assumes(j, setpgid(0, 0) != -1);
	} else {
		job_assumes(j, setsid() != -1);
	}
}
3103
3104 void
3105 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
3106 {
3107 int fd;
3108
3109 if (!path) {
3110 return;
3111 }
3112
3113 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
3114 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
3115 return;
3116 }
3117
3118 job_assumes(j, dup2(fd, target_fd) != -1);
3119 job_assumes(j, runtime_close(fd) == 0);
3120 }
3121
3122 int
3123 dir_has_files(job_t j, const char *path)
3124 {
3125 DIR *dd = opendir(path);
3126 struct dirent *de;
3127 bool r = 0;
3128
3129 if (!dd) {
3130 return -1;
3131 }
3132
3133 while ((de = readdir(dd))) {
3134 if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
3135 r = 1;
3136 break;
3137 }
3138 }
3139
3140 job_assumes(j, closedir(dd) == 0);
3141 return r;
3142 }
3143
/*
 * Compute the next fire time for a calendar interval, insert it into the
 * global list sorted by fire time, and (re)arm the single shared absolute
 * timer to the earliest entry's fire time.
 */
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	if (ci->when.tm_wday != -1) {
		/* Like cron: when both weekday and month-day are given, fire on
		 * whichever comes first. */
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	/* Insert in ascending when_next order; ci_prev trails the iterator so
	 * we can append at the tail if no later entry is found. */
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		/* ci must want to fire after every other timer, or there are no timers */

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	/* Workaround 5225889 */
	kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_DELETE, 0, 0, root_jobmgr);

	if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
		char time_string[100];
		size_t time_string_len;

		/* Strip ctime_r()'s trailing newline before logging. */
		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		if (time_string_len && time_string[time_string_len - 1] == '\n') {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
3202
/*
 * Extract the revision number from an RCS id string: copy the token that
 * follows the first space (e.g. "$Revision: 23585 $" yields "23585").
 * When the input contains no space, the whole string is copied, truncated
 * to osz as needed. The output is always NUL-terminated (for osz > 0).
 */
void
extract_rcsid_substr(const char *i, char *o, size_t osz)
{
	const char *after_space = strchr(i, ' ');
	char *trailing;

	if (after_space == NULL) {
		snprintf(o, osz, "%s", i);
		return;
	}

	snprintf(o, osz, "%s", after_space + 1);

	trailing = strchr(o, ' ');
	if (trailing != NULL) {
		*trailing = '\0';
	}
}
3218
3219 void
3220 jobmgr_log_bug(jobmgr_t jm, const char *rcs_rev, const char *path, unsigned int line, const char *test)
3221 {
3222 int saved_errno = errno;
3223 const char *file = strrchr(path, '/');
3224 char buf[100];
3225
3226 extract_rcsid_substr(rcs_rev, buf, sizeof(buf));
3227
3228 if (!file) {
3229 file = path;
3230 } else {
3231 file += 1;
3232 }
3233
3234 jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u: %s", file, line, buf, saved_errno, test);
3235 }
3236
3237 void
3238 job_log_bug(job_t j, const char *rcs_rev, const char *path, unsigned int line, const char *test)
3239 {
3240 int saved_errno = errno;
3241 const char *file = strrchr(path, '/');
3242 char buf[100];
3243
3244 extract_rcsid_substr(rcs_rev, buf, sizeof(buf));
3245
3246 if (!file) {
3247 file = path;
3248 } else {
3249 file += 1;
3250 }
3251
3252 job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u: %s", file, line, buf, saved_errno, test);
3253 }
3254
/*
 * Core formatted-logging routine for a job: prefixes the message with the
 * error string when "err" is non-zero, honors the job's debug flag by
 * temporarily widening the log mask, and attributes the message to the
 * job's label and PID.
 */
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	struct runtime_syslog_attr attr = { "com.apple.launchd", j->label, j->mgr->name, pri, getuid(), getpid(), j->p };
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	/*
	 * Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(), but before the exec*(). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	/* 200 extra bytes leaves room for the appended strerror() text. */
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s", msg);
	}

	if (j->debug) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	runtime_vsyslog(&attr, newmsg, ap);

	if (j->debug) {
		setlogmask(oldmask);
	}
}
3291
/*
 * Log a formatted message for a job with the current errno's description
 * appended. Convenience wrapper around job_logv().
 */
void
job_log_error(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, errno, msg, ap);
	va_end(ap);
}
3301
/*
 * Log a formatted message for a job (no errno text). Convenience wrapper
 * around job_logv().
 */
void
job_log(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, 0, msg, ap);
	va_end(ap);
}
3311
3312 #if 0
3313 void
3314 jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
3315 {
3316 va_list ap;
3317
3318 va_start(ap, msg);
3319 jobmgr_logv(jm, pri, errno, msg, ap);
3320 va_end(ap);
3321 }
3322 #endif
3323
/*
 * Log a formatted message for a job manager (no errno text). Convenience
 * wrapper around jobmgr_logv().
 */
void
jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, 0, msg, ap);
	va_end(ap);
}
3333
/*
 * Core logging routine for a job manager. Prepends this manager's name to
 * the message (escaping '%' so the name survives later printf-style
 * formatting) and recurses up through parent managers; the va_list is
 * consumed only once, by runtime_vsyslog() at the root manager.
 */
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	/* Worst case every character is '%' and must be doubled. */
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		/* err is already folded into newmsg; pass 0 up the chain. */
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct runtime_syslog_attr attr = { "com.apple.launchd", "com.apple.launchd", jm->name, pri, getuid(), getpid(), getpid() };

		runtime_vsyslog(&attr, newmsg, ap);
	}
}
3368
/* Stop receiving vnode events for this semaphore item, if it has an open fd. */
void
semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
{
	if (si->fd == -1) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
	job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
}
3377
/*
 * Begin kqueue-based monitoring of the path backing a semaphore item.
 * If the path itself cannot be opened, fall back to watching its parent
 * directory; if the filesystem does not support EVFILT_VNODE, fall back
 * to a 3-second polling timer keyed off &j->semaphores.
 */
void
semaphoreitem_watch(job_t j, struct semaphoreitem *si)
{
	char *parentdir, tmp_path[PATH_MAX];
	const char *which_path = si->what;
	int saved_errno = 0;
	int fflags = 0;

	/* pick the vnode event mask appropriate to the semaphore type */
	switch (si->why) {
	case PATH_EXISTS:
		fflags = NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
		break;
	case PATH_MISSING:
		fflags = NOTE_DELETE|NOTE_RENAME;
		break;
	case DIR_NOT_EMPTY:
	case PATH_CHANGES:
		fflags = NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE|NOTE_ATTRIB|NOTE_LINK;
		break;
	default:
		return;
	}

	/* dirname() may modify tmp_path */
	strlcpy(tmp_path, si->what, sizeof(tmp_path));

	if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
		return;
	}

	/* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
	do {
		if (si->fd == -1) {
			/* try the path first, then its parent directory */
			if ((si->fd = _fd(open(which_path, O_EVTONLY|O_NOCTTY))) == -1) {
				which_path = parentdir;
				si->fd = _fd(open(which_path, O_EVTONLY|O_NOCTTY));
			}
		}

		if (si->fd == -1) {
			return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", which_path);
		}

		job_log(j, LOG_DEBUG, "Watching Vnode: %d", si->fd);

		if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
			saved_errno = errno;
			/*
			 * The FD can be revoked between the open() and kevent().
			 * This is similar to the inability for kevents to be
			 * attached to short lived zombie processes after fork()
			 * but before kevent().
			 */
			job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1;
		}
	} while ((si->fd == -1) && (saved_errno == ENOENT));

	if (saved_errno == ENOTSUP) {
		/*
		 * 3524219 NFS needs kqueue support
		 * 4124079 VFS needs generic kqueue support
		 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
		 */
		job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);

		if (!j->poll_for_vfs_changes) {
			j->poll_for_vfs_changes = true;
			job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
		}
	}
}
3450
/*
 * EVFILT_VNODE callback: a watched path fired. Locate the semaphore item
 * that owns the triggering fd, log why the vnode became unusable (if it
 * did) and close it so semaphoreitem_watch() can re-open it, then
 * re-dispatch the job so its start criteria are re-evaluated.
 */
void
semaphoreitem_callback(job_t j, struct kevent *kev)
{
	char invalidation_reason[100] = "";
	struct semaphoreitem *si;

	/* only path-type semaphores can own a vnode fd */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		switch (si->why) {
		case PATH_CHANGES:
		case PATH_EXISTS:
		case PATH_MISSING:
		case DIR_NOT_EMPTY:
			break;
		default:
			continue;
		}

		if (si->fd == (int)kev->ident) {
			break;
		}
	}

	if (!job_assumes(j, si != NULL)) {
		return;
	}

	/* accumulate a "/"-separated summary; the three literals total well
	 * under the 100-byte buffer, so the strcat()s are bounded */
	if (NOTE_DELETE & kev->fflags) {
		strcat(invalidation_reason, "deleted");
	}

	if (NOTE_RENAME & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/renamed");
		} else {
			strcat(invalidation_reason, "renamed");
		}
	}

	if (NOTE_REVOKE & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/revoked");
		} else {
			strcat(invalidation_reason, "revoked");
		}
	}

	if (invalidation_reason[0]) {
		job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
		job_assumes(j, runtime_close(si->fd) == 0);
		si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
	}

	job_log(j, LOG_DEBUG, "Watch path modified: %s", si->what);

	if (si->why == PATH_CHANGES) {
		j->start_pending = true;
	}

	job_dispatch(j, false);
}
3511
3512 void
3513 calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
3514 {
3515 struct tm *tmptm = context;
3516 int64_t val;
3517
3518 if (LAUNCH_DATA_INTEGER != launch_data_get_type(obj)) {
3519 /* hack to let caller know something went wrong */
3520 tmptm->tm_sec = -1;
3521 return;
3522 }
3523
3524 val = launch_data_get_integer(obj);
3525
3526 if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
3527 tmptm->tm_min = val;
3528 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
3529 tmptm->tm_hour = val;
3530 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
3531 tmptm->tm_mday = val;
3532 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
3533 tmptm->tm_wday = val;
3534 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
3535 tmptm->tm_mon = val;
3536 tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
3537 }
3538 }
3539
3540 bool
3541 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
3542 {
3543 struct tm tmptm;
3544
3545 memset(&tmptm, 0, sizeof(0));
3546
3547 tmptm.tm_min = -1;
3548 tmptm.tm_hour = -1;
3549 tmptm.tm_mday = -1;
3550 tmptm.tm_wday = -1;
3551 tmptm.tm_mon = -1;
3552
3553 if (!job_assumes(j, obj != NULL)) {
3554 return false;
3555 }
3556
3557 if (LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj)) {
3558 return false;
3559 }
3560
3561 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &tmptm);
3562
3563 if (tmptm.tm_sec == -1) {
3564 return false;
3565 }
3566
3567 return calendarinterval_new(j, &tmptm);
3568 }
3569
3570 bool
3571 calendarinterval_new(job_t j, struct tm *w)
3572 {
3573 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
3574
3575 if (!job_assumes(j, ci != NULL)) {
3576 return false;
3577 }
3578
3579 ci->when = *w;
3580 ci->job = j;
3581
3582 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
3583
3584 calendarinterval_setalarm(j, ci);
3585
3586 runtime_add_ref();
3587
3588 return true;
3589 }
3590
3591 void
3592 calendarinterval_delete(job_t j, struct calendarinterval *ci)
3593 {
3594 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
3595 LIST_REMOVE(ci, global_sle);
3596
3597 free(ci);
3598
3599 runtime_del_ref();
3600 }
3601
3602 void
3603 calendarinterval_sanity_check(void)
3604 {
3605 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
3606 time_t now = time(NULL);
3607
3608 if (ci && (ci->when_next < now)) {
3609 jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
3610 }
3611 }
3612
/*
 * Calendar timer fired: start every job whose next run time has arrived.
 * The list name and the early break suggest sorted_calendar_events is kept
 * ordered by when_next; each fired entry is removed and re-armed (which
 * re-inserts it) before its job is dispatched.
 */
void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		/* first entry in the future: nothing later can be due either */
		if (ci->when_next > now) {
			break;
		}

		/* re-arm; calendarinterval_setalarm() handles re-insertion */
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
3633
3634 bool
3635 socketgroup_new(job_t j, const char *name, int *fds, unsigned int fd_cnt, bool junkfds)
3636 {
3637 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
3638
3639 if (!job_assumes(j, sg != NULL)) {
3640 return false;
3641 }
3642
3643 sg->fds = calloc(1, fd_cnt * sizeof(int));
3644 sg->fd_cnt = fd_cnt;
3645 sg->junkfds = junkfds;
3646
3647 if (!job_assumes(j, sg->fds != NULL)) {
3648 free(sg);
3649 return false;
3650 }
3651
3652 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
3653 strcpy(sg->name_init, name);
3654
3655 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
3656
3657 runtime_add_ref();
3658
3659 return true;
3660 }
3661
/*
 * Close every descriptor in a socket group, unlink the group from the
 * job, free it, and drop its runtime reference. The disabled section
 * below (5480306) would also unlink AF_UNIX socket files from disk.
 */
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		/* 5480306 */
		if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			job_assumes(j, unlink(sun->sun_path) != -1);
			/* We might conditionally need to delete a directory here */
		}
#endif
		job_assumes(j, runtime_close(sg->fds[i]) != -1);
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	runtime_del_ref();
}
3690
/*
 * Add or remove EVFILT_READ kevents for every descriptor in the group in
 * one bulk kevent() call, then verify each per-entry result. No-op for
 * groups flagged junkfds.
 */
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	struct kevent kev[sg->fd_cnt];
	char buf[10000] = "";	/* initialized: logged even when fd_cnt == 0 */
	size_t buf_off = 0;
	unsigned int i;

	if (sg->junkfds) {
		return;
	}

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		/*
		 * snprintf() returns the length it *wanted* to write, not what
		 * it wrote. The old unconditional "buf_off += snprintf(...)"
		 * could push buf_off past sizeof(buf) on truncation, making
		 * "sizeof(buf) - buf_off" wrap to a huge size_t on the next
		 * iteration. Guard and clamp instead.
		 */
		if (buf_off < sizeof(buf)) {
			int len = snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
			if (len > 0) {
				buf_off += (size_t)len;
			}
		}
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);

	for (i = 0; i < sg->fd_cnt; i++) {
		/* each returned entry carries EV_ERROR with its status in data;
		 * data == 0 means that entry succeeded */
		job_assumes(j, kev[i].flags & EV_ERROR);
		errno = kev[i].data;
		job_assumes(j, kev[i].data == 0);
	}
}
3717
/* Stop watching the group's descriptors for readability. */
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, false);
}
3723
/* Begin watching the group's descriptors for readability. */
void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, true);
}
3729
/* A watched socket became readable: dispatch the job with kickstart. */
void
socketgroup_callback(job_t j)
{
	job_dispatch(j, true);
}
3735
3736 bool
3737 envitem_new(job_t j, const char *k, const char *v, bool global)
3738 {
3739 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
3740
3741 if (!job_assumes(j, ei != NULL)) {
3742 return false;
3743 }
3744
3745 strcpy(ei->key_init, k);
3746 ei->value = ei->key_init + strlen(k) + 1;
3747 strcpy(ei->value, v);
3748
3749 if (global) {
3750 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
3751 } else {
3752 SLIST_INSERT_HEAD(&j->env, ei, sle);
3753 }
3754
3755 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
3756
3757 return true;
3758 }
3759
3760 void
3761 envitem_delete(job_t j, struct envitem *ei, bool global)
3762 {
3763 if (global) {
3764 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
3765 } else {
3766 SLIST_REMOVE(&j->env, ei, envitem, sle);
3767 }
3768
3769 free(ei);
3770 }
3771
3772 void
3773 envitem_setup(launch_data_t obj, const char *key, void *context)
3774 {
3775 job_t j = context;
3776
3777 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
3778 return;
3779 }
3780
3781 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
3782 }
3783
3784 bool
3785 limititem_update(job_t j, int w, rlim_t r)
3786 {
3787 struct limititem *li;
3788
3789 SLIST_FOREACH(li, &j->limits, sle) {
3790 if (li->which == w) {
3791 break;
3792 }
3793 }
3794
3795 if (li == NULL) {
3796 li = calloc(1, sizeof(struct limititem));
3797
3798 if (!job_assumes(j, li != NULL)) {
3799 return false;
3800 }
3801
3802 SLIST_INSERT_HEAD(&j->limits, li, sle);
3803
3804 li->which = w;
3805 }
3806
3807 if (j->importing_hard_limits) {
3808 li->lim.rlim_max = r;
3809 li->sethard = true;
3810 } else {
3811 li->lim.rlim_cur = r;
3812 li->setsoft = true;
3813 }
3814
3815 return true;
3816 }
3817
3818 void
3819 limititem_delete(job_t j, struct limititem *li)
3820 {
3821 SLIST_REMOVE(&j->limits, li, limititem, sle);
3822
3823 free(li);
3824 }
3825
#if HAVE_SANDBOX
/*
 * launch_data_dict_iterate() callback: accumulate sandbox flag bits on
 * the job. Non-boolean values are rejected with a warning; false flags
 * are simply ignored.
 */
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	if (!launch_data_get_bool(obj)) {
		return;
	}

	if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
3846
3847 void
3848 limititem_setup(launch_data_t obj, const char *key, void *context)
3849 {
3850 job_t j = context;
3851 int i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
3852 rlim_t rl;
3853
3854 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
3855 return;
3856 }
3857
3858 rl = launch_data_get_integer(obj);
3859
3860 for (i = 0; i < limits_cnt; i++) {
3861 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
3862 break;
3863 }
3864 }
3865
3866 if (i == limits_cnt) {
3867 return;
3868 }
3869
3870 limititem_update(j, launchd_keys2limits[i].val, rl);
3871 }
3872
/*
 * Decide whether an exited job should be garbage collected rather than
 * kept around for a restart. Returns true when the job has outlived its
 * usefulness (ran its one shot, was pending removal, exited during
 * shutdown, or is a legacy Mach job with nothing left to offer).
 */
bool
job_useless(job_t j)
{
	/* Yes, j->unload_at_exit and j->only_once seem the same, but they'll differ someday... */

	if ((j->unload_at_exit || j->only_once) && j->start_time != 0) {
		if (j->unload_at_exit && j->j_port) {
			/* something still holds the job's privileged port; not done yet */
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->mgr->shutting_down && (j->hopefully_exits_first || j->mgr->hopefully_first_cnt == 0)) {
		/* NOTE(review): %lu assumes total_children/total_anon_children are
		 * unsigned long — confirm against their declarations */
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		return true;
	} else if (j->legacy_mach_job) {
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	}

	return false;
}
3902
/*
 * Evaluate the job's KeepAlive criteria and return true if the job should
 * be (re)started now. Checked in order: pending launches, always-run
 * jobs, queued Mach messages, then each KeepAlive semaphore. Each paired
 * case in the semaphore switch relies on deliberate fallthrough: the
 * positive case sets wanted_state = true and falls into its sibling's
 * comparison.
 */
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	struct stat sb;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);

	if (j->mgr->shutting_down) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (j->mgr->global_on_demand_cnt > 0 && strcmp(j->label, "com.apple.kextd") != 0) {
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	/* restart if any of the job's Mach services has messages queued */
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: job restarted due to %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}


	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		int qdir_file_cnt;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			/* FALLTHROUGH */
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			/* FALLTHROUGH */
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		case PATH_EXISTS:
			wanted_state = true;
			/* FALLTHROUGH */
		case PATH_MISSING:
			if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
				/* drop the watch fd; the path condition is already met */
				if (si->fd != -1) {
					job_assumes(j, runtime_close(si->fd) == 0);
					si->fd = -1;
				}
				job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
				return true;
			}
			break;
		case PATH_CHANGES:
			break;
		case DIR_NOT_EMPTY:
			if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
				job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
			} else if (qdir_file_cnt > 0) {
				job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
				return true;
			}
			break;
		}
	}

	return false;
}
4018
4019 const char *
4020 job_prog(job_t j)
4021 {
4022 if (j->prog) {
4023 return j->prog;
4024 } else if (j->argv) {
4025 return j->argv[0];
4026 } else {
4027 return "";
4028 }
4029 }
4030
/*
 * Return a human-readable reason the job is still considered active, or
 * NULL when it is fully idle. As a side effect, the stdout/stderr
 * redirect fd is closed during shutdown (or when wait4pipe_eof isn't set)
 * so it can't keep the job alive.
 */
const char *
job_active(job_t j)
{
	struct machservice *ms;

	if (j->p) {
		return "PID is still valid";
	}

	/* during shutdown we stop waiting on the log pipe entirely */
	if (j->mgr->shutting_down && j->log_redirect_fd) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
	}

	if (j->log_redirect_fd) {
		if (job_assumes(j, j->wait4pipe_eof)) {
			return "Standard out/error is still valid";
		} else {
			/* shouldn't happen: holding the fd without wait4pipe_eof */
			job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
			j->log_redirect_fd = 0;
		}
	}

	if (j->priv_port_has_senders) {
		return "Privileged Port still has outstanding senders";
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		if (ms->recv && ms->isActive) {
			return "Mach service is still active";
		}
	}

	return NULL;
}
4066
/* Start receiving on the service's port, but only if launchd holds the receive right. */
void
machservice_watch(job_t j, struct machservice *ms)
{
	if (ms->recv) {
		job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
	}
}
4074
/* Remove the service's port from the set launchd listens on. */
void
machservice_ignore(job_t j, struct machservice *ms)
{
	job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
}
4080
/*
 * Destroy and recreate the service's receive right. gen_num is bumped so
 * stale send rights from the old incarnation can be distinguished. The
 * hash unlink must happen before the old port name is destroyed, and the
 * re-insert after the new name exists — order matters throughout.
 */
void
machservice_resetport(job_t j, struct machservice *ms)
{
	LIST_REMOVE(ms, port_hash_sle);
	job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
	ms->gen_num++;
	job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
	job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
	/* re-hash under the new port name */
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
4092
/*
 * Create a Mach service record named `name` for job j and register it in
 * the job's service list and both global hash tables.
 *
 * If *serviceport is MACH_PORT_NULL, a new receive right is created and a
 * send right returned through *serviceport, with launchd as the receiver.
 * Otherwise the supplied port is adopted and the service marked active
 * (launchd does not hold its receive right). Returns NULL on failure.
 */
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	struct machservice *ms;

	if ((ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1)) == NULL) {
		return NULL;
	}

	/* the name is stored in the tail of the allocation above */
	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->per_pid = pid_local;

	if (*serviceport == MACH_PORT_NULL) {
		if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
			goto out_bad;
		}

		if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);
	LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	job_log(j, LOG_INFO, "Mach service added: %s", name);

	return ms;
out_bad2:
	job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
out_bad:
	free(ms);
	return NULL;
}
4134
4135 bootstrap_status_t
4136 machservice_status(struct machservice *ms)
4137 {
4138 if (ms->isActive) {
4139 return BOOTSTRAP_STATUS_ACTIVE;
4140 } else if (ms->job->ondemand) {
4141 return BOOTSTRAP_STATUS_ON_DEMAND;
4142 } else {
4143 return BOOTSTRAP_STATUS_INACTIVE;
4144 }
4145 }
4146
/*
 * Install the EXC_MASK_CRASH exception handler for target_task (or, when
 * target_task is 0 and we are PID 1, host-wide). The handler port is the
 * job's alternate handler service if configured, launchd's own kernel
 * port for internal handling, or the global exception server.
 */
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (j->alt_exc_handler) {
		/* the job named a specific Mach service as its crash handler */
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (ms) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (j->internal_exc_handler) {
		exc_port = runtime_get_kernel_port();
	} else if (!exc_port) {
		return;
	}

	/* thread-state flavor delivered with the exception is per-arch */
#if defined (__ppc__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (target_task) {
		job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, exc_port,
				EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
	} else if (getpid() == 1 && the_exception_server) {
		/* no task given: PID 1 claims the host-wide crash handler */
		mach_port_t mhp = mach_host_self();
		job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server,
				EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
		job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
	}

}
4188
/*
 * Claim the global crash exception server port; only the first claimant
 * wins, later attempts are logged and ignored. (The misspelling in the
 * function name is long-standing — callers reference it as-is, so it
 * cannot be renamed here.)
 */
void
job_set_exeception_port(job_t j, mach_port_t port)
{
	if (!the_exception_server) {
		the_exception_server = port;
		job_setup_exception_port(j, 0);
	} else {
		job_log(j, LOG_WARNING, "The exception server is already claimed!");
	}
}
4199
4200 void
4201 machservice_setup_options(launch_data_t obj, const char *key, void *context)
4202 {
4203 struct machservice *ms = context;
4204 mach_port_t mhp = mach_host_self();
4205 int which_port;
4206 bool b;
4207
4208 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
4209 return;
4210 }
4211
4212 switch (launch_data_get_type(obj)) {
4213 case LAUNCH_DATA_INTEGER:
4214 which_port = launch_data_get_integer(obj);
4215 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
4216 switch (which_port) {
4217 case TASK_KERNEL_PORT:
4218 case TASK_HOST_PORT:
4219 case TASK_NAME_PORT:
4220 case TASK_BOOTSTRAP_PORT:
4221 /* I find it a little odd that zero isn't reserved in the header */
4222 case 0:
4223 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
4224 break;
4225 default:
4226 ms->special_port_num = which_port;
4227 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
4228 break;
4229 }
4230 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && getpid() == 1) {
4231 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
4232 job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
4233 } else {
4234 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
4235 }
4236 }
4237 case LAUNCH_DATA_BOOL:
4238 b = launch_data_get_bool(obj);
4239 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
4240 ms->debug_on_close = b;
4241 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
4242 ms->reset = b;
4243 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
4244 ms->hide = b;
4245 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
4246 job_set_exeception_port(ms->job, ms->port);
4247 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
4248 ms->kUNCServer = b;
4249 job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
4250 }
4251 break;
4252 case LAUNCH_DATA_DICTIONARY:
4253 job_set_exeception_port(ms->job, ms->port);
4254 break;
4255 default:
4256 break;
4257 }
4258
4259 job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
4260 }
4261
4262 void
4263 machservice_setup(launch_data_t obj, const char *key, void *context)
4264 {
4265 job_t j = context;
4266 struct machservice *ms;
4267 mach_port_t p = MACH_PORT_NULL;
4268
4269 if ((ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
4270 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
4271 return;
4272 }
4273
4274 if ((ms = machservice_new(j, key, &p, false)) == NULL) {
4275 job_log_error(j, LOG_WARNING, "Cannot add service: %s", key);
4276 return;
4277 }
4278
4279 ms->isActive = false;
4280
4281 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
4282 launch_data_dict_iterate(obj, machservice_setup_options, ms);
4283 }
4284 }
4285
/*
 * Drive the shutdown state machine for a job manager. Recurses into
 * sub-managers first, then walks through ordered phases: wait for
 * "hopefully first" jobs, stop normal jobs, stop "hopefully last" jobs,
 * and finally remove the manager. Returns jm while work remains, or NULL
 * once the manager has been removed.
 */
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	job_t ji, jn;

	/* depth-first: children must be collected before their parent */
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	if (!jm->shutting_down) {
		return jm;
	}

	jobmgr_log(jm, LOG_DEBUG, "Garbage collecting.");

	/*
	 * Normally, we wait for all resources of a job (Unix PIDs/FDs and Mach ports)
	 * to reset before we consider the job truly dead and ready to be spawned again.
	 *
	 * In order to work around 5487724 and 3456090, we're going to call reboot()
	 * when the last PID dies and not wait for the associated resources to reset.
	 */
	if (getpid() == 1 && jm->parentmgr == NULL && total_children == 0) {
		jobmgr_log(jm, LOG_DEBUG, "About to force a call to: reboot(%s)", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
	}

	/* "hopefully first" jobs must be fully gone before stopping others */
	if (jm->hopefully_first_cnt) {
		return jm;
	}

	if (jm->parentmgr && jm->parentmgr->shutting_down && jm->parentmgr->hopefully_first_cnt) {
		return jm;
	}

	/* phase: ask every ordinary job to stop, exactly once */
	if (!jm->sent_stop_to_normal_jobs) {
		jobmgr_log(jm, LOG_DEBUG, "Asking \"normal\" jobs to exit.");

		LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
			if (!job_active(ji)) {
				job_remove(ji);
			} else if (!ji->hopefully_exits_last) {
				job_stop(ji);
			}
		}

		jm->sent_stop_to_normal_jobs = true;
	}

	if (jm->normal_active_cnt) {
		return jm;
	}

	/* phase: normal jobs are gone; now stop the "hopefully last" jobs */
	if (!jm->sent_stop_to_hopefully_last_jobs) {
		jobmgr_log(jm, LOG_DEBUG, "Asking \"hopefully last\" jobs to exit.");

		LIST_FOREACH(ji, &jm->jobs, sle) {
			if (ji->p && ji->anonymous) {
				continue;
			} else if (ji->p && job_assumes(ji, ji->hopefully_exits_last)) {
				job_stop(ji);
			}
		}

		jm->sent_stop_to_hopefully_last_jobs = true;
	}

	if (!SLIST_EMPTY(&jm->submgrs)) {
		return jm;
	}

	/* only anonymous (externally created) jobs may remain at teardown */
	LIST_FOREACH(ji, &jm->jobs, sle) {
		if (!ji->anonymous) {
			return jm;
		}
	}

	jobmgr_log_stray_children(jm);
	jobmgr_remove(jm);
	return NULL;
}
4369
/*
 * At shutdown of the root manager in PID 1, enumerate all remaining
 * processes via sysctl(KERN_PROC_ALL) and log each one that survived.
 * No-op for sub-managers or when launchd is not PID 1.
 */
void
jobmgr_log_stray_children(jobmgr_t jm)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;

#if TARGET_OS_EMBEDDED
	if (!do_apple_internal_magic) {
		return;
	}
#endif
	if (jm->parentmgr || getpid() != 1) {
		return;
	}

	if (!jobmgr_assumes(jm, (kp = malloc(len)) != NULL)) {
		return;
	}
	if (!jobmgr_assumes(jm, sysctl(mib, 3, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	/* sysctl() shrank len to the number of bytes actually returned */
	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = kp[i].kp_proc.p_pid;
		pid_t pp_i = kp[i].kp_eproc.e_ppid;
		pid_t pg_i = kp[i].kp_eproc.e_pgid;
		const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
		const char *n = kp[i].kp_proc.p_comm;

		/* skip the kernel (0) and launchd itself (1) */
		if (p_i == 0 || p_i == 1) {
			continue;
		}

		jobmgr_log(jm, LOG_WARNING, "Stray %sprocess at shutdown: PID %u PPID %u PGID %u %s", z, p_i, pp_i, pg_i, n);

		/*
		 * The kernel team requested that launchd not do this for Leopard.
		 * jobmgr_assumes(jm, runtime_kill(p_i, SIGKILL) != -1);
		 */
	}

out:
	free(kp);
}
4417
/* Accessor: a job manager's parent, or NULL for the root manager. */
jobmgr_t
jobmgr_parent(jobmgr_t jm)
{
	return jm->parentmgr;
}
4423
/*
 * Release a freshly forked child that is blocked reading the fork pipe,
 * then close and clear our end of that pipe.
 */
void
job_uncork_fork(job_t j)
{
	pid_t c = j->p;

	job_log(j, LOG_DEBUG, "Uncorking the fork().");
	/* this unblocks the child and avoids a race
	 * between the above fork() and the kevent_mod() */
	job_assumes(j, write(j->forkfd, &c, sizeof(c)) == sizeof(c));
	job_assumes(j, runtime_close(j->forkfd) != -1);
	j->forkfd = 0;
}
4436
/*
 * Create a new job manager (a Mach bootstrap namespace).
 *
 * jm:            parent manager, or NULL for a root manager.
 * requestorport: port of the requesting process; required for sub-managers.
 * transfer_port: pre-existing bootstrap port to adopt, or MACH_PORT_NULL
 *                to create/check in for one.
 * sflag:         passed through to the session bootstrapper (-s).
 * name:          session name; when set, a launchctl bootstrapper job is
 *                created for the session.
 *
 * Returns the new manager, or NULL on failure (partially constructed
 * state is torn down via jobmgr_remove()).
 */
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name)
{
	mach_msg_size_t mxmsgsz;
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	/* kqueue callback dispatch relies on the callback being the first member */
	launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (jm && requestorport == MACH_PORT_NULL) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	/* 128 bytes reserved for the name when none is given yet */
	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : 128));

	if (jmr == NULL) {
		return NULL;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	/* learn when the requestor dies so the sub-bootstrap can be torn down */
	if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && getpid() != 1) {
		/* per-user launchd: check in with the system launchd for our port */
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = strtol(trusted_fd, NULL, 10);

			/* dup() probes validity before we close the inherited fd */
			if ((dfd = dup(lfd)) >= 0) {
				jobmgr_assumes(jmr, runtime_close(dfd) != -1);
				jobmgr_assumes(jmr, runtime_close(lfd) != -1);
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		/* cut off the Libc cache, we don't want to deadlock against ourself */
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);

		/* We set this explicitly as we start each child */
		launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
	} else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (!name) {
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!jm) {
		/* only the root manager owns the signal and filesystem event sources */
		jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
	}

	if (name) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	}

	if (jmr->parentmgr) {
		runtime_add_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
	}
	return NULL;
}
4549
4550 job_t
4551 jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
4552 {
4553 const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
4554 char thelabel[1000];
4555 job_t bootstrapper;
4556
4557 snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
4558 bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
4559 if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || getuid())) {
4560 char buf[100];
4561
4562 /* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
4563 snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
4564 envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
4565 bootstrapper->weird_bootstrap = true;
4566 jobmgr_assumes(jm, job_setup_machport(bootstrapper));
4567 }
4568
4569 jm->session_initialized = true;
4570
4571 return bootstrapper;
4572 }
4573
/* Tear down everything in the job-manager hierarchy that refers to the
 * given Mach port: service registrations sharing the port, sub-managers
 * whose requestor port it is, and the inherited bootstrap port itself.
 * Returns `jm' (or the result of shutting it down when `port' was its
 * requestor port).
 */
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right
	 * to a port we already have open, the Mach kernel gives us the same
	 * port number back and increments an reference count associated with
	 * the port. This forces us, when discovering that a receive right at
	 * the other end has been deleted, to wander all of our objects to see
	 * what weird places clients might have handed us the same send right
	 * to use.
	 */

	if (jm == root_jobmgr) {
		if (port == inherited_bootstrap_port) {
			/* Our parent bootstrap is gone; begin a full shutdown. */
			launchd_assumes(launchd_mport_deallocate(port) == KERN_SUCCESS);
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		/* The port hash is global, so it's scanned only once, from the root. */
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		/* NOTE(review): the recursive result is intentionally ignored; the
		 * SAFE iterator tolerates jmi being removed during the call. */
		jobmgr_delete_anything_with_port(jmi, port);
	}

	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: 0x%x", port);
		return jobmgr_shutdown(jm);
	}

	return jm;
}
4616
4617 struct machservice *
4618 jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
4619 {
4620 struct machservice *ms;
4621
4622 if (target_pid) {
4623 jobmgr_assumes(jm, !check_parent);
4624 }
4625
4626 LIST_FOREACH(ms, &jm->ms_hash[hash_ms(name)], name_hash_sle) {
4627 if ((target_pid && ms->per_pid && ms->job->p == target_pid) || (!target_pid && !ms->per_pid)) {
4628 if (strcmp(name, ms->name) == 0) {
4629 return ms;
4630 }
4631 }
4632 }
4633
4634 if (jm->parentmgr == NULL) {
4635 return NULL;
4636 }
4637
4638 if (!check_parent) {
4639 return NULL;
4640 }
4641
4642 return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
4643 }
4644
/* Accessor: the Mach port backing this service. */
mach_port_t
machservice_port(struct machservice *ms)
{
	return ms->port;
}
4650
/* Accessor: the job that registered this service. */
job_t
machservice_job(struct machservice *ms)
{
	return ms->job;
}
4656
/* Accessor: the service's `hide' flag. */
bool
machservice_hidden(struct machservice *ms)
{
	return ms->hide;
}
4662
/* Accessor: whether the service is currently checked out/in use. */
bool
machservice_active(struct machservice *ms)
{
	return ms->isActive;
}
4668
/* Accessor: the service's registered name. */
const char *
machservice_name(struct machservice *ms)
{
	return ms->name;
}
4674
/* Destroy a Mach service: drop the rights we hold on its port, unhook it
 * from every list referencing it, and free it. `port_died' only affects
 * the log message.
 */
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	if (ms->debug_on_close) {
		/* Debugging aid: drop into the kernel debugger when this port dies. */
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
	}

	/* We should only still hold the receive right if the service isn't checked out. */
	if (ms->recv && job_assumes(j, !ms->isActive)) {
		job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	}

	/* Drop the send right kept on every service. */
	job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);

	if (ms->port == the_exception_server) {
		the_exception_server = 0;
	}

	job_log(j, LOG_INFO, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	/* Services doubling as host special ports are also on the global list. */
	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}

	SLIST_REMOVE(&j->machservices, ms, machservice, sle);
	LIST_REMOVE(ms, name_hash_sle);
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
4705
4706 void
4707 machservice_request_notifications(struct machservice *ms)
4708 {
4709 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
4710
4711 ms->isActive = true;
4712
4713 if (ms->recv) {
4714 which = MACH_NOTIFY_PORT_DESTROYED;
4715 job_checkin(ms->job);
4716 }
4717
4718 job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
4719 }
4720
#define NELEM(x) (sizeof(x)/sizeof(x[0]))	/* element count of a true array (not a pointer) */
#define END_OF(x) (&(x)[NELEM(x)])	/* one-past-the-end pointer of array x */
4723
/*
 * Parse a mach_init-style command string into an argv vector.
 *
 * Splits on whitespace, honoring double-quoted arguments and backslash
 * escapes. The result is a single heap allocation containing both the
 * pointer vector and the packed argument strings, so the caller frees it
 * with one free(). Returns NULL for an empty string or on allocation
 * failure. Inputs exceeding the fixed scratch limits (99 arguments /
 * ~1000 bytes of argument text) are truncated rather than overflowed.
 */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		/* isspace() requires an unsigned char value to avoid UB on negative chars */
		while (isspace((unsigned char)*cp))
			cp++;
		if (*cp == '\0') {
			break;	/* trailing whitespace: don't fabricate an empty argument */
		}
		if (argp >= END_OF(args)) {
			break;	/* scratch buffer exhausted: drop the remainder */
		}
		term = (*cp == '"') ? *cp++ : '\0';
		if (nargs < NELEM(argv) - 1) {
			/* reserve the final slot for the NULL terminator below */
			argv[nargs++] = argp;
		}
		while (*cp && (term ? *cp != term : !isspace((unsigned char)*cp)) && argp < END_OF(args) - 1) {
			if (*cp == '\\') {
				cp++;
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		*argp++ = '\0';	/* in bounds: the copy loop stops at END_OF(args) - 1 */
	}
	argv[nargs] = NULL;	/* in bounds: nargs <= NELEM(argv) - 1 */

	if (nargs == 0) {
		return NULL;
	}

	/* One allocation: [pointer vector][packed strings]. The packed strings
	 * (including their NUL terminators) never exceed strlen(string) + 1,
	 * since every stored byte maps to a distinct source byte or separator.
	 */
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!launchd_assumes(argv_ret != NULL)) {
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
4773
/* Record that the job has checked in (e.g. collected one of its Mach services). */
void
job_checkin(job_t j)
{
	j->checkedin = true;
}
4779
/* Handle a port-destroyed notification: the receive right for one of our
 * advertised services has come back to us (its holder died or released
 * it). Marks the service inactive, recreates the port when `reset' is
 * set, and re-evaluates the owning job. Returns false when the port
 * doesn't match any service we track.
 */
bool
job_ack_port_destruction(mach_port_t p)
{
	struct machservice *ms;

	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->recv && (ms->port == p)) {
			break;
		}
	}

	/* ms is NULL when the loop ran to completion without matching. */
	if (!ms) {
		return false;
	}

	ms->isActive = false;

	if (ms->reset) {
		machservice_resetport(ms->job, ms);
	}

	job_log(ms->job, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
	job_dispatch(ms->job, false);

	/* The returned right may make managers/jobs collectible. */
	root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);

	return true;
}
4808
/* Handle a no-more-senders notification on the job's privileged bootstrap
 * port: close our receive right and re-evaluate the job.
 */
void
job_ack_no_senders(job_t j)
{
	j->priv_port_has_senders = false;

	job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	j->j_port = 0;

	job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");

	job_dispatch(j, false);
}
4821
4822 jobmgr_t
4823 job_get_bs(job_t j)
4824 {
4825 if (job_assumes(j, j->mgr != NULL)) {
4826 return j->mgr;
4827 }
4828
4829 return NULL;
4830 }
4831
/* Accessor: the job's `anonymous' flag. */
bool
job_is_anonymous(job_t j)
{
	return j->anonymous;
}
4837
/* During a debugged shutdown (debug_shutdown_hangs), run the 'sample' tool
 * against this job's process and copy the resulting report to the console
 * so shutdown hangs can be diagnosed post mortem. Blocks launchd while
 * 'sample' runs; a no-op otherwise.
 */
void
job_force_sampletool(job_t j)
{
	struct stat sb;
	char logfile[PATH_MAX];
	char pidstr[100];
	char *sample_args[] = { "sample", pidstr, "1", "-mayDie", "-file", logfile, NULL };
	char *contents = NULL;
	int logfile_fd = -1;
	int console_fd = -1;
	int wstatus;
	pid_t sp;

	if (!debug_shutdown_hangs) {
		return;
	}

	snprintf(pidstr, sizeof(pidstr), "%u", j->p);
	snprintf(logfile, sizeof(logfile), SHUTDOWN_LOG_DIR "/%s-%u.sample.txt", j->label, j->p);

	/* Start with a fresh log file; a missing one is fine. */
	if (!job_assumes(j, unlink(logfile) != -1 || errno == ENOENT)) {
		goto out;
	}

	/*
	 * This will stall launchd for as long as the 'sample' tool runs.
	 *
	 * We didn't give the 'sample' tool a bootstrap port, so it therefore
	 * can't deadlock against launchd.
	 */
	if (!job_assumes(j, (errno = posix_spawnp(&sp, sample_args[0], NULL, NULL, sample_args, environ)) == 0)) {
		goto out;
	}

	job_log(j, LOG_DEBUG, "Waiting for 'sample' to finish.");

	if (!job_assumes(j, waitpid(sp, &wstatus, 0) != -1)) {
		goto out;
	}

	/*
	 * This won't work if the VFS or filesystems are sick:
	 * sync();
	 */

	if (!job_assumes(j, WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0)) {
		goto out;
	}

	if (!job_assumes(j, (logfile_fd = open(logfile, O_RDONLY|O_NOCTTY)) != -1)) {
		goto out;
	}

	if (!job_assumes(j, (console_fd = open(_PATH_CONSOLE, O_WRONLY|O_APPEND|O_NOCTTY)) != -1)) {
		goto out;
	}

	if (!job_assumes(j, fstat(logfile_fd, &sb) != -1)) {
		goto out;
	}

	/* Slurp the whole report so it reaches the console in a single write. */
	contents = malloc(sb.st_size);

	if (!job_assumes(j, contents != NULL)) {
		goto out;
	}

	if (!job_assumes(j, read(logfile_fd, contents, sb.st_size) == sb.st_size)) {
		goto out;
	}

	job_assumes(j, write(console_fd, contents, sb.st_size) == sb.st_size);

out:
	/* goto-style cleanup: release whatever was acquired above. */
	if (contents) {
		free(contents);
	}

	if (logfile_fd != -1) {
		job_assumes(j, runtime_fsync(logfile_fd) != -1);
		job_assumes(j, runtime_close(logfile_fd) != -1);
	}

	if (console_fd != -1) {
		job_assumes(j, runtime_close(console_fd) != -1);
	}

	job_log(j, LOG_DEBUG, "Finished sampling.");
}
4927
/* Create a KeepAlive condition of kind `why' for job `j'. `what' is the
 * optional payload (e.g. a watched path or another job's label), stored
 * inline in the item's trailing what_init buffer. Returns false on
 * allocation failure.
 */
bool
semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
{
	struct semaphoreitem *si;
	size_t alloc_sz = sizeof(struct semaphoreitem);

	if (what) {
		/* Make room for the payload inside the same allocation. */
		alloc_sz += strlen(what) + 1;
	}

	if (!job_assumes(j, si = calloc(1, alloc_sz))) {
		return false;
	}

	/* -1 marks "no descriptor open" (used by path-watching items). */
	si->fd = -1;
	si->why = why;

	if (what) {
		strcpy(si->what_init, what);
	}

	SLIST_INSERT_HEAD(&j->semaphores, si, sle);

	/* Externally-driven conditions pin the runtime so launchd stays alive. */
	semaphoreitem_runtime_mod_ref(si, true);

	return true;
}
4955
4956 void
4957 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
4958 {
4959 /*
4960 * External events need to be tracked.
4961 * Internal events do NOT need to be tracked.
4962 */
4963
4964 switch (si->why) {
4965 case SUCCESSFUL_EXIT:
4966 case FAILED_EXIT:
4967 case OTHER_JOB_ENABLED:
4968 case OTHER_JOB_DISABLED:
4969 case OTHER_JOB_ACTIVE:
4970 case OTHER_JOB_INACTIVE:
4971 return;
4972 default:
4973 break;
4974 }
4975
4976 if (add) {
4977 runtime_add_ref();
4978 } else {
4979 runtime_del_ref();
4980 }
4981 }
4982
/* Destroy a KeepAlive condition: drop its runtime reference, unlink it
 * from the job, close any descriptor it holds open, and free it.
 */
void
semaphoreitem_delete(job_t j, struct semaphoreitem *si)
{
	semaphoreitem_runtime_mod_ref(si, false);

	SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);

	/* Path-watching items keep an fd open on the watched path. */
	if (si->fd != -1) {
		job_assumes(j, runtime_close(si->fd) != -1);
	}

	free(si);
}
4996
4997 void
4998 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
4999 {
5000 struct semaphoreitem_dict_iter_context *sdic = context;
5001 semaphore_reason_t why;
5002
5003 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
5004
5005 semaphoreitem_new(sdic->j, why, key);
5006 }
5007
/* launch_data_dict_iterate() callback for the KeepAlive dictionary: map
 * each recognized key (boolean or sub-dictionary value) onto one or more
 * semaphore items of job `j' (passed as `context').
 */
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			/* Exit-status conditions imply the job must run at least once. */
			j->start_pending = true;
		} else {
			/* Unknown boolean KeepAlive key. */
			job_assumes(j, false);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		/* Sub-dictionaries map member keys to a true/false reason pair. */
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
			sdic.why_true = PATH_EXISTS;
			sdic.why_false = PATH_MISSING;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			/* Unknown dictionary KeepAlive key. */
			job_assumes(j, false);
			break;
		}

		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		job_assumes(j, false);
		break;
	}
}
5050
5051 void
5052 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
5053 {
5054 jobmgr_t jmi, jmn;
5055 job_t ji, jn;
5056
5057
5058 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
5059 jobmgr_dispatch_all_semaphores(jmi);
5060 }
5061
5062 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
5063 if (!SLIST_EMPTY(&ji->semaphores)) {
5064 job_dispatch(ji, false);
5065 }
5066 }
5067 }
5068
time_t
cronemu(int mon, int mday, int hour, int min)
{
	/* Compute the next absolute time, starting one minute from now, that
	 * matches the given crontab-style fields (-1 means wildcard).
	 */
	struct tm candidate;
	time_t now = time(NULL);

	candidate = *localtime(&now);
	candidate.tm_isdst = -1;	/* let mktime() determine DST */
	candidate.tm_sec = 0;
	candidate.tm_min++;		/* never fire within the current minute */

	while (!cronemu_mon(&candidate, mon, mday, hour, min)) {
		/* No match left this year: restart at Jan 1 of the next one. */
		candidate.tm_year++;
		candidate.tm_mon = 0;
		candidate.tm_mday = 1;
		candidate.tm_hour = 0;
		candidate.tm_min = 0;
		mktime(&candidate);	/* normalize carried fields */
	}

	return mktime(&candidate);
}
5093
time_t
cronemu_wday(int wday, int hour, int min)
{
	/* Compute the next absolute time that falls on the given weekday and
	 * matches hour/min (-1 means wildcard for those). Both 0 and 7 mean
	 * Sunday, matching crontab convention.
	 */
	struct tm candidate;
	time_t now = time(NULL);

	candidate = *localtime(&now);
	candidate.tm_isdst = -1;	/* let mktime() determine DST */
	candidate.tm_sec = 0;
	candidate.tm_min++;		/* never fire within the current minute */

	if (wday == 7) {
		wday = 0;
	}

	while (!(candidate.tm_wday == wday && cronemu_hour(&candidate, hour, min))) {
		/* Try the next day until both the weekday and time match. */
		candidate.tm_mday++;
		candidate.tm_hour = 0;
		candidate.tm_min = 0;
		mktime(&candidate);
	}

	return mktime(&candidate);
}
5120
/* Advance *wtm to the next time matching month `mon' (-1 wildcard) and the
 * finer-grained fields. Returns false when no match exists before the year
 * rolls over; the caller (cronemu) then advances the year and retries.
 */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		/* Wildcard: try each month in turn. mktime() normalizes the
		 * fields; tm_mon changing across the call means the increment
		 * carried into the next year, i.e. no match this year.
		 */
		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mon < wtm->tm_mon) {
		/* The requested month has already passed this year. */
		return false;
	}

	if (mon > wtm->tm_mon) {
		/* Jump to the requested month and reset the finer fields. */
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
5156
/* Advance *wtm to the next time matching day-of-month `mday' (-1 wildcard)
 * and the finer-grained fields. Returns false when no match exists before
 * the month rolls over; the caller then advances the month and retries.
 */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		/* Wildcard: try each day; a tm_mday change across mktime() means
		 * we carried into the next month, i.e. no match this month.
		 */
		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mday < wtm->tm_mday) {
		/* The requested day has already passed this month. */
		return false;
	}

	if (mday > wtm->tm_mday) {
		/* Jump to the requested day and reset the finer fields. */
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
5190
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	/* Advance *wtm to the next time matching `hour' (and `min'); -1 is a
	 * wildcard. Returns false when the search would carry into the next
	 * day, telling the caller to advance the day and retry.
	 */
	if (hour == -1) {
		struct tm scratch = *wtm;
		int before_carry;

		/* Wildcard: scan hour by hour until the minute matches; a change
		 * in tm_hour across mktime() means we carried into a new day.
		 */
		while (!cronemu_min(&scratch, min)) {
			scratch.tm_hour++;
			scratch.tm_min = 0;
			before_carry = scratch.tm_hour;
			mktime(&scratch);
			if (before_carry != scratch.tm_hour) {
				return false;
			}
		}

		*wtm = scratch;
		return true;
	}

	if (hour < wtm->tm_hour) {
		/* The requested hour has already passed today. */
		return false;
	}

	if (hour > wtm->tm_hour) {
		/* Jump to the requested hour and reset the minute. */
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
5222
bool
cronemu_min(struct tm *wtm, int min)
{
	/* Advance wtm->tm_min to match `min'; -1 matches any minute.
	 * Returns false when `min' lies in the past of the current hour,
	 * signalling the caller to carry into the next hour.
	 */
	bool reachable = true;

	if (min != -1) {
		if (min < wtm->tm_min) {
			reachable = false;
		} else if (min > wtm->tm_min) {
			wtm->tm_min = min;
		}
	}

	return reachable;
}
5240
/* MIG server routine (bootstrap_create_server): create a mach_init-style
 * on-demand server job running `server_cmd' as `server_uid'. The new
 * job's privileged bootstrap port is returned in *server_portp.
 */
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	struct ldcred ldc;
	job_t js;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	runtime_get_caller_creds(&ldc);

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

#define LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
	/* XXX - This code should go away once the per session launchd is integrated with the rest of the system */
#ifdef LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
	if (getpid() == 1) {
		/* Non-root callers may only create servers running as themselves. */
		if (ldc.euid && server_uid && (ldc.euid != server_uid)) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": Will run as UID %d, not UID %d as they told us to",
					server_cmd, ldc.euid, server_uid);
			server_uid = ldc.euid;
		}
	} else
#endif
	if (getuid()) {
		/* We aren't root, so we can't switch UIDs for the server. */
		if (server_uid != getuid()) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; /* zero means "do nothing" */
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (js == NULL) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
5287
/* MIG server routine: send `sig' to the job labelled `targetlabel', or —
 * for the magic VPROC_MAGIC_UNLOAD_SIGNAL value — remove the job entirely,
 * holding the MIG reply (via `srp') until its process is actually gone.
 */
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	struct ldcred ldc;
	job_t otherj;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	/* Only root or the user we run as may signal jobs through us. */
	if (ldc.euid != 0 && ldc.euid != getuid()) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!(otherj = job_find(targetlabel))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		/* Only block the reply if there is a live process to wait on. */
		bool do_block = otherj->p;

		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			/* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
			job_assumes(otherj, waiting4removal_new(otherj, srp));
			/* The reply is sent later, when the process exits. */
			return MIG_NO_REPLY;
		} else {
			return 0;
		}
	} else if (otherj->p) {
		job_assumes(j, runtime_kill(otherj->p, sig) != -1);
	}

	return 0;
}
5331
/* MIG server routine: accept pre-formatted log data from a per-user
 * launchd and merge it into our logging stream, tagged with the caller's
 * credentials.
 */
kern_return_t
job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	struct ldcred ldc;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Only per-user launchd instances may forward logs to us. */
	if (!job_assumes(j, j->per_user)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	runtime_get_caller_creds(&ldc);

	return runtime_log_forward(ldc.euid, ldc.egid, inval, invalCnt);
}
5349
/* MIG server routine: hand the accumulated in-memory log buffer to the
 * caller. Root-only; the data is returned through `outval'/`outvalCnt'
 * by runtime_log_drain(), with `srp' available for a deferred reply.
 */
kern_return_t
job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	struct ldcred ldc;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	/* Draining the shared log is a root-only operation. */
	if (ldc.euid) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return runtime_log_drain(srp, outval, outvalCnt);
}
5367
5368 kern_return_t
5369 job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
5370 vm_offset_t inval, mach_msg_type_number_t invalCnt,
5371 vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
5372 {
5373 const char *action;
5374 launch_data_t input_obj, output_obj;
5375 size_t data_offset = 0;
5376 size_t packed_size;
5377 struct ldcred ldc;
5378
5379 runtime_get_caller_creds(&ldc);
5380
5381 if (!launchd_assumes(j != NULL)) {
5382 return BOOTSTRAP_NO_MEMORY;
5383 }
5384
5385 if (inkey && ldc.euid && ldc.euid != getuid()) {
5386 return BOOTSTRAP_NOT_PRIVILEGED;
5387 }
5388
5389 if (inkey && outkey && !job_assumes(j, inkey == outkey)) {
5390 return 1;
5391 }
5392
5393 if (inkey && outkey) {
5394 action = "Swapping";
5395 } else if (inkey) {
5396 action = "Setting";
5397 } else {
5398 action = "Getting";
5399 }
5400
5401 job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
5402
5403 *outvalCnt = 20 * 1024 * 1024;
5404 mig_allocate(outval, *outvalCnt);
5405 if (!job_assumes(j, *outval != 0)) {
5406 return 1;
5407 }
5408
5409 if (invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
5410 goto out_bad;
5411 }
5412
5413 switch (outkey) {
5414 case VPROC_GSK_ENVIRONMENT:
5415 if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
5416 goto out_bad;
5417 }
5418 jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
5419 if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
5420 goto out_bad;
5421 }
5422 launch_data_free(output_obj);
5423 break;
5424 case VPROC_GSK_ALLJOBS:
5425 if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
5426 goto out_bad;
5427 }
5428 ipc_revoke_fds(output_obj);
5429 packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
5430 if (!job_assumes(j, packed_size != 0)) {
5431 goto out_bad;
5432 }
5433 launch_data_free(output_obj);
5434 break;
5435 case 0:
5436 mig_deallocate(*outval, *outvalCnt);
5437 *outval = 0;
5438 *outvalCnt = 0;
5439 break;
5440 default:
5441 goto out_bad;
5442 }
5443
5444 if (invalCnt) switch (inkey) {
5445 case VPROC_GSK_ENVIRONMENT:
5446 job_assumes(j, false);
5447 break;
5448 case 0:
5449 break;
5450 default:
5451 goto out_bad;
5452 }
5453
5454 mig_deallocate(inval, invalCnt);
5455
5456 return 0;
5457
5458 out_bad:
5459 if (*outval) {
5460 mig_deallocate(*outval, *outvalCnt);
5461 }
5462 return 1;
5463 }
5464
/* MIG server routine: get and/or set a scalar per-job or global value.
 * `outkey' selects what to read into *outval, `inkey' what to apply from
 * `inval'; either may be 0 for "nothing". Returns 0 on success, 1 on an
 * unknown key or out-of-range value.
 */
kern_return_t
job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
{
	const char *action;
	kern_return_t kr = 0;
	struct ldcred ldc;
	int oldmask;

	runtime_get_caller_creds(&ldc);

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Setting values requires root or the user we are running as. */
	if (inkey && ldc.euid && ldc.euid != getuid()) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* A swap must target the same key on both sides. */
	if (inkey && outkey && !job_assumes(j, inkey == outkey)) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	/* The "get" half runs first so a swap returns the previous value. */
	switch (outkey) {
	case VPROC_GSK_LAST_EXIT_STATUS:
		*outval = j->last_exit_status;
		break;
	case VPROC_GSK_MGR_UID:
		*outval = getuid();
		break;
	case VPROC_GSK_MGR_PID:
		*outval = getpid();
		break;
	case VPROC_GSK_IS_MANAGED:
		*outval = j->anonymous ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		*outval = !j->ondemand;
		break;
	case VPROC_GSK_START_INTERVAL:
		*outval = j->start_interval;
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		*outval = j->timeout;
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		*outval = j->exit_timeout;
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		/* No read-only API for the mask: set, capture the old value, restore. */
		oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
		*outval = oldmask;
		runtime_setlogmask(oldmask);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		/* Same set-read-restore trick for the process umask. */
		oldmask = umask(0);
		*outval = oldmask;
		umask(oldmask);
		break;
	case 0:
		*outval = 0;
		break;
	default:
		kr = 1;
		break;
	}

	/* The "set" half. */
	switch (inkey) {
	case VPROC_GSK_GLOBAL_ON_DEMAND:
		kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		j->ondemand = !inval;
		break;
	case VPROC_GSK_START_INTERVAL:
		if ((uint64_t)inval > UINT32_MAX) {
			kr = 1;
		} else if (inval) {
			if (j->start_interval == 0) {
				/* First interval set: active timers pin the runtime. */
				runtime_add_ref();
			} else {
				/* Workaround 5225889 */
				job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
			}
			j->start_interval = inval;
			job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
		} else if (j->start_interval) {
			job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
			/* NOTE(review): this inner check is always true inside this branch. */
			if (j->start_interval != 0) {
				runtime_del_ref();
			}
			j->start_interval = 0;
		}
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		if ((unsigned int)inval > 0) {
			j->timeout = inval;
		}
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		if ((unsigned int)inval > 0) {
			j->exit_timeout = inval;
		}
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		runtime_setlogmask(inval);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		umask(inval);
		break;
	case 0:
		break;
	default:
		kr = 1;
		break;
	}

	return kr;
}
5592
/* MIG server routine: called by a freshly-forked child before exec. Wires
 * up the exception port and each registered special port on the child
 * task, then drops our right to the task.
 */
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task)
{
	struct machservice *ms;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	job_setup_exception_port(j, child_task);

	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			/* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
			continue;
		}

		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);

		if (errno) {
			/* Failures for anonymous jobs are expected in some cases, so
			 * log them at reduced severity. */
			int desired_log_level = LOG_ERR;

			if (j->anonymous) {
				/* 5338127 */

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* Release the task right the child sent with the ping. */
	job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);

	return 0;
}
5635
/* MIG server routine: initiate a shutdown/reboot with the given flags.
 * PID 1 and root callers only. Logs the caller's full process ancestry
 * so post-mortem analysis can tell who asked for the reboot.
 */
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct kinfo_proc kp;
	struct ldcred ldc;
	pid_t pid_to_log;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (getpid() != 1) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	runtime_get_caller_creds(&ldc);

	if (ldc.euid) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Walk up the caller's process tree; the loop ends when the parent
	 * PID reaches 0. NOTE(review): assumes every sysctl hit fills kp for
	 * a live process — verify behavior if a PID vanishes mid-walk. */
	for (pid_to_log = ldc.pid; pid_to_log; pid_to_log = kp.kp_eproc.e_ppid) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid_to_log };
		size_t who_offset, len = sizeof(kp);

		if (!job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
			return 1;
		}

		/* Append " name[pid] ->" for each ancestor. */
		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", kp.kp_proc.p_comm, pid_to_log, kp.kp_eproc.e_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;

	launchd_shutdown();

	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);

	return 0;
}
5679
5680 kern_return_t
5681 job_mig_getsocket(job_t j, name_t spr)
5682 {
5683 if (!launchd_assumes(j != NULL)) {
5684 return BOOTSTRAP_NO_MEMORY;
5685 }
5686
5687 ipc_server_init();
5688
5689 if (!sockpath) {
5690 return BOOTSTRAP_NO_MEMORY;
5691 }
5692
5693 strncpy(spr, sockpath, sizeof(name_t));
5694
5695 return BOOTSTRAP_SUCCESS;
5696 }
5697
5698 kern_return_t
5699 job_mig_log(job_t j, int pri, int err, logmsg_t msg)
5700 {
5701 if (!launchd_assumes(j != NULL)) {
5702 return BOOTSTRAP_NO_MEMORY;
5703 }
5704
5705 if ((errno = err)) {
5706 job_log_error(j, pri, "%s", msg);
5707 } else {
5708 job_log(j, pri, "%s", msg);
5709 }
5710
5711 return 0;
5712 }
5713
/* Lazily create the "Background" session job manager under the root
 * manager. Only meaningful in PID 1; a no-op when it already exists.
 */
void
ensure_root_bkgd_setup(void)
{
	if (background_jobmgr || getpid() != 1) {
		return;
	}

	if (!jobmgr_assumes(root_jobmgr, (background_jobmgr = jobmgr_new(root_jobmgr, mach_task_self(), MACH_PORT_NULL, false, VPROCMGR_SESSION_BACKGROUND)) != NULL)) {
		return;
	}

	/* No requestor port: this manager lives for launchd's lifetime. */
	background_jobmgr->req_port = 0;
	jobmgr_assumes(root_jobmgr, launchd_mport_make_send(background_jobmgr->jm_port) == KERN_SUCCESS);
}
5728
/* MIG server routine: find — or lazily create — the per-user launchd for
 * `which_user' and return its bootstrap port in *up_cont. Supported only
 * in PID 1; non-root callers are redirected to their own UID's instance.
 */
kern_return_t
job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
{
	struct ldcred ldc;
	job_t ji;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Looking up per user launchd for UID: %u", which_user);

	runtime_get_caller_creds(&ldc);

	if (getpid() != 1) {
		job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Non-root callers may only look up their own per-user launchd. */
	if (ldc.euid || ldc.uid) {
		which_user = ldc.euid ? ldc.euid : ldc.uid;
	}

	*up_cont = MACH_PORT_NULL;

	if (which_user == 0) {
		/* Root maps to the shared Background session. */
		ensure_root_bkgd_setup();

		*up_cont = background_jobmgr->jm_port;

		return 0;
	}

	/* Look for an existing per-user launchd job for this UID. */
	LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
		if (!ji->per_user) {
			continue;
		}
		if (ji->mach_uid != which_user) {
			continue;
		}
		if (SLIST_EMPTY(&ji->machservices)) {
			continue;
		}
		if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
			continue;
		}
		break;
	}

	if (ji == NULL) {
		struct machservice *ms;
		char lbuf[1024];

		job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);

		sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);

		ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);

		if (ji == NULL) {
			return BOOTSTRAP_NO_MEMORY;
		}

		ji->mach_uid = which_user;
		ji->per_user = true;

		/* The service's port doubles as the per-user bootstrap port. */
		if ((ms = machservice_new(ji, lbuf, up_cont, false)) == NULL) {
			job_remove(ji);
			return BOOTSTRAP_NO_MEMORY;
		}

		ms->per_user_hack = true;
		ms->hide = true;

		ji = job_dispatch(ji, false);
	} else {
		job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
	}

	if (job_assumes(j, ji != NULL)) {
		*up_cont = machservice_port(SLIST_FIRST(&ji->machservices));
	}

	return 0;
}
5814
5815 kern_return_t
5816 job_mig_check_in(job_t j, name_t servicename, mach_port_t *serviceportp)
5817 {
5818 static pid_t last_warned_pid = 0;
5819 struct machservice *ms;
5820 struct ldcred ldc;
5821
5822 if (!launchd_assumes(j != NULL)) {
5823 return BOOTSTRAP_NO_MEMORY;
5824 }
5825
5826 runtime_get_caller_creds(&ldc);
5827
5828 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
5829
5830 if (ms == NULL) {
5831 job_log(j, LOG_DEBUG, "Check-in of Mach service failed. Unknown: %s", servicename);
5832 return BOOTSTRAP_UNKNOWN_SERVICE;
5833 }
5834 if (machservice_job(ms) != j) {
5835 if (last_warned_pid != ldc.pid) {
5836 job_log(j, LOG_NOTICE, "Check-in of Mach service failed. PID %d is not privileged: %s",
5837 ldc.pid, servicename);
5838 last_warned_pid = ldc.pid;
5839 }
5840 return BOOTSTRAP_NOT_PRIVILEGED;
5841 }
5842 if (machservice_active(ms)) {
5843 job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
5844 return BOOTSTRAP_SERVICE_ACTIVE;
5845 }
5846
5847 machservice_request_notifications(ms);
5848
5849 job_log(j, LOG_INFO, "Check-in of service: %s", servicename);
5850
5851 *serviceportp = machservice_port(ms);
5852 return BOOTSTRAP_SUCCESS;
5853 }
5854
/*
 * MIG server routine: bootstrap_register2(). Register (or re-register) a
 * send right for a Mach service name on behalf of the calling job.
 * A NULL serviceport deletes the existing registration without replacing it.
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred ldc;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

#if 0
	job_log(j, LOG_APPLEONLY, "bootstrap_register() is deprecated. Service: %s", servicename);
#endif

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (j->anonymous && job_get_bs(j)->parentmgr == NULL && ldc.uid != 0 && ldc.uid != getuid() && ldc.uid != 92) {
		if (getpid() == 1) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc.pid : 0);

	if (ms) {
		/* Only the owning job may replace an existing registration, and
		 * only while it is not actively checked in. */
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		job_checkin(j);
		machservice_delete(j, ms, false);
	}

	/* A non-NULL port replaces the registration; NULL leaves it deleted. */
	if (serviceport != MACH_PORT_NULL) {
		if ((ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	return BOOTSTRAP_SUCCESS;
}
5913
/*
 * MIG server routine: bootstrap_look_up2(). Resolve a Mach service name to a
 * send right, consulting per-job lookup policy, falling back to the inherited
 * bootstrap port, and redirecting certain callers to their per-user context.
 */
kern_return_t
job_mig_look_up2(job_t j, name_t servicename, mach_port_t *serviceportp, mach_msg_type_name_t *ptype, pid_t target_pid, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred ldc;
	kern_return_t kr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	/* Anonymous, non-root callers talking to the root bootstrap get
	 * redirected to their own per-user launchd. */
	if (getpid() == 1 && j->anonymous && job_get_bs(j)->parentmgr == NULL && ldc.uid != 0 && ldc.euid != 0) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

	if (!mspolicy_check(j, servicename, flags & BOOTSTRAP_PER_PID_SERVICE)) {
		job_log(j, LOG_NOTICE, "Policy denied Mach service lookup: %s", servicename);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (flags & BOOTSTRAP_PER_PID_SERVICE) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
	}

	/* Hidden services are only visible while their job is active, and the
	 * per-user bootstrap placeholder is never returned from here. */
	if (ms && machservice_hidden(ms) && !job_active(machservice_job(ms))) {
		ms = NULL;
	} else if (ms && ms->per_user_hack) {
		ms = NULL;
	}

	if (ms) {
		launchd_assumes(machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
#if 0
		/* After Leopard ships, we should enable this */
		if (j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user) {
			ms->bad_perf_cnt++;
			job_log(j, LOG_APPLEONLY, "Performance opportunity: Number of bootstrap_lookup(... \"%s\" ...) calls that should have been cached: %llu",
					servicename, ms->bad_perf_cnt);
		}
		j->lastlookup = ms;
		j->lastlookup_gennum = ms->gen_num;
#endif
		*serviceportp = machservice_port(ms);
		*ptype = MACH_MSG_TYPE_COPY_SEND;
		kr = BOOTSTRAP_SUCCESS;
	} else if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && (inherited_bootstrap_port != MACH_PORT_NULL)) {
		/* Not found locally: forward the lookup up the bootstrap chain. */
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		*ptype = MACH_MSG_TYPE_MOVE_SEND;
		kr = bootstrap_look_up(inherited_bootstrap_port, servicename, serviceportp);
	} else if (getpid() == 1 && j->anonymous && ldc.euid >= 500 && strcasecmp(job_get_bs(j)->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/*
		 * 5240036 Should start background session when a lookup of CCacheServer occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
		 * If we find a EUID that isn't root, we force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
5986
5987 kern_return_t
5988 job_mig_parent(job_t j, mach_port_t *parentport, mach_msg_type_name_t *pptype)
5989 {
5990 if (!launchd_assumes(j != NULL)) {
5991 return BOOTSTRAP_NO_MEMORY;
5992 }
5993
5994 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
5995 jobmgr_t jm = j->mgr;
5996
5997 *pptype = MACH_MSG_TYPE_MAKE_SEND;
5998
5999 if (jobmgr_parent(jm)) {
6000 *parentport = jobmgr_parent(jm)->jm_port;
6001 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
6002 *parentport = jm->jm_port;
6003 } else {
6004 *pptype = MACH_MSG_TYPE_COPY_SEND;
6005 *parentport = inherited_bootstrap_port;
6006 }
6007 return BOOTSTRAP_SUCCESS;
6008 }
6009
6010 kern_return_t
6011 job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt,
6012 bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt)
6013 {
6014 name_array_t service_names = NULL;
6015 bootstrap_status_array_t service_actives = NULL;
6016 unsigned int cnt = 0, cnt2 = 0;
6017 struct machservice *ms;
6018 jobmgr_t jm;
6019 job_t ji;
6020
6021 if (!launchd_assumes(j != NULL)) {
6022 return BOOTSTRAP_NO_MEMORY;
6023 }
6024
6025 jm = j->mgr;
6026
6027 LIST_FOREACH(ji, &jm->jobs, sle) {
6028 SLIST_FOREACH(ms, &ji->machservices, sle) {
6029 if (!ms->per_pid) {
6030 cnt++;
6031 }
6032 }
6033 }
6034
6035 if (cnt == 0) {
6036 goto out;
6037 }
6038
6039 mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
6040 if (!launchd_assumes(service_names != NULL)) {
6041 goto out_bad;
6042 }
6043
6044 mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
6045 if (!launchd_assumes(service_actives != NULL)) {
6046 goto out_bad;
6047 }
6048
6049 LIST_FOREACH(ji, &jm->jobs, sle) {
6050 SLIST_FOREACH(ms, &ji->machservices, sle) {
6051 if (!ms->per_pid) {
6052 strlcpy(service_names[cnt2], machservice_name(ms), sizeof(service_names[0]));
6053 service_actives[cnt2] = machservice_status(ms);
6054 cnt2++;
6055 }
6056 }
6057 }
6058
6059 launchd_assumes(cnt == cnt2);
6060
6061 out:
6062 *servicenamesp = service_names;
6063 *serviceactivesp = service_actives;
6064 *servicenames_cnt = *serviceactives_cnt = cnt;
6065
6066 return BOOTSTRAP_SUCCESS;
6067
6068 out_bad:
6069 if (service_names) {
6070 mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
6071 }
6072 if (service_actives) {
6073 mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
6074 }
6075
6076 return BOOTSTRAP_NO_MEMORY;
6077 }
6078
/*
 * Move job `j` into the job manager (session) named `where`, relinking its
 * Mach services into the destination manager's name hash. A NULL `where`
 * selects LoginWindow or Aqua based on the job's current session.
 */
void
job_reparent_hack(job_t j, const char *where)
{
	jobmgr_t jmi, jmi2;

	ensure_root_bkgd_setup();

	/* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
	if (where == NULL) {
		if (strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
			where = VPROCMGR_SESSION_LOGINWINDOW;
		} else {
			where = VPROCMGR_SESSION_AQUA;
		}
	}

	/* Already in the requested session: nothing to do. */
	if (strcasecmp(j->mgr->name, where) == 0) {
		return;
	}

	/* Search the root manager's children (and, in PID 1, the Background
	 * session's children) for the destination manager. If the loop falls
	 * through without a match, jmi ends up NULL and the assumes below
	 * turns the reparent into a logged no-op. */
	SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
		if (jmi->shutting_down) {
			continue;
		} else if (strcasecmp(jmi->name, where) == 0) {
			goto jm_found;
		} else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && getpid() == 1) {
			SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
				if (strcasecmp(jmi2->name, where) == 0) {
					jmi = jmi2;
					goto jm_found;
				}
			}
		}
	}

jm_found:
	if (job_assumes(j, jmi != NULL)) {
		struct machservice *msi;

		/* Unhash the job's services from the old manager BEFORE switching
		 * j->mgr, then rehash them into the new manager afterwards. */
		SLIST_FOREACH(msi, &j->machservices, sle) {
			LIST_REMOVE(msi, name_hash_sle);
		}

		LIST_REMOVE(j, sle);
		LIST_INSERT_HEAD(&jmi->jobs, j, sle);
		j->mgr = jmi;

		SLIST_FOREACH(msi, &j->machservices, sle) {
			LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}
}
6131
/*
 * MIG server routine: vprocmgr_move_subset_to_user() back-end.
 *
 * Two modes:
 *  - target_subset == MACH_PORT_NULL: (re)initialize the caller's own job
 *    manager as a session of `session_type`, with special handling for
 *    LoginWindow / Aqua / Background / StandardIO sessions.
 *  - otherwise: absorb ("grab") the subset behind target_subset from another
 *    launchd, creating a new child job manager and importing its services.
 */
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred ldc;
	jobmgr_t jmr = NULL;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	if (target_subset == MACH_PORT_NULL) {
		job_t j2;

		if (j->mgr->session_initialized) {
			if (ldc.uid == 0 && getpid() == 1) {
				if (strcmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
					job_t ji, jn;

					/* Re-initializing LoginWindow: remove all non-anonymous
					 * jobs and move the manager under the Background session. */
					LIST_FOREACH_SAFE(ji, &j->mgr->jobs, sle, jn) {
						if (!ji->anonymous) {
							job_remove(ji);
						}
					}

					ensure_root_bkgd_setup();

					SLIST_REMOVE(&j->mgr->parentmgr->submgrs, j->mgr, jobmgr_s, sle);
					j->mgr->parentmgr = background_jobmgr;
					SLIST_INSERT_HEAD(&j->mgr->parentmgr->submgrs, j->mgr, sle);

					/*
					 * We really should wait for all the jobs to die before proceeding. See 5351245 for more info.
					 *
					 * We have hacked around this in job_find() by ignoring jobs that are pending removal.
					 */

				} else if (strcmp(j->mgr->name, VPROCMGR_SESSION_AQUA) == 0) {
					job_log(j, LOG_DEBUG, "Tried to move the Aqua session.");
					return 0;
				} else if (strcmp(j->mgr->name, VPROCMGR_SESSION_BACKGROUND) == 0) {
					job_log(j, LOG_DEBUG, "Tried to move the background session.");
					return 0;
				} else {
					job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
					kr = BOOTSTRAP_NOT_PRIVILEGED;
					goto out;
				}
			} else {
				job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
				kr = BOOTSTRAP_NOT_PRIVILEGED;
				goto out;
			}
		} else if (ldc.uid == 0 && getpid() == 1 && strcmp(session_type, VPROCMGR_SESSION_STANDARDIO) == 0) {
			/* StandardIO sessions live under the Background session. */
			ensure_root_bkgd_setup();

			SLIST_REMOVE(&j->mgr->parentmgr->submgrs, j->mgr, jobmgr_s, sle);
			j->mgr->parentmgr = background_jobmgr;
			SLIST_INSERT_HEAD(&j->mgr->parentmgr->submgrs, j->mgr, sle);
		} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
			jobmgr_t jmi;

			/*
			 * 5330262
			 *
			 * We're working around LoginWindow and the WindowServer.
			 *
			 * In practice, there is only one LoginWindow session. Unfortunately, for certain
			 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
			 * spawns a replacement loginwindow session before cleaning up the previous one.
			 *
			 * We're going to use the creation of a new LoginWindow context as a clue that the
			 * previous LoginWindow context is on the way out and therefore we should just
			 * kick-start the shutdown of it.
			 */

			SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
				if (jmi->shutting_down) {
					continue;
				} else if (strcasecmp(jmi->name, session_type) == 0) {
					jobmgr_shutdown(jmi);
					break;
				}
			}
		}

		jobmgr_log(j->mgr, LOG_DEBUG, "Renaming to: %s", session_type);
		strcpy(j->mgr->name_init, session_type);

		if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
			job_assumes(j, job_dispatch(j2, true));
		}

		kr = 0;
		goto out;
	} else if (job_mig_intran2(root_jobmgr, target_subset, ldc.pid)) {
		/* The port resolves inside this launchd: nothing to grab. */
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	errno = kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);

	if (!job_assumes(j, kr == 0)) {
		goto out;
	}

	launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	/* Re-register each grabbed service under the anonymous job that owns
	 * its PID in the new manager; l2l_ports[i] pairs with array entry i. */
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (!j_for_service) {
			/* The PID probably exited */
			job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
			continue;
		}

		if ((ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	/* On success we consume the caller's send right; on failure we tear
	 * down any half-constructed manager. */
	if (kr == 0) {
		if (target_subset) {
			job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
		}
	} else if (jmr) {
		jobmgr_shutdown(jmr);
	}

	return kr;
}
6305
6306 kern_return_t
6307 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
6308 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
6309 mach_port_array_t *portsp, unsigned int *ports_cnt)
6310 {
6311 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
6312 mach_port_array_t ports = NULL;
6313 unsigned int cnt = 0, cnt2 = 0;
6314 size_t packed_size;
6315 struct machservice *ms;
6316 jobmgr_t jm;
6317 job_t ji;
6318
6319 if (!launchd_assumes(j != NULL)) {
6320 return BOOTSTRAP_NO_MEMORY;
6321 }
6322
6323 jm = j->mgr;
6324
6325 if (getpid() != 1) {
6326 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
6327 return BOOTSTRAP_NOT_PRIVILEGED;
6328 } else if (jobmgr_parent(jm) == NULL) {
6329 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
6330 return BOOTSTRAP_NOT_PRIVILEGED;
6331 } else if (strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0) {
6332 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
6333 return BOOTSTRAP_NOT_PRIVILEGED;
6334 } else if (!j->anonymous) {
6335 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
6336 return BOOTSTRAP_NOT_PRIVILEGED;
6337 }
6338
6339 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
6340
6341 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
6342 if (!job_assumes(j, outdata_obj_array)) {
6343 goto out_bad;
6344 }
6345
6346 *outdataCnt = 20 * 1024 * 1024;
6347 mig_allocate(outdata, *outdataCnt);
6348 if (!job_assumes(j, *outdata != 0)) {
6349 return 1;
6350 }
6351
6352 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
6353 if (!ji->anonymous) {
6354 continue;
6355 }
6356 SLIST_FOREACH(ms, &ji->machservices, sle) {
6357 cnt++;
6358 }
6359 }
6360
6361 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
6362 if (!launchd_assumes(ports != NULL)) {
6363 goto out_bad;
6364 }
6365
6366 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
6367 if (!ji->anonymous) {
6368 continue;
6369 }
6370
6371 SLIST_FOREACH(ms, &ji->machservices, sle) {
6372 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
6373 job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
6374 } else {
6375 goto out_bad;
6376 }
6377
6378 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
6379 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
6380 } else {
6381 goto out_bad;
6382 }
6383
6384 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
6385 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
6386 } else {
6387 goto out_bad;
6388 }
6389
6390 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
6391 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
6392 } else {
6393 goto out_bad;
6394 }
6395
6396 ports[cnt2] = machservice_port(ms);
6397
6398 /* Increment the send right by one so we can shutdown the jobmgr cleanly */
6399 jobmgr_assumes(jm, (errno = mach_port_mod_refs(mach_task_self(), ports[cnt2], MACH_PORT_RIGHT_SEND, 1)) == 0);
6400 cnt2++;
6401 }
6402 }
6403
6404 launchd_assumes(cnt == cnt2);
6405
6406 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
6407 if (!job_assumes(j, packed_size != 0)) {
6408 goto out_bad;
6409 }
6410
6411 launch_data_free(outdata_obj_array);
6412
6413 *portsp = ports;
6414 *ports_cnt = cnt;
6415
6416 *reqport = jm->req_port;
6417 *rcvright = jm->jm_port;
6418
6419 jm->req_port = 0;
6420 jm->jm_port = 0;
6421
6422 workaround_5477111 = j;
6423
6424 jobmgr_shutdown(jm);
6425
6426 return BOOTSTRAP_SUCCESS;
6427
6428 out_bad:
6429 if (outdata_obj_array) {
6430 launch_data_free(outdata_obj_array);
6431 }
6432 if (*outdata) {
6433 mig_deallocate(*outdata, *outdataCnt);
6434 }
6435 if (ports) {
6436 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
6437 }
6438
6439 return BOOTSTRAP_NO_MEMORY;
6440 }
6441
6442 kern_return_t
6443 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
6444 {
6445 int bsdepth = 0;
6446 jobmgr_t jmr;
6447
6448 if (!launchd_assumes(j != NULL)) {
6449 return BOOTSTRAP_NO_MEMORY;
6450 }
6451
6452 jmr = j->mgr;
6453
6454 while ((jmr = jobmgr_parent(jmr)) != NULL) {
6455 bsdepth++;
6456 }
6457
6458 /* Since we use recursion, we need an artificial depth for subsets */
6459 if (bsdepth > 100) {
6460 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
6461 return BOOTSTRAP_NO_MEMORY;
6462 }
6463
6464 if ((jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, NULL)) == NULL) {
6465 if (requestorport == MACH_PORT_NULL) {
6466 return BOOTSTRAP_NOT_PRIVILEGED;
6467 }
6468 return BOOTSTRAP_NO_MEMORY;
6469 }
6470
6471 *subsetportp = jmr->jm_port;
6472 return BOOTSTRAP_SUCCESS;
6473 }
6474
6475 kern_return_t
6476 job_mig_create_service(job_t j, name_t servicename, mach_port_t *serviceportp)
6477 {
6478 struct machservice *ms;
6479
6480 if (!launchd_assumes(j != NULL)) {
6481 return BOOTSTRAP_NO_MEMORY;
6482 }
6483
6484 if (job_prog(j)[0] == '\0') {
6485 job_log(j, LOG_ERR, "Mach service creation requires a target server: %s", servicename);
6486 return BOOTSTRAP_NOT_PRIVILEGED;
6487 }
6488
6489 if (!j->legacy_mach_job) {
6490 job_log(j, LOG_ERR, "bootstrap_create_service() is only allowed against legacy Mach jobs: %s", servicename);
6491 return BOOTSTRAP_NOT_PRIVILEGED;
6492 }
6493
6494 ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
6495 if (ms) {
6496 job_log(j, LOG_DEBUG, "Mach service creation attempt for failed. Already exists: %s", servicename);
6497 return BOOTSTRAP_NAME_IN_USE;
6498 }
6499
6500 job_checkin(j);
6501
6502 *serviceportp = MACH_PORT_NULL;
6503 ms = machservice_new(j, servicename, serviceportp, false);
6504
6505 if (!launchd_assumes(ms != NULL)) {
6506 goto out_bad;
6507 }
6508
6509 return BOOTSTRAP_SUCCESS;
6510
6511 out_bad:
6512 launchd_assumes(launchd_mport_close_recv(*serviceportp) == KERN_SUCCESS);
6513 return BOOTSTRAP_NO_MEMORY;
6514 }
6515
6516 kern_return_t
6517 job_mig_embedded_wait(job_t j, name_t targetlabel, integer_t *waitstatus)
6518 {
6519 job_t otherj;
6520
6521 if (!launchd_assumes(j != NULL)) {
6522 return BOOTSTRAP_NO_MEMORY;
6523 }
6524
6525 if (unlikely(!(otherj = job_find(targetlabel)))) {
6526 return BOOTSTRAP_UNKNOWN_SERVICE;
6527 }
6528
6529 *waitstatus = j->last_exit_status;
6530
6531 return 0;
6532 }
6533
/*
 * MIG server routine (embedded): force-start the job named by `targetlabel`
 * and return its PID plus a task name port for it. Callers must be root,
 * share our EUID, or (on embedded) share the target job's username.
 */
kern_return_t
job_mig_embedded_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, mach_port_t *out_name_port)
{
	struct ldcred ldc;
	kern_return_t kr;
	job_t otherj;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!(otherj = job_find(targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	runtime_get_caller_creds(&ldc);

	/* On embedded targets a matching username is also sufficient. */
	if (ldc.euid != 0 && ldc.euid != geteuid()
#if TARGET_OS_EMBEDDED
	    && j->username && otherj->username
	    && strcmp(j->username, otherj->username) != 0
#endif
	) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Dispatching with force starts the job if it is not already running. */
	otherj = job_dispatch(otherj, true);

	if (!job_assumes(j, otherj && otherj->p)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	kr = task_name_for_pid(mach_task_self(), otherj->p, out_name_port);
	if (!job_assumes(j, kr == 0)) {
		return kr;
	}

	*out_pid = otherj->p;

	return 0;
}
6575
6576 kern_return_t
6577 job_mig_wait(job_t j, mach_port_t srp, integer_t *waitstatus)
6578 {
6579 if (!launchd_assumes(j != NULL)) {
6580 return BOOTSTRAP_NO_MEMORY;
6581 }
6582 #if 0
6583 struct ldcred ldc;
6584 runtime_get_caller_creds(&ldc);
6585 #endif
6586 return job_handle_mpm_wait(j, srp, waitstatus);
6587 }
6588
6589 kern_return_t
6590 job_mig_uncork_fork(job_t j)
6591 {
6592 if (!launchd_assumes(j != NULL)) {
6593 return BOOTSTRAP_NO_MEMORY;
6594 }
6595
6596 if (!j->stall_before_exec) {
6597 job_log(j, LOG_WARNING, "Attempt to uncork a job that isn't in the middle of a fork().");
6598 return 1;
6599 }
6600
6601 job_uncork_fork(j);
6602 j->stall_before_exec = false;
6603 return 0;
6604 }
6605
6606 kern_return_t
6607 job_mig_set_service_policy(job_t j, pid_t target_pid, uint64_t flags, name_t target_service)
6608 {
6609 job_t target_j;
6610
6611 if (!launchd_assumes(j != NULL)) {
6612 return BOOTSTRAP_NO_MEMORY;
6613 }
6614
6615 if (!job_assumes(j, (target_j = jobmgr_find_by_pid(j->mgr, target_pid, true)) != NULL)) {
6616 return BOOTSTRAP_NO_MEMORY;
6617 }
6618
6619 if (SLIST_EMPTY(&j->mspolicies)) {
6620 job_log(j, LOG_DEBUG, "Setting policy on job \"%s\" for Mach service: %s", target_j->label, target_service);
6621 if (target_service[0]) {
6622 job_assumes(j, mspolicy_new(target_j, target_service, flags & BOOTSTRAP_ALLOW_LOOKUP, flags & BOOTSTRAP_PER_PID_SERVICE, false));
6623 } else {
6624 target_j->deny_unknown_mslookups = !(flags & BOOTSTRAP_ALLOW_LOOKUP);
6625 target_j->deny_job_creation = (bool)(flags & BOOTSTRAP_DENY_JOB_CREATION);
6626 }
6627 } else {
6628 job_log(j, LOG_WARNING, "Jobs that have policies assigned to them may not set policies.");
6629 return BOOTSTRAP_NOT_PRIVILEGED;
6630 }
6631
6632 return 0;
6633 }
6634
/*
 * MIG server routine: spawn a transient job from a packed launch_data
 * dictionary delivered as out-of-line data. On success, returns the child's
 * PID and its observer port, and releases the OOL input buffer.
 */
kern_return_t
job_mig_spawn(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, pid_t *child_pid, mach_port_t *obsvr_port)
{
	launch_data_t input_obj = NULL;
	size_t data_offset = 0;
	struct ldcred ldc;
	job_t jr;

	runtime_get_caller_creds(&ldc);

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* In the system launchd, non-root callers spawn in their own context. */
	if (getpid() == 1 && ldc.euid && ldc.uid) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	if (!job_assumes(j, (input_obj = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	jr = jobmgr_import2(j->mgr, input_obj);

	if (!job_assumes(j, jr != NULL)) {
		switch (errno) {
		case EEXIST:
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	job_reparent_hack(jr, NULL);

	if (getpid() == 1) {
		jr->mach_uid = ldc.uid;
	}

	/* Spawned jobs are transient: torn down at exit, tracked via pipe EOF,
	 * and detached from our process group. wait4debugger is translated into
	 * the fork/exec stall handshake. */
	jr->unload_at_exit = true;
	jr->wait4pipe_eof = true;
	jr->abandon_pg = true;
	jr->stall_before_exec = jr->wait4debugger;
	jr->wait4debugger = false;

	jr = job_dispatch(jr, true);

	if (!job_assumes(j, jr != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_setup_machport(jr)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);

	*child_pid = jr->p;
	*obsvr_port = jr->j_port;

	/* Success: release the caller's out-of-line input buffer. */
	mig_deallocate(indata, indataCnt);

	return BOOTSTRAP_SUCCESS;
}
6714
6715 void
6716 jobmgr_init(bool sflag)
6717 {
6718 const char *root_session_type = getpid() == 1 ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
6719
6720 launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type)) != NULL);
6721 }
6722
/*
 * djb2 string hash (first reported by Dan Bernstein in comp.lang.c):
 * h = h * 33 + c, seeded with 5381.
 */
size_t
our_strhash(const char *s)
{
	size_t h = 5381;
	size_t c;

	for (c = (size_t)*s++; c != 0; c = (size_t)*s++) {
		h = (h << 5) + h + c; /* h * 33 + c */
	}

	return h;
}
6738
6739 size_t
6740 hash_label(const char *label)
6741 {
6742 return our_strhash(label) % LABEL_HASH_SIZE;
6743 }
6744
6745 size_t
6746 hash_ms(const char *msstr)
6747 {
6748 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
6749 }
6750
6751 bool
6752 mspolicy_copy(job_t j_to, job_t j_from)
6753 {
6754 struct mspolicy *msp;
6755
6756 SLIST_FOREACH(msp, &j_from->mspolicies, sle) {
6757 if (!mspolicy_new(j_to, msp->name, msp->allow, msp->per_pid, true)) {
6758 return false;
6759 }
6760 }
6761
6762 return true;
6763 }
6764
6765 bool
6766 mspolicy_new(job_t j, const char *name, bool allow, bool pid_local, bool skip_check)
6767 {
6768 struct mspolicy *msp;
6769
6770 if (!skip_check) SLIST_FOREACH(msp, &j->mspolicies, sle) {
6771 if (msp->per_pid != pid_local) {
6772 continue;
6773 } else if (strcmp(msp->name, name) == 0) {
6774 return false;
6775 }
6776 }
6777
6778 if ((msp = calloc(1, sizeof(struct mspolicy) + strlen(name) + 1)) == NULL) {
6779 return false;
6780 }
6781
6782 strcpy((char *)msp->name, name);
6783 msp->per_pid = pid_local;
6784 msp->allow = allow;
6785
6786 SLIST_INSERT_HEAD(&j->mspolicies, msp, sle);
6787
6788 return true;
6789 }
6790
6791 void
6792 mspolicy_setup(launch_data_t obj, const char *key, void *context)
6793 {
6794 job_t j = context;
6795
6796 if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
6797 job_log(j, LOG_WARNING, "Invalid object type for Mach service policy key: %s", key);
6798 return;
6799 }
6800
6801 job_assumes(j, mspolicy_new(j, key, launch_data_get_bool(obj), false, false));
6802 }
6803
6804 bool
6805 mspolicy_check(job_t j, const char *name, bool pid_local)
6806 {
6807 struct mspolicy *mspi;
6808
6809 SLIST_FOREACH(mspi, &j->mspolicies, sle) {
6810 if (mspi->per_pid != pid_local) {
6811 continue;
6812 } else if (strcmp(mspi->name, name) != 0) {
6813 continue;
6814 }
6815 return mspi->allow;
6816 }
6817
6818 return !j->deny_unknown_mslookups;
6819 }
6820
6821 void
6822 mspolicy_delete(job_t j, struct mspolicy *msp)
6823 {
6824 SLIST_REMOVE(&j->mspolicies, msp, mspolicy, sle);
6825
6826 free(msp);
6827 }
6828
6829 bool
6830 waiting4removal_new(job_t j, mach_port_t rp)
6831 {
6832 struct waiting_for_removal *w4r;
6833
6834 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
6835 return false;
6836 }
6837
6838 w4r->reply_port = rp;
6839
6840 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
6841
6842 return true;
6843 }
6844
6845 void
6846 waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
6847 {
6848 job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);
6849
6850 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
6851
6852 free(w4r);
6853 }
6854
/*
 * Query the kernel's process-count limit (kern.maxproc). Falls back to a
 * conservative 100 if the sysctl fails.
 */
size_t
get_kern_max_proc(void)
{
	int mib[] = { CTL_KERN, KERN_MAXPROC };
	int maxproc = 100; /* fallback if the sysctl fails */
	size_t len = sizeof(maxproc);

	launchd_assumes(sysctl(mib, 2, &maxproc, &len, NULL, 0) != -1);

	return maxproc;
}
6866
6867 void
6868 do_file_init(void)
6869 {
6870 struct stat sb;
6871
6872 launchd_assert(mach_timebase_info(&tbi) == 0);
6873
6874 if (stat("/AppleInternal", &sb) == 0) {
6875 do_apple_internal_magic = true;
6876 }
6877 }