]> git.saurik.com Git - apple/launchd.git/blob - launchd/src/launchd_core_logic.c
726ab6342a3f48d769ffed4ee51c3639c26a95aa
[apple/launchd.git] / launchd / src / launchd_core_logic.c
1 /*
2 * @APPLE_APACHE_LICENSE_HEADER_START@
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * @APPLE_APACHE_LICENSE_HEADER_END@
17 */
18
19 static const char *const __rcs_file_version__ = "$Revision: 23433 $";
20
21 #include "config.h"
22 #include "launchd_core_logic.h"
23
24 #include <mach/mach.h>
25 #include <mach/mach_error.h>
26 #include <mach/mach_time.h>
27 #include <mach/boolean.h>
28 #include <mach/message.h>
29 #include <mach/notify.h>
30 #include <mach/mig_errors.h>
31 #include <mach/mach_traps.h>
32 #include <mach/mach_interface.h>
33 #include <mach/host_info.h>
34 #include <mach/mach_host.h>
35 #include <mach/exception.h>
36 #include <mach/host_reboot.h>
37 #include <sys/types.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
40 #include <sys/stat.h>
41 #include <sys/ucred.h>
42 #include <sys/fcntl.h>
43 #include <sys/un.h>
44 #include <sys/reboot.h>
45 #include <sys/wait.h>
46 #include <sys/sysctl.h>
47 #include <sys/sockio.h>
48 #include <sys/time.h>
49 #include <sys/resource.h>
50 #include <sys/ioctl.h>
51 #include <sys/mount.h>
52 #include <sys/pipe.h>
53 #include <net/if.h>
54 #include <netinet/in.h>
55 #include <netinet/in_var.h>
56 #include <netinet6/nd6.h>
57 #include <bsm/libbsm.h>
58 #include <unistd.h>
59 #include <signal.h>
60 #include <errno.h>
61 #include <libgen.h>
62 #include <stdio.h>
63 #include <stdlib.h>
64 #include <stdarg.h>
65 #include <stdbool.h>
66 #include <paths.h>
67 #include <pwd.h>
68 #include <grp.h>
69 #include <ttyent.h>
70 #include <dlfcn.h>
71 #include <dirent.h>
72 #include <string.h>
73 #include <ctype.h>
74 #include <glob.h>
75 #include <spawn.h>
76 #include <sandbox.h>
77
78 #include "liblaunch_public.h"
79 #include "liblaunch_private.h"
80 #include "liblaunch_internal.h"
81 #include "libbootstrap_public.h"
82 #include "libbootstrap_private.h"
83 #include "libvproc_public.h"
84 #include "libvproc_internal.h"
85
86 #include "reboot2.h"
87
88 #include "launchd.h"
89 #include "launchd_runtime.h"
90 #include "launchd_unix_ipc.h"
91 #include "protocol_vproc.h"
92 #include "protocol_vprocServer.h"
93 #include "job_reply.h"
94
95 #define LAUNCHD_MIN_JOB_RUN_TIME 10
96 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
97 #define LAUNCHD_SIGKILL_TIMER 5
98
99
100 #define TAKE_SUBSET_NAME "TakeSubsetName"
101 #define TAKE_SUBSET_PID "TakeSubsetPID"
102 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
103
/*
 * True when v is a non-zero power of two.  Every use of the argument is
 * parenthesized so expression arguments (e.g. "x + 1") expand correctly;
 * note v is still evaluated more than once, so avoid side-effecting args.
 */
#define IS_POWER_OF_TWO(v)	(!((v) & ((v) - 1)) && (v))
105
106 extern char **environ;
107
108 struct waiting_for_removal {
109 SLIST_ENTRY(waiting_for_removal) sle;
110 mach_port_t reply_port;
111 };
112
113 static bool waiting4removal_new(job_t j, mach_port_t rp);
114 static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
115
116 struct mspolicy {
117 SLIST_ENTRY(mspolicy) sle;
118 unsigned int allow:1, per_pid:1;
119 const char name[0];
120 };
121
122 static bool mspolicy_new(job_t j, const char *name, bool allow, bool pid_local, bool skip_check);
123 static bool mspolicy_copy(job_t j_to, job_t j_from);
124 static void mspolicy_setup(launch_data_t obj, const char *key, void *context);
125 static bool mspolicy_check(job_t j, const char *name, bool pid_local);
126 static void mspolicy_delete(job_t j, struct mspolicy *msp);
127
/*
 * One Mach service advertised by a job.  An instance is linked onto several
 * lists at once: the owning job's machservices list, the global
 * special_ports list, and the name/port hash buckets used for lookup.
 */
struct machservice {
	SLIST_ENTRY(machservice) sle;			/* linkage on the owning job's list */
	SLIST_ENTRY(machservice) special_port_sle;	/* linkage on the global special_ports list */
	LIST_ENTRY(machservice) name_hash_sle;		/* bucket linkage for lookup by name */
	LIST_ENTRY(machservice) port_hash_sle;		/* bucket linkage for lookup by port (port_hash[]) */
	job_t job;					/* job that owns this service */
	uint64_t bad_perf_cnt;
	unsigned int gen_num;				/* generation counter; presumably bumped on port reset — confirm against machservice_resetport() */
	mach_port_name_t port;				/* the Mach port backing the service */
	unsigned int isActive:1, reset:1, recv:1, hide:1, kUNCServer:1, per_user_hack:1, debug_on_close:1, per_pid:1, special_port_num:10;
	const char name[0];				/* service name, stored inline past the struct */
};
140
141 static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */
142
143 #define PORT_HASH_SIZE 32
144 #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
145
146 static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
147
148 static void machservice_setup(launch_data_t obj, const char *key, void *context);
149 static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
150 static void machservice_resetport(job_t j, struct machservice *ms);
151 static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
152 static void machservice_ignore(job_t j, struct machservice *ms);
153 static void machservice_watch(job_t j, struct machservice *ms);
154 static void machservice_delete(job_t j, struct machservice *, bool port_died);
155 static void machservice_request_notifications(struct machservice *);
156 static mach_port_t machservice_port(struct machservice *);
157 static job_t machservice_job(struct machservice *);
158 static bool machservice_hidden(struct machservice *);
159 static bool machservice_active(struct machservice *);
160 static const char *machservice_name(struct machservice *);
161 static bootstrap_status_t machservice_status(struct machservice *);
162
163 struct socketgroup {
164 SLIST_ENTRY(socketgroup) sle;
165 int *fds;
166 unsigned int junkfds:1, fd_cnt:31;
167 char name[0];
168 };
169
170 static bool socketgroup_new(job_t j, const char *name, int *fds, unsigned int fd_cnt, bool junkfds);
171 static void socketgroup_delete(job_t j, struct socketgroup *sg);
172 static void socketgroup_watch(job_t j, struct socketgroup *sg);
173 static void socketgroup_ignore(job_t j, struct socketgroup *sg);
174 static void socketgroup_callback(job_t j);
175 static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
176 static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
177
178 struct calendarinterval {
179 LIST_ENTRY(calendarinterval) global_sle;
180 SLIST_ENTRY(calendarinterval) sle;
181 job_t job;
182 struct tm when;
183 time_t when_next;
184 };
185
186 static LIST_HEAD(, calendarinterval) sorted_calendar_events;
187
188 static bool calendarinterval_new(job_t j, struct tm *w);
189 static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
190 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
191 static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
192 static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
193 static void calendarinterval_callback(void);
194 static void calendarinterval_sanity_check(void);
195
196 struct envitem {
197 SLIST_ENTRY(envitem) sle;
198 char *value;
199 char key[0];
200 };
201
202 static bool envitem_new(job_t j, const char *k, const char *v, bool global);
203 static void envitem_delete(job_t j, struct envitem *ei, bool global);
204 static void envitem_setup(launch_data_t obj, const char *key, void *context);
205
206 struct limititem {
207 SLIST_ENTRY(limititem) sle;
208 struct rlimit lim;
209 unsigned int setsoft:1, sethard:1, which:30;
210 };
211
212 static bool limititem_update(job_t j, int w, rlim_t r);
213 static void limititem_delete(job_t j, struct limititem *li);
214 static void limititem_setup(launch_data_t obj, const char *key, void *context);
215 static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
216
217 typedef enum {
218 NETWORK_UP = 1,
219 NETWORK_DOWN,
220 SUCCESSFUL_EXIT,
221 FAILED_EXIT,
222 PATH_EXISTS,
223 PATH_MISSING,
224 OTHER_JOB_ENABLED,
225 OTHER_JOB_DISABLED,
226 OTHER_JOB_ACTIVE,
227 OTHER_JOB_INACTIVE,
228 PATH_CHANGES,
229 DIR_NOT_EMPTY,
230 // FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
231 } semaphore_reason_t;
232
233 struct semaphoreitem {
234 SLIST_ENTRY(semaphoreitem) sle;
235 semaphore_reason_t why;
236 int fd;
237 char what[0];
238 };
239
240 struct semaphoreitem_dict_iter_context {
241 job_t j;
242 semaphore_reason_t why_true;
243 semaphore_reason_t why_false;
244 };
245
246 static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
247 static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
248 static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
249 static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
250 static void semaphoreitem_callback(job_t j, struct kevent *kev);
251 static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
252 static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
253 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
254
255 #define ACTIVE_JOB_HASH_SIZE 32
256 #define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))
257 #define MACHSERVICE_HASH_SIZE 37
258
/*
 * A job manager ("bootstrap" namespace): owns a set of jobs, any child
 * managers, and the Mach port pair used to communicate with its requestor.
 */
struct jobmgr_s {
	kq_callback kqjobmgr_callback;	/* NOTE(review): first member; presumably dispatched via kevent udata (see jobmgr_callback) — confirm */
	SLIST_ENTRY(jobmgr_s) sle;	/* linkage on the parent manager's submgrs list */
	SLIST_HEAD(, jobmgr_s) submgrs;	/* child job managers */
	LIST_HEAD(, job_s) jobs;	/* all jobs owned by this manager */
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];	/* running jobs hashed by pid */
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];	/* services hashed by name */
	mach_port_t jm_port;		/* receive right this manager serves requests on */
	mach_port_t req_port;		/* port of the requestor that created this manager */
	jobmgr_t parentmgr;		/* NULL for the root manager */
	int reboot_flags;		/* flags eventually passed to reboot(2) at teardown */
	unsigned int global_on_demand_cnt;	/* jobs that forced peers to on-demand mode */
	unsigned int hopefully_first_cnt;	/* jobs marked hopefully_exits_first */
	unsigned int normal_active_cnt;
	unsigned int sent_stop_to_normal_jobs:1, sent_stop_to_hopefully_last_jobs:1, shutting_down:1, session_initialized:1;
	char name[0];			/* manager/session name, stored inline */
};
276
277 #define jobmgr_assumes(jm, e) \
278 (__builtin_expect(!(e), 0) ? jobmgr_log_bug(jm, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
279
280 static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name);
281 static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
282 static jobmgr_t jobmgr_parent(jobmgr_t jm);
283 static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
284 static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
285 static void jobmgr_log_stray_children(jobmgr_t jm);
286 static void jobmgr_remove(jobmgr_t jm);
287 static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
288 static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
289 static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
290 static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
291 static void job_export_all2(jobmgr_t jm, launch_data_t where);
292 static void jobmgr_callback(void *obj, struct kevent *kev);
293 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
294 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
295 static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
296 static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
297 static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
298 /* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
299 static void jobmgr_log_bug(jobmgr_t jm, const char *rcs_rev, const char *path, unsigned int line, const char *test);
300
301 #define DO_RUSAGE_SUMMATION 0
302
303 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
304
/*
 * A launchd job: everything known about one (potential) process — how to
 * spawn it, what it listens on, and the bookkeeping for its lifecycle.
 */
struct job_s {
	kq_callback kqjob_callback;	/* NOTE(review): first member; presumably dispatched via kevent udata (see job_callback) — confirm */
	LIST_ENTRY(job_s) sle;		/* linkage on the owning manager's jobs list */
	LIST_ENTRY(job_s) pid_hash_sle;		/* linkage in active_jobs[] pid hash */
	LIST_ENTRY(job_s) label_hash_sle;	/* linkage in global label_hash[] */
	/* Per-job resource/configuration lists, torn down in job_remove(). */
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, mspolicy) mspolicies;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;	/* reply ports waiting for this job to go away */
#if DO_RUSAGE_SUMMATION
	struct rusage ru;
#endif
	cpu_type_t *j_binpref;		/* preferred binary architectures */
	size_t j_binpref_cnt;
	mach_port_t j_port;		/* this job's bootstrap/IPC receive right */
	mach_port_t wait_reply_port;	/* pending reply for job_handle_mpm_wait() */
	uid_t mach_uid;
	jobmgr_t mgr;			/* owning job manager */
	char **argv;			/* exec vector; freed as a single allocation in job_remove() */
	char *prog;
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdoutpath;
	char *stderrpath;
	struct machservice *lastlookup;		/* one-entry service-lookup cache */
	unsigned int lastlookup_gennum;
	char *seatbelt_profile;		/* sandbox profile name, if any */
	uint64_t seatbelt_flags;
	void *quarantine_data;
	size_t quarantine_data_sz;
	pid_t p;			/* pid when running, 0 otherwise */
	int argc;
	int last_exit_status;
	int forkfd;			/* pipe used to stall the child before exec (see job_uncork_fork) */
	int log_redirect_fd;		/* read side of the child's stdout/stderr capture */
	int nice;
	unsigned int timeout;
	unsigned int exit_timeout;	/* seconds between SIGTERM and SIGKILL in job_stop() */
	int stdout_err_fd;
	uint64_t sent_sigterm_time;	/* mach_absolute_time() stamp set by job_stop() */
	uint64_t start_time;
	uint32_t min_run_time;
	uint32_t start_interval;	/* periodic-start interval; also the kevent timer ident */
	/* State and configuration bit-flags (split around `mask' for packing). */
	unsigned int checkedin:1, anonymous:1, debug:1, inetcompat:1, inetcompat_wait:1,
		     ondemand:1, session_create:1, low_pri_io:1, no_init_groups:1, priv_port_has_senders:1,
		     importing_global_env:1, importing_hard_limits:1, setmask:1, legacy_mach_job:1, start_pending:1;
	mode_t mask;
	unsigned int globargv:1, wait4debugger:1, unload_at_exit:1, stall_before_exec:1, only_once:1,
		     currently_ignored:1, forced_peers_to_demand_mode:1, setnice:1, hopefully_exits_last:1, removal_pending:1,
		     wait4pipe_eof:1, sent_sigkill:1, debug_before_kill:1, weird_bootstrap:1, start_on_mount:1,
		     per_user:1, hopefully_exits_first:1, deny_unknown_mslookups:1, unload_at_mig_return:1, abandon_pg:1,
		     poll_for_vfs_changes:1;
	const char label[0];		/* unique job label, stored inline past the struct */
};
366
367 #define LABEL_HASH_SIZE 53
368
369 static LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
370 static size_t hash_label(const char *label) __attribute__((pure));
371 static size_t hash_ms(const char *msstr) __attribute__((pure));
372
373
374 #define job_assumes(j, e) \
375 (__builtin_expect(!(e), 0) ? job_log_bug(j, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
376
377 static void job_import_keys(launch_data_t obj, const char *key, void *context);
378 static void job_import_bool(job_t j, const char *key, bool value);
379 static void job_import_string(job_t j, const char *key, const char *value);
380 static void job_import_integer(job_t j, const char *key, long long value);
381 static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
382 static void job_import_array(job_t j, const char *key, launch_data_t value);
383 static void job_import_opaque(job_t j, const char *key, launch_data_t value);
384 static bool job_set_global_on_demand(job_t j, bool val);
385 static const char *job_active(job_t j);
386 static void job_watch(job_t j);
387 static void job_ignore(job_t j);
388 static void job_reap(job_t j);
389 static bool job_useless(job_t j);
390 static bool job_keepalive(job_t j);
391 static void job_start(job_t j);
392 static void job_start_child(job_t j) __attribute__((noreturn));
393 static void job_setup_attributes(job_t j);
394 static bool job_setup_machport(job_t j);
395 static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
396 static void job_postfork_become_user(job_t j);
397 static void job_find_and_blame_pids_with_weird_uids(job_t j);
398 static void job_force_sampletool(job_t j);
399 static void job_setup_exception_port(job_t j, task_t target_task);
400 static void job_reparent_hack(job_t j, const char *where);
401 static void job_callback(void *obj, struct kevent *kev);
402 static void job_callback_proc(job_t j, int flags, int fflags);
403 static void job_callback_timer(job_t j, void *ident);
404 static void job_callback_read(job_t j, int ident);
405 static void job_log_stray_pg(job_t j);
406 static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid);
407 static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv);
408 static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond);
409 static const char *job_prog(job_t j);
410 static jobmgr_t job_get_bs(job_t j);
411 static void job_kill(job_t j);
412 static void job_uncork_fork(job_t j);
413 static void job_log_stdouterr(job_t j);
414 static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
415 static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
416 static void job_log_bug(job_t j, const char *rcs_rev, const char *path, unsigned int line, const char *test);
417 static void job_log_stdouterr2(job_t j, const char *msg, ...);
418 static void job_set_exeception_port(job_t j, mach_port_t port);
419 static kern_return_t job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus);
420
421
422
423 static const struct {
424 const char *key;
425 int val;
426 } launchd_keys2limits[] = {
427 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
428 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
429 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
430 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
431 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
432 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
433 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
434 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
435 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
436 };
437
438 static time_t cronemu(int mon, int mday, int hour, int min);
439 static time_t cronemu_wday(int wday, int hour, int min);
440 static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
441 static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
442 static bool cronemu_hour(struct tm *wtm, int hour, int min);
443 static bool cronemu_min(struct tm *wtm, int min);
444
445 /* miscellaneous file local functions */
446 static void ensure_root_bkgd_setup(void);
447 static int dir_has_files(job_t j, const char *path);
448 static char **mach_cmd2argv(const char *string);
449 static size_t our_strhash(const char *s) __attribute__((pure));
450 static void extract_rcsid_substr(const char *i, char *o, size_t osz);
451 static void do_first_per_user_launchd_hack(void);
452 static void do_file_init(void) __attribute__((constructor));
453
454 /* file local globals */
455 static size_t total_children;
456 static size_t total_anon_children;
457 static mach_port_t the_exception_server;
458 static bool did_first_per_user_launchd_BootCache_hack;
459 #define JOB_BOOTCACHE_HACK_CHECK(j) (j->per_user && !did_first_per_user_launchd_BootCache_hack && (j->mach_uid >= 500) && (j->mach_uid != (uid_t)-2))
460 static jobmgr_t background_jobmgr;
461 static job_t workaround_5477111;
462 static mach_timebase_info_data_t tbi;
463
464 /* process wide globals */
465 mach_port_t inherited_bootstrap_port;
466 jobmgr_t root_jobmgr;
467
468
469 void
470 job_ignore(job_t j)
471 {
472 struct semaphoreitem *si;
473 struct socketgroup *sg;
474 struct machservice *ms;
475
476 if (j->currently_ignored) {
477 return;
478 }
479
480 job_log(j, LOG_DEBUG, "Ignoring...");
481
482 j->currently_ignored = true;
483
484 if (j->poll_for_vfs_changes) {
485 j->poll_for_vfs_changes = false;
486 job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
487 }
488
489 SLIST_FOREACH(sg, &j->sockets, sle) {
490 socketgroup_ignore(j, sg);
491 }
492
493 SLIST_FOREACH(ms, &j->machservices, sle) {
494 machservice_ignore(j, ms);
495 }
496
497 SLIST_FOREACH(si, &j->semaphores, sle) {
498 semaphoreitem_ignore(j, si);
499 }
500 }
501
502 void
503 job_watch(job_t j)
504 {
505 struct semaphoreitem *si;
506 struct socketgroup *sg;
507 struct machservice *ms;
508
509 if (!j->currently_ignored) {
510 return;
511 }
512
513 job_log(j, LOG_DEBUG, "Watching...");
514
515 j->currently_ignored = false;
516
517 SLIST_FOREACH(sg, &j->sockets, sle) {
518 socketgroup_watch(j, sg);
519 }
520
521 SLIST_FOREACH(ms, &j->machservices, sle) {
522 machservice_watch(j, ms);
523 }
524
525 SLIST_FOREACH(si, &j->semaphores, sle) {
526 semaphoreitem_watch(j, si);
527 }
528 }
529
530 void
531 job_stop(job_t j)
532 {
533 if (!j->p || j->anonymous) {
534 return;
535 }
536
537 job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);
538 j->sent_sigterm_time = mach_absolute_time();
539
540 if (j->exit_timeout) {
541 job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
542 EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j) != -1);
543 }
544
545 job_log(j, LOG_DEBUG, "Sent SIGTERM signal");
546 }
547
/*
 * Serialize a job into a launch_data dictionary (the wire representation
 * handed back to clients such as launchctl).  Returns NULL only if the
 * top-level dictionary cannot be allocated; individual keys whose
 * allocation fails are silently omitted.  Caller owns the returned object.
 */
launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	/* Scalar keys: each is inserted only if its launch_data wrapper
	 * could be allocated. */
	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}
	if ((tmp = launch_data_new_integer(j->last_exit_status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}
	/* PID is only reported while the job actually has a process. */
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	/* ProgramArguments: argv re-exported as an array of strings. */
	if (j->argv && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		int i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	/* inetd-compatibility sub-dictionary, carrying only the Wait flag. */
	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	/* Sockets: one fd-array per socket group, keyed by group name.
	 * Groups flagged junkfds are deliberately not exported. */
	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if (sg->junkfds) {
				continue;
			}
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	/* MachServices: names only; the ports themselves are exported as
	 * MACH_PORT_NULL placeholders.  Per-pid services go into a separate
	 * PerJobMachServices dictionary (tmp3), allocated lazily. */
	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
657
658 static void
659 jobmgr_log_active_jobs(jobmgr_t jm)
660 {
661 const char *why_active;
662 jobmgr_t jmi;
663 job_t ji;
664
665 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
666 jobmgr_log_active_jobs(jmi);
667 }
668
669 LIST_FOREACH(ji, &jm->jobs, sle) {
670 why_active = job_active(ji);
671
672 job_log(ji, LOG_DEBUG, "%s", why_active ? why_active : "Inactive");
673 }
674
675 }
676
677 static void
678 still_alive_with_check(void)
679 {
680 jobmgr_log(root_jobmgr, LOG_NOTICE, "Still alive with %lu/%lu children", total_children, total_anon_children);
681
682 jobmgr_log_active_jobs(root_jobmgr);
683
684 runtime_closelog(); /* hack to flush logs */
685 }
686
687 jobmgr_t
688 jobmgr_shutdown(jobmgr_t jm)
689 {
690 jobmgr_t jmi, jmn;
691 job_t ji;
692
693 jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));
694
695 jm->shutting_down = true;
696
697 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
698 jobmgr_shutdown(jmi);
699 }
700
701 if (jm->hopefully_first_cnt) {
702 LIST_FOREACH(ji, &jm->jobs, sle) {
703 if (ji->p && ji->hopefully_exits_first) {
704 job_stop(ji);
705 }
706 }
707 }
708
709 if (debug_shutdown_hangs && jm->parentmgr == NULL && getpid() == 1) {
710 runtime_set_timeout(still_alive_with_check, 5);
711 }
712
713 return jobmgr_do_garbage_collection(jm);
714 }
715
/*
 * Tear down a job manager: remove any remaining submanagers and jobs,
 * release its Mach ports, unlink it from its parent, and free it.
 *
 * Side effects at the root: when this is pid 1's root manager, removal
 * triggers reboot(2) with the accumulated reboot_flags; in any other
 * process it calls exit(EXIT_SUCCESS).  In those two branches control
 * never reaches free(jm).
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removed job manager");

	/* Submanagers should already be gone; complain (via the assumes
	 * machinery) and clean them up anyway if not. */
	if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
		}
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		/* We should only have anonymous jobs left */
		job_assumes(ji, ji->anonymous);
		job_remove(ji);
	}

	/* Drop the send right to the requestor and close our receive right. */
	if (jm->req_port) {
		jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
	}

	if (jm->jm_port) {
		jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
	}

	/* Clear the file-scope cache if it pointed at us. */
	if (jm == background_jobmgr) {
		background_jobmgr = NULL;
	}

	if (jm->parentmgr) {
		runtime_del_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (getpid() == 1) {
		/* Root manager of pid 1: removing it means the machine goes down. */
		jobmgr_log(jm, LOG_DEBUG, "About to call: reboot(%s)", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
		runtime_closelog();
	} else {
		/* Root manager of a per-user launchd: just exit. */
		runtime_closelog();
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
764
/*
 * Remove a job entirely.  If the job still has a live process, removal is
 * deferred: the job is marked removal_pending, job_stop() is issued, and
 * the function returns without freeing anything (the reap path is expected
 * to call back in once the process exits).  Anonymous jobs with a process
 * are reaped synchronously first.  Otherwise every per-job resource list,
 * fd, Mach right, heap string, and kevent registration is torn down and
 * the job struct is freed.
 */
void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct mspolicy *msp;
	struct envitem *ei;

	if (j->p && j->anonymous) {
		job_reap(j);
	} else if (j->p) {
		job_log(j, LOG_DEBUG, "Removal pended until the job exits");

		if (!j->removal_pending) {
			j->removal_pending = true;
			job_stop(j);
		}

		return;
	}

	ipc_close_all_with_job(j);

	/* Undo the global on-demand forcing before the job disappears, so
	 * the manager's counter stays balanced. */
	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	/* These fds should already be closed (0); log a bug and close them
	 * if not. */
	if (!job_assumes(j, j->forkfd == 0)) {
		job_assumes(j, runtime_close(j->forkfd) != -1);
	}

	if (!job_assumes(j, j->log_redirect_fd == 0)) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
	}

	if (j->j_port) {
		job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	}

	/* A lingering wait reply port would mean someone is still blocked
	 * waiting on this job; drop the right either way. */
	if (!job_assumes(j, j->wait_reply_port == MACH_PORT_NULL)) {
		job_assumes(j, launchd_mport_deallocate(j->wait_reply_port) == KERN_SUCCESS);
	}

	/* Drain every per-job resource list; each *_delete unlinks the head
	 * element, so these loops terminate when the lists are empty. */
	while ((msp = SLIST_FIRST(&j->mspolicies))) {
		mspolicy_delete(j, msp);
	}
	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	/* Free the heap-allocated configuration strings and buffers. */
	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	/* Cancel any outstanding timer registrations keyed on this job's
	 * storage before it is freed. */
	if (j->start_interval) {
		runtime_del_ref();
		job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
	}
	if (j->poll_for_vfs_changes) {
		job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
	}

	/* Best-effort delete of the timer keyed on the job pointer itself;
	 * the return value is deliberately ignored (it may not exist). */
	kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	job_log(j, LOG_DEBUG, "Removed");

	free(j);
}
890
/*
 * launch_data dictionary-iteration callback (context is the job): turn one
 * entry of the job's Sockets dictionary into a socketgroup.  obj is either
 * a single fd datum or an array of fd data; key is the socket-group name.
 * Groups named LAUNCH_JOBKEY_BONJOURFDS are flagged as junk fds.
 */
void
socketgroup_setup(launch_data_t obj, const char *key, void *context)
{
	launch_data_t tmp_oai;
	job_t j = context;
	unsigned int i, fd_cnt = 1;
	int *fds;

	if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
		fd_cnt = launch_data_array_get_count(obj);
	}

	/* NOTE(review): stack allocation sized by data from the request;
	 * a very large Sockets array could overflow the stack — presumably
	 * bounded upstream, confirm. */
	fds = alloca(fd_cnt * sizeof(int));

	for (i = 0; i < fd_cnt; i++) {
		if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
			tmp_oai = launch_data_array_get_index(obj, i);
		} else {
			tmp_oai = obj;
		}

		/* NOTE(review): launch_data_get_fd result is not validated
		 * here; bad entries flow into socketgroup_new as-is. */
		fds[i] = launch_data_get_fd(tmp_oai);
	}

	socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);

	/* Ownership of the fds has transferred to the socketgroup; revoke
	 * them from the incoming message object. */
	ipc_revoke_fds(obj);
}
919
920 bool
921 job_set_global_on_demand(job_t j, bool val)
922 {
923 if (j->forced_peers_to_demand_mode && val) {
924 return false;
925 } else if (!j->forced_peers_to_demand_mode && !val) {
926 return false;
927 }
928
929 if ((j->forced_peers_to_demand_mode = val)) {
930 j->mgr->global_on_demand_cnt++;
931 } else {
932 j->mgr->global_on_demand_cnt--;
933 }
934
935 if (j->mgr->global_on_demand_cnt == 0) {
936 jobmgr_dispatch_all(j->mgr, false);
937 }
938
939 return true;
940 }
941
/*
 * Create and register the job's bootstrap receive right (j->j_port): make
 * the port, add it to the runtime's MIG demux with a big-enough max
 * message size, and request a no-senders notification so we learn when the
 * last client goes away.  On any failure the partially-created port is
 * closed (goto-cleanup) and false is returned.
 */
bool
job_setup_machport(job_t j)
{
	mach_msg_size_t mxmsgsz;

	if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
		goto out_bad2;
	}

	if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
		/* The port is already registered with the runtime here;
		 * presumably close_recv also tears that down — confirm. */
		job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
		goto out_bad;
	}

	return true;
out_bad2:
	job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
out_bad:
	return false;
}
972
/*
 * Create a legacy mach_init-style job from a command line submitted over
 * IPC. 'cmd' is tokenized into an argv, the job gets an auto-picked
 * label, and its bootstrap port is set up immediately. Returns the new
 * job, or NULL on failure (job creation is routinely denied during
 * shutdown).
 */
job_t
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
{
	const char **argv = (const char **)mach_cmd2argv(cmd);
	job_t jr = NULL;

	if (!job_assumes(j, argv != NULL)) {
		goto out_bad;
	}

	jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);

	/* job_new() copies argv into the job; our tokenized copy is done. */
	free(argv);

	/* jobs can easily be denied creation during shutdown */
	if (!jr) {
		goto out_bad;
	}

	jr->mach_uid = uid;
	jr->ondemand = ond;
	jr->legacy_mach_job = true;
	jr->abandon_pg = true;
	jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */

	if (!job_setup_machport(jr)) {
		goto out_bad;
	}

	job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");

	return jr;

out_bad:
	if (jr) {
		job_remove(jr);
	}
	return NULL;
}
1012
1013 kern_return_t
1014 job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus)
1015 {
1016 if (j->p) {
1017 j->wait_reply_port = srp;
1018 return MIG_NO_REPLY;
1019 }
1020
1021 *waitstatus = j->last_exit_status;
1022
1023 return 0;
1024 }
1025
/*
 * Create a job_t to shadow an existing process that launchd did not
 * spawn ('anonpid'). The process is looked up via sysctl(KERN_PROC_PID);
 * its parent may recursively get an anonymous job too. The new job is
 * inserted into the PID hash and watched with an EVFILT_PROC kevent.
 * Returns the job, or NULL if the PID is invalid or gone.
 */
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, anonpid };
	struct kinfo_proc kp;
	size_t len = sizeof(kp);
	const char *zombie = NULL;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;

	if (!jobmgr_assumes(jm, anonpid != 0)) {
		return NULL;
	}

	if (!jobmgr_assumes(jm, anonpid < 100000)) {
		/* The kernel current defines PID_MAX to be 99999, but that define isn't exported */
		return NULL;
	}

	if (!jobmgr_assumes(jm, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
		return NULL;
	}

	/* sysctl() returns zero bytes (not an error) for a PID that no longer exists. */
	if (len != sizeof(kp)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for nonexistent PID: %u", anonpid);
		return NULL;
	}

	if (!jobmgr_assumes(jm, kp.kp_proc.p_comm[0] != '\0')) {
		return NULL;
	}

	if (kp.kp_proc.p_stat == SZOMB) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID: %u", anonpid);
		zombie = "zombie";
	}

	/* Try to attribute the process to a parent job for policy inheritance. */
	switch (kp.kp_eproc.e_ppid) {
	case 0:
		/* the kernel */
		break;
	case 1:
		if (getpid() != 1) {
			/* we cannot possibly find a parent job_t that is useful in this function */
			break;
		}
		/* fall through */
	default:
		jp = jobmgr_find_by_pid(jm, kp.kp_eproc.e_ppid, true);
		jobmgr_assumes(jm, jp != NULL);
		break;
	}

	/* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
	if ((shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	if (jobmgr_assumes(jm, (jr = job_new(jm, AUTO_PICK_LEGACY_LABEL, zombie ? zombie : kp.kp_proc.p_comm, NULL)) != NULL)) {
		u_int proc_fflags = NOTE_EXEC|NOTE_EXIT /* |NOTE_REAP */;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		/* anonymous process reaping is messy */
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1 && job_assumes(jr, errno == ESRCH)) {
			/* zombies are weird */
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (jp) {
			job_assumes(jr, mspolicy_copy(jr, jp));
		}

		if (shutdown_state && jm->hopefully_first_cnt == 0) {
			job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time");
		}

		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, kp.kp_eproc.e_ppid, jp ? ": " : "", jp ? jp->label : "");
	}

	/* Restore the shutdown flag we temporarily cleared above. */
	if (shutdown_state) {
		jm->shutting_down = true;
	}

	return jr;
}
1117
/*
 * Allocate and initialize a new job in manager 'jm'.
 *
 * The label is stored in the flexible tail of struct job_s (allocated in
 * one block with the struct). When label == AUTO_PICK_LEGACY_LABEL, a
 * placeholder of worst-case width is sized first and then overwritten
 * with "<pointer>.<basename>"; extra MAXCOMLEN bytes are reserved so the
 * label can later be rewritten on NOTE_EXEC for anonymous jobs.
 * argv is deep-copied into a single allocation: a pointer table followed
 * by the packed strings. Returns NULL (errno = EINVAL) during shutdown
 * or if neither prog nor argv is given.
 */
job_t
job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
{
	const char *const *argv_tmp = argv;
	char auto_label[1000];
	const char *bn = NULL;
	char *co;
	size_t minlabel_len;
	int i, cc = 0;
	job_t j;

	/* The kevent callback dispatch relies on the callback being first. */
	launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);

	if (jm->shutting_down) {
		errno = EINVAL;
		return NULL;
	}

	if (prog == NULL && argv == NULL) {
		errno = EINVAL;
		return NULL;
	}

	if (label == AUTO_PICK_LEGACY_LABEL) {
		bn = prog ? prog : basename((char *)argv[0]); /* prog for auto labels is kp.kp_kproc.p_comm */
		/* Placeholder sized like the final "%p" so the allocation below is big enough. */
		snprintf(auto_label, sizeof(auto_label), "%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", bn);
		label = auto_label;
		/* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
		minlabel_len = strlen(label) + MAXCOMLEN;
	} else {
		minlabel_len = strlen(label);
	}

	/* Struct and label live in one zeroed allocation. */
	j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);

	if (!jobmgr_assumes(jm, j != NULL)) {
		return NULL;
	}

	if (label == auto_label) {
		/* Now that we know j's address, write the real auto label. */
		snprintf((char *)j->label, strlen(label) + 1, "%p.%s", j, bn);
	} else {
		strcpy((char *)j->label, label);
	}
	j->kqjob_callback = job_callback;
	j->mgr = jm;
	j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
	j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
	j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
	j->currently_ignored = true;
	j->ondemand = true;
	j->checkedin = true;

	if (prog) {
		j->prog = strdup(prog);
		if (!job_assumes(j, j->prog != NULL)) {
			goto out_bad;
		}
	}

	if (argv) {
		while (*argv_tmp++)
			j->argc++;

		for (i = 0; i < j->argc; i++) {
			cc += strlen(argv[i]) + 1;
		}

		/* Single allocation: (argc+1) pointers, then the packed strings. */
		j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);

		if (!job_assumes(j, j->argv != NULL)) {
			goto out_bad;
		}

		/* 'co' walks the string area just past the pointer table. */
		co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));

		for (i = 0; i < j->argc; i++) {
			j->argv[i] = co;
			strcpy(co, argv[i]);
			co += strlen(argv[i]) + 1;
		}
		j->argv[i] = NULL;
	}

	LIST_INSERT_HEAD(&jm->jobs, j, sle);
	LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);

	job_log(j, LOG_DEBUG, "Conceived");

	return j;

out_bad:
	if (j->prog) {
		free(j->prog);
	}
	free(j);

	return NULL;
}
1217
1218 job_t
1219 job_import(launch_data_t pload)
1220 {
1221 job_t j = jobmgr_import2(root_jobmgr, pload);
1222
1223 if (j == NULL) {
1224 return NULL;
1225 }
1226
1227 return job_dispatch(j, false);
1228 }
1229
1230 launch_data_t
1231 job_import_bulk(launch_data_t pload)
1232 {
1233 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
1234 job_t *ja;
1235 size_t i, c = launch_data_array_get_count(pload);
1236
1237 ja = alloca(c * sizeof(job_t ));
1238
1239 for (i = 0; i < c; i++) {
1240 if ((ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) {
1241 errno = 0;
1242 }
1243 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
1244 }
1245
1246 for (i = 0; i < c; i++) {
1247 if (ja[i] == NULL) {
1248 continue;
1249 }
1250 job_dispatch(ja[i], false);
1251 }
1252
1253 return resp;
1254 }
1255
/*
 * Import one boolean plist key into the job. Keys are matched
 * case-insensitively, pre-dispatched on their first character; unknown
 * keys are logged and ignored.
 */
void
job_import_bool(job_t j, const char *key, bool value)
{
	bool found_key = false;

	switch (key[0]) {
	case 'a':
	case 'A':
		if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
			j->abandon_pg = value;
			found_key = true;
		}
		break;
	case 'k':
	case 'K':
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
			/* KeepAlive == true means the job is NOT on-demand. */
			j->ondemand = !value;
			found_key = true;
		}
		break;
	case 'o':
	case 'O':
		if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
			j->ondemand = value;
			found_key = true;
		}
		break;
	case 'd':
	case 'D':
		if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
			j->debug = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
			/* Disabled jobs shouldn't have been submitted at all. */
			job_assumes(j, !value);
			found_key = true;
		}
		break;
	case 'h':
	case 'H':
		if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
			j->hopefully_exits_last = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSFIRST) == 0) {
			j->hopefully_exits_first = value;
			found_key = true;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
			j->session_create = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
			j->start_on_mount = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
			/* this only does something on Mac OS X 10.4 "Tiger" */
			found_key = true;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
			j->low_pri_io = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
			j->only_once = value;
			found_key = true;
		}
		break;
	case 'i':
	case 'I':
		if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
			/* Group initialization is a root-only concern. */
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			j->no_init_groups = !value;
			found_key = true;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
			if (value) {
				/* We don't want value == false to change j->start_pending */
				j->start_pending = true;
			}
			found_key = true;
		}
		break;
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
			j->globargv = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
			j->debug_before_kill = value;
			found_key = true;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
			j->wait4debugger = value;
			found_key = true;
		}
		break;
	default:
		break;
	}

	if (!found_key) {
		job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
	}
}
1372
/*
 * Import one string plist key into the job. Most keys strdup() into a
 * job field via 'where2put'. Keys handled elsewhere (Program, Label,
 * LimitLoadTo*/
/* etc.) return early; privileged keys are ignored when not
 * running as root.
 */
void
job_import_string(job_t j, const char *key, const char *value)
{
	char **where2put = NULL;

	switch (key[0]) {
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
			/* Already consumed by jobmgr_import2(). */
			return;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
			/* Already consumed by jobmgr_import2(). */
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			/* May move the job to a different (session-typed) manager. */
			job_reparent_hack(j, value);
			return;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			where2put = &j->rootdir;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
			where2put = &j->workingdir;
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "root") == 0) {
				/* Running as root is the default; nothing to store. */
				return;
			}
			where2put = &j->username;
		}
		break;
	case 'g':
	case 'G':
		if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "wheel") == 0) {
				/* "wheel" is the default group; nothing to store. */
				return;
			}
			where2put = &j->groupname;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
			where2put = &j->stdoutpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
			where2put = &j->stderrpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
			where2put = &j->seatbelt_profile;
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
		break;
	}

	if (where2put) {
		job_assumes(j, (*where2put = strdup(value)) != NULL);
	} else {
		job_log(j, LOG_WARNING, "Unknown key: %s", key);
	}
}
1459
/*
 * Import one integer plist key into the job. Interval/timeout values
 * are range-checked into [0, UINT32_MAX] (StartInterval must be > 0)
 * and ignored with a warning when out of range.
 */
void
job_import_integer(job_t j, const char *key, long long value)
{
	switch (key[0]) {
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
			} else {
				j->exit_timeout = value;
			}
		}
		break;
	case 'n':
	case 'N':
		if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
			j->nice = value;
			j->setnice = true;
		}
		break;
	case 't':
	case 'T':
		if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
			} else {
				j->timeout = value;
			}
		} else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
			} else {
				j->min_run_time = value;
			}
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
			j->mask = value;
			j->setmask = true;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
			if (value <= 0) {
				job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
			} else {
				/* Keep launchd alive while a periodic timer is armed. */
				runtime_add_ref();
				j->start_interval = value;

				/* The field's address doubles as the kevent ident. */
				job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, value, j) != -1);
			}
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
			j->seatbelt_flags = value;
		}

		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
		break;
	}
}
1533
/*
 * Import one opaque (binary blob) plist key. Only QuarantineData is
 * recognized; its bytes are copied into j->quarantine_data. Unknown
 * opaque keys are silently ignored.
 */
void
job_import_opaque(job_t j, const char *key, launch_data_t value)
{
	switch (key[0]) {
	case 'q':
	case 'Q':
		if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
			size_t tmpsz = launch_data_get_opaque_size(value);

			/* Note: the allocation happens inside the assumes() check. */
			if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
				memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
				j->quarantine_data_sz = tmpsz;
			}
		}
		break;
	default:
		break;
	}
}
1553
/*
 * Import one dictionary-valued plist key by iterating its entries into
 * the appropriate per-item setup callback (sockets, env vars, limits,
 * Mach services, etc.). Unknown keys are logged and ignored.
 */
void
job_import_dictionary(job_t j, const char *key, launch_data_t value)
{
	launch_data_t tmp;

	switch (key[0]) {
	case 'k':
	case 'K':
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
			launch_data_dict_iterate(value, semaphoreitem_setup, j);
		}
		break;
	case 'i':
	case 'I':
		if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
			j->inetcompat = true;
			j->abandon_pg = true;
			if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
				j->inetcompat_wait = launch_data_get_bool(tmp);
			}
		}
		break;
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
			launch_data_dict_iterate(value, envitem_setup, j);
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
			/* The flag steers envitem_setup() toward the global table. */
			j->importing_global_env = true;
			launch_data_dict_iterate(value, envitem_setup, j);
			j->importing_global_env = false;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
			launch_data_dict_iterate(value, socketgroup_setup, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
			calendarinterval_new_from_obj(j, value);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
			launch_data_dict_iterate(value, limititem_setup, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
			launch_data_dict_iterate(value, seatbelt_setup_flags, j);
		}
		break;
	case 'h':
	case 'H':
		if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
			/* The flag steers limititem_setup() toward the hard limits. */
			j->importing_hard_limits = true;
			launch_data_dict_iterate(value, limititem_setup, j);
			j->importing_hard_limits = false;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
			launch_data_dict_iterate(value, machservice_setup, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICELOOKUPPOLICIES) == 0) {
			launch_data_dict_iterate(value, mspolicy_setup, j);
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
		break;
	}
}
1623
/*
 * Import one array-valued plist key: watch/queue paths become semaphore
 * items, calendar intervals are instantiated per entry, BonjourFDs go
 * through socketgroup_setup(), and BinaryOrderPreference is copied into
 * j->j_binpref. Keys already handled elsewhere return early.
 */
void
job_import_array(job_t j, const char *key, launch_data_t value)
{
	size_t i, value_cnt = launch_data_array_get_count(value);
	const char *str;

	switch (key[0]) {
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
			/* Already consumed by jobmgr_import2(). */
			return;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
			return;
		}
		break;
	case 'q':
	case 'Q':
		if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
			for (i = 0; i < value_cnt; i++) {
				str = launch_data_get_string(launch_data_array_get_index(value, i));
				if (job_assumes(j, str != NULL)) {
					semaphoreitem_new(j, DIR_NOT_EMPTY, str);
				}
			}

		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
			for (i = 0; i < value_cnt; i++) {
				str = launch_data_get_string(launch_data_array_get_index(value, i));
				if (job_assumes(j, str != NULL)) {
					semaphoreitem_new(j, PATH_CHANGES, str);
				}
			}
		}
		break;
	case 'b':
	case 'B':
		if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
			socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
			/* Note: the allocation happens inside the assumes() check. */
			if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
				j->j_binpref_cnt = value_cnt;
				for (i = 0; i < value_cnt; i++) {
					j->j_binpref[i] = launch_data_get_integer(launch_data_array_get_index(value, i));
				}
			}
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
			for (i = 0; i < value_cnt; i++) {
				calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
			}
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
		break;
	}
}
1697
1698 void
1699 job_import_keys(launch_data_t obj, const char *key, void *context)
1700 {
1701 job_t j = context;
1702 launch_data_type_t kind;
1703
1704 if (obj == NULL) {
1705 return;
1706 }
1707
1708 kind = launch_data_get_type(obj);
1709
1710 switch (kind) {
1711 case LAUNCH_DATA_BOOL:
1712 job_import_bool(j, key, launch_data_get_bool(obj));
1713 break;
1714 case LAUNCH_DATA_STRING:
1715 job_import_string(j, key, launch_data_get_string(obj));
1716 break;
1717 case LAUNCH_DATA_INTEGER:
1718 job_import_integer(j, key, launch_data_get_integer(obj));
1719 break;
1720 case LAUNCH_DATA_DICTIONARY:
1721 job_import_dictionary(j, key, obj);
1722 break;
1723 case LAUNCH_DATA_ARRAY:
1724 job_import_array(j, key, obj);
1725 break;
1726 case LAUNCH_DATA_OPAQUE:
1727 job_import_opaque(j, key, obj);
1728 break;
1729 default:
1730 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
1731 break;
1732 }
1733 }
1734
1735 job_t
1736 jobmgr_import2(jobmgr_t jm, launch_data_t pload)
1737 {
1738 launch_data_t tmp, ldpa;
1739 const char *label = NULL, *prog = NULL;
1740 const char **argv = NULL;
1741 job_t j;
1742
1743 if (pload == NULL) {
1744 errno = EINVAL;
1745 return NULL;
1746 }
1747
1748 if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
1749 errno = EINVAL;
1750 return NULL;
1751 }
1752
1753 if (!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL))) {
1754 errno = EINVAL;
1755 return NULL;
1756 }
1757
1758 if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
1759 errno = EINVAL;
1760 return NULL;
1761 }
1762
1763 if (!(label = launch_data_get_string(tmp))) {
1764 errno = EINVAL;
1765 return NULL;
1766 }
1767
1768 if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
1769 (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
1770 prog = launch_data_get_string(tmp);
1771 }
1772
1773 if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
1774 size_t i, c;
1775
1776 if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
1777 errno = EINVAL;
1778 return NULL;
1779 }
1780
1781 c = launch_data_array_get_count(ldpa);
1782
1783 argv = alloca((c + 1) * sizeof(char *));
1784
1785 for (i = 0; i < c; i++) {
1786 tmp = launch_data_array_get_index(ldpa, i);
1787
1788 if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
1789 errno = EINVAL;
1790 return NULL;
1791 }
1792
1793 argv[i] = launch_data_get_string(tmp);
1794 }
1795
1796 argv[i] = NULL;
1797 }
1798
1799 if ((j = job_find(label)) != NULL) {
1800 errno = EEXIST;
1801 return NULL;
1802 } else if (label[0] == '\0' || (strncasecmp(label, "", strlen("com.apple.launchd")) == 0) ||
1803 (strtol(label, NULL, 10) != 0)) {
1804 jobmgr_log(jm, LOG_ERR, "Somebody attempted to use a reserved prefix for a label: %s", label);
1805 /* the empty string, com.apple.launchd and number prefixes for labels are reserved */
1806 errno = EINVAL;
1807 return NULL;
1808 }
1809
1810 if ((j = job_new(jm, label, prog, argv))) {
1811 launch_data_dict_iterate(pload, job_import_keys, j);
1812 }
1813
1814 return j;
1815 }
1816
1817 job_t
1818 job_find(const char *label)
1819 {
1820 job_t ji;
1821
1822 LIST_FOREACH(ji, &label_hash[hash_label(label)], label_hash_sle) {
1823 if (ji->removal_pending) {
1824 continue; /* 5351245 */
1825 } else if (ji->mgr->shutting_down) {
1826 continue; /* 5488633 */
1827 }
1828
1829 if (strcmp(ji->label, label) == 0) {
1830 return ji;
1831 }
1832 }
1833
1834 errno = ESRCH;
1835 return NULL;
1836 }
1837
1838 job_t
1839 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
1840 {
1841 job_t ji = NULL;
1842
1843 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
1844 if (ji->p == p) {
1845 break;
1846 }
1847 }
1848
1849 if (ji) {
1850 return ji;
1851 } else if (create_anon) {
1852 return job_new_anonymous(jm, p);
1853 } else {
1854 return NULL;
1855 }
1856 }
1857
/*
 * Recursive helper for job_mig_intran(): map a Mach port a request
 * arrived on to a job_t. Search order matters: a manager's own port maps
 * to the caller's (possibly newly created anonymous) job by PID;
 * otherwise submanagers are searched recursively before this manager's
 * per-job ports. Returns NULL if the port is unknown.
 */
job_t
job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
{
	jobmgr_t jmi;
	job_t ji;

	/* A message on the manager's port comes from an arbitrary process;
	 * attribute it to the caller's PID. */
	if (jm->jm_port == mport) {
		jobmgr_assumes(jm, (ji = jobmgr_find_by_pid(jm, upid, true)) != NULL);
		return ji;
	}

	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		job_t jr;

		if ((jr = job_mig_intran2(jmi, mport, upid))) {
			return jr;
		}
	}

	/* Finally, check each job's own bootstrap port. */
	LIST_FOREACH(ji, &jm->jobs, sle) {
		if (ji->j_port == mport) {
			return ji;
		}
	}

	return NULL;
}
1885
/*
 * MIG intran function: translate the port a request arrived on into the
 * sending job, using the caller's audit credentials for PID attribution.
 * On failure, the caller's process is looked up via sysctl purely to log
 * a diagnostic identifying who confused us.
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred ldc;
	job_t jr;

	runtime_get_caller_creds(&ldc);

	jr = job_mig_intran2(root_jobmgr, p, ldc.pid);

	if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		mib[3] = ldc.pid;

		if (jobmgr_assumes(root_jobmgr, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) && jobmgr_assumes(root_jobmgr, len == sizeof(kp))) {
			jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc.pid, ldc.uid, ldc.euid, p, kp.kp_proc.p_comm);
		}
	}

	return jr;
}
1910
1911 job_t
1912 job_find_by_service_port(mach_port_t p)
1913 {
1914 struct machservice *ms;
1915
1916 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
1917 if (ms->recv && (ms->port == p)) {
1918 return ms->job;
1919 }
1920 }
1921
1922 return NULL;
1923 }
1924
/*
 * MIG destructor: runs after each MIG request completes. Jobs flagged
 * unload_at_mig_return are removed here rather than mid-request.
 */
void
job_mig_destructor(job_t j)
{
	/*
	 * 5477111
	 *
	 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
	 */

	/* Skip the job the workaround already dealt with this cycle. */
	if (j && j != workaround_5477111 && j->unload_at_mig_return) {
		job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
		job_remove(j);
	}

	workaround_5477111 = NULL;

	calendarinterval_sanity_check();
}
1943
1944 void
1945 job_export_all2(jobmgr_t jm, launch_data_t where)
1946 {
1947 jobmgr_t jmi;
1948 job_t ji;
1949
1950 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
1951 job_export_all2(jmi, where);
1952 }
1953
1954 LIST_FOREACH(ji, &jm->jobs, sle) {
1955 launch_data_t tmp;
1956
1957 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
1958 launch_data_dict_insert(where, tmp, ji->label);
1959 }
1960 }
1961 }
1962
1963 launch_data_t
1964 job_export_all(void)
1965 {
1966 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
1967
1968 if (launchd_assumes(resp != NULL)) {
1969 job_export_all2(root_jobmgr, resp);
1970 }
1971
1972 return resp;
1973 }
1974
/*
 * After a job dies, enumerate its process group via
 * sysctl(KERN_PROC_PGRP) and log every surviving member (other than the
 * job itself) as a stray. A generously sized fixed buffer (10MB) is used
 * rather than the usual two-call sysctl sizing dance.
 */
void
job_log_stray_pg(job_t j)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PGRP, j->p };
	size_t i, kp_cnt, len = 10*1024*1024;
	struct kinfo_proc *kp;

	if (!job_assumes(j, (kp = malloc(len)) != NULL)) {
		return;
	}
	if (!job_assumes(j, sysctl(mib, 4, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	/* sysctl() shrank 'len' to the bytes actually returned. */
	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = kp[i].kp_proc.p_pid;
		pid_t pp_i = kp[i].kp_eproc.e_ppid;
		const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
		const char *n = kp[i].kp_proc.p_comm;

		if (p_i == j->p) {
			continue;
		} else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
			/* PID 0 (kernel) and PID 1 should never be in our group. */
			continue;
		}

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u %s", z, p_i, pp_i, n);
	}

out:
	free(kp);
}
2009
/*
 * Reap a dead job: collect its exit status and rusage (real jobs via
 * wait4(); anonymous jobs get synthetic zeros), tear down per-process
 * state (redirect/fork fds, exit-timeout timer, PID hash entry), send
 * any parked MPM wait reply, update bookkeeping counters, and log how
 * and when the job exited. The statement order here is load-bearing.
 */
void
job_reap(job_t j)
{
	struct rusage ru;
	int status;

	job_log(j, LOG_DEBUG, "Reaping");

	if (j->weird_bootstrap) {
		/* Re-register the manager's port with the full vproc protocol
		 * now that the weird-bootstrap job is gone. */
		mach_msg_size_t mxmsgsz = sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);

		if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
			mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
		}

		job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
		j->weird_bootstrap = false;
	}

	if (j->log_redirect_fd && !j->wait4pipe_eof) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
	}

	if (j->forkfd) {
		job_assumes(j, runtime_close(j->forkfd) != -1);
		j->forkfd = 0;
	}

	if (j->anonymous) {
		/* We never wait() for anonymous processes; fake a clean exit. */
		status = 0;
		memset(&ru, 0, sizeof(ru));
	} else {
		/*
		 * The job is dead. While the PID/PGID is still known to be
		 * valid, try to kill abandoned descendant processes.
		 */
		job_log_stray_pg(j);
		if (!j->abandon_pg) {
			job_assumes(j, runtime_killpg(j->p, SIGTERM) != -1 || errno == ESRCH);
		}

		/*
		 * 5020256
		 *
		 * The current implementation of ptrace() causes the traced process to
		 * be abducted away from the true parent and adopted by the tracer.
		 *
		 * Once the tracing process relinquishes control, the kernel then
		 * restores the true parent/child relationship.
		 *
		 * Unfortunately, the wait*() family of APIs is unaware of the temporarily
		 * data structures changes, and they return an error if reality hasn't
		 * been restored by the time they are called.
		 */
		if (!job_assumes(j, wait4(j->p, &status, 0, &ru) != -1)) {
			job_log(j, LOG_NOTICE, "Working around 5020256. Assuming the job crashed.");

			status = W_EXITCODE(0, SIGSEGV);
			memset(&ru, 0, sizeof(ru));
		}
	}

	if (j->exit_timeout) {
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}

	if (j->anonymous) {
		total_anon_children--;
	} else {
		runtime_del_ref();
		total_children--;
	}
	LIST_REMOVE(j, pid_hash_sle);

	if (j->wait_reply_port) {
		job_log(j, LOG_DEBUG, "MPM wait reply being sent");
		job_assumes(j, job_mig_wait_reply(j->wait_reply_port, 0, status) == 0);
		j->wait_reply_port = MACH_PORT_NULL;
	}

	if (j->sent_sigterm_time) {
		/* Convert mach_absolute_time() delta to nanoseconds via the
		 * cached timebase, then report seconds.microseconds. */
		uint64_t td_sec, td_usec, td = (mach_absolute_time() - j->sent_sigterm_time) * tbi.numer / tbi.denom;

		td_sec = td / NSEC_PER_SEC;
		td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

		job_log(j, LOG_INFO, "Exited %lld.%06lld seconds after %s was sent",
				td_sec, td_usec, signal_to_C_name(j->sent_sigkill ? SIGKILL : SIGTERM));
	}

#if DO_RUSAGE_SUMMATION
	/* Accumulate the child's resource usage into the job's totals. */
	timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
	timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
	j->ru.ru_maxrss += ru.ru_maxrss;
	j->ru.ru_ixrss += ru.ru_ixrss;
	j->ru.ru_idrss += ru.ru_idrss;
	j->ru.ru_isrss += ru.ru_isrss;
	j->ru.ru_minflt += ru.ru_minflt;
	j->ru.ru_majflt += ru.ru_majflt;
	j->ru.ru_nswap += ru.ru_nswap;
	j->ru.ru_inblock += ru.ru_inblock;
	j->ru.ru_oublock += ru.ru_oublock;
	j->ru.ru_msgsnd += ru.ru_msgsnd;
	j->ru.ru_msgrcv += ru.ru_msgrcv;
	j->ru.ru_nsignals += ru.ru_nsignals;
	j->ru.ru_nvcsw += ru.ru_nvcsw;
	j->ru.ru_nivcsw += ru.ru_nivcsw;
#endif

	if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
		job_log(j, LOG_WARNING, "Exited with exit code: %d", WEXITSTATUS(status));
	}

	if (WIFSIGNALED(status)) {
		int s = WTERMSIG(status);
		/* SIGKILL/SIGTERM deaths are routine (we send them ourselves). */
		if (SIGKILL == s || SIGTERM == s) {
			job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
		} else {
			job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
		}
	}

	if (j->hopefully_exits_first) {
		j->mgr->hopefully_first_cnt--;
	} else if (!j->anonymous && !j->hopefully_exits_last) {
		j->mgr->normal_active_cnt--;
	}
	j->last_exit_status = status;
	j->sent_sigkill = false;
	j->p = 0;

	/*
	 * We need to someday evaluate other jobs and find those who wish to track the
	 * active/inactive state of this job. The current job_dispatch() logic makes
	 * this messy, given that jobs can be deleted at dispatch.
	 */
}
2148
2149 void
2150 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
2151 {
2152 jobmgr_t jmi, jmn;
2153 job_t ji, jn;
2154
2155 if (jm->shutting_down) {
2156 return;
2157 }
2158
2159 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
2160 jobmgr_dispatch_all(jmi, newmounthack);
2161 }
2162
2163 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
2164 if (newmounthack && ji->start_on_mount) {
2165 ji->start_pending = true;
2166 }
2167
2168 job_dispatch(ji, false);
2169 }
2170 }
2171
/*
 * Evaluate a job's current state and act on it: remove it when it is no
 * longer useful, start it when kickstarted or required by its keepalive
 * criteria, otherwise (re)arm its watches.
 *
 * Returns NULL when the job was removed; otherwise returns the still-valid
 * job. Callers must use the return value instead of their original pointer.
 */
job_t
job_dispatch(job_t j, bool kickstart)
{
	/*
	 * The whole job removal logic needs to be consolidated. The fact that
	 * a job can be removed from just about anywhere makes it easy to have
	 * stale pointers left behind somewhere on the stack that might get
	 * used after the deallocation. In particular, during job iteration.
	 *
	 * This is a classic example. The act of dispatching a job may delete it.
	 */
	if (!job_active(j)) {
		if (job_useless(j)) {
			job_remove(j);
			return NULL;
		} else if (kickstart || job_keepalive(j)) {
			job_start(j);
		} else {
			job_watch(j);

			/*
			 * 5455720
			 *
			 * Path checking and monitoring is really racy right now.
			 * We should clean this up post Leopard.
			 */
			if (job_keepalive(j)) {
				job_start(j);
			}
		}
	} else {
		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job.");
	}

	return j;
}
2208
2209 void
2210 job_log_stdouterr2(job_t j, const char *msg, ...)
2211 {
2212 struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
2213 va_list ap;
2214
2215 va_start(ap, msg);
2216 runtime_vsyslog(&attr, msg, ap);
2217 va_end(ap);
2218 }
2219
/*
 * Drain the job's stdout/stderr redirection pipe and log each
 * newline/carriage-return separated line, attributed to the job.
 * EOF or a read error closes the pipe and re-dispatches the job.
 */
void
job_log_stdouterr(job_t j)
{
	/* +1 leaves room for the NUL terminator added after read(). */
	char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
	bool close_log_redir = false;
	ssize_t rsz;

	if (!job_assumes(j, buf != NULL)) {
		return;
	}

	bufindex = buf;

	rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);

	if (rsz == 0) {
		job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
		close_log_redir = true;
	} else if (!job_assumes(j, rsz != -1)) {
		close_log_redir = true;
	} else {
		buf[rsz] = '\0';

		/* strsep() yields empty strings for consecutive separators; skip them. */
		while ((msg = strsep(&bufindex, "\n\r"))) {
			if (msg[0]) {
				job_log_stdouterr2(j, "%s", msg);
			}
		}
	}

	free(buf);

	if (close_log_redir) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
		/* The pipe closing may make the job eligible for other action. */
		job_dispatch(j, false);
	}
}
2258
/*
 * Forcibly terminate a job with SIGKILL and arm a timer so we can complain
 * later if the process still fails to die. Jobs without a live pid, and
 * anonymous jobs (processes we did not launch), are left alone.
 */
void
job_kill(job_t j)
{
	if (j->anonymous || !j->p) {
		return;
	}

	job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);
	j->sent_sigkill = true;

	/* Re-check in LAUNCHD_SIGKILL_TIMER seconds whether the kill "took". */
	job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
			EV_ADD, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j) != -1);

	job_log(j, LOG_DEBUG, "Sent SIGKILL signal.");
}
2275
/*
 * EVFILT_PROC callback: react to exec/fork/exit notifications for the
 * job's process.
 */
void
job_callback_proc(job_t j, int flags, int fflags)
{
	if ((fflags & NOTE_EXEC) && j->anonymous) {
		/* An anonymous job exec()ed a new program; re-derive its label
		 * from the new process image name. */
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		if (job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
			char newlabel[1000];

			snprintf(newlabel, sizeof(newlabel), "%p.%s", j, kp.kp_proc.p_comm);

			job_log(j, LOG_DEBUG, "Program changed. Updating the label to: %s", newlabel);

			/* Re-hash the job under its new label. */
			LIST_REMOVE(j, label_hash_sle);
			/* NOTE(review): unbounded strcpy() into j->label — presumably the
			 * label storage for anonymous jobs is allocated large enough for
			 * any "%p.p_comm" string, but that allocation is not visible
			 * here; confirm against the job-creation path. */
			strcpy((char *)j->label, newlabel);
			LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "Called fork()");
	}

	if (fflags & NOTE_EXIT) {
		job_reap(j);

		/* Anonymous jobs cease to exist with their process; managed jobs
		 * are re-evaluated (and may still be removed by the dispatch). */
		if (j->anonymous) {
			job_remove(j);
			j = NULL;
		} else {
			j = job_dispatch(j, false);
		}
	}

	/* NOTE_REAP sanity checking is disabled for now while we try and diagnose 5289559 */
#if 0
	if (j && (fflags & NOTE_REAP)) {
		job_assumes(j, flags & EV_ONESHOT);
		job_assumes(j, flags & EV_EOF);

		job_assumes(j, j->p == 0);
	}
#endif
}
2322
/*
 * EVFILT_TIMER callback: the kevent ident encodes which of the job's
 * timers fired — the respawn throttle (the job pointer itself), the
 * VFS-polling fallback (&j->semaphores), StartInterval
 * (&j->start_interval), or the exit timeout (&j->exit_timeout).
 */
void
job_callback_timer(job_t j, void *ident)
{
	if (j == ident) {
		/* Respawn-throttle timer expired: force a start. */
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		/* Polling fallback for path semaphores (see semaphoreitem_watch). */
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (j->sent_sigkill) {
			/* SIGKILL was already sent and the process is still around;
			 * report how long ago (mach-absolute delta -> seconds). */
			uint64_t td = (mach_absolute_time() - j->sent_sigterm_time) * tbi.numer / tbi.denom;

			td /= NSEC_PER_SEC;
			td -= j->exit_timeout;

			job_log(j, LOG_ERR, "Did not die after sending SIGKILL %llu seconds ago...", td);
		} else {
			/* Grab a sample of the stuck process before resorting to SIGKILL. */
			job_force_sampletool(j);
			if (j->debug_before_kill) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger.");
				job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
			}
			job_log(j, LOG_WARNING, "Exit timeout elapsed (%u seconds). Killing.", j->exit_timeout);
			job_kill(j);
		}
	} else {
		job_assumes(j, false);
	}
}
2354
2355 void
2356 job_callback_read(job_t j, int ident)
2357 {
2358 if (ident == j->log_redirect_fd) {
2359 job_log_stdouterr(j);
2360 } else {
2361 socketgroup_callback(j);
2362 }
2363 }
2364
/*
 * Deliver a process kevent to whichever job (in this manager or any of
 * its submanagers) owns the pid in kev->ident.
 */
void
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
{
	jobmgr_t sub;
	job_t owner;

	/* Depth-first: give every submanager a chance to claim the pid. */
	SLIST_FOREACH(sub, &jm->submgrs, sle) {
		jobmgr_reap_bulk(sub, kev);
	}

	owner = jobmgr_find_by_pid(jm, kev->ident, false);
	if (owner != NULL) {
		kev->udata = owner;
		job_callback(owner, kev);
	}
}
2380
/*
 * Kevent demultiplexer for job-manager level events: bulk process exits,
 * signals directed at launchd, filesystem (un)mounts, and the shared
 * calendar timer.
 */
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	jobmgr_t jm = obj;

	switch (kev->filter) {
	case EVFILT_PROC:
		jobmgr_reap_bulk(jm, kev);
		if (launchd_assumes(root_jobmgr != NULL)) {
			/* Garbage collection may replace the root job manager. */
			root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		}
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		default:
			return (void)jobmgr_assumes(jm, false);
		}
		break;
	case EVFILT_FS:
		/* A new mount may satisfy StartOnMount jobs and path semaphores. */
		if (kev->fflags & VQ_MOUNT) {
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		/* The only manager-level timer is the shared calendar-events timer. */
		if (jobmgr_assumes(jm, kev->ident == (uintptr_t)&sorted_calendar_events)) {
			calendarinterval_callback();
		}
		break;
	default:
		return (void)jobmgr_assumes(jm, false);
	}
}
2418
/*
 * Kevent demultiplexer for per-job events: process notifications, timers,
 * vnode (path semaphore) events, readable fds, and Mach port activity.
 */
void
job_callback(void *obj, struct kevent *kev)
{
	job_t j = obj;

	job_log(j, LOG_DEBUG, "Dispatching kevent callback.");

	switch (kev->filter) {
	case EVFILT_PROC:
		job_callback_proc(j, kev->flags, kev->fflags);
		break;
	case EVFILT_TIMER:
		job_callback_timer(j, (void *)kev->ident);
		break;
	case EVFILT_VNODE:
		semaphoreitem_callback(j, kev);
		break;
	case EVFILT_READ:
		job_callback_read(j, kev->ident);
		break;
	case EVFILT_MACHPORT:
		/* Mach port demand: kickstart the job. */
		job_dispatch(j, true);
		break;
	default:
		job_assumes(j, false);
		break;
	}
}
2441
/*
 * Fork and set up a job's child process. Handles respawn throttling, the
 * stdout/stderr capture pipe, the optional IPC ("sipc") socketpair, and
 * the execspair used to stall the child until the parent has attached
 * its kevents.
 */
void
job_start(job_t j)
{
	uint64_t td, tnow = mach_absolute_time();
	int spair[2];		/* launchd <-> child IPC; only when sipc */
	int execspair[2];	/* child blocks on this until the parent is ready */
	int oepair[2];		/* child's stdout/stderr redirection pipe */
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = /* NOTE_EXEC|NOTE_FORK| */ NOTE_EXIT /* |NOTE_REAP */;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (job_active(j)) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	job_assumes(j, tnow > j->start_time);

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of gettimeofday()
	 * or time() wherever possible.
	 */
	td = (tnow - j->start_time) * tbi.numer / tbi.denom;
	td /= NSEC_PER_SEC;

	/* Respawn throttling: don't restart faster than min_run_time. */
	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;

		/*
		 * We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */

		job_log(j, LOG_WARNING, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
		job_ignore(j);
		return;
	}

	j->sent_sigterm_time = 0;

	if (!j->legacy_mach_job) {
		sipc = (!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices));
	}

	j->checkedin = false;

	if (sipc) {
		job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
	}

	job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);

	/* Capture the child's stdout/stderr through a non-blocking pipe. */
	if (!j->legacy_mach_job && job_assumes(j, pipe(oepair) != -1)) {
		j->log_redirect_fd = _fd(oepair[0]);
		job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
		job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
	}

	j->start_time = tnow;

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		job_assumes(j, runtime_close(execspair[0]) == 0);
		job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			job_assumes(j, runtime_close(spair[0]) == 0);
			job_assumes(j, runtime_close(spair[1]) == 0);
		}
		break;
	case 0:
		/* Child side. */
		if (_vproc_post_fork_ping()) {
			_exit(EXIT_FAILURE);
		}
		if (!j->legacy_mach_job) {
			job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
			job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
			job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		job_assumes(j, runtime_close(execspair[0]) == 0);
		/* wait for our parent to say they've attached a kevent to us */
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			/* Hand the child its half of the IPC socketpair via the environment. */
			job_assumes(j, runtime_close(spair[0]) == 0);
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);	/* replaces the process image or _exit()s */
		break;
	default:
		/* Parent side. */
		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->start_pending = false;

		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);

		if (JOB_BOOTCACHE_HACK_CHECK(j)) {
			did_first_per_user_launchd_BootCache_hack = true;
		}

		if (!j->legacy_mach_job) {
			job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		j->p = c;
		if (j->hopefully_exits_first) {
			j->mgr->hopefully_first_cnt++;
		} else if (!j->hopefully_exits_last) {
			j->mgr->normal_active_cnt++;
		}
		j->forkfd = _fd(execspair[0]);
		job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			job_assumes(j, runtime_close(spair[1]) == 0);
			ipc_open(_fd(spair[0]), j);
		}
		if (job_assumes(j, kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1)) {
			job_ignore(j);
		} else {
			/* Couldn't watch the child; reap it immediately. */
			job_reap(j);
		}

		if (!j->stall_before_exec) {
			job_uncork_fork(j);
		}
		break;
	}
}
2579
/*
 * Run "/usr/sbin/BootCacheControl tag" synchronously, once, before the
 * first per-user launchd is spawned (see JOB_BOOTCACHE_HACK_CHECK).
 * Uses vfork()/execve() directly and waits for the tool to finish.
 */
void
do_first_per_user_launchd_hack(void)
{
	char *bcct_tool[] = { "/usr/sbin/BootCacheControl", "tag", NULL };
	int dummystatus;
	pid_t bcp;

	if (launchd_assumes((bcp = vfork()) != -1)) {
		if (bcp == 0) {
			/* vfork() child: only exec or _exit are safe here. */
			execve(bcct_tool[0], bcct_tool, environ);
			_exit(EXIT_FAILURE);
		} else {
			launchd_assumes(waitpid(bcp, &dummystatus, 0) != -1);
		}
	}
}
2596
/*
 * Exec-side setup for a freshly forked job: applies the job's attributes,
 * builds argv (optionally glob()-expanded, optionally routed through
 * launchproxy for inetd compatibility), then replaces this process via
 * posix_spawn() with POSIX_SPAWN_SETEXEC. Never returns: on any failure
 * it exits the child.
 */
void
job_start_child(job_t j)
{
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	pid_t junk_pid;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC;
	size_t binpref_out_cnt = 0;
	int i;

	if (JOB_BOOTCACHE_HACK_CHECK(j)) {
		do_first_per_user_launchd_hack();
	}

	job_assumes(j, posix_spawnattr_init(&spattr) == 0);

	job_setup_attributes(j);

	if (j->argv && j->globargv) {
		/* Glob-expand every argument. GLOB_DOOFFS + gl_offs = 1 reserves
		 * slot 0 for the launchproxy path. */
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (j->argv) {
		/* argv[0] = launchproxy, then the job's own argv, NULL-terminated. */
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	/* Jobs that don't need inetd compatibility skip the launchproxy slot. */
	if (!j->inetcompat) {
		argv++;
	}

	if (j->wait4debugger) {
		job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

	job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);

	if (j->j_binpref_cnt) {
		/* Honor the job's preferred binary (CPU) architectures. */
		job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
		job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
				job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
			}
		}
	}

	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}

	/* POSIX_SPAWN_SETEXEC: these calls only return on error. */
	if (j->prog) {
		errno = posix_spawn(&junk_pid, j->inetcompat ? file2exec : j->prog, NULL, &spattr, (char *const*)argv, environ);
		job_log_error(j, LOG_ERR, "posix_spawn(\"%s\", ...)", j->prog);
	} else {
		errno = posix_spawnp(&junk_pid, j->inetcompat ? file2exec : argv[0], NULL, &spattr, (char *const*)argv, environ);
		job_log_error(j, LOG_ERR, "posix_spawnp(\"%s\", ...)", argv[0]);
	}

out_bad:
	_exit(EXIT_FAILURE);
}
2693
2694 void
2695 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
2696 {
2697 launch_data_t tmp;
2698 struct envitem *ei;
2699 job_t ji;
2700
2701 if (jm->parentmgr) {
2702 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
2703 } else {
2704 char **tmpenviron = environ;
2705 for (; *tmpenviron; tmpenviron++) {
2706 char envkey[1024];
2707 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
2708 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
2709 strncpy(envkey, *tmpenviron, sizeof(envkey));
2710 *(strchr(envkey, '=')) = '\0';
2711 launch_data_dict_insert(dict, s, envkey);
2712 }
2713 }
2714
2715 LIST_FOREACH(ji, &jm->jobs, sle) {
2716 SLIST_FOREACH(ei, &ji->global_env, sle) {
2717 if ((tmp = launch_data_new_string(ei->value))) {
2718 launch_data_dict_insert(dict, tmp, ei->key);
2719 }
2720 }
2721 }
2722 }
2723
2724 void
2725 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
2726 {
2727 struct envitem *ei;
2728 job_t ji;
2729
2730 if (jm->parentmgr) {
2731 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
2732 }
2733
2734 LIST_FOREACH(ji, &jm->jobs, sle) {
2735 SLIST_FOREACH(ei, &ji->global_env, sle) {
2736 setenv(ei->key, ei->value, 1);
2737 }
2738 }
2739 }
2740
/*
 * Diagnostic helper for jobs keyed by a Mach UID that has no passwd
 * entry: scan the kernel's process table and log every process running
 * with that UID (real, effective, or saved) so the culprit can be found.
 */
void
job_find_and_blame_pids_with_weird_uids(job_t j)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	/* NOTE(review): fixed 10 MB buffer — assumed larger than the process
	 * table will ever be; sysctl() would truncate otherwise. Consider the
	 * two-call sizing idiom. */
	size_t i, kp_cnt, len = 10*1024*1024;
	struct kinfo_proc *kp = malloc(len);
	uid_t u = j->mach_uid;

	if (!job_assumes(j, kp != NULL)) {
		return;
	}
	if (!job_assumes(j, sysctl(mib, 3, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	/* sysctl() rewrote len to the number of bytes actually returned. */
	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		uid_t i_euid = kp[i].kp_eproc.e_ucred.cr_uid;
		uid_t i_uid = kp[i].kp_eproc.e_pcred.p_ruid;
		uid_t i_svuid = kp[i].kp_eproc.e_pcred.p_svuid;
		pid_t i_pid = kp[i].kp_proc.p_pid;

		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u",
				i_pid, kp[i].kp_proc.p_comm, i_uid, i_euid, i_svuid);

/* Temporarily disabled due to 5423935 and 4946119. */
#if 0
		/* Ask the accountless process to exit. */
		job_assumes(j, runtime_kill(i_pid, SIGTERM) != -1);
#endif
	}

out:
	free(kp);
}
2781
/*
 * In the forked child: drop from root to the job's configured user and
 * group — setlogin, setgid, initgroups, then setuid, in that order — and
 * export the standard user environment (TMPDIR/SHELL/HOME/USER/LOGNAME).
 * Any failure exits the child. No-op when launchd is not running as root
 * or the job configures no user.
 */
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	if (getuid() != 0) {
		return;
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = getpwnam(j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(EXIT_FAILURE);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_find_and_blame_pids_with_weird_uids(j);
			_exit(EXIT_FAILURE);
		}
	} else {
		/* No user configured: stay as we are. */
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	if (pwe->pw_expire && time(NULL) >= pwe->pw_expire) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	/* Warn (but proceed) when the configured name/UID maps elsewhere. */
	if (j->username && strcmp(j->username, loginname) != 0) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (j->mach_uid && (j->mach_uid != desired_uid)) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	if (j->groupname) {
		struct group *gre;

		if ((gre = getgrnam(j->groupname)) == NULL) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(EXIT_FAILURE);
		}

		/* An explicit GroupName overrides the passwd entry's gid. */
		desired_gid = gre->gr_gid;
	}

	if (!job_assumes(j, setlogin(loginname) != -1)) {
		_exit(EXIT_FAILURE);
	}

	if (!job_assumes(j, setgid(desired_gid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (!j->no_init_groups) {
		if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
			_exit(EXIT_FAILURE);
		}
	}

	/* setuid() last: once UID drops, the gid/groups calls above would fail. */
	if (!job_assumes(j, setuid(desired_uid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (r > 0 && r < sizeof(tmpdirpath)) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	/* overwrite = 0: never clobber values the job configured explicitly. */
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
2893
/*
 * Apply a job's configured execution context in the forked child, in a
 * deliberate order: nice, resource limits, audit session, I/O policy,
 * chroot (while still root), credential drop, working directory, umask,
 * stdio redirection, environment, and finally process group / session.
 */
void
job_setup_attributes(job_t j)
{
	struct limititem *li;
	struct envitem *ei;

	if (j->setnice) {
		job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
	}

	SLIST_FOREACH(li, &j->limits, sle) {
		struct rlimit rl;

		if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
			continue;
		}

		/* Only overwrite the halves the job explicitly configured. */
		if (li->sethard) {
			rl.rlim_max = li->lim.rlim_max;
		}
		if (li->setsoft) {
			rl.rlim_cur = li->lim.rlim_cur;
		}

		if (setrlimit(li->which, &rl) == -1) {
			job_log_error(j, LOG_WARNING, "setrlimit()");
		}
	}

	if (!j->inetcompat && j->session_create) {
		launchd_SessionCreate();
	}

	if (j->low_pri_io) {
		job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
	}
	if (j->rootdir) {
		/* chroot requires root, so it must precede the credential drop. */
		job_assumes(j, chroot(j->rootdir) != -1);
		job_assumes(j, chdir(".") != -1);
	}

	job_postfork_become_user(j);

	if (j->workingdir) {
		job_assumes(j, chdir(j->workingdir) != -1);
	}

	if (j->setmask) {
		umask(j->mask);
	}

	job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_APPEND|O_CREAT);
	job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_APPEND|O_CREAT);

	/* Global env first, then the job's own env (overwrite = 1) wins. */
	jobmgr_setup_env_from_other_jobs(j->mgr);

	SLIST_FOREACH(ei, &j->env, sle) {
		setenv(ei->key, ei->value, 1);
	}

	/*
	 * We'd like to call setsid() unconditionally, but we have reason to
	 * believe that prevents launchd from being able to send signals to
	 * setuid children. We'll settle for process-groups.
	 */
	if (getppid() != 1) {
		job_assumes(j, setpgid(0, 0) != -1);
	} else {
		job_assumes(j, setsid() != -1);
	}
}
2965
2966 void
2967 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
2968 {
2969 int fd;
2970
2971 if (!path) {
2972 return;
2973 }
2974
2975 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
2976 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
2977 return;
2978 }
2979
2980 job_assumes(j, dup2(fd, target_fd) != -1);
2981 job_assumes(j, runtime_close(fd) == 0);
2982 }
2983
2984 int
2985 dir_has_files(job_t j, const char *path)
2986 {
2987 DIR *dd = opendir(path);
2988 struct dirent *de;
2989 bool r = 0;
2990
2991 if (!dd) {
2992 return -1;
2993 }
2994
2995 while ((de = readdir(dd))) {
2996 if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
2997 r = 1;
2998 break;
2999 }
3000 }
3001
3002 job_assumes(j, closedir(dd) == 0);
3003 return r;
3004 }
3005
/*
 * Compute the next fire time for a calendar interval, insert it into the
 * globally sorted list of calendar events, and (re)arm the single shared
 * absolute-time kevent timer to the earliest event in the list.
 */
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		/* cron semantics: when both weekday and month-day are specified,
		 * the earlier of the two candidate times wins. */
		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	/* Keep sorted_calendar_events ordered by ascending when_next. */
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		/* ci must want to fire after every other timer, or there are no timers */

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	/* Workaround 5225889 */
	kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_DELETE, 0, 0, root_jobmgr);

	if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
		char time_string[100];
		size_t time_string_len;

		/* Trim ctime_r()'s trailing newline before logging. */
		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		if (time_string_len && time_string[time_string_len - 1] == '\n') {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
3064
/*
 * Pull the bare revision number out of an RCS id string: for input like
 * "$Revision: 23433 $" the output is the token following the first space
 * ("23433"), truncated at the next space. Inputs containing no space are
 * copied through unchanged (bounded by osz, always NUL-terminated).
 */
void
extract_rcsid_substr(const char *i, char *o, size_t osz)
{
	char *space = strchr(i, ' ');

	if (space == NULL) {
		/* No RCS markers present; pass the input through. */
		snprintf(o, osz, "%s", i);
		return;
	}

	snprintf(o, osz, "%s", space + 1);

	space = strchr(o, ' ');
	if (space != NULL) {
		*space = '\0';
	}
}
3080
3081 void
3082 jobmgr_log_bug(jobmgr_t jm, const char *rcs_rev, const char *path, unsigned int line, const char *test)
3083 {
3084 int saved_errno = errno;
3085 const char *file = strrchr(path, '/');
3086 char buf[100];
3087
3088 extract_rcsid_substr(rcs_rev, buf, sizeof(buf));
3089
3090 if (!file) {
3091 file = path;
3092 } else {
3093 file += 1;
3094 }
3095
3096 jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u: %s", file, line, buf, saved_errno, test);
3097 }
3098
3099 void
3100 job_log_bug(job_t j, const char *rcs_rev, const char *path, unsigned int line, const char *test)
3101 {
3102 int saved_errno = errno;
3103 const char *file = strrchr(path, '/');
3104 char buf[100];
3105
3106 extract_rcsid_substr(rcs_rev, buf, sizeof(buf));
3107
3108 if (!file) {
3109 file = path;
3110 } else {
3111 file += 1;
3112 }
3113
3114 job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u: %s", file, line, buf, saved_errno, test);
3115 }
3116
/*
 * Core formatter for per-job log messages. 'err', when non-zero, is an
 * errno value whose strerror() text is appended. On the child side of a
 * fork() (bootstrap_port set), the message is relayed back to launchd
 * proper instead of being emitted locally.
 */
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	struct runtime_syslog_attr attr = { "com.apple.launchd", j->label, j->mgr->name, pri, getuid(), getpid(), j->p };
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	/*
	 * Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(), but before the exec*(). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	/* 200 bytes of headroom covers ": " plus any strerror() text. */
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s", msg);
	}

	/* Jobs marked debug log everything, regardless of the global mask. */
	if (j->debug) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	runtime_vsyslog(&attr, newmsg, ap);

	if (j->debug) {
		setlogmask(oldmask);
	}
}
3153
3154 void
3155 job_log_error(job_t j, int pri, const char *msg, ...)
3156 {
3157 va_list ap;
3158
3159 va_start(ap, msg);
3160 job_logv(j, pri, errno, msg, ap);
3161 va_end(ap);
3162 }
3163
3164 void
3165 job_log(job_t j, int pri, const char *msg, ...)
3166 {
3167 va_list ap;
3168
3169 va_start(ap, msg);
3170 job_logv(j, pri, 0, msg, ap);
3171 va_end(ap);
3172 }
3173
/* Currently compiled out: the errno-appending variant of jobmgr_log(),
 * the job-manager counterpart to job_log_error(). */
#if 0
void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}
#endif
3185
3186 void
3187 jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
3188 {
3189 va_list ap;
3190
3191 va_start(ap, msg);
3192 jobmgr_logv(jm, pri, 0, msg, ap);
3193 va_end(ap);
3194 }
3195
/*
 * Core formatter for job-manager log messages. Prepends the manager's
 * name to the message; '%' characters in the name are doubled so the
 * result is safe to reuse as a format string when the message is
 * forwarded up through parent managers. Only the root manager actually
 * emits to syslog.
 */
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	/* Worst case: every character in the name is a '%' needing doubling. */
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	/* Recurse toward the root manager; err is already folded into newmsg. */
	if (jm->parentmgr) {
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct runtime_syslog_attr attr = { "com.apple.launchd", "com.apple.launchd", jm->name, pri, getuid(), getpid(), getpid() };

		runtime_vsyslog(&attr, newmsg, ap);
	}
}
3230
/*
 * Stop watching a path-semaphore's vnode, when one is currently open.
 */
void
semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
{
	if (si->fd == -1) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
	job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
}
3239
/*
 * Attach an EVFILT_VNODE watch for a path-based semaphore. When the path
 * itself cannot be opened, its parent directory is watched instead (so
 * creation can be detected). Falls back to a 3-second polling timer on
 * filesystems without kqueue support (kevent fails with ENOTSUP).
 */
void
semaphoreitem_watch(job_t j, struct semaphoreitem *si)
{
	char parentdir_path[PATH_MAX], *which_path = si->what;
	int saved_errno = 0;
	int fflags = 0;

	strlcpy(parentdir_path, dirname(si->what), sizeof(parentdir_path));

	/* Choose the vnode events relevant to this semaphore type. */
	switch (si->why) {
	case PATH_EXISTS:
		fflags = NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
		break;
	case PATH_MISSING:
		fflags = NOTE_DELETE|NOTE_RENAME;
		break;
	case DIR_NOT_EMPTY:
	case PATH_CHANGES:
		fflags = NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE|NOTE_ATTRIB|NOTE_LINK;
		break;
	default:
		return;
	}

	/* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
	do {
		if (si->fd == -1) {
			if ((si->fd = _fd(open(which_path, O_EVTONLY|O_NOCTTY))) == -1) {
				/* Target missing: fall back to watching its parent. */
				which_path = parentdir_path;
				si->fd = _fd(open(which_path, O_EVTONLY|O_NOCTTY));
			}
		}

		if (si->fd == -1) {
			return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", which_path);
		}

		job_log(j, LOG_DEBUG, "Watching Vnode: %d", si->fd);

		if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
			saved_errno = errno;
			/*
			 * The FD can be revoked between the open() and kevent().
			 * This is similar to the inability for kevents to be
			 * attached to short lived zombie processes after fork()
			 * but before kevent().
			 */
			job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1;
		}
	} while ((si->fd == -1) && (saved_errno == ENOENT));

	if (saved_errno == ENOTSUP) {
		/*
		 * 3524219 NFS needs kqueue support
		 * 4124079 VFS needs generic kqueue support
		 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
		 */
		job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);

		if (!j->poll_for_vfs_changes) {
			j->poll_for_vfs_changes = true;
			/* &j->semaphores is the ident job_callback_timer recognizes
			 * as the polling fallback. */
			job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
		}
	}
}
3306
/*
 * EVFILT_VNODE callback: find the path semaphore matching the fired fd,
 * drop the fd when the vnode became invalid (deleted/renamed/revoked) so
 * that semaphoreitem_watch() re-opens it later, then re-dispatch the job.
 */
void
semaphoreitem_callback(job_t j, struct kevent *kev)
{
	char invalidation_reason[100] = "";
	struct semaphoreitem *si;

	/* Locate the path-type semaphore that owns this fd. */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		switch (si->why) {
		case PATH_CHANGES:
		case PATH_EXISTS:
		case PATH_MISSING:
		case DIR_NOT_EMPTY:
			break;
		default:
			continue;
		}

		if (si->fd == (int)kev->ident) {
			break;
		}
	}

	if (!job_assumes(j, si != NULL)) {
		return;
	}

	/* Build a human-readable "deleted/renamed/revoked" summary;
	 * worst case fits well within the 100-byte buffer. */
	if (NOTE_DELETE & kev->fflags) {
		strcat(invalidation_reason, "deleted");
	}

	if (NOTE_RENAME & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/renamed");
		} else {
			strcat(invalidation_reason, "renamed");
		}
	}

	if (NOTE_REVOKE & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/revoked");
		} else {
			strcat(invalidation_reason, "revoked");
		}
	}

	if (invalidation_reason[0]) {
		job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
		job_assumes(j, runtime_close(si->fd) == 0);
		si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
	}

	job_log(j, LOG_DEBUG, "Watch path modified: %s", si->what);

	if (si->why == PATH_CHANGES) {
		j->start_pending = true;
	}

	job_dispatch(j, false);
}
3367
3368 void
3369 calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
3370 {
3371 struct tm *tmptm = context;
3372 int64_t val;
3373
3374 if (LAUNCH_DATA_INTEGER != launch_data_get_type(obj)) {
3375 /* hack to let caller know something went wrong */
3376 tmptm->tm_sec = -1;
3377 return;
3378 }
3379
3380 val = launch_data_get_integer(obj);
3381
3382 if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
3383 tmptm->tm_min = val;
3384 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
3385 tmptm->tm_hour = val;
3386 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
3387 tmptm->tm_mday = val;
3388 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
3389 tmptm->tm_wday = val;
3390 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
3391 tmptm->tm_mon = val;
3392 tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
3393 }
3394 }
3395
3396 bool
3397 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
3398 {
3399 struct tm tmptm;
3400
3401 memset(&tmptm, 0, sizeof(0));
3402
3403 tmptm.tm_min = -1;
3404 tmptm.tm_hour = -1;
3405 tmptm.tm_mday = -1;
3406 tmptm.tm_wday = -1;
3407 tmptm.tm_mon = -1;
3408
3409 if (!job_assumes(j, obj != NULL)) {
3410 return false;
3411 }
3412
3413 if (LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj)) {
3414 return false;
3415 }
3416
3417 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &tmptm);
3418
3419 if (tmptm.tm_sec == -1) {
3420 return false;
3421 }
3422
3423 return calendarinterval_new(j, &tmptm);
3424 }
3425
3426 bool
3427 calendarinterval_new(job_t j, struct tm *w)
3428 {
3429 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
3430
3431 if (!job_assumes(j, ci != NULL)) {
3432 return false;
3433 }
3434
3435 ci->when = *w;
3436 ci->job = j;
3437
3438 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
3439
3440 calendarinterval_setalarm(j, ci);
3441
3442 runtime_add_ref();
3443
3444 return true;
3445 }
3446
/*
 * Tear down one calendar interval: unhook it from both the per-job list
 * and the global sorted event list, then drop the runtime reference taken
 * in calendarinterval_new().
 */
void
calendarinterval_delete(job_t j, struct calendarinterval *ci)
{
	SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
	LIST_REMOVE(ci, global_sle);

	free(ci);

	runtime_del_ref();
}
3457
3458 void
3459 calendarinterval_sanity_check(void)
3460 {
3461 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
3462 time_t now = time(NULL);
3463
3464 if (ci && (ci->when_next < now)) {
3465 jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
3466 }
3467 }
3468
/*
 * Timer callback: fire every calendar interval whose deadline has passed.
 * The global list is sorted by when_next, so we can stop at the first
 * entry still in the future.
 */
void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		if (ci->when_next > now) {
			break;
		}

		/*
		 * Remove from the sorted list first; calendarinterval_setalarm()
		 * recomputes when_next and re-inserts ci at its new position.
		 */
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
3489
3490 bool
3491 socketgroup_new(job_t j, const char *name, int *fds, unsigned int fd_cnt, bool junkfds)
3492 {
3493 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
3494
3495 if (!job_assumes(j, sg != NULL)) {
3496 return false;
3497 }
3498
3499 sg->fds = calloc(1, fd_cnt * sizeof(int));
3500 sg->fd_cnt = fd_cnt;
3501 sg->junkfds = junkfds;
3502
3503 if (!job_assumes(j, sg->fds != NULL)) {
3504 free(sg);
3505 return false;
3506 }
3507
3508 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
3509 strcpy(sg->name, name);
3510
3511 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
3512
3513 runtime_add_ref();
3514
3515 return true;
3516 }
3517
/*
 * Close every descriptor owned by the socketgroup, unhook it from the
 * job, and release the runtime reference taken in socketgroup_new().
 */
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		/* 5480306 */
		if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			job_assumes(j, unlink(sun->sun_path) != -1);
			/* We might conditionally need to delete a directory here */
		}
#endif
		job_assumes(j, runtime_close(sg->fds[i]) != -1);
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	runtime_del_ref();
}
3546
/*
 * Add or remove EVFILT_READ kevents for every descriptor in the group in
 * one bulk kevent() call. No-op for junkfds groups (descriptors kept open
 * but deliberately not watched).
 */
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	/* NOTE(review): VLA of sg->fd_cnt — assumes fd_cnt > 0 here; confirm callers. */
	struct kevent kev[sg->fd_cnt];
	char buf[10000];
	unsigned int i, buf_off = 0;

	if (sg->junkfds) {
		return;
	}

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);

	/*
	 * Bulk registration reports per-entry results in place: each entry is
	 * expected to come back with EV_ERROR set and data == 0 on success.
	 */
	for (i = 0; i < sg->fd_cnt; i++) {
		job_assumes(j, kev[i].flags & EV_ERROR);
		errno = kev[i].data;
		job_assumes(j, kev[i].data == 0);
	}
}
3573
/* Stop watching the group's descriptors (e.g. while the job runs). */
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, false);
}

/* Resume watching the group's descriptors for incoming connections. */
void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, true);
}

/* A watched socket became readable: dispatch the job with kickstart. */
void
socketgroup_callback(job_t j)
{
	job_dispatch(j, true);
}
3591
3592 bool
3593 envitem_new(job_t j, const char *k, const char *v, bool global)
3594 {
3595 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
3596
3597 if (!job_assumes(j, ei != NULL)) {
3598 return false;
3599 }
3600
3601 strcpy(ei->key, k);
3602 ei->value = ei->key + strlen(k) + 1;
3603 strcpy(ei->value, v);
3604
3605 if (global) {
3606 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
3607 } else {
3608 SLIST_INSERT_HEAD(&j->env, ei, sle);
3609 }
3610
3611 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
3612
3613 return true;
3614 }
3615
/*
 * Remove and free one environment variable. The global flag must match
 * the one passed to envitem_new(), since it selects which list the item
 * lives on.
 */
void
envitem_delete(job_t j, struct envitem *ei, bool global)
{
	if (global) {
		SLIST_REMOVE(&j->global_env, ei, envitem, sle);
	} else {
		SLIST_REMOVE(&j->env, ei, envitem, sle);
	}

	free(ei);
}
3627
3628 void
3629 envitem_setup(launch_data_t obj, const char *key, void *context)
3630 {
3631 job_t j = context;
3632
3633 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
3634 return;
3635 }
3636
3637 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
3638 }
3639
3640 bool
3641 limititem_update(job_t j, int w, rlim_t r)
3642 {
3643 struct limititem *li;
3644
3645 SLIST_FOREACH(li, &j->limits, sle) {
3646 if (li->which == w) {
3647 break;
3648 }
3649 }
3650
3651 if (li == NULL) {
3652 li = calloc(1, sizeof(struct limititem));
3653
3654 if (!job_assumes(j, li != NULL)) {
3655 return false;
3656 }
3657
3658 SLIST_INSERT_HEAD(&j->limits, li, sle);
3659
3660 li->which = w;
3661 }
3662
3663 if (j->importing_hard_limits) {
3664 li->lim.rlim_max = r;
3665 li->sethard = true;
3666 } else {
3667 li->lim.rlim_cur = r;
3668 li->setsoft = true;
3669 }
3670
3671 return true;
3672 }
3673
/* Unhook one resource-limit entry from the job and free it. */
void
limititem_delete(job_t j, struct limititem *li)
{
	SLIST_REMOVE(&j->limits, li, limititem, sle);

	free(li);
}
3681
3682 void
3683 seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
3684 {
3685 job_t j = context;
3686
3687 if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
3688 job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
3689 return;
3690 }
3691
3692 if (launch_data_get_bool(obj) == false) {
3693 return;
3694 }
3695
3696 if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
3697 j->seatbelt_flags |= SANDBOX_NAMED;
3698 }
3699 }
3700
3701 void
3702 limititem_setup(launch_data_t obj, const char *key, void *context)
3703 {
3704 job_t j = context;
3705 int i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
3706 rlim_t rl;
3707
3708 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
3709 return;
3710 }
3711
3712 rl = launch_data_get_integer(obj);
3713
3714 for (i = 0; i < limits_cnt; i++) {
3715 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
3716 break;
3717 }
3718 }
3719
3720 if (i == limits_cnt) {
3721 return;
3722 }
3723
3724 limititem_update(j, launchd_keys2limits[i].val, rl);
3725 }
3726
/*
 * Decide whether an exited job should be garbage collected rather than
 * kept around for a future launch. Called after the job's process dies.
 */
bool
job_useless(job_t j)
{
	/* Yes, j->unload_at_exit and j->only_once seem the same, but they'll differ someday... */

	if ((j->unload_at_exit || j->only_once) && j->start_time != 0) {
		/* A held j_port keeps an unload_at_exit job alive for now. */
		if (j->unload_at_exit && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->mgr->shutting_down) {
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		return true;
	} else if (j->legacy_mach_job) {
		/* Legacy Mach jobs are only worth keeping if they registered and checked in. */
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	}

	return false;
}
3756
/*
 * Evaluate every KeepAlive criterion for job j and return true if any of
 * them says the job should be (re)started now. Each "wanted_state = true"
 * case deliberately falls through into its negated twin below it, so the
 * pair shares one comparison.
 */
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	struct stat sb;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (j->mgr->global_on_demand_cnt > 0 && strcmp(j->label, "com.apple.kextd") != 0) {
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	/* Restart immediately if any advertised Mach service has queued messages. */
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: job restarted due to %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}


	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		int qdir_file_cnt;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			/* FALLTHROUGH: shares the comparison with NETWORK_DOWN */
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			/* FALLTHROUGH: shares the comparison with FAILED_EXIT */
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			/* FALLTHROUGH: shares the comparison with OTHER_JOB_DISABLED */
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			/* FALLTHROUGH: shares the comparison with OTHER_JOB_INACTIVE */
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		case PATH_EXISTS:
			wanted_state = true;
			/* FALLTHROUGH: shares the comparison with PATH_MISSING */
		case PATH_MISSING:
			if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
				/* The vnode watch is no longer needed once the condition fires. */
				if (si->fd != -1) {
					job_assumes(j, runtime_close(si->fd) == 0);
					si->fd = -1;
				}
				job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
				return true;
			}
			break;
		case PATH_CHANGES:
			/* Handled by semaphoreitem_callback(), not polled here. */
			break;
		case DIR_NOT_EMPTY:
			if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
				job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
			} else if (qdir_file_cnt > 0) {
				job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
				return true;
			}
			break;
		}
	}

	return false;
}
3868
3869 const char *
3870 job_prog(job_t j)
3871 {
3872 if (j->prog) {
3873 return j->prog;
3874 } else if (j->argv) {
3875 return j->argv[0];
3876 } else {
3877 return "";
3878 }
3879 }
3880
/*
 * Report why job j still counts as active, or NULL if it is fully idle.
 * The returned string is a static description suitable for logging.
 */
const char *
job_active(job_t j)
{
	struct machservice *ms;

	if (j->p) {
		return "PID is still valid";
	}

	/* During shutdown, stop waiting on redirected stdout/stderr pipes. */
	if (j->mgr->shutting_down && j->log_redirect_fd) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
	}

	if (j->log_redirect_fd) {
		/* Only wait4pipe_eof jobs should legitimately hold this fd open here. */
		if (job_assumes(j, j->wait4pipe_eof)) {
			return "Standard out/error is still valid";
		} else {
			job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
			j->log_redirect_fd = 0;
		}
	}

	if (j->priv_port_has_senders) {
		return "Privileged Port still has outstanding senders";
	}

	SLIST_FOREACH(ms, &j->machservices, sle) {
		if (ms->recv && ms->isActive) {
			return "Mach service is still active";
		}
	}

	return NULL;
}
3916
/*
 * Start demanding-launch monitoring of a Mach service we hold the receive
 * right for, by adding its port to the runtime's portset.
 */
void
machservice_watch(job_t j, struct machservice *ms)
{
	if (ms->recv) {
		job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
	}
}

/* Stop monitoring the service's port (e.g. once the job has checked in). */
void
machservice_ignore(job_t j, struct machservice *ms)
{
	job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
}
3930
/*
 * Destroy and recreate the service's port (ResetAtClose). The port hash
 * entry must be removed first and re-added last because the new port
 * number will hash differently. gen_num lets clients detect the swap.
 */
void
machservice_resetport(job_t j, struct machservice *ms)
{
	LIST_REMOVE(ms, port_hash_sle);
	job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
	ms->gen_num++;
	job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
	job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
3942
/*
 * Create a Mach service record named "name" on job j.
 *
 * If *serviceport is MACH_PORT_NULL, launchd allocates a receive right
 * (and a send right) and returns the port through *serviceport; otherwise
 * the caller-supplied port is adopted and the service is marked active.
 * pid_local marks a per-PID service. Returns NULL on failure.
 */
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	struct machservice *ms;

	/* The name is stored in the trailing variable-length member. */
	if ((ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1)) == NULL) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->per_pid = pid_local;

	if (*serviceport == MACH_PORT_NULL) {
		if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
			goto out_bad;
		}

		if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);
	LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	job_log(j, LOG_INFO, "Mach service added: %s", name);

	return ms;
out_bad2:
	/* Undo the receive right created above before freeing. */
	job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
out_bad:
	free(ms);
	return NULL;
}
3984
3985 bootstrap_status_t
3986 machservice_status(struct machservice *ms)
3987 {
3988 if (ms->isActive) {
3989 return BOOTSTRAP_STATUS_ACTIVE;
3990 } else if (ms->job->ondemand) {
3991 return BOOTSTRAP_STATUS_ON_DEMAND;
3992 } else {
3993 return BOOTSTRAP_STATUS_INACTIVE;
3994 }
3995 }
3996
/*
 * Point crash exception handling at the registered exception server:
 * on a specific task when target_task is given, otherwise host-wide
 * (PID 1 only). No-op until some job has claimed the exception server.
 */
void
job_setup_exception_port(job_t j, task_t target_task)
{
	thread_state_flavor_t f = 0;

	if (!the_exception_server) {
		return;
	}

	/* Pick the architecture's preferred thread-state flavor. */
#if defined (__ppc__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__)
	f = x86_THREAD_STATE;
#endif

	if (target_task) {
		job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, the_exception_server,
					EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
	} else if (getpid() == 1) {
		mach_port_t mhp = mach_host_self();
		job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server,
					EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
		job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
	}

}
4023
/*
 * Claim the global crash exception server port for job j (first claimant
 * wins). NOTE(review): the function name misspells "exception"; it is
 * referenced elsewhere, so renaming would need a coordinated change.
 */
void
job_set_exeception_port(job_t j, mach_port_t port)
{
	if (!the_exception_server) {
		the_exception_server = port;
		job_setup_exception_port(j, 0);
	} else {
		job_log(j, LOG_WARNING, "The exception server is already claimed!");
	}
}
4034
4035 void
4036 machservice_setup_options(launch_data_t obj, const char *key, void *context)
4037 {
4038 struct machservice *ms = context;
4039 mach_port_t mhp = mach_host_self();
4040 int which_port;
4041 bool b;
4042
4043 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
4044 return;
4045 }
4046
4047 switch (launch_data_get_type(obj)) {
4048 case LAUNCH_DATA_INTEGER:
4049 which_port = launch_data_get_integer(obj);
4050 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
4051 switch (which_port) {
4052 case TASK_KERNEL_PORT:
4053 case TASK_HOST_PORT:
4054 case TASK_NAME_PORT:
4055 case TASK_BOOTSTRAP_PORT:
4056 /* I find it a little odd that zero isn't reserved in the header */
4057 case 0:
4058 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
4059 break;
4060 default:
4061 ms->special_port_num = which_port;
4062 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
4063 break;
4064 }
4065 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && getpid() == 1) {
4066 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
4067 job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
4068 } else {
4069 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
4070 }
4071 }
4072 case LAUNCH_DATA_BOOL:
4073 b = launch_data_get_bool(obj);
4074 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
4075 ms->debug_on_close = b;
4076 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
4077 ms->reset = b;
4078 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
4079 ms->hide = b;
4080 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
4081 job_set_exeception_port(ms->job, ms->port);
4082 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
4083 ms->kUNCServer = b;
4084 job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
4085 }
4086 break;
4087 case LAUNCH_DATA_DICTIONARY:
4088 job_set_exeception_port(ms->job, ms->port);
4089 break;
4090 default:
4091 break;
4092 }
4093
4094 job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
4095 }
4096
4097 void
4098 machservice_setup(launch_data_t obj, const char *key, void *context)
4099 {
4100 job_t j = context;
4101 struct machservice *ms;
4102 mach_port_t p = MACH_PORT_NULL;
4103
4104 if ((ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
4105 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
4106 return;
4107 }
4108
4109 if ((ms = machservice_new(j, key, &p, false)) == NULL) {
4110 job_log_error(j, LOG_WARNING, "Cannot add service: %s", key);
4111 return;
4112 }
4113
4114 ms->isActive = false;
4115
4116 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
4117 launch_data_dict_iterate(obj, machservice_setup_options, ms);
4118 }
4119 }
4120
/*
 * Drive the staged shutdown of a job manager: recurse into sub-managers,
 * then stop "normal" jobs, then "hopefully last" jobs, and finally remove
 * the manager once only anonymous jobs remain. Returns the manager if it
 * is still alive, or NULL once it has been removed.
 */
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	job_t ji, jn;

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	if (!jm->shutting_down) {
		return jm;
	}

	jobmgr_log(jm, LOG_DEBUG, "Garbage collecting.");

	/*
	 * Normally, we wait for all resources of a job (Unix PIDs/FDs and Mach ports)
	 * to reset before we consider the job truly dead and ready to be spawned again.
	 *
	 * In order to work around 5487724 and 3456090, we're going to call reboot()
	 * when the last PID dies and not wait for the associated resources to reset.
	 */
	if (getpid() == 1 && jm->parentmgr == NULL && total_children == 0) {
		jobmgr_log(jm, LOG_DEBUG, "About to force a call to: reboot(%s)", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
	}

	/* "Hopefully first" jobs must be gone before anything else is stopped. */
	if (jm->hopefully_first_cnt) {
		return jm;
	}

	if (jm->parentmgr && jm->parentmgr->shutting_down && jm->parentmgr->hopefully_first_cnt) {
		return jm;
	}

	/* Stage 1: stop (or remove, if already idle) every ordinary job. */
	if (!jm->sent_stop_to_normal_jobs) {
		jobmgr_log(jm, LOG_DEBUG, "Asking \"normal\" jobs to exit.");

		LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
			if (!job_active(ji)) {
				job_remove(ji);
			} else if (!ji->hopefully_exits_last) {
				job_stop(ji);
			}
		}

		jm->sent_stop_to_normal_jobs = true;
	}

	if (jm->normal_active_cnt) {
		return jm;
	}

	/* Stage 2: once normal jobs are gone, stop the "hopefully last" jobs. */
	if (!jm->sent_stop_to_hopefully_last_jobs) {
		jobmgr_log(jm, LOG_DEBUG, "Asking \"hopefully last\" jobs to exit.");

		LIST_FOREACH(ji, &jm->jobs, sle) {
			if (ji->p && ji->anonymous) {
				continue;
			} else if (ji->p && job_assumes(ji, ji->hopefully_exits_last)) {
				job_stop(ji);
			}
		}

		jm->sent_stop_to_hopefully_last_jobs = true;
	}

	if (!SLIST_EMPTY(&jm->submgrs)) {
		return jm;
	}

	/* Only anonymous (externally created) jobs may remain at removal time. */
	LIST_FOREACH(ji, &jm->jobs, sle) {
		if (!ji->anonymous) {
			return jm;
		}
	}

	jobmgr_log_stray_children(jm);
	jobmgr_remove(jm);
	return NULL;
}
4204
/*
 * At root-manager shutdown of PID 1, dump every process still alive
 * (other than the kernel and launchd itself) so strays are visible in
 * the log. Purely diagnostic; nothing is killed.
 */
void
jobmgr_log_stray_children(jobmgr_t jm)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	size_t i, kp_cnt, len = 10*1024*1024;
	struct kinfo_proc *kp;

	if (jm->parentmgr || getpid() != 1) {
		return;
	}

	if (!jobmgr_assumes(jm, (kp = malloc(len)) != NULL)) {
		return;
	}
	/* sysctl() rewrites len to the number of bytes actually returned. */
	if (!jobmgr_assumes(jm, sysctl(mib, 3, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = kp[i].kp_proc.p_pid;
		pid_t pp_i = kp[i].kp_eproc.e_ppid;
		pid_t pg_i = kp[i].kp_eproc.e_pgid;
		const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
		const char *n = kp[i].kp_proc.p_comm;

		/* PID 0 is the kernel; PID 1 is us. */
		if (p_i == 0 || p_i == 1) {
			continue;
		}

		jobmgr_log(jm, LOG_WARNING, "Stray %sprocess at shutdown: PID %u PPID %u PGID %u %s", z, p_i, pp_i, pg_i, n);

		/*
		 * The kernel team requested that launchd not do this for Leopard.
		 * jobmgr_assumes(jm, runtime_kill(p_i, SIGKILL) != -1);
		 */
	}

out:
	free(kp);
}
4247
/* Accessor: the parent job manager, or NULL for the root manager. */
jobmgr_t
jobmgr_parent(jobmgr_t jm)
{
	return jm->parentmgr;
}
4253
/*
 * Release a forked child that is blocked reading from the fork pipe.
 * Writing the PID both unblocks the child and closes the race window
 * between fork() and our kevent registration for it.
 */
void
job_uncork_fork(job_t j)
{
	pid_t c = j->p;

	job_log(j, LOG_DEBUG, "Uncorking the fork().");
	/* this unblocks the child and avoids a race
	 * between the above fork() and the kevent_mod() */
	job_assumes(j, write(j->forkfd, &c, sizeof(c)) == sizeof(c));
	job_assumes(j, runtime_close(j->forkfd) != -1);
	j->forkfd = 0;
}
4266
/*
 * Create a job manager (bootstrap context).
 *
 * jm: parent manager, or NULL for a root manager.
 * requestorport: port of the requester; required for sub-bootstraps.
 * transfer_port: pre-existing bootstrap port to adopt, or MACH_PORT_NULL
 *     to check in / create one.
 * sflag: passed through to the session bootstrapper ("-s", single user).
 * name: session name; when NULL a name is derived from the port.
 *
 * Returns the new manager or NULL on failure (partially constructed
 * state is torn down via jobmgr_remove()).
 */
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name)
{
	mach_msg_size_t mxmsgsz;
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	/* The kqueue callback dispatch relies on the callback being first. */
	launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (jm && requestorport == MACH_PORT_NULL) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	/* 128 bytes reserved for a derived name when none was supplied. */
	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : 128));

	if (jmr == NULL) {
		return NULL;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	/* Watch the requester port so we can tear down when the requester dies. */
	if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && getpid() != 1) {
		/* Per-user launchd: check in with the system launchd for our port. */
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = strtol(trusted_fd, NULL, 10);

			if ((dfd = dup(lfd)) >= 0) {
				jobmgr_assumes(jmr, runtime_close(dfd) != -1);
				jobmgr_assumes(jmr, runtime_close(lfd) != -1);
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		/* cut off the Libc cache, we don't want to deadlock against ourself */
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);

		/* We set this explicitly as we start each child */
		launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
	} else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (!name) {
		sprintf(jmr->name, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	/* Only the root manager owns the signal and filesystem kevents. */
	if (!jm) {
		jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
	}

	if (name) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	/* A "weird bootstrap" session wires up the request port itself later. */
	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	}

	if (jmr->parentmgr) {
		runtime_add_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
	}
	return NULL;
}
4379
/*
 * Create the "com.apple.launchctl.<session>" bootstrapper job that runs
 * launchctl to load the session's configured jobs. Returns the job, or
 * NULL if job_new() failed.
 */
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
	if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || getuid())) {
		char buf[100];

		/* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
		bootstrapper->weird_bootstrap = true;
		jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	}

	jm->session_initialized = true;

	return bootstrapper;
}
4403
/*
 * A receive right died somewhere: purge every reference to the port from
 * this manager and its descendants. Returns the manager, or the result
 * of jobmgr_shutdown() when the dead port was this manager's own
 * requester (or the inherited bootstrap of the root manager).
 */
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right
	 * to a port we already have open, the Mach kernel gives us the same
	 * port number back and increments an reference count associated with
	 * the port. This forces us, when discovering that a receive right at
	 * the other end has been deleted, to wander all of our objects to see
	 * what weird places clients might have handed us the same send right
	 * to use.
	 */

	if (jm == root_jobmgr) {
		if (port == inherited_bootstrap_port) {
			launchd_assumes(launchd_mport_deallocate(port) == KERN_SUCCESS);
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		/* The port hash is global, so it is only swept from the root. */
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: 0x%x", port);
		return jobmgr_shutdown(jm);
	}

	return jm;
}
4446
4447 struct machservice *
4448 jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
4449 {
4450 struct machservice *ms;
4451
4452 if (target_pid) {
4453 jobmgr_assumes(jm, !check_parent);
4454 }
4455
4456 LIST_FOREACH(ms, &jm->ms_hash[hash_ms(name)], name_hash_sle) {
4457 if ((target_pid && ms->per_pid && ms->job->p == target_pid) || (!target_pid && !ms->per_pid)) {
4458 if (strcmp(name, ms->name) == 0) {
4459 return ms;
4460 }
4461 }
4462 }
4463
4464 if (jm->parentmgr == NULL) {
4465 return NULL;
4466 }
4467
4468 if (!check_parent) {
4469 return NULL;
4470 }
4471
4472 return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
4473 }
4474
/* Accessor: the Mach port backing this service. */
mach_port_t
machservice_port(struct machservice *ms)
{
	return ms->port;
}
4480
/* Accessor: the job that owns (advertises) this service. */
job_t
machservice_job(struct machservice *ms)
{
	return ms->job;
}
4486
/* Accessor: whether this service is hidden from ordinary lookups. */
bool
machservice_hidden(struct machservice *ms)
{
	return ms->hide;
}
4492
/* Accessor: whether the service is currently checked out by a process. */
bool
machservice_active(struct machservice *ms)
{
	return ms->isActive;
}
4498
/* Accessor: the service's bootstrap name. */
const char *
machservice_name(struct machservice *ms)
{
	return ms->name;
}
4504
/*
 * Tear down a Mach service: release its port rights, detach it from every
 * global and per-job list, and free it.  port_died is purely informational
 * (it only affects the log message).
 */
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	if (ms->debug_on_close) {
		/* Debug aid: drop into the kernel debugger when this port closes. */
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
	}

	/* If we hold the receive right and nobody has it checked out, close it. */
	if (ms->recv && job_assumes(j, !ms->isActive)) {
		job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	}

	/* Drop our send-right reference. */
	job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);

	if (ms->port == the_exception_server) {
		the_exception_server = 0;
	}

	job_log(j, LOG_INFO, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	/* Unlink from the global special-ports list only if registered there. */
	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}

	/* Unlink from the owning job and both global hash tables. */
	SLIST_REMOVE(&j->machservices, ms, machservice, sle);
	LIST_REMOVE(ms, name_hash_sle);
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
4535
/*
 * Mark the service active (checked out) and ask the kernel to notify us
 * when the other side goes away: port-destroyed notifications when we hold
 * the receive right, dead-name notifications otherwise.
 */
void
machservice_request_notifications(struct machservice *ms)
{
	mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;

	ms->isActive = true;

	if (ms->recv) {
		which = MACH_NOTIFY_PORT_DESTROYED;
		/* Handing out the receive right counts as the job checking in. */
		job_checkin(ms->job);
	}

	job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
}
4550
/* Number of elements in a fixed-size array. */
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
/* One-past-the-end pointer for a fixed-size array. */
#define END_OF(x) (&(x)[NELEM(x)])

/*
 * Split a mach_init-style command string into a freshly allocated argv.
 *
 * Tokens are separated by whitespace; a token that begins with '"' extends
 * to the next '"'; backslash escapes the following character.  The result
 * is one heap allocation holding both the pointer vector and the string
 * data, so the caller releases everything with a single free().  Returns
 * NULL when the string contains no tokens or on allocation failure.
 *
 * Fixes over the previous revision: argv[] could be indexed one past its
 * end when the input held NELEM(argv) or more tokens; the token terminator
 * could be written one byte past args[]; and once args[] filled up, the
 * scanner stopped advancing and looped forever.  Overlong input is now
 * truncated instead of overflowing or hanging.
 */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		while (isspace(*cp))
			cp++;
		term = (*cp == '"') ? *cp++ : '\0';
		/* Reserve the final argv slot for the NULL terminator below. */
		if (nargs < NELEM(argv) - 1 && argp < END_OF(args)) {
			argv[nargs++] = argp;
		}
		while (*cp && (term ? *cp != term : !isspace(*cp))) {
			if (*cp == '\\') {
				cp++;
			}
			/* Keep one byte free for this token's terminator. */
			if (argp < END_OF(args) - 1) {
				*argp++ = *cp;
			}
			if (*cp) {
				cp++;
			}
		}
		if (argp < END_OF(args)) {
			*argp++ = '\0';
		}
	}
	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	/* One allocation: pointer vector followed by the packed strings. */
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!launchd_assumes(argv_ret != NULL)) {
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
4603
/* Record that the job has checked in with launchd (e.g. via bootstrap_check_in). */
void
job_checkin(job_t j)
{
	j->checkedin = true;
}
4609
/*
 * Handle a port-destroyed notification: the kernel has returned the
 * receive right for a service port we previously handed out.  Returns
 * false if the port doesn't correspond to a receive-right machservice
 * we track.
 */
bool
job_ack_port_destruction(mach_port_t p)
{
	struct machservice *ms;

	/* Find the matching service; only ones we hold the receive right for. */
	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->recv && (ms->port == p)) {
			break;
		}
	}

	if (!ms) {
		return false;
	}

	/* The service is no longer checked out by a running process. */
	ms->isActive = false;

	if (ms->reset) {
		/* This service wants a fresh port after each use. */
		machservice_resetport(ms->job, ms);
	}

	job_log(ms->job, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
	job_dispatch(ms->job, false);

	/* The owning job may now be idle; give GC a chance to reap. */
	root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);

	return true;
}
4638
/*
 * Handle a no-senders notification on the job's privileged bootstrap port:
 * no clients hold a send right any longer, so close the receive right and
 * re-dispatch the job.
 */
void
job_ack_no_senders(job_t j)
{
	j->priv_port_has_senders = false;

	job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	j->j_port = 0;

	job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");

	job_dispatch(j, false);
}
4651
/*
 * Return the job manager (bootstrap) that owns this job.  Every job is
 * expected to have a manager; NULL is returned only if that invariant is
 * violated (which job_assumes() logs).
 */
jobmgr_t
job_get_bs(job_t j)
{
	if (job_assumes(j, j->mgr != NULL)) {
		return j->mgr;
	}

	return NULL;
}
4661
/* Accessor: whether this job is an anonymous (externally spawned) process. */
bool
job_is_anonymous(job_t j)
{
	return j->anonymous;
}
4667
/*
 * Shutdown-hang debugging aid: run the 'sample' tool against this job and
 * append the resulting report to the console.  No-op unless
 * debug_shutdown_hangs is set.  Intentionally blocks launchd until
 * 'sample' exits.
 */
void
job_force_sampletool(job_t j)
{
	struct stat sb;
	char logfile[PATH_MAX];
	char pidstr[100];
	char *sample_args[] = { "sample", pidstr, "1", "-mayDie", "-file", logfile, NULL };
	char *contents = NULL;
	int logfile_fd = -1;
	int console_fd = -1;
	int wstatus;
	pid_t sp;

	if (!debug_shutdown_hangs) {
		return;
	}

	snprintf(pidstr, sizeof(pidstr), "%u", j->p);
	snprintf(logfile, sizeof(logfile), SHUTDOWN_LOG_DIR "/%s-%u.sample.txt", j->label, j->p);

	/* Remove any stale report from a previous run. */
	if (!job_assumes(j, unlink(logfile) != -1 || errno == ENOENT)) {
		goto out;
	}

	/*
	 * This will stall launchd for as long as the 'sample' tool runs.
	 *
	 * We didn't give the 'sample' tool a bootstrap port, so it therefore
	 * can't deadlock against launchd.
	 */
	if (!job_assumes(j, (errno = posix_spawnp(&sp, sample_args[0], NULL, NULL, sample_args, environ)) == 0)) {
		goto out;
	}

	job_log(j, LOG_DEBUG, "Waiting for 'sample' to finish.");

	if (!job_assumes(j, waitpid(sp, &wstatus, 0) != -1)) {
		goto out;
	}

	/*
	 * This won't work if the VFS or filesystems are sick:
	 * sync();
	 */

	/* Only publish the report if 'sample' exited cleanly. */
	if (!job_assumes(j, WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0)) {
		goto out;
	}

	if (!job_assumes(j, (logfile_fd = open(logfile, O_RDONLY|O_NOCTTY)) != -1)) {
		goto out;
	}

	if (!job_assumes(j, (console_fd = open(_PATH_CONSOLE, O_WRONLY|O_APPEND|O_NOCTTY)) != -1)) {
		goto out;
	}

	if (!job_assumes(j, fstat(logfile_fd, &sb) != -1)) {
		goto out;
	}

	/* Slurp the whole report into memory, then copy it to the console. */
	contents = malloc(sb.st_size);

	if (!job_assumes(j, contents != NULL)) {
		goto out;
	}

	if (!job_assumes(j, read(logfile_fd, contents, sb.st_size) == sb.st_size)) {
		goto out;
	}

	job_assumes(j, write(console_fd, contents, sb.st_size) == sb.st_size);

out:
	/* Unified cleanup for every early-exit path above. */
	if (contents) {
		free(contents);
	}

	if (logfile_fd != -1) {
		job_assumes(j, runtime_fsync(logfile_fd) != -1);
		job_assumes(j, runtime_close(logfile_fd) != -1);
	}

	if (console_fd != -1) {
		job_assumes(j, runtime_close(console_fd) != -1);
	}

	job_log(j, LOG_DEBUG, "Finished sampling.");
}
4757
4758 bool
4759 semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
4760 {
4761 struct semaphoreitem *si;
4762 size_t alloc_sz = sizeof(struct semaphoreitem);
4763
4764 if (what) {
4765 alloc_sz += strlen(what) + 1;
4766 }
4767
4768 if (!job_assumes(j, si = calloc(1, alloc_sz))) {
4769 return false;
4770 }
4771
4772 si->fd = -1;
4773 si->why = why;
4774
4775 if (what) {
4776 strcpy(si->what, what);
4777 }
4778
4779 SLIST_INSERT_HEAD(&j->semaphores, si, sle);
4780
4781 semaphoreitem_runtime_mod_ref(si, true);
4782
4783 return true;
4784 }
4785
4786 void
4787 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
4788 {
4789 /*
4790 * External events need to be tracked.
4791 * Internal events do NOT need to be tracked.
4792 */
4793
4794 switch (si->why) {
4795 case SUCCESSFUL_EXIT:
4796 case FAILED_EXIT:
4797 case OTHER_JOB_ENABLED:
4798 case OTHER_JOB_DISABLED:
4799 case OTHER_JOB_ACTIVE:
4800 case OTHER_JOB_INACTIVE:
4801 return;
4802 default:
4803 break;
4804 }
4805
4806 if (add) {
4807 runtime_add_ref();
4808 } else {
4809 runtime_del_ref();
4810 }
4811 }
4812
/*
 * Detach a KeepAlive condition from its job, drop its runtime reference,
 * close any file descriptor it holds (path-watch conditions), and free it.
 */
void
semaphoreitem_delete(job_t j, struct semaphoreitem *si)
{
	semaphoreitem_runtime_mod_ref(si, false);

	SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);

	if (si->fd != -1) {
		job_assumes(j, runtime_close(si->fd) != -1);
	}

	free(si);
}
4826
/*
 * Dictionary-iteration callback for KeepAlive sub-dictionaries: each entry
 * maps a name (path or job label) to a boolean choosing between the two
 * reasons the caller provided in the context.
 */
void
semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context *sdic = context;
	semaphore_reason_t why;

	why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;

	semaphoreitem_new(sdic->j, why, key);
}
4837
/*
 * Iteration callback over the job's KeepAlive dictionary.  Boolean keys
 * become single conditions; dictionary keys (PathState, OtherJobActive,
 * OtherJobEnabled) are expanded entry-by-entry via
 * semaphoreitem_setup_dict_iter().  Unrecognized keys/types are logged via
 * job_assumes(j, false) and skipped.
 */
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			/* Exit-status conditions imply an initial launch. */
			j->start_pending = true;
		} else {
			job_assumes(j, false);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
			sdic.why_true = PATH_EXISTS;
			sdic.why_false = PATH_MISSING;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			job_assumes(j, false);
			break;
		}

		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		job_assumes(j, false);
		break;
	}
}
4880
4881 void
4882 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
4883 {
4884 jobmgr_t jmi, jmn;
4885 job_t ji, jn;
4886
4887
4888 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
4889 jobmgr_dispatch_all_semaphores(jmi);
4890 }
4891
4892 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
4893 if (!SLIST_EMPTY(&ji->semaphores)) {
4894 job_dispatch(ji, false);
4895 }
4896 }
4897 }
4898
/*
 * Compute the next absolute time matching the crontab-style fields, where
 * any of mon/mday/hour/min may be -1 as a wildcard.  Starts from "now"
 * rounded up to the next whole minute; if no match exists this calendar
 * year, restarts the search at January 1st of the following year.
 */
time_t
cronemu(int mon, int mday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	/* Let mktime() pick DST, and never fire within the current minute. */
	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
		/* No match this year: restart at Jan 1 00:00 of next year. */
		workingtm.tm_year++;
		workingtm.tm_mon = 0;
		workingtm.tm_mday = 1;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		mktime(&workingtm);
	}

	return mktime(&workingtm);
}
4923
/*
 * Compute the next absolute time falling on the given weekday (0-7, where
 * both 0 and 7 mean Sunday, as in crontab) at the given hour/min (-1 as a
 * wildcard).  Advances day by day until both the weekday and the
 * hour/minute constraints are satisfied.
 */
time_t
cronemu_wday(int wday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	/* Let mktime() pick DST, and never fire within the current minute. */
	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	/* crontab convention: 7 is also Sunday. */
	if (wday == 7) {
		wday = 0;
	}

	while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
		workingtm.tm_mday++;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		mktime(&workingtm);	/* normalize and recompute tm_wday */
	}

	return mktime(&workingtm);
}
4950
/*
 * Advance *wtm to the next time matching month 'mon' (and the finer mday/
 * hour/min fields), or return false if no match exists within the current
 * year.  mon == -1 is a wildcard: scan month by month, using mktime()'s
 * normalization to detect rolling over into the next year.
 */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			/* If mktime() normalizes tm_mon (i.e. we passed December),
			 * the search has left the current year: give up. */
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		/* Commit the advanced time only on success. */
		*wtm = workingtm;
		return true;
	}

	if (mon < wtm->tm_mon) {
		/* Requested month already passed this year; caller must carry. */
		return false;
	}

	if (mon > wtm->tm_mon) {
		/* Jump forward to the first instant of the requested month. */
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
4986
/*
 * Advance *wtm to the next time matching day-of-month 'mday' (and the
 * finer hour/min fields), or return false if no match exists within the
 * current month.  mday == -1 is a wildcard: scan day by day, using
 * mktime()'s normalization to detect rolling into the next month.
 */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			/* mktime() normalizing tm_mday means we left the month. */
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		/* Commit the advanced time only on success. */
		*wtm = workingtm;
		return true;
	}

	if (mday < wtm->tm_mday) {
		/* Requested day already passed this month; caller must carry. */
		return false;
	}

	if (mday > wtm->tm_mday) {
		/* Jump forward to the first instant of the requested day. */
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
5020
/*
 * Advance *wtm to the next time matching hour 'hour' (and minute 'min'),
 * or return false if no match exists within the current day.  hour == -1
 * is a wildcard: scan hour by hour, using mktime()'s normalization to
 * detect rolling into the next day.
 */
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_min(&workingtm, min)) {
			workingtm.tm_hour++;
			workingtm.tm_min = 0;
			/* mktime() normalizing tm_hour means we left the day. */
			carrytest = workingtm.tm_hour;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_hour) {
				return false;
			}
		}
		/* Commit the advanced time only on success. */
		*wtm = workingtm;
		return true;
	}

	if (hour < wtm->tm_hour) {
		/* Requested hour already passed today; caller must carry. */
		return false;
	}

	if (hour > wtm->tm_hour) {
		/* Jump forward to the first minute of the requested hour. */
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
5052
/*
 * Advance *wtm to minute 'min' within the current hour.  min == -1 is a
 * wildcard (any minute matches).  Returns false when the requested minute
 * has already passed this hour, in which case the caller must carry into
 * the next hour.
 */
bool
cronemu_min(struct tm *wtm, int min)
{
	if (min == -1) {
		return true;
	}

	if (min < wtm->tm_min) {
		return false;
	}

	/* min >= tm_min: jump to it (a no-op when already equal). */
	wtm->tm_min = min;

	return true;
}
5070
/*
 * MIG handler: create an on-demand mach_init-style server from a command
 * line.  The effective UID the server will run as is clamped according to
 * who is asking and whether we are PID 1.  On success, returns the new
 * job's privileged bootstrap port through *server_portp.
 */
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	struct ldcred ldc;
	job_t js;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

#define LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
	/* XXX - This code should go away once the per session launchd is integrated with the rest of the system */
#ifdef LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
	if (getpid() == 1) {
		/* Non-root callers may only create servers running as themselves. */
		if (ldc.euid && server_uid && (ldc.euid != server_uid)) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": Will run as UID %d, not UID %d as they told us to",
					server_cmd, ldc.euid, server_uid);
			server_uid = ldc.euid;
		}
	} else
#endif
	if (getuid()) {
		/* We aren't root, so we cannot change UID at all. */
		if (server_uid != getuid()) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; /* zero means "do nothing" */
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (js == NULL) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
5113
/*
 * MIG handler: deliver a signal to another job by label.  The magic
 * "signal" VPROC_MAGIC_UNLOAD_SIGNAL instead removes the job, optionally
 * blocking the MIG reply (via srp) until the removed job's process has
 * actually exited.  Only root or our own UID may call this.
 */
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	struct ldcred ldc;
	job_t otherj;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	if (ldc.euid != 0 && ldc.euid != getuid()) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!(otherj = job_find(targetlabel))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		/* Only block the reply if the target has a live process. */
		bool do_block = otherj->p;

		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			/* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
			job_assumes(otherj, waiting4removal_new(otherj, srp));
			return MIG_NO_REPLY;
		} else {
			return 0;
		}
	} else if (otherj->p) {
		job_assumes(j, runtime_kill(otherj->p, sig) != -1);
	}

	return 0;
}
5157
/*
 * MIG handler: accept a batch of log messages forwarded from a per-user
 * launchd and feed them into our own logging.  Only per-user launchd jobs
 * are permitted to call this.
 */
kern_return_t
job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	struct ldcred ldc;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(j, j->per_user)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	runtime_get_caller_creds(&ldc);

	return runtime_log_forward(ldc.euid, ldc.egid, inval, invalCnt);
}
5175
/*
 * MIG handler: hand the accumulated log buffer to the caller (e.g. for
 * syslogd to drain).  Root only.
 */
kern_return_t
job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	struct ldcred ldc;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	if (ldc.euid) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return runtime_log_drain(srp, outval, outvalCnt);
}
5193
/*
 * MIG handler: get and/or set a "complex" (launch_data-packed) per-job or
 * global property in one round trip.  inkey selects what to set from
 * inval; outkey selects what to serialize into *outval.  Either may be 0
 * for "nothing".  On success the input buffer is consumed
 * (mig_deallocate'd); on failure the output buffer we allocated is
 * released and 1 is returned.
 */
kern_return_t
job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
		vm_offset_t inval, mach_msg_type_number_t invalCnt,
		vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	const char *action;
	launch_data_t input_obj, output_obj;
	size_t data_offset = 0;
	size_t packed_size;
	struct ldcred ldc;

	runtime_get_caller_creds(&ldc);

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Setting anything requires root or our own UID. */
	if (inkey && ldc.euid && ldc.euid != getuid()) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* A swap must target the same key in both directions. */
	if (inkey && outkey && !job_assumes(j, inkey == outkey)) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	/* Preallocate a generously sized output buffer for the packed data.
	 * NOTE(review): 20 MB is a magic upper bound — presumably "big enough
	 * for any job list"; confirm before changing. */
	*outvalCnt = 20 * 1024 * 1024;
	mig_allocate(outval, *outvalCnt);
	if (!job_assumes(j, *outval != 0)) {
		return 1;
	}

	if (invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		goto out_bad;
	}

	switch (outkey) {
	case VPROC_GSK_ENVIRONMENT:
		if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
			goto out_bad;
		}
		jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
		if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ALLJOBS:
		if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
			goto out_bad;
		}
		/* Strip file descriptors; they can't cross this boundary. */
		ipc_revoke_fds(output_obj);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case 0:
		/* Nothing requested: return the preallocated buffer. */
		mig_deallocate(*outval, *outvalCnt);
		*outval = 0;
		*outvalCnt = 0;
		break;
	default:
		goto out_bad;
	}

	if (invalCnt) switch (inkey) {
	case VPROC_GSK_ENVIRONMENT:
		/* Setting the environment this way is not implemented. */
		job_assumes(j, false);
		break;
	case 0:
		break;
	default:
		goto out_bad;
	}

	/* Success: consume the caller's input buffer. */
	mig_deallocate(inval, invalCnt);

	return 0;

out_bad:
	if (*outval) {
		mig_deallocate(*outval, *outvalCnt);
	}
	return 1;
}
5290
/*
 * MIG handler: get and/or set an integer-valued per-job or global property
 * in one round trip.  outkey selects what to read into *outval; inkey
 * selects what to write from inval.  Either may be 0 for "nothing".
 * Returns 0 on success, 1 on an unknown key or invalid value.
 */
kern_return_t
job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
{
	const char *action;
	kern_return_t kr = 0;
	struct ldcred ldc;
	int oldmask;

	runtime_get_caller_creds(&ldc);

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Setting anything requires root or our own UID. */
	if (inkey && ldc.euid && ldc.euid != getuid()) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* A swap must target the same key in both directions. */
	if (inkey && outkey && !job_assumes(j, inkey == outkey)) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	/* Read phase. */
	switch (outkey) {
	case VPROC_GSK_LAST_EXIT_STATUS:
		*outval = j->last_exit_status;
		break;
	case VPROC_GSK_MGR_UID:
		*outval = getuid();
		break;
	case VPROC_GSK_MGR_PID:
		*outval = getpid();
		break;
	case VPROC_GSK_IS_MANAGED:
		*outval = j->anonymous ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		*outval = !j->ondemand;
		break;
	case VPROC_GSK_START_INTERVAL:
		*outval = j->start_interval;
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		*outval = j->timeout;
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		*outval = j->exit_timeout;
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		/* There is no "get" API; probe by setting and restoring. */
		oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
		*outval = oldmask;
		runtime_setlogmask(oldmask);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		/* Same probe-and-restore trick for umask(). */
		oldmask = umask(0);
		*outval = oldmask;
		umask(oldmask);
		break;
	case 0:
		*outval = 0;
		break;
	default:
		kr = 1;
		break;
	}

	/* Write phase. */
	switch (inkey) {
	case VPROC_GSK_GLOBAL_ON_DEMAND:
		kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		j->ondemand = !inval;
		break;
	case VPROC_GSK_START_INTERVAL:
		if ((uint64_t)inval > UINT32_MAX) {
			kr = 1;
		} else if (inval) {
			/* Take a runtime ref on first use; otherwise replace the timer. */
			if (j->start_interval == 0) {
				runtime_add_ref();
			} else {
				/* Workaround 5225889 */
				job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
			}
			j->start_interval = inval;
			job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
		} else if (j->start_interval) {
			/* Clearing the interval: remove the timer and drop our ref. */
			job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
			if (j->start_interval != 0) {
				runtime_del_ref();
			}
			j->start_interval = 0;
		}
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		if ((unsigned int)inval > 0) {
			j->timeout = inval;
		}
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		if ((unsigned int)inval > 0) {
			j->exit_timeout = inval;
		}
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		runtime_setlogmask(inval);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		umask(inval);
		break;
	case 0:
		break;
	default:
		kr = 1;
		break;
	}

	return kr;
}
5418
/*
 * MIG handler: called by a freshly forked child before exec.  Install the
 * exception port and all registered special ports on the child task, then
 * release our reference on the task port.
 */
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task)
{
	struct machservice *ms;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	job_setup_exception_port(j, child_task);

	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			/* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
			continue;
		}

		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);

		if (errno) {
			int desired_log_level = LOG_ERR;

			/* Failures against anonymous jobs are expected noise. */
			if (j->anonymous) {
				/* 5338127 */

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* Drop the send right the MIG call handed us. */
	job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);

	return 0;
}
5461
/*
 * MIG handler: initiate a system reboot/shutdown with the given flags.
 * Only root calling into PID 1 may do this.  For the audit trail, walk the
 * caller's process ancestry via sysctl and log the whole chain.
 */
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct kinfo_proc kp;
	struct ldcred ldc;
	pid_t pid_to_log;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (getpid() != 1) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	runtime_get_caller_creds(&ldc);

	if (ldc.euid) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Walk parent PIDs from the caller up to PID 0, appending
	 * "name[pid] ->" entries to the log string. */
	for (pid_to_log = ldc.pid; pid_to_log; pid_to_log = kp.kp_eproc.e_ppid) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid_to_log };
		size_t who_offset, len = sizeof(kp);

		if (!job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
			return 1;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", kp.kp_proc.p_comm, pid_to_log, kp.kp_eproc.e_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;

	launchd_shutdown();

	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);

	return 0;
}
5505
5506 kern_return_t
5507 job_mig_getsocket(job_t j, name_t spr)
5508 {
5509 if (!launchd_assumes(j != NULL)) {
5510 return BOOTSTRAP_NO_MEMORY;
5511 }
5512
5513 ipc_server_init();
5514
5515 if (!sockpath) {
5516 return BOOTSTRAP_NO_MEMORY;
5517 }
5518
5519 strncpy(spr, sockpath, sizeof(name_t));
5520
5521 return BOOTSTRAP_SUCCESS;
5522 }
5523
/*
 * MIG handler: log a message on behalf of the calling job at priority
 * 'pri'.  A non-zero 'err' is treated as an errno value and appended via
 * the error-logging path.
 */
kern_return_t
job_mig_log(job_t j, int pri, int err, logmsg_t msg)
{
	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if ((errno = err)) {
		job_log_error(j, pri, "%s", msg);
	} else {
		job_log(j, pri, "%s", msg);
	}

	return 0;
}
5539
/*
 * Lazily create the root Background session job manager.  Only meaningful
 * in PID 1; a no-op if the manager already exists or creation fails.
 */
void
ensure_root_bkgd_setup(void)
{
	if (background_jobmgr || getpid() != 1) {
		return;
	}

	if (!jobmgr_assumes(root_jobmgr, (background_jobmgr = jobmgr_new(root_jobmgr, mach_task_self(), MACH_PORT_NULL, false, VPROCMGR_SESSION_BACKGROUND)) != NULL)) {
		return;
	}

	/* The background session has no requestor port; it lives as long as
	 * we do, so just keep a send right to its bootstrap port. */
	background_jobmgr->req_port = 0;
	jobmgr_assumes(root_jobmgr, launchd_mport_make_send(background_jobmgr->jm_port) == KERN_SUCCESS);
}
5554
/*
 * MIG handler: find (or lazily create) the per-user launchd for a UID and
 * return its bootstrap port.  PID 1 only.  Non-root callers are always
 * routed to their own UID's context regardless of which_user; UID 0 gets
 * the root Background session instead of a per-user launchd.
 */
kern_return_t
job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
{
	struct ldcred ldc;
	job_t ji;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Looking up per user launchd for UID: %u", which_user);

	runtime_get_caller_creds(&ldc);

	if (getpid() != 1) {
		job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Non-root callers may only look up their own context. */
	if (ldc.euid || ldc.uid) {
		which_user = ldc.euid ? ldc.euid : ldc.uid;
	}

	*up_cont = MACH_PORT_NULL;

	if (which_user == 0) {
		/* Root doesn't get a per-user launchd; use the Background session. */
		ensure_root_bkgd_setup();

		*up_cont = background_jobmgr->jm_port;

		return 0;
	}

	/* Look for an existing per-user launchd job for this UID. */
	LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
		if (!ji->per_user) {
			continue;
		}
		if (ji->mach_uid != which_user) {
			continue;
		}
		if (SLIST_EMPTY(&ji->machservices)) {
			continue;
		}
		if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
			continue;
		}
		break;
	}

	if (ji == NULL) {
		/* None found: create one and give it a hidden bootstrap service. */
		struct machservice *ms;
		char lbuf[1024];

		job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);

		sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);

		ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);

		if (ji == NULL) {
			return BOOTSTRAP_NO_MEMORY;
		}

		ji->mach_uid = which_user;
		ji->per_user = true;

		if ((ms = machservice_new(ji, lbuf, up_cont, false)) == NULL) {
			job_remove(ji);
			return BOOTSTRAP_NO_MEMORY;
		}

		ms->per_user_hack = true;
		ms->hide = true;

		ji = job_dispatch(ji, false);
	} else {
		job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
	}

	if (job_assumes(j, ji != NULL)) {
		*up_cont = machservice_port(SLIST_FIRST(&ji->machservices));
	}

	return 0;
}
5640
/*
 * MIG handler: bootstrap_check_in().  Hand the receive right for one of
 * the caller's own advertised services back to the owning job, marking the
 * service active.  Fails if the service is unknown, owned by a different
 * job, or already checked out.
 */
kern_return_t
job_mig_check_in(job_t j, name_t servicename, mach_port_t *serviceportp)
{
	/* Rate-limit the "not privileged" log to one message per offending PID. */
	static pid_t last_warned_pid = 0;
	struct machservice *ms;
	struct ldcred ldc;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);

	if (ms == NULL) {
		job_log(j, LOG_DEBUG, "Check-in of Mach service failed. Unknown: %s", servicename);
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}
	if (machservice_job(ms) != j) {
		/* Only the owning job may check a service in. */
		if (last_warned_pid != ldc.pid) {
			job_log(j, LOG_NOTICE, "Check-in of Mach service failed. PID %d is not privileged: %s",
					ldc.pid, servicename);
			last_warned_pid = ldc.pid;
		}
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (machservice_active(ms)) {
		job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
		return BOOTSTRAP_SERVICE_ACTIVE;
	}

	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
5680
/*
 * MIG handler: bootstrap_register() and its per-PID variant.  Register (or
 * replace) a Mach service owned by the calling job using a port the caller
 * supplies.  Passing MACH_PORT_NULL deregisters.  A job may only replace
 * its own, inactive registrations.
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred ldc;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

#if 0
	job_log(j, LOG_APPLEONLY, "bootstrap_register() is deprecated. Service: %s", servicename);
#endif

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (j->anonymous && job_get_bs(j)->parentmgr == NULL && ldc.uid != 0 && ldc.uid != getuid() && ldc.uid != 92) {
		if (getpid() == 1) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc.pid : 0);

	if (ms) {
		/* Replacing an existing registration: must be ours and inactive. */
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		job_checkin(j);
		machservice_delete(j, ms, false);
	}

	if (serviceport != MACH_PORT_NULL) {
		if ((ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	return BOOTSTRAP_SUCCESS;
}
5736
kern_return_t
job_mig_look_up2(job_t j, name_t servicename, mach_port_t *serviceportp, mach_msg_type_name_t *ptype, pid_t target_pid, uint64_t flags)
{
	/* MIG server routine backing bootstrap_look_up2(): resolve servicename
	 * to a send right for the caller, or redirect/forward the request.
	 */
	struct machservice *ms;
	struct ldcred ldc;
	kern_return_t kr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	/* PID 1 only: an anonymous job in the root bootstrap whose caller is
	 * entirely non-root gets redirected to its per-user launchd. */
	if (getpid() == 1 && j->anonymous && job_get_bs(j)->parentmgr == NULL && ldc.uid != 0 && ldc.euid != 0) {
		return VPROC_ERR_TRY_PER_USER;
	}

	/* Per-job Mach service lookup policy (see mspolicy_check()). */
	if (!mspolicy_check(j, servicename, flags & BOOTSTRAP_PER_PID_SERVICE)) {
		job_log(j, LOG_NOTICE, "Policy denied Mach service lookup: %s", servicename);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Per-PID services are keyed on the target PID; otherwise search with
	 * the third argument true — presumably a recursive walk up the job
	 * manager chain; confirm against jobmgr_lookup_service(). */
	if (flags & BOOTSTRAP_PER_PID_SERVICE) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
	}

	/* Hidden services are only visible while their job is active, and the
	 * per-user bootstrap placeholder is never handed out directly. */
	if (ms && machservice_hidden(ms) && !job_active(machservice_job(ms))) {
		ms = NULL;
	} else if (ms && ms->per_user_hack) {
		ms = NULL;
	}

	if (ms) {
		launchd_assumes(machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
#if 0
		/* After Leopard ships, we should enable this */
		if (j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user) {
			ms->bad_perf_cnt++;
			job_log(j, LOG_APPLEONLY, "Performance opportunity: Number of bootstrap_lookup(... \"%s\" ...) calls that should have been cached: %llu",
					servicename, ms->bad_perf_cnt);
		}
		j->lastlookup = ms;
		j->lastlookup_gennum = ms->gen_num;
#endif
		/* Hand the caller a copy of the service's send right. */
		*serviceportp = machservice_port(ms);
		*ptype = MACH_MSG_TYPE_COPY_SEND;
		kr = BOOTSTRAP_SUCCESS;
	} else if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && (inherited_bootstrap_port != MACH_PORT_NULL)) {
		/* Not found locally: forward the lookup to the bootstrap port we
		 * inherited from our parent. */
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		*ptype = MACH_MSG_TYPE_MOVE_SEND;
		kr = bootstrap_look_up(inherited_bootstrap_port, servicename, serviceportp);
	} else if (getpid() == 1 && j->anonymous && ldc.euid >= 500 && strcasecmp(job_get_bs(j)->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/*
		 * 5240036 Should start background session when a lookup of CCacheServer occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
		 * If we find a EUID that isn't root, we force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
5806
5807 kern_return_t
5808 job_mig_parent(job_t j, mach_port_t *parentport, mach_msg_type_name_t *pptype)
5809 {
5810 if (!launchd_assumes(j != NULL)) {
5811 return BOOTSTRAP_NO_MEMORY;
5812 }
5813
5814 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
5815 jobmgr_t jm = j->mgr;
5816
5817 *pptype = MACH_MSG_TYPE_MAKE_SEND;
5818
5819 if (jobmgr_parent(jm)) {
5820 *parentport = jobmgr_parent(jm)->jm_port;
5821 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
5822 *parentport = jm->jm_port;
5823 } else {
5824 *pptype = MACH_MSG_TYPE_COPY_SEND;
5825 *parentport = inherited_bootstrap_port;
5826 }
5827 return BOOTSTRAP_SUCCESS;
5828 }
5829
kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt,
		bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt)
{
	/* MIG server routine backing bootstrap_info(): return parallel arrays of
	 * every non-per-PID Mach service name in this job manager plus each
	 * service's status. Arrays are MIG-allocated; on success the MIG runtime
	 * owns and deallocates them after the reply is sent.
	 */
	name_array_t service_names = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	struct machservice *ms;
	jobmgr_t jm;
	job_t ji;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	jm = j->mgr;

	/* First pass: count matching services so the arrays can be sized. */
	LIST_FOREACH(ji, &jm->jobs, sle) {
		SLIST_FOREACH(ms, &ji->machservices, sle) {
			if (!ms->per_pid) {
				cnt++;
			}
		}
	}

	/* No services: report empty (NULL) arrays, which is still a success. */
	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!launchd_assumes(service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!launchd_assumes(service_actives != NULL)) {
		goto out_bad;
	}

	/* Second pass: fill both arrays in the same iteration order. */
	LIST_FOREACH(ji, &jm->jobs, sle) {
		SLIST_FOREACH(ms, &ji->machservices, sle) {
			if (!ms->per_pid) {
				strlcpy(service_names[cnt2], machservice_name(ms), sizeof(service_names[0]));
				service_actives[cnt2] = machservice_status(ms);
				cnt2++;
			}
		}
	}

	/* The list must not have changed between the two passes. */
	launchd_assumes(cnt == cnt2);

out:
	*servicenamesp = service_names;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
5898
void
job_reparent_hack(job_t j, const char *where)
{
	/* Move job j into the job manager (session) named "where", relinking
	 * its Mach services into the destination manager's name hash.
	 */
	jobmgr_t jmi, jmi2;

	ensure_root_bkgd_setup();

	/* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
	if (where == NULL) {
		if (strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
			where = VPROCMGR_SESSION_LOGINWINDOW;
		} else {
			where = VPROCMGR_SESSION_AQUA;
		}
	}

	/* Already in the requested session: nothing to do. */
	if (strcasecmp(j->mgr->name, where) == 0) {
		return;
	}

	/* Search the root manager's direct submanagers; in PID 1, also look
	 * one level deeper under the Background session. */
	SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
		if (jmi->shutting_down) {
			continue;
		} else if (strcasecmp(jmi->name, where) == 0) {
			goto jm_found;
		} else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && getpid() == 1) {
			SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
				if (strcasecmp(jmi2->name, where) == 0) {
					jmi = jmi2;
					goto jm_found;
				}
			}
		}
	}

jm_found:
	/* jmi is NULL here when the loop ran to completion without a match. */
	if (job_assumes(j, jmi != NULL)) {
		struct machservice *msi;

		/* The service-name hash belongs to the job manager: unhash every
		 * service, move the job, then rehash into the new manager. */
		SLIST_FOREACH(msi, &j->machservices, sle) {
			LIST_REMOVE(msi, name_hash_sle);
		}

		LIST_REMOVE(j, sle);
		LIST_INSERT_HEAD(&jmi->jobs, j, sle);
		j->mgr = jmi;

		SLIST_FOREACH(msi, &j->machservices, sle) {
			LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}
}
5951
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type)
{
	/* MIG server routine with two modes:
	 *
	 *  1. target_subset == MACH_PORT_NULL: the caller is naming/initializing
	 *     this session (with special cases for LoginWindow, Aqua, Background
	 *     and StandardIO).
	 *  2. otherwise: grab the Mach sub-bootstrap behind target_subset from
	 *     the remote launchd and recreate its services under a new child
	 *     job manager here.
	 */
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred ldc;
	jobmgr_t jmr = NULL;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	if (target_subset == MACH_PORT_NULL) {
		job_t j2;

		if (j->mgr->session_initialized) {
			if (ldc.uid == 0 && getpid() == 1) {
				if (strcmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
					job_t ji, jn;

					/* A replacement LoginWindow session is arriving:
					 * remove this one's managed (non-anonymous) jobs and
					 * park the old manager under the Background session. */
					LIST_FOREACH_SAFE(ji, &j->mgr->jobs, sle, jn) {
						if (!ji->anonymous) {
							job_remove(ji);
						}
					}

					ensure_root_bkgd_setup();

					SLIST_REMOVE(&j->mgr->parentmgr->submgrs, j->mgr, jobmgr_s, sle);
					j->mgr->parentmgr = background_jobmgr;
					SLIST_INSERT_HEAD(&j->mgr->parentmgr->submgrs, j->mgr, sle);

					/*
					 * We really should wait for all the jobs to die before proceeding. See 5351245 for more info.
					 *
					 * We have hacked around this in job_find() by ignoring jobs that are pending removal.
					 */

				} else if (strcmp(j->mgr->name, VPROCMGR_SESSION_AQUA) == 0) {
					job_log(j, LOG_DEBUG, "Tried to move the Aqua session.");
					return 0;
				} else if (strcmp(j->mgr->name, VPROCMGR_SESSION_BACKGROUND) == 0) {
					job_log(j, LOG_DEBUG, "Tried to move the background session.");
					return 0;
				} else {
					job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
					kr = BOOTSTRAP_NOT_PRIVILEGED;
					goto out;
				}
			} else {
				job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
				kr = BOOTSTRAP_NOT_PRIVILEGED;
				goto out;
			}
		} else if (ldc.uid == 0 && getpid() == 1 && strcmp(session_type, VPROCMGR_SESSION_STANDARDIO) == 0) {
			/* Root-initiated StandardIO sessions get reparented under the
			 * Background session. */
			ensure_root_bkgd_setup();

			SLIST_REMOVE(&j->mgr->parentmgr->submgrs, j->mgr, jobmgr_s, sle);
			j->mgr->parentmgr = background_jobmgr;
			SLIST_INSERT_HEAD(&j->mgr->parentmgr->submgrs, j->mgr, sle);
		} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
			jobmgr_t jmi;

			/*
			 * 5330262
			 *
			 * We're working around LoginWindow and the WindowServer.
			 *
			 * In practice, there is only one LoginWindow session. Unfortunately, for certain
			 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
			 * spawns a replacement loginwindow session before cleaning up the previous one.
			 *
			 * We're going to use the creation of a new LoginWindow context as a clue that the
			 * previous LoginWindow context is on the way out and therefore we should just
			 * kick-start the shutdown of it.
			 */

			SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
				if (jmi->shutting_down) {
					continue;
				} else if (strcasecmp(jmi->name, session_type) == 0) {
					jobmgr_shutdown(jmi);
					break;
				}
			}
		}

		jobmgr_log(j->mgr, LOG_DEBUG, "Renaming to: %s", session_type);
		strcpy(j->mgr->name, session_type);

		if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
			job_assumes(j, job_dispatch(j2, true));
		}

		kr = 0;
		goto out;
	} else if (job_mig_intran2(root_jobmgr, target_subset, ldc.pid)) {
		/* The subset port resolves back into our own bootstrap tree. */
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	/* Ask the remote launchd to hand over the subset: its request port, its
	 * receive right, and one port per published service (l2l_ports). */
	kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);

	if (!job_assumes(j, kr == 0)) {
		goto out;
	}

	/* The metadata array and the port array must line up one-to-one. */
	launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	/* Recreate each transferred service under the new job manager, keyed to
	 * the job that owned it in the remote bootstrap (found by PID). */
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (!j_for_service) {
			/* The PID probably exited */
			job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
			continue;
		}

		if ((ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	if (kr == 0) {
		/* Success: we are done with the caller's subset port. */
		if (target_subset) {
			job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
		}
	} else if (jmr) {
		/* Failure after the new manager was created: tear it down. */
		jobmgr_shutdown(jmr);
	}

	return kr;
}
6125
6126 kern_return_t
6127 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
6128 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
6129 mach_port_array_t *portsp, unsigned int *ports_cnt)
6130 {
6131 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
6132 mach_port_array_t ports = NULL;
6133 unsigned int cnt = 0, cnt2 = 0;
6134 size_t packed_size;
6135 struct machservice *ms;
6136 jobmgr_t jm;
6137 job_t ji;
6138
6139 if (!launchd_assumes(j != NULL)) {
6140 return BOOTSTRAP_NO_MEMORY;
6141 }
6142
6143 jm = j->mgr;
6144
6145 if (getpid() != 1) {
6146 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
6147 return BOOTSTRAP_NOT_PRIVILEGED;
6148 } else if (jobmgr_parent(jm) == NULL) {
6149 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
6150 return BOOTSTRAP_NOT_PRIVILEGED;
6151 } else if (strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0) {
6152 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
6153 return BOOTSTRAP_NOT_PRIVILEGED;
6154 } else if (!j->anonymous) {
6155 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
6156 return BOOTSTRAP_NOT_PRIVILEGED;
6157 }
6158
6159 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
6160
6161 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
6162 if (!job_assumes(j, outdata_obj_array)) {
6163 goto out_bad;
6164 }
6165
6166 *outdataCnt = 20 * 1024 * 1024;
6167 mig_allocate(outdata, *outdataCnt);
6168 if (!job_assumes(j, *outdata != 0)) {
6169 return 1;
6170 }
6171
6172 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
6173 if (!ji->anonymous) {
6174 continue;
6175 }
6176 SLIST_FOREACH(ms, &ji->machservices, sle) {
6177 cnt++;
6178 }
6179 }
6180
6181 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
6182 if (!launchd_assumes(ports != NULL)) {
6183 goto out_bad;
6184 }
6185
6186 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
6187 if (!ji->anonymous) {
6188 continue;
6189 }
6190
6191 SLIST_FOREACH(ms, &ji->machservices, sle) {
6192 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
6193 job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
6194 } else {
6195 goto out_bad;
6196 }
6197
6198 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
6199 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
6200 } else {
6201 goto out_bad;
6202 }
6203
6204 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
6205 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
6206 } else {
6207 goto out_bad;
6208 }
6209
6210 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
6211 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
6212 } else {
6213 goto out_bad;
6214 }
6215
6216 ports[cnt2] = machservice_port(ms);
6217
6218 /* Increment the send right by one so we can shutdown the jobmgr cleanly */
6219 jobmgr_assumes(jm, (errno = mach_port_mod_refs(mach_task_self(), ports[cnt2], MACH_PORT_RIGHT_SEND, 1)) == 0);
6220 cnt2++;
6221 }
6222 }
6223
6224 launchd_assumes(cnt == cnt2);
6225
6226 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
6227 if (!job_assumes(j, packed_size != 0)) {
6228 goto out_bad;
6229 }
6230
6231 launch_data_free(outdata_obj_array);
6232
6233 *portsp = ports;
6234 *ports_cnt = cnt;
6235
6236 *reqport = jm->req_port;
6237 *rcvright = jm->jm_port;
6238
6239 jm->req_port = 0;
6240 jm->jm_port = 0;
6241
6242 workaround_5477111 = j;
6243
6244 jobmgr_shutdown(jm);
6245
6246 return BOOTSTRAP_SUCCESS;
6247
6248 out_bad:
6249 if (outdata_obj_array) {
6250 launch_data_free(outdata_obj_array);
6251 }
6252 if (*outdata) {
6253 mig_deallocate(*outdata, *outdataCnt);
6254 }
6255 if (ports) {
6256 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
6257 }
6258
6259 return BOOTSTRAP_NO_MEMORY;
6260 }
6261
6262 kern_return_t
6263 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
6264 {
6265 int bsdepth = 0;
6266 jobmgr_t jmr;
6267
6268 if (!launchd_assumes(j != NULL)) {
6269 return BOOTSTRAP_NO_MEMORY;
6270 }
6271
6272 jmr = j->mgr;
6273
6274 while ((jmr = jobmgr_parent(jmr)) != NULL) {
6275 bsdepth++;
6276 }
6277
6278 /* Since we use recursion, we need an artificial depth for subsets */
6279 if (bsdepth > 100) {
6280 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
6281 return BOOTSTRAP_NO_MEMORY;
6282 }
6283
6284 if ((jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, NULL)) == NULL) {
6285 if (requestorport == MACH_PORT_NULL) {
6286 return BOOTSTRAP_NOT_PRIVILEGED;
6287 }
6288 return BOOTSTRAP_NO_MEMORY;
6289 }
6290
6291 *subsetportp = jmr->jm_port;
6292 return BOOTSTRAP_SUCCESS;
6293 }
6294
6295 kern_return_t
6296 job_mig_create_service(job_t j, name_t servicename, mach_port_t *serviceportp)
6297 {
6298 struct machservice *ms;
6299
6300 if (!launchd_assumes(j != NULL)) {
6301 return BOOTSTRAP_NO_MEMORY;
6302 }
6303
6304 if (job_prog(j)[0] == '\0') {
6305 job_log(j, LOG_ERR, "Mach service creation requires a target server: %s", servicename);
6306 return BOOTSTRAP_NOT_PRIVILEGED;
6307 }
6308
6309 if (!j->legacy_mach_job) {
6310 job_log(j, LOG_ERR, "bootstrap_create_service() is only allowed against legacy Mach jobs: %s", servicename);
6311 return BOOTSTRAP_NOT_PRIVILEGED;
6312 }
6313
6314 ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
6315 if (ms) {
6316 job_log(j, LOG_DEBUG, "Mach service creation attempt for failed. Already exists: %s", servicename);
6317 return BOOTSTRAP_NAME_IN_USE;
6318 }
6319
6320 job_checkin(j);
6321
6322 *serviceportp = MACH_PORT_NULL;
6323 ms = machservice_new(j, servicename, serviceportp, false);
6324
6325 if (!launchd_assumes(ms != NULL)) {
6326 goto out_bad;
6327 }
6328
6329 return BOOTSTRAP_SUCCESS;
6330
6331 out_bad:
6332 launchd_assumes(launchd_mport_close_recv(*serviceportp) == KERN_SUCCESS);
6333 return BOOTSTRAP_NO_MEMORY;
6334 }
6335
kern_return_t
job_mig_wait(job_t j, mach_port_t srp, integer_t *waitstatus)
{
	/* MIG server routine: park the caller's reply port (srp) until the job
	 * exits, answering later with its wait status. All the work (and the
	 * eventual reply) happens in job_handle_mpm_wait().
	 */
	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
#if 0
	struct ldcred ldc;
	runtime_get_caller_creds(&ldc);
#endif
	return job_handle_mpm_wait(j, srp, waitstatus);
}
6348
6349 kern_return_t
6350 job_mig_uncork_fork(job_t j)
6351 {
6352 if (!launchd_assumes(j != NULL)) {
6353 return BOOTSTRAP_NO_MEMORY;
6354 }
6355
6356 if (!j->stall_before_exec) {
6357 job_log(j, LOG_WARNING, "Attempt to uncork a job that isn't in the middle of a fork().");
6358 return 1;
6359 }
6360
6361 job_uncork_fork(j);
6362 j->stall_before_exec = false;
6363 return 0;
6364 }
6365
6366 kern_return_t
6367 job_mig_set_service_policy(job_t j, pid_t target_pid, uint64_t flags, name_t target_service)
6368 {
6369 job_t target_j;
6370
6371 if (!launchd_assumes(j != NULL)) {
6372 return BOOTSTRAP_NO_MEMORY;
6373 }
6374
6375 if (!job_assumes(j, (target_j = jobmgr_find_by_pid(j->mgr, target_pid, true)) != NULL)) {
6376 return BOOTSTRAP_NO_MEMORY;
6377 }
6378
6379 if (SLIST_EMPTY(&j->mspolicies)) {
6380 job_log(j, LOG_DEBUG, "Setting policy on job \"%s\" for Mach service: %s", target_j->label, target_service);
6381 if (target_service[0]) {
6382 job_assumes(j, mspolicy_new(target_j, target_service, flags & BOOTSTRAP_ALLOW_LOOKUP, flags & BOOTSTRAP_PER_PID_SERVICE, false));
6383 } else {
6384 target_j->deny_unknown_mslookups = !(flags & BOOTSTRAP_ALLOW_LOOKUP);
6385 }
6386 } else {
6387 job_log(j, LOG_WARNING, "Jobs that have policies assigned to them may not set policies.");
6388 return BOOTSTRAP_NOT_PRIVILEGED;
6389 }
6390
6391 return 0;
6392 }
6393
kern_return_t
job_mig_spawn(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, pid_t *child_pid, mach_port_t *obsvr_port)
{
	/* MIG server routine backing vproc spawn: unpack a serialized job
	 * dictionary from the caller, import it as a transient job, dispatch it,
	 * and return the child PID plus an observer port.
	 */
	launch_data_t input_obj = NULL;
	size_t data_offset = 0;
	struct ldcred ldc;
	job_t jr;

	runtime_get_caller_creds(&ldc);

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* PID 1: callers that are fully non-root get punted to their own
	 * per-user launchd. */
	if (getpid() == 1 && ldc.euid && ldc.uid) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	if (!job_assumes(j, (input_obj = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	jr = jobmgr_import2(j->mgr, input_obj);

	if (!job_assumes(j, jr != NULL)) {
		switch (errno) {
		case EEXIST:
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	/* NULL session: job_reparent_hack() picks LoginWindow or Aqua itself. */
	job_reparent_hack(jr, NULL);

	if (getpid() == 1) {
		jr->mach_uid = ldc.uid;
	}

	/* Spawned jobs are transient: unload on exit, watch for pipe EOF,
	 * abandon the process group, and convert the wait-for-debugger request
	 * into a stall between fork() and exec() (see job_mig_uncork_fork). */
	jr->unload_at_exit = true;
	jr->wait4pipe_eof = true;
	jr->abandon_pg = true;
	jr->stall_before_exec = jr->wait4debugger;
	jr->wait4debugger = false;

	jr = job_dispatch(jr, true);

	if (!job_assumes(j, jr != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_assumes(jr, jr->p);

	if (!job_setup_machport(jr)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_INFO, "Spawned");

	*child_pid = jr->p;
	*obsvr_port = jr->j_port;

	/* NOTE(review): the out-of-line input buffer is only deallocated on
	 * this success path; the error returns appear to rely on the MIG
	 * runtime for cleanup — confirm against the generated server stub. */
	mig_deallocate(indata, indataCnt);

	return BOOTSTRAP_SUCCESS;
}
6466
6467 void
6468 jobmgr_init(bool sflag)
6469 {
6470 const char *root_session_type = getpid() == 1 ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
6471
6472 launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type)) != NULL);
6473 }
6474
size_t
our_strhash(const char *s)
{
	/* djb2 string hash, first reported by Dan Bernstein in comp.lang.c:
	 * hash = hash * 33 + c, seeded with 5381. */
	size_t hash = 5381;
	size_t c;

	for (c = (size_t)*s; c != 0; c = (size_t)*++s) {
		hash = hash * 33 + c;
	}

	return hash;
}
6490
6491 size_t
6492 hash_label(const char *label)
6493 {
6494 return our_strhash(label) % LABEL_HASH_SIZE;
6495 }
6496
6497 size_t
6498 hash_ms(const char *msstr)
6499 {
6500 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
6501 }
6502
6503 bool
6504 mspolicy_copy(job_t j_to, job_t j_from)
6505 {
6506 struct mspolicy *msp;
6507
6508 SLIST_FOREACH(msp, &j_from->mspolicies, sle) {
6509 if (!mspolicy_new(j_to, msp->name, msp->allow, msp->per_pid, true)) {
6510 return false;
6511 }
6512 }
6513
6514 return true;
6515 }
6516
6517 bool
6518 mspolicy_new(job_t j, const char *name, bool allow, bool pid_local, bool skip_check)
6519 {
6520 struct mspolicy *msp;
6521
6522 if (!skip_check) SLIST_FOREACH(msp, &j->mspolicies, sle) {
6523 if (msp->per_pid != pid_local) {
6524 continue;
6525 } else if (strcmp(msp->name, name) == 0) {
6526 return false;
6527 }
6528 }
6529
6530 if ((msp = calloc(1, sizeof(struct mspolicy) + strlen(name) + 1)) == NULL) {
6531 return false;
6532 }
6533
6534 strcpy((char *)msp->name, name);
6535 msp->per_pid = pid_local;
6536 msp->allow = allow;
6537
6538 SLIST_INSERT_HEAD(&j->mspolicies, msp, sle);
6539
6540 return true;
6541 }
6542
6543 void
6544 mspolicy_setup(launch_data_t obj, const char *key, void *context)
6545 {
6546 job_t j = context;
6547
6548 if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
6549 job_log(j, LOG_WARNING, "Invalid object type for Mach service policy key: %s", key);
6550 return;
6551 }
6552
6553 job_assumes(j, mspolicy_new(j, key, launch_data_get_bool(obj), false, false));
6554 }
6555
6556 bool
6557 mspolicy_check(job_t j, const char *name, bool pid_local)
6558 {
6559 struct mspolicy *mspi;
6560
6561 SLIST_FOREACH(mspi, &j->mspolicies, sle) {
6562 if (mspi->per_pid != pid_local) {
6563 continue;
6564 } else if (strcmp(mspi->name, name) != 0) {
6565 continue;
6566 }
6567 return mspi->allow;
6568 }
6569
6570 return !j->deny_unknown_mslookups;
6571 }
6572
6573 void
6574 mspolicy_delete(job_t j, struct mspolicy *msp)
6575 {
6576 SLIST_REMOVE(&j->mspolicies, msp, mspolicy, sle);
6577
6578 free(msp);
6579 }
6580
6581 bool
6582 waiting4removal_new(job_t j, mach_port_t rp)
6583 {
6584 struct waiting_for_removal *w4r;
6585
6586 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
6587 return false;
6588 }
6589
6590 w4r->reply_port = rp;
6591
6592 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
6593
6594 return true;
6595 }
6596
6597 void
6598 waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
6599 {
6600 job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);
6601
6602 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
6603
6604 free(w4r);
6605 }
6606
6607 void
6608 do_file_init(void)
6609 {
6610 launchd_assert(mach_timebase_info(&tbi) == 0);
6611
6612 }