]> git.saurik.com Git - apple/launchd.git/blob - launchd/src/launchd_core_logic.c
0b057214700ed0cdce889611f8ddec7fb710c215
[apple/launchd.git] / launchd / src / launchd_core_logic.c
1 /*
2 * @APPLE_APACHE_LICENSE_HEADER_START@
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * @APPLE_APACHE_LICENSE_HEADER_END@
17 */
18
19 static const char *const __rcs_file_version__ = "$Revision: 23714 $";
20
21 #include "config.h"
22 #include "launchd_core_logic.h"
23
24 #include <TargetConditionals.h>
25 #include <mach/mach.h>
26 #include <mach/mach_error.h>
27 #include <mach/mach_time.h>
28 #include <mach/boolean.h>
29 #include <mach/message.h>
30 #include <mach/notify.h>
31 #include <mach/mig_errors.h>
32 #include <mach/mach_traps.h>
33 #include <mach/mach_interface.h>
34 #include <mach/host_info.h>
35 #include <mach/mach_host.h>
36 #include <mach/exception.h>
37 #include <mach/host_reboot.h>
38 #include <sys/types.h>
39 #include <sys/queue.h>
40 #include <sys/event.h>
41 #include <sys/stat.h>
42 #include <sys/ucred.h>
43 #include <sys/fcntl.h>
44 #include <sys/un.h>
45 #include <sys/reboot.h>
46 #include <sys/wait.h>
47 #include <sys/sysctl.h>
48 #include <sys/sockio.h>
49 #include <sys/time.h>
50 #include <sys/resource.h>
51 #include <sys/ioctl.h>
52 #include <sys/mount.h>
53 #include <sys/pipe.h>
54 #include <net/if.h>
55 #include <netinet/in.h>
56 #include <netinet/in_var.h>
57 #include <netinet6/nd6.h>
58 #include <bsm/libbsm.h>
59 #include <unistd.h>
60 #include <signal.h>
61 #include <errno.h>
62 #include <libgen.h>
63 #include <stdio.h>
64 #include <stdlib.h>
65 #include <stdarg.h>
66 #include <stdbool.h>
67 #include <paths.h>
68 #include <pwd.h>
69 #include <grp.h>
70 #include <ttyent.h>
71 #include <dlfcn.h>
72 #include <dirent.h>
73 #include <string.h>
74 #include <ctype.h>
75 #include <glob.h>
76 #include <spawn.h>
77 #if HAVE_SANDBOX
78 #include <sandbox.h>
79 #endif
80 #if HAVE_QUARANTINE
81 #include <quarantine.h>
82 #endif
83
84 #include "liblaunch_public.h"
85 #include "liblaunch_private.h"
86 #include "liblaunch_internal.h"
87 #include "libbootstrap_public.h"
88 #include "libbootstrap_private.h"
89 #include "libvproc_public.h"
90 #include "libvproc_internal.h"
91
92 #include "reboot2.h"
93
94 #include "launchd.h"
95 #include "launchd_runtime.h"
96 #include "launchd_unix_ipc.h"
97 #include "protocol_vproc.h"
98 #include "protocol_vprocServer.h"
99 #include "job_reply.h"
100
101 #define LAUNCHD_MIN_JOB_RUN_TIME 10
102 #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
103 #define LAUNCHD_SIGKILL_TIMER 5
104
105
106 #define TAKE_SUBSET_NAME "TakeSubsetName"
107 #define TAKE_SUBSET_PID "TakeSubsetPID"
108 #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
109
110 #define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
111
112 extern char **environ;
113
114 struct waiting_for_removal {
115 SLIST_ENTRY(waiting_for_removal) sle;
116 mach_port_t reply_port;
117 };
118
119 static bool waiting4removal_new(job_t j, mach_port_t rp);
120 static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
121
122 struct mspolicy {
123 SLIST_ENTRY(mspolicy) sle;
124 unsigned int allow:1, per_pid:1;
125 const char name[0];
126 };
127
128 static bool mspolicy_new(job_t j, const char *name, bool allow, bool pid_local, bool skip_check);
129 static bool mspolicy_copy(job_t j_to, job_t j_from);
130 static void mspolicy_setup(launch_data_t obj, const char *key, void *context);
131 static bool mspolicy_check(job_t j, const char *name, bool pid_local);
132 static void mspolicy_delete(job_t j, struct mspolicy *msp);
133
/* One advertised Mach service.  The name is stored as a variable-length
 * trailing array; instances are linked onto several lists at once. */
struct machservice {
	SLIST_ENTRY(machservice) sle;			/* linkage on the owning job's machservices list */
	SLIST_ENTRY(machservice) special_port_sle;	/* linkage on the global special_ports list */
	LIST_ENTRY(machservice) name_hash_sle;		/* linkage in a jobmgr's ms_hash[] bucket */
	LIST_ENTRY(machservice) port_hash_sle;		/* linkage in the global port_hash[] bucket */
	job_t job;					/* job that owns this service */
	uint64_t bad_perf_cnt;
	unsigned int gen_num;				/* generation counter; compared against job->lastlookup_gennum */
	mach_port_name_t port;
	unsigned int isActive:1, reset:1, recv:1, hide:1, kUNCServer:1, per_user_hack:1, debug_on_close:1, per_pid:1, special_port_num:10;
	const char name[0];				/* variable-length service name, allocated past the struct */
};
146
147 static SLIST_HEAD(, machservice) special_ports; /* hack, this should be per jobmgr_t */
148
149 #define PORT_HASH_SIZE 32
150 #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
151
152 static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
153
154 static void machservice_setup(launch_data_t obj, const char *key, void *context);
155 static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
156 static void machservice_resetport(job_t j, struct machservice *ms);
157 static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
158 static void machservice_ignore(job_t j, struct machservice *ms);
159 static void machservice_watch(job_t j, struct machservice *ms);
160 static void machservice_delete(job_t j, struct machservice *, bool port_died);
161 static void machservice_request_notifications(struct machservice *);
162 static mach_port_t machservice_port(struct machservice *);
163 static job_t machservice_job(struct machservice *);
164 static bool machservice_hidden(struct machservice *);
165 static bool machservice_active(struct machservice *);
166 static const char *machservice_name(struct machservice *);
167 static bootstrap_status_t machservice_status(struct machservice *);
168
169 struct socketgroup {
170 SLIST_ENTRY(socketgroup) sle;
171 int *fds;
172 unsigned int junkfds:1, fd_cnt:31;
173 union {
174 const char name[0];
175 char name_init[0];
176 };
177 };
178
179 static bool socketgroup_new(job_t j, const char *name, int *fds, unsigned int fd_cnt, bool junkfds);
180 static void socketgroup_delete(job_t j, struct socketgroup *sg);
181 static void socketgroup_watch(job_t j, struct socketgroup *sg);
182 static void socketgroup_ignore(job_t j, struct socketgroup *sg);
183 static void socketgroup_callback(job_t j);
184 static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
185 static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
186
187 struct calendarinterval {
188 LIST_ENTRY(calendarinterval) global_sle;
189 SLIST_ENTRY(calendarinterval) sle;
190 job_t job;
191 struct tm when;
192 time_t when_next;
193 };
194
195 static LIST_HEAD(, calendarinterval) sorted_calendar_events;
196
197 static bool calendarinterval_new(job_t j, struct tm *w);
198 static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
199 static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
200 static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
201 static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
202 static void calendarinterval_callback(void);
203 static void calendarinterval_sanity_check(void);
204
205 struct envitem {
206 SLIST_ENTRY(envitem) sle;
207 char *value;
208 union {
209 const char key[0];
210 char key_init[0];
211 };
212 };
213
214 static bool envitem_new(job_t j, const char *k, const char *v, bool global);
215 static void envitem_delete(job_t j, struct envitem *ei, bool global);
216 static void envitem_setup(launch_data_t obj, const char *key, void *context);
217
218 struct limititem {
219 SLIST_ENTRY(limititem) sle;
220 struct rlimit lim;
221 unsigned int setsoft:1, sethard:1, which:30;
222 };
223
224 static bool limititem_update(job_t j, int w, rlim_t r);
225 static void limititem_delete(job_t j, struct limititem *li);
226 static void limititem_setup(launch_data_t obj, const char *key, void *context);
227 #if HAVE_SANDBOX
228 static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
229 #endif
230
231 typedef enum {
232 NETWORK_UP = 1,
233 NETWORK_DOWN,
234 SUCCESSFUL_EXIT,
235 FAILED_EXIT,
236 PATH_EXISTS,
237 PATH_MISSING,
238 OTHER_JOB_ENABLED,
239 OTHER_JOB_DISABLED,
240 OTHER_JOB_ACTIVE,
241 OTHER_JOB_INACTIVE,
242 PATH_CHANGES,
243 DIR_NOT_EMPTY,
244 // FILESYSTEMTYPE_IS_MOUNTED, /* for nfsiod, but maybe others */
245 } semaphore_reason_t;
246
247 struct semaphoreitem {
248 SLIST_ENTRY(semaphoreitem) sle;
249 semaphore_reason_t why;
250 int fd;
251 union {
252 const char what[0];
253 char what_init[0];
254 };
255 };
256
257 struct semaphoreitem_dict_iter_context {
258 job_t j;
259 semaphore_reason_t why_true;
260 semaphore_reason_t why_false;
261 };
262
263 static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
264 static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
265 static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
266 static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
267 static void semaphoreitem_callback(job_t j, struct kevent *kev);
268 static void semaphoreitem_watch(job_t j, struct semaphoreitem *si);
269 static void semaphoreitem_ignore(job_t j, struct semaphoreitem *si);
270 static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
271
272 #define ACTIVE_JOB_HASH_SIZE 32
273 #define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))
274 #define MACHSERVICE_HASH_SIZE 37
275
/* A job manager (bootstrap namespace).  Managers form a tree via
 * submgrs/parentmgr, rooted at root_jobmgr, and own the jobs submitted
 * into their namespace.  The name is a variable-length trailing array. */
struct jobmgr_s {
	kq_callback kqjobmgr_callback;	/* NOTE(review): presumably must stay the first member for kevent udata dispatch — confirm against launchd_runtime */
	SLIST_ENTRY(jobmgr_s) sle;	/* linkage in parentmgr->submgrs */
	SLIST_HEAD(, jobmgr_s) submgrs;	/* child managers */
	LIST_HEAD(, job_s) jobs;	/* all jobs owned by this manager */
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];	/* running jobs, hashed — see jobmgr_find_by_pid() */
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];	/* services by name hash — see jobmgr_lookup_service() */
	mach_port_t jm_port;		/* receive right this manager serves requests on */
	mach_port_t req_port;		/* requestor port; deallocated in jobmgr_remove() */
	jobmgr_t parentmgr;		/* NULL for the root manager */
	int reboot_flags;		/* handed to reboot() when the PID-1 root manager is removed */
	unsigned int global_on_demand_cnt;	/* jobs that forced peers into on-demand mode */
	unsigned int hopefully_first_cnt;	/* jobs flagged hopefully_exits_first */
	unsigned int normal_active_cnt;
	unsigned int sent_stop_to_normal_jobs:1, sent_stop_to_hopefully_last_jobs:1, shutting_down:1, session_initialized:1;
	union {
		const char name[0];	/* variable-length manager/session name */
		char name_init[0];	/* writable alias used only at creation time */
	};
};
296
297 #define jobmgr_assumes(jm, e) \
298 (__builtin_expect(!(e), 0) ? jobmgr_log_bug(jm, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
299
300 static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name);
301 static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
302 static jobmgr_t jobmgr_parent(jobmgr_t jm);
303 static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
304 static bool jobmgr_label_test(jobmgr_t jm, const char *str);
305 static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
306 static void jobmgr_log_stray_children(jobmgr_t jm);
307 static void jobmgr_remove(jobmgr_t jm);
308 static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
309 static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
310 static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
311 static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
312 static void job_export_all2(jobmgr_t jm, launch_data_t where);
313 static void jobmgr_callback(void *obj, struct kevent *kev);
314 static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
315 static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
316 static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
317 static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
318 static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
319 /* static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4))); */
320 static void jobmgr_log_bug(jobmgr_t jm, const char *rcs_rev, const char *path, unsigned int line, const char *test);
321
322 #define DO_RUSAGE_SUMMATION 0
323
324 #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
325
/* The core per-job record: everything launchd tracks for one job, whether a
 * real (plist/mach_init-submitted) job or an adopted anonymous process.
 * The label is stored as a variable-length trailing array. */
struct job_s {
	kq_callback kqjob_callback;	/* NOTE(review): presumably must stay the first member for kevent udata dispatch — confirm */
	LIST_ENTRY(job_s) sle;		/* linkage in mgr->jobs */
	LIST_ENTRY(job_s) pid_hash_sle;	/* linkage in mgr->active_jobs[] while running */
	LIST_ENTRY(job_s) label_hash_sle;	/* linkage in the global label_hash[] */
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, mspolicy) mspolicies;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;	/* KeepAlive conditions; address doubles as a kevent timer ident */
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
#if DO_RUSAGE_SUMMATION
	struct rusage ru;
#endif
	cpu_type_t *j_binpref;		/* preferred binary architectures; heap-allocated */
	size_t j_binpref_cnt;
	mach_port_t j_port;		/* this job's bootstrap receive right — see job_setup_machport() */
	mach_port_t wait_reply_port; /* we probably should switch to a list of waiters */
	uid_t mach_uid;
	jobmgr_t mgr;			/* owning job manager */
	char **argv;			/* single heap allocation — see mach_cmd2argv() */
	char *prog;
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdoutpath;
	char *stderrpath;
	char *alt_exc_handler;
	struct machservice *lastlookup;	/* cache of the most recent service lookup */
	unsigned int lastlookup_gennum;
#if HAVE_SANDBOX
	char *seatbelt_profile;
	uint64_t seatbelt_flags;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p;			/* 0 when the job has no running process */
	int argc;
	int last_exit_status;
	int forkfd;
	int log_redirect_fd;
	int nice;
	unsigned int timeout;
	unsigned int exit_timeout;	/* seconds after SIGTERM before escalation; address used as kevent timer ident */
	int stdout_err_fd;
	uint64_t sent_sigterm_time;	/* mach_absolute_time() stamp set by job_stop() */
	uint64_t start_time;
	uint32_t min_run_time;
	uint32_t start_interval;	/* address used as a kevent timer ident when nonzero */
	unsigned int checkedin:1, anonymous:1, debug:1, inetcompat:1, inetcompat_wait:1,
		     ondemand:1, session_create:1, low_pri_io:1, no_init_groups:1, priv_port_has_senders:1,
		     importing_global_env:1, importing_hard_limits:1, setmask:1, legacy_mach_job:1, start_pending:1;
	mode_t mask;
	unsigned int globargv:1, wait4debugger:1, unload_at_exit:1, stall_before_exec:1, only_once:1,
		     currently_ignored:1, forced_peers_to_demand_mode:1, setnice:1, hopefully_exits_last:1, removal_pending:1,
		     wait4pipe_eof:1, sent_sigkill:1, debug_before_kill:1, weird_bootstrap:1, start_on_mount:1,
		     per_user:1, hopefully_exits_first:1, deny_unknown_mslookups:1, unload_at_mig_return:1, abandon_pg:1,
		     poll_for_vfs_changes:1, internal_exc_handler:1, deny_job_creation:1;
	const char label[0];		/* variable-length unique job label */
};
392
393 #define LABEL_HASH_SIZE 53
394
395 static LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
396 static size_t hash_label(const char *label) __attribute__((pure));
397 static size_t hash_ms(const char *msstr) __attribute__((pure));
398
399
400 #define job_assumes(j, e) \
401 (__builtin_expect(!(e), 0) ? job_log_bug(j, __rcs_file_version__, __FILE__, __LINE__, #e), false : true)
402
403 static void job_import_keys(launch_data_t obj, const char *key, void *context);
404 static void job_import_bool(job_t j, const char *key, bool value);
405 static void job_import_string(job_t j, const char *key, const char *value);
406 static void job_import_integer(job_t j, const char *key, long long value);
407 static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
408 static void job_import_array(job_t j, const char *key, launch_data_t value);
409 static void job_import_opaque(job_t j, const char *key, launch_data_t value);
410 static bool job_set_global_on_demand(job_t j, bool val);
411 static const char *job_active(job_t j);
412 static void job_watch(job_t j);
413 static void job_ignore(job_t j);
414 static void job_reap(job_t j);
415 static bool job_useless(job_t j);
416 static bool job_keepalive(job_t j);
417 static void job_start(job_t j);
418 static void job_start_child(job_t j) __attribute__((noreturn));
419 static void job_setup_attributes(job_t j);
420 static bool job_setup_machport(job_t j);
421 static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
422 static void job_postfork_become_user(job_t j);
423 static void job_enable_audit_for_user(job_t j, uid_t u, char *name);
424 static void job_find_and_blame_pids_with_weird_uids(job_t j);
425 static void job_force_sampletool(job_t j);
426 static void job_setup_exception_port(job_t j, task_t target_task);
427 static void job_reparent_hack(job_t j, const char *where);
428 static void job_callback(void *obj, struct kevent *kev);
429 static void job_callback_proc(job_t j, int flags, int fflags);
430 static void job_callback_timer(job_t j, void *ident);
431 static void job_callback_read(job_t j, int ident);
432 static void job_log_stray_pg(job_t j);
433 static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid);
434 static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv);
435 static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond);
436 static const char *job_prog(job_t j);
437 static jobmgr_t job_get_bs(job_t j);
438 static void job_kill(job_t j);
439 static void job_uncork_fork(job_t j);
440 static void job_log_stdouterr(job_t j);
441 static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
442 static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
443 static void job_log_bug(job_t j, const char *rcs_rev, const char *path, unsigned int line, const char *test);
444 static void job_log_stdouterr2(job_t j, const char *msg, ...);
445 static void job_set_exeception_port(job_t j, mach_port_t port);
446 static kern_return_t job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus);
447
448
449
450 static const struct {
451 const char *key;
452 int val;
453 } launchd_keys2limits[] = {
454 { LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
455 { LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
456 { LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
457 { LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
458 { LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
459 { LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
460 { LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
461 { LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
462 { LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
463 };
464
465 static time_t cronemu(int mon, int mday, int hour, int min);
466 static time_t cronemu_wday(int wday, int hour, int min);
467 static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
468 static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
469 static bool cronemu_hour(struct tm *wtm, int hour, int min);
470 static bool cronemu_min(struct tm *wtm, int min);
471
472 /* miscellaneous file local functions */
473 static void ensure_root_bkgd_setup(void);
474 static int dir_has_files(job_t j, const char *path);
475 static char **mach_cmd2argv(const char *string);
476 static size_t our_strhash(const char *s) __attribute__((pure));
477 static void extract_rcsid_substr(const char *i, char *o, size_t osz);
478 static void do_first_per_user_launchd_hack(void);
479 static size_t get_kern_max_proc(void);
480 static void do_file_init(void) __attribute__((constructor));
481
482 /* file local globals */
483 static bool do_apple_internal_magic;
484 static size_t total_children;
485 static size_t total_anon_children;
486 static mach_port_t the_exception_server;
487 static bool did_first_per_user_launchd_BootCache_hack;
488 #define JOB_BOOTCACHE_HACK_CHECK(j) (j->per_user && !did_first_per_user_launchd_BootCache_hack && (j->mach_uid >= 500) && (j->mach_uid != (uid_t)-2))
489 static jobmgr_t background_jobmgr;
490 static job_t workaround_5477111;
491 static mach_timebase_info_data_t tbi;
492
493 /* process wide globals */
494 mach_port_t inherited_bootstrap_port;
495 jobmgr_t root_jobmgr;
496
497
498 void
499 job_ignore(job_t j)
500 {
501 struct semaphoreitem *si;
502 struct socketgroup *sg;
503 struct machservice *ms;
504
505 if (j->currently_ignored) {
506 return;
507 }
508
509 job_log(j, LOG_DEBUG, "Ignoring...");
510
511 j->currently_ignored = true;
512
513 if (j->poll_for_vfs_changes) {
514 j->poll_for_vfs_changes = false;
515 job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
516 }
517
518 SLIST_FOREACH(sg, &j->sockets, sle) {
519 socketgroup_ignore(j, sg);
520 }
521
522 SLIST_FOREACH(ms, &j->machservices, sle) {
523 machservice_ignore(j, ms);
524 }
525
526 SLIST_FOREACH(si, &j->semaphores, sle) {
527 semaphoreitem_ignore(j, si);
528 }
529 }
530
531 void
532 job_watch(job_t j)
533 {
534 struct semaphoreitem *si;
535 struct socketgroup *sg;
536 struct machservice *ms;
537
538 if (!j->currently_ignored) {
539 return;
540 }
541
542 job_log(j, LOG_DEBUG, "Watching...");
543
544 j->currently_ignored = false;
545
546 SLIST_FOREACH(sg, &j->sockets, sle) {
547 socketgroup_watch(j, sg);
548 }
549
550 SLIST_FOREACH(ms, &j->machservices, sle) {
551 machservice_watch(j, ms);
552 }
553
554 SLIST_FOREACH(si, &j->semaphores, sle) {
555 semaphoreitem_watch(j, si);
556 }
557 }
558
559 void
560 job_stop(job_t j)
561 {
562 if (!j->p || j->anonymous) {
563 return;
564 }
565
566 job_assumes(j, runtime_kill(j->p, SIGTERM) != -1);
567 j->sent_sigterm_time = mach_absolute_time();
568
569 if (j->exit_timeout) {
570 job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
571 EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j) != -1);
572 }
573
574 job_log(j, LOG_DEBUG, "Sent SIGTERM signal");
575 }
576
launch_data_t
job_export(job_t j)
{
	/* Serialize this job into a launch_data dictionary (for launchctl
	 * and friends).  Returns NULL only if the top-level dictionary
	 * cannot be allocated; individual keys whose allocation fails are
	 * silently omitted. */
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}
	if ((tmp = launch_data_new_integer(j->last_exit_status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}
	/* Only report a PID while the job actually has a process. */
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (j->argv && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		int i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	/* inetd-compatibility is exported as a nested dictionary holding
	 * the "wait" flag. */
	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	/* Sockets: one fd-array per socket group, keyed by group name.
	 * Groups flagged junkfds are not exported. */
	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if (sg->junkfds) {
				continue;
			}
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	/* Mach services: the actual ports are not exported — a MACH_PORT_NULL
	 * placeholder marks each service's existence.  Per-pid services go
	 * into a separate dictionary (tmp3). */
	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
686
687 static void
688 jobmgr_log_active_jobs(jobmgr_t jm)
689 {
690 const char *why_active;
691 jobmgr_t jmi;
692 job_t ji;
693
694 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
695 jobmgr_log_active_jobs(jmi);
696 }
697
698 LIST_FOREACH(ji, &jm->jobs, sle) {
699 why_active = job_active(ji);
700
701 job_log(ji, LOG_DEBUG, "%s", why_active ? why_active : "Inactive");
702 }
703
704 }
705
706 static void
707 still_alive_with_check(void)
708 {
709 jobmgr_log(root_jobmgr, LOG_NOTICE, "Still alive with %lu/%lu children", total_children, total_anon_children);
710
711 jobmgr_log_active_jobs(root_jobmgr);
712
713 runtime_closelog(); /* hack to flush logs */
714 }
715
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	/* Begin an orderly shutdown of this manager: mark it shutting down,
	 * recurse into all submanagers, and SIGTERM running jobs flagged
	 * hopefully_exits_first.  Returns whatever the garbage-collection
	 * pass returns — see jobmgr_do_garbage_collection(). */
	jobmgr_t jmi, jmn;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutting_down = true;

	/* _SAFE variant: a submanager may unlink itself during shutdown. */
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (jm->hopefully_first_cnt) {
		LIST_FOREACH(ji, &jm->jobs, sle) {
			if (ji->p && ji->hopefully_exits_first) {
				job_stop(ji);
			}
		}
	}

	/* In PID 1's root manager, periodically log who is still alive so
	 * shutdown hangs can be diagnosed (debug_shutdown_hangs knob). */
	if (debug_shutdown_hangs && jm->parentmgr == NULL && getpid() == 1) {
		runtime_set_timeout(still_alive_with_check, 5);
	}

	return jobmgr_do_garbage_collection(jm);
}
744
void
jobmgr_remove(jobmgr_t jm)
{
	/* Tear down a job manager.  By this point only anonymous jobs should
	 * remain and all submanagers should already be gone.  Removing the
	 * root manager in PID 1 triggers reboot(); in any other process it
	 * exits. */
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removed job manager");

	if (!jobmgr_assumes(jm, SLIST_EMPTY(&jm->submgrs))) {
		/* Shouldn't happen; recover by removing any stragglers. */
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
		}
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		/* We should only have anonymous jobs left */
		job_assumes(ji, ji->anonymous);
		job_remove(ji);
	}

	if (jm->req_port) {
		jobmgr_assumes(jm, launchd_mport_deallocate(jm->req_port) == KERN_SUCCESS);
	}

	if (jm->jm_port) {
		jobmgr_assumes(jm, launchd_mport_close_recv(jm->jm_port) == KERN_SUCCESS);
	}

	if (jm == background_jobmgr) {
		background_jobmgr = NULL;
	}

	if (jm->parentmgr) {
		/* Unlink from the parent; runtime_del_ref() presumably balances
		 * a reference taken when the submanager was created — confirm. */
		runtime_del_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);
	} else if (getpid() == 1) {
		/* Root manager of PID 1: removing it reboots the machine. */
		jobmgr_log(jm, LOG_DEBUG, "About to call: reboot(%s)", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
		runtime_closelog();
	} else {
		/* Root manager of a non-PID-1 launchd: just exit. */
		runtime_closelog();
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
793
794 void
795 job_remove(job_t j)
796 {
797 struct waiting_for_removal *w4r;
798 struct calendarinterval *ci;
799 struct semaphoreitem *si;
800 struct socketgroup *sg;
801 struct machservice *ms;
802 struct limititem *li;
803 struct mspolicy *msp;
804 struct envitem *ei;
805
806 if (j->p && j->anonymous) {
807 job_reap(j);
808 } else if (j->p) {
809 job_log(j, LOG_DEBUG, "Removal pended until the job exits");
810
811 if (!j->removal_pending) {
812 j->removal_pending = true;
813 job_stop(j);
814 }
815
816 return;
817 }
818
819 ipc_close_all_with_job(j);
820
821 if (j->forced_peers_to_demand_mode) {
822 job_set_global_on_demand(j, false);
823 }
824
825 if (!job_assumes(j, j->forkfd == 0)) {
826 job_assumes(j, runtime_close(j->forkfd) != -1);
827 }
828
829 if (!job_assumes(j, j->log_redirect_fd == 0)) {
830 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
831 }
832
833 if (j->j_port) {
834 job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
835 }
836
837 if (!job_assumes(j, j->wait_reply_port == MACH_PORT_NULL)) {
838 job_assumes(j, launchd_mport_deallocate(j->wait_reply_port) == KERN_SUCCESS);
839 }
840
841 while ((msp = SLIST_FIRST(&j->mspolicies))) {
842 mspolicy_delete(j, msp);
843 }
844 while ((sg = SLIST_FIRST(&j->sockets))) {
845 socketgroup_delete(j, sg);
846 }
847 while ((ci = SLIST_FIRST(&j->cal_intervals))) {
848 calendarinterval_delete(j, ci);
849 }
850 while ((ei = SLIST_FIRST(&j->env))) {
851 envitem_delete(j, ei, false);
852 }
853 while ((ei = SLIST_FIRST(&j->global_env))) {
854 envitem_delete(j, ei, true);
855 }
856 while ((li = SLIST_FIRST(&j->limits))) {
857 limititem_delete(j, li);
858 }
859 while ((ms = SLIST_FIRST(&j->machservices))) {
860 machservice_delete(j, ms, false);
861 }
862 while ((si = SLIST_FIRST(&j->semaphores))) {
863 semaphoreitem_delete(j, si);
864 }
865 while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
866 waiting4removal_delete(j, w4r);
867 }
868
869 if (j->prog) {
870 free(j->prog);
871 }
872 if (j->argv) {
873 free(j->argv);
874 }
875 if (j->rootdir) {
876 free(j->rootdir);
877 }
878 if (j->workingdir) {
879 free(j->workingdir);
880 }
881 if (j->username) {
882 free(j->username);
883 }
884 if (j->groupname) {
885 free(j->groupname);
886 }
887 if (j->stdoutpath) {
888 free(j->stdoutpath);
889 }
890 if (j->stderrpath) {
891 free(j->stderrpath);
892 }
893 if (j->alt_exc_handler) {
894 free(j->alt_exc_handler);
895 }
896 #if HAVE_SANDBOX
897 if (j->seatbelt_profile) {
898 free(j->seatbelt_profile);
899 }
900 #endif
901 #if HAVE_QUARANTINE
902 if (j->quarantine_data) {
903 free(j->quarantine_data);
904 }
905 #endif
906 if (j->j_binpref) {
907 free(j->j_binpref);
908 }
909 if (j->start_interval) {
910 runtime_del_ref();
911 job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
912 }
913 if (j->poll_for_vfs_changes) {
914 job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
915 }
916
917 kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
918
919 LIST_REMOVE(j, sle);
920 LIST_REMOVE(j, label_hash_sle);
921
922 job_log(j, LOG_DEBUG, "Removed");
923
924 free(j);
925 }
926
void
socketgroup_setup(launch_data_t obj, const char *key, void *context)
{
	/* launch_data dictionary-iterator callback for the Sockets key:
	 * collect the fd (or array of fds) stored under `key` into a new
	 * socketgroup on the job passed via `context`, then revoke
	 * launchd's own copies of the descriptors. */
	launch_data_t tmp_oai;
	job_t j = context;
	unsigned int i, fd_cnt = 1;	/* a bare fd counts as a 1-element group */
	int *fds;

	if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
		fd_cnt = launch_data_array_get_count(obj);
	}

	/* NOTE(review): stack allocation sized by a count taken from the
	 * submitted launch data — presumably bounded by the fds actually
	 * passed over the local IPC; confirm no large-count path exists. */
	fds = alloca(fd_cnt * sizeof(int));

	for (i = 0; i < fd_cnt; i++) {
		if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
			tmp_oai = launch_data_array_get_index(obj, i);
		} else {
			tmp_oai = obj;
		}

		fds[i] = launch_data_get_fd(tmp_oai);
	}

	/* Groups under the BonjourFDs key are created with junkfds set. */
	socketgroup_new(j, key, fds, fd_cnt, strcmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0);

	ipc_revoke_fds(obj);
}
955
956 bool
957 job_set_global_on_demand(job_t j, bool val)
958 {
959 if (j->forced_peers_to_demand_mode && val) {
960 return false;
961 } else if (!j->forced_peers_to_demand_mode && !val) {
962 return false;
963 }
964
965 if ((j->forced_peers_to_demand_mode = val)) {
966 j->mgr->global_on_demand_cnt++;
967 } else {
968 j->mgr->global_on_demand_cnt--;
969 }
970
971 if (j->mgr->global_on_demand_cnt == 0) {
972 jobmgr_dispatch_all(j->mgr, false);
973 }
974
975 return true;
976 }
977
bool
job_setup_machport(job_t j)
{
	/*
	 * Allocate the job's receive port and wire it into the runtime:
	 * register the MIG demux with a large-enough maximum message size and
	 * request a no-senders notification so launchd learns when every send
	 * right to the port has been deallocated.
	 *
	 * Returns false on failure, with no receive right left allocated.
	 */
	mach_msg_size_t mxmsgsz;

	if (!job_assumes(j, launchd_mport_create_recv(&j->j_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!job_assumes(j, runtime_add_mport(j->j_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
		goto out_bad2;
	}

	if (!job_assumes(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS) == KERN_SUCCESS)) {
		/* Notification setup failed after registration; drop the receive right. */
		job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
		goto out_bad;
	}

	return true;
out_bad2:
	job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
out_bad:
	return false;
}
1008
job_t
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
{
	/*
	 * Create a legacy mach_init-style job on behalf of job 'j' from the
	 * command line 'cmd'. 'uid' is the UID the job will run as and 'ond'
	 * selects on-demand launching. Returns the new job or NULL on failure.
	 */
	const char **argv = (const char **)mach_cmd2argv(cmd);
	job_t jr = NULL;

	if (!job_assumes(j, argv != NULL)) {
		goto out_bad;
	}

	jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);

	/* job_new() copies the argument vector; this one is ours to free. */
	free(argv);

	/* jobs can easily be denied creation during shutdown */
	if (!jr) {
		goto out_bad;
	}

	jr->mach_uid = uid;
	jr->ondemand = ond;
	jr->legacy_mach_job = true;
	jr->abandon_pg = true;
	jr->priv_port_has_senders = true; /* the IPC that called us will make-send on this port */

	if (!job_setup_machport(jr)) {
		goto out_bad;
	}

	job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");

	return jr;

out_bad:
	if (jr) {
		job_remove(jr);
	}
	return NULL;
}
1048
1049 kern_return_t
1050 job_handle_mpm_wait(job_t j, mach_port_t srp, int *waitstatus)
1051 {
1052 if (j->p) {
1053 j->wait_reply_port = srp;
1054 return MIG_NO_REPLY;
1055 }
1056
1057 *waitstatus = j->last_exit_status;
1058
1059 return 0;
1060 }
1061
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	/*
	 * Create a job_t to track an already-running process that launchd did
	 * not spawn. The process is validated via sysctl(); if its parent is
	 * known to us, the parent's Mach service policies are copied over.
	 * Returns the new job, or NULL if the PID cannot be validated.
	 */
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, anonpid };
	struct kinfo_proc kp;
	size_t len = sizeof(kp);
	const char *zombie = NULL;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;

	if (!jobmgr_assumes(jm, anonpid != 0)) {
		return NULL;
	}

	if (!jobmgr_assumes(jm, anonpid < 100000)) {
		/* The kernel current defines PID_MAX to be 99999, but that define isn't exported */
		return NULL;
	}

	if (!jobmgr_assumes(jm, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
		return NULL;
	}

	/* sysctl() shrinks 'len' to the bytes actually returned; a short read
	 * means the PID does not exist. */
	if (len != sizeof(kp)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for nonexistent PID: %u", anonpid);
		return NULL;
	}

	if (!jobmgr_assumes(jm, kp.kp_proc.p_comm[0] != '\0')) {
		return NULL;
	}

	if (kp.kp_proc.p_stat == SZOMB) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID: %u", anonpid);
		zombie = "zombie";
	}

	/* Try to locate a parent job so its policies can be inherited below. */
	switch (kp.kp_eproc.e_ppid) {
	case 0:
		/* the kernel */
		break;
	case 1:
		if (getpid() != 1) {
			/* we cannot possibly find a parent job_t that is useful in this function */
			break;
		}
		/* fall through */
	default:
		jp = jobmgr_find_by_pid(jm, kp.kp_eproc.e_ppid, true);
		jobmgr_assumes(jm, jp != NULL);
		break;
	}

	/* A total hack: Normally, job_new() returns an error during shutdown, but anonymous jobs are special. */
	if ((shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	if (jobmgr_assumes(jm, (jr = job_new(jm, AUTO_PICK_LEGACY_LABEL, zombie ? zombie : kp.kp_proc.p_comm, NULL)) != NULL)) {
		u_int proc_fflags = NOTE_EXEC|NOTE_EXIT /* |NOTE_REAP */;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		/* anonymous process reaping is messy */
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1 && job_assumes(jr, errno == ESRCH)) {
			/* zombies are weird */
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (jp) {
			job_assumes(jr, mspolicy_copy(jr, jp));
		}

		if (shutdown_state && jm->hopefully_first_cnt == 0) {
			job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time");
		}

		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, kp.kp_eproc.e_ppid, jp ? ": " : "", jp ? jp->label : "");
	}

	/* Restore the shutdown flag we temporarily cleared above. */
	if (shutdown_state) {
		jm->shutting_down = true;
	}

	return jr;
}
1153
job_t
job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
{
	/*
	 * Allocate and initialize a job in manager 'jm'. 'label' may be
	 * AUTO_PICK_LEGACY_LABEL, in which case a unique label is synthesized
	 * from 'prog' (or basename of argv[0]). At least one of 'prog'/'argv'
	 * must be non-NULL. Returns NULL (with errno set to EINVAL) during
	 * shutdown or on bad arguments, and NULL on allocation failure.
	 */
	const char *const *argv_tmp = argv;
	char auto_label[1000];
	const char *bn = NULL;
	char *co;
	size_t minlabel_len;
	int i, cc = 0;
	job_t j;

	/* Enforced invariant: kqjob_callback must be the struct's first member. */
	launchd_assert(offsetof(struct job_s, kqjob_callback) == 0);

	if (jm->shutting_down) {
		errno = EINVAL;
		return NULL;
	}

	if (prog == NULL && argv == NULL) {
		errno = EINVAL;
		return NULL;
	}

	if (label == AUTO_PICK_LEGACY_LABEL) {
		bn = prog ? prog : basename((char *)argv[0]); /* prog for auto labels is kp.kp_kproc.p_comm */
		/* The hex placeholder reserves enough room for the real "%p"-based
		 * label that is generated below once 'j' exists. */
		snprintf(auto_label, sizeof(auto_label), "%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", bn);
		label = auto_label;
		/* This is so we can do gross things later. See NOTE_EXEC for anonymous jobs */
		minlabel_len = strlen(label) + MAXCOMLEN;
	} else {
		minlabel_len = strlen(label);
	}

	/* The label lives in the flexible space allocated after the struct. */
	j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);

	if (!jobmgr_assumes(jm, j != NULL)) {
		return NULL;
	}

	if (label == auto_label) {
		snprintf((char *)j->label, strlen(label) + 1, "%p.%s", j, bn);
	} else {
		strcpy((char *)j->label, label);
	}
	j->kqjob_callback = job_callback;
	j->mgr = jm;
	j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
	j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
	j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
	j->currently_ignored = true;
	j->ondemand = true;
	j->checkedin = true;

	if (prog) {
		j->prog = strdup(prog);
		if (!job_assumes(j, j->prog != NULL)) {
			goto out_bad;
		}
	}

	if (argv) {
		while (*argv_tmp++)
			j->argc++;

		for (i = 0; i < j->argc; i++) {
			cc += strlen(argv[i]) + 1;
		}

		/* Single allocation: pointer table followed by the string data. */
		j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);

		if (!job_assumes(j, j->argv != NULL)) {
			goto out_bad;
		}

		co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));

		for (i = 0; i < j->argc; i++) {
			j->argv[i] = co;
			strcpy(co, argv[i]);
			co += strlen(argv[i]) + 1;
		}
		j->argv[i] = NULL;
	}

	LIST_INSERT_HEAD(&jm->jobs, j, sle);
	LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);

	job_log(j, LOG_DEBUG, "Conceived");

	return j;

out_bad:
	if (j->prog) {
		free(j->prog);
	}
	free(j);

	return NULL;
}
1253
1254 job_t
1255 job_import(launch_data_t pload)
1256 {
1257 job_t j = jobmgr_import2(root_jobmgr, pload);
1258
1259 if (j == NULL) {
1260 return NULL;
1261 }
1262
1263 return job_dispatch(j, false);
1264 }
1265
1266 launch_data_t
1267 job_import_bulk(launch_data_t pload)
1268 {
1269 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
1270 job_t *ja;
1271 size_t i, c = launch_data_array_get_count(pload);
1272
1273 ja = alloca(c * sizeof(job_t ));
1274
1275 for (i = 0; i < c; i++) {
1276 if ((ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) {
1277 errno = 0;
1278 }
1279 launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
1280 }
1281
1282 for (i = 0; i < c; i++) {
1283 if (ja[i] == NULL) {
1284 continue;
1285 }
1286 job_dispatch(ja[i], false);
1287 }
1288
1289 return resp;
1290 }
1291
void
job_import_bool(job_t j, const char *key, bool value)
{
	/*
	 * Import one boolean plist key into job 'j'. Keys are matched
	 * case-insensitively, pre-filtered on the first character for speed.
	 * Unknown keys are logged and ignored.
	 */
	bool found_key = false;

	switch (key[0]) {
	case 'a':
	case 'A':
		if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
			j->abandon_pg = value;
			found_key = true;
		}
		break;
	case 'k':
	case 'K':
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
			/* KeepAlive is stored as the inverse of on-demand. */
			j->ondemand = !value;
			found_key = true;
		}
		break;
	case 'o':
	case 'O':
		if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
			j->ondemand = value;
			found_key = true;
		}
		break;
	case 'd':
	case 'D':
		if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
			j->debug = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
			/* Disabled jobs are expected to be filtered out before
			 * reaching us; assert the value is false. */
			job_assumes(j, !value);
			found_key = true;
		}
		break;
	case 'h':
	case 'H':
		if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
			j->hopefully_exits_last = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSFIRST) == 0) {
			j->hopefully_exits_first = value;
			found_key = true;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
			j->session_create = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
			j->start_on_mount = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
			/* this only does something on Mac OS X 10.4 "Tiger" */
			found_key = true;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
			j->low_pri_io = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
			j->only_once = value;
			found_key = true;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			j->internal_exc_handler = value;
			found_key = true;
		}
		break;
	case 'i':
	case 'I':
		if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
			/* Privileged key: only honored when running as root. */
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			j->no_init_groups = !value;
			found_key = true;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
			if (value) {
				/* We don't want value == false to change j->start_pending */
				j->start_pending = true;
			}
			found_key = true;
		}
		break;
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
			j->globargv = value;
			found_key = true;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
			j->debug_before_kill = value;
			found_key = true;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
			j->wait4debugger = value;
			found_key = true;
		}
		break;
	default:
		break;
	}

	if (!found_key) {
		job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
	}
}
1415
void
job_import_string(job_t j, const char *key, const char *value)
{
	/*
	 * Import one string plist key into job 'j'. Most keys are strdup'd
	 * into the field selected below; some (Label, Program, the host-limit
	 * keys) are handled elsewhere and return early here. Privileged keys
	 * are ignored when launchd is not running as root.
	 */
	char **where2put = NULL;

	switch (key[0]) {
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			where2put = &j->alt_exc_handler;
		}
		break;
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
			/* Program was consumed by jobmgr_import2(). */
			return;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
			/* Label was consumed by jobmgr_import2(). */
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			/* Moves the job to the manager for the named session type. */
			job_reparent_hack(j, value);
			return;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			where2put = &j->rootdir;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
			where2put = &j->workingdir;
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "root") == 0) {
				/* Running as root is the default; nothing to record. */
				return;
			}
			where2put = &j->username;
		}
		break;
	case 'g':
	case 'G':
		if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "wheel") == 0) {
				/* "wheel" is the default group; nothing to record. */
				return;
			}
			where2put = &j->groupname;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
			where2put = &j->stdoutpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
			where2put = &j->stderrpath;
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
			where2put = &j->seatbelt_profile;
#endif
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
		break;
	}

	/* NOTE(review): a key that fell through unmatched is warned about
	 * twice (type-specific warning above, generic one below). */
	if (where2put) {
		job_assumes(j, (*where2put = strdup(value)) != NULL);
	} else {
		job_log(j, LOG_WARNING, "Unknown key: %s", key);
	}
}
1510
void
job_import_integer(job_t j, const char *key, long long value)
{
	/*
	 * Import one integer plist key into job 'j', range-checking values
	 * destined for narrower unsigned fields before storing them.
	 */
	switch (key[0]) {
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
			} else {
				j->exit_timeout = value;
			}
		}
		break;
	case 'n':
	case 'N':
		if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
			j->nice = value;
			j->setnice = true;
		}
		break;
	case 't':
	case 'T':
		if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
			} else {
				j->timeout = value;
			}
		} else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
			if (value < 0) {
				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
			} else {
				j->min_run_time = value;
			}
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
			j->mask = value;
			j->setmask = true;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
			if (value <= 0) {
				job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
			} else if (value > UINT32_MAX) {
				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
			} else {
				/* The runtime ref is balanced by runtime_del_ref() when
				 * the interval timer is torn down in job_remove(). */
				runtime_add_ref();
				j->start_interval = value;

				job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, value, j) != -1);
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
			j->seatbelt_flags = value;
#endif
		}

		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
		break;
	}
}
1586
void
job_import_opaque(job_t j, const char *key, launch_data_t value)
{
	/*
	 * Import one opaque-data plist key. Only QuarantineData is recognized
	 * (when quarantine support is compiled in); the payload is copied into
	 * the job. Everything else is silently ignored.
	 */
	switch (key[0]) {
	case 'q':
	case 'Q':
#if HAVE_QUARANTINE
		if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
			size_t tmpsz = launch_data_get_opaque_size(value);

			if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
				memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
				j->quarantine_data_sz = tmpsz;
			}
		}
#endif
		break;
	default:
		break;
	}
}
1608
1609 static void
1610 policy_setup(launch_data_t obj, const char *key, void *context)
1611 {
1612 job_t j = context;
1613 bool found_key = false;
1614
1615 switch (key[0]) {
1616 case 'd':
1617 case 'D':
1618 if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
1619 j->deny_job_creation = launch_data_get_bool(obj);
1620 found_key = true;
1621 }
1622 break;
1623 default:
1624 break;
1625 }
1626
1627 if (unlikely(!found_key)) {
1628 job_log(j, LOG_WARNING, "Unknown policy: %s", key);
1629 }
1630 }
1631
void
job_import_dictionary(job_t j, const char *key, launch_data_t value)
{
	/*
	 * Import one dictionary-valued plist key into job 'j', mostly by
	 * iterating the sub-dictionary with the item-specific callback.
	 */
	launch_data_t tmp;

	switch (key[0]) {
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
			launch_data_dict_iterate(value, policy_setup, j);
		}
		break;
	case 'k':
	case 'K':
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
			/* Dictionary form of KeepAlive: conditional semaphores. */
			launch_data_dict_iterate(value, semaphoreitem_setup, j);
		}
		break;
	case 'i':
	case 'I':
		if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
			j->inetcompat = true;
			j->abandon_pg = true;
			if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
				j->inetcompat_wait = launch_data_get_bool(tmp);
			}
		}
		break;
	case 'e':
	case 'E':
		if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
			launch_data_dict_iterate(value, envitem_setup, j);
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
			/* Flag tells envitem_setup() these are global entries. */
			j->importing_global_env = true;
			launch_data_dict_iterate(value, envitem_setup, j);
			j->importing_global_env = false;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
			launch_data_dict_iterate(value, socketgroup_setup, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
			calendarinterval_new_from_obj(j, value);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
			launch_data_dict_iterate(value, limititem_setup, j);
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
			launch_data_dict_iterate(value, seatbelt_setup_flags, j);
#endif
		}
		break;
	case 'h':
	case 'H':
		if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
			/* Flag tells limititem_setup() to fill hard limits. */
			j->importing_hard_limits = true;
			launch_data_dict_iterate(value, limititem_setup, j);
			j->importing_hard_limits = false;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
			launch_data_dict_iterate(value, machservice_setup, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICELOOKUPPOLICIES) == 0) {
			launch_data_dict_iterate(value, mspolicy_setup, j);
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
		break;
	}
}
1709
void
job_import_array(job_t j, const char *key, launch_data_t value)
{
	/*
	 * Import one array-valued plist key into job 'j'. ProgramArguments
	 * and the host-limit keys were consumed elsewhere and return early.
	 */
	size_t i, value_cnt = launch_data_array_get_count(value);
	const char *str;

	switch (key[0]) {
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
			/* Consumed by jobmgr_import2(). */
			return;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
			return;
		}
		break;
	case 'q':
	case 'Q':
		if (strcasecmp(key, LAUNCH_JOBKEY_QUEUEDIRECTORIES) == 0) {
			/* Each directory becomes a DIR_NOT_EMPTY keep-alive semaphore. */
			for (i = 0; i < value_cnt; i++) {
				str = launch_data_get_string(launch_data_array_get_index(value, i));
				if (job_assumes(j, str != NULL)) {
					semaphoreitem_new(j, DIR_NOT_EMPTY, str);
				}
			}

		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WATCHPATHS) == 0) {
			/* Each path becomes a PATH_CHANGES keep-alive semaphore. */
			for (i = 0; i < value_cnt; i++) {
				str = launch_data_get_string(launch_data_array_get_index(value, i));
				if (job_assumes(j, str != NULL)) {
					semaphoreitem_new(j, PATH_CHANGES, str);
				}
			}
		}
		break;
	case 'b':
	case 'B':
		if (strcasecmp(key, LAUNCH_JOBKEY_BONJOURFDS) == 0) {
			socketgroup_setup(value, LAUNCH_JOBKEY_BONJOURFDS, j);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
			if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
				j->j_binpref_cnt = value_cnt;
				for (i = 0; i < value_cnt; i++) {
					j->j_binpref[i] = launch_data_get_integer(launch_data_array_get_index(value, i));
				}
			}
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
			for (i = 0; i < value_cnt; i++) {
				calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
			}
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
		break;
	}
}
1783
1784 void
1785 job_import_keys(launch_data_t obj, const char *key, void *context)
1786 {
1787 job_t j = context;
1788 launch_data_type_t kind;
1789
1790 if (obj == NULL) {
1791 return;
1792 }
1793
1794 kind = launch_data_get_type(obj);
1795
1796 switch (kind) {
1797 case LAUNCH_DATA_BOOL:
1798 job_import_bool(j, key, launch_data_get_bool(obj));
1799 break;
1800 case LAUNCH_DATA_STRING:
1801 job_import_string(j, key, launch_data_get_string(obj));
1802 break;
1803 case LAUNCH_DATA_INTEGER:
1804 job_import_integer(j, key, launch_data_get_integer(obj));
1805 break;
1806 case LAUNCH_DATA_DICTIONARY:
1807 job_import_dictionary(j, key, obj);
1808 break;
1809 case LAUNCH_DATA_ARRAY:
1810 job_import_array(j, key, obj);
1811 break;
1812 case LAUNCH_DATA_OPAQUE:
1813 job_import_opaque(j, key, obj);
1814 break;
1815 default:
1816 job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
1817 break;
1818 }
1819 }
1820
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	/*
	 * Validate a job plist and create the job in manager 'jm'. 'pload'
	 * must be a dictionary with a non-empty string Label; Program
	 * (string) and ProgramArguments (array of strings) are optional.
	 * Errors: EINVAL for malformed payloads or rejected labels, EEXIST
	 * if the label is already taken. On success, every key is imported
	 * via job_import_keys().
	 */
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	if (pload == NULL) {
		errno = EINVAL;
		return NULL;
	}

	if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
		errno = EINVAL;
		return NULL;
	}

	if (!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL))) {
		errno = EINVAL;
		return NULL;
	}

	if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
		errno = EINVAL;
		return NULL;
	}

	if (!(label = launch_data_get_string(tmp))) {
		errno = EINVAL;
		return NULL;
	}

	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM)) &&
			(launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		/* Stack-allocated; only has to live until job_new() copies it. */
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		argv[i] = NULL;
	}

	if ((j = job_find(label)) != NULL) {
		errno = EEXIST;
		return NULL;
	} else if (!jobmgr_label_test(jm, label)) {
		errno = EINVAL;
		return NULL;
	}

	if ((j = job_new(jm, label, prog, argv))) {
		launch_data_dict_iterate(pload, job_import_keys, j);
	}

	return j;
}
1899
1900 bool
1901 jobmgr_label_test(jobmgr_t jm, const char *str)
1902 {
1903 char *endstr = NULL;
1904 const char *ptr;
1905
1906 if (str[0] == '\0') {
1907 jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
1908 return false;
1909 }
1910
1911 for (ptr = str; *ptr; ptr++) {
1912 if (iscntrl(*ptr)) {
1913 jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
1914 return false;
1915 }
1916 }
1917
1918 strtoll(str, &endstr, 0);
1919
1920 if (str != endstr) {
1921 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to begin with numbers: %s", str);
1922 return false;
1923 }
1924
1925 if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0) ||
1926 (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
1927 jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
1928 return false;
1929 }
1930
1931 return true;
1932 }
1933
1934 job_t
1935 job_find(const char *label)
1936 {
1937 job_t ji;
1938
1939 LIST_FOREACH(ji, &label_hash[hash_label(label)], label_hash_sle) {
1940 if (ji->removal_pending) {
1941 continue; /* 5351245 */
1942 } else if (ji->mgr->shutting_down) {
1943 continue; /* 5488633 */
1944 }
1945
1946 if (strcmp(ji->label, label) == 0) {
1947 return ji;
1948 }
1949 }
1950
1951 errno = ESRCH;
1952 return NULL;
1953 }
1954
1955 job_t
1956 jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
1957 {
1958 job_t ji = NULL;
1959
1960 LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
1961 if (ji->p == p) {
1962 break;
1963 }
1964 }
1965
1966 if (ji) {
1967 return ji;
1968 } else if (create_anon) {
1969 return job_new_anonymous(jm, p);
1970 } else {
1971 return NULL;
1972 }
1973 }
1974
1975 job_t
1976 job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
1977 {
1978 jobmgr_t jmi;
1979 job_t ji;
1980
1981 if (jm->jm_port == mport) {
1982 jobmgr_assumes(jm, (ji = jobmgr_find_by_pid(jm, upid, true)) != NULL);
1983 return ji;
1984 }
1985
1986 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
1987 job_t jr;
1988
1989 if ((jr = job_mig_intran2(jmi, mport, upid))) {
1990 return jr;
1991 }
1992 }
1993
1994 LIST_FOREACH(ji, &jm->jobs, sle) {
1995 if (ji->j_port == mport) {
1996 return ji;
1997 }
1998 }
1999
2000 return NULL;
2001 }
2002
job_t
job_mig_intran(mach_port_t p)
{
	/*
	 * MIG intran routine: translate the port a request arrived on into the
	 * job_t it belongs to, using the caller's audit-token PID to resolve
	 * requests that came in on a manager port.
	 */
	struct ldcred ldc;
	job_t jr;

	runtime_get_caller_creds(&ldc);

	jr = job_mig_intran2(root_jobmgr, p, ldc.pid);

	if (!jobmgr_assumes(root_jobmgr, jr != NULL)) {
		/* Translation failed; look the caller up and log who it was. */
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		mib[3] = ldc.pid;

		if (jobmgr_assumes(root_jobmgr, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) && jobmgr_assumes(root_jobmgr, len == sizeof(kp))) {
			jobmgr_log(root_jobmgr, LOG_ERR, "%s() was confused by PID %u UID %u EUID %u Mach Port 0x%x: %s", __func__, ldc.pid, ldc.uid, ldc.euid, p, kp.kp_proc.p_comm);
		}
	}

	return jr;
}
2027
2028 job_t
2029 job_find_by_service_port(mach_port_t p)
2030 {
2031 struct machservice *ms;
2032
2033 LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
2034 if (ms->recv && (ms->port == p)) {
2035 return ms->job;
2036 }
2037 }
2038
2039 return NULL;
2040 }
2041
void
job_mig_destructor(job_t j)
{
	/*
	 * MIG destructor: runs after a MIG request has been served, giving us
	 * a chance to unload jobs flagged during the request.
	 *
	 * 5477111
	 *
	 * 'j' can be invalid at this point. We should fix this up after Leopard ships.
	 */

	if (j && j != workaround_5477111 && j->unload_at_mig_return) {
		job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
		job_remove(j);
	}

	/* The workaround marker is only good for a single MIG round trip. */
	workaround_5477111 = NULL;

	calendarinterval_sanity_check();
}
2060
2061 void
2062 job_export_all2(jobmgr_t jm, launch_data_t where)
2063 {
2064 jobmgr_t jmi;
2065 job_t ji;
2066
2067 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
2068 job_export_all2(jmi, where);
2069 }
2070
2071 LIST_FOREACH(ji, &jm->jobs, sle) {
2072 launch_data_t tmp;
2073
2074 if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
2075 launch_data_dict_insert(where, tmp, ji->label);
2076 }
2077 }
2078 }
2079
2080 launch_data_t
2081 job_export_all(void)
2082 {
2083 launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
2084
2085 if (launchd_assumes(resp != NULL)) {
2086 job_export_all2(root_jobmgr, resp);
2087 }
2088
2089 return resp;
2090 }
2091
void
job_log_stray_pg(job_t j)
{
	/*
	 * Log every process (other than the job itself) that still shares the
	 * dead job's process group: strays that outlived their parent.
	 */
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PGRP, j->p };
	/* Size the buffer up front for the kernel's maximum process count. */
	size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;

#if TARGET_OS_EMBEDDED
	if (!do_apple_internal_magic) {
		return;
	}
#endif

	if (!job_assumes(j, (kp = malloc(len)) != NULL)) {
		return;
	}
	if (!job_assumes(j, sysctl(mib, 4, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	/* sysctl() updated 'len' to the number of bytes actually returned. */
	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = kp[i].kp_proc.p_pid;
		pid_t pp_i = kp[i].kp_eproc.e_ppid;
		const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
		const char *n = kp[i].kp_proc.p_comm;

		if (p_i == j->p) {
			continue;
		} else if (!job_assumes(j, p_i != 0 && p_i != 1)) {
			continue;
		}

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u %s", z, p_i, pp_i, n);
	}

out:
	free(kp);
}
2132
/*
 * Collect a dead child's exit status and resource usage, tear down the
 * per-process state attached to the job, and log how/why it exited.
 * After this returns, j->p is 0 and the job is no longer in the PID hash.
 */
void
job_reap(job_t j)
{
	struct rusage ru;
	int status;

	job_log(j, LOG_DEBUG, "Reaping");

	/*
	 * The "weird bootstrap" job (launchd bootstrapping itself) parks the
	 * MIG demux on its own port; now that it is gone, re-register the
	 * vproc protocol server on the manager's port so lookups keep working.
	 */
	if (j->weird_bootstrap) {
		mach_msg_size_t mxmsgsz = sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);

		if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
			mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
		}

		job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
		j->weird_bootstrap = false;
	}

	/* Close the stdout/stderr capture pipe unless we are waiting for EOF on it. */
	if (j->log_redirect_fd && !j->wait4pipe_eof) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
	}

	/* The fork-synchronization fd is no longer needed once the child is dead. */
	if (j->forkfd) {
		job_assumes(j, runtime_close(j->forkfd) != -1);
		j->forkfd = 0;
	}

	if (j->anonymous) {
		/* Anonymous jobs are not our children; there is nothing to wait4(). */
		status = 0;
		memset(&ru, 0, sizeof(ru));
	} else {
		/*
		 * The job is dead. While the PID/PGID is still known to be
		 * valid, try to kill abandoned descendant processes.
		 */
		job_log_stray_pg(j);
		if (!j->abandon_pg) {
			job_assumes(j, runtime_killpg(j->p, SIGTERM) != -1 || errno == ESRCH);
		}

		/*
		 * 5020256
		 *
		 * The current implementation of ptrace() causes the traced process to
		 * be abducted away from the true parent and adopted by the tracer.
		 *
		 * Once the tracing process relinquishes control, the kernel then
		 * restores the true parent/child relationship.
		 *
		 * Unfortunately, the wait*() family of APIs is unaware of the temporarily
		 * data structures changes, and they return an error if reality hasn't
		 * been restored by the time they are called.
		 */
		if (!job_assumes(j, wait4(j->p, &status, 0, &ru) != -1)) {
			job_log(j, LOG_NOTICE, "Working around 5020256. Assuming the job crashed.");

			status = W_EXITCODE(0, SIGSEGV);
			memset(&ru, 0, sizeof(ru));
		}
	}

	/* Cancel the SIGTERM-grace-period timer, if one was armed. */
	if (j->exit_timeout) {
		kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}

	/* Global child accounting: anonymous jobs are tracked separately. */
	if (j->anonymous) {
		total_anon_children--;
	} else {
		runtime_del_ref();
		total_children--;
	}
	LIST_REMOVE(j, pid_hash_sle);

	/* Wake up any MPM-style waiter blocked on this job's exit status. */
	if (j->wait_reply_port) {
		job_log(j, LOG_DEBUG, "MPM wait reply being sent");
		job_assumes(j, job_mig_wait_reply(j->wait_reply_port, 0, status) == 0);
		j->wait_reply_port = MACH_PORT_NULL;
	}

	/* Report how long the job took to die after we asked it to. */
	if (j->sent_sigterm_time) {
		uint64_t td_sec, td_usec, td = (mach_absolute_time() - j->sent_sigterm_time) * tbi.numer / tbi.denom;

		td_sec = td / NSEC_PER_SEC;
		td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

		/* NOTE(review): %lld with uint64_t arguments; %llu would match the type exactly — confirm intent. */
		job_log(j, LOG_INFO, "Exited %lld.%06lld seconds after %s was sent",
				td_sec, td_usec, signal_to_C_name(j->sent_sigkill ? SIGKILL : SIGTERM));
	}

#if DO_RUSAGE_SUMMATION
	/* Accumulate the child's rusage into the job's lifetime totals. */
	timeradd(&ru.ru_utime, &j->ru.ru_utime, &j->ru.ru_utime);
	timeradd(&ru.ru_stime, &j->ru.ru_stime, &j->ru.ru_stime);
	j->ru.ru_maxrss += ru.ru_maxrss;
	j->ru.ru_ixrss += ru.ru_ixrss;
	j->ru.ru_idrss += ru.ru_idrss;
	j->ru.ru_isrss += ru.ru_isrss;
	j->ru.ru_minflt += ru.ru_minflt;
	j->ru.ru_majflt += ru.ru_majflt;
	j->ru.ru_nswap += ru.ru_nswap;
	j->ru.ru_inblock += ru.ru_inblock;
	j->ru.ru_oublock += ru.ru_oublock;
	j->ru.ru_msgsnd += ru.ru_msgsnd;
	j->ru.ru_msgrcv += ru.ru_msgrcv;
	j->ru.ru_nsignals += ru.ru_nsignals;
	j->ru.ru_nvcsw += ru.ru_nvcsw;
	j->ru.ru_nivcsw += ru.ru_nivcsw;
#endif

	if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
		job_log(j, LOG_WARNING, "Exited with exit code: %d", WEXITSTATUS(status));
	}

	if (WIFSIGNALED(status)) {
		int s = WTERMSIG(status);
		/* SIGTERM/SIGKILL are the expected shutdown path; anything else is abnormal. */
		if (SIGKILL == s || SIGTERM == s) {
			job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
		} else {
			job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
		}
	}

	/* Per-manager accounting used by shutdown ordering. */
	if (j->hopefully_exits_first) {
		j->mgr->hopefully_first_cnt--;
	} else if (!j->anonymous && !j->hopefully_exits_last) {
		j->mgr->normal_active_cnt--;
	}
	/* Reset per-process state; the job object itself survives for respawn. */
	j->last_exit_status = status;
	j->sent_sigkill = false;
	j->lastlookup = NULL;
	j->lastlookup_gennum = 0;
	j->p = 0;

	/*
	 * We need to someday evaluate other jobs and find those who wish to track the
	 * active/inactive state of this job. The current job_dispatch() logic makes
	 * this messy, given that jobs can be deleted at dispatch.
	 */
}
2273
2274 void
2275 jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
2276 {
2277 jobmgr_t jmi, jmn;
2278 job_t ji, jn;
2279
2280 if (jm->shutting_down) {
2281 return;
2282 }
2283
2284 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
2285 jobmgr_dispatch_all(jmi, newmounthack);
2286 }
2287
2288 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
2289 if (newmounthack && ji->start_on_mount) {
2290 ji->start_pending = true;
2291 }
2292
2293 job_dispatch(ji, false);
2294 }
2295 }
2296
2297 job_t
2298 job_dispatch(job_t j, bool kickstart)
2299 {
2300 /*
2301 * The whole job removal logic needs to be consolidated. The fact that
2302 * a job can be removed from just about anywhere makes it easy to have
2303 * stale pointers left behind somewhere on the stack that might get
2304 * used after the deallocation. In particular, during job iteration.
2305 *
2306 * This is a classic example. The act of dispatching a job may delete it.
2307 */
2308 if (!job_active(j)) {
2309 if (job_useless(j)) {
2310 job_remove(j);
2311 return NULL;
2312 } else if (kickstart || job_keepalive(j)) {
2313 job_start(j);
2314 } else {
2315 job_watch(j);
2316
2317 /*
2318 * 5455720
2319 *
2320 * Path checking and monitoring is really racy right now.
2321 * We should clean this up post Leopard.
2322 */
2323 if (job_keepalive(j)) {
2324 job_start(j);
2325 }
2326 }
2327 } else {
2328 job_log(j, LOG_DEBUG, "Tried to dispatch an already active job.");
2329 }
2330
2331 return j;
2332 }
2333
2334 void
2335 job_log_stdouterr2(job_t j, const char *msg, ...)
2336 {
2337 struct runtime_syslog_attr attr = { j->label, j->label, j->mgr->name, LOG_NOTICE, getuid(), j->p, j->p };
2338 va_list ap;
2339
2340 va_start(ap, msg);
2341 runtime_vsyslog(&attr, msg, ap);
2342 va_end(ap);
2343 }
2344
2345 void
2346 job_log_stdouterr(job_t j)
2347 {
2348 char *msg, *bufindex, *buf = malloc(BIG_PIPE_SIZE + 1);
2349 bool close_log_redir = false;
2350 ssize_t rsz;
2351
2352 if (!job_assumes(j, buf != NULL)) {
2353 return;
2354 }
2355
2356 bufindex = buf;
2357
2358 rsz = read(j->log_redirect_fd, buf, BIG_PIPE_SIZE);
2359
2360 if (rsz == 0) {
2361 job_log(j, LOG_DEBUG, "Standard out/error pipe closed");
2362 close_log_redir = true;
2363 } else if (!job_assumes(j, rsz != -1)) {
2364 close_log_redir = true;
2365 } else {
2366 buf[rsz] = '\0';
2367
2368 while ((msg = strsep(&bufindex, "\n\r"))) {
2369 if (msg[0]) {
2370 job_log_stdouterr2(j, "%s", msg);
2371 }
2372 }
2373 }
2374
2375 free(buf);
2376
2377 if (close_log_redir) {
2378 job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
2379 j->log_redirect_fd = 0;
2380 job_dispatch(j, false);
2381 }
2382 }
2383
2384 void
2385 job_kill(job_t j)
2386 {
2387 if (!j->p || j->anonymous) {
2388 return;
2389 }
2390
2391 job_assumes(j, runtime_kill(j->p, SIGKILL) != -1);
2392
2393 j->sent_sigkill = true;
2394
2395 job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
2396 EV_ADD, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j) != -1);
2397
2398 job_log(j, LOG_DEBUG, "Sent SIGKILL signal.");
2399 }
2400
/*
 * EVFILT_PROC kevent handler for a single job: reacts to exec, fork and
 * exit notifications for the job's process.
 */
void
job_callback_proc(job_t j, int flags, int fflags)
{
	/*
	 * An anonymous job that exec()ed a new image gets relabeled after its
	 * new process name, so logs refer to what is actually running.
	 */
	if ((fflags & NOTE_EXEC) && j->anonymous) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		if (job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
			char newlabel[1000];

			snprintf(newlabel, sizeof(newlabel), "%p.%s", j, kp.kp_proc.p_comm);

			job_log(j, LOG_DEBUG, "Program changed. Updating the label to: %s", newlabel);

			/* Re-hash under the new label. */
			LIST_REMOVE(j, label_hash_sle);
			/*
			 * NOTE(review): strcpy into j->label assumes the original label
			 * allocation is large enough for the new "%p.p_comm" string —
			 * the allocation size isn't visible here; confirm it can't overflow.
			 */
			strcpy((char *)j->label, newlabel);
			LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "Called fork()");
	}

	if (fflags & NOTE_EXIT) {
		job_reap(j);

		/* Anonymous jobs exist only while their process does; others may respawn. */
		if (j->anonymous) {
			job_remove(j);
			j = NULL;
		} else {
			j = job_dispatch(j, false);
		}
	}

	/* NOTE_REAP sanity checking is disabled for now while we try and diagnose 5289559 */
#if 0
	if (j && (fflags & NOTE_REAP)) {
		job_assumes(j, flags & EV_ONESHOT);
		job_assumes(j, flags & EV_EOF);

		job_assumes(j, j->p == 0);
	}
#endif
}
2447
/*
 * EVFILT_TIMER kevent handler. The timer identity (ident) distinguishes
 * which of the job's timers fired: the job pointer itself is the respawn
 * throttle, &j->semaphores is the keepalive poll, &j->start_interval is
 * the StartInterval timer, and &j->exit_timeout is the shutdown grace timer.
 */
void
job_callback_timer(job_t j, void *ident)
{
	if (j == ident) {
		/* Respawn-throttle expired: force a start. */
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (j->sent_sigkill) {
			/* SIGKILL was already sent and the process is *still* alive. */
			uint64_t td = (mach_absolute_time() - j->sent_sigterm_time) * tbi.numer / tbi.denom;

			td /= NSEC_PER_SEC;
			td -= j->exit_timeout;

			job_log(j, LOG_ERR, "Did not die after sending SIGKILL %llu seconds ago...", td);
		} else {
			/* Grace period after SIGTERM elapsed: sample, optionally debug, then SIGKILL. */
			job_force_sampletool(j);
			if (j->debug_before_kill) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger.");
				job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
			}
			job_log(j, LOG_WARNING, "Exit timeout elapsed (%u seconds). Killing.", j->exit_timeout);
			job_kill(j);
		}
	} else {
		job_assumes(j, false);
	}
}
2479
2480 void
2481 job_callback_read(job_t j, int ident)
2482 {
2483 if (ident == j->log_redirect_fd) {
2484 job_log_stdouterr(j);
2485 } else {
2486 socketgroup_callback(j);
2487 }
2488 }
2489
2490 void
2491 jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
2492 {
2493 jobmgr_t jmi;
2494 job_t j;
2495
2496 SLIST_FOREACH(jmi, &jm->submgrs, sle) {
2497 jobmgr_reap_bulk(jmi, kev);
2498 }
2499
2500 if ((j = jobmgr_find_by_pid(jm, kev->ident, false))) {
2501 kev->udata = j;
2502 job_callback(j, kev);
2503 }
2504 }
2505
2506 void
2507 jobmgr_callback(void *obj, struct kevent *kev)
2508 {
2509 jobmgr_t jm = obj;
2510
2511 switch (kev->filter) {
2512 case EVFILT_PROC:
2513 jobmgr_reap_bulk(jm, kev);
2514 if (launchd_assumes(root_jobmgr != NULL)) {
2515 root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
2516 }
2517 break;
2518 case EVFILT_SIGNAL:
2519 switch (kev->ident) {
2520 case SIGTERM:
2521 return launchd_shutdown();
2522 case SIGUSR1:
2523 return calendarinterval_callback();
2524 default:
2525 return (void)jobmgr_assumes(jm, false);
2526 }
2527 break;
2528 case EVFILT_FS:
2529 if (kev->fflags & VQ_MOUNT) {
2530 jobmgr_dispatch_all(jm, true);
2531 }
2532 jobmgr_dispatch_all_semaphores(jm);
2533 break;
2534 case EVFILT_TIMER:
2535 if (jobmgr_assumes(jm, kev->ident == (uintptr_t)&sorted_calendar_events)) {
2536 calendarinterval_callback();
2537 }
2538 break;
2539 default:
2540 return (void)jobmgr_assumes(jm, false);
2541 }
2542 }
2543
2544 void
2545 job_callback(void *obj, struct kevent *kev)
2546 {
2547 job_t j = obj;
2548
2549 job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
2550
2551 switch (kev->filter) {
2552 case EVFILT_PROC:
2553 return job_callback_proc(j, kev->flags, kev->fflags);
2554 case EVFILT_TIMER:
2555 return job_callback_timer(j, (void *)kev->ident);
2556 case EVFILT_VNODE:
2557 return semaphoreitem_callback(j, kev);
2558 case EVFILT_READ:
2559 return job_callback_read(j, kev->ident);
2560 case EVFILT_MACHPORT:
2561 return (void)job_dispatch(j, true);
2562 default:
2563 return (void)job_assumes(j, false);
2564 }
2565 }
2566
/*
 * Fork and launch the job's process: throttle rapid respawns, set up the
 * IPC and stdout/stderr plumbing, fork, and wire up the child's kevents.
 * The child side never returns (job_start_child() execs or exits).
 */
void
job_start(job_t j)
{
	uint64_t td, tnow = mach_absolute_time();
	int spair[2];
	int execspair[2];
	int oepair[2];
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = /* NOTE_EXEC|NOTE_FORK| */ NOTE_EXIT /* |NOTE_REAP */;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (job_active(j)) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	job_assumes(j, tnow > j->start_time);

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of gettimeofday()
	 * or time() wherever possible.
	 */
	td = (tnow - j->start_time) * tbi.numer / tbi.denom;
	td /= NSEC_PER_SEC;

	/* Throttle jobs that respawn faster than their minimum run time. */
	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;

		/*
		 * We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */

		job_log(j, LOG_WARNING, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		job_assumes(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j) != -1);
		job_ignore(j);
		return;
	}

	j->sent_sigterm_time = 0;

	/* sipc: does this job need the check-in socket for sockets/Mach services? */
	if (!j->legacy_mach_job) {
		sipc = (!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices));
#if TARGET_OS_EMBEDDED
		if (j->username && strcmp(j->username, "mobile") == 0 && strncmp(j->label, "com.apple.", strlen("com.apple.")) != 0) {
			sipc = false;
		}
#endif
	}

	j->checkedin = false;

	if (sipc) {
		job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair) != -1);
	}

	/* execspair synchronizes parent kevent registration with the child's exec. */
	job_assumes(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair) != -1);

	/* oepair captures the child's stdout/stderr for logging. */
	if (!j->legacy_mach_job && job_assumes(j, pipe(oepair) != -1)) {
		j->log_redirect_fd = _fd(oepair[0]);
		job_assumes(j, fcntl(j->log_redirect_fd, F_SETFL, O_NONBLOCK) != -1);
		job_assumes(j, kevent_mod(j->log_redirect_fd, EVFILT_READ, EV_ADD, 0, 0, j) != -1);
	}

	j->start_time = tnow;

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		/* fork failed: unwind every descriptor we created above. */
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		job_assumes(j, runtime_close(execspair[0]) == 0);
		job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			job_assumes(j, runtime_close(spair[0]) == 0);
			job_assumes(j, runtime_close(spair[1]) == 0);
		}
		if (!j->legacy_mach_job) {
			job_assumes(j, runtime_close(oepair[0]) != -1);
			job_assumes(j, runtime_close(oepair[1]) != -1);
			j->log_redirect_fd = 0;
		}
		break;
	case 0:
		/* Child side: route stdio into the log pipe, wait for the parent, exec. */
		if (_vproc_post_fork_ping()) {
			_exit(EXIT_FAILURE);
		}
		if (!j->legacy_mach_job) {
			job_assumes(j, dup2(oepair[1], STDOUT_FILENO) != -1);
			job_assumes(j, dup2(oepair[1], STDERR_FILENO) != -1);
			job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		job_assumes(j, runtime_close(execspair[0]) == 0);
		/* wait for our parent to say they've attached a kevent to us */
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			job_assumes(j, runtime_close(spair[0]) == 0);
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		/* Parent side: record the child and arm its process kevent. */
		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->start_pending = false;

		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);

		if (JOB_BOOTCACHE_HACK_CHECK(j)) {
			did_first_per_user_launchd_BootCache_hack = true;
		}

		if (!j->legacy_mach_job) {
			job_assumes(j, runtime_close(oepair[1]) != -1);
		}
		j->p = c;
		if (j->hopefully_exits_first) {
			j->mgr->hopefully_first_cnt++;
		} else if (!j->hopefully_exits_last) {
			j->mgr->normal_active_cnt++;
		}
		j->forkfd = _fd(execspair[0]);
		job_assumes(j, runtime_close(execspair[1]) == 0);
		if (sipc) {
			job_assumes(j, runtime_close(spair[1]) == 0);
			ipc_open(_fd(spair[0]), j);
		}
		/* If we cannot watch the child, reap it immediately rather than leak it. */
		if (job_assumes(j, kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1)) {
			job_ignore(j);
		} else {
			job_reap(j);
		}

		if (!j->stall_before_exec) {
			job_uncork_fork(j);
		}
		break;
	}
}
2714
/*
 * Run "BootCacheControl tag" once, synchronously, before launching the
 * first per-user launchd (boot-time disk cache optimization hack).
 * Uses vfork(): the child does nothing but execve()/_exit(), as required.
 */
void
do_first_per_user_launchd_hack(void)
{
	char *bcct_tool[] = { "/usr/sbin/BootCacheControl", "tag", NULL };
	int dummystatus;
	pid_t bcp;

	if (launchd_assumes((bcp = vfork()) != -1)) {
		if (bcp == 0) {
			execve(bcct_tool[0], bcct_tool, environ);
			_exit(EXIT_FAILURE);
		} else {
			/* Block until the tool finishes; its exit status is ignored. */
			launchd_assumes(waitpid(bcp, &dummystatus, 0) != -1);
		}
	}
}
2731
/*
 * Child-side launch path: build argv (with optional globbing and the
 * launchproxy shim for inetd-compatible jobs), apply per-job attributes,
 * quarantine and sandbox settings, then replace this process image via
 * posix_spawn with POSIX_SPAWN_SETEXEC. Never returns.
 */
void
job_start_child(job_t j)
{
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	pid_t junk_pid;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC;
	size_t binpref_out_cnt = 0;
	int i;

	if (JOB_BOOTCACHE_HACK_CHECK(j)) {
		do_first_per_user_launchd_hack();
	}

	job_assumes(j, posix_spawnattr_init(&spattr) == 0);

	/* Drops privileges, sets limits/cwd/umask/stdio, etc. */
	job_setup_attributes(j);

	if (j->argv && j->globargv) {
		/* Glob each argument; gl_offs reserves slot 0 for launchproxy. */
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (j->argv) {
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	/* Non-inetd jobs skip the launchproxy shim in slot 0. */
	if (!j->inetcompat) {
		argv++;
	}

	if (j->wait4debugger) {
		job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

	job_assumes(j, posix_spawnattr_setflags(&spattr, spflags) == 0);

	/* Preferred CPU/binary architecture ordering, if configured. */
	if (j->j_binpref_cnt) {
		job_assumes(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt) == 0);
		job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			if (job_assumes(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
				job_assumes(j, qtn_proc_apply_to_self(qp) == 0);
			}
		}
	}
#endif

#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (!job_assumes(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf) != -1)) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}
#endif

	/*
	 * POSIX_SPAWN_SETEXEC makes these behave like exec(): on success they
	 * do not return, so reaching the log call below means failure (posix_spawn
	 * returns the error number, stored into errno for job_log_error).
	 */
	if (j->prog) {
		errno = posix_spawn(&junk_pid, j->inetcompat ? file2exec : j->prog, NULL, &spattr, (char *const*)argv, environ);
		job_log_error(j, LOG_ERR, "posix_spawn(\"%s\", ...)", j->prog);
	} else {
		errno = posix_spawnp(&junk_pid, j->inetcompat ? file2exec : argv[0], NULL, &spattr, (char *const*)argv, environ);
		job_log_error(j, LOG_ERR, "posix_spawnp(\"%s\", ...)", argv[0]);
	}

out_bad:
	_exit(EXIT_FAILURE);
}
2832
2833 void
2834 jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
2835 {
2836 launch_data_t tmp;
2837 struct envitem *ei;
2838 job_t ji;
2839
2840 if (jm->parentmgr) {
2841 jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
2842 } else {
2843 char **tmpenviron = environ;
2844 for (; *tmpenviron; tmpenviron++) {
2845 char envkey[1024];
2846 launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
2847 launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
2848 strncpy(envkey, *tmpenviron, sizeof(envkey));
2849 *(strchr(envkey, '=')) = '\0';
2850 launch_data_dict_insert(dict, s, envkey);
2851 }
2852 }
2853
2854 LIST_FOREACH(ji, &jm->jobs, sle) {
2855 SLIST_FOREACH(ei, &ji->global_env, sle) {
2856 if ((tmp = launch_data_new_string(ei->value))) {
2857 launch_data_dict_insert(dict, tmp, ei->key);
2858 }
2859 }
2860 }
2861 }
2862
2863 void
2864 jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
2865 {
2866 struct envitem *ei;
2867 job_t ji;
2868
2869 if (jm->parentmgr) {
2870 jobmgr_setup_env_from_other_jobs(jm->parentmgr);
2871 }
2872
2873 LIST_FOREACH(ji, &jm->jobs, sle) {
2874 SLIST_FOREACH(ei, &ji->global_env, sle) {
2875 setenv(ei->key, ei->value, 1);
2876 }
2877 }
2878 }
2879
/*
 * Diagnostic helper: scan the whole process table for processes running
 * with the job's mach_uid (real, effective, or saved) even though that
 * UID has no account backing it, and log them. Called when getpwuid()
 * fails for a job's UID so the log explains where the UID came from.
 */
void
job_find_and_blame_pids_with_weird_uids(job_t j)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;
	uid_t u = j->mach_uid;

#if TARGET_OS_EMBEDDED
	if (!do_apple_internal_magic) {
		return;
	}
#endif
	kp = malloc(len);

	if (!job_assumes(j, kp != NULL)) {
		return;
	}
	/* sysctl shrinks len to the bytes actually returned. */
	if (!job_assumes(j, sysctl(mib, 3, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		uid_t i_euid = kp[i].kp_eproc.e_ucred.cr_uid;
		uid_t i_uid = kp[i].kp_eproc.e_pcred.p_ruid;
		uid_t i_svuid = kp[i].kp_eproc.e_pcred.p_svuid;
		pid_t i_pid = kp[i].kp_proc.p_pid;

		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u",
				i_pid, kp[i].kp_proc.p_comm, i_uid, i_euid, i_svuid);

/* Temporarily disabled due to 5423935 and 4946119. */
#if 0
		/* Ask the accountless process to exit. */
		job_assumes(j, runtime_kill(i_pid, SIGTERM) != -1);
#endif
	}

out:
	free(kp);
}
2927
2928 void
2929 job_enable_audit_for_user(job_t j, uid_t u, char *name)
2930 {
2931 auditinfo_t auinfo = {
2932 .ai_auid = u,
2933 .ai_asid = j->p,
2934 };
2935 long au_cond;
2936
2937 if (!job_assumes(j, auditon(A_GETCOND, &au_cond, sizeof(long)) == 0)) {
2938 _exit(EXIT_FAILURE);
2939 }
2940
2941 if (au_cond != AUC_NOAUDIT) {
2942 if (!job_assumes(j, au_user_mask(name, &auinfo.ai_mask) == 0)) {
2943 _exit(EXIT_FAILURE);
2944 } else if (!job_assumes(j, setaudit(&auinfo) == 0)) {
2945 _exit(EXIT_FAILURE);
2946 }
2947 }
2948 }
2949
/*
 * Child-side privilege drop: resolve the job's target user/group, set up
 * auditing, login name, GID, supplementary groups and finally UID (in
 * that order, which is security-critical), then export the standard
 * account environment variables. Any failure is fatal to the child.
 * No-op when not running as root or when the job names no user.
 */
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	if (getuid() != 0) {
		return;
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = getpwnam(j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(EXIT_FAILURE);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_find_and_blame_pids_with_weird_uids(j);
			_exit(EXIT_FAILURE);
		}
	} else {
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	/* Refuse to run jobs as an expired account. */
	if (pwe->pw_expire && time(NULL) >= pwe->pw_expire) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	if (j->username && strcmp(j->username, loginname) != 0) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (j->mach_uid && (j->mach_uid != desired_uid)) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	/* An explicit GroupName overrides the account's primary group. */
	if (j->groupname) {
		struct group *gre;

		if ((gre = getgrnam(j->groupname)) == NULL) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(EXIT_FAILURE);
		}

		desired_gid = gre->gr_gid;
	}

	job_enable_audit_for_user(j, desired_uid, loginname);

	if (!job_assumes(j, setlogin(loginname) != -1)) {
		_exit(EXIT_FAILURE);
	}

	if (!job_assumes(j, setgid(desired_gid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (!j->no_init_groups) {
		if (!job_assumes(j, initgroups(loginname, desired_gid) != -1)) {
			_exit(EXIT_FAILURE);
		}
	}

	/* setuid() must come last: after this we can no longer change groups. */
	if (!job_assumes(j, setuid(desired_uid) != -1)) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (r > 0 && r < sizeof(tmpdirpath)) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	/* Do not overwrite values the job's own plist already set (overwrite=0). */
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
3063
/*
 * Child-side (pre-exec) setup of all per-job process attributes: nice,
 * resource limits, session, I/O policy, chroot (before the user switch,
 * so the root dir is resolved with launchd's privileges), user identity,
 * working directory, umask, stdio redirection, environment, and process
 * group / session membership.
 */
void
job_setup_attributes(job_t j)
{
	struct limititem *li;
	struct envitem *ei;

	if (j->setnice) {
		job_assumes(j, setpriority(PRIO_PROCESS, 0, j->nice) != -1);
	}

	SLIST_FOREACH(li, &j->limits, sle) {
		struct rlimit rl;

		/* Read-modify-write so an unset half (soft/hard) keeps its current value. */
		if (!job_assumes(j, getrlimit(li->which, &rl) != -1)) {
			continue;
		}

		if (li->sethard) {
			rl.rlim_max = li->lim.rlim_max;
		}
		if (li->setsoft) {
			rl.rlim_cur = li->lim.rlim_cur;
		}

		if (setrlimit(li->which, &rl) == -1) {
			job_log_error(j, LOG_WARNING, "setrlimit()");
		}
	}

	if (!j->inetcompat && j->session_create) {
		launchd_SessionCreate();
	}

	if (j->low_pri_io) {
		job_assumes(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) != -1);
	}
	if (j->rootdir) {
		job_assumes(j, chroot(j->rootdir) != -1);
		job_assumes(j, chdir(".") != -1);
	}

	/* Drop privileges; must precede workingdir so the chdir honors the target user's perms. */
	job_postfork_become_user(j);

	if (j->workingdir) {
		job_assumes(j, chdir(j->workingdir) != -1);
	}

	if (j->setmask) {
		umask(j->mask);
	}

	job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_APPEND|O_CREAT);
	job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_APPEND|O_CREAT);

	/* Inherited (global) environment first, then job-specific overrides. */
	jobmgr_setup_env_from_other_jobs(j->mgr);

	SLIST_FOREACH(ei, &j->env, sle) {
		setenv(ei->key, ei->value, 1);
	}

	/*
	 * We'd like to call setsid() unconditionally, but we have reason to
	 * believe that prevents launchd from being able to send signals to
	 * setuid children. We'll settle for process-groups.
	 */
	if (getppid() != 1) {
		job_assumes(j, setpgid(0, 0) != -1);
	} else {
		job_assumes(j, setsid() != -1);
	}
}
3135
3136 void
3137 job_setup_fd(job_t j, int target_fd, const char *path, int flags)
3138 {
3139 int fd;
3140
3141 if (!path) {
3142 return;
3143 }
3144
3145 if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
3146 job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
3147 return;
3148 }
3149
3150 job_assumes(j, dup2(fd, target_fd) != -1);
3151 job_assumes(j, runtime_close(fd) == 0);
3152 }
3153
3154 int
3155 dir_has_files(job_t j, const char *path)
3156 {
3157 DIR *dd = opendir(path);
3158 struct dirent *de;
3159 bool r = 0;
3160
3161 if (!dd) {
3162 return -1;
3163 }
3164
3165 while ((de = readdir(dd))) {
3166 if (strcmp(de->d_name, ".") && strcmp(de->d_name, "..")) {
3167 r = 1;
3168 break;
3169 }
3170 }
3171
3172 job_assumes(j, closedir(dd) == 0);
3173 return r;
3174 }
3175
/*
 * Compute the next fire time for a calendar interval, insert it into the
 * global fire-time-sorted list, and (re)arm the single shared absolute
 * timer for whichever event is now soonest.
 */
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	/*
	 * cron semantics: when both a weekday and a day-of-month are given,
	 * the event fires at whichever comes first.
	 */
	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	/* Insertion sort by when_next, keeping the soonest event at the head. */
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		/* ci must want to fire after every other timer, or there are no timers */

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	/* Workaround 5225889 */
	kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_DELETE, 0, 0, root_jobmgr);

	if (job_assumes(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr) != -1)) {
		char time_string[100];
		size_t time_string_len;

		/* ctime_r appends a trailing newline; strip it for clean logging. */
		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		if (time_string_len && time_string[time_string_len - 1] == '\n') {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
3234
/*
 * Pull the bare revision number out of an RCS id string such as
 * "$Revision: 23714 $": copy everything after the first space into o,
 * then truncate at the next space. If the input has no space at all,
 * the whole string is copied (truncated to osz).
 */
void
extract_rcsid_substr(const char *i, char *o, size_t osz)
{
	const char *start = strchr(i, ' ');
	char *end;

	if (start == NULL) {
		snprintf(o, osz, "%s", i);
		return;
	}

	snprintf(o, osz, "%s", start + 1);

	end = strchr(o, ' ');
	if (end != NULL) {
		*end = '\0';
	}
}
3250
3251 void
3252 jobmgr_log_bug(jobmgr_t jm, const char *rcs_rev, const char *path, unsigned int line, const char *test)
3253 {
3254 int saved_errno = errno;
3255 const char *file = strrchr(path, '/');
3256 char buf[100];
3257
3258 extract_rcsid_substr(rcs_rev, buf, sizeof(buf));
3259
3260 if (!file) {
3261 file = path;
3262 } else {
3263 file += 1;
3264 }
3265
3266 jobmgr_log(jm, LOG_NOTICE, "Bug: %s:%u (%s):%u: %s", file, line, buf, saved_errno, test);
3267 }
3268
3269 void
3270 job_log_bug(job_t j, const char *rcs_rev, const char *path, unsigned int line, const char *test)
3271 {
3272 int saved_errno = errno;
3273 const char *file = strrchr(path, '/');
3274 char buf[100];
3275
3276 extract_rcsid_substr(rcs_rev, buf, sizeof(buf));
3277
3278 if (!file) {
3279 file = path;
3280 } else {
3281 file += 1;
3282 }
3283
3284 job_log(j, LOG_NOTICE, "Bug: %s:%u (%s):%u: %s", file, line, buf, saved_errno, test);
3285 }
3286
/*
 * Core per-job logger: prefixes the message with strerror(err) when err
 * is non-zero, honors the job's debug log mask, and attributes the
 * message to the job via runtime_syslog_attr.
 */
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	struct runtime_syslog_attr attr = { "com.apple.launchd", j->label, j->mgr->name, pri, getuid(), getpid(), j->p };
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	/*
	 * Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(), but before the exec*(). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	/* 200 extra bytes leaves room for the strerror() suffix. */
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s", msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s", msg);
	}

	/* Debug jobs get their LOG_DEBUG messages through regardless of the global mask. */
	if (j->debug) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	runtime_vsyslog(&attr, newmsg, ap);

	if (j->debug) {
		setlogmask(oldmask);
	}
}
3323
/* Convenience wrapper: job_logv() with the current errno appended. */
void
job_log_error(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, errno, msg, ap);
	va_end(ap);
}
3333
/* Convenience wrapper: job_logv() with no errno suffix. */
void
job_log(job_t j, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, 0, msg, ap);
	va_end(ap);
}
3343
/*
 * NOTE(review): compiled out — currently no caller needs an errno-annotated
 * job-manager log. Kept for symmetry with job_log_error() should one appear.
 */
#if 0
void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}
#endif
3355
3356 void
3357 jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
3358 {
3359 va_list ap;
3360
3361 va_start(ap, msg);
3362 jobmgr_logv(jm, pri, 0, msg, ap);
3363 va_end(ap);
3364 }
3365
3366 void
3367 jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
3368 {
3369 char *newmsg;
3370 char *newname;
3371 size_t i, o, jmname_len = strlen(jm->name), newmsgsz;
3372
3373 newname = alloca((jmname_len + 1) * 2);
3374 newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
3375 newmsg = alloca(newmsgsz);
3376
3377 for (i = 0, o = 0; i < jmname_len; i++, o++) {
3378 if (jm->name[i] == '%') {
3379 newname[o] = '%';
3380 o++;
3381 }
3382 newname[o] = jm->name[i];
3383 }
3384 newname[o] = '\0';
3385
3386 if (err) {
3387 snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
3388 } else {
3389 snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
3390 }
3391
3392 if (jm->parentmgr) {
3393 jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
3394 } else {
3395 struct runtime_syslog_attr attr = { "com.apple.launchd", "com.apple.launchd", jm->name, pri, getuid(), getpid(), getpid() };
3396
3397 runtime_vsyslog(&attr, newmsg, ap);
3398 }
3399 }
3400
/* Detach the EVFILT_VNODE kevent for a path semaphore, if one is armed. */
void
semaphoreitem_ignore(job_t j, struct semaphoreitem *si)
{
	if (si->fd == -1) {
		return;
	}

	job_log(j, LOG_DEBUG, "Ignoring Vnode: %d", si->fd);
	job_assumes(j, kevent_mod(si->fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL) != -1);
}
3409
/*
 * (Re)attach an EVFILT_VNODE kevent for a path-based KeepAlive semaphore.
 * If the path itself cannot be opened, the parent directory is watched
 * instead so we notice when the path appears. Falls back to a 3-second
 * polling timer on filesystems without kqueue support (e.g. NFS, /dev).
 */
void
semaphoreitem_watch(job_t j, struct semaphoreitem *si)
{
	char *parentdir, tmp_path[PATH_MAX];
	const char *which_path = si->what;
	int saved_errno = 0;
	int fflags = 0;

	/* Pick the vnode events relevant to this semaphore type. */
	switch (si->why) {
	case PATH_EXISTS:
		fflags = NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE;
		break;
	case PATH_MISSING:
		fflags = NOTE_DELETE|NOTE_RENAME;
		break;
	case DIR_NOT_EMPTY:
	case PATH_CHANGES:
		fflags = NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE|NOTE_EXTEND|NOTE_WRITE|NOTE_ATTRIB|NOTE_LINK;
		break;
	default:
		/* Not a path-based semaphore; nothing to watch. */
		return;
	}

	/* dirname() may modify tmp_path */
	strlcpy(tmp_path, si->what, sizeof(tmp_path));

	if (!job_assumes(j, (parentdir = dirname(tmp_path)))) {
		return;
	}

	/* See 5321044 for why we do the do-while loop and 5415523 for why ENOENT is checked */
	do {
		if (si->fd == -1) {
			/* O_EVTONLY: open for event notification only, so the watch
			 * doesn't pin the volume against unmounting. */
			if ((si->fd = _fd(open(which_path, O_EVTONLY|O_NOCTTY))) == -1) {
				which_path = parentdir;
				si->fd = _fd(open(which_path, O_EVTONLY|O_NOCTTY));
			}
		}

		if (si->fd == -1) {
			return job_log_error(j, LOG_ERR, "Path monitoring failed on \"%s\"", which_path);
		}

		job_log(j, LOG_DEBUG, "Watching Vnode: %d", si->fd);

		if (kevent_mod(si->fd, EVFILT_VNODE, EV_ADD, fflags, 0, j) == -1) {
			saved_errno = errno;
			/*
			 * The FD can be revoked between the open() and kevent().
			 * This is similar to the inability for kevents to be
			 * attached to short lived zombie processes after fork()
			 * but before kevent().
			 */
			job_assumes(j, runtime_close(si->fd) == 0);
			si->fd = -1;
		}
	} while ((si->fd == -1) && (saved_errno == ENOENT));

	if (saved_errno == ENOTSUP) {
		/*
		 * 3524219 NFS needs kqueue support
		 * 4124079 VFS needs generic kqueue support
		 * 5226811 EVFILT: Launchd EVFILT_VNODE doesn't work on /dev
		 */
		job_log(j, LOG_DEBUG, "Falling back to polling for path: %s", si->what);

		if (!j->poll_for_vfs_changes) {
			j->poll_for_vfs_changes = true;
			/* One shared timer per job keys off &j->semaphores. */
			job_assumes(j, kevent_mod((uintptr_t)&j->semaphores, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 3, j) != -1);
		}
	}
}
3482
/*
 * EVFILT_VNODE callback for a job's path-based KeepAlive semaphores. Finds
 * the semaphoreitem whose FD fired, closes the FD if the vnode became
 * invalid (semaphoreitem_watch() will reopen it later), and dispatches the
 * job so its keep-alive criteria get re-evaluated.
 */
void
semaphoreitem_callback(job_t j, struct kevent *kev)
{
	char invalidation_reason[100] = "";
	struct semaphoreitem *si;

	/* Locate the path-based semaphore owning this kevent ident (an FD).
	 * 'si' is NULL after the loop when no entry matched. */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		switch (si->why) {
		case PATH_CHANGES:
		case PATH_EXISTS:
		case PATH_MISSING:
		case DIR_NOT_EMPTY:
			break;
		default:
			/* Non-path semaphores never own a vnode kevent. */
			continue;
		}

		if (si->fd == (int)kev->ident) {
			break;
		}
	}

	if (!job_assumes(j, si != NULL)) {
		return;
	}

	/* Build a human-readable list of the flags that invalidated the vnode;
	 * 100 bytes comfortably holds "deleted/renamed/revoked". */
	if (NOTE_DELETE & kev->fflags) {
		strcat(invalidation_reason, "deleted");
	}

	if (NOTE_RENAME & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/renamed");
		} else {
			strcat(invalidation_reason, "renamed");
		}
	}

	if (NOTE_REVOKE & kev->fflags) {
		if (invalidation_reason[0]) {
			strcat(invalidation_reason, "/revoked");
		} else {
			strcat(invalidation_reason, "revoked");
		}
	}

	if (invalidation_reason[0]) {
		job_log(j, LOG_DEBUG, "Path %s: %s", invalidation_reason, si->what);
		job_assumes(j, runtime_close(si->fd) == 0);
		si->fd = -1; /* this will get fixed in semaphoreitem_watch() */
	}

	job_log(j, LOG_DEBUG, "Watch path modified: %s", si->what);

	if (si->why == PATH_CHANGES) {
		j->start_pending = true;
	}

	job_dispatch(j, false);
}
3543
3544 void
3545 calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
3546 {
3547 struct tm *tmptm = context;
3548 int64_t val;
3549
3550 if (LAUNCH_DATA_INTEGER != launch_data_get_type(obj)) {
3551 /* hack to let caller know something went wrong */
3552 tmptm->tm_sec = -1;
3553 return;
3554 }
3555
3556 val = launch_data_get_integer(obj);
3557
3558 if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
3559 tmptm->tm_min = val;
3560 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
3561 tmptm->tm_hour = val;
3562 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
3563 tmptm->tm_mday = val;
3564 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
3565 tmptm->tm_wday = val;
3566 } else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
3567 tmptm->tm_mon = val;
3568 tmptm->tm_mon -= 1; /* 4798263 cron compatibility */
3569 }
3570 }
3571
3572 bool
3573 calendarinterval_new_from_obj(job_t j, launch_data_t obj)
3574 {
3575 struct tm tmptm;
3576
3577 memset(&tmptm, 0, sizeof(0));
3578
3579 tmptm.tm_min = -1;
3580 tmptm.tm_hour = -1;
3581 tmptm.tm_mday = -1;
3582 tmptm.tm_wday = -1;
3583 tmptm.tm_mon = -1;
3584
3585 if (!job_assumes(j, obj != NULL)) {
3586 return false;
3587 }
3588
3589 if (LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj)) {
3590 return false;
3591 }
3592
3593 launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &tmptm);
3594
3595 if (tmptm.tm_sec == -1) {
3596 return false;
3597 }
3598
3599 return calendarinterval_new(j, &tmptm);
3600 }
3601
3602 bool
3603 calendarinterval_new(job_t j, struct tm *w)
3604 {
3605 struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
3606
3607 if (!job_assumes(j, ci != NULL)) {
3608 return false;
3609 }
3610
3611 ci->when = *w;
3612 ci->job = j;
3613
3614 SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
3615
3616 calendarinterval_setalarm(j, ci);
3617
3618 runtime_add_ref();
3619
3620 return true;
3621 }
3622
3623 void
3624 calendarinterval_delete(job_t j, struct calendarinterval *ci)
3625 {
3626 SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
3627 LIST_REMOVE(ci, global_sle);
3628
3629 free(ci);
3630
3631 runtime_del_ref();
3632 }
3633
3634 void
3635 calendarinterval_sanity_check(void)
3636 {
3637 struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
3638 time_t now = time(NULL);
3639
3640 if (ci && (ci->when_next < now)) {
3641 jobmgr_assumes(root_jobmgr, raise(SIGUSR1) != -1);
3642 }
3643 }
3644
/*
 * Fired when the calendar timer expires: start every job whose next
 * occurrence has arrived, and re-arm each one for its following occurrence.
 */
void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	/* The global list is sorted by when_next, so stop at the first future event. */
	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		if (ci->when_next > now) {
			break;
		}

		/* setalarm() recomputes when_next and re-inserts ci into the sorted list. */
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
3665
3666 bool
3667 socketgroup_new(job_t j, const char *name, int *fds, unsigned int fd_cnt, bool junkfds)
3668 {
3669 struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
3670
3671 if (!job_assumes(j, sg != NULL)) {
3672 return false;
3673 }
3674
3675 sg->fds = calloc(1, fd_cnt * sizeof(int));
3676 sg->fd_cnt = fd_cnt;
3677 sg->junkfds = junkfds;
3678
3679 if (!job_assumes(j, sg->fds != NULL)) {
3680 free(sg);
3681 return false;
3682 }
3683
3684 memcpy(sg->fds, fds, fd_cnt * sizeof(int));
3685 strcpy(sg->name_init, name);
3686
3687 SLIST_INSERT_HEAD(&j->sockets, sg, sle);
3688
3689 runtime_add_ref();
3690
3691 return true;
3692 }
3693
/*
 * Close every descriptor in the group, unlink it from the job, free its
 * storage, and release the runtime reference taken in socketgroup_new().
 */
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		/* 5480306 */
		if (job_assumes(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			job_assumes(j, unlink(sun->sun_path) != -1);
			/* We might conditionally need to delete a directory here */
		}
#endif
		job_assumes(j, runtime_close(sg->fds[i]) != -1);
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	/* The fd array is a separate allocation; the name lives inside sg. */
	free(sg->fds);
	free(sg);

	runtime_del_ref();
}
3722
/*
 * Add or remove EVFILT_READ kevents for every socket in the group in one
 * bulk call, then verify each per-FD result slot.
 */
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	struct kevent kev[sg->fd_cnt];
	char buf[10000] = "";	/* fix: was uninitialized, yet logged via "%s" even when fd_cnt == 0 */
	unsigned int i, buf_off = 0;

	if (sg->junkfds) {
		return;
	}

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		/* Fix: clamp buf_off so 'sizeof(buf) - buf_off' can never
		 * underflow once snprintf() reports truncation (it returns the
		 * would-have-been length, which can exceed the space left). */
		if (buf_off < sizeof(buf)) {
			int l = snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
			if (l > 0) {
				buf_off += (unsigned int)l;
			}
		}
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	job_assumes(j, kevent_bulk_mod(kev, sg->fd_cnt) != -1);

	/* Bulk registration reports status per-FD via EV_ERROR + data. */
	for (i = 0; i < sg->fd_cnt; i++) {
		job_assumes(j, kev[i].flags & EV_ERROR);
		errno = kev[i].data;	/* so job_assumes() logs the right errno */
		job_assumes(j, kev[i].data == 0);
	}
}
3749
3750 void
3751 socketgroup_ignore(job_t j, struct socketgroup *sg)
3752 {
3753 socketgroup_kevent_mod(j, sg, false);
3754 }
3755
3756 void
3757 socketgroup_watch(job_t j, struct socketgroup *sg)
3758 {
3759 socketgroup_kevent_mod(j, sg, true);
3760 }
3761
3762 void
3763 socketgroup_callback(job_t j)
3764 {
3765 job_dispatch(j, true);
3766 }
3767
3768 bool
3769 envitem_new(job_t j, const char *k, const char *v, bool global)
3770 {
3771 struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
3772
3773 if (!job_assumes(j, ei != NULL)) {
3774 return false;
3775 }
3776
3777 strcpy(ei->key_init, k);
3778 ei->value = ei->key_init + strlen(k) + 1;
3779 strcpy(ei->value, v);
3780
3781 if (global) {
3782 SLIST_INSERT_HEAD(&j->global_env, ei, sle);
3783 } else {
3784 SLIST_INSERT_HEAD(&j->env, ei, sle);
3785 }
3786
3787 job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
3788
3789 return true;
3790 }
3791
3792 void
3793 envitem_delete(job_t j, struct envitem *ei, bool global)
3794 {
3795 if (global) {
3796 SLIST_REMOVE(&j->global_env, ei, envitem, sle);
3797 } else {
3798 SLIST_REMOVE(&j->env, ei, envitem, sle);
3799 }
3800
3801 free(ei);
3802 }
3803
3804 void
3805 envitem_setup(launch_data_t obj, const char *key, void *context)
3806 {
3807 job_t j = context;
3808
3809 if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
3810 return;
3811 }
3812
3813 envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
3814 }
3815
3816 bool
3817 limititem_update(job_t j, int w, rlim_t r)
3818 {
3819 struct limititem *li;
3820
3821 SLIST_FOREACH(li, &j->limits, sle) {
3822 if (li->which == w) {
3823 break;
3824 }
3825 }
3826
3827 if (li == NULL) {
3828 li = calloc(1, sizeof(struct limititem));
3829
3830 if (!job_assumes(j, li != NULL)) {
3831 return false;
3832 }
3833
3834 SLIST_INSERT_HEAD(&j->limits, li, sle);
3835
3836 li->which = w;
3837 }
3838
3839 if (j->importing_hard_limits) {
3840 li->lim.rlim_max = r;
3841 li->sethard = true;
3842 } else {
3843 li->lim.rlim_cur = r;
3844 li->setsoft = true;
3845 }
3846
3847 return true;
3848 }
3849
3850 void
3851 limititem_delete(job_t j, struct limititem *li)
3852 {
3853 SLIST_REMOVE(&j->limits, li, limititem, sle);
3854
3855 free(li);
3856 }
3857
#if HAVE_SANDBOX
/* Dictionary-iteration callback: translate one boolean sandbox flag from
 * the plist into the job's seatbelt_flags bitmask. */
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (LAUNCH_DATA_BOOL != launch_data_get_type(obj)) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	/* A false flag is simply a no-op. */
	if (!launch_data_get_bool(obj)) {
		return;
	}

	if (0 == strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED)) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
3878
3879 void
3880 limititem_setup(launch_data_t obj, const char *key, void *context)
3881 {
3882 job_t j = context;
3883 int i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
3884 rlim_t rl;
3885
3886 if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
3887 return;
3888 }
3889
3890 rl = launch_data_get_integer(obj);
3891
3892 for (i = 0; i < limits_cnt; i++) {
3893 if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
3894 break;
3895 }
3896 }
3897
3898 if (i == limits_cnt) {
3899 return;
3900 }
3901
3902 limititem_update(j, launchd_keys2limits[i].val, rl);
3903 }
3904
/*
 * Decide whether an exited job's record should be removed entirely rather
 * than kept around for a possible respawn. Returns true when the record is
 * garbage.
 */
bool
job_useless(job_t j)
{
	/* Yes, j->unload_at_exit and j->only_once seem the same, but they'll differ someday... */

	if ((j->unload_at_exit || j->only_once) && j->start_time != 0) {
		/* Someone still holds the job's bootstrap port; keep the record alive. */
		if (j->unload_at_exit && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->mgr->shutting_down && (j->hopefully_exits_first || j->mgr->hopefully_first_cnt == 0)) {
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		return true;
	} else if (j->legacy_mach_job) {
		/* A legacy Mach job with no services left has nothing to offer. */
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	}

	return false;
}
3934
/*
 * Evaluate every KeepAlive criterion for job 'j'. Returns true as soon as
 * any criterion says the job should be (re)started; false means all of them
 * agree it should stay stopped.
 */
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	struct stat sb;
	/* "Good" exit == normal termination with status 0. */
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);

	/* Never restart anything while this job manager is shutting down. */
	if (j->mgr->shutting_down) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (j->mgr->global_on_demand_cnt > 0 && strcmp(j->label, "com.apple.kextd") != 0) {
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	/* Restart if any of the job's Mach services already has messages queued. */
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: job restarted due to %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}


	/* Each "wanted_state = true" case deliberately falls through: the pair
	 * of enum values shares one comparison, differing only in polarity. */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		int qdir_file_cnt;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			/* FALLTHROUGH */
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			/* FALLTHROUGH */
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		case PATH_EXISTS:
			wanted_state = true;
			/* FALLTHROUGH */
		case PATH_MISSING:
			if ((bool)(stat(si->what, &sb) == 0) == wanted_state) {
				if (si->fd != -1) {
					/* Drop the vnode watch; the watched condition fired. */
					job_assumes(j, runtime_close(si->fd) == 0);
					si->fd = -1;
				}
				job_log(j, LOG_DEBUG, "KeepAlive: The following path %s: %s", wanted_state ? "exists" : "is missing", si->what);
				return true;
			}
			break;
		case PATH_CHANGES:
			/* Edge-triggered via the EVFILT_VNODE callback, not polled here. */
			break;
		case DIR_NOT_EMPTY:
			if (-1 == (qdir_file_cnt = dir_has_files(j, si->what))) {
				job_log_error(j, LOG_ERR, "Failed to count the number of files in \"%s\"", si->what);
			} else if (qdir_file_cnt > 0) {
				job_log(j, LOG_DEBUG, "KeepAlive: Directory is not empty: %s", si->what);
				return true;
			}
			break;
		}
	}

	return false;
}
4050
4051 const char *
4052 job_prog(job_t j)
4053 {
4054 if (j->prog) {
4055 return j->prog;
4056 } else if (j->argv) {
4057 return j->argv[0];
4058 } else {
4059 return "";
4060 }
4061 }
4062
/*
 * Report why a job is still considered active, or NULL once it is fully
 * idle. Side effect: stale stdout/stderr redirect FDs are reclaimed along
 * the way.
 */
const char *
job_active(job_t j)
{
	struct machservice *ms;

	if (j->p) {
		return "PID is still valid";
	}

	/* During shutdown we stop waiting for stdout/stderr EOF. */
	if (j->mgr->shutting_down && j->log_redirect_fd) {
		job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
		j->log_redirect_fd = 0;
	}

	if (j->log_redirect_fd) {
		if (job_assumes(j, j->wait4pipe_eof)) {
			return "Standard out/error is still valid";
		} else {
			/* Shouldn't happen: redirect FD without wait4pipe_eof; reclaim it. */
			job_assumes(j, runtime_close(j->log_redirect_fd) != -1);
			j->log_redirect_fd = 0;
		}
	}

	if (j->priv_port_has_senders) {
		return "Privileged Port still has outstanding senders";
	}

	/* Only receive rights launchd owns (ms->recv) can keep a job active. */
	SLIST_FOREACH(ms, &j->machservices, sle) {
		if (ms->recv && ms->isActive) {
			return "Mach service is still active";
		}
	}

	return NULL;
}
4098
4099 void
4100 machservice_watch(job_t j, struct machservice *ms)
4101 {
4102 if (ms->recv) {
4103 job_assumes(j, runtime_add_mport(ms->port, NULL, 0) == KERN_SUCCESS);
4104 }
4105 }
4106
4107 void
4108 machservice_ignore(job_t j, struct machservice *ms)
4109 {
4110 job_assumes(j, runtime_remove_mport(ms->port) == KERN_SUCCESS);
4111 }
4112
/*
 * Destroy and recreate a service's receive right (used for ResetAtClose).
 * The generation counter is bumped so stale send rights can be told apart,
 * and the port-hash entry is re-linked because the port name changes.
 * Order matters: unhash, tear down old right, rebuild, re-hash.
 */
void
machservice_resetport(job_t j, struct machservice *ms)
{
	LIST_REMOVE(ms, port_hash_sle);
	job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
	ms->gen_num++;
	job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS);
	job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS);
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
4124
/*
 * Create a Mach service record named 'name' for job 'j'.
 *
 * If *serviceport is MACH_PORT_NULL, launchd allocates a receive right plus
 * a send right and returns the port through *serviceport (launchd keeps the
 * receive right: ms->recv). Otherwise the caller-provided port is adopted
 * as-is and the service starts out marked active.
 *
 * Returns the new record, or NULL on failure with nothing leaked.
 */
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	struct machservice *ms;

	/* Trailing storage holds the NUL-terminated service name inline. */
	if ((ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1)) == NULL) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->per_pid = pid_local;

	if (*serviceport == MACH_PORT_NULL) {
		if (!job_assumes(j, launchd_mport_create_recv(&ms->port) == KERN_SUCCESS)) {
			goto out_bad;
		}

		if (!job_assumes(j, launchd_mport_make_send(ms->port) == KERN_SUCCESS)) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		ms->port = *serviceport;
		ms->isActive = true;
	}

	/* Index the record by job, by name, and by port for the lookup paths. */
	SLIST_INSERT_HEAD(&j->machservices, ms, sle);
	LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	job_log(j, LOG_INFO, "Mach service added: %s", name);

	return ms;
out_bad2:
	job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
out_bad:
	free(ms);
	return NULL;
}
4166
4167 bootstrap_status_t
4168 machservice_status(struct machservice *ms)
4169 {
4170 if (ms->isActive) {
4171 return BOOTSTRAP_STATUS_ACTIVE;
4172 } else if (ms->job->ondemand) {
4173 return BOOTSTRAP_STATUS_ON_DEMAND;
4174 } else {
4175 return BOOTSTRAP_STATUS_INACTIVE;
4176 }
4177 }
4178
/*
 * Install the EXC_MASK_CRASH exception handler for a task. Handler
 * precedence: the job's named alternate handler service, then launchd's
 * internal handler, then the globally claimed exception server (if any).
 * With no target task, and only when running as PID 1, the handler is set
 * host-wide instead.
 */
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (j->alt_exc_handler) {
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (ms) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (j->internal_exc_handler) {
		exc_port = runtime_get_kernel_port();
	} else if (!exc_port) {
		return;
	}

	/* The thread-state flavor delivered to the handler is per-architecture. */
#if defined (__ppc__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (target_task) {
		job_assumes(j, task_set_exception_ports(target_task, EXC_MASK_CRASH, exc_port,
				EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
	} else if (getpid() == 1 && the_exception_server) {
		/* mach_host_self() returns a right we must deallocate when done. */
		mach_port_t mhp = mach_host_self();
		job_assumes(j, host_set_exception_ports(mhp, EXC_MASK_CRASH, the_exception_server,
				EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f) == KERN_SUCCESS);
		job_assumes(j, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
	}

}
4220
4221 void
4222 job_set_exeception_port(job_t j, mach_port_t port)
4223 {
4224 if (!the_exception_server) {
4225 the_exception_server = port;
4226 job_setup_exception_port(j, 0);
4227 } else {
4228 job_log(j, LOG_WARNING, "The exception server is already claimed!");
4229 }
4230 }
4231
4232 void
4233 machservice_setup_options(launch_data_t obj, const char *key, void *context)
4234 {
4235 struct machservice *ms = context;
4236 mach_port_t mhp = mach_host_self();
4237 int which_port;
4238 bool b;
4239
4240 if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
4241 return;
4242 }
4243
4244 switch (launch_data_get_type(obj)) {
4245 case LAUNCH_DATA_INTEGER:
4246 which_port = launch_data_get_integer(obj);
4247 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
4248 switch (which_port) {
4249 case TASK_KERNEL_PORT:
4250 case TASK_HOST_PORT:
4251 case TASK_NAME_PORT:
4252 case TASK_BOOTSTRAP_PORT:
4253 /* I find it a little odd that zero isn't reserved in the header */
4254 case 0:
4255 job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
4256 break;
4257 default:
4258 ms->special_port_num = which_port;
4259 SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
4260 break;
4261 }
4262 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && getpid() == 1) {
4263 if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
4264 job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
4265 } else {
4266 job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
4267 }
4268 }
4269 case LAUNCH_DATA_BOOL:
4270 b = launch_data_get_bool(obj);
4271 if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
4272 ms->debug_on_close = b;
4273 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
4274 ms->reset = b;
4275 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
4276 ms->hide = b;
4277 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
4278 job_set_exeception_port(ms->job, ms->port);
4279 } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
4280 ms->kUNCServer = b;
4281 job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
4282 }
4283 break;
4284 case LAUNCH_DATA_DICTIONARY:
4285 job_set_exeception_port(ms->job, ms->port);
4286 break;
4287 default:
4288 break;
4289 }
4290
4291 job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
4292 }
4293
4294 void
4295 machservice_setup(launch_data_t obj, const char *key, void *context)
4296 {
4297 job_t j = context;
4298 struct machservice *ms;
4299 mach_port_t p = MACH_PORT_NULL;
4300
4301 if ((ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
4302 job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
4303 return;
4304 }
4305
4306 if ((ms = machservice_new(j, key, &p, false)) == NULL) {
4307 job_log_error(j, LOG_WARNING, "Cannot add service: %s", key);
4308 return;
4309 }
4310
4311 ms->isActive = false;
4312
4313 if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
4314 launch_data_dict_iterate(obj, machservice_setup_options, ms);
4315 }
4316 }
4317
/*
 * One pass of shutdown-time garbage collection over a job manager and,
 * recursively, its sub-managers. Jobs are torn down in phases: "hopefully
 * first" jobs must already be gone, then ordinary jobs are stopped, then
 * "hopefully last" jobs. Returns the manager if it must persist, or NULL
 * once it has been removed.
 */
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	job_t ji, jn;

	/* Collect children first so empty sub-managers vanish before we decide our own fate. */
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	if (!jm->shutting_down) {
		return jm;
	}

	jobmgr_log(jm, LOG_DEBUG, "Garbage collecting.");

	/*
	 * Normally, we wait for all resources of a job (Unix PIDs/FDs and Mach ports)
	 * to reset before we consider the job truly dead and ready to be spawned again.
	 *
	 * In order to work around 5487724 and 3456090, we're going to call reboot()
	 * when the last PID dies and not wait for the associated resources to reset.
	 */
	if (getpid() == 1 && jm->parentmgr == NULL && total_children == 0) {
		jobmgr_log(jm, LOG_DEBUG, "About to force a call to: reboot(%s)", reboot_flags_to_C_names(jm->reboot_flags));
		runtime_closelog();
		jobmgr_assumes(jm, reboot(jm->reboot_flags) != -1);
	}

	/* Phase gate: all "hopefully first" jobs (ours and our parent's) must be gone. */
	if (jm->hopefully_first_cnt) {
		return jm;
	}

	if (jm->parentmgr && jm->parentmgr->shutting_down && jm->parentmgr->hopefully_first_cnt) {
		return jm;
	}

	/* Phase 1: remove inactive ordinary jobs, ask active ones to stop. */
	if (!jm->sent_stop_to_normal_jobs) {
		jobmgr_log(jm, LOG_DEBUG, "Asking \"normal\" jobs to exit.");

		LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
			if (!job_active(ji)) {
				job_remove(ji);
			} else if (!ji->hopefully_exits_last) {
				job_stop(ji);
			}
		}

		jm->sent_stop_to_normal_jobs = true;
	}

	if (jm->normal_active_cnt) {
		return jm;
	}

	/* Phase 2: with ordinary jobs gone, stop the "hopefully last" jobs. */
	if (!jm->sent_stop_to_hopefully_last_jobs) {
		jobmgr_log(jm, LOG_DEBUG, "Asking \"hopefully last\" jobs to exit.");

		LIST_FOREACH(ji, &jm->jobs, sle) {
			if (ji->p && ji->anonymous) {
				continue;
			} else if (ji->p && job_assumes(ji, ji->hopefully_exits_last)) {
				job_stop(ji);
			}
		}

		jm->sent_stop_to_hopefully_last_jobs = true;
	}

	if (!SLIST_EMPTY(&jm->submgrs)) {
		return jm;
	}

	/* Only anonymous (adopted, not spawned) jobs may remain when we self-destruct. */
	LIST_FOREACH(ji, &jm->jobs, sle) {
		if (!ji->anonymous) {
			return jm;
		}
	}

	jobmgr_log_stray_children(jm);
	jobmgr_remove(jm);
	return NULL;
}
4401
/*
 * At the tail end of shutdown — root job manager of PID 1 only — enumerate
 * every process still alive via sysctl(KERN_PROC_ALL) and log each survivor
 * as a stray.
 */
void
jobmgr_log_stray_children(jobmgr_t jm)
{
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	/* Size the buffer for the theoretical maximum number of processes. */
	size_t i, kp_cnt, len = sizeof(struct kinfo_proc) * get_kern_max_proc();
	struct kinfo_proc *kp;

#if TARGET_OS_EMBEDDED
	if (!do_apple_internal_magic) {
		return;
	}
#endif
	/* Only meaningful for the root job manager of PID 1. */
	if (jm->parentmgr || getpid() != 1) {
		return;
	}

	if (!jobmgr_assumes(jm, (kp = malloc(len)) != NULL)) {
		return;
	}
	if (!jobmgr_assumes(jm, sysctl(mib, 3, kp, &len, NULL, 0) != -1)) {
		goto out;
	}

	/* sysctl() shrank len to the number of bytes actually returned. */
	kp_cnt = len / sizeof(struct kinfo_proc);

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = kp[i].kp_proc.p_pid;
		pid_t pp_i = kp[i].kp_eproc.e_ppid;
		pid_t pg_i = kp[i].kp_eproc.e_pgid;
		const char *z = (kp[i].kp_proc.p_stat == SZOMB) ? "zombie " : "";
		const char *n = kp[i].kp_proc.p_comm;

		/* The kernel's idle "process" and launchd itself are expected survivors. */
		if (p_i == 0 || p_i == 1) {
			continue;
		}

		jobmgr_log(jm, LOG_WARNING, "Stray %sprocess at shutdown: PID %u PPID %u PGID %u %s", z, p_i, pp_i, pg_i, n);

		/*
		 * The kernel team requested that launchd not do this for Leopard.
		 * jobmgr_assumes(jm, runtime_kill(p_i, SIGKILL) != -1);
		 */
	}

out:
	free(kp);
}
4449
4450 jobmgr_t
4451 jobmgr_parent(jobmgr_t jm)
4452 {
4453 return jm->parentmgr;
4454 }
4455
4456 void
4457 job_uncork_fork(job_t j)
4458 {
4459 pid_t c = j->p;
4460
4461 job_log(j, LOG_DEBUG, "Uncorking the fork().");
4462 /* this unblocks the child and avoids a race
4463 * between the above fork() and the kevent_mod() */
4464 job_assumes(j, write(j->forkfd, &c, sizeof(c)) == sizeof(c));
4465 job_assumes(j, runtime_close(j->forkfd) != -1);
4466 j->forkfd = 0;
4467 }
4468
/*
 * Create a new job manager (bootstrap namespace).
 *
 * jm            - parent manager, or NULL when creating a root-level manager.
 * requestorport - port whose death tears this manager down; required for
 *                 any sub-bootstrap (jm != NULL).
 * transfer_port - pre-existing receive right to adopt as the manager's
 *                 bootstrap port, or MACH_PORT_NULL to create/check-in one.
 * sflag         - passed through to the session bootstrapper ("-s").
 * name          - session name; when NULL a name is derived from the port.
 *
 * Returns the new manager, or NULL on failure (partially constructed
 * state is torn down via jobmgr_remove()).
 */
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name)
{
	mach_msg_size_t mxmsgsz;
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	/* kqueue callback dispatch relies on the callback being the first member */
	launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (jm && requestorport == MACH_PORT_NULL) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	/* name_init is a trailing buffer; reserve 128 bytes for the
	 * port-derived fallback name when no name was supplied */
	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : 128));

	if (jmr == NULL) {
		return NULL;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	/* watch for the requestor dying so we can tear this manager down */
	if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && getpid() != 1) {
		/* per-user launchd: check in for the service PID 1 registered for us */
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = strtol(trusted_fd, NULL, 10);

			/* dup() probes whether the inherited fd is still open before closing */
			if ((dfd = dup(lfd)) >= 0) {
				jobmgr_assumes(jmr, runtime_close(dfd) != -1);
				jobmgr_assumes(jmr, runtime_close(lfd) != -1);
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		/* cut off the Libc cache, we don't want to deadlock against ourself */
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);

		/* We set this explicitly as we start each child */
		launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
	} else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
		goto out_bad;
	}

	if (!name) {
		/* fits in the 128 bytes reserved above */
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	/* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
	mxmsgsz = sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
	if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
		mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
	}

	if (!jm) {
		/* a root-level manager owns the process-wide signal and fs kevents */
		jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
		jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
	}

	if (name) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	}

	if (jmr->parentmgr) {
		runtime_add_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
	}
	return NULL;
}
4581
/*
 * Spawn the "/bin/launchctl bootstrap -S <session_type>" helper job that
 * populates a freshly created session job manager.  Returns the
 * bootstrapper job, or NULL if job_new() failed.
 */
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
	if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || getuid())) {
		char buf[100];

		/* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
		bootstrapper->weird_bootstrap = true;
		jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	}

	jm->session_initialized = true;

	return bootstrapper;
}
4605
/*
 * Purge every reference to 'port' from the manager tree rooted at jm.
 * Shuts down any manager whose requestor port this was and, at the root,
 * deletes Mach service registrations backed by the port.  Returns jm, or
 * the result of jobmgr_shutdown(jm) when jm itself had to be shut down.
 */
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right
	 * to a port we already have open, the Mach kernel gives us the same
	 * port number back and increments an reference count associated with
	 * the port. This forces us, when discovering that a receive right at
	 * the other end has been deleted, to wander all of our objects to see
	 * what weird places clients might have handed us the same send right
	 * to use.
	 */

	if (jm == root_jobmgr) {
		/* the parent launchd's bootstrap port dying means we're on our own */
		if (port == inherited_bootstrap_port) {
			launchd_assumes(launchd_mport_deallocate(port) == KERN_SUCCESS);
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		/* only done at the root since the port hash table is global */
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: 0x%x", port);
		return jobmgr_shutdown(jm);
	}

	return jm;
}
4648
4649 struct machservice *
4650 jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
4651 {
4652 struct machservice *ms;
4653
4654 if (target_pid) {
4655 jobmgr_assumes(jm, !check_parent);
4656 }
4657
4658 LIST_FOREACH(ms, &jm->ms_hash[hash_ms(name)], name_hash_sle) {
4659 if ((target_pid && ms->per_pid && ms->job->p == target_pid) || (!target_pid && !ms->per_pid)) {
4660 if (strcmp(name, ms->name) == 0) {
4661 return ms;
4662 }
4663 }
4664 }
4665
4666 if (jm->parentmgr == NULL) {
4667 return NULL;
4668 }
4669
4670 if (!check_parent) {
4671 return NULL;
4672 }
4673
4674 return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
4675 }
4676
4677 mach_port_t
4678 machservice_port(struct machservice *ms)
4679 {
4680 return ms->port;
4681 }
4682
4683 job_t
4684 machservice_job(struct machservice *ms)
4685 {
4686 return ms->job;
4687 }
4688
4689 bool
4690 machservice_hidden(struct machservice *ms)
4691 {
4692 return ms->hide;
4693 }
4694
4695 bool
4696 machservice_active(struct machservice *ms)
4697 {
4698 return ms->isActive;
4699 }
4700
4701 const char *
4702 machservice_name(struct machservice *ms)
4703 {
4704 return ms->name;
4705 }
4706
/*
 * Remove and free one Mach service registration belonging to job j.
 * Closes our receive right (when we hold one and the service is not
 * active), drops our send-right reference, detaches the service from
 * every list/hash it is on, and clears the_exception_server when this
 * port was it.  port_died only affects the log message.
 */
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	/* debugging aid: drop into the kernel debugger when this port goes away */
	if (ms->debug_on_close) {
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
	}

	if (ms->recv && job_assumes(j, !ms->isActive)) {
		job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
	}

	job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);

	if (ms->port == the_exception_server) {
		the_exception_server = 0;
	}

	job_log(j, LOG_INFO, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}

	SLIST_REMOVE(&j->machservices, ms, machservice, sle);
	LIST_REMOVE(ms, name_hash_sle);
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
4737
4738 void
4739 machservice_request_notifications(struct machservice *ms)
4740 {
4741 mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
4742
4743 ms->isActive = true;
4744
4745 if (ms->recv) {
4746 which = MACH_NOTIFY_PORT_DESTROYED;
4747 job_checkin(ms->job);
4748 }
4749
4750 job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
4751 }
4752
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
#define END_OF(x) (&(x)[NELEM(x)])

/*
 * Split a command line into a heap-allocated argv vector.
 *
 * The result is a single allocation: an array of (nargs + 1) char
 * pointers (NULL-terminated) immediately followed by the argument
 * strings themselves, so the caller frees everything with one free().
 * Supports double-quoted arguments and backslash escapes.
 *
 * At most 99 arguments are recognized and the unpacked argument bytes
 * are capped at 999; input beyond those limits is truncated (the old
 * code wrote one element past argv[] and args[] in those cases).
 * Returns NULL for an empty command line or on allocation failure.
 */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		while (isspace(*cp))
			cp++;
		term = (*cp == '"') ? *cp++ : '\0';
		/* reserve the final slot for the NULL terminator below */
		if (nargs < NELEM(argv) - 1) {
			argv[nargs++] = argp;
		}
		/* leave room for this argument's NUL terminator */
		while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args) - 1) {
			if (*cp == '\\') {
				cp++;
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		*argp++ = '\0';
	}
	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	/* unpacked strings are never longer than the input */
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!launchd_assumes(argv_ret != NULL)) {
		return NULL;
	}

	/* the strings live directly after the pointer array */
	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
4805
/* Record that job j has checked in. */
void
job_checkin(job_t j)
{
	j->checkedin = true;
}
4811
/*
 * Handle a port-destroyed notification: the receive right for one of
 * our registered Mach services has come back to us.  Marks the service
 * inactive, optionally resets its port, and re-dispatches the owning
 * job.  Returns false if the port did not match any registered service
 * we hold the receive right for.
 */
bool
job_ack_port_destruction(mach_port_t p)
{
	struct machservice *ms;

	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
		if (ms->recv && (ms->port == p)) {
			break;
		}
	}

	/* ms is NULL when the loop completed without a match */
	if (!ms) {
		return false;
	}

	ms->isActive = false;

	if (ms->reset) {
		machservice_resetport(ms->job, ms);
	}

	job_log(ms->job, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
	job_dispatch(ms->job, false);

	root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);

	return true;
}
4840
/*
 * No-more-senders notification for a job's privileged bootstrap port:
 * nobody holds a send right any more, so close our receive right and
 * re-dispatch the job.
 */
void
job_ack_no_senders(job_t j)
{
	j->priv_port_has_senders = false;

	job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
	j->j_port = 0;

	job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");

	job_dispatch(j, false);
}
4853
4854 jobmgr_t
4855 job_get_bs(job_t j)
4856 {
4857 if (job_assumes(j, j->mgr != NULL)) {
4858 return j->mgr;
4859 }
4860
4861 return NULL;
4862 }
4863
4864 bool
4865 job_is_anonymous(job_t j)
4866 {
4867 return j->anonymous;
4868 }
4869
/*
 * Synchronously run the 'sample' tool against job j and append its
 * output to the system console.  Only active when shutdown-hang
 * debugging is enabled.  Blocks launchd for the duration of the run.
 */
void
job_force_sampletool(job_t j)
{
	struct stat sb;
	char logfile[PATH_MAX];
	char pidstr[100];
	char *sample_args[] = { "sample", pidstr, "1", "-mayDie", "-file", logfile, NULL };
	char *contents = NULL;
	int logfile_fd = -1;
	int console_fd = -1;
	int wstatus;
	pid_t sp;

	if (!debug_shutdown_hangs) {
		return;
	}

	snprintf(pidstr, sizeof(pidstr), "%u", j->p);
	snprintf(logfile, sizeof(logfile), SHUTDOWN_LOG_DIR "/%s-%u.sample.txt", j->label, j->p);

	/* remove any stale log from a previous run */
	if (!job_assumes(j, unlink(logfile) != -1 || errno == ENOENT)) {
		goto out;
	}

	/*
	 * This will stall launchd for as long as the 'sample' tool runs.
	 *
	 * We didn't give the 'sample' tool a bootstrap port, so it therefore
	 * can't deadlock against launchd.
	 */
	if (!job_assumes(j, (errno = posix_spawnp(&sp, sample_args[0], NULL, NULL, sample_args, environ)) == 0)) {
		goto out;
	}

	job_log(j, LOG_DEBUG, "Waiting for 'sample' to finish.");

	if (!job_assumes(j, waitpid(sp, &wstatus, 0) != -1)) {
		goto out;
	}

	/*
	 * This won't work if the VFS or filesystems are sick:
	 * sync();
	 */

	if (!job_assumes(j, WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0)) {
		goto out;
	}

	if (!job_assumes(j, (logfile_fd = open(logfile, O_RDONLY|O_NOCTTY)) != -1)) {
		goto out;
	}

	if (!job_assumes(j, (console_fd = open(_PATH_CONSOLE, O_WRONLY|O_APPEND|O_NOCTTY)) != -1)) {
		goto out;
	}

	if (!job_assumes(j, fstat(logfile_fd, &sb) != -1)) {
		goto out;
	}

	contents = malloc(sb.st_size);

	if (!job_assumes(j, contents != NULL)) {
		goto out;
	}

	/* NOTE(review): assumes a single read() returns the whole file */
	if (!job_assumes(j, read(logfile_fd, contents, sb.st_size) == sb.st_size)) {
		goto out;
	}

	job_assumes(j, write(console_fd, contents, sb.st_size) == sb.st_size);

out:
	if (contents) {
		free(contents);
	}

	if (logfile_fd != -1) {
		job_assumes(j, runtime_fsync(logfile_fd) != -1);
		job_assumes(j, runtime_close(logfile_fd) != -1);
	}

	if (console_fd != -1) {
		job_assumes(j, runtime_close(console_fd) != -1);
	}

	job_log(j, LOG_DEBUG, "Finished sampling.");
}
4959
/*
 * Create a KeepAlive semaphore item for job j.
 *
 * why  - the condition this item represents.
 * what - optional payload for the condition (copied into the trailing
 *        what_init buffer of the allocation), or NULL.
 *
 * Returns false on allocation failure.
 */
bool
semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
{
	struct semaphoreitem *si;
	size_t alloc_sz = sizeof(struct semaphoreitem);

	if (what) {
		alloc_sz += strlen(what) + 1;
	}

	if (!job_assumes(j, si = calloc(1, alloc_sz))) {
		return false;
	}

	si->fd = -1;
	si->why = why;

	if (what) {
		/* fits: alloc_sz reserved strlen(what) + 1 above */
		strcpy(si->what_init, what);
	}

	SLIST_INSERT_HEAD(&j->semaphores, si, sle);

	/* only externally-driven conditions take a runtime reference */
	semaphoreitem_runtime_mod_ref(si, true);

	return true;
}
4987
4988 void
4989 semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
4990 {
4991 /*
4992 * External events need to be tracked.
4993 * Internal events do NOT need to be tracked.
4994 */
4995
4996 switch (si->why) {
4997 case SUCCESSFUL_EXIT:
4998 case FAILED_EXIT:
4999 case OTHER_JOB_ENABLED:
5000 case OTHER_JOB_DISABLED:
5001 case OTHER_JOB_ACTIVE:
5002 case OTHER_JOB_INACTIVE:
5003 return;
5004 default:
5005 break;
5006 }
5007
5008 if (add) {
5009 runtime_add_ref();
5010 } else {
5011 runtime_del_ref();
5012 }
5013 }
5014
/*
 * Tear down one semaphore item: drop its runtime reference, unlink it
 * from the job, close any descriptor it held, and free it.
 */
void
semaphoreitem_delete(job_t j, struct semaphoreitem *si)
{
	semaphoreitem_runtime_mod_ref(si, false);

	SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);

	/* some conditions hold an open fd (presumably path watches) — close it */
	if (si->fd != -1) {
		job_assumes(j, runtime_close(si->fd) != -1);
	}

	free(si);
}
5028
5029 void
5030 semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
5031 {
5032 struct semaphoreitem_dict_iter_context *sdic = context;
5033 semaphore_reason_t why;
5034
5035 why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
5036
5037 semaphoreitem_new(sdic->j, why, key);
5038 }
5039
/*
 * launch_data iterator callback for the KeepAlive dictionary: translate
 * each key into one or more semaphore items on job j.  Boolean keys map
 * directly; dictionary keys (PathState, OtherJobActive, OtherJobEnabled)
 * are iterated via semaphoreitem_setup_dict_iter() with the true/false
 * reason pair selected here.
 */
void
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
{
	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
	job_t j = context;
	semaphore_reason_t why;

	switch (launch_data_get_type(obj)) {
	case LAUNCH_DATA_BOOL:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
			semaphoreitem_new(j, why, NULL);
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
			semaphoreitem_new(j, why, NULL);
			/* exit-status conditions imply the job should start at least once */
			j->start_pending = true;
		} else {
			job_assumes(j, false);
		}
		break;
	case LAUNCH_DATA_DICTIONARY:
		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
			sdic.why_true = PATH_EXISTS;
			sdic.why_false = PATH_MISSING;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
			sdic.why_true = OTHER_JOB_ACTIVE;
			sdic.why_false = OTHER_JOB_INACTIVE;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
			sdic.why_true = OTHER_JOB_ENABLED;
			sdic.why_false = OTHER_JOB_DISABLED;
		} else {
			job_assumes(j, false);
			break;
		}

		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
		break;
	default:
		/* unsupported value type in the KeepAlive dictionary */
		job_assumes(j, false);
		break;
	}
}
5082
5083 void
5084 jobmgr_dispatch_all_semaphores(jobmgr_t jm)
5085 {
5086 jobmgr_t jmi, jmn;
5087 job_t ji, jn;
5088
5089
5090 SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
5091 jobmgr_dispatch_all_semaphores(jmi);
5092 }
5093
5094 LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
5095 if (!SLIST_EMPTY(&ji->semaphores)) {
5096 job_dispatch(ji, false);
5097 }
5098 }
5099 }
5100
/*
 * Compute the next time_t (strictly after now, at minute granularity)
 * matching the given cron-style fields; -1 in any field is a wildcard.
 * Starts from the next whole minute and retries year by year when a
 * match carries past December.
 */
time_t
cronemu(int mon, int mday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	/* let mktime() decide whether DST applies to the computed time */
	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
		/* no match this year: restart the search at Jan 1 of next year */
		workingtm.tm_year++;
		workingtm.tm_mon = 0;
		workingtm.tm_mday = 1;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		mktime(&workingtm);
	}

	return mktime(&workingtm);
}
5125
/*
 * Like cronemu(), but match on a weekday (0-6; 7 is accepted as an
 * alias for Sunday) instead of month/day.  Searches forward one day at
 * a time from the next whole minute.
 */
time_t
cronemu_wday(int wday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	/* let mktime() decide whether DST applies to the computed time */
	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	if (wday == 7) {
		wday = 0;
	}

	while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
		workingtm.tm_mday++;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		mktime(&workingtm);
	}

	return mktime(&workingtm);
}
5152
/*
 * Advance *wtm to the requested month, then delegate to cronemu_mday().
 * mon == -1 is a wildcard: scan forward month by month until the finer
 * fields match.  Returns false when no match exists within this year
 * (the caller then carries into the next year).
 */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			/* mktime() normalized tm_mon: we ran past December */
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mon < wtm->tm_mon) {
		return false;
	}

	if (mon > wtm->tm_mon) {
		/* jumping forward: reset all finer-grained fields */
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
5188
/*
 * Advance *wtm to the requested day of month, then delegate to
 * cronemu_hour().  mday == -1 is a wildcard: scan forward day by day.
 * Returns false when no match exists within this month (the caller
 * then carries into the next month).
 */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			/* mktime() normalized tm_mday: we ran past the end of the month */
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (mday < wtm->tm_mday) {
		return false;
	}

	if (mday > wtm->tm_mday) {
		/* jumping forward: reset the finer-grained fields */
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
5222
/*
 * Advance *wtm to the requested hour, then delegate to cronemu_min().
 * hour == -1 is a wildcard: scan forward hour by hour.  Returns false
 * when no match exists within this day (the caller then carries into
 * the next day).
 */
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_min(&workingtm, min)) {
			workingtm.tm_hour++;
			workingtm.tm_min = 0;
			carrytest = workingtm.tm_hour;
			mktime(&workingtm);
			/* mktime() normalized tm_hour: we ran past the end of the day */
			if (carrytest != workingtm.tm_hour) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	if (hour < wtm->tm_hour) {
		return false;
	}

	if (hour > wtm->tm_hour) {
		/* jumping forward: reset the minute field */
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
5254
/*
 * Advance *wtm to the requested minute.
 *
 * min == -1 is a wildcard and always matches.  Returns false when the
 * requested minute is already past within this hour (the caller must
 * carry into the next hour); otherwise moves tm_min forward as needed
 * and returns true.
 */
bool
cronemu_min(struct tm *wtm, int min)
{
	if (min == -1) {
		return true;
	}

	if (min < wtm->tm_min) {
		return false;
	}

	/* min >= tm_min here, so this either advances or is a no-op */
	wtm->tm_min = min;

	return true;
}
5272
/*
 * MIG handler: create a server job, spawned on demand (or not) via the
 * classic mach_init protocol.  Returns the new job's privileged
 * bootstrap port in *server_portp.  Non-root callers have server_uid
 * forced to their own effective UID (under PID 1) or neutralized
 * entirely (under a non-root launchd).
 */
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	struct ldcred ldc;
	job_t js;

#if TARGET_OS_EMBEDDED
	return BOOTSTRAP_NOT_PRIVILEGED;
#endif

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	runtime_get_caller_creds(&ldc);

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

#define LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
	/* XXX - This code should go away once the per session launchd is integrated with the rest of the system */
#ifdef LET_MERE_MORTALS_ADD_SERVERS_TO_PID1
	if (getpid() == 1) {
		/* non-root callers can only create servers running as themselves */
		if (ldc.euid && server_uid && (ldc.euid != server_uid)) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": Will run as UID %d, not UID %d as they told us to",
					server_cmd, ldc.euid, server_uid);
			server_uid = ldc.euid;
		}
	} else
#endif
	if (getuid()) {
		/* a non-root launchd cannot switch UIDs at all */
		if (server_uid != getuid()) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; /* zero means "do nothing" */
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (js == NULL) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
5323
/*
 * MIG handler: deliver a signal to the job named by targetlabel, or,
 * for the magic VPROC_MAGIC_UNLOAD_SIGNAL value, remove the job
 * entirely.  Removing a running job defers the MIG reply (via srp)
 * until the job is actually gone.  Callers must be root or share this
 * launchd's UID.
 */
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	struct ldcred ldc;
	job_t otherj;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	if (ldc.euid != 0 && ldc.euid != getuid()) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!(otherj = job_find(targetlabel))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		/* only block the caller if the job has a live process to wait on */
		bool do_block = otherj->p;

		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			/* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
			job_assumes(otherj, waiting4removal_new(otherj, srp));
			return MIG_NO_REPLY;
		} else {
			return 0;
		}
	} else if (otherj->p) {
		job_assumes(j, runtime_kill(otherj->p, sig) != -1);
	}

	return 0;
}
5367
/*
 * MIG handler: accept a batch of serialized log messages forwarded from
 * a per-user launchd and hand them to our logging subsystem.  Only
 * per-user launchd jobs may call this.
 */
kern_return_t
job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	struct ldcred ldc;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(j, j->per_user)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	runtime_get_caller_creds(&ldc);

	return runtime_log_forward(ldc.euid, ldc.egid, inval, invalCnt);
}
5385
5386 kern_return_t
5387 job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
5388 {
5389 struct ldcred ldc;
5390
5391 if (!launchd_assumes(j != NULL)) {
5392 return BOOTSTRAP_NO_MEMORY;
5393 }
5394
5395 runtime_get_caller_creds(&ldc);
5396
5397 if (ldc.euid) {
5398 return BOOTSTRAP_NOT_PRIVILEGED;
5399 }
5400
5401 return runtime_log_drain(srp, outval, outvalCnt);
5402 }
5403
/*
 * MIG handler: get and/or set a "complex" (serialized launch_data)
 * vproc key.  inkey selects what to apply from inval/invalCnt, outkey
 * what to serialize into *outval/*outvalCnt; a key of 0 means "no
 * operation" on that side.  Setting requires the caller be root or our
 * own UID.  Returns 0 on success, 1 on failure.
 *
 * NOTE(review): the reply buffer is a fixed 20 MiB mig_allocate()d
 * region — presumably large enough for any export; confirm callers
 * tolerate the full-size reply for data-bearing keys.
 */
kern_return_t
job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
		vm_offset_t inval, mach_msg_type_number_t invalCnt,
		vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
{
	const char *action;
	launch_data_t input_obj, output_obj;
	size_t data_offset = 0;
	size_t packed_size;
	struct ldcred ldc;

	runtime_get_caller_creds(&ldc);

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* setting anything requires root or our own UID */
	if (inkey && ldc.euid && ldc.euid != getuid()) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* a swap must name the same key on both sides */
	if (inkey && outkey && !job_assumes(j, inkey == outkey)) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	*outvalCnt = 20 * 1024 * 1024;
	mig_allocate(outval, *outvalCnt);
	if (!job_assumes(j, *outval != 0)) {
		return 1;
	}

	/* validate the incoming serialized data before acting on either side */
	if (invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		goto out_bad;
	}

	switch (outkey) {
	case VPROC_GSK_ENVIRONMENT:
		if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
			goto out_bad;
		}
		jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
		if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case VPROC_GSK_ALLJOBS:
		if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
			goto out_bad;
		}
		/* file descriptors cannot survive serialization over MIG */
		ipc_revoke_fds(output_obj);
		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
		if (!job_assumes(j, packed_size != 0)) {
			goto out_bad;
		}
		launch_data_free(output_obj);
		break;
	case 0:
		/* nothing to return: release the reply buffer */
		mig_deallocate(*outval, *outvalCnt);
		*outval = 0;
		*outvalCnt = 0;
		break;
	default:
		goto out_bad;
	}

	if (invalCnt) switch (inkey) {
	case VPROC_GSK_ENVIRONMENT:
		/* not implemented */
		job_assumes(j, false);
		break;
	case 0:
		break;
	default:
		goto out_bad;
	}

	mig_deallocate(inval, invalCnt);

	return 0;

out_bad:
	if (*outval) {
		mig_deallocate(*outval, *outvalCnt);
	}
	return 1;
}
5500
/*
 * MIG handler: get and/or set a scalar vproc key.  outkey selects the
 * value returned in *outval, inkey the value applied from inval; a key
 * of 0 means "no operation" on that side.  Setting requires the caller
 * be root or our own UID.  Returns 0 on success, 1 on an unknown key.
 */
kern_return_t
job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
{
	const char *action;
	kern_return_t kr = 0;
	struct ldcred ldc;
	int oldmask;

	runtime_get_caller_creds(&ldc);

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* setting anything requires root or our own UID */
	if (inkey && ldc.euid && ldc.euid != getuid()) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* a swap must name the same key on both sides */
	if (inkey && outkey && !job_assumes(j, inkey == outkey)) {
		return 1;
	}

	if (inkey && outkey) {
		action = "Swapping";
	} else if (inkey) {
		action = "Setting";
	} else {
		action = "Getting";
	}

	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);

	switch (outkey) {
	case VPROC_GSK_LAST_EXIT_STATUS:
		*outval = j->last_exit_status;
		break;
	case VPROC_GSK_MGR_UID:
		*outval = getuid();
		break;
	case VPROC_GSK_MGR_PID:
		*outval = getpid();
		break;
	case VPROC_GSK_IS_MANAGED:
		*outval = j->anonymous ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		*outval = !j->ondemand;
		break;
	case VPROC_GSK_START_INTERVAL:
		*outval = j->start_interval;
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		*outval = j->timeout;
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		*outval = j->exit_timeout;
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		/* read the current mask by setting a throwaway value, then restore it */
		oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
		*outval = oldmask;
		runtime_setlogmask(oldmask);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		/* same read-by-set-and-restore trick for the process umask */
		oldmask = umask(0);
		*outval = oldmask;
		umask(oldmask);
		break;
	case 0:
		*outval = 0;
		break;
	default:
		kr = 1;
		break;
	}

	switch (inkey) {
	case VPROC_GSK_GLOBAL_ON_DEMAND:
		kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
		break;
	case VPROC_GSK_BASIC_KEEPALIVE:
		j->ondemand = !inval;
		break;
	case VPROC_GSK_START_INTERVAL:
		/* the interval is stored in 32 bits; reject anything wider */
		if ((uint64_t)inval > UINT32_MAX) {
			kr = 1;
		} else if (inval) {
			if (j->start_interval == 0) {
				runtime_add_ref();
			} else {
				/* Workaround 5225889 */
				job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, j) != -1);
			}
			j->start_interval = inval;
			job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
		} else if (j->start_interval) {
			job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
			if (j->start_interval != 0) {
				runtime_del_ref();
			}
			j->start_interval = 0;
		}
		break;
	case VPROC_GSK_IDLE_TIMEOUT:
		if ((unsigned int)inval > 0) {
			j->timeout = inval;
		}
		break;
	case VPROC_GSK_EXIT_TIMEOUT:
		if ((unsigned int)inval > 0) {
			j->exit_timeout = inval;
		}
		break;
	case VPROC_GSK_GLOBAL_LOG_MASK:
		runtime_setlogmask(inval);
		break;
	case VPROC_GSK_GLOBAL_UMASK:
		umask(inval);
		break;
	case 0:
		break;
	default:
		kr = 1;
		break;
	}

	return kr;
}
5628
/*
 * MIG handler: called on behalf of a freshly forked child before it
 * execs.  Sets up the exception port and copies our recorded special
 * ports onto the child task, then drops the send right we received for
 * the task.
 */
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task)
{
	struct machservice *ms;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	job_setup_exception_port(j, child_task);

	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			/* The TASK_ACCESS_PORT funny business is to workaround 5325399. */
			continue;
		}

		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);

		if (errno) {
			int desired_log_level = LOG_ERR;

			/* failures against anonymous jobs are expected to be noisy */
			if (j->anonymous) {
				/* 5338127 */

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);

	return 0;
}
5671
/*
 * MIG handler: initiate a system shutdown (PID 1, root callers only).
 * Walks the caller's process ancestry via sysctl(KERN_PROC_PID) to log
 * who asked for the reboot, stashes the flags for later use, and kicks
 * off launchd_shutdown().
 */
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct kinfo_proc kp;
	struct ldcred ldc;
	pid_t pid_to_log;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (getpid() != 1) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	runtime_get_caller_creds(&ldc);

	if (ldc.euid) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* walk up the parent chain, appending "name[pid] ->" for each ancestor */
	for (pid_to_log = ldc.pid; pid_to_log; pid_to_log = kp.kp_eproc.e_ppid) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid_to_log };
		size_t who_offset, len = sizeof(kp);

		if (!job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1)) {
			return 1;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", kp.kp_proc.p_comm, pid_to_log, kp.kp_eproc.e_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;

	launchd_shutdown();

	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);

	return 0;
}
5715
5716 kern_return_t
5717 job_mig_getsocket(job_t j, name_t spr)
5718 {
5719 if (!launchd_assumes(j != NULL)) {
5720 return BOOTSTRAP_NO_MEMORY;
5721 }
5722
5723 ipc_server_init();
5724
5725 if (!sockpath) {
5726 return BOOTSTRAP_NO_MEMORY;
5727 }
5728
5729 strncpy(spr, sockpath, sizeof(name_t));
5730
5731 return BOOTSTRAP_SUCCESS;
5732 }
5733
/*
 * MIG handler: log a message on behalf of job j at priority pri.  A
 * non-zero err is stashed in errno so job_log_error() can append the
 * corresponding error text.
 */
kern_return_t
job_mig_log(job_t j, int pri, int err, logmsg_t msg)
{
	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if ((errno = err)) {
		job_log_error(j, pri, "%s", msg);
	} else {
		job_log(j, pri, "%s", msg);
	}

	return 0;
}
5749
/*
 * Lazily create the root "Background" session job manager.  Only
 * meaningful in PID 1; a no-op if the manager already exists.
 * NOTE(review): req_port is cleared after creation — presumably so no
 * requestor-death teardown applies to this manager; confirm.
 */
void
ensure_root_bkgd_setup(void)
{
	if (background_jobmgr || getpid() != 1) {
		return;
	}

	if (!jobmgr_assumes(root_jobmgr, (background_jobmgr = jobmgr_new(root_jobmgr, mach_task_self(), MACH_PORT_NULL, false, VPROCMGR_SESSION_BACKGROUND)) != NULL)) {
		return;
	}

	background_jobmgr->req_port = 0;
	jobmgr_assumes(root_jobmgr, launchd_mport_make_send(background_jobmgr->jm_port) == KERN_SUCCESS);
}
5764
5765 kern_return_t
5766 job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
5767 {
5768 struct ldcred ldc;
5769 job_t ji;
5770
5771 #if TARGET_OS_EMBEDDED
5772 return BOOTSTRAP_NOT_PRIVILEGED;
5773 #endif
5774
5775 if (!launchd_assumes(j != NULL)) {
5776 return BOOTSTRAP_NO_MEMORY;
5777 }
5778
5779 job_log(j, LOG_DEBUG, "Looking up per user launchd for UID: %u", which_user);
5780
5781 runtime_get_caller_creds(&ldc);
5782
5783 if (getpid() != 1) {
5784 job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
5785 return BOOTSTRAP_NOT_PRIVILEGED;
5786 }
5787
5788 if (ldc.euid || ldc.uid) {
5789 which_user = ldc.euid ? ldc.euid : ldc.uid;
5790 }
5791
5792 *up_cont = MACH_PORT_NULL;
5793
5794 if (which_user == 0) {
5795 ensure_root_bkgd_setup();
5796
5797 *up_cont = background_jobmgr->jm_port;
5798
5799 return 0;
5800 }
5801
5802 LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
5803 if (!ji->per_user) {
5804 continue;
5805 }
5806 if (ji->mach_uid != which_user) {
5807 continue;
5808 }
5809 if (SLIST_EMPTY(&ji->machservices)) {
5810 continue;
5811 }
5812 if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
5813 continue;
5814 }
5815 break;
5816 }
5817
5818 if (ji == NULL) {
5819 struct machservice *ms;
5820 char lbuf[1024];
5821
5822 job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
5823
5824 sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
5825
5826 ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
5827
5828 if (ji == NULL) {
5829 return BOOTSTRAP_NO_MEMORY;
5830 }
5831
5832 ji->mach_uid = which_user;
5833 ji->per_user = true;
5834
5835 if ((ms = machservice_new(ji, lbuf, up_cont, false)) == NULL) {
5836 job_remove(ji);
5837 return BOOTSTRAP_NO_MEMORY;
5838 }
5839
5840 ms->per_user_hack = true;
5841 ms->hide = true;
5842
5843 ji = job_dispatch(ji, false);
5844 } else {
5845 job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
5846 }
5847
5848 if (job_assumes(j, ji != NULL)) {
5849 *up_cont = machservice_port(SLIST_FIRST(&ji->machservices));
5850 }
5851
5852 return 0;
5853 }
5854
5855 kern_return_t
5856 job_mig_check_in(job_t j, name_t servicename, mach_port_t *serviceportp)
5857 {
5858 static pid_t last_warned_pid = 0;
5859 struct machservice *ms;
5860 struct ldcred ldc;
5861
5862 if (!launchd_assumes(j != NULL)) {
5863 return BOOTSTRAP_NO_MEMORY;
5864 }
5865
5866 runtime_get_caller_creds(&ldc);
5867
5868 ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
5869
5870 if (ms == NULL) {
5871 job_log(j, LOG_DEBUG, "Check-in of Mach service failed. Unknown: %s", servicename);
5872 return BOOTSTRAP_UNKNOWN_SERVICE;
5873 }
5874 if (machservice_job(ms) != j) {
5875 if (last_warned_pid != ldc.pid) {
5876 job_log(j, LOG_NOTICE, "Check-in of Mach service failed. PID %d is not privileged: %s",
5877 ldc.pid, servicename);
5878 last_warned_pid = ldc.pid;
5879 }
5880 return BOOTSTRAP_NOT_PRIVILEGED;
5881 }
5882 if (machservice_active(ms)) {
5883 job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
5884 return BOOTSTRAP_SERVICE_ACTIVE;
5885 }
5886
5887 machservice_request_notifications(ms);
5888
5889 job_log(j, LOG_INFO, "Check-in of service: %s", servicename);
5890
5891 *serviceportp = machservice_port(ms);
5892 return BOOTSTRAP_SUCCESS;
5893 }
5894
/*
 * MIG server routine: register (or deregister, when serviceport is
 * MACH_PORT_NULL) a Mach service under the caller's job. Replaces an
 * inactive registration owned by the same job; refuses to touch another
 * job's service or one that is currently active.
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred ldc;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

#if 0
	job_log(j, LOG_APPLEONLY, "bootstrap_register() is deprecated. Service: %s", servicename);
#endif

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (j->anonymous && job_get_bs(j)->parentmgr == NULL && ldc.uid != 0 && ldc.uid != getuid() && ldc.uid != 92) {
		if (getpid() == 1) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc.pid : 0);

	if (ms) {
		/* Only the owning job may replace its own registration. */
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		/* Drop the stale (inactive) entry before installing the new port. */
		job_checkin(j);
		machservice_delete(j, ms, false);
	}

	/* A NULL port simply deregisters: the old entry (if any) is now gone. */
	if (serviceport != MACH_PORT_NULL) {
		if ((ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	return BOOTSTRAP_SUCCESS;
}
5953
/*
 * MIG server routine: look up a Mach service by name (optionally per-PID)
 * on behalf of the caller. Resolution order: local bootstrap, then the
 * inherited (parent) bootstrap, then special-case redirects. Returns a
 * send right in *serviceportp with the disposition in *ptype.
 */
kern_return_t
job_mig_look_up2(job_t j, name_t servicename, mach_port_t *serviceportp, mach_msg_type_name_t *ptype, pid_t target_pid, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred ldc;
	kern_return_t kr;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	/* 5641783 for the embedded hack */
#if !TARGET_OS_EMBEDDED
	/* Rogue (anonymous, non-root) processes talking to the system launchd
	 * get punted to their per-user launchd. */
	if (getpid() == 1 && j->anonymous && job_get_bs(j)->parentmgr == NULL && ldc.uid != 0 && ldc.euid != 0) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

	if (!mspolicy_check(j, servicename, flags & BOOTSTRAP_PER_PID_SERVICE)) {
		job_log(j, LOG_NOTICE, "Policy denied Mach service lookup: %s", servicename);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (flags & BOOTSTRAP_PER_PID_SERVICE) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
	}

	/* Hidden-and-inactive services and per-user placeholder services are
	 * invisible to lookup. */
	if (ms && machservice_hidden(ms) && !machservice_active(ms)) {
		ms = NULL;
	} else if (ms && ms->per_user_hack) {
		ms = NULL;
	}

	if (ms) {
		launchd_assumes(machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
#if 0
		/* After Leopard ships, we should enable this */
		if (j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user) {
			ms->bad_perf_cnt++;
			job_log(j, LOG_APPLEONLY, "Performance opportunity: Number of bootstrap_lookup(... \"%s\" ...) calls that should have been cached: %llu",
					servicename, ms->bad_perf_cnt);
		}
		j->lastlookup = ms;
		j->lastlookup_gennum = ms->gen_num;
#endif
		*serviceportp = machservice_port(ms);
		*ptype = MACH_MSG_TYPE_COPY_SEND;
		kr = BOOTSTRAP_SUCCESS;
	} else if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && (inherited_bootstrap_port != MACH_PORT_NULL)) {
		/* Not found locally: forward the lookup to our parent bootstrap. */
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		*ptype = MACH_MSG_TYPE_MOVE_SEND;
		kr = bootstrap_look_up(inherited_bootstrap_port, servicename, serviceportp);
	} else if (getpid() == 1 && j->anonymous && ldc.euid >= 500 && strcasecmp(job_get_bs(j)->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/*
		 * 5240036 Should start background session when a lookup of CCacheServer occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt to guess what it is up to.
		 * If we find a EUID that isn't root, we force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
6026
6027 kern_return_t
6028 job_mig_parent(job_t j, mach_port_t *parentport, mach_msg_type_name_t *pptype)
6029 {
6030 if (!launchd_assumes(j != NULL)) {
6031 return BOOTSTRAP_NO_MEMORY;
6032 }
6033
6034 job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
6035 jobmgr_t jm = j->mgr;
6036
6037 *pptype = MACH_MSG_TYPE_MAKE_SEND;
6038
6039 if (jobmgr_parent(jm)) {
6040 *parentport = jobmgr_parent(jm)->jm_port;
6041 } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
6042 *parentport = jm->jm_port;
6043 } else {
6044 *pptype = MACH_MSG_TYPE_COPY_SEND;
6045 *parentport = inherited_bootstrap_port;
6046 }
6047 return BOOTSTRAP_SUCCESS;
6048 }
6049
/*
 * MIG server routine: enumerate all (non-per-PID) Mach services in the
 * caller's job manager. Returns two parallel MIG-allocated arrays: service
 * names and their active status. Ownership of both arrays transfers to the
 * MIG reply on success; on failure both are deallocated here.
 */
kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt,
		bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt)
{
	name_array_t service_names = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	struct machservice *ms;
	jobmgr_t jm;
	job_t ji;

#if TARGET_OS_EMBEDDED
	return BOOTSTRAP_NOT_PRIVILEGED;
#endif

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	jm = j->mgr;

	/* First pass: count, so we can size the MIG allocations. */
	LIST_FOREACH(ji, &jm->jobs, sle) {
		SLIST_FOREACH(ms, &ji->machservices, sle) {
			if (!ms->per_pid) {
				cnt++;
			}
		}
	}

	/* Zero services: success with NULL arrays and a zero count. */
	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!launchd_assumes(service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!launchd_assumes(service_actives != NULL)) {
		goto out_bad;
	}

	/* Second pass: fill both arrays in the same iteration order. */
	LIST_FOREACH(ji, &jm->jobs, sle) {
		SLIST_FOREACH(ms, &ji->machservices, sle) {
			if (!ms->per_pid) {
				strlcpy(service_names[cnt2], machservice_name(ms), sizeof(service_names[0]));
				service_actives[cnt2] = machservice_status(ms);
				cnt2++;
			}
		}
	}

	/* The two passes must have agreed on the count. */
	launchd_assumes(cnt == cnt2);

out:
	*servicenamesp = service_names;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
6122
/*
 * Move job 'j' into the job manager (session) named 'where', rehoming its
 * Mach services into the destination manager's hash table. A NULL 'where'
 * means "pick the appropriate GUI session" (LoginWindow stays put,
 * everything else goes to Aqua) — this is the custom LaunchServices hook.
 */
void
job_reparent_hack(job_t j, const char *where)
{
	jobmgr_t jmi, jmi2;

	ensure_root_bkgd_setup();

	/* NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic. */
	if (where == NULL) {
		if (strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
			where = VPROCMGR_SESSION_LOGINWINDOW;
		} else {
			where = VPROCMGR_SESSION_AQUA;
		}
	}

	/* Already in the target session: nothing to do. */
	if (strcasecmp(j->mgr->name, where) == 0) {
		return;
	}

	/* Search direct submanagers of root, and (in the system launchd) one
	 * level deeper under the Background session. */
	SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
		if (jmi->shutting_down) {
			continue;
		} else if (strcasecmp(jmi->name, where) == 0) {
			goto jm_found;
		} else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && getpid() == 1) {
			SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
				if (strcasecmp(jmi2->name, where) == 0) {
					jmi = jmi2;
					goto jm_found;
				}
			}
		}
	}

jm_found:
	/* If the search loop ran to completion, jmi is NULL here; the assumes()
	 * check below catches (and logs) that case. */
	if (job_assumes(j, jmi != NULL)) {
		struct machservice *msi;

		/* Unhook the services from the old manager's name hash... */
		SLIST_FOREACH(msi, &j->machservices, sle) {
			LIST_REMOVE(msi, name_hash_sle);
		}

		LIST_REMOVE(j, sle);
		LIST_INSERT_HEAD(&jmi->jobs, j, sle);
		j->mgr = jmi;

		/* ...and rehash them into the new manager. */
		SLIST_FOREACH(msi, &j->machservices, sle) {
			LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}
}
6175
/*
 * MIG server routine with two modes:
 *
 *  1. target_subset == MACH_PORT_NULL: (re)initialize the caller's job
 *     manager as the session named 'session_type' (with several special
 *     cases for LoginWindow / Aqua / Background / StandardIO).
 *
 *  2. target_subset is a bootstrap port of another launchd: grab that
 *     launchd's subset (services + ports) via _vproc_grab_subset() and
 *     graft it under our own job manager as a new child manager.
 *
 * On success the target_subset send right is consumed; on failure the
 * partially-built child manager (if any) is shut down.
 */
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred ldc;
	jobmgr_t jmr = NULL;

#if TARGET_OS_EMBEDDED
	return BOOTSTRAP_NOT_PRIVILEGED;
#endif

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

	if (target_subset == MACH_PORT_NULL) {
		job_t j2;

		if (j->mgr->session_initialized) {
			if (ldc.uid == 0 && getpid() == 1) {
				if (strcmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
					job_t ji, jn;

					/* Tear down everything the dying LoginWindow
					 * session started... */
					LIST_FOREACH_SAFE(ji, &j->mgr->jobs, sle, jn) {
						if (!ji->anonymous) {
							job_remove(ji);
						}
					}

					ensure_root_bkgd_setup();

					/* ...and reparent the manager under Background. */
					SLIST_REMOVE(&j->mgr->parentmgr->submgrs, j->mgr, jobmgr_s, sle);
					j->mgr->parentmgr = background_jobmgr;
					SLIST_INSERT_HEAD(&j->mgr->parentmgr->submgrs, j->mgr, sle);

					/*
					 * We really should wait for all the jobs to die before proceeding. See 5351245 for more info.
					 *
					 * We have hacked around this in job_find() by ignoring jobs that are pending removal.
					 */

				} else if (strcmp(j->mgr->name, VPROCMGR_SESSION_AQUA) == 0) {
					job_log(j, LOG_DEBUG, "Tried to move the Aqua session.");
					return 0;
				} else if (strcmp(j->mgr->name, VPROCMGR_SESSION_BACKGROUND) == 0) {
					job_log(j, LOG_DEBUG, "Tried to move the background session.");
					return 0;
				} else {
					job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
					kr = BOOTSTRAP_NOT_PRIVILEGED;
					goto out;
				}
			} else {
				job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
				kr = BOOTSTRAP_NOT_PRIVILEGED;
				goto out;
			}
		} else if (ldc.uid == 0 && getpid() == 1 && strcmp(session_type, VPROCMGR_SESSION_STANDARDIO) == 0) {
			/* StandardIO sessions live under the Background manager. */
			ensure_root_bkgd_setup();

			SLIST_REMOVE(&j->mgr->parentmgr->submgrs, j->mgr, jobmgr_s, sle);
			j->mgr->parentmgr = background_jobmgr;
			SLIST_INSERT_HEAD(&j->mgr->parentmgr->submgrs, j->mgr, sle);
		} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
			jobmgr_t jmi;

			/*
			 * 5330262
			 *
			 * We're working around LoginWindow and the WindowServer.
			 *
			 * In practice, there is only one LoginWindow session. Unfortunately, for certain
			 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
			 * spawns a replacement loginwindow session before cleaning up the previous one.
			 *
			 * We're going to use the creation of a new LoginWindow context as a clue that the
			 * previous LoginWindow context is on the way out and therefore we should just
			 * kick-start the shutdown of it.
			 */

			SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
				if (jmi->shutting_down) {
					continue;
				} else if (strcasecmp(jmi->name, session_type) == 0) {
					jobmgr_shutdown(jmi);
					break;
				}
			}
		}

		jobmgr_log(j->mgr, LOG_DEBUG, "Renaming to: %s", session_type);
		strcpy(j->mgr->name_init, session_type);

		if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
			job_assumes(j, job_dispatch(j2, true));
		}

		kr = 0;
		goto out;
	} else if (job_mig_intran2(root_jobmgr, target_subset, ldc.pid)) {
		/* The "subset" resolves back to one of our own managers. */
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	/* Pull the other launchd's services, ports, and bootstrap rights over. */
	errno = kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);

	if (!job_assumes(j, kr == 0)) {
		goto out;
	}

	launchd_assert(launch_data_array_get_count(out_obj_array) == l2l_port_cnt);

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	/* Re-create each transferred service under the matching (anonymous)
	 * job in the new manager; the port arrays and metadata array are
	 * index-parallel. */
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (!j_for_service) {
			/* The PID probably exited */
			job_assumes(j, launchd_mport_deallocate(l2l_ports[l2l_i]) == KERN_SUCCESS);
			continue;
		}

		if ((ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	if (kr == 0) {
		/* Success: we own the target's bootstrap send right now; drop it. */
		if (target_subset) {
			job_assumes(j, launchd_mport_deallocate(target_subset) == KERN_SUCCESS);
		}
	} else if (jmr) {
		/* Failure after creating the child manager: unwind it. */
		jobmgr_shutdown(jmr);
	}

	return kr;
}
6353
6354 kern_return_t
6355 job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
6356 vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
6357 mach_port_array_t *portsp, unsigned int *ports_cnt)
6358 {
6359 launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
6360 mach_port_array_t ports = NULL;
6361 unsigned int cnt = 0, cnt2 = 0;
6362 size_t packed_size;
6363 struct machservice *ms;
6364 jobmgr_t jm;
6365 job_t ji;
6366
6367 #if TARGET_OS_EMBEDDED
6368 return BOOTSTRAP_NOT_PRIVILEGED;
6369 #endif
6370
6371 if (!launchd_assumes(j != NULL)) {
6372 return BOOTSTRAP_NO_MEMORY;
6373 }
6374
6375 jm = j->mgr;
6376
6377 if (getpid() != 1) {
6378 job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
6379 return BOOTSTRAP_NOT_PRIVILEGED;
6380 } else if (jobmgr_parent(jm) == NULL) {
6381 job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
6382 return BOOTSTRAP_NOT_PRIVILEGED;
6383 } else if (strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0) {
6384 job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
6385 return BOOTSTRAP_NOT_PRIVILEGED;
6386 } else if (!j->anonymous) {
6387 job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
6388 return BOOTSTRAP_NOT_PRIVILEGED;
6389 }
6390
6391 job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
6392
6393 outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
6394 if (!job_assumes(j, outdata_obj_array)) {
6395 goto out_bad;
6396 }
6397
6398 *outdataCnt = 20 * 1024 * 1024;
6399 mig_allocate(outdata, *outdataCnt);
6400 if (!job_assumes(j, *outdata != 0)) {
6401 return 1;
6402 }
6403
6404 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
6405 if (!ji->anonymous) {
6406 continue;
6407 }
6408 SLIST_FOREACH(ms, &ji->machservices, sle) {
6409 cnt++;
6410 }
6411 }
6412
6413 mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
6414 if (!launchd_assumes(ports != NULL)) {
6415 goto out_bad;
6416 }
6417
6418 LIST_FOREACH(ji, &j->mgr->jobs, sle) {
6419 if (!ji->anonymous) {
6420 continue;
6421 }
6422
6423 SLIST_FOREACH(ms, &ji->machservices, sle) {
6424 if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
6425 job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
6426 } else {
6427 goto out_bad;
6428 }
6429
6430 if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
6431 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
6432 } else {
6433 goto out_bad;
6434 }
6435
6436 if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
6437 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
6438 } else {
6439 goto out_bad;
6440 }
6441
6442 if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
6443 job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
6444 } else {
6445 goto out_bad;
6446 }
6447
6448 ports[cnt2] = machservice_port(ms);
6449
6450 /* Increment the send right by one so we can shutdown the jobmgr cleanly */
6451 jobmgr_assumes(jm, (errno = mach_port_mod_refs(mach_task_self(), ports[cnt2], MACH_PORT_RIGHT_SEND, 1)) == 0);
6452 cnt2++;
6453 }
6454 }
6455
6456 launchd_assumes(cnt == cnt2);
6457
6458 packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
6459 if (!job_assumes(j, packed_size != 0)) {
6460 goto out_bad;
6461 }
6462
6463 launch_data_free(outdata_obj_array);
6464
6465 *portsp = ports;
6466 *ports_cnt = cnt;
6467
6468 *reqport = jm->req_port;
6469 *rcvright = jm->jm_port;
6470
6471 jm->req_port = 0;
6472 jm->jm_port = 0;
6473
6474 workaround_5477111 = j;
6475
6476 jobmgr_shutdown(jm);
6477
6478 return BOOTSTRAP_SUCCESS;
6479
6480 out_bad:
6481 if (outdata_obj_array) {
6482 launch_data_free(outdata_obj_array);
6483 }
6484 if (*outdata) {
6485 mig_deallocate(*outdata, *outdataCnt);
6486 }
6487 if (ports) {
6488 mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
6489 }
6490
6491 return BOOTSTRAP_NO_MEMORY;
6492 }
6493
6494 kern_return_t
6495 job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
6496 {
6497 int bsdepth = 0;
6498 jobmgr_t jmr;
6499
6500 if (!launchd_assumes(j != NULL)) {
6501 return BOOTSTRAP_NO_MEMORY;
6502 }
6503
6504 jmr = j->mgr;
6505
6506 while ((jmr = jobmgr_parent(jmr)) != NULL) {
6507 bsdepth++;
6508 }
6509
6510 /* Since we use recursion, we need an artificial depth for subsets */
6511 if (bsdepth > 100) {
6512 job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
6513 return BOOTSTRAP_NO_MEMORY;
6514 }
6515
6516 if ((jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, NULL)) == NULL) {
6517 if (requestorport == MACH_PORT_NULL) {
6518 return BOOTSTRAP_NOT_PRIVILEGED;
6519 }
6520 return BOOTSTRAP_NO_MEMORY;
6521 }
6522
6523 *subsetportp = jmr->jm_port;
6524 return BOOTSTRAP_SUCCESS;
6525 }
6526
6527 kern_return_t
6528 job_mig_create_service(job_t j, name_t servicename, mach_port_t *serviceportp)
6529 {
6530 struct machservice *ms;
6531
6532 if (!launchd_assumes(j != NULL)) {
6533 return BOOTSTRAP_NO_MEMORY;
6534 }
6535
6536 if (job_prog(j)[0] == '\0') {
6537 job_log(j, LOG_ERR, "Mach service creation requires a target server: %s", servicename);
6538 return BOOTSTRAP_NOT_PRIVILEGED;
6539 }
6540
6541 if (!j->legacy_mach_job) {
6542 job_log(j, LOG_ERR, "bootstrap_create_service() is only allowed against legacy Mach jobs: %s", servicename);
6543 return BOOTSTRAP_NOT_PRIVILEGED;
6544 }
6545
6546 ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
6547 if (ms) {
6548 job_log(j, LOG_DEBUG, "Mach service creation attempt for failed. Already exists: %s", servicename);
6549 return BOOTSTRAP_NAME_IN_USE;
6550 }
6551
6552 job_checkin(j);
6553
6554 *serviceportp = MACH_PORT_NULL;
6555 ms = machservice_new(j, servicename, serviceportp, false);
6556
6557 if (!launchd_assumes(ms != NULL)) {
6558 goto out_bad;
6559 }
6560
6561 return BOOTSTRAP_SUCCESS;
6562
6563 out_bad:
6564 launchd_assumes(launchd_mport_close_recv(*serviceportp) == KERN_SUCCESS);
6565 return BOOTSTRAP_NO_MEMORY;
6566 }
6567
/*
 * MIG server routine (embedded): report an exit status for the job named
 * 'targetlabel'. Fails if no such job exists.
 */
kern_return_t
job_mig_embedded_wait(job_t j, name_t targetlabel, integer_t *waitstatus)
{
	job_t otherj;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!(otherj = job_find(targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	/* NOTE(review): this returns the CALLER's last exit status (j), not
	 * the target's (otherj); otherj is only used for the existence check
	 * above. Confirm whether otherj->last_exit_status was intended. */
	*waitstatus = j->last_exit_status;

	return 0;
}
6585
/*
 * MIG server routine (embedded): force-start the job named 'targetlabel'
 * and return its PID and task-name port. The caller must be root, share
 * our EUID, or (on embedded) share the target job's username.
 */
kern_return_t
job_mig_embedded_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, mach_port_t *out_name_port)
{
	struct ldcred ldc;
	kern_return_t kr;
	job_t otherj;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!(otherj = job_find(targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	runtime_get_caller_creds(&ldc);

	/* On embedded, an additional same-username escape hatch applies; the
	 * #if extends the privilege condition itself. */
	if (ldc.euid != 0 && ldc.euid != geteuid()
#if TARGET_OS_EMBEDDED
			&& j->username && otherj->username
			&& strcmp(j->username, otherj->username) != 0
#endif
			) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* Dispatch with the kickstart flag; must leave the job running. */
	otherj = job_dispatch(otherj, true);

	if (!job_assumes(j, otherj && otherj->p)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	kr = task_name_for_pid(mach_task_self(), otherj->p, out_name_port);
	if (!job_assumes(j, kr == 0)) {
		return kr;
	}

	*out_pid = otherj->p;

	return 0;
}
6627
6628 kern_return_t
6629 job_mig_wait(job_t j, mach_port_t srp, integer_t *waitstatus)
6630 {
6631 if (!launchd_assumes(j != NULL)) {
6632 return BOOTSTRAP_NO_MEMORY;
6633 }
6634 #if 0
6635 struct ldcred ldc;
6636 runtime_get_caller_creds(&ldc);
6637 #endif
6638 return job_handle_mpm_wait(j, srp, waitstatus);
6639 }
6640
6641 kern_return_t
6642 job_mig_uncork_fork(job_t j)
6643 {
6644 if (!launchd_assumes(j != NULL)) {
6645 return BOOTSTRAP_NO_MEMORY;
6646 }
6647
6648 if (!j->stall_before_exec) {
6649 job_log(j, LOG_WARNING, "Attempt to uncork a job that isn't in the middle of a fork().");
6650 return 1;
6651 }
6652
6653 job_uncork_fork(j);
6654 j->stall_before_exec = false;
6655 return 0;
6656 }
6657
/*
 * MIG server routine: attach a Mach-service lookup policy to the job that
 * owns 'target_pid'. An empty target_service name sets the job-wide
 * defaults (deny-unknown-lookups / deny-job-creation) instead. Jobs that
 * already have policies applied to them may not set policies themselves.
 */
kern_return_t
job_mig_set_service_policy(job_t j, pid_t target_pid, uint64_t flags, name_t target_service)
{
	struct ldcred ldc;
	job_t target_j;

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	runtime_get_caller_creds(&ldc);

#if TARGET_OS_EMBEDDED
	/* Embedded: root only. */
	if( ldc.euid ) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#else
	/* Desktop: a non-root caller with a foreign EUID must at least match
	 * the target process's EUID (looked up via sysctl). */
	if( ldc.euid && (ldc.euid != getuid()) ) {
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, target_pid };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1);
		job_assumes(j, len == sizeof(kp));

		uid_t kp_euid = kp.kp_eproc.e_ucred.cr_uid;
		uid_t kp_uid = kp.kp_eproc.e_pcred.p_ruid;

		if( ldc.euid == kp_euid ) {
			job_log(j, LOG_DEBUG, "Working around rdar://problem/5982485 and allowing job to set policy for PID %u.", target_pid);
		} else {
			job_log(j, LOG_ERR, "Denied Mach service policy update requested by UID/EUID %u/%u against PID %u with UID/EUID %u/%u due to mismatched credentials.", ldc.uid, ldc.euid, target_pid, kp_uid, kp_euid);

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	if (!job_assumes(j, (target_j = jobmgr_find_by_pid(j->mgr, target_pid, true)) != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (SLIST_EMPTY(&j->mspolicies)) {
		job_log(j, LOG_DEBUG, "Setting policy on job \"%s\" for Mach service: %s", target_j->label, target_service);
		if (target_service[0]) {
			job_assumes(j, mspolicy_new(target_j, target_service, flags & BOOTSTRAP_ALLOW_LOOKUP, flags & BOOTSTRAP_PER_PID_SERVICE, false));
		} else {
			/* Empty name: toggle the target job's blanket defaults. */
			target_j->deny_unknown_mslookups = !(flags & BOOTSTRAP_ALLOW_LOOKUP);
			target_j->deny_job_creation = (bool)(flags & BOOTSTRAP_DENY_JOB_CREATION);
		}
	} else {
		job_log(j, LOG_WARNING, "Jobs that have policies assigned to them may not set policies.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return 0;
}
6715
/*
 * MIG server routine: spawn a transient job from a packed launch_data
 * job dictionary ('indata'). The new job is single-shot (unloaded at
 * exit), abandons its process group, and optionally stalls before exec
 * when wait4debugger was requested. On success returns the child PID and
 * the job's observer port, and consumes 'indata'.
 */
kern_return_t
job_mig_spawn(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, pid_t *child_pid, mach_port_t *obsvr_port)
{
	launch_data_t input_obj = NULL;
	size_t data_offset = 0;
	struct ldcred ldc;
	job_t jr;

#if TARGET_OS_EMBEDDED
	return BOOTSTRAP_NOT_PRIVILEGED;
#endif

	runtime_get_caller_creds(&ldc);

	if (!launchd_assumes(j != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	/* System launchd + non-root caller: redirect to the per-user launchd. */
	if (getpid() == 1 && ldc.euid && ldc.uid) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	if (!job_assumes(j, (input_obj = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	jr = jobmgr_import2(j->mgr, input_obj);

	if (!job_assumes(j, jr != NULL)) {
		switch (errno) {
		case EEXIST:
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	/* NULL session: job_reparent_hack() picks the appropriate GUI session. */
	job_reparent_hack(jr, NULL);

	if (getpid() == 1) {
		jr->mach_uid = ldc.uid;
	}

	/* Transient-job semantics. */
	jr->unload_at_exit = true;
	jr->wait4pipe_eof = true;
	jr->abandon_pg = true;
	jr->stall_before_exec = jr->wait4debugger;
	jr->wait4debugger = false;

	jr = job_dispatch(jr, true);

	if (!job_assumes(j, jr != NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	/* Dispatch must have actually produced a running process. */
	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_setup_machport(jr)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);

	*child_pid = jr->p;
	*obsvr_port = jr->j_port;

	/* We own the MIG out-of-line buffer; release it on success. */
	mig_deallocate(indata, indataCnt);

	return BOOTSTRAP_SUCCESS;
}
6799
6800 void
6801 jobmgr_init(bool sflag)
6802 {
6803 const char *root_session_type = getpid() == 1 ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
6804
6805 launchd_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type)) != NULL);
6806 }
6807
/*
 * Hash a NUL-terminated string.
 *
 * djb2
 * This algorithm was first reported by Dan Bernstein many years ago in comp.lang.c
 */
size_t
our_strhash(const char *s)
{
	size_t hash = 5381;

	for (; *s != '\0'; s++) {
		hash = hash * 33 + (size_t)*s;	/* hash*33 + c */
	}

	return hash;
}
6823
6824 size_t
6825 hash_label(const char *label)
6826 {
6827 return our_strhash(label) % LABEL_HASH_SIZE;
6828 }
6829
6830 size_t
6831 hash_ms(const char *msstr)
6832 {
6833 return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
6834 }
6835
6836 bool
6837 mspolicy_copy(job_t j_to, job_t j_from)
6838 {
6839 struct mspolicy *msp;
6840
6841 SLIST_FOREACH(msp, &j_from->mspolicies, sle) {
6842 if (!mspolicy_new(j_to, msp->name, msp->allow, msp->per_pid, true)) {
6843 return false;
6844 }
6845 }
6846
6847 return true;
6848 }
6849
6850 bool
6851 mspolicy_new(job_t j, const char *name, bool allow, bool pid_local, bool skip_check)
6852 {
6853 struct mspolicy *msp;
6854
6855 if (!skip_check) SLIST_FOREACH(msp, &j->mspolicies, sle) {
6856 if (msp->per_pid != pid_local) {
6857 continue;
6858 } else if (strcmp(msp->name, name) == 0) {
6859 return false;
6860 }
6861 }
6862
6863 if ((msp = calloc(1, sizeof(struct mspolicy) + strlen(name) + 1)) == NULL) {
6864 return false;
6865 }
6866
6867 strcpy((char *)msp->name, name);
6868 msp->per_pid = pid_local;
6869 msp->allow = allow;
6870
6871 SLIST_INSERT_HEAD(&j->mspolicies, msp, sle);
6872
6873 return true;
6874 }
6875
6876 void
6877 mspolicy_setup(launch_data_t obj, const char *key, void *context)
6878 {
6879 job_t j = context;
6880
6881 if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
6882 job_log(j, LOG_WARNING, "Invalid object type for Mach service policy key: %s", key);
6883 return;
6884 }
6885
6886 job_assumes(j, mspolicy_new(j, key, launch_data_get_bool(obj), false, false));
6887 }
6888
6889 bool
6890 mspolicy_check(job_t j, const char *name, bool pid_local)
6891 {
6892 struct mspolicy *mspi;
6893
6894 SLIST_FOREACH(mspi, &j->mspolicies, sle) {
6895 if (mspi->per_pid != pid_local) {
6896 continue;
6897 } else if (strcmp(mspi->name, name) != 0) {
6898 continue;
6899 }
6900 return mspi->allow;
6901 }
6902
6903 return !j->deny_unknown_mslookups;
6904 }
6905
6906 void
6907 mspolicy_delete(job_t j, struct mspolicy *msp)
6908 {
6909 SLIST_REMOVE(&j->mspolicies, msp, mspolicy, sle);
6910
6911 free(msp);
6912 }
6913
6914 bool
6915 waiting4removal_new(job_t j, mach_port_t rp)
6916 {
6917 struct waiting_for_removal *w4r;
6918
6919 if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
6920 return false;
6921 }
6922
6923 w4r->reply_port = rp;
6924
6925 SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
6926
6927 return true;
6928 }
6929
6930 void
6931 waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
6932 {
6933 job_assumes(j, job_mig_send_signal_reply(w4r->reply_port, 0) == 0);
6934
6935 SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
6936
6937 free(w4r);
6938 }
6939
/*
 * Query the kernel's maximum process count (kern.maxproc); 100 is the
 * fallback if the sysctl fails.
 */
size_t
get_kern_max_proc(void)
{
	int name[] = { CTL_KERN, KERN_MAXPROC };
	int maxproc = 100;
	size_t len = sizeof(maxproc);

	launchd_assumes(sysctl(name, 2, &maxproc, &len, NULL, 0) != -1);

	return maxproc;
}
6951
6952 void
6953 do_file_init(void)
6954 {
6955 struct stat sb;
6956
6957 launchd_assert(mach_timebase_info(&tbi) == 0);
6958
6959 if (stat("/AppleInternal", &sb) == 0) {
6960 do_apple_internal_magic = true;
6961 }
6962 }