+ switch (which_port) {
+ case TASK_KERNEL_PORT:
+ case TASK_HOST_PORT:
+ case TASK_NAME_PORT:
+ case TASK_BOOTSTRAP_PORT:
+ /* I find it a little odd that zero isn't reserved in the header.
+ * Normally Mach is fairly good about this convention... */
+ case 0:
+ job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
+ break;
+ default:
+ ms->special_port_num = which_port;
+ SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
+ break;
+ }
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
+ if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
+ (void)job_assumes(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)) == KERN_SUCCESS);
+ } else {
+ job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
+ }
+ }
+ break;
+ case LAUNCH_DATA_BOOL:
+ b = launch_data_get_bool(obj);
+ if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
+ ms->debug_on_close = b;
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
+ ms->reset = b;
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
+ ms->hide = b;
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
+ job_set_exception_port(ms->job, ms->port);
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
+ ms->kUNCServer = b;
+ (void)job_assumes(ms->job, host_set_UNDServer(mhp, ms->port) == KERN_SUCCESS);
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_PINGEVENTUPDATES) == 0) {
+ ms->event_update_port = b;
+ }
+ break;
+ case LAUNCH_DATA_STRING:
+ if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
+ const char *option = launch_data_get_string(obj);
+ if (strcasecmp(option, "One") == 0) {
+ ms->drain_one_on_crash = true;
+ } else if (strcasecmp(option, "All") == 0) {
+ ms->drain_all_on_crash = true;
+ }
+ }
+ break;
+ case LAUNCH_DATA_DICTIONARY:
+ job_set_exception_port(ms->job, ms->port);
+ break;
+ default:
+ break;
+ }
+
+ (void)job_assumes(ms->job, launchd_mport_deallocate(mhp) == KERN_SUCCESS);
+}
+
+void
+machservice_setup(launch_data_t obj, const char *key, void *context)
+{
+ job_t j = context;
+ struct machservice *ms;
+ mach_port_t p = MACH_PORT_NULL;
+
+ if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
+ job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
+ return;
+ }
+
+ if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
+ return;
+ }
+
+ ms->isActive = false;
+ ms->upfront = true;
+
+ if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
+ launch_data_dict_iterate(obj, machservice_setup_options, ms);
+ }
+}
+
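+ /* Depth-first garbage collection of a job manager hierarchy during shutdown.
+ * Roughly: recurse into submanagers first, then stop or remove this manager's
+ * jobs. The first pass opens shutdown transactions for any dirty-at-shutdown
+ * jobs; those transactions are closed only once no other non-anonymous jobs
+ * remain active. Returns the manager if it is still alive, or NULL once it
+ * has removed itself.
+ */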
+jobmgr_t
+jobmgr_do_garbage_collection(jobmgr_t jm)
+{
+ jobmgr_t jmi = NULL, jmn = NULL;
+ SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
+ jobmgr_do_garbage_collection(jmi);
+ }
+
+ if (!jm->shutting_down) {
+ return jm;
+ }
+
+ if (SLIST_EMPTY(&jm->submgrs)) {
+ jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
+ } else {
+ jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
+ }
+
+ size_t actives = 0;
+ job_t ji = NULL, jn = NULL;
+ LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
+ if (ji->anonymous) {
+ continue;
+ }
+
+ /* Let the shutdown monitor be up until the very end. */
+ if (ji->shutdown_monitor) {
+ continue;
+ }
+
+ /* On our first pass through, open a transaction for all the jobs that
+ * need to be dirty at shutdown. We'll close these transactions once the
+ * jobs that do not need to be dirty at shutdown have all exited.
+ */
+ if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
+ job_open_shutdown_transaction(ji);
+ }
+
+ const char *active = job_active(ji);
+ if (!active) {
+ job_remove(ji);
+ } else {
+ job_log(ji, LOG_DEBUG, "Job is active: %s", active);
+ job_stop(ji);
+
+ if (ji->p && !ji->dirty_at_shutdown) {
+ /* We really only care if the job has not yet been reaped.
+ * There's no reason to delay shutdown if a Mach port has not
+ * yet been sent back to us, for example. While we're shutting
+ * all the "normal" jobs down, do not count the
+ * dirty-at-shutdown jobs toward the total of actives.
+ *
+ * Note that there's a potential race here where we may not get
+ * a port back in time, so that when we hit jobmgr_remove(), we
+ * end up removing the job and then our attempt to close the
+ * Mach port will fail. But at that point, the failure won't
+ * even make it to the syslog, so not a big deal.
+ */
+ actives++;
+ }
+
+ if (ji->clean_kill) {
+ job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
+ } else {
+ job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
+ }
+ }
+ }
+
+ jm->shutdown_jobs_dirtied = true;
+ if (actives == 0) {
+ if (!jm->shutdown_jobs_cleaned) {
+ LIST_FOREACH(ji, &jm->jobs, sle) {
+ if (!ji->anonymous) {
+ job_close_shutdown_transaction(ji);
+ actives++;
+ }
+ }
+
+ jm->shutdown_jobs_cleaned = true;
+ } else if (jm->monitor_shutdown && _s_shutdown_monitor) {
+ /* The rest of shutdown has completed, so we can kill the shutdown
+ * monitor now like it was any other job.
+ */
+ _s_shutdown_monitor->shutdown_monitor = false;
+ actives = 1;
+
+ job_log(_s_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
+ job_stop(_s_shutdown_monitor);
+ _s_shutdown_monitor = NULL;
+ }
+ }
+
+ jobmgr_t r = jm;
+ if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
+ jobmgr_log(jm, LOG_DEBUG, "Removing.");
+ jobmgr_remove(jm);
+ r = NULL;
+ }
+
+ return r;
+}
+
+void
+jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
+{
+ /* I maintain that stray processes should be at the mercy of launchd during shutdown,
+ * but nevertheless, things like diskimages-helper can stick around, and SIGKILLing
+ * them can result in data loss. So we send SIGTERM to all the strays and don't wait
+ * for them to exit before moving on.
+ *
+ * See rdar://problem/6562592
+ */
+ size_t i = 0;
+ for (i = 0; i < np; i++) {
+ if (p[i] != 0) {
+ jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
+ (void)jobmgr_assumes(jm, runtime_kill(p[i], SIGTERM) != -1);
+ }
+ }
+}
+
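+ /* PID 1 only: at shutdown, enumerate every process on the system, log any
+ * that launchd does not own (or owns only anonymously), and optionally pass
+ * the survivors to jobmgr_kill_stray_children().
+ */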
+void
+jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
+{
+ size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
+ pid_t *pids = NULL;
+ int i = 0, kp_cnt = 0;
+
+ if (likely(jm->parentmgr || !pid1_magic)) {
+ return;
+ }
+
+ if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
+ return;
+ }
+
+ runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
+
+ if (!jobmgr_assumes(jm, (kp_cnt = proc_listallpids(pids, len)) != -1)) {
+ goto out;
+ }
+
+ pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
+ for (i = 0; i < kp_cnt; i++) {
+ struct proc_bsdshortinfo proc;
+ if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
+ if (errno != ESRCH) {
+ jobmgr_assumes(jm, errno == 0);
+ }
+
+ kp_skipped++;
+ continue;
+ }
+
+ pid_t p_i = pids[i];
+ pid_t pp_i = proc.pbsi_ppid;
+ pid_t pg_i = proc.pbsi_pgid;
+ const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
+ const char *n = proc.pbsi_comm;
+
+ if (unlikely(p_i == 0 || p_i == 1)) {
+ kp_skipped++;
+ continue;
+ }
+
+ if (_s_shutdown_monitor && pp_i == _s_shutdown_monitor->p) {
+ kp_skipped++;
+ continue;
+ }
+
+ /* We might still have some jobs hanging around that we've already decided to shut down. */
+ job_t j = jobmgr_find_by_pid(jm, p_i, false);
+ if (!j || j->anonymous) {
+ jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
+
+ int status = 0;
+ if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
+ if (jobmgr_assumes(jm, waitpid(p_i, &status, WNOHANG) == 0)) {
+ jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
+ }
+ kp_skipped++;
+ } else {
+ job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
+ /* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
+ * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
+ * hints to the kernel along the way, so that it could shut down certain subsystems when
+ * their userspace emissaries go away, before the call to reboot(2).
+ */
+ if (leader && leader->ignore_pg_at_shutdown) {
+ kp_skipped++;
+ } else {
+ ps[i] = p_i;
+ }
+ }
+ } else {
+ kp_skipped++;
+ }
+ }
+
+ if ((kp_cnt - kp_skipped > 0) && kill_strays) {
+ /* ps is sparsely populated (skipped slots stay zero), so pass the full count. */
+ jobmgr_kill_stray_children(jm, ps, kp_cnt);
+ }
+
+ free(ps);
+out:
+ free(pids);
+}
+
+jobmgr_t
+jobmgr_parent(jobmgr_t jm)
+{
+ return jm->parentmgr;
+}
+
+void
+job_uncork_fork(job_t j)
+{
+ pid_t c = j->p;
+
+ job_log(j, LOG_DEBUG, "Uncorking the fork().");
+ /* This unblocks the child and avoids a race
+ * between the earlier fork() and the kevent_mod(). */
+ (void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
+ (void)job_assumes(j, runtime_close(j->fork_fd) != -1);
+ j->fork_fd = 0;
+}
+
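+ /* Create a new job manager (bootstrap). 'jm' is the parent manager (NULL for
+ * the root), 'requestorport' is the port whose death tears the sub-bootstrap
+ * down, 'transfer_port' optionally supplies an existing receive right to serve
+ * requests on, 'sflag' is forwarded to the bootstrapper's launchctl invocation
+ * as -s, 'skip_init' suppresses session initialization, and 'asport' is the
+ * audit session port to associate with the new manager.
+ */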
+jobmgr_t
+jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
+{
+ mach_msg_size_t mxmsgsz;
+ job_t bootstrapper = NULL;
+ jobmgr_t jmr;
+
+ launchd_assert(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);
+
+ if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
+ jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
+ return NULL;
+ }
+
+ jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));
+
+ if (!jobmgr_assumes(jm, jmr != NULL)) {
+ return NULL;
+ }
+
+ if (jm == NULL) {
+ root_jobmgr = jmr;
+ }
+
+ jmr->kqjobmgr_callback = jobmgr_callback;
+ strcpy(jmr->name_init, name ? name : "Under construction");
+
+ jmr->req_port = requestorport;
+
+ if ((jmr->parentmgr = jm)) {
+ SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
+ }
+
+ if (jm && !jobmgr_assumes(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS)) {
+ goto out_bad;
+ }
+
+ if (transfer_port != MACH_PORT_NULL) {
+ (void)jobmgr_assumes(jmr, jm != NULL);
+ jmr->jm_port = transfer_port;
+ } else if (!jm && !pid1_magic) {
+ char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
+ name_t service_buf;
+
+ snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());
+
+ if (!jobmgr_assumes(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port) == 0)) {
+ goto out_bad;
+ }
+
+ if (trusted_fd) {
+ int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);
+
+ if ((dfd = dup(lfd)) >= 0) {
+ (void)jobmgr_assumes(jmr, runtime_close(dfd) != -1);
+ (void)jobmgr_assumes(jmr, runtime_close(lfd) != -1);
+ }
+
+ unsetenv(LAUNCHD_TRUSTED_FD_ENV);
+ }
+
+ /* Cut off the Libc cache; we don't want to deadlock against ourselves. */
+ inherited_bootstrap_port = bootstrap_port;
+ bootstrap_port = MACH_PORT_NULL;
+ launchd_assert(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME) == KERN_SUCCESS);
+
+ /* We set this explicitly as we start each child */
+ launchd_assert(launchd_set_bport(MACH_PORT_NULL) == KERN_SUCCESS);
+ } else if (!jobmgr_assumes(jmr, launchd_mport_create_recv(&jmr->jm_port) == KERN_SUCCESS)) {
+ goto out_bad;
+ }
+
+ if (!name) {
+ sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
+ }
+
+ /* Sigh... at the moment, MIG has maxsize == sizeof(reply union) */
+ mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
+ if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
+ mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
+ }
+
+ /* Total hacks. But the MIG server loop is too generic, and the more dynamic
+ * parts of it haven't been tested, or if they have, it was a very long time
+ * ago.
+ */
+ if (xpc_events_xpc_events_subsystem.maxsize > mxmsgsz) {
+ mxmsgsz = xpc_events_xpc_events_subsystem.maxsize;
+ }
+ if (xpc_domain_xpc_domain_subsystem.maxsize > mxmsgsz) {
+ mxmsgsz = xpc_domain_xpc_domain_subsystem.maxsize;
+ }
+
+ if (!jm) {
+ (void)jobmgr_assumes(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
+ (void)jobmgr_assumes(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
+ (void)jobmgr_assumes(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr) != -1);
+ (void)jobmgr_assumes(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr) != -1);
+ }
+
+ if (name && !skip_init) {
+ bootstrapper = jobmgr_init_session(jmr, name, sflag);
+ }
+
+ if (!bootstrapper || !bootstrapper->weird_bootstrap) {
+ if (!jobmgr_assumes(jmr, runtime_add_mport(jmr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS)) {
+ goto out_bad;
+ }
+ }
+
+ jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");
+
+ if (bootstrapper) {
+ bootstrapper->asport = asport;
+
+ jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
+ (void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
+ } else {
+ jmr->req_asport = asport;
+ }
+
+ if (asport != MACH_PORT_NULL) {
+ (void)jobmgr_assumes(jmr, launchd_mport_copy_send(asport) == KERN_SUCCESS);
+ }
+
+ if (jmr->parentmgr) {
+ runtime_add_weak_ref();
+ }
+
+ return jmr;
+
+out_bad:
+ if (jmr) {
+ jobmgr_remove(jmr);
+ if (jm == NULL) {
+ root_jobmgr = NULL;
+ }
+ }
+ return NULL;
+}
+
+#ifndef __LAUNCH_DISABLE_XPC_SUPPORT__
+jobmgr_t
+jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
+{
+ jobmgr_t new = NULL;
+
+ /* These job managers are basically singletons, so we use the root Mach
+ * bootstrap port as their requestor ports so they'll never go away.
+ */
+ mach_port_t req_port = root_jobmgr->jm_port;
+ if (jobmgr_assumes(jm, launchd_mport_make_send(req_port) == KERN_SUCCESS)) {
+ new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
+ if (new) {
+ new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
+ new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
+ new->xpc_singleton = true;
+ }
+ }
+
+ return new;
+}
+
+jobmgr_t
+jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
+{
+ jobmgr_t jmi = NULL;
+ LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
+ if (jmi->req_euid == uid) {
+ return jmi;
+ }
+ }
+
+ name_t name;
+ (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
+ jmi = jobmgr_new_xpc_singleton_domain(jm, name);
+ if (jobmgr_assumes(jm, jmi != NULL)) {
+ /* We need to create a per-user launchd for this UID if there isn't one
+ * already so we can grab the bootstrap port.
+ */
+ job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
+ if (jobmgr_assumes(jmi, puj != NULL)) {
+ (void)jobmgr_assumes(jmi, launchd_mport_copy_send(puj->asport) == KERN_SUCCESS);
+ (void)jobmgr_assumes(jmi, launchd_mport_copy_send(jmi->req_bsport) == KERN_SUCCESS);
+ jmi->shortdesc = "per-user";
+ jmi->req_asport = puj->asport;
+ jmi->req_asid = puj->asid;
+ jmi->req_euid = uid;
+ jmi->req_egid = -1;
+
+ LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
+ } else {
+ jobmgr_remove(jmi);
+ }
+ }
+
+ return jmi;
+}
+
+jobmgr_t
+jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
+{
+ jobmgr_t jmi = NULL;
+ LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
+ if (jmi->req_asid == asid) {
+ return jmi;
+ }
+ }
+
+ name_t name;
+ (void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
+ jmi = jobmgr_new_xpc_singleton_domain(jm, name);
+ if (jobmgr_assumes(jm, jmi != NULL)) {
+ (void)jobmgr_assumes(jmi, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
+ jmi->shortdesc = "per-session";
+ jmi->req_bsport = root_jobmgr->jm_port;
+ (void)jobmgr_assumes(jmi, audit_session_port(asid, &jmi->req_asport) == 0);
+ jmi->req_asid = asid;
+ jmi->req_euid = -1;
+ jmi->req_egid = -1;
+
+ LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
+ } else {
+ jobmgr_remove(jmi);
+ }
+
+ return jmi;
+}
+#endif
+
+job_t
+jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
+{
+ const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
+ char thelabel[1000];
+ job_t bootstrapper;
+
+ snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
+ bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);
+
+ if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
+ bootstrapper->is_bootstrapper = true;
+ char buf[100];
+
+ /* <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs) */
+ snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
+ envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false, false);
+ bootstrapper->weird_bootstrap = true;
+ (void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
+ } else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
+ bootstrapper->is_bootstrapper = true;
+ if (jobmgr_assumes(jm, pid1_magic)) {
+ /* Have our system bootstrapper print out to the console. */
+ bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
+ bootstrapper->stderrpath = strdup(_PATH_CONSOLE);
+
+ if (g_console) {
+ (void)jobmgr_assumes(jm, kevent_mod((uintptr_t)fileno(g_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm) != -1);
+ }
+ }
+ }
+
+ jm->session_initialized = true;
+ return bootstrapper;
+}
+
+jobmgr_t
+jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
+{
+ struct machservice *ms, *next_ms;
+ jobmgr_t jmi, jmn;
+
+ /* Mach ports, unlike Unix descriptors, are reference counted. In other
+ * words, when some program hands us a second or subsequent send right
+ * to a port we already have open, the Mach kernel gives us the same
+ * port number back and increments a reference count associated with
+ * the port. This forces us, when discovering that a receive right at
+ * the other end has been deleted, to wander all of our objects to see
+ * what weird places clients might have handed us the same send right
+ * to use.
+ */
+
+ if (jm == root_jobmgr) {
+ if (port == inherited_bootstrap_port) {
+ (void)jobmgr_assumes(jm, launchd_mport_deallocate(port) == KERN_SUCCESS);
+ inherited_bootstrap_port = MACH_PORT_NULL;
+
+ return jobmgr_shutdown(jm);
+ }
+
+ LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
+ if (ms->port == port && !ms->recv) {
+ machservice_delete(ms->job, ms, true);
+ }
+ }
+ }
+
+ SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
+ jobmgr_delete_anything_with_port(jmi, port);
+ }
+
+ if (jm->req_port == port) {
+ jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
+ return jobmgr_shutdown(jm);
+ }
+
+ return jm;
+}
+
+struct machservice *
+jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
+{
+ struct machservice *ms;
+ job_t target_j;
+
+ jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);
+
+ if (target_pid) {
+ /* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
+ * bootstrap in other bootstraps.
+ */
+
+ /* Start in the given bootstrap. */
+ if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
+ /* If we fail, do a deep traversal. */
+ if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
+ jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
+ return NULL;
+ }
+ }
+
+ SLIST_FOREACH(ms, &target_j->machservices, sle) {
+ if (ms->per_pid && strcmp(name, ms->name) == 0) {
+ return ms;
+ }
+ }
+
+ job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
+ return NULL;
+ }
+
+ jobmgr_t where2look = jm;
+ /* XPC domains are separate from Mach bootstraps. */
+ if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
+ if (g_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
+ where2look = root_jobmgr;
+ }
+ }
+
+ LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
+ if (!ms->per_pid && strcmp(name, ms->name) == 0) {
+ return ms;
+ }
+ }
+
+ if (jm->parentmgr == NULL || !check_parent) {
+ return NULL;
+ }
+
+ return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
+}
+
+mach_port_t
+machservice_port(struct machservice *ms)
+{
+ return ms->port;
+}
+
+job_t
+machservice_job(struct machservice *ms)
+{
+ return ms->job;
+}
+
+bool
+machservice_hidden(struct machservice *ms)
+{
+ return ms->hide;
+}
+
+bool
+machservice_active(struct machservice *ms)
+{
+ return ms->isActive;
+}
+
+const char *
+machservice_name(struct machservice *ms)
+{
+ return ms->name;
+}
+
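+ /* Drain messages left on a service's port after its job crashed, per the
+ * DrainMessagesOnCrash policy: "One" receives a single message, "All" loops
+ * until the port is empty. Exception-server ports get a proper MIG one-shot
+ * via launchd_exc_runtime_once(); everything else is received and destroyed.
+ */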
+void
+machservice_drain_port(struct machservice *ms)
+{
+ bool drain_one = ms->drain_one_on_crash;
+ bool drain_all = ms->drain_all_on_crash;
+
+ if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
+ return;
+ }
+
+ job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);
+
+ char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
+ char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
+ mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
+ mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;
+
+ mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
+
+ do {
+ /* This should be a direct check on the Mach service to see if it's an exception-handling
+ * port, and it will break things if ReportCrash or SafetyNet start advertising other
+ * Mach services. But for now, it should be okay.
+ */
+ if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
+ mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
+ } else {
+ mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT;
+
+ mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
+ switch (mr) {
+ case MACH_MSG_SUCCESS:
+ mach_msg_destroy((mach_msg_header_t *)req_hdr);
+ break;
+ case MACH_RCV_TIMED_OUT:
+ break;
+ case MACH_RCV_TOO_LARGE:
+ runtime_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
+ break;
+ default:
+ break;
+ }
+ }
+ } while (drain_all && mr != MACH_RCV_TIMED_OUT);
+}
+
+void
+machservice_delete(job_t j, struct machservice *ms, bool port_died)
+{
+ if (ms->alias) {
+ /* HACK: Egregious code duplication. But dealing with aliases is a
+ * pretty simple affair since they can't and shouldn't have any complex
+ * behaviors associated with them.
+ */
+ LIST_REMOVE(ms, name_hash_sle);
+ SLIST_REMOVE(&j->machservices, ms, machservice, sle);
+ free(ms);
+ return;
+ }
+
+ if (unlikely(ms->debug_on_close)) {
+ job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
+ (void)job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
+ }
+
+ if (ms->recv && job_assumes(j, !machservice_active(ms))) {
+ job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
+ (void)job_assumes(j, launchd_mport_close_recv(ms->port) == KERN_SUCCESS);
+ }
+
+ (void)job_assumes(j, launchd_mport_deallocate(ms->port) == KERN_SUCCESS);
+
+ if (unlikely(ms->port == the_exception_server)) {
+ the_exception_server = 0;
+ }
+
+ job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);
+
+ if (ms->special_port_num) {
+ SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
+ }
+ SLIST_REMOVE(&j->machservices, ms, machservice, sle);
+
+ if (!(j->dedicated_instance || ms->event_channel)) {
+ LIST_REMOVE(ms, name_hash_sle);
+ }
+ LIST_REMOVE(ms, port_hash_sle);
+
+ free(ms);
+}
+
+void
+machservice_request_notifications(struct machservice *ms)
+{
+ mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
+
+ ms->isActive = true;
+
+ if (ms->recv) {
+ which = MACH_NOTIFY_PORT_DESTROYED;
+ job_checkin(ms->job);
+ }
+
+ (void)job_assumes(ms->job, launchd_mport_notify_req(ms->port, which) == KERN_SUCCESS);
+}
+
+#define NELEM(x) (sizeof(x)/sizeof(x[0]))
+#define END_OF(x) (&(x)[NELEM(x)])
+
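+ /* Split a command line into an argv array. Double quotes group words and a
+ * backslash escapes the next character; e.g. (hypothetical input)
+ * mach_cmd2argv("/usr/libexec/foo \"two words\"") yields
+ * { "/usr/libexec/foo", "two words", NULL }. The result is one allocation:
+ * the pointer array up front, followed by the packed strings, so a single
+ * free() releases everything.
+ */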
+char **
+mach_cmd2argv(const char *string)
+{
+ char *argv[100], args[1000];
+ const char *cp;
+ char *argp = args, term, **argv_ret, *co;
+ unsigned int nargs = 0, i;
+
+ for (cp = string; *cp;) {
+ while (isspace(*cp))
+ cp++;
+ term = (*cp == '"') ? *cp++ : '\0';
+ if (nargs < NELEM(argv)) {
+ argv[nargs++] = argp;
+ }
+ while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args)) {
+ if (*cp == '\\') {
+ cp++;
+ }
+ *argp++ = *cp;
+ if (*cp) {
+ cp++;
+ }
+ }
+ *argp++ = '\0';
+ }
+ argv[nargs] = NULL;
+
+ if (nargs == 0) {
+ return NULL;
+ }
+
+ argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);
+
+ if (!launchd_assumes(argv_ret != NULL)) {
+ return NULL;
+ }
+
+ co = (char *)argv_ret + (nargs + 1) * sizeof(char *);
+
+ for (i = 0; i < nargs; i++) {
+ strcpy(co, argv[i]);
+ argv_ret[i] = co;
+ co += strlen(argv[i]) + 1;
+ }
+ argv_ret[i] = NULL;
+
+ return argv_ret;
+}
+
+void
+job_checkin(job_t j)
+{
+ j->checkedin = true;
+}
+
+ bool
+ job_is_god(job_t j)
+{
+ return j->embedded_special_privileges;
+}
+
+bool
+job_ack_port_destruction(mach_port_t p)
+{
+ struct machservice *ms;
+ job_t j;
+
+ LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
+ if (ms->recv && (ms->port == p)) {
+ break;
+ }
+ }
+
+ if (!jobmgr_assumes(root_jobmgr, ms != NULL)) {
+ return false;
+ }
+
+ j = ms->job;
+
+ jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
+
+ /* Without being the exception handler, NOTE_EXIT is our only way to tell if the job
+ * crashed, and we can't rely on NOTE_EXIT always being processed after all the job's
+ * receive rights have been returned.
+ *
+ * So when we get receive rights back, check to see if the job has been reaped yet. If
+ * not, then we add this service to a list of services to be drained on crash if it's
+ * requested that behavior. So, for a job with N receive rights all requesting that they
+ * be drained on crash, we can safely handle the following sequence of events.
+ *
+ * ReceiveRight0Returned
+ * ReceiveRight1Returned
+ * ReceiveRight2Returned
+ * NOTE_EXIT (reap, get exit status)
+ * ReceiveRight3Returned
+ * .
+ * .
+ * .
+ * ReceiveRight(N - 1)Returned
+ */
+
+ if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
+ if (j->crashed && j->reaped) {
+ job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
+ machservice_drain_port(ms);
+ } else if (!(j->crashed || j->reaped)) {
+ job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
+ }
+ }
+
+ /* If we get this notification after the job has been reaped, then we want to ping
+ * the event port to keep things going.
+ */
+ if (ms->event_update_port && !j->p && job_assumes(j, j->event_monitor)) {
+ if (_s_event_update_port == MACH_PORT_NULL) {
+ (void)job_assumes(j, launchd_mport_make_send_once(ms->port, &_s_event_update_port) == KERN_SUCCESS);
+ }
+ eventsystem_ping();
+ }
+
+ ms->isActive = false;
+ if (ms->delete_on_destruction) {
+ machservice_delete(j, ms, false);
+ } else if (ms->reset) {
+ machservice_resetport(j, ms);
+ }
+
+ job_dispatch(j, false);
+
+ root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
+
+ return true;
+}
+
+void
+job_ack_no_senders(job_t j)
+{
+ j->priv_port_has_senders = false;
+
+ (void)job_assumes(j, launchd_mport_close_recv(j->j_port) == KERN_SUCCESS);
+ j->j_port = 0;
+
+ job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");
+
+ job_dispatch(j, false);
+}
+
+bool
+semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
+{
+ struct semaphoreitem *si;
+ size_t alloc_sz = sizeof(struct semaphoreitem);
+
+ if (what) {
+ alloc_sz += strlen(what) + 1;
+ }
+
+ if (!job_assumes(j, si = calloc(1, alloc_sz))) {
+ return false;
+ }
+
+ si->fd = -1;
+ si->why = why;
+
+ if (what) {
+ strcpy(si->what_init, what);
+ }
+
+ SLIST_INSERT_HEAD(&j->semaphores, si, sle);
+
+ if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
+ job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
+ SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
+ j->nosy = true;
+ }
+
+ semaphoreitem_runtime_mod_ref(si, true);
+
+ return true;
+}
+
+void
+semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
+{
+ /*
+ * External events need to be tracked.
+ * Internal events do NOT need to be tracked.
+ */
+
+ switch (si->why) {
+ case SUCCESSFUL_EXIT:
+ case FAILED_EXIT:
+ case OTHER_JOB_ENABLED:
+ case OTHER_JOB_DISABLED:
+ case OTHER_JOB_ACTIVE:
+ case OTHER_JOB_INACTIVE:
+ return;
+ default:
+ break;
+ }
+
+ if (add) {
+ runtime_add_weak_ref();
+ } else {
+ runtime_del_weak_ref();
+ }
+}
+
+void
+semaphoreitem_delete(job_t j, struct semaphoreitem *si)
+{
+ semaphoreitem_runtime_mod_ref(si, false);
+
+ SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
+
+ if (si->fd != -1) {
+ (void)job_assumes(j, runtime_close(si->fd) != -1);
+ }
+
+ /* We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores. */
+ if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
+ j->nosy = false;
+ SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
+ }
+
+ free(si);
+}
+
+void
+semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
+{
+ struct semaphoreitem_dict_iter_context *sdic = context;
+ semaphore_reason_t why;
+
+ why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
+
+ semaphoreitem_new(sdic->j, why, key);
+}
+
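+ /* Translate one KeepAlive sub-key into semaphore items. Boolean keys map
+ * directly (e.g. SuccessfulExit=true becomes a SUCCESSFUL_EXIT semaphore);
+ * dictionary keys such as PathState or OtherJobEnabled map each entry through
+ * semaphoreitem_setup_dict_iter with a true/false reason pair.
+ */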
+void
+semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
+{
+ struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
+ job_t j = context;
+ semaphore_reason_t why;
+
+ switch (launch_data_get_type(obj)) {
+ case LAUNCH_DATA_BOOL:
+ if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
+ why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
+ semaphoreitem_new(j, why, NULL);
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
+ why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
+ semaphoreitem_new(j, why, NULL);
+ j->start_pending = true;
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
+ j->needs_kickoff = launch_data_get_bool(obj);
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
+ why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
+ semaphoreitem_new(j, why, NULL);
+ j->start_pending = true;
+ } else {
+ (void)job_assumes(j, false);
+ }
+ break;
+ case LAUNCH_DATA_DICTIONARY:
+ if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_PATHSTATE) == 0) {
+ sdic.why_true = PATH_EXISTS;
+ sdic.why_false = PATH_MISSING;
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
+ sdic.why_true = OTHER_JOB_ACTIVE;
+ sdic.why_false = OTHER_JOB_INACTIVE;
+ } else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
+ sdic.why_true = OTHER_JOB_ENABLED;
+ sdic.why_false = OTHER_JOB_DISABLED;
+ } else {
+ (void)job_assumes(j, false);
+ break;
+ }
+
+ launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
+ break;
+ default:
+ (void)job_assumes(j, false);
+ break;
+ }
+}
+
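+ /* Register one external event for a job. Events get a monotonically
+ * increasing id within their event system, start in the "off" state with
+ * wanted_state true, and each registration pings the event monitor so it
+ * can fetch the updated set.
+ */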
+bool
+externalevent_new(job_t j, struct eventsystem *sys, char *evname, launch_data_t event)
+{
+ struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
+ if (job_assumes(j, ee != NULL)) {
+ ee->event = launch_data_copy(event);
+ if (job_assumes(j, ee->event != NULL)) {
+ strcpy(ee->name, evname);
+ ee->job = j;
+ ee->id = sys->curid;
+ ee->sys = sys;
+ ee->state = false;
+ ee->wanted_state = true;
+ sys->curid++;
+
+ LIST_INSERT_HEAD(&j->events, ee, job_le);
+ LIST_INSERT_HEAD(&sys->events, ee, sys_le);
+
+ job_log(j, LOG_DEBUG, "New event: %s:%s", sys->name, evname);
+ } else {
+ free(ee);
+ ee = NULL;
+ }
+ }
+
+ eventsystem_ping();
+ return ee != NULL;
+}
+
+void
+externalevent_delete(struct externalevent *ee)
+{
+ launch_data_free(ee->event);
+ LIST_REMOVE(ee, job_le);
+ LIST_REMOVE(ee, sys_le);
+
+ free(ee);
+
+ eventsystem_ping();
+}
+
+void
+externalevent_setup(launch_data_t obj, const char *key, void *context)
+{
+ struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
+ (void)job_assumes(ctx->j, externalevent_new(ctx->j, ctx->sys, (char *)key, obj));
+}
+
+struct externalevent *
+externalevent_find(const char *sysname, uint64_t id)
+{
+ struct externalevent *ei = NULL;
+
+ struct eventsystem *es = eventsystem_find(sysname);
+ if (launchd_assumes(es != NULL)) {
+ LIST_FOREACH(ei, &es->events, sys_le) {
+ if (ei->id == id) {
+ break;
+ }
+ }
+ }
+
+ return ei;
+}
+
+struct eventsystem *
+eventsystem_new(const char *name)
+{
+ struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
+ if (launchd_assumes(es != NULL)) {
+ strcpy(es->name, name);
+ LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
+ }
+
+ return es;
+}
+
+void
+eventsystem_delete(struct eventsystem *es)
+{
+ struct externalevent *ei = NULL;
+ while ((ei = LIST_FIRST(&es->events))) {
+ externalevent_delete(ei);
+ }
+
+ LIST_REMOVE(es, global_le);
+
+ free(es);
+}
+
+void
+eventsystem_setup(launch_data_t obj, const char *key, void *context)
+{
+ job_t j = (job_t)context;
+ if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
+ return;
+ }
+
+ struct eventsystem *sys = eventsystem_find(key);
+ if (unlikely(sys == NULL)) {
+ sys = eventsystem_new(key);
+ job_log(j, LOG_DEBUG, "New event system: %s", key);
+ }
+
+ if (job_assumes(j, sys != NULL)) {
+ struct externalevent_iter_ctx ctx = {
+ .j = j,
+ .sys = sys,
+ };
+ launch_data_dict_iterate(obj, externalevent_setup, &ctx);
+ sys->has_updates = true;
+ }
+}
+
+struct eventsystem *
+eventsystem_find(const char *name)
+{
+ struct eventsystem *esi = NULL;
+ LIST_FOREACH(esi, &_s_event_systems, global_le) {
+ if (strcmp(name, esi->name) == 0) {
+ break;
+ }
+ }
+
+ return esi;
+}
+
+void
+eventsystem_ping(void)
+{
+ /* We don't wrap this in an assumes() macro because we could potentially
+ * call this function many times before the helper job gets back to us
+ * and gives us another send-once right. So if it's MACH_PORT_NULL, that
+ * means that we've sent a ping, but the helper hasn't yet checked in to
+ * get the new set of notifications.
+ */
+ if (_s_event_update_port != MACH_PORT_NULL) {
+ kern_return_t kr = helper_downcall_ping(_s_event_update_port);
+ if (kr != KERN_SUCCESS) {
+ runtime_syslog(LOG_NOTICE, "helper_downcall_ping(): kr = 0x%x", kr);
+ }
+ _s_event_update_port = MACH_PORT_NULL;
+ }
+}
+
+void
+jobmgr_dispatch_all_semaphores(jobmgr_t jm)
+{
+ jobmgr_t jmi, jmn;
+ job_t ji, jn;
+
+ SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
+ jobmgr_dispatch_all_semaphores(jmi);
+ }
+
+ LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
+ if (!SLIST_EMPTY(&ji->semaphores)) {
+ job_dispatch(ji, false);
+ }
+ }
+}
+
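+ /* The cronemu_* family computes the next wall-clock time matching a
+ * cron-style (month, day, hour, minute) spec, where -1 means "any value".
+ * Each level bumps its field and lets mktime(3) normalize the result; the
+ * carrytest pattern detects when mktime carried into the next larger unit
+ * (e.g. day 31 rolling into the following month), which means there is no
+ * match at this level and the caller must advance its own field.
+ */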
+time_t
+cronemu(int mon, int mday, int hour, int min)
+{
+ struct tm workingtm;
+ time_t now;
+
+ now = time(NULL);
+ workingtm = *localtime(&now);
+
+ workingtm.tm_isdst = -1;
+ workingtm.tm_sec = 0;
+ workingtm.tm_min++;
+
+ while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
+ workingtm.tm_year++;
+ workingtm.tm_mon = 0;
+ workingtm.tm_mday = 1;
+ workingtm.tm_hour = 0;
+ workingtm.tm_min = 0;
+ mktime(&workingtm);
+ }
+
+ return mktime(&workingtm);
+}
+
+time_t
+cronemu_wday(int wday, int hour, int min)
+{
+ struct tm workingtm;
+ time_t now;
+
+ now = time(NULL);
+ workingtm = *localtime(&now);
+
+ workingtm.tm_isdst = -1;
+ workingtm.tm_sec = 0;
+ workingtm.tm_min++;
+
+ if (wday == 7) {
+ wday = 0;
+ }
+
+ while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
+ workingtm.tm_mday++;
+ workingtm.tm_hour = 0;
+ workingtm.tm_min = 0;
+ mktime(&workingtm);
+ }
+
+ return mktime(&workingtm);
+}
+
+bool
+cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
+{
+ if (mon == -1) {
+ struct tm workingtm = *wtm;
+ int carrytest;
+
+ while (!cronemu_mday(&workingtm, mday, hour, min)) {
+ workingtm.tm_mon++;
+ workingtm.tm_mday = 1;
+ workingtm.tm_hour = 0;
+ workingtm.tm_min = 0;
+ carrytest = workingtm.tm_mon;
+ mktime(&workingtm);
+ if (carrytest != workingtm.tm_mon) {
+ return false;
+ }
+ }
+ *wtm = workingtm;
+ return true;
+ }
+
+ if (mon < wtm->tm_mon) {
+ return false;
+ }
+
+ if (mon > wtm->tm_mon) {
+ wtm->tm_mon = mon;
+ wtm->tm_mday = 1;
+ wtm->tm_hour = 0;
+ wtm->tm_min = 0;
+ }
+
+ return cronemu_mday(wtm, mday, hour, min);
+}
+
+bool
+cronemu_mday(struct tm *wtm, int mday, int hour, int min)
+{
+ if (mday == -1) {
+ struct tm workingtm = *wtm;
+ int carrytest;
+
+ while (!cronemu_hour(&workingtm, hour, min)) {
+ workingtm.tm_mday++;
+ workingtm.tm_hour = 0;
+ workingtm.tm_min = 0;
+ carrytest = workingtm.tm_mday;
+ mktime(&workingtm);
+ if (carrytest != workingtm.tm_mday) {
+ return false;
+ }
+ }
+ *wtm = workingtm;
+ return true;
+ }
+
+ if (mday < wtm->tm_mday) {
+ return false;
+ }
+
+ if (mday > wtm->tm_mday) {
+ wtm->tm_mday = mday;
+ wtm->tm_hour = 0;
+ wtm->tm_min = 0;
+ }
+
+ return cronemu_hour(wtm, hour, min);
+}
+
+bool
+cronemu_hour(struct tm *wtm, int hour, int min)
+{
+ if (hour == -1) {
+ struct tm workingtm = *wtm;
+ int carrytest;
+
+ while (!cronemu_min(&workingtm, min)) {
+ workingtm.tm_hour++;
+ workingtm.tm_min = 0;
+ carrytest = workingtm.tm_hour;
+ mktime(&workingtm);
+ if (carrytest != workingtm.tm_hour) {
+ return false;
+ }
+ }
+ *wtm = workingtm;
+ return true;
+ }
+
+ if (hour < wtm->tm_hour) {
+ return false;
+ }
+
+ if (hour > wtm->tm_hour) {
+ wtm->tm_hour = hour;
+ wtm->tm_min = 0;
+ }
+
+ return cronemu_min(wtm, min);
+}
+
+bool
+cronemu_min(struct tm *wtm, int min)
+{
+ if (min == -1) {
+ return true;
+ }
+
+ if (min < wtm->tm_min) {
+ return false;
+ }
+
+ if (min > wtm->tm_min) {
+ wtm->tm_min = min;
+ }
+
+ return true;
+}
+
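+ /* MIG handler: allocate a single page shared with the job and hand back a
+ * memory-entry port for it. The page holds the vproc shmem block used for
+ * transaction accounting (see the vp_shmem_* fields used elsewhere in this
+ * file); it is marked VM_INHERIT_NONE so the job's own children do not
+ * inherit the mapping.
+ */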
+kern_return_t
+job_mig_setup_shmem(job_t j, mach_port_t *shmem_port)
+{
+ memory_object_size_t size_of_page, size_of_page_orig;
+ vm_address_t vm_addr;
+ kern_return_t kr;
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (unlikely(j->anonymous)) {
+ job_log(j, LOG_DEBUG, "Anonymous job tried to setup shared memory");
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+ if (unlikely(j->shmem)) {
+ job_log(j, LOG_ERR, "Tried to setup shared memory more than once");
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+ size_of_page_orig = size_of_page = getpagesize();
+
+ kr = vm_allocate(mach_task_self(), &vm_addr, size_of_page, true);
+
+ if (!job_assumes(j, kr == 0)) {
+ return kr;
+ }
+
+ j->shmem = (typeof(j->shmem))vm_addr;
+ j->shmem->vp_shmem_standby_timeout = j->timeout;
+
+ kr = mach_make_memory_entry_64(mach_task_self(), &size_of_page,
+ (memory_object_offset_t)vm_addr, VM_PROT_READ|VM_PROT_WRITE, shmem_port, 0);
+
+ if (job_assumes(j, kr == 0)) {
+ (void)job_assumes(j, size_of_page == size_of_page_orig);
+ }
+
+ /* no need to inherit this in child processes */
+ (void)job_assumes(j, vm_inherit(mach_task_self(), (vm_address_t)j->shmem, size_of_page_orig, VM_INHERIT_NONE) == 0);
+
+ return kr;
+}
+
+kern_return_t
+job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
+{
+ struct ldcred *ldc = runtime_get_caller_creds();
+ job_t js;
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (unlikely(j->deny_job_creation)) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+#if HAVE_SANDBOX
+ const char **argv = (const char **)mach_cmd2argv(server_cmd);
+ if (unlikely(argv == NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+ if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
+ free(argv);
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+ free(argv);
+#endif
+
+ job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);
+
+ if (pid1_magic) {
+ if (ldc->euid || ldc->uid) {
+ job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
+ return VPROC_ERR_TRY_PER_USER;
+ }
+ } else {
+ if (unlikely(server_uid != getuid())) {
+ job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
+ server_cmd, getuid(), server_uid);
+ }
+ server_uid = 0; /* zero means "do nothing" */
+ }
+
+ js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);
+
+ if (unlikely(js == NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ *server_portp = js->j_port;
+ return BOOTSTRAP_SUCCESS;
+}
+
+kern_return_t
+job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
+{
+ struct ldcred *ldc = runtime_get_caller_creds();
+ job_t otherj;
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
+ #if TARGET_OS_EMBEDDED
+ if (!j->embedded_special_privileges) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+ #else
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ #endif
+ }
+
+#if HAVE_SANDBOX
+ if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+#endif
+
+ if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
+ return BOOTSTRAP_UNKNOWN_SERVICE;
+ }
+
+#if TARGET_OS_EMBEDDED
+ if (j->embedded_special_privileges && strcmp(j->username, otherj->username) != 0) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+#endif
+
+ if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
+ bool do_block = otherj->p;
+
+ if (otherj->anonymous) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+ job_remove(otherj);
+
+ if (do_block) {
+ job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
+ /* this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first... */
+ (void)job_assumes(otherj, waiting4removal_new(otherj, srp));
+ return MIG_NO_REPLY;
+ } else {
+ return 0;
+ }
+ } else if (sig == VPROC_MAGIC_TRYKILL_SIGNAL) {
+ if (!j->kill_via_shmem) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+ if (!j->shmem) {
+ j->sent_kill_via_shmem = true;
+ (void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
+ return 0;
+ }
+
+#if !TARGET_OS_EMBEDDED
+ if (__sync_bool_compare_and_swap(&j->shmem->vp_shmem_transaction_cnt, 0, -1)) {
+ j->shmem->vp_shmem_flags |= VPROC_SHMEM_EXITING;
+ j->sent_kill_via_shmem = true;
+ (void)job_assumes(j, runtime_kill(otherj->p, SIGKILL) != -1);
+ return 0;
+ }
+#endif
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ } else if (otherj->p) {
+ (void)job_assumes(j, runtime_kill(otherj->p, sig) != -1);
+ }
+
+ return 0;
+}
+
+kern_return_t
+job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
+{
+ struct ldcred *ldc = runtime_get_caller_creds();
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (!job_assumes(j, j->per_user)) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+ return runtime_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
+}
+
+kern_return_t
+job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
+{
+ struct ldcred *ldc = runtime_get_caller_creds();
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (unlikely(ldc->euid)) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+ return runtime_log_drain(srp, outval, outvalCnt);
+}
+
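+ /* MIG handler for get/set/swap of serialized (launch_data_t) job properties.
+ * 'inkey' names the value being set, 'outkey' the value being fetched; both
+ * non-zero means swap, and the two keys must match. Replies are packed into
+ * a mig_allocate()d buffer sized generously (20 MB) up front and deallocated
+ * on the zero-key and error paths.
+ */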
+kern_return_t
+job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
+{
+ const char *action;
+ launch_data_t input_obj = NULL, output_obj = NULL;
+ size_t data_offset = 0;
+ size_t packed_size;
+ struct ldcred *ldc = runtime_get_caller_creds();
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+ if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+ if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
+ return 1;
+ }
+
+ if (inkey && outkey) {
+ action = "Swapping";
+ } else if (inkey) {
+ action = "Setting";
+ } else {
+ action = "Getting";
+ }
+
+ job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
+
+ *outvalCnt = 20 * 1024 * 1024;
+ mig_allocate(outval, *outvalCnt);
+ if (!job_assumes(j, *outval != 0)) {
+ return 1;
+ }
+
+ /* Note to future maintainers: launch_data_unpack() does NOT return a heap object. The data
+ * is decoded in-place. So do not call launch_data_free() on input_obj.
+ */
+ runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
+ if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
+ goto out_bad;
+ }
+
+ switch (outkey) {
+ case VPROC_GSK_ENVIRONMENT:
+ if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
+ goto out_bad;
+ }
+ jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
+ runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
+ if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
+ goto out_bad;
+ }
+ launch_data_free(output_obj);
+ break;
+ case VPROC_GSK_ALLJOBS:
+ if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
+ goto out_bad;
+ }
+ ipc_revoke_fds(output_obj);
+ runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
+ packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
+ if (!job_assumes(j, packed_size != 0)) {
+ goto out_bad;
+ }
+ launch_data_free(output_obj);
+ break;
+ case VPROC_GSK_MGR_NAME:
+ if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
+ goto out_bad;
+ }
+ packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
+ if (!job_assumes(j, packed_size != 0)) {
+ goto out_bad;
+ }
+
+ launch_data_free(output_obj);
+ break;
+ case VPROC_GSK_JOB_OVERRIDES_DB:
+ if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_OVERRIDES))) != NULL)) {
+ goto out_bad;
+ }
+ packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
+ if (!job_assumes(j, packed_size != 0)) {
+ goto out_bad;
+ }
+
+ launch_data_free(output_obj);
+ break;
+ case VPROC_GSK_JOB_CACHE_DB:
+ if (!job_assumes(j, (output_obj = launch_data_new_string(launchd_data_base_path(LAUNCHD_DB_TYPE_JOBCACHE))) != NULL)) {
+ goto out_bad;
+ }
+ packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
+ if (!job_assumes(j, packed_size != 0)) {
+ goto out_bad;
+ }
+
+ job_log(j, LOG_DEBUG, "Location of job cache database: %s", launch_data_get_string(output_obj));
+
+ launch_data_free(output_obj);
+ break;
+ case 0:
+ mig_deallocate(*outval, *outvalCnt);
+ *outval = 0;
+ *outvalCnt = 0;
+ break;
+ default:
+ goto out_bad;
+ }
+
+ if (invalCnt) switch (inkey) {
+ case VPROC_GSK_ENVIRONMENT:
+ if (launch_data_get_type(input_obj) == LAUNCH_DATA_DICTIONARY) {
+ if (j->p) {
+ job_log(j, LOG_INFO, "Setting environment for a currently active job. This environment will take effect on the next invocation of the job.");
+ }
+ launch_data_dict_iterate(input_obj, envitem_setup_one_shot, j);
+ }
+ break;
+ case 0:
+ break;
+ default:
+ goto out_bad;
+ }
+
+ mig_deallocate(inval, invalCnt);
+ return 0;
+
+out_bad:
+ mig_deallocate(inval, invalCnt);
+ if (*outval) {
+ mig_deallocate(*outval, *outvalCnt);
+ }
+ if (output_obj) {
+ launch_data_free(output_obj);
+ }
+
+ return 1;
+}
+
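+ /* Scalar sibling of job_mig_swap_complex(): the same inkey/outkey protocol
+ * over int64 values. Note the ordering below: the outkey is read first, then
+ * the inkey is applied, so a swap returns the value as it was before the set.
+ */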
+kern_return_t
+job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
+{
+ const char *action;
+ kern_return_t kr = 0;
+ struct ldcred *ldc = runtime_get_caller_creds();
+ int oldmask;
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (unlikely(inkey && ldc->euid && ldc->euid != getuid())) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+ if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
+ return 1;
+ }
+
+ if (inkey && outkey) {
+ action = "Swapping";
+ } else if (inkey) {
+ action = "Setting";
+ } else {
+ action = "Getting";
+ }
+
+ job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
+
+ switch (outkey) {
+ case VPROC_GSK_ABANDON_PROCESS_GROUP:
+ *outval = j->abandon_pg;
+ break;
+ case VPROC_GSK_LAST_EXIT_STATUS:
+ *outval = j->last_exit_status;
+ break;
+ case VPROC_GSK_MGR_UID:
+ *outval = getuid();
+ break;
+ case VPROC_GSK_MGR_PID:
+ *outval = getpid();
+ break;
+ case VPROC_GSK_IS_MANAGED:
+ *outval = j->anonymous ? 0 : 1;
+ break;
+ case VPROC_GSK_BASIC_KEEPALIVE:
+ *outval = !j->ondemand;
+ break;
+ case VPROC_GSK_START_INTERVAL:
+ *outval = j->start_interval;
+ break;
+ case VPROC_GSK_IDLE_TIMEOUT:
+ *outval = j->timeout;
+ break;
+ case VPROC_GSK_EXIT_TIMEOUT:
+ *outval = j->exit_timeout;
+ break;
+ case VPROC_GSK_GLOBAL_LOG_MASK:
+ oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
+ *outval = oldmask;
+ runtime_setlogmask(oldmask);
+ break;
+ case VPROC_GSK_GLOBAL_UMASK:
+ oldmask = umask(0);
+ *outval = oldmask;
+ umask(oldmask);
+ break;
+ case VPROC_GSK_TRANSACTIONS_ENABLED:
+ job_log(j, LOG_DEBUG, "Reading transaction model status.");
+ *outval = j->kill_via_shmem;
+ break;
+ case VPROC_GSK_WAITFORDEBUGGER:
+ *outval = j->wait4debugger;
+ break;
+ case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
+ *outval = j->embedded_special_privileges;
+ break;
+ case 0:
+ *outval = 0;
+ break;
+ default:
+ kr = 1;
+ break;
+ }
+
+ switch (inkey) {
+ case VPROC_GSK_ABANDON_PROCESS_GROUP:
+ j->abandon_pg = (bool)inval;
+ break;
+ case VPROC_GSK_GLOBAL_ON_DEMAND:
+ job_log(j, LOG_DEBUG, "Job is setting global on-demand mode to %s (j->forced_peers_to_demand_mode = %s)", (bool)inval ? "true" : "false", j->forced_peers_to_demand_mode ? "true" : "false");
+ kr = job_set_global_on_demand(j, (bool)inval) ? 0 : 1;
+ break;
+ case VPROC_GSK_BASIC_KEEPALIVE:
+ j->ondemand = !inval;
+ break;
+ case VPROC_GSK_START_INTERVAL:
+ if (inval > UINT32_MAX || inval < 0) {
+ kr = 1;
+ } else if (inval) {
+ if (j->start_interval == 0) {
+ runtime_add_weak_ref();
+ }
+ j->start_interval = (typeof(j->start_interval)) inval;
+ (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j) != -1);
+ } else if (j->start_interval) {
+ (void)job_assumes(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
+ runtime_del_weak_ref();
+ j->start_interval = 0;
+ }
+ break;
+ case VPROC_GSK_IDLE_TIMEOUT:
+ if (inval < 0 || inval > UINT32_MAX) {
+ kr = 1;
+ } else {
+ j->timeout = (typeof(j->timeout)) inval;
+ }
+ break;
+ case VPROC_GSK_EXIT_TIMEOUT:
+ if (inval < 0 || inval > UINT32_MAX) {
+ kr = 1;
+ } else {
+ j->exit_timeout = (typeof(j->exit_timeout)) inval;
+ }
+ break;
+ case VPROC_GSK_GLOBAL_LOG_MASK:
+ if (inval < 0 || inval > UINT32_MAX) {
+ kr = 1;
+ } else {
+ runtime_setlogmask((int) inval);
+ }
+ break;
+ case VPROC_GSK_GLOBAL_UMASK:
+ launchd_assert(sizeof (mode_t) == 2);
+ if (inval < 0 || inval > UINT16_MAX) {
+ kr = 1;
+ } else {
+#if HAVE_SANDBOX
+ if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
+ kr = 1;
+ } else {
+ umask((mode_t) inval);
+ }
+ #else
+ /* Without the sandbox check compiled in, still honor the request. */
+ umask((mode_t) inval);
+ #endif
+ }
+ break;
+ case VPROC_GSK_TRANSACTIONS_ENABLED:
+ if (!job_assumes(j, inval != 0)) {
+ job_log(j, LOG_WARNING, "Attempt to unregister from transaction model. This is not supported.");
+ kr = 1;
+ } else {
+ j->kill_via_shmem = (bool)inval;
+ }
+ break;
+ case VPROC_GSK_WEIRD_BOOTSTRAP:
+ if (job_assumes(j, j->weird_bootstrap)) {
+ job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
+
+ mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_protocol_vproc_subsystem);
+
+ if (job_mig_protocol_vproc_subsystem.maxsize > mxmsgsz) {
+ mxmsgsz = job_mig_protocol_vproc_subsystem.maxsize;
+ }
+
+ (void)job_assumes(j, runtime_add_mport(j->mgr->jm_port, protocol_vproc_server, mxmsgsz) == KERN_SUCCESS);
+ j->weird_bootstrap = false;
+ }
+ break;
+ case VPROC_GSK_WAITFORDEBUGGER:
+ j->wait4debugger_oneshot = inval;
+ break;
+ case VPROC_GSK_PERUSER_SUSPEND:
+ if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
+ mach_port_t junk = MACH_PORT_NULL;
+ job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
+ if (job_assumes(j, jpu != NULL)) {
+ struct suspended_peruser *spi = NULL;
+ LIST_FOREACH(spi, &j->suspended_perusers, sle) {
+ if ((int64_t)(spi->j->mach_uid) == inval) {
+ job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
+ break;
+ }
+ }
+
+ if (spi == NULL) {
+ job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
+ spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
+ if (job_assumes(j, spi != NULL)) {
+ /* Stop listening for events.
+ *
+ * See <rdar://problem/9014146>.
+ */
+ if (jpu->peruser_suspend_count == 0) {
+ job_ignore(jpu);
+ }
+
+ spi->j = jpu;
+ spi->j->peruser_suspend_count++;
+ LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
+ job_stop(spi->j);
+ *outval = jpu->p;
+ } else {
+ kr = BOOTSTRAP_NO_MEMORY;
+ }
+ }
+ }
+ } else {
+ kr = 1;
+ }
+ break;
+ case VPROC_GSK_PERUSER_RESUME:
+ if (job_assumes(j, pid1_magic == true)) {
+ struct suspended_peruser *spi = NULL, *spt = NULL;
+ LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
+ if ((int64_t)(spi->j->mach_uid) == inval) {
+ spi->j->peruser_suspend_count--;
+ LIST_REMOVE(spi, sle);
+ job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
+ break;
+ }
+ }
+
+ if (!job_assumes(j, spi != NULL)) {
+ job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
+ kr = BOOTSTRAP_NOT_PRIVILEGED;
+ } else if (spi->j->peruser_suspend_count == 0) {
+ job_watch(spi->j);
+ job_dispatch(spi->j, false);
+ free(spi);
+ }
+ } else {
+ kr = 1;
+ }
+ break;
+ case 0:
+ break;
+ default:
+ kr = 1;
+ break;
+ }
+
+ return kr;
+}
+
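+ /* MIG handler run after a job forks: propagate the registered task special
+ * ports and the exception port into the child task, and report the audit
+ * session port the child should join (MACH_PORT_NULL when not applicable).
+ */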
+kern_return_t
+job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
+{
+ struct machservice *ms;
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ job_log(j, LOG_DEBUG, "Post fork ping.");
+
+ job_setup_exception_port(j, child_task);
+
+ SLIST_FOREACH(ms, &special_ports, special_port_sle) {
+ if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
+ /* The TASK_ACCESS_PORT funny business is to work around 5325399. */
+ continue;
+ }
+
+ errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
+
+ if (unlikely(errno)) {
+ int desired_log_level = LOG_ERR;
+
+ if (j->anonymous) {
+ /* 5338127 */
+
+ desired_log_level = LOG_WARNING;
+
+ if (ms->special_port_num == TASK_SEATBELT_PORT) {
+ desired_log_level = LOG_DEBUG;
+ }
+ }
+
+ job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
+ }
+ }
+
+ /* MIG will not zero-initialize this pointer, so we must always do so. See
+ * <rdar://problem/8562593>.
+ */
+ *asport = MACH_PORT_NULL;
+#if !TARGET_OS_EMBEDDED
+ if (!j->anonymous) {
+ /* XPC services will spawn into the root security session by default.
+ * xpcproxy will switch them away if needed.
+ */
+ if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
+ job_log(j, LOG_DEBUG, "Returning j->asport: %u", j->asport);
+ *asport = j->asport;
+ }
+ }
+#endif
+ (void)job_assumes(j, launchd_mport_deallocate(child_task) == KERN_SUCCESS);
+
+ return 0;
+}
+
+kern_return_t
+job_mig_reboot2(job_t j, uint64_t flags)
+{
+ char who_started_the_reboot[2048] = "";
+ struct proc_bsdshortinfo proc;
+ struct ldcred *ldc = runtime_get_caller_creds();
+ pid_t pid_to_log;
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (unlikely(!pid1_magic)) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+#if !TARGET_OS_EMBEDDED
+ if (unlikely(ldc->euid)) {
+#else
+ if (unlikely(ldc->euid) && !j->embedded_special_privileges) {
+#endif
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+ for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
+ size_t who_offset;
+ if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
+ if (errno != ESRCH) {
+ job_assumes(j, errno == 0);
+ }
+ return 1;
+ }
+
+ if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
+ job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
+ snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
+ break;
+ }
+
+ who_offset = strlen(who_started_the_reboot);
+ snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
+ " %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
+ }
+
+ root_jobmgr->reboot_flags = (int)flags;
+ job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
+ launchd_shutdown();
+
+ return 0;
+}
+
+kern_return_t
+job_mig_getsocket(job_t j, name_t spr)
+{
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (j->deny_job_creation) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
+#if HAVE_SANDBOX
+ struct ldcred *ldc = runtime_get_caller_creds();
+ if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+#endif
+
+ ipc_server_init();
+
+ if (unlikely(!sockpath)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
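+ /* This assumes sockpath always fits within a name_t; strncpy() will
+ * not NUL-terminate the destination if it does not.
+ */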
+ strncpy(spr, sockpath, sizeof(name_t));
+
+ return BOOTSTRAP_SUCCESS;
+}
+
+kern_return_t
+job_mig_log(job_t j, int pri, int err, logmsg_t msg)
+{
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if ((errno = err)) {
+ job_log_error(j, pri, "%s", msg);
+ } else {
+ job_log(j, pri, "%s", msg);
+ }
+
+ return 0;
+}
+
+job_t
+jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
+{
+ job_t ji = NULL;
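+ /* Look for an existing per-user launchd job for this UID; such jobs
+ * are identified by a first MachService flagged with per_user_hack.
+ */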
+ LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
+ if (!ji->per_user) {
+ continue;
+ }
+ if (ji->mach_uid != which_user) {
+ continue;
+ }
+ if (SLIST_EMPTY(&ji->machservices)) {
+ continue;
+ }
+ if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
+ continue;
+ }
+ break;
+ }
+
+ if (unlikely(ji == NULL)) {
+ struct machservice *ms;
+ char lbuf[1024];
+
+ job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
+
+ snprintf(lbuf, sizeof(lbuf), "com.apple.launchd.peruser.%u", which_user);
+
+ ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
+
+ if (ji != NULL) {
+ auditinfo_addr_t auinfo = {
+ .ai_termid = { .at_type = AU_IPv4 },
+ .ai_auid = which_user,
+ .ai_asid = AU_ASSIGN_ASID,
+ };
+
+ if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
+ job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
+ (void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
+
+ /* Kinda lame that we have to do this, but we can't create an
+ * audit session without joining it.
+ */
+ (void)job_assumes(ji, audit_session_join(g_audit_session_port));
+ ji->asid = auinfo.ai_asid;
+ } else {
+ job_log(ji, LOG_WARNING, "Could not set audit session!");
+ job_remove(ji);
+ return NULL;
+ }
+
+ ji->mach_uid = which_user;
+ ji->per_user = true;
+ ji->kill_via_shmem = true;
+
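+ /* Make sure the per-user launchd's on-disk job database directory
+ * exists, is actually a directory, and is owned by the target user.
+ */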
+ struct stat sb;
+ char pu_db[PATH_MAX];
+ snprintf(pu_db, sizeof(pu_db), LAUNCHD_DB_PREFIX "/%s", lbuf);
+
+ bool created = false;
+ int err = stat(pu_db, &sb);
+ if ((err == -1 && errno == ENOENT) || (err == 0 && !S_ISDIR(sb.st_mode))) {
+ if (err == 0) {
+ char move_aside[PATH_MAX];
+ snprintf(move_aside, sizeof(move_aside), LAUNCHD_DB_PREFIX "/%s.movedaside", lbuf);
+
+ (void)job_assumes(ji, rename(pu_db, move_aside) != -1);
+ }
+
+ (void)job_assumes(ji, mkdir(pu_db, S_IRWXU) != -1);
+ (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
+ created = true;
+ }
+
+ /* Only validate ownership and permissions when stat() actually
+ * succeeded; otherwise sb is uninitialized.
+ */
+ if (err == 0 && !created) {
+ if (!job_assumes(ji, sb.st_uid == which_user)) {
+ (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
+ }
+ if (!job_assumes(ji, sb.st_gid == 0)) {
+ (void)job_assumes(ji, chown(pu_db, which_user, 0) != -1);
+ }
+ if (!job_assumes(ji, sb.st_mode == (S_IRWXU | S_IFDIR))) {
+ (void)job_assumes(ji, chmod(pu_db, S_IRWXU) != -1);
+ }
+ }
+
+ if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
+ job_remove(ji);
+ ji = NULL;
+ } else {
+ ms->per_user_hack = true;
+ ms->hide = true;
+
+ ji = job_dispatch(ji, false);
+ }
+ }
+ } else {
+ *mp = machservice_port(SLIST_FIRST(&ji->machservices));
+ job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
+ }
+
+ return ji;
+}
+
+kern_return_t
+job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
+{
+ struct ldcred *ldc = runtime_get_caller_creds();
+
+#if TARGET_OS_EMBEDDED
+ /* There is no need for per-user launchds on embedded. */
+ job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
+ return BOOTSTRAP_NOT_PRIVILEGED;
+#endif
+
+#if HAVE_SANDBOX
+ if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+#endif
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);
+
+ if (unlikely(!pid1_magic)) {
+ job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
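+ /* Non-root callers may only look up their own per-user launchd, so
+ * override the requested UID with the caller's.
+ */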
+ if (ldc->euid || ldc->uid) {
+ which_user = ldc->euid ?: ldc->uid;
+ }
+
+ *up_cont = MACH_PORT_NULL;
+
+ (void)jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);
+
+ return 0;
+}
+
+kern_return_t
+job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
+{
+ bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
+ bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
+ struct ldcred *ldc = runtime_get_caller_creds();
+ struct machservice *ms = NULL;
+ job_t jo;
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (j->dedicated_instance) {
+ struct machservice *msi = NULL;
+ SLIST_FOREACH(msi, &j->machservices, sle) {
+ if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
+ uuid_copy(instance_id, j->instance_id);
+ ms = msi;
+ break;
+ }
+ }
+ } else {
+ ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
+ }
+
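+ /* A strict check-in only succeeds for an inactive service that already
+ * belongs to this job; it never creates one on the fly.
+ */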
+ if (strict) {
+ if (likely(ms != NULL)) {
+ if (ms->job != j) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ } else if (ms->isActive) {
+ return BOOTSTRAP_SERVICE_ACTIVE;
+ }
+ } else {
+ return BOOTSTRAP_UNKNOWN_SERVICE;
+ }
+ } else if (ms == NULL) {
+ if (job_assumes(j, !j->dedicated_instance)) {
+ *serviceportp = MACH_PORT_NULL;
+
+ if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ /* Treat this like a legacy job. */
+ if (!j->legacy_mach_job) {
+ ms->isActive = true;
+ ms->recv = false;
+ }
+
+ if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
+ job_log(j, LOG_SCOLDING, "Please add the following service to the configuration file for this job: %s", servicename);
+ }
+ } else {
+ return BOOTSTRAP_UNKNOWN_SERVICE;
+ }
+ } else {
+ if (unlikely((jo = machservice_job(ms)) != j)) {
+ static pid_t last_warned_pid;
+
+ if (last_warned_pid != ldc->pid) {
+ job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
+ last_warned_pid = ldc->pid;
+ }
+
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+ if (unlikely(machservice_active(ms))) {
+ job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
+ return BOOTSTRAP_SERVICE_ACTIVE;
+ }
+ }
+
+ job_checkin(j);
+ machservice_request_notifications(ms);
+
+ job_log(j, LOG_INFO, "Check-in of service: %s", servicename);
+
+ *serviceportp = machservice_port(ms);
+ return BOOTSTRAP_SUCCESS;
+}
+
+kern_return_t
+job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
+{
+ struct machservice *ms;
+ struct ldcred *ldc = runtime_get_caller_creds();
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (!(flags & BOOTSTRAP_PER_PID_SERVICE) && !j->legacy_LS_job) {
+ job_log(j, LOG_SCOLDING, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
+ }
+
+ job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);
+
+ /* 5641783 for the embedded hack */
+#if !TARGET_OS_EMBEDDED
+ /*
+ * From a per-user/session launchd's perspective, SecurityAgent (UID
+ * 92) is a rogue application (not our UID, not root and not a child of
+ * us). We'll have to reconcile this design friction at a later date.
+ */
+ if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
+ if (pid1_magic) {
+ return VPROC_ERR_TRY_PER_USER;
+ } else {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+ }
+#endif
+
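+ /* If the name is already registered, the existing service is torn down
+ * below and replaced with the caller's port.
+ */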
+ ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);
+
+ if (unlikely(ms)) {
+ if (machservice_job(ms) != j) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+ if (machservice_active(ms)) {
+ job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
+ return BOOTSTRAP_SERVICE_ACTIVE;
+ }
+ if (ms->recv && (serviceport != MACH_PORT_NULL)) {
+ job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+ job_checkin(j);
+ machservice_delete(j, ms, false);
+ }
+
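+ /* Registering MACH_PORT_NULL is effectively a deregistration: any old
+ * service was deleted above and no new one is created.
+ */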
+ if (likely(serviceport != MACH_PORT_NULL)) {
+ if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
+ machservice_request_notifications(ms);
+ } else {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+ }
+
+ return BOOTSTRAP_SUCCESS;
+}
+
+kern_return_t
+job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
+{
+ struct machservice *ms = NULL;
+ struct ldcred *ldc = runtime_get_caller_creds();
+ kern_return_t kr;
+ bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
+ bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
+ bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
+ bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ bool xpc_req = j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN;
+
+ /* 5641783 for the embedded hack */
+#if !TARGET_OS_EMBEDDED
+ if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
+ return VPROC_ERR_TRY_PER_USER;
+ }
+#endif
+
+#if HAVE_SANDBOX
+ /* We don't do sandbox checking for XPC domains because, by definition, all
+ * the services within your domain should be accessible to you.
+ */
+ if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+#endif
+
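+ /* Scope the lookup: per-PID services are keyed by the target PID,
+ * XPC-domain requests stay local, and everything else may search up
+ * the bootstrap hierarchy.
+ */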
+ if (per_pid_lookup) {
+ ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
+ } else {
+ if (xpc_req) {
+ /* Requests from XPC domains stay local. */
+ ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
+ } else {
+ /* A strict lookup which is privileged won't even bother trying to
+ * find a service if we're not hosting the root Mach bootstrap.
+ */
+ if (strict_lookup && privileged) {
+ if (inherited_bootstrap_port == MACH_PORT_NULL) {
+ ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
+ }
+ } else {
+ ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
+ }
+ }
+ }
+
+ if (likely(ms)) {
+ ms = ms->alias ? ms->alias : ms;
+ if (unlikely(specific_instance && ms->job->multiple_instances)) {
+ job_t ji = NULL;
+ job_t instance = NULL;
+ LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
+ if (uuid_compare(instance_id, ji->instance_id) == 0) {
+ instance = ji;
+ break;
+ }
+ }
+
+ if (unlikely(instance == NULL)) {
+ job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
+ instance = job_new_subjob(ms->job, instance_id);
+ if (job_assumes(j, instance != NULL)) {
+ /* Disable this support for now. We only support having
+ * multi-instance jobs within private XPC domains.
+ */
+#if 0
+ /* If the job is multi-instance, in a singleton XPC domain
+ * and the request is not coming from within that singleton
+ * domain, we need to alias the new job into the requesting
+ * domain.
+ */
+ if (!j->mgr->xpc_singleton && xpc_req) {
+ (void)job_assumes(instance, job_new_alias(j->mgr, instance));
+ }
+#endif
+ job_dispatch(instance, false);
+ }
+ }
+
+ ms = NULL;
+ if (job_assumes(j, instance != NULL)) {
+ struct machservice *msi = NULL;
+ SLIST_FOREACH(msi, &instance->machservices, sle) {
+ /* Note that sizeof(servicename) would be the size of a pointer
+ * here, not the array: array parameters in C decay to pointers.
+ * Hence sizeof(name_t) below.
+ */
+ if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
+ ms = msi;
+ break;
+ }
+ }
+ }
+ } else {
+ if (machservice_hidden(ms) && !machservice_active(ms)) {
+ ms = NULL;
+ } else if (unlikely(ms->per_user_hack)) {
+ ms = NULL;
+ }
+ }
+ }
+
+ if (likely(ms)) {
+ (void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
+ job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
+
+ if (unlikely(!per_pid_lookup && j->lastlookup == ms && j->lastlookup_gennum == ms->gen_num && !j->per_user)) {
+ /* We need to think more about the per_pid_lookup logic before logging about repeated lookups. */
+ job_log(j, LOG_DEBUG, "Performance: Please fix the framework that talks to \"%s\" to cache the Mach port for service: %s", ms->job->label, servicename);
+ }
+
+ j->lastlookup = ms;
+ j->lastlookup_gennum = ms->gen_num;
+
+ *serviceportp = machservice_port(ms);
+
+ kr = BOOTSTRAP_SUCCESS;
+ } else if (strict_lookup && !privileged) {
+ /* Hack: We need to simulate XPC's desire not to establish a hierarchy. So
+ * if XPC is doing the lookup and it's not a privileged lookup, we won't
+ * forward. But if it is a privileged lookup (that is, the name was looked
+ * up in XPC_DOMAIN_LOCAL_SYSTEM), then we must forward.
+ */
+ return BOOTSTRAP_UNKNOWN_SERVICE;
+ } else if (inherited_bootstrap_port != MACH_PORT_NULL) {
+ /* Non-privileged requests from within an XPC domain returned above and
+ * are never forwarded; anything reaching this point is eligible.
+ */
+ job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
+ /* Clients potentially check the audit token of the reply to verify that the returned send right is trustworthy. */
+ (void)job_assumes(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags) == 0);
+ /* The previous routine moved the reply port, so we're forced to return MIG_NO_REPLY now */
+ return MIG_NO_REPLY;
+ } else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
+ /*
+ * 5240036 Should start background session when a lookup of CCacheServer occurs
+ *
+ * This is a total hack. We sniff out the loginwindow session, and attempt to guess what it is up to.
+ * If we find an EUID that isn't root, we force it over to the per-user context.
+ */
+ return VPROC_ERR_TRY_PER_USER;
+ } else {
+ job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
+ kr = BOOTSTRAP_UNKNOWN_SERVICE;
+ }
+
+ return kr;
+}
+
+kern_return_t
+job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
+{
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
+ jobmgr_t jm = j->mgr;
+
+ if (jobmgr_parent(jm)) {
+ *parentport = jobmgr_parent(jm)->jm_port;
+ } else if (MACH_PORT_NULL == inherited_bootstrap_port) {
+ *parentport = jm->jm_port;
+ } else {
+ (void)job_assumes(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp) == 0);
+ /* The previous routine moved the reply port, so we're forced to return MIG_NO_REPLY now */
+ return MIG_NO_REPLY;
+ }
+ return BOOTSTRAP_SUCCESS;
+}
+
+kern_return_t
+job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
+{
+ if (inherited_bootstrap_port == MACH_PORT_NULL) {
+ *rootbsp = root_jobmgr->jm_port;
+ (void)job_assumes(j, launchd_mport_make_send(root_jobmgr->jm_port) == KERN_SUCCESS);
+ } else {
+ *rootbsp = inherited_bootstrap_port;
+ (void)job_assumes(j, launchd_mport_copy_send(inherited_bootstrap_port) == KERN_SUCCESS);
+ }
+
+ return BOOTSTRAP_SUCCESS;
+}
+
+kern_return_t
+job_mig_info(job_t j, name_array_t *servicenamesp, unsigned int *servicenames_cnt, name_array_t *servicejobsp, unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp, unsigned int *serviceactives_cnt, uint64_t flags)
+{
+ name_array_t service_names = NULL;
+ name_array_t service_jobs = NULL;
+ bootstrap_status_array_t service_actives = NULL;
+ unsigned int cnt = 0, cnt2 = 0;
+ jobmgr_t jm;
+
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ if (g_flat_mach_namespace) {
+ if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
+ jm = j->mgr;
+ } else {
+ jm = root_jobmgr;
+ }
+ } else {
+ jm = j->mgr;
+ }
+
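+ /* First pass: count the non-per-PID services so we know how much
+ * memory to mig_allocate() for the reply arrays.
+ */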
+ unsigned int i = 0;
+ struct machservice *msi = NULL;
+ for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
+ LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
+ cnt += !msi->per_pid ? 1 : 0;
+ }
+ }
+
+ if (cnt == 0) {
+ goto out;
+ }
+
+ mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
+ if (!job_assumes(j, service_names != NULL)) {
+ goto out_bad;
+ }
+
+ mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
+ if (!job_assumes(j, service_jobs != NULL)) {
+ goto out_bad;
+ }
+
+ mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
+ if (!job_assumes(j, service_actives != NULL)) {
+ goto out_bad;
+ }
+
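+ /* Second pass: fill in the parallel name/job/status arrays. */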
+ for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
+ LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
+ if (!msi->per_pid) {
+ strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
+ /* Resolve the alias through a local pointer; assigning to msi
+ * would clobber the LIST_FOREACH iterator and derail the walk.
+ */
+ struct machservice *target = msi->alias ? msi->alias : msi;
+ if (target->job->mgr->shortdesc) {
+ strlcpy(service_jobs[cnt2], target->job->mgr->shortdesc, sizeof(service_jobs[0]));
+ } else {
+ strlcpy(service_jobs[cnt2], target->job->label, sizeof(service_jobs[0]));
+ }
+ service_actives[cnt2] = machservice_status(target);
+ cnt2++;
+ }
+ }
+ }
+
+ (void)job_assumes(j, cnt == cnt2);
+
+out:
+ *servicenamesp = service_names;
+ *servicejobsp = service_jobs;
+ *serviceactivesp = service_actives;
+ *servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;
+
+ return BOOTSTRAP_SUCCESS;
+
+out_bad:
+ if (service_names) {
+ mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
+ }
+ if (service_jobs) {
+ mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
+ }
+ if (service_actives) {
+ mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
+ }
+
+ return BOOTSTRAP_NO_MEMORY;
+}
+
+kern_return_t
+job_mig_lookup_children(job_t j, mach_port_array_t *child_ports, mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names, mach_msg_type_number_t *child_names_cnt, bootstrap_property_array_t *child_properties, mach_msg_type_number_t *child_properties_cnt)
+{
+ kern_return_t kr = BOOTSTRAP_NO_MEMORY;
+ if (!launchd_assumes(j != NULL)) {
+ return BOOTSTRAP_NO_MEMORY;
+ }
+
+ struct ldcred *ldc = runtime_get_caller_creds();
+
+ /* Only allow root processes to look up children, even if we're in the per-user launchd.
+ * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
+ * in a non-flat namespace.
+ */
+ if (ldc->euid != 0) {
+ job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
+ return BOOTSTRAP_NOT_PRIVILEGED;
+ }
+
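+ /* Count the children first: submanagers, plus per-user launchds when
+ * we are PID 1.
+ */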
+ unsigned int cnt = 0;
+
+ jobmgr_t jmr = j->mgr;
+ jobmgr_t jmi = NULL;
+ SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
+ cnt++;
+ }
+
+ /* Find our per-user launchds if we're PID 1. */
+ job_t ji = NULL;
+ if (pid1_magic) {
+ LIST_FOREACH(ji, &jmr->jobs, sle) {
+ cnt += ji->per_user ? 1 : 0;