+ bool program_changed = false;
+ int fflags = kev->fflags;
+
+ job_log(j, LOG_DEBUG, "EVFILT_PROC event for job:");
+ log_kevent_struct(LOG_DEBUG, kev, 0);
+
+ if( fflags & NOTE_EXIT ) {
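+ /* First stage of NOTE_EXIT handling: work out whether someone else has
+ * become the parent of this PID via ptrace(2). If so, defer reaping until
+ * the tracer goes away. The actual reap happens in the NOTE_EXIT block at
+ * the bottom of this function.
+ */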
+ if( j->p == (pid_t)kev->ident && !j->anonymous && !j->is_being_sampled ) {
+ int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
+ struct kinfo_proc kp;
+ size_t len = sizeof(kp);
+
+ /* Sometimes, the kernel says it succeeded but really didn't. */
+ if( job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) && len == sizeof(kp) ) {
+ if( !job_assumes(j, kp.kp_eproc.e_ppid == getpid()) ) {
+ /* Someone has attached to the process with ptrace(). There's a race here.
+ * If we determine that we are not the parent process and then fail to attach
+ * a kevent to the parent PID (who is probably using ptrace()), we can take that as an
+ * indication that the parent exited between sysctl(3) and kevent_mod(). The
+ * reparenting of the PID should be atomic to us, so in that case, we reap the
+ * job as normal.
+ *
+ * Otherwise, we wait for the death of the parent tracer and then reap, just as we
+ * would if a job died while we were sampling it at shutdown.
+ *
+ * Note that we foolishly assume that in the process *tree* a node cannot be its
+ * own parent. Apparently, that is not correct. If this is the case, we forsake
+ * the process to its own devices. Let it reap itself.
+ */
+ if( !job_assumes(j, kp.kp_eproc.e_ppid != (pid_t)kev->ident) ) {
+ job_log(j, LOG_WARNING, "Job is its own parent and has (somehow) exited. Leaving it to waste away.");
+ return;
+ }
+ if( job_assumes(j, kevent_mod(kp.kp_eproc.e_ppid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, j) != -1) ) {
+ j->tracing_pid = kp.kp_eproc.e_ppid;
+ j->reap_after_trace = true;
+ return;
+ }
+ }
+ }
+ } else if( !j->anonymous ) {
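+ /* We get here either because the exiting PID is a tracer we attached to
+ * above, or because the job itself exited while its sample was still in
+ * flight.
+ */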
+ if( j->tracing_pid == (pid_t)kev->ident ) {
+ job_cleanup_after_tracer(j);
+
+ return;
+ } else if( j->tracing_pid && !j->reap_after_trace ) {
+ /* The job exited before our sample completed. */
+ job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job has exited. Will reap after tracing PID %i exits.", j->tracing_pid);
+ j->reap_after_trace = true;
+ return;
+ }
+ }
+ }
+
+ if (fflags & NOTE_EXEC) {
+ program_changed = true;
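+ /* Remember that an exec happened so the NOTE_FORK handler below can
+ * point out that the fork/exec ordering is ambiguous when both flags are
+ * coalesced into a single kevent.
+ */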
+
+ if (j->anonymous) {
+ int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, j->p };
+ struct kinfo_proc kp;
+ size_t len = sizeof(kp);
+
+ /* Sometimes, the kernel says it succeeded but really didn't. */
+ if (job_assumes(j, sysctl(mib, 4, &kp, &len, NULL, 0) != -1) && len == sizeof(kp)) {
+ char newlabel[1000];
+
+ snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, kp.kp_proc.p_comm);
+
+ job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);
+ j->lastlookup = NULL;
+ j->lastlookup_gennum = 0;
+
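+ /* The label doubles as the hash key, so pull the job out of the hash,
+ * rewrite the label in place, and reinsert it into the bucket for the
+ * new name. Note that the strcpy() assumes the label storage for
+ * anonymous jobs was allocated with enough slack to hold the longer name.
+ */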
+ LIST_REMOVE(j, label_hash_sle);
+ strcpy((char *)j->label, newlabel);
+ LIST_INSERT_HEAD(&label_hash[hash_label(j->label)], j, label_hash_sle);
+ }
+ } else {
+ j->did_exec = true;
+ job_log(j, LOG_DEBUG, "Program changed");
+ }
+ }
+
+ if (fflags & NOTE_FORK) {
+ job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
+ job_log_children_without_exec(j);
+ }
+
+ if (fflags & NOTE_EXIT) {
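+ /* Reap last, after any NOTE_EXEC/NOTE_FORK flags coalesced into this
+ * kevent have been handled. Managed jobs are redispatched (and possibly
+ * respawned); anonymous jobs are removed for good.
+ */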
+ job_reap(j);
+
+ if( !j->anonymous ) {
+ j = job_dispatch(j, false);
+ } else {
+ job_remove(j);
+ j = NULL;
+ }
+ }
+}
+
+void
+job_callback_timer(job_t j, void *ident)
+{
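+ /* The ident we registered the timer with tells us which of the job's
+ * timers fired: the job itself, its semaphores, its start interval, or
+ * its exit timeout.
+ */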
+ if (j == ident) {
+ job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
+ job_dispatch(j, true);
+ } else if (&j->semaphores == ident) {
+ job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
+ job_dispatch(j, false);
+ } else if (&j->start_interval == ident) {
+ job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
+ j->start_pending = true;
+ job_dispatch(j, false);
+ } else if (&j->exit_timeout == ident) {
+ if( !job_assumes(j, j->p != 0) ) {
+ return;
+ }
+
+ if( j->clean_kill ) {
+ job_log(j, LOG_ERR | LOG_CONSOLE, "Clean job failed to exit %u second after receiving SIGKILL.", LAUNCHD_CLEAN_KILL_TIMER);
+ job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL) != -1);
+ j->clean_exit_timer_expired = true;
+
+ jobmgr_do_garbage_collection(j->mgr);
+ return;
+ }
+
+ /*
+ * This block might be executed up to 3 times for a given (slow) job
+ * - once for the SAMPLE_TIMEOUT timer, at which point sampling is triggered
+ * - once for the exit_timeout timer, at which point:
+ * - sampling is performed if it was not triggered previously
+ * - SIGKILL is sent to the job
+ * - once for the SIGKILL_TIMER timer, at which point we log that the job
+ * still has not died despite the SIGKILL
+ */
+
+ if( j->per_user ) {
+ /* Don't sample per-user launchd's. */
+ j->sampling_complete = true;
+ }
+ bool was_is_or_will_be_sampled = ( j->sampling_complete || j->is_being_sampled || j->pending_sample );
+ bool should_enqueue = ( !was_is_or_will_be_sampled && do_apple_internal_logging );
+
+ if (j->sent_sigkill) {
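+ /* We already sent SIGKILL and the process still has not gone away.
+ * Report how long it has been ignoring the signal, subtracting the
+ * exit-timeout grace period for non-clean kills so the figure reflects
+ * time since SIGKILL.
+ */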
+ uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);
+
+ td /= NSEC_PER_SEC;
+ td -= j->clean_kill ? 0 : j->exit_timeout;
+
+ job_log(j, LOG_WARNING | LOG_CONSOLE, "Did not die after sending SIGKILL %llu seconds ago...", td);
+ } else if( should_enqueue && (!j->exit_timeout || (LAUNCHD_SAMPLE_TIMEOUT < j->exit_timeout)) ) {
+ /* This should work even if the job changes its exit_timeout midstream */
+ job_log(j, LOG_NOTICE | LOG_CONSOLE, "Sampling timeout elapsed (%u seconds). Scheduling a sample...", LAUNCHD_SAMPLE_TIMEOUT);
+ if (j->exit_timeout) {
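+ /* The sample-timeout portion of the wait has elapsed; re-arm the timer
+ * for whatever remains of the exit timeout.
+ */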
+ unsigned int ttk = (j->exit_timeout - LAUNCHD_SAMPLE_TIMEOUT);
+ job_assumes(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER,
+ EV_ADD|EV_ONESHOT, NOTE_SECONDS, ttk, j) != -1);
+ job_log(j, LOG_NOTICE | LOG_CONSOLE, "Scheduled new exit timeout for %u seconds later", ttk);
+ }
+
+ STAILQ_INSERT_TAIL(&j->mgr->pending_samples, j, pending_samples_sle);
+ j->pending_sample = true;
+ jobmgr_dequeue_next_sample(j->mgr);
+ } else {
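+ /* The exit timeout proper has elapsed. If a sample is still owed, either
+ * wait for it or queue one before killing; otherwise kill the job now.
+ */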
+ if( do_apple_internal_logging && !j->sampling_complete ) {
+ if( j->is_being_sampled || j->pending_sample ) {
+ char pidstr[24] = { 0 };
+ snprintf(pidstr, sizeof(pidstr), "[%i] ", j->tracing_pid);
+
+ job_log(j, LOG_DEBUG | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Will kill after sample%shas completed.", j->exit_timeout, j->tracing_pid ? pidstr : " ");
+ j->kill_after_sample = true;
+ } else {
+ job_log(j, LOG_DEBUG | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Will sample and then kill.", j->exit_timeout);
+
+ STAILQ_INSERT_TAIL(&j->mgr->pending_samples, j, pending_samples_sle);
+ j->pending_sample = true;
+ }
+
+ jobmgr_dequeue_next_sample(j->mgr);
+ } else {
+ if (unlikely(j->debug_before_kill)) {
+ job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
+ job_assumes(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER) == KERN_SUCCESS);
+ }
+ job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
+ job_kill(j);
+ jobmgr_do_garbage_collection(j->mgr);
+ }
+ }
+ } else {
+ job_assumes(j, false);
+ }
+}
+
+void
+job_callback_read(job_t j, int ident)
+{
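+ /* Work out which descriptor became readable: the job's redirected
+ * stdout/stderr, its stdin, or (by elimination) one of its sockets.
+ */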
+ if (ident == j->log_redirect_fd) {
+ job_log_stdouterr(j);
+ } else if (ident == j->stdin_fd) {
+ job_dispatch(j, true);
+ } else {
+ socketgroup_callback(j);
+ }
+}
+
+void
+jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
+{
+ jobmgr_t jmi;