#include <kern/exc_guard.h>
#include <vm/vm_protos.h>
+#include <os/log.h>
#include <pexpert/pexpert.h>
#include <sys/syscall.h>
#endif /* CONFIG_MACF */
+#if CONFIG_MEMORYSTATUS
+static void proc_memorystatus_remove(proc_t p);
+#endif /* CONFIG_MEMORYSTATUS */
void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
mach_exception_data_type_t code, mach_exception_data_type_t subcode,
int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task);
+/*
+ * Zone backing the rusage_superset snapshot (p->p_ru) that an exiting
+ * process leaves behind for its parent; allocated in proc_prepareexit /
+ * vfork_exit and freed when the zombie child is reaped.
+ */
+ZONE_DECLARE(zombie_zone, "zombie",
+    sizeof(struct rusage_superset), ZC_NOENCRYPT);
+
/*
* Things which should have prototypes in headers, but don't
void delay(int);
void gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor);
+#if __has_feature(ptrauth_calls)
+int exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
+ mach_exception_subcode_t subcode);
+#endif /* __has_feature(ptrauth_calls) */
+
+
/*
* NOTE: Source and target may *NOT* overlap!
* XXX Should share code with bsd/dev/ppc/unix_signal.c
out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
/* following cast works for sival_int because of padding */
out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
- out->si_band = in->si_band; /* range reduction */
+ out->si_band = (user32_long_t)in->si_band; /* range reduction */
}
void
uint64_t ledger_network_nonvolatile;
uint64_t ledger_network_nonvolatile_compressed;
uint64_t ledger_wired_mem;
+ uint64_t ledger_tagged_footprint;
+ uint64_t ledger_tagged_footprint_compressed;
+ uint64_t ledger_media_footprint;
+ uint64_t ledger_media_footprint_compressed;
+ uint64_t ledger_graphics_footprint;
+ uint64_t ledger_graphics_footprint_compressed;
+ uint64_t ledger_neural_footprint;
+ uint64_t ledger_neural_footprint_compressed;
void *crash_info_ptr = task_get_corpseinfo(corpse_task);
}
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
- char *buf = (char *) kalloc(MAXPATHLEN);
- if (buf != NULL) {
- bzero(buf, MAXPATHLEN);
- proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
- kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
- kfree(buf, MAXPATHLEN);
- }
+ char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
+ proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
+ kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
+ zfree(ZV_NAMEI, buf);
}
- pflags = p->p_flag & (P_LP64 | P_SUGID);
+ pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
}
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
}
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
+ uid_t persona_id = proc_persona_id(p);
+ kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
+ }
+
#if CONFIG_COALITIONS
if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
uint64_t coalition_ids[COALITION_NUM_TYPES];
#endif /* CONFIG_COALITIONS */
#if CONFIG_MEMORYSTATUS
- memstat_dirty_flags = memorystatus_dirty_get(p);
+ memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
}
#endif
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
+ ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
+ ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
+ ledger_media_footprint = get_task_media_footprint(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
+ ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
+ ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
+ ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
+ ledger_neural_footprint = get_task_neural_footprint(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
+ ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
+ }
+
if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
reason = p->p_exit_reason;
}
}
if (reason->osr_kcd_buf != 0) {
- uint32_t reason_buf_size = kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
+ uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
assert(reason_buf_size != 0);
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
return (char *)kcdata_iter_payload(iter);
}
-static __attribute__((noinline)) void
+__abortlike
+static void
launchd_crashed_panic(proc_t p, int rv)
{
char *launchd_exit_reason_desc = launchd_exit_reason_get_string_desc(p->p_exit_reason);
exit(proc_t p, struct exit_args *uap, int *retval)
{
p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24;
- exit1(p, W_EXITCODE(uap->rval, 0), retval);
+ exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval);
thread_exception_return();
/* NOTREACHED */
return 0;
}
+#if CONFIG_MEMORYSTATUS
+/*
+ * Remove this process from jetsam bands for freezing or exiting. Note this will block, if the process
+ * is currently being frozen.
+ * The proc_list_lock is held by the caller.
+ * NB: If the process should be ineligible for future freezing or jetsaming the caller should first set
+ * the p_listflag P_LIST_EXITED bit.
+ */
+static void
+proc_memorystatus_remove(proc_t p)
+{
+	LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED);
+	/*
+	 * memorystatus_remove() returns EAGAIN while the process is mid-freeze;
+	 * sleep on p_memstat_state (msleep drops and reacquires proc_list_mlock
+	 * around the wait) and retry until the removal succeeds.
+	 */
+	while (memorystatus_remove(p) == EAGAIN) {
+		os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", p->p_pid);
+		msleep(&p->p_memstat_state, proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
+	}
+}
+#endif /* CONFIG_MEMORYSTATUS */
+
void
proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
{
*
* If the zombie allocation fails, just punt the stats.
*/
- MALLOC_ZONE(rup, struct rusage_superset *,
- sizeof(*rup), M_ZOMBIE, M_WAITOK);
- if (rup != NULL) {
- gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
- rup->ri.ri_phys_footprint = 0;
- rup->ri.ri_proc_exit_abstime = mach_absolute_time();
+ rup = zalloc(zombie_zone);
+ gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
+ rup->ri.ri_phys_footprint = 0;
+ rup->ri.ri_proc_exit_abstime = mach_absolute_time();
+ /*
+ * Make the rusage_info visible to external observers
+ * only after it has been completely filled in.
+ */
+ p->p_ru = rup;
- /*
- * Make the rusage_info visible to external observers
- * only after it has been completely filled in.
- */
- p->p_ru = rup;
- }
if (create_corpse) {
int est_knotes = 0, num_knotes = 0;
uint64_t *buffer = NULL;
- int buf_size = 0;
+ uint32_t buf_size = 0;
/* Get all the udata pointers from kqueue */
est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
if (est_knotes > 0) {
- buf_size = (est_knotes + 32) * sizeof(uint64_t);
- buffer = (uint64_t *) kalloc(buf_size);
+ buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t));
+ buffer = kheap_alloc(KHEAP_TEMP, buf_size, Z_WAITOK);
num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
if (num_knotes > est_knotes + 32) {
num_knotes = est_knotes + 32;
populate_corpse_crashinfo(p, p->task, rup,
code, subcode, buffer, num_knotes, NULL);
if (buffer != NULL) {
- kfree(buffer, buf_size);
+ kheap_free(KHEAP_TEMP, buffer, buf_size);
}
}
/*
proc_list_lock();
#if CONFIG_MEMORYSTATUS
- memorystatus_remove(p, TRUE);
+ proc_memorystatus_remove(p);
#endif
LIST_REMOVE(p, p_list);
proc_list_unlock();
-
#ifdef PGINPROF
vmsizmon();
#endif
dtrace_proc_exit(p);
#endif
- nspace_proc_exit(p);
-
/*
* need to cancel async IO requests that can be cancelled and wait for those
* already active. MAY BLOCK!
throttle_lowpri_io(0);
}
+ if (p->p_lflag & P_LNSPACE_RESOLVER) {
+ /*
+ * The namespace resolver is exiting; there may be
+ * outstanding materialization requests to clean up.
+ */
+ nspace_resolver_exited(p);
+ }
+
#if SYSV_SHM
/* Close ref SYSV Shared memory*/
if (p->vm_shm) {
if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
session_unlock(sessp);
- /*
- * We're going to SIGHUP the foreground process
- * group. It can't change from this point on
- * until the revoke is complete.
- * The process group changes under both the tty
- * lock and proc_list_lock but we need only one
- */
- tty_lock(tp);
- ttysetpgrphup(tp);
- tty_unlock(tp);
-
tty_pgsignal(tp, SIGHUP, 1);
session_lock(sessp);
(void) ttywait(tp);
tty_unlock(tp);
}
- context.vc_thread = proc_thread(p); /* XXX */
+
+ context.vc_thread = NULL;
context.vc_ucred = kauth_cred_proc_ref(p);
VNOP_REVOKE(ttyvp, REVOKEALL, &context);
if (cttyflag) {
ttyvp = NULLVP;
}
if (tp) {
- /*
- * This is cleared even if not set. This is also done in
- * spec_close to ensure that the flag is cleared.
- */
- tty_lock(tp);
- ttyclrpgrphup(tp);
- tty_unlock(tp);
-
ttyfree(tp);
}
}
fixjobc(p, pg, 0);
pg_rele(pg);
- p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+ /*
+ * Change RLIMIT_FSIZE for accounting/debugging. proc_limitsetcur_internal() will COW the current plimit
+ * before making changes if the current plimit is shared. The COW'ed plimit will be freed
+ * below by calling proc_limitdrop().
+ */
+ proc_limitsetcur_internal(p, RLIMIT_FSIZE, RLIM_INFINITY);
+
(void)acct_process(p);
proc_list_lock();
/*
* Other substructures are freed from wait().
*/
- FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
+ zfree(proc_stats_zone, p->p_stats);
p->p_stats = NULL;
- FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
+ zfree(proc_sigacts_zone, p->p_sigacts);
p->p_sigacts = NULL;
- proc_limitdrop(p, 1);
- p->p_limit = NULL;
+ proc_limitdrop(p);
/*
* Finish up by terminating the task
ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
proc_unlock(parent);
- FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE);
+ zfree(zombie_zone, child->p_ru);
child->p_ru = NULL;
} else {
printf("Warning : lost p_ru for %s\n", child->p_comm);
os_reason_free(child->p_exit_reason);
- /*
- * Free up credentials.
- */
- if (IS_VALID_CRED(child->p_ucred)) {
- kauth_cred_unref(&child->p_ucred);
- }
-
- /* XXXX Note NOT SAFE TO USE p_ucred from this point onwards */
-
/*
* Finally finished with old proc entry.
* Unlink it from its process group and free it.
proc_list_unlock();
+ /*
+ * Free up credentials.
+ */
+ if (IS_VALID_CRED(child->p_ucred)) {
+ kauth_cred_t tmp_ucred = child->p_ucred;
+ kauth_cred_unref(&tmp_ucred);
+ child->p_ucred = NOCRED;
+ }
lck_mtx_destroy(&child->p_mlock, proc_mlock_grp);
lck_mtx_destroy(&child->p_ucred_mlock, proc_ucred_mlock_grp);
#endif
lck_spin_destroy(&child->p_slock, proc_slock_grp);
- FREE_ZONE(child, sizeof *child, M_PROC);
+ zfree(proc_zone, child);
if ((locked == 1) && (droplock == 0)) {
proc_list_lock();
}
#endif
siginfo.si_signo = SIGCHLD;
siginfo.si_pid = p->p_pid;
- siginfo.si_status = (WEXITSTATUS(p->p_xstat) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
- p->p_xhighbits = 0;
+
+ /* If the child terminated abnormally due to a signal, the signum
+ * needs to be preserved in the exit status.
+ */
if (WIFSIGNALED(p->p_xstat)) {
siginfo.si_code = WCOREDUMP(p->p_xstat) ?
CLD_DUMPED : CLD_KILLED;
+ siginfo.si_status = WTERMSIG(p->p_xstat);
} else {
siginfo.si_code = CLD_EXITED;
+ siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF;
}
+ siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
+ p->p_xhighbits = 0;
if ((error = copyoutsiginfo(&siginfo,
caller64, uap->infop)) != 0) {
}
#endif
oldparent->p_childrencnt--;
-#if __PROC_INTERNAL_DEBUG1
+#if __PROC_INTERNAL_DEBUG
if (oldparent->p_childrencnt < 0) {
panic("process children count -ve\n");
}
proc_list_lock();
#if CONFIG_MEMORYSTATUS
- memorystatus_remove(p, TRUE);
+ proc_memorystatus_remove(p);
#endif
LIST_REMOVE(p, p_list);
struct session *sessp;
struct rusage_superset *rup;
- /* XXX Zombie allocation may fail, in which case stats get lost */
- MALLOC_ZONE(rup, struct rusage_superset *,
- sizeof(*rup), M_ZOMBIE, M_WAITOK);
+ rup = zalloc(zombie_zone);
proc_refdrain(p);
sessp = proc_session(p);
if (SESS_LEADER(p, sessp)) {
- if (sessp->s_ttyvp != NULLVP) {
- struct vnode *ttyvp;
- int ttyvid;
- int cttyflag = 0;
- struct vfs_context context;
- struct tty *tp;
-
- /*
- * Controlling process.
- * Signal foreground pgrp,
- * drain controlling terminal
- * and revoke access to controlling terminal.
- */
- session_lock(sessp);
- tp = SESSION_TP(sessp);
- if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
- session_unlock(sessp);
-
- /*
- * We're going to SIGHUP the foreground process
- * group. It can't change from this point on
- * until the revoke is complete.
- * The process group changes under both the tty
- * lock and proc_list_lock but we need only one
- */
- tty_lock(tp);
- ttysetpgrphup(tp);
- tty_unlock(tp);
-
- tty_pgsignal(tp, SIGHUP, 1);
-
- session_lock(sessp);
- tp = SESSION_TP(sessp);
- }
- cttyflag = sessp->s_flags & S_CTTYREF;
- sessp->s_flags &= ~S_CTTYREF;
- ttyvp = sessp->s_ttyvp;
- ttyvid = sessp->s_ttyvid;
- sessp->s_ttyvp = NULL;
- sessp->s_ttyvid = 0;
- sessp->s_ttyp = TTY_NULL;
- sessp->s_ttypgrpid = NO_PID;
- session_unlock(sessp);
-
- if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
- if (tp != TTY_NULL) {
- tty_lock(tp);
- (void) ttywait(tp);
- tty_unlock(tp);
- }
- context.vc_thread = proc_thread(p); /* XXX */
- context.vc_ucred = kauth_cred_proc_ref(p);
- VNOP_REVOKE(ttyvp, REVOKEALL, &context);
- if (cttyflag) {
- /*
- * Release the extra usecount taken in cttyopen.
- * usecount should be released after VNOP_REVOKE is called.
- * This usecount was taken to ensure that
- * the VNOP_REVOKE results in a close to
- * the tty since cttyclose is a no-op.
- */
- vnode_rele(ttyvp);
- }
- vnode_put(ttyvp);
- kauth_cred_unref(&context.vc_ucred);
- ttyvp = NULLVP;
- }
- if (tp) {
- /*
- * This is cleared even if not set. This is also done in
- * spec_close to ensure that the flag is cleared.
- */
- tty_lock(tp);
- ttyclrpgrphup(tp);
- tty_unlock(tp);
-
- ttyfree(tp);
- }
- }
- session_lock(sessp);
- sessp->s_leader = NULL;
- session_unlock(sessp);
+ panic("vfork child is session leader");
}
session_rele(sessp);
fixjobc(p, pg, 0);
pg_rele(pg);
- p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+ /*
+ * Change RLIMIT_FSIZE for accounting/debugging. proc_limitsetcur_internal() will COW the current plimit
+ * before making changes if the current plimit is shared. The COW'ed plimit will be freed
+ * below by calling proc_limitdrop().
+ */
+ proc_limitsetcur_internal(p, RLIMIT_FSIZE, RLIM_INFINITY);
proc_list_lock();
+
proc_childdrainstart(p);
while ((q = p->p_children.lh_first) != NULL) {
if (q->p_stat == SZOMB) {
/*
* Other substructures are freed from wait().
*/
- FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
+ zfree(proc_stats_zone, p->p_stats);
p->p_stats = NULL;
- FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
+ zfree(proc_sigacts_zone, p->p_sigacts);
p->p_sigacts = NULL;
- proc_limitdrop(p, 1);
- p->p_limit = NULL;
+ FREE(p->p_subsystem_root_path, M_SBUF);
+ p->p_subsystem_root_path = NULL;
+
+ proc_limitdrop(p);
/*
* Finish up by terminating the task
bzero(a_user_rusage_p, sizeof(struct user32_rusage));
/* timeval changes size, so utime and stime need special handling */
- a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
+ a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec;
a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
- a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
+ a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec;
a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
/*
* everything else can be a direct assign. We currently ignore
* the loss of precision
*/
- a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
- a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
- a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
- a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
- a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
- a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
- a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
- a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
- a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
- a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
- a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
- a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
- a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
- a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
+ a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss;
+ a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss;
+ a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss;
+ a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss;
+ a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt;
+ a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt;
+ a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap;
+ a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock;
+ a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock;
+ a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd;
+ a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv;
+ a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals;
+ a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw;
+ a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw;
}
void
// See man wait4 for other valid wait4 arguments.
waitinfo->owner = args->pid;
}
+
+#if __has_feature(ptrauth_calls)
+/*
+ * Terminate process 'p' in response to a pointer-authentication (PAC)
+ * failure, recording the Mach exception details so a crash report is
+ * generated for the corpse.
+ *
+ * exception/code/subcode are stashed in the current uthread before the
+ * exit so they can be surfaced with the exit reason.
+ * Returns the result of exit_with_reason(); the process exits as if
+ * killed by SIGKILL.
+ */
+int
+exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
+    mach_exception_subcode_t subcode)
+{
+	thread_t self = current_thread();
+	struct uthread *ut = get_bsdthread_info(self);
+
+	/* NOTE(review): assumes os_reason_create cannot fail here (asserted,
+	 * not handled) — confirm it blocks rather than returning NULL. */
+	os_reason_t exception_reason = os_reason_create(OS_REASON_PAC_EXCEPTION, (uint64_t)code);
+	assert(exception_reason != OS_REASON_NULL);
+	/* Request a crash report for this exit reason. */
+	exception_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
+	ut->uu_exception = exception;
+	ut->uu_code = code;
+	ut->uu_subcode = subcode;
+
+	/* Exit with signal-style status SIGKILL; ownership of
+	 * exception_reason passes to exit_with_reason(). */
+	return exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL, TRUE, FALSE,
+	    0, exception_reason);
+}
+#endif /* __has_feature(ptrauth_calls) */