#include <sys/bsdtask_info.h>
#include <sys/persona.h>
+#ifdef CONFIG_32BIT_TELEMETRY
+#include <sys/kasl.h>
+#endif /* CONFIG_32BIT_TELEMETRY */
+
+#if CONFIG_CSR
+#include <sys/csr.h>
+#endif
+
#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif
#include <libkern/crypto/sha1.h>
+#ifdef CONFIG_32BIT_TELEMETRY
+#define MAX_32BIT_EXEC_SIG_SIZE 160
+#endif /* CONFIG_32BIT_TELEMETRY */
+
/*
 * Structure associated with user caching.
 */
#endif
#if CONFIG_COREDUMP
/* Name to give to core files */
+#if defined(XNU_TARGET_OS_BRIDGE)
+__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/internal/%N.core"};
+#elif CONFIG_EMBEDDED
+__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/cores/%N.core"};
+#else
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"};
#endif
+#endif
#if PROC_REF_DEBUG
#include <kern/backtrace.h>
#endif
+typedef uint64_t unaligned_u64 __attribute__((aligned(1)));
+
static void orphanpg(struct pgrp * pg);
void proc_name_kdp(task_t t, char * buf, int size);
-void * proc_get_uthread_uu_threadlist(void * uthread_v);
int proc_threadname_kdp(void * uth, char * buf, size_t size);
-void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime);
+void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
char * proc_name_address(void * p);
-/* TODO: make a header that's exported and usable in osfmk */
-char* proc_best_name(proc_t p);
-
static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
int fixjob_callback(proc_t, void *);
-uint64_t get_current_unique_pid(void);
-
-
/*
 * Return the unique pid of the current process.
 * NOTE(review): the body references `p` with no visible definition in this
 * hunk; presumably elided context assigns p = current_proc() and returns
 * p->p_uniqueid — confirm against the full file.
 */
uint64_t
get_current_unique_pid(void)
{
	return(p);
}
-#if PROC_REF_DEBUG
/*
 * Reset the per-uthread proc reference accounting to zero.
 * The refcount itself is now maintained unconditionally; only the
 * backtrace-recording index is PROC_REF_DEBUG-gated.
 */
void
uthread_reset_proc_refcount(void *uthread) {
	uthread_t uth;
+	uth = (uthread_t) uthread;
+	uth->uu_proc_refcount = 0;
+
+#if PROC_REF_DEBUG
	/* Backtrace tracking can be disabled via boot-arg; skip index reset. */
	if (proc_ref_tracking_disabled) {
		return;
	}
-	uth = (uthread_t) uthread;
-
-	uth->uu_proc_refcount = 0;
	uth->uu_pindex = 0;
+#endif
}
+#if PROC_REF_DEBUG
/*
 * Debug-only accessor: number of proc refs currently held by this uthread.
 * NOTE(review): `uth` is read before any visible assignment in this hunk;
 * the cast from `uthread` is presumably in elided context — confirm.
 */
int
uthread_get_proc_refcount(void *uthread) {
	uthread_t uth;
	return uth->uu_proc_refcount;
}
+#endif
/*
 * Account a proc ref acquire (count == 1) or release (count == -1)
 * against the current uthread.  Under PROC_REF_DEBUG, additionally
 * record a backtrace for each acquire (up to NUM_PROC_REFS_TO_TRACK).
 */
static void
-record_procref(proc_t p, int count) {
+record_procref(proc_t p __unused, int count) {
	uthread_t uth;
+	uth = current_uthread();
+	uth->uu_proc_refcount += count;
+
+#if PROC_REF_DEBUG
	/* Backtrace capture can be disabled via boot-arg. */
	if (proc_ref_tracking_disabled) {
		return;
	}
-	uth = current_uthread();
-	uth->uu_proc_refcount += count;
-
	if (count == 1) {
		if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
			backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);
			uth->uu_pindex++;
		}
	}
-}
#endif
+}
+
+/*
+ * Decide whether the current thread may block in proc_refwait.
+ * Returns TRUE only when the thread holds no proc refs at all.
+ */
+static boolean_t
+uthread_needs_to_wait_in_proc_refwait(void) {
+	uthread_t uth = current_uthread();
+
+	/*
+	 * Allow threads holding no proc refs to wait
+	 * in proc_refwait; allowing threads holding
+	 * proc refs to wait in proc_refwait causes
+	 * deadlocks and makes proc_find non-reentrant.
+	 */
+	if (uth->uu_proc_refcount == 0)
+		return TRUE;
+
+	return FALSE;
+}
int
proc_rele(proc_t p)
proc_ref_locked(proc_t p)
{
proc_t p1 = p;
+ int pid = proc_pid(p);
- /* if process still in creation return failure */
- if ((p == PROC_NULL) || ((p->p_listflag & P_LIST_INCREATE) != 0))
+retry:
+ /*
+ * if process still in creation or proc got recycled
+ * during msleep then return failure.
+ */
+ if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0))
return (PROC_NULL);
- /* do not return process marked for termination */
- if ((p->p_stat != SZOMB) && ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & (P_LIST_DRAINWAIT | P_LIST_DRAIN | P_LIST_DEAD)) == 0)) {
+
+ /*
+ * Do not return process marked for termination
+ * or proc_refdrain called without ref wait.
+ * Wait for proc_refdrain_with_refwait to complete if
+ * process in refdrain and refwait flag is set, unless
+ * the current thread is holding to a proc_ref
+ * for any proc.
+ */
+ if ((p->p_stat != SZOMB) &&
+ ((p->p_listflag & P_LIST_EXITED) == 0) &&
+ ((p->p_listflag & P_LIST_DEAD) == 0) &&
+ (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
+ ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
+ if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
+ msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0) ;
+ /*
+ * the proc might have been recycled since we dropped
+ * the proc list lock, get the proc again.
+ */
+ p = pfind_locked(pid);
+ goto retry;
+ }
p->p_refcount++;
-#if PROC_REF_DEBUG
record_procref(p, 1);
-#endif
}
else
p1 = PROC_NULL;
if (p->p_refcount > 0) {
p->p_refcount--;
-#if PROC_REF_DEBUG
record_procref(p, -1);
-#endif
if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
p->p_listflag &= ~P_LIST_DRAINWAIT;
wakeup(&p->p_refcount);
/*
 * Drain all outstanding references on p and mark it dead.
 * Thin wrapper: delegates to proc_refdrain_with_refwait() without
 * taking a ref or setting the refwait flag.
 */
void
proc_refdrain(proc_t p)
{
+	proc_refdrain_with_refwait(p, FALSE);
+}
+/*
+ * Drain references on p.  When get_ref_and_allow_wait is TRUE:
+ * P_LIST_REFWAIT is set (new proc_ref_locked callers block until
+ * proc_refwake()), a reference is taken for the caller, and p is
+ * returned.  When FALSE: the proc is marked P_LIST_DEAD and NULL is
+ * returned.  For initproc (launchd exec) the refcount wait is skipped.
+ */
+proc_t
+proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
+{
+	boolean_t initexec = FALSE;
	proc_list_lock();
	p->p_listflag |= P_LIST_DRAIN;
-	while (p->p_refcount) {
+	if (get_ref_and_allow_wait) {
+		/*
+		 * All the calls to proc_ref_locked will wait
+		 * for the flag to get cleared before returning a ref,
+		 * unless the current thread is holding to a proc ref
+		 * for any proc.
+		 */
+		p->p_listflag |= P_LIST_REFWAIT;
+		if (p == initproc) {
+			initexec = TRUE;
+		}
+	}
+
+	/* Do not wait in ref drain for launchd exec */
+	while (p->p_refcount && !initexec) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ;
	}
+
	p->p_listflag &= ~P_LIST_DRAIN;
-	p->p_listflag |= P_LIST_DEAD;
+	if (!get_ref_and_allow_wait) {
+		p->p_listflag |= P_LIST_DEAD;
+	} else {
+		/* Return a ref to the caller */
+		p->p_refcount++;
+		record_procref(p, 1);
+	}
	proc_list_unlock();
+	if (get_ref_and_allow_wait) {
+		return (p);
+	}
+	return NULL;
+}
+/*
+ * Clear P_LIST_REFWAIT (set by proc_refdrain_with_refwait) and wake
+ * any threads sleeping in proc_refwait on p->p_listflag.
+ */
+void
+proc_refwake(proc_t p)
+{
+	proc_list_lock();
+	p->p_listflag &= ~P_LIST_REFWAIT;
+	wakeup(&p->p_listflag);
+	proc_list_unlock();
}
proc_t
return 0;
}
+
/* note that this function is generally going to be called from stackshot,
* and the arguments will be coming from a struct which is declared packed
* thus the input arguments will in general be unaligned. We have to handle
* that here. */
void
-proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec, uint64_t *abstime)
+proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
{
proc_t pp = (proc_t)p;
- struct uint64p {
- uint64_t val;
- } __attribute__((packed));
-
if (pp != PROC_NULL) {
if (tv_sec != NULL)
- ((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec;
+ *tv_sec = pp->p_start.tv_sec;
if (tv_usec != NULL)
- ((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec;
+ *tv_usec = pp->p_start.tv_usec;
if (abstime != NULL) {
if (pp->p_stats != NULL)
*abstime = pp->p_stats->ps_start;
return(retval? 1: 0);
}
+/*
+ * Return 1 if the process has begun exit teardown (P_LPEXIT set in
+ * p_lflag), 0 otherwise or when p is NULL.
+ */
+int
+proc_in_teardown(proc_t p)
+{
+	int retval = 0;
+
+	if (p)
+		retval = p->p_lflag & P_LPEXIT;
+	return(retval? 1: 0);
+
+}
+
int
proc_forcequota(proc_t p)
{
return(IS_64BIT_PROCESS(p));
}
+/*
+ * Return 1 if the process uses the 64-bit data model, as reported by
+ * its task (task_get_64bit_data), 0 otherwise.  p->task must be set.
+ */
+int
+proc_is64bit_data(proc_t p)
+{
+	assert(p->task);
+	return (int)task_get_64bit_data(p->task);
+}
+
int
proc_pidversion(proc_t p)
{
if (vnode_getwithref(tvp) == 0) {
return tvp;
}
- }
+ }
return NULLVP;
}
}
+#ifndef __arm__
int
IS_64BIT_PROCESS(proc_t p)
{
else
return(0);
}
+#endif
/*
* Locate a process by number
child->p_pptr = parent;
child->p_ppid = parent->p_pid;
child->p_puniqueid = parent->p_uniqueid;
+ child->p_xhighbits = 0;
pg = proc_pgrp(parent);
pgrp_add(pg, parent, child);
case CS_OPS_ENTITLEMENTS_BLOB:
case CS_OPS_IDENTITY:
case CS_OPS_BLOB:
+ case CS_OPS_TEAMID:
break; /* not restricted to root */
default:
if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
case CS_OPS_MARKRESTRICT:
case CS_OPS_SET_STATUS:
case CS_OPS_CLEARINSTALLER:
+ case CS_OPS_CLEARPLATFORM:
if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
goto out;
break;
proc_lock(pt);
retflags = pt->p_csflags;
- if (cs_enforcement(pt))
+ if (cs_process_enforcement(pt))
retflags |= CS_ENFORCEMENT;
if (csproc_get_platform_binary(pt))
retflags |= CS_PLATFORM_BINARY;
if (csproc_get_platform_path(pt))
retflags |= CS_PLATFORM_PATH;
+ //Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV but still report CS_FORCED_LV
+ if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) {
+ retflags &= (~CS_REQUIRE_LV);
+ }
proc_unlock(pt);
if (uaddr != USER_ADDR_NULL)
error = csops_copy_token(start, length, usize, uaddr);
break;
}
- case CS_OPS_IDENTITY: {
+ case CS_OPS_IDENTITY:
+ case CS_OPS_TEAMID: {
const char *identity;
uint8_t fakeheader[8];
uint32_t idlen;
break;
}
- identity = cs_identity_get(pt);
+ identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt);
proc_unlock(pt);
if (identity == NULL) {
error = ENOENT;
break;
}
-
+
length = strlen(identity) + 1; /* include NUL */
idlen = htonl(length + sizeof(fakeheader));
memcpy(&fakeheader[4], &idlen, sizeof(idlen));
case CS_OPS_CLEARINSTALLER:
proc_lock(pt);
- pt->p_csflags &= ~(CS_INSTALLER | CS_EXEC_SET_INSTALLER);
+ pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP);
+ proc_unlock(pt);
+ break;
+
+ case CS_OPS_CLEARPLATFORM:
+#if DEVELOPMENT || DEBUG
+ if (cs_process_global_enforcement()) {
+ error = ENOTSUP;
+ break;
+ }
+
+#if CONFIG_CSR
+ if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) {
+ error = ENOTSUP;
+ break;
+ }
+#endif
+
+ proc_lock(pt);
+ pt->p_csflags &= ~(CS_PLATFORM_BINARY|CS_PLATFORM_PATH);
+ csproc_clear_platform_binary(pt);
proc_unlock(pt);
break;
+#else
+ error = ENOTSUP;
+ break;
+#endif /* !DEVELOPMENT || DEBUG */
default:
error = EINVAL;
proc_iterate_fn_t filterfn,
void *filterarg)
{
- pid_t *pid_list;
+ pid_t *pid_list = NULL;
vm_size_t pid_list_size = 0;
vm_size_t pid_list_size_needed = 0;
int pid_count = 0;
for (;;) {
proc_list_lock();
- pid_count_available = nprocs;
+ pid_count_available = nprocs + 1 /* kernel_task not counted in nprocs */;
assert(pid_count_available > 0);
pid_list_size_needed = pid_count_available * sizeof(pid_t);
}
pid_list_size = pid_list_size_needed;
}
+ assert(pid_list != NULL);
/* filter pids into pid_list */
struct timeval last_no_space_action = {0, 0};
+#if DEVELOPMENT || DEBUG
+extern boolean_t kill_on_no_paging_space;
+#endif /* DEVELOPMENT || DEBUG */
+
+#define MB_SIZE (1024 * 1024ULL)
+boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
+
+extern int32_t max_kill_priority;
+extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index);
+
int
no_paging_space_action()
{
*/
last_no_space_action = now;
- printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
+ printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.pcs_max_size/MB_SIZE));
psignal(p, SIGKILL);
proc_rele(p);
}
}
+ /*
+ * We have some processes within our jetsam bands of consideration and hence can be killed.
+ * So we will invoke the memorystatus thread to go ahead and kill something.
+ */
+ if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) {
+
+ last_no_space_action = now;
+ memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
+ return (1);
+ }
+
+ /*
+ * No eligible processes to kill. So let's suspend/kill the largest
+ * process depending on its policy control specifications.
+ */
+
if (nps.pcs_max_size > 0) {
if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
return retval;
}
-void *
-proc_get_uthread_uu_threadlist(void * uthread_v)
+/*
+ * Return TRUE if EXC_RESOURCE should be delivered synchronously to p:
+ * only for traced (P_LTRACED) processes.  FALSE for PROC_NULL.
+ */
+boolean_t
+proc_send_synchronous_EXC_RESOURCE(proc_t p)
+{
+	if (p == PROC_NULL)
+		return FALSE;
+
+	/* Send sync EXC_RESOURCE if the process is traced */
+	if (ISSET(p->p_lflag, P_LTRACED)) {
+		return TRUE;
+	}
+	return FALSE;
+}
+
+#ifdef CONFIG_32BIT_TELEMETRY
+/*
+ * Log a MessageTracer (ASL) event recording that a 32-bit process ran.
+ * The signature is "procname,parentname,teamid,identity", bounded by
+ * MAX_32BIT_EXEC_SIG_SIZE; missing teamid/identity become empty fields.
+ */
+void
+proc_log_32bit_telemetry(proc_t p)
{
-	uthread_t uth = (uthread_t)uthread_v;
-	return (uth != NULL) ? uth->uu_threadlist : NULL;
+	/* Gather info */
+	char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
+	char * signature_cur_end = &signature_buf[0];
+	char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
+	int bytes_printed = 0;
+
+	const char * teamid = NULL;
+	const char * identity = NULL;
+	struct cs_blob * csblob = NULL;
+
+	proc_list_lock();
+
+	/*
+	 * Get proc name and parent proc name; if the parent execs, we'll get a
+	 * garbled name.
+	 */
+	bytes_printed = snprintf(signature_cur_end,
+	                         signature_buf_end - signature_cur_end,
+	                         "%s,%s,", p->p_name,
+	                         (p->p_pptr ? p->p_pptr->p_name : ""));
+
+	if (bytes_printed > 0) {
+		signature_cur_end += bytes_printed;
+	}
+
+	proc_list_unlock();
+
+	/* Get developer info. */
+	vnode_t v = proc_getexecutablevnode(p);
+
+	if (v) {
+		csblob = csvnode_get_blob(v, 0);
+
+		if (csblob) {
+			teamid = csblob_get_teamid(csblob);
+			identity = csblob_get_identity(csblob);
+		}
+	}
+
+	/* Substitute empty fields so the signature stays well-formed. */
+	if (teamid == NULL) {
+		teamid = "";
+	}
+
+	if (identity == NULL) {
+		identity = "";
+	}
+
+	bytes_printed = snprintf(signature_cur_end,
+	                         signature_buf_end - signature_cur_end,
+	                         "%s,%s", teamid, identity);
+
+	if (bytes_printed > 0) {
+		signature_cur_end += bytes_printed;
+	}
+
+	/* Drop the vnode ref taken by proc_getexecutablevnode. */
+	if (v) {
+		vnode_put(v);
+	}
+
+	/*
+	 * We may want to rate limit here, although the SUMMARIZE key should
+	 * help us aggregate events in userspace.
+	 */
+
+	/* Emit log */
+	kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
+	/* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
+	/* 1 */ "com.apple.message.signature", signature_buf,
+	/* 2 */ "com.apple.message.summarize", "YES",
+	NULL);
}
+#endif /* CONFIG_32BIT_TELEMETRY */