/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/proc_info.h>
#include <sys/bsdtask_info.h>
#include <sys/persona.h>
+#include <sys/sysent.h>
+#include <sys/reason.h>
+#include <IOKit/IOBSD.h> /* IOTaskHasEntitlement() */
#ifdef CONFIG_32BIT_TELEMETRY
#include <sys/kasl.h>
extern int cs_debug;
+#if DEVELOPMENT || DEBUG
+int syscallfilter_disable = 0;
+#endif // DEVELOPMENT || DEBUG
+
#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
static void orphanpg(struct pgrp * pg);
void proc_name_kdp(task_t t, char * buf, int size);
+boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid);
int proc_threadname_kdp(void * uth, char * buf, size_t size);
void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
char * proc_name_address(void * p);
return;
}
- if (count == 1) {
- if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
- backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);
+ if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
+ backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex],
+ PROC_REF_STACK_DEPTH, NULL);
- uth->uu_proc_ps[uth->uu_pindex] = p;
- uth->uu_pindex++;
- }
+ uth->uu_proc_ps[uth->uu_pindex] = p;
+ uth->uu_pindex++;
}
#endif
}
return -1;
}
+/*
+ * proc_original_ppid
+ * Return p->p_original_ppid, the parent pid recorded when the child was
+ * created (see the forkproc hunk below: child->p_original_ppid is set to
+ * parent->p_pid), or -1 when p is NULL.
+ */
+int
+proc_original_ppid(proc_t p)
+{
+ if (p != NULL) {
+ return p->p_original_ppid;
+ }
+ return -1;
+}
+
int
proc_selfpid(void)
{
return current_proc()->p_ppid;
}
-int
+uint64_t
proc_selfcsflags(void)
{
- return current_proc()->p_csflags;
+ return (uint64_t)current_proc()->p_csflags;
+}
+
+/*
+ * proc_csflags
+ * Copy p->p_csflags, widened to 64 bits, into *flags.
+ * Returns 0 on success, EINVAL when p or flags is NULL.
+ */
+int
+proc_csflags(proc_t p, uint64_t *flags)
+{
+ if (p && flags) {
+ *flags = (uint64_t)p->p_csflags;
+ return 0;
+ }
+ return EINVAL;
+}
+
+/*
+ * proc_platform
+ * Return p->p_platform, or (uint32_t)-1 when p is NULL.
+ */
+uint32_t
+proc_platform(proc_t p)
+{
+ if (p != NULL) {
+ return p->p_platform;
+ }
+ return (uint32_t)-1;
+}
+
+/*
+ * proc_sdk
+ * Return p->p_sdk, or (uint32_t)-1 when p is NULL.
+ * NOTE(review): presumably the SDK version the binary was built against,
+ * mirroring proc_platform() above -- confirm against the p_sdk setter.
+ */
+uint32_t
+proc_sdk(proc_t p)
+{
+ if (p != NULL) {
+ return p->p_sdk;
+ }
+ return (uint32_t)-1;
+}
#if CONFIG_DTRACE
{
proc_t p;
+ if (size <= 0) {
+ return;
+ }
+
+ bzero(buf, size);
+
if ((p = proc_find(pid)) != PROC_NULL) {
strlcpy(buf, &p->p_comm[0], size);
proc_rele(p);
}
}
+/*
+ * proc_binary_uuid_kdp
+ * Fill 'uuid' with the executable UUID of the bsd proc backing 'task'
+ * via proc_getexecutableuuid(). Returns FALSE when the task has no bsd
+ * proc, TRUE otherwise.
+ * NOTE(review): the _kdp suffix suggests this runs in kernel-debugger
+ * context -- confirm proc_getexecutableuuid() takes no locks there.
+ */
+boolean_t
+proc_binary_uuid_kdp(task_t task, uuid_t uuid)
+{
+ proc_t p = get_bsdtask_info(task);
+ if (p == PROC_NULL) {
+ return FALSE;
+ }
+
+ proc_getexecutableuuid(p, uuid, sizeof(uuid_t));
+
+ return TRUE;
+}
+
int
proc_threadname_kdp(void * uth, char * buf, size_t size)
{
return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}
+/*
+ * proc_exitstatus
+ * Return the low 16 bits of p->p_xstat (the wait(2)-style status word).
+ * No NULL check: the caller must guarantee p is valid.
+ */
+int
+proc_exitstatus(proc_t p)
+{
+ return p->p_xstat & 0xffff;
+}
+
void
proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
{
return NULLVP;
}
+/*
+ * proc_gettty
+ * Look up the controlling-terminal vnode of p's session and return it in
+ * *vp with an iocount held (via vnode_getwithvid); the caller is
+ * responsible for dropping that iocount.
+ *
+ * Returns:
+ *   0       *vp is valid.
+ *   ENOENT  session exists but has no controlling tty vnode.
+ *   EINVAL  NULL arguments, no session, or the vnode/vid pair went
+ *           stale between the snapshot and vnode_getwithvid().
+ */
+int
+proc_gettty(proc_t p, vnode_t *vp)
+{
+ if (!p || !vp) {
+ return EINVAL;
+ }
+
+ struct session *procsp = proc_session(p);
+ int err = EINVAL;
+
+ if (procsp != SESSION_NULL) {
+ /* snapshot vnode + vid under the session lock, validate after dropping it */
+ session_lock(procsp);
+ vnode_t ttyvp = procsp->s_ttyvp;
+ int ttyvid = procsp->s_ttyvid;
+ session_unlock(procsp);
+
+ if (ttyvp) {
+ if (vnode_getwithvid(ttyvp, ttyvid) == 0) {
+ *vp = ttyvp;
+ err = 0;
+ }
+ } else {
+ err = ENOENT;
+ }
+
+ session_rele(procsp);
+ }
+
+ return err;
+}
+
+/*
+ * proc_gettty_dev
+ * Store the controlling terminal's device number (tp->t_dev) in *dev.
+ * Returns 0 on success, EINVAL when p has no session or no tty.
+ * NOTE(review): unlike proc_gettty() above there is no NULL check on p
+ * before proc_session(p) -- confirm proc_session tolerates PROC_NULL or
+ * that callers guarantee a valid proc.
+ */
+int
+proc_gettty_dev(proc_t p, dev_t *dev)
+{
+ struct session *procsp = proc_session(p);
+ boolean_t has_tty = FALSE;
+
+ if (procsp != SESSION_NULL) {
+ session_lock(procsp);
+
+ struct tty * tp = SESSION_TP(procsp);
+ if (tp != TTY_NULL) {
+ *dev = tp->t_dev;
+ has_tty = TRUE;
+ }
+
+ session_unlock(procsp);
+ session_rele(procsp);
+ }
+
+ if (has_tty) {
+ return 0;
+ } else {
+ return EINVAL;
+ }
+}
+
+/*
+ * proc_selfexecutableargs
+ * Copy the calling process's argc (first sizeof(p_argc) bytes) followed
+ * by its argument strings (copied in from the top of the user stack)
+ * into 'buf'.
+ *
+ * Modes:
+ *   buf == NULL : size query; *buflen is set to the space required.
+ *   buf != NULL : *buflen is the capacity; args are truncated to fit.
+ *
+ * Returns 0 on success, EINVAL on bad parameters or when the process has
+ * no user stack, or the error from copyin().
+ */
+int
+proc_selfexecutableargs(uint8_t *buf, size_t *buflen)
+{
+ proc_t p = current_proc();
+
+ // buflen must always be provided
+ if (buflen == NULL) {
+ return EINVAL;
+ }
+
+ // If a buf is provided, there must be at least enough room to fit argc
+ if (buf && *buflen < sizeof(p->p_argc)) {
+ return EINVAL;
+ }
+
+ if (!p->user_stack) {
+ return EINVAL;
+ }
+
+ if (buf == NULL) {
+ *buflen = p->p_argslen + sizeof(p->p_argc);
+ return 0;
+ }
+
+ // Copy in argc to the first 4 bytes
+ memcpy(buf, &p->p_argc, sizeof(p->p_argc));
+
+ if (*buflen > sizeof(p->p_argc) && p->p_argslen > 0) {
+ // See memory layout comment in kern_exec.c:exec_copyout_strings()
+ // We want to copy starting from `p_argslen` bytes away from top of stack
+ return copyin(p->user_stack - p->p_argslen,
+ buf + sizeof(p->p_argc),
+ MIN(p->p_argslen, *buflen - sizeof(p->p_argc)));
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * proc_getexecutableoffset
+ * Return p->p_textoff, the offset of the executable within its backing
+ * vnode (cf. the vn_getcdhash(p->p_textvp, p->p_textoff, ...) use above).
+ * No NULL check: the caller must guarantee p is valid.
+ */
+off_t
+proc_getexecutableoffset(proc_t p)
+{
+ return p->p_textoff;
+}
void
bsd_set_dependency_capable(task_t task)
TAILQ_INIT(&child->p_evlist);
child->p_pptr = parent;
child->p_ppid = parent->p_pid;
+ child->p_original_ppid = parent->p_pid;
child->p_puniqueid = parent->p_uniqueid;
child->p_xhighbits = 0;
proc_childrenwalk(p, fixjob_callback, &fjarg);
}
+/*
+ * The pidlist_* routines support the functions in this file that
+ * walk lists of processes applying filters and callouts to the
+ * elements of the list.
+ *
+ * A prior implementation used a single linear array, which can be
+ * tricky to allocate on large systems. This implementation creates
+ * an SLIST of modestly sized arrays of PIDS_PER_ENTRY elements.
+ *
+ * The array should be sized large enough to keep the overhead of
+ * walking the list low, but small enough that blocking allocations of
+ * pidlist_entry_t structures always succeed.
+ */
+
+/* 1021 pids per entry keeps sizeof(pidlist_entry_t) near 4KB (assuming 4-byte pid_t). */
+#define PIDS_PER_ENTRY 1021
+
+/* One fixed-size chunk of pids on the pidlist's SLIST chain. */
+typedef struct pidlist_entry {
+ SLIST_ENTRY(pidlist_entry) pe_link;
+ u_int pe_nused; /* number of valid slots in pe_pid[] */
+ pid_t pe_pid[PIDS_PER_ENTRY];
+} pidlist_entry_t;
+
+/* Growable pid collection; see the pidlist_* routines comment above. */
+typedef struct {
+ SLIST_HEAD(, pidlist_entry) pl_head; /* chain of pidlist_entry_t chunks */
+ struct pidlist_entry *pl_active; /* chunk currently being filled by pidlist_add_pid */
+ u_int pl_nalloc; /* total pid capacity across all chunks */
+} pidlist_t;
+
+/* Initialize an empty pidlist; returns pl so callers can init-and-assign. */
+static __inline__ pidlist_t *
+pidlist_init(pidlist_t *pl)
+{
+ SLIST_INIT(&pl->pl_head);
+ pl->pl_active = NULL;
+ pl->pl_nalloc = 0;
+ return pl;
+}
+
+/*
+ * Grow pl one pidlist_entry_t at a time until its capacity is at least
+ * 'needed' pids. Panics if kalloc fails (blocking allocations of this
+ * size are expected to succeed; see the block comment above).
+ * Returns the resulting capacity.
+ */
+static u_int
+pidlist_alloc(pidlist_t *pl, u_int needed)
+{
+ while (pl->pl_nalloc < needed) {
+ pidlist_entry_t *pe = kalloc(sizeof(*pe));
+ if (NULL == pe) {
+ panic("no space for pidlist entry");
+ }
+ pe->pe_nused = 0;
+ SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link);
+ pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0]));
+ }
+ return pl->pl_nalloc;
+}
+
+/* Release every chunk on the chain and reset the capacity to zero. */
+static void
+pidlist_free(pidlist_t *pl)
+{
+ pidlist_entry_t *pe;
+ while (NULL != (pe = SLIST_FIRST(&pl->pl_head))) {
+ SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link);
+ kfree(pe, sizeof(*pe));
+ }
+ pl->pl_nalloc = 0;
+}
+
+/*
+ * Point pl_active at the first chunk. Must run after pidlist_alloc()
+ * and before the first pidlist_add_pid(); asserts a chunk exists.
+ */
+static __inline__ void
+pidlist_set_active(pidlist_t *pl)
+{
+ pl->pl_active = SLIST_FIRST(&pl->pl_head);
+ assert(pl->pl_active);
+}
+
+/*
+ * Append pid to the active chunk, advancing to the next chunk when the
+ * current one is full. Panics if total capacity is exhausted -- callers
+ * must have sized the list with pidlist_alloc() beforehand.
+ */
+static void
+pidlist_add_pid(pidlist_t *pl, pid_t pid)
+{
+ pidlist_entry_t *pe = pl->pl_active;
+ if (pe->pe_nused >= sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])) {
+ if (NULL == (pe = SLIST_NEXT(pe, pe_link))) {
+ panic("pidlist allocation exhausted");
+ }
+ pl->pl_active = pe;
+ }
+ pe->pe_pid[pe->pe_nused++] = pid;
+}
+
+/* Current total pid capacity of the list (not the number of pids stored). */
+static __inline__ u_int
+pidlist_nalloc(const pidlist_t *pl)
+{
+ return pl->pl_nalloc;
+}
+
/*
* A process group has become orphaned; if there are any stopped processes in
* the group, hang-up all process in that group.
static void
orphanpg(struct pgrp *pgrp)
{
- pid_t *pid_list;
+ pidlist_t pid_list, *pl = pidlist_init(&pid_list);
+ u_int pid_count_available = 0;
proc_t p;
- vm_size_t pid_list_size = 0;
- vm_size_t pid_list_size_needed = 0;
- int pid_count = 0;
- int pid_count_available = 0;
-
- assert(pgrp != NULL);
/* allocate outside of the pgrp_lock */
for (;;) {
PGMEMBERS_FOREACH(pgrp, p) {
pid_count_available++;
-
if (p->p_stat == SSTOP) {
should_iterate = TRUE;
}
}
-
if (pid_count_available == 0 || !should_iterate) {
pgrp_unlock(pgrp);
- return;
+ goto out; /* no orphaned processes OR nothing stopped */
}
-
- pid_list_size_needed = pid_count_available * sizeof(pid_t);
- if (pid_list_size >= pid_list_size_needed) {
+ if (pidlist_nalloc(pl) >= pid_count_available) {
break;
}
pgrp_unlock(pgrp);
- if (pid_list_size != 0) {
- kfree(pid_list, pid_list_size);
- }
- pid_list = kalloc(pid_list_size_needed);
- if (!pid_list) {
- return;
- }
- pid_list_size = pid_list_size_needed;
- }
-
- /* no orphaned processes */
- if (pid_list_size == 0) {
- pgrp_unlock(pgrp);
- return;
+ pidlist_alloc(pl, pid_count_available);
}
+ pidlist_set_active(pl);
+ u_int pid_count = 0;
PGMEMBERS_FOREACH(pgrp, p) {
- pid_list[pid_count++] = proc_pid(p);
- if (pid_count >= pid_count_available) {
+ pidlist_add_pid(pl, proc_pid(p));
+ if (++pid_count >= pid_count_available) {
break;
}
}
pgrp_unlock(pgrp);
- if (pid_count == 0) {
- goto out;
- }
-
- for (int i = 0; i < pid_count; i++) {
- /* do not handle kernproc */
- if (pid_list[i] == 0) {
- continue;
- }
- p = proc_find(pid_list[i]);
- if (!p) {
- continue;
+ const pidlist_entry_t *pe;
+ SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
+ for (u_int i = 0; i < pe->pe_nused; i++) {
+ const pid_t pid = pe->pe_pid[i];
+ if (0 == pid) {
+ continue; /* skip kernproc */
+ }
+ p = proc_find(pid);
+ if (!p) {
+ continue;
+ }
+ proc_transwait(p, 0);
+ pt_setrunnable(p);
+ psignal(p, SIGHUP);
+ psignal(p, SIGCONT);
+ proc_rele(p);
}
-
- proc_transwait(p, 0);
- pt_setrunnable(p);
- psignal(p, SIGHUP);
- psignal(p, SIGCONT);
- proc_rele(p);
}
-
out:
- kfree(pid_list, pid_list_size);
- return;
+ pidlist_free(pl);
}
int
case CS_OPS_IDENTITY:
case CS_OPS_BLOB:
case CS_OPS_TEAMID:
+ case CS_OPS_CLEAR_LV:
break; /* not restricted to root */
default:
if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) {
case CS_OPS_SET_STATUS:
case CS_OPS_CLEARINSTALLER:
case CS_OPS_CLEARPLATFORM:
+ case CS_OPS_CLEAR_LV:
if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) {
goto out;
}
break;
}
+ case CS_OPS_CLEAR_LV: {
+ /*
+ * This option is used to remove library validation from
+ * a running process. This is used in plugin architectures
+ * when a program needs to load untrusted libraries. This
+ * allows the process to maintain library validation as
+ * long as possible, then drop it only when required.
+ * Once a process has loaded the untrusted library,
+ * relying on library validation in the future will
+ * not be effective. An alternative is to re-exec
+ * your application without library validation, or
+ * fork an untrusted child.
+ */
+#ifdef CONFIG_EMBEDDED
+ // On embedded platforms, we don't support dropping LV
+ error = ENOTSUP;
+#else
+ /*
+ * if we have the flag set, and the caller wants
+ * to remove it, and they're entitled to, then
+ * we remove it from the csflags
+ *
+ * NOTE: We are fine to poke into the task because
+ * we get a ref to pt when we do the proc_find
+ * at the beginning of this function.
+ *
+ * We also only allow altering ourselves.
+ */
+ if (forself == 1 && IOTaskHasEntitlement(pt->task, CLEAR_LV_ENTITLEMENT)) {
+ proc_lock(pt);
+ pt->p_csflags &= (~(CS_REQUIRE_LV | CS_FORCED_LV));
+ proc_unlock(pt);
+ error = 0;
+ } else {
+ error = EPERM;
+ }
+#endif
+ break;
+ }
case CS_OPS_BLOB: {
void *start;
size_t length;
return error;
}
-int
+void
proc_iterate(
unsigned int flags,
proc_iterate_fn_t callout,
proc_iterate_fn_t filterfn,
void *filterarg)
{
- pid_t *pid_list = NULL;
- vm_size_t pid_list_size = 0;
- vm_size_t pid_list_size_needed = 0;
- int pid_count = 0;
- int pid_count_available = 0;
+ pidlist_t pid_list, *pl = pidlist_init(&pid_list);
+ u_int pid_count_available = 0;
assert(callout != NULL);
/* allocate outside of the proc_list_lock */
for (;;) {
proc_list_lock();
-
- pid_count_available = nprocs + 1 /* kernel_task not counted in nprocs */;
+ pid_count_available = nprocs + 1; /* kernel_task not counted in nprocs */
assert(pid_count_available > 0);
-
- pid_list_size_needed = pid_count_available * sizeof(pid_t);
- if (pid_list_size >= pid_list_size_needed) {
+ if (pidlist_nalloc(pl) > pid_count_available) {
break;
}
proc_list_unlock();
- if (pid_list_size != 0) {
- kfree(pid_list, pid_list_size);
- }
- pid_list = kalloc(pid_list_size_needed);
- if (!pid_list) {
- return 1;
- }
- pid_list_size = pid_list_size_needed;
+ pidlist_alloc(pl, pid_count_available);
}
- assert(pid_list != NULL);
+ pidlist_set_active(pl);
- /* filter pids into pid_list */
+ /* filter pids into the pid_list */
+ u_int pid_count = 0;
if (flags & PROC_ALLPROCLIST) {
proc_t p;
ALLPROC_FOREACH(p) {
if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
continue;
}
-
- pid_list[pid_count++] = proc_pid(p);
- if (pid_count >= pid_count_available) {
+ pidlist_add_pid(pl, proc_pid(p));
+ if (++pid_count >= pid_count_available) {
break;
}
}
if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
continue;
}
-
- pid_list[pid_count++] = proc_pid(p);
- if (pid_count >= pid_count_available) {
+ pidlist_add_pid(pl, proc_pid(p));
+ if (++pid_count >= pid_count_available) {
break;
}
}
/* call callout on processes in the pid_list */
- for (int i = 0; i < pid_count; i++) {
- proc_t p = proc_find(pid_list[i]);
- if (p) {
- if ((flags & PROC_NOWAITTRANS) == 0) {
- proc_transwait(p, 0);
- }
- int callout_ret = callout(p, arg);
-
- switch (callout_ret) {
- case PROC_RETURNED_DONE:
- proc_rele(p);
- /* FALLTHROUGH */
- case PROC_CLAIMED_DONE:
- goto out;
-
- case PROC_RETURNED:
- proc_rele(p);
- /* FALLTHROUGH */
- case PROC_CLAIMED:
- break;
-
- default:
- panic("proc_iterate: callout returned %d for pid %d",
- callout_ret, pid_list[i]);
- break;
- }
- } else if (flags & PROC_ZOMBPROCLIST) {
- p = proc_find_zombref(pid_list[i]);
- if (!p) {
- continue;
- }
- int callout_ret = callout(p, arg);
-
- switch (callout_ret) {
- case PROC_RETURNED_DONE:
- proc_drop_zombref(p);
- /* FALLTHROUGH */
- case PROC_CLAIMED_DONE:
- goto out;
-
- case PROC_RETURNED:
- proc_drop_zombref(p);
- /* FALLTHROUGH */
- case PROC_CLAIMED:
- break;
-
- default:
- panic("proc_iterate: callout returned %d for zombie pid %d",
- callout_ret, pid_list[i]);
- break;
+ const pidlist_entry_t *pe;
+ SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
+ for (u_int i = 0; i < pe->pe_nused; i++) {
+ const pid_t pid = pe->pe_pid[i];
+ proc_t p = proc_find(pid);
+ if (p) {
+ if ((flags & PROC_NOWAITTRANS) == 0) {
+ proc_transwait(p, 0);
+ }
+ const int callout_ret = callout(p, arg);
+
+ switch (callout_ret) {
+ case PROC_RETURNED_DONE:
+ proc_rele(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED_DONE:
+ goto out;
+
+ case PROC_RETURNED:
+ proc_rele(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED:
+ break;
+ default:
+ panic("%s: callout =%d for pid %d",
+ __func__, callout_ret, pid);
+ break;
+ }
+ } else if (flags & PROC_ZOMBPROCLIST) {
+ p = proc_find_zombref(pid);
+ if (!p) {
+ continue;
+ }
+ const int callout_ret = callout(p, arg);
+
+ switch (callout_ret) {
+ case PROC_RETURNED_DONE:
+ proc_drop_zombref(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED_DONE:
+ goto out;
+
+ case PROC_RETURNED:
+ proc_drop_zombref(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED:
+ break;
+ default:
+ panic("%s: callout =%d for zombie %d",
+ __func__, callout_ret, pid);
+ break;
+ }
}
}
}
-
out:
- kfree(pid_list, pid_list_size);
- return 0;
+ pidlist_free(pl);
}
void
proc_list_unlock();
}
-int
+void
proc_childrenwalk(
proc_t parent,
proc_iterate_fn_t callout,
void *arg)
{
- pid_t *pid_list;
- vm_size_t pid_list_size = 0;
- vm_size_t pid_list_size_needed = 0;
- int pid_count = 0;
- int pid_count_available = 0;
+ pidlist_t pid_list, *pl = pidlist_init(&pid_list);
+ u_int pid_count_available = 0;
assert(parent != NULL);
assert(callout != NULL);
for (;;) {
proc_list_lock();
-
pid_count_available = parent->p_childrencnt;
if (pid_count_available == 0) {
proc_list_unlock();
- return 0;
+ goto out;
}
-
- pid_list_size_needed = pid_count_available * sizeof(pid_t);
- if (pid_list_size >= pid_list_size_needed) {
+ if (pidlist_nalloc(pl) > pid_count_available) {
break;
}
proc_list_unlock();
- if (pid_list_size != 0) {
- kfree(pid_list, pid_list_size);
- }
- pid_list = kalloc(pid_list_size_needed);
- if (!pid_list) {
- return 1;
- }
- pid_list_size = pid_list_size_needed;
+ pidlist_alloc(pl, pid_count_available);
}
+ pidlist_set_active(pl);
+ u_int pid_count = 0;
proc_t p;
PCHILDREN_FOREACH(parent, p) {
if (p->p_stat == SIDL) {
continue;
}
-
- pid_list[pid_count++] = proc_pid(p);
- if (pid_count >= pid_count_available) {
+ pidlist_add_pid(pl, proc_pid(p));
+ if (++pid_count >= pid_count_available) {
break;
}
}
proc_list_unlock();
- for (int i = 0; i < pid_count; i++) {
- p = proc_find(pid_list[i]);
- if (!p) {
- continue;
- }
-
- int callout_ret = callout(p, arg);
+ const pidlist_entry_t *pe;
+ SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
+ for (u_int i = 0; i < pe->pe_nused; i++) {
+ const pid_t pid = pe->pe_pid[i];
+ p = proc_find(pid);
+ if (!p) {
+ continue;
+ }
+ const int callout_ret = callout(p, arg);
- switch (callout_ret) {
- case PROC_RETURNED_DONE:
- proc_rele(p);
- /* FALLTHROUGH */
- case PROC_CLAIMED_DONE:
- goto out;
+ switch (callout_ret) {
+ case PROC_RETURNED_DONE:
+ proc_rele(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED_DONE:
+ goto out;
- case PROC_RETURNED:
- proc_rele(p);
- /* FALLTHROUGH */
- case PROC_CLAIMED:
- break;
- default:
- panic("proc_childrenwalk: callout returned %d for pid %d",
- callout_ret, pid_list[i]);
- break;
+ case PROC_RETURNED:
+ proc_rele(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED:
+ break;
+ default:
+ panic("%s: callout =%d for pid %d",
+ __func__, callout_ret, pid);
+ break;
+ }
}
}
-
out:
- kfree(pid_list, pid_list_size);
- return 0;
+ pidlist_free(pl);
}
-int
+void
pgrp_iterate(
struct pgrp *pgrp,
unsigned int flags,
proc_iterate_fn_t filterfn,
void * filterarg)
{
- pid_t *pid_list;
- proc_t p;
- vm_size_t pid_list_size = 0;
- vm_size_t pid_list_size_needed = 0;
- int pid_count = 0;
- int pid_count_available = 0;
-
- pid_t pgid;
+ pidlist_t pid_list, *pl = pidlist_init(&pid_list);
+ u_int pid_count_available = 0;
assert(pgrp != NULL);
assert(callout != NULL);
for (;;) {
pgrp_lock(pgrp);
-
pid_count_available = pgrp->pg_membercnt;
if (pid_count_available == 0) {
pgrp_unlock(pgrp);
- return 0;
+ if (flags & PGRP_DROPREF) {
+ pg_rele(pgrp);
+ }
+ goto out;
}
-
- pid_list_size_needed = pid_count_available * sizeof(pid_t);
- if (pid_list_size >= pid_list_size_needed) {
+ if (pidlist_nalloc(pl) > pid_count_available) {
break;
}
pgrp_unlock(pgrp);
- if (pid_list_size != 0) {
- kfree(pid_list, pid_list_size);
- }
- pid_list = kalloc(pid_list_size_needed);
- if (!pid_list) {
- return 1;
- }
- pid_list_size = pid_list_size_needed;
+ pidlist_alloc(pl, pid_count_available);
}
+ pidlist_set_active(pl);
- pgid = pgrp->pg_id;
-
+ const pid_t pgid = pgrp->pg_id;
+ u_int pid_count = 0;
+ proc_t p;
PGMEMBERS_FOREACH(pgrp, p) {
if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
continue;;
}
- pid_list[pid_count++] = proc_pid(p);
- if (pid_count >= pid_count_available) {
+ pidlist_add_pid(pl, proc_pid(p));
+ if (++pid_count >= pid_count_available) {
break;
}
}
pg_rele(pgrp);
}
- for (int i = 0; i < pid_count; i++) {
- /* do not handle kernproc */
- if (pid_list[i] == 0) {
- continue;
- }
- p = proc_find(pid_list[i]);
- if (!p) {
- continue;
- }
- if (p->p_pgrpid != pgid) {
- proc_rele(p);
- continue;
- }
-
- int callout_ret = callout(p, arg);
-
- switch (callout_ret) {
- case PROC_RETURNED:
- proc_rele(p);
- /* FALLTHROUGH */
- case PROC_CLAIMED:
- break;
+ const pidlist_entry_t *pe;
+ SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
+ for (u_int i = 0; i < pe->pe_nused; i++) {
+ const pid_t pid = pe->pe_pid[i];
+ if (0 == pid) {
+ continue; /* skip kernproc */
+ }
+ p = proc_find(pid);
+ if (!p) {
+ continue;
+ }
+ if (p->p_pgrpid != pgid) {
+ proc_rele(p);
+ continue;
+ }
+ const int callout_ret = callout(p, arg);
- case PROC_RETURNED_DONE:
- proc_rele(p);
- /* FALLTHROUGH */
- case PROC_CLAIMED_DONE:
- goto out;
+ switch (callout_ret) {
+ case PROC_RETURNED:
+ proc_rele(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED:
+ break;
+ case PROC_RETURNED_DONE:
+ proc_rele(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED_DONE:
+ goto out;
- default:
- panic("pgrp_iterate: callout returned %d for pid %d",
- callout_ret, pid_list[i]);
+ default:
+ panic("%s: callout =%d for pid %d",
+ __func__, callout_ret, pid);
+ }
}
}
out:
- kfree(pid_list, pid_list_size);
- return 0;
+ pidlist_free(pl);
}
static void
*/
proc_klist_lock();
while ((kn = SLIST_FIRST(&p->p_klist))) {
- kn->kn_ptr.p_proc = PROC_NULL;
+ kn->kn_proc = PROC_NULL;
KNOTE_DETACH(&p->p_klist, kn);
}
proc_klist_unlock();
return p->p_pgrpid;
}
+/*
+ * proc_sessionid
+ * Return the session id (s_sid) of p's session, taking and dropping a
+ * session reference around the read; -1 when p has no session.
+ */
+pid_t
+proc_sessionid(proc_t p)
+{
+ pid_t sid = -1;
+ struct session * sessp = proc_session(p);
+
+ if (sessp != SESSION_NULL) {
+ sid = sessp->s_sid;
+ session_rele(sessp);
+ }
+
+ return sid;
+}
+
pid_t
proc_selfpgrpid()
{
proc_dopcontrol(proc_t p)
{
int pcontrol;
+ os_reason_t kill_reason;
proc_lock(p);
PROC_SETACTION_STATE(p);
proc_unlock(p);
printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
- psignal(p, SIGKILL);
+ kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
+ psignal_with_reason(p, SIGKILL, kill_reason);
break;
default:
extern uint64_t vm_compressor_pages_compressed(void);
-struct timeval last_no_space_action = {0, 0};
+struct timeval last_no_space_action = {.tv_sec = 0, .tv_usec = 0};
#if DEVELOPMENT || DEBUG
extern boolean_t kill_on_no_paging_space;
proc_t p;
struct no_paging_space nps;
struct timeval now;
+ os_reason_t kill_reason;
/*
* Throttle how often we come through here. Once every 5 seconds should be plenty.
last_no_space_action = now;
printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.pcs_max_size / MB_SIZE));
- psignal(p, SIGKILL);
+ kill_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_LOWSWAP);
+ psignal_with_reason(p, SIGKILL, kill_reason);
proc_rele(p);
return FALSE;
}
+/*
+ * proc_get_syscall_filter_mask_size
+ * Return the required mask length (one entry per syscall, i.e. nsysent)
+ * for the given filter class; 0 for unrecognized classes.
+ */
+size_t
+proc_get_syscall_filter_mask_size(int which)
+{
+ if (which == SYSCALL_MASK_UNIX) {
+ return nsysent;
+ }
+
+ return 0;
+}
+
+/*
+ * proc_set_syscall_filter_mask
+ * Install 'maskptr' as p's UNIX syscall filter mask. A NULL maskptr
+ * clears the filter; a non-NULL mask must be exactly nsysent bytes.
+ * The pointer itself is stored, not copied -- NOTE(review): ownership
+ * appears to transfer to the proc; confirm who frees the old mask.
+ * NOTE(review): return values mix KERN_SUCCESS with EINVAL; both
+ * conventions use 0 for success, but callers should not interpret the
+ * failure value as a kern_return_t.
+ */
+int
+proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t masklen)
+{
+#if DEVELOPMENT || DEBUG
+ if (syscallfilter_disable) {
+ printf("proc_set_syscall_filter_mask: attempt to set policy for pid %d, but disabled by boot-arg\n", proc_pid(p));
+ return KERN_SUCCESS;
+ }
+#endif // DEVELOPMENT || DEBUG
+
+ if (which != SYSCALL_MASK_UNIX ||
+ (maskptr != NULL && masklen != nsysent)) {
+ return EINVAL;
+ }
+
+ p->syscall_filter_mask = maskptr;
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * proc_is_traced
+ * Return true when P_LTRACED is set in p->p_lflag (i.e. the process is
+ * being ptraced), sampling the flag under the proc lock. Asserts p is
+ * non-NULL.
+ */
+bool
+proc_is_traced(proc_t p)
+{
+ bool ret = FALSE;
+ assert(p != PROC_NULL);
+ proc_lock(p);
+ if (p->p_lflag & P_LTRACED) {
+ ret = TRUE;
+ }
+ proc_unlock(p);
+ return ret;
+}
+
#ifdef CONFIG_32BIT_TELEMETRY
void
proc_log_32bit_telemetry(proc_t p)
* Get proc name and parent proc name; if the parent execs, we'll get a
* garbled name.
*/
- bytes_printed = snprintf(signature_cur_end,
+ bytes_printed = scnprintf(signature_cur_end,
signature_buf_end - signature_cur_end,
"%s,%s,", p->p_name,
(p->p_pptr ? p->p_pptr->p_name : ""));
identity = "";
}
- bytes_printed = snprintf(signature_cur_end,
+ bytes_printed = scnprintf(signature_cur_end,
signature_buf_end - signature_cur_end,
"%s,%s", teamid, identity);