/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
-#include <machine/spl.h>
-
#include <sys/mount_internal.h>
#include <sys/sysproto.h>
#include <mach/mach_vm.h>
#include <mach/thread_act.h> /* for thread_policy_set( ) */
#include <kern/thread.h>
+#include <kern/policy_internal.h>
#include <kern/task.h>
#include <kern/clock.h> /* for absolutetime_to_microtime() */
#include <netinet/in.h> /* for TRAFFIC_MGT_SO_* */
#include <sys/socketvar.h> /* for struct socket */
+#if NECP
+#include <net/necp.h>
+#endif /* NECP */
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <sys/resource.h>
+#include <sys/priv.h>
+#include <IOKit/IOBSD.h>
+
+#if CONFIG_MACF
+#include <security/mac_framework.h>
+#endif
int donice(struct proc *curp, struct proc *chgp, int n);
int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
int uthread_get_background_state(uthread_t);
static void do_background_socket(struct proc *p, thread_t thread);
-static int do_background_thread(struct proc *curp, thread_t thread, int priority);
+static int do_background_thread(thread_t thread, int priority);
static int do_background_proc(struct proc *curp, struct proc *targetp, int priority);
static int set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority);
static int proc_set_darwin_role(proc_t curp, proc_t targetp, int priority);
static int proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority);
static int get_background_proc(struct proc *curp, struct proc *targetp, int *priority);
-void proc_apply_task_networkbg_internal(proc_t, thread_t);
-void proc_restore_task_networkbg_internal(proc_t, thread_t);
int proc_pid_rusage(int pid, int flavor, user_addr_t buf, int32_t *retval);
void gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor);
int fill_task_rusage(task_t task, rusage_info_current *ri);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
-static void rusage_info_conversion(rusage_info_t ri_info, rusage_info_current *ri_current, int flavor);
+uint64_t get_task_logical_writes(task_t task);
+void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
int proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie);
}
/* No need for iteration as it is a simple scan */
pgrp_lock(pg);
- for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
+ PGMEMBERS_FOREACH(pg, p) {
if (p->p_nice < low)
low = p->p_nice;
}
if (uap->who != 0)
return (EINVAL);
- low = proc_get_task_policy(current_task(), current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG);
+ low = proc_get_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG);
break;
if (uap->who != 0)
return (EINVAL);
- error = do_background_thread(curp, current_thread(), uap->prio);
+ error = do_background_thread(current_thread(), uap->prio);
found++;
break;
}
proc_set_darwin_role(proc_t curp, proc_t targetp, int priority)
{
int error = 0;
- uint32_t flagsp;
+ uint32_t flagsp = 0;
kauth_cred_t ucred, target_cred;
if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
- error = EPERM;
- goto out;
+ if (priv_check_cred(ucred, PRIV_SETPRIORITY_DARWIN_ROLE, 0) != 0) {
+ error = EPERM;
+ goto out;
+ }
}
if (curp != targetp) {
integer_t role = 0;
- switch (priority) {
- case PRIO_DARWIN_ROLE_DEFAULT:
- role = TASK_UNSPECIFIED;
- break;
- case PRIO_DARWIN_ROLE_UI_FOCAL:
- role = TASK_FOREGROUND_APPLICATION;
- break;
- case PRIO_DARWIN_ROLE_UI:
- role = TASK_BACKGROUND_APPLICATION;
- break;
- case PRIO_DARWIN_ROLE_NON_UI:
- role = TASK_NONUI_APPLICATION;
- break;
- default:
- error = EINVAL;
- goto out;
- }
+ if ((error = proc_darwin_role_to_task_role(priority, &role)))
+ goto out;
- proc_set_task_policy(proc_task(targetp), THREAD_NULL,
- TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, role);
+ proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE,
+ TASK_POLICY_ROLE, role);
out:
kauth_cred_unref(&target_cred);
#endif
}
- role = proc_get_task_policy(proc_task(targetp), THREAD_NULL,
- TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
+ role = proc_get_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
- switch (role) {
- case TASK_FOREGROUND_APPLICATION:
- *priority = PRIO_DARWIN_ROLE_UI_FOCAL;
- break;
- case TASK_BACKGROUND_APPLICATION:
- *priority = PRIO_DARWIN_ROLE_UI;
- break;
- case TASK_NONUI_APPLICATION:
- *priority = PRIO_DARWIN_ROLE_NON_UI;
- break;
- case TASK_UNSPECIFIED:
- default:
- *priority = PRIO_DARWIN_ROLE_DEFAULT;
- break;
- }
+ *priority = proc_task_role_to_darwin_role(role);
out:
kauth_cred_unref(&target_cred);
external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
- *priority = proc_get_task_policy(current_task(), THREAD_NULL, external, TASK_POLICY_DARWIN_BG);
+ *priority = proc_get_task_policy(current_task(), external, TASK_POLICY_DARWIN_BG);
out:
kauth_cred_unref(&target_cred);
break;
}
- proc_set_task_policy(proc_task(targetp), THREAD_NULL, external, TASK_POLICY_DARWIN_BG, enable);
+ proc_set_task_policy(proc_task(targetp), external, TASK_POLICY_DARWIN_BG, enable);
out:
kauth_cred_unref(&target_cred);
fdp = p->p_fd;
for (i = 0; i < fdp->fd_nfiles; i++) {
- struct socket *sockp;
-
fp = fdp->fd_ofiles[i];
- if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 ||
- FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) {
+ if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
continue;
}
- sockp = (struct socket *)fp->f_fglob->fg_data;
- socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
- sockp->so_background_thread = NULL;
+ if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) {
+ struct socket *sockp = (struct socket *)fp->f_fglob->fg_data;
+ socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
+ sockp->so_background_thread = NULL;
+ }
+#if NECP
+ else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY) {
+ necp_set_client_as_background(p, fp, background);
+ }
+#endif /* NECP */
}
}
} else {
struct socket *sockp;
fp = fdp->fd_ofiles[ i ];
- if ( fp == NULL || (fdp->fd_ofileflags[ i ] & UF_RESERVED) != 0 ||
- FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET ) {
+ if (fp == NULL || (fdp->fd_ofileflags[ i ] & UF_RESERVED) != 0) {
continue;
}
- sockp = (struct socket *)fp->f_fglob->fg_data;
- /* skip if only clearing this thread's sockets */
- if ((thread) && (sockp->so_background_thread != thread)) {
- continue;
+ if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) {
+ sockp = (struct socket *)fp->f_fglob->fg_data;
+ /* skip if only clearing this thread's sockets */
+ if ((thread) && (sockp->so_background_thread != thread)) {
+ continue;
+ }
+ socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
+ sockp->so_background_thread = NULL;
+ }
+#if NECP
+ else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY) {
+ necp_set_client_as_background(p, fp, background);
}
- socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
- sockp->so_background_thread = NULL;
+#endif /* NECP */
}
}
/*
* do_background_thread
+ *
+ * Requires: thread reference
+ *
* Returns: 0 Success
* EPERM Tried to background while in vfork
* XXX - todo - does this need a MACF hook?
*/
static int
-do_background_thread(struct proc *curp, thread_t thread, int priority)
+do_background_thread(thread_t thread, int priority)
{
struct uthread *ut;
int enable, external;
if ((ut->uu_flag & UT_VFORK) != 0)
return(EPERM);
+ /* Backgrounding is unsupported for workq threads */
if (thread_is_static_param(thread)) {
return(EPERM);
}
enable = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE;
external = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
- proc_set_task_policy_thread(curp->task, thread_tid(thread), external,
- TASK_POLICY_DARWIN_BG, enable);
+ proc_set_thread_policy(thread, external, TASK_POLICY_DARWIN_BG, enable);
return rv;
}
int
getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval)
{
- struct rlimit lim;
+ struct rlimit lim = {};
/*
* Take out flag now in case we need to use it to trigger variant
getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval)
{
struct rusage *rup, rubuf;
- struct user64_rusage rubuf64;
- struct user32_rusage rubuf32;
+ struct user64_rusage rubuf64 = {};
+ struct user32_rusage rubuf32 = {};
size_t retsize = sizeof(rubuf); /* default: 32 bits */
caddr_t retbuf = (caddr_t)&rubuf; /* default: 32 bits */
struct timeval utime;
return(0);
}
+static int
+iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
+static int
+iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
+static int
+iopolicysys_vfs_atime_updates(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
+
/*
* iopolicysys
*
* EINVAL Invalid command or invalid policy arguments
*
*/
-
-static int
-iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
-static int
-iopolicysys_vfs(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
-
int
iopolicysys(struct proc *p, struct iopolicysys_args *uap, int32_t *retval)
{
goto out;
break;
case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY:
- error = iopolicysys_vfs(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
+ error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
+ if (error)
+ goto out;
+ break;
+ case IOPOL_TYPE_VFS_ATIME_UPDATES:
+ error = iopolicysys_vfs_atime_updates(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
if (error)
goto out;
break;
break;
case IOPOL_SCOPE_DARWIN_BG:
+#if CONFIG_EMBEDDED
+ /* Embedded doesn't want this as BG is always IOPOL_THROTTLE */
+ error = ENOTSUP;
+ goto out;
+#else /* CONFIG_EMBEDDED */
thread = THREAD_NULL;
policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL;
break;
+#endif /* CONFIG_EMBEDDED */
default:
error = EINVAL;
/* Perform command */
switch(cmd) {
case IOPOL_CMD_SET:
- proc_set_task_policy(current_task(), thread,
- TASK_POLICY_INTERNAL, policy_flavor,
- policy);
+ if (thread != THREAD_NULL)
+ proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor, policy);
+ else
+ proc_set_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor, policy);
break;
case IOPOL_CMD_GET:
- policy = proc_get_task_policy(current_task(), thread,
- TASK_POLICY_INTERNAL, policy_flavor);
-
+ if (thread != THREAD_NULL)
+ policy = proc_get_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor);
+ else
+ policy = proc_get_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor);
iop_param->iop_policy = policy;
break;
default:
}
static int
-iopolicysys_vfs(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
+iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
int error = 0;
switch(cmd) {
case IOPOL_CMD_SET:
if (0 == kauth_cred_issuser(kauth_cred_get())) {
- error = EPERM;
- goto out;
+ /* If it's a non-root process, it needs to have the entitlement to set the policy */
+ boolean_t entitled = FALSE;
+ entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.iopol.case_sensitivity");
+ if (!entitled) {
+ error = EPERM;
+ goto out;
+ }
}
switch (policy) {
return (error);
}
-/* BSD call back function for task_policy */
-void proc_apply_task_networkbg(void * bsd_info, thread_t thread);
+static inline int
+get_thread_atime_policy(struct uthread *ut)
+{
+ return (ut->uu_flag & UT_ATIME_UPDATE)? IOPOL_ATIME_UPDATES_OFF: IOPOL_ATIME_UPDATES_DEFAULT;
+}
+
+static inline void
+set_thread_atime_policy(struct uthread *ut, int policy)
+{
+ if (policy == IOPOL_ATIME_UPDATES_OFF) {
+ ut->uu_flag |= UT_ATIME_UPDATE;
+ } else {
+ ut->uu_flag &= ~UT_ATIME_UPDATE;
+ }
+}
+
+static inline void
+set_task_atime_policy(struct proc *p, int policy)
+{
+ if (policy == IOPOL_ATIME_UPDATES_OFF) {
+ OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES, &p->p_vfs_iopolicy);
+ } else {
+ OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES), &p->p_vfs_iopolicy);
+ }
+}
+
+static inline int
+get_task_atime_policy(struct proc *p)
+{
+ return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES)? IOPOL_ATIME_UPDATES_OFF: IOPOL_ATIME_UPDATES_DEFAULT;
+}
+
+static int
+iopolicysys_vfs_atime_updates(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
+{
+ int error = 0;
+ thread_t thread;
+
+ /* Validate scope */
+ switch (scope) {
+ case IOPOL_SCOPE_THREAD:
+ thread = current_thread();
+ break;
+ case IOPOL_SCOPE_PROCESS:
+ thread = THREAD_NULL;
+ break;
+ default:
+ error = EINVAL;
+ goto out;
+ }
+
+ /* Validate policy */
+ if (cmd == IOPOL_CMD_SET) {
+ switch (policy) {
+ case IOPOL_ATIME_UPDATES_DEFAULT:
+ case IOPOL_ATIME_UPDATES_OFF:
+ break;
+ default:
+ error = EINVAL;
+ goto out;
+ }
+ }
+
+ /* Perform command */
+ switch(cmd) {
+ case IOPOL_CMD_SET:
+ if (thread != THREAD_NULL)
+ set_thread_atime_policy(get_bsdthread_info(thread), policy);
+ else
+ set_task_atime_policy(p, policy);
+ break;
+ case IOPOL_CMD_GET:
+ if (thread != THREAD_NULL)
+ policy = get_thread_atime_policy(get_bsdthread_info(thread));
+ else
+ policy = get_task_atime_policy(p);
+ iop_param->iop_policy = policy;
+ break;
+ default:
+ error = EINVAL; /* unknown command */
+ break;
+ }
+
+out:
+ return (error);
+}
+/* BSD call back function for task_policy networking changes */
void
proc_apply_task_networkbg(void * bsd_info, thread_t thread)
{
struct rusage_info_child *ri_child;
assert(p->p_stats != NULL);
+ memset(ru, 0, sizeof(*ru));
switch(flavor) {
+ case RUSAGE_INFO_V4:
+ ru->ri_logical_writes = get_task_logical_writes(p->task);
+ ru->ri_lifetime_max_phys_footprint = get_task_phys_footprint_lifetime_max(p->task);
+#if CONFIG_LEDGER_INTERVAL_MAX
+ ru->ri_interval_max_phys_footprint = get_task_phys_footprint_interval_max(p->task, FALSE);
+#endif
+ fill_task_monotonic_rusage(p->task, ru);
+ /* fall through */
case RUSAGE_INFO_V3:
fill_task_qos_rusage(p->task, ru);
}
}
-static void
-rusage_info_conversion(rusage_info_t ri_info, rusage_info_current *ri_current, int flavor)
-{
- struct rusage_info_v0 *ri_v0;
- struct rusage_info_v1 *ri_v1;
- struct rusage_info_v2 *ri_v2;
-
- switch (flavor) {
-
- case RUSAGE_INFO_V2:
- ri_v2 = (struct rusage_info_v2 *)ri_info;
- ri_v2->ri_diskio_bytesread = ri_current->ri_diskio_bytesread;
- ri_v2->ri_diskio_byteswritten = ri_current->ri_diskio_byteswritten;
- /* fall through */
-
- case RUSAGE_INFO_V1:
- ri_v1 = (struct rusage_info_v1 *)ri_info;
- ri_v1->ri_child_user_time = ri_current->ri_child_user_time;
- ri_v1->ri_child_system_time = ri_current->ri_child_system_time;
- ri_v1->ri_child_pkg_idle_wkups = ri_current->ri_child_pkg_idle_wkups;
- ri_v1->ri_child_interrupt_wkups = ri_current->ri_child_interrupt_wkups;
- ri_v1->ri_child_pageins = ri_current->ri_child_pageins;
- ri_v1->ri_child_elapsed_abstime = ri_current->ri_child_elapsed_abstime;
- /* fall through */
-
- case RUSAGE_INFO_V0:
- ri_v0 = (struct rusage_info_v0 *)ri_info;
- memcpy(&ri_v0->ri_uuid[0], &ri_current->ri_uuid[0], sizeof(ri_v0->ri_uuid));
- ri_v0->ri_user_time = ri_current->ri_user_time;
- ri_v0->ri_system_time = ri_current->ri_system_time;
- ri_v0->ri_pkg_idle_wkups = ri_current->ri_pkg_idle_wkups;
- ri_v0->ri_interrupt_wkups = ri_current->ri_interrupt_wkups;
- ri_v0->ri_pageins = ri_current->ri_pageins;
- ri_v0->ri_wired_size = ri_current->ri_wired_size;
- ri_v0->ri_resident_size = ri_current->ri_resident_size;
- ri_v0->ri_phys_footprint = ri_current->ri_phys_footprint;
- ri_v0->ri_proc_start_abstime = ri_current->ri_proc_start_abstime;
- ri_v0->ri_proc_exit_abstime = ri_current->ri_proc_exit_abstime;
-
- break;
-
- default:
- break;
- }
-}
-
-
/*
 * proc_get_rusage
 *
 * Copy resource-usage info for process p out to the user buffer.  Each
 * RUSAGE_INFO_Vn flavor is a strict prefix of rusage_info_current, so a
 * single gather into the current struct followed by a size-limited
 * copyout serves every flavor.
 *
 * Parameters:	p		Target process (live or zombie)
 *		flavor		RUSAGE_INFO_V0 .. RUSAGE_INFO_V4
 *		buffer		User address to copy the struct to
 *		is_zombie	Unused; liveness is derived from p->p_ru
 *
 * Returns:	0		Success
 *		EINVAL		Unknown flavor
 *	copyout:EFAULT		Bad user buffer
 */
int
proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie)
{
	rusage_info_current ri_current = {};
	int error = 0;
	size_t size = 0;

	switch (flavor) {
	case RUSAGE_INFO_V0:
		size = sizeof(struct rusage_info_v0);
		break;

	case RUSAGE_INFO_V1:
		size = sizeof(struct rusage_info_v1);
		break;

	case RUSAGE_INFO_V2:
		size = sizeof(struct rusage_info_v2);
		break;

	case RUSAGE_INFO_V3:
		size = sizeof(struct rusage_info_v3);
		break;

	case RUSAGE_INFO_V4:
		size = sizeof(struct rusage_info_v4);
		break;

	default:
		return EINVAL;
	}

	/*
	 * If the task is still alive, collect info from the live task itself.
	 * Otherwise, look to the cached info in the zombie proc.
	 */
	if (p->p_ru == NULL) {
		gather_rusage_info(p, &ri_current, flavor);
		ri_current.ri_proc_exit_abstime = 0; /* still running: no exit time */
		error = copyout(&ri_current, buffer, size);
	} else {
		error = copyout(&p->p_ru->ri, buffer, size);
	}

	return (error);
}
static int
* uap->flavor available flavors:
*
* RLIMIT_WAKEUPS_MONITOR
+ * RLIMIT_CPU_USAGE_MONITOR
+ * RLIMIT_THREAD_CPULIMITS
+ * RLIMIT_FOOTPRINT_INTERVAL
*/
int
proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *uap, __unused int32_t *retval)
uint32_t cpumon_flags;
uint32_t cpulimits_flags;
kauth_cred_t my_cred, target_cred;
+#if CONFIG_LEDGER_INTERVAL_MAX
+ uint32_t footprint_interval_flags;
+ uint64_t interval_max_footprint;
+#endif /* CONFIG_LEDGER_INTERVAL_MAX */
/* -1 implicitly means our own process (perhaps even the current thread for per-thread attributes) */
if (uap->pid == -1) {
error = mach_to_bsd_rv(thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ns_refill));
break;
+
+#if CONFIG_LEDGER_INTERVAL_MAX
+ case RLIMIT_FOOTPRINT_INTERVAL:
+ footprint_interval_flags = uap->arg; // XXX temporarily stashing flags in argp (12592127)
+ /*
+ * There is currently only one option for this flavor.
+ */
+ if ((footprint_interval_flags & FOOTPRINT_INTERVAL_RESET) == 0) {
+ error = EINVAL;
+ break;
+ }
+ interval_max_footprint = get_task_phys_footprint_interval_max(targetp->task, TRUE);
+ break;
+#endif /* CONFIG_LEDGER_INTERVAL_MAX */
default:
error = EINVAL;
break;
return (0);
}
+
+#if !MONOTONIC
+int thread_selfcounts(__unused struct proc *p, __unused struct thread_selfcounts_args *uap, __unused int *ret_out)
+{
+ return ENOTSUP;
+}
+#endif /* !MONOTONIC */