+ task_unlock(task);
+
+ if (host_priv != HOST_PRIV_NULL) {
+ kr = host_get_host_priv_port(host_priv, &host_port);
+ } else {
+ kr = host_get_host_port(host_priv_self(), &host_port);
+ }
+ assert(kr == KERN_SUCCESS);
+ kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
+ return(kr);
+}
+
+kern_return_t
+task_send_trace_memory(
+ __unused task_t target_task,
+ __unused uint32_t pid,
+ __unused uint64_t uniqueid)
+{
+ return KERN_INVALID_ARGUMENT;
+}
+
+/*
+ * This routine was added, pretty much exclusively, for registering the
+ * RPC glue vector for in-kernel short-circuited tasks. Rather than
+ * removing it completely, I have only disabled that feature (which was
+ * the only feature at the time). It just appears that we are going to
+ * want to add some user data to tasks in the future (e.g. bsd info,
+ * task names, etc...), so I left it in the formal task interface.
+ */
+kern_return_t
+task_set_info(
+ task_t task,
+ task_flavor_t flavor,
+ __unused task_info_t task_info_in, /* pointer to IN array */
+ __unused mach_msg_type_number_t task_info_count)
+{
+ if (task == TASK_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ switch (flavor) {
+
+#if CONFIG_ATM
+ case TASK_TRACE_MEMORY_INFO:
+ {
+ if (task_info_count != TASK_TRACE_MEMORY_INFO_COUNT)
+ return (KERN_INVALID_ARGUMENT);
+
+ assert(task_info_in != NULL);
+ task_trace_memory_info_t mem_info;
+ mem_info = (task_trace_memory_info_t) task_info_in;
+ kern_return_t kr = atm_register_trace_memory(task,
+ mem_info->user_memory_address,
+ mem_info->buffer_size);
+ return kr;
+ }
+
+#endif
+ default:
+ return (KERN_INVALID_ARGUMENT);
+ }
+ return (KERN_SUCCESS);
+}
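+
+/*
+ * Illustrative caller sketch (not part of this file): with CONFIG_ATM,
+ * a client registers a trace buffer by filling a task_trace_memory_info
+ * structure and passing it through task_set_info(). buf_addr/buf_size
+ * are placeholders for the caller's buffer:
+ *
+ * task_trace_memory_info_data_t mem_info;
+ * mem_info.user_memory_address = buf_addr;
+ * mem_info.buffer_size = buf_size;
+ * kern_return_t kr = task_set_info(task, TASK_TRACE_MEMORY_INFO,
+ *     (task_info_t)&mem_info, TASK_TRACE_MEMORY_INFO_COUNT);
+ */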
+
+int radar_20146450 = 1;
+kern_return_t
+task_info(
+ task_t task,
+ task_flavor_t flavor,
+ task_info_t task_info_out,
+ mach_msg_type_number_t *task_info_count)
+{
+ kern_return_t error = KERN_SUCCESS;
+ mach_msg_type_number_t original_task_info_count;
+
+ if (task == TASK_NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ original_task_info_count = *task_info_count;
+ task_lock(task);
+
+ if ((task != current_task()) && (!task->active)) {
+ task_unlock(task);
+ return (KERN_INVALID_ARGUMENT);
+ }
+
+ switch (flavor) {
+
+ case TASK_BASIC_INFO_32:
+ case TASK_BASIC2_INFO_32:
+#if defined(__arm__) || defined(__arm64__)
+ case TASK_BASIC_INFO_64:
+#endif
+ {
+ task_basic_info_32_t basic_info;
+ vm_map_t map;
+ clock_sec_t secs;
+ clock_usec_t usecs;
+
+ if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ basic_info = (task_basic_info_32_t)task_info_out;
+
+ map = (task == kernel_task)? kernel_map: task->map;
+ basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
+ if (flavor == TASK_BASIC2_INFO_32) {
+ /*
+ * The "BASIC2" flavor gets the maximum resident
+ * size instead of the current resident size...
+ */
+ basic_info->resident_size = pmap_resident_max(map->pmap);
+ } else {
+ basic_info->resident_size = pmap_resident_count(map->pmap);
+ }
+ basic_info->resident_size *= PAGE_SIZE;
+
+ basic_info->policy = ((task != kernel_task)?
+ POLICY_TIMESHARE: POLICY_RR);
+ basic_info->suspend_count = task->user_stop_count;
+
+ absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
+ basic_info->user_time.seconds =
+ (typeof(basic_info->user_time.seconds))secs;
+ basic_info->user_time.microseconds = usecs;
+
+ absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
+ basic_info->system_time.seconds =
+ (typeof(basic_info->system_time.seconds))secs;
+ basic_info->system_time.microseconds = usecs;
+
+ *task_info_count = TASK_BASIC_INFO_32_COUNT;
+ break;
+ }
+
+#if defined(__arm__) || defined(__arm64__)
+ case TASK_BASIC_INFO_64_2:
+ {
+ task_basic_info_64_2_t basic_info;
+ vm_map_t map;
+ clock_sec_t secs;
+ clock_usec_t usecs;
+
+ if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ basic_info = (task_basic_info_64_2_t)task_info_out;
+
+ map = (task == kernel_task)? kernel_map: task->map;
+ basic_info->virtual_size = map->size;
+ basic_info->resident_size =
+ (mach_vm_size_t)(pmap_resident_count(map->pmap))
+ * PAGE_SIZE_64;
+
+ basic_info->policy = ((task != kernel_task)?
+ POLICY_TIMESHARE: POLICY_RR);
+ basic_info->suspend_count = task->user_stop_count;
+
+ absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
+ basic_info->user_time.seconds =
+ (typeof(basic_info->user_time.seconds))secs;
+ basic_info->user_time.microseconds = usecs;
+
+ absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
+ basic_info->system_time.seconds =
+ (typeof(basic_info->system_time.seconds))secs;
+ basic_info->system_time.microseconds = usecs;
+
+ *task_info_count = TASK_BASIC_INFO_64_2_COUNT;
+ break;
+ }
+
+#else /* defined(__arm__) || defined(__arm64__) */
+ case TASK_BASIC_INFO_64:
+ {
+ task_basic_info_64_t basic_info;
+ vm_map_t map;
+ clock_sec_t secs;
+ clock_usec_t usecs;
+
+ if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ basic_info = (task_basic_info_64_t)task_info_out;
+
+ map = (task == kernel_task)? kernel_map: task->map;
+ basic_info->virtual_size = map->size;
+ basic_info->resident_size =
+ (mach_vm_size_t)(pmap_resident_count(map->pmap))
+ * PAGE_SIZE_64;
+
+ basic_info->policy = ((task != kernel_task)?
+ POLICY_TIMESHARE: POLICY_RR);
+ basic_info->suspend_count = task->user_stop_count;
+
+ absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
+ basic_info->user_time.seconds =
+ (typeof(basic_info->user_time.seconds))secs;
+ basic_info->user_time.microseconds = usecs;
+
+ absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
+ basic_info->system_time.seconds =
+ (typeof(basic_info->system_time.seconds))secs;
+ basic_info->system_time.microseconds = usecs;
+
+ *task_info_count = TASK_BASIC_INFO_64_COUNT;
+ break;
+ }
+#endif /* defined(__arm__) || defined(__arm64__) */
+
+ case MACH_TASK_BASIC_INFO:
+ {
+ mach_task_basic_info_t basic_info;
+ vm_map_t map;
+ clock_sec_t secs;
+ clock_usec_t usecs;
+
+ if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ basic_info = (mach_task_basic_info_t)task_info_out;
+
+ map = (task == kernel_task) ? kernel_map : task->map;
+
+ basic_info->virtual_size = map->size;
+
+ basic_info->resident_size =
+ (mach_vm_size_t)(pmap_resident_count(map->pmap));
+ basic_info->resident_size *= PAGE_SIZE_64;
+
+ basic_info->resident_size_max =
+ (mach_vm_size_t)(pmap_resident_max(map->pmap));
+ basic_info->resident_size_max *= PAGE_SIZE_64;
+
+ basic_info->policy = ((task != kernel_task) ?
+ POLICY_TIMESHARE : POLICY_RR);
+
+ basic_info->suspend_count = task->user_stop_count;
+
+ absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
+ basic_info->user_time.seconds =
+ (typeof(basic_info->user_time.seconds))secs;
+ basic_info->user_time.microseconds = usecs;
+
+ absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
+ basic_info->system_time.seconds =
+ (typeof(basic_info->system_time.seconds))secs;
+ basic_info->system_time.microseconds = usecs;
+
+ *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
+ break;
+ }
+
+ case TASK_THREAD_TIMES_INFO:
+ {
+ task_thread_times_info_t times_info;
+ thread_t thread;
+
+ if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ times_info = (task_thread_times_info_t) task_info_out;
+ times_info->user_time.seconds = 0;
+ times_info->user_time.microseconds = 0;
+ times_info->system_time.seconds = 0;
+ times_info->system_time.microseconds = 0;
+
+
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ time_value_t user_time, system_time;
+
+ if (thread->options & TH_OPT_IDLE_THREAD)
+ continue;
+
+ thread_read_times(thread, &user_time, &system_time, NULL);
+
+ time_value_add(&times_info->user_time, &user_time);
+ time_value_add(&times_info->system_time, &system_time);
+ }
+
+ *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
+ break;
+ }
+
+ case TASK_ABSOLUTETIME_INFO:
+ {
+ task_absolutetime_info_t info;
+ thread_t thread;
+
+ if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ info = (task_absolutetime_info_t)task_info_out;
+ info->threads_user = info->threads_system = 0;
+
+
+ info->total_user = task->total_user_time;
+ info->total_system = task->total_system_time;
+
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ uint64_t tval;
+ spl_t x;
+
+ if (thread->options & TH_OPT_IDLE_THREAD)
+ continue;
+
+ x = splsched();
+ thread_lock(thread);
+
+ tval = timer_grab(&thread->user_timer);
+ info->threads_user += tval;
+ info->total_user += tval;
+
+ tval = timer_grab(&thread->system_timer);
+ if (thread->precise_user_kernel_time) {
+ info->threads_system += tval;
+ info->total_system += tval;
+ } else {
+ /* system_timer may represent either sys or user */
+ info->threads_user += tval;
+ info->total_user += tval;
+ }
+
+ thread_unlock(thread);
+ splx(x);
+ }
+
+
+ *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
+ break;
+ }
+
+ case TASK_DYLD_INFO:
+ {
+ task_dyld_info_t info;
+
+ /*
+ * We added the format field to TASK_DYLD_INFO output. For
+ * temporary backward compatibility, accept the fact that
+ * clients may ask for the old version - distinguished by the
+ * size of the expected result structure.
+ */
+#define TASK_LEGACY_DYLD_INFO_COUNT \
+ offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
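+
+/*
+ * For illustration: with the packed task_dyld_info layout (two 64-bit
+ * fields followed by the 32-bit format field), the legacy count works
+ * out to 16 / sizeof(natural_t) = 4 words, while TASK_DYLD_INFO_COUNT
+ * covers the full structure (5 words).
+ */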
+
+ if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ info = (task_dyld_info_t)task_info_out;
+ info->all_image_info_addr = task->all_image_info_addr;
+ info->all_image_info_size = task->all_image_info_size;
+
+ /* only set format on output for those expecting it */
+ if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
+ info->all_image_info_format = task_has_64Bit_addr(task) ?
+ TASK_DYLD_ALL_IMAGE_INFO_64 :
+ TASK_DYLD_ALL_IMAGE_INFO_32;
+ *task_info_count = TASK_DYLD_INFO_COUNT;
+ } else {
+ *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
+ }
+ break;
+ }
+
+ case TASK_EXTMOD_INFO:
+ {
+ task_extmod_info_t info;
+ void *p;
+
+ if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ info = (task_extmod_info_t)task_info_out;
+
+ p = get_bsdtask_info(task);
+ if (p) {
+ proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
+ } else {
+ bzero(info->task_uuid, sizeof(info->task_uuid));
+ }
+ info->extmod_statistics = task->extmod_statistics;
+ *task_info_count = TASK_EXTMOD_INFO_COUNT;
+
+ break;
+ }
+
+ case TASK_KERNELMEMORY_INFO:
+ {
+ task_kernelmemory_info_t tkm_info;
+ ledger_amount_t credit, debit;
+
+ if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ tkm_info = (task_kernelmemory_info_t) task_info_out;
+ tkm_info->total_palloc = 0;
+ tkm_info->total_pfree = 0;
+ tkm_info->total_salloc = 0;
+ tkm_info->total_sfree = 0;
+
+ if (task == kernel_task) {
+ /*
+ * All shared allocs/frees from other tasks count against
+ * the kernel private memory usage. If we are looking up
+ * info for the kernel task, gather from everywhere.
+ */
+ task_unlock(task);
+
+ /* start by accounting for all the terminated tasks against the kernel */
+ tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
+ tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
+
+ /* count all other task/thread shared alloc/free against the kernel */
+ lck_mtx_lock(&tasks_threads_lock);
+
+ /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
+ queue_iterate(&tasks, task, task_t, tasks) {
+ if (task == kernel_task) {
+ if (ledger_get_entries(task->ledger,
+ task_ledgers.tkm_private, &credit,
+ &debit) == KERN_SUCCESS) {
+ tkm_info->total_palloc += credit;
+ tkm_info->total_pfree += debit;
+ }
+ }
+ if (!ledger_get_entries(task->ledger,
+ task_ledgers.tkm_shared, &credit, &debit)) {
+ tkm_info->total_palloc += credit;
+ tkm_info->total_pfree += debit;
+ }
+ }
+ lck_mtx_unlock(&tasks_threads_lock);
+ } else {
+ if (!ledger_get_entries(task->ledger,
+ task_ledgers.tkm_private, &credit, &debit)) {
+ tkm_info->total_palloc = credit;
+ tkm_info->total_pfree = debit;
+ }
+ if (!ledger_get_entries(task->ledger,
+ task_ledgers.tkm_shared, &credit, &debit)) {
+ tkm_info->total_salloc = credit;
+ tkm_info->total_sfree = debit;
+ }
+ task_unlock(task);
+ }
+
+ *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
+ return KERN_SUCCESS;
+ }
+
+ /* OBSOLETE */
+ case TASK_SCHED_FIFO_INFO:
+ {
+
+ if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ error = KERN_INVALID_POLICY;
+ break;
+ }
+
+ /* OBSOLETE */
+ case TASK_SCHED_RR_INFO:
+ {
+ policy_rr_base_t rr_base;
+ uint32_t quantum_time;
+ uint64_t quantum_ns;
+
+ if (*task_info_count < POLICY_RR_BASE_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ rr_base = (policy_rr_base_t) task_info_out;
+
+ if (task != kernel_task) {
+ error = KERN_INVALID_POLICY;
+ break;
+ }
+
+ rr_base->base_priority = task->priority;
+
+ quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
+ absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
+
+ rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
+
+ *task_info_count = POLICY_RR_BASE_COUNT;
+ break;
+ }
+
+ /* OBSOLETE */
+ case TASK_SCHED_TIMESHARE_INFO:
+ {
+ policy_timeshare_base_t ts_base;
+
+ if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ ts_base = (policy_timeshare_base_t) task_info_out;
+
+ if (task == kernel_task) {
+ error = KERN_INVALID_POLICY;
+ break;
+ }
+
+ ts_base->base_priority = task->priority;
+
+ *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
+ break;
+ }
+
+ case TASK_SECURITY_TOKEN:
+ {
+ security_token_t *sec_token_p;
+
+ if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ sec_token_p = (security_token_t *) task_info_out;
+
+ *sec_token_p = task->sec_token;
+
+ *task_info_count = TASK_SECURITY_TOKEN_COUNT;
+ break;
+ }
+
+ case TASK_AUDIT_TOKEN:
+ {
+ audit_token_t *audit_token_p;
+
+ if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ audit_token_p = (audit_token_t *) task_info_out;
+
+ *audit_token_p = task->audit_token;
+
+ *task_info_count = TASK_AUDIT_TOKEN_COUNT;
+ break;
+ }
+
+ case TASK_SCHED_INFO:
+ error = KERN_INVALID_ARGUMENT;
+ break;
+
+ case TASK_EVENTS_INFO:
+ {
+ task_events_info_t events_info;
+ thread_t thread;
+
+ if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ events_info = (task_events_info_t) task_info_out;
+
+
+ events_info->faults = task->faults;
+ events_info->pageins = task->pageins;
+ events_info->cow_faults = task->cow_faults;
+ events_info->messages_sent = task->messages_sent;
+ events_info->messages_received = task->messages_received;
+ events_info->syscalls_mach = task->syscalls_mach;
+ events_info->syscalls_unix = task->syscalls_unix;
+
+ events_info->csw = task->c_switch;
+
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ events_info->csw += thread->c_switch;
+ events_info->syscalls_mach += thread->syscalls_mach;
+ events_info->syscalls_unix += thread->syscalls_unix;
+ }
+
+
+ *task_info_count = TASK_EVENTS_INFO_COUNT;
+ break;
+ }
+ case TASK_AFFINITY_TAG_INFO:
+ {
+ if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ error = task_affinity_info(task, task_info_out, task_info_count);
+ break;
+ }
+ case TASK_POWER_INFO:
+ {
+ if (*task_info_count < TASK_POWER_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL);
+ break;
+ }
+
+ case TASK_POWER_INFO_V2:
+ {
+ if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+ task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
+ task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2);
+ break;
+ }
+
+ case TASK_VM_INFO:
+ case TASK_VM_INFO_PURGEABLE:
+ {
+ task_vm_info_t vm_info;
+ vm_map_t map;
+
+ if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ vm_info = (task_vm_info_t)task_info_out;
+
+ if (task == kernel_task) {
+ map = kernel_map;
+ /* no lock */
+ } else {
+ map = task->map;
+ vm_map_lock_read(map);
+ }
+
+ vm_info->virtual_size = (typeof(vm_info->virtual_size))map->size;
+ vm_info->region_count = map->hdr.nentries;
+ vm_info->page_size = vm_map_page_size(map);
+
+ vm_info->resident_size = pmap_resident_count(map->pmap);
+ vm_info->resident_size *= PAGE_SIZE;
+ vm_info->resident_size_peak = pmap_resident_max(map->pmap);
+ vm_info->resident_size_peak *= PAGE_SIZE;
+
+#define _VM_INFO(_name) \
+ vm_info->_name = ((mach_vm_size_t) map->pmap->stats._name) * PAGE_SIZE
+
+ _VM_INFO(device);
+ _VM_INFO(device_peak);
+ _VM_INFO(external);
+ _VM_INFO(external_peak);
+ _VM_INFO(internal);
+ _VM_INFO(internal_peak);
+ _VM_INFO(reusable);
+ _VM_INFO(reusable_peak);
+ _VM_INFO(compressed);
+ _VM_INFO(compressed_peak);
+ _VM_INFO(compressed_lifetime);
+
+ vm_info->purgeable_volatile_pmap = 0;
+ vm_info->purgeable_volatile_resident = 0;
+ vm_info->purgeable_volatile_virtual = 0;
+ if (task == kernel_task) {
+ /*
+ * We do not maintain the detailed stats for the
+ * kernel_pmap, so just count everything as
+ * "internal"...
+ */
+ vm_info->internal = vm_info->resident_size;
+ /*
+ * ... but since the memory held by the VM compressor
+ * in the kernel address space ought to be attributed
+ * to user-space tasks, we subtract it from "internal"
+ * to give memory reporting tools a more accurate idea
+ * of what the kernel itself is actually using, instead
+ * of making it look like the kernel is leaking memory
+ * when the system is under memory pressure.
+ */
+ vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
+ PAGE_SIZE);
+ } else {
+ mach_vm_size_t volatile_virtual_size;
+ mach_vm_size_t volatile_resident_size;
+ mach_vm_size_t volatile_compressed_size;
+ mach_vm_size_t volatile_pmap_size;
+ mach_vm_size_t volatile_compressed_pmap_size;
+ kern_return_t kr;
+
+ if (flavor == TASK_VM_INFO_PURGEABLE) {
+ kr = vm_map_query_volatile(
+ map,
+ &volatile_virtual_size,
+ &volatile_resident_size,
+ &volatile_compressed_size,
+ &volatile_pmap_size,
+ &volatile_compressed_pmap_size);
+ if (kr == KERN_SUCCESS) {
+ vm_info->purgeable_volatile_pmap =
+ volatile_pmap_size;
+ if (radar_20146450) {
+ vm_info->compressed -=
+ volatile_compressed_pmap_size;
+ }
+ vm_info->purgeable_volatile_resident =
+ volatile_resident_size;
+ vm_info->purgeable_volatile_virtual =
+ volatile_virtual_size;
+ }
+ }
+ }
+ *task_info_count = TASK_VM_INFO_REV0_COUNT;
+
+ if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
+ vm_info->phys_footprint =
+ (mach_vm_size_t) get_task_phys_footprint(task);
+ *task_info_count = TASK_VM_INFO_REV1_COUNT;
+ }
+ if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
+ vm_info->min_address = map->min_offset;
+ vm_info->max_address = map->max_offset;
+ *task_info_count = TASK_VM_INFO_REV2_COUNT;
+ }
+
+ if (task != kernel_task) {
+ vm_map_unlock_read(map);
+ }
+
+ break;
+ }
+
+ case TASK_WAIT_STATE_INFO:
+ {
+ /*
+ * Deprecated flavor. Currently allowing some results until all users
+ * stop calling it. The results may not be accurate.
+ */
+ task_wait_state_info_t wait_state_info;
+ uint64_t total_sfi_ledger_val = 0;
+
+ if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ wait_state_info = (task_wait_state_info_t) task_info_out;
+
+ wait_state_info->total_wait_state_time = 0;
+ bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
+
+#if CONFIG_SCHED_SFI
+ int i, prev_lentry = -1;
+ int64_t val_credit, val_debit;
+
+ for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
+ val_credit = 0;
+ /*
+ * Checking prev_lentry against the current entry ensures that adjacent
+ * classes which share the same ledger do not add wait times twice.
+ * Note: use ledger_get_entries() to get data for each individual SFI class.
+ */
+ if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
+ KERN_SUCCESS == ledger_get_entries(task->ledger,
+ task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
+ total_sfi_ledger_val += val_credit;
+ }
+ prev_lentry = task_ledgers.sfi_wait_times[i];
+ }
+
+#endif /* CONFIG_SCHED_SFI */
+ wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
+ *task_info_count = TASK_WAIT_STATE_INFO_COUNT;
+
+ break;
+ }
+ case TASK_VM_INFO_PURGEABLE_ACCOUNT:
+ {
+#if DEVELOPMENT || DEBUG
+ pvm_account_info_t acnt_info;
+
+ if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ if (task_info_out == NULL) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ acnt_info = (pvm_account_info_t) task_info_out;
+
+ error = vm_purgeable_account(task, acnt_info);
+
+ *task_info_count = PVM_ACCOUNT_INFO_COUNT;
+
+ break;
+#else /* DEVELOPMENT || DEBUG */
+ error = KERN_NOT_SUPPORTED;
+ break;
+#endif /* DEVELOPMENT || DEBUG */
+ }
+ case TASK_FLAGS_INFO:
+ {
+ task_flags_info_t flags_info;
+
+ if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ flags_info = (task_flags_info_t)task_info_out;
+
+ /* only publish the 64-bit flag of the task */
+ flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
+
+ *task_info_count = TASK_FLAGS_INFO_COUNT;
+ break;
+ }
+
+ case TASK_DEBUG_INFO_INTERNAL:
+ {
+#if DEVELOPMENT || DEBUG
+ task_debug_info_internal_t dbg_info;
+ if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
+ error = KERN_NOT_SUPPORTED;
+ break;
+ }
+
+ if (task_info_out == NULL) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+ dbg_info = (task_debug_info_internal_t) task_info_out;
+ dbg_info->ipc_space_size = 0;
+ if (task->itk_space){
+ dbg_info->ipc_space_size = task->itk_space->is_table_size;
+ }
+
+ dbg_info->suspend_count = task->suspend_count;
+
+ error = KERN_SUCCESS;
+ *task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
+ break;
+#else /* DEVELOPMENT || DEBUG */
+ error = KERN_NOT_SUPPORTED;
+ break;
+#endif /* DEVELOPMENT || DEBUG */
+ }
+ default:
+ error = KERN_INVALID_ARGUMENT;
+ }
+
+ task_unlock(task);
+ return (error);
+}
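+
+/*
+ * Illustrative (user-space) usage sketch, not part of this file: the
+ * flavors above are reached through the MIG-generated task_info() call,
+ * e.g. to fetch MACH_TASK_BASIC_INFO for the current task:
+ *
+ * mach_task_basic_info_data_t info;
+ * mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
+ * kern_return_t kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
+ *     (task_info_t)&info, &count);
+ *
+ * On KERN_SUCCESS, count holds the number of natural_t words returned.
+ */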
+
+/*
+ * task_info_from_user
+ *
+ * When task_info is called from user space, this function
+ * is executed as the MIG server-side routine instead of
+ * calling directly into task_info. This makes it possible
+ * to perform additional security checks on task_port.
+ *
+ * In the case of TASK_DYLD_INFO, we require the more
+ * privileged task_port, not the less-privileged task_name_port.
+ *
+ */
+kern_return_t
+task_info_from_user(
+ mach_port_t task_port,
+ task_flavor_t flavor,
+ task_info_t task_info_out,
+ mach_msg_type_number_t *task_info_count)
+{
+ task_t task;
+ kern_return_t ret;
+
+ if (flavor == TASK_DYLD_INFO)
+ task = convert_port_to_task(task_port);
+ else
+ task = convert_port_to_task_name(task_port);
+
+ ret = task_info(task, flavor, task_info_out, task_info_count);
+
+ task_deallocate(task);
+
+ return ret;
+}
+
+/*
+ * task_power_info
+ *
+ * Returns power stats for the task.
+ * Note: Called with task locked.
+ */
+void
+task_power_info_locked(
+ task_t task,
+ task_power_info_t info,
+ gpu_energy_data_t ginfo,
+ task_power_info_v2_t infov2)
+{
+ thread_t thread;
+ ledger_amount_t tmp;
+
+ task_lock_assert_owned(task);
+
+ ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
+ (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
+ ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
+ (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
+
+ info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
+ info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
+
+ info->total_user = task->total_user_time;
+ info->total_system = task->total_system_time;
+
+#if CONFIG_EMBEDDED
+ if (infov2) {
+ infov2->task_energy = task->task_energy;
+ }
+#endif
+
+ if (ginfo) {
+ ginfo->task_gpu_utilisation = task->task_gpu_ns;
+ }
+
+ if (infov2) {
+ infov2->task_ptime = task->total_ptime;
+ infov2->task_pset_switches = task->ps_switch;
+ }
+
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ uint64_t tval;
+ spl_t x;
+
+ if (thread->options & TH_OPT_IDLE_THREAD)
+ continue;
+
+ x = splsched();
+ thread_lock(thread);
+
+ info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
+ info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
+
+#if CONFIG_EMBEDDED
+ if (infov2) {
+ infov2->task_energy += ml_energy_stat(thread);
+ }
+#endif
+
+ tval = timer_grab(&thread->user_timer);
+ info->total_user += tval;
+
+ if (infov2) {
+ tval = timer_grab(&thread->ptime);
+ infov2->task_ptime += tval;
+ infov2->task_pset_switches += thread->ps_switch;
+ }
+
+ tval = timer_grab(&thread->system_timer);
+ if (thread->precise_user_kernel_time) {
+ info->total_system += tval;
+ } else {
+ /* system_timer may represent either sys or user */
+ info->total_user += tval;
+ }
+
+ if (ginfo) {
+ ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
+ }
+ thread_unlock(thread);
+ splx(x);
+ }
+}
+
+/*
+ * task_gpu_utilisation
+ *
+ * Returns the total gpu time used by all the threads of the task
+ * (both dead and alive)
+ */
+uint64_t
+task_gpu_utilisation(
+ task_t task)
+{
+ uint64_t gpu_time = 0;
+#if !CONFIG_EMBEDDED
+ thread_t thread;
+
+ task_lock(task);
+ gpu_time += task->task_gpu_ns;
+
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ spl_t x;
+ x = splsched();
+ thread_lock(thread);
+ gpu_time += ml_gpu_stat(thread);
+ thread_unlock(thread);
+ splx(x);
+ }
+
+ task_unlock(task);
+#else /* CONFIG_EMBEDDED */
+ /* silence compiler warning */
+ (void)task;
+#endif /* !CONFIG_EMBEDDED */
+ return gpu_time;
+}
+
+/*
+ * task_energy
+ *
+ * Returns the total energy used by all the threads of the task
+ * (both dead and alive)
+ */
+uint64_t
+task_energy(
+ task_t task)
+{
+ uint64_t energy = 0;
+ thread_t thread;
+
+ task_lock(task);
+ energy += task->task_energy;
+
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ spl_t x;
+ x = splsched();
+ thread_lock(thread);
+ energy += ml_energy_stat(thread);
+ thread_unlock(thread);
+ splx(x);
+ }
+
+ task_unlock(task);
+ return energy;
+}
+
+
+uint64_t
+task_cpu_ptime(
+ __unused task_t task)
+{
+ return 0;
+}
+
+
+/*
+ * This function updates the cpu time in the arrays for each
+ * effective and requested QoS class.
+ */
+void
+task_update_cpu_time_qos_stats(
+ task_t task,
+ uint64_t *eqos_stats,
+ uint64_t *rqos_stats)
+{
+ if (!eqos_stats && !rqos_stats) {
+ return;
+ }
+
+ task_lock(task);
+ thread_t thread;
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ if (thread->options & TH_OPT_IDLE_THREAD) {
+ continue;
+ }
+
+ thread_update_qos_cpu_time(thread);
+ }
+
+ if (eqos_stats) {
+ eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
+ eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
+ eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
+ eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
+ eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
+ eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
+ eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
+ }
+
+ if (rqos_stats) {
+ rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
+ rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
+ rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
+ rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
+ rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
+ rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
+ rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
+ }
+
+ task_unlock(task);
+}
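+
+/*
+ * Illustrative caller sketch (not part of this file): callers supply
+ * arrays indexed by QoS class, e.g.:
+ *
+ * uint64_t eqos_cpu[THREAD_QOS_LAST] = {0};
+ * task_update_cpu_time_qos_stats(task, eqos_cpu, NULL);
+ *
+ * Passing NULL for either array skips that accumulation.
+ */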
+
+kern_return_t
+task_purgable_info(
+ task_t task,
+ task_purgable_info_t *stats)
+{
+ if (task == TASK_NULL || stats == NULL)
+ return KERN_INVALID_ARGUMENT;
+ /* Take task reference */
+ task_reference(task);
+ vm_purgeable_stats((vm_purgeable_info_t)stats, task);
+ /* Drop task reference */
+ task_deallocate(task);
+ return KERN_SUCCESS;
+}
+
+void
+task_vtimer_set(
+ task_t task,
+ integer_t which)
+{
+ thread_t thread;
+ spl_t x;
+
+ task_lock(task);
+
+ task->vtimers |= which;
+
+ switch (which) {
+
+ case TASK_VTIMER_USER:
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ x = splsched();
+ thread_lock(thread);
+ if (thread->precise_user_kernel_time)
+ thread->vtimer_user_save = timer_grab(&thread->user_timer);
+ else
+ thread->vtimer_user_save = timer_grab(&thread->system_timer);
+ thread_unlock(thread);
+ splx(x);
+ }
+ break;
+
+ case TASK_VTIMER_PROF:
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ x = splsched();
+ thread_lock(thread);
+ thread->vtimer_prof_save = timer_grab(&thread->user_timer);
+ thread->vtimer_prof_save += timer_grab(&thread->system_timer);
+ thread_unlock(thread);
+ splx(x);
+ }
+ break;
+
+ case TASK_VTIMER_RLIM:
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ x = splsched();
+ thread_lock(thread);
+ thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
+ thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
+ thread_unlock(thread);
+ splx(x);
+ }
+ break;
+ }
+
+ task_unlock(task);
+}
+
+void
+task_vtimer_clear(
+ task_t task,
+ integer_t which)
+{
+ assert(task == current_task());
+
+ task_lock(task);
+
+ task->vtimers &= ~which;
+
+ task_unlock(task);
+}
+
+void
+task_vtimer_update(
+__unused
+ task_t task,
+ integer_t which,
+ uint32_t *microsecs)
+{
+ thread_t thread = current_thread();
+ uint32_t tdelt = 0;
+ clock_sec_t secs = 0;
+ uint64_t tsum;
+
+ assert(task == current_task());
+
+ spl_t s = splsched();
+ thread_lock(thread);
+
+ if ((task->vtimers & which) != (uint32_t)which) {
+ thread_unlock(thread);
+ splx(s);
+ return;
+ }
+
+ switch (which) {
+
+ case TASK_VTIMER_USER:
+ if (thread->precise_user_kernel_time) {
+ tdelt = (uint32_t)timer_delta(&thread->user_timer,
+ &thread->vtimer_user_save);
+ } else {
+ tdelt = (uint32_t)timer_delta(&thread->system_timer,
+ &thread->vtimer_user_save);
+ }
+ absolutetime_to_microtime(tdelt, &secs, microsecs);
+ break;
+
+ case TASK_VTIMER_PROF:
+ tsum = timer_grab(&thread->user_timer);
+ tsum += timer_grab(&thread->system_timer);
+ tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
+ absolutetime_to_microtime(tdelt, &secs, microsecs);
+ /* if the time delta is smaller than a usec, ignore */
+ if (*microsecs != 0)
+ thread->vtimer_prof_save = tsum;
+ break;
+
+ case TASK_VTIMER_RLIM:
+ tsum = timer_grab(&thread->user_timer);
+ tsum += timer_grab(&thread->system_timer);
+ tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
+ thread->vtimer_rlim_save = tsum;
+ absolutetime_to_microtime(tdelt, &secs, microsecs);
+ break;
+ }
+
+ thread_unlock(thread);
+ splx(s);
+}
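+
+/*
+ * Illustrative vtimer life cycle (a sketch, not part of this file):
+ * a client such as the BSD setitimer() path arms the timer once and
+ * then polls for elapsed time on the current thread:
+ *
+ * task_vtimer_set(task, TASK_VTIMER_PROF);
+ * ...
+ * uint32_t usecs;
+ * task_vtimer_update(task, TASK_VTIMER_PROF, &usecs);
+ * ...
+ * task_vtimer_clear(task, TASK_VTIMER_PROF);
+ */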
+
+/*
+ * task_assign:
+ *
+ * Change the assigned processor set for the task
+ */
+kern_return_t
+task_assign(
+ __unused task_t task,
+ __unused processor_set_t new_pset,
+ __unused boolean_t assign_threads)
+{
+ return(KERN_FAILURE);
+}
+
+/*
+ * task_assign_default:
+ *
+ * Version of task_assign to assign to default processor set.
+ */
+kern_return_t
+task_assign_default(
+ task_t task,
+ boolean_t assign_threads)
+{
+ return (task_assign(task, &pset0, assign_threads));
+}
+
+/*
+ * task_get_assignment
+ *
+ * Return name of processor set that task is assigned to.
+ */
+kern_return_t
+task_get_assignment(
+ task_t task,
+ processor_set_t *pset)
+{
+ if (!task || !task->active)
+ return KERN_FAILURE;
+
+ *pset = &pset0;
+
+ return KERN_SUCCESS;
+}
+
+uint64_t
+get_task_dispatchqueue_offset(
+ task_t task)
+{
+ return task->dispatchqueue_offset;
+}
+
+/*
+ * task_policy
+ *
+ * Set scheduling policy and parameters, both base and limit, for
+ * the given task. Policy must be a policy which is enabled for the
+ * processor set. Change contained threads if requested.
+ */
+kern_return_t
+task_policy(
+ __unused task_t task,
+ __unused policy_t policy_id,
+ __unused policy_base_t base,
+ __unused mach_msg_type_number_t count,
+ __unused boolean_t set_limit,
+ __unused boolean_t change)
+{
+ return(KERN_FAILURE);
+}
+
+/*
+ * task_set_policy
+ *
+ * Set scheduling policy and parameters, both base and limit, for
+ * the given task. Policy can be any policy implemented by the
+ * processor set, whether enabled or not. Change contained threads
+ * if requested.
+ */
+kern_return_t
+task_set_policy(
+ __unused task_t task,
+ __unused processor_set_t pset,
+ __unused policy_t policy_id,
+ __unused policy_base_t base,
+ __unused mach_msg_type_number_t base_count,
+ __unused policy_limit_t limit,
+ __unused mach_msg_type_number_t limit_count,
+ __unused boolean_t change)
+{
+ return(KERN_FAILURE);
+}
+
+kern_return_t
+task_set_ras_pc(
+ __unused task_t task,
+ __unused vm_offset_t pc,
+ __unused vm_offset_t endpc)
+{
+ return KERN_FAILURE;
+}
+
+void
+task_synchronizer_destroy_all(task_t task)
+{
+ /*
+ * Destroy owned semaphores
+ */
+ semaphore_destroy_all(task);
+}
+
+/*
+ * Install default (machine-dependent) initial thread state
+ * on the task. Subsequent thread creation will have this initial
+ * state set on the thread by machine_thread_inherit_taskwide().
+ * Flavors and structures are exactly the same as those to thread_set_state()
+ */
+kern_return_t
+task_set_state(
+ task_t task,
+ int flavor,
+ thread_state_t state,
+ mach_msg_type_number_t state_count)
+{
+ kern_return_t ret;
+
+ if (task == TASK_NULL) {
+ return (KERN_INVALID_ARGUMENT);
+ }
+
+ task_lock(task);
+
+ if (!task->active) {
+ task_unlock(task);
+ return (KERN_FAILURE);
+ }
+
+ ret = machine_task_set_state(task, flavor, state, state_count);
+
+ task_unlock(task);
+ return ret;
+}
+
+/*
+ * Examine the default (machine-dependent) initial thread state
+ * on the task, as set by task_set_state(). Flavors and structures
+ * are exactly the same as those passed to thread_get_state().
+ */
+kern_return_t
+task_get_state(
+ task_t task,
+ int flavor,
+ thread_state_t state,
+ mach_msg_type_number_t *state_count)
+{
+ kern_return_t ret;
+
+ if (task == TASK_NULL) {
+ return (KERN_INVALID_ARGUMENT);
+ }
+
+ task_lock(task);
+
+ if (!task->active) {
+ task_unlock(task);
+ return (KERN_FAILURE);
+ }
+
+ ret = machine_task_get_state(task, flavor, state, state_count);
+
+ task_unlock(task);
+ return ret;
+}
+
+
+static kern_return_t __attribute__((noinline,not_tail_called))
+PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(
+ mach_exception_code_t code,
+ mach_exception_subcode_t subcode,
+ void *reason)
+{
+#ifdef MACH_BSD
+ if (1 == proc_selfpid())
+ return KERN_NOT_SUPPORTED; // initproc is immune
+#endif
+ mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
+ [0] = code,
+ [1] = subcode,
+ };
+ task_t task = current_task();
+ kern_return_t kr;
+
+ /* (See jetsam-related comments below) */
+
+ proc_memstat_terminated(task->bsd_info, TRUE);
+ kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason);
+ proc_memstat_terminated(task->bsd_info, FALSE);
+ return kr;
+}
+
+kern_return_t
+task_violated_guard(
+ mach_exception_code_t code,
+ mach_exception_subcode_t subcode,
+ void *reason)
+{
+ return PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(code, subcode, reason);
+}
+
+
+#if CONFIG_MEMORYSTATUS
+
+boolean_t
+task_get_memlimit_is_active(task_t task)
+{
+ assert (task != NULL);
+
+ if (task->memlimit_is_active == 1) {
+ return(TRUE);
+ } else {
+ return (FALSE);
+ }
+}
+
+void
+task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
+{
+ assert (task != NULL);
+
+ if (memlimit_is_active) {
+ task->memlimit_is_active = 1;
+ } else {
+ task->memlimit_is_active = 0;
+ }
+}
+
+boolean_t
+task_get_memlimit_is_fatal(task_t task)
+{
+ assert(task != NULL);
+
+ if (task->memlimit_is_fatal == 1) {
+ return(TRUE);
+ } else {
+ return(FALSE);
+ }
+}
+
+void
+task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
+{
+ assert (task != NULL);
+
+ if (memlimit_is_fatal) {
+ task->memlimit_is_fatal = 1;
+ } else {
+ task->memlimit_is_fatal = 0;
+ }
+}
+
+boolean_t
+task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
+{
+ boolean_t triggered = FALSE;
+
+ assert(task == current_task());
+
+ /*
+ * Returns true, if task has already triggered an exc_resource exception.
+ */
+
+ if (memlimit_is_active) {
+ triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
+ } else {
+ triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
+ }
+
+ return(triggered);
+}
+
+void
+task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
+{
+ assert(task == current_task());
+
+ /*
+ * We allow one exc_resource per process per active/inactive limit.
+ * The limit's fatal attribute does not come into play.
+ */
+
+ if (memlimit_is_active) {
+ task->memlimit_active_exc_resource = 1;
+ } else {
+ task->memlimit_inactive_exc_resource = 1;
+ }
+}
+
+#define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
+
+void __attribute__((noinline))
+PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal)
+{
+ task_t task = current_task();
+ int pid = 0;
+ const char *procname = "unknown";
+ mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+ boolean_t send_sync_exc_resource = FALSE;
+
+#ifdef MACH_BSD
+ pid = proc_selfpid();
+
+ if (pid == 1) {
+ /*
+ * Cannot have ReportCrash analyzing
+ * a suspended initproc.
+ */
+ return;
+ }
+
+ if (task->bsd_info != NULL) {
+ procname = proc_name_address(current_task()->bsd_info);
+ send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(current_task()->bsd_info);
+ }
+#endif
+#if CONFIG_COREDUMP
+ if (hwm_user_cores) {
+ int error;
+ uint64_t starttime, end;
+ clock_sec_t secs = 0;
+ uint32_t microsecs = 0;
+
+ starttime = mach_absolute_time();
+ /*
+ * Trigger a coredump of this process. Don't proceed unless we know we won't
+ * be filling up the disk, and ignore the core size resource limit for this
+ * core file.
+ */
+ if ((error = coredump(current_task()->bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
+ printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
+ }
+ /*
+ * coredump() leaves the task suspended.
+ */
+ task_resume_internal(current_task());
+
+ end = mach_absolute_time();
+ absolutetime_to_microtime(end - starttime, &secs, &microsecs);
+ printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
+ proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs);
+ }
+#endif /* CONFIG_COREDUMP */
+
+ if (disable_exc_resource) {
+ printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
+ "supressed by a boot-arg.\n", procname, pid, max_footprint_mb);
+ return;
+ }
+
+ /*
+ * A task that has triggered an EXC_RESOURCE should not be
+ * jetsammed when the device is under memory pressure. Here
+ * we set the P_MEMSTAT_TERMINATED flag so that the process
+ * will be skipped if the memorystatus_thread wakes up.
+ */
+ proc_memstat_terminated(current_task()->bsd_info, TRUE);
+
+ code[0] = code[1] = 0;
+ EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
+ EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
+ EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
+
+ /*
+ * Do not generate a corpse fork if the violation is a fatal one
+ * or the process wants synchronous EXC_RESOURCE exceptions.
+ */
+ if (is_fatal || send_sync_exc_resource || exc_via_corpse_forking == 0) {
+ /* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
+ if (send_sync_exc_resource || corpse_for_fatal_memkill == 0) {
+ /*
+ * Use the _internal_ variant so that no user-space
+ * process can resume our task from under us.
+ */
+ task_suspend_internal(task);
+ exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
+ task_resume_internal(task);
+ }
+ } else {
+ if (audio_active) {
+ printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
+ "supressed due to audio playback.\n", procname, pid, max_footprint_mb);
+ } else {
+ task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
+ code, EXCEPTION_CODE_MAX, NULL);
+ }
+ }
+
+ /*
+ * After the EXC_RESOURCE has been handled, we must clear the
+ * P_MEMSTAT_TERMINATED flag so that the process can again be
+ * considered for jetsam if the memorystatus_thread wakes up.
+ */
+ proc_memstat_terminated(current_task()->bsd_info, FALSE); /* clear the flag */
+}
+
+/*
+ * Callback invoked when a task exceeds its physical footprint limit.
+ */
+void
+task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
+{
+ ledger_amount_t max_footprint, max_footprint_mb;
+ task_t task;
+ boolean_t is_warning;
+ boolean_t memlimit_is_active;
+ boolean_t memlimit_is_fatal;
+
+ if (warning == LEDGER_WARNING_DIPPED_BELOW) {
+ /*
+ * Task memory limits only provide a warning on the way up.
+ */
+ return;
+ } else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
+ /*
+ * This task is in danger of violating a memory limit;
+ * it has exceeded a percentage level of the limit.
+ */
+ is_warning = TRUE;
+ } else {
+ /*
+ * The task has exceeded the physical footprint limit.
+ * This is not a warning but a true limit violation.
+ */
+ is_warning = FALSE;
+ }
+
+ task = current_task();
+
+ ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
+ max_footprint_mb = max_footprint >> 20;
+
+ memlimit_is_active = task_get_memlimit_is_active(task);
+ memlimit_is_fatal = task_get_memlimit_is_fatal(task);
+
+ /*
+ * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
+ * We only generate the exception once per process per memlimit (active/inactive limit).
+ * To enforce this, we monitor state based on the memlimit's active/inactive attribute
+ * and we disable it by marking that memlimit as exception triggered.
+ */
+ if ((is_warning == FALSE) && (!task_has_triggered_exc_resource(task, memlimit_is_active))) {
+ PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)max_footprint_mb, memlimit_is_fatal);
+ memorystatus_log_exception((int)max_footprint_mb, memlimit_is_active, memlimit_is_fatal);
+ task_mark_has_triggered_exc_resource(task, memlimit_is_active);
+ }
+
+ memorystatus_on_ledger_footprint_exceeded(is_warning, memlimit_is_active, memlimit_is_fatal);
+}
+
+extern int proc_check_footprint_priv(void);
+
+kern_return_t
+task_set_phys_footprint_limit(
+ task_t task,
+ int new_limit_mb,
+ int *old_limit_mb)
+{
+ kern_return_t error;
+
+ boolean_t memlimit_is_active;
+ boolean_t memlimit_is_fatal;
+
+ if ((error = proc_check_footprint_priv())) {
+ return (KERN_NO_ACCESS);
+ }
+
+ /*
+ * This call should probably be obsoleted.
+ * But for now, we default to current state.
+ */
+ memlimit_is_active = task_get_memlimit_is_active(task);
+ memlimit_is_fatal = task_get_memlimit_is_fatal(task);
+
+ return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
+}
+
+kern_return_t
+task_convert_phys_footprint_limit(
+ int limit_mb,
+ int *converted_limit_mb)
+{
+ if (limit_mb == -1) {
+ /*
+ * No limit
+ */
+ if (max_task_footprint != 0) {
+ *converted_limit_mb = (int)(max_task_footprint / 1024 / 1024); /* bytes to MB */
+ } else {
+ *converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
+ }
+ } else {
+ /* nothing to convert */
+ *converted_limit_mb = limit_mb;
+ }
+ return (KERN_SUCCESS);
+}
+
+
+kern_return_t
+task_set_phys_footprint_limit_internal(
+ task_t task,
+ int new_limit_mb,
+ int *old_limit_mb,
+ boolean_t memlimit_is_active,
+ boolean_t memlimit_is_fatal)
+{
+ ledger_amount_t old;
+
+ ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
+
+ /*
+ * Check that limit >> 20 will not give an "unexpected" 32-bit
+ * result. There are, however, implicit assumptions that -1 mb limit
+ * equates to LEDGER_LIMIT_INFINITY.
+ */
+ assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
+
+ if (old_limit_mb) {
+ *old_limit_mb = (int)(old >> 20);
+ }
+
+ if (new_limit_mb == -1) {
+ /*
+ * Caller wishes to remove the limit.
+ */
+ ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
+ max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
+ max_task_footprint ? max_task_footprint_warning_level : 0);
+
+ task_lock(task);
+ task_set_memlimit_is_active(task, memlimit_is_active);
+ task_set_memlimit_is_fatal(task, memlimit_is_fatal);
+ task_unlock(task);
+
+ return (KERN_SUCCESS);
+ }
+
+#ifdef CONFIG_NOMONITORS
+ return (KERN_SUCCESS);
+#endif /* CONFIG_NOMONITORS */
+
+ task_lock(task);
+
+ if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
+ (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
+ (((ledger_amount_t)new_limit_mb << 20) == old)) {
+ /*
+ * memlimit state is not changing
+ */
+ task_unlock(task);
+ return(KERN_SUCCESS);
+ }
+
+ task_set_memlimit_is_active(task, memlimit_is_active);
+ task_set_memlimit_is_fatal(task, memlimit_is_fatal);
+
+ ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
+ (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
+
+ if (task == current_task()) {
+ ledger_check_new_balance(current_thread(), task->ledger,
+ task_ledgers.phys_footprint);
+ }
+
+ task_unlock(task);
+
+ return (KERN_SUCCESS);
+}
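+
+/*
+ * For illustration (a sketch, not part of this file): installing a
+ * fatal, active 512 MB footprint limit would look like
+ *
+ * task_set_phys_footprint_limit_internal(task, 512, NULL, TRUE, TRUE);
+ *
+ * which sets a ledger limit of (512 << 20) bytes; passing -1 for
+ * new_limit_mb removes the limit, as handled above.
+ */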
+
+kern_return_t
+task_get_phys_footprint_limit(
+ task_t task,
+ int *limit_mb)
+{
+ ledger_amount_t limit;
+
+ ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
+ /*
+ * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
+ * result. There are, however, implicit assumptions that -1 mb limit
+ * equates to LEDGER_LIMIT_INFINITY.
+ */
+ assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
+ *limit_mb = (int)(limit >> 20);
+
+ return (KERN_SUCCESS);
+}
+#else /* CONFIG_MEMORYSTATUS */
+kern_return_t
+task_set_phys_footprint_limit(
+ __unused task_t task,
+ __unused int new_limit_mb,
+ __unused int *old_limit_mb)
+{
+ return (KERN_FAILURE);
+}
+
+kern_return_t
+task_get_phys_footprint_limit(
+ __unused task_t task,
+ __unused int *limit_mb)
+{
+ return (KERN_FAILURE);
+}
+#endif /* CONFIG_MEMORYSTATUS */
+
+void
+task_set_thread_limit(task_t task, uint16_t thread_limit)
+{
+ assert(task != kernel_task);
+ if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
+ task_lock(task);
+ task->task_thread_limit = thread_limit;
+ task_unlock(task);
+ }
+}
+
+/*
+ * We need to export some functions to other components that
+ * are currently implemented in macros within the osfmk
+ * component. Just export them as functions of the same name.
+ */
+boolean_t is_kerneltask(task_t t)
+{
+ if (t == kernel_task)
+ return (TRUE);
+
+ return (FALSE);
+}
+
+boolean_t is_corpsetask(task_t t)
+{
+ return (task_is_a_corpse(t));
+}
+
+#undef current_task
+task_t current_task(void);
+task_t current_task(void)
+{
+ return (current_task_fast());
+}
+
+#undef task_reference
+void task_reference(task_t task);
+void
+task_reference(
+ task_t task)
+{
+ if (task != TASK_NULL)
+ task_reference_internal(task);
+}
+
+/* defined in bsd/kern/kern_prot.c */
+extern int get_audit_token_pid(audit_token_t *audit_token);
+
+int task_pid(task_t task)
+{
+ if (task)
+ return get_audit_token_pid(&task->audit_token);
+ return -1;
+}
+
+
+/*
+ * This routine finds a thread in a task by its unique id
+ * Returns a referenced thread or THREAD_NULL if the thread was not found
+ *
+ * TODO: This is super inefficient - it's an O(threads in task) list walk!
+ * We should make a tid hash, or transition all tid clients to thread ports
+ *
+ * Precondition: No locks held (will take task lock)
+ */
+thread_t
+task_findtid(task_t task, uint64_t tid)
+{
+ thread_t self = current_thread();
+ thread_t found_thread = THREAD_NULL;
+ thread_t iter_thread = THREAD_NULL;
+
+ /* Short-circuit the lookup if we're looking up ourselves */
+ if (tid == self->thread_id || tid == TID_NULL) {
+ assert(self->task == task);
+
+ thread_reference(self);
+
+ return self;
+ }
+
+ task_lock(task);
+
+ queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
+ if (iter_thread->thread_id == tid) {
+ found_thread = iter_thread;
+ thread_reference(found_thread);
+ break;
+ }
+ }
+
+ task_unlock(task);
+
+ return (found_thread);
+}
+
+int pid_from_task(task_t task)
+{
+ int pid = -1;
+
+ if (task->bsd_info) {
+ pid = proc_pid(task->bsd_info);
+ } else {
+ pid = task_pid(task);
+ }
+
+ return pid;
+}
+
+/*
+ * Control the CPU usage monitor for a task.
+ */
+kern_return_t
+task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
+{
+ int error = KERN_SUCCESS;
+
+ if (*flags & CPUMON_MAKE_FATAL) {
+ task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
+ } else {
+ error = KERN_INVALID_ARGUMENT;
+ }
+
+ return error;
+}
+
+/*
+ * Control the wakeups monitor for a task.
+ */
+kern_return_t
+task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
+{
+ ledger_t ledger = task->ledger;
+
+ task_lock(task);
+ if (*flags & WAKEMON_GET_PARAMS) {
+ ledger_amount_t limit;
+ uint64_t period;
+
+ ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
+ ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
+
+ if (limit != LEDGER_LIMIT_INFINITY) {
+ /*
+ * An active limit means the wakeups monitor is enabled.
+ */
+ *rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
+ *flags = WAKEMON_ENABLE;
+ if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
+ *flags |= WAKEMON_MAKE_FATAL;
+ }
+ } else {
+ *flags = WAKEMON_DISABLE;
+ *rate_hz = -1;
+ }
+
+ /*
+ * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
+ */
+ task_unlock(task);
+ return KERN_SUCCESS;
+ }
+
+ if (*flags & WAKEMON_ENABLE) {
+ if (*flags & WAKEMON_SET_DEFAULTS) {
+ *rate_hz = task_wakeups_monitor_rate;
+ }
+
+#ifndef CONFIG_NOMONITORS
+ if (*flags & WAKEMON_MAKE_FATAL) {
+ task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
+ }
+#endif /* CONFIG_NOMONITORS */
+
+ if (*rate_hz <= 0) {
+ task_unlock(task);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+#ifndef CONFIG_NOMONITORS
+ ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
+ task_wakeups_monitor_ustackshots_trigger_pct);
+ ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
+ ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
+#endif /* CONFIG_NOMONITORS */
+ } else if (*flags & WAKEMON_DISABLE) {
+ /*
+ * Caller wishes to disable wakeups monitor on the task.
+ *
+ * Disable telemetry if it was triggered by the wakeups monitor, and
+ * remove the limit & callback on the wakeups ledger entry.
+ */
+#if CONFIG_TELEMETRY
+ telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
+#endif
+ ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
+ ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
+ }
+
+ task_unlock(task);
+ return KERN_SUCCESS;
+}
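+
+/*
+ * Illustrative caller sketch (not part of this file): enabling the
+ * wakeups monitor with the system default rate:
+ *
+ * uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
+ * int32_t rate_hz = 0;
+ * kern_return_t kr = task_wakeups_monitor_ctl(task, &flags, &rate_hz);
+ *
+ * On return, rate_hz holds the rate actually programmed
+ * (task_wakeups_monitor_rate when WAKEMON_SET_DEFAULTS is set).
+ */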
+
+void
+task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
+{
+ if (warning == LEDGER_WARNING_ROSE_ABOVE) {
+#if CONFIG_TELEMETRY
+ /*
+ * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
+ * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
+ */
+ telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
+#endif
+ return;
+ }
+
+#if CONFIG_TELEMETRY
+ /*
+ * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
+ * exceeded the limit, turn telemetry off for the task.
+ */
+ telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
+#endif
+
+ if (warning == 0) {
+ SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
+ }
+}
+
+void __attribute__((noinline))
+SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
+{
+ task_t task = current_task();
+ int pid = 0;
+ const char *procname = "unknown";
+ boolean_t fatal;
+ kern_return_t kr;
+#ifdef EXC_RESOURCE_MONITORS
+ mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+#endif /* EXC_RESOURCE_MONITORS */
+ struct ledger_entry_info lei;
+
+#ifdef MACH_BSD
+ pid = proc_selfpid();
+ if (task->bsd_info != NULL)
+ procname = proc_name_address(current_task()->bsd_info);
+#endif
+
+ ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
+
+ /*
+ * Disable the exception notification so we don't overwhelm
+ * the listener with an endless stream of redundant exceptions.
+ * TODO: detect whether another thread is already reporting the violation.
+ */
+ uint32_t flags = WAKEMON_DISABLE;
+ task_wakeups_monitor_ctl(task, &flags, NULL);
+
+ fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
+ trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
+ os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
+ "over ~%llu seconds, averaging %llu wakes / second and "
+ "violating a %slimit of %llu wakes over %llu seconds.\n",
+ procname, pid,
+ lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
+ lei.lei_last_refill == 0 ? 0 :
+ (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
+ fatal ? "FATAL " : "",
+ lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
+
+ kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
+ fatal ? kRNFatalLimitFlag : 0);
+ if (kr) {
+ printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
+ }
+
+#ifdef EXC_RESOURCE_MONITORS
+ if (disable_exc_resource) {
+ printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
+ "supressed by a boot-arg\n", procname, pid);
+ return;
+ }
+ if (audio_active) {
+ os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
+ "supressed due to audio playback\n", procname, pid);
+ return;
+ }
+ if (lei.lei_last_refill == 0) {
+ os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
+ "supressed due to lei.lei_last_refill = 0 \n", procname, pid);
+ }
+
+ code[0] = code[1] = 0;
+ EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
+ EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
+ EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
+ NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
+ EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
+ lei.lei_last_refill);
+ EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
+ NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
+ exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
+#endif /* EXC_RESOURCE_MONITORS */
+
+ if (fatal) {
+ task_terminate_internal(task);
+ }
+}
+
+static boolean_t
+global_update_logical_writes(int64_t io_delta)
+{
+ int64_t old_count, new_count;
+ boolean_t needs_telemetry;
+
+ do {
+ new_count = old_count = global_logical_writes_count;
+ new_count += io_delta;
+ if (new_count >= io_telemetry_limit) {
+ new_count = 0;
+ needs_telemetry = TRUE;
+ } else {
+ needs_telemetry = FALSE;
+ }
+ } while(!OSCompareAndSwap64(old_count, new_count, &global_logical_writes_count));
+ return needs_telemetry;
+}
+
+void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
+{
+ int64_t io_delta = 0;
+ boolean_t needs_telemetry = FALSE;
+
+ if ((!task) || (!io_size) || (!vp))
+ return;
+
+ KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
+ task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
+ DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
+ switch(flags) {
+ case TASK_WRITE_IMMEDIATE:
+ OSAddAtomic64(io_size, (SInt64 *)&(task->task_immediate_writes));
+ ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
+ break;
+ case TASK_WRITE_DEFERRED:
+ OSAddAtomic64(io_size, (SInt64 *)&(task->task_deferred_writes));
+ ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
+ break;
+ case TASK_WRITE_INVALIDATED:
+ OSAddAtomic64(io_size, (SInt64 *)&(task->task_invalidated_writes));
+ ledger_debit(task->ledger, task_ledgers.logical_writes, io_size);
+ break;
+ case TASK_WRITE_METADATA:
+ OSAddAtomic64(io_size, (SInt64 *)&(task->task_metadata_writes));
+ ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
+ break;
+ }
+
+ io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
+ if (io_telemetry_limit != 0) {
+ /* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
+ needs_telemetry = global_update_logical_writes(io_delta);
+ if (needs_telemetry) {
+ act_set_io_telemetry_ast(current_thread());
+ }
+ }
+}
+
+/*
+ * Control the I/O monitor for a task.
+ */
+kern_return_t
+task_io_monitor_ctl(task_t task, uint32_t *flags)
+{
+ ledger_t ledger = task->ledger;
+
+ task_lock(task);
+ if (*flags & IOMON_ENABLE) {
+ /* Configure the physical I/O ledger */
+ ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
+ ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
+
+ /* Configure the logical I/O ledger */
+ ledger_set_limit(ledger, task_ledgers.logical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
+ ledger_set_period(ledger, task_ledgers.logical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
+
+ } else if (*flags & IOMON_DISABLE) {
+ /*
+ * Caller wishes to disable I/O monitor on the task.
+ */
+ ledger_disable_refill(ledger, task_ledgers.physical_writes);
+ ledger_disable_callback(ledger, task_ledgers.physical_writes);
+ ledger_disable_refill(ledger, task_ledgers.logical_writes);
+ ledger_disable_callback(ledger, task_ledgers.logical_writes);
+ }
+
+ task_unlock(task);
+ return KERN_SUCCESS;
+}
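+
+/*
+ * Illustrative caller sketch (not part of this file):
+ *
+ * uint32_t flags = IOMON_ENABLE;
+ * kern_return_t kr = task_io_monitor_ctl(task, &flags);
+ *
+ * This arms both the physical and logical write ledgers at
+ * task_iomon_limit_mb megabytes over task_iomon_interval_secs seconds.
+ */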
+
+void
+task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
+{
+ if (warning == 0) {
+ SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
+ }
+}
+
+void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
+{
+ int pid = 0;
+ task_t task = current_task();
+#ifdef EXC_RESOURCE_MONITORS
+ mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+#endif /* EXC_RESOURCE_MONITORS */
+ struct ledger_entry_info lei;
+ kern_return_t kr;
+
+#ifdef MACH_BSD
+ pid = proc_selfpid();
+#endif
+ /*
+ * Get the ledger entry info. We need to do this before disabling the exception
+ * to get correct values for all fields.
+ */
+ switch(flavor) {
+ case FLAVOR_IO_PHYSICAL_WRITES:
+ ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
+ break;
+ case FLAVOR_IO_LOGICAL_WRITES:
+ ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
+ break;
+ }