+ events_info->csw = task->c_switch;
+
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ events_info->csw += thread->c_switch;
+ events_info->syscalls_mach += thread->syscalls_mach;
+ events_info->syscalls_unix += thread->syscalls_unix;
+ }
+
+
+ *task_info_count = TASK_EVENTS_INFO_COUNT;
+ break;
+ }
+ case TASK_AFFINITY_TAG_INFO:
+ {
+ if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ error = task_affinity_info(task, task_info_out, task_info_count);
+ break;
+ }
+ case TASK_POWER_INFO:
+ {
+ if (*task_info_count < TASK_POWER_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL);
+ break;
+ }
+
+ case TASK_POWER_INFO_V2:
+ {
+ if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+ task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
+ task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2);
+ break;
+ }
+
+ case TASK_VM_INFO:
+ case TASK_VM_INFO_PURGEABLE:
+ {
+ task_vm_info_t vm_info;
+ vm_map_t map;
+
+ if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ vm_info = (task_vm_info_t)task_info_out;
+
+ if (task == kernel_task) {
+ map = kernel_map;
+ /* no lock */
+ } else {
+ map = task->map;
+ vm_map_lock_read(map);
+ }
+
+ vm_info->virtual_size = (typeof(vm_info->virtual_size))map->size;
+ vm_info->region_count = map->hdr.nentries;
+ vm_info->page_size = vm_map_page_size(map);
+
+ vm_info->resident_size = pmap_resident_count(map->pmap);
+ vm_info->resident_size *= PAGE_SIZE;
+ vm_info->resident_size_peak = pmap_resident_max(map->pmap);
+ vm_info->resident_size_peak *= PAGE_SIZE;
+
+#define _VM_INFO(_name) \
+ vm_info->_name = ((mach_vm_size_t) map->pmap->stats._name) * PAGE_SIZE
+
+ _VM_INFO(device);
+ _VM_INFO(device_peak);
+ _VM_INFO(external);
+ _VM_INFO(external_peak);
+ _VM_INFO(internal);
+ _VM_INFO(internal_peak);
+ _VM_INFO(reusable);
+ _VM_INFO(reusable_peak);
+ _VM_INFO(compressed);
+ _VM_INFO(compressed_peak);
+ _VM_INFO(compressed_lifetime);
+
+ vm_info->purgeable_volatile_pmap = 0;
+ vm_info->purgeable_volatile_resident = 0;
+ vm_info->purgeable_volatile_virtual = 0;
+ if (task == kernel_task) {
+ /*
+ * We do not maintain the detailed stats for the
+ * kernel_pmap, so just count everything as
+ * "internal"...
+ */
+ vm_info->internal = vm_info->resident_size;
+ /*
+ * ... but since the memory held by the VM compressor
+ * in the kernel address space ought to be attributed
+ * to user-space tasks, we subtract it from "internal"
+ * to give memory reporting tools a more accurate idea
+ * of what the kernel itself is actually using, instead
+ * of making it look like the kernel is leaking memory
+ * when the system is under memory pressure.
+ */
+ vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
+ PAGE_SIZE);
+ } else {
+ mach_vm_size_t volatile_virtual_size;
+ mach_vm_size_t volatile_resident_size;
+ mach_vm_size_t volatile_compressed_size;
+ mach_vm_size_t volatile_pmap_size;
+ mach_vm_size_t volatile_compressed_pmap_size;
+ kern_return_t kr;
+
+ if (flavor == TASK_VM_INFO_PURGEABLE) {
+ kr = vm_map_query_volatile(
+ map,
+ &volatile_virtual_size,
+ &volatile_resident_size,
+ &volatile_compressed_size,
+ &volatile_pmap_size,
+ &volatile_compressed_pmap_size);
+ if (kr == KERN_SUCCESS) {
+ vm_info->purgeable_volatile_pmap =
+ volatile_pmap_size;
+ if (radar_20146450) {
+ vm_info->compressed -=
+ volatile_compressed_pmap_size;
+ }
+ vm_info->purgeable_volatile_resident =
+ volatile_resident_size;
+ vm_info->purgeable_volatile_virtual =
+ volatile_virtual_size;
+ }
+ }
+ }
+ *task_info_count = TASK_VM_INFO_REV0_COUNT;
+
+ if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
+ vm_info->phys_footprint =
+ (mach_vm_size_t) get_task_phys_footprint(task);
+ *task_info_count = TASK_VM_INFO_REV1_COUNT;
+ }
+ if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
+ vm_info->min_address = map->min_offset;
+ vm_info->max_address = map->max_offset;
+ *task_info_count = TASK_VM_INFO_REV2_COUNT;
+ }
+
+ if (task != kernel_task) {
+ vm_map_unlock_read(map);
+ }
+
+ break;
+ }
+
+ case TASK_WAIT_STATE_INFO:
+ {
+ /*
+ * Deprecated flavor. Currently allowing some results until all users
+ * stop calling it. The results may not be accurate.
+ */
+ task_wait_state_info_t wait_state_info;
+ uint64_t total_sfi_ledger_val = 0;
+
+ if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ wait_state_info = (task_wait_state_info_t) task_info_out;
+
+ wait_state_info->total_wait_state_time = 0;
+ bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
+
+#if CONFIG_SCHED_SFI
+ int i, prev_lentry = -1;
+ int64_t val_credit, val_debit;
+
+ for (i = 0; i < MAX_SFI_CLASS_ID; i++){
+ val_credit =0;
+ /*
+ * checking with prev_lentry != entry ensures adjacent classes
+ * which share the same ledger do not add wait times twice.
+ * Note: Use ledger() call to get data for each individual sfi class.
+ */
+ if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
+ KERN_SUCCESS == ledger_get_entries(task->ledger,
+ task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
+ total_sfi_ledger_val += val_credit;
+ }
+ prev_lentry = task_ledgers.sfi_wait_times[i];
+ }
+
+#endif /* CONFIG_SCHED_SFI */
+ wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
+ *task_info_count = TASK_WAIT_STATE_INFO_COUNT;
+
+ break;
+ }
+ case TASK_VM_INFO_PURGEABLE_ACCOUNT:
+ {
+#if DEVELOPMENT || DEBUG
+ pvm_account_info_t acnt_info;
+
+ if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ if (task_info_out == NULL) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ acnt_info = (pvm_account_info_t) task_info_out;
+
+ error = vm_purgeable_account(task, acnt_info);
+
+ *task_info_count = PVM_ACCOUNT_INFO_COUNT;
+
+ break;
+#else /* DEVELOPMENT || DEBUG */
+ error = KERN_NOT_SUPPORTED;
+ break;
+#endif /* DEVELOPMENT || DEBUG */
+ }
+ case TASK_FLAGS_INFO:
+ {
+ task_flags_info_t flags_info;
+
+ if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ flags_info = (task_flags_info_t)task_info_out;
+
+ /* only publish the 64-bit flag of the task */
+ flags_info->flags = task->t_flags & TF_64B_ADDR;
+
+ *task_info_count = TASK_FLAGS_INFO_COUNT;
+ break;
+ }
+
+ case TASK_DEBUG_INFO_INTERNAL:
+ {
+#if DEVELOPMENT || DEBUG
+ task_debug_info_internal_t dbg_info;
+ if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
+ error = KERN_NOT_SUPPORTED;
+ break;
+ }
+
+ if (task_info_out == NULL) {
+ error = KERN_INVALID_ARGUMENT;
+ break;
+ }
+ dbg_info = (task_debug_info_internal_t) task_info_out;
+ dbg_info->ipc_space_size = 0;
+ if (task->itk_space){
+ dbg_info->ipc_space_size = task->itk_space->is_table_size;
+ }
+
+ error = KERN_SUCCESS;
+ *task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
+ break;
+#else /* DEVELOPMENT || DEBUG */
+ error = KERN_NOT_SUPPORTED;
+ break;
+#endif /* DEVELOPMENT || DEBUG */
+ }
+ default:
+ error = KERN_INVALID_ARGUMENT;
+ }
+
+ task_unlock(task);
+ return (error);
+}
+
+/*
+ * task_info_from_user
+ *
+ * MIG server-side entry point for task_info() calls arriving from
+ * user space; interposed so additional security checks can be made
+ * on the supplied port before the in-kernel task_info() runs.
+ *
+ * TASK_DYLD_INFO requires the more privileged task port; every other
+ * flavor is served from the less-privileged task name port.
+ */
+kern_return_t
+task_info_from_user(
+	mach_port_t		task_port,
+	task_flavor_t		flavor,
+	task_info_t		task_info_out,
+	mach_msg_type_number_t	*task_info_count)
+{
+	task_t		task;
+	kern_return_t	kr;
+
+	task = (flavor == TASK_DYLD_INFO) ?
+	    convert_port_to_task(task_port) :
+	    convert_port_to_task_name(task_port);
+
+	kr = task_info(task, flavor, task_info_out, task_info_count);
+
+	/* convert_port_to_task*() returned a task reference (or TASK_NULL). */
+	task_deallocate(task);
+
+	return kr;
+}
+
+/*
+ * task_power_info_locked
+ *
+ * Returns power stats for the task.
+ * Note: Called with task locked.
+ *
+ * info   - required; receives wakeup counts and user/system time.
+ * ginfo  - optional; receives GPU utilisation when non-NULL.
+ * infov2 - optional; receives V2 extras (ptime, pset switches and,
+ *          on CONFIG_EMBEDDED builds, energy) when non-NULL.
+ */
+void
+task_power_info_locked(
+ task_t task,
+ task_power_info_t info,
+ gpu_energy_data_t ginfo,
+ task_power_info_v2_t infov2)
+{
+ thread_t thread;
+ ledger_amount_t tmp;
+
+ task_lock_assert_owned(task);
+
+ /* Wakeup counts come from the task ledger; the debit side (tmp) is discarded. */
+ ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
+ (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
+ ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
+ (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
+
+ /* Task-level counters first; live per-thread contributions are added below. */
+ info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
+ info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
+
+ info->total_user = task->total_user_time;
+ info->total_system = task->total_system_time;
+
+#if CONFIG_EMBEDDED
+ if (infov2) {
+ infov2->task_energy = task->task_energy;
+ }
+#endif
+
+ if (ginfo) {
+ ginfo->task_gpu_utilisation = task->task_gpu_ns;
+ }
+
+ if (infov2) {
+ infov2->task_ptime = task->total_ptime;
+ infov2->task_pset_switches = task->ps_switch;
+ }
+
+ /* Fold in each live thread's counters. */
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ uint64_t tval;
+ spl_t x;
+
+ /* Idle threads are not charged to the task. */
+ if (thread->options & TH_OPT_IDLE_THREAD)
+ continue;
+
+ x = splsched();
+ thread_lock(thread);
+
+ info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
+ info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
+
+#if CONFIG_EMBEDDED
+ if (infov2) {
+ infov2->task_energy += ml_energy_stat(thread);
+ }
+#endif
+
+ tval = timer_grab(&thread->user_timer);
+ info->total_user += tval;
+
+ if (infov2) {
+ tval = timer_grab(&thread->ptime);
+ infov2->task_ptime += tval;
+ infov2->task_pset_switches += thread->ps_switch;
+ }
+
+ tval = timer_grab(&thread->system_timer);
+ if (thread->precise_user_kernel_time) {
+ info->total_system += tval;
+ } else {
+ /* system_timer may represent either sys or user */
+ info->total_user += tval;
+ }
+
+ if (ginfo) {
+ ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
+ }
+ thread_unlock(thread);
+ splx(x);
+ }
+}
+
+/*
+ * task_gpu_utilisation
+ *
+ * Returns the total gpu time used by the all the threads of the task
+ * (both dead and alive).  On CONFIG_EMBEDDED builds this always
+ * reports 0.
+ */
+uint64_t
+task_gpu_utilisation(
+ task_t task)
+{
+ uint64_t gpu_time = 0;
+#if !CONFIG_EMBEDDED
+ thread_t thread;
+
+ task_lock(task);
+ /* GPU time already rolled up onto the task itself. */
+ gpu_time += task->task_gpu_ns;
+
+ /* Add each live thread's contribution. */
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ spl_t x;
+ x = splsched();
+ thread_lock(thread);
+ gpu_time += ml_gpu_stat(thread);
+ thread_unlock(thread);
+ splx(x);
+ }
+
+ task_unlock(task);
+#else /* CONFIG_EMBEDDED */
+ /* silence compiler warning */
+ (void)task;
+#endif /* !CONFIG_EMBEDDED */
+ return gpu_time;
+}
+
+/*
+ * task_energy
+ *
+ * Returns the total energy used by the all the threads of the task
+ * (both dead and alive)
+ */
+uint64_t
+task_energy(
+	task_t task)
+{
+	thread_t	thread;
+	uint64_t	total = 0;
+
+	task_lock(task);
+
+	/* Energy already rolled up onto the task itself. */
+	total = task->task_energy;
+
+	/* Add the per-thread counter of every live thread. */
+	queue_iterate(&task->threads, thread, thread_t, task_threads) {
+		spl_t s = splsched();
+		thread_lock(thread);
+		total += ml_energy_stat(thread);
+		thread_unlock(thread);
+		splx(s);
+	}
+
+	task_unlock(task);
+
+	return total;
+}
+
+
+/*
+ * task_cpu_ptime
+ *
+ * Stub in this configuration; always reports 0.
+ */
+uint64_t
+task_cpu_ptime(
+ __unused task_t task)
+{
+ return 0;
+}
+
+
+/*
+ * task_purgable_info
+ *
+ * Fill *stats with the task's purgeable-memory statistics.
+ * Returns KERN_INVALID_ARGUMENT for a null task or stats pointer.
+ */
+kern_return_t
+task_purgable_info(
+	task_t			task,
+	task_purgable_info_t	*stats)
+{
+	if (task == TASK_NULL)
+		return KERN_INVALID_ARGUMENT;
+	if (stats == NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/* Hold a task reference for the duration of the query. */
+	task_reference(task);
+	vm_purgeable_stats((vm_purgeable_info_t)stats, task);
+	task_deallocate(task);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * task_vtimer_set
+ *
+ * Arm the vtimer(s) in `which' on the task and snapshot each thread's
+ * current timer values, so later task_vtimer_update() calls can report
+ * deltas relative to this point.
+ */
+void
+task_vtimer_set(
+ task_t task,
+ integer_t which)
+{
+ thread_t thread;
+ spl_t x;
+
+ task_lock(task);
+
+ task->vtimers |= which;
+
+ switch (which) {
+
+ case TASK_VTIMER_USER:
+ /*
+ * Snapshot user time.  Without precise user/kernel time
+ * accounting, user time is tracked in the system timer
+ * (task_vtimer_update() reads the same timer back).
+ */
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ x = splsched();
+ thread_lock(thread);
+ if (thread->precise_user_kernel_time)
+ thread->vtimer_user_save = timer_grab(&thread->user_timer);
+ else
+ thread->vtimer_user_save = timer_grab(&thread->system_timer);
+ thread_unlock(thread);
+ splx(x);
+ }
+ break;
+
+ case TASK_VTIMER_PROF:
+ /* Snapshot combined user + system time. */
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ x = splsched();
+ thread_lock(thread);
+ thread->vtimer_prof_save = timer_grab(&thread->user_timer);
+ thread->vtimer_prof_save += timer_grab(&thread->system_timer);
+ thread_unlock(thread);
+ splx(x);
+ }
+ break;
+
+ case TASK_VTIMER_RLIM:
+ /* Snapshot combined user + system time. */
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ x = splsched();
+ thread_lock(thread);
+ thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
+ thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
+ thread_unlock(thread);
+ splx(x);
+ }
+ break;
+ }
+
+ task_unlock(task);
+}
+
+/*
+ * task_vtimer_clear
+ *
+ * Disarm the vtimer(s) in `which' on the (current) task.
+ */
+void
+task_vtimer_clear(
+	task_t task,
+	integer_t which)
+{
+	assert(task == current_task());
+
+	/* Drop the requested vtimer bits under the task lock. */
+	task_lock(task);
+	task->vtimers &= ~which;
+	task_unlock(task);
+}
+
+/*
+ * task_vtimer_update
+ *
+ * Report, in *microsecs, the time the current thread has accumulated
+ * against the given vtimer since the last update/set, advancing the
+ * per-thread save value where appropriate.  Returns without touching
+ * *microsecs unless all bits of `which' are armed on the task.
+ *
+ * Note: `task' must be current_task().  The parameter was previously
+ * annotated __unused, which was misleading: task->vtimers is read
+ * unconditionally below, so the annotation has been removed.
+ */
+void
+task_vtimer_update(
+	task_t		task,
+	integer_t	which,
+	uint32_t	*microsecs)
+{
+	thread_t	thread = current_thread();
+	uint32_t	tdelt = 0;
+	clock_sec_t	secs = 0;
+	uint64_t	tsum;
+
+	assert(task == current_task());
+
+	spl_t s = splsched();
+	thread_lock(thread);
+
+	/* Every requested timer must currently be armed on the task. */
+	if ((task->vtimers & which) != (uint32_t)which) {
+		thread_unlock(thread);
+		splx(s);
+		return;
+	}
+
+	switch (which) {
+
+	case TASK_VTIMER_USER:
+		/*
+		 * Without precise user/kernel time accounting, user time
+		 * is tracked in the system timer (mirrors task_vtimer_set).
+		 */
+		if (thread->precise_user_kernel_time) {
+			tdelt = (uint32_t)timer_delta(&thread->user_timer,
+			    &thread->vtimer_user_save);
+		} else {
+			tdelt = (uint32_t)timer_delta(&thread->system_timer,
+			    &thread->vtimer_user_save);
+		}
+		absolutetime_to_microtime(tdelt, &secs, microsecs);
+		break;
+
+	case TASK_VTIMER_PROF:
+		tsum = timer_grab(&thread->user_timer);
+		tsum += timer_grab(&thread->system_timer);
+		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
+		absolutetime_to_microtime(tdelt, &secs, microsecs);
+		/* if the time delta is smaller than a usec, ignore */
+		if (*microsecs != 0)
+			thread->vtimer_prof_save = tsum;
+		break;
+
+	case TASK_VTIMER_RLIM:
+		tsum = timer_grab(&thread->user_timer);
+		tsum += timer_grab(&thread->system_timer);
+		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
+		thread->vtimer_rlim_save = tsum;
+		absolutetime_to_microtime(tdelt, &secs, microsecs);
+		break;
+	}
+
+	thread_unlock(thread);
+	splx(s);
+}
+
+/*
+ * task_assign:
+ *
+ * Change the assigned processor set for the task.
+ * Not supported; the request is always rejected.
+ */
+kern_return_t
+task_assign(
+	__unused task_t		task,
+	__unused processor_set_t	new_pset,
+	__unused boolean_t	assign_threads)
+{
+	return KERN_FAILURE;
+}
+
+/*
+ * task_assign_default:
+ *
+ * Version of task_assign to assign to default processor set (pset0).
+ */
+kern_return_t
+task_assign_default(
+	task_t		task,
+	boolean_t	assign_threads)
+{
+	return task_assign(task, &pset0, assign_threads);
+}
+
+/*
+ * task_get_assignment
+ *
+ * Return name of processor set that task is assigned to.
+ * There is only one processor set, so pset0 is reported for every
+ * active task; inactive or null tasks fail.
+ */
+kern_return_t
+task_get_assignment(
+	task_t			task,
+	processor_set_t		*pset)
+{
+	if (task == TASK_NULL || !task->active)
+		return KERN_FAILURE;
+
+	*pset = &pset0;
+	return KERN_SUCCESS;
+}
+
+/*
+ * get_task_dispatchqueue_offset
+ *
+ * Trivial accessor for the task's recorded dispatchqueue_offset.
+ */
+uint64_t
+get_task_dispatchqueue_offset(
+ task_t task)
+{
+ return task->dispatchqueue_offset;
+}
+
+/*
+ * task_policy
+ *
+ * Set scheduling policy and parameters, both base and limit, for
+ * the given task.  Not supported through this interface; the request
+ * is always rejected.
+ */
+kern_return_t
+task_policy(
+	__unused task_t			task,
+	__unused policy_t		policy_id,
+	__unused policy_base_t		base,
+	__unused mach_msg_type_number_t	count,
+	__unused boolean_t		set_limit,
+	__unused boolean_t		change)
+{
+	return KERN_FAILURE;
+}
+
+/*
+ * task_set_policy
+ *
+ * Set scheduling policy and parameters, both base and limit, for
+ * the given task.  Not supported through this interface; the request
+ * is always rejected.
+ */
+kern_return_t
+task_set_policy(
+	__unused task_t			task,
+	__unused processor_set_t	pset,
+	__unused policy_t		policy_id,
+	__unused policy_base_t		base,
+	__unused mach_msg_type_number_t	base_count,
+	__unused policy_limit_t		limit,
+	__unused mach_msg_type_number_t	limit_count,
+	__unused boolean_t		change)
+{
+	return KERN_FAILURE;
+}
+
+/*
+ * task_set_ras_pc
+ *
+ * Not supported in this configuration; always fails.
+ */
+kern_return_t
+task_set_ras_pc(
+	__unused task_t		task,
+	__unused vm_offset_t	pc,
+	__unused vm_offset_t	endpc)
+{
+	return KERN_FAILURE;
+}
+
+/*
+ * task_synchronizer_destroy_all
+ *
+ * Destroy every semaphore still owned by the task.
+ */
+void
+task_synchronizer_destroy_all(task_t task)
+{
+	semaphore_destroy_all(task);
+}
+
+/*
+ * Install default (machine-dependent) initial thread state
+ * on the task. Subsequent thread creation will have this initial
+ * state set on the thread by machine_thread_inherit_taskwide().
+ * Flavors and structures are exactly the same as those to thread_set_state()
+ */
+kern_return_t
+task_set_state(
+	task_t			task,
+	int			flavor,
+	thread_state_t		state,
+	mach_msg_type_number_t	state_count)
+{
+	kern_return_t kr;
+
+	if (task == TASK_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	task_lock(task);
+
+	/* Dead tasks cannot accept new default thread state. */
+	if (!task->active) {
+		task_unlock(task);
+		return KERN_FAILURE;
+	}
+
+	kr = machine_task_set_state(task, flavor, state, state_count);
+
+	task_unlock(task);
+	return kr;
+}
+
+/*
+ * Examine the default (machine-dependent) initial thread state
+ * on the task, as set by task_set_state(). Flavors and structures
+ * are exactly the same as those passed to thread_get_state().
+ */
+kern_return_t
+task_get_state(
+	task_t			task,
+	int			flavor,
+	thread_state_t		state,
+	mach_msg_type_number_t	*state_count)
+{
+	kern_return_t kr;
+
+	if (task == TASK_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	task_lock(task);
+
+	/* Dead tasks have no default thread state to report. */
+	if (!task->active) {
+		task_unlock(task);
+		return KERN_FAILURE;
+	}
+
+	kr = machine_task_get_state(task, flavor, state, state_count);
+
+	task_unlock(task);
+	return kr;
+}
+
+
+/*
+ * Deliver an EXC_GUARD exception, with a corpse, for the current task.
+ * initproc (pid 1) is immune and gets KERN_NOT_SUPPORTED instead.
+ */
+static kern_return_t __attribute__((noinline,not_tail_called))
+PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(
+ mach_exception_code_t code,
+ mach_exception_subcode_t subcode,
+ void *reason)
+{
+#ifdef MACH_BSD
+ if (1 == proc_selfpid())
+ return KERN_NOT_SUPPORTED; // initproc is immune
+#endif
+ mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
+ [0] = code,
+ [1] = subcode,
+ };
+ task_t task = current_task();
+ kern_return_t kr;
+
+ /*
+ * Mark the process P_MEMSTAT_TERMINATED around the corpse enqueue so
+ * it is skipped by jetsam while the exception is pending (see the
+ * jetsam-related comments in the high-watermark handler below).
+ */
+ proc_memstat_terminated(task->bsd_info, TRUE);
+ kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason);
+ proc_memstat_terminated(task->bsd_info, FALSE);
+ return kr;
+}
+
+extern kern_return_t
+task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *);
+
+/*
+ * task_violated_guard
+ *
+ * Public wrapper: hand off to the EXC_GUARD delivery path for the
+ * current task.
+ */
+kern_return_t
+task_violated_guard(
+	mach_exception_code_t	code,
+	mach_exception_subcode_t subcode,
+	void			*reason)
+{
+	return PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(code, subcode, reason);
+}
+
+
+#if CONFIG_MEMORYSTATUS
+
+/*
+ * task_get_memlimit_is_active
+ *
+ * Report whether the task's active memory limit is in effect,
+ * widening the one-bit flag to a boolean_t.
+ */
+boolean_t
+task_get_memlimit_is_active(task_t task)
+{
+	assert(task != NULL);
+
+	return task->memlimit_is_active ? TRUE : FALSE;
+}
+
+/*
+ * task_set_memlimit_is_active
+ *
+ * Record whether the task's active memory limit is in effect,
+ * narrowing the boolean_t into the one-bit flag.
+ */
+void
+task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
+{
+	assert(task != NULL);
+
+	task->memlimit_is_active = memlimit_is_active ? 1 : 0;
+}
+
+/*
+ * task_get_memlimit_is_fatal
+ *
+ * Report whether exceeding the task's memory limit is fatal,
+ * widening the one-bit flag to a boolean_t.
+ */
+boolean_t
+task_get_memlimit_is_fatal(task_t task)
+{
+	assert(task != NULL);
+
+	return task->memlimit_is_fatal ? TRUE : FALSE;
+}
+
+/*
+ * task_set_memlimit_is_fatal
+ *
+ * Record whether exceeding the task's memory limit is fatal,
+ * narrowing the boolean_t into the one-bit flag.
+ */
+void
+task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
+{
+	assert(task != NULL);
+
+	task->memlimit_is_fatal = memlimit_is_fatal ? 1 : 0;
+}
+
+/*
+ * task_has_triggered_exc_resource
+ *
+ * Returns TRUE if the (current) task has already triggered an
+ * exc_resource exception for the given limit type (active vs.
+ * inactive).
+ */
+boolean_t
+task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
+{
+	assert(task == current_task());
+
+	if (memlimit_is_active)
+		return task->memlimit_active_exc_resource ? TRUE : FALSE;
+
+	return task->memlimit_inactive_exc_resource ? TRUE : FALSE;
+}
+
+/*
+ * task_mark_has_triggered_exc_resource
+ *
+ * Record that the (current) task has sent an exc_resource exception.
+ * One exc_resource is allowed per process per active/inactive limit;
+ * the limit's fatal attribute does not come into play.
+ */
+void
+task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
+{
+	assert(task == current_task());
+
+	if (memlimit_is_active)
+		task->memlimit_active_exc_resource = 1;
+	else
+		task->memlimit_inactive_exc_resource = 1;
+}
+
+#define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
+
+void __attribute__((noinline))
+PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal)
+{
+ task_t task = current_task();
+ int pid = 0;
+ const char *procname = "unknown";
+ mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+
+#ifdef MACH_BSD
+ pid = proc_selfpid();
+
+ if (pid == 1) {
+ /*
+ * Cannot have ReportCrash analyzing
+ * a suspended initproc.
+ */
+ return;
+ }
+
+ if (task->bsd_info != NULL)
+ procname = proc_name_address(current_task()->bsd_info);
+#endif
+#if CONFIG_COREDUMP
+ if (hwm_user_cores) {
+ int error;
+ uint64_t starttime, end;
+ clock_sec_t secs = 0;
+ uint32_t microsecs = 0;
+
+ starttime = mach_absolute_time();
+ /*
+ * Trigger a coredump of this process. Don't proceed unless we know we won't
+ * be filling up the disk; and ignore the core size resource limit for this
+ * core file.
+ */
+ if ((error = coredump(current_task()->bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
+ printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
+ }
+ /*
+ * coredump() leaves the task suspended.
+ */
+ task_resume_internal(current_task());
+
+ end = mach_absolute_time();
+ absolutetime_to_microtime(end - starttime, &secs, &microsecs);
+ printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
+ proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs);
+ }
+#endif /* CONFIG_COREDUMP */
+
+ if (disable_exc_resource) {
+ printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
+ "supressed by a boot-arg.\n", procname, pid, max_footprint_mb);
+ return;
+ }
+
+ /*
+ * A task that has triggered an EXC_RESOURCE, should not be
+ * jetsammed when the device is under memory pressure. Here
+ * we set the P_MEMSTAT_TERMINATED flag so that the process
+ * will be skipped if the memorystatus_thread wakes up.
+ */
+ proc_memstat_terminated(current_task()->bsd_info, TRUE);
+
+ code[0] = code[1] = 0;
+ EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
+ EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
+ EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
+
+ /* Do not generate a corpse fork if the violation is a fatal one */
+ if (is_fatal || exc_via_corpse_forking == 0) {
+ /* Do not send a EXC_RESOURCE is corpse_for_fatal_memkill is set */
+ if (corpse_for_fatal_memkill == 0) {
+ /*
+ * Use the _internal_ variant so that no user-space
+ * process can resume our task from under us.
+ */
+ task_suspend_internal(task);
+ exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
+ task_resume_internal(task);
+ }
+ } else {
+ task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
+ code, EXCEPTION_CODE_MAX, NULL);
+ }
+
+ /*
+ * After the EXC_RESOURCE has been handled, we must clear the
+ * P_MEMSTAT_TERMINATED flag so that the process can again be
+ * considered for jetsam if the memorystatus_thread wakes up.
+ */
+ proc_memstat_terminated(current_task()->bsd_info, FALSE); /* clear the flag */
+}