+ ledger_t ledger = task->ledger;
+
+ task_lock(task);
+ if (*flags & WAKEMON_GET_PARAMS) {
+ ledger_amount_t limit;
+ uint64_t period;
+
+ ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
+ ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
+
+ if (limit != LEDGER_LIMIT_INFINITY) {
+ /*
+ * An active limit means the wakeups monitor is enabled.
+ */
+ *rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
+ *flags = WAKEMON_ENABLE;
+ if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
+ *flags |= WAKEMON_MAKE_FATAL;
+ }
+ } else {
+ *flags = WAKEMON_DISABLE;
+ *rate_hz = -1;
+ }
+
+ /*
+ * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
+ */
+ task_unlock(task);
+ return KERN_SUCCESS;
+ }
+
+ if (*flags & WAKEMON_ENABLE) {
+ if (*flags & WAKEMON_SET_DEFAULTS) {
+ *rate_hz = task_wakeups_monitor_rate;
+ }
+
+#ifndef CONFIG_NOMONITORS
+ if (*flags & WAKEMON_MAKE_FATAL) {
+ task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
+ }
+#endif /* CONFIG_NOMONITORS */
+
+ if (*rate_hz <= 0) {
+ task_unlock(task);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+#ifndef CONFIG_NOMONITORS
+ ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
+ task_wakeups_monitor_ustackshots_trigger_pct);
+ ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
+ ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
+#endif /* CONFIG_NOMONITORS */
+ } else if (*flags & WAKEMON_DISABLE) {
+ /*
+ * Caller wishes to disable wakeups monitor on the task.
+ *
+ * Disable telemetry if it was triggered by the wakeups monitor, and
+ * remove the limit & callback on the wakeups ledger entry.
+ */
+#if CONFIG_TELEMETRY
+ telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
+#endif
+ ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
+ ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
+ }
+
+ task_unlock(task);
+ return KERN_SUCCESS;
+}
+
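+/*
+ * Ledger callback for the interrupt_wakeups entry. Crossing the warning
+ * level upward (LEDGER_WARNING_ROSE_ABOVE) turns task telemetry on (when
+ * CONFIG_TELEMETRY is configured); any other warning turns it back off, and
+ * a warning of 0 means the limit itself was exhausted, so the EXC_RESOURCE
+ * notification path runs.
+ */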
+void
+task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
+{
+ if (warning == LEDGER_WARNING_ROSE_ABOVE) {
+#if CONFIG_TELEMETRY
+ /*
+ * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
+ * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
+ */
+ telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
+#endif
+ return;
+ }
+
+#if CONFIG_TELEMETRY
+ /*
+ * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
+ * exceeded the limit, turn telemetry off for the task.
+ */
+ telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
+#endif
+
+ if (warning == 0) {
+ SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
+ }
+}
+
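+/*
+ * The deliberately verbose name below is presumably intended to make the
+ * reason for the EXC_RESOURCE obvious in backtraces. This routine logs the
+ * violation, disables the wakeups monitor to avoid an endless stream of
+ * notifications, raises EXC_RESOURCE unless suppressed, and terminates the
+ * task if the monitor was configured as fatal.
+ */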
+void __attribute__((noinline))
+SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
+{
+ task_t task = current_task();
+ int pid = 0;
+ const char *procname = "unknown";
+ boolean_t fatal;
+ kern_return_t kr;
+#ifdef EXC_RESOURCE_MONITORS
+ mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+#endif /* EXC_RESOURCE_MONITORS */
+ struct ledger_entry_info lei;
+
+#ifdef MACH_BSD
+ pid = proc_selfpid();
+ if (task->bsd_info != NULL)
+ procname = proc_name_address(current_task()->bsd_info);
+#endif
+
+ ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
+
+ /*
+ * Disable the exception notification so we don't overwhelm
+ * the listener with an endless stream of redundant exceptions.
+ * TODO: detect whether another thread is already reporting the violation.
+ */
+ uint32_t flags = WAKEMON_DISABLE;
+ task_wakeups_monitor_ctl(task, &flags, NULL);
+
+ fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
+ trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
+ os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
+ "over ~%llu seconds, averaging %llu wakes / second and "
+ "violating a %slimit of %llu wakes over %llu seconds.\n",
+ procname, pid,
+ lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
+ lei.lei_last_refill == 0 ? 0 :
+ (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
+ fatal ? "FATAL " : "",
+ lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
+
+ kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
+ fatal ? kRNFatalLimitFlag : 0);
+ if (kr) {
+ printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
+ }
+
+#ifdef EXC_RESOURCE_MONITORS
+ if (disable_exc_resource) {
+ printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
+ "supressed by a boot-arg\n", procname, pid);
+ return;
+ }
+ if (audio_active) {
+ os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
+ "supressed due to audio playback\n", procname, pid);
+ return;
+ }
+ if (lei.lei_last_refill == 0) {
+ os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
+ "suppressed due to lei.lei_last_refill = 0\n", procname, pid);
+ /* Bail out here: the encoding below divides by lei_last_refill. */
+ return;
+ }
+
+ code[0] = code[1] = 0;
+ EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
+ EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
+ EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
+ NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
+ EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
+ lei.lei_last_refill);
+ EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
+ NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
+ exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
+#endif /* EXC_RESOURCE_MONITORS */
+
+ if (fatal) {
+ task_terminate_internal(task);
+ }
+}
+
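+/*
+ * Atomically fold io_delta into the global logical-writes counter. When the
+ * running total reaches io_telemetry_limit, reset it to zero and return TRUE
+ * so the caller can arm the I/O telemetry AST.
+ */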
+static boolean_t
+global_update_logical_writes(int64_t io_delta)
+{
+ int64_t old_count, new_count;
+ boolean_t needs_telemetry;
+
+ do {
+ new_count = old_count = global_logical_writes_count;
+ new_count += io_delta;
+ if (new_count >= io_telemetry_limit) {
+ new_count = 0;
+ needs_telemetry = TRUE;
+ } else {
+ needs_telemetry = FALSE;
+ }
+ } while(!OSCompareAndSwap64(old_count, new_count, &global_logical_writes_count));
+ return needs_telemetry;
+}
+
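+/*
+ * Account a logical write against the task: bump the per-task counter for
+ * the given flavor, credit (or, for invalidated writes, debit) the
+ * logical_writes ledger, and feed the global counter that drives I/O
+ * telemetry.
+ */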
+void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
+{
+ int64_t io_delta = 0;
+ boolean_t needs_telemetry = FALSE;
+
+ if ((!task) || (!io_size) || (!vp))
+ return;
+
+ KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
+ task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
+ DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
+ switch(flags) {
+ case TASK_WRITE_IMMEDIATE:
+ OSAddAtomic64(io_size, (SInt64 *)&(task->task_immediate_writes));
+ ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
+ break;
+ case TASK_WRITE_DEFERRED:
+ OSAddAtomic64(io_size, (SInt64 *)&(task->task_deferred_writes));
+ ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
+ break;
+ case TASK_WRITE_INVALIDATED:
+ OSAddAtomic64(io_size, (SInt64 *)&(task->task_invalidated_writes));
+ ledger_debit(task->ledger, task_ledgers.logical_writes, io_size);
+ break;
+ case TASK_WRITE_METADATA:
+ OSAddAtomic64(io_size, (SInt64 *)&(task->task_metadata_writes));
+ ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
+ break;
+ }
+
+ io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
+ if (io_telemetry_limit != 0) {
+ /* An io_telemetry_limit of 0 disables global updates and I/O telemetry. */
+ needs_telemetry = global_update_logical_writes(io_delta);
+ if (needs_telemetry) {
+ act_set_io_telemetry_ast(current_thread());
+ }
+ }
+}
+
+/*
+ * Control the I/O monitor for a task.
+ */
+kern_return_t
+task_io_monitor_ctl(task_t task, uint32_t *flags)
+{
+ ledger_t ledger = task->ledger;
+
+ task_lock(task);
+ if (*flags & IOMON_ENABLE) {
+ /* Configure the physical I/O ledger */
+ ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
+ ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
+
+ /* Configure the logical I/O ledger */
+ ledger_set_limit(ledger, task_ledgers.logical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
+ ledger_set_period(ledger, task_ledgers.logical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
+
+ } else if (*flags & IOMON_DISABLE) {
+ /*
+ * Caller wishes to disable I/O monitor on the task.
+ */
+ ledger_disable_refill(ledger, task_ledgers.physical_writes);
+ ledger_disable_callback(ledger, task_ledgers.physical_writes);
+ ledger_disable_refill(ledger, task_ledgers.logical_writes);
+ ledger_disable_callback(ledger, task_ledgers.logical_writes);
+ }
+
+ task_unlock(task);
+ return KERN_SUCCESS;
+}
+
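+/*
+ * Ledger callback for the physical/logical writes entries; param0 carries
+ * the I/O flavor. Only an exhausted limit (warning == 0) generates a
+ * notification.
+ */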
+void
+task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
+{
+ if (warning == 0) {
+ SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
+ }
+}
+
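+/*
+ * As with the wakeups monitor above: log the I/O violation, disable the
+ * monitor so the listener is not flooded, notify via
+ * send_resource_violation(), and raise EXC_RESOURCE where supported.
+ */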
+void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
+{
+ int pid = 0;
+ task_t task = current_task();
+#ifdef EXC_RESOURCE_MONITORS
+ mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+#endif /* EXC_RESOURCE_MONITORS */
+ struct ledger_entry_info lei;
+ kern_return_t kr;
+
+#ifdef MACH_BSD
+ pid = proc_selfpid();
+#endif
+ /*
+ * Get the ledger entry info before disabling the monitor, so that all
+ * fields still hold the values that triggered the violation.
+ */
+ switch(flavor) {
+ case FLAVOR_IO_PHYSICAL_WRITES:
+ ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
+ break;
+ case FLAVOR_IO_LOGICAL_WRITES:
+ ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
+ break;
+ }
+
+
+ /*
+ * Disable the exception notification so we don't overwhelm
+ * the listener with an endless stream of redundant exceptions.
+ * TODO: detect whether another thread is already reporting the violation.
+ */
+ uint32_t flags = IOMON_DISABLE;
+ task_io_monitor_ctl(task, &flags);
+
+ if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
+ trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
+ }
+ os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
+ pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
+
+ kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
+ if (kr) {
+ printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
+ }
+
+#ifdef EXC_RESOURCE_MONITORS
+ code[0] = code[1] = 0;
+ EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
+ EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
+ EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
+ EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
+ EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
+ exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
+#endif /* EXC_RESOURCE_MONITORS */
+}
+
+/* Placeholders for the task set/get voucher interfaces */
+kern_return_t
+task_get_mach_voucher(
+ task_t task,
+ mach_voucher_selector_t __unused which,
+ ipc_voucher_t *voucher)
+{
+ if (TASK_NULL == task)
+ return KERN_INVALID_TASK;
+
+ *voucher = NULL;
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+task_set_mach_voucher(
+ task_t task,
+ ipc_voucher_t __unused voucher)
+{
+ if (TASK_NULL == task)
+ return KERN_INVALID_TASK;
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+task_swap_mach_voucher(
+ task_t task,
+ ipc_voucher_t new_voucher,
+ ipc_voucher_t *in_out_old_voucher)
+{
+ if (TASK_NULL == task)
+ return KERN_INVALID_TASK;
+
+ *in_out_old_voucher = new_voucher;
+ return KERN_SUCCESS;
+}
+
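+/*
+ * Set or clear the flag marking a task as denied access to the GPU.
+ * Readers of the flag deliberately skip the task lock.
+ */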
+void task_set_gpu_denied(task_t task, boolean_t denied)
+{
+ task_lock(task);
+
+ if (denied) {
+ task->t_flags |= TF_GPU_DENIED;
+ } else {
+ task->t_flags &= ~TF_GPU_DENIED;
+ }
+
+ task_unlock(task);
+}
+
+boolean_t task_is_gpu_denied(task_t task)
+{
+ /* We don't need the lock to read this flag */
+ return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
+}
+
+
+uint64_t get_task_memory_region_count(task_t task)
+{
+ vm_map_t map;
+ map = (task == kernel_task) ? kernel_map: task->map;
+ return((uint64_t)get_map_nentries(map));
+}
+
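+/*
+ * Emit the kdebug events describing one dyld image. The UUID, load address
+ * and fsid/fsobjid are packed into two events on LP64 kernels (base_code and
+ * base_code + 1) and three narrower events (base_code + 2 through + 4) on
+ * 32-bit kernels.
+ */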
+static void
+kdebug_trace_dyld_internal(uint32_t base_code,
+ struct dyld_kernel_image_info *info)
+{
+ static_assert(sizeof(info->uuid) >= 16);
+
+#if defined(__LP64__)
+ uint64_t *uuid = (uint64_t *)&(info->uuid);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
+ uuid[1], info->load_addr,
+ (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
+ 0);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
+ (uint64_t)info->fsobjid.fid_objno |
+ ((uint64_t)info->fsobjid.fid_generation << 32),
+ 0, 0, 0, 0);
+#else /* defined(__LP64__) */
+ uint32_t *uuid = (uint32_t *)&(info->uuid);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
+ uuid[1], uuid[2], uuid[3], 0);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
+ (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
+ info->fsobjid.fid_objno, 0);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
+ info->fsobjid.fid_generation, 0, 0, 0, 0);
+#endif /* !defined(__LP64__) */
+}
+
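+/*
+ * Copy the dyld image info array out of the incoming vm_map_copy and trace
+ * each entry. The copy is discarded up front if kdebug is not collecting
+ * these events, and only the current task may register its own images.
+ */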
+static kern_return_t
+kdebug_trace_dyld(task_t task, uint32_t base_code,
+ vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
+{
+ kern_return_t kr;
+ dyld_kernel_image_info_array_t infos;
+ vm_map_offset_t map_data;
+ vm_offset_t data;
+
+ if (!infos_copy) {
+ return KERN_INVALID_ADDRESS;
+ }
+
+ if (!kdebug_enable ||
+ !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0)))
+ {
+ vm_map_copy_discard(infos_copy);
+ return KERN_SUCCESS;
+ }
+
+ if (task == NULL || task != current_task()) {
+ return KERN_INVALID_TASK;
+ }
+
+ kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
+
+ for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
+ kdebug_trace_dyld_internal(base_code, &(infos[i]));
+ }
+
+ data = CAST_DOWN(vm_offset_t, map_data);
+ mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+task_register_dyld_image_infos(task_t task,
+ dyld_kernel_image_info_array_t infos_copy,
+ mach_msg_type_number_t infos_len)
+{
+ return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
+ (vm_map_copy_t)infos_copy, infos_len);
+}
+
+kern_return_t
+task_unregister_dyld_image_infos(task_t task,
+ dyld_kernel_image_info_array_t infos_copy,
+ mach_msg_type_number_t infos_len)
+{
+ return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
+ (vm_map_copy_t)infos_copy, infos_len);
+}
+
+kern_return_t
+task_get_dyld_image_infos(__unused task_t task,
+ __unused dyld_kernel_image_info_array_t * dyld_images,
+ __unused mach_msg_type_number_t * dyld_imagesCnt)
+{
+ return KERN_NOT_SUPPORTED;
+}
+
+kern_return_t
+task_register_dyld_shared_cache_image_info(task_t task,
+ dyld_kernel_image_info_t cache_img,
+ __unused boolean_t no_cache,
+ __unused boolean_t private_cache)
+{
+ if (task == NULL || task != current_task()) {
+ return KERN_INVALID_TASK;
+ }
+
+ kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+task_register_dyld_set_dyld_state(__unused task_t task,
+ __unused uint8_t dyld_state)
+{
+ return KERN_NOT_SUPPORTED;
+}
+
+kern_return_t
+task_register_dyld_get_process_state(__unused task_t task,
+ __unused dyld_kernel_process_info_t * dyld_process_state)
+{
+ return KERN_NOT_SUPPORTED;
+}
+
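+/*
+ * Return monotonic counter data for a task. Only the
+ * TASK_INSPECT_BASIC_COUNTS flavor (fixed instruction and cycle counts) is
+ * handled, and only on MONOTONIC kernels.
+ */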
+kern_return_t
+task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
+ task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
+{
+#if MONOTONIC
+ task_t task = (task_t)task_insp;
+ kern_return_t kr = KERN_SUCCESS;
+ mach_msg_type_number_t size;
+
+ if (task == TASK_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ size = *size_in_out;
+
+ switch (flavor) {
+ case TASK_INSPECT_BASIC_COUNTS: {
+ struct task_inspect_basic_counts *bc;
+ uint64_t task_counts[MT_CORE_NFIXED] = { 0 };
+
+ if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
+ kr = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ mt_fixed_task_counts(task, task_counts);
+ bc = (struct task_inspect_basic_counts *)info_out;
+#ifdef MT_CORE_INSTRS
+ bc->instructions = task_counts[MT_CORE_INSTRS];
+#else /* defined(MT_CORE_INSTRS) */
+ bc->instructions = 0;
+#endif /* !defined(MT_CORE_INSTRS) */
+ bc->cycles = task_counts[MT_CORE_CYCLES];
+ size = TASK_INSPECT_BASIC_COUNTS_COUNT;
+ break;
+ }
+ default:
+ kr = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ if (kr == KERN_SUCCESS) {
+ *size_in_out = size;
+ }
+ return kr;
+#else /* MONOTONIC */
+#pragma unused(task_insp, flavor, info_out, size_in_out)
+ return KERN_NOT_SUPPORTED;
+#endif /* !MONOTONIC */
+}
+
+#if CONFIG_SECLUDED_MEMORY
+int num_tasks_can_use_secluded_mem = 0;
+
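+/*
+ * Grant or revoke a task's use of secluded memory. The unlocked wrapper
+ * below is a no-op unless task_could_use_secluded_mem was ever set; the
+ * _locked variant keeps num_tasks_can_use_secluded_mem in sync as tasks
+ * gain or lose the ability.
+ */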
+void
+task_set_can_use_secluded_mem(
+ task_t task,
+ boolean_t can_use_secluded_mem)
+{
+ if (!task->task_could_use_secluded_mem) {
+ return;
+ }
+ task_lock(task);
+ task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
+ task_unlock(task);
+}
+
+void
+task_set_can_use_secluded_mem_locked(
+ task_t task,
+ boolean_t can_use_secluded_mem)
+{
+ assert(task->task_could_use_secluded_mem);
+ if (can_use_secluded_mem &&
+ secluded_for_apps && /* global boot-arg */
+ !task->task_can_use_secluded_mem) {
+ assert(num_tasks_can_use_secluded_mem >= 0);
+ OSAddAtomic(+1,
+ (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
+ task->task_can_use_secluded_mem = TRUE;
+ } else if (!can_use_secluded_mem &&
+ task->task_can_use_secluded_mem) {
+ assert(num_tasks_can_use_secluded_mem > 0);
+ OSAddAtomic(-1,
+ (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
+ task->task_can_use_secluded_mem = FALSE;
+ }
+}
+
+void
+task_set_could_use_secluded_mem(
+ task_t task,
+ boolean_t could_use_secluded_mem)
+{
+ task->task_could_use_secluded_mem = could_use_secluded_mem;
+}
+
+void
+task_set_could_also_use_secluded_mem(
+ task_t task,
+ boolean_t could_also_use_secluded_mem)
+{
+ task->task_could_also_use_secluded_mem = could_also_use_secluded_mem;
+}
+
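+/*
+ * Decide whether an allocation for this task may come from secluded memory:
+ * either task_can_use_secluded_mem is set, or the task has the "could also
+ * use" flag while some task currently holds the full ability, or (on
+ * allocation) its physical footprint has crossed secluded_shutoff_trigger,
+ * which also starts secluded suppression.
+ */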
+boolean_t
+task_can_use_secluded_mem(
+ task_t task,
+ boolean_t is_alloc)
+{
+ if (task->task_can_use_secluded_mem) {
+ assert(task->task_could_use_secluded_mem);
+ assert(num_tasks_can_use_secluded_mem > 0);
+ return TRUE;
+ }
+ if (task->task_could_also_use_secluded_mem &&
+ num_tasks_can_use_secluded_mem > 0) {
+ assert(num_tasks_can_use_secluded_mem > 0);
+ return TRUE;
+ }
+
+ /*
+ * If a single task is using more than some amount of
+ * memory, allow it to dip into secluded and also begin
+ * suppression of secluded memory until the task exits.
+ */
+ if (is_alloc && secluded_shutoff_trigger != 0) {
+ uint64_t phys_used = get_task_phys_footprint(task);
+ if (phys_used > secluded_shutoff_trigger) {
+ start_secluded_suppression(task);
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+boolean_t
+task_could_use_secluded_mem(
+ task_t task)
+{
+ return task->task_could_use_secluded_mem;
+}
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+queue_head_t *
+task_io_user_clients(task_t task)
+{
+ return (&task->io_user_clients);
+}
+
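+/*
+ * Carry selected fields of the old task over to its replacement during exec;
+ * at present only the vtimers flags are copied.
+ */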
+void
+task_copy_fields_for_exec(task_t dst_task, task_t src_task)
+{
+ dst_task->vtimers = src_task->vtimers;
+}
+
+#if DEVELOPMENT || DEBUG
+int vm_region_footprint = 0;
+#endif /* DEVELOPMENT || DEBUG */
+
+boolean_t
+task_self_region_footprint(void)
+{
+#if DEVELOPMENT || DEBUG
+ if (vm_region_footprint) {
+ /* system-wide override */
+ return TRUE;
+ }
+#endif /* DEVELOPMENT || DEBUG */
+ return current_task()->task_region_footprint;
+}
+
+void
+task_self_region_footprint_set(
+ boolean_t newval)
+{
+ task_t curtask;
+
+ curtask = current_task();
+ task_lock(curtask);
+ if (newval) {
+ curtask->task_region_footprint = TRUE;
+ } else {
+ curtask->task_region_footprint = FALSE;
+ }
+ task_unlock(curtask);
+}
+
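+/*
+ * Toggle the TF_DARKWAKE_MODE flag on the task under the task lock; the
+ * getter below reads it without locking.
+ */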
+void
+task_set_darkwake_mode(task_t task, boolean_t set_mode)
+{
+ assert(task);
+
+ task_lock(task);
+
+ if (set_mode) {
+ task->t_flags |= TF_DARKWAKE_MODE;
+ } else {
+ task->t_flags &= ~(TF_DARKWAKE_MODE);
+ }
+
+ task_unlock(task);
+}
+
+boolean_t
+task_get_darkwake_mode(task_t task)
+{
+ assert(task);
+ return ((task->t_flags & TF_DARKWAKE_MODE) != 0);
+}
+
+#if __arm64__
+void
+task_set_legacy_footprint(
+ task_t task,
+ boolean_t new_val)
+{
+ task_lock(task);
+ task->task_legacy_footprint = new_val;
+ task_unlock(task);
+}
+#endif /* __arm64__ */