X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/39236c6e673c41db228275375ab7fdb0f837b292..e8c3f78193f1895ea514044358b93b1add9322f3:/osfmk/kern/task.c

diff --git a/osfmk/kern/task.c b/osfmk/kern/task.c
index d39ed2047..c80e30d30 100644
--- a/osfmk/kern/task.c
+++ b/osfmk/kern/task.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
@@ -86,18 +86,19 @@
 * Copyright (c) 2005 SPARTA, Inc.
 */
-#include
-#include
-
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
+#include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -108,8 +109,10 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
 #include
 #include	/* for thread_wakeup */
 #include
@@ -120,10 +123,21 @@
 #include
 #include
 #include
+#include
+#include
+
+#include
 #if CONFIG_TELEMETRY
 #include
 #endif
+#if MONOTONIC
+#include
+#include
+#endif /* MONOTONIC */
+
+#include
+
 #include
 #include
 #include	/* for kernel_map, ipc_kernel_map */
@@ -132,6 +146,8 @@
 #include
 #include
+#include	/* for coredump */
+
 /*
 * Exported interfaces
 */
@@ -140,20 +156,26 @@
 #include
 #include
 #include
-#include
 #include
-#if CONFIG_MACF_MACH
-#include
+#include
+#include
+#include
+
+#if CONFIG_ATM
+#include
 #endif
-#if CONFIG_COUNTERS
-#include
-#endif /* CONFIG_COUNTERS */
+#include	/* picks up ledger.h */
-#include
-#include
+#if CONFIG_MACF
+#include
+#endif
+
+#if KPERF
+extern int kpc_force_all_ctrs(task_t, int);
+#endif

 task_t		kernel_task;
 zone_t		task_zone;
@@ -161,6 +183,13 @@ lck_attr_t      task_lck_attr;
 lck_grp_t       task_lck_grp;
 lck_grp_attr_t  task_lck_grp_attr;

+extern int exc_via_corpse_forking;
+extern int corpse_for_fatal_memkill;
+extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
+
+/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE
+ * generation when active.
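+ *
+ * Illustrative sketch (editor's addition, not part of this change; the guard
+ * below is an assumption about how the resource-monitor paths consult it):
+ *
+ *	if (audio_active) {
+ *		/* suppress EXC_RESOURCE generation while audio is playing */
+ *		return;
+ *	}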
+ */
+int audio_active = 0;
+
 zinfo_usage_store_t tasks_tkm_private;
 zinfo_usage_store_t tasks_tkm_shared;
@@ -168,19 +197,63 @@ zinfo_usage_store_t tasks_tkm_shared;
 expired_task_statistics_t		dead_task_statistics;
 lck_spin_t		dead_task_statistics_lock;

-static ledger_template_t task_ledger_template = NULL;
-struct _task_ledger_indices task_ledgers __attribute__((used)) = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+ledger_template_t task_ledger_template = NULL;
+
+SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
+{.cpu_time = -1,
+ .tkm_private = -1,
+ .tkm_shared = -1,
+ .phys_mem = -1,
+ .wired_mem = -1,
+ .internal = -1,
+ .iokit_mapped = -1,
+ .alternate_accounting = -1,
+ .alternate_accounting_compressed = -1,
+ .page_table = -1,
+ .phys_footprint = -1,
+ .internal_compressed = -1,
+ .purgeable_volatile = -1,
+ .purgeable_nonvolatile = -1,
+ .purgeable_volatile_compressed = -1,
+ .purgeable_nonvolatile_compressed = -1,
+ .network_volatile = -1,
+ .network_nonvolatile = -1,
+ .network_volatile_compressed = -1,
+ .network_nonvolatile_compressed = -1,
+ .platform_idle_wakeups = -1,
+ .interrupt_wakeups = -1,
+#if !CONFIG_EMBEDDED
+ .sfi_wait_times = { 0 /* initialized at runtime */},
+#endif /* !CONFIG_EMBEDDED */
+ .cpu_time_billed_to_me = -1,
+ .cpu_time_billed_to_others = -1,
+ .physical_writes = -1,
+ .logical_writes = -1,
+ .energy_billed_to_me = -1,
+ .energy_billed_to_others = -1
+};
+
+/* System sleep state */
+boolean_t tasks_suspend_state;
+
+
 void init_task_ledgers(void);
 void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
 void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
-void __attribute__((noinline)) THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS__SENDING_EXC_RESOURCE(void);
-void __attribute__((noinline)) THIS_PROCESS_CROSSED_HIGH_WATERMARK__SENDING_EXC_RESOURCE(int max_footprint_mb);
-int coredump(void *core_proc, int reserve_mb, int ignore_ulimit);
+void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
+void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
+void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal);
+void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);

 kern_return_t task_suspend_internal(task_t);
 kern_return_t task_resume_internal(task_t);
+static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);
+
+extern kern_return_t iokit_task_terminate(task_t task);

-void proc_init_cpumon_params(void);
+extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
+extern void bsd_copythreadname(void *dst_uth, void *src_uth);
+extern kern_return_t thread_resume(thread_t thread);

 // Warn tasks when they hit 80% of their memory limit.
 #define	PHYS_FOOTPRINT_WARNING_LEVEL 80
@@ -203,69 +276,120 @@ int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which

 int disable_exc_resource; /* Global override to suppress EXC_RESOURCE for resource monitor violations.
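  * (Editor's note, an assumption inferred from the fallback assignment in
  * task_init() below: this override appears to be settable via a
  * "disable_exc_resource" boot-arg.)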
  */

-int max_task_footprint = 0; /* Per-task limit on physical memory consumption */
+ledger_amount_t max_task_footprint = 0;  /* Per-task limit on physical memory consumption in bytes     */
+int max_task_footprint_warning_level = 0;  /* Per-task limit warning percentage */
+int max_task_footprint_mb = 0;  /* Per-task limit on physical memory consumption in megabytes */
+
+/* I/O Monitor Limits */
+#define IOMON_DEFAULT_LIMIT		(20480ull)	/* MB of logical/physical I/O */
+#define IOMON_DEFAULT_INTERVAL		(86400ull)	/* in seconds */
+
+uint64_t task_iomon_limit_mb;		/* Per-task I/O monitor limit in MBs */
+uint64_t task_iomon_interval_secs;	/* Per-task I/O monitor interval in secs */
+
+#define IO_TELEMETRY_DEFAULT_LIMIT		(10ll * 1024ll * 1024ll)
+int64_t io_telemetry_limit;		/* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
+int64_t global_logical_writes_count = 0;	/* Global count for logical writes */
+static boolean_t global_update_logical_writes(int64_t);
+
+#define TASK_MAX_THREAD_LIMIT 256
+
+#if MACH_ASSERT
+int pmap_ledgers_panic = 1;
+int pmap_ledgers_panic_leeway = 3;
+#endif /* MACH_ASSERT */

 int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

+#if CONFIG_COREDUMP
 int hwm_user_cores = 0; /* high watermark violations generate user core files */
+#endif

 #ifdef MACH_BSD
 extern void	proc_getexecutableuuid(void *, unsigned char *, unsigned long);
 extern int	proc_pid(struct proc *p);
 extern int	proc_selfpid(void);
+extern struct proc *current_proc(void);
 extern char	*proc_name_address(struct proc *p);
-#if CONFIG_JETSAM
-extern void	memorystatus_on_ledger_footprint_exceeded(int warning, const int max_footprint_mb);
+extern uint64_t get_dispatchqueue_offset_from_proc(void *);
+extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize);
+extern void workq_proc_suspended(struct proc *p);
+extern void workq_proc_resumed(struct proc *p);
+
+#if CONFIG_MEMORYSTATUS
+extern void	proc_memstat_terminated(struct proc* p, boolean_t set);
+extern void	memorystatus_on_ledger_footprint_exceeded(int warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
+extern void	memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
+extern boolean_t memorystatus_allowed_vm_map_fork(task_t task);
+
+#if DEVELOPMENT || DEBUG
+extern void memorystatus_abort_vm_map_fork(task_t);
 #endif
+
+#endif /* CONFIG_MEMORYSTATUS */
+
+#endif /* MACH_BSD */
+
+#if DEVELOPMENT || DEBUG
+int exc_resource_threads_enabled;
+#endif /* DEVELOPMENT || DEBUG */
+
+#if (DEVELOPMENT || DEBUG) && TASK_EXC_GUARD_DELIVER_CORPSE
+uint32_t task_exc_guard_default = TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE |
+                                  TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE;
+#else
+uint32_t task_exc_guard_default = 0;
 #endif

 /* Forwards */

-void		task_hold_locked(
-			task_t		task);
-void		task_wait_locked(
-			task_t		task,
-			boolean_t	until_not_runnable);
-void		task_release_locked(
-			task_t		task);
-void		task_free(
-			task_t		task );
-void		task_synchronizer_destroy_all(
-			task_t		task);
-
-int check_for_tasksuspend(
-			task_t task);
+static void task_hold_locked(task_t task);
+static void task_wait_locked(task_t task, boolean_t until_not_runnable);
+static void task_release_locked(task_t task);

-void
-task_backing_store_privileged(
-			task_t task)
-{
-	task_lock(task);
-	task->priv_flags |= VM_BACKING_STORE_PRIV;
-	task_unlock(task);
-	return;
-}
+static void task_synchronizer_destroy_all(task_t task);

 void
 task_set_64bit(
 		task_t task,
-
		boolean_t is64bit)
+		boolean_t is_64bit,
+		boolean_t is_64bit_data)
 {
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
 	thread_t thread;
-#endif /* defined(__i386__) || defined(__x86_64__) */
+#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */

 	task_lock(task);

-	if (is64bit) {
-		if (task_has_64BitAddr(task))
+	/*
+	 * Switching to/from 64-bit address spaces
+	 */
+	if (is_64bit) {
+		if (!task_has_64Bit_addr(task)) {
+			task_set_64Bit_addr(task);
+		}
+	} else {
+		if (task_has_64Bit_addr(task)) {
+			task_clear_64Bit_addr(task);
+		}
+	}
+
+	/*
+	 * Switching to/from 64-bit register state.
+	 */
+	if (is_64bit_data) {
+		if (task_has_64Bit_data(task))
 			goto out;
-		task_set_64BitAddr(task);
+
+		task_set_64Bit_data(task);
 	} else {
-		if ( !task_has_64BitAddr(task))
+		if ( !task_has_64Bit_data(task))
 			goto out;
-		task_clear_64BitAddr(task);
+
+		task_clear_64Bit_data(task);
 	}
+
 	/* FIXME: On x86, the thread save state flavor can diverge from the
 	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
 	 * state dichotomy. Since we can be pre-empted in this interval,
@@ -273,73 +397,282 @@ task_set_64bit(
 	 * state with respect to its task's 64-bitness.
 	 */

-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__x86_64__) || defined(__arm64__)
 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
 		thread_mtx_lock(thread);
 		machine_thread_switch_addrmode(thread);
 		thread_mtx_unlock(thread);
+
+#if defined(__arm64__)
+		/* specifically, if running on H9 */
+		if (thread == current_thread()) {
+			uint64_t arg1, arg2;
+			int urgency;
+			spl_t spl = splsched();
+			/*
+			 * This call tells the scheduler that the current thread changed
+			 * its 32-bitness. Other threads were no longer on core when their
+			 * 32-bitness was changed, but current_thread() is on core, and the
+			 * previous call to machine_thread_going_on_core() reported a
+			 * 32-bitness that is now wrong.
+			 *
+			 * This is needed for bring-up; a different callback should be used
+			 * in the future.
+			 *
+			 * TODO: Remove this callout when we no longer support 32-bit code on H9
+			 */
+			thread_lock(thread);
+			urgency = thread_get_urgency(thread, &arg1, &arg2);
+			machine_thread_going_on_core(thread, urgency, 0, 0, mach_approximate_time());
+			thread_unlock(thread);
+			splx(spl);
+		}
+#endif /* defined(__arm64__) */
 	}
-#endif /* defined(__i386__) || defined(__x86_64__) */
+#endif /* defined(__x86_64__) || defined(__arm64__) */

 out:
 	task_unlock(task);
 }

+boolean_t
+task_get_64bit_data(task_t task)
+{
+	return task_has_64Bit_data(task);
+}
+
+void
+task_set_platform_binary(
+		task_t task,
+		boolean_t is_platform)
+{
+	task_lock(task);
+	if (is_platform) {
+		task->t_flags |= TF_PLATFORM;
+	} else {
+		task->t_flags &= ~(TF_PLATFORM);
+	}
+	task_unlock(task);
+}
+
+/*
+ * Set or clear per-task TF_CA_CLIENT_WI flag according to specified argument.
+ * Returns "false" if the flag was already set, and "true" otherwise.
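+ *
+ * Hypothetical caller sketch (editor's addition; the call site is assumed,
+ * not part of this change):
+ *
+ *	if (!task_set_ca_client_wi(task, TRUE)) {
+ *		/* this task already owns a CA_CLIENT work interval */
+ *	}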
+ */
+bool
+task_set_ca_client_wi(
+		task_t task,
+		boolean_t set_or_clear)
+{
+	bool ret = true;
+	task_lock(task);
+	if (set_or_clear) {
+		/* Tasks can have only one CA_CLIENT work interval */
+		if (task->t_flags & TF_CA_CLIENT_WI)
+			ret = false;
+		else
+			task->t_flags |= TF_CA_CLIENT_WI;
+	} else {
+		task->t_flags &= ~TF_CA_CLIENT_WI;
+	}
+	task_unlock(task);
+	return ret;
+}

 void
-task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
+task_set_dyld_info(
+		task_t task,
+		mach_vm_address_t addr,
+		mach_vm_size_t size)
 {
 	task_lock(task);
 	task->all_image_info_addr = addr;
-	task->all_image_info_size = size;
+	task->all_image_info_size = size;
+	task_unlock(task);
+}
+
+void
+task_atm_reset(__unused task_t task) {
+
+#if CONFIG_ATM
+	if (task->atm_context != NULL) {
+		atm_task_descriptor_destroy(task->atm_context);
+		task->atm_context = NULL;
+	}
+#endif
+
+}
+
+void
+task_bank_reset(__unused task_t task) {
+
+	if (task->bank_context != NULL) {
+		bank_task_destroy(task);
+	}
+}
+
+/*
+ * NOTE: This should only be called when the P_LINTRANSIT
+ * flag is set (the proc_trans lock is held) on the
+ * proc associated with the task.
+ */
+void
+task_bank_init(__unused task_t task) {
+
+	if (task->bank_context != NULL) {
+		panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
+	}
+	bank_task_initialize(task);
+}
+
+void
+task_set_did_exec_flag(task_t task)
+{
+	task->t_procflags |= TPF_DID_EXEC;
+}
+
+void
+task_clear_exec_copy_flag(task_t task)
+{
+	task->t_procflags &= ~TPF_EXEC_COPY;
+}
+
+/*
+ * This wait event is t_procflags instead of t_flags because t_flags is volatile
+ *
+ * TODO: store the flags in the same place as the event
+ * rdar://problem/28501994
+ */
+event_t
+task_get_return_wait_event(task_t task)
+{
+	return (event_t)&task->t_procflags;
+}
+
+void
+task_clear_return_wait(task_t task)
+{
+	task_lock(task);
+
+	task->t_flags &= ~TF_LRETURNWAIT;
+
+	if (task->t_flags & TF_LRETURNWAITER) {
+		thread_wakeup(task_get_return_wait_event(task));
+		task->t_flags &= ~TF_LRETURNWAITER;
+	}
+
+	task_unlock(task);
+}
+
+void __attribute__((noreturn))
+task_wait_to_return(void)
+{
+	task_t task;
+
+	task = current_task();
+	task_lock(task);
+
+	if (task->t_flags & TF_LRETURNWAIT) {
+		do {
+			task->t_flags |= TF_LRETURNWAITER;
+			assert_wait(task_get_return_wait_event(task), THREAD_UNINT);
+			task_unlock(task);
+
+			thread_block(THREAD_CONTINUE_NULL);
+
+			task_lock(task);
+		} while (task->t_flags & TF_LRETURNWAIT);
+	}
+	task_unlock(task);
+
+#if CONFIG_MACF
+	/*
+	 * Before jumping to userspace and allowing this process to execute any code,
+	 * notify any interested parties.
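+	 * (Editor's note: which parties subscribe is policy-dependent; the
+	 * assumption, based on the mac_proc_notify_exec_complete() call below,
+	 * is that registered MAC policies receive this notification.)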
+	 */
+	mac_proc_notify_exec_complete(current_proc());
+#endif
+
+	thread_bootstrap_return();
+}
+
+#ifdef CONFIG_32BIT_TELEMETRY
+boolean_t
+task_consume_32bit_log_flag(task_t task)
+{
+	if ((task->t_procflags & TPF_LOG_32BIT_TELEMETRY) != 0) {
+		task->t_procflags &= ~TPF_LOG_32BIT_TELEMETRY;
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+
+void
+task_set_32bit_log_flag(task_t task)
+{
+	task->t_procflags |= TPF_LOG_32BIT_TELEMETRY;
+}
+#endif /* CONFIG_32BIT_TELEMETRY */
+
+boolean_t
+task_is_exec_copy(task_t task)
+{
+	return task_is_exec_copy_internal(task);
+}
+
+boolean_t
+task_did_exec(task_t task)
+{
+	return task_did_exec_internal(task);
+}
+
+boolean_t
+task_is_active(task_t task)
+{
+	return task->active;
+}
+
+boolean_t
+task_is_halting(task_t task)
+{
+	return task->halting;
+}

 #if TASK_REFERENCE_LEAK_DEBUG
 #include

-decl_simple_lock_data(static,task_ref_lock);
 static btlog_t *task_ref_btlog;
 #define TASK_REF_OP_INCR	0x1
 #define TASK_REF_OP_DECR	0x2

+#define TASK_REF_NUM_RECORDS	100000
 #define TASK_REF_BTDEPTH	7

-static void
-task_ref_lock_lock(void *context)
-{
-	simple_lock((simple_lock_t)context);
-}
-static void
-task_ref_lock_unlock(void *context)
-{
-	simple_unlock((simple_lock_t)context);
-}
-
 void
 task_reference_internal(task_t task)
 {
 	void *       bt[TASK_REF_BTDEPTH];
 	int             numsaved = 0;

+	os_ref_retain(&task->ref_count);
+
 	numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
-
-	(void)hw_atomic_add(&(task)->ref_count, 1);
 	btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_INCR,
 			bt, numsaved);
 }

-uint32_t
+os_ref_count_t
 task_deallocate_internal(task_t task)
 {
 	void *       bt[TASK_REF_BTDEPTH];
 	int             numsaved = 0;

 	numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
-
 	btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_DECR,
 			bt, numsaved);
-	return hw_atomic_sub(&(task)->ref_count, 1);
+
+	return os_ref_release(&task->ref_count);
 }

 #endif /* TASK_REFERENCE_LEAK_DEBUG */
@@ -352,6 +685,7 @@ task_init(void)
 	lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
 	lck_attr_setdefault(&task_lck_attr);
 	lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
+	lck_mtx_init(&tasks_corpse_lock, &task_lck_grp, &task_lck_attr);

 	task_zone = zinit(
 			sizeof(struct task),
@@ -361,44 +695,97 @@ task_init(void)

 	zone_change(task_zone, Z_NOENCRYPT, TRUE);

+#if CONFIG_EMBEDDED
+	task_watch_init();
+#endif /* CONFIG_EMBEDDED */
+
 	/*
-	 * Configure per-task memory limit. The boot arg takes precedence over the
-	 * device tree.
+	 * Configure per-task memory limit.
+	 * The boot-arg is interpreted as Megabytes,
+	 * and takes precedence over the device tree.
+	 * Setting the boot-arg to 0 disables task limits.
	 */
-	if (!PE_parse_boot_argn("max_task_pmem", &max_task_footprint,
-			sizeof (max_task_footprint))) {
-		max_task_footprint = 0;
-	}
-
-	if (max_task_footprint == 0) {
+	if (!PE_parse_boot_argn("max_task_pmem", &max_task_footprint_mb,
+			sizeof (max_task_footprint_mb))) {
 		/*
 		 * No limit was found in boot-args, so go look in the device tree.
		 */
-		if (!PE_get_default("kern.max_task_pmem", &max_task_footprint,
-				sizeof(max_task_footprint))) {
-			max_task_footprint = 0;
+		if (!PE_get_default("kern.max_task_pmem", &max_task_footprint_mb,
+				sizeof(max_task_footprint_mb))) {
+			/*
+			 * No limit was found in device tree.
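+			 * (For example, booting with max_task_pmem=512 would cap
+			 * each task's footprint at 512 MB, while max_task_pmem=0
+			 * disables the limit; illustrative values, editor's addition.)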
+			 */
+			max_task_footprint_mb = 0;
 		}
 	}

-	if (max_task_footprint != 0) {
-#if CONFIG_JETSAM
-		if (max_task_footprint < 50) {
+	if (max_task_footprint_mb != 0) {
+#if CONFIG_MEMORYSTATUS
+		if (max_task_footprint_mb < 50) {
 			printf("Warning: max_task_pmem %d below minimum.\n",
-				max_task_footprint);
-			max_task_footprint = 50;
+				max_task_footprint_mb);
+			max_task_footprint_mb = 50;
 		}
 		printf("Limiting task physical memory footprint to %d MB\n",
-			max_task_footprint);
-		max_task_footprint *= 1024 * 1024; // Convert MB to bytes
+			max_task_footprint_mb);
+
+		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024; // Convert MB to bytes
+
+		/*
+		 * Configure the per-task memory limit warning level.
+		 * This is computed as a percentage.
+		 */
+		max_task_footprint_warning_level = 0;
+
+		if (max_mem < 0x40000000) {
+			/*
+			 * On devices with < 1GB of memory:
+			 * -- set warnings to 50MB below the per-task limit.
+			 */
+			if (max_task_footprint_mb > 50) {
+				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
+			}
+		} else {
+			/*
+			 * On devices with >= 1GB of memory:
+			 * -- set warnings to 100MB below the per-task limit.
+			 */
+			if (max_task_footprint_mb > 100) {
+				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
+			}
+		}
+
+		/*
+		 * Never allow warning level to land below the default.
+		 */
+		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
+			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
+		}
+
+		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);
+
 #else
-		printf("Warning: max_task_footprint specified, but jetsam not configured; ignoring.\n");
-#endif
+		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
+#endif /* CONFIG_MEMORYSTATUS */
 	}

+#if DEVELOPMENT || DEBUG
+	if (!PE_parse_boot_argn("exc_resource_threads",
+				&exc_resource_threads_enabled,
+				sizeof(exc_resource_threads_enabled))) {
+		exc_resource_threads_enabled = 1;
 	}
+	PE_parse_boot_argn("task_exc_guard_default",
+			&task_exc_guard_default,
			sizeof(task_exc_guard_default));
+#endif /* DEVELOPMENT || DEBUG */

+#if CONFIG_COREDUMP
 	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
			sizeof (hwm_user_cores))) {
		hwm_user_cores = 0;
 	}
+#endif

 	proc_init_cpumon_params();

@@ -420,15 +807,31 @@ task_init(void)
		disable_exc_resource = 0;
 	}

+	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof (task_iomon_limit_mb))) {
+		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
+	}
+
+	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof (task_iomon_interval_secs))) {
+		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
+	}
+
+	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof (io_telemetry_limit))) {
+		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
+	}
+
+/*
+ * If we have coalitions, coalition_init() will call init_task_ledgers() as it
+ * sets up the ledgers for the default coalition. If we don't have coalitions,
+ * then we have to call it now.
+ */
+#if CONFIG_COALITIONS
+	assert(task_ledger_template);
+#else /* CONFIG_COALITIONS */
 	init_task_ledgers();
+#endif /* CONFIG_COALITIONS */

 #if TASK_REFERENCE_LEAK_DEBUG
-	simple_lock_init(&task_ref_lock, 0);
-	task_ref_btlog = btlog_create(100000,
-					TASK_REF_BTDEPTH,
-					task_ref_lock_lock,
-					task_ref_lock_unlock,
-					&task_ref_lock);
+	task_ref_btlog = btlog_create(TASK_REF_NUM_RECORDS, TASK_REF_BTDEPTH, TRUE /* caller_will_remove_entries_for_element? */);
	assert(task_ref_btlog);
 #endif

@@ -436,12 +839,13 @@ task_init(void)
	 * Create the kernel task as the first task.
	 */
 #ifdef __LP64__
-	if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
+	if (task_create_internal(TASK_NULL, NULL, FALSE, TRUE, TRUE, TF_NONE, TPF_NONE, &kernel_task) != KERN_SUCCESS)
 #else
-	if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
+	if (task_create_internal(TASK_NULL, NULL, FALSE, FALSE, FALSE, TF_NONE, TPF_NONE, &kernel_task) != KERN_SUCCESS)
 #endif
		panic("task_init\n");

+	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
	lck_spin_init(&dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);

@@ -509,18 +913,30 @@ host_security_create_task_token(
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
- *     + phys_mem [task's resident memory]
- *     + phys_compressed
- *     + iokit_mem
+ *     + (internal - alternate_accounting)
+ *     + (internal_compressed - alternate_accounting_compressed)
+ *     + iokit_mapped
+ *     + purgeable_nonvolatile
+ *     + purgeable_nonvolatile_compressed
+ *     + page_table
 *
- * iokit_mem
- *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of clean/dirty state].
- *
- * phys_compressed
- *   Physical compressed: Amount of this task's resident memory which is held by the compressor.
+ * internal
+ *   The task's anonymous memory, which on iOS is always resident.
+ *
+ * internal_compressed
+ *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
+ *
+ * iokit_mapped
+ *   IOKit mappings: The total size of all IOKit mappings in this task
+ *   [regardless of clean/dirty or internal/external state].
+ *
+ * alternate_accounting
+ *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
+ *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
+ *   double counting.
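+ *
+ * Expressed as a formula (editor's restatement of the list above):
+ *
+ *   phys_footprint = (internal - alternate_accounting)
+ *                  + (internal_compressed - alternate_accounting_compressed)
+ *                  + iokit_mapped
+ *                  + purgeable_nonvolatile
+ *                  + purgeable_nonvolatile_compressed
+ *                  + page_table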
 */
void
init_task_ledgers(void)
@@ -530,6 +946,15 @@ init_task_ledgers(void)
	assert(task_ledger_template == NULL);
	assert(kernel_task == TASK_NULL);

+#if MACH_ASSERT
+	PE_parse_boot_argn("pmap_ledgers_panic",
+			&pmap_ledgers_panic,
+			sizeof (pmap_ledgers_panic));
+	PE_parse_boot_argn("pmap_ledgers_panic_leeway",
+			&pmap_ledgers_panic_leeway,
+			sizeof (pmap_ledgers_panic_leeway));
+#endif /* MACH_ASSERT */
+
	if ((t = ledger_template_create("Per-task ledger")) == NULL)
		panic("couldn't create task ledger template");

@@ -542,42 +967,165 @@ init_task_ledgers(void)
	    "bytes");
	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
	    "bytes");
-	task_ledgers.iokit_mem = ledger_entry_add(t, "iokit_mem", "mappings",
+	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
+	    "bytes");
+	task_ledgers.iokit_mapped = ledger_entry_add(t, "iokit_mapped", "mappings",
+	    "bytes");
+	task_ledgers.alternate_accounting = ledger_entry_add(t, "alternate_accounting", "physmem",
+	    "bytes");
+	task_ledgers.alternate_accounting_compressed = ledger_entry_add(t, "alternate_accounting_compressed", "physmem",
	    "bytes");
+	task_ledgers.page_table = ledger_entry_add(t, "page_table", "physmem",
+	    "bytes");
	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
	    "bytes");
-	task_ledgers.phys_compressed = ledger_entry_add(t, "phys_compressed", "physmem",
+	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
	    "bytes");
+	task_ledgers.purgeable_volatile = ledger_entry_add(t, "purgeable_volatile", "physmem", "bytes");
+	task_ledgers.purgeable_nonvolatile = ledger_entry_add(t, "purgeable_nonvolatile", "physmem", "bytes");
+	task_ledgers.purgeable_volatile_compressed = ledger_entry_add(t, "purgeable_volatile_compress", "physmem", "bytes");
+	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add(t, "purgeable_nonvolatile_compress", "physmem", "bytes");
+
+	task_ledgers.network_volatile = ledger_entry_add(t, "network_volatile", "physmem", "bytes");
+	task_ledgers.network_nonvolatile = ledger_entry_add(t, "network_nonvolatile", "physmem", "bytes");
+	task_ledgers.network_volatile_compressed = ledger_entry_add(t, "network_volatile_compressed", "physmem", "bytes");
+	task_ledgers.network_nonvolatile_compressed = ledger_entry_add(t, "network_nonvolatile_compressed", "physmem", "bytes");
+
	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
	    "count");
	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
	    "count");
+
+#if CONFIG_SCHED_SFI
+	sfi_class_id_t class_id, ledger_alias;
+	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
+		task_ledgers.sfi_wait_times[class_id] = -1;
+	}
+
+	/* don't account for UNSPECIFIED */
+	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
+		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
+		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
+			/* Check to see if alias has been registered yet */
+			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
+				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
+			} else {
+				/* Otherwise, initialize it first */
+				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
+			}
+		} else {
+			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
+		}
+
+		if (task_ledgers.sfi_wait_times[class_id] < 0) {
+			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
+		}
+	}

-	if ((task_ledgers.cpu_time < 0) || (task_ledgers.tkm_private < 0) ||
-	    (task_ledgers.tkm_shared < 0) || (task_ledgers.phys_mem < 0) ||
-	    (task_ledgers.wired_mem < 0) || (task_ledgers.iokit_mem < 0) ||
-	    (task_ledgers.phys_footprint < 0) || (task_ledgers.phys_compressed < 0) ||
-	    (task_ledgers.platform_idle_wakeups < 0) || (task_ledgers.interrupt_wakeups < 0)) {
+	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID -1] != -1);
+#endif /* CONFIG_SCHED_SFI */
+
+	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
+	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
+	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
+	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
+	task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
+	task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");
+
+	if ((task_ledgers.cpu_time < 0) ||
+	    (task_ledgers.tkm_private < 0) ||
+	    (task_ledgers.tkm_shared < 0) ||
+	    (task_ledgers.phys_mem < 0) ||
+	    (task_ledgers.wired_mem < 0) ||
+	    (task_ledgers.internal < 0) ||
+	    (task_ledgers.iokit_mapped < 0) ||
+	    (task_ledgers.alternate_accounting < 0) ||
+	    (task_ledgers.alternate_accounting_compressed < 0) ||
+	    (task_ledgers.page_table < 0) ||
+	    (task_ledgers.phys_footprint < 0) ||
+	    (task_ledgers.internal_compressed < 0) ||
+	    (task_ledgers.purgeable_volatile < 0) ||
+	    (task_ledgers.purgeable_nonvolatile < 0) ||
+	    (task_ledgers.purgeable_volatile_compressed < 0) ||
+	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
+	    (task_ledgers.network_volatile < 0) ||
+	    (task_ledgers.network_nonvolatile < 0) ||
+	    (task_ledgers.network_volatile_compressed < 0) ||
+	    (task_ledgers.network_nonvolatile_compressed < 0) ||
+	    (task_ledgers.platform_idle_wakeups < 0) ||
+	    (task_ledgers.interrupt_wakeups < 0) ||
+	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
+	    (task_ledgers.physical_writes < 0) ||
+	    (task_ledgers.logical_writes < 0) ||
+	    (task_ledgers.energy_billed_to_me < 0) ||
+	    (task_ledgers.energy_billed_to_others < 0)
+	    ) {
		panic("couldn't create entries for task ledger template");
	}

+	ledger_track_credit_only(t, task_ledgers.phys_footprint);
+	ledger_track_credit_only(t, task_ledgers.page_table);
+	ledger_track_credit_only(t, task_ledgers.internal);
+	ledger_track_credit_only(t, task_ledgers.internal_compressed);
+	ledger_track_credit_only(t, task_ledgers.iokit_mapped);
+	ledger_track_credit_only(t, task_ledgers.alternate_accounting);
+	ledger_track_credit_only(t, task_ledgers.alternate_accounting_compressed);
+	ledger_track_credit_only(t, task_ledgers.purgeable_volatile);
+	ledger_track_credit_only(t, task_ledgers.purgeable_nonvolatile);
+	ledger_track_credit_only(t, task_ledgers.purgeable_volatile_compressed);
+	ledger_track_credit_only(t, task_ledgers.purgeable_nonvolatile_compressed);
+
+	ledger_track_credit_only(t, task_ledgers.network_volatile);
+	ledger_track_credit_only(t, task_ledgers.network_nonvolatile);
+	ledger_track_credit_only(t, task_ledgers.network_volatile_compressed);
+	ledger_track_credit_only(t, task_ledgers.network_nonvolatile_compressed);
+
	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
+#if MACH_ASSERT
+	if (pmap_ledgers_panic) {
+		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
+		ledger_panic_on_negative(t, task_ledgers.page_table);
+		ledger_panic_on_negative(t, task_ledgers.internal);
+		ledger_panic_on_negative(t, task_ledgers.internal_compressed);
+		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
+		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
+		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
+		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
+		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
+		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
+		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
+
+		ledger_panic_on_negative(t, task_ledgers.network_volatile);
+		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
+		ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
+		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
+	}
+#endif /* MACH_ASSERT */

-#if CONFIG_JETSAM
+#if CONFIG_MEMORYSTATUS
	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
-#endif
+#endif /* CONFIG_MEMORYSTATUS */

	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
		task_wakeups_rate_exceeded, NULL, NULL);
-
+	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
+	ledger_set_callback(t, task_ledgers.logical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_LOGICAL_WRITES, NULL);
+
+	ledger_template_complete(t);
	task_ledger_template = t;
 }

+os_refgrp_decl(static, task_refgrp, "task", NULL);
+
 kern_return_t
 task_create_internal(
	task_t		parent_task,
+	coalition_t	*parent_coalitions __unused,
	boolean_t	inherit_memory,
-	boolean_t	is_64bit,
+	__unused boolean_t	is_64bit,
+	boolean_t	is_64bit_data,
+	uint32_t	t_flags,
+	uint32_t	t_procflags,
	task_t		*child_task)		/* OUT */
 {
	task_t			new_task;
@@ -590,7 +1138,7 @@ task_create_internal(
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
-	new_task->ref_count = 2;
+	os_ref_init_count(&new_task->ref_count, &task_refgrp, 2);

	/* allocate with active entries */
	assert(task_ledger_template != NULL);
@@ -600,11 +1148,16 @@ task_create_internal(
		return(KERN_RESOURCE_SHORTAGE);
	}

+	new_task->ledger = ledger;
+
+#if defined(CONFIG_SCHED_MULTIQ)
+	new_task->sched_group = sched_group_create();
+#endif
+
	/* if inherit_memory is true, parent_task MUST not be NULL */
-	if (inherit_memory)
-		new_task->map = vm_map_fork(ledger, parent_task->map);
+	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory)
+		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
	else
		new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
				(vm_map_offset_t)(VM_MIN_ADDRESS),
@@ -623,30 +1176,34 @@ task_create_internal(
	new_task->legacy_stop_count = 0;
	new_task->active = TRUE;
	new_task->halting = FALSE;
-	new_task->user_data = NULL;
-	new_task->faults = 0;
-	new_task->cow_faults = 0;
-	new_task->pageins = 0;
-	new_task->messages_sent = 0;
-	new_task->messages_received = 0;
-	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
-	new_task->syscalls_unix=0;
-	new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
-	new_task->t_flags = 0;
+	new_task->t_flags = t_flags;
+	new_task->t_procflags = t_procflags;
	new_task->importance = 0;
+	new_task->crashed_thread_id = 0;
+	new_task->exec_token = 0;

-	zinfo_task_init(new_task);
+	new_task->task_exc_guard = task_exc_guard_default;
+
+#if CONFIG_ATM
+	new_task->atm_context = NULL;
+#endif
+	new_task->bank_context = NULL;

 #ifdef MACH_BSD
	new_task->bsd_info = NULL;
+	new_task->corpse_info = NULL;
 #endif /* MACH_BSD */

-#if CONFIG_JETSAM
+#if CONFIG_MACF
+	new_task->crash_label = NULL;
+#endif
+
+#if CONFIG_MEMORYSTATUS
	if (max_task_footprint != 0) {
		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
	}
-#endif
+#endif /* CONFIG_MEMORYSTATUS */

	if (task_wakeups_monitor_rate != 0) {
		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
@@ -654,34 +1211,31 @@ task_create_internal(
		task_wakeups_monitor_ctl(new_task, &flags, &rate);
	}

-#if defined(__i386__) || defined(__x86_64__)
-	new_task->i386_ldt = 0;
-#endif
+#if CONFIG_IO_ACCOUNTING
+	uint32_t flags = IOMON_ENABLE;
+	task_io_monitor_ctl(new_task, &flags);
+#endif /* CONFIG_IO_ACCOUNTING */
+
+	machine_task_init(new_task, parent_task, inherit_memory);

	new_task->task_debug = NULL;

+#if DEVELOPMENT || DEBUG
+	new_task->task_unnested = FALSE;
+	new_task->task_disconnected_count = 0;
+#endif
	queue_init(&new_task->semaphore_list);
	new_task->semaphores_owned = 0;

-#if CONFIG_MACF_MACH
-	new_task->label = labelh_new(1);
-	mac_task_label_init (&new_task->maclabel);
-#endif
-
	ipc_task_init(new_task, parent_task);

-	new_task->total_user_time = 0;
-	new_task->total_system_time = 0;
-	new_task->vtimers = 0;

	new_task->shared_region = NULL;

	new_task->affinity_space = NULL;

-#if CONFIG_COUNTERS
-	new_task->t_chud = 0U;
-#endif
+	new_task->t_kpc = 0;

	new_task->pidsuspended = FALSE;
	new_task->frozen = FALSE;
@@ -695,27 +1249,23 @@ task_create_internal(
	new_task->suspends_outstanding = 0;
 #endif

+#if HYPERVISOR
+	new_task->hv_task_target = NULL;
+#endif /* HYPERVISOR */

-	new_task->low_mem_notified_warn = 0;
-	new_task->low_mem_notified_critical = 0;
-	new_task->purged_memory_warn = 0;
-	new_task->purged_memory_critical = 0;
-	new_task->mem_notify_reserved = 0;
-#if IMPORTANCE_INHERITANCE
-	new_task->imp_receiver = 0;
-	new_task->imp_donor = 0;
-	new_task->imp_reserved = 0;
-	new_task->task_imp_assertcnt = 0;
-	new_task->task_imp_externcnt = 0;
-#endif /* IMPORTANCE_INHERITANCE */
+#if CONFIG_EMBEDDED
+	queue_init(&new_task->task_watchers);
+	new_task->num_taskwatchers = 0;
+	new_task->watchapplying = 0;
+#endif /* CONFIG_EMBEDDED */

-#if defined(__x86_64__)
-	new_task->uexc_range_start = new_task->uexc_range_size = new_task->uexc_handler = 0;
-#endif
+	new_task->mem_notify_reserved = 0;
+	new_task->memlimit_attrs_reserved = 0;

	new_task->requested_policy = default_task_requested_policy;
	new_task->effective_policy = default_task_effective_policy;
-	new_task->pended_policy = default_task_pended_policy;
+
+	task_importance_init_from_parent(new_task, parent_task);

	if (parent_task != TASK_NULL) {
		new_task->sec_token = parent_task->sec_token;
@@ -725,80 +1275,253 @@ task_create_internal(
		shared_region = vm_shared_region_get(parent_task);
		vm_shared_region_set(new_task, shared_region);

-		if(task_has_64BitAddr(parent_task))
-			task_set_64BitAddr(new_task);
+		if(task_has_64Bit_addr(parent_task)) {
+			task_set_64Bit_addr(new_task);
+		}
+
+		if(task_has_64Bit_data(parent_task)) {
+			task_set_64Bit_data(new_task);
+		}
+
		new_task->all_image_info_addr = parent_task->all_image_info_addr;
		new_task->all_image_info_size = parent_task->all_image_info_size;

-#if defined(__i386__) || defined(__x86_64__)
-		if (inherit_memory && parent_task->i386_ldt)
-			new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
-#endif
		if (inherit_memory && parent_task->affinity_space)
			task_affinity_create(parent_task, new_task);

		new_task->pset_hint =
		    parent_task->pset_hint = task_choose_pset(parent_task);

-#if IMPORTANCE_INHERITANCE
-		new_task->imp_donor = parent_task->imp_donor;
-		/* Embedded doesn't want this to inherit */
-		new_task->imp_receiver = parent_task->imp_receiver;
-#endif /* IMPORTANCE_INHERITANCE */
-
-		new_task->requested_policy.t_apptype     = parent_task->requested_policy.t_apptype;
-
-		new_task->requested_policy.int_darwinbg  = parent_task->requested_policy.int_darwinbg;
-		new_task->requested_policy.ext_darwinbg  = parent_task->requested_policy.ext_darwinbg;
-		new_task->requested_policy.int_iotier    = parent_task->requested_policy.int_iotier;
-		new_task->requested_policy.ext_iotier    = parent_task->requested_policy.ext_iotier;
-		new_task->requested_policy.int_iopassive = parent_task->requested_policy.int_iopassive;
-		new_task->requested_policy.ext_iopassive = parent_task->requested_policy.ext_iopassive;
-		new_task->requested_policy.bg_iotier     = parent_task->requested_policy.bg_iotier;
-		new_task->requested_policy.terminated    = parent_task->requested_policy.terminated;
+		new_task->priority = BASEPRI_DEFAULT;
+		new_task->max_priority = MAXPRI_USER;

-		task_policy_create(new_task, parent_task->requested_policy.t_boosted);
+		task_policy_create(new_task, parent_task);
	} else {
		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
 #ifdef __LP64__
-		if(is_64bit)
-			task_set_64BitAddr(new_task);
+		if(is_64bit) {
+			task_set_64Bit_addr(new_task);
+		}
 #endif
+
+		if(is_64bit_data) {
+			task_set_64Bit_data(new_task);
+		}
+
		new_task->all_image_info_addr = (mach_vm_address_t)0;
		new_task->all_image_info_size = (mach_vm_size_t)0;

		new_task->pset_hint = PROCESSOR_SET_NULL;
+
+		if (kernel_task == TASK_NULL) {
+			new_task->priority = BASEPRI_KERNEL;
+			new_task->max_priority = MAXPRI_KERNEL;
+		} else {
+			new_task->priority = BASEPRI_DEFAULT;
+			new_task->max_priority = MAXPRI_USER;
+		}
	}
+
+	bzero(new_task->coalition, sizeof(new_task->coalition));
+	for (int i = 0; i < COALITION_NUM_TYPES; i++)
+		queue_chain_init(new_task->task_coalition[i]);
+
+	/* Allocate I/O Statistics */
+	new_task->task_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info));
+	assert(new_task->task_io_stats != NULL);
+	bzero(new_task->task_io_stats, sizeof(struct io_stat_info));
+
+	bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
+	bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
+
+	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
+
+	/* Copy resource accounting info from the parent for a corpse-forked task.
	 */
+	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
+		task_rollup_accounting_info(new_task, parent_task);
+	} else {
+		/* Initialize to zero for standard fork/spawn case */
+		new_task->total_user_time = 0;
+		new_task->total_system_time = 0;
+		new_task->total_ptime = 0;
+		new_task->total_runnable_time = 0;
+		new_task->faults = 0;
+		new_task->pageins = 0;
+		new_task->cow_faults = 0;
+		new_task->messages_sent = 0;
+		new_task->messages_received = 0;
+		new_task->syscalls_mach = 0;
+		new_task->syscalls_unix = 0;
+		new_task->c_switch = 0;
+		new_task->p_switch = 0;
+		new_task->ps_switch = 0;
+		new_task->low_mem_notified_warn = 0;
+		new_task->low_mem_notified_critical = 0;
+		new_task->purged_memory_warn = 0;
+		new_task->purged_memory_critical = 0;
+		new_task->low_mem_privileged_listener = 0;
+		new_task->memlimit_is_active = 0;
+		new_task->memlimit_is_fatal = 0;
+		new_task->memlimit_active_exc_resource = 0;
+		new_task->memlimit_inactive_exc_resource = 0;
+		new_task->task_timer_wakeups_bin_1 = 0;
+		new_task->task_timer_wakeups_bin_2 = 0;
+		new_task->task_gpu_ns = 0;
+		new_task->task_immediate_writes = 0;
+		new_task->task_deferred_writes = 0;
+		new_task->task_invalidated_writes = 0;
+		new_task->task_metadata_writes = 0;
+		new_task->task_energy = 0;
+#if MONOTONIC
+		memset(&new_task->task_monotonic, 0, sizeof(new_task->task_monotonic));
+#endif /* MONOTONIC */
	}

-	if (kernel_task == TASK_NULL) {
-		new_task->priority = BASEPRI_KERNEL;
-		new_task->max_priority = MAXPRI_KERNEL;
-	} else if (proc_get_effective_task_policy(new_task, TASK_POLICY_LOWPRI_CPU)) {
-		new_task->priority = MAXPRI_THROTTLE;
-		new_task->max_priority = MAXPRI_THROTTLE;
+
+#if CONFIG_COALITIONS
+	if (!(t_flags & TF_CORPSE_FORK)) {
+		/* TODO: there is no graceful failure path here... */
+		if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
+			coalitions_adopt_task(parent_coalitions, new_task);
+		} else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
+			/*
+			 * all tasks at least have a resource coalition, so
+			 * if the parent has one then inherit all coalitions
+			 * the parent is a part of
+			 */
+			coalitions_adopt_task(parent_task->coalition, new_task);
+		} else {
+			/* TODO: assert that new_task will be PID 1 (launchd) */
+			coalitions_adopt_init_task(new_task);
+		}
+		/*
+		 * on exec, we need to transfer the coalition roles from the
+		 * parent task to the exec copy task.
+		 */
+		if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
+			int coal_roles[COALITION_NUM_TYPES];
+			task_coalition_roles(parent_task, coal_roles);
+			(void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
+		}
	} else {
-		new_task->priority = BASEPRI_DEFAULT;
-		new_task->max_priority = MAXPRI_USER;
+		coalitions_adopt_corpse_task(new_task);
	}

-	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
-	new_task->task_timer_wakeups_bin_1 = new_task->task_timer_wakeups_bin_2 = 0;
-	lck_mtx_lock(&tasks_threads_lock);
-	queue_enter(&tasks, new_task, task_t, tasks);
-	tasks_count++;
-	lck_mtx_unlock(&tasks_threads_lock);
+	if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
+		panic("created task is not a member of a resource coalition");
+	}
+#endif /* CONFIG_COALITIONS */

-	if (vm_backing_store_low && parent_task != NULL)
-		new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
+	new_task->dispatchqueue_offset = 0;
+	if (parent_task != NULL) {
+		new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
+	}

	new_task->task_volatile_objects = 0;
+	new_task->task_nonvolatile_objects = 0;
+	new_task->task_purgeable_disowning = FALSE;
+	new_task->task_purgeable_disowned = FALSE;
+	queue_init(&new_task->task_objq);
+	task_objq_lock_init(new_task);
+
+#if __arm64__
+	new_task->task_legacy_footprint = FALSE;
+#endif /* __arm64__ */
+	new_task->task_region_footprint = FALSE;
+	new_task->task_has_crossed_thread_limit = FALSE;
+	new_task->task_thread_limit = 0;
+#if CONFIG_SECLUDED_MEMORY
+	new_task->task_can_use_secluded_mem = FALSE;
+	new_task->task_could_use_secluded_mem = FALSE;
+	new_task->task_could_also_use_secluded_mem = FALSE;
+	new_task->task_suppressed_secluded = FALSE;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+	/*
+	 * t_flags is set up above. But since we don't
+	 * support darkwake mode being set that way
+	 * currently, we clear it out here explicitly.
	 */
+	new_task->t_flags &= ~(TF_DARKWAKE_MODE);
+
+	queue_init(&new_task->io_user_clients);

	ipc_task_enable(new_task);

+	lck_mtx_lock(&tasks_threads_lock);
+	queue_enter(&tasks, new_task, task_t, tasks);
+	tasks_count++;
+	if (tasks_suspend_state) {
+		task_suspend_internal(new_task);
+	}
+	lck_mtx_unlock(&tasks_threads_lock);
+
	*child_task = new_task;
	return(KERN_SUCCESS);
 }

+/*
+ * task_rollup_accounting_info
+ *
+ * Roll up accounting stats. Used to roll up stats
+ * for exec-copy tasks and corpse forks.
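+ *
+ * Call-site sketch (as in task_create_internal() above):
+ *
+ *	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
+ *		task_rollup_accounting_info(new_task, parent_task);
+ *	}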
+ */
+void
+task_rollup_accounting_info(task_t to_task, task_t from_task)
+{
+	assert(from_task != to_task);
+
+	to_task->total_user_time = from_task->total_user_time;
+	to_task->total_system_time = from_task->total_system_time;
+	to_task->total_ptime = from_task->total_ptime;
+	to_task->total_runnable_time = from_task->total_runnable_time;
+	to_task->faults = from_task->faults;
+	to_task->pageins = from_task->pageins;
+	to_task->cow_faults = from_task->cow_faults;
+	to_task->messages_sent = from_task->messages_sent;
+	to_task->messages_received = from_task->messages_received;
+	to_task->syscalls_mach = from_task->syscalls_mach;
+	to_task->syscalls_unix = from_task->syscalls_unix;
+	to_task->c_switch = from_task->c_switch;
+	to_task->p_switch = from_task->p_switch;
+	to_task->ps_switch = from_task->ps_switch;
+	to_task->extmod_statistics = from_task->extmod_statistics;
+	to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
+	to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
+	to_task->purged_memory_warn = from_task->purged_memory_warn;
+	to_task->purged_memory_critical = from_task->purged_memory_critical;
+	to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
+	*to_task->task_io_stats = *from_task->task_io_stats;
+	to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
+	to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
+	to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
+	to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
+	to_task->task_gpu_ns = from_task->task_gpu_ns;
+	to_task->task_immediate_writes = from_task->task_immediate_writes;
+	to_task->task_deferred_writes = from_task->task_deferred_writes;
+	to_task->task_invalidated_writes = from_task->task_invalidated_writes;
+	to_task->task_metadata_writes = from_task->task_metadata_writes;
+	to_task->task_energy = from_task->task_energy;
+
+	/* Skip ledger roll up for memory accounting entries */
+	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
+	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
+	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
+#if CONFIG_SCHED_SFI
+	for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
+		ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
+	}
+#endif
+	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
+	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
+	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
+	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
+	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
+	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
+}
+
+int task_dropped_imp_count = 0;
+
 /*
 *	task_deallocate:
 *
@@ -809,18 +1532,50 @@ task_deallocate(
	task_t		task)
 {
	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
+	os_ref_count_t refs;

	if (task == TASK_NULL)
	    return;

-	if (task_deallocate_internal(task) > 0)
+	refs = task_deallocate_internal(task);
+
+#if IMPORTANCE_INHERITANCE
+	if (refs == 1) {
+		/*
+		 * If the last ref potentially comes from the task's importance,
+		 * disconnect it. But more task refs may be added before
+		 * that completes, so wait for the reference to go to zero
+		 * naturally (it may happen on a recursive task_deallocate()
+		 * from the ipc_importance_disconnect_task() call).
+		 */
+		if (IIT_NULL != task->task_imp_base)
+			ipc_importance_disconnect_task(task);
+		return;
+	}
+#endif /* IMPORTANCE_INHERITANCE */
+
+	if (refs > 0) {
		return;
+	}

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&terminated_tasks, task, task_t, tasks);
	terminated_tasks_count--;
	lck_mtx_unlock(&tasks_threads_lock);

+	/*
+	 * remove the reference on atm descriptor
+	 */
+	task_atm_reset(task);
+
+	/*
+	 * remove the reference on bank context
+	 */
+	task_bank_reset(task);
+
+	if (task->task_io_stats)
+		kfree(task->task_io_stats, sizeof(struct io_stat_info));
+
	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
@@ -830,9 +1585,32 @@ task_deallocate(

	ipc_task_terminate(task);

+	/* let iokit know */
+	iokit_task_terminate(task);
+
	if (task->affinity_space)
		task_affinity_deallocate(task);

+#if MACH_ASSERT
+	if (task->ledger != NULL &&
+	    task->map != NULL &&
+	    task->map->pmap != NULL &&
+	    task->map->pmap->ledger != NULL) {
+		assert(task->ledger == task->map->pmap->ledger);
+	}
+#endif /* MACH_ASSERT */
+
+	vm_purgeable_disown(task);
+	assert(task->task_purgeable_disowned);
+	if (task->task_volatile_objects != 0 ||
+	    task->task_nonvolatile_objects != 0) {
+		panic("task_deallocate(%p): "
+		      "volatile_objects=%d nonvolatile_objects=%d\n",
+		      task,
+		      task->task_volatile_objects,
+		      task->task_nonvolatile_objects);
+	}
+
	vm_map_deallocate(task->map);
	is_release(task->itk_space);

@@ -841,6 +1619,10 @@ task_deallocate(
	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
	                   &platform_idle_wakeups, &debit);

+#if defined(CONFIG_SCHED_MULTIQ)
+	sched_group_destroy(task->sched_group);
+#endif
+
	/* Accumulate statistics for dead tasks */
	lck_spin_lock(&dead_task_statistics_lock);
	dead_task_statistics.total_user_time += task->total_user_time;
@@ -851,14 +1633,14 @@ task_deallocate(
	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
+	dead_task_statistics.total_ptime += task->total_ptime;
+	dead_task_statistics.total_pset_switches += task->ps_switch;
+	dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
+	dead_task_statistics.task_energy += task->task_energy;

	lck_spin_unlock(&dead_task_statistics_lock);
	lck_mtx_destroy(&task->lock, &task_lck_grp);

-#if CONFIG_MACF_MACH
-	labelh_release(task->label);
-#endif
-
	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
	    &debit)) {
		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
@@ -870,21 +1652,37 @@ task_deallocate(
		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
	}
	ledger_dereference(task->ledger);
-	zinfo_task_free(task);

 #if TASK_REFERENCE_LEAK_DEBUG
	btlog_remove_entries_for_element(task_ref_btlog, task);
 #endif

-	if (task->task_volatile_objects) {
-		/*
-		 * This task still "owns" some volatile VM objects.
-		 * Disown them now to avoid leaving them pointing back at
-		 * an invalid task.
-		 */
-		vm_purgeable_disown(task);
-		assert(task->task_volatile_objects == 0);
-	}
+#if CONFIG_COALITIONS
+	task_release_coalitions(task);
+#endif /* CONFIG_COALITIONS */
+
+	bzero(task->coalition, sizeof(task->coalition));
+
+#if MACH_BSD
+	/* clean up collected information since last reference to task is gone */
+	if (task->corpse_info) {
+		void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
+		task_crashinfo_destroy(task->corpse_info);
+		task->corpse_info = NULL;
+		if (corpse_info_kernel) {
+			kfree(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
+		}
+	}
+#endif
+
+#if CONFIG_MACF
+	if (task->crash_label) {
+		mac_exc_free_label(task->crash_label);
+		task->crash_label = NULL;
	}
+#endif
+
+	assert(queue_empty(&task->task_objq));

	zfree(task_zone, task);
 }
@@ -901,6 +1699,18 @@ task_name_deallocate(
	return(task_deallocate((task_t)task_name));
 }

+/*
+ *	task_inspect_deallocate:
+ *
+ *	Drop a task inspection reference.
+ */
+void
+task_inspect_deallocate(
+	task_inspect_t		task_inspect)
+{
+	return(task_deallocate((task_t)task_inspect));
+}
+
 /*
 *	task_suspension_token_deallocate:
 *
@@ -913,6 +1723,166 @@ task_suspension_token_deallocate(
	return(task_deallocate((task_t)token));
 }

+
+/*
+ * task_collect_crash_info:
+ *
+ * collect crash info from bsd and mach based data
+ */
+kern_return_t
+task_collect_crash_info(
+	task_t task,
+#ifdef CONFIG_MACF
+	struct label *crash_label,
+#endif
+	int is_corpse_fork)
+{
+	kern_return_t kr = KERN_SUCCESS;
+
+	kcdata_descriptor_t crash_data = NULL;
+	kcdata_descriptor_t crash_data_release = NULL;
+	mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
+	mach_vm_offset_t crash_data_ptr = 0;
+	void *crash_data_kernel = NULL;
+	void *crash_data_kernel_release = NULL;
+#if CONFIG_MACF
+	struct label *label, *free_label;
+#endif
+
+	if (!corpses_enabled()) {
+		return KERN_NOT_SUPPORTED;
+	}
+
+#if CONFIG_MACF
+	free_label = label = mac_exc_create_label();
+#endif
+
+	task_lock(task);
+
+	assert(is_corpse_fork || task->bsd_info != NULL);
+	if (task->corpse_info == NULL && (is_corpse_fork || task->bsd_info != NULL)) {
+#if CONFIG_MACF
+		/* Set the crash label, used by the exception delivery mac hook */
+		free_label = task->crash_label; // Most likely NULL.
+		task->crash_label = label;
+		mac_exc_update_task_crash_label(task, crash_label);
+#endif
+		task_unlock(task);
+
+		crash_data_kernel = (void *) kalloc(CORPSEINFO_ALLOCATION_SIZE);
+		if (crash_data_kernel == NULL) {
+			kr = KERN_RESOURCE_SHORTAGE;
+			goto out_no_lock;
+		}
+		bzero(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE);
+		crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
+
+		/* Do not get a corpse ref for corpse fork */
+		crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
+				is_corpse_fork ?
				0 : CORPSE_CRASHINFO_HAS_REF,
				KCFLAG_USE_MEMCOPY);
+		if (crash_data) {
+			task_lock(task);
+			crash_data_release = task->corpse_info;
+			crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
+			task->corpse_info = crash_data;
+
+			task_unlock(task);
+			kr = KERN_SUCCESS;
+		} else {
+			kfree(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE);
+			kr = KERN_FAILURE;
+		}
+
+		if (crash_data_release != NULL) {
+			task_crashinfo_destroy(crash_data_release);
+		}
+		if (crash_data_kernel_release != NULL) {
+			kfree(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
+		}
+	} else {
+		task_unlock(task);
+	}
+
+out_no_lock:
+#if CONFIG_MACF
+	if (free_label != NULL) {
+		mac_exc_free_label(free_label);
+	}
+#endif
+	return kr;
+}
+
+/*
+ * task_deliver_crash_notification:
+ *
+ * Makes an outcall to the registered host port for a corpse.
+ */
+kern_return_t
+task_deliver_crash_notification(
+	task_t task,
+	thread_t thread,
+	exception_type_t etype,
+	mach_exception_subcode_t subcode)
+{
+	kcdata_descriptor_t crash_info = task->corpse_info;
+	thread_t th_iter = NULL;
+	kern_return_t kr = KERN_SUCCESS;
+	wait_interrupt_t wsave;
+	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+	ipc_port_t task_port, old_notify;
+
+	if (crash_info == NULL)
+		return KERN_FAILURE;
+
+	task_lock(task);
+	if (task_is_a_corpse_fork(task)) {
+		/* Populate code with EXC_{RESOURCE,GUARD} for corpse fork */
+		code[0] = etype;
+		code[1] = subcode;
+	} else {
+		/* Populate code with EXC_CRASH for corpses */
+		code[0] = EXC_CRASH;
+		code[1] = 0;
+		/* Update the code[1] if the boot-arg corpse_for_fatal_memkill is set */
+		if (corpse_for_fatal_memkill) {
+			code[1] = subcode;
+		}
+	}
+
+	queue_iterate(&task->threads, th_iter, thread_t, task_threads)
+	{
+		if (th_iter->corpse_dup == FALSE) {
+			ipc_thread_reset(th_iter);
+		}
+	}
+	task_unlock(task);
+
+	/* Arm the no-sender notification for taskport */
+	task_reference(task);
+	task_port = convert_task_to_port(task);
+	ip_lock(task_port);
+	assert(ip_active(task_port));
+	ipc_port_nsrequest(task_port, task_port->ip_mscount, ipc_port_make_sonce_locked(task_port), &old_notify);
+	/* port unlocked */
+	assert(IP_NULL == old_notify);
+
+	wsave = thread_interrupt_level(THREAD_UNINT);
+	kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
+	if (kr != KERN_SUCCESS) {
+		printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(task));
+	}
+
+	(void)thread_interrupt_level(wsave);
+
+	/*
+	 * Drop the send right on the task port; this will fire the
+	 * no-sender notification if exception delivery failed.
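+	 * (Editor's summary of the sequence above: task_reference() +
+	 * convert_task_to_port() take the send right, ipc_port_nsrequest()
+	 * arms the notification, and this release drops the last send right
+	 * so task_port_notify() can reap the corpse if delivery failed.)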
+ */ + ipc_port_release_send(task_port); + return kr; +} + /* * task_terminate: * @@ -933,80 +1903,443 @@ task_terminate( return (task_terminate_internal(task)); } +#if MACH_ASSERT +extern int proc_pid(struct proc *); +extern void proc_name_kdp(task_t t, char *buf, int size); +#endif /* MACH_ASSERT */ + +#define VM_MAP_PARTIAL_REAP 0x54 /* 0x150 */ +static void +__unused task_partial_reap(task_t task, __unused int pid) +{ + unsigned int reclaimed_resident = 0; + unsigned int reclaimed_compressed = 0; + uint64_t task_page_count; + + task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64); + + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START), + pid, task_page_count, 0, 0, 0); + + vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed); + + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END), + pid, reclaimed_resident, reclaimed_compressed, 0, 0); +} + kern_return_t -task_terminate_internal( - task_t task) +task_mark_corpse(task_t task) { - thread_t thread, self; - task_t self_task; - boolean_t interrupt_save; + kern_return_t kr = KERN_SUCCESS; + thread_t self_thread; + (void) self_thread; + wait_interrupt_t wsave; +#if CONFIG_MACF + struct label *crash_label = NULL; +#endif assert(task != kernel_task); + assert(task == current_task()); + assert(!task_is_a_corpse(task)); - self = current_thread(); - self_task = self->task; - - /* - * Get the task locked and make sure that we are not racing - * with someone else trying to terminate us. - */ - if (task == self_task) - task_lock(task); - else - if (task < self_task) { - task_lock(task); - task_lock(self_task); - } - else { - task_lock(self_task); - task_lock(task); +#if CONFIG_MACF + crash_label = mac_exc_create_label_for_proc((struct proc*)task->bsd_info); +#endif + + kr = task_collect_crash_info(task, +#if CONFIG_MACF + crash_label, +#endif + FALSE); + if (kr != KERN_SUCCESS) { + goto out; } - if (!task->active) { - /* - * Task is already being terminated. - * Just return an error. If we are dying, this will - * just get us to our AST special handler and that - * will get us to finalize the termination of ourselves. - */ - task_unlock(task); - if (self_task != task) - task_unlock(self_task); + self_thread = current_thread(); - return (KERN_FAILURE); - } + wsave = thread_interrupt_level(THREAD_UNINT); + task_lock(task); -#if MACH_ASSERT - if (task->suspends_outstanding != 0) { - printf("WARNING: %s (%d) exiting with %d outstanding suspensions\n", - proc_name_address(task->bsd_info), proc_pid(task->bsd_info), - task->suspends_outstanding); - } -#endif + task_set_corpse_pending_report(task); + task_set_corpse(task); + task->crashed_thread_id = thread_tid(self_thread); - if (self_task != task) - task_unlock(self_task); + kr = task_start_halt_locked(task, TRUE); + assert(kr == KERN_SUCCESS); - /* - * Make sure the current thread does not get aborted out of - * the waits inside these operations. - */ - interrupt_save = thread_interrupt_level(THREAD_UNINT); + ipc_task_reset(task); + /* Remove the naked send right for task port, needed to arm no sender notification */ + task_set_special_port(task, TASK_KERNEL_PORT, IPC_PORT_NULL); + ipc_task_enable(task); - /* - * Indicate that we want all the threads to stop executing - * at user space by holding the task (we would have held - * each thread independently in thread_terminate_internal - - * but this way we may be more likely to already find it - * held there). 
Mark the task inactive, and prevent - * further task operations via the task port. - */ - task_hold_locked(task); - task->active = FALSE; - ipc_task_disable(task); + task_unlock(task); + /* terminate the ipc space */ + ipc_space_terminate(task->itk_space); -#if CONFIG_TELEMETRY - /* + /* Add it to global corpse task list */ + task_add_to_corpse_task_list(task); + + task_start_halt(task); + thread_terminate_internal(self_thread); + + (void) thread_interrupt_level(wsave); + assert(task->halting == TRUE); + +out: +#if CONFIG_MACF + mac_exc_free_label(crash_label); +#endif + return kr; +} + +/* + * task_clear_corpse + * + * Clears the corpse pending bit on task. + * Removes inspection bit on the threads. + */ +void +task_clear_corpse(task_t task) +{ + thread_t th_iter = NULL; + + task_lock(task); + queue_iterate(&task->threads, th_iter, thread_t, task_threads) + { + thread_mtx_lock(th_iter); + th_iter->inspection = FALSE; + thread_mtx_unlock(th_iter); + } + + thread_terminate_crashed_threads(); + /* remove the pending corpse report flag */ + task_clear_corpse_pending_report(task); + + task_unlock(task); +} + +/* + * task_port_notify + * + * Called whenever the Mach port system detects no-senders on + * the task port of a corpse. + * Each notification that comes in should terminate the task (corpse). + */ +void +task_port_notify(mach_msg_header_t *msg) +{ + mach_no_senders_notification_t *notification = (void *)msg; + ipc_port_t port = notification->not_header.msgh_remote_port; + task_t task; + + assert(ip_active(port)); + assert(IKOT_TASK == ip_kotype(port)); + task = (task_t) port->ip_kobject; + + assert(task_is_a_corpse(task)); + + /* Remove the task from global corpse task list */ + task_remove_from_corpse_task_list(task); + + task_clear_corpse(task); + task_terminate_internal(task); +} + +/* + * task_wait_till_threads_terminate_locked + * + * Wait till all the threads in the task are terminated. + * Might release the task lock and re-acquire it. + */ +void +task_wait_till_threads_terminate_locked(task_t task) +{ + /* wait for all the threads in the task to terminate */ + while (task->active_thread_count != 0) { + assert_wait((event_t)&task->active_thread_count, THREAD_UNINT); + task_unlock(task); + thread_block(THREAD_CONTINUE_NULL); + + task_lock(task); + } +} + +/* + * task_duplicate_map_and_threads + * + * Copy vmmap of source task. + * Copy active threads from source task to destination task. + * Source task would be suspended during the copy. + */ +kern_return_t +task_duplicate_map_and_threads( + task_t task, + void *p, + task_t new_task, + thread_t *thread_ret, + uint64_t **udata_buffer, + int *size, + int *num_udata) +{ + kern_return_t kr = KERN_SUCCESS; + int active; + thread_t thread, self, thread_return = THREAD_NULL; + thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL; + thread_t *thread_array; + uint32_t active_thread_count = 0, array_count = 0, i; + vm_map_t oldmap; + uint64_t *buffer = NULL; + int buf_size = 0; + int est_knotes = 0, num_knotes = 0; + + self = current_thread(); + + /* + * Suspend the task to copy thread state, use the internal + * variant so that no user-space process can resume + * the task from under us + */ + kr = task_suspend_internal(task); + if (kr != KERN_SUCCESS) { + return kr; + } + + if (task->map->disable_vmentry_reuse == TRUE) { + /* + * Quite likely GuardMalloc (or some debugging tool) + * is being used on this task. And it has gone through + * its limit. Making a corpse will likely encounter + * a lot of VM entries that will need COW. 
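+	 *
+	 * When we do not skip, the clone step further down copies the
+	 * address space copy-on-write; a condensed sketch of that step
+	 * (same flags as the call below):
+	 *
+	 *	vm_map_t oldmap = new_task->map;
+	 *	new_task->map = vm_map_fork(new_task->ledger, task->map,
+	 *	    (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
+	 *	     VM_MAP_FORK_PRESERVE_PURGEABLE |
+	 *	     VM_MAP_FORK_CORPSE_FOOTPRINT));
+	 *	vm_map_deallocate(oldmap);  // drop the placeholder map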
+ * + * Skip it. + */ +#if DEVELOPMENT || DEBUG + memorystatus_abort_vm_map_fork(task); +#endif + task_resume_internal(task); + return KERN_FAILURE; + } + + /* Check with VM if vm_map_fork is allowed for this task */ + if (memorystatus_allowed_vm_map_fork(task)) { + + /* Setup new task's vmmap, switch from parent task's map to it COW map */ + oldmap = new_task->map; + new_task->map = vm_map_fork(new_task->ledger, + task->map, + (VM_MAP_FORK_SHARE_IF_INHERIT_NONE | + VM_MAP_FORK_PRESERVE_PURGEABLE | + VM_MAP_FORK_CORPSE_FOOTPRINT)); + vm_map_deallocate(oldmap); + + /* copy ledgers that impact the memory footprint */ + vm_map_copy_footprint_ledgers(task, new_task); + + /* Get all the udata pointers from kqueue */ + est_knotes = kevent_proc_copy_uptrs(p, NULL, 0); + if (est_knotes > 0) { + buf_size = (est_knotes + 32) * sizeof(uint64_t); + buffer = (uint64_t *) kalloc(buf_size); + num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size); + if (num_knotes > est_knotes + 32) { + num_knotes = est_knotes + 32; + } + } + } + + active_thread_count = task->active_thread_count; + if (active_thread_count == 0) { + if (buffer != NULL) { + kfree(buffer, buf_size); + } + task_resume_internal(task); + return KERN_FAILURE; + } + + thread_array = (thread_t *) kalloc(sizeof(thread_t) * active_thread_count); + + /* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */ + task_lock(task); + queue_iterate(&task->threads, thread, thread_t, task_threads) { + /* Skip inactive threads */ + active = thread->active; + if (!active) { + continue; + } + + if (array_count >= active_thread_count) { + break; + } + + thread_array[array_count++] = thread; + thread_reference(thread); + } + task_unlock(task); + + for (i = 0; i < array_count; i++) { + + kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue); + if (kr != KERN_SUCCESS) { + break; + } + + /* Equivalent of current thread in corpse */ + if (thread_array[i] == self) { + thread_return = new_thread; + new_task->crashed_thread_id = thread_tid(new_thread); + } else if (first_thread == NULL) { + first_thread = new_thread; + } else { + /* drop the extra ref returned by thread_create_with_continuation */ + thread_deallocate(new_thread); + } + + kr = thread_dup2(thread_array[i], new_thread); + if (kr != KERN_SUCCESS) { + thread_mtx_lock(new_thread); + new_thread->corpse_dup = TRUE; + thread_mtx_unlock(new_thread); + continue; + } + + /* Copy thread name */ + bsd_copythreadname(new_thread->uthread, thread_array[i]->uthread); + new_thread->thread_tag = thread_array[i]->thread_tag; + thread_copy_resource_info(new_thread, thread_array[i]); + } + + /* return the first thread if we couldn't find the equivalent of current */ + if (thread_return == THREAD_NULL) { + thread_return = first_thread; + } + else if (first_thread != THREAD_NULL) { + /* drop the extra ref returned by thread_create_with_continuation */ + thread_deallocate(first_thread); + } + + task_resume_internal(task); + + for (i = 0; i < array_count; i++) { + thread_deallocate(thread_array[i]); + } + kfree(thread_array, sizeof(thread_t) * active_thread_count); + + if (kr == KERN_SUCCESS) { + *thread_ret = thread_return; + *udata_buffer = buffer; + *size = buf_size; + *num_udata = num_knotes; + } else { + if (thread_return != THREAD_NULL) { + thread_deallocate(thread_return); + } + if (buffer != NULL) { + kfree(buffer, buf_size); + } + } + + return kr; +} + +#if CONFIG_SECLUDED_MEMORY +extern void task_set_can_use_secluded_mem_locked( 
+ task_t task, + boolean_t can_use_secluded_mem); +#endif /* CONFIG_SECLUDED_MEMORY */ + +kern_return_t +task_terminate_internal( + task_t task) +{ + thread_t thread, self; + task_t self_task; + boolean_t interrupt_save; + int pid = 0; + + assert(task != kernel_task); + + self = current_thread(); + self_task = self->task; + + /* + * Get the task locked and make sure that we are not racing + * with someone else trying to terminate us. + */ + if (task == self_task) + task_lock(task); + else + if (task < self_task) { + task_lock(task); + task_lock(self_task); + } + else { + task_lock(self_task); + task_lock(task); + } + +#if CONFIG_SECLUDED_MEMORY + if (task->task_can_use_secluded_mem) { + task_set_can_use_secluded_mem_locked(task, FALSE); + } + task->task_could_use_secluded_mem = FALSE; + task->task_could_also_use_secluded_mem = FALSE; + + if (task->task_suppressed_secluded) { + stop_secluded_suppression(task); + } +#endif /* CONFIG_SECLUDED_MEMORY */ + + if (!task->active) { + /* + * Task is already being terminated. + * Just return an error. If we are dying, this will + * just get us to our AST special handler and that + * will get us to finalize the termination of ourselves. + */ + task_unlock(task); + if (self_task != task) + task_unlock(self_task); + + return (KERN_FAILURE); + } + + if (task_corpse_pending_report(task)) { + /* + * Task is marked for reporting as corpse. + * Just return an error. This will + * just get us to our AST special handler and that + * will get us to finish the path to death + */ + task_unlock(task); + if (self_task != task) + task_unlock(self_task); + + return (KERN_FAILURE); + } + + if (self_task != task) + task_unlock(self_task); + + /* + * Make sure the current thread does not get aborted out of + * the waits inside these operations. + */ + interrupt_save = thread_interrupt_level(THREAD_UNINT); + + /* + * Indicate that we want all the threads to stop executing + * at user space by holding the task (we would have held + * each thread independently in thread_terminate_internal - + * but this way we may be more likely to already find it + * held there). Mark the task inactive, and prevent + * further task operations via the task port. + */ + task_hold_locked(task); + task->active = FALSE; + ipc_task_disable(task); + +#if CONFIG_TELEMETRY + /* * Notify telemetry that this task is going away. */ telemetry_task_ctl_locked(task, TF_TELEMETRY, 0); @@ -1019,8 +2352,29 @@ task_terminate_internal( thread_terminate_internal(thread); } +#ifdef MACH_BSD + if (task->bsd_info != NULL && !task_is_exec_copy(task)) { + pid = proc_pid(task->bsd_info); + } +#endif /* MACH_BSD */ + task_unlock(task); + proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, + TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE); + + /* Early object reap phase */ + +// PR-17045188: Revisit implementation +// task_partial_reap(task, pid); + +#if CONFIG_EMBEDDED + /* + * remove all task watchers + */ + task_removewatchers(task); + +#endif /* CONFIG_EMBEDDED */ /* * Destroy all synchronizers owned by the task. @@ -1032,8 +2386,21 @@ task_terminate_internal( */ ipc_space_terminate(task->itk_space); - if (vm_map_has_4GB_pagezero(task->map)) - vm_map_clear_4GB_pagezero(task->map); +#if 00 + /* if some ledgers go negative on tear-down again... 
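+	 * (These entries are armed when the ledger template is built;
+	 * the counterpart template-time call, cf. init_task_ledgers(),
+	 * has this shape:
+	 *
+	 *	ledger_panic_on_negative(t, task_ledgers.phys_footprint);
+	 *
+	 * so the disabled block below would relax that per task if it
+	 * were ever re-enabled.)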
*/ + ledger_disable_panic_on_negative(task->map->pmap->ledger, + task_ledgers.phys_footprint); + ledger_disable_panic_on_negative(task->map->pmap->ledger, + task_ledgers.internal); + ledger_disable_panic_on_negative(task->map->pmap->ledger, + task_ledgers.internal_compressed); + ledger_disable_panic_on_negative(task->map->pmap->ledger, + task_ledgers.iokit_mapped); + ledger_disable_panic_on_negative(task->map->pmap->ledger, + task_ledgers.alternate_accounting); + ledger_disable_panic_on_negative(task->map->pmap->ledger, + task_ledgers.alternate_accounting_compressed); +#endif /* * If the current thread is a member of the task @@ -1043,14 +2410,44 @@ task_terminate_internal( * expense of removing the address space regions * at reap time, we do it explictly here. */ + + vm_map_lock(task->map); + vm_map_disable_hole_optimization(task->map); + vm_map_unlock(task->map); + +#if MACH_ASSERT + /* + * Identify the pmap's process, in case the pmap ledgers drift + * and we have to report it. + */ + char procname[17]; + if (task->bsd_info && !task_is_exec_copy(task)) { + pid = proc_pid(task->bsd_info); + proc_name_kdp(task, procname, sizeof (procname)); + } else { + pid = 0; + strlcpy(procname, "", sizeof (procname)); + } + pmap_set_process(task->map->pmap, pid, procname); +#endif /* MACH_ASSERT */ + vm_map_remove(task->map, task->map->min_offset, task->map->max_offset, - VM_MAP_NO_FLAGS); + /* + * Final cleanup: + * + no unnesting + * + remove immutable mappings + * + allow gaps in range + */ + (VM_MAP_REMOVE_NO_UNNESTING | + VM_MAP_REMOVE_IMMUTABLE | + VM_MAP_REMOVE_GAPS_OK)); /* release our shared region */ vm_shared_region_set(task, NULL); + lck_mtx_lock(&tasks_threads_lock); queue_remove(&tasks, task, task_t, tasks); queue_enter(&terminated_tasks, task, task_t, tasks); @@ -1064,6 +2461,19 @@ task_terminate_internal( */ thread_interrupt_level(interrupt_save); +#if KPC + /* force the task to release all ctrs */ + if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) + kpc_force_all_ctrs(task, 0); +#endif /* KPC */ + +#if CONFIG_COALITIONS + /* + * Leave our coalitions. (drop activation but not reference) + */ + coalitions_remove_task(task); +#endif + /* * Get rid of the task active reference on itself. */ @@ -1072,6 +2482,23 @@ task_terminate_internal( return (KERN_SUCCESS); } +void +tasks_system_suspend(boolean_t suspend) +{ + task_t task; + + lck_mtx_lock(&tasks_threads_lock); + assert(tasks_suspend_state != suspend); + tasks_suspend_state = suspend; + queue_iterate(&tasks, task, task_t, tasks) { + if (task == kernel_task) { + continue; + } + suspend ? task_suspend_internal(task) : task_resume_internal(task); + } + lck_mtx_unlock(&tasks_threads_lock); +} + /* * task_start_halt: * @@ -1081,55 +2508,66 @@ task_terminate_internal( * termination. */ kern_return_t -task_start_halt( - task_t task) +task_start_halt(task_t task) { - thread_t thread, self; + kern_return_t kr = KERN_SUCCESS; + task_lock(task); + kr = task_start_halt_locked(task, FALSE); + task_unlock(task); + return kr; +} + +static kern_return_t +task_start_halt_locked(task_t task, boolean_t should_mark_corpse) +{ + thread_t thread, self; + uint64_t dispatchqueue_offset; assert(task != kernel_task); self = current_thread(); - if (task != self->task) + if (task != self->task && !task_is_a_corpse_fork(task)) return (KERN_INVALID_ARGUMENT); - task_lock(task); - if (task->halting || !task->active || !self->active) { /* - * Task or current thread is already being terminated. 
- * Hurry up and return out of the current kernel context - * so that we run our AST special handler to terminate - * ourselves. + * Task or current thread is already being terminated. + * Hurry up and return out of the current kernel context + * so that we run our AST special handler to terminate + * ourselves. */ - task_unlock(task); - return (KERN_FAILURE); } task->halting = TRUE; - if (task->thread_count > 1) { - - /* - * Mark all the threads to keep them from starting any more - * user-level execution. The thread_terminate_internal code - * would do this on a thread by thread basis anyway, but this - * gives us a better chance of not having to wait there. - */ - task_hold_locked(task); - - /* - * Terminate all the other threads in the task. - */ - queue_iterate(&task->threads, thread, thread_t, task_threads) { - if (thread != self) - thread_terminate_internal(thread); - } + /* + * Mark all the threads to keep them from starting any more + * user-level execution. The thread_terminate_internal code + * would do this on a thread by thread basis anyway, but this + * gives us a better chance of not having to wait there. + */ + task_hold_locked(task); + dispatchqueue_offset = get_dispatchqueue_offset_from_proc(task->bsd_info); - task_release_locked(task); + /* + * Terminate all the other threads in the task. + */ + queue_iterate(&task->threads, thread, thread_t, task_threads) + { + if (should_mark_corpse) { + thread_mtx_lock(thread); + thread->inspection = TRUE; + thread_mtx_unlock(thread); + } + if (thread != self) + thread_terminate_internal(thread); } - task_unlock(task); + task->dispatchqueue_offset = dispatchqueue_offset; + + task_release_locked(task); + return KERN_SUCCESS; } @@ -1140,6 +2578,9 @@ task_start_halt( * Complete task halt by waiting for threads to terminate, then clean * up task resources (VM, port namespace, etc...) and then let the * current thread go in the (practically empty) task context. + * + * Note: task->halting flag is not cleared in order to avoid creation + * of new thread in old exec'ed task. */ void task_complete_halt(task_t task) @@ -1185,9 +2626,22 @@ task_complete_halt(task_t task) * getting a new one. */ vm_map_remove(task->map, task->map->min_offset, - task->map->max_offset, VM_MAP_NO_FLAGS); + task->map->max_offset, + /* + * Final cleanup: + * + no unnesting + * + remove immutable mappings + * + allow gaps in the range + */ + (VM_MAP_REMOVE_NO_UNNESTING | + VM_MAP_REMOVE_IMMUTABLE | + VM_MAP_REMOVE_GAPS_OK)); - task->halting = FALSE; + /* + * Kick out any IOKitUser handles to the task. At best they're stale, + * at worst someone is racing a SUID exec. + */ + iokit_task_terminate(task); } /* @@ -1197,19 +2651,23 @@ task_complete_halt(task_t task) * This is a recursive-style suspension of the task, a count of * suspends is maintained. * - * CONDITIONS: the task is locked and active. + * CONDITIONS: the task is locked and active. */ void task_hold_locked( - register task_t task) + task_t task) { - register thread_t thread; + thread_t thread; assert(task->active); if (task->suspend_count++ > 0) return; + if (task->bsd_info) { + workq_proc_suspended(task->bsd_info); + } + /* * Iterate through all the threads and hold them. 
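 *
 * Holds nest: only the 0 -> 1 transition of suspend_count does the
 * per-thread work, and task_release_locked() undoes it on 1 -> 0.
 * A balanced-usage sketch (task locked and active throughout):
 *
 *	task_hold_locked(task);     // 0 -> 1: threads held
 *	task_hold_locked(task);     // 1 -> 2: counted only
 *	task_release_locked(task);  // 2 -> 1
 *	task_release_locked(task);  // 1 -> 0: threads released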
*/ @@ -1233,7 +2691,7 @@ task_hold_locked( */ kern_return_t task_hold( - register task_t task) + task_t task) { if (task == TASK_NULL) return (KERN_INVALID_ARGUMENT); @@ -1284,10 +2742,10 @@ task_wait( */ void task_wait_locked( - register task_t task, + task_t task, boolean_t until_not_runnable) { - register thread_t thread, self; + thread_t thread, self; assert(task->active); assert(task->suspend_count > 0); @@ -1314,9 +2772,9 @@ task_wait_locked( */ void task_release_locked( - register task_t task) + task_t task) { - register thread_t thread; + thread_t thread; assert(task->active); assert(task->suspend_count > 0); @@ -1324,6 +2782,10 @@ task_release_locked( if (--task->suspend_count > 0) return; + if (task->bsd_info) { + workq_proc_resumed(task->bsd_info); + } + queue_iterate(&task->threads, thread, thread_t, task_threads) { thread_mtx_lock(thread); thread_release(thread); @@ -1476,16 +2938,21 @@ task_threads( static kern_return_t place_task_hold ( - register task_t task, + task_t task, int mode) { - if (!task->active) { + if (!task->active && !task_is_a_corpse(task)) { return (KERN_FAILURE); } + /* Return success for corpse task */ + if (task_is_a_corpse(task)) { + return KERN_SUCCESS; + } + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_IPC,MACH_TASK_SUSPEND) | DBG_FUNC_NONE, - proc_pid(task->bsd_info), ((thread_t)queue_first(&task->threads))->thread_id, + task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id, task->user_stop_count, task->user_stop_count + 1, 0); #if MACH_ASSERT @@ -1517,14 +2984,19 @@ place_task_hold ( static kern_return_t release_task_hold ( - register task_t task, + task_t task, int mode) { - register boolean_t release = FALSE; + boolean_t release = FALSE; - if (!task->active) { + if (!task->active && !task_is_a_corpse(task)) { return (KERN_FAILURE); } + + /* Return success for corpse task */ + if (task_is_a_corpse(task)) { + return KERN_SUCCESS; + } if (mode == TASK_HOLD_PIDSUSPEND) { if (task->pidsuspended == FALSE) { @@ -1537,7 +3009,7 @@ release_task_hold ( KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_IPC,MACH_TASK_RESUME) | DBG_FUNC_NONE, - proc_pid(task->bsd_info), ((thread_t)queue_first(&task->threads))->thread_id, + task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id, task->user_stop_count, mode, task->legacy_stop_count); #if MACH_ASSERT @@ -1595,7 +3067,7 @@ release_task_hold ( */ kern_return_t task_suspend( - register task_t task) + task_t task) { kern_return_t kr; mach_port_t port, send, old_notify; @@ -1651,9 +3123,9 @@ task_suspend( */ if ((kr = ipc_kmsg_copyout_object(current_task()->itk_space, (ipc_object_t)send, MACH_MSG_TYPE_MOVE_SEND, &name)) != KERN_SUCCESS) { - printf("warning: %s(%d) failed to copyout suspension token for task %s(%d) with error: %d\n", - proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info), - proc_name_address(task->bsd_info), proc_pid(task->bsd_info), kr); + printf("warning: %s(%d) failed to copyout suspension token for pid %d with error: %d\n", + proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info), + task_pid(task), kr); return (kr); } @@ -1669,7 +3141,7 @@ task_suspend( */ kern_return_t task_resume( - register task_t task) + task_t task) { kern_return_t kr; mach_port_name_t resume_port_name; @@ -1701,9 +3173,9 @@ task_resume( } else { is_write_unlock(space); if (kr == KERN_SUCCESS) - printf("warning: %s(%d) performed out-of-band resume on %s(%d)\n", + printf("warning: %s(%d) performed out-of-band resume on pid 
%d\n", proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info), - proc_name_address(task->bsd_info), proc_pid(task->bsd_info)); + task_pid(task)); } return kr; @@ -1733,7 +3205,7 @@ task_suspend_internal(task_t task) */ kern_return_t task_suspend2( - register task_t task, + task_t task, task_suspension_token_t *suspend_token) { kern_return_t kr; @@ -1761,7 +3233,7 @@ task_suspend2( */ kern_return_t task_resume_internal( - register task_suspension_token_t task) + task_suspension_token_t task) { kern_return_t kr; @@ -1779,7 +3251,7 @@ task_resume_internal( */ kern_return_t task_resume2( - register task_suspension_token_t task) + task_suspension_token_t task) { kern_return_t kr; @@ -1874,7 +3346,7 @@ out: */ kern_return_t task_pidsuspend( - register task_t task) + task_t task) { kern_return_t kr; @@ -1890,9 +3362,6 @@ task_pidsuspend( return (kr); } -/* If enabled, we bring all the frozen pages back in prior to resumption; otherwise, they're faulted back in on demand */ -#define THAW_ON_RESUME 1 - /* * task_pidresume: * Resumes a previously suspended task. @@ -1902,7 +3371,7 @@ task_pidsuspend( */ kern_return_t task_pidresume( - register task_t task) + task_t task) { kern_return_t kr; @@ -1911,7 +3380,7 @@ task_pidresume( task_lock(task); -#if (CONFIG_FREEZE && THAW_ON_RESUME) +#if CONFIG_FREEZE while (task->changing_freeze_state) { @@ -1928,17 +3397,8 @@ task_pidresume( task_unlock(task); -#if (CONFIG_FREEZE && THAW_ON_RESUME) - if ((kr == KERN_SUCCESS) && (task->frozen == TRUE)) { - - if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) { - - kr = KERN_SUCCESS; - } else { +#if CONFIG_FREEZE - kr = vm_map_thaw(task->map); - } - } task_lock(task); if (kr == KERN_SUCCESS) @@ -1952,6 +3412,84 @@ task_pidresume( return (kr); } + +#if DEVELOPMENT || DEBUG + +extern void IOSleep(int); + +kern_return_t +task_disconnect_page_mappings(task_t task) +{ + int n; + + if (task == TASK_NULL || task == kernel_task) + return (KERN_INVALID_ARGUMENT); + + /* + * this function is used to strip all of the mappings from + * the pmap for the specified task to force the task to + * re-fault all of the pages it is actively using... this + * allows us to approximate the true working set of the + * specified task. We only engage if at least 1 of the + * threads in the task is runnable, but we want to continuously + * sweep (at least for a while - I've arbitrarily set the limit at + * 100 sweeps to be re-looked at as we gain experience) to get a better + * view into what areas within a page are being visited (as opposed to only + * seeing the first fault of a page after the task becomes + * runnable)... 
in the future I may + * try to block until awakened by a thread in this task + * being made runnable, but for now we'll periodically poll from the + * user level debug tool driving the sysctl + */ + for (n = 0; n < 100; n++) { + thread_t thread; + boolean_t runnable; + boolean_t do_unnest; + int page_count; + + runnable = FALSE; + do_unnest = FALSE; + + task_lock(task); + + queue_iterate(&task->threads, thread, thread_t, task_threads) { + + if (thread->state & TH_RUN) { + runnable = TRUE; + break; + } + } + if (n == 0) + task->task_disconnected_count++; + + if (task->task_unnested == FALSE) { + if (runnable == TRUE) { + task->task_unnested = TRUE; + do_unnest = TRUE; + } + } + task_unlock(task); + + if (runnable == FALSE) + break; + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START, + task, do_unnest, task->task_disconnected_count, 0, 0); + + page_count = vm_map_disconnect_page_mappings(task->map, do_unnest); + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END, + task, page_count, 0, 0, 0); + + if ((n % 5) == 4) + IOSleep(1); + } + return (KERN_SUCCESS); +} + +#endif + + #if CONFIG_FREEZE /* @@ -1962,18 +3500,22 @@ task_pidresume( * Conditions: * The caller holds a reference to the task */ +extern void vm_wake_compactor_swapper(void); +extern queue_head_t c_swapout_list_head; + kern_return_t task_freeze( - register task_t task, + task_t task, uint32_t *purgeable_count, uint32_t *wired_count, uint32_t *clean_count, uint32_t *dirty_count, uint32_t dirty_budget, - boolean_t *shared, - boolean_t walk_only) + uint32_t *shared_count, + int *freezer_error_code, + boolean_t eval_only) { - kern_return_t kr; + kern_return_t kr = KERN_SUCCESS; if (task == TASK_NULL || task == kernel_task) return (KERN_INVALID_ARGUMENT); @@ -1996,21 +3538,40 @@ task_freeze( task_unlock(task); - if (walk_only) { - kr = vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared); - } else { - kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared); - } + kr = vm_map_freeze(task->map, + purgeable_count, + wired_count, + clean_count, + dirty_count, + dirty_budget, + shared_count, + freezer_error_code, + eval_only); task_lock(task); - if (walk_only == FALSE && kr == KERN_SUCCESS) + if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) { task->frozen = TRUE; + } + task->changing_freeze_state = FALSE; thread_wakeup(&task->changing_freeze_state); task_unlock(task); + if (VM_CONFIG_COMPRESSOR_IS_PRESENT && + (eval_only == FALSE)) { + vm_wake_compactor_swapper(); + /* + * We do an explicit wakeup of the swapout thread here + * because the compact_and_swap routines don't have + * knowledge about these kind of "per-task packed c_segs" + * and so will not be evaluating whether we need to do + * a wakeup there. 
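+	 *
+	 * A caller-side sketch of the revised interface (all counts are
+	 * outputs; `budget' is a caller-chosen dirty-page budget and the
+	 * variable names are illustrative):
+	 *
+	 *	uint32_t purgeable, wired, clean, dirty, shared;
+	 *	int freezer_err = 0;
+	 *	kern_return_t kr = task_freeze(task, &purgeable, &wired,
+	 *	    &clean, &dirty, budget, &shared, &freezer_err,
+	 *	    FALSE);  // eval_only FALSE: actually freeze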
+ */ + thread_wakeup((event_t)&c_swapout_list_head); + } + return (kr); } @@ -2022,15 +3583,10 @@ task_freeze( * Conditions: * The caller holds a reference to the task */ -extern void -vm_consider_waking_compactor_swapper(void); - kern_return_t task_thaw( - register task_t task) + task_t task) { - kern_return_t kr; - if (task == TASK_NULL || task == kernel_task) return (KERN_INVALID_ARGUMENT); @@ -2048,32 +3604,11 @@ task_thaw( task_unlock(task); return (KERN_FAILURE); } - task->changing_freeze_state = TRUE; - - if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) { - task_unlock(task); - - kr = vm_map_thaw(task->map); - - task_lock(task); - - if (kr == KERN_SUCCESS) - task->frozen = FALSE; - } else { - task->frozen = FALSE; - kr = KERN_SUCCESS; - } - - task->changing_freeze_state = FALSE; - thread_wakeup(&task->changing_freeze_state); + task->frozen = FALSE; task_unlock(task); - if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) { - vm_consider_waking_compactor_swapper(); - } - - return (kr); + return (KERN_SUCCESS); } #endif /* CONFIG_FREEZE */ @@ -2111,6 +3646,15 @@ host_security_set_task_token( return(kr); } +kern_return_t +task_send_trace_memory( + __unused task_t target_task, + __unused uint32_t pid, + __unused uint64_t uniqueid) +{ + return KERN_INVALID_ARGUMENT; +} + /* * This routine was added, pretty much exclusively, for registering the * RPC glue vector for in-kernel short circuited tasks. Rather than @@ -2130,12 +3674,30 @@ task_set_info( return(KERN_INVALID_ARGUMENT); switch (flavor) { + +#if CONFIG_ATM + case TASK_TRACE_MEMORY_INFO: + { + if (task_info_count != TASK_TRACE_MEMORY_INFO_COUNT) + return (KERN_INVALID_ARGUMENT); + + assert(task_info_in != NULL); + task_trace_memory_info_t mem_info; + mem_info = (task_trace_memory_info_t) task_info_in; + kern_return_t kr = atm_register_trace_memory(task, + mem_info->user_memory_address, + mem_info->buffer_size); + return kr; + } + +#endif default: return (KERN_INVALID_ARGUMENT); } return (KERN_SUCCESS); } +int radar_20146450 = 1; kern_return_t task_info( task_t task, @@ -2144,10 +3706,12 @@ task_info( mach_msg_type_number_t *task_info_count) { kern_return_t error = KERN_SUCCESS; + mach_msg_type_number_t original_task_info_count; if (task == TASK_NULL) return (KERN_INVALID_ARGUMENT); + original_task_info_count = *task_info_count; task_lock(task); if ((task != current_task()) && (!task->active)) { @@ -2159,6 +3723,9 @@ task_info( case TASK_BASIC_INFO_32: case TASK_BASIC2_INFO_32: +#if defined(__arm__) || defined(__arm64__) + case TASK_BASIC_INFO_64: +#endif { task_basic_info_32_t basic_info; vm_map_t map; @@ -2203,6 +3770,46 @@ task_info( break; } +#if defined(__arm__) || defined(__arm64__) + case TASK_BASIC_INFO_64_2: + { + task_basic_info_64_2_t basic_info; + vm_map_t map; + clock_sec_t secs; + clock_usec_t usecs; + + if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) { + error = KERN_INVALID_ARGUMENT; + break; + } + + basic_info = (task_basic_info_64_2_t)task_info_out; + + map = (task == kernel_task)? kernel_map: task->map; + basic_info->virtual_size = map->size; + basic_info->resident_size = + (mach_vm_size_t)(pmap_resident_count(map->pmap)) + * PAGE_SIZE_64; + + basic_info->policy = ((task != kernel_task)? 
+ POLICY_TIMESHARE: POLICY_RR); + basic_info->suspend_count = task->user_stop_count; + + absolutetime_to_microtime(task->total_user_time, &secs, &usecs); + basic_info->user_time.seconds = + (typeof(basic_info->user_time.seconds))secs; + basic_info->user_time.microseconds = usecs; + + absolutetime_to_microtime(task->total_system_time, &secs, &usecs); + basic_info->system_time.seconds = + (typeof(basic_info->system_time.seconds))secs; + basic_info->system_time.microseconds = usecs; + + *task_info_count = TASK_BASIC_INFO_64_2_COUNT; + break; + } + +#else /* defined(__arm__) || defined(__arm64__) */ case TASK_BASIC_INFO_64: { task_basic_info_64_t basic_info; @@ -2240,6 +3847,7 @@ task_info( *task_info_count = TASK_BASIC_INFO_64_COUNT; break; } +#endif /* defined(__arm__) || defined(__arm64__) */ case MACH_TASK_BASIC_INFO: { @@ -2288,8 +3896,8 @@ task_info( case TASK_THREAD_TIMES_INFO: { - register task_thread_times_info_t times_info; - register thread_t thread; + task_thread_times_info_t times_info; + thread_t thread; if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -2309,7 +3917,7 @@ task_info( if (thread->options & TH_OPT_IDLE_THREAD) continue; - thread_read_times(thread, &user_time, &system_time); + thread_read_times(thread, &user_time, &system_time, NULL); time_value_add(×_info->user_time, &user_time); time_value_add(×_info->system_time, &system_time); @@ -2322,7 +3930,7 @@ task_info( case TASK_ABSOLUTETIME_INFO: { task_absolutetime_info_t info; - register thread_t thread; + thread_t thread; if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -2393,7 +4001,7 @@ task_info( /* only set format on output for those expecting it */ if (*task_info_count >= TASK_DYLD_INFO_COUNT) { - info->all_image_info_format = task_has_64BitAddr(task) ? + info->all_image_info_format = task_has_64Bit_addr(task) ? 
TASK_DYLD_ALL_IMAGE_INFO_64 : TASK_DYLD_ALL_IMAGE_INFO_32 ; *task_info_count = TASK_DYLD_INFO_COUNT; @@ -2509,7 +4117,7 @@ task_info( /* OBSOLETE */ case TASK_SCHED_RR_INFO: { - register policy_rr_base_t rr_base; + policy_rr_base_t rr_base; uint32_t quantum_time; uint64_t quantum_ns; @@ -2539,7 +4147,7 @@ task_info( /* OBSOLETE */ case TASK_SCHED_TIMESHARE_INFO: { - register policy_timeshare_base_t ts_base; + policy_timeshare_base_t ts_base; if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -2561,7 +4169,7 @@ task_info( case TASK_SECURITY_TOKEN: { - register security_token_t *sec_token_p; + security_token_t *sec_token_p; if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -2578,7 +4186,7 @@ task_info( case TASK_AUDIT_TOKEN: { - register audit_token_t *audit_token_p; + audit_token_t *audit_token_p; if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -2599,8 +4207,8 @@ task_info( case TASK_EVENTS_INFO: { - register task_events_info_t events_info; - register thread_t thread; + task_events_info_t events_info; + thread_t thread; if (*task_info_count < TASK_EVENTS_INFO_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -2647,7 +4255,18 @@ task_info( break; } - task_power_info_locked(task, (task_power_info_t)task_info_out); + task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL); + break; + } + + case TASK_POWER_INFO_V2: + { + if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) { + error = KERN_INVALID_ARGUMENT; + break; + } + task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out; + task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2); break; } @@ -2657,7 +4276,7 @@ task_info( task_vm_info_t vm_info; vm_map_t map; - if (*task_info_count < TASK_VM_INFO_COUNT) { + if (*task_info_count < TASK_VM_INFO_REV0_COUNT) { error = KERN_INVALID_ARGUMENT; break; } @@ -2720,7 +4339,9 @@ task_info( } else { mach_vm_size_t volatile_virtual_size; mach_vm_size_t volatile_resident_size; + mach_vm_size_t volatile_compressed_size; mach_vm_size_t volatile_pmap_size; + mach_vm_size_t volatile_compressed_pmap_size; kern_return_t kr; if (flavor == TASK_VM_INFO_PURGEABLE) { @@ -2728,23 +4349,161 @@ task_info( map, &volatile_virtual_size, &volatile_resident_size, - &volatile_pmap_size); + &volatile_compressed_size, + &volatile_pmap_size, + &volatile_compressed_pmap_size); if (kr == KERN_SUCCESS) { vm_info->purgeable_volatile_pmap = volatile_pmap_size; + if (radar_20146450) { + vm_info->compressed -= + volatile_compressed_pmap_size; + } vm_info->purgeable_volatile_resident = volatile_resident_size; vm_info->purgeable_volatile_virtual = volatile_virtual_size; } } + } + *task_info_count = TASK_VM_INFO_REV0_COUNT; + + if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) { + vm_info->phys_footprint = + (mach_vm_size_t) get_task_phys_footprint(task); + *task_info_count = TASK_VM_INFO_REV1_COUNT; + } + if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) { + vm_info->min_address = map->min_offset; + vm_info->max_address = map->max_offset; + *task_info_count = TASK_VM_INFO_REV2_COUNT; + } + + if (task != kernel_task) { vm_map_unlock_read(map); } - *task_info_count = TASK_VM_INFO_COUNT; break; } + case TASK_WAIT_STATE_INFO: + { + /* + * Deprecated flavor. Currently allowing some results until all users + * stop calling it. The results may not be accurate. 
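+	 *
+	 * Remaining callers look roughly like this (only the SFI
+	 * wait-time total is still populated):
+	 *
+	 *	task_wait_state_info_data_t info;
+	 *	mach_msg_type_number_t count = TASK_WAIT_STATE_INFO_COUNT;
+	 *	kern_return_t kr = task_info(task, TASK_WAIT_STATE_INFO,
+	 *	    (task_info_t)&info, &count);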
+ */ + task_wait_state_info_t wait_state_info; + uint64_t total_sfi_ledger_val = 0; + + if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) { + error = KERN_INVALID_ARGUMENT; + break; + } + + wait_state_info = (task_wait_state_info_t) task_info_out; + + wait_state_info->total_wait_state_time = 0; + bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved)); + +#if CONFIG_SCHED_SFI + int i, prev_lentry = -1; + int64_t val_credit, val_debit; + + for (i = 0; i < MAX_SFI_CLASS_ID; i++){ + val_credit =0; + /* + * checking with prev_lentry != entry ensures adjacent classes + * which share the same ledger do not add wait times twice. + * Note: Use ledger() call to get data for each individual sfi class. + */ + if (prev_lentry != task_ledgers.sfi_wait_times[i] && + KERN_SUCCESS == ledger_get_entries(task->ledger, + task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) { + total_sfi_ledger_val += val_credit; + } + prev_lentry = task_ledgers.sfi_wait_times[i]; + } + +#endif /* CONFIG_SCHED_SFI */ + wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val; + *task_info_count = TASK_WAIT_STATE_INFO_COUNT; + + break; + } + case TASK_VM_INFO_PURGEABLE_ACCOUNT: + { +#if DEVELOPMENT || DEBUG + pvm_account_info_t acnt_info; + + if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) { + error = KERN_INVALID_ARGUMENT; + break; + } + + if (task_info_out == NULL) { + error = KERN_INVALID_ARGUMENT; + break; + } + + acnt_info = (pvm_account_info_t) task_info_out; + + error = vm_purgeable_account(task, acnt_info); + + *task_info_count = PVM_ACCOUNT_INFO_COUNT; + + break; +#else /* DEVELOPMENT || DEBUG */ + error = KERN_NOT_SUPPORTED; + break; +#endif /* DEVELOPMENT || DEBUG */ + } + case TASK_FLAGS_INFO: + { + task_flags_info_t flags_info; + + if (*task_info_count < TASK_FLAGS_INFO_COUNT) { + error = KERN_INVALID_ARGUMENT; + break; + } + + flags_info = (task_flags_info_t)task_info_out; + + /* only publish the 64-bit flag of the task */ + flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA); + + *task_info_count = TASK_FLAGS_INFO_COUNT; + break; + } + + case TASK_DEBUG_INFO_INTERNAL: + { +#if DEVELOPMENT || DEBUG + task_debug_info_internal_t dbg_info; + if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) { + error = KERN_NOT_SUPPORTED; + break; + } + + if (task_info_out == NULL) { + error = KERN_INVALID_ARGUMENT; + break; + } + dbg_info = (task_debug_info_internal_t) task_info_out; + dbg_info->ipc_space_size = 0; + if (task->itk_space){ + dbg_info->ipc_space_size = task->itk_space->is_table_size; + } + + dbg_info->suspend_count = task->suspend_count; + + error = KERN_SUCCESS; + *task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT; + break; +#else /* DEVELOPMENT || DEBUG */ + error = KERN_NOT_SUPPORTED; + break; +#endif /* DEVELOPMENT || DEBUG */ + } default: error = KERN_INVALID_ARGUMENT; } @@ -2753,6 +4512,41 @@ task_info( return (error); } +/* + * task_info_from_user + * + * When calling task_info from user space, + * this function will be executed as mig server side + * instead of calling directly into task_info. + * This gives the possibility to perform more security + * checks on task_port. + * + * In the case of TASK_DYLD_INFO, we require the more + * privileged task_port not the less-privileged task_name_port. 
+ * + */ +kern_return_t +task_info_from_user( + mach_port_t task_port, + task_flavor_t flavor, + task_info_t task_info_out, + mach_msg_type_number_t *task_info_count) +{ + task_t task; + kern_return_t ret; + + if (flavor == TASK_DYLD_INFO) + task = convert_port_to_task(task_port); + else + task = convert_port_to_task_name(task_port); + + ret = task_info(task, flavor, task_info_out, task_info_count); + + task_deallocate(task); + + return ret; +} + /* * task_power_info * @@ -2762,7 +4556,9 @@ task_info( void task_power_info_locked( task_t task, - task_power_info_t info) + task_power_info_t info, + gpu_energy_data_t ginfo, + task_power_info_v2_t infov2) { thread_t thread; ledger_amount_t tmp; @@ -2780,6 +4576,21 @@ task_power_info_locked( info->total_user = task->total_user_time; info->total_system = task->total_system_time; +#if CONFIG_EMBEDDED + if (infov2) { + infov2->task_energy = task->task_energy; + } +#endif + + if (ginfo) { + ginfo->task_gpu_utilisation = task->task_gpu_ns; + } + + if (infov2) { + infov2->task_ptime = task->total_ptime; + infov2->task_pset_switches = task->ps_switch; + } + queue_iterate(&task->threads, thread, thread_t, task_threads) { uint64_t tval; spl_t x; @@ -2793,9 +4604,21 @@ task_power_info_locked( info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1; info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2; - tval = timer_grab(&thread->user_timer); +#if CONFIG_EMBEDDED + if (infov2) { + infov2->task_energy += ml_energy_stat(thread); + } +#endif + + tval = timer_grab(&thread->user_timer); info->total_user += tval; + if (infov2) { + tval = timer_grab(&thread->ptime); + infov2->task_ptime += tval; + infov2->task_pset_switches += thread->ps_switch; + } + tval = timer_grab(&thread->system_timer); if (thread->precise_user_kernel_time) { info->total_system += tval; @@ -2804,9 +4627,130 @@ task_power_info_locked( info->total_user += tval; } + if (ginfo) { + ginfo->task_gpu_utilisation += ml_gpu_stat(thread); + } + thread_unlock(thread); + splx(x); + } +} + +/* + * task_gpu_utilisation + * + * Returns the total gpu time used by the all the threads of the task + * (both dead and alive) + */ +uint64_t +task_gpu_utilisation( + task_t task) +{ + uint64_t gpu_time = 0; +#if !CONFIG_EMBEDDED + thread_t thread; + + task_lock(task); + gpu_time += task->task_gpu_ns; + + queue_iterate(&task->threads, thread, thread_t, task_threads) { + spl_t x; + x = splsched(); + thread_lock(thread); + gpu_time += ml_gpu_stat(thread); + thread_unlock(thread); + splx(x); + } + + task_unlock(task); +#else /* CONFIG_EMBEDDED */ + /* silence compiler warning */ + (void)task; +#endif /* !CONFIG_EMBEDDED */ + return gpu_time; +} + +/* + * task_energy + * + * Returns the total energy used by the all the threads of the task + * (both dead and alive) + */ +uint64_t +task_energy( + task_t task) +{ + uint64_t energy = 0; + thread_t thread; + + task_lock(task); + energy += task->task_energy; + + queue_iterate(&task->threads, thread, thread_t, task_threads) { + spl_t x; + x = splsched(); + thread_lock(thread); + energy += ml_energy_stat(thread); thread_unlock(thread); splx(x); } + + task_unlock(task); + return energy; +} + + +uint64_t +task_cpu_ptime( + __unused task_t task) +{ + return 0; +} + + +/* This function updates the cpu time in the arrays for each + * effective and requested QoS class + */ +void +task_update_cpu_time_qos_stats( + task_t task, + uint64_t *eqos_stats, + uint64_t *rqos_stats) +{ + if (!eqos_stats && !rqos_stats) { + return; + } + + task_lock(task); + 
thread_t thread; + queue_iterate(&task->threads, thread, thread_t, task_threads) { + if (thread->options & TH_OPT_IDLE_THREAD) { + continue; + } + + thread_update_qos_cpu_time(thread); + } + + if (eqos_stats) { + eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default; + eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance; + eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background; + eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility; + eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy; + eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated; + eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive; + } + + if (rqos_stats) { + rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default; + rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance; + rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background; + rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility; + rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy; + rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated; + rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive; + } + + task_unlock(task); } kern_return_t @@ -2832,8 +4776,6 @@ task_vtimer_set( thread_t thread; spl_t x; - /* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */ - task_lock(task); task->vtimers |= which; @@ -2901,15 +4843,20 @@ __unused uint32_t *microsecs) { thread_t thread = current_thread(); - uint32_t tdelt; - clock_sec_t secs; + uint32_t tdelt = 0; + clock_sec_t secs = 0; uint64_t tsum; assert(task == current_task()); - assert(task->vtimers & which); + spl_t s = splsched(); + thread_lock(thread); - secs = tdelt = 0; + if ((task->vtimers & which) != (uint32_t)which) { + thread_unlock(thread); + splx(s); + return; + } switch (which) { @@ -2943,6 +4890,8 @@ __unused break; } + thread_unlock(thread); + splx(s); } /* @@ -2982,14 +4931,20 @@ task_get_assignment( task_t task, processor_set_t *pset) { - if (!task->active) - return(KERN_FAILURE); + if (!task || !task->active) + return KERN_FAILURE; *pset = &pset0; - return (KERN_SUCCESS); + return KERN_SUCCESS; } +uint64_t +get_task_dispatchqueue_offset( + task_t task) +{ + return task->dispatchqueue_offset; +} /* * task_policy @@ -3032,26 +4987,6 @@ task_set_policy( return(KERN_FAILURE); } -#if FAST_TAS -kern_return_t -task_set_ras_pc( - task_t task, - vm_offset_t pc, - vm_offset_t endpc) -{ - extern int fast_tas_debug; - - if (fast_tas_debug) { - printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n", - task, pc, endpc); - } - task_lock(task); - task->fast_tas_base = pc; - task->fast_tas_end = endpc; - task_unlock(task); - return KERN_SUCCESS; -} -#else /* FAST_TAS */ kern_return_t task_set_ras_pc( __unused task_t task, @@ -3060,21 +4995,14 @@ task_set_ras_pc( { return KERN_FAILURE; } -#endif /* FAST_TAS */ void task_synchronizer_destroy_all(task_t task) { - semaphore_t semaphore; - /* * Destroy owned semaphores */ - - while (!queue_empty(&task->semaphore_list)) { - semaphore = (semaphore_t) queue_first(&task->semaphore_list); - (void) semaphore_destroy(task, semaphore); - } + semaphore_destroy_all(task); } /* @@ -3140,23 +5068,157 @@ 
task_get_state( return ret; } -#if CONFIG_JETSAM + +static kern_return_t __attribute__((noinline,not_tail_called)) +PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND( + mach_exception_code_t code, + mach_exception_subcode_t subcode, + void *reason) +{ +#ifdef MACH_BSD + if (1 == proc_selfpid()) + return KERN_NOT_SUPPORTED; // initproc is immune +#endif + mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = { + [0] = code, + [1] = subcode, + }; + task_t task = current_task(); + kern_return_t kr; + + /* (See jetsam-related comments below) */ + + proc_memstat_terminated(task->bsd_info, TRUE); + kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason); + proc_memstat_terminated(task->bsd_info, FALSE); + return kr; +} + +kern_return_t +task_violated_guard( + mach_exception_code_t code, + mach_exception_subcode_t subcode, + void *reason) +{ + return PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(code, subcode, reason); +} + + +#if CONFIG_MEMORYSTATUS + +boolean_t +task_get_memlimit_is_active(task_t task) +{ + assert (task != NULL); + + if (task->memlimit_is_active == 1) { + return(TRUE); + } else { + return (FALSE); + } +} + +void +task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active) +{ + assert (task != NULL); + + if (memlimit_is_active) { + task->memlimit_is_active = 1; + } else { + task->memlimit_is_active = 0; + } +} + +boolean_t +task_get_memlimit_is_fatal(task_t task) +{ + assert(task != NULL); + + if (task->memlimit_is_fatal == 1) { + return(TRUE); + } else { + return(FALSE); + } +} + +void +task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal) +{ + assert (task != NULL); + + if (memlimit_is_fatal) { + task->memlimit_is_fatal = 1; + } else { + task->memlimit_is_fatal = 0; + } +} + +boolean_t +task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active) +{ + boolean_t triggered = FALSE; + + assert(task == current_task()); + + /* + * Returns true, if task has already triggered an exc_resource exception. + */ + + if (memlimit_is_active) { + triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE); + } else { + triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE); + } + + return(triggered); +} + +void +task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active) +{ + assert(task == current_task()); + + /* + * We allow one exc_resource per process per active/inactive limit. + * The limit's fatal attribute does not come into play. + */ + + if (memlimit_is_active) { + task->memlimit_active_exc_resource = 1; + } else { + task->memlimit_inactive_exc_resource = 1; + } +} + #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation void __attribute__((noinline)) -THIS_PROCESS_CROSSED_HIGH_WATERMARK__SENDING_EXC_RESOURCE(int max_footprint_mb) +PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal) { task_t task = current_task(); int pid = 0; - char *procname = (char *) "unknown"; + const char *procname = "unknown"; mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; + boolean_t send_sync_exc_resource = FALSE; #ifdef MACH_BSD pid = proc_selfpid(); - if (task->bsd_info != NULL) + + if (pid == 1) { + /* + * Cannot have ReportCrash analyzing + * a suspended initproc. 
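+	 *
+	 * (Everyone else proceeds to the EXC_RESOURCE path below, whose
+	 * code words are assembled with the standard helpers:
+	 *
+	 *	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
+	 *	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
+	 *	EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);)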
+ */ + return; + } + + if (task->bsd_info != NULL) { procname = proc_name_address(current_task()->bsd_info); + send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(current_task()->bsd_info); + } #endif - +#if CONFIG_COREDUMP if (hwm_user_cores) { int error; uint64_t starttime, end; @@ -3169,7 +5231,7 @@ THIS_PROCESS_CROSSED_HIGH_WATERMARK__SENDING_EXC_RESOURCE(int max_footprint_mb) * be filling up the disk; and ignore the core size resource limit for this * core file. */ - if ((error = coredump(current_task()->bsd_info, HWM_USERCORE_MINSPACE, 1)) != 0) { + if ((error = coredump(current_task()->bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) { printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error); } /* @@ -3182,6 +5244,7 @@ THIS_PROCESS_CROSSED_HIGH_WATERMARK__SENDING_EXC_RESOURCE(int max_footprint_mb) printf("coredump of %s[%d] taken in %d secs %d microsecs\n", proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs); } +#endif /* CONFIG_COREDUMP */ if (disable_exc_resource) { printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE " @@ -3189,14 +5252,50 @@ THIS_PROCESS_CROSSED_HIGH_WATERMARK__SENDING_EXC_RESOURCE(int max_footprint_mb) return; } - printf("process %s[%d] crossed memory high watermark (%d MB); sending " - "EXC_RESOURCE.\n", procname, pid, max_footprint_mb); + /* + * A task that has triggered an EXC_RESOURCE, should not be + * jetsammed when the device is under memory pressure. Here + * we set the P_MEMSTAT_TERMINATED flag so that the process + * will be skipped if the memorystatus_thread wakes up. + */ + proc_memstat_terminated(current_task()->bsd_info, TRUE); code[0] = code[1] = 0; EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY); EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK); EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb); - exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX); + + /* + * Do not generate a corpse fork if the violation is a fatal one + * or the process wants synchronous EXC_RESOURCE exceptions. + */ + if (is_fatal || send_sync_exc_resource || exc_via_corpse_forking == 0) { + /* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */ + if (send_sync_exc_resource || corpse_for_fatal_memkill == 0) { + /* + * Use the _internal_ variant so that no user-space + * process can resume our task from under us. + */ + task_suspend_internal(task); + exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX); + task_resume_internal(task); + } + } else { + if (audio_active) { + printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE " + "supressed due to audio playback.\n", procname, pid, max_footprint_mb); + } else { + task_enqueue_exception_with_corpse(task, EXC_RESOURCE, + code, EXCEPTION_CODE_MAX, NULL); + } + } + + /* + * After the EXC_RESOURCE has been handled, we must clear the + * P_MEMSTAT_TERMINATED flag so that the process can again be + * considered for jetsam if the memorystatus_thread wakes up. 
+ */ + proc_memstat_terminated(current_task()->bsd_info, FALSE); /* clear the flag */ } /* @@ -3205,28 +5304,52 @@ THIS_PROCESS_CROSSED_HIGH_WATERMARK__SENDING_EXC_RESOURCE(int max_footprint_mb) void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1) { - ledger_amount_t max_footprint_mb; + ledger_amount_t max_footprint, max_footprint_mb; + task_t task; + boolean_t is_warning; + boolean_t memlimit_is_active; + boolean_t memlimit_is_fatal; if (warning == LEDGER_WARNING_DIPPED_BELOW) { /* * Task memory limits only provide a warning on the way up. */ return; - } - - ledger_get_limit(current_task()->ledger, task_ledgers.phys_footprint, &max_footprint_mb); - max_footprint_mb >>= 20; + } else if (warning == LEDGER_WARNING_ROSE_ABOVE) { + /* + * This task is in danger of violating a memory limit, + * It has exceeded a percentage level of the limit. + */ + is_warning = TRUE; + } else { + /* + * The task has exceeded the physical footprint limit. + * This is not a warning but a true limit violation. + */ + is_warning = FALSE; + } + + task = current_task(); + + ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint); + max_footprint_mb = max_footprint >> 20; + + memlimit_is_active = task_get_memlimit_is_active(task); + memlimit_is_fatal = task_get_memlimit_is_fatal(task); /* - * If this an actual violation (not a warning), - * generate a non-fatal high watermark EXC_RESOURCE. + * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception. + * We only generate the exception once per process per memlimit (active/inactive limit). + * To enforce this, we monitor state based on the memlimit's active/inactive attribute + * and we disable it by marking that memlimit as exception triggered. */ - if ((warning == 0) && (current_task()->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PHYS_FOOTPRINT_EXCEPTION)) { - THIS_PROCESS_CROSSED_HIGH_WATERMARK__SENDING_EXC_RESOURCE((int)max_footprint_mb); + if ((is_warning == FALSE) && (!task_has_triggered_exc_resource(task, memlimit_is_active))) { + PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)max_footprint_mb, memlimit_is_fatal); + memorystatus_log_exception((int)max_footprint_mb, memlimit_is_active, memlimit_is_fatal); + task_mark_has_triggered_exc_resource(task, memlimit_is_active); } - memorystatus_on_ledger_footprint_exceeded((warning == LEDGER_WARNING_ROSE_ABOVE) ? TRUE : FALSE, - (int)max_footprint_mb); + memorystatus_on_ledger_footprint_exceeded(is_warning, memlimit_is_active, memlimit_is_fatal); } extern int proc_check_footprint_priv(void); @@ -3239,26 +5362,66 @@ task_set_phys_footprint_limit( { kern_return_t error; + boolean_t memlimit_is_active; + boolean_t memlimit_is_fatal; + if ((error = proc_check_footprint_priv())) { return (KERN_NO_ACCESS); } - return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, FALSE); + /* + * This call should probably be obsoleted. + * But for now, we default to current state. 
+ */ + memlimit_is_active = task_get_memlimit_is_active(task); + memlimit_is_fatal = task_get_memlimit_is_fatal(task); + + return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal); +} + +kern_return_t +task_convert_phys_footprint_limit( + int limit_mb, + int *converted_limit_mb) +{ + if (limit_mb == -1) { + /* + * No limit + */ + if (max_task_footprint != 0) { + *converted_limit_mb = (int)(max_task_footprint / 1024 / 1024); /* bytes to MB */ + } else { + *converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20); + } + } else { + /* nothing to convert */ + *converted_limit_mb = limit_mb; + } + return (KERN_SUCCESS); } + kern_return_t task_set_phys_footprint_limit_internal( task_t task, int new_limit_mb, int *old_limit_mb, - boolean_t trigger_exception) + boolean_t memlimit_is_active, + boolean_t memlimit_is_fatal) { ledger_amount_t old; ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old); + + /* + * Check that limit >> 20 will not give an "unexpected" 32-bit + * result. There are, however, implicit assumptions that -1 mb limit + * equates to LEDGER_LIMIT_INFINITY. + */ + assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY)); if (old_limit_mb) { - *old_limit_mb = old >> 20; + *old_limit_mb = (int)(old >> 20); } if (new_limit_mb == -1) { @@ -3267,7 +5430,13 @@ task_set_phys_footprint_limit_internal( */ ledger_set_limit(task->ledger, task_ledgers.phys_footprint, max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY, - max_task_footprint ? PHYS_FOOTPRINT_WARNING_LEVEL : 0); + max_task_footprint ? max_task_footprint_warning_level : 0); + + task_lock(task); + task_set_memlimit_is_active(task, memlimit_is_active); + task_set_memlimit_is_fatal(task, memlimit_is_fatal); + task_unlock(task); + return (KERN_SUCCESS); } @@ -3277,15 +5446,27 @@ task_set_phys_footprint_limit_internal( task_lock(task); - if (trigger_exception) { - task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PHYS_FOOTPRINT_EXCEPTION; - } else { - task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PHYS_FOOTPRINT_EXCEPTION; + if ((memlimit_is_active == task_get_memlimit_is_active(task)) && + (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) && + (((ledger_amount_t)new_limit_mb << 20) == old)) { + /* + * memlimit state is not changing + */ + task_unlock(task); + return(KERN_SUCCESS); } + task_set_memlimit_is_active(task, memlimit_is_active); + task_set_memlimit_is_fatal(task, memlimit_is_fatal); + ledger_set_limit(task->ledger, task_ledgers.phys_footprint, (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL); + if (task == current_task()) { + ledger_check_new_balance(current_thread(), task->ledger, + task_ledgers.phys_footprint); + } + task_unlock(task); return (KERN_SUCCESS); @@ -3299,11 +5480,17 @@ task_get_phys_footprint_limit( ledger_amount_t limit; ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit); - *limit_mb = limit >> 20; + /* + * Check that limit >> 20 will not give an "unexpected" signed, 32-bit + * result. There are, however, implicit assumptions that -1 mb limit + * equates to LEDGER_LIMIT_INFINITY. 
+ */ + assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY)); + *limit_mb = (int)(limit >> 20); return (KERN_SUCCESS); } -#else /* CONFIG_JETSAM */ +#else /* CONFIG_MEMORYSTATUS */ kern_return_t task_set_phys_footprint_limit( __unused task_t task, @@ -3320,7 +5507,18 @@ task_get_phys_footprint_limit( { return (KERN_FAILURE); } -#endif /* CONFIG_JETSAM */ +#endif /* CONFIG_MEMORYSTATUS */ + +void +task_set_thread_limit(task_t task, uint16_t thread_limit) +{ + assert(task != kernel_task); + if (thread_limit <= TASK_MAX_THREAD_LIMIT) { + task_lock(task); + task->task_thread_limit = thread_limit; + task_unlock(task); + } +} /* * We need to export some functions to other components that @@ -3335,14 +5533,9 @@ boolean_t is_kerneltask(task_t t) return (FALSE); } -int -check_for_tasksuspend(task_t task) +boolean_t is_corpsetask(task_t t) { - - if (task == TASK_NULL) - return (0); - - return (task->suspend_count > 0); + return (task_is_a_corpse(t)); } #undef current_task @@ -3362,84 +5555,69 @@ task_reference( task_reference_internal(task); } -/* - * This routine is called always with task lock held. - * And it returns a thread handle without reference as the caller - * operates on it under the task lock held. +/* defined in bsd/kern/kern_prot.c */ +extern int get_audit_token_pid(audit_token_t *audit_token); + +int task_pid(task_t task) +{ + if (task) + return get_audit_token_pid(&task->audit_token); + return -1; +} + + +/* + * This routine finds a thread in a task by its unique id + * Returns a referenced thread or THREAD_NULL if the thread was not found + * + * TODO: This is super inefficient - it's an O(threads in task) list walk! + * We should make a tid hash, or transition all tid clients to thread ports + * + * Precondition: No locks held (will take task lock) */ thread_t task_findtid(task_t task, uint64_t tid) { - thread_t thread= THREAD_NULL; + thread_t self = current_thread(); + thread_t found_thread = THREAD_NULL; + thread_t iter_thread = THREAD_NULL; - queue_iterate(&task->threads, thread, thread_t, task_threads) { - if (thread->thread_id == tid) - return(thread); - } - return(THREAD_NULL); -} + /* Short-circuit the lookup if we're looking up ourselves */ + if (tid == self->thread_id || tid == TID_NULL) { + assert(self->task == task); + thread_reference(self); -#if CONFIG_MACF_MACH -/* - * Protect 2 task labels against modification by adding a reference on - * both label handles. The locks do not actually have to be held while - * using the labels as only labels with one reference can be modified - * in place. 
- */ + return self; + } -void -tasklabel_lock2( - task_t a, - task_t b) -{ - labelh_reference(a->label); - labelh_reference(b->label); -} + task_lock(task); -void -tasklabel_unlock2( - task_t a, - task_t b) -{ - labelh_release(a->label); - labelh_release(b->label); -} + queue_iterate(&task->threads, iter_thread, thread_t, task_threads) { + if (iter_thread->thread_id == tid) { + found_thread = iter_thread; + thread_reference(found_thread); + break; + } + } -void -mac_task_label_update_internal( - struct label *pl, - struct task *task) -{ + task_unlock(task); - tasklabel_lock(task); - task->label = labelh_modify(task->label); - mac_task_label_update(pl, &task->maclabel); - tasklabel_unlock(task); - ip_lock(task->itk_self); - mac_port_label_update_cred(pl, &task->itk_self->ip_label); - ip_unlock(task->itk_self); + return (found_thread); } -void -mac_task_label_modify( - struct task *task, - void *arg, - void (*f) (struct label *l, void *arg)) +int pid_from_task(task_t task) { + int pid = -1; - tasklabel_lock(task); - task->label = labelh_modify(task->label); - (*f)(&task->maclabel, arg); - tasklabel_unlock(task); -} + if (task->bsd_info) { + pid = proc_pid(task->bsd_info); + } else { + pid = task_pid(task); + } -struct label * -mac_task_get_label(struct task *task) -{ - return (&task->maclabel); + return pid; } -#endif /* * Control the CPU usage monitor for a task. @@ -3506,7 +5684,7 @@ task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz) } #endif /* CONFIG_NOMONITORS */ - if (*rate_hz < 0) { + if (*rate_hz <= 0) { task_unlock(task); return KERN_INVALID_ARGUMENT; } @@ -3525,7 +5703,7 @@ task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz) * remove the limit & callback on the wakeups ledger entry. */ #if CONFIG_TELEMETRY - telemetry_task_ctl_locked(current_task(), TF_WAKEMON_WARNING, 0); + telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0); #endif ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups); ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups); @@ -3558,21 +5736,22 @@ task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused co #endif if (warning == 0) { - THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS__SENDING_EXC_RESOURCE(); + SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(); } } void __attribute__((noinline)) -THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS__SENDING_EXC_RESOURCE(void) +SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void) { - task_t task = current_task(); - int pid = 0; - char *procname = (char *) "unknown"; - uint64_t observed_wakeups_rate; - uint64_t permitted_wakeups_rate; - uint64_t observation_interval; - mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; - struct ledger_entry_info lei; + task_t task = current_task(); + int pid = 0; + const char *procname = "unknown"; + boolean_t fatal; + kern_return_t kr; +#ifdef EXC_RESOURCE_MONITORS + mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; +#endif /* EXC_RESOURCE_MONITORS */ + struct ledger_entry_info lei; #ifdef MACH_BSD pid = proc_selfpid(); @@ -3585,34 +5764,632 @@ THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS__SENDING_EXC_RESOURCE(void) /* * Disable the exception notification so we don't overwhelm * the listener with an endless stream of redundant exceptions. + * TODO: detect whether another thread is already reporting the violation. 
 */
	uint32_t flags = WAKEMON_DISABLE;
	task_wakeups_monitor_ctl(task, &flags, NULL);
 
-	observed_wakeups_rate = (lei.lei_balance * (int64_t)NSEC_PER_SEC) / lei.lei_last_refill;
-	permitted_wakeups_rate = lei.lei_limit / task_wakeups_monitor_interval;
-	observation_interval = lei.lei_refill_period / NSEC_PER_SEC;
+	fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
+	trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
+	os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
+	    "over ~%llu seconds, averaging %llu wakes / second and "
+	    "violating a %slimit of %llu wakes over %llu seconds.\n",
+	    procname, pid,
+	    lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
+	    lei.lei_last_refill == 0 ? 0 :
+	        (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
+	    fatal ? "FATAL " : "",
+	    lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
+
+	kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
+	    fatal ? kRNFatalLimitFlag : 0);
+	if (kr) {
+		printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
+	}
+
+#ifdef EXC_RESOURCE_MONITORS
 	if (disable_exc_resource) {
 		printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
 		    "suppressed by a boot-arg\n", procname, pid);
 		return;
 	}
-	printf("process %s[%d] caught causing excessive wakeups. Observed wakeups rate "
-	"(per sec): %lld; Maximum permitted wakeups rate (per sec): %lld; Observation "
-	"period: %lld seconds; Task lifetime number of wakeups: %lld\n",
-	procname, pid, observed_wakeups_rate, permitted_wakeups_rate,
-	observation_interval, lei.lei_credit);
+	if (audio_active) {
+		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
+		    "suppressed due to audio playback\n", procname, pid);
		return;
+	}
+	if (lei.lei_last_refill == 0) {
+		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
+		    "suppressed due to lei.lei_last_refill = 0\n", procname, pid);
+	}
 
 	code[0] = code[1] = 0;
 	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
 	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
-	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0], task_wakeups_monitor_rate);
-	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0], observation_interval);
-	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1], lei.lei_balance * (int64_t)NSEC_PER_SEC / lei.lei_last_refill);
+	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
+	    NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
+	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
+	    lei.lei_last_refill);
+	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
+	    NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
 	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
+#endif /* EXC_RESOURCE_MONITORS */
 
-	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
+	if (fatal) {
 		task_terminate_internal(task);
 	}
 }
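
The average printed by the os_log above reduces to the ledger balance scaled to a per-second rate over the last refill interval. A small sketch of that arithmetic, using the same struct ledger_entry_info fields and the same zero-interval guard (illustrative only):

/* Sketch: per-second wake rate, as logged above. */
static uint64_t
observed_wakes_per_sec(const struct ledger_entry_info *lei)
{
	if (lei->lei_last_refill == 0) {
		/* The reporting path above also special-cases a zero interval. */
		return 0;
	}
	return (NSEC_PER_SEC * (uint64_t)lei->lei_balance) / lei->lei_last_refill;
}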
+
+static boolean_t
+global_update_logical_writes(int64_t io_delta)
+{
+	int64_t old_count, new_count;
+	boolean_t needs_telemetry;
+
+	do {
+		new_count = old_count = global_logical_writes_count;
+		new_count += io_delta;
+		if (new_count >= io_telemetry_limit) {
+			new_count = 0;
+			needs_telemetry = TRUE;
+		} else {
+			needs_telemetry = FALSE;
+		}
+	} while (!OSCompareAndSwap64(old_count, new_count, &global_logical_writes_count));
+	return needs_telemetry;
+}
+
+void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
+{
+	int64_t io_delta = 0;
+	boolean_t needs_telemetry = FALSE;
+
+	if ((!task) || (!io_size) || (!vp))
+		return;
+
+	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
+	    task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
+	DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
+	switch (flags) {
+	case TASK_WRITE_IMMEDIATE:
+		OSAddAtomic64(io_size, (SInt64 *)&(task->task_immediate_writes));
+		ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
+		break;
+	case TASK_WRITE_DEFERRED:
+		OSAddAtomic64(io_size, (SInt64 *)&(task->task_deferred_writes));
+		ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
+		break;
+	case TASK_WRITE_INVALIDATED:
+		OSAddAtomic64(io_size, (SInt64 *)&(task->task_invalidated_writes));
+		ledger_debit(task->ledger, task_ledgers.logical_writes, io_size);
+		break;
+	case TASK_WRITE_METADATA:
+		OSAddAtomic64(io_size, (SInt64 *)&(task->task_metadata_writes));
+		ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
+		break;
+	}
+
+	io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
+	if (io_telemetry_limit != 0) {
+		/* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
+		needs_telemetry = global_update_logical_writes(io_delta);
+		if (needs_telemetry) {
+			act_set_io_telemetry_ast(current_thread());
+		}
+	}
+}
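
To make the four TASK_WRITE_* cases above concrete, here is a sketch of how a hypothetical file-system write path might account a buffered write and then retract it if the pages are invalidated before reaching disk. The helper names are invented for illustration; only task_update_logical_writes() and its flags come from the code above.

/* Hypothetical callers, for illustration only. */
static void
note_buffered_write(task_t task, void *vp, uint32_t bytes)
{
	/* Credited to the logical_writes ledger as a deferred write. */
	task_update_logical_writes(task, bytes, TASK_WRITE_DEFERRED, vp);
}

static void
note_invalidated_write(task_t task, void *vp, uint32_t bytes)
{
	/* Debited again: the deferred write never reached disk. */
	task_update_logical_writes(task, bytes, TASK_WRITE_INVALIDATED, vp);
}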
+
+/*
+ * Control the I/O monitor for a task.
+ */
+kern_return_t
+task_io_monitor_ctl(task_t task, uint32_t *flags)
+{
+	ledger_t ledger = task->ledger;
+
+	task_lock(task);
+	if (*flags & IOMON_ENABLE) {
+		/* Configure the physical I/O ledger */
+		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
+		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
+
+		/* Configure the logical I/O ledger */
+		ledger_set_limit(ledger, task_ledgers.logical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
+		ledger_set_period(ledger, task_ledgers.logical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
+
+	} else if (*flags & IOMON_DISABLE) {
+		/*
+		 * Caller wishes to disable I/O monitor on the task.
+		 */
+		ledger_disable_refill(ledger, task_ledgers.physical_writes);
+		ledger_disable_callback(ledger, task_ledgers.physical_writes);
+		ledger_disable_refill(ledger, task_ledgers.logical_writes);
+		ledger_disable_callback(ledger, task_ledgers.logical_writes);
+	}
+
+	task_unlock(task);
+	return KERN_SUCCESS;
+}
+
+void
+task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
+{
+	if (warning == 0) {
+		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
+	}
+}
+
+void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
+{
+	int pid = 0;
+	task_t task = current_task();
+#ifdef EXC_RESOURCE_MONITORS
+	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+#endif /* EXC_RESOURCE_MONITORS */
+	struct ledger_entry_info lei;
+	kern_return_t kr;
+
+#ifdef MACH_BSD
+	pid = proc_selfpid();
+#endif
+	/*
+	 * Get the ledger entry info. We need to do this before disabling the exception
+	 * to get correct values for all fields.
+	 */
+	switch (flavor) {
+	case FLAVOR_IO_PHYSICAL_WRITES:
+		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
+		break;
+	case FLAVOR_IO_LOGICAL_WRITES:
+		ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
+		break;
+	}
+
+	/*
+	 * Disable the exception notification so we don't overwhelm
+	 * the listener with an endless stream of redundant exceptions.
+	 * TODO: detect whether another thread is already reporting the violation.
+	 */
+	uint32_t flags = IOMON_DISABLE;
+	task_io_monitor_ctl(task, &flags);
+
+	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
+		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
+	}
+	os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit: %lld MB per %lld secs]\n",
+	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
+
+	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
+	if (kr) {
+		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
+	}
+
+#ifdef EXC_RESOURCE_MONITORS
+	code[0] = code[1] = 0;
+	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
+	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
+	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
+	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
+	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
+	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
+#endif /* EXC_RESOURCE_MONITORS */
+}
+
+/* Placeholders for the task set/get voucher interfaces */
+kern_return_t
+task_get_mach_voucher(
+	task_t			task,
+	mach_voucher_selector_t __unused which,
+	ipc_voucher_t		*voucher)
+{
+	if (TASK_NULL == task)
+		return KERN_INVALID_TASK;
+
+	*voucher = NULL;
+	return KERN_SUCCESS;
+}
+
+kern_return_t
+task_set_mach_voucher(
+	task_t			task,
+	ipc_voucher_t		__unused voucher)
+{
+	if (TASK_NULL == task)
+		return KERN_INVALID_TASK;
+
+	return KERN_SUCCESS;
+}
+
+kern_return_t
+task_swap_mach_voucher(
+	task_t			task,
+	ipc_voucher_t		new_voucher,
+	ipc_voucher_t		*in_out_old_voucher)
+{
+	if (TASK_NULL == task)
+		return KERN_INVALID_TASK;
+
+	*in_out_old_voucher = new_voucher;
+	return KERN_SUCCESS;
+}
+
+void task_set_gpu_denied(task_t task, boolean_t denied)
+{
+	task_lock(task);
+
+	if (denied) {
+		task->t_flags |= TF_GPU_DENIED;
+	} else {
+		task->t_flags &= ~TF_GPU_DENIED;
+	}
+
+	task_unlock(task);
+}
+
+boolean_t task_is_gpu_denied(task_t task)
+{
+	/* We don't need the lock to read this flag */
+	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
+}
+
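The GPU-denial pair above follows a pattern used throughout this file: writers serialize on the task lock while readers test a single t_flags bit without locking, accepting a benign race. A sketch of a caller (the policy hook is invented for illustration):

/* Hypothetical policy hook, for illustration only. */
static void
apply_gpu_policy(task_t task, boolean_t deny)
{
	task_set_gpu_denied(task, deny);	/* update under the task lock */

	/* Lock-free read; a racing update is acceptable to callers. */
	if (task_is_gpu_denied(task)) {
		/* ... reject GPU work for this task ... */
	}
}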
+uint64_t get_task_memory_region_count(task_t task)
+{
+	vm_map_t map;
+	map = (task == kernel_task) ? kernel_map : task->map;
+	return ((uint64_t)get_map_nentries(map));
+}
+
+static void
+kdebug_trace_dyld_internal(uint32_t base_code,
+    struct dyld_kernel_image_info *info)
+{
+	static_assert(sizeof(info->uuid) >= 16);
+
+#if defined(__LP64__)
+	uint64_t *uuid = (uint64_t *)&(info->uuid);
+
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
+	    uuid[1], info->load_addr,
+	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
+	    0);
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
+	    (uint64_t)info->fsobjid.fid_objno |
+	    ((uint64_t)info->fsobjid.fid_generation << 32),
+	    0, 0, 0, 0);
+#else /* defined(__LP64__) */
+	uint32_t *uuid = (uint32_t *)&(info->uuid);
+
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
+	    uuid[1], uuid[2], uuid[3], 0);
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
+	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
+	    info->fsobjid.fid_objno, 0);
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
+	    info->fsobjid.fid_generation, 0, 0, 0, 0);
+#endif /* !defined(__LP64__) */
+}
+
+static kern_return_t
+kdebug_trace_dyld(task_t task, uint32_t base_code,
+    vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
+{
+	kern_return_t kr;
+	dyld_kernel_image_info_array_t infos;
+	vm_map_offset_t map_data;
+	vm_offset_t data;
+
+	if (!infos_copy) {
+		return KERN_INVALID_ADDRESS;
+	}
+
+	if (!kdebug_enable ||
+	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0)))
+	{
+		vm_map_copy_discard(infos_copy);
+		return KERN_SUCCESS;
+	}
+
+	if (task == NULL || task != current_task()) {
+		return KERN_INVALID_TASK;
+	}
+
+	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
+	if (kr != KERN_SUCCESS) {
+		return kr;
+	}
+
+	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
+
+	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
+		kdebug_trace_dyld_internal(base_code, &(infos[i]));
+	}
+
+	data = CAST_DOWN(vm_offset_t, map_data);
+	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
+	return KERN_SUCCESS;
+}
+
+kern_return_t
+task_register_dyld_image_infos(task_t task,
+    dyld_kernel_image_info_array_t infos_copy,
+    mach_msg_type_number_t infos_len)
+{
+	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
+	    (vm_map_copy_t)infos_copy, infos_len);
+}
+
+kern_return_t
+task_unregister_dyld_image_infos(task_t task,
+    dyld_kernel_image_info_array_t infos_copy,
+    mach_msg_type_number_t infos_len)
+{
+	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
+	    (vm_map_copy_t)infos_copy, infos_len);
+}
+
+kern_return_t
+task_get_dyld_image_infos(__unused task_t task,
+    __unused dyld_kernel_image_info_array_t * dyld_images,
+    __unused mach_msg_type_number_t * dyld_imagesCnt)
+{
+	return KERN_NOT_SUPPORTED;
+}
+
+kern_return_t
+task_register_dyld_shared_cache_image_info(task_t task,
+    dyld_kernel_image_info_t cache_img,
+    __unused boolean_t no_cache,
+    __unused boolean_t private_cache)
+{
+	if (task == NULL || task != current_task()) {
+		return KERN_INVALID_TASK;
+	}
+
+	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
+	return KERN_SUCCESS;
+}
+
+kern_return_t
+task_register_dyld_set_dyld_state(__unused task_t task,
+    __unused uint8_t dyld_state)
+{
+	return KERN_NOT_SUPPORTED;
+}
+
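On LP64, kdebug_trace_dyld_internal() above packs fsid into one 64-bit trace argument and fsobjid into another. A consumer-side sketch of undoing that packing, matching the shifts used above (argument names are invented for illustration):

/* Sketch: unpack the LP64 trace arguments emitted above. */
static void
unpack_dyld_trace_args(uint64_t fsid_arg, uint64_t fsobjid_arg,
    int32_t fsid_val[2], uint32_t *objno, uint32_t *generation)
{
	fsid_val[0] = (int32_t)(fsid_arg & 0xffffffffULL);	/* fsid.val[0] */
	fsid_val[1] = (int32_t)(fsid_arg >> 32);		/* fsid.val[1] */
	*objno      = (uint32_t)(fsobjid_arg & 0xffffffffULL);	/* fid_objno */
	*generation = (uint32_t)(fsobjid_arg >> 32);		/* fid_generation */
}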
+kern_return_t
+task_register_dyld_get_process_state(__unused task_t task,
+    __unused dyld_kernel_process_info_t * dyld_process_state)
+{
+	return KERN_NOT_SUPPORTED;
+}
+
+kern_return_t
+task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
+    task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
+{
+#if MONOTONIC
+	task_t task = (task_t)task_insp;
+	kern_return_t kr = KERN_SUCCESS;
+	mach_msg_type_number_t size;
+
+	if (task == TASK_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	size = *size_in_out;
+
+	switch (flavor) {
+	case TASK_INSPECT_BASIC_COUNTS: {
+		struct task_inspect_basic_counts *bc;
+		uint64_t task_counts[MT_CORE_NFIXED] = { 0 };
+
+		if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
+			kr = KERN_INVALID_ARGUMENT;
+			break;
+		}
+
+		mt_fixed_task_counts(task, task_counts);
+		bc = (struct task_inspect_basic_counts *)info_out;
+#ifdef MT_CORE_INSTRS
+		bc->instructions = task_counts[MT_CORE_INSTRS];
+#else /* defined(MT_CORE_INSTRS) */
+		bc->instructions = 0;
+#endif /* !defined(MT_CORE_INSTRS) */
+		bc->cycles = task_counts[MT_CORE_CYCLES];
+		size = TASK_INSPECT_BASIC_COUNTS_COUNT;
+		break;
+	}
+	default:
+		kr = KERN_INVALID_ARGUMENT;
+		break;
+	}
+
+	if (kr == KERN_SUCCESS) {
+		*size_in_out = size;
+	}
+	return kr;
+#else /* MONOTONIC */
+#pragma unused(task_insp, flavor, info_out, size_in_out)
+	return KERN_NOT_SUPPORTED;
+#endif /* !MONOTONIC */
+}
+
+#if CONFIG_SECLUDED_MEMORY
+int num_tasks_can_use_secluded_mem = 0;
+
+void
+task_set_can_use_secluded_mem(
+	task_t		task,
+	boolean_t	can_use_secluded_mem)
+{
+	if (!task->task_could_use_secluded_mem) {
+		return;
+	}
+	task_lock(task);
+	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
+	task_unlock(task);
+}
+
+void
+task_set_can_use_secluded_mem_locked(
+	task_t		task,
+	boolean_t	can_use_secluded_mem)
+{
+	assert(task->task_could_use_secluded_mem);
+	if (can_use_secluded_mem &&
+	    secluded_for_apps && /* global boot-arg */
+	    !task->task_can_use_secluded_mem) {
+		assert(num_tasks_can_use_secluded_mem >= 0);
+		OSAddAtomic(+1,
+		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
+		task->task_can_use_secluded_mem = TRUE;
+	} else if (!can_use_secluded_mem &&
+	    task->task_can_use_secluded_mem) {
+		assert(num_tasks_can_use_secluded_mem > 0);
+		OSAddAtomic(-1,
+		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
+		task->task_can_use_secluded_mem = FALSE;
+	}
+}
+
+void
+task_set_could_use_secluded_mem(
+	task_t		task,
+	boolean_t	could_use_secluded_mem)
+{
+	task->task_could_use_secluded_mem = could_use_secluded_mem;
+}
+
+void
+task_set_could_also_use_secluded_mem(
+	task_t		task,
+	boolean_t	could_also_use_secluded_mem)
+{
+	task->task_could_also_use_secluded_mem = could_also_use_secluded_mem;
+}
+
+boolean_t
+task_can_use_secluded_mem(
+	task_t		task,
+	boolean_t	is_alloc)
+{
+	if (task->task_can_use_secluded_mem) {
+		assert(task->task_could_use_secluded_mem);
+		assert(num_tasks_can_use_secluded_mem > 0);
+		return TRUE;
+	}
+	if (task->task_could_also_use_secluded_mem &&
+	    num_tasks_can_use_secluded_mem > 0) {
+		assert(num_tasks_can_use_secluded_mem > 0);
+		return TRUE;
+	}
+
+	/*
+	 * If a single task is using more than some amount of
+	 * memory, allow it to dip into secluded and also begin
+	 * suppression of secluded memory until the task exits.
+ */ + if (is_alloc && secluded_shutoff_trigger != 0) { + uint64_t phys_used = get_task_phys_footprint(task); + if (phys_used > secluded_shutoff_trigger) { + start_secluded_suppression(task); + return TRUE; + } + } + + return FALSE; +} + +boolean_t +task_could_use_secluded_mem( + task_t task) +{ + return task->task_could_use_secluded_mem; +} +#endif /* CONFIG_SECLUDED_MEMORY */ + +queue_head_t * +task_io_user_clients(task_t task) +{ + return (&task->io_user_clients); +} + +void +task_copy_fields_for_exec(task_t dst_task, task_t src_task) +{ + dst_task->vtimers = src_task->vtimers; +} + +#if DEVELOPMENT || DEBUG +int vm_region_footprint = 0; +#endif /* DEVELOPMENT || DEBUG */ + +boolean_t +task_self_region_footprint(void) +{ +#if DEVELOPMENT || DEBUG + if (vm_region_footprint) { + /* system-wide override */ + return TRUE; + } +#endif /* DEVELOPMENT || DEBUG */ + return current_task()->task_region_footprint; +} + +void +task_self_region_footprint_set( + boolean_t newval) +{ + task_t curtask; + + curtask = current_task(); + task_lock(curtask); + if (newval) { + curtask->task_region_footprint = TRUE; + } else { + curtask->task_region_footprint = FALSE; + } + task_unlock(curtask); +} + +void +task_set_darkwake_mode(task_t task, boolean_t set_mode) +{ + assert(task); + + task_lock(task); + + if (set_mode) { + task->t_flags |= TF_DARKWAKE_MODE; + } else { + task->t_flags &= ~(TF_DARKWAKE_MODE); + } + + task_unlock(task); +} + +boolean_t +task_get_darkwake_mode(task_t task) +{ + assert(task); + return ((task->t_flags & TF_DARKWAKE_MODE) != 0); +} + +#if __arm64__ +void +task_set_legacy_footprint( + task_t task, + boolean_t new_val) +{ + task_lock(task); + task->task_legacy_footprint = new_val; + task_unlock(task); +} +#endif /* __arm64__ */
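
As a usage note for the darkwake accessors above, here is a sketch of how a hypothetical power-management transition might drive the flag. Only task_set_darkwake_mode() and task_get_darkwake_mode() come from the code above; the hook itself is invented for illustration.

/* Hypothetical sleep/wake hook, for illustration only. */
static void
on_darkwake_transition(task_t task, boolean_t entering)
{
	/* Toggles TF_DARKWAKE_MODE under the task lock. */
	task_set_darkwake_mode(task, entering);

	/* The getter returns TRUE/FALSE, so this holds when 'entering' does. */
	assert(task_get_darkwake_mode(task) == entering);
}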