X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3e170ce000f1506b7b5d2c5c7faec85ceabb573d..f427ee49d309d8fc33ebf3042c3a775f2f530ded:/osfmk/kern/task.h?ds=sidebyside

diff --git a/osfmk/kern/task.h b/osfmk/kern/task.h
index 7b7c15306..d74ddc935 100644
--- a/osfmk/kern/task.h
+++ b/osfmk/kern/task.h
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2010, 2015 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,34 +22,34 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_FREE_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  * School of Computer Science
  * Carnegie Mellon University
  * Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
@@ -88,14 +88,21 @@ * Copyright (c) 2005 SPARTA, Inc.
 */
-#ifndef    _KERN_TASK_H_
+#ifndef _KERN_TASK_H_
 #define _KERN_TASK_H_
 #include 
 #include 
 #include 
-#ifdef    MACH_KERNEL_PRIVATE
+#ifdef XNU_KERNEL_PRIVATE
+#include 
+#include 
+#include 
+#include 
+#endif /* XNU_KERNEL_PRIVATE */
+
+#ifdef MACH_KERNEL_PRIVATE
 #include 
 #include 
@@ -107,162 +114,155 @@
 #include 
 #include 
+#if MONOTONIC
+#include 
+#endif /* MONOTONIC */
+
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
-#endif  /* MACH_KERNEL_PRIVATE */
-
-#ifdef XNU_KERNEL_PRIVATE
-
-#include 
-#include 
-
-/* defns for task->rsu_controldata */
-#define TASK_POLICY_CPU_RESOURCE_USAGE          0
-#define TASK_POLICY_WIREDMEM_RESOURCE_USAGE     1
-#define TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE   2
-#define TASK_POLICY_DISK_RESOURCE_USAGE         3
-#define TASK_POLICY_NETWORK_RESOURCE_USAGE      4
-#define TASK_POLICY_POWER_RESOURCE_USAGE        5
-
-#define TASK_POLICY_RESOURCE_USAGE_COUNT        6
-
-#define TASK_POLICY_CPUMON_DISABLE              0xFF
-#define TASK_POLICY_CPUMON_DEFAULTS             0xFE
-
-/* Resource usage/low resource attributes */
-#define TASK_POLICY_RESOURCE_ATTRIBUTE_NONE       0x00
-#define TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE   0x01
-#define TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND    0x02
-#define TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE  0x03
-#define TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ  0x04
-#define TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC 0x05
-#define TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE
-
-#endif /* XNU_KERNEL_PRIVATE */
-
-#ifdef MACH_KERNEL_PRIVATE
-
 #include 
 #include 
-
-#ifdef CONFIG_ATM
-#include 
-#endif
+#include 
+#include 

 struct _cpu_time_qos_stats {
-    uint64_t cpu_time_qos_default;
-    uint64_t cpu_time_qos_maintenance;
-    uint64_t cpu_time_qos_background;
-    uint64_t cpu_time_qos_utility;
-    uint64_t cpu_time_qos_legacy;
-    uint64_t cpu_time_qos_user_initiated;
-    uint64_t cpu_time_qos_user_interactive;
+    uint64_t cpu_time_qos_default;
+    uint64_t cpu_time_qos_maintenance;
+    uint64_t cpu_time_qos_background;
+    uint64_t cpu_time_qos_utility;
+    uint64_t cpu_time_qos_legacy;
+    uint64_t cpu_time_qos_user_initiated;
+    uint64_t cpu_time_qos_user_interactive;
+};
+
+struct task_writes_counters {
+    uint64_t task_immediate_writes;
+    uint64_t task_deferred_writes;
+    uint64_t task_invalidated_writes;
+    uint64_t task_metadata_writes;
 };

-#ifdef CONFIG_BANK
+struct task_watchports;
 #include 
-#endif

 struct task {
     /* Synchronization/destruction information */
-    decl_lck_mtx_data(,lock)     /* Task's lock */
-    uint32_t       ref_count;    /* Number of references to me */
-    boolean_t      active;       /* Task has not been terminated */
-    boolean_t      halting;      /* Task is being halted */
+    decl_lck_mtx_data(, lock);   /* Task's lock */
+    os_refcnt_t    ref_count;    /* Number of references to me */
+    boolean_t      active;       /* Task has not been terminated */
+    boolean_t      halting;      /* Task is being halted */
+    boolean_t      message_app_suspended;  /* Let iokit know when pidsuspended */
+
+    /* Virtual timers */
+    uint32_t       vtimers;

     /* Miscellaneous */
-    vm_map_t       map;          /* Address space description */
-    queue_chain_t  tasks;        /* global list of tasks */
-    void           *user_data;   /* Arbitrary data settable via IPC */
+    vm_map_t       XNU_PTRAUTH_SIGNED_PTR("task.map") map;  /* Address space description */
+    queue_chain_t  tasks;        /* global list of tasks */
+    struct task_watchports *watchports;          /* watchports passed in spawn */
+    turnstile_inheritor_t  returnwait_inheritor; /* inheritor for task_wait */

 #if defined(CONFIG_SCHED_MULTIQ)
     sched_group_t sched_group;
 #endif /* defined(CONFIG_SCHED_MULTIQ) */

     /* Threads in this task */
-    queue_head_t   threads;
+    queue_head_t   threads;
+    struct
restartable_ranges *restartable_ranges; - processor_set_t pset_hint; - struct affinity_space *affinity_space; + processor_set_t pset_hint; + struct affinity_space *affinity_space; - int thread_count; - uint32_t active_thread_count; - int suspend_count; /* Internal scheduling only */ + int thread_count; + uint32_t active_thread_count; + int suspend_count; /* Internal scheduling only */ /* User-visible scheduling information */ - integer_t user_stop_count; /* outstanding stops */ - integer_t legacy_stop_count; /* outstanding legacy stops */ + integer_t user_stop_count; /* outstanding stops */ + integer_t legacy_stop_count; /* outstanding legacy stops */ - integer_t priority; /* base priority for threads */ - integer_t max_priority; /* maximum priority for threads */ + int16_t priority; /* base priority for threads */ + int16_t max_priority; /* maximum priority for threads */ - integer_t importance; /* priority offset (BSD 'nice' value) */ + integer_t importance; /* priority offset (BSD 'nice' value) */ /* Task security and audit tokens */ security_token_t sec_token; - audit_token_t audit_token; - + audit_token_t audit_token; + /* Statistics */ - uint64_t total_user_time; /* terminated threads only */ - uint64_t total_system_time; - - /* Virtual timers */ - uint32_t vtimers; + uint64_t total_user_time; /* terminated threads only */ + uint64_t total_system_time; + uint64_t total_ptime; + uint64_t total_runnable_time; /* IPC structures */ - decl_lck_mtx_data(,itk_lock_data) - struct ipc_port *itk_self; /* not a right, doesn't hold ref */ - struct ipc_port *itk_nself; /* not a right, doesn't hold ref */ - struct ipc_port *itk_sself; /* a send right */ + decl_lck_mtx_data(, itk_lock_data); + /* + * Different flavors of task port. + * These flavors TASK_FLAVOR_* are defined in mach_types.h + */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_self") itk_self[TASK_SELF_PORT_COUNT]; /* does not hold right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_settable_self") itk_settable_self; /* a send right */ struct exception_action exc_actions[EXC_TYPES_COUNT]; - /* a send right each valid element */ - struct ipc_port *itk_host; /* a send right */ - struct ipc_port *itk_bootstrap; /* a send right */ - struct ipc_port *itk_seatbelt; /* a send right */ - struct ipc_port *itk_gssd; /* yet another send right */ - struct ipc_port *itk_debug_control; /* send right for debugmode communications */ - struct ipc_port *itk_task_access; /* and another send right */ - struct ipc_port *itk_resume; /* a receive right to resume this task */ - struct ipc_port *itk_registered[TASK_PORT_REGISTER_MAX]; - /* all send rights */ - - struct ipc_space *itk_space; - + /* a send right each valid element */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_host") itk_host; /* a send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_bootstrap") itk_bootstrap; /* a send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_seatbelt") itk_seatbelt; /* a send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_gssd") itk_gssd; /* yet another send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_debug_control") itk_debug_control; /* send right for debugmode communications */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_access") itk_task_access; /* and another send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resume") itk_resume; /* a receive right to resume this task */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_registered") 
itk_registered[TASK_PORT_REGISTER_MAX]; + /* all send rights */ + + struct ipc_space * XNU_PTRAUTH_SIGNED_PTR("task.itk_space") itk_space; + + ledger_t ledger; /* Synchronizer ownership information */ - queue_head_t semaphore_list; /* list of owned semaphores */ - int semaphores_owned; /* number of semaphores owned */ + queue_head_t semaphore_list; /* list of owned semaphores */ + int semaphores_owned; /* number of semaphores owned */ - ledger_t ledger; - - unsigned int priv_flags; /* privilege resource flags */ -#define VM_BACKING_STORE_PRIV 0x1 + unsigned int priv_flags; /* privilege resource flags */ +#define VM_BACKING_STORE_PRIV 0x1 MACHINE_TASK - + integer_t faults; /* faults counter */ - integer_t pageins; /* pageins counter */ - integer_t cow_faults; /* copy on write fault counter */ - integer_t messages_sent; /* messages sent counter */ - integer_t messages_received; /* messages received counter */ - integer_t syscalls_mach; /* mach system call counter */ - integer_t syscalls_unix; /* unix system call counter */ - uint32_t c_switch; /* total context switches */ - uint32_t p_switch; /* total processor switches */ - uint32_t ps_switch; /* total pset switches */ - - zinfo_usage_t tkm_zinfo; /* per-task, per-zone usage statistics */ - -#ifdef MACH_BSD - void *bsd_info; -#endif - kcdata_descriptor_t corpse_info; - struct vm_shared_region *shared_region; + integer_t decompressions; /* decompression counter */ + integer_t pageins; /* pageins counter */ + integer_t cow_faults; /* copy on write fault counter */ + integer_t messages_sent; /* messages sent counter */ + integer_t messages_received; /* messages received counter */ + integer_t syscalls_mach; /* mach system call counter */ + integer_t syscalls_unix; /* unix system call counter */ + uint32_t c_switch; /* total context switches */ + uint32_t p_switch; /* total processor switches */ + uint32_t ps_switch; /* total pset switches */ + +#ifdef MACH_BSD + void * XNU_PTRAUTH_SIGNED_PTR("task.bsd_info") bsd_info; +#endif + kcdata_descriptor_t corpse_info; + uint64_t crashed_thread_id; + queue_chain_t corpse_tasks; +#ifdef CONFIG_MACF + struct label * crash_label; + uint8_t * mach_trap_filter_mask; /* Mach trap filter bitmask (len: mach_trap_count bits) */ + uint8_t * mach_kobj_filter_mask; /* Mach kobject filter bitmask (len: mach_kobj_count bits) */ +#endif + struct vm_shared_region *shared_region; +#if __has_feature(ptrauth_calls) + char *shared_region_id; /* determines which ptr auth key to use */ + bool shared_region_auth_remapped; /* authenticated sections ready for use */ +#endif /* __has_feature(ptrauth_calls) */ volatile uint32_t t_flags; /* general-purpose task flags protected by task_lock (TL) */ +#define TF_NONE 0 #define TF_64B_ADDR 0x00000001 /* task has 64-bit addressing */ #define TF_64B_DATA 0x00000002 /* task has 64-bit data registers */ #define TF_CPUMON_WARNING 0x00000004 /* task has at least one thread in CPU usage warning zone */ @@ -271,15 +271,35 @@ struct task { #define TF_GPU_DENIED 0x00000010 /* task is not allowed to access the GPU */ #define TF_CORPSE 0x00000020 /* task is a corpse */ #define TF_PENDING_CORPSE 0x00000040 /* task corpse has not been reported yet */ +#define TF_CORPSE_FORK 0x00000080 /* task is a forked corpse */ +#define TF_PLATFORM 0x00000400 /* task is a platform binary */ +#define TF_CA_CLIENT_WI 0x00000800 /* task has CA_CLIENT work interval */ +#define TF_DARKWAKE_MODE 0x00001000 /* task is in darkwake mode */ +#define TF_NO_SMT 0x00002000 /* task threads must not be paired with SMT threads 
*/ +#define TF_FILTER_MSG 0x00004000 /* task calls into message filter callback before sending a message */ +#define TF_SYS_VERSION_COMPAT 0x00008000 /* shim task accesses to OS version data (macOS - app compatibility) */ +#define TF_PAC_EXC_FATAL 0x00010000 /* task is marked a corpse if a PAC exception occurs */ +#define TF_TECS 0x00020000 /* task threads must enable CPU security */ -#define task_has_64BitAddr(task) \ - (((task)->t_flags & TF_64B_ADDR) != 0) -#define task_set_64BitAddr(task) \ - ((task)->t_flags |= TF_64B_ADDR) -#define task_clear_64BitAddr(task) \ - ((task)->t_flags &= ~TF_64B_ADDR) -#define task_has_64BitData(task) \ - (((task)->t_flags & TF_64B_DATA) != 0) +/* + * Task is running within a 64-bit address space. + */ +#define task_has_64Bit_addr(task) \ + (((task)->t_flags & TF_64B_ADDR) != 0) +#define task_set_64Bit_addr(task) \ + ((task)->t_flags |= TF_64B_ADDR) +#define task_clear_64Bit_addr(task) \ + ((task)->t_flags &= ~TF_64B_ADDR) + +/* + * Task is using 64-bit machine state. + */ +#define task_has_64Bit_data(task) \ + (((task)->t_flags & TF_64B_DATA) != 0) +#define task_set_64Bit_data(task) \ + ((task)->t_flags |= TF_64B_DATA) +#define task_clear_64Bit_data(task) \ + ((task)->t_flags &= ~TF_64B_DATA) #define task_is_a_corpse(task) \ (((task)->t_flags & TF_CORPSE) != 0) @@ -287,7 +307,7 @@ struct task { #define task_set_corpse(task) \ ((task)->t_flags |= TF_CORPSE) -#define task_corpse_pending_report(task) \ +#define task_corpse_pending_report(task) \ (((task)->t_flags & TF_PENDING_CORPSE) != 0) #define task_set_corpse_pending_report(task) \ @@ -296,81 +316,128 @@ struct task { #define task_clear_corpse_pending_report(task) \ ((task)->t_flags &= ~TF_PENDING_CORPSE) - mach_vm_address_t all_image_info_addr; /* dyld __all_image_info */ - mach_vm_size_t all_image_info_size; /* section location and size */ - -#if KPERF -#define TASK_PMC_FLAG 0x1 /* Bit in "t_chud" signifying PMC interest */ -#define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_chud" signifying KPC forced all counters */ +#define task_is_a_corpse_fork(task) \ + (((task)->t_flags & TF_CORPSE_FORK) != 0) - uint32_t t_chud; /* CHUD flags, used for Shark */ + uint32_t t_procflags; /* general-purpose task flags protected by proc_lock (PL) */ +#define TPF_NONE 0 +#define TPF_DID_EXEC 0x00000001 /* task has been execed to a new task */ +#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */ +#ifdef CONFIG_32BIT_TELEMETRY +#define TPF_LOG_32BIT_TELEMETRY 0x00000004 /* task should log identifying information */ #endif +#define task_did_exec_internal(task) \ + (((task)->t_procflags & TPF_DID_EXEC) != 0) + +#define task_is_exec_copy_internal(task) \ + (((task)->t_procflags & TPF_EXEC_COPY) != 0) + + uint8_t t_returnwaitflags; +#define TWF_NONE 0 +#define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */ +#define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */ + + mach_vm_address_t all_image_info_addr; /* dyld __all_image_info */ + mach_vm_size_t all_image_info_size; /* section location and size */ + +#if KPC +#define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_kpc" signifying this task forced all counters */ + uint32_t t_kpc; /* kpc flags */ +#endif /* KPC */ + boolean_t pidsuspended; /* pid_suspend called; no threads can execute */ boolean_t frozen; /* frozen; private resident pages committed to swap */ - boolean_t changing_freeze_state; /* in the process of freezing or thawing */ + boolean_t changing_freeze_state; /* in the process of 
freezing or thawing */ uint16_t policy_ru_cpu :4, - policy_ru_cpu_ext :4, - applied_ru_cpu :4, - applied_ru_cpu_ext :4; + policy_ru_cpu_ext :4, + applied_ru_cpu :4, + applied_ru_cpu_ext :4; uint8_t rusage_cpu_flags; - uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */ - uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */ + uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */ uint8_t rusage_cpu_perthr_percentage; /* Per-thread CPU limit percentage */ +#if MACH_ASSERT + int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */ +#endif + uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */ uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */ uint64_t rusage_cpu_deadline; thread_call_t rusage_cpu_callt; +#if CONFIG_TASKWATCH + queue_head_t task_watchers; /* app state watcher threads */ + int num_taskwatchers; + int watchapplying; +#endif /* CONFIG_TASKWATCH */ -#if CONFIG_ATM - struct atm_task_descriptor *atm_context; /* pointer to per task atm descriptor */ -#endif -#if CONFIG_BANK struct bank_task *bank_context; /* pointer to per task bank structure */ -#endif #if IMPORTANCE_INHERITANCE - struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */ + struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */ #endif /* IMPORTANCE_INHERITANCE */ - vm_extmod_statistics_data_t extmod_statistics; - -#if MACH_ASSERT - int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */ -#endif + vm_extmod_statistics_data_t extmod_statistics; struct task_requested_policy requested_policy; struct task_effective_policy effective_policy; - struct task_pended_policy pended_policy; /* * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away. 
*/ - uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */ - low_mem_notified_critical :1, /* critical low memory notification is sent to the task */ - purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */ - purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */ - low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */ - mem_notify_reserved :27; /* reserved for future use */ - - io_stat_info_t task_io_stats; - - /* + uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */ + low_mem_notified_critical :1, /* critical low memory notification is sent to the task */ + purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */ + purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */ + low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */ + mem_notify_reserved :27; /* reserved for future use */ + + uint32_t memlimit_is_active :1, /* if set, use active attributes, otherwise use inactive attributes */ + memlimit_is_fatal :1, /* if set, exceeding current memlimit will prove fatal to the task */ + memlimit_active_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds active memory limit */ + memlimit_inactive_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds inactive memory limit */ + memlimit_attrs_reserved :28; /* reserved for future use */ + + io_stat_info_t task_io_stats; + + struct task_writes_counters task_writes_counters_internal; + struct task_writes_counters task_writes_counters_external; + + /* * The cpu_time_qos_stats fields are protected by the task lock */ - struct _cpu_time_qos_stats cpu_time_qos_stats; + struct _cpu_time_qos_stats cpu_time_eqos_stats; + struct _cpu_time_qos_stats cpu_time_rqos_stats; /* Statistics accumulated for terminated threads from this task */ - uint32_t task_timer_wakeups_bin_1; - uint32_t task_timer_wakeups_bin_2; - uint64_t task_gpu_ns; - + uint32_t task_timer_wakeups_bin_1; + uint32_t task_timer_wakeups_bin_2; + uint64_t task_gpu_ns; + uint64_t task_energy; + +#if MONOTONIC + /* Read and written under task_lock */ + struct mt_task task_monotonic; +#endif /* MONOTONIC */ + + uint8_t task_can_transfer_memory_ownership; + uint8_t task_objects_disowning; + uint8_t task_objects_disowned; /* # of purgeable volatile VM objects owned by this task: */ - int task_volatile_objects; + int task_volatile_objects; /* # of purgeable but not volatile VM objects owned by this task: */ - int task_nonvolatile_objects; - boolean_t task_purgeable_disowning; - boolean_t task_purgeable_disowned; - + int task_nonvolatile_objects; + int task_owned_objects; + queue_head_t task_objq; + decl_lck_mtx_data(, task_objq_lock); /* protects "task_objq" */ + + unsigned int task_thread_limit:16; +#if __arm64__ + unsigned int task_legacy_footprint:1; + unsigned int task_extra_footprint_limit:1; + unsigned int task_ios13extended_footprint_limit:1; +#endif /* __arm64__ */ + unsigned int task_region_footprint:1; + unsigned int task_has_crossed_thread_limit:1; + uint32_t exec_token; /* * A task's coalition set is "adopted" in task_create_internal * and unset in task_deallocate_internal, so each array member @@ -378,98 +445,207 @@ struct task { * Note: these fields are 
protected by coalition->lock, * not the task lock. */ - coalition_t coalition[COALITION_NUM_TYPES]; + coalition_t coalition[COALITION_NUM_TYPES]; queue_chain_t task_coalition[COALITION_NUM_TYPES]; uint64_t dispatchqueue_offset; +#if DEVELOPMENT || DEBUG + boolean_t task_unnested; + int task_disconnected_count; +#endif + #if HYPERVISOR - void *hv_task_target; /* hypervisor virtual machine object associated with this task */ + void * XNU_PTRAUTH_SIGNED_PTR("task.hv_task_target") hv_task_target; /* hypervisor virtual machine object associated with this task */ #endif /* HYPERVISOR */ -}; -#define task_lock(task) lck_mtx_lock(&(task)->lock) -#define task_lock_assert_owned(task) lck_mtx_assert(&(task)->lock, LCK_MTX_ASSERT_OWNED) -#define task_lock_try(task) lck_mtx_try_lock(&(task)->lock) -#define task_unlock(task) lck_mtx_unlock(&(task)->lock) +#if CONFIG_SECLUDED_MEMORY + uint8_t task_can_use_secluded_mem; + uint8_t task_could_use_secluded_mem; + uint8_t task_could_also_use_secluded_mem; + uint8_t task_suppressed_secluded; +#endif /* CONFIG_SECLUDED_MEMORY */ + + task_exc_guard_behavior_t task_exc_guard; + + queue_head_t io_user_clients; -#define itk_lock_init(task) lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr) -#define itk_lock_destroy(task) lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp) -#define itk_lock(task) lck_mtx_lock(&(task)->itk_lock_data) -#define itk_unlock(task) lck_mtx_unlock(&(task)->itk_lock_data) + mach_vm_address_t mach_header_vm_address; + + uint32_t loadTag; /* dext ID used for logging identity */ +#if CONFIG_FREEZE + queue_head_t task_frozen_cseg_q; /* queue of csegs frozen to NAND */ +#endif /* CONFIG_FREEZE */ +#if CONFIG_PHYS_WRITE_ACCT + uint64_t task_fs_metadata_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ +}; + +/* + * EXC_GUARD default delivery behavior for optional Mach port and VM guards. + * Applied to new tasks at creation time. 
+ */ +extern task_exc_guard_behavior_t task_exc_guard_default; + +extern kern_return_t + task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *); + +static inline void +task_require(struct task *task) +{ + zone_id_require(ZONE_ID_TASK, sizeof(struct task), task); +} + +#define task_lock(task) lck_mtx_lock(&(task)->lock) +#define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED) +#define task_lock_try(task) lck_mtx_try_lock(&(task)->lock) +#define task_unlock(task) lck_mtx_unlock(&(task)->lock) + +#define task_objq_lock_init(task) lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr) +#define task_objq_lock_destroy(task) lck_mtx_destroy(&(task)->task_objq_lock, &vm_object_lck_grp) +#define task_objq_lock(task) lck_mtx_lock(&(task)->task_objq_lock) +#define task_objq_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED) +#define task_objq_lock_try(task) lck_mtx_try_lock(&(task)->task_objq_lock) +#define task_objq_unlock(task) lck_mtx_unlock(&(task)->task_objq_lock) + +#define itk_lock_init(task) lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr) +#define itk_lock_destroy(task) lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp) +#define itk_lock(task) lck_mtx_lock(&(task)->itk_lock_data) +#define itk_unlock(task) lck_mtx_unlock(&(task)->itk_lock_data) + +/* task clear return wait flags */ +#define TCRW_CLEAR_INITIAL_WAIT 0x1 +#define TCRW_CLEAR_FINAL_WAIT 0x2 +#define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT) #define TASK_REFERENCE_LEAK_DEBUG 0 #if TASK_REFERENCE_LEAK_DEBUG extern void task_reference_internal(task_t task); -extern uint32_t task_deallocate_internal(task_t task); +extern os_ref_count_t task_deallocate_internal(task_t task); #else -#define task_reference_internal(task) \ - (void)hw_atomic_add(&(task)->ref_count, 1) - -#define task_deallocate_internal(task) \ - hw_atomic_sub(&(task)->ref_count, 1) +#define task_reference_internal(task) \ +MACRO_BEGIN \ + task_require(task); \ + os_ref_retain(&(task)->ref_count); \ +MACRO_END +#define task_deallocate_internal(task) os_ref_release(&(task)->ref_count) #endif -#define task_reference(task) \ -MACRO_BEGIN \ - if ((task) != TASK_NULL) \ - task_reference_internal(task); \ +#define task_reference(task) \ +MACRO_BEGIN \ + if ((task) != TASK_NULL) \ + task_reference_internal(task); \ MACRO_END -extern kern_return_t kernel_task_create( - task_t task, - vm_offset_t map_base, - vm_size_t map_size, - task_t *child); +extern kern_return_t kernel_task_create( + task_t task, + vm_offset_t map_base, + vm_size_t map_size, + task_t *child); /* Initialize task module */ -extern void task_init(void); +extern void task_init(void); /* coalition_init() calls this to initialize ledgers before task_init() */ -extern void init_task_ledgers(void); +extern void init_task_ledgers(void); -#define current_task_fast() (current_thread()->task) -#define current_task() current_task_fast() +#define current_task_fast() (current_thread()->task) +#define current_task() current_task_fast() + +extern bool task_is_driver(task_t task); extern lck_attr_t task_lck_attr; extern lck_grp_t task_lck_grp; -#define QOS_OVERRIDE_MODE_OVERHANG_PEAK 0 -#define QOS_OVERRIDE_MODE_IGNORE_OVERRIDE 1 -#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE 2 -#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH 3 -#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE 4 +struct task_watchport_elem { + task_t 
twe_task; + ipc_port_t twe_port; /* (Space lock) */ +}; + +struct task_watchports { + os_refcnt_t tw_refcount; /* (Space lock) */ + task_t tw_task; /* (Space lock) & tw_refcount == 0 */ + thread_t tw_thread; /* (Space lock) & tw_refcount == 0 */ + uint32_t tw_elem_array_count; /* (Space lock) */ + struct task_watchport_elem tw_elem[]; /* (Space lock) & (Portlock) & (mq lock) */ +}; + +#define task_watchports_retain(x) (os_ref_retain(&(x)->tw_refcount)) +#define task_watchports_release(x) (os_ref_release(&(x)->tw_refcount)) + +#define task_watchport_elem_init(elem, task, port) \ +do { \ + (elem)->twe_task = (task); \ + (elem)->twe_port = (port); \ +} while(0) + +#define task_watchport_elem_clear(elem) task_watchport_elem_init((elem), NULL, NULL) + +extern void +task_add_turnstile_watchports( + task_t task, + thread_t thread, + ipc_port_t *portwatch_ports, + uint32_t portwatch_count); -extern uint32_t qos_override_mode; +extern void +task_watchport_elem_deallocate( + struct task_watchport_elem *watchport_elem); -#else /* MACH_KERNEL_PRIVATE */ +extern boolean_t +task_has_watchports(task_t task); + +#else /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS -extern task_t current_task(void); +extern task_t current_task(void); + +extern void task_reference(task_t task); +extern bool task_is_driver(task_t task); + +#define TF_NONE 0 + +#define TWF_NONE 0 +#define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */ +#define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */ + +/* task clear return wait flags */ +#define TCRW_CLEAR_INITIAL_WAIT 0x1 +#define TCRW_CLEAR_FINAL_WAIT 0x2 +#define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT) + + +#define TPF_NONE 0 +#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */ -extern void task_reference(task_t task); __END_DECLS -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS -#ifdef XNU_KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE +extern boolean_t task_is_app_suspended(task_t task); +extern bool task_is_exotic(task_t task); +extern bool task_is_alien(task_t task); +#endif + +#ifdef XNU_KERNEL_PRIVATE /* Hold all threads in a task */ -extern kern_return_t task_hold( - task_t task); +extern kern_return_t task_hold( + task_t task); /* Wait for task to stop running, either just to get off CPU or to cease being runnable */ -extern kern_return_t task_wait( - task_t task, - boolean_t until_not_runnable); +extern kern_return_t task_wait( + task_t task, + boolean_t until_not_runnable); /* Release hold on all threads in a task */ -extern kern_return_t task_release( - task_t task); +extern kern_return_t task_release( + task_t task); /* Suspend/resume a task where the kernel owns the suspend count */ extern kern_return_t task_suspend_internal( task_t task); @@ -477,137 +653,256 @@ extern kern_return_t task_resume_internal( task_t task); /* Suspends a task by placing a hold on its threads */ extern kern_return_t task_pidsuspend( - task_t task); -extern kern_return_t task_pidsuspend_locked( - task_t task); + task_t task); /* Resumes a previously paused task */ extern kern_return_t task_pidresume( - task_t task); - -extern kern_return_t task_send_trace_memory( - task_t task, - uint32_t pid, - uint64_t uniqueid); - -#if CONFIG_FREEZE + task_t task); -/* Freeze a task's resident pages */ -extern kern_return_t task_freeze( - task_t task, - uint32_t *purgeable_count, - uint32_t *wired_count, - uint32_t *clean_count, - uint32_t *dirty_count, - uint32_t 
dirty_budget, - boolean_t *shared, - boolean_t walk_only); - -/* Thaw a currently frozen task */ -extern kern_return_t task_thaw( - task_t task); - -#endif /* CONFIG_FREEZE */ - -/* Halt all other threads in the current task */ -extern kern_return_t task_start_halt( - task_t task); +extern kern_return_t task_send_trace_memory( + task_t task, + uint32_t pid, + uint64_t uniqueid); -/* Wait for other threads to halt and free halting task resources */ -extern void task_complete_halt( - task_t task); +extern void task_remove_turnstile_watchports( + task_t task); -extern kern_return_t task_terminate_internal( - task_t task); +extern void task_transfer_turnstile_watchports( + task_t old_task, + task_t new_task, + thread_t new_thread); -extern kern_return_t task_create_internal( - task_t parent_task, - coalition_t *parent_coalitions, - boolean_t inherit_memory, - boolean_t is_64bit, - task_t *child_task); /* OUT */ +#if DEVELOPMENT || DEBUG -extern kern_return_t task_importance( - task_t task, - integer_t importance); +extern kern_return_t task_disconnect_page_mappings( + task_t task); +#endif -extern void task_power_info_locked( - task_t task, - task_power_info_t info, - gpu_energy_data_t gpu_energy); +extern void tasks_system_suspend(boolean_t suspend); -extern uint64_t task_gpu_utilisation( - task_t task); +#if CONFIG_FREEZE -extern void task_vtimer_set( - task_t task, - integer_t which); +/* Freeze a task's resident pages */ +extern kern_return_t task_freeze( + task_t task, + uint32_t *purgeable_count, + uint32_t *wired_count, + uint32_t *clean_count, + uint32_t *dirty_count, + uint32_t dirty_budget, + uint32_t *shared_count, + int *freezer_error_code, + boolean_t eval_only); -extern void task_vtimer_clear( - task_t task, - integer_t which); +/* Thaw a currently frozen task */ +extern kern_return_t task_thaw( + task_t task); -extern void task_vtimer_update( - task_t task, - integer_t which, - uint32_t *microsecs); +typedef enum { + CREDIT_TO_SWAP = 1, + DEBIT_FROM_SWAP = 2 +} freezer_acct_op_t; -#define TASK_VTIMER_USER 0x01 -#define TASK_VTIMER_PROF 0x02 -#define TASK_VTIMER_RLIM 0x04 +extern void task_update_frozen_to_swap_acct( + task_t task, + int64_t amount, + freezer_acct_op_t op); -extern void task_set_64bit( - task_t task, - boolean_t is64bit); +#endif /* CONFIG_FREEZE */ -extern void task_backing_store_privileged( - task_t task); +/* Halt all other threads in the current task */ +extern kern_return_t task_start_halt( + task_t task); -extern void task_set_dyld_info( - task_t task, - mach_vm_address_t addr, - mach_vm_size_t size); +/* Wait for other threads to halt and free halting task resources */ +extern void task_complete_halt( + task_t task); + +extern kern_return_t task_terminate_internal( + task_t task); + +extern kern_return_t task_create_internal( + task_t parent_task, + coalition_t *parent_coalitions, + boolean_t inherit_memory, + boolean_t is_64bit, + boolean_t is_64bit_data, + uint32_t flags, + uint32_t procflags, + uint8_t t_returnwaitflags, + task_t *child_task); /* OUT */ + +extern kern_return_t task_set_special_port_internal( + task_t task, + int which, + ipc_port_t port); + +extern kern_return_t task_info( + task_t task, + task_flavor_t flavor, + task_info_t task_info_out, + mach_msg_type_number_t *task_info_count); + +extern void task_power_info_locked( + task_t task, + task_power_info_t info, + gpu_energy_data_t gpu_energy, + task_power_info_v2_t infov2, + uint64_t *runnable_time); + +extern uint64_t task_gpu_utilisation( + task_t task); + +extern uint64_t 
task_energy( + task_t task); + +extern uint64_t task_cpu_ptime( + task_t task); +extern void task_update_cpu_time_qos_stats( + task_t task, + uint64_t *eqos_stats, + uint64_t *rqos_stats); + +extern void task_vtimer_set( + task_t task, + integer_t which); + +extern void task_vtimer_clear( + task_t task, + integer_t which); + +extern void task_vtimer_update( + task_t task, + integer_t which, + uint32_t *microsecs); + +#define TASK_VTIMER_USER 0x01 +#define TASK_VTIMER_PROF 0x02 +#define TASK_VTIMER_RLIM 0x04 + +extern void task_set_64bit( + task_t task, + boolean_t is_64bit, + boolean_t is_64bit_data); + +extern boolean_t task_get_64bit_data( + task_t task); + +extern void task_set_platform_binary( + task_t task, + boolean_t is_platform); +extern bool task_set_ca_client_wi( + task_t task, + boolean_t ca_client_wi); + +extern void task_set_dyld_info( + task_t task, + mach_vm_address_t addr, + mach_vm_size_t size); + +extern void task_set_mach_header_address( + task_t task, + mach_vm_address_t addr); /* Get number of activations in a task */ -extern int get_task_numacts( - task_t task); - -extern int get_task_numactivethreads(task_t task); -extern kern_return_t task_collect_crash_info(task_t task); +extern int get_task_numacts( + task_t task); + +struct label; +extern kern_return_t task_collect_crash_info( + task_t task, +#if CONFIG_MACF + struct label *crash_label, +#endif + int is_corpse_fork); +void task_port_notify(mach_msg_header_t *msg); +void task_port_with_flavor_notify(mach_msg_header_t *msg); +void task_wait_till_threads_terminate_locked(task_t task); /* JMM - should just be temporary (implementation in bsd_kern still) */ -extern void set_bsdtask_info(task_t,void *); +extern void set_bsdtask_info(task_t, void *); +extern uint32_t set_task_loadTag(task_t task, uint32_t loadTag); extern vm_map_t get_task_map_reference(task_t); -extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t, boolean_t); -extern pmap_t get_task_pmap(task_t); -extern uint64_t get_task_resident_size(task_t); -extern uint64_t get_task_compressed(task_t); -extern uint64_t get_task_resident_max(task_t); -extern uint64_t get_task_phys_footprint(task_t); -extern uint64_t get_task_phys_footprint_max(task_t); -extern uint64_t get_task_purgeable_size(task_t); -extern uint64_t get_task_cpu_time(task_t); +extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t); +extern pmap_t get_task_pmap(task_t); +extern uint64_t get_task_resident_size(task_t); +extern uint64_t get_task_compressed(task_t); +extern uint64_t get_task_resident_max(task_t); +extern uint64_t get_task_phys_footprint(task_t); +#if CONFIG_LEDGER_INTERVAL_MAX +extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset); +#endif /* CONFIG_FOOTPRINT_INTERVAL_MAX */ +extern uint64_t get_task_phys_footprint_lifetime_max(task_t); +extern uint64_t get_task_phys_footprint_limit(task_t); +extern uint64_t get_task_purgeable_size(task_t); +extern uint64_t get_task_cpu_time(task_t); extern uint64_t get_task_dispatchqueue_offset(task_t); +extern uint64_t get_task_dispatchqueue_serialno_offset(task_t); +extern uint64_t get_task_dispatchqueue_label_offset(task_t); +extern uint64_t get_task_uniqueid(task_t task); +extern int get_task_version(task_t task); + +extern uint64_t get_task_internal(task_t); +extern uint64_t get_task_internal_compressed(task_t); +extern uint64_t get_task_purgeable_nonvolatile(task_t); +extern uint64_t get_task_purgeable_nonvolatile_compressed(task_t); +extern uint64_t get_task_iokit_mapped(task_t); +extern uint64_t 
get_task_alternate_accounting(task_t); +extern uint64_t get_task_alternate_accounting_compressed(task_t); +extern uint64_t get_task_memory_region_count(task_t); +extern uint64_t get_task_page_table(task_t); +#if CONFIG_FREEZE +extern uint64_t get_task_frozen_to_swap(task_t); +#endif +extern uint64_t get_task_network_nonvolatile(task_t); +extern uint64_t get_task_network_nonvolatile_compressed(task_t); +extern uint64_t get_task_wired_mem(task_t); +extern uint32_t get_task_loadTag(task_t task); + +extern uint64_t get_task_tagged_footprint(task_t task); +extern uint64_t get_task_tagged_footprint_compressed(task_t task); +extern uint64_t get_task_media_footprint(task_t task); +extern uint64_t get_task_media_footprint_compressed(task_t task); +extern uint64_t get_task_graphics_footprint(task_t task); +extern uint64_t get_task_graphics_footprint_compressed(task_t task); +extern uint64_t get_task_neural_footprint(task_t task); +extern uint64_t get_task_neural_footprint_compressed(task_t task); extern kern_return_t task_convert_phys_footprint_limit(int, int *); -extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t); +extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t); extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb); -extern boolean_t is_kerneltask(task_t task); +/* Jetsam memlimit attributes */ +extern boolean_t task_get_memlimit_is_active(task_t task); +extern boolean_t task_get_memlimit_is_fatal(task_t task); +extern void task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active); +extern void task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal); +extern boolean_t task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active); +extern void task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active); + +extern void task_set_thread_limit(task_t task, uint16_t thread_limit); + +#if XNU_TARGET_OS_OSX +extern boolean_t task_has_system_version_compat_enabled(task_t task); +extern void task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat); +#endif + +extern boolean_t is_kerneltask(task_t task); +extern boolean_t is_corpsetask(task_t task); extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast); extern kern_return_t machine_task_get_state( - task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t *state_count); + task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t *state_count); extern kern_return_t machine_task_set_state( - task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t state_count); + task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count); extern void machine_task_terminate(task_t task); @@ -621,252 +916,140 @@ struct _task_ledger_indices { int iokit_mapped; int alternate_accounting; int alternate_accounting_compressed; + int page_table; int phys_footprint; int internal_compressed; int purgeable_volatile; int purgeable_nonvolatile; int purgeable_volatile_compressed; int purgeable_nonvolatile_compressed; + int tagged_nofootprint; + int tagged_footprint; + int tagged_nofootprint_compressed; + int tagged_footprint_compressed; + int network_volatile; + int network_nonvolatile; + int network_volatile_compressed; + int network_nonvolatile_compressed; + int media_nofootprint; + int media_footprint; + int media_nofootprint_compressed; + int media_footprint_compressed; 
+ int graphics_nofootprint; + int graphics_footprint; + int graphics_nofootprint_compressed; + int graphics_footprint_compressed; + int neural_nofootprint; + int neural_footprint; + int neural_nofootprint_compressed; + int neural_footprint_compressed; int platform_idle_wakeups; int interrupt_wakeups; #if CONFIG_SCHED_SFI int sfi_wait_times[MAX_SFI_CLASS_ID]; #endif /* CONFIG_SCHED_SFI */ -#ifdef CONFIG_BANK int cpu_time_billed_to_me; int cpu_time_billed_to_others; + int physical_writes; + int logical_writes; + int logical_writes_to_external; + int energy_billed_to_me; + int energy_billed_to_others; +#if DEBUG || DEVELOPMENT + int pages_grabbed; + int pages_grabbed_kern; + int pages_grabbed_iopl; + int pages_grabbed_upl; #endif +#if CONFIG_FREEZE + int frozen_to_swap; +#endif /* CONFIG_FREEZE */ +#if CONFIG_PHYS_WRITE_ACCT + int fs_metadata_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ }; extern struct _task_ledger_indices task_ledgers; -/* Begin task_policy */ - -/* value */ -#define TASK_POLICY_DISABLE 0x0 -#define TASK_POLICY_ENABLE 0x1 - -/* category */ -#define TASK_POLICY_INTERNAL 0x0 -#define TASK_POLICY_EXTERNAL 0x1 -#define TASK_POLICY_ATTRIBUTE 0x2 - -/* for tracing */ -#define TASK_POLICY_TASK 0x4 -#define TASK_POLICY_THREAD 0x8 - -/* flavors (also DBG_IMPORTANCE subclasses 0x20 - 0x3F) */ - -/* internal or external, thread or task */ -#define TASK_POLICY_DARWIN_BG 0x21 -#define TASK_POLICY_IOPOL 0x22 -#define TASK_POLICY_IO 0x23 -#define TASK_POLICY_PASSIVE_IO 0x24 - -/* internal, task only */ -#define TASK_POLICY_DARWIN_BG_IOPOL 0x27 - -/* task-only attributes */ -#define TASK_POLICY_TAL 0x28 -#define TASK_POLICY_BOOST 0x29 -#define TASK_POLICY_ROLE 0x2A -#define TASK_POLICY_SUPPRESSED_CPU 0x2B -#define TASK_POLICY_TERMINATED 0x2C -#define TASK_POLICY_NEW_SOCKETS_BG 0x2D -#define TASK_POLICY_LOWPRI_CPU 0x2E -#define TASK_POLICY_LATENCY_QOS 0x2F -#define TASK_POLICY_THROUGH_QOS 0x30 -#define TASK_POLICY_WATCHERS_BG 0x31 - -#define TASK_POLICY_SFI_MANAGED 0x34 -#define TASK_POLICY_ALL_SOCKETS_BG 0x37 - -#define TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS 0x39 /* latency as value1, throughput as value2 */ -#define TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS 0x3A /* latency as value1, throughput as value2 */ - -/* thread-only attributes */ -#define TASK_POLICY_PIDBIND_BG 0x32 -#define TASK_POLICY_WORKQ_BG 0x33 -#define TASK_POLICY_QOS 0x35 -#define TASK_POLICY_QOS_OVERRIDE 0x36 -#define TASK_POLICY_QOS_AND_RELPRIO 0x38 /* QoS as value1, relative priority as value2 */ - -#define TASK_POLICY_MAX 0x3F - -/* The main entrance to task policy is this function */ -extern void proc_set_task_policy(task_t task, thread_t thread, int category, int flavor, int value); -extern int proc_get_task_policy(task_t task, thread_t thread, int category, int flavor); - -/* For attributes that have two scalars as input/output */ -extern void proc_set_task_policy2(task_t task, thread_t thread, int category, int flavor, int value1, int value2); -extern void proc_get_task_policy2(task_t task, thread_t thread, int category, int flavor, int *value1, int *value2); - -/* For use by kernel threads and others who don't hold a reference on the target thread */ -extern void proc_set_task_policy_thread(task_t task, uint64_t tid, int category, int flavor, int value); - -extern void proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role, - ipc_port_t * portwatch_ports, int portwatch_count); - -extern void task_set_main_thread_qos(task_t task, thread_t main_thread); - -extern int 
proc_darwin_role_to_task_role(int darwin_role, int* task_role); -extern int proc_task_role_to_darwin_role(int task_role); - - -/* IO Throttle tiers */ -#define THROTTLE_LEVEL_NONE -1 -#define THROTTLE_LEVEL_TIER0 0 /* IOPOL_NORMAL, IOPOL_DEFAULT, IOPOL_PASSIVE */ - -#define THROTTLE_LEVEL_THROTTLED 1 -#define THROTTLE_LEVEL_TIER1 1 /* IOPOL_STANDARD */ -#define THROTTLE_LEVEL_TIER2 2 /* IOPOL_UTILITY */ -#define THROTTLE_LEVEL_TIER3 3 /* IOPOL_THROTTLE */ - -#define THROTTLE_LEVEL_START 0 -#define THROTTLE_LEVEL_END 3 - -#define THROTTLE_LEVEL_COMPRESSOR_TIER0 THROTTLE_LEVEL_TIER0 -#define THROTTLE_LEVEL_COMPRESSOR_TIER1 THROTTLE_LEVEL_TIER1 -#define THROTTLE_LEVEL_COMPRESSOR_TIER2 THROTTLE_LEVEL_TIER2 - -#define THROTTLE_LEVEL_PAGEOUT_THROTTLED THROTTLE_LEVEL_TIER2 -#define THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED THROTTLE_LEVEL_TIER1 - -#if CONFIG_IOSCHED -#define IOSCHED_METADATA_TIER THROTTLE_LEVEL_TIER1 -#endif /* CONFIG_IOSCHED */ - -extern int proc_apply_workq_bgthreadpolicy(thread_t thread); -extern int proc_restore_workq_bgthreadpolicy(thread_t thread); - -extern int proc_get_darwinbgstate(task_t task, uint32_t *flagsp); -extern boolean_t proc_task_is_tal(task_t task); -extern int task_get_apptype(task_t); -extern integer_t task_grab_latency_qos(task_t task); -extern void task_policy_create(task_t task, int parent_boosted); -extern void thread_policy_create(thread_t thread); - -/* - * for IPC importance hooks into task policy - */ -typedef struct task_pend_token { - uint32_t tpt_update_sockets :1, - tpt_update_timers :1, - tpt_update_watchers :1, - tpt_update_live_donor :1, - tpt_update_coal_sfi :1; -} *task_pend_token_t; - -extern void task_policy_update_complete_unlocked(task_t task, thread_t thread, task_pend_token_t pend_token); -extern void task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t pend_token); -extern void task_set_boost_locked(task_t task, boolean_t boost_active); - -/* - * Get effective policy - * Only for use by relevant subsystem, should never be passed into a setter! 
- */ - -extern int proc_get_effective_task_policy(task_t task, int flavor); -extern int proc_get_effective_thread_policy(thread_t thread, int flavor); - -/* temporary compatibility */ -int proc_setthread_saved_importance(thread_t thread, int importance); - -int proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep); -int proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline, int cpumon_entitled); -int proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled); -thread_t task_findtid(task_t, uint64_t); -void set_thread_iotier_override(thread_t, int policy); - -boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type); -boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type); -boolean_t proc_thread_qos_reset_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type); -void proc_thread_qos_deallocate(thread_t thread); - -#define TASK_RUSECPU_FLAGS_PROC_LIMIT 0x01 -#define TASK_RUSECPU_FLAGS_PERTHR_LIMIT 0x02 -#define TASK_RUSECPU_FLAGS_DEADLINE 0x04 -#define TASK_RUSECPU_FLAGS_FATAL_CPUMON 0x08 /* CPU usage monitor violations are fatal */ -#define TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON 0x10 /* wakeups monitor violations are fatal */ -#define TASK_RUSECPU_FLAGS_PHYS_FOOTPRINT_EXCEPTION 0x20 /* exceeding physical footprint generates EXC_RESOURCE */ - -/* BSD call back functions */ -extern int proc_apply_resource_actions(void * p, int type, int action); -extern int proc_restore_resource_actions(void * p, int type, int action); -extern int task_restore_resource_actions(task_t task, int type); - -extern int task_clear_cpuusage(task_t task, int cpumon_entitled); +/* requires task to be unlocked, returns a referenced thread */ +thread_t task_findtid(task_t task, uint64_t tid); +int pid_from_task(task_t task); extern kern_return_t task_wakeups_monitor_ctl(task_t task, uint32_t *rate_hz, int32_t *flags); extern kern_return_t task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags); +extern void task_rollup_accounting_info(task_t new_task, task_t parent_task); +extern kern_return_t task_io_monitor_ctl(task_t task, uint32_t *flags); +extern void task_set_did_exec_flag(task_t task); +extern void task_clear_exec_copy_flag(task_t task); +extern boolean_t task_is_exec_copy(task_t); +extern boolean_t task_did_exec(task_t task); +#ifdef CONFIG_32BIT_TELEMETRY +extern boolean_t task_consume_32bit_log_flag(task_t task); +extern void task_set_32bit_log_flag(task_t task); +#endif /* CONFIG_32BIT_TELEMETRY */ +extern boolean_t task_is_active(task_t task); +extern boolean_t task_is_halting(task_t task); +extern void task_clear_return_wait(task_t task, uint32_t flags); +extern void task_wait_to_return(void) __attribute__((noreturn)); +extern event_t task_get_return_wait_event(task_t task); + +extern void task_bank_reset(task_t task); +extern void task_bank_init(task_t task); + +#if CONFIG_ARCADE +extern void task_prep_arcade(task_t task, thread_t thread); +#endif /* CONFIG_ARCADE */ +extern int task_pid(task_t task); -extern void task_importance_mark_donor(task_t task, boolean_t donating); -extern void task_importance_mark_live_donor(task_t task, boolean_t donating); -extern void task_importance_mark_receiver(task_t task, boolean_t receiving); -extern void 
task_importance_mark_denap_receiver(task_t task, boolean_t denap); -extern void task_importance_reset(task_t task); -extern void task_atm_reset(task_t task); - -#if IMPORTANCE_INHERITANCE - -extern boolean_t task_is_importance_donor(task_t task); -extern boolean_t task_is_marked_importance_donor(task_t task); -extern boolean_t task_is_marked_live_importance_donor(task_t task); - -extern boolean_t task_is_importance_receiver(task_t task); -extern boolean_t task_is_marked_importance_receiver(task_t task); - -extern boolean_t task_is_importance_denap_receiver(task_t task); -extern boolean_t task_is_marked_importance_denap_receiver(task_t task); - -extern boolean_t task_is_importance_receiver_type(task_t task); +#if __has_feature(ptrauth_calls) +char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *); +void task_set_shared_region_id(task_t task, char *id); +#endif /* __has_feature(ptrauth_calls) */ -extern int task_importance_hold_watchport_assertion(task_t target_task, uint32_t count); -extern int task_importance_hold_internal_assertion(task_t target_task, uint32_t count); -extern int task_importance_drop_internal_assertion(task_t target_task, uint32_t count); +extern boolean_t task_has_assertions(task_t task); +/* End task_policy */ -extern int task_importance_hold_file_lock_assertion(task_t target_task, uint32_t count); -extern int task_importance_drop_file_lock_assertion(task_t target_task, uint32_t count); +extern void task_set_gpu_denied(task_t task, boolean_t denied); +extern boolean_t task_is_gpu_denied(task_t task); -extern int task_importance_hold_legacy_external_assertion(task_t target_task, uint32_t count); -extern int task_importance_drop_legacy_external_assertion(task_t target_task, uint32_t count); +extern queue_head_t * task_io_user_clients(task_t task); +extern void task_set_message_app_suspended(task_t task, boolean_t enable); -#endif /* IMPORTANCE_INHERITANCE */ +extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task); -extern int task_low_mem_privileged_listener(task_t task, boolean_t new_value, boolean_t *old_value); -extern boolean_t task_has_been_notified(task_t task, int pressurelevel); -extern boolean_t task_used_for_purging(task_t task, int pressurelevel); -extern void task_mark_has_been_notified(task_t task, int pressurelevel); -extern void task_mark_used_for_purging(task_t task, int pressurelevel); -extern void task_clear_has_been_notified(task_t task, int pressurelevel); -extern void task_clear_used_for_purging(task_t task); -extern int task_importance_estimate(task_t task); +extern void task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num); -extern int task_pid(task_t task); +extern void task_set_filter_msg_flag(task_t task, boolean_t flag); +extern boolean_t task_get_filter_msg_flag(task_t task); -/* End task_policy */ +extern void task_transfer_mach_filter_bits(task_t new_task, task_t old_mask); -extern kern_return_t task_purge_volatile_memory(task_t task); +#if __has_feature(ptrauth_calls) +extern bool task_is_pac_exception_fatal(task_t task); +extern void task_set_pac_exception_fatal_flag(task_t task); +#endif /*__has_feature(ptrauth_calls)*/ -extern void task_set_gpu_denied(task_t task, boolean_t denied); -extern boolean_t task_is_gpu_denied(task_t task); +extern void task_set_tecs(task_t task); -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -extern void *get_bsdtask_info(task_t); -extern void *get_bsdthreadtask_info(thread_t); +extern 
void *get_bsdtask_info(task_t); +extern void *get_bsdthreadtask_info(thread_t); extern void task_bsdtask_kill(task_t); extern vm_map_t get_task_map(task_t); -extern ledger_t get_task_ledger(task_t); +extern ledger_t get_task_ledger(task_t); extern boolean_t get_task_pidsuspended(task_t); +extern boolean_t get_task_suspended(task_t); extern boolean_t get_task_frozen(task_t); /* Convert from a task to a port */ extern ipc_port_t convert_task_to_port(task_t); extern ipc_port_t convert_task_name_to_port(task_name_t); +extern ipc_port_t convert_task_inspect_to_port(task_inspect_t); +extern ipc_port_t convert_task_read_to_port(task_read_t); extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task); /* Convert from a port (in this case, an SO right to a task's resume port) to a task. */ @@ -874,18 +1057,90 @@ extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t extern boolean_t task_suspension_notify(mach_msg_header_t *); -#endif /* KERNEL_PRIVATE */ - -extern task_t kernel_task; - -extern void task_deallocate( - task_t task); +#define TASK_WRITE_IMMEDIATE 0x1 +#define TASK_WRITE_DEFERRED 0x2 +#define TASK_WRITE_INVALIDATED 0x4 +#define TASK_WRITE_METADATA 0x8 +extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp); + +__enum_decl(task_balance_flags_t, uint8_t, { + TASK_BALANCE_CREDIT = 0x1, + TASK_BALANCE_DEBIT = 0x2, +}); + +__enum_decl(task_physical_write_flavor_t, uint8_t, { + TASK_PHYSICAL_WRITE_METADATA = 0x1, +}); +extern void task_update_physical_writes(task_t task, task_physical_write_flavor_t flavor, + uint64_t io_size, task_balance_flags_t flags); + +#if CONFIG_SECLUDED_MEMORY +extern void task_set_can_use_secluded_mem( + task_t task, + boolean_t can_use_secluded_mem); +extern void task_set_could_use_secluded_mem( + task_t task, + boolean_t could_use_secluded_mem); +extern void task_set_could_also_use_secluded_mem( + task_t task, + boolean_t could_also_use_secluded_mem); +extern boolean_t task_can_use_secluded_mem( + task_t task, + boolean_t is_allocate); +extern boolean_t task_could_use_secluded_mem(task_t task); +extern boolean_t task_could_also_use_secluded_mem(task_t task); +#endif /* CONFIG_SECLUDED_MEMORY */ + +extern void task_set_darkwake_mode(task_t, boolean_t); +extern boolean_t task_get_darkwake_mode(task_t); + +#if __arm64__ +extern void task_set_legacy_footprint(task_t task); +extern void task_set_extra_footprint_limit(task_t task); +extern void task_set_ios13extended_footprint_limit(task_t task); +#endif /* __arm64__ */ + +#if CONFIG_MACF +extern struct label *get_task_crash_label(task_t task); +#endif /* CONFIG_MACF */ + +extern int get_task_cdhash(task_t task, char cdhash[]); + +#endif /* KERNEL_PRIVATE */ + +extern task_t kernel_task; + +extern void task_deallocate( + task_t task); + +extern void task_name_deallocate( + task_name_t task_name); + +extern void task_policy_set_deallocate( + task_policy_set_t task_policy_set); + +extern void task_policy_get_deallocate( + task_policy_get_t task_policy_get); + +extern void task_inspect_deallocate( + task_inspect_t task_inspect); + +extern void task_read_deallocate( + task_read_t task_read); + +extern void task_suspension_token_deallocate( + task_suspension_token_t token); + +extern boolean_t task_self_region_footprint(void); +extern void task_self_region_footprint_set(boolean_t newval); +extern void task_ledgers_footprint(ledger_t ledger, + ledger_amount_t *ledger_resident, + ledger_amount_t *ledger_compressed); +extern void 
task_set_memory_ownership_transfer(
+    task_t         task,
+    boolean_t      value);
-extern void        task_name_deallocate(
-                       task_name_t             task_name);
-extern void        task_suspension_token_deallocate(
-                       task_suspension_token_t token);
 __END_DECLS

-#endif    /* _KERN_TASK_H_ */
+#endif  /* _KERN_TASK_H_ */
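
As the refcounting hunks in this patch show, task->ref_count becomes an os_refcnt_t and the MACH_KERNEL_PRIVATE task_reference_internal()/task_deallocate_internal() macros are rerouted through task_require() plus os_ref_retain()/os_ref_release(). The following is a minimal, hypothetical sketch (not part of the patch) of kernel code holding a task alive through the public task_reference()/task_deallocate() declarations from this header; the helper name and the placeholder work are illustrative only.

    #include <kern/task.h>   /* task_reference(), task_deallocate(), TASK_NULL */

    /* Hypothetical helper: pin 't' while we operate on it. */
    static void
    example_with_task_ref(task_t t)
    {
        if (t == TASK_NULL) {
            return;
        }
        task_reference(t);    /* under MACH_KERNEL_PRIVATE this expands to task_require() + os_ref_retain() per this patch */
        /* ... inspect or manipulate the task while the reference keeps it from being freed ... */
        task_deallocate(t);   /* drop the reference taken above */
    }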