#include <stdatomic.h>
#include <os/refcnt.h>
-#ifdef CONFIG_ATM
-#include <atm/atm_internal.h>
-#endif
-
struct _cpu_time_qos_stats {
uint64_t cpu_time_qos_default;
uint64_t cpu_time_qos_maintenance;
uint32_t vtimers;
/* Miscellaneous */
- vm_map_t map; /* Address space description */
+ vm_map_t XNU_PTRAUTH_SIGNED_PTR("task.map") map; /* Address space description */
queue_chain_t tasks; /* global list of tasks */
struct task_watchports *watchports; /* watchports passed in spawn */
turnstile_inheritor_t returnwait_inheritor; /* inheritor for task_wait */
integer_t user_stop_count; /* outstanding stops */
integer_t legacy_stop_count; /* outstanding legacy stops */
- integer_t priority; /* base priority for threads */
- integer_t max_priority; /* maximum priority for threads */
+ int16_t priority; /* base priority for threads */
+ int16_t max_priority; /* maximum priority for threads */
integer_t importance; /* priority offset (BSD 'nice' value) */
/* IPC structures */
decl_lck_mtx_data(, itk_lock_data);
- struct ipc_port *itk_self; /* not a right, doesn't hold ref */
- struct ipc_port *itk_nself; /* not a right, doesn't hold ref */
- struct ipc_port *itk_sself; /* a send right */
+ /*
+ * Different flavors of task port.
+ * These flavors TASK_FLAVOR_* are defined in mach_types.h
+ */
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_self") itk_self[TASK_SELF_PORT_COUNT]; /* does not hold right */
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_settable_self") itk_settable_self; /* a send right */
struct exception_action exc_actions[EXC_TYPES_COUNT];
/* a send right each valid element */
- struct ipc_port *itk_host; /* a send right */
- struct ipc_port *itk_bootstrap; /* a send right */
- struct ipc_port *itk_seatbelt; /* a send right */
- struct ipc_port *itk_gssd; /* yet another send right */
- struct ipc_port *itk_debug_control; /* send right for debugmode communications */
- struct ipc_port *itk_task_access; /* and another send right */
- struct ipc_port *itk_resume; /* a receive right to resume this task */
- struct ipc_port *itk_registered[TASK_PORT_REGISTER_MAX];
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_host") itk_host; /* a send right */
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_bootstrap") itk_bootstrap; /* a send right */
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_seatbelt") itk_seatbelt; /* a send right */
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_gssd") itk_gssd; /* yet another send right */
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_debug_control") itk_debug_control; /* send right for debugmode communications */
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_access") itk_task_access; /* and another send right */
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resume") itk_resume; /* a receive right to resume this task */
+ struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_registered") itk_registered[TASK_PORT_REGISTER_MAX];
/* all send rights */
- struct ipc_space *itk_space;
+ struct ipc_space * XNU_PTRAUTH_SIGNED_PTR("task.itk_space") itk_space;
ledger_t ledger;
/* Synchronizer ownership information */
uint32_t ps_switch; /* total pset switches */
#ifdef MACH_BSD
- void *bsd_info;
+ void * XNU_PTRAUTH_SIGNED_PTR("task.bsd_info") bsd_info;
#endif
kcdata_descriptor_t corpse_info;
uint64_t crashed_thread_id;
queue_chain_t corpse_tasks;
#ifdef CONFIG_MACF
struct label * crash_label;
+ uint8_t * mach_trap_filter_mask; /* Mach trap filter bitmask (len: mach_trap_count bits) */
+ uint8_t * mach_kobj_filter_mask; /* Mach kobject filter bitmask (len: mach_kobj_count bits) */
#endif
struct vm_shared_region *shared_region;
+#if __has_feature(ptrauth_calls)
+ char *shared_region_id; /* determines which ptr auth key to use */
+ bool shared_region_auth_remapped; /* authenticated sections ready for use */
+#endif /* __has_feature(ptrauth_calls) */
volatile uint32_t t_flags; /* general-purpose task flags protected by task_lock (TL) */
#define TF_NONE 0
#define TF_64B_ADDR 0x00000001 /* task has 64-bit addressing */
#define TF_CA_CLIENT_WI 0x00000800 /* task has CA_CLIENT work interval */
#define TF_DARKWAKE_MODE 0x00001000 /* task is in darkwake mode */
#define TF_NO_SMT 0x00002000 /* task threads must not be paired with SMT threads */
-#define TF_PAC_EXC_FATAL 0x00004000 /* task is marked a corpse if a PAC exception occurs */
+#define TF_FILTER_MSG 0x00004000 /* task calls into message filter callback before sending a message */
+#define TF_SYS_VERSION_COMPAT 0x00008000 /* shim task accesses to OS version data (macOS - app compatibility) */
+#define TF_PAC_EXC_FATAL 0x00010000 /* task is marked a corpse if a PAC exception occurs */
+#define TF_TECS 0x00020000 /* task threads must enable CPU security */
/*
* Task is running within a 64-bit address space.
uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */
uint64_t rusage_cpu_deadline;
thread_call_t rusage_cpu_callt;
-#if CONFIG_EMBEDDED
+#if CONFIG_TASKWATCH
queue_head_t task_watchers; /* app state watcher threads */
int num_taskwatchers;
int watchapplying;
-#endif /* CONFIG_EMBEDDED */
+#endif /* CONFIG_TASKWATCH */
-#if CONFIG_ATM
- struct atm_task_descriptor *atm_context; /* pointer to per task atm descriptor */
-#endif
struct bank_task *bank_context; /* pointer to per task bank structure */
#if IMPORTANCE_INHERITANCE
#endif
#if HYPERVISOR
- void *hv_task_target; /* hypervisor virtual machine object associated with this task */
+ void * XNU_PTRAUTH_SIGNED_PTR("task.hv_task_target") hv_task_target; /* hypervisor virtual machine object associated with this task */
#endif /* HYPERVISOR */
#if CONFIG_SECLUDED_MEMORY
mach_vm_address_t mach_header_vm_address;
uint32_t loadTag; /* dext ID used for logging identity */
+#if CONFIG_FREEZE
+ queue_head_t task_frozen_cseg_q; /* queue of csegs frozen to NAND */
+#endif /* CONFIG_FREEZE */
+#if CONFIG_PHYS_WRITE_ACCT
+ uint64_t task_fs_metadata_writes;
+#endif /* CONFIG_PHYS_WRITE_ACCT */
};
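/*
 * Editorial sketch, not part of the original header: the single itk_self /
 * itk_nself / itk_sself trio above has been replaced by an itk_self[] array
 * indexed by task port flavor plus itk_settable_self.  Assuming the
 * TASK_FLAVOR_* constants from mach_types.h (CONTROL, READ, INSPECT, NAME),
 * a lookup under the itk lock would read roughly:
 *
 *	itk_lock(task);
 *	port = task->itk_self[TASK_FLAVOR_READ];	/* no right is held here */
 *	itk_unlock(task);
 */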
/*
extern kern_return_t
task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *);
+static inline void
+task_require(struct task *task)
+{
+ zone_id_require(ZONE_ID_TASK, sizeof(struct task), task);
+}
+
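/*
 * Editorial sketch, not part of the original header: task_require() replaces
 * the old zone_require(task, task_zone) pattern; zone_id_require() panics
 * unless the pointer is a genuine element of the task zone, so a caller can
 * validate a task pointer before trusting it:
 *
 *	task_require(task);	/* panics if task does not point into the task zone */
 *	task_lock(task);
 *	...
 */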
#define task_lock(task) lck_mtx_lock(&(task)->lock)
#define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED)
#define task_lock_try(task) lck_mtx_try_lock(&(task)->lock)
#define task_unlock(task) lck_mtx_unlock(&(task)->lock)
#define task_objq_lock_init(task) lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr)
+#define task_objq_lock_destroy(task) lck_mtx_destroy(&(task)->task_objq_lock, &vm_object_lck_grp)
#define task_objq_lock(task) lck_mtx_lock(&(task)->task_objq_lock)
#define task_objq_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED)
#define task_objq_lock_try(task) lck_mtx_try_lock(&(task)->task_objq_lock)
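/*
 * Editorial sketch, not part of the original header: the usual pattern for
 * the lock macros above; t_flags, for example, is documented as protected
 * by task_lock (TL):
 *
 *	task_lock(task);
 *	task->t_flags |= TF_NO_SMT;
 *	task_unlock(task);
 */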
#define TASK_REFERENCE_LEAK_DEBUG 0
-extern zone_t task_zone;
-
#if TASK_REFERENCE_LEAK_DEBUG
extern void task_reference_internal(task_t task);
extern os_ref_count_t task_deallocate_internal(task_t task);
#else
-#define task_reference_internal(task) \
-MACRO_BEGIN \
- zone_require(task, task_zone); \
+#define task_reference_internal(task) \
+MACRO_BEGIN \
+ task_require(task); \
os_ref_retain(&(task)->ref_count); \
MACRO_END
#define task_deallocate_internal(task) os_ref_release(&(task)->ref_count)
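/*
 * Editorial sketch, not part of the original header: task_reference_internal()
 * and task_deallocate_internal() pair an os_refcnt retain with a release;
 * os_ref_release() returns the remaining count, so the real task_deallocate()
 * only tears the task down once it drops to zero:
 *
 *	task_reference_internal(task);
 *	...
 *	if (task_deallocate_internal(task) == 0) {
 *		/* last reference dropped; task_deallocate() frees here */
 *	}
 */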
#ifdef KERNEL_PRIVATE
extern boolean_t task_is_app_suspended(task_t task);
+extern bool task_is_exotic(task_t task);
+extern bool task_is_alien(task_t task);
#endif
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t task_thaw(
task_t task);
+typedef enum {
+ CREDIT_TO_SWAP = 1,
+ DEBIT_FROM_SWAP = 2
+} freezer_acct_op_t;
+
+extern void task_update_frozen_to_swap_acct(
+ task_t task,
+ int64_t amount,
+ freezer_acct_op_t op);
+
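/*
 * Editorial sketch, not part of the original header: the freezer presumably
 * credits this accounting when one of the task's compressed segments is
 * written out to swap and debits it when the segment comes back or is freed;
 * cseg_bytes is a placeholder for the segment size:
 *
 *	task_update_frozen_to_swap_acct(task, cseg_bytes, CREDIT_TO_SWAP);
 *	...
 *	task_update_frozen_to_swap_acct(task, cseg_bytes, DEBIT_FROM_SWAP);
 */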
#endif /* CONFIG_FREEZE */
/* Halt all other threads in the current task */
#endif
int is_corpse_fork);
void task_port_notify(mach_msg_header_t *msg);
+void task_port_with_flavor_notify(mach_msg_header_t *msg);
void task_wait_till_threads_terminate_locked(task_t task);
/* JMM - should just be temporary (implementation in bsd_kern still) */
extern uint64_t get_task_alternate_accounting_compressed(task_t);
extern uint64_t get_task_memory_region_count(task_t);
extern uint64_t get_task_page_table(task_t);
+#if CONFIG_FREEZE
+extern uint64_t get_task_frozen_to_swap(task_t);
+#endif
extern uint64_t get_task_network_nonvolatile(task_t);
extern uint64_t get_task_network_nonvolatile_compressed(task_t);
extern uint64_t get_task_wired_mem(task_t);
extern uint32_t get_task_loadTag(task_t task);
+extern uint64_t get_task_tagged_footprint(task_t task);
+extern uint64_t get_task_tagged_footprint_compressed(task_t task);
+extern uint64_t get_task_media_footprint(task_t task);
+extern uint64_t get_task_media_footprint_compressed(task_t task);
+extern uint64_t get_task_graphics_footprint(task_t task);
+extern uint64_t get_task_graphics_footprint_compressed(task_t task);
+extern uint64_t get_task_neural_footprint(task_t task);
+extern uint64_t get_task_neural_footprint_compressed(task_t task);
+
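/*
 * Editorial sketch, not part of the original header: the new per-category
 * footprint accessors above could be read together for a diagnostic dump
 * (log_task_footprints is hypothetical):
 */
#if 0 /* illustration only */
static void
log_task_footprints(task_t task)
{
	printf("tagged=%llu media=%llu graphics=%llu neural=%llu\n",
	    get_task_tagged_footprint(task),
	    get_task_media_footprint(task),
	    get_task_graphics_footprint(task),
	    get_task_neural_footprint(task));
}
#endif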
extern kern_return_t task_convert_phys_footprint_limit(int, int *);
extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t);
extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb);
extern void task_set_thread_limit(task_t task, uint16_t thread_limit);
+#if XNU_TARGET_OS_OSX
+extern boolean_t task_has_system_version_compat_enabled(task_t task);
+extern void task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat);
+#endif
extern boolean_t is_kerneltask(task_t task);
extern boolean_t is_corpsetask(task_t task);
int pages_grabbed_iopl;
int pages_grabbed_upl;
#endif
+#if CONFIG_FREEZE
+ int frozen_to_swap;
+#endif /* CONFIG_FREEZE */
+#if CONFIG_PHYS_WRITE_ACCT
+ int fs_metadata_writes;
+#endif /* CONFIG_PHYS_WRITE_ACCT */
};
extern struct _task_ledger_indices task_ledgers;
extern void task_wait_to_return(void) __attribute__((noreturn));
extern event_t task_get_return_wait_event(task_t task);
-extern void task_atm_reset(task_t task);
extern void task_bank_reset(task_t task);
extern void task_bank_init(task_t task);
#endif /* CONFIG_ARCADE */
extern int task_pid(task_t task);
+
+#if __has_feature(ptrauth_calls)
+char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *);
+void task_set_shared_region_id(task_t task, char *id);
+#endif /* __has_feature(ptrauth_calls) */
+
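/*
 * Editorial sketch, not part of the original header: on arm64e the shared
 * region id determines which pointer-auth key the task's shared-region
 * mappings use.  A hypothetical exec/spawn path might propagate it roughly
 * like this; ownership of the returned string is an assumption, not a
 * documented contract:
 *
 *	uint64_t jop_pid;
 *	char *id = task_get_vm_shared_region_id_and_jop_pid(task, &jop_pid);
 *	task_set_shared_region_id(new_task, id);
 */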
extern boolean_t task_has_assertions(task_t task);
/* End task_policy */
extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task);
-extern void task_copy_vmobjects(task_t task, vm_object_query_t query, int len, int64_t* num);
+extern void task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num);
+
+extern void task_set_filter_msg_flag(task_t task, boolean_t flag);
+extern boolean_t task_get_filter_msg_flag(task_t task);
+
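/*
 * Editorial sketch, not part of the original header: the setter presumably
 * toggles TF_FILTER_MSG (defined above), which per that flag's comment makes
 * the task call into the message filter callback before sending a message:
 *
 *	task_set_filter_msg_flag(task, TRUE);
 *	assert(task_get_filter_msg_flag(task) == TRUE);
 */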
+extern void task_transfer_mach_filter_bits(task_t new_task, task_t old_mask);
#if __has_feature(ptrauth_calls)
extern bool task_is_pac_exception_fatal(task_t task);
extern void task_set_pac_exception_fatal_flag(task_t task);
#endif /*__has_feature(ptrauth_calls)*/
+extern void task_set_tecs(task_t task);
+
#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
extern ipc_port_t convert_task_to_port(task_t);
extern ipc_port_t convert_task_name_to_port(task_name_t);
extern ipc_port_t convert_task_inspect_to_port(task_inspect_t);
+extern ipc_port_t convert_task_read_to_port(task_read_t);
extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task);
/* Convert from a port (in this case, an SO right to a task's resume port) to a task. */
#define TASK_WRITE_METADATA 0x8
extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp);
+__enum_decl(task_balance_flags_t, uint8_t, {
+ TASK_BALANCE_CREDIT = 0x1,
+ TASK_BALANCE_DEBIT = 0x2,
+});
+
+__enum_decl(task_physical_write_flavor_t, uint8_t, {
+ TASK_PHYSICAL_WRITE_METADATA = 0x1,
+});
+extern void task_update_physical_writes(task_t task, task_physical_write_flavor_t flavor,
+ uint64_t io_size, task_balance_flags_t flags);
+
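/*
 * Editorial sketch, not part of the original header: a filesystem would
 * account physical metadata writes much like the logical-write call above;
 * io_size is a placeholder for the number of bytes written:
 *
 *	task_update_physical_writes(task, TASK_PHYSICAL_WRITE_METADATA,
 *	    io_size, TASK_BALANCE_CREDIT);
 */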
#if CONFIG_SECLUDED_MEMORY
extern void task_set_can_use_secluded_mem(
task_t task,
extern void task_name_deallocate(
task_name_t task_name);
+extern void task_policy_set_deallocate(
+ task_policy_set_t task_policy_set);
+
+extern void task_policy_get_deallocate(
+ task_policy_get_t task_policy_get);
+
extern void task_inspect_deallocate(
task_inspect_t task_inspect);
+extern void task_read_deallocate(
+ task_read_t task_read);
+
extern void task_suspension_token_deallocate(
task_suspension_token_t token);
task_t task,
boolean_t value);
+
__END_DECLS
#endif /* _KERN_TASK_H_ */