/*
- * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define _ARM_MACHINE_ROUTINES_H_
#include <mach/mach_types.h>
+#include <mach/vm_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>
__BEGIN_DECLS
+#ifdef XNU_KERNEL_PRIVATE
+#ifdef __arm64__
+typedef bool (*expected_fault_handler_t)(arm_saved_state_t *);
+#endif /* __arm64__ */
+#endif /* XNU_KERNEL_PRIVATE */
+
/* Interrupt handling */
void ml_cpu_signal(unsigned int cpu_id);
uint64_t ml_cpu_signal_deferred_get_timer(void);
void ml_cpu_signal_deferred(unsigned int cpu_id);
void ml_cpu_signal_retract(unsigned int cpu_id);
+bool ml_cpu_signal_is_enabled(void);
/* Initialize Interrupts */
void ml_init_interrupt(void);
/* Clear interrupt spin debug state for thread */
#if INTERRUPT_MASKED_DEBUG
+extern boolean_t interrupt_masked_debug;
+extern uint64_t interrupt_masked_timeout;
+extern uint64_t stackshot_interrupt_masked_timeout;
+
+#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type) \
+do { \
+ if (interrupt_masked_debug) { \
+ thread_t thread = current_thread(); \
+ thread->machine.int_type = type; \
+ thread->machine.int_handler_addr = (uintptr_t)VM_KERNEL_STRIP_PTR(handler_addr); \
+ thread->machine.inthandler_timestamp = ml_get_timebase(); \
+ thread->machine.int_vector = (uintptr_t)NULL; \
+ } \
+} while (0)
+
+#define INTERRUPT_MASKED_DEBUG_END() \
+do { \
+ if (interrupt_masked_debug) { \
+ thread_t thread = current_thread(); \
+ ml_check_interrupt_handler_duration(thread); \
+ } \
+} while (0)
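+
+/*
+ * Illustrative sketch (not from the original header): an interrupt entry path
+ * is expected to bracket the handler invocation with these macros so that the
+ * masked-interrupt duration can be checked on exit. The handler name, argument
+ * and type constant below are placeholders.
+ *
+ *   INTERRUPT_MASKED_DEBUG_START(my_handler, DBG_INTR_TYPE_OTHER);
+ *   my_handler(arg);
+ *   INTERRUPT_MASKED_DEBUG_END();
+ */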
+
+void ml_irq_debug_start(uintptr_t handler, uintptr_t vector);
+void ml_irq_debug_end(void);
+
void ml_spin_debug_reset(thread_t thread);
void ml_spin_debug_clear(thread_t thread);
void ml_spin_debug_clear_self(void);
void ml_check_interrupts_disabled_duration(thread_t thread);
+void ml_check_stackshot_interrupt_disabled_duration(thread_t thread);
+void ml_check_interrupt_handler_duration(thread_t thread);
+#else
+#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type)
+#define INTERRUPT_MASKED_DEBUG_END()
#endif
#ifdef XNU_KERNEL_PRIVATE
/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
-#if MACH_KERNEL_PRIVATE
+#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Type for the Processor Cache Dispatch function */
typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1);
+
+typedef uint32_t (*get_decrementer_t)(void);
+typedef void (*set_decrementer_t)(uint32_t);
+typedef void (*fiq_handler_t)(void);
+
#endif
#define CacheConfig 0x00000000UL
unsigned int ml_get_cpu_count(void);
+unsigned int ml_get_cluster_count(void);
+
int ml_get_boot_cpu_number(void);
int ml_get_cpu_number(uint32_t phys_id);
+int ml_get_cluster_number(uint32_t phys_id);
+
int ml_get_max_cpu_number(void);
+int ml_get_max_cluster_number(void);
+
+unsigned int ml_get_first_cpu_id(unsigned int cluster_id);
+
+#ifdef __arm64__
+int ml_get_cluster_number_local(void);
+unsigned int ml_get_cpu_number_local(void);
+#endif /* __arm64__ */
+
/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
unsigned long vector_unit;
cluster_type_t ml_get_boot_cluster(void);
+/*!
+ * @typedef ml_topology_cpu_t
+ * @brief Describes one CPU core in the topology.
+ *
+ * @field cpu_id Logical CPU ID (EDT: cpu-id): 0, 1, 2, 3, 4, ...
+ * @field phys_id Physical CPU ID (EDT: reg). Same as MPIDR[15:0], i.e.
+ * (cluster_id << 8) | core_number_within_cluster
+ * @field cluster_id Cluster ID (EDT: cluster-id)
+ * @field die_id Die ID (EDT: die-id)
+ * @field cluster_type The type of CPUs found in this cluster.
+ * @field l2_access_penalty Indicates that the scheduler should try to de-prioritize a core because
+ * L2 accesses are slower than on the boot processor.
+ * @field l2_cache_size Size of the L2 cache, in bytes. 0 if unknown or not present.
+ * @field l2_cache_id l2-cache-id property read from EDT.
+ * @field l3_cache_size Size of the L3 cache, in bytes. 0 if unknown or not present.
+ * @field l3_cache_id l3-cache-id property read from EDT.
+ * @field cpu_IMPL_regs IO-mapped virtual address of cpuX_IMPL (implementation-defined) register block.
+ * @field cpu_IMPL_pa Physical address of cpuX_IMPL register block.
+ * @field cpu_IMPL_len Length of cpuX_IMPL register block.
+ * @field cpu_UTTDBG_regs IO-mapped virtual address of cpuX_UTTDBG register block.
+ * @field cpu_UTTDBG_pa Physical address of cpuX_UTTDBG register block, if set in DT, else zero
+ * @field cpu_UTTDBG_len Length of cpuX_UTTDBG register block, if set in DT, else zero
+ * @field coresight_regs IO-mapped virtual address of CoreSight debug register block.
+ * @field coresight_pa Physical address of CoreSight register block.
+ * @field coresight_len Length of CoreSight register block.
+ * @field self_ipi_irq AIC IRQ vector for self IPI (cpuX->cpuX). 0 if unsupported.
+ * @field other_ipi_irq AIC IRQ vector for other IPI (cpuX->cpuY). 0 if unsupported.
+ * @field pmi_irq AIC IRQ vector for performance management IRQ. 0 if unsupported.
+ * @field die_cluster_id Cluster ID within the local die (EDT: die-cluster-id)
+ * @field cluster_core_id Core ID within the local cluster (EDT: cluster-core-id)
+ */
+typedef struct ml_topology_cpu {
+ unsigned int cpu_id;
+ uint32_t phys_id;
+ unsigned int cluster_id;
+ unsigned int die_id;
+ cluster_type_t cluster_type;
+ uint32_t l2_access_penalty;
+ uint32_t l2_cache_size;
+ uint32_t l2_cache_id;
+ uint32_t l3_cache_size;
+ uint32_t l3_cache_id;
+ vm_offset_t cpu_IMPL_regs;
+ uint64_t cpu_IMPL_pa;
+ uint64_t cpu_IMPL_len;
+ vm_offset_t cpu_UTTDBG_regs;
+ uint64_t cpu_UTTDBG_pa;
+ uint64_t cpu_UTTDBG_len;
+ vm_offset_t coresight_regs;
+ uint64_t coresight_pa;
+ uint64_t coresight_len;
+ int self_ipi_irq;
+ int other_ipi_irq;
+ int pmi_irq;
+ unsigned int die_cluster_id;
+ unsigned int cluster_core_id;
+} ml_topology_cpu_t;
+
+/*!
+ * @typedef ml_topology_cluster_t
+ * @brief Describes one cluster in the topology.
+ *
+ * @field cluster_id Cluster ID (EDT: cluster-id)
+ * @field cluster_type The type of CPUs found in this cluster.
+ * @field num_cpus Total number of usable CPU cores in this cluster.
+ * @field first_cpu_id The cpu_id of the first CPU in the cluster.
+ * @field cpu_mask A bitmask representing the cpu_id's that belong to the cluster. Example:
+ * If the cluster contains CPU4 and CPU5, cpu_mask will be 0x30.
+ * @field acc_IMPL_regs IO-mapped virtual address of acc_IMPL (implementation-defined) register block.
+ * @field acc_IMPL_pa Physical address of acc_IMPL register block.
+ * @field acc_IMPL_len Length of acc_IMPL register block.
+ * @field cpm_IMPL_regs IO-mapped virtual address of cpm_IMPL (implementation-defined) register block.
+ * @field cpm_IMPL_pa Physical address of cpm_IMPL register block.
+ * @field cpm_IMPL_len Length of cpm_IMPL register block.
+ */
+typedef struct ml_topology_cluster {
+ unsigned int cluster_id;
+ cluster_type_t cluster_type;
+ unsigned int num_cpus;
+ unsigned int first_cpu_id;
+ uint64_t cpu_mask;
+ vm_offset_t acc_IMPL_regs;
+ uint64_t acc_IMPL_pa;
+ uint64_t acc_IMPL_len;
+ vm_offset_t cpm_IMPL_regs;
+ uint64_t cpm_IMPL_pa;
+ uint64_t cpm_IMPL_len;
+} ml_topology_cluster_t;
+
+// Bump this version number any time any ml_topology_* struct changes, so
+// that KPI users can check whether their headers are compatible with
+// the running kernel.
+#define CPU_TOPOLOGY_VERSION 1
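+
+/*
+ * Illustrative sketch (not from the original header): a KPI consumer can
+ * compare the populated version against the value it was compiled with and
+ * bail out on a layout it does not understand; `topo` here is assumed to come
+ * from ml_get_topology_info(), declared below.
+ *
+ *   if (topo == NULL || topo->version != CPU_TOPOLOGY_VERSION) {
+ *       return KERN_NOT_SUPPORTED;
+ *   }
+ */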
+
+/*!
+ * @typedef ml_topology_info_t
+ * @brief Describes the CPU topology for all APs in the system. Populated from EDT and read-only at runtime.
+ * @discussion This struct only lists CPU cores that are considered usable by both iBoot and XNU. Some
+ * physically present CPU cores may be considered unusable due to configuration options like
+ * the "cpus=" boot-arg. Cores that are disabled in hardware will not show up in EDT at all, so
+ * they also will not be present in this struct.
+ *
+ * @field version Version of the struct (set to CPU_TOPOLOGY_VERSION).
+ * @field num_cpus Total number of usable CPU cores.
+ * @field max_cpu_id The highest usable logical CPU ID.
+ * @field num_clusters Total number of AP CPU clusters on the system (usable or not).
+ * @field max_cluster_id The highest cluster ID found in EDT.
+ * @field max_die_id The highest die ID found in EDT.
+ * @field cpus List of |num_cpus| entries.
+ * @field clusters List of |num_clusters| entries.
+ * @field boot_cpu Points to the |cpus| entry for the boot CPU.
+ * @field boot_cluster Points to the |clusters| entry which contains the boot CPU.
+ * @field chip_revision Silicon revision reported by iBoot, which comes from the
+ * SoC-specific fuse bits. See CPU_VERSION_xx macros for definitions.
+ */
+typedef struct ml_topology_info {
+ unsigned int version;
+ unsigned int num_cpus;
+ unsigned int max_cpu_id;
+ unsigned int num_clusters;
+ unsigned int max_cluster_id;
+ unsigned int max_die_id;
+ ml_topology_cpu_t *cpus;
+ ml_topology_cluster_t *clusters;
+ ml_topology_cpu_t *boot_cpu;
+ ml_topology_cluster_t *boot_cluster;
+ unsigned int chip_revision;
+} ml_topology_info_t;
+
+/*!
+ * @function ml_get_topology_info
+ * @result A pointer to the read-only topology struct. Does not need to be freed. Returns NULL
+ * if the struct hasn't been initialized or the feature is unsupported.
+ */
+const ml_topology_info_t *ml_get_topology_info(void);
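+
+/*
+ * Illustrative sketch (not from the original header): a typical consumer walks
+ * the read-only per-CPU entries, e.g. to collect every core of one cluster;
+ * `target_cluster` is a placeholder.
+ *
+ *   const ml_topology_info_t *topo = ml_get_topology_info();
+ *   for (unsigned int i = 0; topo != NULL && i < topo->num_cpus; i++) {
+ *       const ml_topology_cpu_t *cpu = &topo->cpus[i];
+ *       if (cpu->cluster_id == target_cluster) {
+ *           // use cpu->cpu_id, cpu->phys_id, ...
+ *       }
+ *   }
+ */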
+
+/*!
+ * @function ml_map_cpu_pio
+ * @brief Maps per-CPU and per-cluster PIO registers found in EDT. This needs to be
+ * called after arm_vm_init() so it can't be part of ml_parse_cpu_topology().
+ */
+void ml_map_cpu_pio(void);
+
/* Struct for ml_processor_register */
struct ml_processor_info {
cpu_id_t cpu_id;
};
typedef struct ml_processor_info ml_processor_info_t;
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Struct for ml_init_timebase */
struct tbd_ops {
- void (*tbd_fiq_handler)(void);
- uint32_t (*tbd_get_decrementer)(void);
- void (*tbd_set_decrementer)(uint32_t dec_value);
+ fiq_handler_t tbd_fiq_handler;
+ get_decrementer_t tbd_get_decrementer;
+ set_decrementer_t tbd_set_decrementer;
};
typedef struct tbd_ops *tbd_ops_t;
typedef struct tbd_ops tbd_ops_data_t;
#if XNU_KERNEL_PRIVATE
void ml_lockdown_init(void);
-/* Check if the machine layer wants to intercept a panic call */
-boolean_t ml_wants_panic_trap_to_debugger(void);
-
/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
va_list *panic_args,
ml_static_vtop(
vm_offset_t);
+kern_return_t
+ml_static_verify_page_protections(
+ uint64_t base, uint64_t size, vm_prot_t prot);
+
vm_offset_t
ml_static_ptovirt(
vm_offset_t);
vm_offset_t ml_vtophys(
vm_offset_t vaddr);
-/* Get processor info */
+/* Get processor cache info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
#endif /* __APPLE_API_UNSTABLE */
vm_size_t size,
vm_prot_t prot);
+void ml_io_unmap(
+ vm_offset_t addr,
+ vm_size_t sz);
+
void ml_get_bouncepool_info(
vm_offset_t *phys_addr,
vm_size_t *size);
uint32_t ml_get_decrementer(void);
-#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
+#include <machine/config.h>
+
+#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
void timer_state_event_user_to_kernel(void);
void timer_state_event_kernel_to_user(void);
-#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
+#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
uint64_t ml_get_hwclock(void);
*/
void fill32_dczva(addr64_t, vm_size_t);
void fill32_nt(addr64_t, vm_size_t, uint32_t);
+int cpu_interrupt_is_pending(void);
#endif
#endif
#define MACHINE_NETWORK_WORKLOOP 0x00000001
#define MACHINE_NETWORK_NETISR 0x00000002
-/* Initialize the maximum number of CPUs */
-void ml_init_max_cpus(
+/* Set the maximum number of CPUs */
+void ml_set_max_cpus(
unsigned int max_cpus);
-/* Return the maximum number of CPUs set by ml_init_max_cpus() */
-unsigned int ml_get_max_cpus(
+/* Return the maximum number of CPUs set by ml_set_max_cpus(), waiting if necessary */
+unsigned int ml_wait_max_cpus(
void);
/* Return the maximum memory size */
#ifdef MACH_KERNEL_PRIVATE
uint32_t get_fpscr(void);
void set_fpscr(uint32_t);
+void machine_conf(void);
+void machine_lockdown(void);
#ifdef __arm64__
unsigned long update_mdscr(unsigned long clear, unsigned long set);
#endif /* __arm64__ */
-extern void init_vfp(void);
-extern boolean_t get_vfp_enabled(void);
extern void arm_debug_set_cp14(arm_debug_state_t *debug_state);
extern void fiq_context_init(boolean_t enable_fiq);
-extern void fiq_context_bootstrap(boolean_t enable_fiq);
extern void reenable_async_aborts(void);
+#ifdef __arm__
+extern boolean_t get_vfp_enabled(void);
extern void cpu_idle_wfi(boolean_t wfi_fast);
+#endif
+
+#ifdef __arm64__
+uint64_t ml_cluster_wfe_timeout(uint32_t wfe_cluster_id);
+#endif
#ifdef MONITOR
#define MONITOR_SET_ENTRY 0x800 /* Set kernel entry point from monitor */
uintptr_t arg2, uintptr_t arg3);
#endif /* MONITOR */
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
-void rorgn_stash_range(void);
-void rorgn_lockdown(void);
-#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
-
#if __ARM_KERNEL_PROTECT__
extern void set_vbar_el1(uint64_t);
#endif /* __ARM_KERNEL_PROTECT__ */
extern int clr_be_bit(void);
extern int be_tracing(void);
+/* Please note that cpu_broadcast_xcall is not as simple as you would like it to be.
+ * It will sometimes put the calling thread to sleep, and it is up to your callback
+ * to wake it up as needed, where "as needed" is defined as "all other CPUs have
+ * called the broadcast func". Look around the kernel for examples, or instead use
+ * cpu_broadcast_xcall_simple() which does indeed act like you would expect, given
+ * the prototype. cpu_broadcast_immediate_xcall has the same caveats and has a similar
+ * _simple() wrapper.
+ */
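+
+/*
+ * Illustrative sketch (placeholder names, not from the original header): with
+ * the non-_simple variant, a common pattern is for the callback to decrement
+ * the shared counter and wake the blocked caller once the last CPU has run:
+ *
+ *   static uint32_t xcall_sync;
+ *
+ *   static void
+ *   my_xcall(void *arg __unused)
+ *   {
+ *       // ... per-CPU work ...
+ *       if (os_atomic_dec(&xcall_sync, relaxed) == 0) {
+ *           thread_wakeup((event_t)&xcall_sync);
+ *       }
+ *   }
+ *
+ *   cpu_broadcast_xcall(&xcall_sync, TRUE, my_xcall, NULL);
+ */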
typedef void (*broadcastFunc) (void *);
unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
+unsigned int cpu_broadcast_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_xcall(int, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
+unsigned int cpu_broadcast_immediate_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_immediate_xcall(int, broadcastFunc, void *);
#ifdef KERNEL_PRIVATE
perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
struct perfcontrol_thread_data *thr_data, __unused void *unused);
+/*
+ * Thread Group Blocking Relationship Callout
+ *
+ * Parameters:
+ * blocked_tg - Thread group blocking on progress of another thread group
+ * blocking_tg - Thread group blocking progress of another thread group
+ * flags - Flags for other relevant information
+ * blocked_thr_state - Per-thread perfcontrol state for blocked thread
+ */
+typedef void (*sched_perfcontrol_thread_group_blocked_t)(
+ thread_group_data_t blocked_tg, thread_group_data_t blocking_tg, uint32_t flags, perfcontrol_state_t blocked_thr_state);
+
+/*
+ * Thread Group Unblocking Callout
+ *
+ * Parameters:
+ * unblocked_tg - Thread group being unblocked from making forward progress
+ * unblocking_tg - Thread group unblocking progress of another thread group
+ * flags - Flags for other relevant information
+ * unblocked_thr_state - Per-thread perfcontrol state for unblocked thread
+ */
+typedef void (*sched_perfcontrol_thread_group_unblocked_t)(
+ thread_group_data_t unblocked_tg, thread_group_data_t unblocking_tg, uint32_t flags, perfcontrol_state_t unblocked_thr_state);
+
/*
* Callers should always use the CURRENT version so that the kernel can detect both older
* and newer structure layouts. New callbacks should always be added at the end of the
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */
+#define SCHED_PERFCONTROL_CALLBACKS_VERSION_8 (8) /* up-to thread_group_unblocked */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6
struct sched_perfcontrol_callbacks {
sched_perfcontrol_state_update_t state_update;
sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update;
sched_perfcontrol_work_interval_ctl_t work_interval_ctl;
+ sched_perfcontrol_thread_group_blocked_t thread_group_blocked;
+ sched_perfcontrol_thread_group_unblocked_t thread_group_unblocked;
};
typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;
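+
+/*
+ * Illustrative sketch (hypothetical CLPC-side code, not from the original
+ * header): a performance controller opts into the new callouts by reporting a
+ * version that includes them and filling the new tail fields before calling
+ * sched_perfcontrol_register_callbacks(); `my_tg_blocked`, `my_tg_unblocked`
+ * and `my_perfcontrol_state` are placeholders.
+ *
+ *   static struct sched_perfcontrol_callbacks cb = {
+ *       .version                = SCHED_PERFCONTROL_CALLBACKS_VERSION_8,
+ *       // ... earlier callbacks elided ...
+ *       .thread_group_blocked   = my_tg_blocked,
+ *       .thread_group_unblocked = my_tg_unblocked,
+ *   };
+ *   sched_perfcontrol_register_callbacks(&cb, sizeof(struct my_perfcontrol_state));
+ */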
extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);
extern void sched_override_recommended_cores_for_sleep(void);
extern void sched_restore_recommended_cores_after_sleep(void);
+extern void sched_perfcontrol_inherit_recommendation_from_tg(perfcontrol_class_t perfctl_class, boolean_t inherit);
extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);
+/*
+ * Edge Scheduler-CLPC Interface
+ *
+ * sched_perfcontrol_thread_group_preferred_clusters_set()
+ *
+ * The Edge scheduler expects thread group recommendations to be specific clusters rather
+ * than just E/P. To allow more fine-grained control, CLPC can specify an override
+ * preferred cluster per QoS bucket. CLPC passes a common preferred cluster `tg_preferred_cluster`
+ * and an array of size [PERFCONTROL_CLASS_MAX] with overrides for specific perfctl classes.
+ * The scheduler translates these preferences into sched_bucket
+ * preferences and applies the changes.
+ *
+ */
+/* Token to indicate a particular perfctl class is not overridden */
+#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE ((uint32_t)~0)
+
+/*
+ * CLPC can also indicate if there should be an immediate rebalancing of threads of this TG as
+ * part of this preferred cluster change. It does that by specifying the following options.
+ */
+#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING 0x1
+#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNABLE 0x2
+typedef uint64_t sched_perfcontrol_preferred_cluster_options_t;
+
+extern void sched_perfcontrol_thread_group_preferred_clusters_set(void *machine_data, uint32_t tg_preferred_cluster,
+ uint32_t overrides[PERFCONTROL_CLASS_MAX], sched_perfcontrol_preferred_cluster_options_t options);
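+
+/*
+ * Illustrative sketch (placeholder values, not from the original header): a
+ * caller routes a thread group to cluster 1 with no per-class overrides and
+ * asks for running threads to be rebalanced immediately; `machine_data` is the
+ * thread group's per-machine data handle.
+ *
+ *   uint32_t overrides[PERFCONTROL_CLASS_MAX];
+ *   for (unsigned int i = 0; i < PERFCONTROL_CLASS_MAX; i++) {
+ *       overrides[i] = SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE;
+ *   }
+ *   sched_perfcontrol_thread_group_preferred_clusters_set(machine_data, 1, overrides,
+ *       SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING);
+ */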
+
+/*
+ * Edge Scheduler-CLPC Interface
+ *
+ * sched_perfcontrol_edge_matrix_get()/sched_perfcontrol_edge_matrix_set()
+ *
+ * The Edge scheduler uses edges between clusters to define the likelihood of migrating threads
+ * across clusters. The edge config between any two clusters defines the edge weight and whether
+ * migration and steal operations are allowed across that edge. The getter and setter allow CLPC
+ * to query and configure edge properties between various clusters on the platform.
+ */
+
+extern void sched_perfcontrol_edge_matrix_get(sched_clutch_edge *edge_matrix, bool *edge_request_bitmap, uint64_t flags, uint64_t matrix_order);
+extern void sched_perfcontrol_edge_matrix_set(sched_clutch_edge *edge_matrix, bool *edge_changes_bitmap, uint64_t flags, uint64_t matrix_order);
+
/*
* Update the deadline after which sched_perfcontrol_deadline_passed will be called.
* Returns TRUE if it successfully canceled a previously set callback,
uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
perfcontrol_callout_stat_t stat);
+#ifdef __arm64__
+/* The performance controller may use this interface to recommend
+ * that CPUs in the designated cluster employ WFE rather than WFI
+ * within the idle loop, falling back to WFI after the specified
+ * timeout. The updates are expected to be serialized by the caller;
+ * the implementation is not required to perform internal synchronization.
+ */
+uint32_t ml_update_cluster_wfe_recommendation(uint32_t wfe_cluster_id, uint64_t wfe_timeout_abstime_interval, uint64_t wfe_hint_flags);
+#endif /* __arm64__ */
+
#if defined(HAS_APPLE_PAC)
#define ONES(x) (BIT((x))-1)
#define PTR_MASK ONES(64-T1SZ_BOOT)
#define UNSIGN_PTR(p) \
SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK)
+uint64_t ml_default_jop_pid(void);
void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);
-void ml_task_set_disable_user_jop(task_t task, boolean_t disable_user_jop);
-void ml_thread_set_disable_user_jop(thread_t thread, boolean_t disable_user_jop);
-void ml_set_kernelkey_enabled(boolean_t enable);
+void ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit);
+void ml_task_set_jop_pid_from_shared_region(task_t task);
+void ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop);
+void ml_thread_set_disable_user_jop(thread_t thread, uint8_t disable_user_jop);
+void ml_thread_set_jop_pid(thread_t thread, task_t task);
void *ml_auth_ptr_unchecked(void *ptr, unsigned key, uint64_t modifier);
+
+/**
+ * Temporarily enables a userspace JOP key in kernel space, so that the kernel
+ * can sign or auth pointers on that process's behalf.
+ *
+ * @note The caller must disable interrupts before calling
+ * ml_enable_user_jop_key(), and may only re-enable interrupts after the
+ * complementary ml_disable_user_jop_key() call.
+ *
+ * @param user_jop_key The userspace JOP key to temporarily use
+ * @return Saved JOP state, to be passed to the complementary
+ * ml_disable_user_jop_key() call
+ */
+uint64_t ml_enable_user_jop_key(uint64_t user_jop_key);
+
+/**
+ * Restores the previous JOP key state after a previous ml_enable_user_jop_key()
+ * call.
+ *
+ * @param user_jop_key The userspace JOP key previously passed to
+ * ml_enable_user_jop_key()
+ * @param saved_jop_state The saved JOP state returned by
+ * ml_enable_user_jop_key()
+ */
+void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state);
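+
+/*
+ * Illustrative sketch (not from the original header): signing or authing on a
+ * user process's behalf brackets the work with the enable/disable pair while
+ * interrupts stay disabled for the whole window; `user_jop_key` is assumed to
+ * be the key obtained for the target task.
+ *
+ *   boolean_t istate = ml_set_interrupts_enabled(FALSE);
+ *   uint64_t saved_state = ml_enable_user_jop_key(user_jop_key);
+ *   // ... sign/auth pointers for the user process ...
+ *   ml_disable_user_jop_key(user_jop_key, saved_state);
+ *   (void) ml_set_interrupts_enabled(istate);
+ */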
#endif /* defined(HAS_APPLE_PAC) */
boolean_t ml_thread_is64bit(thread_t thread);
#ifdef __arm64__
+bool ml_feature_supported(uint32_t feature_bit);
void ml_set_align_checking(void);
-boolean_t arm64_wfe_allowed(void);
+extern void wfe_timeout_configure(void);
+extern void wfe_timeout_init(void);
#endif /* __arm64__ */
void ml_timer_evaluate(void);
uint64_t ml_gpu_stat(thread_t);
#endif /* __APPLE_API_PRIVATE */
+
+
+#if __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE)
+extern void ml_expect_fault_begin(expected_fault_handler_t, uintptr_t);
+extern void ml_expect_fault_end(void);
+#endif /* __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE) */
+
+
+void ml_hibernate_active_pre(void);
+void ml_hibernate_active_post(void);
+
__END_DECLS
#endif /* _ARM_MACHINE_ROUTINES_H_ */