/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _ARM_MACHINE_ROUTINES_H_
#define _ARM_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>
#ifdef XNU_KERNEL_PRIVATE
#ifdef __arm64__
typedef bool (*expected_fault_handler_t)(arm_saved_state_t *);
#endif /* __arm64__ */
#endif /* XNU_KERNEL_PRIVATE */
/* Interrupt handling */

void ml_cpu_signal(unsigned int cpu_id);
void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs);
uint64_t ml_cpu_signal_deferred_get_timer(void);
void ml_cpu_signal_deferred(unsigned int cpu_id);
void ml_cpu_signal_retract(unsigned int cpu_id);
bool ml_cpu_signal_is_enabled(void);

/* Initialize Interrupts */
void ml_init_interrupt(void);
/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
boolean_t ml_early_set_interrupts_enabled(boolean_t enable);

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);
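/*
 * Example (illustrative sketch, not part of the original header): the usual
 * save/restore pattern for the interrupt-enable routines above.
 * ml_set_interrupts_enabled() returns the previous enable state, so the caller
 * can restore exactly what it found.  do_critical_work() is a hypothetical
 * placeholder.
 *
 *     boolean_t istate = ml_set_interrupts_enabled(FALSE);  // mask interrupts, remember prior state
 *     assert(!ml_get_interrupts_enabled());
 *     do_critical_work();                                   // code that must not be interrupted
 *     (void) ml_set_interrupts_enabled(istate);             // restore the saved state
 */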
/* Clear interrupt spin debug state for thread */
#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
extern uint64_t stackshot_interrupt_masked_timeout;

#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type)                                          \
do {                                                                                              \
        if (interrupt_masked_debug) {                                                             \
                thread_t thread = current_thread();                                               \
                thread->machine.int_type = type;                                                  \
                thread->machine.int_handler_addr = (uintptr_t)VM_KERNEL_STRIP_PTR(handler_addr);  \
                thread->machine.inthandler_timestamp = ml_get_timebase();                         \
                thread->machine.int_vector = (uintptr_t)NULL;                                     \
        }                                                                                         \
} while (0)

#define INTERRUPT_MASKED_DEBUG_END()                                    \
do {                                                                    \
        if (interrupt_masked_debug) {                                   \
                thread_t thread = current_thread();                     \
                ml_check_interrupt_handler_duration(thread);            \
        }                                                               \
} while (0)

void ml_irq_debug_start(uintptr_t handler, uintptr_t vector);
void ml_irq_debug_end(void);

void ml_spin_debug_reset(thread_t thread);
void ml_spin_debug_clear(thread_t thread);
void ml_spin_debug_clear_self(void);
void ml_check_interrupts_disabled_duration(thread_t thread);
void ml_check_stackshot_interrupt_disabled_duration(thread_t thread);
void ml_check_interrupt_handler_duration(thread_t thread);
#else
#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type)
#define INTERRUPT_MASKED_DEBUG_END()
#endif
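/*
 * Example (illustrative sketch): bracketing an interrupt handler with the
 * macros above so that time spent with interrupts masked can be checked when
 * INTERRUPT_MASKED_DEBUG is configured.  my_handler, my_refcon and
 * MY_INTERRUPT_TYPE are hypothetical placeholders; the type argument is
 * whatever interrupt-type encoding the call site uses.
 *
 *     INTERRUPT_MASKED_DEBUG_START(my_handler, MY_INTERRUPT_TYPE);
 *     my_handler(my_refcon);
 *     INTERRUPT_MASKED_DEBUG_END();
 */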
#ifdef XNU_KERNEL_PRIVATE
extern bool ml_snoop_thread_is_on_core(thread_t thread);
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif
/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);

#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Type for the Processor Cache Dispatch function */
typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1);

typedef uint32_t (*get_decrementer_t)(void);
typedef void (*set_decrementer_t)(uint32_t);
typedef void (*fiq_handler_t)(void);
#endif
#define CacheConfig             0x00000000UL
#define CacheControl            0x00000001UL
#define CacheClean              0x00000002UL
#define CacheCleanRegion        0x00000003UL
#define CacheCleanFlush         0x00000004UL
#define CacheCleanFlushRegion   0x00000005UL
#define CacheShutdown           0x00000006UL

#define CacheControlEnable      0x00000000UL

#define CacheConfigCCSIDR       0x00000001UL
#define CacheConfigSize         0x00000100UL
/* Type for the Processor Idle function */
typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter, uint64_t *new_timeout_ticks);

/* Type for the Idle Tickle function */
typedef void (*idle_tickle_t)(void);

/* Type for the Idle Timer function */
typedef void (*idle_timer_t)(void *refcon, uint64_t *new_timeout_ticks);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Type for the Lockdown Handler */
typedef void (*lockdown_handler_t)(void *);

/* Type for the Platform specific Error Handler */
typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr);
/*
 * The exception callback (ex_cb) module allows kernel drivers to
 * register and receive callbacks for exceptions, and indicate
 * actions to be taken by the platform kernel.
 * Currently this is supported for ARM64 but extending support for ARM32
 * should be straightforward.
 */

/* Supported exception classes for callbacks */
typedef enum {
        EXCB_CLASS_ILLEGAL_INSTR_SET,
#ifdef CONFIG_XNUPOST
        EXCB_CLASS_TEST1,
        EXCB_CLASS_TEST2,
        EXCB_CLASS_TEST3,
#endif
        EXCB_CLASS_MAX          // this must be last
}
ex_cb_class_t;

/* Actions indicated by callbacks to be taken by platform kernel */
typedef enum {
        EXCB_ACTION_RERUN,      // re-run the faulting instruction
        EXCB_ACTION_NONE,       // continue normal exception handling
#ifdef CONFIG_XNUPOST
        EXCB_ACTION_TEST_FAIL,
#endif
}
ex_cb_action_t;

/*
 * We cannot use a private kernel data structure such as arm_saved_state_t.
 * The CPSR and ESR are not clobbered when the callback function is invoked so
 * those registers can be examined by the callback function;
 * the same is done in the platform error handlers.
 */
typedef struct {
        vm_offset_t far;                // faulting address
} ex_cb_state_t;

/* callback type definition */
typedef ex_cb_action_t (*ex_cb_t) (
        ex_cb_class_t           cb_class,
        void                    *refcon,        // provided at registration
        const ex_cb_state_t     *state          // exception state
        );

/*
 * Callback registration.
 * Currently we support only one registered callback per class but
 * it should be possible to support more callbacks.
 */
kern_return_t ex_cb_register(
        ex_cb_class_t   cb_class,
        ex_cb_t         cb,
        void            *refcon);

/*
 * Called internally by platform kernel to invoke the registered callback for class
 */
ex_cb_action_t ex_cb_invoke(
        ex_cb_class_t   cb_class,
        vm_offset_t     far);
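/*
 * Example (illustrative sketch): a minimal callback matching ex_cb_t that lets
 * normal exception handling continue.  The registration call assumes the
 * ex_cb_register() signature reconstructed above; my_ex_cb is a placeholder.
 *
 *     static ex_cb_action_t
 *     my_ex_cb(ex_cb_class_t cb_class, void *refcon, const ex_cb_state_t *state)
 *     {
 *             // inspect refcon/state as needed
 *             return EXCB_ACTION_NONE;        // continue normal exception handling
 *     }
 *
 *     kern_return_t kr = ex_cb_register(EXCB_CLASS_ILLEGAL_INSTR_SET, my_ex_cb, NULL);
 */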
void ml_parse_cpu_topology(void);

unsigned int ml_get_cpu_count(void);

unsigned int ml_get_cluster_count(void);

int ml_get_boot_cpu_number(void);

int ml_get_cpu_number(uint32_t phys_id);

int ml_get_cluster_number(uint32_t phys_id);

int ml_get_max_cpu_number(void);

int ml_get_max_cluster_number(void);

unsigned int ml_get_first_cpu_id(unsigned int cluster_id);

#ifdef __arm64__
int ml_get_cluster_number_local(void);
unsigned int ml_get_cpu_number_local(void);
#endif /* __arm64__ */
/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
        unsigned long   vector_unit;
        unsigned long   cache_line_size;
        unsigned long   l1_icache_size;
        unsigned long   l1_dcache_size;
        unsigned long   l2_settings;
        unsigned long   l2_cache_size;
        unsigned long   l3_settings;
        unsigned long   l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;
cluster_type_t ml_get_boot_cluster(void);
/*!
 * @typedef ml_topology_cpu_t
 * @brief Describes one CPU core in the topology.
 *
 * @field cpu_id            Logical CPU ID (EDT: cpu-id): 0, 1, 2, 3, 4, ...
 * @field phys_id           Physical CPU ID (EDT: reg). Same as MPIDR[15:0], i.e.
 *                          (cluster_id << 8) | core_number_within_cluster
 * @field cluster_id        Cluster ID (EDT: cluster-id)
 * @field die_id            Die ID (EDT: die-id)
 * @field cluster_type      The type of CPUs found in this cluster.
 * @field l2_access_penalty Indicates that the scheduler should try to de-prioritize a core because
 *                          L2 accesses are slower than on the boot processor.
 * @field l2_cache_size     Size of the L2 cache, in bytes. 0 if unknown or not present.
 * @field l2_cache_id       l2-cache-id property read from EDT.
 * @field l3_cache_size     Size of the L3 cache, in bytes. 0 if unknown or not present.
 * @field l3_cache_id       l3-cache-id property read from EDT.
 * @field cpu_IMPL_regs     IO-mapped virtual address of cpuX_IMPL (implementation-defined) register block.
 * @field cpu_IMPL_pa       Physical address of cpuX_IMPL register block.
 * @field cpu_IMPL_len      Length of cpuX_IMPL register block.
 * @field cpu_UTTDBG_regs   IO-mapped virtual address of cpuX_UTTDBG register block.
 * @field cpu_UTTDBG_pa     Physical address of cpuX_UTTDBG register block, if set in DT, else zero
 * @field cpu_UTTDBG_len    Length of cpuX_UTTDBG register block, if set in DT, else zero
 * @field coresight_regs    IO-mapped virtual address of CoreSight debug register block.
 * @field coresight_pa      Physical address of CoreSight register block.
 * @field coresight_len     Length of CoreSight register block.
 * @field self_ipi_irq      AIC IRQ vector for self IPI (cpuX->cpuX). 0 if unsupported.
 * @field other_ipi_irq     AIC IRQ vector for other IPI (cpuX->cpuY). 0 if unsupported.
 * @field pmi_irq           AIC IRQ vector for performance management IRQ. 0 if unsupported.
 * @field die_cluster_id    Cluster ID within the local die (EDT: die-cluster-id)
 * @field cluster_core_id   Core ID within the local cluster (EDT: cluster-core-id)
 */
typedef struct ml_topology_cpu {
        unsigned int    cpu_id;
        unsigned int    phys_id;
        unsigned int    cluster_id;
        unsigned int    die_id;
        cluster_type_t  cluster_type;
        uint32_t        l2_access_penalty;
        uint32_t        l2_cache_size;
        uint32_t        l2_cache_id;
        uint32_t        l3_cache_size;
        uint32_t        l3_cache_id;
        vm_offset_t     cpu_IMPL_regs;
        uint64_t        cpu_IMPL_pa;
        uint64_t        cpu_IMPL_len;
        vm_offset_t     cpu_UTTDBG_regs;
        uint64_t        cpu_UTTDBG_pa;
        uint64_t        cpu_UTTDBG_len;
        vm_offset_t     coresight_regs;
        uint64_t        coresight_pa;
        uint64_t        coresight_len;
        int             self_ipi_irq;
        int             other_ipi_irq;
        int             pmi_irq;
        unsigned int    die_cluster_id;
        unsigned int    cluster_core_id;
} ml_topology_cpu_t;
/*!
 * @typedef ml_topology_cluster_t
 * @brief Describes one cluster in the topology.
 *
 * @field cluster_id        Cluster ID (EDT: cluster-id)
 * @field cluster_type      The type of CPUs found in this cluster.
 * @field num_cpus          Total number of usable CPU cores in this cluster.
 * @field first_cpu_id      The cpu_id of the first CPU in the cluster.
 * @field cpu_mask          A bitmask representing the cpu_id's that belong to the cluster. Example:
 *                          If the cluster contains CPU4 and CPU5, cpu_mask will be 0x30.
 * @field acc_IMPL_regs     IO-mapped virtual address of acc_IMPL (implementation-defined) register block.
 * @field acc_IMPL_pa       Physical address of acc_IMPL register block.
 * @field acc_IMPL_len      Length of acc_IMPL register block.
 * @field cpm_IMPL_regs     IO-mapped virtual address of cpm_IMPL (implementation-defined) register block.
 * @field cpm_IMPL_pa       Physical address of cpm_IMPL register block.
 * @field cpm_IMPL_len      Length of cpm_IMPL register block.
 */
typedef struct ml_topology_cluster {
        unsigned int    cluster_id;
        cluster_type_t  cluster_type;
        unsigned int    num_cpus;
        unsigned int    first_cpu_id;
        uint64_t        cpu_mask;
        vm_offset_t     acc_IMPL_regs;
        uint64_t        acc_IMPL_pa;
        uint64_t        acc_IMPL_len;
        vm_offset_t     cpm_IMPL_regs;
        uint64_t        cpm_IMPL_pa;
        uint64_t        cpm_IMPL_len;
} ml_topology_cluster_t;
// Bump this version number any time any ml_topology_* struct changes, so
// that KPI users can check whether their headers are compatible with
// the running kernel.
#define CPU_TOPOLOGY_VERSION 1
/*!
 * @typedef ml_topology_info_t
 * @brief Describes the CPU topology for all APs in the system. Populated from EDT and read-only at runtime.
 * @discussion This struct only lists CPU cores that are considered usable by both iBoot and XNU. Some
 *             physically present CPU cores may be considered unusable due to configuration options like
 *             the "cpus=" boot-arg. Cores that are disabled in hardware will not show up in EDT at all, so
 *             they also will not be present in this struct.
 *
 * @field version            Version of the struct (set to CPU_TOPOLOGY_VERSION).
 * @field num_cpus           Total number of usable CPU cores.
 * @field max_cpu_id         The highest usable logical CPU ID.
 * @field num_clusters       Total number of AP CPU clusters on the system (usable or not).
 * @field max_cluster_id     The highest cluster ID found in EDT.
 * @field cpus               List of |num_cpus| entries.
 * @field clusters           List of |num_clusters| entries.
 * @field boot_cpu           Points to the |cpus| entry for the boot CPU.
 * @field boot_cluster       Points to the |clusters| entry which contains the boot CPU.
 * @field chip_revision      Silicon revision reported by iBoot, which comes from the
 *                           SoC-specific fuse bits. See CPU_VERSION_xx macros for definitions.
 */
typedef struct ml_topology_info {
        unsigned int            version;
        unsigned int            num_cpus;
        unsigned int            max_cpu_id;
        unsigned int            num_clusters;
        unsigned int            max_cluster_id;
        unsigned int            max_die_id;
        ml_topology_cpu_t       *cpus;
        ml_topology_cluster_t   *clusters;
        ml_topology_cpu_t       *boot_cpu;
        ml_topology_cluster_t   *boot_cluster;
        unsigned int            chip_revision;
} ml_topology_info_t;
/*!
 * @function ml_get_topology_info
 * @result A pointer to the read-only topology struct. Does not need to be freed. Returns NULL
 *         if the struct hasn't been initialized or the feature is unsupported.
 */
const ml_topology_info_t *ml_get_topology_info(void);
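/*
 * Example (illustrative sketch): walking the read-only topology exported by
 * ml_get_topology_info(), using only fields documented above.
 *
 *     const ml_topology_info_t *topo = ml_get_topology_info();
 *     if (topo != NULL) {
 *             for (unsigned int i = 0; i < topo->num_cpus; i++) {
 *                     const ml_topology_cpu_t *cpu = &topo->cpus[i];
 *                     // e.g. bucket CPUs by cluster, or inspect cache sizes
 *                     (void) cpu->cluster_id;
 *                     (void) cpu->l2_cache_size;
 *             }
 *     }
 */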
/*!
 * @function ml_map_cpu_pio
 * @brief Maps per-CPU and per-cluster PIO registers found in EDT. This needs to be
 *        called after arm_vm_init() so it can't be part of ml_parse_cpu_topology().
 */
void ml_map_cpu_pio(void);
/* Struct for ml_processor_register */
struct ml_processor_info {
        vm_offset_t                     start_paddr;
        boolean_t                       supports_nap;
        void                            *platform_cache_dispatch;
        time_base_enable_t              time_base_enable;
        processor_idle_t                processor_idle;
        idle_tickle_t                   *idle_tickle;
        idle_timer_t                    idle_timer;
        void                            *idle_timer_refcon;
        vm_offset_t                     powergate_stub_addr;
        uint32_t                        powergate_stub_length;
        uint32_t                        powergate_latency;
        platform_error_handler_t        platform_error_handler;
        uint64_t                        regmap_paddr;
        uint32_t                        l2_access_penalty;
        cluster_type_t                  cluster_type;
        uint32_t                        l2_cache_id;
        uint32_t                        l2_cache_size;
        uint32_t                        l3_cache_id;
        uint32_t                        l3_cache_size;
};
typedef struct ml_processor_info ml_processor_info_t;
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Struct for ml_init_timebase */
struct tbd_ops {
        fiq_handler_t           tbd_fiq_handler;
        get_decrementer_t       tbd_get_decrementer;
        set_decrementer_t       tbd_set_decrementer;
};
typedef struct tbd_ops  *tbd_ops_t;
typedef struct tbd_ops  tbd_ops_data_t;
#endif
/*!
 * @function ml_processor_register
 *
 * @abstract callback from platform kext to register processor
 *
 * @discussion This function is called by the platform kext when a processor is
 * being registered. This is called while running on the CPU itself, as part of
 * its initialization.
 *
 * @param ml_processor_info provides machine-specific information about the
 * processor to xnu.
 *
 * @param processor is set as an out-parameter to an opaque handle that should
 * be used by the platform kext when referring to this processor in the future.
 *
 * @param ipi_handler is set as an out-parameter to the function that should be
 * registered as the IPI handler.
 *
 * @param pmi_handler is set as an out-parameter to the function that should be
 * registered as the PMI handler.
 *
 * @returns KERN_SUCCESS on success and an error code otherwise.
 */
kern_return_t ml_processor_register(ml_processor_info_t *ml_processor_info,
    processor_t *processor, ipi_handler_t *ipi_handler,
    perfmon_interrupt_handler_func *pmi_handler);
/* Register a lockdown handler */
kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *);

#if XNU_KERNEL_PRIVATE
void ml_lockdown_init(void);

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */
/* Initialize Interrupts */
void ml_install_interrupt_handler(
        void *nub,
        int source,
        void *target,
        IOInterruptHandler handler,
        void *refCon);
kern_return_t ml_static_verify_page_protections(
        uint64_t base, uint64_t size, vm_prot_t prot);
vm_offset_t ml_static_slide(
        vm_offset_t vaddr);

vm_offset_t ml_static_unslide(
        vm_offset_t vaddr);

/* Offset required to obtain absolute time value from tick counter */
uint64_t ml_get_abstime_offset(void);

/* Offset required to obtain continuous time value from tick counter */
uint64_t ml_get_conttime_offset(void);
#ifdef __APPLE_API_UNSTABLE
/* PCI config cycle probing */
boolean_t ml_probe_read(
        vm_offset_t paddr,
        unsigned int *val);
boolean_t ml_probe_read_64(
        addr64_t paddr,
        unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(vm_offset_t paddr);
unsigned int ml_phys_read_half_64(addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(vm_offset_t paddr);
unsigned int ml_phys_read_64(addr64_t paddr);
unsigned int ml_phys_read_word(vm_offset_t paddr);
unsigned int ml_phys_read_word_64(addr64_t paddr);
unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
extern void ml_io_write8(uintptr_t vaddr, uint8_t val);
extern void ml_io_write16(uintptr_t vaddr, uint16_t val);
extern void ml_io_write32(uintptr_t vaddr, uint32_t val);
extern void ml_io_write64(uintptr_t vaddr, uint64_t val);
/* Read physical address double word */
unsigned long long ml_phys_read_double(vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(addr64_t paddr);

/* Write physical address byte */
void ml_phys_write_byte(vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(addr64_t paddr, unsigned int data);
void ml_phys_write_word(vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(addr64_t paddr, unsigned long long data);
void ml_static_mfree(
        vm_offset_t vaddr,
        vm_size_t size);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
        vm_offset_t vaddr);

/* Get processor cache info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);

#endif /* __APPLE_API_UNSTABLE */
#ifdef __APPLE_API_PRIVATE
#ifdef XNU_KERNEL_PRIVATE
vm_size_t ml_nofault_copy(
        vm_offset_t virtsrc,
        vm_offset_t virtdst,
        vm_size_t size);
boolean_t ml_validate_nofault(
        vm_offset_t virtsrc, vm_size_t size);
#endif /* XNU_KERNEL_PRIVATE */
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
        vm_offset_t phys_addr,
        vm_size_t size);

vm_offset_t ml_io_map_wcomb(
        vm_offset_t phys_addr,
        vm_size_t size);

vm_offset_t ml_io_map_with_prot(
        vm_offset_t phys_addr,
        vm_size_t size,
        vm_prot_t prot);

void ml_get_bouncepool_info(
        vm_offset_t *phys_addr,
        vm_size_t   *size);

vm_map_address_t ml_map_high_window(
        vm_offset_t phys_addr,
        vm_size_t len);
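/*
 * Example (illustrative sketch): mapping a device register block and accessing
 * a 32-bit register through the ml_io_* accessors declared earlier.  The
 * physical base, length, and register offsets are hypothetical, and the
 * ml_io_map() size parameter follows the reconstruction above.
 *
 *     vm_offset_t regs = ml_io_map(0x20e000000ULL, PAGE_SIZE);   // device-specific base/size
 *     if (regs != 0) {
 *             uint32_t status = ml_io_read32(regs + 0x10);       // hypothetical STATUS offset
 *             ml_io_write32(regs + 0x14, status | 0x1);          // hypothetical CONTROL offset
 *     }
 */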
/* boot memory allocation */
vm_offset_t ml_static_malloc(
        vm_size_t size);

void ml_init_timebase(
        void            *args,
        tbd_ops_t       tbd_funcs,
        vm_offset_t     int_address,
        vm_offset_t     int_value);
uint64_t ml_get_timebase(void);

uint64_t ml_get_speculative_timebase(void);

uint64_t ml_get_timebase_entropy(void);

void ml_init_lock_timeout(void);

boolean_t ml_delay_should_spin(uint64_t interval);

void ml_delay_on_yield(void);

uint32_t ml_get_decrementer(void);
#include <machine/config.h>

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
void timer_state_event_user_to_kernel(void);
void timer_state_event_kernel_to_user(void);
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

uint64_t ml_get_hwclock(void);

boolean_t ml_get_timer_pending(void);

void platform_syscall(struct arm_saved_state *);
void ml_set_decrementer(uint32_t dec_value);

boolean_t is_user_contex(void);

void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address);

/* These calls are only valid if __ARM_USER_PROTECT__ is defined */
uintptr_t arm_user_protect_begin(
        thread_t thread);
void arm_user_protect_end(
        thread_t thread,
        uintptr_t up,
        boolean_t disable_interrupts);
#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

/* Zero bytes starting at a physical address */
void bzero_phys(
        addr64_t phys_address,
        vm_size_t length);

void bzero_phys_nc(addr64_t src64, vm_size_t bytes);

#if MACH_KERNEL_PRIVATE
/* Pattern-fill buffer with zeros or a 32-bit pattern;
 * target must be 128-byte aligned and sized a multiple of 128.
 * Both variants emit stores with non-temporal properties.
 */
void fill32_dczva(addr64_t, vm_size_t);
void fill32_nt(addr64_t, vm_size_t, uint32_t);
int cpu_interrupt_is_pending(void);
#endif /* MACH_KERNEL_PRIVATE */
void ml_thread_policy(
        thread_t thread,
        unsigned policy_id,
        unsigned policy_info);

#define MACHINE_GROUP                   0x00000001
#define MACHINE_NETWORK_GROUP           0x10000000
#define MACHINE_NETWORK_WORKLOOP        0x00000001
#define MACHINE_NETWORK_NETISR          0x00000002

/* Set the maximum number of CPUs */
void ml_set_max_cpus(
        unsigned int max_cpus);

/* Return the maximum number of CPUs set by ml_set_max_cpus(), waiting if necessary */
unsigned int ml_wait_max_cpus(void);

/* Return the maximum memory size */
unsigned int ml_get_machine_mem(void);
#ifdef XNU_KERNEL_PRIVATE
/* Return max offset */
vm_map_offset_t ml_get_max_offset(
        boolean_t is64,
        unsigned int option);
#define MACHINE_MAX_OFFSET_DEFAULT      0x01
#define MACHINE_MAX_OFFSET_MIN          0x02
#define MACHINE_MAX_OFFSET_MAX          0x04
#define MACHINE_MAX_OFFSET_DEVICE       0x08
#endif
extern void ml_cpu_up(void);
extern void ml_cpu_down(void);
extern void ml_arm_sleep(void);

extern uint64_t ml_get_wake_timebase(void);
extern uint64_t ml_get_conttime_wake_time(void);

/* Time since the system was reset (as part of boot/wake) */
uint64_t ml_get_time_since_reset(void);

/*
 * Called by ApplePMGR to set wake time. Units and epoch are identical
 * to mach_continuous_time(). Has no effect on !HAS_CONTINUOUS_HWCLOCK
 * chips. If wake_time == UINT64_MAX, that means the wake time is
 * unknown and calls to ml_get_time_since_reset() will return UINT64_MAX.
 */
void ml_set_reset_time(uint64_t wake_time);
#ifdef XNU_KERNEL_PRIVATE
/* Just a stub on ARM */
extern kern_return_t ml_interrupt_prewarm(uint64_t deadline);
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* XNU_KERNEL_PRIVATE */

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);
#ifdef MACH_KERNEL_PRIVATE
uint32_t get_fpscr(void);
void set_fpscr(uint32_t);
void machine_conf(void);
void machine_lockdown(void);

#ifdef __arm64__
unsigned long update_mdscr(unsigned long clear, unsigned long set);
#endif /* __arm64__ */

extern void arm_debug_set_cp14(arm_debug_state_t *debug_state);
extern void fiq_context_init(boolean_t enable_fiq);

extern void reenable_async_aborts(void);

extern boolean_t get_vfp_enabled(void);
extern void cpu_idle_wfi(boolean_t wfi_fast);

uint64_t ml_cluster_wfe_timeout(uint32_t wfe_cluster_id);

#define MONITOR_SET_ENTRY       0x800   /* Set kernel entry point from monitor */
#define MONITOR_LOCKDOWN        0x801   /* Enforce kernel text/rodata integrity */
unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3);

#if __ARM_KERNEL_PROTECT__
extern void set_vbar_el1(uint64_t);
#endif /* __ARM_KERNEL_PROTECT__ */
#endif /* MACH_KERNEL_PRIVATE */
extern uint32_t arm_debug_read_dscr(void);

extern int set_be_bit(void);
extern int clr_be_bit(void);
extern int be_tracing(void);
/* Please note that cpu_broadcast_xcall is not as simple as you would like it to be.
 * It will sometimes put the calling thread to sleep, and it is up to your callback
 * to wake it up as needed, where "as needed" is defined as "all other CPUs have
 * called the broadcast func". Look around the kernel for examples, or instead use
 * cpu_broadcast_xcall_simple() which does indeed act like you would expect, given
 * the prototype. cpu_broadcast_immediate_xcall has the same caveats and has a similar
 * _simple() variant.
 */
typedef void (*broadcastFunc) (void *);
unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
unsigned int cpu_broadcast_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_xcall(int, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_immediate_xcall(int, broadcastFunc, void *);
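/*
 * Example (illustrative sketch): using the _simple() variant, which behaves as
 * the prototype suggests and does not require the manual wakeup handshake
 * described above.  flush_my_percpu_state is a placeholder; the boolean_t
 * argument is taken here to mean "also run on the calling CPU" (an assumption
 * about the parameter, not documented in this header).
 *
 *     static void
 *     flush_my_percpu_state(void *arg)
 *     {
 *             (void) arg;
 *             // per-CPU work goes here
 *     }
 *
 *     unsigned int cpus_run = cpu_broadcast_xcall_simple(TRUE, flush_my_percpu_state, NULL);
 */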
#ifdef KERNEL_PRIVATE

/* Interface to be used by the perf. controller to register a callback, in a
 * single-threaded fashion. The callback will receive notifications of
 * processor performance quality-of-service changes from the scheduler.
 */

#ifdef __arm64__
typedef void (*cpu_qos_update_t)(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2);
void cpu_qos_update_register(cpu_qos_update_t);
#endif /* __arm64__ */
struct going_on_core {
        uint16_t        urgency;        /* XCPM compatibility */
        uint32_t        is_32_bit : 1;  /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */
        uint32_t        is_kernel_thread : 1;
        uint64_t        thread_group_id;
        void            *thread_group_data;
        uint64_t        scheduling_latency;     /* absolute time between when thread was made runnable and this ctx switch */
        uint64_t        scheduling_latency_at_same_basepri;
        uint32_t        energy_estimate_nj;     /* return: In nanojoules */
        /* smaller of the time between last change to base priority and ctx switch and scheduling_latency */
};
typedef struct going_on_core *going_on_core_t;
struct going_off_core {
        uint32_t        energy_estimate_nj;     /* return: In nanojoules */
        uint64_t        thread_group_id;
        void            *thread_group_data;
};
typedef struct going_off_core *going_off_core_t;
struct thread_group_data {
        uint64_t        thread_group_id;
        void            *thread_group_data;
        uint32_t        thread_group_size;
        uint32_t        thread_group_flags;
};
typedef struct thread_group_data *thread_group_data_t;
struct perfcontrol_max_runnable_latency {
        uint64_t        max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */];
};
typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t;
struct perfcontrol_work_interval {
        uint32_t        flags;          // notify
        uint64_t        work_interval_id;
        uint64_t        thread_group_id;
        void            *thread_group_data;
        uint32_t        create_flags;
};
typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t;
typedef enum {
        WORK_INTERVAL_START,
        WORK_INTERVAL_UPDATE,
        WORK_INTERVAL_FINISH
} work_interval_ctl_t;
struct perfcontrol_work_interval_instance {
        work_interval_ctl_t     ctl;
        uint32_t                create_flags;
        uint64_t                work_interval_id;
        uint64_t                instance_id;    /* out: start, in: update/finish */
        uint64_t                thread_group_id;
        void                    *thread_group_data;
};
typedef struct perfcontrol_work_interval_instance *perfcontrol_work_interval_instance_t;
/*
 * Structure to export per-CPU counters as part of the CLPC callout.
 * Contains only the fixed CPU counters (instructions and cycles); CLPC
 * would call back into XNU to get the configurable counters if needed.
 */
struct perfcontrol_cpu_counters {
        uint64_t        instructions;
        uint64_t        cycles;
};
/*
 * Structure used to pass information about a thread to CLPC
 */
struct perfcontrol_thread_data {
        /*
         * Energy estimate (return value)
         * The field is populated by CLPC and used to update the
         * energy estimate of the thread
         */
        uint32_t            energy_estimate_nj;
        /* Perfcontrol class for thread */
        perfcontrol_class_t perfctl_class;
        /* Thread ID for the thread */
        uint64_t            thread_id;
        /* Thread Group ID */
        uint64_t            thread_group_id;
        /*
         * Scheduling latency for threads at the same base priority.
         * Calculated by the scheduler and passed into CLPC. The field is
         * populated only in the thread_data structure for the thread.
         */
        uint64_t            scheduling_latency_at_same_basepri;
        /* Thread Group data pointer */
        void                *thread_group_data;
        /* perfctl state pointer */
        void                *perfctl_state;
};
/*
 * All callouts from the scheduler are executed with interrupts
 * disabled. Callouts should be implemented in C with minimal
 * abstractions, and only use KPI exported by the mach/libkern
 * symbolset, restricted to routines like spinlocks and atomic
 * operations and scheduler routines as noted below. Spinlocks that
 * are used to synchronize data in the perfcontrol_state_t should only
 * ever be acquired with interrupts disabled, to avoid deadlocks where
 * a quantum expiration timer interrupt attempts to perform a callout
 * that attempts to lock a spinlock that is already held.
 */
/*
 * When a processor is switching between two threads (after the
 * scheduler has chosen a new thread), the low-level platform layer
 * will call this routine, which should perform required timestamps,
 * MMIO register reads, or other state switching. No scheduler locks
 * are held during this callout.
 *
 * This function is called with interrupts ENABLED.
 */
typedef void (*sched_perfcontrol_context_switch_t)(perfcontrol_state_t, perfcontrol_state_t);
/*
 * Once the processor has switched to the new thread, the offcore
 * callout will indicate the old thread that is no longer being
 * run. The thread's scheduler lock is held, so it will not begin
 * running on another processor (in the case of preemption where it
 * remains runnable) until it completes. If the "thread_terminating"
 * boolean is TRUE, this will be the last callout for this thread_id.
 */
typedef void (*sched_perfcontrol_offcore_t)(perfcontrol_state_t, going_off_core_t /* populated by callee */, boolean_t);
/*
 * After the offcore callout and after the old thread can potentially
 * start running on another processor, the oncore callout will be
 * called with the thread's scheduler lock held. The oncore callout is
 * also called any time one of the parameters in the going_on_core_t
 * structure changes, like priority/QoS changes, and quantum
 * expiration, so the callout must not assume callouts are paired with
 * offcore callouts.
 */
typedef void (*sched_perfcontrol_oncore_t)(perfcontrol_state_t, going_on_core_t);
/*
 * Periodically (on hundreds of ms scale), the scheduler will perform
 * maintenance and report the maximum latency for runnable (but not currently
 * running) threads for each urgency class.
 */
typedef void (*sched_perfcontrol_max_runnable_latency_t)(perfcontrol_max_runnable_latency_t);

/*
 * When the kernel receives information about work intervals from userland,
 * it is passed along using this callback. No locks are held, although the state
 * object will not go away during the callout.
 */
typedef void (*sched_perfcontrol_work_interval_notify_t)(perfcontrol_state_t, perfcontrol_work_interval_t);

/*
 * Start, update and finish work interval instance with optional complexity estimate.
 */
typedef void (*sched_perfcontrol_work_interval_ctl_t)(perfcontrol_state_t, perfcontrol_work_interval_instance_t);
/*
 * These callbacks are used when thread groups are added, removed or properties
 * updated.
 * No blocking allocations (or anything else blocking) are allowed inside these
 * callbacks. No locks allowed in these callbacks as well since the kernel might
 * be holding the thread/task locks.
 */
typedef void (*sched_perfcontrol_thread_group_init_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_deinit_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_flags_update_t)(thread_group_data_t);
/*
 * Sometime after the timeout set by sched_perfcontrol_update_callback_deadline has passed,
 * this function will be called, passing the timeout deadline that was previously armed as an argument.
 *
 * This is called inside context-switch/quantum-interrupt context and must follow the safety rules for that context.
 */
typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline);
/*
 * Context Switch Callout
 *
 * Parameters:
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the context switch
 * timestamp    - The timestamp for the context switch
 * flags        - Flags for other relevant information
 * offcore      - perfcontrol_data structure for thread going off-core
 * oncore       - perfcontrol_data structure for thread going on-core
 * cpu_counters - perfcontrol_cpu_counters for the CPU doing the switch
 */
typedef void (*sched_perfcontrol_csw_t)(
        perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
        struct perfcontrol_thread_data *offcore, struct perfcontrol_thread_data *oncore,
        struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused);
/*
 * Thread State Update Callout
 *
 * Parameters:
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the state update
 * timestamp    - The timestamp for the state update
 * flags        - Flags for other relevant information
 * thr_data     - perfcontrol_data structure for the thread being updated
 */
typedef void (*sched_perfcontrol_state_update_t)(
        perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
        struct perfcontrol_thread_data *thr_data, __unused void *unused);
/*
 * Thread Group Blocking Relationship Callout
 *
 * Parameters:
 * blocked_tg        - Thread group blocking on progress of another thread group
 * blocking_tg       - Thread group blocking progress of another thread group
 * flags             - Flags for other relevant information
 * blocked_thr_state - Per-thread perfcontrol state for blocked thread
 */
typedef void (*sched_perfcontrol_thread_group_blocked_t)(
        thread_group_data_t blocked_tg, thread_group_data_t blocking_tg, uint32_t flags, perfcontrol_state_t blocked_thr_state);
/*
 * Thread Group Unblocking Callout
 *
 * Parameters:
 * unblocked_tg        - Thread group being unblocked from making forward progress
 * unblocking_tg       - Thread group unblocking progress of another thread group
 * flags               - Flags for other relevant information
 * unblocked_thr_state - Per-thread perfcontrol state for unblocked thread
 */
typedef void (*sched_perfcontrol_thread_group_unblocked_t)(
        thread_group_data_t unblocked_tg, thread_group_data_t unblocking_tg, uint32_t flags, perfcontrol_state_t unblocked_thr_state);
/*
 * Callers should always use the CURRENT version so that the kernel can detect both older
 * and newer structure layouts. New callbacks should always be added at the end of the
 * structure, and xnu should expect existing source recompiled against newer headers
 * to pass NULL for unimplemented callbacks. Pass NULL as the callbacks parameter
 * to reset callbacks to their default in-kernel values.
 */

#define SCHED_PERFCONTROL_CALLBACKS_VERSION_0 (0) /* up-to oncore */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_1 (1) /* up-to max_runnable_latency */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_2 (2) /* up-to work_interval_notify */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_3 (3) /* up-to thread_group_deinit */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_4 (4) /* up-to deadline_passed */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_8 (8) /* up-to thread_group_unblocked */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6
struct sched_perfcontrol_callbacks {
        unsigned long version; /* Use SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT */
        sched_perfcontrol_offcore_t                   offcore;
        sched_perfcontrol_context_switch_t            context_switch;
        sched_perfcontrol_oncore_t                    oncore;
        sched_perfcontrol_max_runnable_latency_t      max_runnable_latency;
        sched_perfcontrol_work_interval_notify_t      work_interval_notify;
        sched_perfcontrol_thread_group_init_t         thread_group_init;
        sched_perfcontrol_thread_group_deinit_t       thread_group_deinit;
        sched_perfcontrol_deadline_passed_t           deadline_passed;
        sched_perfcontrol_csw_t                       csw;
        sched_perfcontrol_state_update_t              state_update;
        sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update;
        sched_perfcontrol_work_interval_ctl_t         work_interval_ctl;
        sched_perfcontrol_thread_group_blocked_t      thread_group_blocked;
        sched_perfcontrol_thread_group_unblocked_t    thread_group_unblocked;
};
typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;

extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state);
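/*
 * Example (illustrative sketch): registering a minimal set of CLPC callbacks.
 * Unimplemented callbacks are left NULL, as described above.  my_offcore and
 * my_oncore are placeholders, and the size_of_state argument is assumed to be
 * the amount of per-thread perfcontrol state the caller wants reserved.
 *
 *     static struct sched_perfcontrol_callbacks my_callbacks = {
 *             .version = SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT,
 *             .offcore = my_offcore,
 *             .oncore  = my_oncore,
 *             // remaining callbacks stay NULL and keep their in-kernel defaults
 *     };
 *
 *     sched_perfcontrol_register_callbacks(&my_callbacks, sizeof(struct my_perfcontrol_state));
 */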
/*
 * Update the scheduler with the set of cores that should be used to dispatch new threads.
 * Non-recommended cores can still be used to field interrupts or run bound threads.
 * This should be called with interrupts enabled and no scheduler locks held.
 */
#define ALL_CORES_RECOMMENDED (~(uint32_t)0)

extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);
extern void sched_override_recommended_cores_for_sleep(void);
extern void sched_restore_recommended_cores_after_sleep(void);
extern void sched_perfcontrol_inherit_recommendation_from_tg(perfcontrol_class_t perfctl_class, boolean_t inherit);

extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);
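/*
 * Example (illustrative sketch): restricting dispatch to CPUs 0-3 and later
 * restoring the default recommendation.
 *
 *     sched_perfcontrol_update_recommended_cores(0x0000000F);            // CPUs 0-3 only
 *     sched_perfcontrol_update_recommended_cores(ALL_CORES_RECOMMENDED); // back to all cores
 */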
/*
 * Edge Scheduler-CLPC Interface
 *
 * sched_perfcontrol_thread_group_preferred_clusters_set()
 *
 * The Edge scheduler expects thread group recommendations to be specific clusters rather
 * than just E/P. In order to allow more fine grained control, CLPC can specify an override
 * preferred cluster per QoS bucket. CLPC passes a common preferred cluster `tg_preferred_cluster`
 * and an array of size [PERFCONTROL_CLASS_MAX] with overrides for specific perfctl classes.
 * The scheduler translates these preferences into sched_bucket
 * preferences and applies the changes.
 */

/* Token to indicate a particular perfctl class is not overridden */
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE ((uint32_t)~0)

/*
 * CLPC can also indicate if there should be an immediate rebalancing of threads of this TG as
 * part of this preferred cluster change. It does that by specifying the following options.
 */
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING  0x1
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNABLE 0x2
typedef uint64_t sched_perfcontrol_preferred_cluster_options_t;

extern void sched_perfcontrol_thread_group_preferred_clusters_set(void *machine_data, uint32_t tg_preferred_cluster,
    uint32_t overrides[PERFCONTROL_CLASS_MAX], sched_perfcontrol_preferred_cluster_options_t options);
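/*
 * Example (illustrative sketch): preferring cluster 1 for a thread group while
 * leaving every perfctl class un-overridden and asking for runnable threads to
 * be rebalanced immediately.  tg_machine_data is a placeholder for the opaque
 * per-thread-group machine data CLPC holds.
 *
 *     uint32_t overrides[PERFCONTROL_CLASS_MAX];
 *     for (unsigned int i = 0; i < PERFCONTROL_CLASS_MAX; i++) {
 *             overrides[i] = SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE;
 *     }
 *     sched_perfcontrol_thread_group_preferred_clusters_set(tg_machine_data, 1, overrides,
 *         SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNABLE);
 */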
/*
 * Edge Scheduler-CLPC Interface
 *
 * sched_perfcontrol_edge_matrix_get()/sched_perfcontrol_edge_matrix_set()
 *
 * The Edge scheduler uses edges between clusters to define the likelihood of migrating threads
 * across clusters. The edge config between any two clusters defines the edge weight and whether
 * migration and steal operations are allowed across that edge. The getter and setter allow CLPC
 * to query and configure edge properties between various clusters on the platform.
 */

extern void sched_perfcontrol_edge_matrix_get(sched_clutch_edge *edge_matrix, bool *edge_request_bitmap, uint64_t flags, uint64_t matrix_order);
extern void sched_perfcontrol_edge_matrix_set(sched_clutch_edge *edge_matrix, bool *edge_changes_bitmap, uint64_t flags, uint64_t matrix_order);
/*
 * Update the deadline after which sched_perfcontrol_deadline_passed will be called.
 * Returns TRUE if it successfully canceled a previously set callback,
 * and FALSE if it did not (i.e. one wasn't set, or callback already fired / is in flight).
 * The callback is automatically canceled when it fires, and does not repeat unless rearmed.
 *
 * This 'timer' executes as the scheduler switches between threads, on a non-idle core.
 *
 * There can be only one outstanding timer globally.
 */
extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline);
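/*
 * Example (illustrative sketch): arming the one-shot deadline roughly 10 ms in
 * the future, assuming the standard kernel clock helpers are available at the
 * call site.
 *
 *     uint64_t deadline;
 *     clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);
 *     (void) sched_perfcontrol_update_callback_deadline(deadline);
 */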
typedef enum perfcontrol_callout_type {
        PERFCONTROL_CALLOUT_ON_CORE,
        PERFCONTROL_CALLOUT_OFF_CORE,
        PERFCONTROL_CALLOUT_CONTEXT,
        PERFCONTROL_CALLOUT_STATE_UPDATE,
        /* Add other callout types here */
        PERFCONTROL_CALLOUT_MAX
} perfcontrol_callout_type_t;

typedef enum perfcontrol_callout_stat {
        PERFCONTROL_STAT_INSTRS,
        PERFCONTROL_STAT_CYCLES,
        /* Add other stat types here */
        PERFCONTROL_STAT_MAX
} perfcontrol_callout_stat_t;

uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);
#ifdef __arm64__
/* The performance controller may use this interface to recommend
 * that CPUs in the designated cluster employ WFE rather than WFI
 * within the idle loop, falling back to WFI after the specified
 * timeout. The updates are expected to be serialized by the caller;
 * the implementation is not required to perform internal synchronization.
 */
uint32_t ml_update_cluster_wfe_recommendation(uint32_t wfe_cluster_id, uint64_t wfe_timeout_abstime_interval, uint64_t wfe_hint_flags);
#endif /* __arm64__ */
#if defined(HAS_APPLE_PAC)
#define ONES(x) (BIT((x))-1)
#define PTR_MASK ONES(64-T1SZ_BOOT)
#define PAC_MASK ~PTR_MASK
#define SIGN(p) ((p) & BIT(55))
#define UNSIGN_PTR(p) \
        SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK)

uint64_t ml_default_jop_pid(void);
void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid_from_shared_region(task_t task);
void ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop);
void ml_thread_set_disable_user_jop(thread_t thread, uint8_t disable_user_jop);
void ml_thread_set_jop_pid(thread_t thread, task_t task);
void *ml_auth_ptr_unchecked(void *ptr, unsigned key, uint64_t modifier);

uint64_t ml_enable_user_jop_key(uint64_t user_jop_key);

/**
 * Restores the previous JOP key state after a previous ml_enable_user_jop_key()
 * call.
 *
 * @param user_jop_key    The userspace JOP key previously passed to
 *                        ml_enable_user_jop_key()
 * @param saved_jop_state The saved JOP state returned by
 *                        ml_enable_user_jop_key()
 */
void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state);
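/*
 * Example (illustrative sketch): temporarily enabling a user JOP key and then
 * restoring the previous state, per the contract documented above.
 *
 *     uint64_t saved_state = ml_enable_user_jop_key(user_jop_key);
 *     // ... work that must run with the user's JOP key enabled ...
 *     ml_disable_user_jop_key(user_jop_key, saved_state);
 */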
#endif /* defined(HAS_APPLE_PAC) */
void ml_enable_monitor(void);

#endif /* KERNEL_PRIVATE */
boolean_t machine_timeout_suspended(void);
void ml_get_power_state(boolean_t *, boolean_t *);

uint32_t get_arm_cpu_version(void);
boolean_t user_cont_hwclock_allowed(void);
uint8_t user_timebase_type(void);
boolean_t ml_thread_is64bit(thread_t thread);

#ifdef __arm64__
bool ml_feature_supported(uint32_t feature_bit);
void ml_set_align_checking(void);
extern void wfe_timeout_configure(void);
extern void wfe_timeout_init(void);
#endif /* __arm64__ */
void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);
uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
#endif /* __APPLE_API_PRIVATE */
#if __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE)
extern void ml_expect_fault_begin(expected_fault_handler_t, uintptr_t);
extern void ml_expect_fault_end(void);
#endif /* __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE) */

void ml_hibernate_active_pre(void);
void ml_hibernate_active_post(void);

#endif /* _ARM_MACHINE_ROUTINES_H_ */