/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _ARM_MACHINE_ROUTINES_H_
#define _ARM_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>
#ifdef XNU_KERNEL_PRIVATE
#if __arm64__
typedef bool (*expected_fault_handler_t)(arm_saved_state_t *);
#endif /* __arm64__ */
#endif /* XNU_KERNEL_PRIVATE */
/* Interrupt handling */

void ml_cpu_signal(unsigned int cpu_id);
void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs);
uint64_t ml_cpu_signal_deferred_get_timer(void);
void ml_cpu_signal_deferred(unsigned int cpu_id);
void ml_cpu_signal_retract(unsigned int cpu_id);
bool ml_cpu_signal_is_enabled(void);
/* Initialize Interrupts */
void ml_init_interrupt(void);

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
boolean_t ml_early_set_interrupts_enabled(boolean_t enable);
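/*
 * Illustrative sketch (not part of this header): the usual
 * save/disable/restore pattern around a short critical section.
 *
 *     boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *     // ... short critical section with interrupts masked ...
 *     (void) ml_set_interrupts_enabled(istate);
 */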
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);
/* Clear interrupt spin debug state for thread */
#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
extern uint64_t stackshot_interrupt_masked_timeout;

#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type)                                          \
do {                                                                                              \
    if (interrupt_masked_debug) {                                                                 \
        thread_t thread = current_thread();                                                       \
        thread->machine.int_type = type;                                                          \
        thread->machine.int_handler_addr = (uintptr_t)VM_KERNEL_STRIP_PTR(handler_addr);          \
        thread->machine.inthandler_timestamp = ml_get_timebase();                                 \
        thread->machine.int_vector = (uintptr_t)NULL;                                             \
    }                                                                                             \
} while (0)

#define INTERRUPT_MASKED_DEBUG_END()                                                              \
do {                                                                                              \
    if (interrupt_masked_debug) {                                                                 \
        thread_t thread = current_thread();                                                       \
        ml_check_interrupt_handler_duration(thread);                                              \
    }                                                                                             \
} while (0)

void ml_irq_debug_start(uintptr_t handler, uintptr_t vector);
void ml_irq_debug_end(void);

void ml_spin_debug_reset(thread_t thread);
void ml_spin_debug_clear(thread_t thread);
void ml_spin_debug_clear_self(void);
void ml_check_interrupts_disabled_duration(thread_t thread);
void ml_check_stackshot_interrupt_disabled_duration(thread_t thread);
void ml_check_interrupt_handler_duration(thread_t thread);
#else
#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type)
#define INTERRUPT_MASKED_DEBUG_END()
#endif /* INTERRUPT_MASKED_DEBUG */
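/*
 * Illustrative sketch (hypothetical handler, not part of this KPI): bracketing
 * an interrupt handler with the bookkeeping above so overlong handlers can be
 * flagged; `my_intr_type` stands in for a real interrupt-type constant.
 *
 *     static void
 *     my_handler(void *refcon)
 *     {
 *         INTERRUPT_MASKED_DEBUG_START(my_handler, my_intr_type);
 *         // ... service the interrupt ...
 *         INTERRUPT_MASKED_DEBUG_END();
 *     }
 */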
#ifdef XNU_KERNEL_PRIVATE
extern bool ml_snoop_thread_is_on_core(thread_t thread);
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif /* XNU_KERNEL_PRIVATE */

/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Type for the Processor Cache Dispatch function */
typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select,
    unsigned int param0, unsigned int param1);

typedef uint32_t (*get_decrementer_t)(void);
typedef void (*set_decrementer_t)(uint32_t);
typedef void (*fiq_handler_t)(void);
#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

#define CacheConfig             0x00000000UL
#define CacheControl            0x00000001UL
#define CacheClean              0x00000002UL
#define CacheCleanRegion        0x00000003UL
#define CacheCleanFlush         0x00000004UL
#define CacheCleanFlushRegion   0x00000005UL
#define CacheShutdown           0x00000006UL

#define CacheControlEnable      0x00000000UL

#define CacheConfigCCSIDR       0x00000001UL
#define CacheConfigSize         0x00000100UL
/* Type for the Processor Idle function */
typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter,
    uint64_t *new_timeout_ticks);

/* Type for the Idle Tickle function */
typedef void (*idle_tickle_t)(void);

/* Type for the Idle Timer function */
typedef void (*idle_timer_t)(void *refcon, uint64_t *new_timeout_ticks);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Type for the Lockdown Handler */
typedef void (*lockdown_handler_t)(void *);

/* Type for the Platform specific Error Handler */
typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr);
/*
 * The exception callback (ex_cb) module allows kernel drivers to
 * register and receive callbacks for exceptions, and indicate
 * actions to be taken by the platform kernel.
 * Currently this is supported for ARM64 but extending support for ARM32
 * should be straightforward.
 */

/* Supported exception classes for callbacks */
typedef enum {
    EXCB_CLASS_ILLEGAL_INSTR_SET,
#ifdef CONFIG_XNUPOST
    EXCB_CLASS_TEST1,
    EXCB_CLASS_TEST2,
    EXCB_CLASS_TEST3,
#endif
    EXCB_CLASS_MAX          // this must be last
} ex_cb_class_t;

/* Actions indicated by callbacks to be taken by platform kernel */
typedef enum {
    EXCB_ACTION_RERUN,      // re-run the faulting instruction
    EXCB_ACTION_NONE,       // continue normal exception handling
#ifdef CONFIG_XNUPOST
    EXCB_ACTION_TEST_FAIL,
#endif
} ex_cb_action_t;

/*
 * We cannot use a private kernel data structure such as arm_saved_state_t.
 * The CPSR and ESR are not clobbered when the callback function is invoked, so
 * those registers can be examined by the callback function;
 * the same is done in the platform error handlers.
 */
typedef struct {
    vm_offset_t far;
} ex_cb_state_t;

/* Callback type definition */
typedef ex_cb_action_t (*ex_cb_t) (
    ex_cb_class_t           cb_class,
    void                    *refcon,        // provided at registration
    const ex_cb_state_t     *state          // exception state
    );

/*
 * Callback registration:
 * Currently we support only one registered callback per class but
 * it should be possible to support more callbacks.
 */
kern_return_t ex_cb_register(
    ex_cb_class_t   cb_class,
    ex_cb_t         cb,
    void            *refcon);

/*
 * Called internally by platform kernel to invoke the registered callback for class.
 */
ex_cb_action_t ex_cb_invoke(
    ex_cb_class_t   cb_class,
    vm_offset_t     far);
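/*
 * Illustrative sketch (hypothetical driver code, not part of this KPI):
 * registering a callback for the illegal-instruction-set class that inspects
 * the fault address and then lets normal exception handling proceed.
 *
 *     static ex_cb_action_t
 *     my_ex_cb(ex_cb_class_t cb_class, void *refcon, const ex_cb_state_t *state)
 *     {
 *         // state->far holds the fault address for this exception
 *         return EXCB_ACTION_NONE;
 *     }
 *
 *     kern_return_t kr = ex_cb_register(EXCB_CLASS_ILLEGAL_INSTR_SET, my_ex_cb, NULL);
 */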
void ml_parse_cpu_topology(void);

unsigned int ml_get_cpu_count(void);

unsigned int ml_get_cluster_count(void);

int ml_get_boot_cpu_number(void);

int ml_get_cpu_number(uint32_t phys_id);

int ml_get_cluster_number(uint32_t phys_id);

int ml_get_max_cpu_number(void);

int ml_get_max_cluster_number(void);

unsigned int ml_get_first_cpu_id(unsigned int cluster_id);

#if __arm64__
int ml_get_cluster_number_local(void);
unsigned int ml_get_cpu_number_local(void);
#endif /* __arm64__ */
/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
    unsigned long   vector_unit;
    unsigned long   cache_line_size;
    unsigned long   l1_icache_size;
    unsigned long   l1_dcache_size;
    unsigned long   l2_settings;
    unsigned long   l2_cache_size;
    unsigned long   l3_settings;
    unsigned long   l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;

cluster_type_t ml_get_boot_cluster(void);
/*!
 * @typedef ml_topology_cpu_t
 * @brief Describes one CPU core in the topology.
 *
 * @field cpu_id            Logical CPU ID (EDT: cpu-id): 0, 1, 2, 3, 4, ...
 * @field phys_id           Physical CPU ID (EDT: reg). Same as MPIDR[15:0], i.e.
 *                          (cluster_id << 8) | core_number_within_cluster
 * @field cluster_id        Cluster ID (EDT: cluster-id)
 * @field die_id            Die ID (EDT: die-id)
 * @field cluster_type      The type of CPUs found in this cluster.
 * @field l2_access_penalty Indicates that the scheduler should try to de-prioritize a core because
 *                          L2 accesses are slower than on the boot processor.
 * @field l2_cache_size     Size of the L2 cache, in bytes. 0 if unknown or not present.
 * @field l2_cache_id       l2-cache-id property read from EDT.
 * @field l3_cache_size     Size of the L3 cache, in bytes. 0 if unknown or not present.
 * @field l3_cache_id       l3-cache-id property read from EDT.
 * @field cpu_IMPL_regs     IO-mapped virtual address of cpuX_IMPL (implementation-defined) register block.
 * @field cpu_IMPL_pa       Physical address of cpuX_IMPL register block.
 * @field cpu_IMPL_len      Length of cpuX_IMPL register block.
 * @field cpu_UTTDBG_regs   IO-mapped virtual address of cpuX_UTTDBG register block.
 * @field cpu_UTTDBG_pa     Physical address of cpuX_UTTDBG register block, if set in DT, else zero
 * @field cpu_UTTDBG_len    Length of cpuX_UTTDBG register block, if set in DT, else zero
 * @field coresight_regs    IO-mapped virtual address of CoreSight debug register block.
 * @field coresight_pa      Physical address of CoreSight register block.
 * @field coresight_len     Length of CoreSight register block.
 * @field self_ipi_irq      AIC IRQ vector for self IPI (cpuX->cpuX). 0 if unsupported.
 * @field other_ipi_irq     AIC IRQ vector for other IPI (cpuX->cpuY). 0 if unsupported.
 * @field pmi_irq           AIC IRQ vector for performance management IRQ. 0 if unsupported.
 * @field die_cluster_id    Cluster ID within the local die (EDT: die-cluster-id)
 * @field cluster_core_id   Core ID within the local cluster (EDT: cluster-core-id)
 */
typedef struct ml_topology_cpu {
    unsigned int    cpu_id;
    uint32_t        phys_id;
    unsigned int    cluster_id;
    unsigned int    die_id;
    cluster_type_t  cluster_type;
    uint32_t        l2_access_penalty;
    uint32_t        l2_cache_size;
    uint32_t        l2_cache_id;
    uint32_t        l3_cache_size;
    uint32_t        l3_cache_id;
    vm_offset_t     cpu_IMPL_regs;
    uint64_t        cpu_IMPL_pa;
    uint64_t        cpu_IMPL_len;
    vm_offset_t     cpu_UTTDBG_regs;
    uint64_t        cpu_UTTDBG_pa;
    uint64_t        cpu_UTTDBG_len;
    vm_offset_t     coresight_regs;
    uint64_t        coresight_pa;
    uint64_t        coresight_len;
    int             self_ipi_irq;
    int             other_ipi_irq;
    int             pmi_irq;
    unsigned int    die_cluster_id;
    unsigned int    cluster_core_id;
} ml_topology_cpu_t;
/*!
 * @typedef ml_topology_cluster_t
 * @brief Describes one cluster in the topology.
 *
 * @field cluster_id        Cluster ID (EDT: cluster-id)
 * @field cluster_type      The type of CPUs found in this cluster.
 * @field num_cpus          Total number of usable CPU cores in this cluster.
 * @field first_cpu_id      The cpu_id of the first CPU in the cluster.
 * @field cpu_mask          A bitmask representing the cpu_id's that belong to the cluster. Example:
 *                          If the cluster contains CPU4 and CPU5, cpu_mask will be 0x30.
 * @field acc_IMPL_regs     IO-mapped virtual address of acc_IMPL (implementation-defined) register block.
 * @field acc_IMPL_pa       Physical address of acc_IMPL register block.
 * @field acc_IMPL_len      Length of acc_IMPL register block.
 * @field cpm_IMPL_regs     IO-mapped virtual address of cpm_IMPL (implementation-defined) register block.
 * @field cpm_IMPL_pa       Physical address of cpm_IMPL register block.
 * @field cpm_IMPL_len      Length of cpm_IMPL register block.
 */
typedef struct ml_topology_cluster {
    unsigned int    cluster_id;
    cluster_type_t  cluster_type;
    unsigned int    num_cpus;
    unsigned int    first_cpu_id;
    uint64_t        cpu_mask;
    vm_offset_t     acc_IMPL_regs;
    uint64_t        acc_IMPL_pa;
    uint64_t        acc_IMPL_len;
    vm_offset_t     cpm_IMPL_regs;
    uint64_t        cpm_IMPL_pa;
    uint64_t        cpm_IMPL_len;
} ml_topology_cluster_t;
// Bump this version number any time any ml_topology_* struct changes, so
// that KPI users can check whether their headers are compatible with
// the running kernel.
#define CPU_TOPOLOGY_VERSION 1

/*!
 * @typedef ml_topology_info_t
 * @brief Describes the CPU topology for all APs in the system. Populated from EDT and read-only at runtime.
 * @discussion This struct only lists CPU cores that are considered usable by both iBoot and XNU. Some
 *             physically present CPU cores may be considered unusable due to configuration options like
 *             the "cpus=" boot-arg. Cores that are disabled in hardware will not show up in EDT at all, so
 *             they also will not be present in this struct.
 *
 * @field version           Version of the struct (set to CPU_TOPOLOGY_VERSION).
 * @field num_cpus          Total number of usable CPU cores.
 * @field max_cpu_id        The highest usable logical CPU ID.
 * @field num_clusters      Total number of AP CPU clusters on the system (usable or not).
 * @field max_cluster_id    The highest cluster ID found in EDT.
 * @field cpus              List of |num_cpus| entries.
 * @field clusters          List of |num_clusters| entries.
 * @field boot_cpu          Points to the |cpus| entry for the boot CPU.
 * @field boot_cluster      Points to the |clusters| entry which contains the boot CPU.
 * @field chip_revision     Silicon revision reported by iBoot, which comes from the
 *                          SoC-specific fuse bits. See CPU_VERSION_xx macros for definitions.
 */
typedef struct ml_topology_info {
    unsigned int            version;
    unsigned int            num_cpus;
    unsigned int            max_cpu_id;
    unsigned int            num_clusters;
    unsigned int            max_cluster_id;
    unsigned int            max_die_id;
    ml_topology_cpu_t       *cpus;
    ml_topology_cluster_t   *clusters;
    ml_topology_cpu_t       *boot_cpu;
    ml_topology_cluster_t   *boot_cluster;
    unsigned int            chip_revision;
} ml_topology_info_t;

/*!
 * @function ml_get_topology_info
 * @result A pointer to the read-only topology struct. Does not need to be freed. Returns NULL
 *         if the struct hasn't been initialized or the feature is unsupported.
 */
const ml_topology_info_t *ml_get_topology_info(void);
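/*
 * Illustrative sketch (not part of this KPI): counting the usable P-cores by
 * walking the topology. Assumes the topology has been initialized;
 * CLUSTER_TYPE_P comes from the cluster_type_t definitions in <mach/machine.h>.
 *
 *     const ml_topology_info_t *info = ml_get_topology_info();
 *     unsigned int pcores = 0;
 *     if (info != NULL) {
 *         for (unsigned int i = 0; i < info->num_cpus; i++) {
 *             if (info->cpus[i].cluster_type == CLUSTER_TYPE_P) {
 *                 pcores++;
 *             }
 *         }
 *     }
 */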
/*!
 * @function ml_map_cpu_pio
 * @brief Maps per-CPU and per-cluster PIO registers found in EDT. This needs to be
 *        called after arm_vm_init() so it can't be part of ml_parse_cpu_topology().
 */
void ml_map_cpu_pio(void);
/* Struct for ml_processor_register */
struct ml_processor_info {
    vm_offset_t                     start_paddr;
    boolean_t                       supports_nap;
    void                            *platform_cache_dispatch;
    time_base_enable_t              time_base_enable;
    processor_idle_t                processor_idle;
    idle_tickle_t                   *idle_tickle;
    idle_timer_t                    idle_timer;
    void                            *idle_timer_refcon;
    vm_offset_t                     powergate_stub_addr;
    uint32_t                        powergate_stub_length;
    uint32_t                        powergate_latency;
    platform_error_handler_t        platform_error_handler;
    uint64_t                        regmap_paddr;
    uint32_t                        l2_access_penalty;
    cluster_type_t                  cluster_type;
    uint32_t                        l2_cache_id;
    uint32_t                        l2_cache_size;
    uint32_t                        l3_cache_id;
    uint32_t                        l3_cache_size;
};
typedef struct ml_processor_info ml_processor_info_t;
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Struct for ml_init_timebase */
struct tbd_ops {
    fiq_handler_t           tbd_fiq_handler;
    get_decrementer_t       tbd_get_decrementer;
    set_decrementer_t       tbd_set_decrementer;
};
typedef struct tbd_ops *tbd_ops_t;
typedef struct tbd_ops tbd_ops_data_t;
#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */
/*!
 * @function ml_processor_register
 *
 * @abstract callback from platform kext to register processor
 *
 * @discussion This function is called by the platform kext when a processor is
 * being registered. This is called while running on the CPU itself, as part of
 * its initialization.
 *
 * @param ml_processor_info provides machine-specific information about the
 * processor to xnu.
 *
 * @param processor is set as an out-parameter to an opaque handle that should
 * be used by the platform kext when referring to this processor in the future.
 *
 * @param ipi_handler is set as an out-parameter to the function that should be
 * registered as the IPI handler.
 *
 * @param pmi_handler is set as an out-parameter to the function that should be
 * registered as the PMI handler.
 *
 * @returns KERN_SUCCESS on success and an error code, otherwise.
 */
kern_return_t ml_processor_register(ml_processor_info_t *ml_processor_info,
    processor_t *processor, ipi_handler_t *ipi_handler,
    perfmon_interrupt_handler_func *pmi_handler);
/* Register a lockdown handler */
kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *);

#if XNU_KERNEL_PRIVATE
void ml_lockdown_init(void);

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */
/* Initialize Interrupts */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon);

kern_return_t ml_static_verify_page_protections(
    uint64_t base, uint64_t size, vm_prot_t prot);
vm_offset_t ml_static_slide(
    vm_offset_t vaddr);

vm_offset_t ml_static_unslide(
    vm_offset_t vaddr);

/* Offset required to obtain absolute time value from tick counter */
uint64_t ml_get_abstime_offset(void);

/* Offset required to obtain continuous time value from tick counter */
uint64_t ml_get_conttime_offset(void);
#ifdef __APPLE_API_UNSTABLE
/* PCI config cycle probing */
boolean_t ml_probe_read(
    vm_offset_t paddr,
    unsigned int *val);
boolean_t ml_probe_read_64(
    addr64_t paddr,
    unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(
    vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(
    addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(
    vm_offset_t paddr);
unsigned int ml_phys_read_half_64(
    addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(
    vm_offset_t paddr);
unsigned int ml_phys_read_64(
    addr64_t paddr);
unsigned int ml_phys_read_word(
    vm_offset_t paddr);
unsigned int ml_phys_read_word_64(
    addr64_t paddr);
unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
extern void ml_io_write8(uintptr_t vaddr, uint8_t val);
extern void ml_io_write16(uintptr_t vaddr, uint16_t val);
extern void ml_io_write32(uintptr_t vaddr, uint32_t val);
extern void ml_io_write64(uintptr_t vaddr, uint64_t val);
/* Read physical address double word */
unsigned long long ml_phys_read_double(
    vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(
    addr64_t paddr);

/* Write physical address byte */
void ml_phys_write_byte(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(
    addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(
    addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(
    addr64_t paddr, unsigned int data);
void ml_phys_write_word(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(
    addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(
    vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(
    addr64_t paddr, unsigned long long data);
void ml_static_mfree(
    vm_offset_t vaddr,
    vm_size_t size);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr);

/* Get processor cache info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
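/*
 * Illustrative sketch (not part of this KPI): querying the cache geometry of
 * the current CPU.
 *
 *     ml_cpu_info_t cpu_info;
 *     ml_cpu_get_info(&cpu_info);
 *     // e.g. cpu_info.l1_dcache_size and cpu_info.cache_line_size are now valid
 */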
#endif /* __APPLE_API_UNSTABLE */
#ifdef __APPLE_API_PRIVATE
#ifdef XNU_KERNEL_PRIVATE
vm_size_t ml_nofault_copy(
    vm_offset_t virtsrc,
    vm_offset_t virtdst,
    vm_size_t size);
boolean_t ml_validate_nofault(
    vm_offset_t virtsrc, vm_size_t size);
#endif /* XNU_KERNEL_PRIVATE */
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size);

vm_offset_t ml_io_map_wcomb(
    vm_offset_t phys_addr,
    vm_size_t size);

vm_offset_t ml_io_map_with_prot(
    vm_offset_t phys_addr,
    vm_size_t size,
    vm_prot_t prot);
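/*
 * Illustrative sketch (hypothetical device, not part of this KPI): mapping a
 * register page and doing a 32-bit read-modify-write through the IO accessors
 * above. The physical address, offset, and bit are made up for the example.
 *
 *     vm_offset_t regs = ml_io_map(0x200000000ULL, PAGE_SIZE);
 *     uint32_t ctrl = ml_io_read32(regs + 0x10);
 *     ml_io_write32(regs + 0x10, ctrl | 0x1);
 */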
void ml_get_bouncepool_info(
    vm_offset_t *phys_addr,
    vm_size_t *size);

vm_map_address_t ml_map_high_window(
    vm_offset_t phys_addr,
    vm_size_t len);

/* boot memory allocation */
vm_offset_t ml_static_malloc(
    vm_size_t size);

void ml_init_timebase(
    void *args,
    tbd_ops_t tbd_funcs,
    vm_offset_t int_address,
    vm_offset_t int_value);
uint64_t ml_get_timebase(void);

void ml_init_lock_timeout(void);

boolean_t ml_delay_should_spin(uint64_t interval);

void ml_delay_on_yield(void);

uint32_t ml_get_decrementer(void);

#include <machine/config.h>

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
void timer_state_event_user_to_kernel(void);
void timer_state_event_kernel_to_user(void);
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

uint64_t ml_get_hwclock(void);

boolean_t ml_get_timer_pending(void);
void platform_syscall(
    struct arm_saved_state *);

void ml_set_decrementer(
    uint32_t dec_value);

boolean_t is_user_contex(
    void);

void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address);

/* These calls are only valid if __ARM_USER_PROTECT__ is defined */
uintptr_t arm_user_protect_begin(
    thread_t thread);

void arm_user_protect_end(
    thread_t thread,
    uintptr_t up,
    boolean_t disable_interrupts);
#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

/* Zero bytes starting at a physical address */
void bzero_phys(
    addr64_t phys_address,
    vm_size_t length);

void bzero_phys_nc(addr64_t src64, vm_size_t bytes);

#if MACH_KERNEL_PRIVATE
#ifdef __arm64__
/* Pattern-fill buffer with zeros or a 32-bit pattern;
 * target must be 128-byte aligned and sized a multiple of 128.
 * Both variants emit stores with non-temporal properties.
 */
void fill32_dczva(addr64_t, vm_size_t);
void fill32_nt(addr64_t, vm_size_t, uint32_t);
int cpu_interrupt_is_pending(void);
#endif /* __arm64__ */
#endif /* MACH_KERNEL_PRIVATE */

void ml_thread_policy(
    thread_t thread,
    unsigned policy_id,
    unsigned policy_info);
#define MACHINE_GROUP                   0x00000001
#define MACHINE_NETWORK_GROUP           0x10000000
#define MACHINE_NETWORK_WORKLOOP        0x00000001
#define MACHINE_NETWORK_NETISR          0x00000002

/* Set the maximum number of CPUs */
void ml_set_max_cpus(
    unsigned int max_cpus);

/* Return the maximum number of CPUs set by ml_set_max_cpus(), waiting if necessary */
unsigned int ml_wait_max_cpus(void);

/* Return the maximum memory size */
unsigned int ml_get_machine_mem(void);
#ifdef XNU_KERNEL_PRIVATE
/* Return max offset */
vm_map_offset_t ml_get_max_offset(
    boolean_t is64,
    unsigned int option);
#define MACHINE_MAX_OFFSET_DEFAULT      0x01
#define MACHINE_MAX_OFFSET_MIN          0x02
#define MACHINE_MAX_OFFSET_MAX          0x04
#define MACHINE_MAX_OFFSET_DEVICE       0x08
#endif /* XNU_KERNEL_PRIVATE */

extern void ml_cpu_up(void);
extern void ml_cpu_down(void);
extern void ml_arm_sleep(void);

extern uint64_t ml_get_wake_timebase(void);
extern uint64_t ml_get_conttime_wake_time(void);

/* Time since the system was reset (as part of boot/wake) */
uint64_t ml_get_time_since_reset(void);

/*
 * Called by ApplePMGR to set wake time. Units and epoch are identical
 * to mach_continuous_time(). Has no effect on !HAS_CONTINUOUS_HWCLOCK
 * chips. If wake_time == UINT64_MAX, that means the wake time is
 * unknown and calls to ml_get_time_since_reset() will return UINT64_MAX.
 */
void ml_set_reset_time(uint64_t wake_time);
#ifdef XNU_KERNEL_PRIVATE
/* Just a stub on ARM */
extern kern_return_t ml_interrupt_prewarm(uint64_t deadline);
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* XNU_KERNEL_PRIVATE */

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);
#ifdef MACH_KERNEL_PRIVATE
uint32_t get_fpscr(void);
void set_fpscr(uint32_t);
void machine_conf(void);
void machine_lockdown(void);

#if __arm64__
unsigned long update_mdscr(unsigned long clear, unsigned long set);
#endif /* __arm64__ */

extern void arm_debug_set_cp14(arm_debug_state_t *debug_state);
extern void fiq_context_init(boolean_t enable_fiq);

extern void reenable_async_aborts(void);

extern boolean_t get_vfp_enabled(void);
extern void cpu_idle_wfi(boolean_t wfi_fast);

uint64_t ml_cluster_wfe_timeout(uint32_t wfe_cluster_id);
#define MONITOR_SET_ENTRY       0x800 /* Set kernel entry point from monitor */
#define MONITOR_LOCKDOWN        0x801 /* Enforce kernel text/rodata integrity */
unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3);

#if __ARM_KERNEL_PROTECT__
extern void set_vbar_el1(uint64_t);
#endif /* __ARM_KERNEL_PROTECT__ */
#endif /* MACH_KERNEL_PRIVATE */
extern uint32_t arm_debug_read_dscr(void);

extern int set_be_bit(void);
extern int clr_be_bit(void);
extern int be_tracing(void);

/* Please note that cpu_broadcast_xcall is not as simple as you would like it to be.
 * It will sometimes put the calling thread to sleep, and it is up to your callback
 * to wake it up as needed, where "as needed" is defined as "all other CPUs have
 * called the broadcast func". Look around the kernel for examples, or instead use
 * cpu_broadcast_xcall_simple() which does indeed act like you would expect, given
 * the prototype. cpu_broadcast_immediate_xcall has the same caveats and has a similar
 * _simple() variant.
 */
typedef void (*broadcastFunc) (void *);
unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
unsigned int cpu_broadcast_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_xcall(int, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_immediate_xcall(int, broadcastFunc, void *);
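/*
 * Illustrative sketch (hypothetical callback, not part of this KPI): running a
 * function once on every CPU via the "simple" variant, which behaves like the
 * prototype suggests and returns the number of CPUs that ran the callback.
 *
 *     static void
 *     flush_my_percpu_state(void *arg)
 *     {
 *         // runs on each CPU, including the caller when TRUE is passed
 *     }
 *
 *     unsigned int ran = cpu_broadcast_xcall_simple(TRUE, flush_my_percpu_state, NULL);
 */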
#ifdef KERNEL_PRIVATE

/* Interface to be used by the perf. controller to register a callback, in a
 * single-threaded fashion. The callback will receive notifications of
 * processor performance quality-of-service changes from the scheduler.
 */

#ifdef __arm64__
typedef void (*cpu_qos_update_t)(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2);
void cpu_qos_update_register(cpu_qos_update_t);
#endif /* __arm64__ */
struct going_on_core {
    uint16_t        urgency;        /* XCPM compatibility */
    uint32_t        is_32_bit       : 1; /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */
    uint32_t        is_kernel_thread : 1;
    uint64_t        thread_group_id;
    void            *thread_group_data;
    uint64_t        scheduling_latency;     /* absolute time between when thread was made runnable and this ctx switch */
    /* smaller of the time between last change to base priority and ctx switch and scheduling_latency */
    uint64_t        scheduling_latency_at_same_basepri;
    uint32_t        energy_estimate_nj;     /* return: In nanojoules */
};
typedef struct going_on_core *going_on_core_t;
struct going_off_core {
    uint32_t        energy_estimate_nj;     /* return: In nanojoules */
    uint64_t        thread_group_id;
    void            *thread_group_data;
};
typedef struct going_off_core *going_off_core_t;
struct thread_group_data {
    uint64_t        thread_group_id;
    void            *thread_group_data;
    uint32_t        thread_group_size;
    uint32_t        thread_group_flags;
};
typedef struct thread_group_data *thread_group_data_t;
struct perfcontrol_max_runnable_latency {
    uint64_t        max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */];
};
typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t;
struct perfcontrol_work_interval {
    uint32_t        flags;          // notify
    uint64_t        work_interval_id;
    uint64_t        thread_group_id;
    void            *thread_group_data;
    uint32_t        create_flags;
};
typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t;
typedef enum {
    WORK_INTERVAL_START,
    WORK_INTERVAL_UPDATE,
    WORK_INTERVAL_FINISH
} work_interval_ctl_t;

struct perfcontrol_work_interval_instance {
    work_interval_ctl_t     ctl;
    uint32_t                create_flags;
    uint64_t                work_interval_id;
    uint64_t                instance_id;    /* out: start, in: update/finish */
    uint64_t                thread_group_id;
    void                    *thread_group_data;
};
typedef struct perfcontrol_work_interval_instance *perfcontrol_work_interval_instance_t;
/*
 * Structure to export per-CPU counters as part of the CLPC callout.
 * Contains only the fixed CPU counters (instructions and cycles); CLPC
 * would call back into XNU to get the configurable counters if needed.
 */
struct perfcontrol_cpu_counters {
    uint64_t        instructions;
    uint64_t        cycles;
};
/*
 * Structure used to pass information about a thread to CLPC
 */
struct perfcontrol_thread_data {
    /*
     * Energy estimate (return value)
     * The field is populated by CLPC and used to update the
     * energy estimate of the thread
     */
    uint32_t        energy_estimate_nj;
    /* Perfcontrol class for thread */
    perfcontrol_class_t     perfctl_class;
    /* Thread ID for the thread */
    uint64_t        thread_id;
    /* Thread Group ID */
    uint64_t        thread_group_id;
    /*
     * Scheduling latency for threads at the same base priority.
     * Calculated by the scheduler and passed into CLPC. The field is
     * populated only in the thread_data structure for the thread
     * being switched on-core.
     */
    uint64_t        scheduling_latency_at_same_basepri;
    /* Thread Group data pointer */
    void            *thread_group_data;
    /* perfctl state pointer */
    void            *perfctl_state;
};
/*
 * All callouts from the scheduler are executed with interrupts
 * disabled. Callouts should be implemented in C with minimal
 * abstractions, and only use KPI exported by the mach/libkern
 * symbolset, restricted to routines like spinlocks and atomic
 * operations and scheduler routines as noted below. Spinlocks that
 * are used to synchronize data in the perfcontrol_state_t should only
 * ever be acquired with interrupts disabled, to avoid deadlocks where
 * a quantum expiration timer interrupt attempts to perform a callout
 * that attempts to lock a spinlock that is already held.
 */

/*
 * When a processor is switching between two threads (after the
 * scheduler has chosen a new thread), the low-level platform layer
 * will call this routine, which should perform required timestamps,
 * MMIO register reads, or other state switching. No scheduler locks
 * are held during this callout.
 *
 * This function is called with interrupts ENABLED.
 */
typedef void (*sched_perfcontrol_context_switch_t)(perfcontrol_state_t, perfcontrol_state_t);
/*
 * Once the processor has switched to the new thread, the offcore
 * callout will indicate the old thread that is no longer being
 * run. The thread's scheduler lock is held, so it will not begin
 * running on another processor (in the case of preemption where it
 * remains runnable) until it completes. If the "thread_terminating"
 * boolean is TRUE, this will be the last callout for this thread_id.
 */
typedef void (*sched_perfcontrol_offcore_t)(perfcontrol_state_t, going_off_core_t /* populated by callee */, boolean_t);

/*
 * After the offcore callout and after the old thread can potentially
 * start running on another processor, the oncore callout will be
 * called with the thread's scheduler lock held. The oncore callout is
 * also called any time one of the parameters in the going_on_core_t
 * structure changes, like priority/QoS changes, and quantum
 * expiration, so the callout must not assume callouts are paired with
 * offcore callouts.
 */
typedef void (*sched_perfcontrol_oncore_t)(perfcontrol_state_t, going_on_core_t);
/*
 * Periodically (on hundreds of ms scale), the scheduler will perform
 * maintenance and report the maximum latency for runnable (but not currently
 * running) threads for each urgency class.
 */
typedef void (*sched_perfcontrol_max_runnable_latency_t)(perfcontrol_max_runnable_latency_t);

/*
 * When the kernel receives information about work intervals from userland,
 * it is passed along using this callback. No locks are held, although the state
 * object will not go away during the callout.
 */
typedef void (*sched_perfcontrol_work_interval_notify_t)(perfcontrol_state_t, perfcontrol_work_interval_t);

/*
 * Start, update and finish work interval instance with optional complexity estimate.
 */
typedef void (*sched_perfcontrol_work_interval_ctl_t)(perfcontrol_state_t, perfcontrol_work_interval_instance_t);
/*
 * These callbacks are used when thread groups are added, removed or properties
 * updated.
 * No blocking allocations (or anything else blocking) are allowed inside these
 * callbacks. No locks allowed in these callbacks as well since the kernel might
 * be holding the thread/task locks.
 */
typedef void (*sched_perfcontrol_thread_group_init_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_deinit_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_flags_update_t)(thread_group_data_t);

/*
 * Sometime after the timeout set by sched_perfcontrol_update_callback_deadline has passed,
 * this function will be called, passing the timeout deadline that was previously armed as an argument.
 *
 * This is called inside context-switch/quantum-interrupt context and must follow the safety rules for that context.
 */
typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline);
/*
 * Context Switch Callout
 *
 * Parameters:
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the context switch
 * timestamp    - The timestamp for the context switch
 * flags        - Flags for other relevant information
 * offcore      - perfcontrol_data structure for thread going off-core
 * oncore       - perfcontrol_data structure for thread going on-core
 * cpu_counters - perfcontrol_cpu_counters for the CPU doing the switch
 */
typedef void (*sched_perfcontrol_csw_t)(
    perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
    struct perfcontrol_thread_data *offcore, struct perfcontrol_thread_data *oncore,
    struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused);

/*
 * Thread State Update Callout
 *
 * Parameters:
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the state update
 * timestamp    - The timestamp for the state update
 * flags        - Flags for other relevant information
 * thr_data     - perfcontrol_data structure for the thread being updated
 */
typedef void (*sched_perfcontrol_state_update_t)(
    perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
    struct perfcontrol_thread_data *thr_data, __unused void *unused);
/*
 * Thread Group Blocking Relationship Callout
 *
 * Parameters:
 * blocked_tg         - Thread group blocking on progress of another thread group
 * blocking_tg        - Thread group blocking progress of another thread group
 * flags              - Flags for other relevant information
 * blocked_thr_state  - Per-thread perfcontrol state for blocked thread
 */
typedef void (*sched_perfcontrol_thread_group_blocked_t)(
    thread_group_data_t blocked_tg, thread_group_data_t blocking_tg, uint32_t flags, perfcontrol_state_t blocked_thr_state);

/*
 * Thread Group Unblocking Callout
 *
 * Parameters:
 * unblocked_tg        - Thread group being unblocked from making forward progress
 * unblocking_tg       - Thread group unblocking progress of another thread group
 * flags               - Flags for other relevant information
 * unblocked_thr_state - Per-thread perfcontrol state for unblocked thread
 */
typedef void (*sched_perfcontrol_thread_group_unblocked_t)(
    thread_group_data_t unblocked_tg, thread_group_data_t unblocking_tg, uint32_t flags, perfcontrol_state_t unblocked_thr_state);
/*
 * Callers should always use the CURRENT version so that the kernel can detect both older
 * and newer structure layouts. New callbacks should always be added at the end of the
 * structure, and xnu should expect existing source recompiled against newer headers
 * to pass NULL for unimplemented callbacks. Pass NULL as the callbacks parameter
 * to reset callbacks to their default in-kernel values.
 */

#define SCHED_PERFCONTROL_CALLBACKS_VERSION_0 (0) /* up-to oncore */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_1 (1) /* up-to max_runnable_latency */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_2 (2) /* up-to work_interval_notify */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_3 (3) /* up-to thread_group_deinit */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_4 (4) /* up-to deadline_passed */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_8 (8) /* up-to thread_group_unblocked */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6

struct sched_perfcontrol_callbacks {
    unsigned long version; /* Use SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT */
    sched_perfcontrol_offcore_t                   offcore;
    sched_perfcontrol_context_switch_t            context_switch;
    sched_perfcontrol_oncore_t                    oncore;
    sched_perfcontrol_max_runnable_latency_t      max_runnable_latency;
    sched_perfcontrol_work_interval_notify_t      work_interval_notify;
    sched_perfcontrol_thread_group_init_t         thread_group_init;
    sched_perfcontrol_thread_group_deinit_t       thread_group_deinit;
    sched_perfcontrol_deadline_passed_t           deadline_passed;
    sched_perfcontrol_csw_t                       csw;
    sched_perfcontrol_state_update_t              state_update;
    sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update;
    sched_perfcontrol_work_interval_ctl_t         work_interval_ctl;
    sched_perfcontrol_thread_group_blocked_t      thread_group_blocked;
    sched_perfcontrol_thread_group_unblocked_t    thread_group_unblocked;
};
typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;

extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state);
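/*
 * Illustrative sketch (hypothetical CLPC-side code, not part of this KPI):
 * registering a minimal callback set. Entries left NULL keep their default
 * in-kernel behavior; `my_state_t` is a hypothetical per-thread state type.
 *
 *     static void
 *     my_oncore(perfcontrol_state_t state, going_on_core_t on_core)
 *     {
 *         // inspect the thread going on-core
 *     }
 *
 *     static struct sched_perfcontrol_callbacks cb = {
 *         .version = SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT,
 *         .oncore  = my_oncore,
 *     };
 *     sched_perfcontrol_register_callbacks(&cb, sizeof(my_state_t));
 */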
/*
 * Update the scheduler with the set of cores that should be used to dispatch new threads.
 * Non-recommended cores can still be used to field interrupts or run bound threads.
 * This should be called with interrupts enabled and no scheduler locks held.
 */
#define ALL_CORES_RECOMMENDED (~(uint32_t)0)

extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);
extern void sched_override_recommended_cores_for_sleep(void);
extern void sched_restore_recommended_cores_after_sleep(void);
extern void sched_perfcontrol_inherit_recommendation_from_tg(perfcontrol_class_t perfctl_class, boolean_t inherit);

extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);
/*
 * Edge Scheduler-CLPC Interface
 *
 * sched_perfcontrol_thread_group_preferred_clusters_set()
 *
 * The Edge scheduler expects thread group recommendations to be specific clusters rather
 * than just E/P. In order to allow more fine-grained control, CLPC can specify an override
 * preferred cluster per QoS bucket. CLPC passes a common preferred cluster `tg_preferred_cluster`
 * and an array of size [PERFCONTROL_CLASS_MAX] with overrides for specific perfctl classes.
 * The scheduler translates these preferences into sched_bucket
 * preferences and applies the changes.
 */

/* Token to indicate a particular perfctl class is not overridden */
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE ((uint32_t)~0)

/*
 * CLPC can also indicate if there should be an immediate rebalancing of threads of this TG as
 * part of this preferred cluster change. It does that by specifying the following options.
 */
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING  0x1
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNABLE 0x2
typedef uint64_t sched_perfcontrol_preferred_cluster_options_t;

extern void sched_perfcontrol_thread_group_preferred_clusters_set(void *machine_data, uint32_t tg_preferred_cluster,
    uint32_t overrides[PERFCONTROL_CLASS_MAX], sched_perfcontrol_preferred_cluster_options_t options);
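/*
 * Illustrative sketch (hypothetical CLPC-side code, not part of this KPI):
 * preferring cluster 0 for a thread group with no per-class overrides, and
 * migrating currently running threads immediately. `machine_data` is assumed
 * to be the thread group's machine-specific data pointer.
 *
 *     uint32_t overrides[PERFCONTROL_CLASS_MAX];
 *     for (int i = 0; i < PERFCONTROL_CLASS_MAX; i++) {
 *         overrides[i] = SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE;
 *     }
 *     sched_perfcontrol_thread_group_preferred_clusters_set(machine_data, 0, overrides,
 *         SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING);
 */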
/*
 * Edge Scheduler-CLPC Interface
 *
 * sched_perfcontrol_edge_matrix_get()/sched_perfcontrol_edge_matrix_set()
 *
 * The Edge scheduler uses edges between clusters to define the likelihood of migrating threads
 * across clusters. The edge config between any two clusters defines the edge weight and whether
 * migration and steal operations are allowed across that edge. The getter and setter allow CLPC
 * to query and configure edge properties between various clusters on the platform.
 */

extern void sched_perfcontrol_edge_matrix_get(sched_clutch_edge *edge_matrix, bool *edge_request_bitmap, uint64_t flags, uint64_t matrix_order);
extern void sched_perfcontrol_edge_matrix_set(sched_clutch_edge *edge_matrix, bool *edge_changes_bitmap, uint64_t flags, uint64_t matrix_order);
/*
 * Update the deadline after which sched_perfcontrol_deadline_passed will be called.
 * Returns TRUE if it successfully canceled a previously set callback,
 * and FALSE if it did not (i.e. one wasn't set, or callback already fired / is in flight).
 * The callback is automatically canceled when it fires, and does not repeat unless rearmed.
 *
 * This 'timer' executes as the scheduler switches between threads, on a non-idle core.
 *
 * There can be only one outstanding timer globally.
 */
extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline);
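/*
 * Illustrative sketch (not part of this KPI): arming the one-shot callback
 * roughly 10 ms from now, in mach_absolute_time() units.
 *
 *     uint64_t interval;
 *     nanoseconds_to_absolutetime(10 * NSEC_PER_MSEC, &interval);
 *     (void) sched_perfcontrol_update_callback_deadline(mach_absolute_time() + interval);
 */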
typedef enum perfcontrol_callout_type {
    PERFCONTROL_CALLOUT_ON_CORE,
    PERFCONTROL_CALLOUT_OFF_CORE,
    PERFCONTROL_CALLOUT_CONTEXT,
    PERFCONTROL_CALLOUT_STATE_UPDATE,
    /* Add other callout types here */
    PERFCONTROL_CALLOUT_MAX
} perfcontrol_callout_type_t;

typedef enum perfcontrol_callout_stat {
    PERFCONTROL_STAT_INSTRS,
    PERFCONTROL_STAT_CYCLES,
    /* Add other stat types here */
    PERFCONTROL_STAT_MAX
} perfcontrol_callout_stat_t;

uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);
#if defined(__arm64__)
/* The performance controller may use this interface to recommend
 * that CPUs in the designated cluster employ WFE rather than WFI
 * within the idle loop, falling back to WFI after the specified
 * timeout. The updates are expected to be serialized by the caller;
 * the implementation is not required to perform internal synchronization.
 */
uint32_t ml_update_cluster_wfe_recommendation(uint32_t wfe_cluster_id, uint64_t wfe_timeout_abstime_interval, uint64_t wfe_hint_flags);
#endif /* __arm64__ */
#if defined(HAS_APPLE_PAC)
#define ONES(x) (BIT((x))-1)
#define PTR_MASK ONES(64-T1SZ_BOOT)
#define PAC_MASK ~PTR_MASK
#define SIGN(p) ((p) & BIT(55))
#define UNSIGN_PTR(p) \
	(SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK))
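/*
 * Illustrative sketch (not part of this KPI): recovering the canonical address
 * bits of a PAC-signed kernel pointer without authenticating it, using the
 * macros above.
 *
 *     static uint64_t
 *     strip_pac_bits(uint64_t signed_ptr)
 *     {
 *         return UNSIGN_PTR(signed_ptr);
 *     }
 */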
uint64_t ml_default_jop_pid(void);
void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid_from_shared_region(task_t task);
void ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop);
void ml_thread_set_disable_user_jop(thread_t thread, uint8_t disable_user_jop);
void ml_thread_set_jop_pid(thread_t thread, task_t task);
void *ml_auth_ptr_unchecked(void *ptr, unsigned key, uint64_t modifier);
/**
 * Temporarily enables a userspace JOP key in kernel space, so that the kernel
 * can sign or auth pointers on that process's behalf.
 *
 * @note The caller must disable interrupts before calling
 * ml_enable_user_jop_key(), and may only re-enable interrupts after the
 * complementary ml_disable_user_jop_key() call.
 *
 * @param user_jop_key The userspace JOP key to temporarily use
 * @return Saved JOP state, to be passed to the complementary
 *         ml_disable_user_jop_key() call
 */
uint64_t ml_enable_user_jop_key(uint64_t user_jop_key);

/**
 * Restores the previous JOP key state after a previous ml_enable_user_jop_key()
 * call.
 *
 * @param user_jop_key The userspace JOP key previously passed to
 *                     ml_enable_user_jop_key()
 * @param saved_jop_state The saved JOP state returned by
 *                        ml_enable_user_jop_key()
 */
void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state);
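/*
 * Illustrative sketch (not part of this KPI): the required pairing and
 * interrupt discipline around borrowing a user JOP key; `user_jop_key` is
 * assumed to have been obtained from the target task.
 *
 *     boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *     uint64_t saved_state = ml_enable_user_jop_key(user_jop_key);
 *     // ... sign or auth pointers on the process's behalf ...
 *     ml_disable_user_jop_key(user_jop_key, saved_state);
 *     (void) ml_set_interrupts_enabled(istate);
 */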
#endif /* defined(HAS_APPLE_PAC) */
#endif /* KERNEL_PRIVATE */

boolean_t machine_timeout_suspended(void);
void ml_get_power_state(boolean_t *, boolean_t *);

uint32_t get_arm_cpu_version(void);
boolean_t user_cont_hwclock_allowed(void);
uint8_t user_timebase_type(void);
boolean_t ml_thread_is64bit(thread_t thread);

#ifdef __arm64__
bool ml_feature_supported(uint32_t feature_bit);
void ml_set_align_checking(void);
extern void wfe_timeout_configure(void);
extern void wfe_timeout_init(void);
#endif /* __arm64__ */

void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);
uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
#endif /* __APPLE_API_PRIVATE */
#if __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE)
extern void ml_expect_fault_begin(expected_fault_handler_t, uintptr_t);
extern void ml_expect_fault_end(void);
#endif /* __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE) */

void ml_hibernate_active_pre(void);
void ml_hibernate_active_post(void);

#endif /* _ARM_MACHINE_ROUTINES_H_ */