/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _ARM_MACHINE_ROUTINES_H_
#define _ARM_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>
/* Interrupt handling */

void ml_cpu_signal(unsigned int cpu_id);
void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs);
uint64_t ml_cpu_signal_deferred_get_timer(void);
void ml_cpu_signal_deferred(unsigned int cpu_id);
void ml_cpu_signal_retract(unsigned int cpu_id);
/* Initialize Interrupts */
void ml_init_interrupt(void);

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
boolean_t ml_early_set_interrupts_enabled(boolean_t enable);
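/*
 * Example (illustrative sketch, not part of the original header): callers
 * normally save the previous interrupt state and restore it afterwards
 * rather than unconditionally re-enabling interrupts.
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	// ... critical section that must not be interrupted ...
 *	(void) ml_set_interrupts_enabled(istate);
 */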
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);
/* Clear interrupt spin debug state for thread */
#if INTERRUPT_MASKED_DEBUG
void ml_spin_debug_reset(thread_t thread);
void ml_spin_debug_clear(thread_t thread);
void ml_spin_debug_clear_self(void);
void ml_check_interrupts_disabled_duration(thread_t thread);
#endif
#ifdef XNU_KERNEL_PRIVATE
extern bool ml_snoop_thread_is_on_core(thread_t thread);
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif
/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
#if MACH_KERNEL_PRIVATE
/* Type for the Processor Cache Dispatch function */
typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1);
#endif
#define CacheConfig             0x00000000UL
#define CacheControl            0x00000001UL
#define CacheClean              0x00000002UL
#define CacheCleanRegion        0x00000003UL
#define CacheCleanFlush         0x00000004UL
#define CacheCleanFlushRegion   0x00000005UL
#define CacheShutdown           0x00000006UL

#define CacheControlEnable      0x00000000UL

#define CacheConfigCCSIDR       0x00000001UL
#define CacheConfigSize         0x00000100UL
/* Type for the Processor Idle function */
typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter, uint64_t *new_timeout_ticks);

/* Type for the Idle Tickle function */
typedef void (*idle_tickle_t)(void);

/* Type for the Idle Timer function */
typedef void (*idle_timer_t)(void *refcon, uint64_t *new_timeout_ticks);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Type for the Lockdown Handler */
typedef void (*lockdown_handler_t)(void *);

/* Type for the Platform specific Error Handler */
typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr);
/*
 * The exception callback (ex_cb) module allows kernel drivers to
 * register and receive callbacks for exceptions, and indicate
 * actions to be taken by the platform kernel.
 * Currently this is supported for ARM64 but extending support for ARM32
 * should be straightforward.
 */
/* Supported exception classes for callbacks */
typedef enum {
	EXCB_CLASS_ILLEGAL_INSTR_SET,
#ifdef CONFIG_XNUPOST
	EXCB_CLASS_TEST1,
	EXCB_CLASS_TEST2,
	EXCB_CLASS_TEST3,
#endif
	EXCB_CLASS_MAX          // this must be last
} ex_cb_class_t;
/* Actions indicated by callbacks to be taken by platform kernel */
typedef enum {
	EXCB_ACTION_RERUN,      // re-run the faulting instruction
	EXCB_ACTION_NONE,       // continue normal exception handling
#ifdef CONFIG_XNUPOST
	EXCB_ACTION_TEST_FAIL,
#endif
} ex_cb_action_t;
/*
 * We cannot use a private kernel data structure such as arm_saved_state_t.
 * The CPSR and ESR are not clobbered when the callback function is invoked so
 * those registers can be examined by the callback function;
 * the same is done in the platform error handlers.
 */
/* callback type definition */
typedef ex_cb_action_t (*ex_cb_t) (
	ex_cb_class_t         cb_class,
	void                 *refcon,    // provided at registration
	const ex_cb_state_t  *state      // exception state
	);
/*
 * Callback registration
 * Currently we support only one registered callback per class but
 * it should be possible to support more callbacks.
 */
kern_return_t ex_cb_register(
	ex_cb_class_t   cb_class,
	ex_cb_t         cb,
	void           *refcon);
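/*
 * Example (illustrative sketch, not part of the original header): a driver
 * registering a callback for one of the exception classes above. The callback
 * name and refcon are hypothetical.
 *
 *	static ex_cb_action_t
 *	my_ex_cb(ex_cb_class_t cb_class, void *refcon, const ex_cb_state_t *state)
 *	{
 *		// examine the exception state, decide what the platform kernel should do
 *		return EXCB_ACTION_NONE;    // continue normal exception handling
 *	}
 *
 *	kern_return_t kr = ex_cb_register(EXCB_CLASS_ILLEGAL_INSTR_SET, my_ex_cb, NULL);
 */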
/*
 * Called internally by platform kernel to invoke the registered callback for class
 */
ex_cb_action_t ex_cb_invoke(
	ex_cb_class_t   cb_class,
	vm_offset_t     far);
void ml_parse_cpu_topology(void);

unsigned int ml_get_cpu_count(void);

int ml_get_boot_cpu_number(void);

int ml_get_cpu_number(uint32_t phys_id);

int ml_get_max_cpu_number(void);
/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
	unsigned long   vector_unit;
	unsigned long   cache_line_size;
	unsigned long   l1_icache_size;
	unsigned long   l1_dcache_size;
	unsigned long   l2_settings;
	unsigned long   l2_cache_size;
	unsigned long   l3_settings;
	unsigned long   l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;
cluster_type_t ml_get_boot_cluster(void);
/* Struct for ml_processor_register */
struct ml_processor_info {
	vm_offset_t                     start_paddr;
	boolean_t                       supports_nap;
	void                            *platform_cache_dispatch;
	time_base_enable_t              time_base_enable;
	processor_idle_t                processor_idle;
	idle_tickle_t                   *idle_tickle;
	idle_timer_t                    idle_timer;
	void                            *idle_timer_refcon;
	vm_offset_t                     powergate_stub_addr;
	uint32_t                        powergate_stub_length;
	uint32_t                        powergate_latency;
	platform_error_handler_t        platform_error_handler;
	uint64_t                        regmap_paddr;
	uint32_t                        l2_access_penalty;
	cluster_type_t                  cluster_type;
	uint32_t                        l2_cache_id;
	uint32_t                        l2_cache_size;
	uint32_t                        l3_cache_id;
	uint32_t                        l3_cache_size;
};
typedef struct ml_processor_info ml_processor_info_t;
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Struct for ml_init_timebase */
struct tbd_ops {
	void        (*tbd_fiq_handler)(void);
	uint32_t    (*tbd_get_decrementer)(void);
	void        (*tbd_set_decrementer)(uint32_t dec_value);
};
typedef struct tbd_ops  *tbd_ops_t;
typedef struct tbd_ops  tbd_ops_data_t;
#endif
/*
 * @function ml_processor_register
 *
 * @abstract callback from platform kext to register processor
 *
 * @discussion This function is called by the platform kext when a processor is
 * being registered. This is called while running on the CPU itself, as part of
 * its initialization.
 *
 * @param ml_processor_info provides machine-specific information about the
 * processor to xnu.
 *
 * @param processor is set as an out-parameter to an opaque handle that should
 * be used by the platform kext when referring to this processor in the future.
 *
 * @param ipi_handler is set as an out-parameter to the function that should be
 * registered as the IPI handler.
 *
 * @param pmi_handler is set as an out-parameter to the function that should be
 * registered as the PMI handler.
 *
 * @returns KERN_SUCCESS on success and an error code, otherwise.
 */
kern_return_t ml_processor_register(ml_processor_info_t *ml_processor_info,
    processor_t *processor, ipi_handler_t *ipi_handler,
    perfmon_interrupt_handler_func *pmi_handler);
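/*
 * Example (illustrative sketch, not part of the original header): a platform
 * kext might fill out an ml_processor_info_t for a secondary CPU and register
 * it roughly as follows. The variable names and field values are hypothetical.
 *
 *	ml_processor_info_t            info = { 0 };
 *	processor_t                    processor;
 *	ipi_handler_t                  ipi_handler;
 *	perfmon_interrupt_handler_func pmi_handler;
 *
 *	info.start_paddr      = secondary_start_paddr;
 *	info.supports_nap     = TRUE;
 *	info.time_base_enable = my_time_base_enable;
 *	info.processor_idle   = my_processor_idle;
 *
 *	kern_return_t kr = ml_processor_register(&info, &processor, &ipi_handler, &pmi_handler);
 *	if (kr != KERN_SUCCESS) {
 *		// handle registration failure
 *	}
 */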
/* Register a lockdown handler */
kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *);
#if XNU_KERNEL_PRIVATE
void ml_lockdown_init(void);

/* Check if the machine layer wants to intercept a panic call */
boolean_t ml_wants_panic_trap_to_debugger(void);

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */
/* Install an interrupt handler */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon);

vm_offset_t ml_static_slide(
    vm_offset_t vaddr);

vm_offset_t ml_static_unslide(
    vm_offset_t vaddr);
/* Offset required to obtain absolute time value from tick counter */
uint64_t ml_get_abstime_offset(void);

/* Offset required to obtain continuous time value from tick counter */
uint64_t ml_get_conttime_offset(void);
#ifdef __APPLE_API_UNSTABLE
/* PCI config cycle probing */
boolean_t ml_probe_read(
    vm_offset_t paddr,
    unsigned int *val);
boolean_t ml_probe_read_64(
    addr64_t paddr,
    unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(vm_offset_t paddr);
unsigned int ml_phys_read_half_64(addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(vm_offset_t paddr);
unsigned int ml_phys_read_64(addr64_t paddr);
unsigned int ml_phys_read_word(vm_offset_t paddr);
unsigned int ml_phys_read_word_64(addr64_t paddr);
unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
extern void ml_io_write8(uintptr_t vaddr, uint8_t val);
extern void ml_io_write16(uintptr_t vaddr, uint16_t val);
extern void ml_io_write32(uintptr_t vaddr, uint32_t val);
extern void ml_io_write64(uintptr_t vaddr, uint64_t val);
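/*
 * Example (illustrative sketch, not part of the original header): a device
 * register access through a previously established IO mapping. The mapping
 * base and register offset are hypothetical; ml_io_map() (declared below)
 * can be used to obtain such a mapping from a physical address.
 *
 *	uint32_t ctrl = ml_io_read32(io_base + CTRL_REG_OFFSET);
 *	ml_io_write32(io_base + CTRL_REG_OFFSET, ctrl | CTRL_ENABLE);
 */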
/* Read physical address double word */
unsigned long long ml_phys_read_double(vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(addr64_t paddr);
/* Write physical address byte */
void ml_phys_write_byte(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(
    addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(
    addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(
    addr64_t paddr, unsigned int data);
void ml_phys_write_word(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(
    addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(
    vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(
    addr64_t paddr, unsigned long long data);
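/*
 * Example (illustrative sketch, not part of the original header): a
 * read-modify-write of a 32-bit value at a known physical address, using the
 * word-sized accessors declared above. The address is hypothetical.
 *
 *	unsigned int word = ml_phys_read_word_64(some_paddr);
 *	ml_phys_write_word_64(some_paddr, word | 0x1);
 */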
void ml_static_mfree(
    vm_offset_t vaddr,
    vm_size_t size);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr);
/* Get processor info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);

#endif /* __APPLE_API_UNSTABLE */
#ifdef __APPLE_API_PRIVATE
#ifdef XNU_KERNEL_PRIVATE
vm_size_t ml_nofault_copy(
    vm_offset_t virtsrc,
    vm_offset_t virtdst,
    vm_size_t size);
boolean_t ml_validate_nofault(
    vm_offset_t virtsrc, vm_size_t size);
#endif /* XNU_KERNEL_PRIVATE */
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size);

vm_offset_t ml_io_map_wcomb(
    vm_offset_t phys_addr,
    vm_size_t size);

void ml_get_bouncepool_info(
    vm_offset_t *phys_addr,
    vm_size_t *size);

vm_map_address_t ml_map_high_window(
    vm_offset_t phys_addr,
    vm_size_t len);
/* boot memory allocation */
vm_offset_t ml_static_malloc(
    vm_size_t size);
void ml_init_timebase(
    void *args,
    tbd_ops_t tbd_funcs,
    vm_offset_t int_address,
    vm_offset_t int_value);
uint64_t ml_get_timebase(void);

void ml_init_lock_timeout(void);

boolean_t ml_delay_should_spin(uint64_t interval);

void ml_delay_on_yield(void);
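/*
 * Example (illustrative sketch, not part of the original header): choosing
 * between a busy-wait and a blocking delay based on the interval, a common
 * pattern for short hardware settle times. delay_deadline is hypothetical and
 * is assumed to be in timebase ticks.
 *
 *	if (ml_delay_should_spin(interval_abs)) {
 *		while (ml_get_timebase() < delay_deadline) {
 *			// spin for a short interval
 *		}
 *	} else {
 *		// block/yield for longer intervals instead of spinning
 *	}
 */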
uint32_t ml_get_decrementer(void);
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
void timer_state_event_user_to_kernel(void);
void timer_state_event_kernel_to_user(void);
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

uint64_t ml_get_hwclock(void);

boolean_t ml_get_timer_pending(void);
void platform_syscall(
    struct arm_saved_state *);

void ml_set_decrementer(
    uint32_t dec_value);
boolean_t is_user_contex(
    void);
void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address);
/* These calls are only valid if __ARM_USER_PROTECT__ is defined */
uintptr_t arm_user_protect_begin(
    thread_t thread);

void arm_user_protect_end(
    thread_t thread,
    uintptr_t up,
    boolean_t disable_interrupts);

#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */
/* Zero bytes starting at a physical address */
void bzero_phys(
    addr64_t phys_address,
    vm_size_t length);

void bzero_phys_nc(addr64_t src64, vm_size_t bytes);

void ml_thread_policy(
    thread_t thread,
    unsigned policy_id,
    unsigned policy_info);
#define MACHINE_GROUP                   0x00000001
#define MACHINE_NETWORK_GROUP           0x10000000
#define MACHINE_NETWORK_WORKLOOP        0x00000001
#define MACHINE_NETWORK_NETISR          0x00000002
/* Initialize the maximum number of CPUs */
void ml_init_max_cpus(
    unsigned int max_cpus);

/* Return the maximum number of CPUs set by ml_init_max_cpus() */
unsigned int ml_get_max_cpus(
    void);

/* Return the maximum memory size */
unsigned int ml_get_machine_mem(void);
#ifdef XNU_KERNEL_PRIVATE
/* Return max offset */
vm_map_offset_t ml_get_max_offset(
    boolean_t is64,
    unsigned int option);
#define MACHINE_MAX_OFFSET_DEFAULT      0x01
#define MACHINE_MAX_OFFSET_MIN          0x02
#define MACHINE_MAX_OFFSET_MAX          0x04
#define MACHINE_MAX_OFFSET_DEVICE       0x08
#endif /* XNU_KERNEL_PRIVATE */
extern void ml_cpu_up(void);
extern void ml_cpu_down(void);
extern void ml_arm_sleep(void);

extern uint64_t ml_get_wake_timebase(void);
extern uint64_t ml_get_conttime_wake_time(void);

/* Time since the system was reset (as part of boot/wake) */
uint64_t ml_get_time_since_reset(void);
#ifdef XNU_KERNEL_PRIVATE
/* Just a stub on ARM */
extern kern_return_t ml_interrupt_prewarm(uint64_t deadline);
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while (0)
#endif /* XNU_KERNEL_PRIVATE */
/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);
#ifdef MACH_KERNEL_PRIVATE
uint32_t get_fpscr(void);
void set_fpscr(uint32_t);

#ifdef __arm64__
unsigned long update_mdscr(unsigned long clear, unsigned long set);
#endif /* __arm64__ */
extern void init_vfp(void);
extern boolean_t get_vfp_enabled(void);
extern void arm_debug_set_cp14(arm_debug_state_t *debug_state);
extern void fiq_context_init(boolean_t enable_fiq);
extern void fiq_context_bootstrap(boolean_t enable_fiq);

extern void reenable_async_aborts(void);
extern void cpu_idle_wfi(boolean_t wfi_fast);
#define MONITOR_SET_ENTRY       0x800   /* Set kernel entry point from monitor */
#define MONITOR_LOCKDOWN        0x801   /* Enforce kernel text/rodata integrity */
unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3);
#if defined(KERNEL_INTEGRITY_KTRR)
void rorgn_stash_range(void);
void rorgn_lockdown(void);
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

#if __ARM_KERNEL_PROTECT__
extern void set_vbar_el1(uint64_t);
#endif /* __ARM_KERNEL_PROTECT__ */
#endif /* MACH_KERNEL_PRIVATE */
extern uint32_t arm_debug_read_dscr(void);

extern int set_be_bit(void);
extern int clr_be_bit(void);
extern int be_tracing(void);
typedef void (*broadcastFunc) (void *);
unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
kern_return_t cpu_xcall(int, broadcastFunc, void *);
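/*
 * Example (illustrative sketch, not part of the original header): broadcasting
 * a function to every CPU. This assumes the uint32_t * argument is used as a
 * synchronization/completion count and the boolean_t requests that the
 * function also run on the calling CPU; the handler and its argument are
 * hypothetical.
 *
 *	static void
 *	flush_local_state(void *arg)
 *	{
 *		// per-CPU work executed on each processor
 *	}
 *
 *	uint32_t sync = 0;
 *	(void) cpu_broadcast_xcall(&sync, TRUE, flush_local_state, NULL);
 */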
#ifdef KERNEL_PRIVATE
#if defined(__arm64__)
/*
 * Interface to be used by the perf. controller to register a callback, in a
 * single-threaded fashion. The callback will receive notifications of
 * processor performance quality-of-service changes from the scheduler.
 */

typedef void (*cpu_qos_update_t)(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2);
void cpu_qos_update_register(cpu_qos_update_t);
#endif /* __arm64__ */
struct going_on_core {
	uint16_t        urgency;                /* XCPM compatibility */
	uint32_t        is_32_bit : 1;          /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */
	uint32_t        is_kernel_thread : 1;
	uint64_t        thread_group_id;
	void            *thread_group_data;
	uint64_t        scheduling_latency;     /* absolute time between when thread was made runnable and this ctx switch */
	uint64_t        scheduling_latency_at_same_basepri;     /* smaller of the time between last change to base priority and ctx switch and scheduling_latency */
	uint32_t        energy_estimate_nj;     /* return: In nanojoules */
};
typedef struct going_on_core *going_on_core_t;
struct going_off_core {
	uint32_t        energy_estimate_nj;     /* return: In nanojoules */
	uint64_t        thread_group_id;
	void            *thread_group_data;
};
typedef struct going_off_core *going_off_core_t;
struct thread_group_data {
	uint64_t        thread_group_id;
	void            *thread_group_data;
	uint32_t        thread_group_size;
	uint32_t        thread_group_flags;
};
typedef struct thread_group_data *thread_group_data_t;
struct perfcontrol_max_runnable_latency {
	uint64_t        max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */];
};
typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t;
struct perfcontrol_work_interval {
	uint32_t        flags;                  // notify
	uint64_t        work_interval_id;
	uint64_t        thread_group_id;
	void            *thread_group_data;
	uint32_t        create_flags;
};
typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t;
typedef enum {
	WORK_INTERVAL_START,
	WORK_INTERVAL_UPDATE,
	WORK_INTERVAL_FINISH
} work_interval_ctl_t;
struct perfcontrol_work_interval_instance {
	work_interval_ctl_t     ctl;
	uint32_t                create_flags;
	uint64_t                work_interval_id;
	uint64_t                instance_id;    /* out: start, in: update/finish */
	uint64_t                thread_group_id;
	void                    *thread_group_data;
};
typedef struct perfcontrol_work_interval_instance *perfcontrol_work_interval_instance_t;
/*
 * Structure to export per-CPU counters as part of the CLPC callout.
 * Contains only the fixed CPU counters (instructions and cycles); CLPC
 * would call back into XNU to get the configurable counters if needed.
 */
struct perfcontrol_cpu_counters {
	uint64_t        instructions;
	uint64_t        cycles;
};
/*
 * Structure used to pass information about a thread to CLPC
 */
struct perfcontrol_thread_data {
	/*
	 * Energy estimate (return value)
	 * The field is populated by CLPC and used to update the
	 * energy estimate of the thread
	 */
	uint32_t            energy_estimate_nj;
	/* Perfcontrol class for thread */
	perfcontrol_class_t perfctl_class;
	/* Thread ID for the thread */
	uint64_t            thread_id;
	/* Thread Group ID */
	uint64_t            thread_group_id;
	/*
	 * Scheduling latency for threads at the same base priority.
	 * Calculated by the scheduler and passed into CLPC. The field is
	 * populated only in the thread_data structure for the thread
	 * going on-core.
	 */
	uint64_t            scheduling_latency_at_same_basepri;
	/* Thread Group data pointer */
	void                *thread_group_data;
	/* perfctl state pointer */
	void                *perfctl_state;
};
/*
 * All callouts from the scheduler are executed with interrupts
 * disabled. Callouts should be implemented in C with minimal
 * abstractions, and only use KPI exported by the mach/libkern
 * symbolset, restricted to routines like spinlocks and atomic
 * operations and scheduler routines as noted below. Spinlocks that
 * are used to synchronize data in the perfcontrol_state_t should only
 * ever be acquired with interrupts disabled, to avoid deadlocks where
 * a quantum expiration timer interrupt attempts to perform a callout
 * that attempts to lock a spinlock that is already held.
 */
/*
 * When a processor is switching between two threads (after the
 * scheduler has chosen a new thread), the low-level platform layer
 * will call this routine, which should perform required timestamps,
 * MMIO register reads, or other state switching. No scheduler locks
 * are held during this callout.
 *
 * This function is called with interrupts ENABLED.
 */
typedef void (*sched_perfcontrol_context_switch_t)(perfcontrol_state_t, perfcontrol_state_t);
/*
 * Once the processor has switched to the new thread, the offcore
 * callout will indicate the old thread that is no longer being
 * run. The thread's scheduler lock is held, so it will not begin
 * running on another processor (in the case of preemption where it
 * remains runnable) until it completes. If the "thread_terminating"
 * boolean is TRUE, this will be the last callout for this thread_id.
 */
typedef void (*sched_perfcontrol_offcore_t)(perfcontrol_state_t, going_off_core_t /* populated by callee */, boolean_t);
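/*
 * Example (illustrative sketch, not part of the original header): an offcore
 * callout protecting its own bookkeeping with a spinlock that is only ever
 * taken with interrupts disabled, per the rule above. The lock and counter are
 * hypothetical; lck_spin_lock()/lck_spin_unlock() are standard xnu lock KPI.
 *
 *	static void
 *	my_offcore_callout(perfcontrol_state_t state, going_off_core_t data,
 *	    boolean_t thread_terminating)
 *	{
 *		// interrupts are already disabled in scheduler callouts
 *		lck_spin_lock(&my_stats_lock);
 *		my_offcore_count++;
 *		lck_spin_unlock(&my_stats_lock);
 *	}
 */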
/*
 * After the offcore callout and after the old thread can potentially
 * start running on another processor, the oncore callout will be
 * called with the thread's scheduler lock held. The oncore callout is
 * also called any time one of the parameters in the going_on_core_t
 * structure changes, like priority/QoS changes, and quantum
 * expiration, so the callout must not assume callouts are paired with
 * offcore callouts.
 */
typedef void (*sched_perfcontrol_oncore_t)(perfcontrol_state_t, going_on_core_t);
/*
 * Periodically (on hundreds of ms scale), the scheduler will perform
 * maintenance and report the maximum latency for runnable (but not currently
 * running) threads for each urgency class.
 */
typedef void (*sched_perfcontrol_max_runnable_latency_t)(perfcontrol_max_runnable_latency_t);
/*
 * When the kernel receives information about work intervals from userland,
 * it is passed along using this callback. No locks are held, although the state
 * object will not go away during the callout.
 */
typedef void (*sched_perfcontrol_work_interval_notify_t)(perfcontrol_state_t, perfcontrol_work_interval_t);
/*
 * Start, update and finish work interval instance with optional complexity estimate.
 */
typedef void (*sched_perfcontrol_work_interval_ctl_t)(perfcontrol_state_t, perfcontrol_work_interval_instance_t);
/*
 * These callbacks are used when thread groups are added, removed or properties
 * updated.
 * No blocking allocations (or anything else blocking) are allowed inside these
 * callbacks. No locks allowed in these callbacks as well since the kernel might
 * be holding the thread/task locks.
 */
typedef void (*sched_perfcontrol_thread_group_init_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_deinit_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_flags_update_t)(thread_group_data_t);
/*
 * Sometime after the timeout set by sched_perfcontrol_update_callback_deadline has passed,
 * this function will be called, passing the timeout deadline that was previously armed as an argument.
 *
 * This is called inside context-switch/quantum-interrupt context and must follow the safety rules for that context.
 */
typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline);
/*
 * Context Switch Callout
 *
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the context switch
 * timestamp    - The timestamp for the context switch
 * flags        - Flags for other relevant information
 * offcore      - perfcontrol_data structure for thread going off-core
 * oncore       - perfcontrol_data structure for thread going on-core
 * cpu_counters - perfcontrol_cpu_counters for the CPU doing the switch
 */
typedef void (*sched_perfcontrol_csw_t)(
	perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
	struct perfcontrol_thread_data *offcore, struct perfcontrol_thread_data *oncore,
	struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused);
/*
 * Thread State Update Callout
 *
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the state update
 * timestamp    - The timestamp for the state update
 * flags        - Flags for other relevant information
 * thr_data     - perfcontrol_data structure for the thread being updated
 */
typedef void (*sched_perfcontrol_state_update_t)(
	perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
	struct perfcontrol_thread_data *thr_data, __unused void *unused);
/*
 * Callers should always use the CURRENT version so that the kernel can detect both older
 * and newer structure layouts. New callbacks should always be added at the end of the
 * structure, and xnu should expect existing source recompiled against newer headers
 * to pass NULL for unimplemented callbacks. Pass NULL as the callbacks parameter
 * to reset callbacks to their default in-kernel values.
 */

#define SCHED_PERFCONTROL_CALLBACKS_VERSION_0 (0) /* up-to oncore */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_1 (1) /* up-to max_runnable_latency */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_2 (2) /* up-to work_interval_notify */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_3 (3) /* up-to thread_group_deinit */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_4 (4) /* up-to deadline_passed */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6
struct sched_perfcontrol_callbacks {
	unsigned long                                 version; /* Use SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT */
	sched_perfcontrol_offcore_t                   offcore;
	sched_perfcontrol_context_switch_t            context_switch;
	sched_perfcontrol_oncore_t                    oncore;
	sched_perfcontrol_max_runnable_latency_t      max_runnable_latency;
	sched_perfcontrol_work_interval_notify_t      work_interval_notify;
	sched_perfcontrol_thread_group_init_t         thread_group_init;
	sched_perfcontrol_thread_group_deinit_t       thread_group_deinit;
	sched_perfcontrol_deadline_passed_t           deadline_passed;
	sched_perfcontrol_csw_t                       csw;
	sched_perfcontrol_state_update_t              state_update;
	sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update;
	sched_perfcontrol_work_interval_ctl_t         work_interval_ctl;
};
typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;
extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state);
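/*
 * Example (illustrative sketch, not part of the original header): a performance
 * controller filling in the callbacks it implements, leaving the rest NULL, and
 * registering them. The callback implementations are hypothetical, and
 * size_of_state is assumed here to be the size of the per-thread state the
 * controller wants reserved.
 *
 *	static struct sched_perfcontrol_callbacks my_callbacks = {
 *		.version = SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT,
 *		.offcore = my_offcore_callout,
 *		.oncore  = my_oncore_callout,
 *		.csw     = my_csw_callout,
 *		// unimplemented callbacks are left NULL
 *	};
 *
 *	sched_perfcontrol_register_callbacks(&my_callbacks, sizeof(struct my_perthread_state));
 */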
/*
 * Update the scheduler with the set of cores that should be used to dispatch new threads.
 * Non-recommended cores can still be used to field interrupts or run bound threads.
 * This should be called with interrupts enabled and no scheduler locks held.
 */
#define ALL_CORES_RECOMMENDED   (~(uint32_t)0)

extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);
extern void sched_override_recommended_cores_for_sleep(void);
extern void sched_restore_recommended_cores_after_sleep(void);

extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);
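/*
 * Example (illustrative sketch, not part of the original header): restricting
 * new-thread dispatch to CPUs 0-3, assuming recommended_cores is a bitmask with
 * one bit per CPU, then later restoring all cores.
 *
 *	sched_perfcontrol_update_recommended_cores(0x0F);
 *	// ... thermally or power constrained period ...
 *	sched_perfcontrol_update_recommended_cores(ALL_CORES_RECOMMENDED);
 */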
/*
 * Update the deadline after which sched_perfcontrol_deadline_passed will be called.
 * Returns TRUE if it successfully canceled a previously set callback,
 * and FALSE if it did not (i.e. one wasn't set, or the callback already fired / is in flight).
 * The callback is automatically canceled when it fires, and does not repeat unless rearmed.
 *
 * This 'timer' executes as the scheduler switches between threads, on a non-idle core.
 *
 * There can be only one outstanding timer globally.
 */
extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline);
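/*
 * Example (illustrative sketch, not part of the original header): arming the
 * single global deadline so that the registered deadline_passed callback fires
 * roughly 10 ms from now. nanoseconds_to_absolutetime() and
 * mach_absolute_time() are standard kernel KPI; the callback itself must have
 * been registered via the deadline_passed field of the callbacks structure
 * above.
 *
 *	uint64_t interval, deadline;
 *	nanoseconds_to_absolutetime(10 * NSEC_PER_MSEC, &interval);
 *	deadline = mach_absolute_time() + interval;
 *	(void) sched_perfcontrol_update_callback_deadline(deadline);
 */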
typedef enum perfcontrol_callout_type {
	PERFCONTROL_CALLOUT_ON_CORE,
	PERFCONTROL_CALLOUT_OFF_CORE,
	PERFCONTROL_CALLOUT_CONTEXT,
	PERFCONTROL_CALLOUT_STATE_UPDATE,
	/* Add other callout types here */
	PERFCONTROL_CALLOUT_MAX
} perfcontrol_callout_type_t;
typedef enum perfcontrol_callout_stat {
	PERFCONTROL_STAT_INSTRS,
	PERFCONTROL_STAT_CYCLES,
	/* Add other stat types here */
} perfcontrol_callout_stat_t;
uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);

#endif /* KERNEL_PRIVATE */
boolean_t machine_timeout_suspended(void);
void ml_get_power_state(boolean_t *, boolean_t *);

uint32_t get_arm_cpu_version(void);
boolean_t user_cont_hwclock_allowed(void);
boolean_t user_timebase_allowed(void);
boolean_t ml_thread_is64bit(thread_t thread);
#ifdef __arm64__
void ml_set_align_checking(void);
boolean_t arm64_wfe_allowed(void);
#endif /* __arm64__ */
void ml_timer_evaluate(void);

boolean_t ml_timer_forced_evaluation(void);
uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
#endif /* __APPLE_API_PRIVATE */
#endif /* _ARM_MACHINE_ROUTINES_H_ */