/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _ARM_MACHINE_ROUTINES_H_
#define _ARM_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>

#include <stdarg.h>

__BEGIN_DECLS

/* Interrupt handling */

void ml_cpu_signal(unsigned int cpu_id);
void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs);
uint64_t ml_cpu_signal_deferred_get_timer(void);
void ml_cpu_signal_deferred(unsigned int cpu_id);
void ml_cpu_signal_retract(unsigned int cpu_id);

/* Initialize Interrupts */
void ml_init_interrupt(void);

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
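
/*
 * Typical usage (an illustrative sketch, not part of this interface):
 * callers generally save the previous interrupt state and restore it,
 * rather than assuming interrupts were enabled on entry:
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	// ... critical section that must not take interrupts ...
 *	(void) ml_set_interrupts_enabled(istate);
 */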

/* Check if running in interrupt context */
boolean_t ml_at_interrupt_context(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);

/* Clear interrupt spin debug state for thread */
#if INTERRUPT_MASKED_DEBUG
void ml_spin_debug_reset(thread_t thread);
void ml_spin_debug_clear(thread_t thread);
void ml_spin_debug_clear_self(void);
void ml_check_interrupts_disabled_duration(thread_t thread);
#endif

#ifdef XNU_KERNEL_PRIVATE
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif

/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
#if MACH_KERNEL_PRIVATE
/* Type for the Processor Cache Dispatch function */
typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1);
#endif

#define CacheConfig		0x00000000UL
#define CacheControl		0x00000001UL
#define CacheClean		0x00000002UL
#define CacheCleanRegion	0x00000003UL
#define CacheCleanFlush		0x00000004UL
#define CacheCleanFlushRegion	0x00000005UL
#define CacheShutdown		0x00000006UL

#define CacheControlEnable	0x00000000UL

#define CacheConfigCCSIDR	0x00000001UL
#define CacheConfigSize		0x00000100UL

/* Type for the Processor Idle function */
typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter, uint64_t *new_timeout_ticks);

/* Type for the Idle Tickle function */
typedef void (*idle_tickle_t)(void);

/* Type for the Idle Timer function */
typedef void (*idle_timer_t)(void *refcon, uint64_t *new_timeout_ticks);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Type for the Lockdown Handler */
typedef void (*lockdown_handler_t)(void *);

/* Type for the Platform specific Error Handler */
typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr);

/*
 * The exception callback (ex_cb) module allows kernel drivers to
 * register and receive callbacks for exceptions, and to indicate
 * actions to be taken by the platform kernel.
 * Currently this is supported for ARM64, but extending support to ARM32
 * should be straightforward.
 */

/* Supported exception classes for callbacks */
typedef enum
{
	EXCB_CLASS_ILLEGAL_INSTR_SET,
	EXCB_CLASS_MAX			// this must be last
}
ex_cb_class_t;

/* Actions indicated by callbacks to be taken by platform kernel */
typedef enum
{
	EXCB_ACTION_RERUN,		// re-run the faulting instruction
	EXCB_ACTION_NONE,		// continue normal exception handling
}
ex_cb_action_t;

/*
 * Exception state.
 * We cannot use a private kernel data structure such as arm_saved_state_t.
 * The CPSR and ESR are not clobbered when the callback function is invoked,
 * so the callback function can examine those registers directly;
 * the platform error handlers do the same.
 */
typedef struct
{
	vm_offset_t far;
}
ex_cb_state_t;

/* callback type definition */
typedef ex_cb_action_t (*ex_cb_t) (
	ex_cb_class_t		cb_class,
	void			*refcon,	// provided at registration
	const ex_cb_state_t	*state		// exception state
	);

/*
 * Callback registration.
 * Currently we support only one registered callback per class, but
 * it should be possible to support more callbacks.
 */
kern_return_t ex_cb_register(
	ex_cb_class_t	cb_class,
	ex_cb_t		cb,
	void		*refcon);

/*
 * Called internally by the platform kernel to invoke the registered
 * callback for a class.
 */
ex_cb_action_t ex_cb_invoke(
	ex_cb_class_t	cb_class,
	vm_offset_t	far);
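
/*
 * Example registration flow (a sketch with hypothetical names;
 * my_illegal_instr_cb and my_refcon are placeholders, not kernel symbols):
 *
 *	static ex_cb_action_t
 *	my_illegal_instr_cb(ex_cb_class_t cb_class, void *refcon,
 *	    const ex_cb_state_t *state)
 *	{
 *		// examine state->far, then tell the platform kernel
 *		// whether to re-run the faulting instruction
 *		return EXCB_ACTION_NONE;
 *	}
 *
 *	kern_return_t kr = ex_cb_register(EXCB_CLASS_ILLEGAL_INSTR_SET,
 *	    my_illegal_instr_cb, my_refcon);
 */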

void ml_parse_cpu_topology(void);

unsigned int ml_get_cpu_count(void);

int ml_get_boot_cpu_number(void);

int ml_get_cpu_number(uint32_t phys_id);

int ml_get_max_cpu_number(void);

/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
	unsigned long	vector_unit;
	unsigned long	cache_line_size;
	unsigned long	l1_icache_size;
	unsigned long	l1_dcache_size;
	unsigned long	l2_settings;
	unsigned long	l2_cache_size;
	unsigned long	l3_settings;
	unsigned long	l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;

typedef enum {
	CLUSTER_TYPE_SMP,
} cluster_type_t;

cluster_type_t ml_get_boot_cluster(void);

/* Struct for ml_processor_register */
struct ml_processor_info {
	cpu_id_t			cpu_id;
	vm_offset_t			start_paddr;
	boolean_t			supports_nap;
	void				*platform_cache_dispatch;
	time_base_enable_t		time_base_enable;
	processor_idle_t		processor_idle;
	idle_tickle_t			*idle_tickle;
	idle_timer_t			idle_timer;
	void				*idle_timer_refcon;
	vm_offset_t			powergate_stub_addr;
	uint32_t			powergate_stub_length;
	uint32_t			powergate_latency;
	platform_error_handler_t	platform_error_handler;
	uint64_t			regmap_paddr;
	uint32_t			phys_id;
	uint32_t			log_id;
	uint32_t			l2_access_penalty;
	uint32_t			cluster_id;
	cluster_type_t			cluster_type;
	uint32_t			l2_cache_id;
	uint32_t			l2_cache_size;
	uint32_t			l3_cache_id;
	uint32_t			l3_cache_size;
};
typedef struct ml_processor_info ml_processor_info_t;

#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Struct for ml_init_timebase */
struct tbd_ops {
	void		(*tbd_fiq_handler)(void);
	uint32_t	(*tbd_get_decrementer)(void);
	void		(*tbd_set_decrementer)(uint32_t dec_value);
};
typedef struct tbd_ops	*tbd_ops_t;
typedef struct tbd_ops	tbd_ops_data_t;
#endif

/* Register a processor */
kern_return_t ml_processor_register(
	ml_processor_info_t	*ml_processor_info,
	processor_t		*processor,
	ipi_handler_t		*ipi_handler);

/* Register a lockdown handler */
kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *);
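
/*
 * A minimal sketch (my_lockdown_handler and my_refcon are placeholder
 * names; exactly when the machine layer invokes the handler is an
 * implementation detail not documented here):
 *
 *	static void
 *	my_lockdown_handler(void *refcon)
 *	{
 *		// react to lockdown
 *	}
 *
 *	kern_return_t kr = ml_lockdown_handler_register(my_lockdown_handler, my_refcon);
 */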

#if XNU_KERNEL_PRIVATE
void ml_lockdown_init(void);

/* Check if the machine layer wants to intercept a panic call */
boolean_t ml_wants_panic_trap_to_debugger(void);

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */

/* Install an interrupt handler */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon);

vm_offset_t ml_static_vtop(vm_offset_t);

vm_offset_t ml_static_ptovirt(vm_offset_t);

/* Offset required to obtain absolute time value from tick counter */
uint64_t ml_get_abstime_offset(void);

/* Offset required to obtain continuous time value from tick counter */
uint64_t ml_get_conttime_offset(void);
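
/*
 * A sketch of the intended relationship (assuming the tick counter is the
 * value returned by ml_get_hwclock(), declared later in this header):
 *
 *	uint64_t abstime  = ml_get_hwclock() + ml_get_abstime_offset();
 *	uint64_t conttime = ml_get_hwclock() + ml_get_conttime_offset();
 *
 * Continuous time keeps advancing across sleep, so the difference between
 * the two values reflects time spent asleep (an inference from the
 * offsets' definitions, not a documented guarantee).
 */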

#ifdef __APPLE_API_UNSTABLE
/* PCI config cycle probing */
boolean_t ml_probe_read(
	vm_offset_t paddr,
	unsigned int *val);
boolean_t ml_probe_read_64(
	addr64_t paddr,
	unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(
	vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(
	addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(
	vm_offset_t paddr);
unsigned int ml_phys_read_half_64(
	addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(
	vm_offset_t paddr);
unsigned int ml_phys_read_64(
	addr64_t paddr);
unsigned int ml_phys_read_word(
	vm_offset_t paddr);
unsigned int ml_phys_read_word_64(
	addr64_t paddr);

unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

/* Read physical address double word */
unsigned long long ml_phys_read_double(
	vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(
	addr64_t paddr);

/* Write physical address byte */
void ml_phys_write_byte(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(
	addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(
	addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(
	addr64_t paddr, unsigned int data);
void ml_phys_write_word(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(
	addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(
	vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(
	addr64_t paddr, unsigned long long data);

void ml_static_mfree(
	vm_offset_t,
	vm_size_t);

kern_return_t
ml_static_protect(
	vm_offset_t start,
	vm_size_t size,
	vm_prot_t new_prot);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr);

/* Get processor info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
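
/*
 * Usage sketch:
 *
 *	ml_cpu_info_t info;
 *	ml_cpu_get_info(&info);
 *	// info.cache_line_size, info.l1_dcache_size, etc. are now populated
 */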

#endif /* __APPLE_API_UNSTABLE */

#ifdef __APPLE_API_PRIVATE
#ifdef XNU_KERNEL_PRIVATE
vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc,
	vm_offset_t virtdst,
	vm_size_t size);
boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size);
#endif /* XNU_KERNEL_PRIVATE */
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory-mapped IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size);

vm_offset_t ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size);
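
/*
 * An illustrative sketch (MY_DEVICE_PADDR and MY_REG_OFFSET are
 * placeholders): map a device register region, then access it with the
 * ml_io_* accessors declared above:
 *
 *	vm_offset_t regs = ml_io_map(MY_DEVICE_PADDR, PAGE_SIZE);
 *	unsigned int val = ml_io_read32(regs + MY_REG_OFFSET);
 */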

void ml_get_bouncepool_info(
	vm_offset_t *phys_addr,
	vm_size_t *size);

vm_map_address_t ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len);

/* boot memory allocation */
vm_offset_t ml_static_malloc(
	vm_size_t size);

void ml_init_timebase(
	void *args,
	tbd_ops_t tbd_funcs,
	vm_offset_t int_address,
	vm_offset_t int_value);

uint64_t ml_get_timebase(void);

void ml_init_lock_timeout(void);

boolean_t ml_delay_should_spin(uint64_t interval);

uint32_t ml_get_decrementer(void);

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
void timer_state_event_user_to_kernel(void);
void timer_state_event_kernel_to_user(void);
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

uint64_t ml_get_hwclock(void);

#ifdef __arm64__
boolean_t ml_get_timer_pending(void);
#endif

void platform_syscall(
	struct arm_saved_state *);

void ml_set_decrementer(
	uint32_t dec_value);

boolean_t is_user_contex(void);

void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address);

/* These calls are only valid if __ARM_USER_PROTECT__ is defined */
uintptr_t arm_user_protect_begin(
	thread_t thread);

void arm_user_protect_end(
	thread_t thread,
	uintptr_t up,
	boolean_t disable_interrupts);

#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

/* Zero bytes starting at a physical address */
void bzero_phys(
	addr64_t phys_address,
	vm_size_t length);

void bzero_phys_nc(addr64_t src64, vm_size_t bytes);

void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info);

#define MACHINE_GROUP			0x00000001
#define MACHINE_NETWORK_GROUP		0x10000000
#define MACHINE_NETWORK_WORKLOOP	0x00000001
#define MACHINE_NETWORK_NETISR		0x00000002

/* Initialize the maximum number of CPUs */
void ml_init_max_cpus(
	unsigned int max_cpus);

/* Return the maximum number of CPUs set by ml_init_max_cpus() */
unsigned int ml_get_max_cpus(void);

/* Return the maximum memory size */
unsigned int ml_get_machine_mem(void);

#ifdef XNU_KERNEL_PRIVATE
/* Return max offset */
vm_map_offset_t ml_get_max_offset(
	boolean_t is64,
	unsigned int option);
#define MACHINE_MAX_OFFSET_DEFAULT	0x01
#define MACHINE_MAX_OFFSET_MIN		0x02
#define MACHINE_MAX_OFFSET_MAX		0x04
#define MACHINE_MAX_OFFSET_DEVICE	0x08
#endif

extern void ml_cpu_up(void);
extern void ml_cpu_down(void);
extern void ml_arm_sleep(void);

extern uint64_t ml_get_wake_timebase(void);
extern uint64_t ml_get_conttime_wake_time(void);

/* Time since the system was reset (as part of boot/wake) */
uint64_t ml_get_time_since_reset(void);

#ifdef XNU_KERNEL_PRIVATE
/* Just a stub on ARM */
extern kern_return_t ml_interrupt_prewarm(uint64_t deadline);
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* XNU_KERNEL_PRIVATE */

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);

#ifdef MACH_KERNEL_PRIVATE
uint32_t get_fpscr(void);
void set_fpscr(uint32_t);

extern void init_vfp(void);
extern boolean_t get_vfp_enabled(void);
#if (__ARM_VFP__ >= 3)
extern unsigned int get_mvfr0(void);
extern unsigned int get_mvfr1(void);
#endif
extern void arm_debug_set_cp14(arm_debug_state_t *debug_state);
extern void fiq_context_init(boolean_t enable_fiq);

extern void reenable_async_aborts(void);
extern void cpu_idle_wfi(boolean_t wfi_fast);

#ifdef MONITOR
#define MONITOR_SET_ENTRY	0x800	/* Set kernel entry point from monitor */
#define MONITOR_LOCKDOWN	0x801	/* Enforce kernel text/rodata integrity */
unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3);
#endif /* MONITOR */

#if defined(KERNEL_INTEGRITY_KTRR)
void rorgn_stash_range(void);
void rorgn_lockdown(void);
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

#endif /* MACH_KERNEL_PRIVATE */

extern uint32_t arm_debug_read_dscr(void);

extern int set_be_bit(void);
extern int clr_be_bit(void);
extern int be_tracing(void);

typedef void (*broadcastFunc) (void *);
unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
kern_return_t cpu_xcall(int, broadcastFunc, void *);
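
/*
 * A broadcast sketch (my_flush is a placeholder; the reading of the first
 * two arguments, a synchronization counter and whether the calling CPU
 * also runs the function, follows the parameter usage in the
 * implementation and should be verified there before relying on it):
 *
 *	static void my_flush(void *arg) { ... }
 *
 *	uint32_t sync_count;
 *	cpu_broadcast_xcall(&sync_count, TRUE, my_flush, NULL);
 */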

#ifdef KERNEL_PRIVATE

/*
 * Interface to be used by the perf. controller to register a callback, in a
 * single-threaded fashion. The callback will receive notifications of
 * processor performance quality-of-service changes from the scheduler.
 */

#ifdef __arm64__
typedef void (*cpu_qos_update_t)(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2);
void cpu_qos_update_register(cpu_qos_update_t);
#endif /* __arm64__ */

struct going_on_core {
	uint64_t	thread_id;
	uint16_t	qos_class;
	uint16_t	urgency;	/* XCPM compatibility */
	uint32_t	is_32_bit : 1;	/* uses 32-bit ISA/register state in userspace (which may differ from address space size) */
	uint32_t	is_kernel_thread : 1;
	uint64_t	thread_group_id;
	void		*thread_group_data;
	uint64_t	scheduling_latency;	/* absolute time between when thread was made runnable and this ctx switch */
	uint64_t	start_time;
	uint64_t	scheduling_latency_at_same_basepri;	/* smaller of scheduling_latency and the time between the last change to base priority and this ctx switch */
	uint32_t	energy_estimate_nj;	/* return: in nanojoules */
};
typedef struct going_on_core *going_on_core_t;

struct going_off_core {
	uint64_t	thread_id;
	uint32_t	energy_estimate_nj;	/* return: in nanojoules */
	uint32_t	reserved;
	uint64_t	end_time;
	uint64_t	thread_group_id;
	void		*thread_group_data;
};
typedef struct going_off_core *going_off_core_t;

struct thread_group_data {
	uint64_t	thread_group_id;
	void		*thread_group_data;
	uint32_t	thread_group_size;
	uint32_t	thread_group_flags;
};
typedef struct thread_group_data *thread_group_data_t;

struct perfcontrol_max_runnable_latency {
	uint64_t	max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */];
};
typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t;

struct perfcontrol_work_interval {
	uint64_t	thread_id;
	uint16_t	qos_class;
	uint16_t	urgency;
	uint32_t	flags;		// notify
	uint64_t	work_interval_id;
	uint64_t	start;
	uint64_t	finish;
	uint64_t	deadline;
	uint64_t	next_start;
	uint64_t	thread_group_id;
	void		*thread_group_data;
	uint32_t	create_flags;
};
typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t;

/*
 * Structure to export per-CPU counters as part of the CLPC callout.
 * Contains only the fixed CPU counters (instructions and cycles); CLPC
 * would call back into XNU to get the configurable counters if needed.
 */
struct perfcontrol_cpu_counters {
	uint64_t	instructions;
	uint64_t	cycles;
};

/*
 * Structure used to pass information about a thread to CLPC
 */
struct perfcontrol_thread_data {
	/*
	 * Energy estimate (return value)
	 * The field is populated by CLPC and used to update the
	 * energy estimate of the thread
	 */
	uint32_t		energy_estimate_nj;
	/* Perfcontrol class for thread */
	perfcontrol_class_t	perfctl_class;
	/* Thread ID for the thread */
	uint64_t		thread_id;
	/* Thread Group ID */
	uint64_t		thread_group_id;
	/*
	 * Scheduling latency for threads at the same base priority.
	 * Calculated by the scheduler and passed into CLPC. The field is
	 * populated only in the thread_data structure for the thread
	 * going on-core.
	 */
	uint64_t		scheduling_latency_at_same_basepri;
	/* Thread Group data pointer */
	void			*thread_group_data;
	/* perfctl state pointer */
	void			*perfctl_state;
};

/*
 * All callouts from the scheduler are executed with interrupts
 * disabled. Callouts should be implemented in C with minimal
 * abstractions, and only use KPI exported by the mach/libkern
 * symbolset, restricted to routines like spinlocks and atomic
 * operations and scheduler routines as noted below. Spinlocks that
 * are used to synchronize data in the perfcontrol_state_t should only
 * ever be acquired with interrupts disabled, to avoid deadlocks where
 * a quantum expiration timer interrupt attempts to perform a callout
 * that attempts to lock a spinlock that is already held.
 */
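
/*
 * For example (a sketch of the discipline described above; the lock and
 * data names are placeholders): any code that can also run in thread
 * context must disable interrupts before taking a lock shared with a
 * callout, so that a callout from the quantum-expiration interrupt can
 * never spin on a lock the interrupted thread already holds:
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	lck_spin_lock(&my_perfctl_lock);
 *	// ... touch data reachable from the perfcontrol_state_t ...
 *	lck_spin_unlock(&my_perfctl_lock);
 *	(void) ml_set_interrupts_enabled(istate);
 */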

/*
 * When a processor is switching between two threads (after the
 * scheduler has chosen a new thread), the low-level platform layer
 * will call this routine, which should perform required timestamps,
 * MMIO register reads, or other state switching. No scheduler locks
 * are held during this callout.
 *
 * This function is called with interrupts ENABLED.
 */
typedef void (*sched_perfcontrol_context_switch_t)(perfcontrol_state_t, perfcontrol_state_t);

/*
 * Once the processor has switched to the new thread, the offcore
 * callout will indicate the old thread that is no longer being
 * run. The thread's scheduler lock is held, so it will not begin
 * running on another processor (in the case of preemption where it
 * remains runnable) until it completes. If the "thread_terminating"
 * boolean is TRUE, this will be the last callout for this thread_id.
 */
typedef void (*sched_perfcontrol_offcore_t)(perfcontrol_state_t, going_off_core_t /* populated by callee */, boolean_t);

/*
 * After the offcore callout, and after the old thread can potentially
 * start running on another processor, the oncore callout will be
 * called with the thread's scheduler lock held. The oncore callout is
 * also called any time one of the parameters in the going_on_core_t
 * structure changes, such as on priority/QoS changes and quantum
 * expiration, so the callee must not assume oncore callouts are
 * paired with offcore callouts.
 */
typedef void (*sched_perfcontrol_oncore_t)(perfcontrol_state_t, going_on_core_t);

/*
 * Periodically (on a scale of hundreds of milliseconds), the scheduler will
 * perform maintenance and report the maximum latency for runnable (but not
 * currently running) threads for each urgency class.
 */
typedef void (*sched_perfcontrol_max_runnable_latency_t)(perfcontrol_max_runnable_latency_t);

/*
 * When the kernel receives information about work intervals from userland,
 * it is passed along using this callback. No locks are held, although the state
 * object will not go away during the callout.
 */
typedef void (*sched_perfcontrol_work_interval_notify_t)(perfcontrol_state_t, perfcontrol_work_interval_t);

/*
 * These callbacks are used when thread groups are added, removed or properties
 * updated.
 * No blocking allocations (or anything else blocking) are allowed inside these
 * callbacks. No locks are allowed in these callbacks either, since the kernel
 * might be holding the thread/task locks.
 */
typedef void (*sched_perfcontrol_thread_group_init_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_deinit_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_flags_update_t)(thread_group_data_t);

/*
 * Sometime after the timeout set by sched_perfcontrol_update_callback_deadline has passed,
 * this function will be called, passing the timeout deadline that was previously armed as an argument.
 *
 * This is called inside context-switch/quantum-interrupt context and must follow the safety rules for that context.
 */
typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline);

/*
 * Context Switch Callout
 *
 * Parameters:
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the context switch
 * timestamp    - The timestamp for the context switch
 * flags        - Flags for other relevant information
 * offcore      - perfcontrol_data structure for thread going off-core
 * oncore       - perfcontrol_data structure for thread going on-core
 * cpu_counters - perfcontrol_cpu_counters for the CPU doing the switch
 */
typedef void (*sched_perfcontrol_csw_t)(
	perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
	struct perfcontrol_thread_data *offcore, struct perfcontrol_thread_data *oncore,
	struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused);

/*
 * Thread State Update Callout
 *
 * Parameters:
 * event     - The perfcontrol_event for this callout
 * cpu_id    - The CPU doing the state update
 * timestamp - The timestamp for the state update
 * flags     - Flags for other relevant information
 * thr_data  - perfcontrol_data structure for the thread being updated
 */
typedef void (*sched_perfcontrol_state_update_t)(
	perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
	struct perfcontrol_thread_data *thr_data, __unused void *unused);

/*
 * Callers should always use the CURRENT version so that the kernel can detect both older
 * and newer structure layouts. New callbacks should always be added at the end of the
 * structure, and xnu should expect existing source recompiled against newer headers
 * to pass NULL for unimplemented callbacks. Pass NULL as the callbacks parameter
 * to reset callbacks to their default in-kernel values.
 */

#define SCHED_PERFCONTROL_CALLBACKS_VERSION_0 (0) /* up-to oncore */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_1 (1) /* up-to max_runnable_latency */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_2 (2) /* up-to work_interval_notify */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_3 (3) /* up-to thread_group_deinit */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_4 (4) /* up-to deadline_passed */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6

struct sched_perfcontrol_callbacks {
	unsigned long version; /* Use SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT */
	sched_perfcontrol_offcore_t			offcore;
	sched_perfcontrol_context_switch_t		context_switch;
	sched_perfcontrol_oncore_t			oncore;
	sched_perfcontrol_max_runnable_latency_t	max_runnable_latency;
	sched_perfcontrol_work_interval_notify_t	work_interval_notify;
	sched_perfcontrol_thread_group_init_t		thread_group_init;
	sched_perfcontrol_thread_group_deinit_t		thread_group_deinit;
	sched_perfcontrol_deadline_passed_t		deadline_passed;
	sched_perfcontrol_csw_t				csw;
	sched_perfcontrol_state_update_t		state_update;
	sched_perfcontrol_thread_group_flags_update_t	thread_group_flags_update;
};
typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;

extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state);
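
/*
 * Registration sketch (my_offcore/my_oncore are placeholder
 * implementations; size_of_state is presumably the size of the
 * controller's per-thread perfcontrol state, here a hypothetical
 * struct my_state):
 *
 *	static struct sched_perfcontrol_callbacks my_callbacks = {
 *		.version = SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT,
 *		.offcore = my_offcore,
 *		.oncore  = my_oncore,
 *		// unimplemented callbacks left NULL
 *	};
 *
 *	sched_perfcontrol_register_callbacks(&my_callbacks, sizeof(struct my_state));
 */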

/*
 * Update the scheduler with the set of cores that should be used to dispatch new threads.
 * Non-recommended cores can still be used to field interrupts or run bound threads.
 * This should be called with interrupts enabled and no scheduler locks held.
 */
#define ALL_CORES_RECOMMENDED	(~(uint32_t)0)

extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);

/*
 * Update the deadline after which sched_perfcontrol_deadline_passed will be called.
 * Returns TRUE if it successfully canceled a previously set callback,
 * and FALSE if it did not (i.e. one wasn't set, or the callback already fired / is in flight).
 * The callback is automatically canceled when it fires, and does not repeat unless rearmed.
 *
 * This 'timer' executes as the scheduler switches between threads, on a non-idle core.
 *
 * There can be only one outstanding timer globally.
 */
extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline);
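
/*
 * Because the callback does not repeat, a controller wanting periodic
 * notifications re-arms from within its deadline_passed callback
 * (interval_abs is a placeholder absolute-time interval):
 *
 *	sched_perfcontrol_update_callback_deadline(mach_absolute_time() + interval_abs);
 */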

typedef enum perfcontrol_callout_type {
	PERFCONTROL_CALLOUT_ON_CORE,
	PERFCONTROL_CALLOUT_OFF_CORE,
	PERFCONTROL_CALLOUT_CONTEXT,
	PERFCONTROL_CALLOUT_STATE_UPDATE,
	/* Add other callout types here */
	PERFCONTROL_CALLOUT_MAX
} perfcontrol_callout_type_t;

typedef enum perfcontrol_callout_stat {
	PERFCONTROL_STAT_INSTRS,
	PERFCONTROL_STAT_CYCLES,
	/* Add other stat types here */
	PERFCONTROL_STAT_MAX
} perfcontrol_callout_stat_t;

uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);

#endif /* KERNEL_PRIVATE */

boolean_t machine_timeout_suspended(void);
void ml_get_power_state(boolean_t *, boolean_t *);

boolean_t user_cont_hwclock_allowed(void);
boolean_t user_timebase_allowed(void);
boolean_t ml_thread_is64bit(thread_t thread);
void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);

#ifdef __arm64__
void ml_set_align_checking(void);
boolean_t arm64_wfe_allowed(void);
#endif /* __arm64__ */

void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);
uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
#endif /* __APPLE_API_PRIVATE */

__END_DECLS

#endif /* _ARM_MACHINE_ROUTINES_H_ */