/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _ARM_MACHINE_ROUTINES_H_
#define _ARM_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>

#include <stdarg.h>

__BEGIN_DECLS

/* Interrupt handling */

void ml_cpu_signal(unsigned int cpu_id);
void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs);
uint64_t ml_cpu_signal_deferred_get_timer(void);
void ml_cpu_signal_deferred(unsigned int cpu_id);
void ml_cpu_signal_retract(unsigned int cpu_id);

/* Initialize Interrupts */
void ml_init_interrupt(void);

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
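
/*
 * Example (illustrative sketch, not part of this interface): the usual
 * pattern is to save the previous enable state returned by
 * ml_set_interrupts_enabled() and restore it afterwards, rather than
 * unconditionally re-enabling interrupts.
 *
 *     boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *     // ... critical section that must not be interrupted ...
 *     (void) ml_set_interrupts_enabled(istate);
 */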

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);

/* Clear interrupt spin debug state for thread */
#if INTERRUPT_MASKED_DEBUG
void ml_spin_debug_reset(thread_t thread);
void ml_spin_debug_clear(thread_t thread);
void ml_spin_debug_clear_self(void);
void ml_check_interrupts_disabled_duration(thread_t thread);
#endif

#ifdef XNU_KERNEL_PRIVATE
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif

/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
#if MACH_KERNEL_PRIVATE
/* Type for the Processor Cache Dispatch function */
typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1);
#endif

#define CacheConfig             0x00000000UL
#define CacheControl            0x00000001UL
#define CacheClean              0x00000002UL
#define CacheCleanRegion        0x00000003UL
#define CacheCleanFlush         0x00000004UL
#define CacheCleanFlushRegion   0x00000005UL
#define CacheShutdown           0x00000006UL

#define CacheControlEnable      0x00000000UL

#define CacheConfigCCSIDR       0x00000001UL
#define CacheConfigSize         0x00000100UL

/* Type for the Processor Idle function */
typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter, uint64_t *new_timeout_ticks);

/* Type for the Idle Tickle function */
typedef void (*idle_tickle_t)(void);

/* Type for the Idle Timer function */
typedef void (*idle_timer_t)(void *refcon, uint64_t *new_timeout_ticks);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Type for the Lockdown Handler */
typedef void (*lockdown_handler_t)(void *);

/* Type for the Platform specific Error Handler */
typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr);

/*
 * The exception callback (ex_cb) module allows kernel drivers to
 * register and receive callbacks for exceptions, and to indicate
 * actions to be taken by the platform kernel.
 * Currently this is supported for ARM64 only, but extending support
 * to ARM32 should be straightforward.
 */

/* Supported exception classes for callbacks */
typedef enum
{
    EXCB_CLASS_ILLEGAL_INSTR_SET,
    EXCB_CLASS_MAX              // this must be last
}
ex_cb_class_t;

/* Actions indicated by callbacks to be taken by platform kernel */
typedef enum
{
    EXCB_ACTION_RERUN,          // re-run the faulting instruction
    EXCB_ACTION_NONE,           // continue normal exception handling
}
ex_cb_action_t;

/*
 * Exception state
 * We cannot use a private kernel data structure such as arm_saved_state_t.
 * The CPSR and ESR are not clobbered when the callback function is invoked, so
 * those registers can be examined by the callback function;
 * the same is done in the platform error handlers.
 */
typedef struct
{
    vm_offset_t far;
}
ex_cb_state_t;

/* callback type definition */
typedef ex_cb_action_t (*ex_cb_t) (
    ex_cb_class_t           cb_class,
    void                    *refcon,    // provided at registration
    const ex_cb_state_t     *state      // exception state
    );

/*
 * Callback registration
 * Currently we support only one registered callback per class, but
 * it should be possible to support more callbacks.
 */
kern_return_t ex_cb_register(
    ex_cb_class_t   cb_class,
    ex_cb_t         cb,
    void            *refcon);
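
/*
 * Example (hypothetical sketch; my_cb, my_refcon and my_driver_owns_fault are
 * illustrative names, not part of this interface): a driver registers a
 * callback for the illegal-instruction-set class and asks the platform kernel
 * to re-run the faulting instruction only when it recognizes the fault address.
 *
 *     static ex_cb_action_t
 *     my_cb(ex_cb_class_t cb_class, void *refcon, const ex_cb_state_t *state)
 *     {
 *         if (my_driver_owns_fault(state->far))      // hypothetical helper
 *             return EXCB_ACTION_RERUN;
 *         return EXCB_ACTION_NONE;
 *     }
 *
 *     kern_return_t kr = ex_cb_register(EXCB_CLASS_ILLEGAL_INSTR_SET, my_cb, my_refcon);
 */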

/*
 * Called internally by the platform kernel to invoke the registered callback for a class
 */
ex_cb_action_t ex_cb_invoke(
    ex_cb_class_t   cb_class,
    vm_offset_t     far);


void ml_parse_cpu_topology(void);

unsigned int ml_get_cpu_count(void);

int ml_get_boot_cpu_number(void);

int ml_get_cpu_number(uint32_t phys_id);

int ml_get_max_cpu_number(void);

/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
    unsigned long vector_unit;
    unsigned long cache_line_size;
    unsigned long l1_icache_size;
    unsigned long l1_dcache_size;
    unsigned long l2_settings;
    unsigned long l2_cache_size;
    unsigned long l3_settings;
    unsigned long l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;

typedef enum {
    CLUSTER_TYPE_SMP,
} cluster_type_t;

cluster_type_t ml_get_boot_cluster(void);

/* Struct for ml_processor_register */
struct ml_processor_info {
    cpu_id_t                    cpu_id;
    vm_offset_t                 start_paddr;
    boolean_t                   supports_nap;
    void                        *platform_cache_dispatch;
    time_base_enable_t          time_base_enable;
    processor_idle_t            processor_idle;
    idle_tickle_t               *idle_tickle;
    idle_timer_t                idle_timer;
    void                        *idle_timer_refcon;
    vm_offset_t                 powergate_stub_addr;
    uint32_t                    powergate_stub_length;
    uint32_t                    powergate_latency;
    platform_error_handler_t    platform_error_handler;
    uint64_t                    regmap_paddr;
    uint32_t                    phys_id;
    uint32_t                    log_id;
    uint32_t                    l2_access_penalty;
    uint32_t                    cluster_id;
    cluster_type_t              cluster_type;
    uint32_t                    l2_cache_id;
    uint32_t                    l2_cache_size;
    uint32_t                    l3_cache_id;
    uint32_t                    l3_cache_size;
};
typedef struct ml_processor_info ml_processor_info_t;

#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Struct for ml_init_timebase */
struct tbd_ops {
    void     (*tbd_fiq_handler)(void);
    uint32_t (*tbd_get_decrementer)(void);
    void     (*tbd_set_decrementer)(uint32_t dec_value);
};
typedef struct tbd_ops *tbd_ops_t;
typedef struct tbd_ops tbd_ops_data_t;
#endif

/* Register a processor */
kern_return_t ml_processor_register(
    ml_processor_info_t *ml_processor_info,
    processor_t         *processor,
    ipi_handler_t       *ipi_handler);

/* Register a lockdown handler */
kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *);

#if XNU_KERNEL_PRIVATE
void ml_lockdown_init(void);

/* Check if the machine layer wants to intercept a panic call */
boolean_t ml_wants_panic_trap_to_debugger(void);

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */

/* Initialize Interrupts */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon);

vm_offset_t
ml_static_vtop(
    vm_offset_t);

vm_offset_t
ml_static_ptovirt(
    vm_offset_t);

/* Offset required to obtain absolute time value from tick counter */
uint64_t ml_get_abstime_offset(void);

/* Offset required to obtain continuous time value from tick counter */
uint64_t ml_get_conttime_offset(void);
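
/*
 * Illustrative sketch (an assumption, not a guarantee of this interface):
 * if the tick counter in question is the value returned by ml_get_hwclock()
 * (declared later in this header), the offsets would be applied as
 *
 *     uint64_t abstime  = ml_get_hwclock() + ml_get_abstime_offset();
 *     uint64_t conttime = ml_get_hwclock() + ml_get_conttime_offset();
 */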

#ifdef __APPLE_API_UNSTABLE
/* PCI config cycle probing */
boolean_t ml_probe_read(
    vm_offset_t paddr,
    unsigned int *val);
boolean_t ml_probe_read_64(
    addr64_t paddr,
    unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(
    vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(
    addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(
    vm_offset_t paddr);
unsigned int ml_phys_read_half_64(
    addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(
    vm_offset_t paddr);
unsigned int ml_phys_read_64(
    addr64_t paddr);
unsigned int ml_phys_read_word(
    vm_offset_t paddr);
unsigned int ml_phys_read_word_64(
    addr64_t paddr);

unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

/* Read physical address double word */
unsigned long long ml_phys_read_double(
    vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(
    addr64_t paddr);

/* Write physical address byte */
void ml_phys_write_byte(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(
    addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(
    addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(
    addr64_t paddr, unsigned int data);
void ml_phys_write_word(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(
    addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(
    vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(
    addr64_t paddr, unsigned long long data);

void ml_static_mfree(
    vm_offset_t,
    vm_size_t);

kern_return_t
ml_static_protect(
    vm_offset_t start,
    vm_size_t size,
    vm_prot_t new_prot);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr);

/* Get processor info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
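
/*
 * Example (illustrative sketch): query the processor's cache geometry.
 *
 *     ml_cpu_info_t cpu_info;
 *     ml_cpu_get_info(&cpu_info);
 *     // cpu_info.cache_line_size, cpu_info.l1_dcache_size, etc. are now valid
 */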

#endif /* __APPLE_API_UNSTABLE */

#ifdef __APPLE_API_PRIVATE
#ifdef XNU_KERNEL_PRIVATE
vm_size_t ml_nofault_copy(
    vm_offset_t virtsrc,
    vm_offset_t virtdst,
    vm_size_t size);
boolean_t ml_validate_nofault(
    vm_offset_t virtsrc, vm_size_t size);
#endif /* XNU_KERNEL_PRIVATE */
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size);

vm_offset_t ml_io_map_wcomb(
    vm_offset_t phys_addr,
    vm_size_t size);

void ml_get_bouncepool_info(
    vm_offset_t *phys_addr,
    vm_size_t   *size);

vm_map_address_t ml_map_high_window(
    vm_offset_t phys_addr,
    vm_size_t   len);

/* boot memory allocation */
vm_offset_t ml_static_malloc(
    vm_size_t size);

void ml_init_timebase(
    void        *args,
    tbd_ops_t   tbd_funcs,
    vm_offset_t int_address,
    vm_offset_t int_value);

uint64_t ml_get_timebase(void);

void ml_init_lock_timeout(void);

boolean_t ml_delay_should_spin(uint64_t interval);

uint32_t ml_get_decrementer(void);

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
void timer_state_event_user_to_kernel(void);
void timer_state_event_kernel_to_user(void);
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

uint64_t ml_get_hwclock(void);

#ifdef __arm64__
boolean_t ml_get_timer_pending(void);
#endif

void platform_syscall(
    struct arm_saved_state *);

void ml_set_decrementer(
    uint32_t dec_value);

boolean_t is_user_contex(
    void);

void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address);

/* These calls are only valid if __ARM_USER_PROTECT__ is defined */
uintptr_t arm_user_protect_begin(
    thread_t thread);

void arm_user_protect_end(
    thread_t thread,
    uintptr_t up,
    boolean_t disable_interrupts);

#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

/* Zero bytes starting at a physical address */
void bzero_phys(
    addr64_t phys_address,
    vm_size_t length);

void bzero_phys_nc(addr64_t src64, vm_size_t bytes);

void ml_thread_policy(
    thread_t thread,
    unsigned policy_id,
    unsigned policy_info);

#define MACHINE_GROUP            0x00000001
#define MACHINE_NETWORK_GROUP    0x10000000
#define MACHINE_NETWORK_WORKLOOP 0x00000001
#define MACHINE_NETWORK_NETISR   0x00000002
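
/*
 * Example (illustrative sketch): the policy_info word is a bitwise OR of the
 * flags above for the given policy_id, e.g. marking the current thread as
 * belonging to a networking workloop group.
 *
 *     ml_thread_policy(current_thread(), MACHINE_GROUP,
 *                      (MACHINE_NETWORK_GROUP | MACHINE_NETWORK_WORKLOOP));
 */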

/* Initialize the maximum number of CPUs */
void ml_init_max_cpus(
    unsigned int max_cpus);

/* Return the maximum number of CPUs set by ml_init_max_cpus() */
unsigned int ml_get_max_cpus(
    void);

/* Return the maximum memory size */
unsigned int ml_get_machine_mem(void);

#ifdef XNU_KERNEL_PRIVATE
/* Return max offset */
vm_map_offset_t ml_get_max_offset(
    boolean_t is64,
    unsigned int option);
#define MACHINE_MAX_OFFSET_DEFAULT  0x01
#define MACHINE_MAX_OFFSET_MIN      0x02
#define MACHINE_MAX_OFFSET_MAX      0x04
#define MACHINE_MAX_OFFSET_DEVICE   0x08
#endif

extern void ml_cpu_up(void);
extern void ml_cpu_down(void);
extern void ml_arm_sleep(void);

extern uint64_t ml_get_wake_timebase(void);
extern uint64_t ml_get_conttime_wake_time(void);

/* Time since the system was reset (as part of boot/wake) */
uint64_t ml_get_time_since_reset(void);

#ifdef XNU_KERNEL_PRIVATE
/* Just a stub on ARM */
extern kern_return_t ml_interrupt_prewarm(uint64_t deadline);
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* XNU_KERNEL_PRIVATE */

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);

#ifdef MACH_KERNEL_PRIVATE
uint32_t get_fpscr(void);
void set_fpscr(uint32_t);

extern void init_vfp(void);
extern boolean_t get_vfp_enabled(void);
extern void arm_debug_set_cp14(arm_debug_state_t *debug_state);
extern void fiq_context_init(boolean_t enable_fiq);

extern void reenable_async_aborts(void);
extern void cpu_idle_wfi(boolean_t wfi_fast);

#ifdef MONITOR
#define MONITOR_SET_ENTRY   0x800   /* Set kernel entry point from monitor */
#define MONITOR_LOCKDOWN    0x801   /* Enforce kernel text/rodata integrity */
unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3);
#endif /* MONITOR */

#if defined(KERNEL_INTEGRITY_KTRR)
void rorgn_stash_range(void);
void rorgn_lockdown(void);
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

#if __ARM_KERNEL_PROTECT__
extern void set_vbar_el1(uint64_t);
#endif /* __ARM_KERNEL_PROTECT__ */
#endif /* MACH_KERNEL_PRIVATE */

extern uint32_t arm_debug_read_dscr(void);

extern int set_be_bit(void);
extern int clr_be_bit(void);
extern int be_tracing(void);

typedef void (*broadcastFunc) (void *);
unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
kern_return_t cpu_xcall(int, broadcastFunc, void *);

#ifdef KERNEL_PRIVATE

/* Interface to be used by the perf. controller to register a callback, in a
 * single-threaded fashion. The callback will receive notifications of
 * processor performance quality-of-service changes from the scheduler.
 */

#ifdef __arm64__
typedef void (*cpu_qos_update_t)(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2);
void cpu_qos_update_register(cpu_qos_update_t);
#endif /* __arm64__ */
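
/*
 * Example (hypothetical sketch; my_qos_update is an illustrative name): the
 * performance controller supplies a function matching cpu_qos_update_t and
 * registers it once.
 *
 *     static void
 *     my_qos_update(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2)
 *     {
 *         // react to the scheduler's QoS change notification
 *     }
 *
 *     cpu_qos_update_register(my_qos_update);
 */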

struct going_on_core {
    uint64_t    thread_id;
    uint16_t    qos_class;
    uint16_t    urgency;                    /* XCPM compatibility */
    uint32_t    is_32_bit : 1;              /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */
    uint32_t    is_kernel_thread : 1;
    uint64_t    thread_group_id;
    void        *thread_group_data;
    uint64_t    scheduling_latency;         /* absolute time between when thread was made runnable and this ctx switch */
    uint64_t    start_time;
    uint64_t    scheduling_latency_at_same_basepri; /* smaller of the time between last change to base priority and ctx switch, and scheduling_latency */
    uint32_t    energy_estimate_nj;         /* return: In nanojoules */
};
typedef struct going_on_core *going_on_core_t;

struct going_off_core {
    uint64_t    thread_id;
    uint32_t    energy_estimate_nj;         /* return: In nanojoules */
    uint32_t    reserved;
    uint64_t    end_time;
    uint64_t    thread_group_id;
    void        *thread_group_data;
};
typedef struct going_off_core *going_off_core_t;

struct thread_group_data {
    uint64_t    thread_group_id;
    void        *thread_group_data;
    uint32_t    thread_group_size;
    uint32_t    thread_group_flags;
};
typedef struct thread_group_data *thread_group_data_t;

struct perfcontrol_max_runnable_latency {
    uint64_t    max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */];
};
typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t;

struct perfcontrol_work_interval {
    uint64_t    thread_id;
    uint16_t    qos_class;
    uint16_t    urgency;
    uint32_t    flags;                      // notify
    uint64_t    work_interval_id;
    uint64_t    start;
    uint64_t    finish;
    uint64_t    deadline;
    uint64_t    next_start;
    uint64_t    thread_group_id;
    void        *thread_group_data;
    uint32_t    create_flags;
};
typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t;

typedef enum {
    WORK_INTERVAL_START,
    WORK_INTERVAL_UPDATE,
    WORK_INTERVAL_FINISH
} work_interval_ctl_t;

struct perfcontrol_work_interval_instance {
    work_interval_ctl_t ctl;
    uint32_t            create_flags;
    uint64_t            complexity;
    uint64_t            thread_id;
    uint64_t            work_interval_id;
    uint64_t            instance_id;        /* out: start, in: update/finish */
    uint64_t            start;
    uint64_t            finish;
    uint64_t            deadline;
    uint64_t            thread_group_id;
    void                *thread_group_data;
};
typedef struct perfcontrol_work_interval_instance *perfcontrol_work_interval_instance_t;

/*
 * Structure to export per-CPU counters as part of the CLPC callout.
 * Contains only the fixed CPU counters (instructions and cycles); CLPC
 * would call back into XNU to get the configurable counters if needed.
 */
struct perfcontrol_cpu_counters {
    uint64_t    instructions;
    uint64_t    cycles;
};

/*
 * Structure used to pass information about a thread to CLPC
 */
struct perfcontrol_thread_data {
    /*
     * Energy estimate (return value)
     * The field is populated by CLPC and used to update the
     * energy estimate of the thread
     */
    uint32_t            energy_estimate_nj;
    /* Perfcontrol class for thread */
    perfcontrol_class_t perfctl_class;
    /* Thread ID for the thread */
    uint64_t            thread_id;
    /* Thread Group ID */
    uint64_t            thread_group_id;
    /*
     * Scheduling latency for threads at the same base priority.
     * Calculated by the scheduler and passed into CLPC. The field is
     * populated only in the thread_data structure for the thread
     * going on-core.
     */
    uint64_t            scheduling_latency_at_same_basepri;
    /* Thread Group data pointer */
    void                *thread_group_data;
    /* perfctl state pointer */
    void                *perfctl_state;
};

/*
 * All callouts from the scheduler are executed with interrupts
 * disabled. Callouts should be implemented in C with minimal
 * abstractions, and only use KPI exported by the mach/libkern
 * symbolset, restricted to routines like spinlocks and atomic
 * operations and scheduler routines as noted below. Spinlocks that
 * are used to synchronize data in the perfcontrol_state_t should only
 * ever be acquired with interrupts disabled, to avoid deadlocks where
 * a quantum expiration timer interrupt attempts to perform a callout
 * that attempts to lock a spinlock that is already held.
 */
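
/*
 * Illustrative sketch of that locking rule (my_state_lock is a hypothetical
 * lck_spin_t owned by the perf. controller): inside a callout, interrupts are
 * already disabled, so the lock can be taken directly; any other context must
 * disable interrupts first so a quantum-expiration callout cannot spin on a
 * lock its own CPU already holds.
 *
 *     // inside a scheduler callout (interrupts already disabled)
 *     lck_spin_lock(&my_state_lock);
 *     // ... update data shared with other callouts ...
 *     lck_spin_unlock(&my_state_lock);
 *
 *     // any other context
 *     boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *     lck_spin_lock(&my_state_lock);
 *     // ...
 *     lck_spin_unlock(&my_state_lock);
 *     (void) ml_set_interrupts_enabled(istate);
 */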

/*
 * When a processor is switching between two threads (after the
 * scheduler has chosen a new thread), the low-level platform layer
 * will call this routine, which should perform required timestamps,
 * MMIO register reads, or other state switching. No scheduler locks
 * are held during this callout.
 *
 * This function is called with interrupts ENABLED.
 */
typedef void (*sched_perfcontrol_context_switch_t)(perfcontrol_state_t, perfcontrol_state_t);

/*
 * Once the processor has switched to the new thread, the offcore
 * callout will indicate the old thread that is no longer being
 * run. The thread's scheduler lock is held, so it will not begin
 * running on another processor (in the case of preemption where it
 * remains runnable) until it completes. If the "thread_terminating"
 * boolean is TRUE, this will be the last callout for this thread_id.
 */
typedef void (*sched_perfcontrol_offcore_t)(perfcontrol_state_t, going_off_core_t /* populated by callee */, boolean_t);

/*
 * After the offcore callout and after the old thread can potentially
 * start running on another processor, the oncore callout will be
 * called with the thread's scheduler lock held. The oncore callout is
 * also called any time one of the parameters in the going_on_core_t
 * structure changes, like priority/QoS changes, and quantum
 * expiration, so the callout must not assume callouts are paired with
 * offcore callouts.
 */
typedef void (*sched_perfcontrol_oncore_t)(perfcontrol_state_t, going_on_core_t);

/*
 * Periodically (on hundreds of ms scale), the scheduler will perform
 * maintenance and report the maximum latency for runnable (but not currently
 * running) threads for each urgency class.
 */
typedef void (*sched_perfcontrol_max_runnable_latency_t)(perfcontrol_max_runnable_latency_t);

/*
 * When the kernel receives information about work intervals from userland,
 * it is passed along using this callback. No locks are held, although the state
 * object will not go away during the callout.
 */
typedef void (*sched_perfcontrol_work_interval_notify_t)(perfcontrol_state_t, perfcontrol_work_interval_t);
/*
 * Start, update and finish work interval instance with optional complexity estimate.
 */
typedef void (*sched_perfcontrol_work_interval_ctl_t)(perfcontrol_state_t, perfcontrol_work_interval_instance_t);

/*
 * These callbacks are used when thread groups are added, removed or properties
 * updated.
 * No blocking allocations (or anything else blocking) are allowed inside these
 * callbacks. Locks are also not allowed in these callbacks, since the kernel
 * might be holding the thread/task locks.
 */
typedef void (*sched_perfcontrol_thread_group_init_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_deinit_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_flags_update_t)(thread_group_data_t);

/*
 * Sometime after the timeout set by sched_perfcontrol_update_callback_deadline has passed,
 * this function will be called, passing the timeout deadline that was previously armed as an argument.
 *
 * This is called inside context-switch/quantum-interrupt context and must follow the safety rules for that context.
 */
typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline);

/*
 * Context Switch Callout
 *
 * Parameters:
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the context switch
 * timestamp    - The timestamp for the context switch
 * flags        - Flags for other relevant information
 * offcore      - perfcontrol_data structure for thread going off-core
 * oncore       - perfcontrol_data structure for thread going on-core
 * cpu_counters - perfcontrol_cpu_counters for the CPU doing the switch
 */
typedef void (*sched_perfcontrol_csw_t)(
    perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
    struct perfcontrol_thread_data *offcore, struct perfcontrol_thread_data *oncore,
    struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused);


/*
 * Thread State Update Callout
 *
 * Parameters:
 * event     - The perfcontrol_event for this callout
 * cpu_id    - The CPU doing the state update
 * timestamp - The timestamp for the state update
 * flags     - Flags for other relevant information
 * thr_data  - perfcontrol_data structure for the thread being updated
 */
typedef void (*sched_perfcontrol_state_update_t)(
    perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
    struct perfcontrol_thread_data *thr_data, __unused void *unused);

/*
 * Callers should always use the CURRENT version so that the kernel can detect both older
 * and newer structure layouts. New callbacks should always be added at the end of the
 * structure, and xnu should expect existing source recompiled against newer headers
 * to pass NULL for unimplemented callbacks. Pass NULL as the callbacks parameter
 * to reset callbacks to their default in-kernel values.
 */

#define SCHED_PERFCONTROL_CALLBACKS_VERSION_0 (0) /* up-to oncore */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_1 (1) /* up-to max_runnable_latency */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_2 (2) /* up-to work_interval_notify */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_3 (3) /* up-to thread_group_deinit */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_4 (4) /* up-to deadline_passed */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6

struct sched_perfcontrol_callbacks {
    unsigned long version; /* Use SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT */
    sched_perfcontrol_offcore_t                   offcore;
    sched_perfcontrol_context_switch_t            context_switch;
    sched_perfcontrol_oncore_t                    oncore;
    sched_perfcontrol_max_runnable_latency_t      max_runnable_latency;
    sched_perfcontrol_work_interval_notify_t      work_interval_notify;
    sched_perfcontrol_thread_group_init_t         thread_group_init;
    sched_perfcontrol_thread_group_deinit_t       thread_group_deinit;
    sched_perfcontrol_deadline_passed_t           deadline_passed;
    sched_perfcontrol_csw_t                       csw;
    sched_perfcontrol_state_update_t              state_update;
    sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update;
    sched_perfcontrol_work_interval_ctl_t         work_interval_ctl;
};
typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;

extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state);
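
/*
 * Example (hypothetical sketch; the my_* names are illustrative, and treating
 * size_of_state as the size of the controller's opaque per-thread state is an
 * assumption): unimplemented callbacks are left NULL, and the CURRENT version
 * constant is always used.
 *
 *     static struct sched_perfcontrol_callbacks my_callbacks = {
 *         .version        = SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT,
 *         .offcore        = my_offcore,
 *         .context_switch = my_context_switch,
 *         .oncore         = my_oncore,
 *         // remaining callbacks left NULL
 *     };
 *
 *     sched_perfcontrol_register_callbacks(&my_callbacks, sizeof(struct my_thread_state));
 */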

/*
 * Update the scheduler with the set of cores that should be used to dispatch new threads.
 * Non-recommended cores can still be used to field interrupts or run bound threads.
 * This should be called with interrupts enabled and no scheduler locks held.
 */
#define ALL_CORES_RECOMMENDED (~(uint32_t)0)

extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);
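
/*
 * Example (illustrative sketch; reading recommended_cores as one bit per
 * logical CPU is an assumption, though it is suggested by ALL_CORES_RECOMMENDED
 * above):
 *
 *     sched_perfcontrol_update_recommended_cores(0x3);                    // recommend only CPUs 0 and 1
 *     sched_perfcontrol_update_recommended_cores(ALL_CORES_RECOMMENDED);  // restore the default
 */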

/*
 * Update the deadline after which sched_perfcontrol_deadline_passed will be called.
 * Returns TRUE if it successfully canceled a previously set callback,
 * and FALSE if it did not (i.e. one wasn't set, or the callback already fired / is in flight).
 * The callback is automatically canceled when it fires, and does not repeat unless rearmed.
 *
 * This 'timer' executes as the scheduler switches between threads, on a non-idle core.
 *
 * There can be only one outstanding timer globally.
 */
extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline);
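
/*
 * Example (illustrative sketch; the 5 ms value is arbitrary): arm the single
 * global deadline relative to now, in mach absolute time units.
 *
 *     uint64_t ticks;
 *     nanoseconds_to_absolutetime(5 * NSEC_PER_MSEC, &ticks);
 *     boolean_t cancelled_prior = sched_perfcontrol_update_callback_deadline(mach_absolute_time() + ticks);
 */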

typedef enum perfcontrol_callout_type {
    PERFCONTROL_CALLOUT_ON_CORE,
    PERFCONTROL_CALLOUT_OFF_CORE,
    PERFCONTROL_CALLOUT_CONTEXT,
    PERFCONTROL_CALLOUT_STATE_UPDATE,
    /* Add other callout types here */
    PERFCONTROL_CALLOUT_MAX
} perfcontrol_callout_type_t;

typedef enum perfcontrol_callout_stat {
    PERFCONTROL_STAT_INSTRS,
    PERFCONTROL_STAT_CYCLES,
    /* Add other stat types here */
    PERFCONTROL_STAT_MAX
} perfcontrol_callout_stat_t;

uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);


#endif /* KERNEL_PRIVATE */

boolean_t machine_timeout_suspended(void);
void ml_get_power_state(boolean_t *, boolean_t *);

boolean_t user_cont_hwclock_allowed(void);
boolean_t user_timebase_allowed(void);
boolean_t ml_thread_is64bit(thread_t thread);
void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);

#ifdef __arm64__
void ml_set_align_checking(void);
boolean_t arm64_wfe_allowed(void);
#endif /* __arm64__ */

void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);
uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
#endif /* __APPLE_API_PRIVATE */

__END_DECLS

#endif /* _ARM_MACHINE_ROUTINES_H_ */