/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _ARM_MACHINE_ROUTINES_H_
#define _ARM_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>

#include <stdarg.h>

__BEGIN_DECLS

/* Interrupt handling */

void ml_cpu_signal(unsigned int cpu_id);
void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs);
uint64_t ml_cpu_signal_deferred_get_timer(void);
void ml_cpu_signal_deferred(unsigned int cpu_id);
void ml_cpu_signal_retract(unsigned int cpu_id);

/* Initialize Interrupts */
void ml_init_interrupt(void);

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
boolean_t ml_early_set_interrupts_enabled(boolean_t enable);
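
/*
 * Illustrative usage (a sketch, not mandated by this header): callers
 * typically save the previous interrupt state returned by
 * ml_set_interrupts_enabled() and restore it, rather than assuming
 * interrupts were enabled on entry.
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	// ... critical section with interrupts masked ...
 *	(void) ml_set_interrupts_enabled(istate);
 */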

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);

/* Clear interrupt spin debug state for thread */
#if INTERRUPT_MASKED_DEBUG
void ml_spin_debug_reset(thread_t thread);
void ml_spin_debug_clear(thread_t thread);
void ml_spin_debug_clear_self(void);
void ml_check_interrupts_disabled_duration(thread_t thread);
#endif

#ifdef XNU_KERNEL_PRIVATE
extern bool ml_snoop_thread_is_on_core(thread_t thread);
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif

/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
#if MACH_KERNEL_PRIVATE
/* Type for the Processor Cache Dispatch function */
typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1);
#endif

#define CacheConfig             0x00000000UL
#define CacheControl            0x00000001UL
#define CacheClean              0x00000002UL
#define CacheCleanRegion        0x00000003UL
#define CacheCleanFlush         0x00000004UL
#define CacheCleanFlushRegion   0x00000005UL
#define CacheShutdown           0x00000006UL

#define CacheControlEnable      0x00000000UL

#define CacheConfigCCSIDR       0x00000001UL
#define CacheConfigSize         0x00000100UL

/* Type for the Processor Idle function */
typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter, uint64_t *new_timeout_ticks);

/* Type for the Idle Tickle function */
typedef void (*idle_tickle_t)(void);

/* Type for the Idle Timer function */
typedef void (*idle_timer_t)(void *refcon, uint64_t *new_timeout_ticks);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Type for the Lockdown Handler */
typedef void (*lockdown_handler_t)(void *);

/* Type for the Platform specific Error Handler */
typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr);

/*
 * The exception callback (ex_cb) module allows kernel drivers to
 * register and receive callbacks for exceptions, and to indicate
 * actions to be taken by the platform kernel.
 * Currently this is supported for ARM64, but extending support to ARM32
 * should be straightforward.
 */

/* Supported exception classes for callbacks */
typedef enum {
	EXCB_CLASS_ILLEGAL_INSTR_SET,
#ifdef CONFIG_XNUPOST
	EXCB_CLASS_TEST1,
	EXCB_CLASS_TEST2,
	EXCB_CLASS_TEST3,
#endif
	EXCB_CLASS_MAX          // this must be last
}
ex_cb_class_t;

/* Actions indicated by callbacks to be taken by platform kernel */
typedef enum {
	EXCB_ACTION_RERUN,      // re-run the faulting instruction
	EXCB_ACTION_NONE,       // continue normal exception handling
#ifdef CONFIG_XNUPOST
	EXCB_ACTION_TEST_FAIL,
#endif
}
ex_cb_action_t;

/*
 * Exception state
 * We cannot use a private kernel data structure such as arm_saved_state_t.
 * The CPSR and ESR are not clobbered when the callback function is invoked, so
 * those registers can be examined by the callback function;
 * the same is done in the platform error handlers.
 */
typedef struct {
	vm_offset_t far;
}
ex_cb_state_t;

/* Callback type definition */
typedef ex_cb_action_t (*ex_cb_t) (
	ex_cb_class_t           cb_class,
	void                    *refcon,        // provided at registration
	const ex_cb_state_t     *state          // exception state
	);

/*
 * Callback registration
 * Currently we support only one registered callback per class, but
 * it should be possible to support more callbacks.
 */
kern_return_t ex_cb_register(
	ex_cb_class_t   cb_class,
	ex_cb_t         cb,
	void            *refcon);

/*
 * Called internally by platform kernel to invoke the registered callback for class
 */
ex_cb_action_t ex_cb_invoke(
	ex_cb_class_t   cb_class,
	vm_offset_t     far);

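/*
 * Example (an illustrative sketch only; my_ex_cb and my_refcon are
 * hypothetical names): a driver registers one callback for a class and
 * examines the fault address when the callback is invoked.
 *
 *	static ex_cb_action_t
 *	my_ex_cb(ex_cb_class_t cb_class, void *refcon, const ex_cb_state_t *state)
 *	{
 *		// Inspect state->far, then tell the platform kernel to
 *		// continue normal exception handling.
 *		return EXCB_ACTION_NONE;
 *	}
 *
 *	kern_return_t kr = ex_cb_register(EXCB_CLASS_ILLEGAL_INSTR_SET,
 *	    my_ex_cb, my_refcon);
 */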

void ml_parse_cpu_topology(void);

unsigned int ml_get_cpu_count(void);

int ml_get_boot_cpu_number(void);

int ml_get_cpu_number(uint32_t phys_id);

int ml_get_max_cpu_number(void);

/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
	unsigned long   vector_unit;
	unsigned long   cache_line_size;
	unsigned long   l1_icache_size;
	unsigned long   l1_dcache_size;
	unsigned long   l2_settings;
	unsigned long   l2_cache_size;
	unsigned long   l3_settings;
	unsigned long   l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;

typedef enum {
	CLUSTER_TYPE_SMP,
} cluster_type_t;

cluster_type_t ml_get_boot_cluster(void);

/* Struct for ml_processor_register */
struct ml_processor_info {
	cpu_id_t                        cpu_id;
	vm_offset_t                     start_paddr;
	boolean_t                       supports_nap;
	void                            *platform_cache_dispatch;
	time_base_enable_t              time_base_enable;
	processor_idle_t                processor_idle;
	idle_tickle_t                   *idle_tickle;
	idle_timer_t                    idle_timer;
	void                            *idle_timer_refcon;
	vm_offset_t                     powergate_stub_addr;
	uint32_t                        powergate_stub_length;
	uint32_t                        powergate_latency;
	platform_error_handler_t        platform_error_handler;
	uint64_t                        regmap_paddr;
	uint32_t                        phys_id;
	uint32_t                        log_id;
	uint32_t                        l2_access_penalty;
	uint32_t                        cluster_id;
	cluster_type_t                  cluster_type;
	uint32_t                        l2_cache_id;
	uint32_t                        l2_cache_size;
	uint32_t                        l3_cache_id;
	uint32_t                        l3_cache_size;
};
typedef struct ml_processor_info ml_processor_info_t;

#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Struct for ml_init_timebase */
struct tbd_ops {
	void            (*tbd_fiq_handler)(void);
	uint32_t        (*tbd_get_decrementer)(void);
	void            (*tbd_set_decrementer)(uint32_t dec_value);
};
typedef struct tbd_ops  *tbd_ops_t;
typedef struct tbd_ops  tbd_ops_data_t;
#endif

/*!
 * @function ml_processor_register
 *
 * @abstract callback from platform kext to register processor
 *
 * @discussion This function is called by the platform kext when a processor is
 * being registered. This is called while running on the CPU itself, as part of
 * its initialization.
 *
 * @param ml_processor_info provides machine-specific information about the
 * processor to xnu.
 *
 * @param processor is set as an out-parameter to an opaque handle that should
 * be used by the platform kext when referring to this processor in the future.
 *
 * @param ipi_handler is set as an out-parameter to the function that should be
 * registered as the IPI handler.
 *
 * @param pmi_handler is set as an out-parameter to the function that should be
 * registered as the PMI handler.
 *
 * @returns KERN_SUCCESS on success, and an error code otherwise.
 */
kern_return_t ml_processor_register(ml_processor_info_t *ml_processor_info,
    processor_t *processor, ipi_handler_t *ipi_handler,
    perfmon_interrupt_handler_func *pmi_handler);

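/*
 * Illustrative call sequence (a sketch; how `info` is populated is
 * platform-specific and not shown here):
 *
 *	ml_processor_info_t            info = { ... };
 *	processor_t                    proc;
 *	ipi_handler_t                  ipi;
 *	perfmon_interrupt_handler_func pmi;
 *
 *	kern_return_t kr = ml_processor_register(&info, &proc, &ipi, &pmi);
 *	// On success, the kext keeps `proc` as its handle for this CPU and
 *	// installs `ipi`/`pmi` as the corresponding interrupt handlers.
 */
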
/* Register a lockdown handler */
kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *);

#if XNU_KERNEL_PRIVATE
void ml_lockdown_init(void);

/* Check if the machine layer wants to intercept a panic call */
boolean_t ml_wants_panic_trap_to_debugger(void);

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */

/* Initialize Interrupts */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon);

vm_offset_t
ml_static_vtop(
	vm_offset_t);

vm_offset_t
ml_static_ptovirt(
	vm_offset_t);

vm_offset_t ml_static_slide(
	vm_offset_t vaddr);

vm_offset_t ml_static_unslide(
	vm_offset_t vaddr);

/* Offset required to obtain absolute time value from tick counter */
uint64_t ml_get_abstime_offset(void);

/* Offset required to obtain continuous time value from tick counter */
uint64_t ml_get_conttime_offset(void);

#ifdef __APPLE_API_UNSTABLE
/* PCI config cycle probing */
boolean_t ml_probe_read(
	vm_offset_t paddr,
	unsigned int *val);
boolean_t ml_probe_read_64(
	addr64_t paddr,
	unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(
	vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(
	addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(
	vm_offset_t paddr);
unsigned int ml_phys_read_half_64(
	addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(
	vm_offset_t paddr);
unsigned int ml_phys_read_64(
	addr64_t paddr);
unsigned int ml_phys_read_word(
	vm_offset_t paddr);
unsigned int ml_phys_read_word_64(
	addr64_t paddr);

unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
extern void ml_io_write8(uintptr_t vaddr, uint8_t val);
extern void ml_io_write16(uintptr_t vaddr, uint16_t val);
extern void ml_io_write32(uintptr_t vaddr, uint32_t val);
extern void ml_io_write64(uintptr_t vaddr, uint64_t val);
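
/*
 * Example (a sketch): device register access typically pairs ml_io_map()
 * (declared later in this header) with the sized accessors above.
 * REG_PHYS and REG_SIZE are hypothetical.
 *
 *	vm_offset_t base = ml_io_map(REG_PHYS, REG_SIZE);
 *	uint32_t    v    = ml_io_read32(base);
 *	ml_io_write32(base, v | 0x1);   // set a hypothetical enable bit
 */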

/* Read physical address double word */
unsigned long long ml_phys_read_double(
	vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(
	addr64_t paddr);

/* Write physical address byte */
void ml_phys_write_byte(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(
	addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(
	addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(
	addr64_t paddr, unsigned int data);
void ml_phys_write_word(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(
	addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(
	vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(
	addr64_t paddr, unsigned long long data);

void ml_static_mfree(
	vm_offset_t,
	vm_size_t);

kern_return_t
ml_static_protect(
	vm_offset_t start,
	vm_size_t size,
	vm_prot_t new_prot);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr);

/* Get processor info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);

#endif /* __APPLE_API_UNSTABLE */

#ifdef __APPLE_API_PRIVATE
#ifdef XNU_KERNEL_PRIVATE
vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc,
	vm_offset_t virtdst,
	vm_size_t size);
boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size);
#endif /* XNU_KERNEL_PRIVATE */
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size);

vm_offset_t ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size);

vm_offset_t ml_io_map_with_prot(
	vm_offset_t phys_addr,
	vm_size_t size,
	vm_prot_t prot);

void ml_get_bouncepool_info(
	vm_offset_t *phys_addr,
	vm_size_t *size);

vm_map_address_t ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len);

/* boot memory allocation */
vm_offset_t ml_static_malloc(
	vm_size_t size);

void ml_init_timebase(
	void *args,
	tbd_ops_t tbd_funcs,
	vm_offset_t int_address,
	vm_offset_t int_value);

uint64_t ml_get_timebase(void);

void ml_init_lock_timeout(void);

boolean_t ml_delay_should_spin(uint64_t interval);

void ml_delay_on_yield(void);

uint32_t ml_get_decrementer(void);

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
void timer_state_event_user_to_kernel(void);
void timer_state_event_kernel_to_user(void);
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

uint64_t ml_get_hwclock(void);

#ifdef __arm64__
boolean_t ml_get_timer_pending(void);
#endif

void platform_syscall(
	struct arm_saved_state *);

void ml_set_decrementer(
	uint32_t dec_value);

boolean_t is_user_contex(
	void);

void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address);

/* These calls are only valid if __ARM_USER_PROTECT__ is defined */
uintptr_t arm_user_protect_begin(
	thread_t thread);

void arm_user_protect_end(
	thread_t thread,
	uintptr_t up,
	boolean_t disable_interrupts);

#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

/* Zero bytes starting at a physical address */
void bzero_phys(
	addr64_t phys_address,
	vm_size_t length);

void bzero_phys_nc(addr64_t src64, vm_size_t bytes);

#if MACH_KERNEL_PRIVATE
#ifdef __arm64__
/*
 * Pattern-fill buffer with zeros or a 32-bit pattern;
 * target must be 128-byte aligned and sized a multiple of 128.
 * Both variants emit stores with non-temporal properties.
 */
void fill32_dczva(addr64_t, vm_size_t);
void fill32_nt(addr64_t, vm_size_t, uint32_t);
#endif
#endif

void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info);

#define MACHINE_GROUP                   0x00000001
#define MACHINE_NETWORK_GROUP           0x10000000
#define MACHINE_NETWORK_WORKLOOP        0x00000001
#define MACHINE_NETWORK_NETISR          0x00000002

/* Initialize the maximum number of CPUs */
void ml_init_max_cpus(
	unsigned int max_cpus);

/* Return the maximum number of CPUs set by ml_init_max_cpus() */
unsigned int ml_get_max_cpus(
	void);

/* Return the maximum memory size */
unsigned int ml_get_machine_mem(void);

#ifdef XNU_KERNEL_PRIVATE
/* Return max offset */
vm_map_offset_t ml_get_max_offset(
	boolean_t is64,
	unsigned int option);
#define MACHINE_MAX_OFFSET_DEFAULT      0x01
#define MACHINE_MAX_OFFSET_MIN          0x02
#define MACHINE_MAX_OFFSET_MAX          0x04
#define MACHINE_MAX_OFFSET_DEVICE       0x08
#endif

extern void ml_cpu_up(void);
extern void ml_cpu_down(void);
extern void ml_arm_sleep(void);

extern uint64_t ml_get_wake_timebase(void);
extern uint64_t ml_get_conttime_wake_time(void);

/* Time since the system was reset (as part of boot/wake) */
uint64_t ml_get_time_since_reset(void);

/*
 * Called by ApplePMGR to set wake time. Units and epoch are identical
 * to mach_continuous_time(). Has no effect on !HAS_CONTINUOUS_HWCLOCK
 * chips. If wake_time == UINT64_MAX, that means the wake time is
 * unknown and calls to ml_get_time_since_reset() will return UINT64_MAX.
 */
void ml_set_reset_time(uint64_t wake_time);

#ifdef XNU_KERNEL_PRIVATE
/* Just a stub on ARM */
extern kern_return_t ml_interrupt_prewarm(uint64_t deadline);
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* XNU_KERNEL_PRIVATE */

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);

#ifdef MACH_KERNEL_PRIVATE
uint32_t        get_fpscr(void);
void            set_fpscr(uint32_t);

#ifdef __arm64__
unsigned long update_mdscr(unsigned long clear, unsigned long set);
#endif /* __arm64__ */

extern void             init_vfp(void);
extern boolean_t        get_vfp_enabled(void);
extern void             arm_debug_set_cp14(arm_debug_state_t *debug_state);
extern void             fiq_context_init(boolean_t enable_fiq);
extern void             fiq_context_bootstrap(boolean_t enable_fiq);

extern void             reenable_async_aborts(void);
extern void             cpu_idle_wfi(boolean_t wfi_fast);

#ifdef MONITOR
#define MONITOR_SET_ENTRY       0x800   /* Set kernel entry point from monitor */
#define MONITOR_LOCKDOWN        0x801   /* Enforce kernel text/rodata integrity */
unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3);
#endif /* MONITOR */

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
void rorgn_stash_range(void);
void rorgn_lockdown(void);
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

#if __ARM_KERNEL_PROTECT__
extern void set_vbar_el1(uint64_t);
#endif /* __ARM_KERNEL_PROTECT__ */
#endif /* MACH_KERNEL_PRIVATE */

extern uint32_t arm_debug_read_dscr(void);

extern int      set_be_bit(void);
extern int      clr_be_bit(void);
extern int      be_tracing(void);

typedef void (*broadcastFunc) (void *);
unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
kern_return_t cpu_xcall(int, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
kern_return_t cpu_immediate_xcall(int, broadcastFunc, void *);
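
/*
 * Example (a sketch; my_func and my_arg are hypothetical): broadcast a
 * function to the CPUs, passing a synchronization word; the boolean is
 * commonly used to also run the function on the calling CPU.
 *
 *	static void my_func(void *arg) { ... }
 *
 *	uint32_t sync = 0;
 *	unsigned int n = cpu_broadcast_xcall(&sync, TRUE, my_func, my_arg);
 */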

#ifdef KERNEL_PRIVATE

/* Interface to be used by the perf. controller to register a callback, in a
 * single-threaded fashion. The callback will receive notifications of
 * processor performance quality-of-service changes from the scheduler.
 */

#ifdef __arm64__
typedef void (*cpu_qos_update_t)(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2);
void cpu_qos_update_register(cpu_qos_update_t);
#endif /* __arm64__ */

struct going_on_core {
	uint64_t        thread_id;
	uint16_t        qos_class;
	uint16_t        urgency;        /* XCPM compatibility */
	uint32_t        is_32_bit : 1;  /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */
	uint32_t        is_kernel_thread : 1;
	uint64_t        thread_group_id;
	void            *thread_group_data;
	uint64_t        scheduling_latency;     /* absolute time between when thread was made runnable and this ctx switch */
	uint64_t        start_time;
	uint64_t        scheduling_latency_at_same_basepri;
	/* smaller of the time between last change to base priority and ctx switch, and scheduling_latency */
	uint32_t        energy_estimate_nj;     /* return: In nanojoules */
};
typedef struct going_on_core *going_on_core_t;

struct going_off_core {
	uint64_t        thread_id;
	uint32_t        energy_estimate_nj;     /* return: In nanojoules */
	uint32_t        reserved;
	uint64_t        end_time;
	uint64_t        thread_group_id;
	void            *thread_group_data;
};
typedef struct going_off_core *going_off_core_t;

struct thread_group_data {
	uint64_t        thread_group_id;
	void            *thread_group_data;
	uint32_t        thread_group_size;
	uint32_t        thread_group_flags;
};
typedef struct thread_group_data *thread_group_data_t;

struct perfcontrol_max_runnable_latency {
	uint64_t        max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */];
};
typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t;

struct perfcontrol_work_interval {
	uint64_t        thread_id;
	uint16_t        qos_class;
	uint16_t        urgency;
	uint32_t        flags;  // notify
	uint64_t        work_interval_id;
	uint64_t        start;
	uint64_t        finish;
	uint64_t        deadline;
	uint64_t        next_start;
	uint64_t        thread_group_id;
	void            *thread_group_data;
	uint32_t        create_flags;
};
typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t;

typedef enum {
	WORK_INTERVAL_START,
	WORK_INTERVAL_UPDATE,
	WORK_INTERVAL_FINISH
} work_interval_ctl_t;

struct perfcontrol_work_interval_instance {
	work_interval_ctl_t     ctl;
	uint32_t                create_flags;
	uint64_t                complexity;
	uint64_t                thread_id;
	uint64_t                work_interval_id;
	uint64_t                instance_id;    /* out: start, in: update/finish */
	uint64_t                start;
	uint64_t                finish;
	uint64_t                deadline;
	uint64_t                thread_group_id;
	void                    *thread_group_data;
};
typedef struct perfcontrol_work_interval_instance *perfcontrol_work_interval_instance_t;
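
/*
 * Illustrative lifecycle (a sketch; tid, wi_id, and estimate are
 * hypothetical): the same instance structure flows through
 * START/UPDATE/FINISH, with instance_id produced at START and passed
 * back on later calls.
 *
 *	struct perfcontrol_work_interval_instance wi = {
 *		.ctl              = WORK_INTERVAL_START,
 *		.thread_id        = tid,
 *		.work_interval_id = wi_id,
 *	};
 *	// ... callee fills wi.instance_id at START ...
 *	wi.ctl        = WORK_INTERVAL_FINISH;
 *	wi.complexity = estimate;       // optional complexity estimate
 */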

/*
 * Structure to export per-CPU counters as part of the CLPC callout.
 * Contains only the fixed CPU counters (instructions and cycles); CLPC
 * would call back into XNU to get the configurable counters if needed.
 */
struct perfcontrol_cpu_counters {
	uint64_t        instructions;
	uint64_t        cycles;
};

/*
 * Structure used to pass information about a thread to CLPC
 */
struct perfcontrol_thread_data {
	/*
	 * Energy estimate (return value)
	 * The field is populated by CLPC and used to update the
	 * energy estimate of the thread
	 */
	uint32_t            energy_estimate_nj;
	/* Perfcontrol class for thread */
	perfcontrol_class_t perfctl_class;
	/* Thread ID for the thread */
	uint64_t            thread_id;
	/* Thread Group ID */
	uint64_t            thread_group_id;
	/*
	 * Scheduling latency for threads at the same base priority.
	 * Calculated by the scheduler and passed into CLPC. The field is
	 * populated only in the thread_data structure for the thread
	 * going on-core.
	 */
	uint64_t            scheduling_latency_at_same_basepri;
	/* Thread Group data pointer */
	void                *thread_group_data;
	/* perfctl state pointer */
	void                *perfctl_state;
};

/*
 * All callouts from the scheduler are executed with interrupts
 * disabled. Callouts should be implemented in C with minimal
 * abstractions, and only use KPI exported by the mach/libkern
 * symbolset, restricted to routines like spinlocks and atomic
 * operations and scheduler routines as noted below. Spinlocks that
 * are used to synchronize data in the perfcontrol_state_t should only
 * ever be acquired with interrupts disabled, to avoid deadlocks where
 * a quantum expiration timer interrupt attempts to perform a callout
 * that attempts to lock a spinlock that is already held.
 */
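
/*
 * For example (a sketch; my_lock is a hypothetical spinlock protecting
 * data in a perfcontrol_state_t), any acquisition outside callout
 * context must mask interrupts first:
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	lck_spin_lock(&my_lock);
 *	// ... touch state shared with the scheduler callouts ...
 *	lck_spin_unlock(&my_lock);
 *	(void) ml_set_interrupts_enabled(istate);
 */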

/*
 * When a processor is switching between two threads (after the
 * scheduler has chosen a new thread), the low-level platform layer
 * will call this routine, which should perform required timestamps,
 * MMIO register reads, or other state switching. No scheduler locks
 * are held during this callout.
 *
 * This function is called with interrupts ENABLED.
 */
typedef void (*sched_perfcontrol_context_switch_t)(perfcontrol_state_t, perfcontrol_state_t);

/*
 * Once the processor has switched to the new thread, the offcore
 * callout will indicate the old thread that is no longer being
 * run. The thread's scheduler lock is held, so it will not begin
 * running on another processor (in the case of preemption where it
 * remains runnable) until it completes. If the "thread_terminating"
 * boolean is TRUE, this will be the last callout for this thread_id.
 */
typedef void (*sched_perfcontrol_offcore_t)(perfcontrol_state_t, going_off_core_t /* populated by callee */, boolean_t);

/*
 * After the offcore callout and after the old thread can potentially
 * start running on another processor, the oncore callout will be
 * called with the thread's scheduler lock held. The oncore callout is
 * also called any time one of the parameters in the going_on_core_t
 * structure changes, like priority/QoS changes, and quantum
 * expiration, so the callout must not assume callouts are paired with
 * offcore callouts.
 */
typedef void (*sched_perfcontrol_oncore_t)(perfcontrol_state_t, going_on_core_t);

/*
 * Periodically (on hundreds of ms scale), the scheduler will perform
 * maintenance and report the maximum latency for runnable (but not currently
 * running) threads for each urgency class.
 */
typedef void (*sched_perfcontrol_max_runnable_latency_t)(perfcontrol_max_runnable_latency_t);

/*
 * When the kernel receives information about work intervals from userland,
 * it is passed along using this callback. No locks are held, although the state
 * object will not go away during the callout.
 */
typedef void (*sched_perfcontrol_work_interval_notify_t)(perfcontrol_state_t, perfcontrol_work_interval_t);

/*
 * Start, update and finish work interval instance with optional complexity estimate.
 */
typedef void (*sched_perfcontrol_work_interval_ctl_t)(perfcontrol_state_t, perfcontrol_work_interval_instance_t);

/*
 * These callbacks are used when thread groups are added, removed, or have
 * their properties updated.
 * No blocking allocations (or anything else blocking) are allowed inside
 * these callbacks. No locks are allowed in these callbacks either, since
 * the kernel might be holding the thread/task locks.
 */
typedef void (*sched_perfcontrol_thread_group_init_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_deinit_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_flags_update_t)(thread_group_data_t);

/*
 * Sometime after the timeout set by sched_perfcontrol_update_callback_deadline has passed,
 * this function will be called, passing the timeout deadline that was previously armed as an argument.
 *
 * This is called inside context-switch/quantum-interrupt context and must follow the safety rules for that context.
 */
typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline);

/*
 * Context Switch Callout
 *
 * Parameters:
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the context switch
 * timestamp    - The timestamp for the context switch
 * flags        - Flags for other relevant information
 * offcore      - perfcontrol_data structure for thread going off-core
 * oncore       - perfcontrol_data structure for thread going on-core
 * cpu_counters - perfcontrol_cpu_counters for the CPU doing the switch
 */
typedef void (*sched_perfcontrol_csw_t)(
	perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
	struct perfcontrol_thread_data *offcore, struct perfcontrol_thread_data *oncore,
	struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused);


/*
 * Thread State Update Callout
 *
 * Parameters:
 * event     - The perfcontrol_event for this callout
 * cpu_id    - The CPU doing the state update
 * timestamp - The timestamp for the state update
 * flags     - Flags for other relevant information
 * thr_data  - perfcontrol_data structure for the thread being updated
 */
typedef void (*sched_perfcontrol_state_update_t)(
	perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
	struct perfcontrol_thread_data *thr_data, __unused void *unused);

/*
 * Callers should always use the CURRENT version so that the kernel can detect both older
 * and newer structure layouts. New callbacks should always be added at the end of the
 * structure, and xnu should expect existing source recompiled against newer headers
 * to pass NULL for unimplemented callbacks. Pass NULL as the callbacks parameter
 * to reset callbacks to their default in-kernel values.
 */

#define SCHED_PERFCONTROL_CALLBACKS_VERSION_0 (0) /* up-to oncore */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_1 (1) /* up-to max_runnable_latency */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_2 (2) /* up-to work_interval_notify */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_3 (3) /* up-to thread_group_deinit */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_4 (4) /* up-to deadline_passed */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6

struct sched_perfcontrol_callbacks {
	unsigned long version; /* Use SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT */
	sched_perfcontrol_offcore_t                   offcore;
	sched_perfcontrol_context_switch_t            context_switch;
	sched_perfcontrol_oncore_t                    oncore;
	sched_perfcontrol_max_runnable_latency_t      max_runnable_latency;
	sched_perfcontrol_work_interval_notify_t      work_interval_notify;
	sched_perfcontrol_thread_group_init_t         thread_group_init;
	sched_perfcontrol_thread_group_deinit_t       thread_group_deinit;
	sched_perfcontrol_deadline_passed_t           deadline_passed;
	sched_perfcontrol_csw_t                       csw;
	sched_perfcontrol_state_update_t              state_update;
	sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update;
	sched_perfcontrol_work_interval_ctl_t         work_interval_ctl;
};
typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;

extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state);
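
/*
 * Example registration (a sketch; my_offcore, my_oncore, and my_state_t
 * are hypothetical, and unimplemented callbacks are left NULL):
 *
 *	static struct sched_perfcontrol_callbacks cb = {
 *		.version = SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT,
 *		.offcore = my_offcore,
 *		.oncore  = my_oncore,
 *	};
 *	sched_perfcontrol_register_callbacks(&cb, sizeof(my_state_t));
 */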

/*
 * Update the scheduler with the set of cores that should be used to dispatch new threads.
 * Non-recommended cores can still be used to field interrupts or run bound threads.
 * This should be called with interrupts enabled and no scheduler locks held.
 */
#define ALL_CORES_RECOMMENDED   (~(uint32_t)0)

extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);
extern void sched_override_recommended_cores_for_sleep(void);
extern void sched_restore_recommended_cores_after_sleep(void);

extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);

/*
 * Update the deadline after which sched_perfcontrol_deadline_passed will be called.
 * Returns TRUE if it successfully canceled a previously set callback,
 * and FALSE if it did not (i.e. one wasn't set, or the callback already fired / is in flight).
 * The callback is automatically canceled when it fires, and does not repeat unless rearmed.
 *
 * This 'timer' executes as the scheduler switches between threads, on a non-idle core.
 *
 * There can be only one outstanding timer globally.
 */
extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline);
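
/*
 * Example (a sketch; delta_ticks is a hypothetical interval already
 * converted to mach absolute-time units):
 *
 *	uint64_t deadline = mach_absolute_time() + delta_ticks;
 *	boolean_t canceled_previous =
 *	    sched_perfcontrol_update_callback_deadline(deadline);
 *	// Sometime after `deadline`, sched_perfcontrol_deadline_passed()
 *	// fires once with this deadline value, then disarms itself.
 */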

typedef enum perfcontrol_callout_type {
	PERFCONTROL_CALLOUT_ON_CORE,
	PERFCONTROL_CALLOUT_OFF_CORE,
	PERFCONTROL_CALLOUT_CONTEXT,
	PERFCONTROL_CALLOUT_STATE_UPDATE,
	/* Add other callout types here */
	PERFCONTROL_CALLOUT_MAX
} perfcontrol_callout_type_t;

typedef enum perfcontrol_callout_stat {
	PERFCONTROL_STAT_INSTRS,
	PERFCONTROL_STAT_CYCLES,
	/* Add other stat types here */
	PERFCONTROL_STAT_MAX
} perfcontrol_callout_stat_t;

uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);

#if defined(HAS_APPLE_PAC)
#define ONES(x) (BIT((x)) - 1)
#define PTR_MASK ONES(64 - T1SZ_BOOT)
#define PAC_MASK ~PTR_MASK
#define SIGN(p) ((p) & BIT(55))
#define UNSIGN_PTR(p) \
	(SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK))

void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_disable_user_jop(task_t task, boolean_t disable_user_jop);
void ml_thread_set_disable_user_jop(thread_t thread, boolean_t disable_user_jop);
void ml_set_kernelkey_enabled(boolean_t enable);
void *ml_auth_ptr_unchecked(void *ptr, unsigned key, uint64_t modifier);
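
/*
 * Illustrative use of the masks above (a sketch; signed_ptr is
 * hypothetical): strip the PAC bits before comparing a signed pointer
 * against an unauthenticated address. Bit 55 selects whether the upper
 * (PAC) bits are restored to all-ones (kernel) or all-zeroes (user).
 *
 *	uintptr_t stripped = UNSIGN_PTR((uintptr_t)signed_ptr);
 */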
#endif /* defined(HAS_APPLE_PAC) */


#endif /* KERNEL_PRIVATE */

boolean_t machine_timeout_suspended(void);
void ml_get_power_state(boolean_t *, boolean_t *);

uint32_t get_arm_cpu_version(void);
boolean_t user_cont_hwclock_allowed(void);
uint8_t user_timebase_type(void);
boolean_t ml_thread_is64bit(thread_t thread);

#ifdef __arm64__
void ml_set_align_checking(void);
boolean_t arm64_wfe_allowed(void);
#endif /* __arm64__ */

void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);
uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
#endif /* __APPLE_API_PRIVATE */

__END_DECLS

#endif /* _ARM_MACHINE_ROUTINES_H_ */