/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _ARM_MACHINE_ROUTINES_H_
#define _ARM_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>

#include <stdarg.h>

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE
#ifdef __arm64__
typedef bool (*expected_fault_handler_t)(arm_saved_state_t *);
#endif /* __arm64__ */
#endif /* XNU_KERNEL_PRIVATE */

/* Interrupt handling */

void ml_cpu_signal(unsigned int cpu_id);
void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs);
uint64_t ml_cpu_signal_deferred_get_timer(void);
void ml_cpu_signal_deferred(unsigned int cpu_id);
void ml_cpu_signal_retract(unsigned int cpu_id);
bool ml_cpu_signal_is_enabled(void);

/* Initialize Interrupts */
void ml_init_interrupt(void);

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
boolean_t ml_early_set_interrupts_enabled(boolean_t enable);

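/*
 * Usage sketch (illustrative only, not part of this interface): a common
 * pattern is to capture the previous interrupt-enable state, disable
 * interrupts around a short critical section, and then restore the saved
 * state. This assumes ml_set_interrupts_enabled() returns the prior enable
 * state, as its boolean_t return value suggests.
 */
#if 0 /* example */
static void
example_critical_section(void)
{
    boolean_t istate = ml_set_interrupts_enabled(FALSE);

    /* ... touch per-CPU state that must not be interrupted ... */

    (void) ml_set_interrupts_enabled(istate);
}
#endif
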
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);

/* Clear interrupt spin debug state for thread */
#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
extern uint64_t stackshot_interrupt_masked_timeout;

#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type)                                      \
do {                                                                                          \
    if (interrupt_masked_debug) {                                                             \
        thread_t thread = current_thread();                                                   \
        thread->machine.int_type = type;                                                      \
        thread->machine.int_handler_addr = (uintptr_t)VM_KERNEL_STRIP_PTR(handler_addr);      \
        thread->machine.inthandler_timestamp = ml_get_timebase();                             \
        thread->machine.int_vector = (uintptr_t)NULL;                                         \
    }                                                                                         \
} while (0)

#define INTERRUPT_MASKED_DEBUG_END()                           \
do {                                                           \
    if (interrupt_masked_debug) {                              \
        thread_t thread = current_thread();                    \
        ml_check_interrupt_handler_duration(thread);           \
    }                                                          \
} while (0)

void ml_irq_debug_start(uintptr_t handler, uintptr_t vector);
void ml_irq_debug_end(void);

void ml_spin_debug_reset(thread_t thread);
void ml_spin_debug_clear(thread_t thread);
void ml_spin_debug_clear_self(void);
void ml_check_interrupts_disabled_duration(thread_t thread);
void ml_check_stackshot_interrupt_disabled_duration(thread_t thread);
void ml_check_interrupt_handler_duration(thread_t thread);
#else
#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type)
#define INTERRUPT_MASKED_DEBUG_END()
#endif

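/*
 * Usage sketch (illustrative only): a low-level interrupt path would
 * typically bracket the handler invocation with these macros so that the
 * masked-interrupt watchdog can attribute long-running handlers. The
 * interrupt-type constant shown here is hypothetical; real callers pass an
 * interrupt-type value defined elsewhere in xnu.
 */
#if 0 /* example */
static void
example_dispatch_irq(void (*handler)(void *), void *arg)
{
    INTERRUPT_MASKED_DEBUG_START(handler, 1 /* hypothetical interrupt type */);
    handler(arg);
    INTERRUPT_MASKED_DEBUG_END();
}
#endif
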
#ifdef XNU_KERNEL_PRIVATE
extern bool ml_snoop_thread_is_on_core(thread_t thread);
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif

/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Type for the Processor Cache Dispatch function */
typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1);

typedef uint32_t (*get_decrementer_t)(void);
typedef void (*set_decrementer_t)(uint32_t);
typedef void (*fiq_handler_t)(void);

#endif

#define CacheConfig            0x00000000UL
#define CacheControl           0x00000001UL
#define CacheClean             0x00000002UL
#define CacheCleanRegion       0x00000003UL
#define CacheCleanFlush        0x00000004UL
#define CacheCleanFlushRegion  0x00000005UL
#define CacheShutdown          0x00000006UL

#define CacheControlEnable     0x00000000UL

#define CacheConfigCCSIDR      0x00000001UL
#define CacheConfigSize        0x00000100UL

/* Type for the Processor Idle function */
typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter, uint64_t *new_timeout_ticks);

/* Type for the Idle Tickle function */
typedef void (*idle_tickle_t)(void);

/* Type for the Idle Timer function */
typedef void (*idle_timer_t)(void *refcon, uint64_t *new_timeout_ticks);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Type for the Lockdown Handler */
typedef void (*lockdown_handler_t)(void *);

/* Type for the Platform specific Error Handler */
typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr);

/*
 * The exception callback (ex_cb) module allows kernel drivers to
 * register and receive callbacks for exceptions, and indicate
 * actions to be taken by the platform kernel.
 * Currently this is supported for ARM64, but extending support to ARM32
 * should be straightforward.
 */

/* Supported exception classes for callbacks */
typedef enum {
    EXCB_CLASS_ILLEGAL_INSTR_SET,
#ifdef CONFIG_XNUPOST
    EXCB_CLASS_TEST1,
    EXCB_CLASS_TEST2,
    EXCB_CLASS_TEST3,
#endif
    EXCB_CLASS_MAX  // this must be last
}
ex_cb_class_t;

/* Actions indicated by callbacks to be taken by platform kernel */
typedef enum {
    EXCB_ACTION_RERUN,      // re-run the faulting instruction
    EXCB_ACTION_NONE,       // continue normal exception handling
#ifdef CONFIG_XNUPOST
    EXCB_ACTION_TEST_FAIL,
#endif
}
ex_cb_action_t;

/*
 * Exception state
 * We cannot use a private kernel data structure such as arm_saved_state_t.
 * The CPSR and ESR are not clobbered when the callback function is invoked, so
 * those registers can be examined by the callback function;
 * the same is done in the platform error handlers.
 */
typedef struct {
    vm_offset_t far;
}
ex_cb_state_t;

/* callback type definition */
typedef ex_cb_action_t (*ex_cb_t) (
    ex_cb_class_t       cb_class,
    void                *refcon,   // provided at registration
    const ex_cb_state_t *state     // exception state
    );

/*
 * Callback registration
 * Currently we support only one registered callback per class, but
 * it should be possible to support more callbacks.
 */
kern_return_t ex_cb_register(
    ex_cb_class_t cb_class,
    ex_cb_t       cb,
    void          *refcon);

/*
 * Called internally by platform kernel to invoke the registered callback for class
 */
ex_cb_action_t ex_cb_invoke(
    ex_cb_class_t cb_class,
    vm_offset_t   far);


void ml_parse_cpu_topology(void);

unsigned int ml_get_cpu_count(void);

unsigned int ml_get_cluster_count(void);

int ml_get_boot_cpu_number(void);

int ml_get_cpu_number(uint32_t phys_id);

int ml_get_cluster_number(uint32_t phys_id);

int ml_get_max_cpu_number(void);

int ml_get_max_cluster_number(void);

unsigned int ml_get_first_cpu_id(unsigned int cluster_id);

#ifdef __arm64__
int ml_get_cluster_number_local(void);
unsigned int ml_get_cpu_number_local(void);
#endif /* __arm64__ */

/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
    unsigned long vector_unit;
    unsigned long cache_line_size;
    unsigned long l1_icache_size;
    unsigned long l1_dcache_size;
    unsigned long l2_settings;
    unsigned long l2_cache_size;
    unsigned long l3_settings;
    unsigned long l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;

typedef enum {
    CLUSTER_TYPE_SMP,
    CLUSTER_TYPE_E,
    CLUSTER_TYPE_P,
} cluster_type_t;

cluster_type_t ml_get_boot_cluster(void);

/*!
 * @typedef ml_topology_cpu_t
 * @brief Describes one CPU core in the topology.
 *
 * @field cpu_id            Logical CPU ID (EDT: cpu-id): 0, 1, 2, 3, 4, ...
 * @field phys_id           Physical CPU ID (EDT: reg). Same as MPIDR[15:0], i.e.
 *                          (cluster_id << 8) | core_number_within_cluster
 * @field cluster_id        Cluster ID (EDT: cluster-id)
 * @field die_id            Die ID (EDT: die-id)
 * @field cluster_type      The type of CPUs found in this cluster.
 * @field l2_access_penalty Indicates that the scheduler should try to de-prioritize a core because
 *                          L2 accesses are slower than on the boot processor.
 * @field l2_cache_size     Size of the L2 cache, in bytes. 0 if unknown or not present.
 * @field l2_cache_id       l2-cache-id property read from EDT.
 * @field l3_cache_size     Size of the L3 cache, in bytes. 0 if unknown or not present.
 * @field l3_cache_id       l3-cache-id property read from EDT.
 * @field cpu_IMPL_regs     IO-mapped virtual address of cpuX_IMPL (implementation-defined) register block.
 * @field cpu_IMPL_pa       Physical address of cpuX_IMPL register block.
 * @field cpu_IMPL_len      Length of cpuX_IMPL register block.
 * @field cpu_UTTDBG_regs   IO-mapped virtual address of cpuX_UTTDBG register block.
 * @field cpu_UTTDBG_pa     Physical address of cpuX_UTTDBG register block, if set in DT, else zero
 * @field cpu_UTTDBG_len    Length of cpuX_UTTDBG register block, if set in DT, else zero
 * @field coresight_regs    IO-mapped virtual address of CoreSight debug register block.
 * @field coresight_pa      Physical address of CoreSight register block.
 * @field coresight_len     Length of CoreSight register block.
 * @field self_ipi_irq      AIC IRQ vector for self IPI (cpuX->cpuX). 0 if unsupported.
 * @field other_ipi_irq     AIC IRQ vector for other IPI (cpuX->cpuY). 0 if unsupported.
 * @field pmi_irq           AIC IRQ vector for performance management IRQ. 0 if unsupported.
 * @field die_cluster_id    Cluster ID within the local die (EDT: die-cluster-id)
 * @field cluster_core_id   Core ID within the local cluster (EDT: cluster-core-id)
 */
typedef struct ml_topology_cpu {
    unsigned int   cpu_id;
    uint32_t       phys_id;
    unsigned int   cluster_id;
    unsigned int   die_id;
    cluster_type_t cluster_type;
    uint32_t       l2_access_penalty;
    uint32_t       l2_cache_size;
    uint32_t       l2_cache_id;
    uint32_t       l3_cache_size;
    uint32_t       l3_cache_id;
    vm_offset_t    cpu_IMPL_regs;
    uint64_t       cpu_IMPL_pa;
    uint64_t       cpu_IMPL_len;
    vm_offset_t    cpu_UTTDBG_regs;
    uint64_t       cpu_UTTDBG_pa;
    uint64_t       cpu_UTTDBG_len;
    vm_offset_t    coresight_regs;
    uint64_t       coresight_pa;
    uint64_t       coresight_len;
    int            self_ipi_irq;
    int            other_ipi_irq;
    int            pmi_irq;
    unsigned int   die_cluster_id;
    unsigned int   cluster_core_id;
} ml_topology_cpu_t;

/*!
 * @typedef ml_topology_cluster_t
 * @brief Describes one cluster in the topology.
 *
 * @field cluster_id     Cluster ID (EDT: cluster-id)
 * @field cluster_type   The type of CPUs found in this cluster.
 * @field num_cpus       Total number of usable CPU cores in this cluster.
 * @field first_cpu_id   The cpu_id of the first CPU in the cluster.
 * @field cpu_mask       A bitmask representing the cpu_id's that belong to the cluster. Example:
 *                       If the cluster contains CPU4 and CPU5, cpu_mask will be 0x30.
 * @field acc_IMPL_regs  IO-mapped virtual address of acc_IMPL (implementation-defined) register block.
 * @field acc_IMPL_pa    Physical address of acc_IMPL register block.
 * @field acc_IMPL_len   Length of acc_IMPL register block.
 * @field cpm_IMPL_regs  IO-mapped virtual address of cpm_IMPL (implementation-defined) register block.
 * @field cpm_IMPL_pa    Physical address of cpm_IMPL register block.
 * @field cpm_IMPL_len   Length of cpm_IMPL register block.
 */
typedef struct ml_topology_cluster {
    unsigned int   cluster_id;
    cluster_type_t cluster_type;
    unsigned int   num_cpus;
    unsigned int   first_cpu_id;
    uint64_t       cpu_mask;
    vm_offset_t    acc_IMPL_regs;
    uint64_t       acc_IMPL_pa;
    uint64_t       acc_IMPL_len;
    vm_offset_t    cpm_IMPL_regs;
    uint64_t       cpm_IMPL_pa;
    uint64_t       cpm_IMPL_len;
} ml_topology_cluster_t;

// Bump this version number any time any ml_topology_* struct changes, so
// that KPI users can check whether their headers are compatible with
// the running kernel.
#define CPU_TOPOLOGY_VERSION 1

/*!
 * @typedef ml_topology_info_t
 * @brief Describes the CPU topology for all APs in the system. Populated from EDT and read-only at runtime.
 * @discussion This struct only lists CPU cores that are considered usable by both iBoot and XNU. Some
 *             physically present CPU cores may be considered unusable due to configuration options like
 *             the "cpus=" boot-arg. Cores that are disabled in hardware will not show up in EDT at all, so
 *             they also will not be present in this struct.
 *
 * @field version        Version of the struct (set to CPU_TOPOLOGY_VERSION).
 * @field num_cpus       Total number of usable CPU cores.
 * @field max_cpu_id     The highest usable logical CPU ID.
 * @field num_clusters   Total number of AP CPU clusters on the system (usable or not).
 * @field max_cluster_id The highest cluster ID found in EDT.
 * @field cpus           List of |num_cpus| entries.
 * @field clusters       List of |num_clusters| entries.
 * @field boot_cpu       Points to the |cpus| entry for the boot CPU.
 * @field boot_cluster   Points to the |clusters| entry which contains the boot CPU.
 * @field chip_revision  Silicon revision reported by iBoot, which comes from the
 *                       SoC-specific fuse bits. See CPU_VERSION_xx macros for definitions.
 */
typedef struct ml_topology_info {
    unsigned int           version;
    unsigned int           num_cpus;
    unsigned int           max_cpu_id;
    unsigned int           num_clusters;
    unsigned int           max_cluster_id;
    unsigned int           max_die_id;
    ml_topology_cpu_t      *cpus;
    ml_topology_cluster_t  *clusters;
    ml_topology_cpu_t      *boot_cpu;
    ml_topology_cluster_t  *boot_cluster;
    unsigned int           chip_revision;
} ml_topology_info_t;

/*!
 * @function ml_get_topology_info
 * @result A pointer to the read-only topology struct. Does not need to be freed. Returns NULL
 *         if the struct hasn't been initialized or the feature is unsupported.
 */
const ml_topology_info_t *ml_get_topology_info(void);

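/*
 * Usage sketch (illustrative only): walking the topology returned by
 * ml_get_topology_info(). The version check mirrors the intent of
 * CPU_TOPOLOGY_VERSION described above; the kprintf formatting is incidental.
 */
#if 0 /* example */
static void
example_dump_topology(void)
{
    const ml_topology_info_t *info = ml_get_topology_info();

    if (info == NULL || info->version != CPU_TOPOLOGY_VERSION) {
        return; /* not initialized, or headers out of sync with the running kernel */
    }
    for (unsigned int i = 0; i < info->num_cpus; i++) {
        const ml_topology_cpu_t *cpu = &info->cpus[i];
        kprintf("cpu %u: cluster %u die %u phys 0x%x\n",
            cpu->cpu_id, cpu->cluster_id, cpu->die_id, cpu->phys_id);
    }
}
#endif
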
/*!
 * @function ml_map_cpu_pio
 * @brief Maps per-CPU and per-cluster PIO registers found in EDT. This needs to be
 *        called after arm_vm_init() so it can't be part of ml_parse_cpu_topology().
 */
void ml_map_cpu_pio(void);

/* Struct for ml_processor_register */
struct ml_processor_info {
    cpu_id_t                 cpu_id;
    vm_offset_t              start_paddr;
    boolean_t                supports_nap;
    void                     *platform_cache_dispatch;
    time_base_enable_t       time_base_enable;
    processor_idle_t         processor_idle;
    idle_tickle_t            *idle_tickle;
    idle_timer_t             idle_timer;
    void                     *idle_timer_refcon;
    vm_offset_t              powergate_stub_addr;
    uint32_t                 powergate_stub_length;
    uint32_t                 powergate_latency;
    platform_error_handler_t platform_error_handler;
    uint64_t                 regmap_paddr;
    uint32_t                 phys_id;
    uint32_t                 log_id;
    uint32_t                 l2_access_penalty;
    uint32_t                 cluster_id;
    cluster_type_t           cluster_type;
    uint32_t                 l2_cache_id;
    uint32_t                 l2_cache_size;
    uint32_t                 l3_cache_id;
    uint32_t                 l3_cache_size;
};
typedef struct ml_processor_info ml_processor_info_t;

#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Struct for ml_init_timebase */
struct tbd_ops {
    fiq_handler_t     tbd_fiq_handler;
    get_decrementer_t tbd_get_decrementer;
    set_decrementer_t tbd_set_decrementer;
};
typedef struct tbd_ops *tbd_ops_t;
typedef struct tbd_ops tbd_ops_data_t;
#endif

/*!
 * @function ml_processor_register
 *
 * @abstract callback from platform kext to register processor
 *
 * @discussion This function is called by the platform kext when a processor is
 * being registered. This is called while running on the CPU itself, as part of
 * its initialization.
 *
 * @param ml_processor_info provides machine-specific information about the
 * processor to xnu.
 *
 * @param processor is set as an out-parameter to an opaque handle that should
 * be used by the platform kext when referring to this processor in the future.
 *
 * @param ipi_handler is set as an out-parameter to the function that should be
 * registered as the IPI handler.
 *
 * @param pmi_handler is set as an out-parameter to the function that should be
 * registered as the PMI handler.
 *
 * @returns KERN_SUCCESS on success and an error code, otherwise.
 */
kern_return_t ml_processor_register(ml_processor_info_t *ml_processor_info,
    processor_t *processor, ipi_handler_t *ipi_handler,
    perfmon_interrupt_handler_func *pmi_handler);

/* Register a lockdown handler */
kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *);

#if XNU_KERNEL_PRIVATE
void ml_lockdown_init(void);

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */

/* Initialize Interrupts */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon);

vm_offset_t
ml_static_vtop(
    vm_offset_t);

kern_return_t
ml_static_verify_page_protections(
    uint64_t base, uint64_t size, vm_prot_t prot);

vm_offset_t
ml_static_ptovirt(
    vm_offset_t);

vm_offset_t ml_static_slide(
    vm_offset_t vaddr);

vm_offset_t ml_static_unslide(
    vm_offset_t vaddr);

/* Offset required to obtain absolute time value from tick counter */
uint64_t ml_get_abstime_offset(void);

/* Offset required to obtain continuous time value from tick counter */
uint64_t ml_get_conttime_offset(void);

#ifdef __APPLE_API_UNSTABLE
/* PCI config cycle probing */
boolean_t ml_probe_read(
    vm_offset_t paddr,
    unsigned int *val);
boolean_t ml_probe_read_64(
    addr64_t paddr,
    unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(
    vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(
    addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(
    vm_offset_t paddr);
unsigned int ml_phys_read_half_64(
    addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(
    vm_offset_t paddr);
unsigned int ml_phys_read_64(
    addr64_t paddr);
unsigned int ml_phys_read_word(
    vm_offset_t paddr);
unsigned int ml_phys_read_word_64(
    addr64_t paddr);

unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
extern void ml_io_write8(uintptr_t vaddr, uint8_t val);
extern void ml_io_write16(uintptr_t vaddr, uint16_t val);
extern void ml_io_write32(uintptr_t vaddr, uint32_t val);
extern void ml_io_write64(uintptr_t vaddr, uint64_t val);

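/*
 * Usage sketch (illustrative only): the MMIO accessors operate on a kernel
 * virtual address, typically one obtained earlier from ml_io_map() (declared
 * further below). The register offsets shown here are hypothetical.
 */
#if 0 /* example */
static void
example_poke_device(vm_offset_t regs_base)
{
    uint32_t status = ml_io_read32(regs_base + 0x04);  /* hypothetical status register */

    if (status & 0x1) {
        ml_io_write32(regs_base + 0x08, 0x1);           /* hypothetical ack register */
    }
}
#endif
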
/* Read physical address double word */
unsigned long long ml_phys_read_double(
    vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(
    addr64_t paddr);

/* Write physical address byte */
void ml_phys_write_byte(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(
    addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(
    addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(
    addr64_t paddr, unsigned int data);
void ml_phys_write_word(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(
    addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(
    vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(
    addr64_t paddr, unsigned long long data);

void ml_static_mfree(
    vm_offset_t,
    vm_size_t);

kern_return_t
ml_static_protect(
    vm_offset_t start,
    vm_size_t size,
    vm_prot_t new_prot);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr);

/* Get processor cache info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);

#endif /* __APPLE_API_UNSTABLE */

#ifdef __APPLE_API_PRIVATE
#ifdef XNU_KERNEL_PRIVATE
vm_size_t ml_nofault_copy(
    vm_offset_t virtsrc,
    vm_offset_t virtdst,
    vm_size_t size);
boolean_t ml_validate_nofault(
    vm_offset_t virtsrc, vm_size_t size);
#endif /* XNU_KERNEL_PRIVATE */
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size);

vm_offset_t ml_io_map_wcomb(
    vm_offset_t phys_addr,
    vm_size_t size);

vm_offset_t ml_io_map_with_prot(
    vm_offset_t phys_addr,
    vm_size_t size,
    vm_prot_t prot);

void ml_io_unmap(
    vm_offset_t addr,
    vm_size_t sz);

void ml_get_bouncepool_info(
    vm_offset_t *phys_addr,
    vm_size_t *size);

vm_map_address_t ml_map_high_window(
    vm_offset_t phys_addr,
    vm_size_t len);

/* boot memory allocation */
vm_offset_t ml_static_malloc(
    vm_size_t size);

void ml_init_timebase(
    void *args,
    tbd_ops_t tbd_funcs,
    vm_offset_t int_address,
    vm_offset_t int_value);

uint64_t ml_get_timebase(void);

uint64_t ml_get_speculative_timebase(void);

uint64_t ml_get_timebase_entropy(void);

void ml_init_lock_timeout(void);

boolean_t ml_delay_should_spin(uint64_t interval);

void ml_delay_on_yield(void);

uint32_t ml_get_decrementer(void);

#include <machine/config.h>

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
void timer_state_event_user_to_kernel(void);
void timer_state_event_kernel_to_user(void);
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

uint64_t ml_get_hwclock(void);

#ifdef __arm64__
boolean_t ml_get_timer_pending(void);
#endif

void platform_syscall(
    struct arm_saved_state *);

void ml_set_decrementer(
    uint32_t dec_value);

boolean_t is_user_contex(
    void);

void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address);

/* These calls are only valid if __ARM_USER_PROTECT__ is defined */
uintptr_t arm_user_protect_begin(
    thread_t thread);

void arm_user_protect_end(
    thread_t thread,
    uintptr_t up,
    boolean_t disable_interrupts);

#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

/* Zero bytes starting at a physical address */
void bzero_phys(
    addr64_t phys_address,
    vm_size_t length);

void bzero_phys_nc(addr64_t src64, vm_size_t bytes);

#if MACH_KERNEL_PRIVATE
#ifdef __arm64__
/* Pattern-fill buffer with zeros or a 32-bit pattern;
 * target must be 128-byte aligned and sized a multiple of 128
 * Both variants emit stores with non-temporal properties.
 */
void fill32_dczva(addr64_t, vm_size_t);
void fill32_nt(addr64_t, vm_size_t, uint32_t);
int cpu_interrupt_is_pending(void);
#endif
#endif

void ml_thread_policy(
    thread_t thread,
    unsigned policy_id,
    unsigned policy_info);

#define MACHINE_GROUP            0x00000001
#define MACHINE_NETWORK_GROUP    0x10000000
#define MACHINE_NETWORK_WORKLOOP 0x00000001
#define MACHINE_NETWORK_NETISR   0x00000002

/* Set the maximum number of CPUs */
void ml_set_max_cpus(
    unsigned int max_cpus);

/* Return the maximum number of CPUs set by ml_set_max_cpus(), waiting if necessary */
unsigned int ml_wait_max_cpus(
    void);

/* Return the maximum memory size */
unsigned int ml_get_machine_mem(void);

#ifdef XNU_KERNEL_PRIVATE
/* Return max offset */
vm_map_offset_t ml_get_max_offset(
    boolean_t is64,
    unsigned int option);
#define MACHINE_MAX_OFFSET_DEFAULT 0x01
#define MACHINE_MAX_OFFSET_MIN     0x02
#define MACHINE_MAX_OFFSET_MAX     0x04
#define MACHINE_MAX_OFFSET_DEVICE  0x08
#endif

extern void ml_cpu_up(void);
extern void ml_cpu_down(void);
extern void ml_arm_sleep(void);

extern uint64_t ml_get_wake_timebase(void);
extern uint64_t ml_get_conttime_wake_time(void);

/* Time since the system was reset (as part of boot/wake) */
uint64_t ml_get_time_since_reset(void);

/*
 * Called by ApplePMGR to set wake time. Units and epoch are identical
 * to mach_continuous_time(). Has no effect on !HAS_CONTINUOUS_HWCLOCK
 * chips. If wake_time == UINT64_MAX, that means the wake time is
 * unknown and calls to ml_get_time_since_reset() will return UINT64_MAX.
 */
void ml_set_reset_time(uint64_t wake_time);

#ifdef XNU_KERNEL_PRIVATE
/* Just a stub on ARM */
extern kern_return_t ml_interrupt_prewarm(uint64_t deadline);
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* XNU_KERNEL_PRIVATE */

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);

#ifdef MACH_KERNEL_PRIVATE
uint32_t get_fpscr(void);
void set_fpscr(uint32_t);
void machine_conf(void);
void machine_lockdown(void);

#ifdef __arm64__
unsigned long update_mdscr(unsigned long clear, unsigned long set);
#endif /* __arm64__ */

extern void arm_debug_set_cp14(arm_debug_state_t *debug_state);
extern void fiq_context_init(boolean_t enable_fiq);

extern void reenable_async_aborts(void);
#ifdef __arm__
extern boolean_t get_vfp_enabled(void);
extern void cpu_idle_wfi(boolean_t wfi_fast);
#endif

#ifdef __arm64__
uint64_t ml_cluster_wfe_timeout(uint32_t wfe_cluster_id);
#endif

#ifdef MONITOR
#define MONITOR_SET_ENTRY 0x800 /* Set kernel entry point from monitor */
#define MONITOR_LOCKDOWN  0x801 /* Enforce kernel text/rodata integrity */
unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3);
#endif /* MONITOR */

#if __ARM_KERNEL_PROTECT__
extern void set_vbar_el1(uint64_t);
#endif /* __ARM_KERNEL_PROTECT__ */
#endif /* MACH_KERNEL_PRIVATE */

extern uint32_t arm_debug_read_dscr(void);

extern int set_be_bit(void);
extern int clr_be_bit(void);
extern int be_tracing(void);

/* Please note that cpu_broadcast_xcall is not as simple as you would like it to be.
 * It will sometimes put the calling thread to sleep, and it is up to your callback
 * to wake it up as needed, where "as needed" is defined as "all other CPUs have
 * called the broadcast func". Look around the kernel for examples, or instead use
 * cpu_broadcast_xcall_simple() which does indeed act like you would expect, given
 * the prototype. cpu_broadcast_immediate_xcall has the same caveats and has a similar
 * _simple() wrapper.
 */
typedef void (*broadcastFunc) (void *);
unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
unsigned int cpu_broadcast_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_xcall(int, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_immediate_xcall(int, broadcastFunc, void *);

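/*
 * Usage sketch (illustrative only): cpu_broadcast_xcall_simple() behaves the
 * way its prototype suggests, so it is the easier entry point. The callback
 * and argument are hypothetical, and the interpretations in the comments
 * (the boolean_t selecting whether the calling CPU also runs the function,
 * the return value counting targeted CPUs) are assumptions, not guarantees.
 */
#if 0 /* example */
static void
example_flush_local_state(void *arg)
{
    (void)arg;
    /* ... per-CPU work, runs once on each targeted CPU ... */
}

static void
example_broadcast(void)
{
    /* Assumed: TRUE also runs the callback on the calling CPU. */
    unsigned int ran = cpu_broadcast_xcall_simple(TRUE, example_flush_local_state, NULL);
    (void)ran; /* assumed: number of CPUs the callback was dispatched to */
}
#endif
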
#ifdef KERNEL_PRIVATE

/* Interface to be used by the perf. controller to register a callback, in a
 * single-threaded fashion. The callback will receive notifications of
 * processor performance quality-of-service changes from the scheduler.
 */

#ifdef __arm64__
typedef void (*cpu_qos_update_t)(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2);
void cpu_qos_update_register(cpu_qos_update_t);
#endif /* __arm64__ */

struct going_on_core {
    uint64_t thread_id;
    uint16_t qos_class;
    uint16_t urgency;       /* XCPM compatibility */
    uint32_t is_32_bit : 1; /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */
    uint32_t is_kernel_thread : 1;
    uint64_t thread_group_id;
    void     *thread_group_data;
    uint64_t scheduling_latency;    /* absolute time between when thread was made runnable and this ctx switch */
    uint64_t start_time;
    uint64_t scheduling_latency_at_same_basepri;
    uint32_t energy_estimate_nj;    /* return: In nanojoules */
    /* smaller of the time between last change to base priority and ctx switch and scheduling_latency */
};
typedef struct going_on_core *going_on_core_t;

struct going_off_core {
    uint64_t thread_id;
    uint32_t energy_estimate_nj;    /* return: In nanojoules */
    uint32_t reserved;
    uint64_t end_time;
    uint64_t thread_group_id;
    void     *thread_group_data;
};
typedef struct going_off_core *going_off_core_t;

struct thread_group_data {
    uint64_t thread_group_id;
    void     *thread_group_data;
    uint32_t thread_group_size;
    uint32_t thread_group_flags;
};
typedef struct thread_group_data *thread_group_data_t;

struct perfcontrol_max_runnable_latency {
    uint64_t max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */];
};
typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t;

struct perfcontrol_work_interval {
    uint64_t thread_id;
    uint16_t qos_class;
    uint16_t urgency;
    uint32_t flags; // notify
    uint64_t work_interval_id;
    uint64_t start;
    uint64_t finish;
    uint64_t deadline;
    uint64_t next_start;
    uint64_t thread_group_id;
    void     *thread_group_data;
    uint32_t create_flags;
};
typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t;

typedef enum {
    WORK_INTERVAL_START,
    WORK_INTERVAL_UPDATE,
    WORK_INTERVAL_FINISH
} work_interval_ctl_t;

struct perfcontrol_work_interval_instance {
    work_interval_ctl_t ctl;
    uint32_t            create_flags;
    uint64_t            complexity;
    uint64_t            thread_id;
    uint64_t            work_interval_id;
    uint64_t            instance_id; /* out: start, in: update/finish */
    uint64_t            start;
    uint64_t            finish;
    uint64_t            deadline;
    uint64_t            thread_group_id;
    void                *thread_group_data;
};
typedef struct perfcontrol_work_interval_instance *perfcontrol_work_interval_instance_t;

/*
 * Structure to export per-CPU counters as part of the CLPC callout.
 * Contains only the fixed CPU counters (instructions and cycles); CLPC
 * would call back into XNU to get the configurable counters if needed.
 */
struct perfcontrol_cpu_counters {
    uint64_t instructions;
    uint64_t cycles;
};

/*
 * Structure used to pass information about a thread to CLPC
 */
struct perfcontrol_thread_data {
    /*
     * Energy estimate (return value)
     * The field is populated by CLPC and used to update the
     * energy estimate of the thread
     */
    uint32_t energy_estimate_nj;
    /* Perfcontrol class for thread */
    perfcontrol_class_t perfctl_class;
    /* Thread ID for the thread */
    uint64_t thread_id;
    /* Thread Group ID */
    uint64_t thread_group_id;
    /*
     * Scheduling latency for threads at the same base priority.
     * Calculated by the scheduler and passed into CLPC. The field is
     * populated only in the thread_data structure for the thread
     * going on-core.
     */
    uint64_t scheduling_latency_at_same_basepri;
    /* Thread Group data pointer */
    void *thread_group_data;
    /* perfctl state pointer */
    void *perfctl_state;
};

/*
 * All callouts from the scheduler are executed with interrupts
 * disabled. Callouts should be implemented in C with minimal
 * abstractions, and only use KPI exported by the mach/libkern
 * symbolset, restricted to routines like spinlocks and atomic
 * operations and scheduler routines as noted below. Spinlocks that
 * are used to synchronize data in the perfcontrol_state_t should only
 * ever be acquired with interrupts disabled, to avoid deadlocks where
 * a quantum expiration timer interrupt attempts to perform a callout
 * that attempts to lock a spinlock that is already held.
 */

/*
 * When a processor is switching between two threads (after the
 * scheduler has chosen a new thread), the low-level platform layer
 * will call this routine, which should perform required timestamps,
 * MMIO register reads, or other state switching. No scheduler locks
 * are held during this callout.
 *
 * This function is called with interrupts ENABLED.
 */
typedef void (*sched_perfcontrol_context_switch_t)(perfcontrol_state_t, perfcontrol_state_t);

/*
 * Once the processor has switched to the new thread, the offcore
 * callout will indicate the old thread that is no longer being
 * run. The thread's scheduler lock is held, so it will not begin
 * running on another processor (in the case of preemption where it
 * remains runnable) until it completes. If the "thread_terminating"
 * boolean is TRUE, this will be the last callout for this thread_id.
 */
typedef void (*sched_perfcontrol_offcore_t)(perfcontrol_state_t, going_off_core_t /* populated by callee */, boolean_t);

/*
 * After the offcore callout and after the old thread can potentially
 * start running on another processor, the oncore callout will be
 * called with the thread's scheduler lock held. The oncore callout is
 * also called any time one of the parameters in the going_on_core_t
 * structure changes, like priority/QoS changes, and quantum
 * expiration, so the callout must not assume callouts are paired with
 * offcore callouts.
 */
typedef void (*sched_perfcontrol_oncore_t)(perfcontrol_state_t, going_on_core_t);

/*
 * Periodically (on hundreds of ms scale), the scheduler will perform
 * maintenance and report the maximum latency for runnable (but not currently
 * running) threads for each urgency class.
 */
typedef void (*sched_perfcontrol_max_runnable_latency_t)(perfcontrol_max_runnable_latency_t);

/*
 * When the kernel receives information about work intervals from userland,
 * it is passed along using this callback. No locks are held, although the state
 * object will not go away during the callout.
 */
typedef void (*sched_perfcontrol_work_interval_notify_t)(perfcontrol_state_t, perfcontrol_work_interval_t);

/*
 * Start, update and finish work interval instance with optional complexity estimate.
 */
typedef void (*sched_perfcontrol_work_interval_ctl_t)(perfcontrol_state_t, perfcontrol_work_interval_instance_t);

/*
 * These callbacks are used when thread groups are added, removed or properties
 * updated.
 * No blocking allocations (or anything else blocking) are allowed inside these
 * callbacks. No locks allowed in these callbacks as well since the kernel might
 * be holding the thread/task locks.
 */
typedef void (*sched_perfcontrol_thread_group_init_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_deinit_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_flags_update_t)(thread_group_data_t);

/*
 * Sometime after the timeout set by sched_perfcontrol_update_callback_deadline has passed,
 * this function will be called, passing the timeout deadline that was previously armed as an argument.
 *
 * This is called inside context-switch/quantum-interrupt context and must follow the safety rules for that context.
 */
typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline);

/*
 * Context Switch Callout
 *
 * Parameters:
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the context switch
 * timestamp    - The timestamp for the context switch
 * flags        - Flags for other relevant information
 * offcore      - perfcontrol_data structure for thread going off-core
 * oncore       - perfcontrol_data structure for thread going on-core
 * cpu_counters - perfcontrol_cpu_counters for the CPU doing the switch
 */
typedef void (*sched_perfcontrol_csw_t)(
    perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
    struct perfcontrol_thread_data *offcore, struct perfcontrol_thread_data *oncore,
    struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused);


/*
 * Thread State Update Callout
 *
 * Parameters:
 * event     - The perfcontrol_event for this callout
 * cpu_id    - The CPU doing the state update
 * timestamp - The timestamp for the state update
 * flags     - Flags for other relevant information
 * thr_data  - perfcontrol_data structure for the thread being updated
 */
typedef void (*sched_perfcontrol_state_update_t)(
    perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
    struct perfcontrol_thread_data *thr_data, __unused void *unused);

/*
 * Thread Group Blocking Relationship Callout
 *
 * Parameters:
 * blocked_tg        - Thread group blocking on progress of another thread group
 * blocking_tg       - Thread group blocking progress of another thread group
 * flags             - Flags for other relevant information
 * blocked_thr_state - Per-thread perfcontrol state for blocked thread
 */
typedef void (*sched_perfcontrol_thread_group_blocked_t)(
    thread_group_data_t blocked_tg, thread_group_data_t blocking_tg, uint32_t flags, perfcontrol_state_t blocked_thr_state);

/*
 * Thread Group Unblocking Callout
 *
 * Parameters:
 * unblocked_tg        - Thread group being unblocked from making forward progress
 * unblocking_tg       - Thread group unblocking progress of another thread group
 * flags               - Flags for other relevant information
 * unblocked_thr_state - Per-thread perfcontrol state for unblocked thread
 */
typedef void (*sched_perfcontrol_thread_group_unblocked_t)(
    thread_group_data_t unblocked_tg, thread_group_data_t unblocking_tg, uint32_t flags, perfcontrol_state_t unblocked_thr_state);

/*
 * Callers should always use the CURRENT version so that the kernel can detect both older
 * and newer structure layouts. New callbacks should always be added at the end of the
 * structure, and xnu should expect existing source recompiled against newer headers
 * to pass NULL for unimplemented callbacks. Pass NULL as the callbacks parameter
 * to reset callbacks to their default in-kernel values.
 */

#define SCHED_PERFCONTROL_CALLBACKS_VERSION_0 (0) /* up-to oncore */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_1 (1) /* up-to max_runnable_latency */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_2 (2) /* up-to work_interval_notify */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_3 (3) /* up-to thread_group_deinit */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_4 (4) /* up-to deadline_passed */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_8 (8) /* up-to thread_group_unblocked */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6

struct sched_perfcontrol_callbacks {
    unsigned long version; /* Use SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT */
    sched_perfcontrol_offcore_t offcore;
    sched_perfcontrol_context_switch_t context_switch;
    sched_perfcontrol_oncore_t oncore;
    sched_perfcontrol_max_runnable_latency_t max_runnable_latency;
    sched_perfcontrol_work_interval_notify_t work_interval_notify;
    sched_perfcontrol_thread_group_init_t thread_group_init;
    sched_perfcontrol_thread_group_deinit_t thread_group_deinit;
    sched_perfcontrol_deadline_passed_t deadline_passed;
    sched_perfcontrol_csw_t csw;
    sched_perfcontrol_state_update_t state_update;
    sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update;
    sched_perfcontrol_work_interval_ctl_t work_interval_ctl;
    sched_perfcontrol_thread_group_blocked_t thread_group_blocked;
    sched_perfcontrol_thread_group_unblocked_t thread_group_unblocked;
};
typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;

extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state);

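/*
 * Usage sketch (illustrative only): a performance controller fills in the
 * callbacks it implements, leaves the rest NULL, and registers the table.
 * The callback body and the per-thread state size shown here are hypothetical.
 */
#if 0 /* example */
static void
example_oncore(perfcontrol_state_t state, going_on_core_t on)
{
    (void)state;
    on->energy_estimate_nj = 0; /* return value consumed by the scheduler */
}

static void
example_register_clpc_callbacks(void)
{
    static struct sched_perfcontrol_callbacks cb = {
        .version = SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT,
        .oncore  = example_oncore,
        /* unimplemented callbacks stay NULL */
    };
    sched_perfcontrol_register_callbacks(&cb, sizeof(uint64_t) /* hypothetical per-thread state size */);
}
#endif
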
/*
 * Update the scheduler with the set of cores that should be used to dispatch new threads.
 * Non-recommended cores can still be used to field interrupts or run bound threads.
 * This should be called with interrupts enabled and no scheduler locks held.
 */
#define ALL_CORES_RECOMMENDED (~(uint32_t)0)

extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);
extern void sched_override_recommended_cores_for_sleep(void);
extern void sched_restore_recommended_cores_after_sleep(void);
extern void sched_perfcontrol_inherit_recommendation_from_tg(perfcontrol_class_t perfctl_class, boolean_t inherit);

extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);

/*
 * Edge Scheduler-CLPC Interface
 *
 * sched_perfcontrol_thread_group_preferred_clusters_set()
 *
 * The Edge scheduler expects thread group recommendations to be specific clusters rather
 * than just E/P. In order to allow finer-grained control, CLPC can specify an override
 * preferred cluster per QoS bucket. CLPC passes a common preferred cluster `tg_preferred_cluster`
 * and an array of size [PERFCONTROL_CLASS_MAX] with overrides for specific perfctl classes.
 * The scheduler translates these preferences into sched_bucket
 * preferences and applies the changes.
 */
/* Token to indicate a particular perfctl class is not overridden */
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE ((uint32_t)~0)

/*
 * CLPC can also indicate if there should be an immediate rebalancing of threads of this TG as
 * part of this preferred cluster change. It does that by specifying the following options.
 */
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING  0x1
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNABLE 0x2
typedef uint64_t sched_perfcontrol_preferred_cluster_options_t;

extern void sched_perfcontrol_thread_group_preferred_clusters_set(void *machine_data, uint32_t tg_preferred_cluster,
    uint32_t overrides[PERFCONTROL_CLASS_MAX], sched_perfcontrol_preferred_cluster_options_t options);

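/*
 * Usage sketch (illustrative only): recommending one cluster for a thread
 * group while leaving every perfctl class at the common recommendation, and
 * asking for running threads to be rebalanced immediately. The machine_data
 * pointer and the cluster ID are hypothetical.
 */
#if 0 /* example */
static void
example_prefer_cluster(void *tg_machine_data)
{
    uint32_t overrides[PERFCONTROL_CLASS_MAX];

    for (unsigned int i = 0; i < PERFCONTROL_CLASS_MAX; i++) {
        overrides[i] = SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE;
    }
    sched_perfcontrol_thread_group_preferred_clusters_set(tg_machine_data,
        1 /* hypothetical cluster id */, overrides,
        SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING);
}
#endif
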
/*
 * Edge Scheduler-CLPC Interface
 *
 * sched_perfcontrol_edge_matrix_get()/sched_perfcontrol_edge_matrix_set()
 *
 * The Edge scheduler uses edges between clusters to define the likelihood of migrating threads
 * across clusters. The edge config between any two clusters defines the edge weight and whether
 * migration and steal operations are allowed across that edge. The getter and setter allow CLPC
 * to query and configure edge properties between various clusters on the platform.
 */

extern void sched_perfcontrol_edge_matrix_get(sched_clutch_edge *edge_matrix, bool *edge_request_bitmap, uint64_t flags, uint64_t matrix_order);
extern void sched_perfcontrol_edge_matrix_set(sched_clutch_edge *edge_matrix, bool *edge_changes_bitmap, uint64_t flags, uint64_t matrix_order);

/*
 * Update the deadline after which sched_perfcontrol_deadline_passed will be called.
 * Returns TRUE if it successfully canceled a previously set callback,
 * and FALSE if it did not (i.e. one wasn't set, or callback already fired / is in flight).
 * The callback is automatically canceled when it fires, and does not repeat unless rearmed.
 *
 * This 'timer' executes as the scheduler switches between threads, on a non-idle core.
 *
 * There can be only one outstanding timer globally.
 */
extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline);

typedef enum perfcontrol_callout_type {
    PERFCONTROL_CALLOUT_ON_CORE,
    PERFCONTROL_CALLOUT_OFF_CORE,
    PERFCONTROL_CALLOUT_CONTEXT,
    PERFCONTROL_CALLOUT_STATE_UPDATE,
    /* Add other callout types here */
    PERFCONTROL_CALLOUT_MAX
} perfcontrol_callout_type_t;

typedef enum perfcontrol_callout_stat {
    PERFCONTROL_STAT_INSTRS,
    PERFCONTROL_STAT_CYCLES,
    /* Add other stat types here */
    PERFCONTROL_STAT_MAX
} perfcontrol_callout_stat_t;

uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);

#ifdef __arm64__
/* The performance controller may use this interface to recommend
 * that CPUs in the designated cluster employ WFE rather than WFI
 * within the idle loop, falling back to WFI after the specified
 * timeout. The updates are expected to be serialized by the caller;
 * the implementation is not required to perform internal synchronization.
 */
uint32_t ml_update_cluster_wfe_recommendation(uint32_t wfe_cluster_id, uint64_t wfe_timeout_abstime_interval, uint64_t wfe_hint_flags);
#endif /* __arm64__ */

#if defined(HAS_APPLE_PAC)
#define ONES(x) (BIT((x))-1)
#define PTR_MASK ONES(64-T1SZ_BOOT)
#define PAC_MASK ~PTR_MASK
#define SIGN(p) ((p) & BIT(55))
#define UNSIGN_PTR(p) \
	SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK)

uint64_t ml_default_jop_pid(void);
void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid_from_shared_region(task_t task);
void ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop);
void ml_thread_set_disable_user_jop(thread_t thread, uint8_t disable_user_jop);
void ml_thread_set_jop_pid(thread_t thread, task_t task);
void *ml_auth_ptr_unchecked(void *ptr, unsigned key, uint64_t modifier);

uint64_t ml_enable_user_jop_key(uint64_t user_jop_key);

/**
 * Restores the previous JOP key state after a previous ml_enable_user_jop_key()
 * call.
 *
 * @param user_jop_key      The userspace JOP key previously passed to
 *                          ml_enable_user_jop_key()
 * @param saved_jop_state   The saved JOP state returned by
 *                          ml_enable_user_jop_key()
 */
void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state);
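
/*
 * Usage sketch (illustrative only): ml_enable_user_jop_key() returns a saved
 * state token that must be handed back to ml_disable_user_jop_key(), so the
 * two calls pair around the window in which the userspace key is live. The
 * key value passed in is hypothetical.
 */
#if 0 /* example */
static void
example_with_user_jop_key(uint64_t user_jop_key)
{
    uint64_t saved = ml_enable_user_jop_key(user_jop_key);

    /* ... perform work that requires the user JOP key ... */

    ml_disable_user_jop_key(user_jop_key, saved);
}
#endif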
#endif /* defined(HAS_APPLE_PAC) */

void ml_enable_monitor(void);

#endif /* KERNEL_PRIVATE */

boolean_t machine_timeout_suspended(void);
void ml_get_power_state(boolean_t *, boolean_t *);

uint32_t get_arm_cpu_version(void);
boolean_t user_cont_hwclock_allowed(void);
uint8_t user_timebase_type(void);
boolean_t ml_thread_is64bit(thread_t thread);

#ifdef __arm64__
bool ml_feature_supported(uint32_t feature_bit);
void ml_set_align_checking(void);
extern void wfe_timeout_configure(void);
extern void wfe_timeout_init(void);
#endif /* __arm64__ */

void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);
uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
#endif /* __APPLE_API_PRIVATE */

#if __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE)
extern void ml_expect_fault_begin(expected_fault_handler_t, uintptr_t);
extern void ml_expect_fault_end(void);
#endif /* __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE) */

void ml_hibernate_active_pre(void);
void ml_hibernate_active_post(void);

__END_DECLS

#endif /* _ARM_MACHINE_ROUTINES_H_ */