/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _ARM_MACHINE_ROUTINES_H_
#define _ARM_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>

#include <stdarg.h>

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE
#ifdef __arm64__
typedef bool (*expected_fault_handler_t)(arm_saved_state_t *);
#endif /* __arm64__ */
#endif /* XNU_KERNEL_PRIVATE */

/* Interrupt handling */

void ml_cpu_signal(unsigned int cpu_id);
void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs);
uint64_t ml_cpu_signal_deferred_get_timer(void);
void ml_cpu_signal_deferred(unsigned int cpu_id);
void ml_cpu_signal_retract(unsigned int cpu_id);
bool ml_cpu_signal_is_enabled(void);

/* Initialize Interrupts */
void ml_init_interrupt(void);

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
boolean_t ml_early_set_interrupts_enabled(boolean_t enable);

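/*
 * Illustrative sketch (not part of this header): ml_set_interrupts_enabled()
 * returns the previous interrupt state, so the usual pattern is to save that
 * return value and restore it when leaving the critical section. The function
 * name below is hypothetical.
 */
#if 0   /* example only */
static void
example_critical_section(void)
{
    /* Disable interrupts and remember whether they were enabled before. */
    boolean_t istate = ml_set_interrupts_enabled(FALSE);

    /* ... code that must run with interrupts masked ... */

    /* Restore the previous interrupt state. */
    (void) ml_set_interrupts_enabled(istate);
}
#endif
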
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);

/* Clear interrupt spin debug state for thread */
#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
extern uint64_t stackshot_interrupt_masked_timeout;

#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type) \
do { \
    if (interrupt_masked_debug) { \
        thread_t thread = current_thread(); \
        thread->machine.int_type = type; \
        thread->machine.int_handler_addr = (uintptr_t)VM_KERNEL_STRIP_PTR(handler_addr); \
        thread->machine.inthandler_timestamp = ml_get_timebase(); \
        thread->machine.int_vector = (uintptr_t)NULL; \
    } \
} while (0)

#define INTERRUPT_MASKED_DEBUG_END() \
do { \
    if (interrupt_masked_debug) { \
        thread_t thread = current_thread(); \
        ml_check_interrupt_handler_duration(thread); \
    } \
} while (0)

void ml_irq_debug_start(uintptr_t handler, uintptr_t vector);
void ml_irq_debug_end(void);

void ml_spin_debug_reset(thread_t thread);
void ml_spin_debug_clear(thread_t thread);
void ml_spin_debug_clear_self(void);
void ml_check_interrupts_disabled_duration(thread_t thread);
void ml_check_stackshot_interrupt_disabled_duration(thread_t thread);
void ml_check_interrupt_handler_duration(thread_t thread);
#else
#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type)
#define INTERRUPT_MASKED_DEBUG_END()
#endif
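
/*
 * Illustrative sketch (not part of this header): how a dispatch routine might
 * bracket an interrupt handler with the macros above so that handlers that run
 * too long with interrupts masked are reported. The dispatch function and the
 * EXAMPLE_INTR_TYPE value are hypothetical.
 */
#if 0   /* example only */
#define EXAMPLE_INTR_TYPE 0     /* hypothetical interrupt-type value */

static void
example_dispatch_irq(void (*handler)(void))
{
    /* Record the handler address and start timestamp on the current thread. */
    INTERRUPT_MASKED_DEBUG_START(handler, EXAMPLE_INTR_TYPE);
    handler();
    /* Check how long the handler ran and report if it exceeded the timeout. */
    INTERRUPT_MASKED_DEBUG_END();
}
#endif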

#ifdef XNU_KERNEL_PRIVATE
extern bool ml_snoop_thread_is_on_core(thread_t thread);
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif

/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Type for the Processor Cache Dispatch function */
typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1);

typedef uint32_t (*get_decrementer_t)(void);
typedef void (*set_decrementer_t)(uint32_t);
typedef void (*fiq_handler_t)(void);

#endif

#define CacheConfig             0x00000000UL
#define CacheControl            0x00000001UL
#define CacheClean              0x00000002UL
#define CacheCleanRegion        0x00000003UL
#define CacheCleanFlush         0x00000004UL
#define CacheCleanFlushRegion   0x00000005UL
#define CacheShutdown           0x00000006UL

#define CacheControlEnable      0x00000000UL

#define CacheConfigCCSIDR       0x00000001UL
#define CacheConfigSize         0x00000100UL

/* Type for the Processor Idle function */
typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter, uint64_t *new_timeout_ticks);

/* Type for the Idle Tickle function */
typedef void (*idle_tickle_t)(void);

/* Type for the Idle Timer function */
typedef void (*idle_timer_t)(void *refcon, uint64_t *new_timeout_ticks);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Type for the Lockdown Handler */
typedef void (*lockdown_handler_t)(void *);

/* Type for the Platform specific Error Handler */
typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr);

/*
 * The exception callback (ex_cb) module allows kernel drivers to
 * register and receive callbacks for exceptions, and indicate
 * actions to be taken by the platform kernel.
 * Currently this is supported for ARM64, but extending support to ARM32
 * should be straightforward.
 */

/* Supported exception classes for callbacks */
typedef enum{
    EXCB_CLASS_ILLEGAL_INSTR_SET,
#ifdef CONFIG_XNUPOST
    EXCB_CLASS_TEST1,
    EXCB_CLASS_TEST2,
    EXCB_CLASS_TEST3,
#endif
    EXCB_CLASS_MAX          // this must be last
}
ex_cb_class_t;

/* Actions indicated by callbacks to be taken by platform kernel */
typedef enum{
    EXCB_ACTION_RERUN,      // re-run the faulting instruction
    EXCB_ACTION_NONE,       // continue normal exception handling
#ifdef CONFIG_XNUPOST
    EXCB_ACTION_TEST_FAIL,
#endif
}
ex_cb_action_t;

/*
 * Exception state
 * We cannot use a private kernel data structure such as arm_saved_state_t.
 * The CPSR and ESR are not clobbered when the callback function is invoked, so
 * those registers can be examined by the callback function;
 * the same is done in the platform error handlers.
 */
typedef struct{
    vm_offset_t far;
}
ex_cb_state_t;

/* callback type definition */
typedef ex_cb_action_t (*ex_cb_t) (
    ex_cb_class_t        cb_class,
    void                *refcon,   // provided at registration
    const ex_cb_state_t *state     // exception state
    );

/*
 * Callback registration
 * Currently we support only one registered callback per class but
 * it should be possible to support more callbacks
 */
kern_return_t ex_cb_register(
    ex_cb_class_t  cb_class,
    ex_cb_t        cb,
    void          *refcon );

/*
 * Called internally by platform kernel to invoke the registered callback for class
 */
ex_cb_action_t ex_cb_invoke(
    ex_cb_class_t  cb_class,
    vm_offset_t    far);

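/*
 * Illustrative sketch (not part of this header): registering an exception
 * callback for the illegal-instruction-set class. The handler name and the
 * NULL refcon are hypothetical.
 */
#if 0   /* example only */
static ex_cb_action_t
example_illegal_instr_set_cb(ex_cb_class_t cb_class, void *refcon, const ex_cb_state_t *state)
{
    (void)cb_class;
    (void)refcon;
    (void)state->far;           /* fault address is available for inspection */
    return EXCB_ACTION_NONE;    /* let normal exception handling continue */
}

static void
example_register_ex_cb(void)
{
    kern_return_t kr = ex_cb_register(EXCB_CLASS_ILLEGAL_INSTR_SET,
        example_illegal_instr_set_cb, NULL /* refcon */);
    (void)kr;   /* a real caller would handle registration failure */
}
#endif
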

void ml_parse_cpu_topology(void);

unsigned int ml_get_cpu_count(void);

unsigned int ml_get_cluster_count(void);

int ml_get_boot_cpu_number(void);

int ml_get_cpu_number(uint32_t phys_id);

int ml_get_cluster_number(uint32_t phys_id);

int ml_get_max_cpu_number(void);

int ml_get_max_cluster_number(void);

unsigned int ml_get_first_cpu_id(unsigned int cluster_id);

#ifdef __arm64__
int ml_get_cluster_number_local(void);
unsigned int ml_get_cpu_number_local(void);
#endif /* __arm64__ */

/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
    unsigned long vector_unit;
    unsigned long cache_line_size;
    unsigned long l1_icache_size;
    unsigned long l1_dcache_size;
    unsigned long l2_settings;
    unsigned long l2_cache_size;
    unsigned long l3_settings;
    unsigned long l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;

typedef enum {
    CLUSTER_TYPE_SMP,
} cluster_type_t;

cluster_type_t ml_get_boot_cluster(void);

/*!
 * @typedef ml_topology_cpu_t
 * @brief Describes one CPU core in the topology.
 *
 * @field cpu_id            Logical CPU ID (EDT: cpu-id): 0, 1, 2, 3, 4, ...
 * @field phys_id           Physical CPU ID (EDT: reg). Same as MPIDR[15:0], i.e.
 *                          (cluster_id << 8) | core_number_within_cluster
 * @field cluster_id        Cluster ID (EDT: cluster-id)
 * @field die_id            Die ID (EDT: die-id)
 * @field cluster_type      The type of CPUs found in this cluster.
 * @field l2_access_penalty Indicates that the scheduler should try to de-prioritize a core because
 *                          L2 accesses are slower than on the boot processor.
 * @field l2_cache_size     Size of the L2 cache, in bytes. 0 if unknown or not present.
 * @field l2_cache_id       l2-cache-id property read from EDT.
 * @field l3_cache_size     Size of the L3 cache, in bytes. 0 if unknown or not present.
 * @field l3_cache_id       l3-cache-id property read from EDT.
 * @field cpu_IMPL_regs     IO-mapped virtual address of cpuX_IMPL (implementation-defined) register block.
 * @field cpu_IMPL_pa       Physical address of cpuX_IMPL register block.
 * @field cpu_IMPL_len      Length of cpuX_IMPL register block.
 * @field cpu_UTTDBG_regs   IO-mapped virtual address of cpuX_UTTDBG register block.
 * @field cpu_UTTDBG_pa     Physical address of cpuX_UTTDBG register block, if set in DT, else zero
 * @field cpu_UTTDBG_len    Length of cpuX_UTTDBG register block, if set in DT, else zero
 * @field coresight_regs    IO-mapped virtual address of CoreSight debug register block.
 * @field coresight_pa      Physical address of CoreSight register block.
 * @field coresight_len     Length of CoreSight register block.
 * @field self_ipi_irq      AIC IRQ vector for self IPI (cpuX->cpuX). 0 if unsupported.
 * @field other_ipi_irq     AIC IRQ vector for other IPI (cpuX->cpuY). 0 if unsupported.
 * @field pmi_irq           AIC IRQ vector for performance management IRQ. 0 if unsupported.
 * @field die_cluster_id    Cluster ID within the local die (EDT: die-cluster-id)
 * @field cluster_core_id   Core ID within the local cluster (EDT: cluster-core-id)
 */
typedef struct ml_topology_cpu {
    unsigned int cpu_id;
    uint32_t phys_id;
    unsigned int cluster_id;
    unsigned int die_id;
    cluster_type_t cluster_type;
    uint32_t l2_access_penalty;
    uint32_t l2_cache_size;
    uint32_t l2_cache_id;
    uint32_t l3_cache_size;
    uint32_t l3_cache_id;
    vm_offset_t cpu_IMPL_regs;
    uint64_t cpu_IMPL_pa;
    uint64_t cpu_IMPL_len;
    vm_offset_t cpu_UTTDBG_regs;
    uint64_t cpu_UTTDBG_pa;
    uint64_t cpu_UTTDBG_len;
    vm_offset_t coresight_regs;
    uint64_t coresight_pa;
    uint64_t coresight_len;
    int self_ipi_irq;
    int other_ipi_irq;
    int pmi_irq;
    unsigned int die_cluster_id;
    unsigned int cluster_core_id;
} ml_topology_cpu_t;

/*!
 * @typedef ml_topology_cluster_t
 * @brief Describes one cluster in the topology.
 *
 * @field cluster_id        Cluster ID (EDT: cluster-id)
 * @field cluster_type      The type of CPUs found in this cluster.
 * @field num_cpus          Total number of usable CPU cores in this cluster.
 * @field first_cpu_id      The cpu_id of the first CPU in the cluster.
 * @field cpu_mask          A bitmask representing the cpu_id's that belong to the cluster. Example:
 *                          If the cluster contains CPU4 and CPU5, cpu_mask will be 0x30.
 * @field acc_IMPL_regs     IO-mapped virtual address of acc_IMPL (implementation-defined) register block.
 * @field acc_IMPL_pa       Physical address of acc_IMPL register block.
 * @field acc_IMPL_len      Length of acc_IMPL register block.
 * @field cpm_IMPL_regs     IO-mapped virtual address of cpm_IMPL (implementation-defined) register block.
 * @field cpm_IMPL_pa       Physical address of cpm_IMPL register block.
 * @field cpm_IMPL_len      Length of cpm_IMPL register block.
 */
typedef struct ml_topology_cluster {
    unsigned int cluster_id;
    cluster_type_t cluster_type;
    unsigned int num_cpus;
    unsigned int first_cpu_id;
    uint64_t cpu_mask;
    vm_offset_t acc_IMPL_regs;
    uint64_t acc_IMPL_pa;
    uint64_t acc_IMPL_len;
    vm_offset_t cpm_IMPL_regs;
    uint64_t cpm_IMPL_pa;
    uint64_t cpm_IMPL_len;
} ml_topology_cluster_t;

// Bump this version number any time any ml_topology_* struct changes, so
// that KPI users can check whether their headers are compatible with
// the running kernel.
#define CPU_TOPOLOGY_VERSION 1

/*!
 * @typedef ml_topology_info_t
 * @brief Describes the CPU topology for all APs in the system. Populated from EDT and read-only at runtime.
 * @discussion This struct only lists CPU cores that are considered usable by both iBoot and XNU. Some
 *             physically present CPU cores may be considered unusable due to configuration options like
 *             the "cpus=" boot-arg. Cores that are disabled in hardware will not show up in EDT at all, so
 *             they also will not be present in this struct.
 *
 * @field version         Version of the struct (set to CPU_TOPOLOGY_VERSION).
 * @field num_cpus        Total number of usable CPU cores.
 * @field max_cpu_id      The highest usable logical CPU ID.
 * @field num_clusters    Total number of AP CPU clusters on the system (usable or not).
 * @field max_cluster_id  The highest cluster ID found in EDT.
 * @field cpus            List of |num_cpus| entries.
 * @field clusters        List of |num_clusters| entries.
 * @field boot_cpu        Points to the |cpus| entry for the boot CPU.
 * @field boot_cluster    Points to the |clusters| entry which contains the boot CPU.
 * @field chip_revision   Silicon revision reported by iBoot, which comes from the
 *                        SoC-specific fuse bits. See CPU_VERSION_xx macros for definitions.
 */
typedef struct ml_topology_info {
    unsigned int version;
    unsigned int num_cpus;
    unsigned int max_cpu_id;
    unsigned int num_clusters;
    unsigned int max_cluster_id;
    unsigned int max_die_id;
    ml_topology_cpu_t *cpus;
    ml_topology_cluster_t *clusters;
    ml_topology_cpu_t *boot_cpu;
    ml_topology_cluster_t *boot_cluster;
    unsigned int chip_revision;
} ml_topology_info_t;

/*!
 * @function ml_get_topology_info
 * @result A pointer to the read-only topology struct. Does not need to be freed. Returns NULL
 *         if the struct hasn't been initialized or the feature is unsupported.
 */
const ml_topology_info_t *ml_get_topology_info(void);

/*!
 * @function ml_map_cpu_pio
 * @brief Maps per-CPU and per-cluster PIO registers found in EDT. This needs to be
 *        called after arm_vm_init() so it can't be part of ml_parse_cpu_topology().
 */
void ml_map_cpu_pio(void);

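/*
 * Illustrative sketch (not part of this header): walking the CPU topology
 * returned by ml_get_topology_info(). The version check follows the
 * CPU_TOPOLOGY_VERSION comment above; the function name is hypothetical.
 */
#if 0   /* example only */
static void
example_walk_topology(void)
{
    const ml_topology_info_t *topo = ml_get_topology_info();

    if (topo == NULL || topo->version != CPU_TOPOLOGY_VERSION) {
        return; /* not initialized, or built against mismatched headers */
    }
    for (unsigned int i = 0; i < topo->num_cpus; i++) {
        const ml_topology_cpu_t *cpu = &topo->cpus[i];
        /* cpu->cpu_id, cpu->cluster_id, cpu->die_id, ... are available here */
        (void)cpu;
    }
}
#endif
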
/* Struct for ml_processor_register */
struct ml_processor_info {
    cpu_id_t cpu_id;
    vm_offset_t start_paddr;
    boolean_t supports_nap;
    void *platform_cache_dispatch;
    time_base_enable_t time_base_enable;
    processor_idle_t processor_idle;
    idle_tickle_t *idle_tickle;
    idle_timer_t idle_timer;
    void *idle_timer_refcon;
    vm_offset_t powergate_stub_addr;
    uint32_t powergate_stub_length;
    uint32_t powergate_latency;
    platform_error_handler_t platform_error_handler;
    uint64_t regmap_paddr;
    uint32_t phys_id;
    uint32_t log_id;
    uint32_t l2_access_penalty;
    uint32_t cluster_id;
    cluster_type_t cluster_type;
    uint32_t l2_cache_id;
    uint32_t l2_cache_size;
    uint32_t l3_cache_id;
    uint32_t l3_cache_size;
};
typedef struct ml_processor_info ml_processor_info_t;

#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* Struct for ml_init_timebase */
struct tbd_ops {
    fiq_handler_t tbd_fiq_handler;
    get_decrementer_t tbd_get_decrementer;
    set_decrementer_t tbd_set_decrementer;
};
typedef struct tbd_ops *tbd_ops_t;
typedef struct tbd_ops tbd_ops_data_t;
#endif

/*!
 * @function ml_processor_register
 *
 * @abstract callback from platform kext to register processor
 *
 * @discussion This function is called by the platform kext when a processor is
 * being registered. This is called while running on the CPU itself, as part of
 * its initialization.
 *
 * @param ml_processor_info provides machine-specific information about the
 * processor to xnu.
 *
 * @param processor is set as an out-parameter to an opaque handle that should
 * be used by the platform kext when referring to this processor in the future.
 *
 * @param ipi_handler is set as an out-parameter to the function that should be
 * registered as the IPI handler.
 *
 * @param pmi_handler is set as an out-parameter to the function that should be
 * registered as the PMI handler.
 *
 * @returns KERN_SUCCESS on success, or an error code otherwise.
 */
kern_return_t ml_processor_register(ml_processor_info_t *ml_processor_info,
    processor_t *processor, ipi_handler_t *ipi_handler,
    perfmon_interrupt_handler_func *pmi_handler);
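
/*
 * Illustrative sketch (not part of this header): the shape of a platform
 * kext's registration call. The helper name and the field values filled in
 * below are hypothetical; a real caller populates many more fields.
 */
#if 0   /* example only */
static void
example_register_cpu(cpu_id_t cpu_id, uint32_t phys_id)
{
    ml_processor_info_t info = {0};
    processor_t processor = NULL;
    ipi_handler_t ipi_handler = NULL;
    perfmon_interrupt_handler_func pmi_handler = NULL;

    info.cpu_id = cpu_id;
    info.phys_id = phys_id;
    info.supports_nap = TRUE;

    /* Out-parameters return the processor handle and the IPI/PMI handlers to install. */
    if (ml_processor_register(&info, &processor, &ipi_handler, &pmi_handler) != KERN_SUCCESS) {
        /* hypothetical error path */
    }
}
#endif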

/* Register a lockdown handler */
kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *);

#if XNU_KERNEL_PRIVATE
void ml_lockdown_init(void);

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */

/* Initialize Interrupts */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon);

vm_offset_t
ml_static_vtop(
    vm_offset_t);

kern_return_t
ml_static_verify_page_protections(
    uint64_t base, uint64_t size, vm_prot_t prot);

vm_offset_t
ml_static_ptovirt(
    vm_offset_t);

vm_offset_t ml_static_slide(
    vm_offset_t vaddr);

vm_offset_t ml_static_unslide(
    vm_offset_t vaddr);

/* Offset required to obtain absolute time value from tick counter */
uint64_t ml_get_abstime_offset(void);

/* Offset required to obtain continuous time value from tick counter */
uint64_t ml_get_conttime_offset(void);

#ifdef __APPLE_API_UNSTABLE
/* PCI config cycle probing */
boolean_t ml_probe_read(
    vm_offset_t paddr,
    unsigned int *val);
boolean_t ml_probe_read_64(
    addr64_t paddr,
    unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(
    vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(
    addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(
    vm_offset_t paddr);
unsigned int ml_phys_read_half_64(
    addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(
    vm_offset_t paddr);
unsigned int ml_phys_read_64(
    addr64_t paddr);
unsigned int ml_phys_read_word(
    vm_offset_t paddr);
unsigned int ml_phys_read_word_64(
    addr64_t paddr);

unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
extern void ml_io_write8(uintptr_t vaddr, uint8_t val);
extern void ml_io_write16(uintptr_t vaddr, uint16_t val);
extern void ml_io_write32(uintptr_t vaddr, uint32_t val);
extern void ml_io_write64(uintptr_t vaddr, uint64_t val);

/* Read physical address double word */
unsigned long long ml_phys_read_double(
    vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(
    addr64_t paddr);

/* Write physical address byte */
void ml_phys_write_byte(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(
    addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(
    addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(
    addr64_t paddr, unsigned int data);
void ml_phys_write_word(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(
    addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(
    vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(
    addr64_t paddr, unsigned long long data);

void ml_static_mfree(
    vm_offset_t,
    vm_size_t);

kern_return_t
ml_static_protect(
    vm_offset_t start,
    vm_size_t size,
    vm_prot_t new_prot);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr);

/* Get processor cache info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);

#endif /* __APPLE_API_UNSTABLE */

#ifdef __APPLE_API_PRIVATE
#ifdef XNU_KERNEL_PRIVATE
vm_size_t ml_nofault_copy(
    vm_offset_t virtsrc,
    vm_offset_t virtdst,
    vm_size_t size);
boolean_t ml_validate_nofault(
    vm_offset_t virtsrc, vm_size_t size);
#endif /* XNU_KERNEL_PRIVATE */
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size);

vm_offset_t ml_io_map_wcomb(
    vm_offset_t phys_addr,
    vm_size_t size);

vm_offset_t ml_io_map_with_prot(
    vm_offset_t phys_addr,
    vm_size_t size,
    vm_prot_t prot);

void ml_io_unmap(
    vm_offset_t addr,
    vm_size_t sz);

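/*
 * Illustrative sketch (not part of this header): mapping a device register
 * page, accessing a register through the ml_io_* accessors declared above,
 * and unmapping it again. The physical address and register offsets are
 * hypothetical.
 */
#if 0   /* example only */
static void
example_device_poke(void)
{
    vm_offset_t base = ml_io_map(0x20000000ULL /* hypothetical phys addr */, PAGE_SIZE);

    if (base == 0) {
        return;
    }
    unsigned int status = ml_io_read32(base + 0x10);   /* hypothetical offset */
    ml_io_write32(base + 0x14, status | 0x1);          /* hypothetical offset */
    ml_io_unmap(base, PAGE_SIZE);
}
#endif
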
void ml_get_bouncepool_info(
    vm_offset_t *phys_addr,
    vm_size_t *size);

vm_map_address_t ml_map_high_window(
    vm_offset_t phys_addr,
    vm_size_t len);

/* boot memory allocation */
vm_offset_t ml_static_malloc(
    vm_size_t size);

void ml_init_timebase(
    void *args,
    tbd_ops_t tbd_funcs,
    vm_offset_t int_address,
    vm_offset_t int_value);

uint64_t ml_get_timebase(void);

void ml_init_lock_timeout(void);

boolean_t ml_delay_should_spin(uint64_t interval);

void ml_delay_on_yield(void);

uint32_t ml_get_decrementer(void);

#include <machine/config.h>

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
void timer_state_event_user_to_kernel(void);
void timer_state_event_kernel_to_user(void);
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

uint64_t ml_get_hwclock(void);

#ifdef __arm64__
boolean_t ml_get_timer_pending(void);
#endif

void platform_syscall(
    struct arm_saved_state *);

void ml_set_decrementer(
    uint32_t dec_value);

boolean_t is_user_contex(
    void);

void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address);

/* These calls are only valid if __ARM_USER_PROTECT__ is defined */
uintptr_t arm_user_protect_begin(
    thread_t thread);

void arm_user_protect_end(
    thread_t thread,
    uintptr_t up,
    boolean_t disable_interrupts);

#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

/* Zero bytes starting at a physical address */
void bzero_phys(
    addr64_t phys_address,
    vm_size_t length);

void bzero_phys_nc(addr64_t src64, vm_size_t bytes);

#if MACH_KERNEL_PRIVATE
#ifdef __arm64__
/* Pattern-fill buffer with zeros or a 32-bit pattern;
 * target must be 128-byte aligned and sized a multiple of 128
 * Both variants emit stores with non-temporal properties.
 */
void fill32_dczva(addr64_t, vm_size_t);
void fill32_nt(addr64_t, vm_size_t, uint32_t);
int cpu_interrupt_is_pending(void);
#endif
#endif

void ml_thread_policy(
    thread_t thread,
    unsigned policy_id,
    unsigned policy_info);

#define MACHINE_GROUP            0x00000001
#define MACHINE_NETWORK_GROUP    0x10000000
#define MACHINE_NETWORK_WORKLOOP 0x00000001
#define MACHINE_NETWORK_NETISR   0x00000002

/* Set the maximum number of CPUs */
void ml_set_max_cpus(
    unsigned int max_cpus);

/* Return the maximum number of CPUs set by ml_set_max_cpus(), waiting if necessary */
unsigned int ml_wait_max_cpus(
    void);

/* Return the maximum memory size */
unsigned int ml_get_machine_mem(void);

#ifdef XNU_KERNEL_PRIVATE
/* Return max offset */
vm_map_offset_t ml_get_max_offset(
    boolean_t is64,
    unsigned int option);
#define MACHINE_MAX_OFFSET_DEFAULT 0x01
#define MACHINE_MAX_OFFSET_MIN     0x02
#define MACHINE_MAX_OFFSET_MAX     0x04
#define MACHINE_MAX_OFFSET_DEVICE  0x08
#endif

extern void ml_cpu_up(void);
extern void ml_cpu_down(void);
extern void ml_arm_sleep(void);

extern uint64_t ml_get_wake_timebase(void);
extern uint64_t ml_get_conttime_wake_time(void);

/* Time since the system was reset (as part of boot/wake) */
uint64_t ml_get_time_since_reset(void);

/*
 * Called by ApplePMGR to set wake time. Units and epoch are identical
 * to mach_continuous_time(). Has no effect on !HAS_CONTINUOUS_HWCLOCK
 * chips. If wake_time == UINT64_MAX, that means the wake time is
 * unknown and calls to ml_get_time_since_reset() will return UINT64_MAX.
 */
void ml_set_reset_time(uint64_t wake_time);

#ifdef XNU_KERNEL_PRIVATE
/* Just a stub on ARM */
extern kern_return_t ml_interrupt_prewarm(uint64_t deadline);
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* XNU_KERNEL_PRIVATE */

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);

#ifdef MACH_KERNEL_PRIVATE
uint32_t get_fpscr(void);
void set_fpscr(uint32_t);
void machine_conf(void);
void machine_lockdown(void);

#ifdef __arm64__
unsigned long update_mdscr(unsigned long clear, unsigned long set);
#endif /* __arm64__ */

extern void arm_debug_set_cp14(arm_debug_state_t *debug_state);
extern void fiq_context_init(boolean_t enable_fiq);

extern void reenable_async_aborts(void);
#ifdef __arm__
extern boolean_t get_vfp_enabled(void);
extern void cpu_idle_wfi(boolean_t wfi_fast);
#endif

#ifdef __arm64__
uint64_t ml_cluster_wfe_timeout(uint32_t wfe_cluster_id);
#endif

#ifdef MONITOR
#define MONITOR_SET_ENTRY 0x800 /* Set kernel entry point from monitor */
#define MONITOR_LOCKDOWN  0x801 /* Enforce kernel text/rodata integrity */
unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3);
#endif /* MONITOR */

#if __ARM_KERNEL_PROTECT__
extern void set_vbar_el1(uint64_t);
#endif /* __ARM_KERNEL_PROTECT__ */
#endif /* MACH_KERNEL_PRIVATE */

extern uint32_t arm_debug_read_dscr(void);

extern int set_be_bit(void);
extern int clr_be_bit(void);
extern int be_tracing(void);

/* Please note that cpu_broadcast_xcall is not as simple as you would like it to be.
 * It will sometimes put the calling thread to sleep, and it is up to your callback
 * to wake it up as needed, where "as needed" is defined as "all other CPUs have
 * called the broadcast func". Look around the kernel for examples, or instead use
 * cpu_broadcast_xcall_simple() which does indeed act like you would expect, given
 * the prototype. cpu_broadcast_immediate_xcall has the same caveats and has a similar
 * _simple() wrapper.
 */
typedef void (*broadcastFunc) (void *);
unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
unsigned int cpu_broadcast_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_xcall(int, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
unsigned int cpu_broadcast_immediate_xcall_simple(boolean_t, broadcastFunc, void *);
kern_return_t cpu_immediate_xcall(int, broadcastFunc, void *);

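/*
 * Illustrative sketch (not part of this header): the _simple() variants block
 * the caller until every targeted CPU has run the callback, so no manual
 * wakeup bookkeeping is needed. The callback and helper names below are
 * hypothetical.
 */
#if 0   /* example only */
static void
example_flush_per_cpu_state(void *arg)
{
    (void)arg;
    /* runs once on each CPU */
}

static void
example_broadcast(void)
{
    /* TRUE here asks for the callback to run on the calling CPU as well. */
    unsigned int cpus_run = cpu_broadcast_xcall_simple(TRUE, example_flush_per_cpu_state, NULL);
    (void)cpus_run;
}
#endif
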
#ifdef KERNEL_PRIVATE

/* Interface to be used by the perf. controller to register a callback, in a
 * single-threaded fashion. The callback will receive notifications of
 * processor performance quality-of-service changes from the scheduler.
 */

#ifdef __arm64__
typedef void (*cpu_qos_update_t)(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2);
void cpu_qos_update_register(cpu_qos_update_t);
#endif /* __arm64__ */

struct going_on_core {
    uint64_t thread_id;
    uint16_t qos_class;
    uint16_t urgency;       /* XCPM compatibility */
    uint32_t is_32_bit : 1; /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */
    uint32_t is_kernel_thread : 1;
    uint64_t thread_group_id;
    void *thread_group_data;
    uint64_t scheduling_latency;    /* absolute time between when thread was made runnable and this ctx switch */
    uint64_t start_time;
    uint64_t scheduling_latency_at_same_basepri;
    uint32_t energy_estimate_nj;    /* return: In nanojoules */
    /* smaller of the time between last change to base priority and ctx switch and scheduling_latency */
};
typedef struct going_on_core *going_on_core_t;

struct going_off_core {
    uint64_t thread_id;
    uint32_t energy_estimate_nj;    /* return: In nanojoules */
    uint32_t reserved;
    uint64_t end_time;
    uint64_t thread_group_id;
    void *thread_group_data;
};
typedef struct going_off_core *going_off_core_t;

struct thread_group_data {
    uint64_t thread_group_id;
    void *thread_group_data;
    uint32_t thread_group_size;
    uint32_t thread_group_flags;
};
typedef struct thread_group_data *thread_group_data_t;

struct perfcontrol_max_runnable_latency {
    uint64_t max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */];
};
typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t;

struct perfcontrol_work_interval {
    uint64_t thread_id;
    uint16_t qos_class;
    uint16_t urgency;
    uint32_t flags; // notify
    uint64_t work_interval_id;
    uint64_t start;
    uint64_t finish;
    uint64_t deadline;
    uint64_t next_start;
    uint64_t thread_group_id;
    void *thread_group_data;
    uint32_t create_flags;
};
typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t;

typedef enum {
    WORK_INTERVAL_START,
    WORK_INTERVAL_UPDATE,
    WORK_INTERVAL_FINISH
} work_interval_ctl_t;

struct perfcontrol_work_interval_instance {
    work_interval_ctl_t ctl;
    uint32_t create_flags;
    uint64_t complexity;
    uint64_t thread_id;
    uint64_t work_interval_id;
    uint64_t instance_id; /* out: start, in: update/finish */
    uint64_t start;
    uint64_t finish;
    uint64_t deadline;
    uint64_t thread_group_id;
    void *thread_group_data;
};
typedef struct perfcontrol_work_interval_instance *perfcontrol_work_interval_instance_t;

/*
 * Structure to export per-CPU counters as part of the CLPC callout.
 * Contains only the fixed CPU counters (instructions and cycles); CLPC
 * would call back into XNU to get the configurable counters if needed.
 */
struct perfcontrol_cpu_counters {
    uint64_t instructions;
    uint64_t cycles;
};

/*
 * Structure used to pass information about a thread to CLPC
 */
struct perfcontrol_thread_data {
    /*
     * Energy estimate (return value)
     * The field is populated by CLPC and used to update the
     * energy estimate of the thread
     */
    uint32_t energy_estimate_nj;
    /* Perfcontrol class for thread */
    perfcontrol_class_t perfctl_class;
    /* Thread ID for the thread */
    uint64_t thread_id;
    /* Thread Group ID */
    uint64_t thread_group_id;
    /*
     * Scheduling latency for threads at the same base priority.
     * Calculated by the scheduler and passed into CLPC. The field is
     * populated only in the thread_data structure for the thread
     * going on-core.
     */
    uint64_t scheduling_latency_at_same_basepri;
    /* Thread Group data pointer */
    void *thread_group_data;
    /* perfctl state pointer */
    void *perfctl_state;
};

/*
 * All callouts from the scheduler are executed with interrupts
 * disabled. Callouts should be implemented in C with minimal
 * abstractions, and only use KPI exported by the mach/libkern
 * symbolset, restricted to routines like spinlocks and atomic
 * operations and scheduler routines as noted below. Spinlocks that
 * are used to synchronize data in the perfcontrol_state_t should only
 * ever be acquired with interrupts disabled, to avoid deadlocks where
 * a quantum expiration timer interrupt attempts to perform a callout
 * that attempts to lock a spinlock that is already held.
 */

/*
 * When a processor is switching between two threads (after the
 * scheduler has chosen a new thread), the low-level platform layer
 * will call this routine, which should perform required timestamps,
 * MMIO register reads, or other state switching. No scheduler locks
 * are held during this callout.
 *
 * This function is called with interrupts ENABLED.
 */
typedef void (*sched_perfcontrol_context_switch_t)(perfcontrol_state_t, perfcontrol_state_t);

/*
 * Once the processor has switched to the new thread, the offcore
 * callout will indicate the old thread that is no longer being
 * run. The thread's scheduler lock is held, so it will not begin
 * running on another processor (in the case of preemption where it
 * remains runnable) until it completes. If the "thread_terminating"
 * boolean is TRUE, this will be the last callout for this thread_id.
 */
typedef void (*sched_perfcontrol_offcore_t)(perfcontrol_state_t, going_off_core_t /* populated by callee */, boolean_t);

/*
 * After the offcore callout and after the old thread can potentially
 * start running on another processor, the oncore callout will be
 * called with the thread's scheduler lock held. The oncore callout is
 * also called any time one of the parameters in the going_on_core_t
 * structure changes, like priority/QoS changes, and quantum
 * expiration, so the callout must not assume callouts are paired with
 * offcore callouts.
 */
typedef void (*sched_perfcontrol_oncore_t)(perfcontrol_state_t, going_on_core_t);

/*
 * Periodically (on hundreds of ms scale), the scheduler will perform
 * maintenance and report the maximum latency for runnable (but not currently
 * running) threads for each urgency class.
 */
typedef void (*sched_perfcontrol_max_runnable_latency_t)(perfcontrol_max_runnable_latency_t);

/*
 * When the kernel receives information about work intervals from userland,
 * it is passed along using this callback. No locks are held, although the state
 * object will not go away during the callout.
 */
typedef void (*sched_perfcontrol_work_interval_notify_t)(perfcontrol_state_t, perfcontrol_work_interval_t);

/*
 * Start, update and finish work interval instance with optional complexity estimate.
 */
typedef void (*sched_perfcontrol_work_interval_ctl_t)(perfcontrol_state_t, perfcontrol_work_interval_instance_t);

/*
 * These callbacks are used when thread groups are added, removed or properties
 * updated.
 * No blocking allocations (or anything else blocking) are allowed inside these
 * callbacks. No locks allowed in these callbacks as well since the kernel might
 * be holding the thread/task locks.
 */
typedef void (*sched_perfcontrol_thread_group_init_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_deinit_t)(thread_group_data_t);
typedef void (*sched_perfcontrol_thread_group_flags_update_t)(thread_group_data_t);

/*
 * Sometime after the timeout set by sched_perfcontrol_update_callback_deadline has passed,
 * this function will be called, passing the timeout deadline that was previously armed as an argument.
 *
 * This is called inside context-switch/quantum-interrupt context and must follow the safety rules for that context.
 */
typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline);

/*
 * Context Switch Callout
 *
 * Parameters:
 * event        - The perfcontrol_event for this callout
 * cpu_id       - The CPU doing the context switch
 * timestamp    - The timestamp for the context switch
 * flags        - Flags for other relevant information
 * offcore      - perfcontrol_data structure for thread going off-core
 * oncore       - perfcontrol_data structure for thread going on-core
 * cpu_counters - perfcontrol_cpu_counters for the CPU doing the switch
 */
typedef void (*sched_perfcontrol_csw_t)(
    perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
    struct perfcontrol_thread_data *offcore, struct perfcontrol_thread_data *oncore,
    struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused);


/*
 * Thread State Update Callout
 *
 * Parameters:
 * event     - The perfcontrol_event for this callout
 * cpu_id    - The CPU doing the state update
 * timestamp - The timestamp for the state update
 * flags     - Flags for other relevant information
 * thr_data  - perfcontrol_data structure for the thread being updated
 */
typedef void (*sched_perfcontrol_state_update_t)(
    perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
    struct perfcontrol_thread_data *thr_data, __unused void *unused);

/*
 * Thread Group Blocking Relationship Callout
 *
 * Parameters:
 * blocked_tg        - Thread group blocking on progress of another thread group
 * blocking_tg       - Thread group blocking progress of another thread group
 * flags             - Flags for other relevant information
 * blocked_thr_state - Per-thread perfcontrol state for blocked thread
 */
typedef void (*sched_perfcontrol_thread_group_blocked_t)(
    thread_group_data_t blocked_tg, thread_group_data_t blocking_tg, uint32_t flags, perfcontrol_state_t blocked_thr_state);

/*
 * Thread Group Unblocking Callout
 *
 * Parameters:
 * unblocked_tg        - Thread group being unblocked from making forward progress
 * unblocking_tg       - Thread group unblocking progress of another thread group
 * flags               - Flags for other relevant information
 * unblocked_thr_state - Per-thread perfcontrol state for unblocked thread
 */
typedef void (*sched_perfcontrol_thread_group_unblocked_t)(
    thread_group_data_t unblocked_tg, thread_group_data_t unblocking_tg, uint32_t flags, perfcontrol_state_t unblocked_thr_state);

/*
 * Callers should always use the CURRENT version so that the kernel can detect both older
 * and newer structure layouts. New callbacks should always be added at the end of the
 * structure, and xnu should expect existing source recompiled against newer headers
 * to pass NULL for unimplemented callbacks. Pass NULL as the callbacks parameter
 * to reset callbacks to their default in-kernel values.
 */

#define SCHED_PERFCONTROL_CALLBACKS_VERSION_0 (0) /* up-to oncore */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_1 (1) /* up-to max_runnable_latency */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_2 (2) /* up-to work_interval_notify */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_3 (3) /* up-to thread_group_deinit */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_4 (4) /* up-to deadline_passed */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_8 (8) /* up-to thread_group_unblocked */
#define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6

struct sched_perfcontrol_callbacks {
    unsigned long version; /* Use SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT */
    sched_perfcontrol_offcore_t offcore;
    sched_perfcontrol_context_switch_t context_switch;
    sched_perfcontrol_oncore_t oncore;
    sched_perfcontrol_max_runnable_latency_t max_runnable_latency;
    sched_perfcontrol_work_interval_notify_t work_interval_notify;
    sched_perfcontrol_thread_group_init_t thread_group_init;
    sched_perfcontrol_thread_group_deinit_t thread_group_deinit;
    sched_perfcontrol_deadline_passed_t deadline_passed;
    sched_perfcontrol_csw_t csw;
    sched_perfcontrol_state_update_t state_update;
    sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update;
    sched_perfcontrol_work_interval_ctl_t work_interval_ctl;
    sched_perfcontrol_thread_group_blocked_t thread_group_blocked;
    sched_perfcontrol_thread_group_unblocked_t thread_group_unblocked;
};
typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;

extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state);

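/*
 * Illustrative sketch (not part of this header): how a performance controller
 * might register a subset of callbacks. Unimplemented callbacks are left NULL,
 * per the versioning comment above; the handler names and the size_of_state
 * value are hypothetical.
 */
#if 0   /* example only */
static void
example_oncore(perfcontrol_state_t state, going_on_core_t on)
{
    (void)state;
    on->energy_estimate_nj = 0;     /* returned to the scheduler */
}

static void
example_register_perfcontrol(void)
{
    struct sched_perfcontrol_callbacks cb = {
        .version = SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT,
        .oncore  = example_oncore,
        /* remaining callbacks intentionally NULL */
    };
    sched_perfcontrol_register_callbacks(&cb, 0 /* size_of_state: hypothetical */);
}
#endif
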
/*
 * Update the scheduler with the set of cores that should be used to dispatch new threads.
 * Non-recommended cores can still be used to field interrupts or run bound threads.
 * This should be called with interrupts enabled and no scheduler locks held.
 */
#define ALL_CORES_RECOMMENDED (~(uint32_t)0)

extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);
extern void sched_override_recommended_cores_for_sleep(void);
extern void sched_restore_recommended_cores_after_sleep(void);
extern void sched_perfcontrol_inherit_recommendation_from_tg(perfcontrol_class_t perfctl_class, boolean_t inherit);

extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);

/*
 * Edge Scheduler-CLPC Interface
 *
 * sched_perfcontrol_thread_group_preferred_clusters_set()
 *
 * The Edge scheduler expects thread group recommendations to be specific clusters rather
 * than just E/P. In order to allow more fine grained control, CLPC can specify an override
 * preferred cluster per QoS bucket. CLPC passes a common preferred cluster `tg_preferred_cluster`
 * and an array of size [PERFCONTROL_CLASS_MAX] with overrides for specific perfctl classes.
 * The scheduler translates these preferences into sched_bucket
 * preferences and applies the changes.
 *
 */
/* Token to indicate a particular perfctl class is not overridden */
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE ((uint32_t)~0)

/*
 * CLPC can also indicate if there should be an immediate rebalancing of threads of this TG as
 * part of this preferred cluster change. It does that by specifying the following options.
 */
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING  0x1
#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNABLE 0x2
typedef uint64_t sched_perfcontrol_preferred_cluster_options_t;

extern void sched_perfcontrol_thread_group_preferred_clusters_set(void *machine_data, uint32_t tg_preferred_cluster,
    uint32_t overrides[PERFCONTROL_CLASS_MAX], sched_perfcontrol_preferred_cluster_options_t options);
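
/*
 * Illustrative sketch (not part of this header): setting a thread group's
 * preferred cluster with one per-class override and requesting that running
 * threads be rebalanced immediately. The cluster IDs, the overridden class
 * index, and the helper name are hypothetical.
 */
#if 0   /* example only */
static void
example_set_preferred_cluster(void *machine_data)
{
    uint32_t overrides[PERFCONTROL_CLASS_MAX];

    for (unsigned int i = 0; i < PERFCONTROL_CLASS_MAX; i++) {
        overrides[i] = SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE;
    }
    overrides[1] = 2;   /* hypothetical: pin one perfctl class to cluster 2 */
    sched_perfcontrol_thread_group_preferred_clusters_set(machine_data,
        0 /* tg_preferred_cluster: hypothetical */,
        overrides, SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING);
}
#endif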

/*
 * Edge Scheduler-CLPC Interface
 *
 * sched_perfcontrol_edge_matrix_get()/sched_perfcontrol_edge_matrix_set()
 *
 * The Edge scheduler uses edges between clusters to define the likelihood of migrating threads
 * across clusters. The edge config between any two clusters defines the edge weight and whether
 * migration and steal operations are allowed across that edge. The getter and setter allow CLPC
 * to query and configure edge properties between various clusters on the platform.
 */

extern void sched_perfcontrol_edge_matrix_get(sched_clutch_edge *edge_matrix, bool *edge_request_bitmap, uint64_t flags, uint64_t matrix_order);
extern void sched_perfcontrol_edge_matrix_set(sched_clutch_edge *edge_matrix, bool *edge_changes_bitmap, uint64_t flags, uint64_t matrix_order);

/*
 * Update the deadline after which sched_perfcontrol_deadline_passed will be called.
 * Returns TRUE if it successfully canceled a previously set callback,
 * and FALSE if it did not (i.e. one wasn't set, or callback already fired / is in flight).
 * The callback is automatically canceled when it fires, and does not repeat unless rearmed.
 *
 * This 'timer' executes as the scheduler switches between threads, on a non-idle core
 *
 * There can be only one outstanding timer globally.
 */
extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline);

typedef enum perfcontrol_callout_type {
    PERFCONTROL_CALLOUT_ON_CORE,
    PERFCONTROL_CALLOUT_OFF_CORE,
    PERFCONTROL_CALLOUT_CONTEXT,
    PERFCONTROL_CALLOUT_STATE_UPDATE,
    /* Add other callout types here */
    PERFCONTROL_CALLOUT_MAX
} perfcontrol_callout_type_t;

typedef enum perfcontrol_callout_stat {
    PERFCONTROL_STAT_INSTRS,
    PERFCONTROL_STAT_CYCLES,
    /* Add other stat types here */
    PERFCONTROL_STAT_MAX
} perfcontrol_callout_stat_t;

uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat);

#ifdef __arm64__
/* The performance controller may use this interface to recommend
 * that CPUs in the designated cluster employ WFE rather than WFI
 * within the idle loop, falling back to WFI after the specified
 * timeout. The updates are expected to be serialized by the caller,
 * the implementation is not required to perform internal synchronization.
 */
uint32_t ml_update_cluster_wfe_recommendation(uint32_t wfe_cluster_id, uint64_t wfe_timeout_abstime_interval, uint64_t wfe_hint_flags);
#endif /* __arm64__ */

#if defined(HAS_APPLE_PAC)
#define ONES(x) (BIT((x))-1)
#define PTR_MASK ONES(64-T1SZ_BOOT)
#define PAC_MASK ~PTR_MASK
#define SIGN(p) ((p) & BIT(55))
#define UNSIGN_PTR(p) \
	SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK)

uint64_t ml_default_jop_pid(void);
void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid_from_shared_region(task_t task);
void ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop);
void ml_thread_set_disable_user_jop(thread_t thread, uint8_t disable_user_jop);
void ml_thread_set_jop_pid(thread_t thread, task_t task);
void *ml_auth_ptr_unchecked(void *ptr, unsigned key, uint64_t modifier);

/**
 * Temporarily enables a userspace JOP key in kernel space, so that the kernel
 * can sign or auth pointers on that process's behalf.
 *
 * @note The caller must disable interrupts before calling
 * ml_enable_user_jop_key(), and may only re-enable interrupts after the
 * complementary ml_disable_user_jop_key() call.
 *
 * @param user_jop_key The userspace JOP key to temporarily use
 * @return Saved JOP state, to be passed to the complementary
 *         ml_disable_user_jop_key() call
 */
uint64_t ml_enable_user_jop_key(uint64_t user_jop_key);

/**
 * Restores the previous JOP key state after a previous ml_enable_user_jop_key()
 * call.
 *
 * @param user_jop_key The userspace JOP key previously passed to
 *                     ml_enable_user_jop_key()
 * @param saved_jop_state The saved JOP state returned by
 *                        ml_enable_user_jop_key()
 */
void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state);
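
/*
 * Illustrative sketch (not part of this header): the enable/disable pair must
 * bracket the window in which the kernel signs or auths pointers with the
 * user key, and interrupts must stay disabled for the whole window, as noted
 * above. The function name is hypothetical.
 */
#if 0   /* example only */
static void
example_with_user_jop_key(uint64_t user_jop_key)
{
    boolean_t istate = ml_set_interrupts_enabled(FALSE);
    uint64_t saved = ml_enable_user_jop_key(user_jop_key);

    /* ... sign/auth pointers on the user process's behalf ... */

    ml_disable_user_jop_key(user_jop_key, saved);
    (void) ml_set_interrupts_enabled(istate);
}
#endif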
#endif /* defined(HAS_APPLE_PAC) */


#endif /* KERNEL_PRIVATE */

boolean_t machine_timeout_suspended(void);
void ml_get_power_state(boolean_t *, boolean_t *);

uint32_t get_arm_cpu_version(void);
boolean_t user_cont_hwclock_allowed(void);
uint8_t user_timebase_type(void);
boolean_t ml_thread_is64bit(thread_t thread);

#ifdef __arm64__
bool ml_feature_supported(uint32_t feature_bit);
void ml_set_align_checking(void);
extern void wfe_timeout_configure(void);
extern void wfe_timeout_init(void);
#endif /* __arm64__ */

void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);
uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
#endif /* __APPLE_API_PRIVATE */


#if __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE)
extern void ml_expect_fault_begin(expected_fault_handler_t, uintptr_t);
extern void ml_expect_fault_end(void);
#endif /* __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE) */


void ml_hibernate_active_pre(void);
void ml_hibernate_active_post(void);

__END_DECLS

#endif /* _ARM_MACHINE_ROUTINES_H_ */