1 /*
2 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31
32 #ifndef _ARM_MACHINE_ROUTINES_H_
33 #define _ARM_MACHINE_ROUTINES_H_
34
35 #include <mach/mach_types.h>
36 #include <mach/boolean.h>
37 #include <kern/kern_types.h>
38 #include <pexpert/pexpert.h>
39
40 #include <sys/cdefs.h>
41 #include <sys/appleapiopts.h>
42
43 #include <stdarg.h>
44
45 __BEGIN_DECLS
46
47 /* Interrupt handling */
48
49 void ml_cpu_signal(unsigned int cpu_id);
50 void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs);
51 uint64_t ml_cpu_signal_deferred_get_timer(void);
52 void ml_cpu_signal_deferred(unsigned int cpu_id);
53 void ml_cpu_signal_retract(unsigned int cpu_id);
54
55 /* Initialize Interrupts */
56 void ml_init_interrupt(void);
57
58 /* Get Interrupts Enabled */
59 boolean_t ml_get_interrupts_enabled(void);
60
61 /* Set Interrupts Enabled */
62 boolean_t ml_set_interrupts_enabled(boolean_t enable);
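/*
 * Usage sketch (illustrative): ml_set_interrupts_enabled() returns the previous
 * enable state, so the usual pattern is save / disable / restore around a short
 * critical section.
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);	// disable, remember prior state
 *	// ... work that must not be interrupted ...
 *	(void) ml_set_interrupts_enabled(istate);		// restore the caller's state
 */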
63
64 /* Check if running at interrupt context */
65 boolean_t ml_at_interrupt_context(void);
66
67 /* Generate a fake interrupt */
68 void ml_cause_interrupt(void);
69
70 /* Clear interrupt spin debug state for thread */
71 #if INTERRUPT_MASKED_DEBUG
72 void ml_spin_debug_reset(thread_t thread);
73 void ml_spin_debug_clear(thread_t thread);
74 void ml_spin_debug_clear_self(void);
75 void ml_check_interrupts_disabled_duration(thread_t thread);
76 #endif
77
78 #ifdef XNU_KERNEL_PRIVATE
79 extern boolean_t ml_is_quiescing(void);
80 extern void ml_set_is_quiescing(boolean_t);
81 extern uint64_t ml_get_booter_memory_size(void);
82 #endif
83
84 /* Type for the Time Base Enable function */
85 typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
86 #if MACH_KERNEL_PRIVATE
87 /* Type for the Processor Cache Dispatch function */
88 typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1);
89 #endif
90
91 #define CacheConfig 0x00000000UL
92 #define CacheControl 0x00000001UL
93 #define CacheClean 0x00000002UL
94 #define CacheCleanRegion 0x00000003UL
95 #define CacheCleanFlush 0x00000004UL
96 #define CacheCleanFlushRegion 0x00000005UL
97 #define CacheShutdown 0x00000006UL
98
99 #define CacheControlEnable 0x00000000UL
100
101 #define CacheConfigCCSIDR 0x00000001UL
102 #define CacheConfigSize 0x00000100UL
103
104 /* Type for the Processor Idle function */
105 typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter, uint64_t *new_timeout_ticks);
106
107 /* Type for the Idle Tickle function */
108 typedef void (*idle_tickle_t)(void);
109
110 /* Type for the Idle Timer function */
111 typedef void (*idle_timer_t)(void *refcon, uint64_t *new_timeout_ticks);
112
113 /* Type for the IPI Handler */
114 typedef void (*ipi_handler_t)(void);
115
116 /* Type for the Lockdown Handler */
117 typedef void (*lockdown_handler_t)(void *);
118
119 /* Type for the Platform specific Error Handler */
120 typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr);
121
122 /*
123 * The exception callback (ex_cb) module allows kernel drivers to
124 * register and receive callbacks for exceptions, and indicate
125 * actions to be taken by the platform kernel.
126 * Currently this is supported for ARM64, but extending support to ARM32
127 * should be straightforward.
128 */
129
130 /* Supported exception classes for callbacks */
131 typedef enum
132 {
133 EXCB_CLASS_ILLEGAL_INSTR_SET,
134 #ifdef CONFIG_XNUPOST
135 EXCB_CLASS_TEST1,
136 EXCB_CLASS_TEST2,
137 EXCB_CLASS_TEST3,
138 #endif
139 EXCB_CLASS_MAX // this must be last
140 }
141 ex_cb_class_t;
142
143 /* Actions indicated by callbacks to be taken by platform kernel */
144 typedef enum
145 {
146 EXCB_ACTION_RERUN, // re-run the faulting instruction
147 EXCB_ACTION_NONE, // continue normal exception handling
148 #ifdef CONFIG_XNUPOST
149 EXCB_ACTION_TEST_FAIL,
150 #endif
151 }
152 ex_cb_action_t;
153
154 /*
155 * Exception state
156 * We cannot use a private kernel data structure such as arm_saved_state_t here.
157 * The CPSR and ESR are not clobbered when the callback function is invoked, so
158 * those registers can be examined by the callback function;
159 * the same is done in the platform error handlers.
160 */
161 typedef struct
162 {
163 vm_offset_t far;
164 }
165 ex_cb_state_t;
166
167 /* callback type definition */
168 typedef ex_cb_action_t (*ex_cb_t) (
169 ex_cb_class_t cb_class,
170 void *refcon, // provided at registration
171 const ex_cb_state_t *state // exception state
172 );
173
174 /*
175 * Callback registration
176 * Currently we support only one registered callback per class but
177 * it should be possible to support more callbacks
178 */
179 kern_return_t ex_cb_register(
180 ex_cb_class_t cb_class,
181 ex_cb_t cb,
182 void *refcon );
183
184 /*
185 * Called internally by platform kernel to invoke the registered callback for class
186 */
187 ex_cb_action_t ex_cb_invoke(
188 ex_cb_class_t cb_class,
189 vm_offset_t far);
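/*
 * Usage sketch (hypothetical driver code; my_ex_cb and my_refcon are illustrative
 * names): register a callback for the illegal-instruction-set class and let normal
 * exception handling continue.
 *
 *	static ex_cb_action_t
 *	my_ex_cb(ex_cb_class_t cb_class, void *refcon, const ex_cb_state_t *state)
 *	{
 *		(void)cb_class; (void)refcon;
 *		kprintf("ex_cb: FAR=0x%lx\n", (unsigned long)state->far);
 *		return EXCB_ACTION_NONE;	// continue normal exception handling
 *	}
 *
 *	kern_return_t kr = ex_cb_register(EXCB_CLASS_ILLEGAL_INSTR_SET, my_ex_cb, my_refcon);
 */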
190
191
192 void ml_parse_cpu_topology(void);
193
194 unsigned int ml_get_cpu_count(void);
195
196 int ml_get_boot_cpu_number(void);
197
198 int ml_get_cpu_number(uint32_t phys_id);
199
200 int ml_get_max_cpu_number(void);
201
202 /* Struct for ml_cpu_get_info */
203 struct ml_cpu_info {
204 unsigned long vector_unit;
205 unsigned long cache_line_size;
206 unsigned long l1_icache_size;
207 unsigned long l1_dcache_size;
208 unsigned long l2_settings;
209 unsigned long l2_cache_size;
210 unsigned long l3_settings;
211 unsigned long l3_cache_size;
212 };
213 typedef struct ml_cpu_info ml_cpu_info_t;
214
215 typedef enum {
216 CLUSTER_TYPE_SMP,
217 } cluster_type_t;
218
219 cluster_type_t ml_get_boot_cluster(void);
220
221 /* Struct for ml_processor_register */
222 struct ml_processor_info {
223 cpu_id_t cpu_id;
224 vm_offset_t start_paddr;
225 boolean_t supports_nap;
226 void *platform_cache_dispatch;
227 time_base_enable_t time_base_enable;
228 processor_idle_t processor_idle;
229 idle_tickle_t *idle_tickle;
230 idle_timer_t idle_timer;
231 void *idle_timer_refcon;
232 vm_offset_t powergate_stub_addr;
233 uint32_t powergate_stub_length;
234 uint32_t powergate_latency;
235 platform_error_handler_t platform_error_handler;
236 uint64_t regmap_paddr;
237 uint32_t phys_id;
238 uint32_t log_id;
239 uint32_t l2_access_penalty;
240 uint32_t cluster_id;
241 cluster_type_t cluster_type;
242 uint32_t l2_cache_id;
243 uint32_t l2_cache_size;
244 uint32_t l3_cache_id;
245 uint32_t l3_cache_size;
246 };
247 typedef struct ml_processor_info ml_processor_info_t;
248
249 #if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
250 /* Struct for ml_init_timebase */
251 struct tbd_ops {
252 void (*tbd_fiq_handler)(void);
253 uint32_t (*tbd_get_decrementer)(void);
254 void (*tbd_set_decrementer)(uint32_t dec_value);
255 };
256 typedef struct tbd_ops *tbd_ops_t;
257 typedef struct tbd_ops tbd_ops_data_t;
258 #endif
259
260 /* Register a processor */
261 kern_return_t ml_processor_register(
262 ml_processor_info_t *ml_processor_info,
263 processor_t *processor,
264 ipi_handler_t *ipi_handler);
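/*
 * Usage sketch (illustrative platform-expert code; the field values and helper
 * callbacks are hypothetical): describe a secondary CPU and register it.
 *
 *	ml_processor_info_t info = {
 *		.cpu_id                 = my_cpu_id,
 *		.phys_id                = my_mpidr_affinity,
 *		.log_id                 = my_logical_id,
 *		.start_paddr            = my_secondary_start_paddr,
 *		.processor_idle         = my_processor_idle,
 *		.platform_error_handler = my_platform_error_handler,
 *	};
 *	processor_t   proc;
 *	ipi_handler_t ipi;
 *	kern_return_t kr = ml_processor_register(&info, &proc, &ipi);
 */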
265
266 /* Register a lockdown handler */
267 kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *);
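/*
 * Usage sketch (illustrative; the handler and refcon names are hypothetical):
 *
 *	static void
 *	my_lockdown_handler(void *refcon)
 *	{
 *		// finalize any state that must be in place before lockdown
 *	}
 *
 *	kern_return_t kr = ml_lockdown_handler_register(my_lockdown_handler, my_refcon);
 */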
268
269 #if XNU_KERNEL_PRIVATE
270 void ml_lockdown_init(void);
271
272 /* Check if the machine layer wants to intercept a panic call */
273 boolean_t ml_wants_panic_trap_to_debugger(void);
274
275 /* Machine layer routine for intercepting panics */
276 void ml_panic_trap_to_debugger(const char *panic_format_str,
277 va_list *panic_args,
278 unsigned int reason,
279 void *ctx,
280 uint64_t panic_options_mask,
281 unsigned long panic_caller);
282 #endif /* XNU_KERNEL_PRIVATE */
283
284 /* Initialize Interrupts */
285 void ml_install_interrupt_handler(
286 void *nub,
287 int source,
288 void *target,
289 IOInterruptHandler handler,
290 void *refCon);
291
292 vm_offset_t
293 ml_static_vtop(
294 vm_offset_t);
295
296 vm_offset_t
297 ml_static_ptovirt(
298 vm_offset_t);
299
300 vm_offset_t ml_static_slide(
301 vm_offset_t vaddr);
302
303 vm_offset_t ml_static_unslide(
304 vm_offset_t vaddr);
305
306 /* Offset required to obtain absolute time value from tick counter */
307 uint64_t ml_get_abstime_offset(void);
308
309 /* Offset required to obtain continuous time value from tick counter */
310 uint64_t ml_get_conttime_offset(void);
311
312 #ifdef __APPLE_API_UNSTABLE
313 /* PCI config cycle probing */
314 boolean_t ml_probe_read(
315 vm_offset_t paddr,
316 unsigned int *val);
317 boolean_t ml_probe_read_64(
318 addr64_t paddr,
319 unsigned int *val);
320
321 /* Read physical address byte */
322 unsigned int ml_phys_read_byte(
323 vm_offset_t paddr);
324 unsigned int ml_phys_read_byte_64(
325 addr64_t paddr);
326
327 /* Read physical address half word */
328 unsigned int ml_phys_read_half(
329 vm_offset_t paddr);
330 unsigned int ml_phys_read_half_64(
331 addr64_t paddr);
332
333 /* Read physical address word */
334 unsigned int ml_phys_read(
335 vm_offset_t paddr);
336 unsigned int ml_phys_read_64(
337 addr64_t paddr);
338 unsigned int ml_phys_read_word(
339 vm_offset_t paddr);
340 unsigned int ml_phys_read_word_64(
341 addr64_t paddr);
342
343 unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
344 unsigned int ml_io_read8(uintptr_t iovaddr);
345 unsigned int ml_io_read16(uintptr_t iovaddr);
346 unsigned int ml_io_read32(uintptr_t iovaddr);
347 unsigned long long ml_io_read64(uintptr_t iovaddr);
348
349 /* Read physical address double word */
350 unsigned long long ml_phys_read_double(
351 vm_offset_t paddr);
352 unsigned long long ml_phys_read_double_64(
353 addr64_t paddr);
354
355 /* Write physical address byte */
356 void ml_phys_write_byte(
357 vm_offset_t paddr, unsigned int data);
358 void ml_phys_write_byte_64(
359 addr64_t paddr, unsigned int data);
360
361 /* Write physical address half word */
362 void ml_phys_write_half(
363 vm_offset_t paddr, unsigned int data);
364 void ml_phys_write_half_64(
365 addr64_t paddr, unsigned int data);
366
367 /* Write physical address word */
368 void ml_phys_write(
369 vm_offset_t paddr, unsigned int data);
370 void ml_phys_write_64(
371 addr64_t paddr, unsigned int data);
372 void ml_phys_write_word(
373 vm_offset_t paddr, unsigned int data);
374 void ml_phys_write_word_64(
375 addr64_t paddr, unsigned int data);
376
377 /* Write physical address double word */
378 void ml_phys_write_double(
379 vm_offset_t paddr, unsigned long long data);
380 void ml_phys_write_double_64(
381 addr64_t paddr, unsigned long long data);
382
383 void ml_static_mfree(
384 vm_offset_t,
385 vm_size_t);
386
387 kern_return_t
388 ml_static_protect(
389 vm_offset_t start,
390 vm_size_t size,
391 vm_prot_t new_prot);
392
393 /* virtual to physical on wired pages */
394 vm_offset_t ml_vtophys(
395 vm_offset_t vaddr);
396
397 /* Get processor info */
398 void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
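/*
 * Usage sketch (illustrative): query cache geometry for the current CPU using the
 * ml_cpu_info struct declared above.
 *
 *	ml_cpu_info_t ci;
 *	ml_cpu_get_info(&ci);
 *	kprintf("L1D %lu bytes, line %lu bytes\n", ci.l1_dcache_size, ci.cache_line_size);
 */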
399
400 #endif /* __APPLE_API_UNSTABLE */
401
402 #ifdef __APPLE_API_PRIVATE
403 #ifdef XNU_KERNEL_PRIVATE
404 vm_size_t ml_nofault_copy(
405 vm_offset_t virtsrc,
406 vm_offset_t virtdst,
407 vm_size_t size);
408 boolean_t ml_validate_nofault(
409 vm_offset_t virtsrc, vm_size_t size);
410 #endif /* XNU_KERNEL_PRIVATE */
411 #if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
412 /* IO memory map services */
413
414 /* Map memory-mapped IO space */
415 vm_offset_t ml_io_map(
416 vm_offset_t phys_addr,
417 vm_size_t size);
418
419 vm_offset_t ml_io_map_wcomb(
420 vm_offset_t phys_addr,
421 vm_size_t size);
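/*
 * Usage sketch (illustrative; MY_DEV_PHYS_BASE and MY_REG_OFFSET are hypothetical):
 * map a device register window and read a 32-bit register through it with
 * ml_io_read32(), declared above under __APPLE_API_UNSTABLE.
 *
 *	vm_offset_t regs = ml_io_map(MY_DEV_PHYS_BASE, PAGE_SIZE);
 *	uint32_t   devid = (uint32_t) ml_io_read32(regs + MY_REG_OFFSET);
 */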
422
423 void ml_get_bouncepool_info(
424 vm_offset_t *phys_addr,
425 vm_size_t *size);
426
427 vm_map_address_t ml_map_high_window(
428 vm_offset_t phys_addr,
429 vm_size_t len);
430
431 /* boot memory allocation */
432 vm_offset_t ml_static_malloc(
433 vm_size_t size);
434
435 void ml_init_timebase(
436 void *args,
437 tbd_ops_t tbd_funcs,
438 vm_offset_t int_address,
439 vm_offset_t int_value);
440
441 uint64_t ml_get_timebase(void);
442
443 void ml_init_lock_timeout(void);
444
445 boolean_t ml_delay_should_spin(uint64_t interval);
446
447 void ml_delay_on_yield(void);
448
449 uint32_t ml_get_decrementer(void);
450
451 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
452 void timer_state_event_user_to_kernel(void);
453 void timer_state_event_kernel_to_user(void);
454 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
455
456 uint64_t ml_get_hwclock(void);
457
458 #ifdef __arm64__
459 boolean_t ml_get_timer_pending(void);
460 #endif
461
462 void platform_syscall(
463 struct arm_saved_state *);
464
465 void ml_set_decrementer(
466 uint32_t dec_value);
467
468 boolean_t is_user_contex(
469 void);
470
471 void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address);
472
473 /* These calls are only valid if __ARM_USER_PROTECT__ is defined */
474 uintptr_t arm_user_protect_begin(
475 thread_t thread);
476
477 void arm_user_protect_end(
478 thread_t thread,
479 uintptr_t up,
480 boolean_t disable_interrupts);
481
482 #endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */
483
484 /* Zero bytes starting at a physical address */
485 void bzero_phys(
486 addr64_t phys_address,
487 vm_size_t length);
488
489 void bzero_phys_nc(addr64_t src64, vm_size_t bytes);
490
491 void ml_thread_policy(
492 thread_t thread,
493 unsigned policy_id,
494 unsigned policy_info);
495
496 #define MACHINE_GROUP 0x00000001
497 #define MACHINE_NETWORK_GROUP 0x10000000
498 #define MACHINE_NETWORK_WORKLOOP 0x00000001
499 #define MACHINE_NETWORK_NETISR 0x00000002
500
501 /* Initialize the maximum number of CPUs */
502 void ml_init_max_cpus(
503 unsigned int max_cpus);
504
505 /* Return the maximum number of CPUs set by ml_init_max_cpus() */
506 unsigned int ml_get_max_cpus(
507 void);
508
509 /* Return the maximum memory size */
510 unsigned int ml_get_machine_mem(void);
511
512 #ifdef XNU_KERNEL_PRIVATE
513 /* Return max offset */
514 vm_map_offset_t ml_get_max_offset(
515 boolean_t is64,
516 unsigned int option);
517 #define MACHINE_MAX_OFFSET_DEFAULT 0x01
518 #define MACHINE_MAX_OFFSET_MIN 0x02
519 #define MACHINE_MAX_OFFSET_MAX 0x04
520 #define MACHINE_MAX_OFFSET_DEVICE 0x08
521 #endif
522
523 extern void ml_cpu_up(void);
524 extern void ml_cpu_down(void);
525 extern void ml_arm_sleep(void);
526
527 extern uint64_t ml_get_wake_timebase(void);
528 extern uint64_t ml_get_conttime_wake_time(void);
529
530 /* Time since the system was reset (as part of boot/wake) */
531 uint64_t ml_get_time_since_reset(void);
532
533 #ifdef XNU_KERNEL_PRIVATE
534 /* Just a stub on ARM */
535 extern kern_return_t ml_interrupt_prewarm(uint64_t deadline);
536 #define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
537 #endif /* XNU_KERNEL_PRIVATE */
538
539 /* Bytes available on current stack */
540 vm_offset_t ml_stack_remaining(void);
541
542 #ifdef MACH_KERNEL_PRIVATE
543 uint32_t get_fpscr(void);
544 void set_fpscr(uint32_t);
545
546 #ifdef __arm64__
547 unsigned long update_mdscr(unsigned long clear, unsigned long set);
548 #endif /* __arm64__ */
549
550 extern void init_vfp(void);
551 extern boolean_t get_vfp_enabled(void);
552 extern void arm_debug_set_cp14(arm_debug_state_t *debug_state);
553 extern void fiq_context_init(boolean_t enable_fiq);
554 extern void fiq_context_bootstrap(boolean_t enable_fiq);
555
556 extern void reenable_async_aborts(void);
557 extern void cpu_idle_wfi(boolean_t wfi_fast);
558
559 #ifdef MONITOR
560 #define MONITOR_SET_ENTRY 0x800 /* Set kernel entry point from monitor */
561 #define MONITOR_LOCKDOWN 0x801 /* Enforce kernel text/rodata integrity */
562 unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
563 uintptr_t arg2, uintptr_t arg3);
564 #endif /* MONITOR */
565
566 #if defined(KERNEL_INTEGRITY_KTRR)
567 void rorgn_stash_range(void);
568 void rorgn_lockdown(void);
569 #endif /* defined(KERNEL_INTEGRITY_KTRR)*/
570
571 #if __ARM_KERNEL_PROTECT__
572 extern void set_vbar_el1(uint64_t);
573 #endif /* __ARM_KERNEL_PROTECT__ */
574 #endif /* MACH_KERNEL_PRIVATE */
575
576 extern uint32_t arm_debug_read_dscr(void);
577
578 extern int set_be_bit(void);
579 extern int clr_be_bit(void);
580 extern int be_tracing(void);
581
582 typedef void (*broadcastFunc) (void *);
583 unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *);
584 kern_return_t cpu_xcall(int, broadcastFunc, void *);
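/*
 * Usage sketch (the parameter meanings below are an assumption; check the
 * implementation): broadcast a function to the CPUs, with the counter used for
 * synchronization and the boolean selecting whether the calling CPU runs it too.
 *
 *	static void
 *	my_percpu_work(void *arg)
 *	{
 *		(void)arg;	// per-CPU work goes here
 *	}
 *
 *	uint32_t sync = 0;
 *	unsigned int ran_on = cpu_broadcast_xcall(&sync, TRUE, my_percpu_work, NULL);
 */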
585
586 #ifdef KERNEL_PRIVATE
587
588 /* Interface to be used by the perf. controller to register a callback, in a
589 * single-threaded fashion. The callback will receive notifications of
590 * processor performance quality-of-service changes from the scheduler.
591 */
592
593 #ifdef __arm64__
594 typedef void (*cpu_qos_update_t)(int throughput_qos, uint64_t qos_param1, uint64_t qos_param2);
595 void cpu_qos_update_register(cpu_qos_update_t);
596 #endif /* __arm64__ */
597
598 struct going_on_core {
599 uint64_t thread_id;
600 uint16_t qos_class;
601 uint16_t urgency; /* XCPM compatibility */
602 uint32_t is_32_bit : 1; /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */
603 uint32_t is_kernel_thread : 1;
604 uint64_t thread_group_id;
605 void *thread_group_data;
606 uint64_t scheduling_latency; /* absolute time between when thread was made runnable and this ctx switch */
607 uint64_t start_time;
608 uint64_t scheduling_latency_at_same_basepri;
609 uint32_t energy_estimate_nj; /* return: In nanojoules */
610 /* smaller of the time between last change to base priority and ctx switch and scheduling_latency */
611 };
612 typedef struct going_on_core *going_on_core_t;
613
614 struct going_off_core {
615 uint64_t thread_id;
616 uint32_t energy_estimate_nj; /* return: In nanojoules */
617 uint32_t reserved;
618 uint64_t end_time;
619 uint64_t thread_group_id;
620 void *thread_group_data;
621 };
622 typedef struct going_off_core *going_off_core_t;
623
624 struct thread_group_data {
625 uint64_t thread_group_id;
626 void *thread_group_data;
627 uint32_t thread_group_size;
628 uint32_t thread_group_flags;
629 };
630 typedef struct thread_group_data *thread_group_data_t;
631
632 struct perfcontrol_max_runnable_latency {
633 uint64_t max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */];
634 };
635 typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t;
636
637 struct perfcontrol_work_interval {
638 uint64_t thread_id;
639 uint16_t qos_class;
640 uint16_t urgency;
641 uint32_t flags; // notify
642 uint64_t work_interval_id;
643 uint64_t start;
644 uint64_t finish;
645 uint64_t deadline;
646 uint64_t next_start;
647 uint64_t thread_group_id;
648 void *thread_group_data;
649 uint32_t create_flags;
650 };
651 typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t;
652
653 typedef enum {
654 WORK_INTERVAL_START,
655 WORK_INTERVAL_UPDATE,
656 WORK_INTERVAL_FINISH
657 } work_interval_ctl_t;
658
659 struct perfcontrol_work_interval_instance {
660 work_interval_ctl_t ctl;
661 uint32_t create_flags;
662 uint64_t complexity;
663 uint64_t thread_id;
664 uint64_t work_interval_id;
665 uint64_t instance_id; /* out: start, in: update/finish */
666 uint64_t start;
667 uint64_t finish;
668 uint64_t deadline;
669 uint64_t thread_group_id;
670 void *thread_group_data;
671 };
672 typedef struct perfcontrol_work_interval_instance *perfcontrol_work_interval_instance_t;
673
674 /*
675 * Structure to export per-CPU counters as part of the CLPC callout.
676 * Contains only the fixed CPU counters (instructions and cycles); CLPC
677 * would call back into XNU to get the configurable counters if needed.
678 */
679 struct perfcontrol_cpu_counters {
680 uint64_t instructions;
681 uint64_t cycles;
682 };
683
684 /*
685 * Structure used to pass information about a thread to CLPC
686 */
687 struct perfcontrol_thread_data {
688 /*
689 * Energy estimate (return value)
690 * The field is populated by CLPC and used to update the
691 * energy estimate of the thread
692 */
693 uint32_t energy_estimate_nj;
694 /* Perfcontrol class for thread */
695 perfcontrol_class_t perfctl_class;
696 /* Thread ID for the thread */
697 uint64_t thread_id;
698 /* Thread Group ID */
699 uint64_t thread_group_id;
700 /*
701 * Scheduling latency for threads at the same base priority.
702 * Calculated by the scheduler and passed into CLPC. The field is
703 * populated only in the thread_data structure for the thread
704 * going on-core.
705 */
706 uint64_t scheduling_latency_at_same_basepri;
707 /* Thread Group data pointer */
708 void *thread_group_data;
709 /* perfctl state pointer */
710 void *perfctl_state;
711 };
712
713 /*
714 * All callouts from the scheduler are executed with interrupts
715 * disabled. Callouts should be implemented in C with minimal
716 * abstractions, and only use KPI exported by the mach/libkern
717 * symbolset, restricted to routines like spinlocks and atomic
718 * operations and scheduler routines as noted below. Spinlocks that
719 * are used to synchronize data in the perfcontrol_state_t should only
720 * ever be acquired with interrupts disabled, to avoid deadlocks where
721 * a quantum expiration timer interrupt attempts to perform a callout
722 * that attempts to lock a spinlock that is already held.
723 */
724
725 /*
726 * When a processor is switching between two threads (after the
727 * scheduler has chosen a new thread), the low-level platform layer
728 * will call this routine, which should perform required timestamps,
729 * MMIO register reads, or other state switching. No scheduler locks
730 * are held during this callout.
731 *
732 * This function is called with interrupts ENABLED.
733 */
734 typedef void (*sched_perfcontrol_context_switch_t)(perfcontrol_state_t, perfcontrol_state_t);
735
736 /*
737 * Once the processor has switched to the new thread, the offcore
738 * callout will indicate the old thread that is no longer being
739 * run. The thread's scheduler lock is held, so it will not begin
740 * running on another processor (in the case of preemption where it
741 * remains runnable) until it completes. If the "thread_terminating"
742 * boolean is TRUE, this will be the last callout for this thread_id.
743 */
744 typedef void (*sched_perfcontrol_offcore_t)(perfcontrol_state_t, going_off_core_t /* populated by callee */, boolean_t);
745
746 /*
747 * After the offcore callout and after the old thread can potentially
748 * start running on another processor, the oncore callout will be
749 * called with the thread's scheduler lock held. The oncore callout is
750 * also called any time one of the parameters in the going_on_core_t
751 * structure changes, like priority/QoS changes, and quantum
752 * expiration, so the callout must not assume callouts are paired with
753 * offcore callouts.
754 */
755 typedef void (*sched_perfcontrol_oncore_t)(perfcontrol_state_t, going_on_core_t);
756
757 /*
758 * Periodically (on hundreds of ms scale), the scheduler will perform
759 * maintenance and report the maximum latency for runnable (but not currently
760 * running) threads for each urgency class.
761 */
762 typedef void (*sched_perfcontrol_max_runnable_latency_t)(perfcontrol_max_runnable_latency_t);
763
764 /*
765 * When the kernel receives information about work intervals from userland,
766 * it is passed along using this callback. No locks are held, although the state
767 * object will not go away during the callout.
768 */
769 typedef void (*sched_perfcontrol_work_interval_notify_t)(perfcontrol_state_t, perfcontrol_work_interval_t);
770
771 /*
772 * Start, update and finish work interval instance with optional complexity estimate.
773 */
774 typedef void (*sched_perfcontrol_work_interval_ctl_t)(perfcontrol_state_t, perfcontrol_work_interval_instance_t);
775
776 /*
777 * These callbacks are used when thread groups are added, removed or properties
778 * updated.
779 * No blocking allocations (or anything else blocking) are allowed inside these
780 * callbacks. Locks are likewise not allowed in these callbacks, since the kernel might
781 * be holding the thread/task locks.
782 */
783 typedef void (*sched_perfcontrol_thread_group_init_t)(thread_group_data_t);
784 typedef void (*sched_perfcontrol_thread_group_deinit_t)(thread_group_data_t);
785 typedef void (*sched_perfcontrol_thread_group_flags_update_t)(thread_group_data_t);
786
787 /*
788 * Sometime after the timeout set by sched_perfcontrol_update_callback_deadline has passed,
789 * this function will be called, passing the timeout deadline that was previously armed as an argument.
790 *
791 * This is called inside context-switch/quantum-interrupt context and must follow the safety rules for that context.
792 */
793 typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline);
794
795 /*
796 * Context Switch Callout
797 *
798 * Parameters:
799 * event - The perfcontrol_event for this callout
800 * cpu_id - The CPU doing the context switch
801 * timestamp - The timestamp for the context switch
802 * flags - Flags for other relevant information
803 * offcore - perfcontrol_data structure for thread going off-core
804 * oncore - perfcontrol_data structure for thread going on-core
805 * cpu_counters - perfcontrol_cpu_counters for the CPU doing the switch
806 */
807 typedef void (*sched_perfcontrol_csw_t)(
808 perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
809 struct perfcontrol_thread_data *offcore, struct perfcontrol_thread_data *oncore,
810 struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused);
811
812
813 /*
814 * Thread State Update Callout
815 *
816 * Parameters:
817 * event - The perfcontrol_event for this callout
818 * cpu_id - The CPU doing the state update
819 * timestamp - The timestamp for the state update
820 * flags - Flags for other relevant information
821 * thr_data - perfcontrol_data structure for the thread being updated
822 */
823 typedef void (*sched_perfcontrol_state_update_t)(
824 perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags,
825 struct perfcontrol_thread_data *thr_data, __unused void *unused);
826
827 /*
828 * Callers should always use the CURRENT version so that the kernel can detect both older
829 * and newer structure layouts. New callbacks should always be added at the end of the
830 * structure, and xnu should expect existing source recompiled against newer headers
831 * to pass NULL for unimplemented callbacks. Pass NULL as the callbacks parameter
832 * to reset callbacks to their default in-kernel values.
833 */
834
835 #define SCHED_PERFCONTROL_CALLBACKS_VERSION_0 (0) /* up-to oncore */
836 #define SCHED_PERFCONTROL_CALLBACKS_VERSION_1 (1) /* up-to max_runnable_latency */
837 #define SCHED_PERFCONTROL_CALLBACKS_VERSION_2 (2) /* up-to work_interval_notify */
838 #define SCHED_PERFCONTROL_CALLBACKS_VERSION_3 (3) /* up-to thread_group_deinit */
839 #define SCHED_PERFCONTROL_CALLBACKS_VERSION_4 (4) /* up-to deadline_passed */
840 #define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */
841 #define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */
842 #define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */
843 #define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6
844
845 struct sched_perfcontrol_callbacks {
846 unsigned long version; /* Use SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT */
847 sched_perfcontrol_offcore_t offcore;
848 sched_perfcontrol_context_switch_t context_switch;
849 sched_perfcontrol_oncore_t oncore;
850 sched_perfcontrol_max_runnable_latency_t max_runnable_latency;
851 sched_perfcontrol_work_interval_notify_t work_interval_notify;
852 sched_perfcontrol_thread_group_init_t thread_group_init;
853 sched_perfcontrol_thread_group_deinit_t thread_group_deinit;
854 sched_perfcontrol_deadline_passed_t deadline_passed;
855 sched_perfcontrol_csw_t csw;
856 sched_perfcontrol_state_update_t state_update;
857 sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update;
858 sched_perfcontrol_work_interval_ctl_t work_interval_ctl;
859 };
860 typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t;
861
862 extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state);
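/*
 * Usage sketch (hypothetical performance-controller code): populate only the
 * callbacks that are implemented, leave the rest NULL, and always pass the CURRENT
 * version. The callback names are illustrative; size_of_state is assumed here to be
 * the size of the per-thread state the controller wants xnu to reserve.
 *
 *	static struct sched_perfcontrol_callbacks my_callbacks = {
 *		.version      = SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT,
 *		.offcore      = my_offcore_cb,
 *		.oncore       = my_oncore_cb,
 *		.state_update = my_state_update_cb,
 *	};
 *
 *	sched_perfcontrol_register_callbacks(&my_callbacks, sizeof(struct my_perfctl_state));
 */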
863
864 /*
865 * Update the scheduler with the set of cores that should be used to dispatch new threads.
866 * Non-recommended cores can still be used to field interrupts or run bound threads.
867 * This should be called with interrupts enabled and no scheduler locks held.
868 */
869 #define ALL_CORES_RECOMMENDED (~(uint32_t)0)
870
871 extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
872 extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation);
873 extern void sched_override_recommended_cores_for_sleep(void);
874 extern void sched_restore_recommended_cores_after_sleep(void);
875
876 /*
877 * Update the deadline after which sched_perfcontrol_deadline_passed will be called.
878 * Returns TRUE if it successfully canceled a previously set callback,
879 * and FALSE if it did not (i.e. one wasn't set, or callback already fired / is in flight).
880 * The callback is automatically canceled when it fires, and does not repeat unless rearmed.
881 *
882 * This 'timer' executes as the scheduler switches between threads, on a non-idle core
883 *
884 * There can be only one outstanding timer globally.
885 */
886 extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline);
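/*
 * Usage sketch (illustrative; the clock-conversion helpers are assumed): arm the
 * one-shot deadline callout roughly 10 ms from now, in mach_absolute_time() units.
 *
 *	uint64_t ticks;
 *	nanoseconds_to_absolutetime(10 * NSEC_PER_MSEC, &ticks);
 *	boolean_t cancelled_prior = sched_perfcontrol_update_callback_deadline(mach_absolute_time() + ticks);
 */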
887
888 typedef enum perfcontrol_callout_type {
889 PERFCONTROL_CALLOUT_ON_CORE,
890 PERFCONTROL_CALLOUT_OFF_CORE,
891 PERFCONTROL_CALLOUT_CONTEXT,
892 PERFCONTROL_CALLOUT_STATE_UPDATE,
893 /* Add other callout types here */
894 PERFCONTROL_CALLOUT_MAX
895 } perfcontrol_callout_type_t;
896
897 typedef enum perfcontrol_callout_stat {
898 PERFCONTROL_STAT_INSTRS,
899 PERFCONTROL_STAT_CYCLES,
900 /* Add other stat types here */
901 PERFCONTROL_STAT_MAX
902 } perfcontrol_callout_stat_t;
903
904 uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
905 perfcontrol_callout_stat_t stat);
906
907
908 #endif /* KERNEL_PRIVATE */
909
910 boolean_t machine_timeout_suspended(void);
911 void ml_get_power_state(boolean_t *, boolean_t *);
912
913 boolean_t user_cont_hwclock_allowed(void);
914 boolean_t user_timebase_allowed(void);
915 boolean_t ml_thread_is64bit(thread_t thread);
916
917 #ifdef __arm64__
918 void ml_set_align_checking(void);
919 boolean_t arm64_wfe_allowed(void);
920 #endif /* __arm64__ */
921
922 void ml_timer_evaluate(void);
923 boolean_t ml_timer_forced_evaluation(void);
924 uint64_t ml_energy_stat(thread_t);
925 void ml_gpu_stat_update(uint64_t);
926 uint64_t ml_gpu_stat(thread_t);
927 #endif /* __APPLE_API_PRIVATE */
928
929 __END_DECLS
930
931 #endif /* _ARM_MACHINE_ROUTINES_H_ */