/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _I386_MACHINE_ROUTINES_H_
#define _I386_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>

#include <stdarg.h>

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

/* are we a 64-bit platform? */

boolean_t ml_is64bit(void);

/* is this a 64bit thread? */

boolean_t ml_thread_is64bit(thread_t);

/* is this a 64bit saved state? */

boolean_t ml_state_is64bit(void *);

/* set state of fpu save area for signal handling */

void ml_fp_setvalid(boolean_t);

void ml_cpu_set_ldt(int);

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);

/* Install an interrupt handler */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon);
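
/*
 * Illustrative sketch only (not part of the original header): a platform
 * driver would typically wire up a handler roughly like this; `my_nub`,
 * `my_source`, `my_target`, `my_handler` and `my_refcon` are hypothetical
 * names supplied by the caller, not symbols defined by xnu.
 *
 *     ml_install_interrupt_handler(my_nub, my_source, my_target,
 *         (IOInterruptHandler)my_handler, my_refcon);
 */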

void ml_entropy_collect(void);

uint64_t ml_get_timebase(void);
void ml_init_lock_timeout(void);
void ml_init_delay_spin_threshold(int);

boolean_t ml_delay_should_spin(uint64_t interval);

extern void ml_delay_on_yield(void);
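
/*
 * Illustrative sketch only: a caller deciding whether a short delay should
 * busy-wait or block might look roughly like this; `interval_abs` is a
 * hypothetical absolute-time interval. The spin threshold itself is
 * configured via ml_init_delay_spin_threshold().
 *
 *     if (ml_delay_should_spin(interval_abs)) {
 *             // short interval: spin, polling ml_get_timebase()
 *     } else {
 *             // long interval: arm a timer or block instead
 *     }
 */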

vm_offset_t
ml_static_ptovirt(
    vm_offset_t);

void ml_static_mfree(
    vm_offset_t,
    vm_size_t);

kern_return_t
ml_static_protect(
    vm_offset_t start,
    vm_size_t size,
    vm_prot_t new_prot);
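
/*
 * Illustrative sketch only (assumed usage): after boot-time initialization a
 * subsystem might re-protect a wired, statically mapped region read-only.
 * `region_start` and `region_size` are hypothetical, page-aligned values.
 *
 *     kern_return_t kr = ml_static_protect(region_start, region_size,
 *         VM_PROT_READ);
 *     assert(kr == KERN_SUCCESS);
 */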

/* boot memory allocation */
vm_offset_t ml_static_malloc(
    vm_size_t size);

vm_offset_t ml_static_slide(
    vm_offset_t vaddr);

kern_return_t
ml_static_verify_page_protections(
    uint64_t base, uint64_t size, vm_prot_t prot);

vm_offset_t ml_static_unslide(
    vm_offset_t vaddr);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr);

vm_size_t ml_nofault_copy(
    vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size);

boolean_t ml_validate_nofault(
    vm_offset_t virtsrc, vm_size_t size);
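
/*
 * Illustrative sketch only: copying from a range that may not be fully
 * mapped, e.g. while formatting debugger output. A FALSE result or a short
 * return count indicates part of the range could not be accessed without
 * faulting. `src_va`, `dst_va` and `len` are hypothetical caller values.
 *
 *     if (ml_validate_nofault(src_va, len)) {
 *             vm_size_t done = ml_nofault_copy(src_va, dst_va, len);
 *             // done < len means the copy stopped early
 *     }
 */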

/* Machine topology info */
uint64_t ml_cpu_cache_size(unsigned int level);
uint64_t ml_cpu_cache_sharing(unsigned int level);

/* Set the maximum number of CPUs */
void ml_set_max_cpus(
    unsigned int max_cpus);

extern void ml_cpu_up(void);
extern void ml_cpu_down(void);

void bzero_phys_nc(
    addr64_t phys_address,
    uint32_t length);
extern uint32_t interrupt_timer_coalescing_enabled;
extern uint32_t idle_entry_timer_processing_hdeadline_threshold;

#if TCOAL_INSTRUMENT
#define TCOAL_DEBUG KERNEL_DEBUG_CONSTANT
#else
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* TCOAL_INSTRUMENT */

#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory-mapped IO space */
vm_offset_t ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size);


void ml_get_bouncepool_info(
    vm_offset_t *phys_addr,
    vm_size_t *size);
/* Indicates if spinlock, IPI and other timeouts should be suspended */
boolean_t machine_timeout_suspended(void);
void plctrace_disable(void);
#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

/* Warm up a CPU to receive an interrupt */
kern_return_t ml_interrupt_prewarm(uint64_t deadline);

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE

/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Struct for ml_processor_register */
struct ml_processor_info {
    cpu_id_t cpu_id;
    boolean_t boot_cpu;
    vm_offset_t start_paddr;
    boolean_t supports_nap;
    unsigned long l2cr_value;
    time_base_enable_t time_base_enable;
};

typedef struct ml_processor_info ml_processor_info_t;


/* Register a processor */
kern_return_t
ml_processor_register(
    cpu_id_t cpu_id,
    uint32_t lapic_id,
    processor_t *processor_out,
    boolean_t boot_cpu,
    boolean_t start);
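
/*
 * Illustrative sketch only (assumed usage by a platform CPU driver):
 * registering a secondary CPU by its LAPIC ID. `my_cpu_id` and `lapic_id`
 * are hypothetical caller-supplied values.
 *
 *     processor_t processor;
 *     kern_return_t kr = ml_processor_register(my_cpu_id, lapic_id,
 *         &processor,
 *         FALSE,   // not the boot CPU
 *         TRUE);   // start it now
 */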

/* PCI config cycle probing */
boolean_t ml_probe_read(
    vm_offset_t paddr,
    unsigned int *val);
boolean_t ml_probe_read_64(
    addr64_t paddr,
    unsigned int *val);
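
/*
 * Illustrative sketch only: probing a physical address that may not decode
 * (for example during PCI configuration scanning). The return value reports
 * whether the read completed; `probe_paddr` is hypothetical.
 *
 *     unsigned int val;
 *     if (ml_probe_read(probe_paddr, &val)) {
 *             // val holds the word read from probe_paddr
 *     }
 */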

/* Read physical address byte */
unsigned int ml_phys_read_byte(
    vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(
    addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(
    vm_offset_t paddr);
unsigned int ml_phys_read_half_64(
    addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(
    vm_offset_t paddr);
unsigned int ml_phys_read_64(
    addr64_t paddr);
unsigned int ml_phys_read_word(
    vm_offset_t paddr);
unsigned int ml_phys_read_word_64(
    addr64_t paddr);

/* Read physical address double word */
unsigned long long ml_phys_read_double(
    vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(
    addr64_t paddr);

unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
extern void ml_io_write8(uintptr_t vaddr, uint8_t val);
extern void ml_io_write16(uintptr_t vaddr, uint16_t val);
extern void ml_io_write32(uintptr_t vaddr, uint32_t val);
extern void ml_io_write64(uintptr_t vaddr, uint64_t val);
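
/*
 * Illustrative sketch only: a read-modify-write of a 32-bit memory-mapped
 * register through a kernel virtual mapping obtained earlier (for example
 * via ml_io_map()). `regs_va` and `STATUS_REG_OFFSET` are hypothetical.
 *
 *     uint32_t v = ml_io_read32(regs_va + STATUS_REG_OFFSET);
 *     ml_io_write32(regs_va + STATUS_REG_OFFSET, v | 0x1);
 */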

extern uint32_t ml_port_io_read(uint16_t ioport, int size);
extern uint8_t ml_port_io_read8(uint16_t ioport);
extern uint16_t ml_port_io_read16(uint16_t ioport);
extern uint32_t ml_port_io_read32(uint16_t ioport);
extern void ml_port_io_write(uint16_t ioport, uint32_t val, int size);
extern void ml_port_io_write8(uint16_t ioport, uint8_t val);
extern void ml_port_io_write16(uint16_t ioport, uint16_t val);
extern void ml_port_io_write32(uint16_t ioport, uint32_t val);
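
/*
 * Illustrative sketch only: legacy x86 port I/O through the ml_port_io_*
 * accessors; `MY_INDEX_PORT` and `MY_DATA_PORT` are hypothetical port
 * numbers for an index/data style device.
 *
 *     ml_port_io_write8(MY_INDEX_PORT, 0x0A);
 *     uint8_t v = ml_port_io_read8(MY_DATA_PORT);
 */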

/* Write physical address byte */
void ml_phys_write_byte(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(
    addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(
    addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(
    addr64_t paddr, unsigned int data);
void ml_phys_write_word(
    vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(
    addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(
    vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(
    addr64_t paddr, unsigned long long data);
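
/*
 * Illustrative sketch only: reading and writing one 32-bit word at a 64-bit
 * physical address via the accessors above; `paddr` is a hypothetical,
 * naturally aligned physical address.
 *
 *     unsigned int v = ml_phys_read_word_64(paddr);
 *     ml_phys_write_word_64(paddr, v | 0x1);
 */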

/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
    uint32_t vector_unit;
    uint32_t cache_line_size;
    uint32_t l1_icache_size;
    uint32_t l1_dcache_size;
    uint32_t l2_settings;
    uint32_t l2_cache_size;
    uint32_t l3_settings;
    uint32_t l3_cache_size;
};

typedef struct ml_cpu_info ml_cpu_info_t;

typedef enum {
    CLUSTER_TYPE_SMP,
} cluster_type_t;

/* Get processor info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
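
/*
 * Illustrative sketch only: querying the processor's cache geometry.
 *
 *     ml_cpu_info_t info;
 *     ml_cpu_get_info(&info);
 *     // e.g. info.cache_line_size, info.l2_cache_size, info.l3_cache_size
 */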

void ml_thread_policy(
    thread_t thread,
    unsigned policy_id,
    unsigned policy_info);

#define MACHINE_GROUP            0x00000001
#define MACHINE_NETWORK_GROUP    0x10000000
#define MACHINE_NETWORK_WORKLOOP 0x00000001
#define MACHINE_NETWORK_NETISR   0x00000002
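
/*
 * Illustrative sketch only (assumed usage): tagging a networking workloop
 * thread with the machine-specific policy bits defined above; `thread` is a
 * hypothetical thread_t held by the caller.
 *
 *     ml_thread_policy(thread, MACHINE_GROUP,
 *         (MACHINE_NETWORK_GROUP | MACHINE_NETWORK_WORKLOOP));
 */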

/* Return the maximum number of CPUs set by ml_set_max_cpus(), blocking if necessary */
unsigned int ml_wait_max_cpus(
    void);

/*
 * The following are in pmCPU.c, not machine_routines.c.
 */
extern void ml_set_maxsnoop(uint32_t maxdelay);
extern unsigned ml_get_maxsnoop(void);
extern void ml_set_maxbusdelay(uint32_t mdelay);
extern uint32_t ml_get_maxbusdelay(void);
extern void ml_set_maxintdelay(uint64_t mdelay);
extern uint64_t ml_get_maxintdelay(void);
extern boolean_t ml_get_interrupt_prewake_applicable(void);


extern uint64_t tmrCvt(uint64_t time, uint64_t conversion);

extern uint64_t ml_cpu_int_event_time(void);

#endif /* KERNEL_PRIVATE */

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled; returns the previous state */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
boolean_t ml_early_set_interrupts_enabled(boolean_t enable);

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);
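
/*
 * Illustrative sketch only: the common save/disable/restore pattern around a
 * short critical section; the caller restores whatever state it found.
 *
 *     boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *     // ... critical section, must not block ...
 *     (void) ml_set_interrupts_enabled(istate);
 */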

#ifdef XNU_KERNEL_PRIVATE
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif

/* Zero bytes starting at a physical address */
void bzero_phys(
    addr64_t phys_address,
    uint32_t length);

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);
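
/*
 * Illustrative sketch only: guarding a deep code path against kernel stack
 * exhaustion; `MIN_STACK_NEEDED` is a hypothetical threshold.
 *
 *     if (ml_stack_remaining() < MIN_STACK_NEEDED) {
 *             return KERN_RESOURCE_SHORTAGE;
 *     }
 */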

__END_DECLS
#if defined(MACH_KERNEL_PRIVATE)
__private_extern__ uint64_t ml_phys_read_data(uint64_t paddr, int psz);
__private_extern__ void ml_phys_write_data(uint64_t paddr,
    unsigned long long data, int size);
__private_extern__ uintptr_t
pmap_verify_noncacheable(uintptr_t vaddr);
void machine_lockdown(void);
#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

boolean_t ml_fpu_avx_enabled(void);
boolean_t ml_fpu_avx512_enabled(void);

void interrupt_latency_tracker_setup(void);
void interrupt_reset_latency_stats(void);
void interrupt_populate_latency_stats(char *, unsigned);
void ml_get_power_state(boolean_t *, boolean_t *);

void timer_queue_expire_rescan(void*);
void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);

uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
boolean_t ml_recent_wake(void);

#ifdef MACH_KERNEL_PRIVATE
struct i386_cpu_info;
struct machine_thread;
/* LBR support */
void i386_lbr_init(struct i386_cpu_info *info_p, bool is_master);
void i386_switch_lbrs(thread_t old, thread_t new);
int i386_lbr_native_state_to_mach_thread_state(struct machine_thread *pcb, last_branch_state_t *machlbrp);
void i386_lbr_synch(thread_t thr);
void i386_lbr_enable(void);
void i386_lbr_disable(void);
extern bool last_branch_support_enabled;
#endif

#define ALL_CORES_RECOMMENDED (~(uint64_t)0)

extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);


extern uint64_t reportphyreaddelayabs;
extern uint64_t reportphywritedelayabs;
extern uint32_t reportphyreadosbt;
extern uint32_t reportphywriteosbt;
extern uint32_t phyreadpanic;
extern uint32_t phywritepanic;
extern uint64_t tracephyreaddelayabs;
extern uint64_t tracephywritedelayabs;

void ml_hibernate_active_pre(void);
void ml_hibernate_active_post(void);

#endif /* XNU_KERNEL_PRIVATE */
#endif /* _I386_MACHINE_ROUTINES_H_ */