/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _I386_MACHINE_ROUTINES_H_
#define _I386_MACHINE_ROUTINES_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <kern/kern_types.h>
#include <pexpert/pexpert.h>

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>

#include <stdarg.h>

#ifdef XNU_KERNEL_PRIVATE

/* Are we a 64-bit platform? */
boolean_t ml_is64bit(void);

/* Is this a 64-bit thread? */
boolean_t ml_thread_is64bit(thread_t);

/* Is this 64-bit saved state? */
boolean_t ml_state_is64bit(void *);

/* Set the state of the FPU save area for signal handling */
void ml_fp_setvalid(boolean_t);

void ml_cpu_set_ldt(int);

/* Interrupt handling */

/* Initialize interrupts */
void ml_init_interrupt(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);

/* Install an interrupt handler */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon);

void ml_entropy_collect(void);

uint64_t ml_get_timebase(void);
void ml_init_lock_timeout(void);
void ml_init_delay_spin_threshold(int);

boolean_t ml_delay_should_spin(uint64_t interval);

extern void ml_delay_on_yield(void);
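
/*
 * Usage sketch (illustrative, not part of this header): a caller that must
 * wait roughly "interval_abs" absolute-time units can ask
 * ml_delay_should_spin() whether busy-waiting is preferable to blocking for
 * that interval, and call ml_delay_on_yield() on each iteration of the spin
 * loop.  "interval_abs" and "still_waiting()" are hypothetical names used
 * only for this example.
 *
 *	if (ml_delay_should_spin(interval_abs)) {
 *		while (still_waiting()) {
 *			ml_delay_on_yield();
 *		}
 *	} else {
 *		// block instead, e.g. with an assert_wait()/thread_block() pair
 *	}
 */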

/* boot memory allocation */
vm_offset_t ml_static_malloc(
	vm_size_t size);

vm_offset_t ml_static_slide(
	vm_offset_t vaddr);

kern_return_t ml_static_verify_page_protections(
	uint64_t base, uint64_t size, vm_prot_t prot);

vm_offset_t ml_static_unslide(
	vm_offset_t vaddr);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr);

vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size);

boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size);
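
/*
 * Usage sketch (illustrative): copy a small buffer between two mapped
 * ranges without risking a fault, validating the source first.
 * ml_nofault_copy() returns the number of bytes actually copied.
 * "src", "dst" and "len" are hypothetical caller-supplied values.
 *
 *	if (ml_validate_nofault((vm_offset_t)src, len)) {
 *		vm_size_t copied = ml_nofault_copy((vm_offset_t)src,
 *		    (vm_offset_t)dst, len);
 *		if (copied != len) {
 *			// partial copy: part of the range was not mapped
 *		}
 *	}
 */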

/* Machine topology info */
uint64_t ml_cpu_cache_size(unsigned int level);
uint64_t ml_cpu_cache_sharing(unsigned int level);

/* Set the maximum number of CPUs */
void ml_set_max_cpus(
	unsigned int max_cpus);

extern void ml_cpu_up(void);
extern void ml_cpu_down(void);

/* Zero bytes starting at a physical address (non-cached variant) */
void bzero_phys_nc(
	addr64_t phys_address,
	uint32_t length);

extern uint32_t interrupt_timer_coalescing_enabled;
extern uint32_t idle_entry_timer_processing_hdeadline_threshold;

#if TCOAL_INSTRUMENT
#define TCOAL_DEBUG KERNEL_DEBUG_CONSTANT
#else
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* TCOAL_INSTRUMENT */

#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Map memory-mapped IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size);

void ml_get_bouncepool_info(
	vm_offset_t *phys_addr,
	vm_size_t   *size);

/* Indicates if spinlock, IPI and other timeouts should be suspended */
boolean_t machine_timeout_suspended(void);
void plctrace_disable(void);
#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */

/* Warm up a CPU to receive an interrupt */
kern_return_t ml_interrupt_prewarm(uint64_t deadline);
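
/*
 * Usage sketch (illustrative): ask the power-management layer to have a
 * core awake shortly before an absolute-time deadline.  "delta_abs" is a
 * hypothetical, caller-computed absolute-time delta.
 *
 *	uint64_t deadline = ml_get_timebase() + delta_abs;
 *	if (ml_interrupt_prewarm(deadline) != KERN_SUCCESS) {
 *		// prewarming unavailable; the timer will still fire normally
 *	}
 */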

/* Machine layer routine for intercepting panics */
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE

/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/* Struct for ml_processor_register */
struct ml_processor_info {
	cpu_id_t           cpu_id;
	boolean_t          boot_cpu;
	vm_offset_t        start_paddr;
	boolean_t          supports_nap;
	unsigned long      l2cr_value;
	time_base_enable_t time_base_enable;
};

typedef struct ml_processor_info ml_processor_info_t;

/* Register a processor */
kern_return_t ml_processor_register(
	cpu_id_t     cpu_id,
	uint32_t     lapic_id,
	processor_t *processor_out,
	boolean_t    boot_cpu,
	boolean_t    start);

/* PCI config cycle probing */
boolean_t ml_probe_read(
	vm_offset_t paddr, unsigned int *val);
boolean_t ml_probe_read_64(
	addr64_t paddr, unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(vm_offset_t paddr);
unsigned int ml_phys_read_half_64(addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(vm_offset_t paddr);
unsigned int ml_phys_read_64(addr64_t paddr);
unsigned int ml_phys_read_word(vm_offset_t paddr);
unsigned int ml_phys_read_word_64(addr64_t paddr);

/* Read physical address double word */
unsigned long long ml_phys_read_double(vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(addr64_t paddr);

unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
extern void ml_io_write8(uintptr_t vaddr, uint8_t val);
extern void ml_io_write16(uintptr_t vaddr, uint16_t val);
extern void ml_io_write32(uintptr_t vaddr, uint32_t val);
extern void ml_io_write64(uintptr_t vaddr, uint64_t val);

extern uint32_t ml_port_io_read(uint16_t ioport, int size);
extern uint8_t ml_port_io_read8(uint16_t ioport);
extern uint16_t ml_port_io_read16(uint16_t ioport);
extern uint32_t ml_port_io_read32(uint16_t ioport);
extern void ml_port_io_write(uint16_t ioport, uint32_t val, int size);
extern void ml_port_io_write8(uint16_t ioport, uint8_t val);
extern void ml_port_io_write16(uint16_t ioport, uint16_t val);
extern void ml_port_io_write32(uint16_t ioport, uint32_t val);
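
/*
 * Usage sketch (illustrative): the ml_io_* accessors operate on a *virtual*
 * address, such as one obtained from ml_io_map(), while the ml_port_io_*
 * accessors use x86 I/O ports directly.  "MY_DEV_PHYS", "MY_DEV_LEN",
 * "STATUS_REG" and "COM1_BASE" are hypothetical constants for this example.
 *
 *	vm_offset_t regs = ml_io_map(MY_DEV_PHYS, MY_DEV_LEN);
 *	uint32_t status = (uint32_t)ml_io_read32(regs + STATUS_REG);
 *	ml_io_write32(regs + STATUS_REG, status | 0x1);
 *
 *	uint8_t lsr = ml_port_io_read8(COM1_BASE + 5);
 */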

/* Write physical address byte */
void ml_phys_write_byte(vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(addr64_t paddr, unsigned int data);
void ml_phys_write_word(vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(addr64_t paddr, unsigned long long data);
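
/*
 * Usage sketch (illustrative): the *_64 variants take a full 64-bit
 * physical address (addr64_t); the plain variants take a vm_offset_t.
 * "pa" is a hypothetical physical address known to be safe to access.
 *
 *	unsigned int word = ml_phys_read_64((addr64_t)pa);
 *	ml_phys_write_64((addr64_t)pa, word | 0x1);
 */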

/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
	uint32_t vector_unit;
	uint32_t cache_line_size;
	uint32_t l1_icache_size;
	uint32_t l1_dcache_size;
	uint32_t l2_settings;
	uint32_t l2_cache_size;
	uint32_t l3_settings;
	uint32_t l3_cache_size;
};

typedef struct ml_cpu_info ml_cpu_info_t;

/* Get processor info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
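
/*
 * Usage sketch (illustrative): query the cache geometry reported in the
 * fields of ml_cpu_info_t above.
 *
 *	ml_cpu_info_t info;
 *	ml_cpu_get_info(&info);
 *	// e.g. use info.cache_line_size to align a DMA buffer
 */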

void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info);

#define MACHINE_GROUP            0x00000001
#define MACHINE_NETWORK_GROUP    0x10000000
#define MACHINE_NETWORK_WORKLOOP 0x00000001
#define MACHINE_NETWORK_NETISR   0x00000002

/* Return the maximum number of CPUs set by ml_set_max_cpus(), blocking if necessary */
unsigned int ml_wait_max_cpus(void);

/*
 * The following are in pmCPU.c not machine_routines.c.
 */
extern void ml_set_maxsnoop(uint32_t maxdelay);
extern unsigned ml_get_maxsnoop(void);
extern void ml_set_maxbusdelay(uint32_t mdelay);
extern uint32_t ml_get_maxbusdelay(void);
extern void ml_set_maxintdelay(uint64_t mdelay);
extern uint64_t ml_get_maxintdelay(void);
extern boolean_t ml_get_interrupt_prewake_applicable(void);

extern uint64_t tmrCvt(uint64_t time, uint64_t conversion);
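
/*
 * Usage sketch (illustrative): tmrCvt() scales a time value by a
 * pre-computed fixed-point conversion factor.  "tsc_ticks" and
 * "tsc_to_ns_factor" are hypothetical names for this example.
 *
 *	uint64_t ns = tmrCvt(tsc_ticks, tsc_to_ns_factor);
 */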

extern uint64_t ml_cpu_int_event_time(void);

#endif /* KERNEL_PRIVATE */

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
boolean_t ml_early_set_interrupts_enabled(boolean_t enable);

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);
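
/*
 * Usage sketch (illustrative): the save/disable/restore pattern.
 * ml_set_interrupts_enabled() returns the previous state, so the caller
 * can restore exactly what it found.
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	// ... critical section that must not be interrupted ...
 *	(void) ml_set_interrupts_enabled(istate);
 *
 *	assert(!ml_at_interrupt_context());   // e.g. before blocking
 */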

#ifdef XNU_KERNEL_PRIVATE
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
#endif /* XNU_KERNEL_PRIVATE */

/* Zero bytes starting at a physical address */
void bzero_phys(
	addr64_t phys_address,
	uint32_t length);

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);
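
/*
 * Usage sketch (illustrative): guard a deep recursion on the kernel stack.
 * "MIN_STACK_HEADROOM" is a hypothetical threshold chosen by the caller.
 *
 *	if (ml_stack_remaining() < MIN_STACK_HEADROOM) {
 *		return KERN_RESOURCE_SHORTAGE;   // refuse to recurse further
 *	}
 */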

#if defined(MACH_KERNEL_PRIVATE)
__private_extern__ uint64_t ml_phys_read_data(uint64_t paddr, int psz);
__private_extern__ void ml_phys_write_data(uint64_t paddr,
    unsigned long long data, int size);
__private_extern__ uintptr_t pmap_verify_noncacheable(uintptr_t vaddr);
void machine_lockdown(void);
#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

boolean_t ml_fpu_avx_enabled(void);
boolean_t ml_fpu_avx512_enabled(void);

void interrupt_latency_tracker_setup(void);
void interrupt_reset_latency_stats(void);
void interrupt_populate_latency_stats(char *, unsigned);
void ml_get_power_state(boolean_t *, boolean_t *);

void timer_queue_expire_rescan(void *);
void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);

uint64_t ml_energy_stat(thread_t);
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
boolean_t ml_recent_wake(void);

#ifdef MACH_KERNEL_PRIVATE
struct i386_cpu_info;
struct machine_thread;

void i386_lbr_init(struct i386_cpu_info *info_p, bool is_master);
void i386_switch_lbrs(thread_t old, thread_t new);
int i386_lbr_native_state_to_mach_thread_state(struct machine_thread *pcb,
    last_branch_state_t *machlbrp);
void i386_lbr_synch(thread_t thr);
void i386_lbr_enable(void);
void i386_lbr_disable(void);
extern bool last_branch_support_enabled;
#endif /* MACH_KERNEL_PRIVATE */

#define ALL_CORES_RECOMMENDED (~(uint64_t)0)

extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);
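
/*
 * Usage sketch (illustrative), assuming the mask is indexed by logical CPU
 * id: restrict the scheduler's recommendation to the first four cores, then
 * restore the default.
 *
 *	sched_usercontrol_update_recommended_cores(0xFULL);
 *	// ... later, restore the default recommendation ...
 *	sched_usercontrol_update_recommended_cores(ALL_CORES_RECOMMENDED);
 */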

extern uint64_t reportphyreaddelayabs;
extern uint64_t reportphywritedelayabs;
extern uint32_t reportphyreadosbt;
extern uint32_t reportphywriteosbt;
extern uint32_t phyreadpanic;
extern uint32_t phywritepanic;
extern uint64_t tracephyreaddelayabs;
extern uint64_t tracephywritedelayabs;

void ml_hibernate_active_pre(void);
void ml_hibernate_active_post(void);

#endif /* XNU_KERNEL_PRIVATE */
#endif /* _I386_MACHINE_ROUTINES_H_ */