/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
-#ifndef _I386_MACHINE_ROUTINES_H_
-#define _I386_MACHINE_ROUTINES_H_
+#ifndef _I386_MACHINE_ROUTINES_H_
+#define _I386_MACHINE_ROUTINES_H_
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <sys/cdefs.h>
#include <sys/appleapiopts.h>
+#include <stdarg.h>
+
__BEGIN_DECLS
#ifdef XNU_KERNEL_PRIVATE
/* Set the state of the FPU save area for signal handling */
-void ml_fp_setvalid(boolean_t);
+void ml_fp_setvalid(boolean_t);
-void ml_cpu_set_ldt(int);
+void ml_cpu_set_ldt(int);
/* Interrupt handling */
/* Initialize Interrupts */
void ml_init_interrupt(void);
-
/* Generate a fake interrupt */
void ml_cause_interrupt(void);
/* Install an interrupt handler */
void ml_install_interrupt_handler(
- void *nub,
- int source,
- void *target,
- IOInterruptHandler handler,
- void *refCon);
+ void *nub,
+ int source,
+ void *target,
+ IOInterruptHandler handler,
+ void *refCon);
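/*
 * Illustrative sketch (not part of this header): registering a handler
 * for one interrupt source. The nub, source, target, and refCon values
 * are hypothetical; the handler is invoked as (target, refCon, nub,
 * source) per the IOKit IOInterruptHandler convention.
 *
 *	ml_install_interrupt_handler(my_nub, my_source, my_target,
 *	    my_handler, my_refcon);
 */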
-void ml_get_timebase(unsigned long long *timestamp);
-void ml_init_lock_timeout(void);
+
+uint64_t ml_get_timebase(void);
+uint64_t ml_get_timebase_entropy(void);
+
+void ml_init_lock_timeout(void);
+void ml_init_delay_spin_threshold(int);
+boolean_t ml_delay_should_spin(uint64_t interval);
+
+extern void ml_delay_on_yield(void);
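/*
 * Illustrative sketch (assumes a caller-supplied timebase 'deadline'):
 * bounding a short wait, spinning only when the remaining interval is
 * short enough that blocking would cost more than it saves.
 *
 *	uint64_t now = ml_get_timebase();
 *	if (now < deadline && ml_delay_should_spin(deadline - now)) {
 *		while (ml_get_timebase() < deadline) {
 *			;	// busy-wait until the deadline passes
 *		}
 *	} else {
 *		// too long to spin: block or yield instead
 *	}
 */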
vm_offset_t
-ml_static_ptovirt(
+ ml_static_ptovirt(
vm_offset_t);
void ml_static_mfree(
vm_offset_t,
vm_size_t);
+kern_return_t
+ml_static_protect(
+ vm_offset_t start,
+ vm_size_t size,
+ vm_prot_t new_prot);
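/*
 * Illustrative sketch (hypothetical 'seg_start'/'seg_size'): making a
 * page-aligned static range read-only once initialization is complete.
 *
 *	kern_return_t kr = ml_static_protect(seg_start, seg_size,
 *	    VM_PROT_READ);
 *	assert(kr == KERN_SUCCESS);
 */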
+
/* boot memory allocation */
vm_offset_t ml_static_malloc(
vm_size_t size);
+vm_offset_t ml_static_slide(
+ vm_offset_t vaddr);
+
+kern_return_t
+ml_static_verify_page_protections(
+ uint64_t base, uint64_t size, vm_prot_t prot);
+
+vm_offset_t ml_static_unslide(
+ vm_offset_t vaddr);
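/*
 * Illustrative sketch (hypothetical 'ret_addr'): slide and unslide are
 * inverses over static kernel addresses, e.g. when symbolicating a
 * backtrace against unslid symbol addresses.
 *
 *	vm_offset_t stable = ml_static_unslide(ret_addr);
 *	vm_offset_t live   = ml_static_slide(stable);	// == ret_addr again
 */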
+
/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
vm_offset_t vaddr);
vm_size_t ml_nofault_copy(
vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size);
+boolean_t ml_validate_nofault(
+ vm_offset_t virtsrc, vm_size_t size);
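/*
 * Illustrative sketch (hypothetical 'src'/'dst'/'len'): a copy that must
 * not take a fault, e.g. from debugger context, gated on the source
 * range being mapped.
 *
 *	if (ml_validate_nofault(src, len)) {
 *		vm_size_t copied = ml_nofault_copy(src, dst, len);
 *		// 'copied' may be less than 'len' if a page is unmapped
 *	}
 */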
+
/* Machine topology info */
-uint64_t ml_cpu_cache_size(unsigned int level);
-uint64_t ml_cpu_cache_sharing(unsigned int level);
+uint64_t ml_cpu_cache_size(unsigned int level);
+uint64_t ml_cpu_cache_sharing(unsigned int level);
-/* Initialize the maximum number of CPUs */
-void ml_init_max_cpus(
- unsigned long max_cpus);
+/* Set the maximum number of CPUs */
+void ml_set_max_cpus(
+ unsigned int max_cpus);
-extern void ml_cpu_up(void);
-extern void ml_cpu_down(void);
+extern void ml_cpu_up(void);
+extern void ml_cpu_down(void);
void bzero_phys_nc(
- addr64_t phys_address,
- uint32_t length);
+ addr64_t phys_address,
+ uint32_t length);
+extern uint32_t interrupt_timer_coalescing_enabled;
+extern uint32_t idle_entry_timer_processing_hdeadline_threshold;
+
+#if TCOAL_INSTRUMENT
+#define TCOAL_DEBUG KERNEL_DEBUG_CONSTANT
+#else
+#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
+#endif /* TCOAL_INSTRUMENT */
-#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
+#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */
/* Map memory-mapped IO space */
vm_offset_t ml_io_map(
- vm_offset_t phys_addr,
+ vm_offset_t phys_addr,
vm_size_t size);
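/*
 * Illustrative sketch (hypothetical DEV_PHYS_BASE): mapping a device
 * register block so it can be accessed through the ml_io_* accessors
 * declared later in this header.
 *
 *	vm_offset_t regs = ml_io_map(DEV_PHYS_BASE, PAGE_SIZE);
 */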
-extern uint32_t bounce_pool_base;
-extern uint32_t bounce_pool_size;
-
-void ml_get_bouncepool_info(
- vm_offset_t *phys_addr,
- vm_size_t *size);
-
+void ml_get_bouncepool_info(
+ vm_offset_t *phys_addr,
+ vm_size_t *size);
+/* Indicates if spinlock, IPI and other timeouts should be suspended */
+boolean_t machine_timeout_suspended(void);
+void plctrace_disable(void);
#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */
+/* Warm up a CPU to receive an interrupt */
+kern_return_t ml_interrupt_prewarm(uint64_t deadline);
+
+/* Machine layer routine for intercepting panics */
+void ml_panic_trap_to_debugger(const char *panic_format_str,
+ va_list *panic_args,
+ unsigned int reason,
+ void *ctx,
+ uint64_t panic_options_mask,
+ unsigned long panic_caller);
#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
/* Struct for ml_processor_register */
struct ml_processor_info {
- cpu_id_t cpu_id;
- boolean_t boot_cpu;
- vm_offset_t start_paddr;
- boolean_t supports_nap;
- unsigned long l2cr_value;
- time_base_enable_t time_base_enable;
+ cpu_id_t cpu_id;
+ boolean_t boot_cpu;
+ vm_offset_t start_paddr;
+ boolean_t supports_nap;
+ unsigned long l2cr_value;
+ time_base_enable_t time_base_enable;
};
typedef struct ml_processor_info ml_processor_info_t;
/* Register a processor */
kern_return_t
ml_processor_register(
- cpu_id_t cpu_id,
- uint32_t lapic_id,
- processor_t *processor_out,
- boolean_t boot_cpu,
+ cpu_id_t cpu_id,
+ uint32_t lapic_id,
+ processor_t *processor_out,
+ boolean_t boot_cpu,
boolean_t start );
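/*
 * Illustrative sketch (hypothetical 'cpu_handle' and 'lapic_id'): how a
 * platform expert might register a non-boot CPU and start it at once.
 *
 *	processor_t processor;
 *	kern_return_t kr = ml_processor_register(cpu_handle, lapic_id,
 *	    &processor, FALSE, TRUE);	// boot_cpu=FALSE, start=TRUE
 */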
/* Read physical address double word */
unsigned long long ml_phys_read_double_64(
addr64_t paddr);
+unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
+unsigned int ml_io_read8(uintptr_t iovaddr);
+unsigned int ml_io_read16(uintptr_t iovaddr);
+unsigned int ml_io_read32(uintptr_t iovaddr);
+unsigned long long ml_io_read64(uintptr_t iovaddr);
+
+extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
+extern void ml_io_write8(uintptr_t vaddr, uint8_t val);
+extern void ml_io_write16(uintptr_t vaddr, uint16_t val);
+extern void ml_io_write32(uintptr_t vaddr, uint32_t val);
+extern void ml_io_write64(uintptr_t vaddr, uint64_t val);
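/*
 * Illustrative sketch (hypothetical 'regs' mapping and register offset):
 * sized MMIO accesses through a virtual address obtained from
 * ml_io_map().
 *
 *	uint32_t status = ml_io_read32(regs + 0x10);
 *	ml_io_write32(regs + 0x10, status | 0x1);	// set an enable bit
 */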
+
+extern uint32_t ml_port_io_read(uint16_t ioport, int size);
+extern uint8_t ml_port_io_read8(uint16_t ioport);
+extern uint16_t ml_port_io_read16(uint16_t ioport);
+extern uint32_t ml_port_io_read32(uint16_t ioport);
+extern void ml_port_io_write(uint16_t ioport, uint32_t val, int size);
+extern void ml_port_io_write8(uint16_t ioport, uint8_t val);
+extern void ml_port_io_write16(uint16_t ioport, uint16_t val);
+extern void ml_port_io_write32(uint16_t ioport, uint32_t val);
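/*
 * Illustrative sketch: legacy x86 port I/O using the conventional
 * CMOS/RTC index and data ports (0x70/0x71), shown for illustration
 * only.
 *
 *	ml_port_io_write8(0x70, 0x0A);		// select CMOS status register A
 *	uint8_t rega = ml_port_io_read8(0x71);	// read its contents
 */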
+
/* Write physical address byte */
void ml_phys_write_byte(
vm_offset_t paddr, unsigned int data);
/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
- unsigned long vector_unit;
- unsigned long cache_line_size;
- unsigned long l1_icache_size;
- unsigned long l1_dcache_size;
- unsigned long l2_settings;
- unsigned long l2_cache_size;
- unsigned long l3_settings;
- unsigned long l3_cache_size;
+ uint32_t vector_unit;
+ uint32_t cache_line_size;
+ uint32_t l1_icache_size;
+ uint32_t l1_dcache_size;
+ uint32_t l2_settings;
+ uint32_t l2_cache_size;
+ uint32_t l3_settings;
+ uint32_t l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;
+typedef enum {
+ CLUSTER_TYPE_SMP,
+} cluster_type_t;
+
/* Get processor info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
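/*
 * Illustrative sketch: querying cache geometry for the current CPU.
 *
 *	ml_cpu_info_t info;
 *	ml_cpu_get_info(&info);
 *	printf("L2 %u bytes, line size %u\n",
 *	    info.l2_cache_size, info.cache_line_size);
 */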

void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info);
-#define MACHINE_GROUP 0x00000001
-#define MACHINE_NETWORK_GROUP 0x10000000
-#define MACHINE_NETWORK_WORKLOOP 0x00000001
-#define MACHINE_NETWORK_NETISR 0x00000002
+#define MACHINE_GROUP 0x00000001
+#define MACHINE_NETWORK_GROUP 0x10000000
+#define MACHINE_NETWORK_WORKLOOP 0x00000001
+#define MACHINE_NETWORK_NETISR 0x00000002
-/* Return the maximum number of CPUs set by ml_init_max_cpus() */
-int ml_get_max_cpus(
+/* Return the maximum number of CPUs set by ml_set_max_cpus(), blocking if necessary */
+unsigned int ml_wait_max_cpus(
void);
/*
 * The following are in pmCPU.c not machine_routines.c.
 */
extern void ml_set_maxsnoop(uint32_t maxdelay);
extern unsigned ml_get_maxsnoop(void);
extern void ml_set_maxbusdelay(uint32_t mdelay);
extern uint32_t ml_get_maxbusdelay(void);
extern void ml_set_maxintdelay(uint64_t mdelay);
extern uint64_t ml_get_maxintdelay(void);
+extern boolean_t ml_get_interrupt_prewake_applicable(void);
extern uint64_t tmrCvt(uint64_t time, uint64_t conversion);
/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
+boolean_t ml_early_set_interrupts_enabled(boolean_t enable);
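/*
 * Illustrative sketch (the usual save/restore pattern): the return value
 * is the previous enable state, so critical sections nest cleanly.
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	// ... code that must run with interrupts masked ...
 *	(void) ml_set_interrupts_enabled(istate);
 */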
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);
+#ifdef XNU_KERNEL_PRIVATE
+extern boolean_t ml_is_quiescing(void);
+extern void ml_set_is_quiescing(boolean_t);
+extern uint64_t ml_get_booter_memory_size(void);
+#endif /* XNU_KERNEL_PRIVATE */
+
/* Zero bytes starting at a physical address */
void bzero_phys(
	addr64_t phys_address,
	vm_size_t length);
/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);
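/*
 * Illustrative sketch (the 4 KB cushion is an arbitrary example):
 * bailing out of a deep recursion before exhausting the kernel stack.
 *
 *	if (ml_stack_remaining() < 4096) {
 *		return KERN_RESOURCE_SHORTAGE;	// hypothetical error path
 *	}
 */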
-#if CONFIG_COUNTERS
-void ml_get_csw_threads(thread_t * /*old*/, thread_t * /*new*/);
-#endif /* CONFIG_COUNTERS */
-
__END_DECLS
+#if defined(MACH_KERNEL_PRIVATE)
+__private_extern__ uint64_t ml_phys_read_data(uint64_t paddr, int psz);
+__private_extern__ void ml_phys_write_data(uint64_t paddr,
+ unsigned long long data, int size);
+__private_extern__ uintptr_t
+pmap_verify_noncacheable(uintptr_t vaddr);
+void machine_lockdown(void);
+#endif /* MACH_KERNEL_PRIVATE */
+#ifdef XNU_KERNEL_PRIVATE
+
+boolean_t ml_fpu_avx_enabled(void);
+boolean_t ml_fpu_avx512_enabled(void);
+
+void interrupt_latency_tracker_setup(void);
+void interrupt_reset_latency_stats(void);
+void interrupt_populate_latency_stats(char *, unsigned);
+void ml_get_power_state(boolean_t *, boolean_t *);
+
+void timer_queue_expire_rescan(void*);
+void ml_timer_evaluate(void);
+boolean_t ml_timer_forced_evaluation(void);
+
+uint64_t ml_energy_stat(thread_t);
+void ml_gpu_stat_update(uint64_t);
+uint64_t ml_gpu_stat(thread_t);
+boolean_t ml_recent_wake(void);
+
+#ifdef MACH_KERNEL_PRIVATE
+struct i386_cpu_info;
+struct machine_thread;
+/* LBR support */
+void i386_lbr_init(struct i386_cpu_info *info_p, bool is_master);
+void i386_switch_lbrs(thread_t old, thread_t new);
+int i386_lbr_native_state_to_mach_thread_state(struct machine_thread *pcb, last_branch_state_t *machlbrp);
+void i386_lbr_synch(thread_t thr);
+void i386_lbr_enable(void);
+void i386_lbr_disable(void);
+extern bool last_branch_support_enabled;
+#endif /* MACH_KERNEL_PRIVATE */
+
+#define ALL_CORES_RECOMMENDED (~(uint64_t)0)
+
+extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores);
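/*
 * Illustrative sketch: restricting the scheduler to the low four cores
 * (one bit per core), then restoring the default recommendation.
 *
 *	sched_usercontrol_update_recommended_cores(0x0fULL);
 *	sched_usercontrol_update_recommended_cores(ALL_CORES_RECOMMENDED);
 */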
+
+extern uint64_t reportphyreaddelayabs;
+extern uint64_t reportphywritedelayabs;
+extern uint32_t reportphyreadosbt;
+extern uint32_t reportphywriteosbt;
+extern uint32_t phyreadpanic;
+extern uint32_t phywritepanic;
+extern uint64_t tracephyreaddelayabs;
+extern uint64_t tracephywritedelayabs;
+
+void ml_hibernate_active_pre(void);
+void ml_hibernate_active_post(void);
+#endif /* XNU_KERNEL_PRIVATE */
#endif /* _I386_MACHINE_ROUTINES_H_ */