/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
__BEGIN_DECLS
+#ifdef XNU_KERNEL_PRIVATE
+
/* Are we a 64-bit platform? */
boolean_t ml_is64bit(void);
/* Initialize Interrupts */
void ml_init_interrupt(void);
-/* Get Interrupts Enabled */
-boolean_t ml_get_interrupts_enabled(void);
-
-/* Set Interrupts Enabled */
-boolean_t ml_set_interrupts_enabled(boolean_t enable);
-
-/* Check if running at interrupt context */
-boolean_t ml_at_interrupt_context(void);
-
/* Generate a fake interrupt */
void ml_cause_interrupt(void);
+/* Install an interrupt handler */
+void ml_install_interrupt_handler(
+ void *nub,
+ int source,
+ void *target,
+ IOInterruptHandler handler,
+ void *refCon);
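+/*
+ * Illustrative sketch (not part of this header): a handler wired up via
+ * ml_install_interrupt_handler. The nub, source, target, refCon and
+ * my_handler names below are hypothetical placeholders; in practice this
+ * registration is normally mediated by IOKit rather than called directly.
+ *
+ *   static void my_handler(void *target, void *refCon, void *nub, int source)
+ *   {
+ *       // acknowledge and service the hypothetical device here
+ *   }
+ *
+ *   ml_install_interrupt_handler(nub, source, target,
+ *                                (IOInterruptHandler)my_handler, refCon);
+ */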
+
void ml_get_timebase(unsigned long long *timestamp);
void ml_init_lock_timeout(void);
+vm_offset_t
+ml_static_ptovirt(
+ vm_offset_t);
+
+void ml_static_mfree(
+ vm_offset_t,
+ vm_size_t);
+
+/* boot memory allocation */
+vm_offset_t ml_static_malloc(
+ vm_size_t size);
+
+/* virtual to physical on wired pages */
+vm_offset_t ml_vtophys(
+ vm_offset_t vaddr);
+
+vm_size_t ml_nofault_copy(
+ vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size);
+
+/* Machine topology info */
+uint64_t ml_cpu_cache_size(unsigned int level);
+uint64_t ml_cpu_cache_sharing(unsigned int level);
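+/*
+ * Illustrative sketch: querying L2 topology, assuming 'level' indexes the
+ * cache hierarchy (2 for L2). Variable names are hypothetical.
+ *
+ *   uint64_t l2_bytes   = ml_cpu_cache_size(2);
+ *   uint64_t l2_sharing = ml_cpu_cache_sharing(2);  // logical CPUs sharing L2
+ */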
+
+/* Initialize the maximum number of CPUs */
+void ml_init_max_cpus(
+ unsigned long max_cpus);
+
+extern void ml_cpu_up(void);
+extern void ml_cpu_down(void);
+
+void bzero_phys_nc(
+ addr64_t phys_address,
+ uint32_t length);
+
+#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
+/* IO memory map services */
+
+/* Map memory map IO space */
+vm_offset_t ml_io_map(
+ vm_offset_t phys_addr,
+ vm_size_t size);
+
+
+void ml_get_bouncepool_info(
+ vm_offset_t *phys_addr,
+ vm_size_t *size);
+
+/* Indicates if spinlock, IPI and other timeouts should be suspended */
+boolean_t machine_timeout_suspended(void);
+#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */
+
+/* Warm up a CPU to receive an interrupt */
+kern_return_t ml_interrupt_prewarm(uint64_t deadline);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
+#ifdef KERNEL_PRIVATE
+
/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
/* Register a processor */
-kern_return_t ml_processor_register(
- cpu_id_t cpu_id,
- uint32_t lapic_id,
- processor_t *processor,
- ipi_handler_t *ipi_handler,
- boolean_t boot_cpu);
-
-/* Initialize Interrupts */
-void ml_install_interrupt_handler(
- void *nub,
- int source,
- void *target,
- IOInterruptHandler handler,
- void *refCon);
-
-#ifdef __APPLE_API_UNSTABLE
-vm_offset_t
-ml_static_ptovirt(
- vm_offset_t);
-
-#ifdef XNU_KERNEL_PRIVATE
-vm_offset_t
-ml_boot_ptovirt(
- vm_offset_t);
-#endif
+kern_return_t
+ml_processor_register(
+ cpu_id_t cpu_id,
+ uint32_t lapic_id,
+ processor_t *processor_out,
+ boolean_t boot_cpu,
+ boolean_t start);
/* PCI config cycle probing */
boolean_t ml_probe_read(
void ml_phys_write_double_64(
addr64_t paddr, unsigned long long data);
-void ml_static_mfree(
- vm_offset_t,
- vm_size_t);
-
-/* virtual to physical on wired pages */
-vm_offset_t ml_vtophys(
- vm_offset_t vaddr);
-
/* Struct for ml_cpu_get_info */
struct ml_cpu_info {
- unsigned long vector_unit;
- unsigned long cache_line_size;
- unsigned long l1_icache_size;
- unsigned long l1_dcache_size;
- unsigned long l2_settings;
- unsigned long l2_cache_size;
- unsigned long l3_settings;
- unsigned long l3_cache_size;
+ uint32_t vector_unit;
+ uint32_t cache_line_size;
+ uint32_t l1_icache_size;
+ uint32_t l1_dcache_size;
+ uint32_t l2_settings;
+ uint32_t l2_cache_size;
+ uint32_t l3_settings;
+ uint32_t l3_cache_size;
};
typedef struct ml_cpu_info ml_cpu_info_t;
/* Get processor info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
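/*
 * Illustrative sketch: filling an ml_cpu_info_t on the stack and reading one
 * of its fields. Variable names are hypothetical.
 *
 *   ml_cpu_info_t info;
 *   ml_cpu_get_info(&info);
 *   uint32_t line_size = info.cache_line_size;
 */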
-#endif /* __APPLE_API_UNSTABLE */
-
-#ifdef __APPLE_API_PRIVATE
-#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
-/* IO memory map services */
-
-/* Map memory map IO space */
-vm_offset_t ml_io_map(
- vm_offset_t phys_addr,
- vm_size_t size);
-
-/* boot memory allocation */
-vm_offset_t ml_static_malloc(
- vm_size_t size);
-
-
-extern uint32_t bounce_pool_base;
-extern uint32_t bounce_pool_size;
-
-void ml_get_bouncepool_info(
- vm_offset_t *phys_addr,
- vm_size_t *size);
-
-
-#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */
-
-/* Zero bytes starting at a physical address */
-void bzero_phys(
- addr64_t phys_address,
- uint32_t length);
-
void ml_thread_policy(
thread_t thread,
unsigned policy_id,
#define MACHINE_NETWORK_WORKLOOP 0x00000001
#define MACHINE_NETWORK_NETISR 0x00000002
-/* Initialize the maximum number of CPUs */
-void ml_init_max_cpus(
- unsigned long max_cpus);
-
/* Return the maximum number of CPUs set by ml_init_max_cpus() */
int ml_get_max_cpus(
void);
-extern void ml_cpu_up(void);
-extern void ml_cpu_down(void);
-
-extern int set_be_bit(void);
-extern int clr_be_bit(void);
-extern int be_tracing(void);
-
+/*
+ * The following routines are implemented in pmCPU.c, not machine_routines.c.
+ */
extern void ml_set_maxsnoop(uint32_t maxdelay);
extern unsigned ml_get_maxsnoop(void);
extern void ml_set_maxbusdelay(uint32_t mdelay);
extern uint32_t ml_get_maxbusdelay(void);
-extern void ml_hpet_cfg(uint32_t cpu, uint32_t hpetVect);
+extern void ml_set_maxintdelay(uint64_t mdelay);
+extern uint64_t ml_get_maxintdelay(void);
+extern boolean_t ml_get_interrupt_prewake_applicable(void);
+
extern uint64_t tmrCvt(uint64_t time, uint64_t conversion);
-#endif /* __APPLE_API_PRIVATE */
+extern uint64_t ml_cpu_int_event_time(void);
+
+#endif /* KERNEL_PRIVATE */
+
+/* Get Interrupts Enabled */
+boolean_t ml_get_interrupts_enabled(void);
+
+/* Set Interrupts Enabled */
+boolean_t ml_set_interrupts_enabled(boolean_t enable);
+
+/* Check if running at interrupt context */
+boolean_t ml_at_interrupt_context(void);
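+/*
+ * Illustrative sketch: the usual save/disable/restore pattern built on
+ * ml_set_interrupts_enabled, which returns the previous interrupt state.
+ *
+ *   boolean_t istate = ml_set_interrupts_enabled(FALSE);
+ *   // ... short critical section with interrupts masked ...
+ *   (void) ml_set_interrupts_enabled(istate);
+ */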
+
+/* Zero bytes starting at a physical address */
+void bzero_phys(
+ addr64_t phys_address,
+ uint32_t length);
+
+/* Bytes available on current stack */
+vm_offset_t ml_stack_remaining(void);
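+/*
+ * Illustrative sketch: guarding a deep code path on remaining kernel stack.
+ * The 4096-byte threshold is an arbitrary example value.
+ *
+ *   if (ml_stack_remaining() < 4096) {
+ *       // take a shallower path, or defer the work
+ *   }
+ */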
+
+#if CONFIG_COUNTERS
+void ml_get_csw_threads(thread_t * /*old*/, thread_t * /*new*/);
+#endif /* CONFIG_COUNTERS */
__END_DECLS
+#ifdef XNU_KERNEL_PRIVATE
+boolean_t ml_fpu_avx_enabled(void);
+void interrupt_latency_tracker_setup(void);
+void interrupt_reset_latency_stats(void);
+void interrupt_populate_latency_stats(char *, unsigned);
+
+#endif /* XNU_KERNEL_PRIVATE */
#endif /* _I386_MACHINE_ROUTINES_H_ */