+/* Initialize lock timeout values */
+void ml_init_lock_timeout(void);
+
+/* Physical to virtual translation for statically mapped memory */
+vm_offset_t
+ml_static_ptovirt(
+ vm_offset_t paddr);
+
+/* Release statically allocated (boot) memory back to the VM system */
+void ml_static_mfree(
+ vm_offset_t vaddr,
+ vm_size_t size);
+
+/* Boot-time memory allocation */
+vm_offset_t ml_static_malloc(
+ vm_size_t size);
+
+/* Virtual-to-physical translation for wired pages */
+vm_offset_t ml_vtophys(
+ vm_offset_t vaddr);
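+
+/*
+ * Illustrative usage sketch; wired_buffer is a hypothetical, wired kernel
+ * buffer.  A zero result is assumed here to mean the address had no valid
+ * mapping.
+ *
+ *     vm_offset_t paddr = ml_vtophys((vm_offset_t)wired_buffer);
+ *     if (paddr == 0) {
+ *             // no valid translation for this address
+ *     }
+ */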
+
+/* Copy between virtual addresses without taking a fault; returns bytes copied */
+vm_size_t ml_nofault_copy(
+ vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size);
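+
+/*
+ * Illustrative usage sketch; src, dst and len are hypothetical.  The return
+ * value is the number of bytes actually copied, so a short copy indicates
+ * that part of the range was not mapped.
+ *
+ *     vm_size_t copied = ml_nofault_copy((vm_offset_t)src,
+ *         (vm_offset_t)dst, len);
+ *     if (copied != len) {
+ *             // short copy: handle the unmapped portion
+ *     }
+ */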
+
+/* Machine topology info */
+uint64_t ml_cpu_cache_size(unsigned int level);
+uint64_t ml_cpu_cache_sharing(unsigned int level);
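+
+/*
+ * Illustrative usage sketch; the bound of four levels, the 1-based level
+ * numbering and the use of kprintf() are assumptions for the example.  A
+ * zero size is taken to mean the cache level is not present.
+ *
+ *     for (unsigned int level = 1; level <= 4; level++) {
+ *             uint64_t size    = ml_cpu_cache_size(level);
+ *             uint64_t sharing = ml_cpu_cache_sharing(level);
+ *             if (size == 0)
+ *                     break;
+ *             kprintf("L%u cache: %llu bytes, shared by %llu CPUs\n",
+ *                 level, size, sharing);
+ *     }
+ */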
+
+/* Initialize the maximum number of CPUs */
+void ml_init_max_cpus(
+ unsigned long max_cpus);
+
+/* Notify the machine layer that a CPU is coming online / going offline */
+extern void ml_cpu_up(void);
+extern void ml_cpu_down(void);
+
+/* Zero a range of physical memory using non-cached accesses */
+void bzero_phys_nc(
+ addr64_t phys_address,
+ uint32_t length);
+
+#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
+/* Memory-mapped IO services */
+
+/* Map memory-mapped IO space */
+vm_offset_t ml_io_map(
+ vm_offset_t phys_addr,
+ vm_size_t size);
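+
+/*
+ * Illustrative usage sketch; DEVICE_REGS_PHYS, DEVICE_REGS_SIZE and the 0x10
+ * register offset are hypothetical.  The returned virtual address can be used
+ * for volatile register accesses.
+ *
+ *     vm_offset_t regs = ml_io_map(DEVICE_REGS_PHYS, DEVICE_REGS_SIZE);
+ *     if (regs != 0) {
+ *             volatile uint32_t *status = (volatile uint32_t *)(regs + 0x10);
+ *             uint32_t value = *status;
+ *     }
+ */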
+
+/* Return the base and size of the bounce pool, if one exists */
+void ml_get_bouncepool_info(
+ vm_offset_t *phys_addr,
+ vm_size_t *size);
+
+/* Indicates whether spinlock, IPI and other timeouts should be suspended */
+boolean_t machine_timeout_suspended(void);
+#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */
+
+/* Warm up a CPU to receive an interrupt */
+kern_return_t ml_interrupt_prewarm(uint64_t deadline);
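+
+/*
+ * Illustrative usage sketch; the 1 ms lead time is an arbitrary example.
+ * The deadline is expressed in absolute-time units.
+ *
+ *     uint64_t lead;
+ *     nanoseconds_to_absolutetime(NSEC_PER_MSEC, &lead);
+ *     if (ml_interrupt_prewarm(mach_absolute_time() + lead) != KERN_SUCCESS) {
+ *             // prewarming unavailable or deadline already passed
+ *     }
+ */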
+
+#endif /* XNU_KERNEL_PRIVATE */
+
+#ifdef KERNEL_PRIVATE