+vm_size_t ml_nofault_copy(
+ vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size);
+
+boolean_t ml_validate_nofault(
+ vm_offset_t virtsrc, vm_size_t size);
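+
+/*
+ * Illustrative sketch (not part of this interface): ml_nofault_copy()
+ * returns the number of bytes it managed to copy, so a short return
+ * value means part of the range could not be touched without faulting.
+ * The variables src_va, dst_va and buf below are hypothetical.
+ *
+ *     vm_size_t copied = ml_nofault_copy(src_va, dst_va, sizeof(buf));
+ *     if (copied != sizeof(buf)) {
+ *         // partial copy; the caller must handle the failure
+ *     }
+ */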
+
+/* Machine topology info */
+uint64_t ml_cpu_cache_size(unsigned int level);
+uint64_t ml_cpu_cache_sharing(unsigned int level);
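+
+/*
+ * Illustrative sketch (hypothetical usage): walk the cache hierarchy,
+ * treating a reported size of zero as "level not present".
+ *
+ *     for (unsigned int level = 1; level <= 3; level++) {
+ *         uint64_t bytes   = ml_cpu_cache_size(level);
+ *         uint64_t sharing = ml_cpu_cache_sharing(level);
+ *         if (bytes == 0)
+ *             break;
+ *         // 'sharing' is the number of logical CPUs sharing the level
+ *     }
+ */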
+
+/* Initialize the maximum number of CPUs */
+void ml_init_max_cpus(
+ unsigned long max_cpus);
+
+extern void ml_cpu_up(void);
+extern void ml_cpu_down(void);
+
+void bzero_phys_nc(
+ addr64_t phys_address,
+ uint32_t length);
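+
+/*
+ * Illustrative sketch (hypothetical physical address): clear a
+ * physically addressed page, e.g. before handing it to a device.
+ *
+ *     addr64_t frame_pa = page_phys_addr;   // hypothetical
+ *     bzero_phys_nc(frame_pa, PAGE_SIZE);
+ */
+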
+extern uint32_t interrupt_timer_coalescing_enabled;
+extern uint32_t idle_entry_timer_processing_hdeadline_threshold;
+
+#if TCOAL_INSTRUMENT
+#define TCOAL_DEBUG KERNEL_DEBUG_CONSTANT
+#else
+#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
+#endif /* TCOAL_INSTRUMENT */
+
+#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
+/* IO memory map services */
+
+/* Map memory-mapped IO space */
+vm_offset_t ml_io_map(
+ vm_offset_t phys_addr,
+ vm_size_t size);
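+
+/*
+ * Illustrative sketch (hypothetical device): map a device register
+ * window into the kernel's virtual address space; the returned
+ * vm_offset_t is the virtual base of the mapping.
+ *
+ *     vm_offset_t regs = ml_io_map(dev_phys_base, dev_regs_size);
+ */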
+
+void ml_get_bouncepool_info(
+ vm_offset_t *phys_addr,
+ vm_size_t *size);
+
+/* Indicates if spinlock, IPI and other timeouts should be suspended */
+boolean_t machine_timeout_suspended(void);
+void plctrace_disable(void);
+#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */
+
+/* Warm up a CPU to receive an interrupt */
+kern_return_t ml_interrupt_prewarm(uint64_t deadline);
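+
+/*
+ * Illustrative sketch: a driver expecting a device interrupt near the
+ * hypothetical absolute-time value 'deadline' can ask the machine
+ * layer to prepare a CPU ahead of time.
+ *
+ *     kern_return_t kr = ml_interrupt_prewarm(deadline);
+ *     if (kr != KERN_SUCCESS) {
+ *         // prewarming not applicable; the interrupt is still delivered
+ *     }
+ */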
+
+/* Check if the machine layer wants to intercept a panic call */
+boolean_t ml_wants_panic_trap_to_debugger(void);
+
+/* Machine layer routine for intercepting panics */
+void ml_panic_trap_to_debugger(const char *panic_format_str,
+ va_list *panic_args,
+ unsigned int reason,
+ void *ctx,
+ uint64_t panic_options_mask,
+ unsigned long panic_caller);
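+
+/*
+ * Illustrative sketch of how a panic path might use the pair above
+ * (the argument values are hypothetical):
+ *
+ *     if (ml_wants_panic_trap_to_debugger()) {
+ *         ml_panic_trap_to_debugger(format, &args, reason, ctx,
+ *             options_mask, caller);
+ *     }
+ */
+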
+#endif /* XNU_KERNEL_PRIVATE */
+
+#ifdef KERNEL_PRIVATE
+
+/* Type for the Time Base Enable function */
+typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);
+
+/* Type for the IPI Handler */
+typedef void (*ipi_handler_t)(void);
+
+/* Struct for ml_processor_register */
+struct ml_processor_info {
+ cpu_id_t cpu_id;
+ boolean_t boot_cpu;
+ vm_offset_t start_paddr;
+ boolean_t supports_nap;
+ unsigned long l2cr_value;
+ time_base_enable_t time_base_enable;
+};
+
+typedef struct ml_processor_info ml_processor_info_t;
+
+/* Register a processor */
+kern_return_t
+ml_processor_register(
+ cpu_id_t cpu_id,
+ uint32_t lapic_id,
+ processor_t *processor_out,
+ boolean_t boot_cpu,
+ boolean_t start);
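+
+/*
+ * Illustrative sketch (hypothetical IDs): register an application
+ * processor discovered by the platform expert; the new processor_t is
+ * returned through 'processor_out'.
+ *
+ *     processor_t proc;
+ *     kern_return_t kr = ml_processor_register(cpu_id, lapic_id, &proc,
+ *         FALSE,   // boot_cpu: not the bootstrap processor
+ *         TRUE);   // start: bring the CPU up now
+ */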
+
+/* PCI config cycle probing */
+boolean_t ml_probe_read(
+ vm_offset_t paddr,
+ unsigned int *val);
+boolean_t ml_probe_read_64(
+ addr64_t paddr,
+ unsigned int *val);
+
+/* Read physical address byte */
+unsigned int ml_phys_read_byte(
+ vm_offset_t paddr);
+unsigned int ml_phys_read_byte_64(
+ addr64_t paddr);
+
+/* Read physical address half word */
+unsigned int ml_phys_read_half(
+ vm_offset_t paddr);
+unsigned int ml_phys_read_half_64(
+ addr64_t paddr);
+
+/* Read physical address word */
+unsigned int ml_phys_read(
+ vm_offset_t paddr);
+unsigned int ml_phys_read_64(
+ addr64_t paddr);
+unsigned int ml_phys_read_word(
+ vm_offset_t paddr);
+unsigned int ml_phys_read_word_64(
+ addr64_t paddr);
+
+/* Read physical address double word */
+unsigned long long ml_phys_read_double(
+ vm_offset_t paddr);
+unsigned long long ml_phys_read_double_64(
+ addr64_t paddr);
+
+unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
+unsigned int ml_io_read8(uintptr_t iovaddr);
+unsigned int ml_io_read16(uintptr_t iovaddr);
+unsigned int ml_io_read32(uintptr_t iovaddr);
+unsigned long long ml_io_read64(uintptr_t iovaddr);
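+
+/*
+ * Illustrative sketch: the ml_io_read* routines take a virtual address
+ * (such as one returned by ml_io_map()); 'regs' and the 0x10 register
+ * offset below are hypothetical.
+ *
+ *     uint32_t status = (uint32_t)ml_io_read32(regs + 0x10);
+ */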
+
+/* Write physical address byte */
+void ml_phys_write_byte(
+ vm_offset_t paddr, unsigned int data);
+void ml_phys_write_byte_64(
+ addr64_t paddr, unsigned int data);
+
+/* Write physical address half word */
+void ml_phys_write_half(
+ vm_offset_t paddr, unsigned int data);
+void ml_phys_write_half_64(
+ addr64_t paddr, unsigned int data);
+
+/* Write physical address word */
+void ml_phys_write(
+ vm_offset_t paddr, unsigned int data);
+void ml_phys_write_64(
+ addr64_t paddr, unsigned int data);
+void ml_phys_write_word(
+ vm_offset_t paddr, unsigned int data);
+void ml_phys_write_word_64(
+ addr64_t paddr, unsigned int data);
+
+/* Write physical address double word */
+void ml_phys_write_double(
+ vm_offset_t paddr, unsigned long long data);
+void ml_phys_write_double_64(
+ addr64_t paddr, unsigned long long data);
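+
+/*
+ * Illustrative sketch (hypothetical physical address 'pa'): the _64
+ * variants take a full 64-bit addr64_t, while the plain variants take
+ * a vm_offset_t.
+ *
+ *     unsigned int word = ml_phys_read_word_64(pa);
+ *     ml_phys_write_word_64(pa, word | 0x1);
+ */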
+
+/* Struct for ml_cpu_get_info */
+struct ml_cpu_info {
+ uint32_t vector_unit;
+ uint32_t cache_line_size;
+ uint32_t l1_icache_size;
+ uint32_t l1_dcache_size;
+ uint32_t l2_settings;
+ uint32_t l2_cache_size;
+ uint32_t l3_settings;
+ uint32_t l3_cache_size;
+};
+
+typedef struct ml_cpu_info ml_cpu_info_t;
+
+/* Get processor info */
+void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
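+
+/*
+ * Illustrative sketch: the caller supplies the storage and the machine
+ * layer fills it in.
+ *
+ *     ml_cpu_info_t info;
+ *     ml_cpu_get_info(&info);
+ *     // info.cache_line_size, info.l2_cache_size, etc. are now valid
+ */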
+
+void ml_thread_policy(
+ thread_t thread,
+ unsigned policy_id,
+ unsigned policy_info);
+
+#define MACHINE_GROUP 0x00000001
+#define MACHINE_NETWORK_GROUP 0x10000000
+#define MACHINE_NETWORK_WORKLOOP 0x00000001
+#define MACHINE_NETWORK_NETISR 0x00000002
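+
+/*
+ * Illustrative sketch (hypothetical thread): tag a networking workloop
+ * thread so the machine layer can apply group-specific placement.
+ *
+ *     ml_thread_policy(thread, MACHINE_NETWORK_GROUP,
+ *         MACHINE_NETWORK_WORKLOOP);
+ */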
+
+/* Return the maximum number of CPUs set by ml_init_max_cpus() */
+int ml_get_max_cpus(
+ void);
+
+/*
+ * The following are implemented in pmCPU.c, not machine_routines.c.
+ */
+extern void ml_set_maxsnoop(uint32_t maxdelay);
+extern unsigned ml_get_maxsnoop(void);
+extern void ml_set_maxbusdelay(uint32_t mdelay);
+extern uint32_t ml_get_maxbusdelay(void);
+extern void ml_set_maxintdelay(uint64_t mdelay);
+extern uint64_t ml_get_maxintdelay(void);
+extern boolean_t ml_get_interrupt_prewake_applicable(void);
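+
+/*
+ * Illustrative sketch (hypothetical values): a driver with tight DMA
+ * latency requirements can cap the bus delay while it is active and
+ * restore the previous value when done.
+ *
+ *     uint32_t prev = ml_get_maxbusdelay();
+ *     ml_set_maxbusdelay(50);     // hypothetical bound; units are platform-defined
+ *     // ... latency-sensitive work ...
+ *     ml_set_maxbusdelay(prev);
+ */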
+
+
+extern uint64_t tmrCvt(uint64_t time, uint64_t conversion);
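+
+/*
+ * Illustrative sketch: tmrCvt() scales a time value by a conversion
+ * factor, e.g. translating ticks from one timebase to another using a
+ * ratio computed elsewhere ('tsc_to_abs' below is hypothetical).
+ *
+ *     uint64_t abs_time = tmrCvt(tsc_ticks, tsc_to_abs);
+ */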
+
+extern uint64_t ml_cpu_int_event_time(void);
+
+#endif /* KERNEL_PRIVATE */