+/*
+ * NOTE(review): this chunk is declarations only (no bodies visible here);
+ * the per-routine comments below are inferred from names and signatures —
+ * confirm against the corresponding implementations.
+ */
+
+/* Copy 'nbytes' bytes between two physical addresses. */
+extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t nbytes);
+
+/* Allow a function to get a quick virtual mapping of a physical page. */
+extern int apply_func_phys(addr64_t src64, vm_size_t bytes, int (*func)(void * buffer, vm_size_t bytes, void * arg), void * arg);
+
+/* Machine-level physical-to-physical copy; int return presumably signals
+ * success/failure — verify convention at the definition. */
+extern int ml_copy_phys(addr64_t, addr64_t, vm_size_t);
+
+/* Flush all cachelines for a page. */
+extern void cache_flush_page_phys(ppnum_t pa);
+
+/* Flushing for incoherent I/O */
+extern void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
+extern void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
+
+/* Read the system clock; per the name, caller must already have
+ * interrupts disabled. */
+extern void sysclk_gettime_interrupts_disabled(
+ mach_timespec_t *cur_time);
+
+/* Real-time-clock nanotime plumbing (commpage init, sleep/wake resync,
+ * timer start, nap/drift adjustment). Unnamed uint64_t parameters are
+ * undocumented here — see definitions. */
+extern void rtc_nanotime_init_commpage(void);
+
+extern void rtc_sleep_wakeup(uint64_t base);
+
+extern void rtc_timer_start(void);
+
+extern void rtc_clock_napped(uint64_t, uint64_t);
+extern void rtc_clock_adjust(uint64_t);
+
+extern void pmap_lowmem_finalize(void);
+
+/* Low-level context switch; by convention returns the thread switched
+ * from — confirm in the assembly implementation. */
+thread_t Switch_context(thread_t, thread_continue_t, thread_t);
+
+/* Switch off 'thread's stack and invoke 'doshutdown' for 'processor';
+ * marked __not_tail_called so the caller frame is preserved. */
+__not_tail_called thread_t
+Shutdown_context(thread_t thread, void (*doshutdown)(processor_t), processor_t processor);
+
+#ifdef __x86_64__
+/* Pre/post platform-sleep hooks; the uint64_t is presumably a CR3 value
+ * (parameter name 'new_cr3') saved across sleep. */
+uint64_t x86_64_pre_sleep(void);
+void x86_64_post_sleep(uint64_t new_cr3);
+#endif
+
+/* Validate a user-supplied debug-register state image before it is
+ * loaded into hardware (32- and 64-bit variants). */
+boolean_t
+debug_state_is_valid32(x86_debug_state32_t *ds);
+
+boolean_t
+debug_state_is_valid64(x86_debug_state64_t *ds);
+
+/* Copy debug state from 'src' to 'target'; 'all' presumably selects a
+ * full vs. partial copy — confirm at the definitions. */
+void
+copy_debug_state32(x86_debug_state32_t *src, x86_debug_state32_t *target, boolean_t all);
+
+void
+copy_debug_state64(x86_debug_state64_t *src, x86_debug_state64_t *target, boolean_t all);
+
+/* Switch the machine PCB from thread 'old' to thread 'new'.
+ * NOTE(review): 'new' is fine in C but will break if this header is ever
+ * included from C++. */
+extern void act_machine_switch_pcb(thread_t old, thread_t new);
+
+/* Release boot-time idle page tables in [start, end); the two globals
+ * record the released range for later bookkeeping. */
+extern void Idle_PTs_release(vm_offset_t start, vm_offset_t end);
+extern ppnum_t released_PT_ppn;
+extern uint32_t released_PT_cnt;
+
+/* Fast-restart parameters */
+#define FULL_SLAVE_INIT (NULL)
+#define FAST_SLAVE_INIT ((void *)(uintptr_t)1)
+
+/* Performance-monitoring-counter control hook; opaque argument. */
+void cpu_pmc_control(void *);
+
+extern void pstate_trace(void);
+
+/* Cross-CPU interrupt watchdog (presumed from name — verify). */
+extern void mp_interrupt_watchdog(void);
+
+/* Apply a KASLR 'slide' to a kext Mach-O header, individually or for a
+ * whole kext collection's header addresses. */
+extern kern_return_t i386_slide_individual_kext(kernel_mach_header_t *mh, uintptr_t slide);
+
+extern kern_return_t i386_slide_kext_collection_mh_addrs(kernel_mach_header_t *mh, uintptr_t slide, bool adjust_mach_headers);
+
+#endif /* _I386_MISC_PROTOS_H_ */