- pmap_map(trunc_page_32(sectDATAB), trunc_page_32(sectDATAB),
- round_page_32(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE);
-
-/* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
-* but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have
-* to map both segments page-by-page.
-*/
-
- for (addr = trunc_page_32(sectKLDB);
- addr < round_page_32(sectKLDB+sectSizeKLD);
+#ifdef __MACHO__
+#if DEBUG
+ kprintf("Mapping memory:\n");
+ kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry),
+ trunc_page(exception_entry), round_page(exception_end));
+ kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB),
+ trunc_page(sectTEXTB), round_page(sectTEXTB+sectSizeTEXT));
+ kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB),
+ trunc_page(sectDATAB), round_page(sectDATAB+sectSizeDATA));
+ kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB),
+ trunc_page(sectLINKB), round_page(sectLINKB+sectSizeLINK));
+ kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB),
+ trunc_page(sectKLDB), round_page(sectKLDB+sectSizeKLD));
+ kprintf(" end: %08X, %08X - %08X\n", trunc_page(end),
+ trunc_page(end), static_memory_end);
+#endif /* DEBUG */
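+ /* Map the exception vectors and kernel TEXT read/execute, and kernel DATA
+  * read/write, each as a single contiguous range.
+  */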
+ pmap_map(trunc_page(exception_entry), trunc_page(exception_entry),
+ round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE);
+ pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB),
+ round_page(sectTEXTB+sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE);
+ pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB),
+ round_page(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE);
+
+ /* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
+ * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have
+ * to map both segments page-by-page.
+ */
+ for (addr = trunc_page(sectKLDB);
+ addr < round_page(sectKLDB+sectSizeKLD);