-	lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
-	result = vm_map_find_space(kernel_map,
-				   &lapic_vbase64,
-				   round_page(LAPIC_SIZE), 0,
-				   VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
-	/* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
-	 */
-	lapic_vbase = (vm_offset_t) lapic_vbase64;
-	if (result != KERN_SUCCESS) {
-		panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result);
+	if (lapic_vbase == 0) {
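+		/*
+		 * Reserve a page-rounded virtual range in the kernel map for
+		 * the local APIC registers; the allocation is tagged
+		 * VM_KERN_MEMORY_IOKIT so it is attributed to IOKit in the
+		 * kernel's memory accounting.
+		 */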
+		lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
+		result = vm_map_find_space(kernel_map,
+					   &lapic_vbase64,
+					   round_page(LAPIC_SIZE), 0,
+					   VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT), &entry);
+		/* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
+		 */
+		lapic_vbase = (vm_offset_t) lapic_vbase64;
+		if (result != KERN_SUCCESS) {
+			panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result);
+		}
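+		/*
+		 * vm_map_find_space() returns with the kernel map locked on
+		 * success; drop the lock now that the range is reserved.
+		 */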
+		vm_map_unlock(kernel_map);
+
+		/*
+		 * Map in the local APIC non-cacheable, as recommended by Intel
+		 * in section 8.4.1 of the "System Programming Guide".
+		 * In fact, this is redundant because EFI will have assigned an
+		 * MTRR physical range containing the local APIC's MMIO space as
+		 * UC and this will override the default PAT setting.
+		 */
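+		/*
+		 * Enter a wired, read/write mapping of the LAPIC's physical
+		 * base with the VM_WIMG_IO (uncacheable) attribute.
+		 */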
+		pmap_enter(pmap_kernel(),
+			   lapic_vbase,
+			   (ppnum_t) i386_btop(lapic_pbase),
+			   VM_PROT_READ|VM_PROT_WRITE,
+			   VM_PROT_NONE,
+			   VM_WIMG_IO,
+			   TRUE);