+void
+pmap_switch(pmap_t tpmap)
+{
+ spl_t s;
+ int my_cpu;
+
+ s = splhigh(); /* Make sure interrupts are disabled */
+ my_cpu = cpu_number();
+
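+ /* Switch this cpu's page directory base to the target pmap's */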
+ set_dirbase(tpmap, my_cpu);
+
+ splx(s);
+}
+
+
+/*
+ * disable no-execute capability on
+ * the specified pmap
+ */
+void
+pmap_disable_NX(pmap_t pmap)
+{
+ pmap->nx_enabled = 0;
+}
+
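+/*
+ * Report page table page consumption as a fake zone: one PAGE_SIZE
+ * element per in-use PTE page, collectable but not exhaustable.
+ */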
+void
+pt_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
+ vm_size_t *alloc_size, int *collectable, int *exhaustable)
+{
+ *count = inuse_ptepages_count;
+ *cur_size = PAGE_SIZE * inuse_ptepages_count;
+ *max_size = PAGE_SIZE * (inuse_ptepages_count + vm_page_inactive_count + vm_page_active_count + vm_page_free_count);
+ *elem_size = PAGE_SIZE;
+ *alloc_size = PAGE_SIZE;
+
+ *collectable = 1;
+ *exhaustable = 0;
+}
+
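+/*
+ * Return the fixed high-memory virtual address reserved for per-cpu
+ * mapping type 'e' on the given cpu.
+ */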
+vm_offset_t pmap_cpu_high_map_vaddr(int cpu, enum high_cpu_types e)
+{
+ enum high_fixed_addresses a;
+ a = e + HIGH_CPU_END * cpu;
+ return pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
+}
+
+vm_offset_t pmap_high_map_vaddr(enum high_cpu_types e)
+{
+ return pmap_cpu_high_map_vaddr(cpu_number(), e);
+}
+
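+/*
+ * Enter 'pte' into the current cpu's private high-memory slot for
+ * mapping type 'e', invalidate the old TLB entry locally, and return
+ * the virtual address of the mapping.
+ */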
+vm_offset_t pmap_high_map(pt_entry_t pte, enum high_cpu_types e)
+{
+ enum high_fixed_addresses a;
+ vm_offset_t vaddr;
+
+ a = e + HIGH_CPU_END * cpu_number();
+ vaddr = (vm_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
+ pmap_store_pte(pte_unique_base + a, pte);
+
+ /* TLB flush for this page for this cpu */
+ invlpg((uintptr_t)vaddr);
+
+ return vaddr;
+}
+
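+/*
+ * NMI each cpu in the mask (used when cpus fail to acknowledge a TLB
+ * flush), then spin briefly so the NMIs have a chance to be taken.
+ */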
+static inline void
+pmap_cpuset_NMIPI(cpu_set cpu_mask)
+{
+ unsigned int cpu, cpu_bit;
+ uint64_t deadline;
+
+ for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+ if (cpu_mask & cpu_bit)
+ cpu_NMI_interrupt(cpu);
+ }
+ deadline = mach_absolute_time() + (LockTimeOut >> 2);
+ while (mach_absolute_time() < deadline)
+ cpu_pause();
+}
+
+
+/*
+ * Called with pmap locked, we:
+ * - scan through per-cpu data to see which other cpus need to flush
+ * - send an IPI to each non-idle cpu to be flushed
+ * - wait for all to signal back that they are inactive or we see that
+ * they are in an interrupt handler or at a safe point
+ * - flush the local tlb if active for this pmap
+ * - return ... the caller will unlock the pmap
+ */
+void
+pmap_flush_tlbs(pmap_t pmap)
+{
+ unsigned int cpu;
+ unsigned int cpu_bit;
+ cpu_set cpus_to_signal;
+ unsigned int my_cpu = cpu_number();
+ pmap_paddr_t pmap_cr3 = pmap->pm_cr3;
+ boolean_t flush_self = FALSE;
+ uint64_t deadline;
+
+ assert((processor_avail_count < 2) ||
+ (ml_get_interrupts_enabled() && get_preemption_level() != 0));
+
+ /*
+ * Scan other cpus for matching active or task CR3.
+ * For idle cpus (with no active map) we mark them invalid but
+ * don't signal -- they'll check as they go busy.
+ * Note: for the kernel pmap we look for 64-bit shared address maps.
+ */
+ cpus_to_signal = 0;
+ for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+ if (!cpu_datap(cpu)->cpu_running)
+ continue;
+ if ((cpu_datap(cpu)->cpu_task_cr3 == pmap_cr3) ||
+ (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) ||
+ (pmap->pm_shared) ||
+ ((pmap == kernel_pmap) &&
+ (!CPU_CR3_IS_ACTIVE(cpu) ||
+ cpu_datap(cpu)->cpu_task_map == TASK_MAP_64BIT_SHARED))) {
+ if (cpu == my_cpu) {
+ flush_self = TRUE;
+ continue;
+ }
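+ /*
+ * Mark the cpu's TLB stale and make the store visible before
+ * testing whether its CR3 is live and hence needs an IPI.
+ */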
+ cpu_datap(cpu)->cpu_tlb_invalid = TRUE;
+ __asm__ volatile("mfence");
+
+ if (CPU_CR3_IS_ACTIVE(cpu)) {
+ cpus_to_signal |= cpu_bit;
+ i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
+ }
+ }
+ }
+
+ PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START,
+ (int) pmap, cpus_to_signal, flush_self, 0, 0);
+
+ if (cpus_to_signal) {
+ cpu_set cpus_to_respond = cpus_to_signal;
+
+ deadline = mach_absolute_time() + LockTimeOut;
+ /*
+ * Wait for those other cpus to acknowledge
+ */
+ while (cpus_to_respond != 0) {
+ if (mach_absolute_time() > deadline) {
+ pmap_tlb_flush_timeout = TRUE;
+ pmap_cpuset_NMIPI(cpus_to_respond);
+ panic("pmap_flush_tlbs() timeout: "
+ "cpu(s) failing to respond to interrupts, pmap=%p cpus_to_respond=0x%lx",
+ pmap, cpus_to_respond);
+ }
+
+ for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+ if ((cpus_to_respond & cpu_bit) != 0) {
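+ /*
+ * A cpu needs no further waiting if it has gone offline,
+ * has already cleared its tlb_invalid flag, or no longer
+ * has an active CR3.
+ */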
+ if (!cpu_datap(cpu)->cpu_running ||
+ cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
+ !CPU_CR3_IS_ACTIVE(cpu)) {
+ cpus_to_respond &= ~cpu_bit;
+ }
+ cpu_pause();
+ }
+ if (cpus_to_respond == 0)
+ break;
+ }
+ }
+ }
+
+ /*
+ * Flush local tlb if required.
+ * We need this flush even if the pmap being changed
+ * is the user map... in case we do a copyin/out
+ * before returning to user mode.
+ */
+ if (flush_self)
+ flush_tlb();
+
+ PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END,
+ (int) pmap, cpus_to_signal, flush_self, 0, 0);
+}
+
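+/*
+ * Service a deferred TLB invalidation on the current cpu: flush the
+ * local TLB and clear this cpu's tlb_invalid flag.
+ */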
+void
+process_pmap_updates(void)
+{
+ assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+ flush_tlb();
+
+ current_cpu_datap()->cpu_tlb_invalid = FALSE;
+ __asm__ volatile("mfence");
+}
+
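+/*
+ * Interrupt-level entry point for TLB shootdowns signalled by
+ * pmap_flush_tlbs().
+ */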
+void
+pmap_update_interrupt(void)
+{
+ PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
+ 0, 0, 0, 0, 0);
+
+ process_pmap_updates();
+
+ PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
+ 0, 0, 0, 0, 0);
+}
+
+
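+/*
+ * Return the default cache attributes for a physical page:
+ * VM_WIMG_IO for pages not managed by the pmap layer,
+ * VM_WIMG_COPYBACK otherwise.
+ */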
+unsigned int
+pmap_cache_attributes(ppnum_t pn)
+{
+ if (!managed_page(ppn_to_pai(pn)))
+ return (VM_WIMG_IO);
+
+ return (VM_WIMG_COPYBACK);
+}
+
+#ifdef PMAP_DEBUG
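+/*
+ * Debug-only helpers that dump a pmap's top-level paging structures
+ * via kprintf.
+ */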
+void
+pmap_dump(pmap_t p)
+{
+ int i;
+
+ kprintf("pmap 0x%x\n", p);
+
+ kprintf(" pm_cr3 0x%llx\n", p->pm_cr3);
+ kprintf(" pm_pml4 0x%x\n", p->pm_pml4);
+ kprintf(" pm_pdpt 0x%x\n", p->pm_pdpt);
+
+ kprintf(" pml4[0] 0x%llx\n", *p->pm_pml4);
+ for (i = 0; i < 8; i++)
+ kprintf(" pdpt[%d] 0x%llx\n", i, p->pm_pdpt[i]);
+}
+
+void pmap_dump_wrap(void)
+{
+ pmap_dump(current_cpu_datap()->cpu_active_thread->task->map->pmap);
+}
+
+void
+dump_4GB_pdpt(pmap_t p)
+{
+ int spl;
+ pdpt_entry_t *user_pdptp;
+ pdpt_entry_t *kern_pdptp;
+ pdpt_entry_t *pml4p;
+
+ spl = splhigh();
+ while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) {
+ splx(spl);
+ pmap_expand_pml4(p, 0x0);
+ spl = splhigh();
+ }
+ kern_pdptp = kernel_pmap->pm_pdpt;
+ if (kern_pdptp == NULL)
+ panic("kern_pdptp == NULL");
+ kprintf("dump_4GB_pdpt(%p)\n"
+ "kern_pdptp=%p (phys=0x%016llx)\n"
+ "\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n"
+ "user_pdptp=%p (phys=0x%016llx)\n"
+ "\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n",
+ p, kern_pdptp, kvtophys(kern_pdptp),
+ kern_pdptp+0, *(kern_pdptp+0),
+ kern_pdptp+1, *(kern_pdptp+1),
+ kern_pdptp+2, *(kern_pdptp+2),
+ kern_pdptp+3, *(kern_pdptp+3),
+ kern_pdptp+4, *(kern_pdptp+4),
+ user_pdptp, kvtophys(user_pdptp),
+ user_pdptp+0, *(user_pdptp+0),
+ user_pdptp+1, *(user_pdptp+1),
+ user_pdptp+2, *(user_pdptp+2),
+ user_pdptp+3, *(user_pdptp+3),
+ user_pdptp+4, *(user_pdptp+4));
+ kprintf("user pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n",
+ p->pm_cr3, p->pm_hold, p->pm_pml4);
+ pml4p = (pdpt_entry_t *)p->pm_hold;
+ if (pml4p == NULL)
+ panic("user pml4p == NULL");
+ kprintf("\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n",
+ pml4p+0, *(pml4p),
+ pml4p+KERNEL_UBER_PML4_INDEX, *(pml4p+KERNEL_UBER_PML4_INDEX));
+ kprintf("kern pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n",
+ kernel_pmap->pm_cr3, kernel_pmap->pm_hold, kernel_pmap->pm_pml4);
+ pml4p = (pdpt_entry_t *)kernel_pmap->pm_hold;
+ if (pml4p == NULL)
+ panic("kern pml4p == NULL");
+ kprintf("\t 0x%08x: 0x%016llx\n"
+ "\t 0x%08x: 0x%016llx\n",
+ pml4p+0, *(pml4p),
+ pml4p+511, *(pml4p+511));
+ splx(spl);
+}
+
+void dump_4GB_pdpt_thread(thread_t tp)
+{
+ dump_4GB_pdpt(tp->map->pmap);
+}
+
+
+#endif