+    pmap_paddr_t phys;
+
+    assert(pn != vm_page_fictitious_addr);
+
+    if (!pmap_initialized)
+        return (TRUE);
+    phys = (pmap_paddr_t) i386_ptob(pn);
+    if (!pmap_valid_page(pn))
+        return (FALSE);
+
+    return TRUE;
+}
+
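+ /*
+  * Prime the pv_entry free list: allocate 5 * PV_ALLOC_CHUNK entries from
+  * pv_list_zone and release each one with PV_FREE(), presumably so that
+  * early mapping setup does not have to block in zalloc().
+  */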
+void
+mapping_free_prime()
+{
+    int i;
+    pv_entry_t pv_e;
+
+    for (i = 0; i < (5 * PV_ALLOC_CHUNK); i++) {
+        pv_e = (pv_entry_t) zalloc(pv_list_zone);
+        PV_FREE(pv_e);
+    }
+}
+
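+ /*
+  * Replenish the pv_entry free list.  The first call also initializes
+  * mapping_adjust_call so this routine can be driven as a thread call.
+  * When pv_free_count drops below PV_LOW_WATER_MARK, another
+  * PV_ALLOC_CHUNK entries are allocated and added to the free list at
+  * splvm.  Clearing mappingrecurse presumably re-arms the callout; the
+  * flag is managed by callers outside this excerpt.
+  */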
+void
+mapping_adjust()
+{
+    pv_entry_t pv_e;
+    int i;
+    int spl;
+
+    if (mapping_adjust_call == NULL) {
+        thread_call_setup(&mapping_adjust_call_data,
+            (thread_call_func_t) mapping_adjust,
+            (thread_call_param_t) NULL);
+        mapping_adjust_call = &mapping_adjust_call_data;
+    }
+    /* XXX rethink best way to do locking here */
+    if (pv_free_count < PV_LOW_WATER_MARK) {
+        for (i = 0; i < PV_ALLOC_CHUNK; i++) {
+            pv_e = (pv_entry_t) zalloc(pv_list_zone);
+            SPLVM(spl);
+            PV_FREE(pv_e);
+            SPLX(spl);
+        }
+    }
+    mappingrecurse = 0;
+}
+
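+ /*
+  * Alias the kernel commpage into the user commpage range: for each of the
+  * cnt pages, copy the kernel PTE, set INTEL_PTE_USER and INTEL_PTE_GLOBAL,
+  * and clear INTEL_PTE_WRITE so the user-visible mapping is read-only.
+  */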
+void
+pmap_commpage_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt)
+{
+    int i;
+    pt_entry_t *opte, *npte;
+    pt_entry_t pte;
+
+    for (i = 0; i < cnt; i++) {
+        opte = pmap_pte(kernel_pmap, kernel_commpage);
+        if (0 == opte) panic("kernel_commpage");
+        npte = pmap_pte(kernel_pmap, user_commpage);
+        if (0 == npte) panic("user_commpage");
+        pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
+        pte &= ~INTEL_PTE_WRITE;    // ensure read only
+        WRITE_PTE_FAST(npte, pte);
+        kernel_commpage += INTEL_PGBYTES;
+        user_commpage += INTEL_PGBYTES;
+    }
+}
+
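+ /*
+  * Statically allocated per-cpu pmap state for the boot cpu; all other
+  * cpus get dynamically allocated copies from pmap_cpu_alloc() below.
+  */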
+static cpu_pmap_t cpu_pmap_master;
+static struct pmap_update_list cpu_update_list_master;
+
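+ /*
+  * Allocate the per-cpu pmap state: the cpu_pmap_t itself, its TLB flush
+  * update list, and PMAP_NWINDOWS temporary mapping windows used by the
+  * copy/zero code in loose_ends.c.  The boot cpu uses the static master
+  * copies above.  On any allocation failure the partial state is released
+  * with pmap_cpu_free() and NULL is returned.
+  */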
+struct cpu_pmap *
+pmap_cpu_alloc(boolean_t is_boot_cpu)
+{
+    int ret;
+    int i;
+    cpu_pmap_t *cp;
+    pmap_update_list_t up;
+    vm_offset_t address;
+    vm_map_entry_t entry;
+
+    if (is_boot_cpu) {
+        cp = &cpu_pmap_master;
+        up = &cpu_update_list_master;
+    } else {
+        /*
+         * The per-cpu pmap data structure itself.
+         */
+        ret = kmem_alloc(kernel_map,
+            (vm_offset_t *) &cp, sizeof(cpu_pmap_t));
+        if (ret != KERN_SUCCESS) {
+            printf("pmap_cpu_alloc() failed ret=%d\n", ret);
+            return NULL;
+        }
+        bzero((void *)cp, sizeof(cpu_pmap_t));
+
+        /*
+         * The tlb flush update list.
+         */
+        ret = kmem_alloc(kernel_map,
+            (vm_offset_t *) &up, sizeof(*up));
+        if (ret != KERN_SUCCESS) {
+            printf("pmap_cpu_alloc() failed ret=%d\n", ret);
+            pmap_cpu_free(cp);
+            return NULL;
+        }
+
+        /*
+         * The temporary windows used for copy/zero - see loose_ends.c
+         */
+        for (i = 0; i < PMAP_NWINDOWS; i++) {
+            ret = vm_map_find_space(kernel_map,
+                &address, PAGE_SIZE, 0, &entry);
+            if (ret != KERN_SUCCESS) {
+                printf("pmap_cpu_alloc() "
+                    "vm_map_find_space ret=%d\n", ret);
+                pmap_cpu_free(cp);
+                return NULL;
+            }
+            vm_map_unlock(kernel_map);
+
+            cp->mapwindow[i].prv_CADDR = (caddr_t) address;
+            cp->mapwindow[i].prv_CMAP = vtopte(address);
+            * (int *) cp->mapwindow[i].prv_CMAP = 0;
+
+            kprintf("pmap_cpu_alloc() "
+                "window=%d CADDR=0x%x CMAP=0x%x\n",
+                i, address, vtopte(address));
+        }
+    }
+
+    /*
+     * Set up the pmap request list
+     */
+    cp->update_list = up;
+    simple_lock_init(&up->lock, 0);
+    up->count = 0;
+
+    return cp;
+}
+
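+ /*
+  * Release a per-cpu pmap structure obtained from pmap_cpu_alloc(),
+  * freeing its update list first.  The static boot-cpu copy is never
+  * freed.  (Note: the mapping-window space reserved in kernel_map is not
+  * reclaimed here.)
+  */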
+void
+pmap_cpu_free(struct cpu_pmap *cp)
+{
+    if (cp != NULL && cp != &cpu_pmap_master) {
+        if (cp->update_list != NULL)
+            kfree((void *) cp->update_list,
+                sizeof(*cp->update_list));
+        kfree((void *) cp, sizeof(cpu_pmap_t));
+    }