1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * File: pmap.c
55 * Author: Avadis Tevanian, Jr., Michael Wayne Young
56 * (These guys wrote the Vax version)
57 *
58 * Physical Map management code for Intel i386, i486, and i860.
59 *
60 * Manages physical address maps.
61 *
62 * In addition to hardware address maps, this
63 * module is called upon to provide software-use-only
64 * maps which may or may not be stored in the same
65 * form as hardware maps. These pseudo-maps are
66 * used to store intermediate results from copy
67 * operations to and from address spaces.
68 *
69 * Since the information managed by this module is
70 * also stored by the logical address mapping module,
71 * this module may throw away valid virtual-to-physical
72 * mappings at almost any time. However, invalidations
73 * of virtual-to-physical mappings must be done as
74 * requested.
75 *
76 * In order to cope with hardware architectures which
77 * make virtual-to-physical map invalidates expensive,
78 *	this module may delay invalidate or reduce-protection
79 * operations until such time as they are actually
80 * necessary. This module is given full information as
81 * to which processors are currently using which maps,
82 * and to when physical maps must be made correct.
83 */
84
85 #include <cpus.h>
86
87 #include <string.h>
88 #include <norma_vm.h>
89 #include <mach_kdb.h>
90 #include <mach_ldebug.h>
91
92 #include <mach/machine/vm_types.h>
93
94 #include <mach/boolean.h>
95 #include <kern/thread.h>
96 #include <kern/zalloc.h>
97
98 #include <kern/lock.h>
99 #include <kern/spl.h>
100
101 #include <vm/pmap.h>
102 #include <vm/vm_map.h>
103 #include <vm/vm_kern.h>
104 #include <mach/vm_param.h>
105 #include <mach/vm_prot.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_page.h>
108
109 #include <mach/machine/vm_param.h>
110 #include <machine/thread.h>
111
112 #include <kern/misc_protos.h> /* prototyping */
113 #include <i386/misc_protos.h>
114
115 #include <i386/cpuid.h>
116 #include <i386/cpu_number.h>
117 #include <i386/machine_cpu.h>
118
119 #if MACH_KDB
120 #include <ddb/db_command.h>
121 #include <ddb/db_output.h>
122 #include <ddb/db_sym.h>
123 #include <ddb/db_print.h>
124 #endif /* MACH_KDB */
125
126 #include <kern/xpr.h>
127
128 #if NCPUS > 1
129 #include <i386/mp_events.h>
130 #endif
131
132 /*
133 * Forward declarations for internal functions.
134 */
135 void pmap_expand(
136 pmap_t map,
137 vm_offset_t v);
138
139 extern void pmap_remove_range(
140 pmap_t pmap,
141 vm_offset_t va,
142 pt_entry_t *spte,
143 pt_entry_t *epte);
144
145 void phys_attribute_clear(
146 vm_offset_t phys,
147 int bits);
148
149 boolean_t phys_attribute_test(
150 vm_offset_t phys,
151 int bits);
152
153 void pmap_set_modify(ppnum_t pn);
154
155 void phys_attribute_set(
156 vm_offset_t phys,
157 int bits);
158
159
160 #ifndef set_dirbase
161 void set_dirbase(vm_offset_t dirbase);
162 #endif /* set_dirbase */
163
164 #define PA_TO_PTE(pa) (pa_to_pte((pa) - VM_MIN_KERNEL_ADDRESS))
165 #define iswired(pte) ((pte) & INTEL_PTE_WIRED)
166
167 pmap_t real_pmap[NCPUS];
168
169 #define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry);
170 #define WRITE_PTE_FAST(pte_p, pte_entry) *(pte_p) = (pte_entry);
171
172 #define value_64bit(value) ((value) & 0xFFFFFFFF00000000LL)
173 #define low32(x) ((unsigned int)((x) & 0x00000000ffffffffLL))
174
175 /*
176 * Private data structures.
177 */
178
179 /*
180 * For each vm_page_t, there is a list of all currently
181 * valid virtual mappings of that page. An entry is
182 * a pv_entry_t; the list is the pv_table.
183 */
184
185 typedef struct pv_entry {
186 struct pv_entry *next; /* next pv_entry */
187 pmap_t pmap; /* pmap where mapping lies */
188 vm_offset_t va; /* virtual address for mapping */
189 } *pv_entry_t;
190
191 #define PV_ENTRY_NULL ((pv_entry_t) 0)
192
193 pv_entry_t pv_head_table; /* array of entries, one per page */
194
195 /*
196 * pv_list entries are kept on a list that can only be accessed
197 * with the pmap system locked (at SPLVM, not in the cpus_active set).
198 * The list is refilled from the pv_list_zone if it becomes empty.
199 */
200 pv_entry_t pv_free_list; /* free list at SPLVM */
201 decl_simple_lock_data(,pv_free_list_lock)
202
203 #define PV_ALLOC(pv_e) { \
204 simple_lock(&pv_free_list_lock); \
205 if ((pv_e = pv_free_list) != 0) { \
206 pv_free_list = pv_e->next; \
207 } \
208 simple_unlock(&pv_free_list_lock); \
209 }
210
211 #define PV_FREE(pv_e) { \
212 simple_lock(&pv_free_list_lock); \
213 pv_e->next = pv_free_list; \
214 pv_free_list = pv_e; \
215 simple_unlock(&pv_free_list_lock); \
216 }
217
218 zone_t pv_list_zone; /* zone of pv_entry structures */
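
/*
 * Illustrative sketch (not in the original source): the intended use of
 * PV_ALLOC/PV_FREE.  When the free list is empty the caller drops its
 * locks and refills from pv_list_zone, exactly as pmap_enter does below.
 * The helper name pv_alloc_example is hypothetical.
 */
#if 0
static pv_entry_t
pv_alloc_example(void)
{
	pv_entry_t	pv_e;

	PV_ALLOC(pv_e);				/* try the SPLVM free list first */
	if (pv_e == PV_ENTRY_NULL)
		pv_e = (pv_entry_t) zalloc(pv_list_zone);	/* may block */
	return pv_e;
}
#endif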
219
220 /*
221 * Each entry in the pv_head_table is locked by a bit in the
222 * pv_lock_table. The lock bits are accessed by the physical
223 * address of the page they lock.
224 */
225
226 char *pv_lock_table; /* pointer to array of bits */
227 #define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
228
229 /*
230 * First and last physical addresses that we maintain any information
231 * for. Initialized to zero so that pmap operations done before
232 * pmap_init won't touch any non-existent structures.
233 */
234 vm_offset_t vm_first_phys = (vm_offset_t) 0;
235 vm_offset_t vm_last_phys = (vm_offset_t) 0;
236 boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
237
238 /*
239 * Index into pv_head table, its lock bits, and the modify/reference
240 * bits starting at vm_first_phys.
241 */
242
243 #define pa_index(pa) (atop(pa - vm_first_phys))
244
245 #define pai_to_pvh(pai) (&pv_head_table[pai])
246 #define lock_pvh_pai(pai) bit_lock(pai, (void *)pv_lock_table)
247 #define unlock_pvh_pai(pai) bit_unlock(pai, (void *)pv_lock_table)
248
249 /*
250 *	Array of physical page attributes for managed pages.
251 * One byte per physical page.
252 */
253 char *pmap_phys_attributes;
254
255 /*
256 * Physical page attributes. Copy bits from PTE definition.
257 */
258 #define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */
259 #define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */
260 #define PHYS_NCACHE INTEL_PTE_NCACHE
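
/*
 * Illustrative sketch (not in the original source): how a managed
 * physical address is turned into its pv_head_table slot and attribute
 * byte.  The variables pa, pai and pv_h are hypothetical.
 */
#if 0
	vm_offset_t	pa;		/* some managed physical address */
	int		pai;
	pv_entry_t	pv_h;

	pai  = pa_index(pa);		/* page index relative to vm_first_phys */
	pv_h = pai_to_pvh(pai);		/* head of that page's mapping list */
	lock_pvh_pai(pai);		/* bit lock in pv_lock_table */
	/* ... walk pv_h->next, or read pmap_phys_attributes[pai] ... */
	unlock_pvh_pai(pai);
#endif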
261
262 /*
263 * Amount of virtual memory mapped by one
264 * page-directory entry.
265 */
266 #define PDE_MAPPED_SIZE (pdetova(1))
267
268 /*
269 * We allocate page table pages directly from the VM system
270 * through this object. It maps physical memory.
271 */
272 vm_object_t pmap_object = VM_OBJECT_NULL;
273
274 /*
275 * Locking and TLB invalidation
276 */
277
278 /*
279 * Locking Protocols:
280 *
281 * There are two structures in the pmap module that need locking:
282 * the pmaps themselves, and the per-page pv_lists (which are locked
283 * by locking the pv_lock_table entry that corresponds to the pv_head
284 * for the list in question.) Most routines want to lock a pmap and
285 * then do operations in it that require pv_list locking -- however
286 * pmap_remove_all and pmap_copy_on_write operate on a physical page
287 * basis and want to do the locking in the reverse order, i.e. lock
288 * a pv_list and then go through all the pmaps referenced by that list.
289 * To protect against deadlock between these two cases, the pmap_lock
290 * is used. There are three different locking protocols as a result:
291 *
292 * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
293 * the pmap.
294 *
295 * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
296 * lock on the pmap_lock (shared read), then lock the pmap
297 * and finally the pv_lists as needed [i.e. pmap lock before
298 * pv_list lock.]
299 *
300 * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
301 * Get a write lock on the pmap_lock (exclusive write); this
302 *		also guarantees exclusive access to the pv_lists.  Lock the
303 * pmaps as needed.
304 *
305 * At no time may any routine hold more than one pmap lock or more than
306 * one pv_list lock. Because interrupt level routines can allocate
307 * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
308 * kernel_pmap can only be held at splhigh.
309 */
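
/*
 * Illustrative sketch (not in the original source) of locking protocol 2
 * above, as pmap_enter and pmap_remove use it: system read lock, then the
 * pmap lock, then any pv_list lock, released in reverse order.  The
 * macros are defined just below.
 */
#if 0
	spl_t	spl;
	int	pai;

	PMAP_READ_LOCK(pmap, spl);	/* splvm + read pmap_system_lock + pmap->lock */
	pai = pa_index(pa);
	LOCK_PVH(pai);			/* pv_list lock comes last */
	/* ... edit the pte and the pv list ... */
	UNLOCK_PVH(pai);
	PMAP_READ_UNLOCK(pmap, spl);
#endif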
310
311 #if NCPUS > 1
312 /*
313 * We raise the interrupt level to splvm, to block interprocessor
314 * interrupts during pmap operations. We must take the CPU out of
315 * the cpus_active set while interrupts are blocked.
316 */
317 #define SPLVM(spl) { \
318 spl = splhigh(); \
319 mp_disable_preemption(); \
320 i_bit_clear(cpu_number(), &cpus_active); \
321 mp_enable_preemption(); \
322 }
323
324 #define SPLX(spl) { \
325 mp_disable_preemption(); \
326 i_bit_set(cpu_number(), &cpus_active); \
327 mp_enable_preemption(); \
328 splx(spl); \
329 }
330
331 /*
332 * Lock on pmap system
333 */
334 lock_t pmap_system_lock;
335
336 #define PMAP_READ_LOCK(pmap, spl) { \
337 SPLVM(spl); \
338 lock_read(&pmap_system_lock); \
339 simple_lock(&(pmap)->lock); \
340 }
341
342 #define PMAP_WRITE_LOCK(spl) { \
343 SPLVM(spl); \
344 lock_write(&pmap_system_lock); \
345 }
346
347 #define PMAP_READ_UNLOCK(pmap, spl) { \
348 simple_unlock(&(pmap)->lock); \
349 lock_read_done(&pmap_system_lock); \
350 SPLX(spl); \
351 }
352
353 #define PMAP_WRITE_UNLOCK(spl) { \
354 lock_write_done(&pmap_system_lock); \
355 SPLX(spl); \
356 }
357
358 #define PMAP_WRITE_TO_READ_LOCK(pmap) { \
359 simple_lock(&(pmap)->lock); \
360 lock_write_to_read(&pmap_system_lock); \
361 }
362
363 #define LOCK_PVH(index) lock_pvh_pai(index)
364
365 #define UNLOCK_PVH(index) unlock_pvh_pai(index)
366
367 #if USLOCK_DEBUG
368 extern int max_lock_loops;
369 #define LOOP_VAR int loop_count = 0
370 #define LOOP_CHECK(msg, pmap) \
371 if (loop_count++ > max_lock_loops) { \
372 mp_disable_preemption(); \
373 kprintf("%s: cpu %d pmap %x, cpus_active %d\n", \
374 msg, cpu_number(), pmap, cpus_active); \
375 Debugger("deadlock detection"); \
376 mp_enable_preemption(); \
377 loop_count = 0; \
378 }
379 #else /* USLOCK_DEBUG */
380 #define LOOP_VAR
381 #define LOOP_CHECK(msg, pmap)
382 #endif /* USLOCK_DEBUG */
383
384 #define PMAP_UPDATE_TLBS(pmap, s, e) \
385 { \
386 cpu_set cpu_mask; \
387 cpu_set users; \
388 \
389 mp_disable_preemption(); \
390 cpu_mask = 1 << cpu_number(); \
391 \
392 /* Since the pmap is locked, other updates are locked */ \
393 /* out, and any pmap_activate has finished. */ \
394 \
395 /* find other cpus using the pmap */ \
396 users = (pmap)->cpus_using & ~cpu_mask; \
397 if (users) { \
398 LOOP_VAR; \
399 /* signal them, and wait for them to finish */ \
400 /* using the pmap */ \
401 signal_cpus(users, (pmap), (s), (e)); \
402 while (((pmap)->cpus_using & cpus_active & ~cpu_mask)) { \
403 LOOP_CHECK("PMAP_UPDATE_TLBS", pmap); \
404 cpu_pause(); \
405 } \
406 } \
407 /* invalidate our own TLB if pmap is in use */ \
408 \
409 if ((pmap)->cpus_using & cpu_mask) { \
410 INVALIDATE_TLB((pmap), (s), (e)); \
411 } \
412 \
413 mp_enable_preemption(); \
414 }
415
416 #else /* NCPUS > 1 */
417
418 #if MACH_RT
419 #define SPLVM(spl) { (spl) = splhigh(); }
420 #define SPLX(spl) splx (spl)
421 #else /* MACH_RT */
422 #define SPLVM(spl)
423 #define SPLX(spl)
424 #endif /* MACH_RT */
425
426 #define PMAP_READ_LOCK(pmap, spl) SPLVM(spl)
427 #define PMAP_WRITE_LOCK(spl) SPLVM(spl)
428 #define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl)
429 #define PMAP_WRITE_UNLOCK(spl) SPLX(spl)
430 #define PMAP_WRITE_TO_READ_LOCK(pmap)
431
432 #if MACH_RT
433 #define LOCK_PVH(index) disable_preemption()
434 #define UNLOCK_PVH(index) enable_preemption()
435 #else /* MACH_RT */
436 #define LOCK_PVH(index)
437 #define UNLOCK_PVH(index)
438 #endif /* MACH_RT */
439
440 #define PMAP_FLUSH_TLBS() flush_tlb()
441 #define PMAP_RELOAD_TLBS() set_cr3(kernel_pmap->pdirbase)
442 #define PMAP_INVALIDATE_PAGE(map, saddr, eaddr) { \
443 if (map == kernel_pmap) \
444 invlpg((vm_offset_t) saddr); \
445 else \
446 flush_tlb(); \
447 }
448
449 #endif /* NCPUS > 1 */
450
451 #define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */
452
453 #define INVALIDATE_TLB(m, s, e) { \
454 flush_tlb(); \
455 }
456
457 #if NCPUS > 1
458 /*
459 * Structures to keep track of pending TLB invalidations
460 */
461 cpu_set cpus_active;
462 cpu_set cpus_idle;
463 volatile boolean_t cpu_update_needed[NCPUS];
464
465 #define UPDATE_LIST_SIZE 4
466
467 struct pmap_update_item {
468 pmap_t pmap; /* pmap to invalidate */
469 vm_offset_t start; /* start address to invalidate */
470 vm_offset_t end; /* end address to invalidate */
471 };
472
473 typedef struct pmap_update_item *pmap_update_item_t;
474
475 /*
476 * List of pmap updates. If the list overflows,
477 * the last entry is changed to invalidate all.
478 */
479 struct pmap_update_list {
480 decl_simple_lock_data(,lock)
481 int count;
482 struct pmap_update_item item[UPDATE_LIST_SIZE];
483 } ;
484 typedef struct pmap_update_list *pmap_update_list_t;
485
486 struct pmap_update_list cpu_update_list[NCPUS];
487
488 extern void signal_cpus(
489 cpu_set use_list,
490 pmap_t pmap,
491 vm_offset_t start,
492 vm_offset_t end);
493
494 #endif /* NCPUS > 1 */
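
/*
 * Illustrative sketch (not in the original source) of the queueing rule
 * described above: each CPU's update list holds at most UPDATE_LIST_SIZE
 * pending invalidations, and on overflow the last slot is widened to
 * cover the whole kernel-visible range.  The real signal_cpus
 * implementation lives elsewhere; this only shows how the data structure
 * is meant to be used.
 */
#if 0
static void
queue_update_example(int cpu, pmap_t pmap, vm_offset_t start, vm_offset_t end)
{
	pmap_update_list_t	up = &cpu_update_list[cpu];

	simple_lock(&up->lock);
	if (up->count == UPDATE_LIST_SIZE) {
		/* overflow: turn the last entry into "invalidate all" */
		up->item[UPDATE_LIST_SIZE-1].pmap  = kernel_pmap;
		up->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS;
		up->item[UPDATE_LIST_SIZE-1].end   = VM_MAX_KERNEL_ADDRESS;
	} else {
		up->item[up->count].pmap  = pmap;
		up->item[up->count].start = start;
		up->item[up->count].end   = end;
		up->count++;
	}
	simple_unlock(&up->lock);
}
#endif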
495
496 /*
497 * Other useful macros.
498 */
499 #define current_pmap() (vm_map_pmap(current_act()->map))
500 #define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
501
502 struct pmap kernel_pmap_store;
503 pmap_t kernel_pmap;
504
505 struct zone *pmap_zone; /* zone of pmap structures */
506
507 int pmap_debug = 0; /* flag for debugging prints */
508 int ptes_per_vm_page; /* number of hardware ptes needed
509 to map one VM page. */
510 unsigned int inuse_ptepages_count = 0; /* debugging */
511
512 /*
513 * Pmap cache. Cache is threaded through ref_count field of pmap.
514 * Max will eventually be constant -- variable for experimentation.
515 */
516 int pmap_cache_max = 32;
517 int pmap_alloc_chunk = 8;
518 pmap_t pmap_cache_list;
519 int pmap_cache_count;
520 decl_simple_lock_data(,pmap_cache_lock)
521
522 extern vm_offset_t hole_start, hole_end;
523
524 extern char end;
525
526 /*
527 * Page directory for kernel.
528 */
529 pt_entry_t *kpde = 0; /* set by start.s - keep out of bss */
530
531 #if DEBUG_ALIAS
532 #define PMAP_ALIAS_MAX 32
533 struct pmap_alias {
534 vm_offset_t rpc;
535 pmap_t pmap;
536 vm_offset_t va;
537 int cookie;
538 #define PMAP_ALIAS_COOKIE 0xdeadbeef
539 } pmap_aliasbuf[PMAP_ALIAS_MAX];
540 int pmap_alias_index = 0;
541 extern vm_offset_t get_rpc();
542
543 #endif /* DEBUG_ALIAS */
544
545 /*
546 * Given an offset and a map, compute the address of the
547 * pte. If the address is invalid with respect to the map
548 * then PT_ENTRY_NULL is returned (and the map may need to grow).
549 *
550 * This is only used in machine-dependent code.
551 */
552
553 pt_entry_t *
554 pmap_pte(
555 register pmap_t pmap,
556 register vm_offset_t addr)
557 {
558 register pt_entry_t *ptp;
559 register pt_entry_t pte;
560
561 pte = pmap->dirbase[pdenum(pmap, addr)];
562 if ((pte & INTEL_PTE_VALID) == 0)
563 return(PT_ENTRY_NULL);
564 ptp = (pt_entry_t *)ptetokv(pte);
565 return(&ptp[ptenum(addr)]);
566
567 }
568
569 #define pmap_pde(pmap, addr) (&(pmap)->dirbase[pdenum(pmap, addr)])
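
/*
 * Illustrative sketch (not in the original source): the usual pmap_pte
 * calling pattern, as pmap_extract below uses it.  A PT_ENTRY_NULL
 * return means the page directory entry is not valid, i.e. no page
 * table exists yet for that address.  pmap and va are hypothetical.
 */
#if 0
	pt_entry_t	*pte;
	vm_offset_t	pa = 0;

	pte = pmap_pte(pmap, va);
	if (pte != PT_ENTRY_NULL && (*pte & INTEL_PTE_VALID))
		pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
#endif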
570
571 #define DEBUG_PTE_PAGE 0
572
573 #if DEBUG_PTE_PAGE
574 void
575 ptep_check(
576 ptep_t ptep)
577 {
578 register pt_entry_t *pte, *epte;
579 int ctu, ctw;
580
581 /* check the use and wired counts */
582 if (ptep == PTE_PAGE_NULL)
583 return;
584 pte = pmap_pte(ptep->pmap, ptep->va);
585 epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t);
586 ctu = 0;
587 ctw = 0;
588 while (pte < epte) {
589 if (pte->pfn != 0) {
590 ctu++;
591 if (pte->wired)
592 ctw++;
593 }
594 pte += ptes_per_vm_page;
595 }
596
597 if (ctu != ptep->use_count || ctw != ptep->wired_count) {
598 printf("use %d wired %d - actual use %d wired %d\n",
599 ptep->use_count, ptep->wired_count, ctu, ctw);
600 panic("pte count");
601 }
602 }
603 #endif /* DEBUG_PTE_PAGE */
604
605 /*
606 * Map memory at initialization. The physical addresses being
607 * mapped are not managed and are never unmapped.
608 *
609 * For now, VM is already on, we only need to map the
610 * specified memory.
611 */
612 vm_offset_t
613 pmap_map(
614 register vm_offset_t virt,
615 register vm_offset_t start,
616 register vm_offset_t end,
617 register vm_prot_t prot)
618 {
619 register int ps;
620
621 ps = PAGE_SIZE;
622 while (start < end) {
623 pmap_enter(kernel_pmap, virt, (ppnum_t)i386_btop(start), prot, 0, FALSE);
624 virt += ps;
625 start += ps;
626 }
627 return(virt);
628 }
629
630 /*
631 * Back-door routine for mapping kernel VM at initialization.
632 * Useful for mapping memory outside the range
633  *	[vm_first_phys, vm_last_phys) (i.e., devices).
634  *	Sets no-cache, A, D.
635 * Otherwise like pmap_map.
636 */
637 vm_offset_t
638 pmap_map_bd(
639 register vm_offset_t virt,
640 register vm_offset_t start,
641 register vm_offset_t end,
642 vm_prot_t prot)
643 {
644 register pt_entry_t template;
645 register pt_entry_t *pte;
646
647 template = pa_to_pte(start)
648 | INTEL_PTE_NCACHE
649 | INTEL_PTE_REF
650 | INTEL_PTE_MOD
651 | INTEL_PTE_WIRED
652 | INTEL_PTE_VALID;
653 if (prot & VM_PROT_WRITE)
654 template |= INTEL_PTE_WRITE;
655
656 while (start < end) {
657 pte = pmap_pte(kernel_pmap, virt);
658 if (pte == PT_ENTRY_NULL)
659 panic("pmap_map_bd: Invalid kernel address\n");
660 WRITE_PTE_FAST(pte, template)
661 pte_increment_pa(template);
662 virt += PAGE_SIZE;
663 start += PAGE_SIZE;
664 }
665
666 flush_tlb();
667 return(virt);
668 }
669
670 extern int cnvmem;
671 extern char *first_avail;
672 extern vm_offset_t virtual_avail, virtual_end;
673 extern vm_offset_t avail_start, avail_end, avail_next;
674
675 /*
676 * Bootstrap the system enough to run with virtual memory.
677 * Map the kernel's code and data, and allocate the system page table.
678 * Called with mapping OFF. Page_size must already be set.
679 *
680 * Parameters:
681 * load_start: PA where kernel was loaded
682 * avail_start PA of first available physical page -
683 * after kernel page tables
684 * avail_end PA of last available physical page
685 * virtual_avail VA of first available page -
686 * after kernel page tables
687 * virtual_end VA of last available page -
688 * end of kernel address space
689 *
690 * &start_text start of kernel text
691 * &etext end of kernel text
692 */
693
694 void
695 pmap_bootstrap(
696 vm_offset_t load_start)
697 {
698 vm_offset_t va, tva, paddr;
699 ppnum_t pn;
700 pt_entry_t template;
701 pt_entry_t *pde, *pte, *ptend;
702 vm_size_t morevm; /* VM space for kernel map */
703
704 vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address known to VM */
705
706 /*
707 * Set ptes_per_vm_page for general use.
708 */
709 ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES;
710
711 /*
712 * The kernel's pmap is statically allocated so we don't
713 * have to use pmap_create, which is unlikely to work
714 * correctly at this part of the boot sequence.
715 */
716
717 kernel_pmap = &kernel_pmap_store;
718
719 #if NCPUS > 1
720 lock_init(&pmap_system_lock,
721 FALSE, /* NOT a sleep lock */
722 ETAP_VM_PMAP_SYS,
723 ETAP_VM_PMAP_SYS_I);
724 #endif /* NCPUS > 1 */
725
726 simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
727 simple_lock_init(&pv_free_list_lock, ETAP_VM_PMAP_FREE);
728
729 kernel_pmap->ref_count = 1;
730
731 /*
732 * The kernel page directory has been allocated;
733 * its virtual address is in kpde.
734 *
735 * Enough kernel page table pages have been allocated
736 * to map low system memory, kernel text, kernel data/bss,
737 * kdb's symbols, and the page directory and page tables.
738 *
739 * No other physical memory has been allocated.
740 */
741
742 /*
743 * Start mapping virtual memory to physical memory, 1-1,
744 * at end of mapped memory.
745 */
746
747 virtual_avail = phystokv(avail_start);
748 virtual_end = phystokv(avail_end);
749
750 pde = kpde;
751 pde += pdenum(kernel_pmap, virtual_avail);
752
753 if (pte_to_pa(*pde) == 0) {
754 /* This pte has not been allocated */
755 pte = 0; ptend = 0;
756 }
757 else {
758 pte = (pt_entry_t *)ptetokv(*pde);
759 /* first pte of page */
760 ptend = pte+NPTES; /* last pte of page */
761 pte += ptenum(virtual_avail); /* point to pte that
762 maps first avail VA */
763 pde++; /* point pde to first empty slot */
764 }
765
766 template = pa_to_pte(avail_start)
767 | INTEL_PTE_VALID
768 | INTEL_PTE_WRITE;
769
770 for (va = virtual_avail; va < virtual_end; va += INTEL_PGBYTES) {
771 if (pte >= ptend) {
772 pte = (pt_entry_t *)phystokv(virtual_avail);
773 ptend = pte + NPTES;
774 virtual_avail = (vm_offset_t)ptend;
775 if (virtual_avail == hole_start)
776 virtual_avail = hole_end;
777 *pde = PA_TO_PTE((vm_offset_t) pte)
778 | INTEL_PTE_VALID
779 | INTEL_PTE_WRITE;
780 pde++;
781 }
782 WRITE_PTE_FAST(pte, template)
783 pte++;
784 pte_increment_pa(template);
785 }
786
787 avail_start = virtual_avail - VM_MIN_KERNEL_ADDRESS;
788 avail_next = avail_start;
789
790 /*
791 * Figure out maximum kernel address.
792 * Kernel virtual space is:
793 * - at least three times physical memory
794 * - at least VM_MIN_KERNEL_ADDRESS
795 * - limited by VM_MAX_KERNEL_ADDRESS
796 */
797
798 morevm = 3*avail_end;
799 if (virtual_end + morevm > VM_MAX_KERNEL_ADDRESS)
800 morevm = VM_MAX_KERNEL_ADDRESS - virtual_end + 1;
801
802 /*
803 * startup requires additional virtual memory (for tables, buffers,
804 * etc.). The kd driver may also require some of that memory to
805 * access the graphics board.
806 *
807 */
808 *(int *)&template = 0;
809
810 /*
811 * Leave room for kernel-loaded servers, which have been linked at
812 * addresses from VM_MIN_KERNEL_LOADED_ADDRESS to
813 * VM_MAX_KERNEL_LOADED_ADDRESS.
814 */
815 if (virtual_end + morevm < VM_MAX_KERNEL_LOADED_ADDRESS + 1)
816 morevm = VM_MAX_KERNEL_LOADED_ADDRESS + 1 - virtual_end;
817
818 virtual_end += morevm;
819 for (tva = va; tva < virtual_end; tva += INTEL_PGBYTES) {
820 if (pte >= ptend) {
821 pmap_next_page(&pn);
822 paddr = i386_ptob(pn);
823 pte = (pt_entry_t *)phystokv(paddr);
824 ptend = pte + NPTES;
825 *pde = PA_TO_PTE((vm_offset_t) pte)
826 | INTEL_PTE_VALID
827 | INTEL_PTE_WRITE;
828 pde++;
829 }
830 WRITE_PTE_FAST(pte, template)
831 pte++;
832 }
833
834 virtual_avail = va;
835
836 /* Push the virtual avail address above hole_end */
837 if (virtual_avail < hole_end)
838 virtual_avail = hole_end;
839
840 /*
841 * c.f. comment above
842 *
843 */
844 virtual_end = va + morevm;
845 while (pte < ptend)
846 *pte++ = 0;
847
848 /*
849 * invalidate user virtual addresses
850 */
851 memset((char *)kpde,
852 0,
853 pdenum(kernel_pmap,VM_MIN_KERNEL_ADDRESS)*sizeof(pt_entry_t));
854 kernel_pmap->dirbase = kpde;
855 printf("Kernel virtual space from 0x%x to 0x%x.\n",
856 VM_MIN_KERNEL_ADDRESS, virtual_end);
857
858 avail_start = avail_next;
859 printf("Available physical space from 0x%x to 0x%x\n",
860 avail_start, avail_end);
861
862 kernel_pmap->pdirbase = kvtophys((vm_offset_t)kernel_pmap->dirbase);
863
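	/*
	 * (Comment not in the original source.)  If the CPU has a Page
	 * Attribute Table, reprogram PAT entry 6 (IA32_PAT, MSR 0x277,
	 * bits 48-55) to 0x01 = write-combining.  PTEs built with
	 * INTEL_PTE_PTA | INTEL_PTE_NCACHE (see pmap_enter) select that
	 * entry.
	 */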
864 if (cpuid_features() & CPUID_FEATURE_PAT)
865 {
866 uint64_t pat;
867 uint32_t msr;
868
869 msr = 0x277;
870 asm volatile("rdmsr" : "=A" (pat) : "c" (msr));
871
872 pat &= ~(0xfULL << 48);
873 pat |= 0x01ULL << 48;
874
875 asm volatile("wrmsr" :: "A" (pat), "c" (msr));
876 }
877 }
878
879 void
880 pmap_virtual_space(
881 vm_offset_t *startp,
882 vm_offset_t *endp)
883 {
884 *startp = virtual_avail;
885 *endp = virtual_end;
886 }
887
888 /*
889 * Initialize the pmap module.
890 * Called by vm_init, to initialize any structures that the pmap
891 * system needs to map virtual memory.
892 */
893 void
894 pmap_init(void)
895 {
896 register long npages;
897 vm_offset_t addr;
898 register vm_size_t s;
899 int i;
900
901 /*
902 * Allocate memory for the pv_head_table and its lock bits,
903 * the modify bit array, and the pte_page table.
904 */
905
906 npages = atop(avail_end - avail_start);
907 s = (vm_size_t) (sizeof(struct pv_entry) * npages
908 + pv_lock_table_size(npages)
909 + npages);
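	/*
	 * (Comment not in the original source.)  Layout of this single
	 * wired allocation, carved up just below:
	 *
	 *	pv_head_table		npages * sizeof(struct pv_entry)
	 *	pv_lock_table		one lock bit per page, byte-rounded
	 *	pmap_phys_attributes	one attribute byte per page
	 */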
910
911 s = round_page(s);
912 if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
913 panic("pmap_init");
914
915 memset((char *)addr, 0, s);
916
917 /*
918 * Allocate the structures first to preserve word-alignment.
919 */
920 pv_head_table = (pv_entry_t) addr;
921 addr = (vm_offset_t) (pv_head_table + npages);
922
923 pv_lock_table = (char *) addr;
924 addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
925
926 pmap_phys_attributes = (char *) addr;
927
928 /*
929 * Create the zone of physical maps,
930 * and of the physical-to-virtual entries.
931 */
932 s = (vm_size_t) sizeof(struct pmap);
933 pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
934 s = (vm_size_t) sizeof(struct pv_entry);
935 pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
936
937 #if NCPUS > 1
938 /*
939 * Set up the pmap request lists
940 */
941 for (i = 0; i < NCPUS; i++) {
942 pmap_update_list_t up = &cpu_update_list[i];
943
944 simple_lock_init(&up->lock, ETAP_VM_PMAP_UPDATE);
945 up->count = 0;
946 }
947 #endif /* NCPUS > 1 */
948
949 /*
950 * Only now, when all of the data structures are allocated,
951 * can we set vm_first_phys and vm_last_phys. If we set them
952 * too soon, the kmem_alloc_wired above will try to use these
953 * data structures and blow up.
954 */
955
956 vm_first_phys = avail_start;
957 vm_last_phys = avail_end;
958 pmap_initialized = TRUE;
959
960 /*
961 	 *	Initialize pmap cache.
962 */
963 pmap_cache_list = PMAP_NULL;
964 pmap_cache_count = 0;
965 simple_lock_init(&pmap_cache_lock, ETAP_VM_PMAP_CACHE);
966 }
967
968
969 #define pmap_valid_page(x) ((avail_start <= x) && (x < avail_end))
970
971
972 #define valid_page(x) (pmap_initialized && pmap_valid_page(x))
973
974 boolean_t
975 pmap_verify_free(
976 ppnum_t pn)
977 {
978 vm_offset_t phys;
979 pv_entry_t pv_h;
980 int pai;
981 spl_t spl;
982 boolean_t result;
983
984 assert(pn != vm_page_fictitious_addr);
985 phys = (vm_offset_t)i386_ptob(pn);
986 if (!pmap_initialized)
987 return(TRUE);
988
989 if (!pmap_valid_page(phys))
990 return(FALSE);
991
992 PMAP_WRITE_LOCK(spl);
993
994 pai = pa_index(phys);
995 pv_h = pai_to_pvh(pai);
996
997 result = (pv_h->pmap == PMAP_NULL);
998 PMAP_WRITE_UNLOCK(spl);
999
1000 return(result);
1001 }
1002
1003 /*
1004 * Create and return a physical map.
1005 *
1006 * If the size specified for the map
1007 * is zero, the map is an actual physical
1008 * map, and may be referenced by the
1009 * hardware.
1010 *
1011 * If the size specified is non-zero,
1012 * the map will be used in software only, and
1013 * is bounded by that size.
1014 */
1015 pmap_t
1016 pmap_create(
1017 vm_size_t size)
1018 {
1019 register pmap_t p;
1020 register pmap_statistics_t stats;
1021
1022 /*
1023 * A software use-only map doesn't even need a map.
1024 */
1025
1026 if (size != 0) {
1027 return(PMAP_NULL);
1028 }
1029
1030 /*
1031 * Try to get cached pmap, if this fails,
1032 * allocate a pmap struct from the pmap_zone. Then allocate
1033 * the page descriptor table from the pd_zone.
1034 */
1035
1036 simple_lock(&pmap_cache_lock);
1037 while ((p = pmap_cache_list) == PMAP_NULL) {
1038
1039 vm_offset_t dirbases;
1040 register int i;
1041
1042 simple_unlock(&pmap_cache_lock);
1043
1044 #if NCPUS > 1
1045 /*
1046 * XXX NEEDS MP DOING ALLOC logic so that if multiple processors
1047 * XXX get here, only one allocates a chunk of pmaps.
1048 * (for now we'll just let it go - safe but wasteful)
1049 */
1050 #endif
1051
1052 /*
1053 		 *	Allocate a chunk of pmaps.  A single kmem_alloc_wired
1054 * operation reduces kernel map fragmentation.
1055 */
1056
1057 if (kmem_alloc_wired(kernel_map, &dirbases,
1058 pmap_alloc_chunk * INTEL_PGBYTES)
1059 != KERN_SUCCESS)
1060 panic("pmap_create.1");
1061
1062 for (i = pmap_alloc_chunk; i > 0 ; i--) {
1063 p = (pmap_t) zalloc(pmap_zone);
1064 if (p == PMAP_NULL)
1065 panic("pmap_create.2");
1066
1067 /*
1068 * Initialize pmap. Don't bother with
1069 * ref count as cache list is threaded
1070 * through it. It'll be set on cache removal.
1071 */
1072 p->dirbase = (pt_entry_t *) dirbases;
1073 dirbases += INTEL_PGBYTES;
1074 memcpy(p->dirbase, kpde, INTEL_PGBYTES);
1075 p->pdirbase = kvtophys((vm_offset_t)p->dirbase);
1076
1077 simple_lock_init(&p->lock, ETAP_VM_PMAP);
1078 p->cpus_using = 0;
1079
1080 /*
1081 * Initialize statistics.
1082 */
1083 stats = &p->stats;
1084 stats->resident_count = 0;
1085 stats->wired_count = 0;
1086
1087 /*
1088 * Insert into cache
1089 */
1090 simple_lock(&pmap_cache_lock);
1091 p->ref_count = (int) pmap_cache_list;
1092 pmap_cache_list = p;
1093 pmap_cache_count++;
1094 simple_unlock(&pmap_cache_lock);
1095 }
1096 simple_lock(&pmap_cache_lock);
1097 }
1098
1099 p->stats.resident_count = 0;
1100 p->stats.wired_count = 0;
1101
1102 pmap_cache_list = (pmap_t) p->ref_count;
1103 p->ref_count = 1;
1104 pmap_cache_count--;
1105 simple_unlock(&pmap_cache_lock);
1106
1107 return(p);
1108 }
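
/*
 * Illustrative sketch (not in the original source): the external life
 * cycle of a pmap as the VM layer drives it.  The virtual address is
 * hypothetical; pmap_enter, pmap_extract, pmap_remove and pmap_destroy
 * are all defined in this file.
 */
#if 0
static void
pmap_lifecycle_example(ppnum_t pn)
{
	pmap_t		p;
	vm_offset_t	va = 0x10000000;	/* hypothetical user address */

	p = pmap_create(0);			/* size 0 => real, hardware pmap */
	pmap_enter(p, va, pn, VM_PROT_READ|VM_PROT_WRITE, 0, FALSE);
	assert(pmap_extract(p, va) == i386_ptob(pn));
	pmap_remove(p, (addr64_t)va, (addr64_t)(va + PAGE_SIZE));
	pmap_destroy(p);
}
#endif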
1109
1110 /*
1111 * Retire the given physical map from service.
1112 * Should only be called if the map contains
1113 * no valid mappings.
1114 */
1115
1116 void
1117 pmap_destroy(
1118 register pmap_t p)
1119 {
1120 register pt_entry_t *pdep;
1121 register vm_offset_t pa;
1122 register int c;
1123 spl_t s;
1124 register vm_page_t m;
1125
1126 if (p == PMAP_NULL)
1127 return;
1128
1129 SPLVM(s);
1130 simple_lock(&p->lock);
1131 c = --p->ref_count;
1132 if (c == 0) {
1133 register int my_cpu;
1134
1135 mp_disable_preemption();
1136 my_cpu = cpu_number();
1137
1138 /*
1139 * If some cpu is not using the physical pmap pointer that it
1140 * is supposed to be (see set_dirbase), we might be using the
1141 * pmap that is being destroyed! Make sure we are
1142 * physically on the right pmap:
1143 */
1144
1145 #if NCPUS > 1
1146 /* force pmap/cr3 update */
1147 PMAP_UPDATE_TLBS(p,
1148 VM_MIN_ADDRESS,
1149 VM_MAX_KERNEL_ADDRESS);
1150 #endif /* NCPUS > 1 */
1151
1152 if (real_pmap[my_cpu] == p) {
1153 PMAP_CPU_CLR(p, my_cpu);
1154 real_pmap[my_cpu] = kernel_pmap;
1155 set_cr3(kernel_pmap->pdirbase);
1156 }
1157 mp_enable_preemption();
1158 }
1159 simple_unlock(&p->lock);
1160 SPLX(s);
1161
1162 if (c != 0) {
1163 return; /* still in use */
1164 }
1165
1166 /*
1167 * Free the memory maps, then the
1168 * pmap structure.
1169 */
1170 pdep = p->dirbase;
1171 while (pdep < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)]) {
1172 if (*pdep & INTEL_PTE_VALID) {
1173 pa = pte_to_pa(*pdep);
1174 vm_object_lock(pmap_object);
1175 m = vm_page_lookup(pmap_object, pa);
1176 if (m == VM_PAGE_NULL)
1177 panic("pmap_destroy: pte page not in object");
1178 vm_page_lock_queues();
1179 vm_page_free(m);
1180 inuse_ptepages_count--;
1181 vm_object_unlock(pmap_object);
1182 vm_page_unlock_queues();
1183
1184 /*
1185 * Clear pdes, this might be headed for the cache.
1186 */
1187 c = ptes_per_vm_page;
1188 do {
1189 *pdep = 0;
1190 pdep++;
1191 } while (--c > 0);
1192 }
1193 else {
1194 pdep += ptes_per_vm_page;
1195 }
1196
1197 }
1198
1199 /*
1200 * XXX These asserts fail on system shutdown.
1201 *
1202 assert(p->stats.resident_count == 0);
1203 assert(p->stats.wired_count == 0);
1204 *
1205 */
1206
1207 /*
1208 * Add to cache if not already full
1209 */
1210 simple_lock(&pmap_cache_lock);
1211 if (pmap_cache_count <= pmap_cache_max) {
1212 p->ref_count = (int) pmap_cache_list;
1213 pmap_cache_list = p;
1214 pmap_cache_count++;
1215 simple_unlock(&pmap_cache_lock);
1216 }
1217 else {
1218 simple_unlock(&pmap_cache_lock);
1219 kmem_free(kernel_map, (vm_offset_t)p->dirbase, INTEL_PGBYTES);
1220 zfree(pmap_zone, (vm_offset_t) p);
1221 }
1222 }
1223
1224 /*
1225 * Add a reference to the specified pmap.
1226 */
1227
1228 void
1229 pmap_reference(
1230 register pmap_t p)
1231 {
1232 spl_t s;
1233
1234 if (p != PMAP_NULL) {
1235 SPLVM(s);
1236 simple_lock(&p->lock);
1237 p->ref_count++;
1238 simple_unlock(&p->lock);
1239 SPLX(s);
1240 }
1241 }
1242
1243 /*
1244 * Remove a range of hardware page-table entries.
1245 * The entries given are the first (inclusive)
1246 * and last (exclusive) entries for the VM pages.
1247 * The virtual address is the va for the first pte.
1248 *
1249 * The pmap must be locked.
1250 * If the pmap is not the kernel pmap, the range must lie
1251 * entirely within one pte-page. This is NOT checked.
1252 * Assumes that the pte-page exists.
1253 */
1254
1255 /* static */
1256 void
1257 pmap_remove_range(
1258 pmap_t pmap,
1259 vm_offset_t va,
1260 pt_entry_t *spte,
1261 pt_entry_t *epte)
1262 {
1263 register pt_entry_t *cpte;
1264 int num_removed, num_unwired;
1265 int pai;
1266 vm_offset_t pa;
1267
1268 #if DEBUG_PTE_PAGE
1269 if (pmap != kernel_pmap)
1270 ptep_check(get_pte_page(spte));
1271 #endif /* DEBUG_PTE_PAGE */
1272 num_removed = 0;
1273 num_unwired = 0;
1274
1275 for (cpte = spte; cpte < epte;
1276 cpte += ptes_per_vm_page, va += PAGE_SIZE) {
1277
1278 pa = pte_to_pa(*cpte);
1279 if (pa == 0)
1280 continue;
1281
1282 num_removed++;
1283 if (iswired(*cpte))
1284 num_unwired++;
1285
1286 if (!valid_page(pa)) {
1287
1288 /*
1289 * Outside range of managed physical memory.
1290 * Just remove the mappings.
1291 */
1292 register int i = ptes_per_vm_page;
1293 register pt_entry_t *lpte = cpte;
1294 do {
1295 *lpte = 0;
1296 lpte++;
1297 } while (--i > 0);
1298 continue;
1299 }
1300
1301 pai = pa_index(pa);
1302 LOCK_PVH(pai);
1303
1304 /*
1305 * Get the modify and reference bits.
1306 */
1307 {
1308 register int i;
1309 register pt_entry_t *lpte;
1310
1311 i = ptes_per_vm_page;
1312 lpte = cpte;
1313 do {
1314 pmap_phys_attributes[pai] |=
1315 *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
1316 *lpte = 0;
1317 lpte++;
1318 } while (--i > 0);
1319 }
1320
1321 /*
1322 * Remove the mapping from the pvlist for
1323 * this physical page.
1324 */
1325 {
1326 register pv_entry_t pv_h, prev, cur;
1327
1328 pv_h = pai_to_pvh(pai);
1329 if (pv_h->pmap == PMAP_NULL) {
1330 panic("pmap_remove: null pv_list!");
1331 }
1332 if (pv_h->va == va && pv_h->pmap == pmap) {
1333 /*
1334 * Header is the pv_entry. Copy the next one
1335 * to header and free the next one (we cannot
1336 * free the header)
1337 */
1338 cur = pv_h->next;
1339 if (cur != PV_ENTRY_NULL) {
1340 *pv_h = *cur;
1341 PV_FREE(cur);
1342 }
1343 else {
1344 pv_h->pmap = PMAP_NULL;
1345 }
1346 }
1347 else {
1348 cur = pv_h;
1349 do {
1350 prev = cur;
1351 if ((cur = prev->next) == PV_ENTRY_NULL) {
1352 	            panic("pmap_remove: mapping not in pv_list!");
1353 }
1354 } while (cur->va != va || cur->pmap != pmap);
1355 prev->next = cur->next;
1356 PV_FREE(cur);
1357 }
1358 UNLOCK_PVH(pai);
1359 }
1360 }
1361
1362 /*
1363 * Update the counts
1364 */
1365 assert(pmap->stats.resident_count >= num_removed);
1366 pmap->stats.resident_count -= num_removed;
1367 assert(pmap->stats.wired_count >= num_unwired);
1368 pmap->stats.wired_count -= num_unwired;
1369 }
1370
1371 /*
1372 * Remove phys addr if mapped in specified map
1373 *
1374 */
1375 void
1376 pmap_remove_some_phys(
1377 pmap_t map,
1378 ppnum_t pn)
1379 {
1380
1381 /* Implement to support working set code */
1382
1383 }
1384
1385
1386 /*
1387 * Remove the given range of addresses
1388 * from the specified map.
1389 *
1390 * It is assumed that the start and end are properly
1391 * rounded to the hardware page size.
1392 */
1393
1394
1395 void
1396 pmap_remove(
1397 pmap_t map,
1398 addr64_t s64,
1399 addr64_t e64)
1400 {
1401 spl_t spl;
1402 register pt_entry_t *pde;
1403 register pt_entry_t *spte, *epte;
1404 vm_offset_t l;
1405 vm_offset_t s, e;
1406
1407 if (map == PMAP_NULL)
1408 return;
1409
1410 PMAP_READ_LOCK(map, spl);
1411
1412 if (value_64bit(s64) || value_64bit(e64)) {
1413 panic("pmap_remove addr overflow");
1414 }
1415
1416 s = (vm_offset_t)low32(s64);
1417 e = (vm_offset_t)low32(e64);
1418
1419 /*
1420 * Invalidate the translation buffer first
1421 */
1422 PMAP_UPDATE_TLBS(map, s, e);
1423
1424 pde = pmap_pde(map, s);
1425
1426 while (s < e) {
1427 l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
1428 if (l > e)
1429 l = e;
1430 if (*pde & INTEL_PTE_VALID) {
1431 spte = (pt_entry_t *)ptetokv(*pde);
1432 spte = &spte[ptenum(s)];
1433 epte = &spte[intel_btop(l-s)];
1434 pmap_remove_range(map, s, spte, epte);
1435 }
1436 s = l;
1437 pde++;
1438 }
1439
1440 PMAP_READ_UNLOCK(map, spl);
1441 }
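
/*
 * (Not in the original source.)  Worked example of the chunking above,
 * assuming 4K pages and 1024 ptes per page table, so PDE_MAPPED_SIZE is
 * 4MB (0x400000):
 *
 *	s = 0x00401000:  l = (s + 0x400000) & ~0x3fffff = 0x00800000
 *
 * i.e. l is the first page-directory boundary strictly above s, clamped
 * to e, so each pass of the loop stays within a single page table.
 */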
1442
1443 /*
1444 * Routine: pmap_page_protect
1445 *
1446 * Function:
1447 * Lower the permission for all mappings to a given
1448 * page.
1449 */
1450 void
1451 pmap_page_protect(
1452 ppnum_t pn,
1453 vm_prot_t prot)
1454 {
1455 pv_entry_t pv_h, prev;
1456 register pv_entry_t pv_e;
1457 register pt_entry_t *pte;
1458 int pai;
1459 register pmap_t pmap;
1460 spl_t spl;
1461 boolean_t remove;
1462 vm_offset_t phys;
1463
1464 assert(pn != vm_page_fictitious_addr);
1465 phys = (vm_offset_t)i386_ptob(pn);
1466 if (!valid_page(phys)) {
1467 /*
1468 * Not a managed page.
1469 */
1470 return;
1471 }
1472
1473 /*
1474 * Determine the new protection.
1475 */
1476 switch (prot) {
1477 case VM_PROT_READ:
1478 case VM_PROT_READ|VM_PROT_EXECUTE:
1479 remove = FALSE;
1480 break;
1481 case VM_PROT_ALL:
1482 return; /* nothing to do */
1483 default:
1484 remove = TRUE;
1485 break;
1486 }
1487
1488 /*
1489 * Lock the pmap system first, since we will be changing
1490 * several pmaps.
1491 */
1492
1493 PMAP_WRITE_LOCK(spl);
1494
1495 pai = pa_index(phys);
1496 pv_h = pai_to_pvh(pai);
1497
1498 /*
1499 * Walk down PV list, changing or removing all mappings.
1500 * We do not have to lock the pv_list because we have
1501 * the entire pmap system locked.
1502 */
1503 if (pv_h->pmap != PMAP_NULL) {
1504
1505 prev = pv_e = pv_h;
1506 do {
1507 pmap = pv_e->pmap;
1508 /*
1509 * Lock the pmap to block pmap_extract and similar routines.
1510 */
1511 simple_lock(&pmap->lock);
1512
1513 {
1514 register vm_offset_t va;
1515
1516 va = pv_e->va;
1517 pte = pmap_pte(pmap, va);
1518
1519 /*
1520 * Consistency checks.
1521 */
1522 /* assert(*pte & INTEL_PTE_VALID); XXX */
1523 /* assert(pte_to_phys(*pte) == phys); */
1524
1525 /*
1526 * Invalidate TLBs for all CPUs using this mapping.
1527 */
1528 PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
1529 }
1530
1531 /*
1532 * Remove the mapping if new protection is NONE
1533 * or if write-protecting a kernel mapping.
1534 */
1535 if (remove || pmap == kernel_pmap) {
1536 /*
1537 * Remove the mapping, collecting any modify bits.
1538 */
1539 {
1540 register int i = ptes_per_vm_page;
1541
1542 do {
1543 pmap_phys_attributes[pai] |=
1544 *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
1545 *pte++ = 0;
1546 } while (--i > 0);
1547 }
1548
1549 assert(pmap->stats.resident_count >= 1);
1550 pmap->stats.resident_count--;
1551
1552 /*
1553 * Remove the pv_entry.
1554 */
1555 if (pv_e == pv_h) {
1556 /*
1557 * Fix up head later.
1558 */
1559 pv_h->pmap = PMAP_NULL;
1560 }
1561 else {
1562 /*
1563 * Delete this entry.
1564 */
1565 prev->next = pv_e->next;
1566 PV_FREE(pv_e);
1567 }
1568 }
1569 else {
1570 /*
1571 * Write-protect.
1572 */
1573 register int i = ptes_per_vm_page;
1574
1575 do {
1576 *pte &= ~INTEL_PTE_WRITE;
1577 pte++;
1578 } while (--i > 0);
1579
1580 /*
1581 * Advance prev.
1582 */
1583 prev = pv_e;
1584 }
1585
1586 simple_unlock(&pmap->lock);
1587
1588 } while ((pv_e = prev->next) != PV_ENTRY_NULL);
1589
1590 /*
1591 * If pv_head mapping was removed, fix it up.
1592 */
1593 if (pv_h->pmap == PMAP_NULL) {
1594 pv_e = pv_h->next;
1595 if (pv_e != PV_ENTRY_NULL) {
1596 *pv_h = *pv_e;
1597 PV_FREE(pv_e);
1598 }
1599 }
1600 }
1601
1602 PMAP_WRITE_UNLOCK(spl);
1603 }
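
/*
 * (Not in the original source.)  Typical calls, for illustration:
 *
 *	pmap_page_protect(pn, VM_PROT_READ);	write-protect every mapping of pn
 *	pmap_page_protect(pn, VM_PROT_NONE);	remove every mapping of pn
 *
 * Note from the code above that kernel mappings are removed rather than
 * write-protected, since the i386 ignores write permission in kernel mode.
 */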
1604
1605 /*
1606 * Set the physical protection on the
1607 * specified range of this map as requested.
1608 * Will not increase permissions.
1609 */
1610 void
1611 pmap_protect(
1612 pmap_t map,
1613 vm_offset_t s,
1614 vm_offset_t e,
1615 vm_prot_t prot)
1616 {
1617 register pt_entry_t *pde;
1618 register pt_entry_t *spte, *epte;
1619 vm_offset_t l;
1620 spl_t spl;
1621
1622
1623 if (map == PMAP_NULL)
1624 return;
1625
1626 /*
1627 * Determine the new protection.
1628 */
1629 switch (prot) {
1630 case VM_PROT_READ:
1631 case VM_PROT_READ|VM_PROT_EXECUTE:
1632 break;
1633 case VM_PROT_READ|VM_PROT_WRITE:
1634 case VM_PROT_ALL:
1635 return; /* nothing to do */
1636 default:
1637 pmap_remove(map, (addr64_t)s, (addr64_t)e);
1638 return;
1639 }
1640
1641 /*
1642 * If write-protecting in the kernel pmap,
1643 * remove the mappings; the i386 ignores
1644 * the write-permission bit in kernel mode.
1645 *
1646 * XXX should be #if'd for i386
1647 */
1648
1649 if (cpuid_family() == CPUID_FAMILY_386)
1650 if (map == kernel_pmap) {
1651 pmap_remove(map, (addr64_t)s, (addr64_t)e);
1652 return;
1653 }
1654
1655 SPLVM(spl);
1656 simple_lock(&map->lock);
1657
1658 /*
1659 * Invalidate the translation buffer first
1660 */
1661 PMAP_UPDATE_TLBS(map, s, e);
1662
1663 pde = pmap_pde(map, s);
1664 while (s < e) {
1665 l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
1666 if (l > e)
1667 l = e;
1668 if (*pde & INTEL_PTE_VALID) {
1669 spte = (pt_entry_t *)ptetokv(*pde);
1670 spte = &spte[ptenum(s)];
1671 epte = &spte[intel_btop(l-s)];
1672
1673 while (spte < epte) {
1674 if (*spte & INTEL_PTE_VALID)
1675 *spte &= ~INTEL_PTE_WRITE;
1676 spte++;
1677 }
1678 }
1679 s = l;
1680 pde++;
1681 }
1682
1683 simple_unlock(&map->lock);
1684 SPLX(spl);
1685 }
1686
1687
1688
1689 /*
1690 * Insert the given physical page (p) at
1691 * the specified virtual address (v) in the
1692 * target physical map with the protection requested.
1693 *
1694 * If specified, the page will be wired down, meaning
1695 * that the related pte cannot be reclaimed.
1696 *
1697 * NB: This is the only routine which MAY NOT lazy-evaluate
1698 * or lose information. That is, this routine must actually
1699 * insert this page into the given map NOW.
1700 */
1701 void
1702 pmap_enter(
1703 register pmap_t pmap,
1704 vm_offset_t v,
1705 ppnum_t pn,
1706 vm_prot_t prot,
1707 unsigned int flags,
1708 boolean_t wired)
1709 {
1710 register pt_entry_t *pte;
1711 register pv_entry_t pv_h;
1712 register int i, pai;
1713 pv_entry_t pv_e;
1714 pt_entry_t template;
1715 spl_t spl;
1716 vm_offset_t old_pa;
1717 vm_offset_t pa = (vm_offset_t)i386_ptob(pn);
1718
1719 XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n",
1720 current_thread()->top_act,
1721 current_thread(),
1722 pmap, v, pn);
1723
1724 assert(pn != vm_page_fictitious_addr);
1725 if (pmap_debug)
1726 printf("pmap(%x, %x)\n", v, pn);
1727 if (pmap == PMAP_NULL)
1728 return;
1729
1730 if (cpuid_family() == CPUID_FAMILY_386)
1731 if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0
1732 && !wired /* hack for io_wire */ ) {
1733 /*
1734 * Because the 386 ignores write protection in kernel mode,
1735 * we cannot enter a read-only kernel mapping, and must
1736 * remove an existing mapping if changing it.
1737 *
1738 * XXX should be #if'd for i386
1739 */
1740 PMAP_READ_LOCK(pmap, spl);
1741
1742 pte = pmap_pte(pmap, v);
1743 if (pte != PT_ENTRY_NULL && pte_to_pa(*pte) != 0) {
1744 /*
1745 * Invalidate the translation buffer,
1746 * then remove the mapping.
1747 */
1748 PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
1749 pmap_remove_range(pmap, v, pte,
1750 pte + ptes_per_vm_page);
1751 }
1752 PMAP_READ_UNLOCK(pmap, spl);
1753 return;
1754 }
1755
1756 /*
1757 * Must allocate a new pvlist entry while we're unlocked;
1758 * zalloc may cause pageout (which will lock the pmap system).
1759 * If we determine we need a pvlist entry, we will unlock
1760 	 *	and allocate one.  Then we will retry, throwing away
1761 * the allocated entry later (if we no longer need it).
1762 */
1763 pv_e = PV_ENTRY_NULL;
1764 Retry:
1765 PMAP_READ_LOCK(pmap, spl);
1766
1767 /*
1768 * Expand pmap to include this pte. Assume that
1769 * pmap is always expanded to include enough hardware
1770 * pages to map one VM page.
1771 */
1772
1773 while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
1774 /*
1775 * Must unlock to expand the pmap.
1776 */
1777 PMAP_READ_UNLOCK(pmap, spl);
1778
1779 pmap_expand(pmap, v);
1780
1781 PMAP_READ_LOCK(pmap, spl);
1782 }
1783 /*
1784 * Special case if the physical page is already mapped
1785 * at this address.
1786 */
1787 old_pa = pte_to_pa(*pte);
1788 if (old_pa == pa) {
1789 /*
1790 * May be changing its wired attribute or protection
1791 */
1792
1793 template = pa_to_pte(pa) | INTEL_PTE_VALID;
1794
1795 if(flags & VM_MEM_NOT_CACHEABLE) {
1796 if(!(flags & VM_MEM_GUARDED))
1797 template |= INTEL_PTE_PTA;
1798 template |= INTEL_PTE_NCACHE;
1799 }
1800
1801 if (pmap != kernel_pmap)
1802 template |= INTEL_PTE_USER;
1803 if (prot & VM_PROT_WRITE)
1804 template |= INTEL_PTE_WRITE;
1805 if (wired) {
1806 template |= INTEL_PTE_WIRED;
1807 if (!iswired(*pte))
1808 pmap->stats.wired_count++;
1809 }
1810 else {
1811 if (iswired(*pte)) {
1812 assert(pmap->stats.wired_count >= 1);
1813 pmap->stats.wired_count--;
1814 }
1815 }
1816
1817 PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
1818 i = ptes_per_vm_page;
1819 do {
1820 if (*pte & INTEL_PTE_MOD)
1821 template |= INTEL_PTE_MOD;
1822 WRITE_PTE(pte, template)
1823 pte++;
1824 pte_increment_pa(template);
1825 } while (--i > 0);
1826
1827 goto Done;
1828 }
1829
1830 /*
1831 * Outline of code from here:
1832 * 1) If va was mapped, update TLBs, remove the mapping
1833 * and remove old pvlist entry.
1834 * 2) Add pvlist entry for new mapping
1835 * 3) Enter new mapping.
1836 *
1837 * SHARING_FAULTS complicates this slightly in that it cannot
1838 * replace the mapping, but must remove it (because adding the
1839 * pvlist entry for the new mapping may remove others), and
1840 * hence always enters the new mapping at step 3)
1841 *
1842 * If the old physical page is not managed step 1) is skipped
1843 * (except for updating the TLBs), and the mapping is
1844 * overwritten at step 3). If the new physical page is not
1845 * managed, step 2) is skipped.
1846 */
1847
1848 if (old_pa != (vm_offset_t) 0) {
1849
1850 PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
1851
1852 #if DEBUG_PTE_PAGE
1853 if (pmap != kernel_pmap)
1854 ptep_check(get_pte_page(pte));
1855 #endif /* DEBUG_PTE_PAGE */
1856
1857 /*
1858 * Don't do anything to pages outside valid memory here.
1859 * Instead convince the code that enters a new mapping
1860 * to overwrite the old one.
1861 */
1862
1863 if (valid_page(old_pa)) {
1864
1865 pai = pa_index(old_pa);
1866 LOCK_PVH(pai);
1867
1868 assert(pmap->stats.resident_count >= 1);
1869 pmap->stats.resident_count--;
1870 if (iswired(*pte)) {
1871 assert(pmap->stats.wired_count >= 1);
1872 pmap->stats.wired_count--;
1873 }
1874 i = ptes_per_vm_page;
1875 do {
1876 pmap_phys_attributes[pai] |=
1877 *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
1878 WRITE_PTE(pte, 0)
1879 pte++;
1880 pte_increment_pa(template);
1881 } while (--i > 0);
1882
1883 /*
1884 * Put pte back to beginning of page since it'll be
1885 * used later to enter the new page.
1886 */
1887 pte -= ptes_per_vm_page;
1888
1889 /*
1890 * Remove the mapping from the pvlist for
1891 * this physical page.
1892 */
1893 {
1894 register pv_entry_t prev, cur;
1895
1896 pv_h = pai_to_pvh(pai);
1897 if (pv_h->pmap == PMAP_NULL) {
1898 panic("pmap_enter: null pv_list!");
1899 }
1900 if (pv_h->va == v && pv_h->pmap == pmap) {
1901 /*
1902 * Header is the pv_entry. Copy the next one
1903 * to header and free the next one (we cannot
1904 * free the header)
1905 */
1906 cur = pv_h->next;
1907 if (cur != PV_ENTRY_NULL) {
1908 *pv_h = *cur;
1909 pv_e = cur;
1910 }
1911 else {
1912 pv_h->pmap = PMAP_NULL;
1913 }
1914 }
1915 else {
1916 cur = pv_h;
1917 do {
1918 prev = cur;
1919 if ((cur = prev->next) == PV_ENTRY_NULL) {
1920 panic("pmap_enter: mapping not in pv_list!");
1921 }
1922 } while (cur->va != v || cur->pmap != pmap);
1923 prev->next = cur->next;
1924 pv_e = cur;
1925 }
1926 }
1927 UNLOCK_PVH(pai);
1928 }
1929 else {
1930
1931 /*
1932 * old_pa is not managed. Pretend it's zero so code
1933 * at Step 3) will enter new mapping (overwriting old
1934 * one). Do removal part of accounting.
1935 */
1936 old_pa = (vm_offset_t) 0;
1937 assert(pmap->stats.resident_count >= 1);
1938 pmap->stats.resident_count--;
1939 if (iswired(*pte)) {
1940 assert(pmap->stats.wired_count >= 1);
1941 pmap->stats.wired_count--;
1942 }
1943 }
1944 }
1945
1946 if (valid_page(pa)) {
1947
1948 /*
1949 * Step 2) Enter the mapping in the PV list for this
1950 * physical page.
1951 */
1952
1953 pai = pa_index(pa);
1954
1955
1956 #if SHARING_FAULTS
1957 RetryPvList:
1958 /*
1959 * We can return here from the sharing fault code below
1960 * in case we removed the only entry on the pv list and thus
1961 * must enter the new one in the list header.
1962 */
1963 #endif /* SHARING_FAULTS */
1964 LOCK_PVH(pai);
1965 pv_h = pai_to_pvh(pai);
1966
1967 if (pv_h->pmap == PMAP_NULL) {
1968 /*
1969 * No mappings yet
1970 */
1971 pv_h->va = v;
1972 pv_h->pmap = pmap;
1973 pv_h->next = PV_ENTRY_NULL;
1974 }
1975 else {
1976 #if DEBUG
1977 {
1978 /*
1979 * check that this mapping is not already there
1980 * or there is no alias for this mapping in the same map
1981 */
1982 pv_entry_t e = pv_h;
1983 while (e != PV_ENTRY_NULL) {
1984 if (e->pmap == pmap && e->va == v)
1985 panic("pmap_enter: already in pv_list");
1986 e = e->next;
1987 }
1988 }
1989 #endif /* DEBUG */
1990 #if SHARING_FAULTS
1991 {
1992 /*
1993 * do sharing faults.
1994 * if we find an entry on this pv list in the same address
1995 * space, remove it. we know there will not be more
1996 * than one.
1997 */
1998 pv_entry_t e = pv_h;
1999 pt_entry_t *opte;
2000
2001 while (e != PV_ENTRY_NULL) {
2002 if (e->pmap == pmap) {
2003 /*
2004 * Remove it, drop pv list lock first.
2005 */
2006 UNLOCK_PVH(pai);
2007
2008 opte = pmap_pte(pmap, e->va);
2009 assert(opte != PT_ENTRY_NULL);
2010 /*
2011 * Invalidate the translation buffer,
2012 * then remove the mapping.
2013 */
2014 PMAP_UPDATE_TLBS(pmap, e->va, e->va + PAGE_SIZE);
2015 pmap_remove_range(pmap, e->va, opte,
2016 opte + ptes_per_vm_page);
2017 /*
2018 			    /*
2019 			     * We could have removed the head entry, so
2020 			     * there may be no entries left and the new
2021 			     * mapping has to go in the pv head entry;
2022 			     * go back to the top and try again.
2023 */
2024 goto RetryPvList;
2025 }
2026 e = e->next;
2027 }
2028
2029 /*
2030 * check that this mapping is not already there
2031 */
2032 e = pv_h;
2033 while (e != PV_ENTRY_NULL) {
2034 if (e->pmap == pmap)
2035 panic("pmap_enter: alias in pv_list");
2036 e = e->next;
2037 }
2038 }
2039 #endif /* SHARING_FAULTS */
2040 #if DEBUG_ALIAS
2041 {
2042 /*
2043 * check for aliases within the same address space.
2044 */
2045 pv_entry_t e = pv_h;
2046 vm_offset_t rpc = get_rpc();
2047
2048 while (e != PV_ENTRY_NULL) {
2049 if (e->pmap == pmap) {
2050 /*
2051 * log this entry in the alias ring buffer
2052 * if it's not there already.
2053 */
2054 struct pmap_alias *pma;
2055 int ii, logit;
2056
2057 logit = TRUE;
2058 for (ii = 0; ii < pmap_alias_index; ii++) {
2059 if (pmap_aliasbuf[ii].rpc == rpc) {
2060 /* found it in the log already */
2061 logit = FALSE;
2062 break;
2063 }
2064 }
2065 if (logit) {
2066 pma = &pmap_aliasbuf[pmap_alias_index];
2067 pma->pmap = pmap;
2068 pma->va = v;
2069 pma->rpc = rpc;
2070 pma->cookie = PMAP_ALIAS_COOKIE;
2071 if (++pmap_alias_index >= PMAP_ALIAS_MAX)
2072 panic("pmap_enter: exhausted alias log");
2073 }
2074 }
2075 e = e->next;
2076 }
2077 }
2078 #endif /* DEBUG_ALIAS */
2079 /*
2080 * Add new pv_entry after header.
2081 */
2082 if (pv_e == PV_ENTRY_NULL) {
2083 PV_ALLOC(pv_e);
2084 if (pv_e == PV_ENTRY_NULL) {
2085 UNLOCK_PVH(pai);
2086 PMAP_READ_UNLOCK(pmap, spl);
2087
2088 /*
2089 * Refill from zone.
2090 */
2091 pv_e = (pv_entry_t) zalloc(pv_list_zone);
2092 goto Retry;
2093 }
2094 }
2095 pv_e->va = v;
2096 pv_e->pmap = pmap;
2097 pv_e->next = pv_h->next;
2098 pv_h->next = pv_e;
2099 /*
2100 * Remember that we used the pvlist entry.
2101 */
2102 pv_e = PV_ENTRY_NULL;
2103 }
2104 UNLOCK_PVH(pai);
2105 }
2106
2107 /*
2108 * Step 3) Enter and count the mapping.
2109 */
2110
2111 pmap->stats.resident_count++;
2112
2113 /*
2114 * Build a template to speed up entering -
2115 * only the pfn changes.
2116 */
2117 template = pa_to_pte(pa) | INTEL_PTE_VALID;
2118
2119 if(flags & VM_MEM_NOT_CACHEABLE) {
2120 if(!(flags & VM_MEM_GUARDED))
2121 template |= INTEL_PTE_PTA;
2122 template |= INTEL_PTE_NCACHE;
2123 }
2124
2125 if (pmap != kernel_pmap)
2126 template |= INTEL_PTE_USER;
2127 if (prot & VM_PROT_WRITE)
2128 template |= INTEL_PTE_WRITE;
2129 if (wired) {
2130 template |= INTEL_PTE_WIRED;
2131 pmap->stats.wired_count++;
2132 }
2133 i = ptes_per_vm_page;
2134 do {
2135 WRITE_PTE(pte, template)
2136 pte++;
2137 pte_increment_pa(template);
2138 } while (--i > 0);
2139 Done:
2140 if (pv_e != PV_ENTRY_NULL) {
2141 PV_FREE(pv_e);
2142 }
2143
2144 PMAP_READ_UNLOCK(pmap, spl);
2145 }
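
/*
 * Illustrative sketch (not in the original source): entering a wired,
 * non-cacheable mapping, e.g. for a device page.  With
 * VM_MEM_NOT_CACHEABLE and without VM_MEM_GUARDED the pte gets
 * INTEL_PTE_PTA | INTEL_PTE_NCACHE, selecting the write-combining PAT
 * entry programmed in pmap_bootstrap.  vaddr and pn are hypothetical.
 */
#if 0
	pmap_enter(kernel_pmap,
		   vaddr,			/* hypothetical kernel va */
		   pn,				/* hypothetical device page */
		   VM_PROT_READ|VM_PROT_WRITE,
		   VM_MEM_NOT_CACHEABLE,
		   TRUE);			/* wired */
#endif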
2146
2147 /*
2148 * Routine: pmap_change_wiring
2149 * Function: Change the wiring attribute for a map/virtual-address
2150 * pair.
2151 * In/out conditions:
2152 * The mapping must already exist in the pmap.
2153 */
2154 void
2155 pmap_change_wiring(
2156 register pmap_t map,
2157 vm_offset_t v,
2158 boolean_t wired)
2159 {
2160 register pt_entry_t *pte;
2161 register int i;
2162 spl_t spl;
2163
2164 #if 1
2165 /*
2166 * We must grab the pmap system lock because we may
2167 * change a pte_page queue.
2168 */
2169 PMAP_READ_LOCK(map, spl);
2170
2171 if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
2172 panic("pmap_change_wiring: pte missing");
2173
2174 if (wired && !iswired(*pte)) {
2175 /*
2176 * wiring down mapping
2177 */
2178 map->stats.wired_count++;
2179 i = ptes_per_vm_page;
2180 do {
2181 *pte++ |= INTEL_PTE_WIRED;
2182 } while (--i > 0);
2183 }
2184 else if (!wired && iswired(*pte)) {
2185 /*
2186 * unwiring mapping
2187 */
2188 assert(map->stats.wired_count >= 1);
2189 map->stats.wired_count--;
2190 i = ptes_per_vm_page;
2191 do {
2192 *pte++ &= ~INTEL_PTE_WIRED;
2193 } while (--i > 0);
2194 }
2195
2196 PMAP_READ_UNLOCK(map, spl);
2197
2198 #else
2199 return;
2200 #endif
2201
2202 }
2203
2204 ppnum_t
2205 pmap_find_phys(pmap_t pmap, addr64_t va)
2206 {
2207 pt_entry_t *ptp;
2208 vm_offset_t a32;
2209 ppnum_t ppn;
2210
2211 if (value_64bit(va)) panic("pmap_find_phys 64 bit value");
2212 a32 = (vm_offset_t)low32(va);
2213 ptp = pmap_pte(pmap, a32);
2214 if (PT_ENTRY_NULL == ptp)
2215 return 0;
2216 ppn = (ppnum_t)i386_btop(pte_to_pa(*ptp));
2217 return ppn;
2218 }
2219
2220 /*
2221 * Routine: pmap_extract
2222 * Function:
2223 * Extract the physical page address associated
2224 * with the given map/virtual_address pair.
2225 */
2226
2227 vm_offset_t
2228 pmap_extract(
2229 register pmap_t pmap,
2230 vm_offset_t va)
2231 {
2232 register pt_entry_t *pte;
2233 register vm_offset_t pa;
2234 spl_t spl;
2235
2236 SPLVM(spl);
2237 simple_lock(&pmap->lock);
2238 if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
2239 pa = (vm_offset_t) 0;
2240 else if (!(*pte & INTEL_PTE_VALID))
2241 pa = (vm_offset_t) 0;
2242 else
2243 pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
2244 simple_unlock(&pmap->lock);
2245 SPLX(spl);
2246 return(pa);
2247 }
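
/*
 * Illustrative usage sketch (not part of the compiled source):
 * translating a virtual address by hand.  A return value of 0 means
 * "no valid mapping"; otherwise the page-offset bits of the virtual
 * address are preserved in the result.  "some_va" is a made-up name.
 */
#if 0
	vm_offset_t pa = pmap_extract(kernel_pmap, some_va);

	if (pa == (vm_offset_t) 0)
		printf("0x%x is not mapped\n", some_va);
	else
		assert((pa & INTEL_OFFMASK) == (some_va & INTEL_OFFMASK));
#endif	/* 0 */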
2248
2249 /*
2250 * Routine: pmap_expand
2251 *
2252 * Expands a pmap to be able to map the specified virtual address.
2253 *
2254 * Allocates a new page-table page for the pmap and installs the
2255 * page directory entries that point to it, so that mappings for
2256 * the given virtual address can then be entered.
2257 *
2258 * Must be called with the pmap system and the pmap unlocked,
2259 * since this routine may block and takes the pmap system lock itself.
2260 * Thus it must be called in a loop that checks whether the map
2261 * has been expanded enough.
2262 * (We won't loop forever, since page tables aren't shrunk.)
2263 */
2264 void
2265 pmap_expand(
2266 register pmap_t map,
2267 register vm_offset_t v)
2268 {
2269 pt_entry_t *pdp;
2270 register vm_page_t m;
2271 register vm_offset_t pa;
2272 register int i;
2273 spl_t spl;
2274 ppnum_t pn;
2275
2276 if (map == kernel_pmap)
2277 panic("pmap_expand");
2278
2279 /*
2280 * We cannot allocate the pmap_object in pmap_init,
2281 * because it is called before the zone package is up.
2282 * Allocate it now if it is missing.
2283 */
2284 if (pmap_object == VM_OBJECT_NULL)
2285 pmap_object = vm_object_allocate(avail_end);
2286
2287 /*
2288 * Allocate a VM page for the level 2 page table entries.
2289 */
2290 while ((m = vm_page_grab()) == VM_PAGE_NULL)
2291 VM_PAGE_WAIT();
2292
2293 /*
2294 * Map the page to its physical address so that it
2295 * can be found later.
2296 */
2297 pn = m->phys_page;
2298 pa = i386_ptob(pn);
2299 vm_object_lock(pmap_object);
2300 vm_page_insert(m, pmap_object, (vm_object_offset_t)pa);
2301 vm_page_lock_queues();
2302 vm_page_wire(m);
2303 inuse_ptepages_count++;
2304 vm_object_unlock(pmap_object);
2305 vm_page_unlock_queues();
2306
2307 /*
2308 * Zero the page.
2309 */
2310 memset((void *)phystokv(pa), 0, PAGE_SIZE);
2311
2312 PMAP_READ_LOCK(map, spl);
2313 /*
2314 * See if someone else expanded us first
2315 */
2316 if (pmap_pte(map, v) != PT_ENTRY_NULL) {
2317 PMAP_READ_UNLOCK(map, spl);
2318 vm_object_lock(pmap_object);
2319 vm_page_lock_queues();
2320 vm_page_free(m);
2321 inuse_ptepages_count--;
2322 vm_page_unlock_queues();
2323 vm_object_unlock(pmap_object);
2324 return;
2325 }
2326
2327 /*
2328 * Set the page directory entry for this page table.
2329 * If we have allocated more than one hardware page,
2330 * set several page directory entries.
2331 */
2332
2333 i = ptes_per_vm_page;
2334 pdp = &map->dirbase[pdenum(map, v) & ~(i-1)];
2335 do {
2336 *pdp = pa_to_pte(pa)
2337 | INTEL_PTE_VALID
2338 | INTEL_PTE_USER
2339 | INTEL_PTE_WRITE;
2340 pdp++;
2341 pa += INTEL_PGBYTES;
2342 } while (--i > 0);
2343
2344 PMAP_READ_UNLOCK(map, spl);
2345 return;
2346 }
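
/*
 * Illustrative caller sketch (not part of the compiled source): the
 * retry loop the block comment above calls for.  A caller drops its
 * locks, expands, then re-checks whether the page table now exists;
 * the names ("map", "v", "spl", "pte") follow the surrounding code,
 * but the loop itself is only a sketch.
 */
#if 0
	while ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL) {
		PMAP_READ_UNLOCK(map, spl);	/* must be unlocked to expand */
		pmap_expand(map, v);
		PMAP_READ_LOCK(map, spl);	/* re-lock and test again */
	}
#endif	/* 0 */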
2347
2348 /*
2349 * Copy the range specified by src_addr/len
2350 * from the source map to the range dst_addr/len
2351 * in the destination map.
2352 *
2353 * This routine is only advisory and need not do anything.
2354 */
2355 #if 0
2356 void
2357 pmap_copy(
2358 pmap_t dst_pmap,
2359 pmap_t src_pmap,
2360 vm_offset_t dst_addr,
2361 vm_size_t len,
2362 vm_offset_t src_addr)
2363 {
2364 #ifdef lint
2365 dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
2366 #endif /* lint */
2367 }
2368 #endif/* 0 */
2369
2370 /*
2371 * pmap_sync_caches_phys(ppnum_t pa)
2372 *
2373 * Invalidates all of the instruction cache on a physical page and
2374 * pushes any dirty data from the data cache for the same physical page
2375 */
2376
2377 void pmap_sync_caches_phys(ppnum_t pa)
2378 {
2379 // if (!(cpuid_features() & CPUID_FEATURE_SS))
2380 {
2381 __asm__ volatile("wbinvd");
2382 }
2383 return;
2384 }
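
/*
 * Illustrative sketch (not part of the compiled source): what enabling
 * the commented-out test above would look like -- skip the costly
 * wbinvd on processors whose feature bits report self-snooping caches.
 * cpuid_features() and CPUID_FEATURE_SS are the names already
 * referenced in pmap_sync_caches_phys().
 */
#if 0
	if (!(cpuid_features() & CPUID_FEATURE_SS))
		__asm__ volatile("wbinvd");
#endif	/* 0 */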
2385
2386 int collect_ref;
2387 int collect_unref;
2388
2389 /*
2390 * Routine: pmap_collect
2391 * Function:
2392 * Garbage collects the physical map system for
2393 * pages which are no longer used.
2394 * Success need not be guaranteed -- that is, there
2395 * may well be pages which are not referenced, but
2396 * others may be collected.
2397 * Usage:
2398 * Called by the pageout daemon when pages are scarce.
2399 */
2400 void
2401 pmap_collect(
2402 pmap_t p)
2403 {
2404 register pt_entry_t *pdp, *ptp;
2405 pt_entry_t *eptp;
2406 vm_offset_t pa;
2407 int wired;
2408 spl_t spl;
2409
2410 if (p == PMAP_NULL)
2411 return;
2412
2413 if (p == kernel_pmap)
2414 return;
2415
2416 /*
2417 * Garbage collect map.
2418 */
2419 PMAP_READ_LOCK(p, spl);
2420 PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
2421
2422 for (pdp = p->dirbase;
2423 pdp < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)];
2424 pdp += ptes_per_vm_page)
2425 {
2426 if (*pdp & INTEL_PTE_VALID)
2427 if(*pdp & INTEL_PTE_REF) {
2428 *pdp &= ~INTEL_PTE_REF;
2429 collect_ref++;
2430 } else {
2431 collect_unref++;
2432 pa = pte_to_pa(*pdp);
2433 ptp = (pt_entry_t *)phystokv(pa);
2434 eptp = ptp + NPTES*ptes_per_vm_page;
2435
2436 /*
2437 * If the pte page has any wired mappings, we cannot
2438 * free it.
2439 */
2440 wired = 0;
2441 {
2442 register pt_entry_t *ptep;
2443 for (ptep = ptp; ptep < eptp; ptep++) {
2444 if (iswired(*ptep)) {
2445 wired = 1;
2446 break;
2447 }
2448 }
2449 }
2450 if (!wired) {
2451 /*
2452 * Remove the virtual addresses mapped by this pte page.
2453 */
2454 pmap_remove_range(p,
2455 pdetova(pdp - p->dirbase),
2456 ptp,
2457 eptp);
2458
2459 /*
2460 * Invalidate the page directory pointer.
2461 */
2462 {
2463 register int i = ptes_per_vm_page;
2464 register pt_entry_t *pdep = pdp;
2465 do {
2466 *pdep++ = 0;
2467 } while (--i > 0);
2468 }
2469
2470 PMAP_READ_UNLOCK(p, spl);
2471
2472 /*
2473 * And free the pte page itself.
2474 */
2475 {
2476 register vm_page_t m;
2477
2478 vm_object_lock(pmap_object);
2479 m = vm_page_lookup(pmap_object, pa);
2480 if (m == VM_PAGE_NULL)
2481 panic("pmap_collect: pte page not in object");
2482 vm_page_lock_queues();
2483 vm_page_free(m);
2484 inuse_ptepages_count--;
2485 vm_page_unlock_queues();
2486 vm_object_unlock(pmap_object);
2487 }
2488
2489 PMAP_READ_LOCK(p, spl);
2490 }
2491 }
2492 }
2493 PMAP_READ_UNLOCK(p, spl);
2494 return;
2495
2496 }
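
/*
 * Illustrative sketch (not part of the compiled source) of the
 * reference-bit aging performed above for a valid directory entry:
 * an entry whose REF bit is set merely has the bit cleared on this
 * pass; only an entry still found clear on a later pass (and with no
 * wired ptes) has its pte page reclaimed.  "reclaim_pte_page" is a
 * made-up helper name.
 */
#if 0
	if (*pdp & INTEL_PTE_REF)
		*pdp &= ~INTEL_PTE_REF;		/* referenced: give it another pass */
	else
		reclaim_pte_page(p, pdp);	/* unreferenced: candidate to free */
#endif	/* 0 */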
2497
2498 /*
2499 * Routine: pmap_kernel
2500 * Function:
2501 * Returns the physical map handle for the kernel.
2502 */
2503 #if 0
2504 pmap_t
2505 pmap_kernel(void)
2506 {
2507 return (kernel_pmap);
2508 }
2509 #endif/* 0 */
2510
2511 /*
2512 * pmap_zero_page zeros the specified (machine independent) page.
2513 * See machine/phys.c or machine/phys.s for implementation.
2514 */
2515 #if 0
2516 void
2517 pmap_zero_page(
2518 register vm_offset_t phys)
2519 {
2520 register int i;
2521
2522 assert(phys != vm_page_fictitious_addr);
2523 i = PAGE_SIZE / INTEL_PGBYTES;
2524 phys = intel_pfn(phys);
2525
2526 while (i--)
2527 zero_phys(phys++);
2528 }
2529 #endif/* 0 */
2530
2531 /*
2532 * pmap_copy_page copies the specified (machine independent) page.
2533 * See machine/phys.c or machine/phys.s for implementation.
2534 */
2535 #if 0
2536 void
2537 pmap_copy_page(
2538 vm_offset_t src,
2539 vm_offset_t dst)
2540 {
2541 int i;
2542
2543 assert(src != vm_page_fictitious_addr);
2544 assert(dst != vm_page_fictitious_addr);
2545 i = PAGE_SIZE / INTEL_PGBYTES;
2546
2547 while (i--) {
2548 copy_phys(intel_pfn(src), intel_pfn(dst));
2549 src += INTEL_PGBYTES;
2550 dst += INTEL_PGBYTES;
2551 }
2552 }
2553 #endif/* 0 */
2554
2555 /*
2556 * Routine: pmap_pageable
2557 * Function:
2558 * Make the specified pages (by pmap, offset)
2559 * pageable (or not) as requested.
2560 *
2561 * A page which is not pageable may not take
2562 * a fault; therefore, its page table entry
2563 * must remain valid for the duration.
2564 *
2565 * This routine is merely advisory; pmap_enter
2566 * will specify that these pages are to be wired
2567 * down (or not) as appropriate.
2568 */
2569 void
2570 pmap_pageable(
2571 pmap_t pmap,
2572 vm_offset_t start,
2573 vm_offset_t end,
2574 boolean_t pageable)
2575 {
2576 #ifdef lint
2577 pmap++; start++; end++; pageable++;
2578 #endif /* lint */
2579 }
2580
2581 /*
2582 * Clear specified attribute bits.
2583 */
2584 void
2585 phys_attribute_clear(
2586 vm_offset_t phys,
2587 int bits)
2588 {
2589 pv_entry_t pv_h;
2590 register pv_entry_t pv_e;
2591 register pt_entry_t *pte;
2592 int pai;
2593 register pmap_t pmap;
2594 spl_t spl;
2595
2596 assert(phys != vm_page_fictitious_addr);
2597 if (!valid_page(phys)) {
2598 /*
2599 * Not a managed page.
2600 */
2601 return;
2602 }
2603
2604 /*
2605 * Lock the pmap system first, since we will be changing
2606 * several pmaps.
2607 */
2608
2609 PMAP_WRITE_LOCK(spl);
2610
2611 pai = pa_index(phys);
2612 pv_h = pai_to_pvh(pai);
2613
2614 /*
2615 * Walk down PV list, clearing all modify or reference bits.
2616 * We do not have to lock the pv_list because we have
2617 * the entire pmap system locked.
2618 */
2619 if (pv_h->pmap != PMAP_NULL) {
2620 /*
2621 * There are some mappings.
2622 */
2623 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
2624
2625 pmap = pv_e->pmap;
2626 /*
2627 * Lock the pmap to block pmap_extract and similar routines.
2628 */
2629 simple_lock(&pmap->lock);
2630
2631 {
2632 register vm_offset_t va;
2633
2634 va = pv_e->va;
2635 pte = pmap_pte(pmap, va);
2636
2637 #if 0
2638 /*
2639 * Consistency checks.
2640 */
2641 assert(*pte & INTEL_PTE_VALID);
2642 /* assert(pte_to_phys(*pte) == phys); */
2643 #endif
2644
2645 /*
2646 * Invalidate TLBs for all CPUs using this mapping.
2647 */
2648 PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
2649 }
2650
2651 /*
2652 * Clear modify or reference bits.
2653 */
2654 {
2655 register int i = ptes_per_vm_page;
2656 do {
2657 *pte++ &= ~bits;
2658 } while (--i > 0);
2659 }
2660 simple_unlock(&pmap->lock);
2661 }
2662 }
2663
2664 pmap_phys_attributes[pai] &= ~bits;
2665
2666 PMAP_WRITE_UNLOCK(spl);
2667 }
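
/*
 * Illustrative sketch (not part of the compiled source): the pv list
 * walked above.  Each managed physical page has a head entry in the
 * pv table, indexed by pa_index(); further mappings of the same page
 * are chained through pv_e->next, and each entry records the pmap and
 * virtual address of one mapping.
 */
#if 0
	pv_entry_t pv_h, pv_e;

	pv_h = pai_to_pvh(pa_index(phys));
	if (pv_h->pmap != PMAP_NULL)
		for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next)
			printf("mapped at va 0x%x in pmap 0x%x\n",
			       pv_e->va, pv_e->pmap);
#endif	/* 0 */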
2668
2669 /*
2670 * Check specified attribute bits.
2671 */
2672 boolean_t
2673 phys_attribute_test(
2674 vm_offset_t phys,
2675 int bits)
2676 {
2677 pv_entry_t pv_h;
2678 register pv_entry_t pv_e;
2679 register pt_entry_t *pte;
2680 int pai;
2681 register pmap_t pmap;
2682 spl_t spl;
2683
2684 assert(phys != vm_page_fictitious_addr);
2685 if (!valid_page(phys)) {
2686 /*
2687 * Not a managed page.
2688 */
2689 return (FALSE);
2690 }
2691
2692 /*
2693 * Lock the pmap system first, since we will be checking
2694 * several pmaps.
2695 */
2696
2697 PMAP_WRITE_LOCK(spl);
2698
2699 pai = pa_index(phys);
2700 pv_h = pai_to_pvh(pai);
2701
2702 if (pmap_phys_attributes[pai] & bits) {
2703 PMAP_WRITE_UNLOCK(spl);
2704 return (TRUE);
2705 }
2706
2707 /*
2708 * Walk down PV list, checking all mappings.
2709 * We do not have to lock the pv_list because we have
2710 * the entire pmap system locked.
2711 */
2712 if (pv_h->pmap != PMAP_NULL) {
2713 /*
2714 * There are some mappings.
2715 */
2716 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
2717
2718 pmap = pv_e->pmap;
2719 /*
2720 * Lock the pmap to block pmap_extract and similar routines.
2721 */
2722 simple_lock(&pmap->lock);
2723
2724 {
2725 register vm_offset_t va;
2726
2727 va = pv_e->va;
2728 pte = pmap_pte(pmap, va);
2729
2730 #if 0
2731 /*
2732 * Consistency checks.
2733 */
2734 assert(*pte & INTEL_PTE_VALID);
2735 /* assert(pte_to_phys(*pte) == phys); */
2736 #endif
2737 }
2738
2739 /*
2740 * Check modify or reference bits.
2741 */
2742 {
2743 register int i = ptes_per_vm_page;
2744
2745 do {
2746 if (*pte++ & bits) {
2747 simple_unlock(&pmap->lock);
2748 PMAP_WRITE_UNLOCK(spl);
2749 return (TRUE);
2750 }
2751 } while (--i > 0);
2752 }
2753 simple_unlock(&pmap->lock);
2754 }
2755 }
2756 PMAP_WRITE_UNLOCK(spl);
2757 return (FALSE);
2758 }
2759
2760 /*
2761 * Set specified attribute bits.
2762 */
2763 void
2764 phys_attribute_set(
2765 vm_offset_t phys,
2766 int bits)
2767 {
2768 int spl;
2769
2770 assert(phys != vm_page_fictitious_addr);
2771 if (!valid_page(phys)) {
2772 /*
2773 * Not a managed page.
2774 */
2775 return;
2776 }
2777
2778 /*
2779 * Lock the pmap system and set the requested bits in
2780 * the phys attributes array. Don't need to bother with
2781 * ptes because the test routine looks here first.
2782 */
2783
2784 PMAP_WRITE_LOCK(spl);
2785 pmap_phys_attributes[pa_index(phys)] |= bits;
2786 PMAP_WRITE_UNLOCK(spl);
2787 }
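
/*
 * Illustrative sketch (not part of the compiled source) of the
 * contract relied on above: phys_attribute_set() touches only the
 * attribute array, which suffices because phys_attribute_test()
 * consults that array before it ever walks the ptes.  "phys" is a
 * managed physical address as in the routines above.
 */
#if 0
	phys_attribute_set(phys, PHYS_MODIFIED);
	assert(phys_attribute_test(phys, PHYS_MODIFIED));
#endif	/* 0 */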
2788
2789 /*
2790 * Set the modify bit on the specified physical page.
2791 */
2792
2793 void pmap_set_modify(
2794 ppnum_t pn)
2795 {
2796 vm_offset_t phys = (vm_offset_t)i386_ptob(pn);
2797 phys_attribute_set(phys, PHYS_MODIFIED);
2798 }
2799
2800 /*
2801 * Clear the modify bits on the specified physical page.
2802 */
2803
2804 void
2805 pmap_clear_modify(
2806 ppnum_t pn)
2807 {
2808 vm_offset_t phys = (vm_offset_t)i386_ptob(pn);
2809 phys_attribute_clear(phys, PHYS_MODIFIED);
2810 }
2811
2812 /*
2813 * pmap_is_modified:
2814 *
2815 * Return whether or not the specified physical page is modified
2816 * by any physical maps.
2817 */
2818
2819 boolean_t
2820 pmap_is_modified(
2821 ppnum_t pn)
2822 {
2823 vm_offset_t phys = (vm_offset_t)i386_ptob(pn);
2824 return (phys_attribute_test(phys, PHYS_MODIFIED));
2825 }
2826
2827 /*
2828 * pmap_clear_reference:
2829 *
2830 * Clear the reference bit on the specified physical page.
2831 */
2832
2833 void
2834 pmap_clear_reference(
2835 ppnum_t pn)
2836 {
2837 vm_offset_t phys = (vm_offset_t)i386_ptob(pn);
2838 phys_attribute_clear(phys, PHYS_REFERENCED);
2839 }
2840
2841 /*
2842 * pmap_is_referenced:
2843 *
2844 * Return whether or not the specified physical page is referenced
2845 * by any physical maps.
2846 */
2847
2848 boolean_t
2849 pmap_is_referenced(
2850 ppnum_t pn)
2851 {
2852 vm_offset_t phys = (vm_offset_t)i386_ptob(pn);
2853 return (phys_attribute_test(phys, PHYS_REFERENCED));
2854 }
2855
2856 /*
2857 * Set the modify bit on the specified range
2858 * of this map as requested.
2859 *
2860 * This optimization is valid only if each time the dirty bit
2861 * in vm_page_t is tested, it is also tested in the pmap.
2862 */
2863 void
2864 pmap_modify_pages(
2865 pmap_t map,
2866 vm_offset_t s,
2867 vm_offset_t e)
2868 {
2869 spl_t spl;
2870 register pt_entry_t *pde;
2871 register pt_entry_t *spte, *epte;
2872 vm_offset_t l;
2873
2874 if (map == PMAP_NULL)
2875 return;
2876
2877 PMAP_READ_LOCK(map, spl);
2878
2879 /*
2880 * Invalidate the translation buffer first
2881 */
2882 PMAP_UPDATE_TLBS(map, s, e);
2883
2884 pde = pmap_pde(map, s);
2885 while (s && s < e) {
2886 l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
2887 if (l > e)
2888 l = e;
2889 if (*pde & INTEL_PTE_VALID) {
2890 spte = (pt_entry_t *)ptetokv(*pde);
2891 if (l) {
2892 spte = &spte[ptenum(s)];
2893 epte = &spte[intel_btop(l-s)];
2894 } else {
2895 epte = &spte[intel_btop(PDE_MAPPED_SIZE)];
2896 spte = &spte[ptenum(s)];
2897 }
2898 while (spte < epte) {
2899 if (*spte & INTEL_PTE_VALID) {
2900 *spte |= (INTEL_PTE_MOD | INTEL_PTE_WRITE);
2901 }
2902 spte++;
2903 }
2904 }
2905 s = l;
2906 pde++;
2907 }
2908 PMAP_READ_UNLOCK(map, spl);
2909 }
2910
2911
2912 void
2913 invalidate_icache(vm_offset_t addr, unsigned cnt, int phys)
2914 {
2915 return;
2916 }
2917 void
2918 flush_dcache(vm_offset_t addr, unsigned count, int phys)
2919 {
2920 return;
2921 }
2922
2923 #if NCPUS > 1
2924 /*
2925 * TLB Coherence Code (TLB "shootdown" code)
2926 *
2927 * Threads that belong to the same task share the same address space and
2928 * hence share a pmap. However, they may run on distinct cpus and thus
2929 * have distinct TLBs that cache page table entries. In order to guarantee
2930 * the TLBs are consistent, whenever a pmap is changed, all threads that
2931 * are active in that pmap must have their TLB updated. To keep track of
2932 * this information, the set of cpus that are currently using a pmap is
2933 * maintained within each pmap structure (cpus_using). Pmap_activate() and
2934 * pmap_deactivate add and remove, respectively, a cpu from this set.
2935 * Since the TLBs are not addressable over the bus, each processor must
2936 * flush its own TLB; a processor that needs to invalidate another TLB
2937 * needs to interrupt the processor that owns that TLB to signal the
2938 * update.
2939 *
2940 * Whenever a pmap is updated, the lock on that pmap is locked, and all
2941 * cpus using the pmap are signaled to invalidate. All threads that need
2942 * to activate a pmap must wait for the lock to clear to await any updates
2943 * in progress before using the pmap. They must ACQUIRE the lock to add
2944 * their cpu to the cpus_using set. An implicit assumption made
2945 * throughout the TLB code is that all kernel code that runs at or higher
2946 * than splvm blocks out update interrupts, and that such code does not
2947 * touch pageable pages.
2948 *
2949 * A shootdown interrupt serves another function besides signaling a
2950 * processor to invalidate. The interrupt routine (pmap_update_interrupt)
2951 * waits for both the pmap lock and the kernel pmap lock to clear,
2952 * preventing user code from making implicit pmap updates while the
2953 * sending processor is performing its update. (This could happen via a
2954 * user data write reference that turns on the modify bit in the page
2955 * table). It must wait for any kernel updates that may have started
2956 * concurrently with a user pmap update because the IPC code
2957 * changes mappings.
2958 * Spinning on the VALUES of the locks is sufficient (rather than
2959 * having to acquire the locks) because any updates that occur subsequent
2960 * to finding the lock unlocked will be signaled via another interrupt.
2961 * (This assumes the interrupt is cleared before the low level interrupt code
2962 * calls pmap_update_interrupt()).
2963 *
2964 * The signaling processor must wait for any implicit updates in progress
2965 * to terminate before continuing with its update. Thus it must wait for an
2966 * acknowledgement of the interrupt from each processor for which such
2967 * references could be made. For maintaining this information, a set
2968 * cpus_active is used. A cpu is in this set if and only if it can
2969 * use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from
2970 * this set; when all such cpus are removed, it is safe to update.
2971 *
2972 * Before attempting to acquire the update lock on a pmap, a cpu (A) must
2973 * be at least at the priority of the interprocessor interrupt
2974 * (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a
2975 * kernel update; it would spin forever in pmap_update_interrupt() trying
2976 * to acquire the user pmap lock it had already acquired. Furthermore A
2977 * must remove itself from cpus_active. Otherwise, another cpu holding
2978 * the lock (B) could be in the process of sending an update signal to A,
2979 * and thus be waiting for A to remove itself from cpus_active. If A is
2980 * spinning on the lock at raised priority, this will never happen and a deadlock
2981 * will result.
2982 */
2983
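/*
 * Illustrative sender-side sketch (not part of the compiled source) of
 * the handshake described in the block comment above: with the pmap
 * locked, signal every other cpu using it, then spin until each of
 * those cpus has dropped out of cpus_active (i.e. has entered
 * pmap_update_interrupt()).  The PMAP_UPDATE_TLBS() macro used
 * throughout this file is the authoritative version; "my_cpu", "pmap",
 * "start" and "end" are placeholders here.
 */
#if 0
	cpu_set users;

	users = pmap->cpus_using & ~(1 << my_cpu);
	if (users) {
		signal_cpus(users, pmap, start, end);
		while (pmap->cpus_using & cpus_active & ~(1 << my_cpu))
			cpu_pause();
	}
#endif	/* 0 */
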
2984 /*
2985 * Signal another CPU that it must flush its TLB
2986 */
2987 void
2988 signal_cpus(
2989 cpu_set use_list,
2990 pmap_t pmap,
2991 vm_offset_t start,
2992 vm_offset_t end)
2993 {
2994 register int which_cpu, j;
2995 register pmap_update_list_t update_list_p;
2996
2997 while ((which_cpu = ffs((unsigned long)use_list)) != 0) {
2998 which_cpu -= 1; /* convert to 0 origin */
2999
3000 update_list_p = &cpu_update_list[which_cpu];
3001 simple_lock(&update_list_p->lock);
3002
3003 j = update_list_p->count;
3004 if (j >= UPDATE_LIST_SIZE) {
3005 /*
3006 * list overflowed. Change last item to
3007 * indicate overflow.
3008 */
3009 update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap;
3010 update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS;
3011 update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS;
3012 }
3013 else {
3014 update_list_p->item[j].pmap = pmap;
3015 update_list_p->item[j].start = start;
3016 update_list_p->item[j].end = end;
3017 update_list_p->count = j+1;
3018 }
3019 cpu_update_needed[which_cpu] = TRUE;
3020 simple_unlock(&update_list_p->lock);
3021
3022 /* if it's the kernel pmap, ignore cpus_idle */
3023 if (((cpus_idle & (1 << which_cpu)) == 0) ||
3024 (pmap == kernel_pmap) || real_pmap[which_cpu] == pmap)
3025 {
3026 i386_signal_cpu(which_cpu, MP_TLB_FLUSH, ASYNC);
3027 }
3028 use_list &= ~(1 << which_cpu);
3029 }
3030 }
3031
3032 void
3033 process_pmap_updates(
3034 register pmap_t my_pmap)
3035 {
3036 register int my_cpu;
3037 register pmap_update_list_t update_list_p;
3038 register int j;
3039 register pmap_t pmap;
3040
3041 mp_disable_preemption();
3042 my_cpu = cpu_number();
3043 update_list_p = &cpu_update_list[my_cpu];
3044 simple_lock(&update_list_p->lock);
3045
3046 for (j = 0; j < update_list_p->count; j++) {
3047 pmap = update_list_p->item[j].pmap;
3048 if (pmap == my_pmap ||
3049 pmap == kernel_pmap) {
3050
3051 if (pmap->ref_count <= 0) {
3052 PMAP_CPU_CLR(pmap, my_cpu);
3053 real_pmap[my_cpu] = kernel_pmap;
3054 set_cr3(kernel_pmap->pdirbase);
3055 } else
3056 INVALIDATE_TLB(pmap,
3057 update_list_p->item[j].start,
3058 update_list_p->item[j].end);
3059 }
3060 }
3061 update_list_p->count = 0;
3062 cpu_update_needed[my_cpu] = FALSE;
3063 simple_unlock(&update_list_p->lock);
3064 mp_enable_preemption();
3065 }
3066
3067 /*
3068 * Interrupt routine for TBIA requested from other processor.
3069 * This routine may also be called at any interrupt time if
3070 * the cpu was idle. Some driver interrupt routines might access
3071 * newly allocated vm. (This is the case for hd)
3072 */
3073 void
3074 pmap_update_interrupt(void)
3075 {
3076 register int my_cpu;
3077 spl_t s;
3078 register pmap_t my_pmap;
3079
3080 mp_disable_preemption();
3081 my_cpu = cpu_number();
3082
3083 /*
3084 * Raise spl to splhigh (above splip) to block out pmap_extract
3085 * from IO code (which would put this cpu back in the active
3086 * set).
3087 */
3088 s = splhigh();
3089
3090 my_pmap = real_pmap[my_cpu];
3091
3092 if (!(my_pmap && pmap_in_use(my_pmap, my_cpu)))
3093 my_pmap = kernel_pmap;
3094
3095 do {
3096 LOOP_VAR;
3097
3098 /*
3099 * Indicate that we're not using either user or kernel
3100 * pmap.
3101 */
3102 i_bit_clear(my_cpu, &cpus_active);
3103
3104 /*
3105 * Wait for any pmap updates in progress, on either user
3106 * or kernel pmap.
3107 */
3108 while (*(volatile hw_lock_t)&my_pmap->lock.interlock ||
3109 *(volatile hw_lock_t)&kernel_pmap->lock.interlock) {
3110 LOOP_CHECK("pmap_update_interrupt", my_pmap);
3111 cpu_pause();
3112 }
3113
3114 process_pmap_updates(my_pmap);
3115
3116 i_bit_set(my_cpu, &cpus_active);
3117
3118 } while (cpu_update_needed[my_cpu]);
3119
3120 splx(s);
3121 mp_enable_preemption();
3122 }
3123 #endif /* NCPUS > 1 */
3124
3125 #if MACH_KDB
3126
3127 /* show phys page mappings and attributes */
3128
3129 extern void db_show_page(vm_offset_t pa);
3130
3131 void
3132 db_show_page(vm_offset_t pa)
3133 {
3134 pv_entry_t pv_h;
3135 int pai;
3136 char attr;
3137
3138 pai = pa_index(pa);
3139 pv_h = pai_to_pvh(pai);
3140
3141 attr = pmap_phys_attributes[pai];
3142 printf("phys page %x ", pa);
3143 if (attr & PHYS_MODIFIED)
3144 printf("modified, ");
3145 if (attr & PHYS_REFERENCED)
3146 printf("referenced, ");
3147 if (pv_h->pmap || pv_h->next)
3148 printf(" mapped at\n");
3149 else
3150 printf(" not mapped\n");
3151 for (; pv_h; pv_h = pv_h->next)
3152 if (pv_h->pmap)
3153 printf("%x in pmap %x\n", pv_h->va, pv_h->pmap);
3154 }
3155
3156 #endif /* MACH_KDB */
3157
3158 #if MACH_KDB
3159 void db_kvtophys(vm_offset_t);
3160 void db_show_vaddrs(pt_entry_t *);
3161
3162 /*
3163 * print out the results of kvtophys(arg)
3164 */
3165 void
3166 db_kvtophys(
3167 vm_offset_t vaddr)
3168 {
3169 db_printf("0x%x", kvtophys(vaddr));
3170 }
3171
3172 /*
3173 * Walk the page tables.
3174 */
3175 void
3176 db_show_vaddrs(
3177 pt_entry_t *dirbase)
3178 {
3179 pt_entry_t *ptep, *pdep, tmp;
3180 int x, y, pdecnt, ptecnt;
3181
3182 if (dirbase == 0) {
3183 dirbase = kernel_pmap->dirbase;
3184 }
3185 if (dirbase == 0) {
3186 db_printf("need a dirbase...\n");
3187 return;
3188 }
3189 dirbase = (pt_entry_t *) ((unsigned long) dirbase & ~INTEL_OFFMASK);
3190
3191 db_printf("dirbase: 0x%x\n", dirbase);
3192
3193 pdecnt = ptecnt = 0;
3194 pdep = &dirbase[0];
3195 for (y = 0; y < NPDES; y++, pdep++) {
3196 if (((tmp = *pdep) & INTEL_PTE_VALID) == 0) {
3197 continue;
3198 }
3199 pdecnt++;
3200 ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK);
3201 db_printf("dir[%4d]: 0x%x\n", y, *pdep);
3202 for (x = 0; x < NPTES; x++, ptep++) {
3203 if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) {
3204 continue;
3205 }
3206 ptecnt++;
3207 db_printf(" tab[%4d]: 0x%x, va=0x%x, pa=0x%x\n",
3208 x,
3209 *ptep,
3210 (y << 22) | (x << 12),
3211 *ptep & ~INTEL_OFFMASK);
3212 }
3213 }
3214
3215 db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt);
3216
3217 }
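
/*
 * Illustrative sketch (not part of the compiled source): the 2-level
 * i386 translation db_show_vaddrs() walks.  A 32-bit virtual address
 * splits into a 10-bit directory index, a 10-bit table index and a
 * 12-bit page offset, which is why the routine reconstructs the va as
 * (y << 22) | (x << 12).  "va" and the three locals are placeholders.
 */
#if 0
	vm_offset_t pde_index, pte_index, offset;

	pde_index = (va >> 22) & 0x3ff;		/* selects dir[pde_index] */
	pte_index = (va >> 12) & 0x3ff;		/* selects tab[pte_index] */
	offset    = va & INTEL_OFFMASK;		/* byte within the 4K page */
#endif	/* 0 */
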
3218 #endif /* MACH_KDB */
3219
3220 #include <mach_vm_debug.h>
3221 #if MACH_VM_DEBUG
3222 #include <vm/vm_debug.h>
3223
3224 int
3225 pmap_list_resident_pages(
3226 register pmap_t pmap,
3227 register vm_offset_t *listp,
3228 register int space)
3229 {
3230 return 0;
3231 }
3232 #endif /* MACH_VM_DEBUG */
3233
3234 #ifdef MACH_BSD
3235 /*
3236 * pmap_movepage
3237 *
3238 * BSD support routine to reassign virtual addresses.
3239 */
3240
3241 void
3242 pmap_movepage(unsigned long from, unsigned long to, vm_size_t size)
3243 {
3244 spl_t spl;
3245 	pt_entry_t *pte, saved_pte;
	vm_offset_t orig_from = from;	/* from/to/size are consumed by the  */
	vm_offset_t orig_to = to;	/* loop below; remember the original */
	vm_size_t orig_size = size;	/* range for the final TLB flush     */
3246
3247 /* Lock the kernel map */
3248 PMAP_READ_LOCK(kernel_pmap, spl);
3249
3250
3251 while (size > 0) {
3252 pte = pmap_pte(kernel_pmap, from);
3253 if (pte == NULL)
3254 panic("pmap_pagemove from pte NULL");
3255 saved_pte = *pte;
3256 PMAP_READ_UNLOCK(kernel_pmap, spl);
3257
3258 pmap_enter(kernel_pmap, to, (ppnum_t)i386_btop(i386_trunc_page(*pte)),
3259 VM_PROT_READ|VM_PROT_WRITE, 0, *pte & INTEL_PTE_WIRED);
3260
3261 pmap_remove(kernel_pmap, (addr64_t)from, (addr64_t)(from+PAGE_SIZE));
3262
3263 PMAP_READ_LOCK(kernel_pmap, spl);
3264 pte = pmap_pte(kernel_pmap, to);
3265 if (pte == NULL)
3266 panic("pmap_pagemove 'to' pte NULL");
3267
3268 *pte = saved_pte;
3269
3270 from += PAGE_SIZE;
3271 to += PAGE_SIZE;
3272 size -= PAGE_SIZE;
3273 }
3274
3275 /* Get the processors to update the TLBs */
3276 	PMAP_UPDATE_TLBS(kernel_pmap, orig_from, orig_from+orig_size);
3277 	PMAP_UPDATE_TLBS(kernel_pmap, orig_to, orig_to+orig_size);
3278
3279 PMAP_READ_UNLOCK(kernel_pmap, spl);
3280
3281 }
3282
3283 kern_return_t bmapvideo(vm_offset_t *info);
3284 kern_return_t bmapvideo(vm_offset_t *info) {
3285
3286 extern struct vc_info vinfo;
3287 #ifdef NOTIMPLEMENTED
3288 (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */
3289 #endif
3290 return KERN_SUCCESS;
3291 }
3292
3293 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
3294 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
3295
3296 #ifdef NOTIMPLEMENTED
3297 pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr); /* Map it in */
3298 #endif
3299 return KERN_SUCCESS;
3300 }
3301
3302 kern_return_t bmapmapr(vm_offset_t va);
3303 kern_return_t bmapmapr(vm_offset_t va) {
3304
3305 #ifdef NOTIMPLEMENTED
3306 mapping_remove(current_act()->task->map->pmap, va); /* Remove map */
3307 #endif
3308 return KERN_SUCCESS;
3309 }
3310 #endif
3311
3312 /* temporary workaround */
3313 boolean_t
3314 coredumpok(vm_map_t map, vm_offset_t va)
3315 {
3316 pt_entry_t *ptep;
3317 ptep = pmap_pte(map->pmap, va);
3318 if (0 == ptep) return FALSE;
3319 return ((*ptep & (INTEL_PTE_NCACHE|INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE|INTEL_PTE_WIRED));
3320 }