1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55
56 /*
57 * File: pmap.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young
59 * (These guys wrote the Vax version)
60 *
61 * Physical Map management code for Intel i386, i486, and i860.
62 *
63 * Manages physical address maps.
64 *
65 * In addition to hardware address maps, this
66 * module is called upon to provide software-use-only
67 * maps which may or may not be stored in the same
68 * form as hardware maps. These pseudo-maps are
69 * used to store intermediate results from copy
70 * operations to and from address spaces.
71 *
72 * Since the information managed by this module is
73 * also stored by the logical address mapping module,
74 * this module may throw away valid virtual-to-physical
75 * mappings at almost any time. However, invalidations
76 * of virtual-to-physical mappings must be done as
77 * requested.
78 *
79 * In order to cope with hardware architectures which
80 * make virtual-to-physical map invalidates expensive,
81 * this module may delay invalidate or reduced protection
82 * operations until such time as they are actually
83 * necessary. This module is given full information as
84 * to which processors are currently using which maps,
85 * and to when physical maps must be made correct.
86 */
87
88 #include <cpus.h>
89
90 #include <string.h>
91 #include <norma_vm.h>
92 #include <mach_kdb.h>
93 #include <mach_ldebug.h>
94
95 #include <mach/machine/vm_types.h>
96
97 #include <mach/boolean.h>
98 #include <kern/thread.h>
99 #include <kern/zalloc.h>
100
101 #include <kern/lock.h>
102 #include <kern/spl.h>
103
104 #include <vm/pmap.h>
105 #include <vm/vm_map.h>
106 #include <vm/vm_kern.h>
107 #include <mach/vm_param.h>
108 #include <mach/vm_prot.h>
109 #include <vm/vm_object.h>
110 #include <vm/vm_page.h>
111
112 #include <mach/machine/vm_param.h>
113 #include <machine/thread.h>
114
115 #include <kern/misc_protos.h> /* prototyping */
116 #include <i386/misc_protos.h>
117
118 #include <i386/cpuid.h>
119
120 #if MACH_KDB
121 #include <ddb/db_command.h>
122 #include <ddb/db_output.h>
123 #include <ddb/db_sym.h>
124 #include <ddb/db_print.h>
125 #endif /* MACH_KDB */
126
127 #include <kern/xpr.h>
128
129 #if NCPUS > 1
130 #include <i386/AT386/mp/mp_events.h>
131 #endif
132
133 /*
134 * Forward declarations for internal functions.
135 */
136 void pmap_expand(
137 pmap_t map,
138 vm_offset_t v);
139
140 extern void pmap_remove_range(
141 pmap_t pmap,
142 vm_offset_t va,
143 pt_entry_t *spte,
144 pt_entry_t *epte);
145
146 void phys_attribute_clear(
147 vm_offset_t phys,
148 int bits);
149
150 boolean_t phys_attribute_test(
151 vm_offset_t phys,
152 int bits);
153
154 void pmap_set_modify(vm_offset_t phys);
155
156 void phys_attribute_set(
157 vm_offset_t phys,
158 int bits);
159
160
161 #ifndef set_dirbase
162 void set_dirbase(vm_offset_t dirbase);
163 #endif /* set_dirbase */
164
165 #define PA_TO_PTE(pa) (pa_to_pte((pa) - VM_MIN_KERNEL_ADDRESS))
166 #define iswired(pte) ((pte) & INTEL_PTE_WIRED)
167
168 pmap_t real_pmap[NCPUS];
169
170 #define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry);
171 #define WRITE_PTE_FAST(pte_p, pte_entry) *(pte_p) = (pte_entry);
172
173 /*
174 * Private data structures.
175 */
176
177 /*
178 * For each vm_page_t, there is a list of all currently
179 * valid virtual mappings of that page. An entry is
180 * a pv_entry_t; the list is the pv_table.
181 */
182
183 typedef struct pv_entry {
184 struct pv_entry *next; /* next pv_entry */
185 pmap_t pmap; /* pmap where mapping lies */
186 vm_offset_t va; /* virtual address for mapping */
187 } *pv_entry_t;
188
189 #define PV_ENTRY_NULL ((pv_entry_t) 0)
190
191 pv_entry_t pv_head_table; /* array of entries, one per page */
192
193 /*
194 * pv_list entries are kept on a list that can only be accessed
195 * with the pmap system locked (at SPLVM, not in the cpus_active set).
196 * The list is refilled from the pv_list_zone if it becomes empty.
197 */
198 pv_entry_t pv_free_list; /* free list at SPLVM */
199 decl_simple_lock_data(,pv_free_list_lock)
200
201 #define PV_ALLOC(pv_e) { \
202 simple_lock(&pv_free_list_lock); \
203 if ((pv_e = pv_free_list) != 0) { \
204 pv_free_list = pv_e->next; \
205 } \
206 simple_unlock(&pv_free_list_lock); \
207 }
208
209 #define PV_FREE(pv_e) { \
210 simple_lock(&pv_free_list_lock); \
211 pv_e->next = pv_free_list; \
212 pv_free_list = pv_e; \
213 simple_unlock(&pv_free_list_lock); \
214 }
215
216 zone_t pv_list_zone; /* zone of pv_entry structures */
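/*
 * Illustrative sketch (for exposition; it mirrors the Retry pattern
 * used by pmap_enter below): a caller that needs a pv_entry first
 * tries the free list and only falls back to the zone, since zalloc
 * may cause pageout and so cannot be called with the pmap system
 * locked.
 *
 *	pv_entry_t pv_e = PV_ENTRY_NULL;
 *
 *	PV_ALLOC(pv_e);
 *	if (pv_e == PV_ENTRY_NULL) {
 *		... unlock the pmap system ...
 *		pv_e = (pv_entry_t) zalloc(pv_list_zone);
 *		... relock and retry the operation ...
 *	}
 *	...
 *	if (pv_e != PV_ENTRY_NULL)
 *		PV_FREE(pv_e);		(entry ended up unused)
 */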
217
218 /*
219 * Each entry in the pv_head_table is locked by a bit in the
220 * pv_lock_table. The lock bits are accessed by the physical
221 * address of the page they lock.
222 */
223
224 char *pv_lock_table; /* pointer to array of bits */
225 #define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
226
227 /*
228 * First and last physical addresses that we maintain any information
229 * for. Initialized to zero so that pmap operations done before
230 * pmap_init won't touch any non-existent structures.
231 */
232 vm_offset_t vm_first_phys = (vm_offset_t) 0;
233 vm_offset_t vm_last_phys = (vm_offset_t) 0;
234 boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
235
236 /*
237 * Index into pv_head table, its lock bits, and the modify/reference
238 * bits starting at vm_first_phys.
239 */
240
241 #define pa_index(pa) (atop(pa - vm_first_phys))
242
243 #define pai_to_pvh(pai) (&pv_head_table[pai])
244 #define lock_pvh_pai(pai) bit_lock(pai, (void *)pv_lock_table)
245 #define unlock_pvh_pai(pai) bit_unlock(pai, (void *)pv_lock_table)
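/*
 * Illustrative sketch (for exposition): the usual way the macros above
 * are combined to get from a managed physical address to its pv list
 * head and attribute byte, e.g. in pmap_remove_range below:
 *
 *	if (valid_page(pa)) {
 *		pai = pa_index(pa);
 *		LOCK_PVH(pai);
 *		pv_h = pai_to_pvh(pai);
 *		... examine or modify the pv list and
 *		    pmap_phys_attributes[pai] ...
 *		UNLOCK_PVH(pai);
 *	}
 */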
246
247 /*
248  * Array of physical page attributes for managed pages.
249 * One byte per physical page.
250 */
251 char *pmap_phys_attributes;
252
253 /*
254 * Physical page attributes. Copy bits from PTE definition.
255 */
256 #define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */
257 #define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */
258 #define PHYS_NCACHE INTEL_PTE_NCACHE
259
260 /*
261 * Amount of virtual memory mapped by one
262 * page-directory entry.
263 */
264 #define PDE_MAPPED_SIZE (pdetova(1))
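/*
 * Illustrative sketch (for exposition): pmap_remove and pmap_protect
 * below walk a virtual range one page-directory entry at a time,
 * clamping each step to the next PDE_MAPPED_SIZE boundary:
 *
 *	l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
 *	if (l > e)
 *		l = e;
 *	... operate on the ptes covering [s, l) ...
 *	s = l;
 *	pde++;
 */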
265
266 /*
267 * We allocate page table pages directly from the VM system
268 * through this object. It maps physical memory.
269 */
270 vm_object_t pmap_object = VM_OBJECT_NULL;
271
272 /*
273 * Locking and TLB invalidation
274 */
275
276 /*
277 * Locking Protocols:
278 *
279 * There are two structures in the pmap module that need locking:
280 * the pmaps themselves, and the per-page pv_lists (which are locked
281 * by locking the pv_lock_table entry that corresponds to the pv_head
282 * for the list in question.) Most routines want to lock a pmap and
283 * then do operations in it that require pv_list locking -- however
284 * pmap_remove_all and pmap_copy_on_write operate on a physical page
285 * basis and want to do the locking in the reverse order, i.e. lock
286 * a pv_list and then go through all the pmaps referenced by that list.
287 * To protect against deadlock between these two cases, the pmap_lock
288 * is used. There are three different locking protocols as a result:
289 *
290 * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
291 * the pmap.
292 *
293 * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
294 * lock on the pmap_lock (shared read), then lock the pmap
295 * and finally the pv_lists as needed [i.e. pmap lock before
296 * pv_list lock.]
297 *
298 * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
299 * Get a write lock on the pmap_lock (exclusive write); this
300  *	also guarantees exclusive access to the pv_lists.  Lock the
301 * pmaps as needed.
302 *
303 * At no time may any routine hold more than one pmap lock or more than
304 * one pv_list lock. Because interrupt level routines can allocate
305 * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
306 * kernel_pmap can only be held at splhigh.
307 */
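/*
 * Illustrative sketch (for exposition) of protocols 2 and 3 above, in
 * terms of the macros defined below:
 *
 *	Protocol 2 (pmap-based, e.g. pmap_remove):
 *		PMAP_READ_LOCK(pmap, spl);
 *		LOCK_PVH(pai);			per-page, as needed
 *		...
 *		UNLOCK_PVH(pai);
 *		PMAP_READ_UNLOCK(pmap, spl);
 *
 *	Protocol 3 (pv_list-based, e.g. pmap_page_protect):
 *		PMAP_WRITE_LOCK(spl);		excludes all pv list users
 *		simple_lock(&pmap->lock);	per-pmap, as needed
 *		...
 *		simple_unlock(&pmap->lock);
 *		PMAP_WRITE_UNLOCK(spl);
 */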
308
309 #if NCPUS > 1
310 /*
311 * We raise the interrupt level to splhigh, to block interprocessor
312 * interrupts during pmap operations. We must take the CPU out of
313 * the cpus_active set while interrupts are blocked.
314 */
315 #define SPLVM(spl) { \
316 spl = splhigh(); \
317 mp_disable_preemption(); \
318 i_bit_clear(cpu_number(), &cpus_active); \
319 mp_enable_preemption(); \
320 }
321
322 #define SPLX(spl) { \
323 mp_disable_preemption(); \
324 i_bit_set(cpu_number(), &cpus_active); \
325 mp_enable_preemption(); \
326 splx(spl); \
327 }
328
329 /*
330 * Lock on pmap system
331 */
332 lock_t pmap_system_lock;
333
334 #define PMAP_READ_LOCK(pmap, spl) { \
335 SPLVM(spl); \
336 lock_read(&pmap_system_lock); \
337 simple_lock(&(pmap)->lock); \
338 }
339
340 #define PMAP_WRITE_LOCK(spl) { \
341 SPLVM(spl); \
342 lock_write(&pmap_system_lock); \
343 }
344
345 #define PMAP_READ_UNLOCK(pmap, spl) { \
346 simple_unlock(&(pmap)->lock); \
347 lock_read_done(&pmap_system_lock); \
348 SPLX(spl); \
349 }
350
351 #define PMAP_WRITE_UNLOCK(spl) { \
352 lock_write_done(&pmap_system_lock); \
353 SPLX(spl); \
354 }
355
356 #define PMAP_WRITE_TO_READ_LOCK(pmap) { \
357 simple_lock(&(pmap)->lock); \
358 lock_write_to_read(&pmap_system_lock); \
359 }
360
361 #define LOCK_PVH(index) lock_pvh_pai(index)
362
363 #define UNLOCK_PVH(index) unlock_pvh_pai(index)
364
365 #define PMAP_FLUSH_TLBS() \
366 { \
367 flush_tlb(); \
368 i386_signal_cpus(MP_TLB_FLUSH); \
369 }
370
371 #define PMAP_RELOAD_TLBS() { \
372 i386_signal_cpus(MP_TLB_RELOAD); \
373 set_cr3(kernel_pmap->pdirbase); \
374 }
375
376 #define PMAP_INVALIDATE_PAGE(map, addr) { \
377 if (map == kernel_pmap) \
378 invlpg((vm_offset_t) addr); \
379 else \
380 flush_tlb(); \
381 i386_signal_cpus(MP_TLB_FLUSH); \
382 }
383
384 #else /* NCPUS > 1 */
385
386 #if MACH_RT
387 #define SPLVM(spl) { (spl) = splhigh(); }
388 #define SPLX(spl) splx (spl)
389 #else /* MACH_RT */
390 #define SPLVM(spl)
391 #define SPLX(spl)
392 #endif /* MACH_RT */
393
394 #define PMAP_READ_LOCK(pmap, spl) SPLVM(spl)
395 #define PMAP_WRITE_LOCK(spl) SPLVM(spl)
396 #define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl)
397 #define PMAP_WRITE_UNLOCK(spl) SPLX(spl)
398 #define PMAP_WRITE_TO_READ_LOCK(pmap)
399
400 #if MACH_RT
401 #define LOCK_PVH(index) disable_preemption()
402 #define UNLOCK_PVH(index) enable_preemption()
403 #else /* MACH_RT */
404 #define LOCK_PVH(index)
405 #define UNLOCK_PVH(index)
406 #endif /* MACH_RT */
407
408 #define PMAP_FLUSH_TLBS() flush_tlb()
409 #define PMAP_RELOAD_TLBS() set_cr3(kernel_pmap->pdirbase)
410 #define PMAP_INVALIDATE_PAGE(map, addr) { \
411 if (map == kernel_pmap) \
412 invlpg((vm_offset_t) addr); \
413 else \
414 flush_tlb(); \
415 }
416
417 #endif /* NCPUS > 1 */
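/*
 * Illustrative note (for exposition): PMAP_INVALIDATE_PAGE is used
 * after changing a single mapping, while PMAP_FLUSH_TLBS is issued
 * once after a batch of pte updates:
 *
 *	*pte = 0;
 *	PMAP_INVALIDATE_PAGE(pmap, va);		one entry
 *
 *	while (...) { ... update many ptes ... }
 *	PMAP_FLUSH_TLBS();			whole range
 *
 * When NCPUS > 1 both variants also signal the other processors so
 * that their TLBs get flushed as well.
 */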
418
419 #define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */
420
421 #if NCPUS > 1
422 /*
423 * Structures to keep track of pending TLB invalidations
424 */
425 cpu_set cpus_active;
426 cpu_set cpus_idle;
427 volatile boolean_t cpu_update_needed[NCPUS];
428
429
430 #endif /* NCPUS > 1 */
431
432 /*
433 * Other useful macros.
434 */
435 #define current_pmap() (vm_map_pmap(current_act()->map))
436 #define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
437
438 struct pmap kernel_pmap_store;
439 pmap_t kernel_pmap;
440
441 struct zone *pmap_zone; /* zone of pmap structures */
442
443 int pmap_debug = 0; /* flag for debugging prints */
444 int ptes_per_vm_page; /* number of hardware ptes needed
445 to map one VM page. */
446 unsigned int inuse_ptepages_count = 0; /* debugging */
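/*
 * Illustrative sketch (for exposition): when PAGE_SIZE is larger than
 * INTEL_PGBYTES, one VM page is covered by several consecutive
 * hardware ptes, so pte updates are applied ptes_per_vm_page at a
 * time.  The idiom recurs throughout this file:
 *
 *	i = ptes_per_vm_page;
 *	do {
 *		WRITE_PTE(pte, template)
 *		pte++;
 *		pte_increment_pa(template);
 *	} while (--i > 0);
 */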
447
448 /*
449 * Pmap cache. Cache is threaded through ref_count field of pmap.
450 * Max will eventually be constant -- variable for experimentation.
451 */
452 int pmap_cache_max = 32;
453 int pmap_alloc_chunk = 8;
454 pmap_t pmap_cache_list;
455 int pmap_cache_count;
456 decl_simple_lock_data(,pmap_cache_lock)
457
458 extern vm_offset_t hole_start, hole_end;
459
460 extern char end;
461
462 /*
463 * Page directory for kernel.
464 */
465 pt_entry_t *kpde = 0; /* set by start.s - keep out of bss */
466
467 #if DEBUG_ALIAS
468 #define PMAP_ALIAS_MAX 32
469 struct pmap_alias {
470 vm_offset_t rpc;
471 pmap_t pmap;
472 vm_offset_t va;
473 int cookie;
474 #define PMAP_ALIAS_COOKIE 0xdeadbeef
475 } pmap_aliasbuf[PMAP_ALIAS_MAX];
476 int pmap_alias_index = 0;
477 extern vm_offset_t get_rpc();
478
479 #endif /* DEBUG_ALIAS */
480
481 /*
482 * Given an offset and a map, compute the address of the
483 * pte. If the address is invalid with respect to the map
484 * then PT_ENTRY_NULL is returned (and the map may need to grow).
485 *
486 * This is only used in machine-dependent code.
487 */
488
489 pt_entry_t *
490 pmap_pte(
491 register pmap_t pmap,
492 register vm_offset_t addr)
493 {
494 register pt_entry_t *ptp;
495 register pt_entry_t pte;
496
497 pte = pmap->dirbase[pdenum(pmap, addr)];
498 if ((pte & INTEL_PTE_VALID) == 0)
499 return(PT_ENTRY_NULL);
500 ptp = (pt_entry_t *)ptetokv(pte);
501 return(&ptp[ptenum(addr)]);
502
503 }
504
505 #define pmap_pde(pmap, addr) (&(pmap)->dirbase[pdenum(pmap, addr)])
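/*
 * Illustrative sketch (for exposition): pmap_pde yields the page
 * directory slot for a VA, while pmap_pte returns the pte itself or
 * PT_ENTRY_NULL if the directory entry is not valid.  pmap_extract
 * below uses it roughly as follows:
 *
 *	pte = pmap_pte(pmap, va);
 *	if (pte != PT_ENTRY_NULL && (*pte & INTEL_PTE_VALID))
 *		pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
 */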
506
507 #define DEBUG_PTE_PAGE 0
508
509 #if DEBUG_PTE_PAGE
510 void
511 ptep_check(
512 ptep_t ptep)
513 {
514 register pt_entry_t *pte, *epte;
515 int ctu, ctw;
516
517 /* check the use and wired counts */
518 if (ptep == PTE_PAGE_NULL)
519 return;
520 pte = pmap_pte(ptep->pmap, ptep->va);
521 epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t);
522 ctu = 0;
523 ctw = 0;
524 while (pte < epte) {
525 if (pte->pfn != 0) {
526 ctu++;
527 if (pte->wired)
528 ctw++;
529 }
530 pte += ptes_per_vm_page;
531 }
532
533 if (ctu != ptep->use_count || ctw != ptep->wired_count) {
534 printf("use %d wired %d - actual use %d wired %d\n",
535 ptep->use_count, ptep->wired_count, ctu, ctw);
536 panic("pte count");
537 }
538 }
539 #endif /* DEBUG_PTE_PAGE */
540
541 /*
542 * Map memory at initialization. The physical addresses being
543 * mapped are not managed and are never unmapped.
544 *
545  *	For now, VM is already on; we only need to map the
546 * specified memory.
547 */
548 vm_offset_t
549 pmap_map(
550 register vm_offset_t virt,
551 register vm_offset_t start,
552 register vm_offset_t end,
553 register vm_prot_t prot)
554 {
555 register int ps;
556
557 ps = PAGE_SIZE;
558 while (start < end) {
559 pmap_enter(kernel_pmap, virt, start, prot, 0, FALSE);
560 virt += ps;
561 start += ps;
562 }
563 return(virt);
564 }
565
566 /*
567 * Back-door routine for mapping kernel VM at initialization.
568  *	Useful for mapping memory outside the range
569  *	[vm_first_phys, vm_last_phys) (i.e., devices).
570  *	Sets no-cache, A, D.
571  *	Otherwise like pmap_map.
572 */
573 vm_offset_t
574 pmap_map_bd(
575 register vm_offset_t virt,
576 register vm_offset_t start,
577 register vm_offset_t end,
578 vm_prot_t prot)
579 {
580 register pt_entry_t template;
581 register pt_entry_t *pte;
582
583 template = pa_to_pte(start)
584 | INTEL_PTE_NCACHE
585 | INTEL_PTE_REF
586 | INTEL_PTE_MOD
587 | INTEL_PTE_WIRED
588 | INTEL_PTE_VALID;
589 if (prot & VM_PROT_WRITE)
590 template |= INTEL_PTE_WRITE;
591
592 while (start < end) {
593 pte = pmap_pte(kernel_pmap, virt);
594 if (pte == PT_ENTRY_NULL)
595 panic("pmap_map_bd: Invalid kernel address\n");
596 WRITE_PTE_FAST(pte, template)
597 pte_increment_pa(template);
598 virt += PAGE_SIZE;
599 start += PAGE_SIZE;
600 }
601
602 PMAP_FLUSH_TLBS();
603
604 return(virt);
605 }
606
607 extern int cnvmem;
608 extern char *first_avail;
609 extern vm_offset_t virtual_avail, virtual_end;
610 extern vm_offset_t avail_start, avail_end, avail_next;
611
612 /*
613 * Bootstrap the system enough to run with virtual memory.
614 * Map the kernel's code and data, and allocate the system page table.
615 * Called with mapping OFF. Page_size must already be set.
616 *
617 * Parameters:
618 * load_start: PA where kernel was loaded
619 * avail_start PA of first available physical page -
620 * after kernel page tables
621 * avail_end PA of last available physical page
622 * virtual_avail VA of first available page -
623 * after kernel page tables
624 * virtual_end VA of last available page -
625 * end of kernel address space
626 *
627 * &start_text start of kernel text
628 * &etext end of kernel text
629 */
630
631 void
632 pmap_bootstrap(
633 vm_offset_t load_start)
634 {
635 vm_offset_t va, tva, paddr;
636 pt_entry_t template;
637 pt_entry_t *pde, *pte, *ptend;
638 vm_size_t morevm; /* VM space for kernel map */
639
640 /*
641 * Set ptes_per_vm_page for general use.
642 */
643 ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES;
644
645 /*
646 * The kernel's pmap is statically allocated so we don't
647 * have to use pmap_create, which is unlikely to work
648 * correctly at this part of the boot sequence.
649 */
650
651 kernel_pmap = &kernel_pmap_store;
652
653 #if NCPUS > 1
654 lock_init(&pmap_system_lock,
655 FALSE, /* NOT a sleep lock */
656 ETAP_VM_PMAP_SYS,
657 ETAP_VM_PMAP_SYS_I);
658 #endif /* NCPUS > 1 */
659
660 simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
661 simple_lock_init(&pv_free_list_lock, ETAP_VM_PMAP_FREE);
662
663 kernel_pmap->ref_count = 1;
664
665 /*
666 * The kernel page directory has been allocated;
667 * its virtual address is in kpde.
668 *
669 * Enough kernel page table pages have been allocated
670 * to map low system memory, kernel text, kernel data/bss,
671 * kdb's symbols, and the page directory and page tables.
672 *
673 * No other physical memory has been allocated.
674 */
675
676 /*
677 * Start mapping virtual memory to physical memory, 1-1,
678 * at end of mapped memory.
679 */
680
681 virtual_avail = phystokv(avail_start);
682 virtual_end = phystokv(avail_end);
683
684 pde = kpde;
685 pde += pdenum(kernel_pmap, virtual_avail);
686
687 if (pte_to_pa(*pde) == 0) {
688 /* This pte has not been allocated */
689 pte = 0; ptend = 0;
690 }
691 else {
692 pte = (pt_entry_t *)ptetokv(*pde);
693 /* first pte of page */
694 ptend = pte+NPTES; /* last pte of page */
695 pte += ptenum(virtual_avail); /* point to pte that
696 maps first avail VA */
697 pde++; /* point pde to first empty slot */
698 }
699
700 template = pa_to_pte(avail_start)
701 | INTEL_PTE_VALID
702 | INTEL_PTE_WRITE;
703
704 for (va = virtual_avail; va < virtual_end; va += INTEL_PGBYTES) {
705 if (pte >= ptend) {
706 pte = (pt_entry_t *)phystokv(virtual_avail);
707 ptend = pte + NPTES;
708 virtual_avail = (vm_offset_t)ptend;
709 if (virtual_avail == hole_start)
710 virtual_avail = hole_end;
711 *pde = PA_TO_PTE((vm_offset_t) pte)
712 | INTEL_PTE_VALID
713 | INTEL_PTE_WRITE;
714 pde++;
715 }
716 WRITE_PTE_FAST(pte, template)
717 pte++;
718 pte_increment_pa(template);
719 }
720
721 avail_start = virtual_avail - VM_MIN_KERNEL_ADDRESS;
722 avail_next = avail_start;
723
724 /*
725 * Figure out maximum kernel address.
726 * Kernel virtual space is:
727 * - at least three times physical memory
728 * - at least VM_MIN_KERNEL_ADDRESS
729 * - limited by VM_MAX_KERNEL_ADDRESS
730 */
731
732 morevm = 3*avail_end;
733 if (virtual_end + morevm > VM_MAX_KERNEL_ADDRESS)
734 morevm = VM_MAX_KERNEL_ADDRESS - virtual_end + 1;
735
736 /*
737 * startup requires additional virtual memory (for tables, buffers,
738 * etc.). The kd driver may also require some of that memory to
739 * access the graphics board.
740 *
741 */
742 *(int *)&template = 0;
743
744 /*
745 * Leave room for kernel-loaded servers, which have been linked at
746 * addresses from VM_MIN_KERNEL_LOADED_ADDRESS to
747 * VM_MAX_KERNEL_LOADED_ADDRESS.
748 */
749 if (virtual_end + morevm < VM_MAX_KERNEL_LOADED_ADDRESS + 1)
750 morevm = VM_MAX_KERNEL_LOADED_ADDRESS + 1 - virtual_end;
751
752
753 virtual_end += morevm;
754 for (tva = va; tva < virtual_end; tva += INTEL_PGBYTES) {
755 if (pte >= ptend) {
756 pmap_next_page(&paddr);
757 pte = (pt_entry_t *)phystokv(paddr);
758 ptend = pte + NPTES;
759 *pde = PA_TO_PTE((vm_offset_t) pte)
760 | INTEL_PTE_VALID
761 | INTEL_PTE_WRITE;
762 pde++;
763 }
764 WRITE_PTE_FAST(pte, template)
765 pte++;
766 }
767
768 virtual_avail = va;
769
770 /* Push the virtual avail address above hole_end */
771 if (virtual_avail < hole_end)
772 virtual_avail = hole_end;
773
774 /*
775 * c.f. comment above
776 *
777 */
778 virtual_end = va + morevm;
779 while (pte < ptend)
780 *pte++ = 0;
781
782 /*
783 * invalidate user virtual addresses
784 */
785 memset((char *)kpde,
786 0,
787 pdenum(kernel_pmap,VM_MIN_KERNEL_ADDRESS)*sizeof(pt_entry_t));
788 kernel_pmap->dirbase = kpde;
789 printf("Kernel virtual space from 0x%x to 0x%x.\n",
790 VM_MIN_KERNEL_ADDRESS, virtual_end);
791
792 avail_start = avail_next;
793 printf("Available physical space from 0x%x to 0x%x\n",
794 avail_start, avail_end);
795
796 kernel_pmap->pdirbase = kvtophys((vm_offset_t)kernel_pmap->dirbase);
797
798 if (cpuid_features() & CPUID_FEATURE_PAT)
799 {
800 uint64_t pat;
801 uint32_t msr;
802
803 msr = 0x277;
804 asm volatile("rdmsr" : "=A" (pat) : "c" (msr));
805
806 pat &= ~(0xfULL << 48);
807 pat |= 0x01ULL << 48;
808
809 asm volatile("wrmsr" :: "A" (pat), "c" (msr));
810 }
811 }
812
813 void
814 pmap_virtual_space(
815 vm_offset_t *startp,
816 vm_offset_t *endp)
817 {
818 *startp = virtual_avail;
819 *endp = virtual_end;
820 }
821
822 /*
823 * Initialize the pmap module.
824 * Called by vm_init, to initialize any structures that the pmap
825 * system needs to map virtual memory.
826 */
827 void
828 pmap_init(void)
829 {
830 register long npages;
831 vm_offset_t addr;
832 register vm_size_t s;
833 int i;
834
835 /*
836 * Allocate memory for the pv_head_table and its lock bits,
837 * the modify bit array, and the pte_page table.
838 */
839
840 npages = atop(avail_end - avail_start);
841 s = (vm_size_t) (sizeof(struct pv_entry) * npages
842 + pv_lock_table_size(npages)
843 + npages);
844
845 s = round_page(s);
846 if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
847 panic("pmap_init");
848
849 memset((char *)addr, 0, s);
850
851 /*
852 * Allocate the structures first to preserve word-alignment.
853 */
854 pv_head_table = (pv_entry_t) addr;
855 addr = (vm_offset_t) (pv_head_table + npages);
856
857 pv_lock_table = (char *) addr;
858 addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
859
860 pmap_phys_attributes = (char *) addr;
861
862 /*
863 * Create the zone of physical maps,
864 * and of the physical-to-virtual entries.
865 */
866 s = (vm_size_t) sizeof(struct pmap);
867 pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
868 s = (vm_size_t) sizeof(struct pv_entry);
869 pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
870
871 /*
872 * Only now, when all of the data structures are allocated,
873 * can we set vm_first_phys and vm_last_phys. If we set them
874 * too soon, the kmem_alloc_wired above will try to use these
875 * data structures and blow up.
876 */
877
878 vm_first_phys = avail_start;
879 vm_last_phys = avail_end;
880 pmap_initialized = TRUE;
881
882 /*
883  *	Initialize the pmap cache.
884 */
885 pmap_cache_list = PMAP_NULL;
886 pmap_cache_count = 0;
887 simple_lock_init(&pmap_cache_lock, ETAP_VM_PMAP_CACHE);
888 }
889
890
891 #define pmap_valid_page(x) ((avail_start <= x) && (x < avail_end))
892
893
894 #define valid_page(x) (pmap_initialized && pmap_valid_page(x))
895
896 boolean_t
897 pmap_verify_free(
898 vm_offset_t phys)
899 {
900 pv_entry_t pv_h;
901 int pai;
902 spl_t spl;
903 boolean_t result;
904
905 assert(phys != vm_page_fictitious_addr);
906 if (!pmap_initialized)
907 return(TRUE);
908
909 if (!pmap_valid_page(phys))
910 return(FALSE);
911
912 PMAP_WRITE_LOCK(spl);
913
914 pai = pa_index(phys);
915 pv_h = pai_to_pvh(pai);
916
917 result = (pv_h->pmap == PMAP_NULL);
918 PMAP_WRITE_UNLOCK(spl);
919
920 return(result);
921 }
922
923 /*
924 * Create and return a physical map.
925 *
926 * If the size specified for the map
927 * is zero, the map is an actual physical
928 * map, and may be referenced by the
929 * hardware.
930 *
931 * If the size specified is non-zero,
932 * the map will be used in software only, and
933 * is bounded by that size.
934 */
935 pmap_t
936 pmap_create(
937 vm_size_t size)
938 {
939 register pmap_t p;
940 register pmap_statistics_t stats;
941
942 /*
943  *	A software use-only map doesn't even need a hardware map.
944 */
945
946 if (size != 0) {
947 return(PMAP_NULL);
948 }
949
950 /*
951 * Try to get cached pmap, if this fails,
952 * allocate a pmap struct from the pmap_zone. Then allocate
953 * the page descriptor table from the pd_zone.
954 */
955
956 simple_lock(&pmap_cache_lock);
957 while ((p = pmap_cache_list) == PMAP_NULL) {
958
959 vm_offset_t dirbases;
960 register int i;
961
962 simple_unlock(&pmap_cache_lock);
963
964 #if NCPUS > 1
965 /*
966 * XXX NEEDS MP DOING ALLOC logic so that if multiple processors
967 * XXX get here, only one allocates a chunk of pmaps.
968 * (for now we'll just let it go - safe but wasteful)
969 */
970 #endif
971
972 /*
973  *	Allocate a chunk of pmaps.  A single kmem_alloc_wired
974 * operation reduces kernel map fragmentation.
975 */
976
977 if (kmem_alloc_wired(kernel_map, &dirbases,
978 pmap_alloc_chunk * INTEL_PGBYTES)
979 != KERN_SUCCESS)
980 panic("pmap_create.1");
981
982 for (i = pmap_alloc_chunk; i > 0 ; i--) {
983 p = (pmap_t) zalloc(pmap_zone);
984 if (p == PMAP_NULL)
985 panic("pmap_create.2");
986
987 /*
988 * Initialize pmap. Don't bother with
989 * ref count as cache list is threaded
990 * through it. It'll be set on cache removal.
991 */
992 p->dirbase = (pt_entry_t *) dirbases;
993 dirbases += INTEL_PGBYTES;
994 memcpy(p->dirbase, kpde, INTEL_PGBYTES);
995 p->pdirbase = kvtophys((vm_offset_t)p->dirbase);
996
997 simple_lock_init(&p->lock, ETAP_VM_PMAP);
998 p->cpus_using = 0;
999
1000 /*
1001 * Initialize statistics.
1002 */
1003 stats = &p->stats;
1004 stats->resident_count = 0;
1005 stats->wired_count = 0;
1006
1007 /*
1008 * Insert into cache
1009 */
1010 simple_lock(&pmap_cache_lock);
1011 p->ref_count = (int) pmap_cache_list;
1012 pmap_cache_list = p;
1013 pmap_cache_count++;
1014 simple_unlock(&pmap_cache_lock);
1015 }
1016 simple_lock(&pmap_cache_lock);
1017 }
1018
1019 assert(p->stats.resident_count == 0);
1020 assert(p->stats.wired_count == 0);
1021 p->stats.resident_count = 0;
1022 p->stats.wired_count = 0;
1023
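	/*
	 * Note (added for exposition): the next two lines pop this pmap
	 * off the cache.  While a pmap sits on pmap_cache_list its
	 * ref_count field is reused as the "next" link (cast to and from
	 * pmap_t), so ref_count only receives its real value of 1 once
	 * the pmap has been unlinked.
	 */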
1024 pmap_cache_list = (pmap_t) p->ref_count;
1025 p->ref_count = 1;
1026 pmap_cache_count--;
1027 simple_unlock(&pmap_cache_lock);
1028
1029 return(p);
1030 }
1031
1032 /*
1033 * Retire the given physical map from service.
1034 * Should only be called if the map contains
1035 * no valid mappings.
1036 */
1037
1038 void
1039 pmap_destroy(
1040 register pmap_t p)
1041 {
1042 register pt_entry_t *pdep;
1043 register vm_offset_t pa;
1044 register int c;
1045 spl_t s;
1046 register vm_page_t m;
1047
1048 if (p == PMAP_NULL)
1049 return;
1050
1051 SPLVM(s);
1052 simple_lock(&p->lock);
1053 c = --p->ref_count;
1054 if (c == 0) {
1055 register int my_cpu;
1056
1057 mp_disable_preemption();
1058 my_cpu = cpu_number();
1059
1060 /*
1061 * If some cpu is not using the physical pmap pointer that it
1062 * is supposed to be (see set_dirbase), we might be using the
1063 * pmap that is being destroyed! Make sure we are
1064 * physically on the right pmap:
1065 */
1066
1067
1068 if (real_pmap[my_cpu] == p) {
1069 PMAP_CPU_CLR(p, my_cpu);
1070 real_pmap[my_cpu] = kernel_pmap;
1071 PMAP_RELOAD_TLBS();
1072 }
1073 mp_enable_preemption();
1074 }
1075 simple_unlock(&p->lock);
1076 SPLX(s);
1077
1078 if (c != 0) {
1079 return; /* still in use */
1080 }
1081
1082 /*
1083 * Free the memory maps, then the
1084 * pmap structure.
1085 */
1086 pdep = p->dirbase;
1087 while (pdep < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)]) {
1088 if (*pdep & INTEL_PTE_VALID) {
1089 pa = pte_to_pa(*pdep);
1090 vm_object_lock(pmap_object);
1091 m = vm_page_lookup(pmap_object, pa);
1092 if (m == VM_PAGE_NULL)
1093 panic("pmap_destroy: pte page not in object");
1094 vm_page_lock_queues();
1095 vm_page_free(m);
1096 inuse_ptepages_count--;
1097 vm_object_unlock(pmap_object);
1098 vm_page_unlock_queues();
1099
1100 /*
1101  			 * Clear the pdes; this pmap might be headed for the cache.
1102 */
1103 c = ptes_per_vm_page;
1104 do {
1105 *pdep = 0;
1106 pdep++;
1107 } while (--c > 0);
1108 }
1109 else {
1110 pdep += ptes_per_vm_page;
1111 }
1112
1113 }
1114 assert(p->stats.resident_count == 0);
1115 assert(p->stats.wired_count == 0);
1116
1117 /*
1118 * Add to cache if not already full
1119 */
1120 simple_lock(&pmap_cache_lock);
1121 if (pmap_cache_count <= pmap_cache_max) {
1122 p->ref_count = (int) pmap_cache_list;
1123 pmap_cache_list = p;
1124 pmap_cache_count++;
1125 simple_unlock(&pmap_cache_lock);
1126 }
1127 else {
1128 simple_unlock(&pmap_cache_lock);
1129 kmem_free(kernel_map, (vm_offset_t)p->dirbase, INTEL_PGBYTES);
1130 zfree(pmap_zone, (vm_offset_t) p);
1131 }
1132 }
1133
1134 /*
1135 * Add a reference to the specified pmap.
1136 */
1137
1138 void
1139 pmap_reference(
1140 register pmap_t p)
1141 {
1142 spl_t s;
1143
1144 if (p != PMAP_NULL) {
1145 SPLVM(s);
1146 simple_lock(&p->lock);
1147 p->ref_count++;
1148 simple_unlock(&p->lock);
1149 SPLX(s);
1150 }
1151 }
1152
1153 /*
1154 * Remove a range of hardware page-table entries.
1155 * The entries given are the first (inclusive)
1156 * and last (exclusive) entries for the VM pages.
1157 * The virtual address is the va for the first pte.
1158 *
1159 * The pmap must be locked.
1160 * If the pmap is not the kernel pmap, the range must lie
1161 * entirely within one pte-page. This is NOT checked.
1162 * Assumes that the pte-page exists.
1163 */
1164
1165 /* static */
1166 void
1167 pmap_remove_range(
1168 pmap_t pmap,
1169 vm_offset_t va,
1170 pt_entry_t *spte,
1171 pt_entry_t *epte)
1172 {
1173 register pt_entry_t *cpte;
1174 int num_removed, num_unwired;
1175 int pai;
1176 vm_offset_t pa;
1177
1178 #if DEBUG_PTE_PAGE
1179 if (pmap != kernel_pmap)
1180 ptep_check(get_pte_page(spte));
1181 #endif /* DEBUG_PTE_PAGE */
1182 num_removed = 0;
1183 num_unwired = 0;
1184
1185 for (cpte = spte; cpte < epte;
1186 cpte += ptes_per_vm_page, va += PAGE_SIZE) {
1187
1188 pa = pte_to_pa(*cpte);
1189 if (pa == 0)
1190 continue;
1191
1192 num_removed++;
1193 if (iswired(*cpte))
1194 num_unwired++;
1195
1196 if (!valid_page(pa)) {
1197
1198 /*
1199 * Outside range of managed physical memory.
1200 * Just remove the mappings.
1201 */
1202 register int i = ptes_per_vm_page;
1203 register pt_entry_t *lpte = cpte;
1204 do {
1205 *lpte = 0;
1206 lpte++;
1207 } while (--i > 0);
1208 continue;
1209 }
1210
1211 pai = pa_index(pa);
1212 LOCK_PVH(pai);
1213
1214 /*
1215 * Get the modify and reference bits.
1216 */
1217 {
1218 register int i;
1219 register pt_entry_t *lpte;
1220
1221 i = ptes_per_vm_page;
1222 lpte = cpte;
1223 do {
1224 pmap_phys_attributes[pai] |=
1225 *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
1226 *lpte = 0;
1227 lpte++;
1228 } while (--i > 0);
1229 }
1230
1231 /*
1232 * Remove the mapping from the pvlist for
1233 * this physical page.
1234 */
1235 {
1236 register pv_entry_t pv_h, prev, cur;
1237
1238 pv_h = pai_to_pvh(pai);
1239 if (pv_h->pmap == PMAP_NULL) {
1240 panic("pmap_remove: null pv_list!");
1241 }
1242 if (pv_h->va == va && pv_h->pmap == pmap) {
1243 /*
1244 * Header is the pv_entry. Copy the next one
1245 * to header and free the next one (we cannot
1246 * free the header)
1247 */
1248 cur = pv_h->next;
1249 if (cur != PV_ENTRY_NULL) {
1250 *pv_h = *cur;
1251 PV_FREE(cur);
1252 }
1253 else {
1254 pv_h->pmap = PMAP_NULL;
1255 }
1256 }
1257 else {
1258 cur = pv_h;
1259 do {
1260 prev = cur;
1261 if ((cur = prev->next) == PV_ENTRY_NULL) {
1262 panic("pmap-remove: mapping not in pv_list!");
1263 }
1264 } while (cur->va != va || cur->pmap != pmap);
1265 prev->next = cur->next;
1266 PV_FREE(cur);
1267 }
1268 UNLOCK_PVH(pai);
1269 }
1270 }
1271
1272 /*
1273 * Update the counts
1274 */
1275 assert(pmap->stats.resident_count >= num_removed);
1276 pmap->stats.resident_count -= num_removed;
1277 assert(pmap->stats.wired_count >= num_unwired);
1278 pmap->stats.wired_count -= num_unwired;
1279 }
1280
1281 /*
1282 * Remove phys addr if mapped in specified map
1283 *
1284 */
1285 void
1286 pmap_remove_some_phys(
1287 pmap_t map,
1288 vm_offset_t phys_addr)
1289 {
1290
1291 /* Implement to support working set code */
1292
1293 }
1294
1295
1296 /*
1297 * Remove the given range of addresses
1298 * from the specified map.
1299 *
1300 * It is assumed that the start and end are properly
1301 * rounded to the hardware page size.
1302 */
1303
1304
1305 /* FIXMEx86 */
1306 void
1307 pmap_remove(
1308 pmap_t map,
1309 addr64_t s,
1310 addr64_t e)
1311 {
1312 spl_t spl;
1313 register pt_entry_t *pde;
1314 register pt_entry_t *spte, *epte;
1315 vm_offset_t l;
1316
1317 if (map == PMAP_NULL)
1318 return;
1319
1320 PMAP_READ_LOCK(map, spl);
1321
1322 pde = pmap_pde(map, s);
1323
1324 while (s < e) {
1325 l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
1326 if (l > e)
1327 l = e;
1328 if (*pde & INTEL_PTE_VALID) {
1329 spte = (pt_entry_t *)ptetokv(*pde);
1330 spte = &spte[ptenum(s)];
1331 epte = &spte[intel_btop(l-s)];
1332 pmap_remove_range(map, s, spte, epte);
1333 }
1334 s = l;
1335 pde++;
1336 }
1337
1338 PMAP_FLUSH_TLBS();
1339
1340 PMAP_READ_UNLOCK(map, spl);
1341 }
1342
1343 /*
1344 * Routine: pmap_page_protect
1345 *
1346 * Function:
1347 * Lower the permission for all mappings to a given
1348 * page.
1349 */
1350 void
1351 pmap_page_protect(
1352 vm_offset_t phys,
1353 vm_prot_t prot)
1354 {
1355 pv_entry_t pv_h, prev;
1356 register pv_entry_t pv_e;
1357 register pt_entry_t *pte;
1358 int pai;
1359 register pmap_t pmap;
1360 spl_t spl;
1361 boolean_t remove;
1362
1363 assert(phys != vm_page_fictitious_addr);
1364 if (!valid_page(phys)) {
1365 /*
1366 * Not a managed page.
1367 */
1368 return;
1369 }
1370
1371 /*
1372 * Determine the new protection.
1373 */
1374 switch (prot) {
1375 case VM_PROT_READ:
1376 case VM_PROT_READ|VM_PROT_EXECUTE:
1377 remove = FALSE;
1378 break;
1379 case VM_PROT_ALL:
1380 return; /* nothing to do */
1381 default:
1382 remove = TRUE;
1383 break;
1384 }
1385
1386 /*
1387 * Lock the pmap system first, since we will be changing
1388 * several pmaps.
1389 */
1390
1391 PMAP_WRITE_LOCK(spl);
1392
1393 pai = pa_index(phys);
1394 pv_h = pai_to_pvh(pai);
1395
1396 /*
1397 * Walk down PV list, changing or removing all mappings.
1398 * We do not have to lock the pv_list because we have
1399 * the entire pmap system locked.
1400 */
1401 if (pv_h->pmap != PMAP_NULL) {
1402
1403 prev = pv_e = pv_h;
1404 do {
1405 pmap = pv_e->pmap;
1406 /*
1407 * Lock the pmap to block pmap_extract and similar routines.
1408 */
1409 simple_lock(&pmap->lock);
1410
1411 {
1412 register vm_offset_t va;
1413
1414 va = pv_e->va;
1415 pte = pmap_pte(pmap, va);
1416
1417 /*
1418 * Consistency checks.
1419 */
1420 /* assert(*pte & INTEL_PTE_VALID); XXX */
1421 /* assert(pte_to_phys(*pte) == phys); */
1422
1423 /*
1424 * Invalidate TLBs for all CPUs using this mapping.
1425 */
1426 PMAP_INVALIDATE_PAGE(pmap, va);
1427 }
1428
1429 /*
1430 * Remove the mapping if new protection is NONE
1431 * or if write-protecting a kernel mapping.
1432 */
1433 if (remove || pmap == kernel_pmap) {
1434 /*
1435 * Remove the mapping, collecting any modify bits.
1436 */
1437 {
1438 register int i = ptes_per_vm_page;
1439
1440 do {
1441 pmap_phys_attributes[pai] |=
1442 *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
1443 *pte++ = 0;
1444 } while (--i > 0);
1445 }
1446
1447 assert(pmap->stats.resident_count >= 1);
1448 pmap->stats.resident_count--;
1449
1450 /*
1451 * Remove the pv_entry.
1452 */
1453 if (pv_e == pv_h) {
1454 /*
1455 * Fix up head later.
1456 */
1457 pv_h->pmap = PMAP_NULL;
1458 }
1459 else {
1460 /*
1461 * Delete this entry.
1462 */
1463 prev->next = pv_e->next;
1464 PV_FREE(pv_e);
1465 }
1466 }
1467 else {
1468 /*
1469 * Write-protect.
1470 */
1471 register int i = ptes_per_vm_page;
1472
1473 do {
1474 *pte &= ~INTEL_PTE_WRITE;
1475 pte++;
1476 } while (--i > 0);
1477
1478 /*
1479 * Advance prev.
1480 */
1481 prev = pv_e;
1482 }
1483
1484 simple_unlock(&pmap->lock);
1485
1486 } while ((pv_e = prev->next) != PV_ENTRY_NULL);
1487
1488 /*
1489 * If pv_head mapping was removed, fix it up.
1490 */
1491 if (pv_h->pmap == PMAP_NULL) {
1492 pv_e = pv_h->next;
1493 if (pv_e != PV_ENTRY_NULL) {
1494 *pv_h = *pv_e;
1495 PV_FREE(pv_e);
1496 }
1497 }
1498 }
1499
1500 PMAP_WRITE_UNLOCK(spl);
1501 }
1502
1503 /*
1504 * Set the physical protection on the
1505 * specified range of this map as requested.
1506 * Will not increase permissions.
1507 */
1508 void
1509 pmap_protect(
1510 pmap_t map,
1511 vm_offset_t s,
1512 vm_offset_t e,
1513 vm_prot_t prot)
1514 {
1515 register pt_entry_t *pde;
1516 register pt_entry_t *spte, *epte;
1517 vm_offset_t l;
1518 spl_t spl;
1519
1520
1521 if (map == PMAP_NULL)
1522 return;
1523
1524 /*
1525 * Determine the new protection.
1526 */
1527 switch (prot) {
1528 case VM_PROT_READ:
1529 case VM_PROT_READ|VM_PROT_EXECUTE:
1530 break;
1531 case VM_PROT_READ|VM_PROT_WRITE:
1532 case VM_PROT_ALL:
1533 return; /* nothing to do */
1534 default:
1535 pmap_remove(map, s, e);
1536 return;
1537 }
1538
1539 /*
1540 * If write-protecting in the kernel pmap,
1541 * remove the mappings; the i386 ignores
1542 * the write-permission bit in kernel mode.
1543 *
1544 * XXX should be #if'd for i386
1545 */
1546
1547 if (cpuid_family == CPUID_FAMILY_386)
1548 if (map == kernel_pmap) {
1549 pmap_remove(map, s, e);
1550 return;
1551 }
1552
1553 SPLVM(spl);
1554 simple_lock(&map->lock);
1555
1556
1557 pde = pmap_pde(map, s);
1558 while (s < e) {
1559 l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
1560 if (l > e)
1561 l = e;
1562 if (*pde & INTEL_PTE_VALID) {
1563 spte = (pt_entry_t *)ptetokv(*pde);
1564 spte = &spte[ptenum(s)];
1565 epte = &spte[intel_btop(l-s)];
1566
1567 while (spte < epte) {
1568 if (*spte & INTEL_PTE_VALID)
1569 *spte &= ~INTEL_PTE_WRITE;
1570 spte++;
1571 }
1572 }
1573 s = l;
1574 pde++;
1575 }
1576
1577 PMAP_FLUSH_TLBS();
1578
1579 simple_unlock(&map->lock);
1580 SPLX(spl);
1581 }
1582
1583
1584
1585 /*
1586 * Insert the given physical page (p) at
1587 * the specified virtual address (v) in the
1588 * target physical map with the protection requested.
1589 *
1590 * If specified, the page will be wired down, meaning
1591 * that the related pte cannot be reclaimed.
1592 *
1593 * NB: This is the only routine which MAY NOT lazy-evaluate
1594 * or lose information. That is, this routine must actually
1595 * insert this page into the given map NOW.
1596 */
1597 void
1598 pmap_enter(
1599 register pmap_t pmap,
1600 vm_offset_t v,
1601 register vm_offset_t pa,
1602 vm_prot_t prot,
1603 unsigned int flags,
1604 boolean_t wired)
1605 {
1606 register pt_entry_t *pte;
1607 register pv_entry_t pv_h;
1608 register int i, pai;
1609 pv_entry_t pv_e;
1610 pt_entry_t template;
1611 spl_t spl;
1612 vm_offset_t old_pa;
1613
1614 XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n",
1615 current_thread()->top_act,
1616 current_thread(),
1617 pmap, v, pa);
1618
1619 assert(pa != vm_page_fictitious_addr);
1620 if (pmap_debug)
1621 printf("pmap(%x, %x)\n", v, pa);
1622 if (pmap == PMAP_NULL)
1623 return;
1624
1625 if (cpuid_family == CPUID_FAMILY_386)
1626 if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0
1627 && !wired /* hack for io_wire */ ) {
1628 /*
1629 * Because the 386 ignores write protection in kernel mode,
1630 * we cannot enter a read-only kernel mapping, and must
1631 * remove an existing mapping if changing it.
1632 *
1633 * XXX should be #if'd for i386
1634 */
1635 PMAP_READ_LOCK(pmap, spl);
1636
1637 pte = pmap_pte(pmap, v);
1638 if (pte != PT_ENTRY_NULL && pte_to_pa(*pte) != 0) {
1639 /*
1640 * Invalidate the translation buffer,
1641 * then remove the mapping.
1642 */
1643 PMAP_INVALIDATE_PAGE(pmap, v);
1644 pmap_remove_range(pmap, v, pte,
1645 pte + ptes_per_vm_page);
1646 }
1647 PMAP_READ_UNLOCK(pmap, spl);
1648 return;
1649 }
1650
1651 /*
1652 * Must allocate a new pvlist entry while we're unlocked;
1653 * zalloc may cause pageout (which will lock the pmap system).
1654 * If we determine we need a pvlist entry, we will unlock
1655  	 *	and allocate one.  Then we will retry, throwing away
1656 * the allocated entry later (if we no longer need it).
1657 */
1658 pv_e = PV_ENTRY_NULL;
1659 Retry:
1660 PMAP_READ_LOCK(pmap, spl);
1661
1662 /*
1663 * Expand pmap to include this pte. Assume that
1664 * pmap is always expanded to include enough hardware
1665 * pages to map one VM page.
1666 */
1667
1668 while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
1669 /*
1670 * Must unlock to expand the pmap.
1671 */
1672 PMAP_READ_UNLOCK(pmap, spl);
1673
1674 pmap_expand(pmap, v);
1675
1676 PMAP_READ_LOCK(pmap, spl);
1677 }
1678 /*
1679 * Special case if the physical page is already mapped
1680 * at this address.
1681 */
1682 old_pa = pte_to_pa(*pte);
1683 if (old_pa == pa) {
1684 /*
1685 * May be changing its wired attribute or protection
1686 */
1687
1688 template = pa_to_pte(pa) | INTEL_PTE_VALID;
1689
1690 if(flags & VM_MEM_NOT_CACHEABLE) {
1691 if(!(flags & VM_MEM_GUARDED))
1692 template |= INTEL_PTE_PTA;
1693 template |= INTEL_PTE_NCACHE;
1694 }
1695
1696 if (pmap != kernel_pmap)
1697 template |= INTEL_PTE_USER;
1698 if (prot & VM_PROT_WRITE)
1699 template |= INTEL_PTE_WRITE;
1700 if (wired) {
1701 template |= INTEL_PTE_WIRED;
1702 if (!iswired(*pte))
1703 pmap->stats.wired_count++;
1704 }
1705 else {
1706 if (iswired(*pte)) {
1707 assert(pmap->stats.wired_count >= 1);
1708 pmap->stats.wired_count--;
1709 }
1710 }
1711
1712 PMAP_INVALIDATE_PAGE(pmap, v);
1713
1714 i = ptes_per_vm_page;
1715 do {
1716 if (*pte & INTEL_PTE_MOD)
1717 template |= INTEL_PTE_MOD;
1718 WRITE_PTE(pte, template)
1719 pte++;
1720 pte_increment_pa(template);
1721 } while (--i > 0);
1722
1723 goto Done;
1724 }
1725
1726 /*
1727 * Outline of code from here:
1728 * 1) If va was mapped, update TLBs, remove the mapping
1729 * and remove old pvlist entry.
1730 * 2) Add pvlist entry for new mapping
1731 * 3) Enter new mapping.
1732 *
1733 * SHARING_FAULTS complicates this slightly in that it cannot
1734 * replace the mapping, but must remove it (because adding the
1735 * pvlist entry for the new mapping may remove others), and
1736 * hence always enters the new mapping at step 3)
1737 *
1738 * If the old physical page is not managed step 1) is skipped
1739 * (except for updating the TLBs), and the mapping is
1740 * overwritten at step 3). If the new physical page is not
1741 * managed, step 2) is skipped.
1742 */
1743
1744 if (old_pa != (vm_offset_t) 0) {
1745
1746 PMAP_INVALIDATE_PAGE(pmap, v);
1747
1748 #if DEBUG_PTE_PAGE
1749 if (pmap != kernel_pmap)
1750 ptep_check(get_pte_page(pte));
1751 #endif /* DEBUG_PTE_PAGE */
1752
1753 /*
1754 * Don't do anything to pages outside valid memory here.
1755 * Instead convince the code that enters a new mapping
1756 * to overwrite the old one.
1757 */
1758
1759 if (valid_page(old_pa)) {
1760
1761 pai = pa_index(old_pa);
1762 LOCK_PVH(pai);
1763
1764 assert(pmap->stats.resident_count >= 1);
1765 pmap->stats.resident_count--;
1766 if (iswired(*pte)) {
1767 assert(pmap->stats.wired_count >= 1);
1768 pmap->stats.wired_count--;
1769 }
1770 i = ptes_per_vm_page;
1771 do {
1772 pmap_phys_attributes[pai] |=
1773 *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
1774 WRITE_PTE(pte, 0)
1775 pte++;
1776 pte_increment_pa(template);
1777 } while (--i > 0);
1778
1779 /*
1780 * Put pte back to beginning of page since it'll be
1781 * used later to enter the new page.
1782 */
1783 pte -= ptes_per_vm_page;
1784
1785 /*
1786 * Remove the mapping from the pvlist for
1787 * this physical page.
1788 */
1789 {
1790 register pv_entry_t prev, cur;
1791
1792 pv_h = pai_to_pvh(pai);
1793 if (pv_h->pmap == PMAP_NULL) {
1794 panic("pmap_enter: null pv_list!");
1795 }
1796 if (pv_h->va == v && pv_h->pmap == pmap) {
1797 /*
1798 * Header is the pv_entry. Copy the next one
1799 * to header and free the next one (we cannot
1800 * free the header)
1801 */
1802 cur = pv_h->next;
1803 if (cur != PV_ENTRY_NULL) {
1804 *pv_h = *cur;
1805 pv_e = cur;
1806 }
1807 else {
1808 pv_h->pmap = PMAP_NULL;
1809 }
1810 }
1811 else {
1812 cur = pv_h;
1813 do {
1814 prev = cur;
1815 if ((cur = prev->next) == PV_ENTRY_NULL) {
1816 panic("pmap_enter: mapping not in pv_list!");
1817 }
1818 } while (cur->va != v || cur->pmap != pmap);
1819 prev->next = cur->next;
1820 pv_e = cur;
1821 }
1822 }
1823 UNLOCK_PVH(pai);
1824 }
1825 else {
1826
1827 /*
1828 * old_pa is not managed. Pretend it's zero so code
1829 * at Step 3) will enter new mapping (overwriting old
1830 * one). Do removal part of accounting.
1831 */
1832 old_pa = (vm_offset_t) 0;
1833 assert(pmap->stats.resident_count >= 1);
1834 pmap->stats.resident_count--;
1835 if (iswired(*pte)) {
1836 assert(pmap->stats.wired_count >= 1);
1837 pmap->stats.wired_count--;
1838 }
1839 }
1840 }
1841
1842 if (valid_page(pa)) {
1843
1844 /*
1845 * Step 2) Enter the mapping in the PV list for this
1846 * physical page.
1847 */
1848
1849 pai = pa_index(pa);
1850
1851
1852 #if SHARING_FAULTS
1853 RetryPvList:
1854 /*
1855 * We can return here from the sharing fault code below
1856 * in case we removed the only entry on the pv list and thus
1857 * must enter the new one in the list header.
1858 */
1859 #endif /* SHARING_FAULTS */
1860 LOCK_PVH(pai);
1861 pv_h = pai_to_pvh(pai);
1862
1863 if (pv_h->pmap == PMAP_NULL) {
1864 /*
1865 * No mappings yet
1866 */
1867 pv_h->va = v;
1868 pv_h->pmap = pmap;
1869 pv_h->next = PV_ENTRY_NULL;
1870 }
1871 else {
1872 #if DEBUG
1873 {
1874 /*
1875 * check that this mapping is not already there
1876 * or there is no alias for this mapping in the same map
1877 */
1878 pv_entry_t e = pv_h;
1879 while (e != PV_ENTRY_NULL) {
1880 if (e->pmap == pmap && e->va == v)
1881 panic("pmap_enter: already in pv_list");
1882 e = e->next;
1883 }
1884 }
1885 #endif /* DEBUG */
1886 #if SHARING_FAULTS
1887 {
1888 /*
1889 * do sharing faults.
1890 * if we find an entry on this pv list in the same address
1891 * space, remove it. we know there will not be more
1892 * than one.
1893 */
1894 pv_entry_t e = pv_h;
1895 pt_entry_t *opte;
1896
1897 while (e != PV_ENTRY_NULL) {
1898 if (e->pmap == pmap) {
1899 /*
1900 * Remove it, drop pv list lock first.
1901 */
1902 UNLOCK_PVH(pai);
1903
1904 opte = pmap_pte(pmap, e->va);
1905 assert(opte != PT_ENTRY_NULL);
1906 /*
1907 * Invalidate the translation buffer,
1908 * then remove the mapping.
1909 */
1910 PMAP_INVALIDATE_PAGE(pmap, e->va);
1911 pmap_remove_range(pmap, e->va, opte,
1912 opte + ptes_per_vm_page);
1913 /*
1914  	                     * We could have removed the head entry,
1915  	                     * so there may be no more entries
1916  	                     * and we have to use the pv head entry.
1917  	                     * So go back to the top and try the entry
1918  	                     * again.
1919 */
1920 goto RetryPvList;
1921 }
1922 e = e->next;
1923 }
1924
1925 /*
1926 * check that this mapping is not already there
1927 */
1928 e = pv_h;
1929 while (e != PV_ENTRY_NULL) {
1930 if (e->pmap == pmap)
1931 panic("pmap_enter: alias in pv_list");
1932 e = e->next;
1933 }
1934 }
1935 #endif /* SHARING_FAULTS */
1936 #if DEBUG_ALIAS
1937 {
1938 /*
1939 * check for aliases within the same address space.
1940 */
1941 pv_entry_t e = pv_h;
1942 vm_offset_t rpc = get_rpc();
1943
1944 while (e != PV_ENTRY_NULL) {
1945 if (e->pmap == pmap) {
1946 /*
1947 * log this entry in the alias ring buffer
1948 * if it's not there already.
1949 */
1950 struct pmap_alias *pma;
1951 int ii, logit;
1952
1953 logit = TRUE;
1954 for (ii = 0; ii < pmap_alias_index; ii++) {
1955 if (pmap_aliasbuf[ii].rpc == rpc) {
1956 /* found it in the log already */
1957 logit = FALSE;
1958 break;
1959 }
1960 }
1961 if (logit) {
1962 pma = &pmap_aliasbuf[pmap_alias_index];
1963 pma->pmap = pmap;
1964 pma->va = v;
1965 pma->rpc = rpc;
1966 pma->cookie = PMAP_ALIAS_COOKIE;
1967 if (++pmap_alias_index >= PMAP_ALIAS_MAX)
1968 panic("pmap_enter: exhausted alias log");
1969 }
1970 }
1971 e = e->next;
1972 }
1973 }
1974 #endif /* DEBUG_ALIAS */
1975 /*
1976 * Add new pv_entry after header.
1977 */
1978 if (pv_e == PV_ENTRY_NULL) {
1979 PV_ALLOC(pv_e);
1980 if (pv_e == PV_ENTRY_NULL) {
1981 UNLOCK_PVH(pai);
1982 PMAP_READ_UNLOCK(pmap, spl);
1983
1984 /*
1985 * Refill from zone.
1986 */
1987 pv_e = (pv_entry_t) zalloc(pv_list_zone);
1988 goto Retry;
1989 }
1990 }
1991 pv_e->va = v;
1992 pv_e->pmap = pmap;
1993 pv_e->next = pv_h->next;
1994 pv_h->next = pv_e;
1995 /*
1996 * Remember that we used the pvlist entry.
1997 */
1998 pv_e = PV_ENTRY_NULL;
1999 }
2000 UNLOCK_PVH(pai);
2001 }
2002
2003 /*
2004 * Step 3) Enter and count the mapping.
2005 */
2006
2007 pmap->stats.resident_count++;
2008
2009 /*
2010 * Build a template to speed up entering -
2011 * only the pfn changes.
2012 */
2013 template = pa_to_pte(pa) | INTEL_PTE_VALID;
2014
2015 if(flags & VM_MEM_NOT_CACHEABLE) {
2016 if(!(flags & VM_MEM_GUARDED))
2017 template |= INTEL_PTE_PTA;
2018 template |= INTEL_PTE_NCACHE;
2019 }
2020
2021 if (pmap != kernel_pmap)
2022 template |= INTEL_PTE_USER;
2023 if (prot & VM_PROT_WRITE)
2024 template |= INTEL_PTE_WRITE;
2025 if (wired) {
2026 template |= INTEL_PTE_WIRED;
2027 pmap->stats.wired_count++;
2028 }
2029 i = ptes_per_vm_page;
2030 do {
2031 WRITE_PTE(pte, template)
2032 pte++;
2033 pte_increment_pa(template);
2034 } while (--i > 0);
2035 Done:
2036 if (pv_e != PV_ENTRY_NULL) {
2037 PV_FREE(pv_e);
2038 }
2039
2040 PMAP_READ_UNLOCK(pmap, spl);
2041 }
2042
2043 /*
2044 * Routine: pmap_change_wiring
2045 * Function: Change the wiring attribute for a map/virtual-address
2046 * pair.
2047 * In/out conditions:
2048 * The mapping must already exist in the pmap.
2049 */
2050 void
2051 pmap_change_wiring(
2052 register pmap_t map,
2053 vm_offset_t v,
2054 boolean_t wired)
2055 {
2056 register pt_entry_t *pte;
2057 register int i;
2058 spl_t spl;
2059
2060 #if 0
2061 /*
2062 * We must grab the pmap system lock because we may
2063 * change a pte_page queue.
2064 */
2065 PMAP_READ_LOCK(map, spl);
2066
2067 if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
2068 panic("pmap_change_wiring: pte missing");
2069
2070 if (wired && !iswired(*pte)) {
2071 /*
2072 * wiring down mapping
2073 */
2074 map->stats.wired_count++;
2075 i = ptes_per_vm_page;
2076 do {
2077 *pte++ |= INTEL_PTE_WIRED;
2078 } while (--i > 0);
2079 }
2080 else if (!wired && iswired(*pte)) {
2081 /*
2082 * unwiring mapping
2083 */
2084 assert(map->stats.wired_count >= 1);
2085 map->stats.wired_count--;
2086 i = ptes_per_vm_page;
2087 do {
2088 *pte++ &= ~INTEL_PTE_WIRED;
2089 } while (--i > 0);
2090 }
2091
2092 PMAP_READ_UNLOCK(map, spl);
2093
2094 #else
2095 return;
2096 #endif
2097
2098 }
2099
2100 /*
2101 * Routine: pmap_extract
2102 * Function:
2103 * Extract the physical page address associated
2104 * with the given map/virtual_address pair.
2105 */
2106
2107 vm_offset_t
2108 pmap_extract(
2109 register pmap_t pmap,
2110 vm_offset_t va)
2111 {
2112 register pt_entry_t *pte;
2113 register vm_offset_t pa;
2114 spl_t spl;
2115
2116 SPLVM(spl);
2117 simple_lock(&pmap->lock);
2118 if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
2119 pa = (vm_offset_t) 0;
2120 else if (!(*pte & INTEL_PTE_VALID))
2121 pa = (vm_offset_t) 0;
2122 else
2123 pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
2124 simple_unlock(&pmap->lock);
2125 SPLX(spl);
2126 return(pa);
2127 }
2128
2129 /*
2130 * Routine: pmap_expand
2131 *
2132 * Expands a pmap to be able to map the specified virtual address.
2133 *
2134  *	Allocates a new page-table page, enters it in the pmap_object
2135  *	so it can be found later, and points the appropriate page
2136  *	directory entries at it.
2137 *
2138 * Must be called with the pmap system and the pmap unlocked,
2139 * since these must be unlocked to use vm_allocate or vm_deallocate.
2140 * Thus it must be called in a loop that checks whether the map
2141 * has been expanded enough.
2142 * (We won't loop forever, since page tables aren't shrunk.)
2143 */
2144 void
2145 pmap_expand(
2146 register pmap_t map,
2147 register vm_offset_t v)
2148 {
2149 pt_entry_t *pdp;
2150 register vm_page_t m;
2151 register vm_offset_t pa;
2152 register int i;
2153 spl_t spl;
2154
2155 if (map == kernel_pmap)
2156 panic("pmap_expand");
2157
2158 /*
2159 * We cannot allocate the pmap_object in pmap_init,
2160 * because it is called before the zone package is up.
2161 * Allocate it now if it is missing.
2162 */
2163 if (pmap_object == VM_OBJECT_NULL)
2164 pmap_object = vm_object_allocate(avail_end);
2165
2166 /*
2167 * Allocate a VM page for the level 2 page table entries.
2168 */
2169 while ((m = vm_page_grab()) == VM_PAGE_NULL)
2170 VM_PAGE_WAIT();
2171
2172 /*
2173 * Map the page to its physical address so that it
2174 * can be found later.
2175 */
2176 pa = m->phys_page;
2177 vm_object_lock(pmap_object);
2178 vm_page_insert(m, pmap_object, pa);
2179 vm_page_lock_queues();
2180 vm_page_wire(m);
2181 inuse_ptepages_count++;
2182 vm_object_unlock(pmap_object);
2183 vm_page_unlock_queues();
2184
2185 /*
2186 * Zero the page.
2187 */
2188 memset((void *)phystokv(pa), 0, PAGE_SIZE);
2189
2190 PMAP_READ_LOCK(map, spl);
2191 /*
2192 * See if someone else expanded us first
2193 */
2194 if (pmap_pte(map, v) != PT_ENTRY_NULL) {
2195 PMAP_READ_UNLOCK(map, spl);
2196 vm_object_lock(pmap_object);
2197 vm_page_lock_queues();
2198 vm_page_free(m);
2199 inuse_ptepages_count--;
2200 vm_page_unlock_queues();
2201 vm_object_unlock(pmap_object);
2202 return;
2203 }
2204
2205 /*
2206 * Set the page directory entry for this page table.
2207 * If we have allocated more than one hardware page,
2208 * set several page directory entries.
2209 */
2210
2211 i = ptes_per_vm_page;
2212 pdp = &map->dirbase[pdenum(map, v) & ~(i-1)];
2213 do {
2214 *pdp = pa_to_pte(pa)
2215 | INTEL_PTE_VALID
2216 | INTEL_PTE_USER
2217 | INTEL_PTE_WRITE;
2218 pdp++;
2219 pa += INTEL_PGBYTES;
2220 } while (--i > 0);
2221
2222 PMAP_READ_UNLOCK(map, spl);
2223 return;
2224 }
2225
2226 /*
2227 * Copy the range specified by src_addr/len
2228 * from the source map to the range dst_addr/len
2229 * in the destination map.
2230 *
2231 * This routine is only advisory and need not do anything.
2232 */
2233 #if 0
2234 void
2235 pmap_copy(
2236 pmap_t dst_pmap,
2237 pmap_t src_pmap,
2238 vm_offset_t dst_addr,
2239 vm_size_t len,
2240 vm_offset_t src_addr)
2241 {
2242 #ifdef lint
2243 dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
2244 #endif /* lint */
2245 }
2246 #endif/* 0 */
2247
2248 /*
2249 * pmap_sync_caches_phys(ppnum_t pa)
2250 *
2251 * Invalidates all of the instruction cache on a physical page and
2252 * pushes any dirty data from the data cache for the same physical page
2253 */
2254
2255 void pmap_sync_caches_phys(ppnum_t pa)
2256 {
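	/*
	 * Note (added for exposition): CPUID_FEATURE_SS appears to be
	 * the 'self snoop' capability bit; when the processor reports
	 * it, the caches stay coherent for this case and the explicit
	 * wbinvd below is skipped.
	 */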
2257 if (!(cpuid_features() & CPUID_FEATURE_SS))
2258 {
2259 __asm__ volatile("wbinvd");
2260 }
2261 return;
2262 }
2263
2264 int collect_ref;
2265 int collect_unref;
2266
2267 /*
2268 * Routine: pmap_collect
2269 * Function:
2270 * Garbage collects the physical map system for
2271 * pages which are no longer used.
2272 * Success need not be guaranteed -- that is, some
2273 * pages that are no longer referenced may be left
2274 * uncollected while others are collected.
2275 * Usage:
2276 * Called by the pageout daemon when pages are scarce.
2277 */
2278 void
2279 pmap_collect(
2280 pmap_t p)
2281 {
2282 register pt_entry_t *pdp, *ptp;
2283 pt_entry_t *eptp;
2284 vm_offset_t pa;
2285 int wired;
2286 spl_t spl;
2287
2288 if (p == PMAP_NULL)
2289 return;
2290
2291 if (p == kernel_pmap)
2292 return;
2293
2294 /*
2295 * Garbage collect map.
2296 */
2297 PMAP_READ_LOCK(p, spl);
2298 PMAP_FLUSH_TLBS();
2299
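/*
 * A single pass both ages and collects: a page-table page whose
 * directory entry has the REF (accessed) bit set only has that bit
 * cleared, so it must be re-referenced to survive the next pass; one
 * whose REF bit is already clear is freed here, unless it still
 * contains wired mappings.
 */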
2300 for (pdp = p->dirbase;
2301 pdp < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)];
2302 pdp += ptes_per_vm_page)
2303 {
2304 if (*pdp & INTEL_PTE_VALID)
2305 if(*pdp & INTEL_PTE_REF) {
2306 *pdp &= ~INTEL_PTE_REF;
2307 collect_ref++;
2308 } else {
2309 collect_unref++;
2310 pa = pte_to_pa(*pdp);
2311 ptp = (pt_entry_t *)phystokv(pa);
2312 eptp = ptp + NPTES*ptes_per_vm_page;
2313
2314 /*
2315 * If the pte page has any wired mappings, we cannot
2316 * free it.
2317 */
2318 wired = 0;
2319 {
2320 register pt_entry_t *ptep;
2321 for (ptep = ptp; ptep < eptp; ptep++) {
2322 if (iswired(*ptep)) {
2323 wired = 1;
2324 break;
2325 }
2326 }
2327 }
2328 if (!wired) {
2329 /*
2330 * Remove the virtual addresses mapped by this pte page.
2331 */
2332 pmap_remove_range(p,
2333 pdetova(pdp - p->dirbase),
2334 ptp,
2335 eptp);
2336
2337 /*
2338 * Invalidate the page directory pointer.
2339 */
2340 {
2341 register int i = ptes_per_vm_page;
2342 register pt_entry_t *pdep = pdp;
2343 do {
2344 *pdep++ = 0;
2345 } while (--i > 0);
2346 }
2347
2348 PMAP_READ_UNLOCK(p, spl);
2349
2350 /*
2351 * And free the pte page itself.
2352 */
2353 {
2354 register vm_page_t m;
2355
2356 vm_object_lock(pmap_object);
2357 m = vm_page_lookup(pmap_object, pa);
2358 if (m == VM_PAGE_NULL)
2359 panic("pmap_collect: pte page not in object");
2360 vm_page_lock_queues();
2361 vm_page_free(m);
2362 inuse_ptepages_count--;
2363 vm_page_unlock_queues();
2364 vm_object_unlock(pmap_object);
2365 }
2366
2367 PMAP_READ_LOCK(p, spl);
2368 }
2369 }
2370 }
2371 PMAP_READ_UNLOCK(p, spl);
2372 return;
2373
2374 }
2375
2376 /*
2377 * Routine: pmap_kernel
2378 * Function:
2379 * Returns the physical map handle for the kernel.
2380 */
2381 #if 0
2382 pmap_t
2383 pmap_kernel(void)
2384 {
2385 return (kernel_pmap);
2386 }
2387 #endif /* 0 */
2388
2389 /*
2390 * pmap_zero_page zeros the specified (machine independent) page.
2391 * See machine/phys.c or machine/phys.s for implementation.
2392 */
2393 #if 0
2394 void
2395 pmap_zero_page(
2396 register vm_offset_t phys)
2397 {
2398 register int i;
2399
2400 assert(phys != vm_page_fictitious_addr);
2401 i = PAGE_SIZE / INTEL_PGBYTES;
2402 phys = intel_pfn(phys);
2403
2404 while (i--)
2405 zero_phys(phys++);
2406 }
2407 #endif /* 0 */
2408
2409 /*
2410 * pmap_copy_page copies the specified (machine independent) page.
2411 * See machine/phys.c or machine/phys.s for implementation.
2412 */
2413 #if 0
2414 void
2415 pmap_copy_page(
2416 vm_offset_t src,
2417 vm_offset_t dst)
2418 {
2419 int i;
2420
2421 assert(src != vm_page_fictitious_addr);
2422 assert(dst != vm_page_fictitious_addr);
2423 i = PAGE_SIZE / INTEL_PGBYTES;
2424
2425 while (i--) {
2426 copy_phys(intel_pfn(src), intel_pfn(dst));
2427 src += INTEL_PGBYTES;
2428 dst += INTEL_PGBYTES;
2429 }
2430 }
2431 #endif /* 0 */
2432
2433 /*
2434 * Routine: pmap_pageable
2435 * Function:
2436 * Make the specified pages (by pmap, offset)
2437 * pageable (or not) as requested.
2438 *
2439 * A page which is not pageable may not take
2440 * a fault; therefore, its page table entry
2441 * must remain valid for the duration.
2442 *
2443 * This routine is merely advisory; pmap_enter
2444 * will specify that these pages are to be wired
2445 * down (or not) as appropriate.
2446 */
2447 void
2448 pmap_pageable(
2449 pmap_t pmap,
2450 vm_offset_t start,
2451 vm_offset_t end,
2452 boolean_t pageable)
2453 {
2454 #ifdef lint
2455 pmap++; start++; end++; pageable++;
2456 #endif /* lint */
2457 }
2458
2459 /*
2460 * Clear specified attribute bits.
2461 */
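/*
 * The attribute bits handled here (PHYS_MODIFIED, PHYS_REFERENCED)
 * track the hardware PTE dirty/accessed bits; the routines below
 * apply the same "bits" mask both to pmap_phys_attributes[] and to
 * the PTEs reached through the PV lists, which relies on the two
 * encodings matching (they are defined to do so earlier in the file).
 */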
2462 void
2463 phys_attribute_clear(
2464 vm_offset_t phys,
2465 int bits)
2466 {
2467 pv_entry_t pv_h;
2468 register pv_entry_t pv_e;
2469 register pt_entry_t *pte;
2470 int pai;
2471 register pmap_t pmap;
2472 spl_t spl;
2473
2474 assert(phys != vm_page_fictitious_addr);
2475 if (!valid_page(phys)) {
2476 /*
2477 * Not a managed page.
2478 */
2479 return;
2480 }
2481
2482 /*
2483 * Lock the pmap system first, since we will be changing
2484 * several pmaps.
2485 */
2486
2487 PMAP_WRITE_LOCK(spl);
2488
2489 pai = pa_index(phys);
2490 pv_h = pai_to_pvh(pai);
2491
2492 /*
2493 * Walk down PV list, clearing all modify or reference bits.
2494 * We do not have to lock the pv_list because we have
2495 * the entire pmap system locked.
2496 */
2497 if (pv_h->pmap != PMAP_NULL) {
2498 /*
2499 * There are some mappings.
2500 */
2501 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
2502
2503 pmap = pv_e->pmap;
2504 /*
2505 * Lock the pmap to block pmap_extract and similar routines.
2506 */
2507 simple_lock(&pmap->lock);
2508
2509 {
2510 register vm_offset_t va;
2511
2512 va = pv_e->va;
2513 pte = pmap_pte(pmap, va);
2514
2515 #if 0
2516 /*
2517 * Consistency checks.
2518 */
2519 assert(*pte & INTEL_PTE_VALID);
2520 /* assert(pte_to_phys(*pte) == phys); */
2521 #endif
2522
2523 /*
2524 * Invalidate TLBs for all CPUs using this mapping.
2525 */
2526 PMAP_INVALIDATE_PAGE(pmap, va);
2527 }
2528
2529 /*
2530 * Clear modify or reference bits.
2531 */
2532 {
2533 register int i = ptes_per_vm_page;
2534 do {
2535 *pte++ &= ~bits;
2536 } while (--i > 0);
2537 }
2538 simple_unlock(&pmap->lock);
2539 }
2540 }
2541
2542 pmap_phys_attributes[pai] &= ~bits;
2543
2544 PMAP_WRITE_UNLOCK(spl);
2545 }
2546
2547 /*
2548 * Check specified attribute bits.
2549 */
2550 boolean_t
2551 phys_attribute_test(
2552 vm_offset_t phys,
2553 int bits)
2554 {
2555 pv_entry_t pv_h;
2556 register pv_entry_t pv_e;
2557 register pt_entry_t *pte;
2558 int pai;
2559 register pmap_t pmap;
2560 spl_t spl;
2561
2562 assert(phys != vm_page_fictitious_addr);
2563 if (!valid_page(phys)) {
2564 /*
2565 * Not a managed page.
2566 */
2567 return (FALSE);
2568 }
2569
2570 /*
2571 * Lock the pmap system first, since we will be checking
2572 * several pmaps.
2573 */
2574
2575 PMAP_WRITE_LOCK(spl);
2576
2577 pai = pa_index(phys);
2578 pv_h = pai_to_pvh(pai);
2579
2580 if (pmap_phys_attributes[pai] & bits) {
2581 PMAP_WRITE_UNLOCK(spl);
2582 return (TRUE);
2583 }
2584
2585 /*
2586 * Walk down PV list, checking all mappings.
2587 * We do not have to lock the pv_list because we have
2588 * the entire pmap system locked.
2589 */
2590 if (pv_h->pmap != PMAP_NULL) {
2591 /*
2592 * There are some mappings.
2593 */
2594 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
2595
2596 pmap = pv_e->pmap;
2597 /*
2598 * Lock the pmap to block pmap_extract and similar routines.
2599 */
2600 simple_lock(&pmap->lock);
2601
2602 {
2603 register vm_offset_t va;
2604
2605 va = pv_e->va;
2606 pte = pmap_pte(pmap, va);
2607
2608 #if 0
2609 /*
2610 * Consistency checks.
2611 */
2612 assert(*pte & INTEL_PTE_VALID);
2613 /* assert(pte_to_phys(*pte) == phys); */
2614 #endif
2615 }
2616
2617 /*
2618 * Check modify or reference bits.
2619 */
2620 {
2621 register int i = ptes_per_vm_page;
2622
2623 do {
2624 if (*pte++ & bits) {
2625 simple_unlock(&pmap->lock);
2626 PMAP_WRITE_UNLOCK(spl);
2627 return (TRUE);
2628 }
2629 } while (--i > 0);
2630 }
2631 simple_unlock(&pmap->lock);
2632 }
2633 }
2634 PMAP_WRITE_UNLOCK(spl);
2635 return (FALSE);
2636 }
2637
2638 /*
2639 * Set specified attribute bits.
2640 */
2641 void
2642 phys_attribute_set(
2643 vm_offset_t phys,
2644 int bits)
2645 {
2646 spl_t spl;
2647
2648 assert(phys != vm_page_fictitious_addr);
2649 if (!valid_page(phys)) {
2650 /*
2651 * Not a managed page.
2652 */
2653 return;
2654 }
2655
2656 /*
2657 * Lock the pmap system and set the requested bits in
2658 * the phys attributes array. Don't need to bother with
2659 * ptes because the test routine looks here first.
2660 */
2661
2662 PMAP_WRITE_LOCK(spl);
2663 pmap_phys_attributes[pa_index(phys)] |= bits;
2664 PMAP_WRITE_UNLOCK(spl);
2665 }
2666
2667 /*
2668 * Set the modify bit on the specified physical page.
2669 */
2670
2671 void pmap_set_modify(
2672 register vm_offset_t phys)
2673 {
2674 phys_attribute_set(phys, PHYS_MODIFIED);
2675 }
2676
2677 /*
2678 * Clear the modify bits on the specified physical page.
2679 */
2680
2681 void
2682 pmap_clear_modify(
2683 register vm_offset_t phys)
2684 {
2685 phys_attribute_clear(phys, PHYS_MODIFIED);
2686 }
2687
2688 /*
2689 * pmap_is_modified:
2690 *
2691 * Return whether or not the specified physical page is modified
2692 * by any physical maps.
2693 */
2694
2695 boolean_t
2696 pmap_is_modified(
2697 register vm_offset_t phys)
2698 {
2699 return (phys_attribute_test(phys, PHYS_MODIFIED));
2700 }
2701
2702 /*
2703 * pmap_clear_reference:
2704 *
2705 * Clear the reference bit on the specified physical page.
2706 */
2707
2708 void
2709 pmap_clear_reference(
2710 vm_offset_t phys)
2711 {
2712 phys_attribute_clear(phys, PHYS_REFERENCED);
2713 }
2714
2715 /*
2716 * pmap_is_referenced:
2717 *
2718 * Return whether or not the specified physical page is referenced
2719 * by any physical maps.
2720 */
2721
2722 boolean_t
2723 pmap_is_referenced(
2724 vm_offset_t phys)
2725 {
2726 return (phys_attribute_test(phys, PHYS_REFERENCED));
2727 }
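/*
 * Illustrative sketch (not taken from this file): machine-independent
 * callers typically sample and then clear the reference state when
 * aging a page, roughly as follows:
 */
#if 0
	if (pmap_is_referenced(phys)) {
		pmap_clear_reference(phys);
		/* recently used; give the page another pass */
	}
#endif /* 0 */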
2728
2729 /*
2730 * Set the modify bit on the specified range
2731 * of this map as requested.
2732 *
2733 * This optimization is valid only if, whenever the dirty bit
2734 * in the vm_page_t is tested, it is also tested in the pmap.
2735 */
2736 void
2737 pmap_modify_pages(
2738 pmap_t map,
2739 vm_offset_t s,
2740 vm_offset_t e)
2741 {
2742 spl_t spl;
2743 register pt_entry_t *pde;
2744 register pt_entry_t *spte, *epte;
2745 vm_offset_t l;
2746
2747 if (map == PMAP_NULL)
2748 return;
2749
2750 PMAP_READ_LOCK(map, spl);
2751
2752 pde = pmap_pde(map, s);
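/*
 * Each loop iteration covers one page directory entry.  "l" is the
 * end of the region mapped by the current pde; a value of 0 means
 * the computation wrapped past the top of the address space, in
 * which case the rest of that page table is scanned and the loop
 * then terminates.
 */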
2753 while (s && s < e) {
2754 l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
2755 if (l > e)
2756 l = e;
2757 if (*pde & INTEL_PTE_VALID) {
2758 spte = (pt_entry_t *)ptetokv(*pde);
2759 if (l) {
2760 spte = &spte[ptenum(s)];
2761 epte = &spte[intel_btop(l-s)];
2762 } else {
2763 epte = &spte[intel_btop(PDE_MAPPED_SIZE)];
2764 spte = &spte[ptenum(s)];
2765 }
2766 while (spte < epte) {
2767 if (*spte & INTEL_PTE_VALID) {
2768 *spte |= (INTEL_PTE_MOD | INTEL_PTE_WRITE);
2769 }
2770 spte++;
2771 }
2772 }
2773 s = l;
2774 pde++;
2775 }
2776 PMAP_FLUSH_TLBS();
2777 PMAP_READ_UNLOCK(map, spl);
2778 }
2779
2780
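/*
 * On i386 the hardware keeps the instruction cache coherent with
 * data stores, so these cache-maintenance entry points can be
 * no-ops here.
 */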
2781 void
2782 invalidate_icache(vm_offset_t addr, unsigned cnt, int phys)
2783 {
2784 return;
2785 }
2786 void
2787 flush_dcache(vm_offset_t addr, unsigned count, int phys)
2788 {
2789 return;
2790 }
2791
2792 #if NCPUS > 1
2793
2794 inline void
2795 pmap_wait_for_clear(void)
2796 {
2797 register int my_cpu;
2798 spl_t s;
2799 register pmap_t my_pmap;
2800
2801 mp_disable_preemption();
2802 my_cpu = cpu_number();
2803
2804
2805 my_pmap = real_pmap[my_cpu];
2806
2807 if (!(my_pmap && pmap_in_use(my_pmap, my_cpu)))
2808 my_pmap = kernel_pmap;
2809
2810 /*
2811 * Raise spl to splhigh (above splip) to block out pmap_extract
2812 * from IO code (which would put this cpu back in the active
2813 * set).
2814 */
2815 s = splhigh();
2816
2817 /*
2818 * Wait for any pmap updates in progress, on either user
2819 * or kernel pmap.
2820 */
2821 while (*(volatile hw_lock_t)&my_pmap->lock.interlock ||
2822 *(volatile hw_lock_t)&kernel_pmap->lock.interlock) {
2823 continue;
2824 }
2825
2826 splx(s);
2827 mp_enable_preemption();
2828 }
2829
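/*
 * The two handlers below are presumably invoked from the
 * interprocessor-interrupt path when another cpu requests a TLB
 * flush or a cr3 reload; each first waits for any pmap update in
 * progress to finish.
 */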
2830 void
2831 pmap_flush_tlb_interrupt(void) {
2832 pmap_wait_for_clear();
2833
2834 flush_tlb();
2835 }
2836
2837 void
2838 pmap_reload_tlb_interrupt(void) {
2839 pmap_wait_for_clear();
2840
2841 set_cr3(kernel_pmap->pdirbase);
2842 }
2843
2844
2845 #endif /* NCPUS > 1 */
2846
2847 #if MACH_KDB
2848
2849 /* show phys page mappings and attributes */
2850
2851 extern void db_show_page(vm_offset_t pa);
2852
2853 void
2854 db_show_page(vm_offset_t pa)
2855 {
2856 pv_entry_t pv_h;
2857 int pai;
2858 char attr;
2859
2860 pai = pa_index(pa);
2861 pv_h = pai_to_pvh(pai);
2862
2863 attr = pmap_phys_attributes[pai];
2864 printf("phys page %x ", pa);
2865 if (attr & PHYS_MODIFIED)
2866 printf("modified, ");
2867 if (attr & PHYS_REFERENCED)
2868 printf("referenced, ");
2869 if (pv_h->pmap || pv_h->next)
2870 printf(" mapped at\n");
2871 else
2872 printf(" not mapped\n");
2873 for (; pv_h; pv_h = pv_h->next)
2874 if (pv_h->pmap)
2875 printf("%x in pmap %x\n", pv_h->va, pv_h->pmap);
2876 }
2877
2878 #endif /* MACH_KDB */
2879
2880 #if MACH_KDB
2881 void db_kvtophys(vm_offset_t);
2882 void db_show_vaddrs(pt_entry_t *);
2883
2884 /*
2885 * print out the results of kvtophys(arg)
2886 */
2887 void
2888 db_kvtophys(
2889 vm_offset_t vaddr)
2890 {
2891 db_printf("0x%x", kvtophys(vaddr));
2892 }
2893
2894 /*
2895 * Walk the page tables.
2896 */
2897 void
2898 db_show_vaddrs(
2899 pt_entry_t *dirbase)
2900 {
2901 pt_entry_t *ptep, *pdep, tmp;
2902 int x, y, pdecnt, ptecnt;
2903
2904 if (dirbase == 0) {
2905 dirbase = kernel_pmap->dirbase;
2906 }
2907 if (dirbase == 0) {
2908 db_printf("need a dirbase...\n");
2909 return;
2910 }
2911 dirbase = (pt_entry_t *) ((unsigned long) dirbase & ~INTEL_OFFMASK);
2912
2913 db_printf("dirbase: 0x%x\n", dirbase);
2914
2915 pdecnt = ptecnt = 0;
2916 pdep = &dirbase[0];
2917 for (y = 0; y < NPDES; y++, pdep++) {
2918 if (((tmp = *pdep) & INTEL_PTE_VALID) == 0) {
2919 continue;
2920 }
2921 pdecnt++;
2922 ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK);
2923 db_printf("dir[%4d]: 0x%x\n", y, *pdep);
2924 for (x = 0; x < NPTES; x++, ptep++) {
2925 if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) {
2926 continue;
2927 }
2928 ptecnt++;
2929 db_printf(" tab[%4d]: 0x%x, va=0x%x, pa=0x%x\n",
2930 x,
2931 *ptep,
2932 (y << 22) | (x << 12),
2933 *ptep & ~INTEL_OFFMASK);
2934 }
2935 }
2936
2937 db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt);
2938
2939 }
2940 #endif /* MACH_KDB */
2941
2942 #include <mach_vm_debug.h>
2943 #if MACH_VM_DEBUG
2944 #include <vm/vm_debug.h>
2945
2946 int
2947 pmap_list_resident_pages(
2948 register pmap_t pmap,
2949 register vm_offset_t *listp,
2950 register int space)
2951 {
2952 return 0;
2953 }
2954 #endif /* MACH_VM_DEBUG */
2955
2956 #ifdef MACH_BSD
2957 /*
2958 * pmap_movepage (pmap_pagemove in BSD)
2959 *
2960 * BSD support routine to reassign virtual addresses.
2961 */
2962
2963 void
2964 pmap_movepage(unsigned long from, unsigned long to, vm_size_t size)
2965 {
2966 spl_t spl;
2967 pt_entry_t *pte, saved_pte;
2968 /* Move one page at a time, taking the kernel pmap read lock around each PTE access */
2969
2970
2971 while (size > 0) {
2972 PMAP_READ_LOCK(kernel_pmap, spl);
2973 pte = pmap_pte(kernel_pmap, from);
2974 if (pte == NULL)
2975 panic("pmap_pagemove from pte NULL");
2976 saved_pte = *pte;
2977 PMAP_READ_UNLOCK(kernel_pmap, spl);
2978
2979 pmap_enter(kernel_pmap, to, i386_trunc_page(saved_pte),
2980 VM_PROT_READ|VM_PROT_WRITE, 0, saved_pte & INTEL_PTE_WIRED);
2981
2982 pmap_remove(kernel_pmap, from, from+PAGE_SIZE);
2983
2984 PMAP_READ_LOCK(kernel_pmap, spl);
2985 pte = pmap_pte(kernel_pmap, to);
2986 if (pte == NULL)
2987 panic("pmap_pagemove 'to' pte NULL");
2988
2989 *pte = saved_pte;
2990 PMAP_READ_UNLOCK(kernel_pmap, spl);
2991
2992 from += PAGE_SIZE;
2993 to += PAGE_SIZE;
2994 size -= PAGE_SIZE;
2995 }
2996
2997 /* Get the processors to update the TLBs */
2998 PMAP_FLUSH_TLBS();
2999
3000 }
3001
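/*
 * The bmap* entry points below appear to mirror the PowerPC pmap's
 * block-mapping interface; on i386 their bodies are compiled out via
 * NOTIMPLEMENTED and they simply report success.
 */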
3002 kern_return_t bmapvideo(vm_offset_t *info);
3003 kern_return_t bmapvideo(vm_offset_t *info) {
3004
3005 extern struct vc_info vinfo;
3006 #ifdef NOTIMPLEMENTED
3007 (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */
3008 #endif
3009 return KERN_SUCCESS;
3010 }
3011
3012 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
3013 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
3014
3015 #ifdef NOTIMPLEMENTED
3016 pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr); /* Map it in */
3017 #endif
3018 return KERN_SUCCESS;
3019 }
3020
3021 kern_return_t bmapmapr(vm_offset_t va);
3022 kern_return_t bmapmapr(vm_offset_t va) {
3023
3024 #ifdef NOTIMPLEMENTED
3025 mapping_remove(current_act()->task->map->pmap, va); /* Remove map */
3026 #endif
3027 return KERN_SUCCESS;
3028 }
3029 #endif
3030
3031 /* temporary workaround */
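/*
 * coredumpok: report whether the core dump code may safely touch the
 * page at "va".  It refuses when there is no PTE, or when the mapping
 * is both wired and marked non-cacheable (presumably device memory
 * rather than ordinary RAM).
 */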
3032 boolean_t
3033 coredumpok(vm_map_t map, vm_offset_t va)
3034 {
3035 pt_entry_t *ptep;
3036 ptep = pmap_pte(map->pmap, va);
3037 if (0 == ptep) return FALSE;
3038 return ((*ptep & (INTEL_PTE_NCACHE|INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE|INTEL_PTE_WIRED));
3039 }