1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52
53/*
54 * File: pmap.c
55 * Author: Avadis Tevanian, Jr., Michael Wayne Young
56 * (These guys wrote the Vax version)
57 *
58 * Physical Map management code for Intel i386, i486, and i860.
59 *
60 * Manages physical address maps.
61 *
62 * In addition to hardware address maps, this
63 * module is called upon to provide software-use-only
64 * maps which may or may not be stored in the same
65 * form as hardware maps. These pseudo-maps are
66 * used to store intermediate results from copy
67 * operations to and from address spaces.
68 *
69 * Since the information managed by this module is
70 * also stored by the logical address mapping module,
71 * this module may throw away valid virtual-to-physical
72 * mappings at almost any time. However, invalidations
73 * of virtual-to-physical mappings must be done as
74 * requested.
75 *
76 * In order to cope with hardware architectures which
77 * make virtual-to-physical map invalidates expensive,
78 * this module may delay invalidate or reduce-protection
79 * operations until such time as they are actually
80 * necessary. This module is given full information as
81 * to which processors are currently using which maps,
82 * and to when physical maps must be made correct.
83 */
84
85#include <cpus.h>
86
87#include <string.h>
88#include <norma_vm.h>
89#include <mach_kdb.h>
90#include <mach_ldebug.h>
91
92#include <mach/machine/vm_types.h>
93
94#include <mach/boolean.h>
95#include <kern/thread.h>
96#include <kern/zalloc.h>
97
98#include <kern/lock.h>
99#include <kern/spl.h>
100
101#include <vm/pmap.h>
102#include <vm/vm_map.h>
103#include <vm/vm_kern.h>
104#include <mach/vm_param.h>
105#include <mach/vm_prot.h>
106#include <vm/vm_object.h>
107#include <vm/vm_page.h>
108
109#include <mach/machine/vm_param.h>
110#include <machine/thread.h>
111
112#include <kern/misc_protos.h> /* prototyping */
113#include <i386/misc_protos.h>
114
115#include <i386/cpuid.h>
116
117#if MACH_KDB
118#include <ddb/db_command.h>
119#include <ddb/db_output.h>
120#include <ddb/db_sym.h>
121#include <ddb/db_print.h>
122#endif /* MACH_KDB */
123
124#include <kern/xpr.h>
125
126#if NCPUS > 1
127#include <i386/AT386/mp/mp_events.h>
128#endif
129
130/*
131 * Forward declarations for internal functions.
132 */
133void pmap_expand(
134 pmap_t map,
135 vm_offset_t v);
136
137extern void pmap_remove_range(
138 pmap_t pmap,
139 vm_offset_t va,
140 pt_entry_t *spte,
141 pt_entry_t *epte);
142
143void phys_attribute_clear(
144 vm_offset_t phys,
145 int bits);
146
147boolean_t phys_attribute_test(
148 vm_offset_t phys,
149 int bits);
150
151void pmap_set_modify(vm_offset_t phys);
152
153void phys_attribute_set(
154 vm_offset_t phys,
155 int bits);
156
157
158#ifndef set_dirbase
159void set_dirbase(vm_offset_t dirbase);
160#endif /* set_dirbase */
161
162#define PA_TO_PTE(pa) (pa_to_pte((pa) - VM_MIN_KERNEL_ADDRESS))
163#define iswired(pte) ((pte) & INTEL_PTE_WIRED)
164
165pmap_t real_pmap[NCPUS];
166
167#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry);
168#define WRITE_PTE_FAST(pte_p, pte_entry) *(pte_p) = (pte_entry);
169
170/*
171 * Private data structures.
172 */
173
174/*
175 * For each vm_page_t, there is a list of all currently
176 * valid virtual mappings of that page. An entry is
177 * a pv_entry_t; the list is the pv_table.
178 */
179
180typedef struct pv_entry {
181 struct pv_entry *next; /* next pv_entry */
182 pmap_t pmap; /* pmap where mapping lies */
183 vm_offset_t va; /* virtual address for mapping */
184} *pv_entry_t;
185
186#define PV_ENTRY_NULL ((pv_entry_t) 0)
187
188pv_entry_t pv_head_table; /* array of entries, one per page */
189
190/*
191 * pv_list entries are kept on a list that can only be accessed
192 * with the pmap system locked (at SPLVM, not in the cpus_active set).
193 * The list is refilled from the pv_list_zone if it becomes empty.
194 */
195pv_entry_t pv_free_list; /* free list at SPLVM */
196decl_simple_lock_data(,pv_free_list_lock)
197
198#define PV_ALLOC(pv_e) { \
199 simple_lock(&pv_free_list_lock); \
200 if ((pv_e = pv_free_list) != 0) { \
201 pv_free_list = pv_e->next; \
202 } \
203 simple_unlock(&pv_free_list_lock); \
204}
205
206#define PV_FREE(pv_e) { \
207 simple_lock(&pv_free_list_lock); \
208 pv_e->next = pv_free_list; \
209 pv_free_list = pv_e; \
210 simple_unlock(&pv_free_list_lock); \
211}
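/*
 * Illustrative sketch (not part of the original source): the caller
 * pattern these macros expect, as used by pmap_enter() further down.
 * PV_ALLOC may yield PV_ENTRY_NULL when the free list is empty, in
 * which case the caller drops its locks and refills from pv_list_zone
 * (which may block) before retrying.
 *
 *	pv_entry_t pv_e;
 *
 *	PV_ALLOC(pv_e);
 *	if (pv_e == PV_ENTRY_NULL)
 *		pv_e = (pv_entry_t) zalloc(pv_list_zone);
 *	... link pv_e after the pv_head entry ...
 *	PV_FREE(pv_e);
 */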
212
213zone_t pv_list_zone; /* zone of pv_entry structures */
214
215/*
216 * Each entry in the pv_head_table is locked by a bit in the
217 * pv_lock_table. The lock bits are accessed by the physical
218 * address of the page they lock.
219 */
220
221char *pv_lock_table; /* pointer to array of bits */
222#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
223
224/*
225 * First and last physical addresses that we maintain any information
226 * for. Initialized to zero so that pmap operations done before
227 * pmap_init won't touch any non-existent structures.
228 */
229vm_offset_t vm_first_phys = (vm_offset_t) 0;
230vm_offset_t vm_last_phys = (vm_offset_t) 0;
231boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
232
233/*
234 * Index into pv_head table, its lock bits, and the modify/reference
235 * bits starting at vm_first_phys.
236 */
237
238#define pa_index(pa) (atop(pa - vm_first_phys))
239
240#define pai_to_pvh(pai) (&pv_head_table[pai])
241#define lock_pvh_pai(pai) bit_lock(pai, (void *)pv_lock_table)
242#define unlock_pvh_pai(pai) bit_unlock(pai, (void *)pv_lock_table)
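/*
 * Illustrative sketch (added, not original): how a managed physical
 * address is turned into its pv_head entry and per-page lock bit using
 * the macros above; LOCK_PVH/UNLOCK_PVH are defined below.
 *
 *	int		pai;
 *	pv_entry_t	pv_h;
 *
 *	pai  = pa_index(pa);		page index relative to vm_first_phys
 *	pv_h = pai_to_pvh(pai);		head of the pv list for this page
 *	LOCK_PVH(pai);
 *	... walk or modify the pv list ...
 *	UNLOCK_PVH(pai);
 */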
243
244/*
245 * Array of physical page attributes for managed pages.
246 * One byte per physical page.
247 */
248char *pmap_phys_attributes;
249
250/*
251 * Physical page attributes. Copy bits from PTE definition.
252 */
253#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */
254#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */
255
256/*
257 * Amount of virtual memory mapped by one
258 * page-directory entry.
259 */
260#define PDE_MAPPED_SIZE (pdetova(1))
261
262/*
263 * We allocate page table pages directly from the VM system
264 * through this object. It maps physical memory.
265 */
266vm_object_t pmap_object = VM_OBJECT_NULL;
267
268/*
269 * Locking and TLB invalidation
270 */
271
272/*
273 * Locking Protocols:
274 *
275 * There are two structures in the pmap module that need locking:
276 * the pmaps themselves, and the per-page pv_lists (which are locked
277 * by locking the pv_lock_table entry that corresponds to the pv_head
278 * for the list in question.) Most routines want to lock a pmap and
279 * then do operations in it that require pv_list locking -- however
280 * pmap_remove_all and pmap_copy_on_write operate on a physical page
281 * basis and want to do the locking in the reverse order, i.e. lock
282 * a pv_list and then go through all the pmaps referenced by that list.
283 * To protect against deadlock between these two cases, the pmap_lock
284 * is used. There are three different locking protocols as a result:
285 *
286 * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
287 * the pmap.
288 *
289 * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
290 * lock on the pmap_lock (shared read), then lock the pmap
291 * and finally the pv_lists as needed [i.e. pmap lock before
292 * pv_list lock.]
293 *
294 * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
295 * Get a write lock on the pmap_lock (exclusive write); this
296 * also guarantees exclusive access to the pv_lists. Lock the
297 * pmaps as needed.
298 *
299 * At no time may any routine hold more than one pmap lock or more than
300 * one pv_list lock. Because interrupt level routines can allocate
301 * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
302 * kernel_pmap can only be held at splhigh.
303 */
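/*
 * Added sketch of protocol 2) above (illustrative only): the shape
 * used by pmap_enter and pmap_remove below for a managed page pa in
 * some pmap.
 *
 *	spl_t	spl;
 *
 *	PMAP_READ_LOCK(pmap, spl);	shared pmap_system_lock, then pmap lock
 *	LOCK_PVH(pa_index(pa));		pv_list lock taken after the pmap lock
 *	... update ptes and the pv list ...
 *	UNLOCK_PVH(pa_index(pa));
 *	PMAP_READ_UNLOCK(pmap, spl);
 */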
304
305#if NCPUS > 1
306/*
307 * We raise the interrupt level to splhigh, to block interprocessor
308 * interrupts during pmap operations. We must take the CPU out of
309 * the cpus_active set while interrupts are blocked.
310 */
311#define SPLVM(spl) { \
312 spl = splhigh(); \
313 mp_disable_preemption(); \
314 i_bit_clear(cpu_number(), &cpus_active); \
315 mp_enable_preemption(); \
316}
317
318#define SPLX(spl) { \
319 mp_disable_preemption(); \
320 i_bit_set(cpu_number(), &cpus_active); \
321 mp_enable_preemption(); \
322 splx(spl); \
323}
324
325/*
326 * Lock on pmap system
327 */
328lock_t pmap_system_lock;
329
330#define PMAP_READ_LOCK(pmap, spl) { \
331 SPLVM(spl); \
332 lock_read(&pmap_system_lock); \
333 simple_lock(&(pmap)->lock); \
334}
335
336#define PMAP_WRITE_LOCK(spl) { \
337 SPLVM(spl); \
338 lock_write(&pmap_system_lock); \
339}
340
341#define PMAP_READ_UNLOCK(pmap, spl) { \
342 simple_unlock(&(pmap)->lock); \
343 lock_read_done(&pmap_system_lock); \
344 SPLX(spl); \
345}
346
347#define PMAP_WRITE_UNLOCK(spl) { \
348 lock_write_done(&pmap_system_lock); \
349 SPLX(spl); \
350}
351
352#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
353 simple_lock(&(pmap)->lock); \
354 lock_write_to_read(&pmap_system_lock); \
355}
356
357#define LOCK_PVH(index) lock_pvh_pai(index)
358
359#define UNLOCK_PVH(index) unlock_pvh_pai(index)
360
361#define PMAP_FLUSH_TLBS() \
362{ \
363 flush_tlb(); \
364 i386_signal_cpus(MP_TLB_FLUSH); \
365}
366
367#define PMAP_RELOAD_TLBS() { \
368 i386_signal_cpus(MP_TLB_RELOAD); \
369 set_cr3(kernel_pmap->pdirbase); \
370}
371
372#define PMAP_INVALIDATE_PAGE(map, addr) { \
373 if (map == kernel_pmap) \
374 invlpg((vm_offset_t) addr); \
375 else \
376 flush_tlb(); \
377 i386_signal_cpus(MP_TLB_FLUSH); \
378}
379
380#else /* NCPUS > 1 */
381
382#if MACH_RT
383#define SPLVM(spl) { (spl) = splhigh(); }
384#define SPLX(spl) splx (spl)
385#else /* MACH_RT */
386#define SPLVM(spl)
387#define SPLX(spl)
388#endif /* MACH_RT */
389
390#define PMAP_READ_LOCK(pmap, spl) SPLVM(spl)
391#define PMAP_WRITE_LOCK(spl) SPLVM(spl)
392#define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl)
393#define PMAP_WRITE_UNLOCK(spl) SPLX(spl)
394#define PMAP_WRITE_TO_READ_LOCK(pmap)
395
396#if MACH_RT
397#define LOCK_PVH(index) disable_preemption()
398#define UNLOCK_PVH(index) enable_preemption()
399#else /* MACH_RT */
400#define LOCK_PVH(index)
401#define UNLOCK_PVH(index)
402#endif /* MACH_RT */
403
404#define PMAP_FLUSH_TLBS() flush_tlb()
405#define PMAP_RELOAD_TLBS() set_cr3(kernel_pmap->pdirbase)
406#define PMAP_INVALIDATE_PAGE(map, addr) { \
407 if (map == kernel_pmap) \
408 invlpg((vm_offset_t) addr); \
409 else \
410 flush_tlb(); \
411}
412
413#endif /* NCPUS > 1 */
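/*
 * Added note (illustrative): single-mapping updates in this file call
 * PMAP_INVALIDATE_PAGE before rewriting the pte, while bulk operations
 * such as pmap_remove rewrite many ptes and issue one PMAP_FLUSH_TLBS
 * at the end.
 *
 *	PMAP_INVALIDATE_PAGE(pmap, va);		one mapping changed
 *	...
 *	PMAP_FLUSH_TLBS();			after a batch of changes
 */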
414
415#define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */
416
417#if NCPUS > 1
418/*
419 * Structures to keep track of pending TLB invalidations
420 */
421cpu_set cpus_active;
422cpu_set cpus_idle;
423volatile boolean_t cpu_update_needed[NCPUS];
424
425
426#endif /* NCPUS > 1 */
427
428/*
429 * Other useful macros.
430 */
431#define current_pmap() (vm_map_pmap(current_act()->map))
432#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
433
434struct pmap kernel_pmap_store;
435pmap_t kernel_pmap;
436
437struct zone *pmap_zone; /* zone of pmap structures */
438
439int pmap_debug = 0; /* flag for debugging prints */
440int ptes_per_vm_page; /* number of hardware ptes needed
441 to map one VM page. */
442unsigned int inuse_ptepages_count = 0; /* debugging */
443
444/*
445 * Pmap cache. Cache is threaded through ref_count field of pmap.
446 * Max will eventually be constant -- variable for experimentation.
447 */
448int pmap_cache_max = 32;
449int pmap_alloc_chunk = 8;
450pmap_t pmap_cache_list;
451int pmap_cache_count;
452decl_simple_lock_data(,pmap_cache_lock)
453
454extern vm_offset_t hole_start, hole_end;
455
456extern char end;
457
458/*
459 * Page directory for kernel.
460 */
461pt_entry_t *kpde = 0; /* set by start.s - keep out of bss */
462
463#if DEBUG_ALIAS
464#define PMAP_ALIAS_MAX 32
465struct pmap_alias {
466 vm_offset_t rpc;
467 pmap_t pmap;
468 vm_offset_t va;
469 int cookie;
470#define PMAP_ALIAS_COOKIE 0xdeadbeef
471} pmap_aliasbuf[PMAP_ALIAS_MAX];
472int pmap_alias_index = 0;
473extern vm_offset_t get_rpc();
474
475#endif /* DEBUG_ALIAS */
476
477/*
478 * Given an offset and a map, compute the address of the
479 * pte. If the address is invalid with respect to the map
480 * then PT_ENTRY_NULL is returned (and the map may need to grow).
481 *
482 * This is only used in machine-dependent code.
483 */
484
485pt_entry_t *
486pmap_pte(
487 register pmap_t pmap,
488 register vm_offset_t addr)
489{
490 register pt_entry_t *ptp;
491 register pt_entry_t pte;
492
493 pte = pmap->dirbase[pdenum(pmap, addr)];
494 if ((pte & INTEL_PTE_VALID) == 0)
495 return(PT_ENTRY_NULL);
496 ptp = (pt_entry_t *)ptetokv(pte);
497 return(&ptp[ptenum(addr)]);
498
499}
500
501#define pmap_pde(pmap, addr) (&(pmap)->dirbase[pdenum(pmap, addr)])
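/*
 * Illustrative example (added): the two-level walk done by pmap_pte()
 * above, spelled out for a single virtual address va.
 *
 *	pt_entry_t *pde, *pte;
 *
 *	pde = pmap_pde(pmap, va);		page directory slot for va
 *	if (*pde & INTEL_PTE_VALID) {
 *		pte = (pt_entry_t *) ptetokv(*pde);	page table page
 *		pte = &pte[ptenum(va)];			entry within it
 *	}
 */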
502
503#define DEBUG_PTE_PAGE 0
504
505#if DEBUG_PTE_PAGE
506void
507ptep_check(
508 ptep_t ptep)
509{
510 register pt_entry_t *pte, *epte;
511 int ctu, ctw;
512
513 /* check the use and wired counts */
514 if (ptep == PTE_PAGE_NULL)
515 return;
516 pte = pmap_pte(ptep->pmap, ptep->va);
517 epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t);
518 ctu = 0;
519 ctw = 0;
520 while (pte < epte) {
521 if (pte->pfn != 0) {
522 ctu++;
523 if (pte->wired)
524 ctw++;
525 }
526 pte += ptes_per_vm_page;
527 }
528
529 if (ctu != ptep->use_count || ctw != ptep->wired_count) {
530 printf("use %d wired %d - actual use %d wired %d\n",
531 ptep->use_count, ptep->wired_count, ctu, ctw);
532 panic("pte count");
533 }
534}
535#endif /* DEBUG_PTE_PAGE */
536
537/*
538 * Map memory at initialization. The physical addresses being
539 * mapped are not managed and are never unmapped.
540 *
541 * For now, VM is already on, we only need to map the
542 * specified memory.
543 */
544vm_offset_t
545pmap_map(
546 register vm_offset_t virt,
547 register vm_offset_t start,
548 register vm_offset_t end,
549 register vm_prot_t prot)
550{
551 register int ps;
552
553 ps = PAGE_SIZE;
554 while (start < end) {
555 pmap_enter(kernel_pmap, virt, start, prot, 0, FALSE);
556 virt += ps;
557 start += ps;
558 }
559 return(virt);
560}
561
562/*
563 * Back-door routine for mapping kernel VM at initialization.
564 * Useful for mapping memory outside the range
565 * [vm_first_phys, vm_last_phys) (i.e., devices).
566 * Sets no-cache, A, D.
567 * Otherwise like pmap_map.
568 */
569vm_offset_t
570pmap_map_bd(
571 register vm_offset_t virt,
572 register vm_offset_t start,
573 register vm_offset_t end,
574 vm_prot_t prot)
575{
576 register pt_entry_t template;
577 register pt_entry_t *pte;
578
579 template = pa_to_pte(start)
580 | INTEL_PTE_NCACHE
581 | INTEL_PTE_REF
582 | INTEL_PTE_MOD
583 | INTEL_PTE_WIRED
584 | INTEL_PTE_VALID;
585 if (prot & VM_PROT_WRITE)
586 template |= INTEL_PTE_WRITE;
587
588 while (start < end) {
589 pte = pmap_pte(kernel_pmap, virt);
590 if (pte == PT_ENTRY_NULL)
591 panic("pmap_map_bd: Invalid kernel address\n");
592 WRITE_PTE_FAST(pte, template)
593 pte_increment_pa(template);
594 virt += PAGE_SIZE;
595 start += PAGE_SIZE;
596 }
597
598 PMAP_FLUSH_TLBS();
599
600 return(virt);
601}
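/*
 * Usage sketch (illustrative; dev_phys_base and dev_size are
 * hypothetical names): mapping a device aperture at bootstrap with the
 * back-door routine above.
 *
 *	vm_offset_t next_va;
 *
 *	next_va = pmap_map_bd(virt, dev_phys_base,
 *			      dev_phys_base + dev_size,
 *			      VM_PROT_READ | VM_PROT_WRITE);
 */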
602
603extern int cnvmem;
604extern char *first_avail;
605extern vm_offset_t virtual_avail, virtual_end;
606extern vm_offset_t avail_start, avail_end, avail_next;
607
608/*
609 * Bootstrap the system enough to run with virtual memory.
610 * Map the kernel's code and data, and allocate the system page table.
611 * Called with mapping OFF. Page_size must already be set.
612 *
613 * Parameters:
614 * load_start: PA where kernel was loaded
615 * avail_start PA of first available physical page -
616 * after kernel page tables
617 * avail_end PA of last available physical page
618 * virtual_avail VA of first available page -
619 * after kernel page tables
620 * virtual_end VA of last available page -
621 * end of kernel address space
622 *
623 * &start_text start of kernel text
624 * &etext end of kernel text
625 */
626
627void
628pmap_bootstrap(
629 vm_offset_t load_start)
630{
631 vm_offset_t va, tva, paddr;
632 pt_entry_t template;
633 pt_entry_t *pde, *pte, *ptend;
634 vm_size_t morevm; /* VM space for kernel map */
635
636 /*
637 * Set ptes_per_vm_page for general use.
638 */
639 ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES;
640
641 /*
642 * The kernel's pmap is statically allocated so we don't
643 * have to use pmap_create, which is unlikely to work
644 * correctly at this part of the boot sequence.
645 */
646
647 kernel_pmap = &kernel_pmap_store;
648
649#if NCPUS > 1
650 lock_init(&pmap_system_lock,
651 FALSE, /* NOT a sleep lock */
652 ETAP_VM_PMAP_SYS,
653 ETAP_VM_PMAP_SYS_I);
654#endif /* NCPUS > 1 */
655
656 simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
657 simple_lock_init(&pv_free_list_lock, ETAP_VM_PMAP_FREE);
658
659 kernel_pmap->ref_count = 1;
660
661 /*
662 * The kernel page directory has been allocated;
663 * its virtual address is in kpde.
664 *
665 * Enough kernel page table pages have been allocated
666 * to map low system memory, kernel text, kernel data/bss,
667 * kdb's symbols, and the page directory and page tables.
668 *
669 * No other physical memory has been allocated.
670 */
671
672 /*
673 * Start mapping virtual memory to physical memory, 1-1,
674 * at end of mapped memory.
675 */
676
677 virtual_avail = phystokv(avail_start);
678 virtual_end = phystokv(avail_end);
679
680 pde = kpde;
681 pde += pdenum(kernel_pmap, virtual_avail);
682
683 if (pte_to_pa(*pde) == 0) {
684 /* This pte has not been allocated */
685 pte = 0; ptend = 0;
686 }
687 else {
688 pte = (pt_entry_t *)ptetokv(*pde);
689 /* first pte of page */
690 ptend = pte+NPTES; /* last pte of page */
691 pte += ptenum(virtual_avail); /* point to pte that
692 maps first avail VA */
693 pde++; /* point pde to first empty slot */
694 }
695
696 template = pa_to_pte(avail_start)
697 | INTEL_PTE_VALID
698 | INTEL_PTE_WRITE;
699
700 for (va = virtual_avail; va < virtual_end; va += INTEL_PGBYTES) {
701 if (pte >= ptend) {
702 pte = (pt_entry_t *)phystokv(virtual_avail);
703 ptend = pte + NPTES;
704 virtual_avail = (vm_offset_t)ptend;
705 if (virtual_avail == hole_start)
706 virtual_avail = hole_end;
707 *pde = PA_TO_PTE((vm_offset_t) pte)
708 | INTEL_PTE_VALID
709 | INTEL_PTE_WRITE;
710 pde++;
711 }
712 WRITE_PTE_FAST(pte, template)
713 pte++;
714 pte_increment_pa(template);
715 }
716
717 avail_start = virtual_avail - VM_MIN_KERNEL_ADDRESS;
718 avail_next = avail_start;
719
720 /*
721 * Figure out maximum kernel address.
722 * Kernel virtual space is:
723 * - at least three times physical memory
724 * - at least VM_MIN_KERNEL_ADDRESS
725 * - limited by VM_MAX_KERNEL_ADDRESS
726 */
727
728 morevm = 3*avail_end;
729 if (virtual_end + morevm > VM_MAX_KERNEL_ADDRESS)
730 morevm = VM_MAX_KERNEL_ADDRESS - virtual_end + 1;
731
732/*
733 * startup requires additional virtual memory (for tables, buffers,
734 * etc.). The kd driver may also require some of that memory to
735 * access the graphics board.
736 *
737 */
738 *(int *)&template = 0;
739
740 /*
741 * Leave room for kernel-loaded servers, which have been linked at
742 * addresses from VM_MIN_KERNEL_LOADED_ADDRESS to
743 * VM_MAX_KERNEL_LOADED_ADDRESS.
744 */
745 if (virtual_end + morevm < VM_MAX_KERNEL_LOADED_ADDRESS + 1)
746 morevm = VM_MAX_KERNEL_LOADED_ADDRESS + 1 - virtual_end;
747
748
749 virtual_end += morevm;
750 for (tva = va; tva < virtual_end; tva += INTEL_PGBYTES) {
751 if (pte >= ptend) {
752 pmap_next_page(&paddr);
753 pte = (pt_entry_t *)phystokv(paddr);
754 ptend = pte + NPTES;
755 *pde = PA_TO_PTE((vm_offset_t) pte)
756 | INTEL_PTE_VALID
757 | INTEL_PTE_WRITE;
758 pde++;
759 }
760 WRITE_PTE_FAST(pte, template)
761 pte++;
762 }
763
764 virtual_avail = va;
765
766 /* Push the virtual avail address above hole_end */
767 if (virtual_avail < hole_end)
768 virtual_avail = hole_end;
769
770 /*
771 * c.f. comment above
772 *
773 */
774 virtual_end = va + morevm;
775 while (pte < ptend)
776 *pte++ = 0;
777
778 /*
779 * invalidate user virtual addresses
780 */
781 memset((char *)kpde,
782 0,
783 pdenum(kernel_pmap,VM_MIN_KERNEL_ADDRESS)*sizeof(pt_entry_t));
784 kernel_pmap->dirbase = kpde;
785 printf("Kernel virtual space from 0x%x to 0x%x.\n",
786 VM_MIN_KERNEL_ADDRESS, virtual_end);
787
788 avail_start = avail_next;
789 printf("Available physical space from 0x%x to 0x%x\n",
790 avail_start, avail_end);
791
792 kernel_pmap->pdirbase = kvtophys((vm_offset_t)kernel_pmap->dirbase);
793
794}
795
796void
797pmap_virtual_space(
798 vm_offset_t *startp,
799 vm_offset_t *endp)
800{
801 *startp = virtual_avail;
802 *endp = virtual_end;
803}
804
805/*
806 * Initialize the pmap module.
807 * Called by vm_init, to initialize any structures that the pmap
808 * system needs to map virtual memory.
809 */
810void
811pmap_init(void)
812{
813 register long npages;
814 vm_offset_t addr;
815 register vm_size_t s;
816 int i;
817
818 /*
819 * Allocate memory for the pv_head_table and its lock bits,
820 * the modify bit array, and the pte_page table.
821 */
822
823 npages = atop(avail_end - avail_start);
824 s = (vm_size_t) (sizeof(struct pv_entry) * npages
825 + pv_lock_table_size(npages)
826 + npages);
827
828 s = round_page(s);
829 if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
830 panic("pmap_init");
831
832 memset((char *)addr, 0, s);
833
834 /*
835 * Allocate the structures first to preserve word-alignment.
836 */
837 pv_head_table = (pv_entry_t) addr;
838 addr = (vm_offset_t) (pv_head_table + npages);
839
840 pv_lock_table = (char *) addr;
841 addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
842
843 pmap_phys_attributes = (char *) addr;
844
845 /*
846 * Create the zone of physical maps,
847 * and of the physical-to-virtual entries.
848 */
849 s = (vm_size_t) sizeof(struct pmap);
850 pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
851 s = (vm_size_t) sizeof(struct pv_entry);
852 pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
853
854 /*
855 * Only now, when all of the data structures are allocated,
856 * can we set vm_first_phys and vm_last_phys. If we set them
857 * too soon, the kmem_alloc_wired above will try to use these
858 * data structures and blow up.
859 */
860
861 vm_first_phys = avail_start;
862 vm_last_phys = avail_end;
863 pmap_initialized = TRUE;
864
865 /*
866 * Initialize pmap cache.
867 */
868 pmap_cache_list = PMAP_NULL;
869 pmap_cache_count = 0;
870 simple_lock_init(&pmap_cache_lock, ETAP_VM_PMAP_CACHE);
871}
872
873
874#define pmap_valid_page(x) ((avail_start <= x) && (x < avail_end))
875
876
877#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
878
879boolean_t
880pmap_verify_free(
881 vm_offset_t phys)
882{
883 pv_entry_t pv_h;
884 int pai;
885 spl_t spl;
886 boolean_t result;
887
888 assert(phys != vm_page_fictitious_addr);
889 if (!pmap_initialized)
890 return(TRUE);
891
892 if (!pmap_valid_page(phys))
893 return(FALSE);
894
895 PMAP_WRITE_LOCK(spl);
896
897 pai = pa_index(phys);
898 pv_h = pai_to_pvh(pai);
899
900 result = (pv_h->pmap == PMAP_NULL);
901 PMAP_WRITE_UNLOCK(spl);
902
903 return(result);
904}
905
906/*
907 * Create and return a physical map.
908 *
909 * If the size specified for the map
910 * is zero, the map is an actual physical
911 * map, and may be referenced by the
912 * hardware.
913 *
914 * If the size specified is non-zero,
915 * the map will be used in software only, and
916 * is bounded by that size.
917 */
918pmap_t
919pmap_create(
920 vm_size_t size)
921{
922 register pmap_t p;
923 register pmap_statistics_t stats;
924
925 /*
926 * A software use-only map doesn't even need a map.
927 */
928
929 if (size != 0) {
930 return(PMAP_NULL);
931 }
932
933 /*
934 * Try to get cached pmap, if this fails,
935 * allocate a pmap struct from the pmap_zone. Then allocate
936 * the page descriptor table from the pd_zone.
937 */
938
939 simple_lock(&pmap_cache_lock);
940 while ((p = pmap_cache_list) == PMAP_NULL) {
941
942 vm_offset_t dirbases;
943 register int i;
944
945 simple_unlock(&pmap_cache_lock);
946
947#if NCPUS > 1
948 /*
949 * XXX NEEDS MP DOING ALLOC logic so that if multiple processors
950 * XXX get here, only one allocates a chunk of pmaps.
951 * (for now we'll just let it go - safe but wasteful)
952 */
953#endif
954
955 /*
956 * Allocate a chunk of pmaps. A single kmem_alloc_wired
957 * operation reduces kernel map fragmentation.
958 */
959
960 if (kmem_alloc_wired(kernel_map, &dirbases,
961 pmap_alloc_chunk * INTEL_PGBYTES)
962 != KERN_SUCCESS)
963 panic("pmap_create.1");
964
965 for (i = pmap_alloc_chunk; i > 0 ; i--) {
966 p = (pmap_t) zalloc(pmap_zone);
967 if (p == PMAP_NULL)
968 panic("pmap_create.2");
969
970 /*
971 * Initialize pmap. Don't bother with
972 * ref count as cache list is threaded
973 * through it. It'll be set on cache removal.
974 */
975 p->dirbase = (pt_entry_t *) dirbases;
976 dirbases += INTEL_PGBYTES;
977 memcpy(p->dirbase, kpde, INTEL_PGBYTES);
978 p->pdirbase = kvtophys((vm_offset_t)p->dirbase);
979
980 simple_lock_init(&p->lock, ETAP_VM_PMAP);
981 p->cpus_using = 0;
982
983 /*
984 * Initialize statistics.
985 */
986 stats = &p->stats;
987 stats->resident_count = 0;
988 stats->wired_count = 0;
989
990 /*
991 * Insert into cache
992 */
993 simple_lock(&pmap_cache_lock);
994 p->ref_count = (int) pmap_cache_list;
995 pmap_cache_list = p;
996 pmap_cache_count++;
997 simple_unlock(&pmap_cache_lock);
998 }
999 simple_lock(&pmap_cache_lock);
1000 }
1001
1002 assert(p->stats.resident_count == 0);
1003 assert(p->stats.wired_count == 0);
1004 p->stats.resident_count = 0;
1005 p->stats.wired_count = 0;
1006
1007 pmap_cache_list = (pmap_t) p->ref_count;
1008 p->ref_count = 1;
1009 pmap_cache_count--;
1010 simple_unlock(&pmap_cache_lock);
1011
1012 return(p);
1013}
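/*
 * Usage sketch (illustrative only): a hardware pmap is created with
 * size 0; a nonzero size requests a software-only map, which this
 * implementation does not provide (PMAP_NULL is returned).
 *
 *	pmap_t p;
 *
 *	p = pmap_create(0);		may be satisfied from the pmap cache
 *	...
 *	pmap_destroy(p);		drops the reference taken here
 */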
1014
1015/*
1016 * Retire the given physical map from service.
1017 * Should only be called if the map contains
1018 * no valid mappings.
1019 */
1020
1021void
1022pmap_destroy(
1023 register pmap_t p)
1024{
1025 register pt_entry_t *pdep;
1026 register vm_offset_t pa;
1027 register int c;
1028 spl_t s;
1029 register vm_page_t m;
1030
1031 if (p == PMAP_NULL)
1032 return;
1033
1034 SPLVM(s);
1035 simple_lock(&p->lock);
1036 c = --p->ref_count;
1037 if (c == 0) {
1038 register int my_cpu;
1039
1040 mp_disable_preemption();
1041 my_cpu = cpu_number();
1042
1043 /*
1044 * If some cpu is not using the physical pmap pointer that it
1045 * is supposed to be (see set_dirbase), we might be using the
1046 * pmap that is being destroyed! Make sure we are
1047 * physically on the right pmap:
1048 */
1049
1050
1051 if (real_pmap[my_cpu] == p) {
1052 PMAP_CPU_CLR(p, my_cpu);
1053 real_pmap[my_cpu] = kernel_pmap;
1054 PMAP_RELOAD_TLBS();
1055 }
1056 mp_enable_preemption();
1057 }
1058 simple_unlock(&p->lock);
1059 SPLX(s);
1060
1061 if (c != 0) {
1062 return; /* still in use */
1063 }
1064
1065 /*
1066 * Free the memory maps, then the
1067 * pmap structure.
1068 */
1069 pdep = p->dirbase;
1070 while (pdep < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)]) {
1071 if (*pdep & INTEL_PTE_VALID) {
1072 pa = pte_to_pa(*pdep);
1073 vm_object_lock(pmap_object);
1074 m = vm_page_lookup(pmap_object, pa);
1075 if (m == VM_PAGE_NULL)
1076 panic("pmap_destroy: pte page not in object");
1077 vm_page_lock_queues();
1078 vm_page_free(m);
1079 inuse_ptepages_count--;
1080 vm_object_unlock(pmap_object);
1081 vm_page_unlock_queues();
1082
1083 /*
1084 * Clear the pdes; this pmap might be headed for the cache.
1085 */
1086 c = ptes_per_vm_page;
1087 do {
1088 *pdep = 0;
1089 pdep++;
1090 } while (--c > 0);
1091 }
1092 else {
1093 pdep += ptes_per_vm_page;
1094 }
1095
1096 }
1097 assert(p->stats.resident_count == 0);
1098 assert(p->stats.wired_count == 0);
1099
1100 /*
1101 * Add to cache if not already full
1102 */
1103 simple_lock(&pmap_cache_lock);
1104 if (pmap_cache_count <= pmap_cache_max) {
1105 p->ref_count = (int) pmap_cache_list;
1106 pmap_cache_list = p;
1107 pmap_cache_count++;
1108 simple_unlock(&pmap_cache_lock);
1109 }
1110 else {
1111 simple_unlock(&pmap_cache_lock);
1112 kmem_free(kernel_map, (vm_offset_t)p->dirbase, INTEL_PGBYTES);
1113 zfree(pmap_zone, (vm_offset_t) p);
1114 }
1115}
1116
1117/*
1118 * Add a reference to the specified pmap.
1119 */
1120
1121void
1122pmap_reference(
1123 register pmap_t p)
1124{
1125 spl_t s;
1126
1127 if (p != PMAP_NULL) {
1128 SPLVM(s);
1129 simple_lock(&p->lock);
1130 p->ref_count++;
1131 simple_unlock(&p->lock);
1132 SPLX(s);
1133 }
1134}
1135
1136/*
1137 * Remove a range of hardware page-table entries.
1138 * The entries given are the first (inclusive)
1139 * and last (exclusive) entries for the VM pages.
1140 * The virtual address is the va for the first pte.
1141 *
1142 * The pmap must be locked.
1143 * If the pmap is not the kernel pmap, the range must lie
1144 * entirely within one pte-page. This is NOT checked.
1145 * Assumes that the pte-page exists.
1146 */
1147
1148/* static */
1149void
1150pmap_remove_range(
1151 pmap_t pmap,
1152 vm_offset_t va,
1153 pt_entry_t *spte,
1154 pt_entry_t *epte)
1155{
1156 register pt_entry_t *cpte;
1157 int num_removed, num_unwired;
1158 int pai;
1159 vm_offset_t pa;
1160
1161#if DEBUG_PTE_PAGE
1162 if (pmap != kernel_pmap)
1163 ptep_check(get_pte_page(spte));
1164#endif /* DEBUG_PTE_PAGE */
1165 num_removed = 0;
1166 num_unwired = 0;
1167
1168 for (cpte = spte; cpte < epte;
1169 cpte += ptes_per_vm_page, va += PAGE_SIZE) {
1170
1171 pa = pte_to_pa(*cpte);
1172 if (pa == 0)
1173 continue;
1174
1175 num_removed++;
1176 if (iswired(*cpte))
1177 num_unwired++;
1178
1179 if (!valid_page(pa)) {
1180
1181 /*
1182 * Outside range of managed physical memory.
1183 * Just remove the mappings.
1184 */
1185 register int i = ptes_per_vm_page;
1186 register pt_entry_t *lpte = cpte;
1187 do {
1188 *lpte = 0;
1189 lpte++;
1190 } while (--i > 0);
1191 continue;
1192 }
1193
1194 pai = pa_index(pa);
1195 LOCK_PVH(pai);
1196
1197 /*
1198 * Get the modify and reference bits.
1199 */
1200 {
1201 register int i;
1202 register pt_entry_t *lpte;
1203
1204 i = ptes_per_vm_page;
1205 lpte = cpte;
1206 do {
1207 pmap_phys_attributes[pai] |=
1208 *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
1209 *lpte = 0;
1210 lpte++;
1211 } while (--i > 0);
1212 }
1213
1214 /*
1215 * Remove the mapping from the pvlist for
1216 * this physical page.
1217 */
1218 {
1219 register pv_entry_t pv_h, prev, cur;
1220
1221 pv_h = pai_to_pvh(pai);
1222 if (pv_h->pmap == PMAP_NULL) {
1223 panic("pmap_remove: null pv_list!");
1224 }
1225 if (pv_h->va == va && pv_h->pmap == pmap) {
1226 /*
1227 * Header is the pv_entry. Copy the next one
1228 * to header and free the next one (we cannot
1229 * free the header)
1230 */
1231 cur = pv_h->next;
1232 if (cur != PV_ENTRY_NULL) {
1233 *pv_h = *cur;
1234 PV_FREE(cur);
1235 }
1236 else {
1237 pv_h->pmap = PMAP_NULL;
1238 }
1239 }
1240 else {
1241 cur = pv_h;
1242 do {
1243 prev = cur;
1244 if ((cur = prev->next) == PV_ENTRY_NULL) {
1245 panic("pmap-remove: mapping not in pv_list!");
1246 }
1247 } while (cur->va != va || cur->pmap != pmap);
1248 prev->next = cur->next;
1249 PV_FREE(cur);
1250 }
1251 UNLOCK_PVH(pai);
1252 }
1253 }
1254
1255 /*
1256 * Update the counts
1257 */
1258 assert(pmap->stats.resident_count >= num_removed);
1259 pmap->stats.resident_count -= num_removed;
1260 assert(pmap->stats.wired_count >= num_unwired);
1261 pmap->stats.wired_count -= num_unwired;
1262}
1263
1264/*
1265 * Remove phys addr if mapped in specified map
1266 *
1267 */
1268void
1269pmap_remove_some_phys(
1270 pmap_t map,
1271 vm_offset_t phys_addr)
1272{
1273
1274/* Implement to support working set code */
1275
1276}
1277
1278
1279/*
1280 * Remove the given range of addresses
1281 * from the specified map.
1282 *
1283 * It is assumed that the start and end are properly
1284 * rounded to the hardware page size.
1285 */
1286
1287void
1288pmap_remove(
1289 pmap_t map,
1290 vm_offset_t s,
1291 vm_offset_t e)
1292{
1293 spl_t spl;
1294 register pt_entry_t *pde;
1295 register pt_entry_t *spte, *epte;
1296 vm_offset_t l;
1297
1298 if (map == PMAP_NULL)
1299 return;
1300
1301 PMAP_READ_LOCK(map, spl);
1302
1303 pde = pmap_pde(map, s);
1304
1305 while (s < e) {
1306 l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
1307 if (l > e)
1308 l = e;
1309 if (*pde & INTEL_PTE_VALID) {
1310 spte = (pt_entry_t *)ptetokv(*pde);
1311 spte = &spte[ptenum(s)];
1312 epte = &spte[intel_btop(l-s)];
1313 pmap_remove_range(map, s, spte, epte);
1314 }
1315 s = l;
1316 pde++;
1317 }
1318
1319 PMAP_FLUSH_TLBS();
1320
1321 PMAP_READ_UNLOCK(map, spl);
1322}
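/*
 * Usage sketch (illustrative; the range is hypothetical): both
 * addresses must already be rounded to the hardware page size.
 *
 *	pmap_remove(map, trunc_page(start_va), round_page(end_va));
 */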
1323
1324/*
1325 * Routine: pmap_page_protect
1326 *
1327 * Function:
1328 * Lower the permission for all mappings to a given
1329 * page.
1330 */
1331void
1332pmap_page_protect(
1333 vm_offset_t phys,
1334 vm_prot_t prot)
1335{
1336 pv_entry_t pv_h, prev;
1337 register pv_entry_t pv_e;
1338 register pt_entry_t *pte;
1339 int pai;
1340 register pmap_t pmap;
1341 spl_t spl;
1342 boolean_t remove;
1343
1344 assert(phys != vm_page_fictitious_addr);
1345 if (!valid_page(phys)) {
1346 /*
1347 * Not a managed page.
1348 */
1349 return;
1350 }
1351
1352 /*
1353 * Determine the new protection.
1354 */
1355 switch (prot) {
1356 case VM_PROT_READ:
1357 case VM_PROT_READ|VM_PROT_EXECUTE:
1358 remove = FALSE;
1359 break;
1360 case VM_PROT_ALL:
1361 return; /* nothing to do */
1362 default:
1363 remove = TRUE;
1364 break;
1365 }
1366
1367 /*
1368 * Lock the pmap system first, since we will be changing
1369 * several pmaps.
1370 */
1371
1372 PMAP_WRITE_LOCK(spl);
1373
1374 pai = pa_index(phys);
1375 pv_h = pai_to_pvh(pai);
1376
1377 /*
1378 * Walk down PV list, changing or removing all mappings.
1379 * We do not have to lock the pv_list because we have
1380 * the entire pmap system locked.
1381 */
1382 if (pv_h->pmap != PMAP_NULL) {
1383
1384 prev = pv_e = pv_h;
1385 do {
1386 pmap = pv_e->pmap;
1387 /*
1388 * Lock the pmap to block pmap_extract and similar routines.
1389 */
1390 simple_lock(&pmap->lock);
1391
1392 {
1393 register vm_offset_t va;
1394
1395 va = pv_e->va;
1396 pte = pmap_pte(pmap, va);
1397
1398 /*
1399 * Consistency checks.
1400 */
1401 /* assert(*pte & INTEL_PTE_VALID); XXX */
1402 /* assert(pte_to_phys(*pte) == phys); */
1403
1404 /*
1405 * Invalidate TLBs for all CPUs using this mapping.
1406 */
1407 PMAP_INVALIDATE_PAGE(pmap, va);
1408 }
1409
1410 /*
1411 * Remove the mapping if new protection is NONE
1412 * or if write-protecting a kernel mapping.
1413 */
1414 if (remove || pmap == kernel_pmap) {
1415 /*
1416 * Remove the mapping, collecting any modify bits.
1417 */
1418 {
1419 register int i = ptes_per_vm_page;
1420
1421 do {
1422 pmap_phys_attributes[pai] |=
1423 *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
1424 *pte++ = 0;
1425 } while (--i > 0);
1426 }
1427
1428 assert(pmap->stats.resident_count >= 1);
1429 pmap->stats.resident_count--;
1430
1431 /*
1432 * Remove the pv_entry.
1433 */
1434 if (pv_e == pv_h) {
1435 /*
1436 * Fix up head later.
1437 */
1438 pv_h->pmap = PMAP_NULL;
1439 }
1440 else {
1441 /*
1442 * Delete this entry.
1443 */
1444 prev->next = pv_e->next;
1445 PV_FREE(pv_e);
1446 }
1447 }
1448 else {
1449 /*
1450 * Write-protect.
1451 */
1452 register int i = ptes_per_vm_page;
1453
1454 do {
1455 *pte &= ~INTEL_PTE_WRITE;
1456 pte++;
1457 } while (--i > 0);
1458
1459 /*
1460 * Advance prev.
1461 */
1462 prev = pv_e;
1463 }
1464
1465 simple_unlock(&pmap->lock);
1466
1467 } while ((pv_e = prev->next) != PV_ENTRY_NULL);
1468
1469 /*
1470 * If pv_head mapping was removed, fix it up.
1471 */
1472 if (pv_h->pmap == PMAP_NULL) {
1473 pv_e = pv_h->next;
1474 if (pv_e != PV_ENTRY_NULL) {
1475 *pv_h = *pv_e;
1476 PV_FREE(pv_e);
1477 }
1478 }
1479 }
1480
1481 PMAP_WRITE_UNLOCK(spl);
1482}
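/*
 * Usage sketch (illustrative): lowering permissions on every mapping
 * of a managed physical page; VM_PROT_NONE falls into the default case
 * above and removes the mappings outright.
 *
 *	pmap_page_protect(phys, VM_PROT_READ);		write-protect
 *	pmap_page_protect(phys, VM_PROT_NONE);		remove all mappings
 */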
1483
1484/*
1485 * Set the physical protection on the
1486 * specified range of this map as requested.
1487 * Will not increase permissions.
1488 */
1489void
1490pmap_protect(
1491 pmap_t map,
1492 vm_offset_t s,
1493 vm_offset_t e,
1494 vm_prot_t prot)
1495{
1496 register pt_entry_t *pde;
1497 register pt_entry_t *spte, *epte;
1498 vm_offset_t l;
1499 spl_t spl;
1500
1501
1502 if (map == PMAP_NULL)
1503 return;
1504
1505 /*
1506 * Determine the new protection.
1507 */
1508 switch (prot) {
1509 case VM_PROT_READ:
1510 case VM_PROT_READ|VM_PROT_EXECUTE:
1511 break;
1512 case VM_PROT_READ|VM_PROT_WRITE:
1513 case VM_PROT_ALL:
1514 return; /* nothing to do */
1515 default:
1516 pmap_remove(map, s, e);
1517 return;
1518 }
1519
1520 /*
1521 * If write-protecting in the kernel pmap,
1522 * remove the mappings; the i386 ignores
1523 * the write-permission bit in kernel mode.
1524 *
1525 * XXX should be #if'd for i386
1526 */
1527
1528 if (cpuid_family == CPUID_FAMILY_386)
1529 if (map == kernel_pmap) {
1530 pmap_remove(map, s, e);
1531 return;
1532 }
1533
1534 SPLVM(spl);
1535 simple_lock(&map->lock);
1536
1537
1538 pde = pmap_pde(map, s);
1539 while (s < e) {
1540 l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
1541 if (l > e)
1542 l = e;
1543 if (*pde & INTEL_PTE_VALID) {
1544 spte = (pt_entry_t *)ptetokv(*pde);
1545 spte = &spte[ptenum(s)];
1546 epte = &spte[intel_btop(l-s)];
1547
1548 while (spte < epte) {
1549 if (*spte & INTEL_PTE_VALID)
1550 *spte &= ~INTEL_PTE_WRITE;
1551 spte++;
1552 }
1553 }
1554 s = l;
1555 pde++;
1556 }
1557
1558 PMAP_FLUSH_TLBS();
1559
1560 simple_unlock(&map->lock);
1561 SPLX(spl);
1562}
1563
1564
1565
1566/*
1567 * Insert the given physical page (p) at
1568 * the specified virtual address (v) in the
1569 * target physical map with the protection requested.
1570 *
1571 * If specified, the page will be wired down, meaning
1572 * that the related pte cannot be reclaimed.
1573 *
1574 * NB: This is the only routine which MAY NOT lazy-evaluate
1575 * or lose information. That is, this routine must actually
1576 * insert this page into the given map NOW.
1577 */
1578void
1579pmap_enter(
1580 register pmap_t pmap,
1581 vm_offset_t v,
1582 register vm_offset_t pa,
1583 vm_prot_t prot,
1584 unsigned int flags,
1585 boolean_t wired)
1586{
1587 register pt_entry_t *pte;
1588 register pv_entry_t pv_h;
1589 register int i, pai;
1590 pv_entry_t pv_e;
1591 pt_entry_t template;
1592 spl_t spl;
1593 vm_offset_t old_pa;
1594
1595 XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n",
1596 current_thread()->top_act,
1597 current_thread(),
1598 pmap, v, pa);
1599
1600 assert(pa != vm_page_fictitious_addr);
1601 if (pmap_debug)
1602 printf("pmap(%x, %x)\n", v, pa);
1603 if (pmap == PMAP_NULL)
1604 return;
1605
1606 if (cpuid_family == CPUID_FAMILY_386)
1607 if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0
1608 && !wired /* hack for io_wire */ ) {
1609 /*
1610 * Because the 386 ignores write protection in kernel mode,
1611 * we cannot enter a read-only kernel mapping, and must
1612 * remove an existing mapping if changing it.
1613 *
1614 * XXX should be #if'd for i386
1615 */
1616 PMAP_READ_LOCK(pmap, spl);
1617
1618 pte = pmap_pte(pmap, v);
1619 if (pte != PT_ENTRY_NULL && pte_to_pa(*pte) != 0) {
1620 /*
1621 * Invalidate the translation buffer,
1622 * then remove the mapping.
1623 */
1624 PMAP_INVALIDATE_PAGE(pmap, v);
1625 pmap_remove_range(pmap, v, pte,
1626 pte + ptes_per_vm_page);
1627 }
1628 PMAP_READ_UNLOCK(pmap, spl);
1629 return;
1630 }
1631
1632 /*
1633 * Must allocate a new pvlist entry while we're unlocked;
1634 * zalloc may cause pageout (which will lock the pmap system).
1635 * If we determine we need a pvlist entry, we will unlock
1636 * and allocate one. Then we will retry, throwing away
1637 * the allocated entry later (if we no longer need it).
1638 */
1639 pv_e = PV_ENTRY_NULL;
1640Retry:
1641 PMAP_READ_LOCK(pmap, spl);
1642
1643 /*
1644 * Expand pmap to include this pte. Assume that
1645 * pmap is always expanded to include enough hardware
1646 * pages to map one VM page.
1647 */
1648
1649 while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
1650 /*
1651 * Must unlock to expand the pmap.
1652 */
1653 PMAP_READ_UNLOCK(pmap, spl);
1654
1655 pmap_expand(pmap, v);
1656
1657 PMAP_READ_LOCK(pmap, spl);
1658 }
1659 /*
1660 * Special case if the physical page is already mapped
1661 * at this address.
1662 */
1663 old_pa = pte_to_pa(*pte);
1664 if (old_pa == pa) {
1665 /*
1666 * May be changing its wired attribute or protection
1667 */
1668
1669 template = pa_to_pte(pa) | INTEL_PTE_VALID;
1670 if (pmap != kernel_pmap)
1671 template |= INTEL_PTE_USER;
1672 if (prot & VM_PROT_WRITE)
1673 template |= INTEL_PTE_WRITE;
1674 if (wired) {
1675 template |= INTEL_PTE_WIRED;
1676 if (!iswired(*pte))
1677 pmap->stats.wired_count++;
1678 }
1679 else {
1680 if (iswired(*pte)) {
1681 assert(pmap->stats.wired_count >= 1);
1682 pmap->stats.wired_count--;
1683 }
1684 }
1685
1686 PMAP_INVALIDATE_PAGE(pmap, v);
1687
1688 i = ptes_per_vm_page;
1689 do {
1690 if (*pte & INTEL_PTE_MOD)
1691 template |= INTEL_PTE_MOD;
1692 WRITE_PTE(pte, template)
1693 pte++;
1694 pte_increment_pa(template);
1695 } while (--i > 0);
1696
1697 goto Done;
1698 }
1699
1700 /*
1701 * Outline of code from here:
1702 * 1) If va was mapped, update TLBs, remove the mapping
1703 * and remove old pvlist entry.
1704 * 2) Add pvlist entry for new mapping
1705 * 3) Enter new mapping.
1706 *
1707 * SHARING_FAULTS complicates this slightly in that it cannot
1708 * replace the mapping, but must remove it (because adding the
1709 * pvlist entry for the new mapping may remove others), and
1710 * hence always enters the new mapping at step 3)
1711 *
1712 * If the old physical page is not managed step 1) is skipped
1713 * (except for updating the TLBs), and the mapping is
1714 * overwritten at step 3). If the new physical page is not
1715 * managed, step 2) is skipped.
1716 */
1717
1718 if (old_pa != (vm_offset_t) 0) {
1719
1720 PMAP_INVALIDATE_PAGE(pmap, v);
1721
1722#if DEBUG_PTE_PAGE
1723 if (pmap != kernel_pmap)
1724 ptep_check(get_pte_page(pte));
1725#endif /* DEBUG_PTE_PAGE */
1726
1727 /*
1728 * Don't do anything to pages outside valid memory here.
1729 * Instead convince the code that enters a new mapping
1730 * to overwrite the old one.
1731 */
1732
1733 if (valid_page(old_pa)) {
1734
1735 pai = pa_index(old_pa);
1736 LOCK_PVH(pai);
1737
1738 assert(pmap->stats.resident_count >= 1);
1739 pmap->stats.resident_count--;
1740 if (iswired(*pte)) {
1741 assert(pmap->stats.wired_count >= 1);
1742 pmap->stats.wired_count--;
1743 }
1744 i = ptes_per_vm_page;
1745 do {
1746 pmap_phys_attributes[pai] |=
1747 *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
1748 WRITE_PTE(pte, 0)
1749 pte++;
1750 pte_increment_pa(template);
1751 } while (--i > 0);
1752
1753 /*
1754 * Put pte back to beginning of page since it'll be
1755 * used later to enter the new page.
1756 */
1757 pte -= ptes_per_vm_page;
1758
1759 /*
1760 * Remove the mapping from the pvlist for
1761 * this physical page.
1762 */
1763 {
1764 register pv_entry_t prev, cur;
1765
1766 pv_h = pai_to_pvh(pai);
1767 if (pv_h->pmap == PMAP_NULL) {
1768 panic("pmap_enter: null pv_list!");
1769 }
1770 if (pv_h->va == v && pv_h->pmap == pmap) {
1771 /*
1772 * Header is the pv_entry. Copy the next one
1773 * to header and free the next one (we cannot
1774 * free the header)
1775 */
1776 cur = pv_h->next;
1777 if (cur != PV_ENTRY_NULL) {
1778 *pv_h = *cur;
1779 pv_e = cur;
1780 }
1781 else {
1782 pv_h->pmap = PMAP_NULL;
1783 }
1784 }
1785 else {
1786 cur = pv_h;
1787 do {
1788 prev = cur;
1789 if ((cur = prev->next) == PV_ENTRY_NULL) {
1790 panic("pmap_enter: mapping not in pv_list!");
1791 }
1792 } while (cur->va != v || cur->pmap != pmap);
1793 prev->next = cur->next;
1794 pv_e = cur;
1795 }
1796 }
1797 UNLOCK_PVH(pai);
1798 }
1799 else {
1800
1801 /*
1802 * old_pa is not managed. Pretend it's zero so code
1803 * at Step 3) will enter new mapping (overwriting old
1804 * one). Do removal part of accounting.
1805 */
1806 old_pa = (vm_offset_t) 0;
1807 assert(pmap->stats.resident_count >= 1);
1808 pmap->stats.resident_count--;
1809 if (iswired(*pte)) {
1810 assert(pmap->stats.wired_count >= 1);
1811 pmap->stats.wired_count--;
1812 }
1813 }
1814 }
1815
1816 if (valid_page(pa)) {
1817
1818 /*
1819 * Step 2) Enter the mapping in the PV list for this
1820 * physical page.
1821 */
1822
1823 pai = pa_index(pa);
1824
1825
1826#if SHARING_FAULTS
1827RetryPvList:
1828 /*
1829 * We can return here from the sharing fault code below
1830 * in case we removed the only entry on the pv list and thus
1831 * must enter the new one in the list header.
1832 */
1833#endif /* SHARING_FAULTS */
1834 LOCK_PVH(pai);
1835 pv_h = pai_to_pvh(pai);
1836
1837 if (pv_h->pmap == PMAP_NULL) {
1838 /*
1839 * No mappings yet
1840 */
1841 pv_h->va = v;
1842 pv_h->pmap = pmap;
1843 pv_h->next = PV_ENTRY_NULL;
1844 }
1845 else {
1846#if DEBUG
1847 {
1848 /*
1849 * check that this mapping is not already there
1850 * or there is no alias for this mapping in the same map
1851 */
1852 pv_entry_t e = pv_h;
1853 while (e != PV_ENTRY_NULL) {
1854 if (e->pmap == pmap && e->va == v)
1855 panic("pmap_enter: already in pv_list");
1856 e = e->next;
1857 }
1858 }
1859#endif /* DEBUG */
1860#if SHARING_FAULTS
1861 {
1862 /*
1863 * do sharing faults.
1864 * if we find an entry on this pv list in the same address
1865 * space, remove it. we know there will not be more
1866 * than one.
1867 */
1868 pv_entry_t e = pv_h;
1869 pt_entry_t *opte;
1870
1871 while (e != PV_ENTRY_NULL) {
1872 if (e->pmap == pmap) {
1873 /*
1874 * Remove it, drop pv list lock first.
1875 */
1876 UNLOCK_PVH(pai);
1877
1878 opte = pmap_pte(pmap, e->va);
1879 assert(opte != PT_ENTRY_NULL);
1880 /*
1881 * Invalidate the translation buffer,
1882 * then remove the mapping.
1883 */
1884 PMAP_INVALIDATE_PAGE(pmap, e->va);
1885 pmap_remove_range(pmap, e->va, opte,
1886 opte + ptes_per_vm_page);
1887 /*
1888 * We could have removed the head entry,
1889 * so there could be no more entries
1890 * and so we have to use the pv head entry.
1891 * so, go back to the top and try the entry
1892 * again.
1893 */
1894 goto RetryPvList;
1895 }
1896 e = e->next;
1897 }
1898
1899 /*
1900 * check that this mapping is not already there
1901 */
1902 e = pv_h;
1903 while (e != PV_ENTRY_NULL) {
1904 if (e->pmap == pmap)
1905 panic("pmap_enter: alias in pv_list");
1906 e = e->next;
1907 }
1908 }
1909#endif /* SHARING_FAULTS */
1910#if DEBUG_ALIAS
1911 {
1912 /*
1913 * check for aliases within the same address space.
1914 */
1915 pv_entry_t e = pv_h;
1916 vm_offset_t rpc = get_rpc();
1917
1918 while (e != PV_ENTRY_NULL) {
1919 if (e->pmap == pmap) {
1920 /*
1921 * log this entry in the alias ring buffer
1922 * if it's not there already.
1923 */
1924 struct pmap_alias *pma;
1925 int ii, logit;
1926
1927 logit = TRUE;
1928 for (ii = 0; ii < pmap_alias_index; ii++) {
1929 if (pmap_aliasbuf[ii].rpc == rpc) {
1930 /* found it in the log already */
1931 logit = FALSE;
1932 break;
1933 }
1934 }
1935 if (logit) {
1936 pma = &pmap_aliasbuf[pmap_alias_index];
1937 pma->pmap = pmap;
1938 pma->va = v;
1939 pma->rpc = rpc;
1940 pma->cookie = PMAP_ALIAS_COOKIE;
1941 if (++pmap_alias_index >= PMAP_ALIAS_MAX)
1942 panic("pmap_enter: exhausted alias log");
1943 }
1944 }
1945 e = e->next;
1946 }
1947 }
1948#endif /* DEBUG_ALIAS */
1949 /*
1950 * Add new pv_entry after header.
1951 */
1952 if (pv_e == PV_ENTRY_NULL) {
1953 PV_ALLOC(pv_e);
1954 if (pv_e == PV_ENTRY_NULL) {
1955 UNLOCK_PVH(pai);
1956 PMAP_READ_UNLOCK(pmap, spl);
1957
1958 /*
1959 * Refill from zone.
1960 */
1961 pv_e = (pv_entry_t) zalloc(pv_list_zone);
1962 goto Retry;
1963 }
1964 }
1965 pv_e->va = v;
1966 pv_e->pmap = pmap;
1967 pv_e->next = pv_h->next;
1968 pv_h->next = pv_e;
1969 /*
1970 * Remember that we used the pvlist entry.
1971 */
1972 pv_e = PV_ENTRY_NULL;
1973 }
1974 UNLOCK_PVH(pai);
1975 }
1976
1977 /*
1978 * Step 3) Enter and count the mapping.
1979 */
1980
1981 pmap->stats.resident_count++;
1982
1983 /*
1984 * Build a template to speed up entering -
1985 * only the pfn changes.
1986 */
1987 template = pa_to_pte(pa) | INTEL_PTE_VALID;
1988 if (pmap != kernel_pmap)
1989 template |= INTEL_PTE_USER;
1990 if (prot & VM_PROT_WRITE)
1991 template |= INTEL_PTE_WRITE;
1992 if (wired) {
1993 template |= INTEL_PTE_WIRED;
1994 pmap->stats.wired_count++;
1995 }
1996 i = ptes_per_vm_page;
1997 do {
1998 WRITE_PTE(pte, template)
1999 pte++;
2000 pte_increment_pa(template);
2001 } while (--i > 0);
2002Done:
2003 if (pv_e != PV_ENTRY_NULL) {
2004 PV_FREE(pv_e);
2005 }
2006
2007 PMAP_READ_UNLOCK(pmap, spl);
2008}
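/*
 * Usage sketch (illustrative): entering a wired, writable kernel
 * mapping; pmap_map() above passes 0 for the flags argument.
 *
 *	pmap_enter(kernel_pmap, va, pa,
 *		   VM_PROT_READ | VM_PROT_WRITE,
 *		   0, TRUE);
 */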
2009
2010/*
2011 * Routine: pmap_change_wiring
2012 * Function: Change the wiring attribute for a map/virtual-address
2013 * pair.
2014 * In/out conditions:
2015 * The mapping must already exist in the pmap.
2016 */
2017void
2018pmap_change_wiring(
2019 register pmap_t map,
2020 vm_offset_t v,
2021 boolean_t wired)
2022{
2023 register pt_entry_t *pte;
2024 register int i;
2025 spl_t spl;
2026
2027#if 0
2028 /*
2029 * We must grab the pmap system lock because we may
2030 * change a pte_page queue.
2031 */
2032 PMAP_READ_LOCK(map, spl);
2033
2034 if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
2035 panic("pmap_change_wiring: pte missing");
2036
2037 if (wired && !iswired(*pte)) {
2038 /*
2039 * wiring down mapping
2040 */
2041 map->stats.wired_count++;
2042 i = ptes_per_vm_page;
2043 do {
2044 *pte++ |= INTEL_PTE_WIRED;
2045 } while (--i > 0);
2046 }
2047 else if (!wired && iswired(*pte)) {
2048 /*
2049 * unwiring mapping
2050 */
2051 assert(map->stats.wired_count >= 1);
2052 map->stats.wired_count--;
2053 i = ptes_per_vm_page;
2054 do {
2055 *pte++ &= ~INTEL_PTE_WIRED;
2056 } while (--i > 0);
2057 }
2058
2059 PMAP_READ_UNLOCK(map, spl);
2060
2061#else
2062 return;
2063#endif
2064
2065}
2066
2067/*
2068 * Routine: pmap_extract
2069 * Function:
2070 * Extract the physical page address associated
2071 * with the given map/virtual_address pair.
2072 */
2073
2074vm_offset_t
2075pmap_extract(
2076 register pmap_t pmap,
2077 vm_offset_t va)
2078{
2079 register pt_entry_t *pte;
2080 register vm_offset_t pa;
2081 spl_t spl;
2082
2083 SPLVM(spl);
2084 simple_lock(&pmap->lock);
2085 if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
2086 pa = (vm_offset_t) 0;
2087 else if (!(*pte & INTEL_PTE_VALID))
2088 pa = (vm_offset_t) 0;
2089 else
2090 pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
2091 simple_unlock(&pmap->lock);
2092 SPLX(spl);
2093 return(pa);
2094}
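/*
 * Usage sketch (illustrative): translating a virtual address; a return
 * value of 0 means no valid mapping exists for va in this pmap.
 *
 *	vm_offset_t pa;
 *
 *	pa = pmap_extract(kernel_pmap, va);
 *	if (pa == (vm_offset_t) 0)
 *		... no mapping ...
 */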
2095
2096/*
2097 * Routine: pmap_expand
2098 *
2099 * Expands a pmap to be able to map the specified virtual address.
2100 *
2101 * Allocates new virtual memory for the P0 or P1 portion of the
2102 * pmap, then re-maps the physical pages that were in the old
2103 * pmap to be in the new pmap.
2104 *
2105 * Must be called with the pmap system and the pmap unlocked,
2106 * since these must be unlocked to use vm_allocate or vm_deallocate.
2107 * Thus it must be called in a loop that checks whether the map
2108 * has been expanded enough.
2109 * (We won't loop forever, since page tables aren't shrunk.)
2110 */
2111void
2112pmap_expand(
2113 register pmap_t map,
2114 register vm_offset_t v)
2115{
2116 pt_entry_t *pdp;
2117 register vm_page_t m;
2118 register vm_offset_t pa;
2119 register int i;
2120 spl_t spl;
2121
2122 if (map == kernel_pmap)
2123 panic("pmap_expand");
2124
2125 /*
2126 * We cannot allocate the pmap_object in pmap_init,
2127 * because it is called before the zone package is up.
2128 * Allocate it now if it is missing.
2129 */
2130 if (pmap_object == VM_OBJECT_NULL)
2131 pmap_object = vm_object_allocate(avail_end);
2132
2133 /*
2134 * Allocate a VM page for the level 2 page table entries.
2135 */
2136 while ((m = vm_page_grab()) == VM_PAGE_NULL)
2137 VM_PAGE_WAIT();
2138
2139 /*
2140 * Map the page to its physical address so that it
2141 * can be found later.
2142 */
2143 pa = m->phys_addr;
2144 vm_object_lock(pmap_object);
2145 vm_page_insert(m, pmap_object, pa);
2146 vm_page_lock_queues();
2147 vm_page_wire(m);
2148 inuse_ptepages_count++;
2149 vm_object_unlock(pmap_object);
2150 vm_page_unlock_queues();
2151
2152 /*
2153 * Zero the page.
2154 */
2155 memset((void *)phystokv(pa), 0, PAGE_SIZE);
2156
2157 PMAP_READ_LOCK(map, spl);
2158 /*
2159 * See if someone else expanded us first
2160 */
2161 if (pmap_pte(map, v) != PT_ENTRY_NULL) {
2162 PMAP_READ_UNLOCK(map, spl);
2163 vm_object_lock(pmap_object);
2164 vm_page_lock_queues();
2165 vm_page_free(m);
2166 inuse_ptepages_count--;
2167 vm_page_unlock_queues();
2168 vm_object_unlock(pmap_object);
2169 return;
2170 }
2171
2172 /*
2173 * Set the page directory entry for this page table.
2174 * If we have allocated more than one hardware page,
2175 * set several page directory entries.
2176 */
2177
2178 i = ptes_per_vm_page;
2179 pdp = &map->dirbase[pdenum(map, v) & ~(i-1)];
2180 do {
2181 *pdp = pa_to_pte(pa)
2182 | INTEL_PTE_VALID
2183 | INTEL_PTE_USER
2184 | INTEL_PTE_WRITE;
2185 pdp++;
2186 pa += INTEL_PGBYTES;
2187 } while (--i > 0);
2188
2189 PMAP_READ_UNLOCK(map, spl);
2190 return;
2191}
2192
2193/*
2194 * Copy the range specified by src_addr/len
2195 * from the source map to the range dst_addr/len
2196 * in the destination map.
2197 *
2198 * This routine is only advisory and need not do anything.
2199 */
2200#if 0
2201void
2202pmap_copy(
2203 pmap_t dst_pmap,
2204 pmap_t src_pmap,
2205 vm_offset_t dst_addr,
2206 vm_size_t len,
2207 vm_offset_t src_addr)
2208{
2209#ifdef lint
2210 dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
2211#endif /* lint */
2212}
2213#endif/* 0 */
2214
2215int collect_ref;
2216int collect_unref;
2217
2218/*
2219 * Routine: pmap_collect
2220 * Function:
2221 * Garbage collects the physical map system for
2222 * pages which are no longer used.
2223 * Success need not be guaranteed -- that is, there
2224 * may well be pages which are not referenced, but
2225 * others may be collected.
2226 * Usage:
2227 * Called by the pageout daemon when pages are scarce.
2228 */
2229void
2230pmap_collect(
2231 pmap_t p)
2232{
2233 register pt_entry_t *pdp, *ptp;
2234 pt_entry_t *eptp;
2235 vm_offset_t pa;
2236 int wired;
2237 spl_t spl;
2238
2239 if (p == PMAP_NULL)
2240 return;
2241
2242 if (p == kernel_pmap)
2243 return;
2244
2245 /*
2246 * Garbage collect map.
2247 */
2248 PMAP_READ_LOCK(p, spl);
2249 PMAP_FLUSH_TLBS();
2250
2251 for (pdp = p->dirbase;
2252 pdp < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)];
2253 pdp += ptes_per_vm_page)
2254 {
2255 if (*pdp & INTEL_PTE_VALID)
2256 if(*pdp & INTEL_PTE_REF) {
2257 *pdp &= ~INTEL_PTE_REF;
2258 collect_ref++;
2259 } else {
2260 collect_unref++;
2261 pa = pte_to_pa(*pdp);
2262 ptp = (pt_entry_t *)phystokv(pa);
2263 eptp = ptp + NPTES*ptes_per_vm_page;
2264
2265 /*
2266 * If the pte page has any wired mappings, we cannot
2267 * free it.
2268 */
2269 wired = 0;
2270 {
2271 register pt_entry_t *ptep;
2272 for (ptep = ptp; ptep < eptp; ptep++) {
2273 if (iswired(*ptep)) {
2274 wired = 1;
2275 break;
2276 }
2277 }
2278 }
2279 if (!wired) {
2280 /*
2281 * Remove the virtual addresses mapped by this pte page.
2282 */
2283 pmap_remove_range(p,
2284 pdetova(pdp - p->dirbase),
2285 ptp,
2286 eptp);
2287
2288 /*
2289 * Invalidate the page directory pointer.
2290 */
2291 {
2292 register int i = ptes_per_vm_page;
2293 register pt_entry_t *pdep = pdp;
2294 do {
2295 *pdep++ = 0;
2296 } while (--i > 0);
2297 }
2298
2299 PMAP_READ_UNLOCK(p, spl);
2300
2301 /*
2302 * And free the pte page itself.
2303 */
2304 {
2305 register vm_page_t m;
2306
2307 vm_object_lock(pmap_object);
2308 m = vm_page_lookup(pmap_object, pa);
2309 if (m == VM_PAGE_NULL)
2310 panic("pmap_collect: pte page not in object");
2311 vm_page_lock_queues();
2312 vm_page_free(m);
2313 inuse_ptepages_count--;
2314 vm_page_unlock_queues();
2315 vm_object_unlock(pmap_object);
2316 }
2317
2318 PMAP_READ_LOCK(p, spl);
2319 }
2320 }
2321 }
2322 PMAP_READ_UNLOCK(p, spl);
2323 return;
2324
2325}
2326
2327/*
2328 * Routine: pmap_kernel
2329 * Function:
2330 * Returns the physical map handle for the kernel.
2331 */
2332#if 0
2333pmap_t
2334pmap_kernel(void)
2335{
2336 return (kernel_pmap);
2337}
2338#endif/* 0 */
2339
2340/*
2341 * pmap_zero_page zeros the specified (machine independent) page.
2342 * See machine/phys.c or machine/phys.s for implementation.
2343 */
2344#if 0
2345void
2346pmap_zero_page(
2347 register vm_offset_t phys)
2348{
2349 register int i;
2350
2351 assert(phys != vm_page_fictitious_addr);
2352 i = PAGE_SIZE / INTEL_PGBYTES;
2353 phys = intel_pfn(phys);
2354
2355 while (i--)
2356 zero_phys(phys++);
2357}
2358#endif/* 0 */
2359
2360/*
2361 * pmap_copy_page copies the specified (machine independent) page.
2362 * See machine/phys.c or machine/phys.s for implementation.
2363 */
2364#if 0
2365void
2366pmap_copy_page(
2367 vm_offset_t src,
2368 vm_offset_t dst)
2369{
2370 int i;
2371
2372 assert(src != vm_page_fictitious_addr);
2373 assert(dst != vm_page_fictitious_addr);
2374 i = PAGE_SIZE / INTEL_PGBYTES;
2375
2376 while (i--) {
2377 copy_phys(intel_pfn(src), intel_pfn(dst));
2378 src += INTEL_PGBYTES;
2379 dst += INTEL_PGBYTES;
2380 }
2381}
2382#endif/* 0 */
2383
2384/*
2385 * Routine: pmap_pageable
2386 * Function:
2387 * Make the specified pages (by pmap, offset)
2388 * pageable (or not) as requested.
2389 *
2390 * A page which is not pageable may not take
2391 * a fault; therefore, its page table entry
2392 * must remain valid for the duration.
2393 *
2394 * This routine is merely advisory; pmap_enter
2395 * will specify that these pages are to be wired
2396 * down (or not) as appropriate.
2397 */
2398void
2399pmap_pageable(
2400 pmap_t pmap,
2401 vm_offset_t start,
2402 vm_offset_t end,
2403 boolean_t pageable)
2404{
2405#ifdef lint
2406 pmap++; start++; end++; pageable++;
2407#endif /* lint */
2408}
2409
2410/*
2411 * Clear specified attribute bits.
2412 */
2413void
2414phys_attribute_clear(
2415 vm_offset_t phys,
2416 int bits)
2417{
2418 pv_entry_t pv_h;
2419 register pv_entry_t pv_e;
2420 register pt_entry_t *pte;
2421 int pai;
2422 register pmap_t pmap;
2423 spl_t spl;
2424
2425 assert(phys != vm_page_fictitious_addr);
2426 if (!valid_page(phys)) {
2427 /*
2428 * Not a managed page.
2429 */
2430 return;
2431 }
2432
2433 /*
2434 * Lock the pmap system first, since we will be changing
2435 * several pmaps.
2436 */
2437
2438 PMAP_WRITE_LOCK(spl);
2439
2440 pai = pa_index(phys);
2441 pv_h = pai_to_pvh(pai);
2442
2443 /*
2444 * Walk down PV list, clearing all modify or reference bits.
2445 * We do not have to lock the pv_list because we have
2446 * the entire pmap system locked.
2447 */
2448 if (pv_h->pmap != PMAP_NULL) {
2449 /*
2450 * There are some mappings.
2451 */
2452 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
2453
2454 pmap = pv_e->pmap;
2455 /*
2456 * Lock the pmap to block pmap_extract and similar routines.
2457 */
2458 simple_lock(&pmap->lock);
2459
2460 {
2461 register vm_offset_t va;
2462
2463 va = pv_e->va;
2464 pte = pmap_pte(pmap, va);
2465
2466#if 0
2467 /*
2468 * Consistency checks.
2469 */
2470 assert(*pte & INTEL_PTE_VALID);
2471 /* assert(pte_to_phys(*pte) == phys); */
2472#endif
2473
2474 /*
2475 * Invalidate TLBs for all CPUs using this mapping.
2476 */
2477 PMAP_INVALIDATE_PAGE(pmap, va);
2478 }
2479
2480 /*
2481 * Clear modify or reference bits.
2482 */
2483 {
2484 register int i = ptes_per_vm_page;
2485 do {
2486 *pte++ &= ~bits;
2487 } while (--i > 0);
2488 }
2489 simple_unlock(&pmap->lock);
2490 }
2491 }
2492
2493 pmap_phys_attributes[pai] &= ~bits;
2494
2495 PMAP_WRITE_UNLOCK(spl);
2496}
2497
2498/*
2499 * Check specified attribute bits.
2500 */
2501boolean_t
2502phys_attribute_test(
2503 vm_offset_t phys,
2504 int bits)
2505{
2506 pv_entry_t pv_h;
2507 register pv_entry_t pv_e;
2508 register pt_entry_t *pte;
2509 int pai;
2510 register pmap_t pmap;
2511 spl_t spl;
2512
2513 assert(phys != vm_page_fictitious_addr);
2514 if (!valid_page(phys)) {
2515 /*
2516 * Not a managed page.
2517 */
2518 return (FALSE);
2519 }
2520
2521 /*
2522 * Lock the pmap system first, since we will be checking
2523 * several pmaps.
2524 */
2525
2526 PMAP_WRITE_LOCK(spl);
2527
2528 pai = pa_index(phys);
2529 pv_h = pai_to_pvh(pai);
2530
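	/*
	 *	Quick check:  if the requested bits are already recorded
	 *	in the cached attribute array, there is no need to walk
	 *	the individual mappings.
	 */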
2531 if (pmap_phys_attributes[pai] & bits) {
2532 PMAP_WRITE_UNLOCK(spl);
2533 return (TRUE);
2534 }
2535
2536 /*
2537 * Walk down PV list, checking all mappings.
2538 * We do not have to lock the pv_list because we have
2539 * the entire pmap system locked.
2540 */
2541 if (pv_h->pmap != PMAP_NULL) {
2542 /*
2543 * There are some mappings.
2544 */
2545 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
2546
2547 pmap = pv_e->pmap;
2548 /*
2549 * Lock the pmap to block pmap_extract and similar routines.
2550 */
2551 simple_lock(&pmap->lock);
2552
2553 {
2554 register vm_offset_t va;
2555
2556 va = pv_e->va;
2557 pte = pmap_pte(pmap, va);
2558
2559#if 0
2560 /*
2561 * Consistency checks.
2562 */
2563 assert(*pte & INTEL_PTE_VALID);
2564 /* assert(pte_to_phys(*pte) == phys); */
2565#endif
2566 }
2567
2568 /*
2569 * Check modify or reference bits.
2570 */
2571 {
2572 register int i = ptes_per_vm_page;
2573
2574 do {
2575 if (*pte++ & bits) {
2576 simple_unlock(&pmap->lock);
2577 PMAP_WRITE_UNLOCK(spl);
2578 return (TRUE);
2579 }
2580 } while (--i > 0);
2581 }
2582 simple_unlock(&pmap->lock);
2583 }
2584 }
2585 PMAP_WRITE_UNLOCK(spl);
2586 return (FALSE);
2587}
2588
2589/*
2590 * Set specified attribute bits.
2591 */
2592void
2593phys_attribute_set(
2594 vm_offset_t phys,
2595 int bits)
2596{
2597	spl_t			spl;
2598
2599 assert(phys != vm_page_fictitious_addr);
2600 if (!valid_page(phys)) {
2601 /*
2602 * Not a managed page.
2603 */
2604 return;
2605 }
2606
2607 /*
2608 * Lock the pmap system and set the requested bits in
2609 *	the phys attributes array.  There is no need to touch the
2610 *	ptes because the test routine checks this array first.
2611 */
2612
2613 PMAP_WRITE_LOCK(spl);
2614 pmap_phys_attributes[pa_index(phys)] |= bits;
2615 PMAP_WRITE_UNLOCK(spl);
2616}
2617
2618/*
2619 * Set the modify bit on the specified physical page.
2620 */
2621
2622void pmap_set_modify(
2623 register vm_offset_t phys)
2624{
2625 phys_attribute_set(phys, PHYS_MODIFIED);
2626}
2627
2628/*
2629 * Clear the modify bits on the specified physical page.
2630 */
2631
2632void
2633pmap_clear_modify(
2634 register vm_offset_t phys)
2635{
2636 phys_attribute_clear(phys, PHYS_MODIFIED);
2637}
2638
2639/*
2640 * pmap_is_modified:
2641 *
2642 * Return whether or not the specified physical page is modified
2643 * by any physical maps.
2644 */
2645
2646boolean_t
2647pmap_is_modified(
2648 register vm_offset_t phys)
2649{
2650 return (phys_attribute_test(phys, PHYS_MODIFIED));
2651}
2652
2653/*
2654 * pmap_clear_reference:
2655 *
2656 * Clear the reference bit on the specified physical page.
2657 */
2658
2659void
2660pmap_clear_reference(
2661 vm_offset_t phys)
2662{
2663 phys_attribute_clear(phys, PHYS_REFERENCED);
2664}
2665
2666/*
2667 * pmap_is_referenced:
2668 *
2669 * Return whether or not the specified physical page is referenced
2670 * by any physical maps.
2671 */
2672
2673boolean_t
2674pmap_is_referenced(
2675 vm_offset_t phys)
2676{
2677 return (phys_attribute_test(phys, PHYS_REFERENCED));
2678}
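/*
 * Illustrative sketch (not part of the original source):  a pageout-style
 * caller could combine the reference primitives above to age a physical
 * page, where phys is the page's physical address:
 *
 *	if (pmap_is_referenced(phys)) {
 *		pmap_clear_reference(phys);	(recently used: give it another pass)
 *	} else {
 *		(unreferenced since the last scan: candidate for reclamation)
 *	}
 */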
2679
2680/*
2681 * Set the modify bit on the specified range
2682 * of this map as requested.
2683 *
2684 *	This optimization holds only if, each time the dirty bit
2685 *	in vm_page_t is tested, it is also tested in the pmap.
2686 */
2687void
2688pmap_modify_pages(
2689 pmap_t map,
2690 vm_offset_t s,
2691 vm_offset_t e)
2692{
2693 spl_t spl;
2694 register pt_entry_t *pde;
2695 register pt_entry_t *spte, *epte;
2696 vm_offset_t l;
2697
2698 if (map == PMAP_NULL)
2699 return;
2700
2701 PMAP_READ_LOCK(map, spl);
2702
2703 pde = pmap_pde(map, s);
2704 while (s && s < e) {
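	    /*
	     *	l is rounded up to the next PDE_MAPPED_SIZE boundary and
	     *	clamped to e; the extra test on s in the loop condition
	     *	stops the walk if l wraps to zero at the top of the
	     *	address space.
	     */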
2705 l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
2706 if (l > e)
2707 l = e;
2708 if (*pde & INTEL_PTE_VALID) {
2709 spte = (pt_entry_t *)ptetokv(*pde);
2710 if (l) {
2711 spte = &spte[ptenum(s)];
2712 epte = &spte[intel_btop(l-s)];
2713 } else {
2714 epte = &spte[intel_btop(PDE_MAPPED_SIZE)];
2715 spte = &spte[ptenum(s)];
2716 }
2717 while (spte < epte) {
2718 if (*spte & INTEL_PTE_VALID) {
2719 *spte |= (INTEL_PTE_MOD | INTEL_PTE_WRITE);
2720 }
2721 spte++;
2722 }
2723 }
2724 s = l;
2725 pde++;
2726 }
2727 PMAP_FLUSH_TLBS();
2728 PMAP_READ_UNLOCK(map, spl);
2729}
2730
2731
2732void
2733invalidate_icache(vm_offset_t addr, unsigned cnt, int phys)
2734{
2735 return;
2736}
2737void
2738flush_dcache(vm_offset_t addr, unsigned count, int phys)
2739{
2740 return;
2741}
2742
2743#if NCPUS > 1
2744
2745inline void
2746pmap_wait_for_clear(void)
2747{
2748 register int my_cpu;
2749 spl_t s;
2750 register pmap_t my_pmap;
2751
2752 mp_disable_preemption();
2753 my_cpu = cpu_number();
2754
2755
2756 my_pmap = real_pmap[my_cpu];
2757
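	/*
	 *	If this cpu is not actively using a user pmap, wait on the
	 *	kernel pmap's lock instead.
	 */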
2758 if (!(my_pmap && pmap_in_use(my_pmap, my_cpu)))
2759 my_pmap = kernel_pmap;
2760
2761 /*
2762 * Raise spl to splhigh (above splip) to block out pmap_extract
2763 * from IO code (which would put this cpu back in the active
2764 * set).
2765 */
2766 s = splhigh();
2767
2768 /*
2769 * Wait for any pmap updates in progress, on either user
2770 * or kernel pmap.
2771 */
2772 while (*(volatile hw_lock_t)&my_pmap->lock.interlock ||
2773 *(volatile hw_lock_t)&kernel_pmap->lock.interlock) {
2774 continue;
2775 }
2776
2777 splx(s);
2778 mp_enable_preemption();
2779}
2780
2781void
2782pmap_flush_tlb_interrupt(void) {
2783 pmap_wait_for_clear();
2784
2785 flush_tlb();
2786}
2787
2788void
2789pmap_reload_tlb_interrupt(void) {
2790 pmap_wait_for_clear();
2791
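	/*
	 *	Reloading cr3 with the kernel page directory switches this
	 *	cpu to the kernel's address space and invalidates its TLB
	 *	as a side effect.
	 */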
2792 set_cr3(kernel_pmap->pdirbase);
2793}
2794
2795
2796#endif /* NCPUS > 1 */
2797
2798#if MACH_KDB
2799
2800/* show phys page mappings and attributes */
2801
2802extern void db_show_page(vm_offset_t pa);
2803
2804void
2805db_show_page(vm_offset_t pa)
2806{
2807 pv_entry_t pv_h;
2808 int pai;
2809 char attr;
2810
2811 pai = pa_index(pa);
2812 pv_h = pai_to_pvh(pai);
2813
2814 attr = pmap_phys_attributes[pai];
2815 printf("phys page %x ", pa);
2816 if (attr & PHYS_MODIFIED)
2817 printf("modified, ");
2818 if (attr & PHYS_REFERENCED)
2819 printf("referenced, ");
2820 if (pv_h->pmap || pv_h->next)
2821 printf(" mapped at\n");
2822 else
2823 printf(" not mapped\n");
2824 for (; pv_h; pv_h = pv_h->next)
2825 if (pv_h->pmap)
2826 printf("%x in pmap %x\n", pv_h->va, pv_h->pmap);
2827}
2828
2829#endif /* MACH_KDB */
2830
2831#if MACH_KDB
2832void db_kvtophys(vm_offset_t);
2833void db_show_vaddrs(pt_entry_t *);
2834
2835/*
2836 * print out the results of kvtophys(arg)
2837 */
2838void
2839db_kvtophys(
2840 vm_offset_t vaddr)
2841{
2842 db_printf("0x%x", kvtophys(vaddr));
2843}
2844
2845/*
2846 *	Walk the page tables.
2847 */
2848void
2849db_show_vaddrs(
2850 pt_entry_t *dirbase)
2851{
2852 pt_entry_t *ptep, *pdep, tmp;
2853 int x, y, pdecnt, ptecnt;
2854
2855 if (dirbase == 0) {
2856 dirbase = kernel_pmap->dirbase;
2857 }
2858 if (dirbase == 0) {
2859 db_printf("need a dirbase...\n");
2860 return;
2861 }
2862 dirbase = (pt_entry_t *) ((unsigned long) dirbase & ~INTEL_OFFMASK);
2863
2864 db_printf("dirbase: 0x%x\n", dirbase);
2865
2866 pdecnt = ptecnt = 0;
2867 pdep = &dirbase[0];
2868 for (y = 0; y < NPDES; y++, pdep++) {
2869 if (((tmp = *pdep) & INTEL_PTE_VALID) == 0) {
2870 continue;
2871 }
2872 pdecnt++;
2873 ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK);
2874 db_printf("dir[%4d]: 0x%x\n", y, *pdep);
2875 for (x = 0; x < NPTES; x++, ptep++) {
2876 if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) {
2877 continue;
2878 }
2879 ptecnt++;
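			/*
			 *	Reconstruct the virtual address from the
			 *	directory index (bits 22-31) and the table
			 *	index (bits 12-21).
			 */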
2880 db_printf(" tab[%4d]: 0x%x, va=0x%x, pa=0x%x\n",
2881 x,
2882 *ptep,
2883 (y << 22) | (x << 12),
2884 *ptep & ~INTEL_OFFMASK);
2885 }
2886 }
2887
2888 db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt);
2889
2890}
2891#endif /* MACH_KDB */
2892
2893#include <mach_vm_debug.h>
2894#if MACH_VM_DEBUG
2895#include <vm/vm_debug.h>
2896
2897int
2898pmap_list_resident_pages(
2899 register pmap_t pmap,
2900 register vm_offset_t *listp,
2901 register int space)
2902{
2903 return 0;
2904}
2905#endif /* MACH_VM_DEBUG */
2906
2907#ifdef MACH_BSD
2908/*
2909 *	pmap_movepage (pmap_pagemove)
2910 *
2911 * BSD support routine to reassign virtual addresses.
2912 */
2913
2914void
2915pmap_movepage(unsigned long from, unsigned long to, vm_size_t size)
2916{
2917 spl_t spl;
2918 pt_entry_t *pte, saved_pte;
2919	/* The kernel pmap is locked and unlocked around each per-page update below. */
2920
2921
2922 while (size > 0) {
2923 PMAP_READ_LOCK(kernel_pmap, spl);
2924 pte = pmap_pte(kernel_pmap, from);
2925 if (pte == NULL)
2926 panic("pmap_pagemove from pte NULL");
2927 saved_pte = *pte;
2928 PMAP_READ_UNLOCK(kernel_pmap, spl);
2929
2930 pmap_enter(kernel_pmap, to, i386_trunc_page(*pte),
9bccf70c 2931 VM_PROT_READ|VM_PROT_WRITE, 0, *pte & INTEL_PTE_WIRED);
1c79356b
A
2932
2933 pmap_remove(kernel_pmap, from, from+PAGE_SIZE);
2934
2935 PMAP_READ_LOCK(kernel_pmap, spl);
2936 pte = pmap_pte(kernel_pmap, to);
2937 if (pte == NULL)
2938 panic("pmap_pagemove 'to' pte NULL");
2939
2940 *pte = saved_pte;
2941 PMAP_READ_UNLOCK(kernel_pmap, spl);
2942
2943 from += PAGE_SIZE;
2944 to += PAGE_SIZE;
2945 size -= PAGE_SIZE;
2946 }
2947
2948 /* Get the processors to update the TLBs */
2949 PMAP_FLUSH_TLBS();
2950
2951}
2952
2953kern_return_t bmapvideo(vm_offset_t *info);
2954kern_return_t bmapvideo(vm_offset_t *info) {
2955
2956 extern struct vc_info vinfo;
2957#ifdef NOTIMPLEMENTED
2958 (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */
2959#endif
2960 return KERN_SUCCESS;
2961}
2962
2963kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
2964kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
2965
2966#ifdef NOTIMPLEMENTED
2967 pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr); /* Map it in */
2968#endif
2969 return KERN_SUCCESS;
2970}
2971
2972kern_return_t bmapmapr(vm_offset_t va);
2973kern_return_t bmapmapr(vm_offset_t va) {
2974
2975#ifdef NOTIMPLEMENTED
2976 mapping_remove(current_act()->task->map->pmap, va); /* Remove map */
2977#endif
2978 return KERN_SUCCESS;
2979}
2980#endif
2981
9bccf70c
A
2982/* temporary workaround */
2983boolean_t
2984coredumpok(vm_map_t map, vm_offset_t va)
2985{
2986 pt_entry_t *ptep;
2987 ptep = pmap_pte(map->pmap, va);
2988 if (0 == ptep) return FALSE;
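	/*
	 *	Dumping is allowed unless the pte marks the page both
	 *	non-cacheable and wired.
	 */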
2989 return ((*ptep & (INTEL_PTE_NCACHE|INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE|INTEL_PTE_WIRED));
2990}