2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
61 * Author: Avadis Tevanian, Jr., Michael Wayne Young
62 * (These guys wrote the Vax version)
64 * Physical Map management code for Intel i386, i486, and i860.
66 * Manages physical address maps.
68 * In addition to hardware address maps, this
69 * module is called upon to provide software-use-only
70 * maps which may or may not be stored in the same
71 * form as hardware maps. These pseudo-maps are
72 * used to store intermediate results from copy
73 * operations to and from address spaces.
75 * Since the information managed by this module is
76 * also stored by the logical address mapping module,
77 * this module may throw away valid virtual-to-physical
78 * mappings at almost any time. However, invalidations
79 * of virtual-to-physical mappings must be done as
82 * In order to cope with hardware architectures which
83 * make virtual-to-physical map invalidates expensive,
84 * this module may delay invalidate or reduced protection
85 * operations until such time as they are actually
86 * necessary. This module is given full information as
87 * to which processors are currently using which maps,
88 * and to when physical maps must be made correct.
93 #include <mach_ldebug.h>
95 #include <libkern/OSAtomic.h>
97 #include <mach/machine/vm_types.h>
99 #include <mach/boolean.h>
100 #include <kern/thread.h>
101 #include <kern/zalloc.h>
102 #include <kern/queue.h>
104 #include <kern/lock.h>
105 #include <kern/kalloc.h>
106 #include <kern/spl.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_kern.h>
111 #include <mach/vm_param.h>
112 #include <mach/vm_prot.h>
113 #include <vm/vm_object.h>
114 #include <vm/vm_page.h>
116 #include <mach/machine/vm_param.h>
117 #include <machine/thread.h>
119 #include <kern/misc_protos.h> /* prototyping */
120 #include <i386/misc_protos.h>
122 #include <i386/cpuid.h>
123 #include <i386/cpu_data.h>
124 #include <i386/cpu_number.h>
125 #include <i386/machine_cpu.h>
126 #include <i386/seg.h>
127 #include <i386/serial_io.h>
128 #include <i386/cpu_capabilities.h>
129 #include <i386/machine_routines.h>
130 #include <i386/proc_reg.h>
131 #include <i386/tsc.h>
132 #include <i386/acpi.h>
133 #include <i386/pmap_internal.h>
136 #include <ddb/db_command.h>
137 #include <ddb/db_output.h>
138 #include <ddb/db_sym.h>
139 #include <ddb/db_print.h>
140 #endif /* MACH_KDB */
142 #include <vm/vm_protos.h>
145 #include <i386/mp_desc.h>
146 #include <i386/i386_lowmem.h>
147 #include <i386/lowglobals.h>
150 /* #define DEBUGINTERRUPTS 1 uncomment to ensure pmap callers have interrupts enabled */
151 #ifdef DEBUGINTERRUPTS
152 #define pmap_intr_assert() {if (processor_avail_count > 1 && !ml_get_interrupts_enabled()) panic("pmap interrupt assert %s, %d",__FILE__, __LINE__);}
154 #define pmap_intr_assert()
160 #define POSTCODE_DELAY 1
161 #include <i386/postcode.h>
162 #endif /* IWANTTODEBUG */
165 void dump_pmap(pmap_t
);
166 void dump_4GB_pdpt(pmap_t p
);
167 void dump_4GB_pdpt_thread(thread_t tp
);
170 int nx_enabled
= 1; /* enable no-execute protection */
171 #ifdef CONFIG_EMBEDDED
172 int allow_data_exec
= 0; /* no exec from data, embedded is hardcore like that */
174 int allow_data_exec
= VM_ABI_32
; /* 32-bit apps may execute data by default, 64-bit apps may not */
176 int allow_stack_exec
= 0; /* No apps may execute from the stack by default */
179 boolean_t cpu_64bit
= FALSE
;
181 const boolean_t cpu_64bit
= TRUE
;
183 boolean_t pmap_trace
= FALSE
;
185 uint64_t max_preemption_latency_tsc
= 0;
187 pv_hashed_entry_t
*pv_hash_table
; /* hash lists */
189 uint32_t npvhash
= 0;
192 * pv_list entries are kept on a list that can only be accessed
193 * with the pmap system locked (at SPLVM, not in the cpus_active set).
194 * The list is refilled from the pv_hashed_list_zone if it becomes empty.
196 pv_rooted_entry_t pv_free_list
= PV_ROOTED_ENTRY_NULL
; /* free list at SPLVM */
197 pv_hashed_entry_t pv_hashed_free_list
= PV_HASHED_ENTRY_NULL
;
198 pv_hashed_entry_t pv_hashed_kern_free_list
= PV_HASHED_ENTRY_NULL
;
199 decl_simple_lock_data(,pv_hashed_free_list_lock
)
200 decl_simple_lock_data(,pv_hashed_kern_free_list_lock
)
201 decl_simple_lock_data(,pv_hash_table_lock
)
203 zone_t pv_hashed_list_zone
; /* zone of pv_hashed_entry structures */
205 static zone_t pdpt_zone
;
208 * First and last physical addresses that we maintain any information
209 * for. Initialized to zero so that pmap operations done before
210 * pmap_init won't touch any non-existent structures.
212 boolean_t pmap_initialized
= FALSE
;/* Has pmap_init completed? */
214 static struct vm_object kptobj_object_store
;
215 static vm_object_t kptobj
;
218 * Index into pv_head table, its lock bits, and the modify/reference and managed bits
222 * Array of physical page attribites for managed pages.
223 * One byte per physical page.
225 char *pmap_phys_attributes
;
226 unsigned int last_managed_page
= 0;
228 uint64_t pde_mapped_size
;
231 * Locking and TLB invalidation
235 * Locking Protocols: (changed 2/2007 JK)
237 * There are two structures in the pmap module that need locking:
238 * the pmaps themselves, and the per-page pv_lists (which are locked
239 * by locking the pv_lock_table entry that corresponds to the pv_head
240 * for the list in question.) Most routines want to lock a pmap and
241 * then do operations in it that require pv_list locking -- however
242 * pmap_remove_all and pmap_copy_on_write operate on a physical page
243 * basis and want to do the locking in the reverse order, i.e. lock
244 * a pv_list and then go through all the pmaps referenced by that list.
246 * The system wide pmap lock has been removed. Now, paths take a lock
247 * on the pmap before changing its 'shape' and the reverse order lockers
248 * (coming in by phys ppn) take a lock on the corresponding pv and then
249 * retest to be sure nothing changed during the window before they locked
250 * and can then run up/down the pv lists holding the list lock. This also
251 * lets the pmap layer run (nearly completely) interrupt enabled, unlike
260 #define LOCK_PVH(index) { \
261 mp_disable_preemption(); \
262 lock_pvh_pai(index); \
265 #define UNLOCK_PVH(index) { \
266 unlock_pvh_pai(index); \
267 mp_enable_preemption(); \
274 #define LOCK_PV_HASH(hash) lock_hash_hash(hash)
276 #define UNLOCK_PV_HASH(hash) unlock_hash_hash(hash)
279 extern int max_lock_loops
;
281 unsigned int loop_count; \
282 loop_count = disable_serial_output ? max_lock_loops \
284 #define LOOP_CHECK(msg, pmap) \
285 if (--loop_count == 0) { \
286 mp_disable_preemption(); \
287 kprintf("%s: cpu %d pmap %x\n", \
288 msg, cpu_number(), pmap); \
289 Debugger("deadlock detection"); \
290 mp_enable_preemption(); \
291 loop_count = max_lock_loops; \
293 #else /* USLOCK_DEBUG */
295 #define LOOP_CHECK(msg, pmap)
296 #endif /* USLOCK_DEBUG */
298 unsigned pmap_memory_region_count
;
299 unsigned pmap_memory_region_current
;
301 pmap_memory_region_t pmap_memory_regions
[PMAP_MEMORY_REGIONS_SIZE
];
304 * Other useful macros.
306 #define current_pmap() (vm_map_pmap(current_thread()->map))
308 struct pmap kernel_pmap_store
;
311 pd_entry_t high_shared_pde
;
312 pd_entry_t commpage64_pde
;
314 struct zone
*pmap_zone
; /* zone of pmap structures */
316 int pmap_debug
= 0; /* flag for debugging prints */
318 unsigned int inuse_ptepages_count
= 0;
319 long long alloc_ptepages_count
__attribute__((aligned(8))) = 0LL; /* aligned for atomic access */
320 unsigned int bootstrap_wired_pages
= 0;
321 int pt_fake_zone_index
= -1;
323 extern long NMIPI_acks
;
326 PMAP_ZINFO_SALLOC(vm_size_t bytes
)
328 current_thread()->tkm_shared
.alloc
+= bytes
;
332 PMAP_ZINFO_SFREE(vm_size_t bytes
)
334 current_thread()->tkm_shared
.free
+= (bytes
);
337 addr64_t kernel64_cr3
;
338 boolean_t no_shared_cr3
= FALSE
; /* -no_shared_cr3 boot arg */
340 boolean_t kernel_text_ps_4K
= TRUE
;
341 boolean_t wpkernel
= TRUE
;
346 pt_entry_t
*DMAP1
, *DMAP2
;
351 * for legacy, returns the address of the pde entry.
352 * for 64 bit, causes the pdpt page containing the pde entry to be mapped,
353 * then returns the mapped address of the pde entry in that page
356 pmap_pde(pmap_t m
, vm_map_offset_t v
)
359 if (!cpu_64bit
|| (m
== kernel_pmap
)) {
360 pde
= (&((m
)->dirbase
[(vm_offset_t
)(v
) >> PDESHIFT
]));
363 assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
364 pde
= pmap64_pde(m
, v
);
370 * the single pml4 page per pmap is allocated at pmap create time and exists
371 * for the duration of the pmap. we allocate this page in kernel vm (to save us one
372 * level of page table dynamic mapping.
373 * this returns the address of the requested pml4 entry in the top level page.
377 pmap64_pml4(pmap_t pmap
, vm_map_offset_t vaddr
)
379 return ((pml4_entry_t
*)pmap
->pm_hold
+ ((vm_offset_t
)((vaddr
>>PML4SHIFT
)&(NPML4PG
-1))));
383 * maps in the pml4 page, if any, containing the pdpt entry requested
384 * and returns the address of the pdpt entry in that mapped page
387 pmap64_pdpt(pmap_t pmap
, vm_map_offset_t vaddr
)
394 assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
395 if ((vaddr
> 0x00007FFFFFFFFFFFULL
) && (vaddr
< 0xFFFF800000000000ULL
)) {
399 pml4
= pmap64_pml4(pmap
, vaddr
);
401 if (pml4
&& ((*pml4
& INTEL_PTE_VALID
))) {
403 newpf
= *pml4
& PG_FRAME
;
406 for (i
=PMAP_PDPT_FIRST_WINDOW
; i
< PMAP_PDPT_FIRST_WINDOW
+PMAP_PDPT_NWINDOWS
; i
++) {
407 if (((*(current_cpu_datap()->cpu_pmap
->mapwindow
[i
].prv_CMAP
)) & PG_FRAME
) == newpf
) {
408 return((pdpt_entry_t
*)(current_cpu_datap()->cpu_pmap
->mapwindow
[i
].prv_CADDR
) +
409 ((vm_offset_t
)((vaddr
>>PDPTSHIFT
)&(NPDPTPG
-1))));
413 current_cpu_datap()->cpu_pmap
->pdpt_window_index
++;
414 if (current_cpu_datap()->cpu_pmap
->pdpt_window_index
> (PMAP_PDPT_FIRST_WINDOW
+PMAP_PDPT_NWINDOWS
-1))
415 current_cpu_datap()->cpu_pmap
->pdpt_window_index
= PMAP_PDPT_FIRST_WINDOW
;
417 (current_cpu_datap()->cpu_pmap
->mapwindow
[current_cpu_datap()->cpu_pmap
->pdpt_window_index
].prv_CMAP
),
418 newpf
| INTEL_PTE_RW
| INTEL_PTE_VALID
);
419 invlpg((u_int
)(current_cpu_datap()->cpu_pmap
->mapwindow
[current_cpu_datap()->cpu_pmap
->pdpt_window_index
].prv_CADDR
));
420 return ((pdpt_entry_t
*)(current_cpu_datap()->cpu_pmap
->mapwindow
[current_cpu_datap()->cpu_pmap
->pdpt_window_index
].prv_CADDR
) +
421 ((vm_offset_t
)((vaddr
>>PDPTSHIFT
)&(NPDPTPG
-1))));
428 * maps in the pdpt page, if any, containing the pde entry requested
429 * and returns the address of the pde entry in that mapped page
432 pmap64_pde(pmap_t pmap
, vm_map_offset_t vaddr
)
439 assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
440 if ((vaddr
> 0x00007FFFFFFFFFFFULL
) && (vaddr
< 0xFFFF800000000000ULL
)) {
444 /* if (vaddr & (1ULL << 63)) panic("neg addr");*/
445 pdpt
= pmap64_pdpt(pmap
, vaddr
);
447 if (pdpt
&& ((*pdpt
& INTEL_PTE_VALID
))) {
449 newpf
= *pdpt
& PG_FRAME
;
451 for (i
=PMAP_PDE_FIRST_WINDOW
; i
< PMAP_PDE_FIRST_WINDOW
+PMAP_PDE_NWINDOWS
; i
++) {
452 if (((*(current_cpu_datap()->cpu_pmap
->mapwindow
[i
].prv_CMAP
)) & PG_FRAME
) == newpf
) {
453 return((pd_entry_t
*)(current_cpu_datap()->cpu_pmap
->mapwindow
[i
].prv_CADDR
) +
454 ((vm_offset_t
)((vaddr
>>PDSHIFT
)&(NPDPG
-1))));
458 current_cpu_datap()->cpu_pmap
->pde_window_index
++;
459 if (current_cpu_datap()->cpu_pmap
->pde_window_index
> (PMAP_PDE_FIRST_WINDOW
+PMAP_PDE_NWINDOWS
-1))
460 current_cpu_datap()->cpu_pmap
->pde_window_index
= PMAP_PDE_FIRST_WINDOW
;
462 (current_cpu_datap()->cpu_pmap
->mapwindow
[current_cpu_datap()->cpu_pmap
->pde_window_index
].prv_CMAP
),
463 newpf
| INTEL_PTE_RW
| INTEL_PTE_VALID
);
464 invlpg((u_int
)(current_cpu_datap()->cpu_pmap
->mapwindow
[current_cpu_datap()->cpu_pmap
->pde_window_index
].prv_CADDR
));
465 return ((pd_entry_t
*)(current_cpu_datap()->cpu_pmap
->mapwindow
[current_cpu_datap()->cpu_pmap
->pde_window_index
].prv_CADDR
) +
466 ((vm_offset_t
)((vaddr
>>PDSHIFT
)&(NPDPG
-1))));
473 * Because the page tables (top 3 levels) are mapped into per cpu windows,
474 * callers must either disable interrupts or disable preemption before calling
475 * one of the pte mapping routines (e.g. pmap_pte()) as the returned vaddr
476 * is in one of those mapped windows and that cannot be allowed to change until
477 * the caller is done using the returned pte pointer. When done, the caller
478 * restores interrupts or preemption to its previous state after which point the
479 * vaddr for the returned pte can no longer be used
484 * return address of mapped pte for vaddr va in pmap pmap.
485 * must be called with pre-emption or interrupts disabled
486 * if targeted pmap is not the kernel pmap
487 * since we may be passing back a virtual address that is
488 * associated with this cpu... pre-emption or interrupts
489 * must remain disabled until the caller is done using
490 * the pointer that was passed back .
492 * maps the pde page, if any, containing the pte in and returns
493 * the address of the pte in that mapped page
496 pmap_pte(pmap_t pmap
, vm_map_offset_t vaddr
)
503 pde
= pmap_pde(pmap
,vaddr
);
505 if (pde
&& ((*pde
& INTEL_PTE_VALID
))) {
506 if (*pde
& INTEL_PTE_PS
)
508 if (pmap
== kernel_pmap
)
509 return (vtopte(vaddr
)); /* compat kernel still has pte's mapped */
511 if (ml_get_interrupts_enabled() && get_preemption_level() == 0)
512 panic("pmap_pte: unsafe call");
514 assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
516 newpf
= *pde
& PG_FRAME
;
518 for (i
=PMAP_PTE_FIRST_WINDOW
; i
< PMAP_PTE_FIRST_WINDOW
+PMAP_PTE_NWINDOWS
; i
++) {
519 if (((*(current_cpu_datap()->cpu_pmap
->mapwindow
[i
].prv_CMAP
)) & PG_FRAME
) == newpf
) {
520 return((pt_entry_t
*)(current_cpu_datap()->cpu_pmap
->mapwindow
[i
].prv_CADDR
) +
521 ((vm_offset_t
)i386_btop(vaddr
) & (NPTEPG
-1)));
525 current_cpu_datap()->cpu_pmap
->pte_window_index
++;
526 if (current_cpu_datap()->cpu_pmap
->pte_window_index
> (PMAP_PTE_FIRST_WINDOW
+PMAP_PTE_NWINDOWS
-1))
527 current_cpu_datap()->cpu_pmap
->pte_window_index
= PMAP_PTE_FIRST_WINDOW
;
529 (current_cpu_datap()->cpu_pmap
->mapwindow
[current_cpu_datap()->cpu_pmap
->pte_window_index
].prv_CMAP
),
530 newpf
| INTEL_PTE_RW
| INTEL_PTE_VALID
);
531 invlpg((u_int
)(current_cpu_datap()->cpu_pmap
->mapwindow
[current_cpu_datap()->cpu_pmap
->pte_window_index
].prv_CADDR
));
532 return ((pt_entry_t
*)(current_cpu_datap()->cpu_pmap
->mapwindow
[current_cpu_datap()->cpu_pmap
->pte_window_index
].prv_CADDR
) +
533 ((vm_offset_t
)i386_btop(vaddr
) & (NPTEPG
-1)));
541 * Map memory at initialization. The physical addresses being
542 * mapped are not managed and are never unmapped.
544 * For now, VM is already on, we only need to map the
550 vm_map_offset_t start_addr
,
551 vm_map_offset_t end_addr
,
558 while (start_addr
< end_addr
) {
559 pmap_enter(kernel_pmap
, (vm_map_offset_t
)virt
,
560 (ppnum_t
) i386_btop(start_addr
), prot
, flags
, FALSE
);
568 * Back-door routine for mapping kernel VM at initialization.
569 * Useful for mapping memory outside the range
570 * Sets no-cache, A, D.
571 * Otherwise like pmap_map.
576 vm_map_offset_t start_addr
,
577 vm_map_offset_t end_addr
,
585 template = pa_to_pte(start_addr
)
591 if(flags
& (VM_MEM_NOT_CACHEABLE
| VM_WIMG_USE_DEFAULT
)) {
592 template |= INTEL_PTE_NCACHE
;
593 if(!(flags
& (VM_MEM_GUARDED
| VM_WIMG_USE_DEFAULT
)))
594 template |= INTEL_PTE_PTA
;
597 if (prot
& VM_PROT_WRITE
)
598 template |= INTEL_PTE_WRITE
;
600 while (start_addr
< end_addr
) {
602 pte
= pmap_pte(kernel_pmap
, (vm_map_offset_t
)virt
);
603 if (pte
== PT_ENTRY_NULL
) {
604 panic("pmap_map_bd: Invalid kernel address\n");
606 pmap_store_pte(pte
, template);
608 pte_increment_pa(template);
610 start_addr
+= PAGE_SIZE
;
617 extern pmap_paddr_t first_avail
;
618 extern vm_offset_t virtual_avail
, virtual_end
;
619 extern pmap_paddr_t avail_start
, avail_end
;
620 extern vm_offset_t sHIB
;
621 extern vm_offset_t eHIB
;
622 extern vm_offset_t stext
;
623 extern vm_offset_t etext
;
624 extern vm_offset_t sdata
;
626 extern void *KPTphys
;
632 * Here early in the life of a processor (from cpu_mode_init()).
636 * Initialize the per-cpu, TLB-related fields.
638 current_cpu_datap()->cpu_active_cr3
= kernel_pmap
->pm_cr3
;
639 current_cpu_datap()->cpu_tlb_invalid
= FALSE
;
643 pmap_high_shared_remap(enum high_fixed_addresses e
, vm_offset_t va
, int sz
)
645 vm_offset_t ve
= pmap_index_to_virt(e
);
651 assert(0 == (va
& PAGE_MASK
)); /* expecting page aligned */
653 ptep
= pmap_pte(kernel_pmap
, (vm_map_offset_t
)ve
);
655 for (i
=0; i
< sz
; i
++) {
656 pa
= (pmap_paddr_t
) kvtophys(va
);
657 pmap_store_pte(ptep
, (pa
& PG_FRAME
)
671 pmap_cpu_high_shared_remap(int cpu
, enum high_cpu_types e
, vm_offset_t va
, int sz
)
673 enum high_fixed_addresses a
= e
+ HIGH_CPU_END
* cpu
;
674 return pmap_high_shared_remap(HIGH_FIXED_CPUS_BEGIN
+ a
, va
, sz
);
677 void pmap_init_high_shared(void);
679 extern vm_offset_t gdtptr
, idtptr
;
681 extern uint32_t low_intstack
;
683 extern struct fake_descriptor ldt_desc_pattern
;
684 extern struct fake_descriptor tss_desc_pattern
;
686 extern char hi_remap_text
, hi_remap_etext
;
687 extern char t_zero_div
;
689 pt_entry_t
*pte_unique_base
;
692 pmap_init_high_shared(void)
698 struct i386_tss
*ttss
;
701 cpu_desc_index_t
* cdi
= &cpu_data_master
.cpu_desc_index
;
703 kprintf("HIGH_MEM_BASE 0x%x fixed per-cpu begin 0x%x\n",
704 HIGH_MEM_BASE
,pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN
));
706 pte_unique_base
= pmap_pte(kernel_pmap
, (vm_map_offset_t
)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN
));
709 if (i386_btop(&hi_remap_etext
- &hi_remap_text
+ 1) >
710 HIGH_FIXED_TRAMPS_END
- HIGH_FIXED_TRAMPS
+ 1)
711 panic("tramps too large");
712 haddr
= pmap_high_shared_remap(HIGH_FIXED_TRAMPS
,
713 (vm_offset_t
) &hi_remap_text
, 3);
714 kprintf("tramp: 0x%x, ",haddr
);
715 /* map gdt up high and update ptr for reload */
716 haddr
= pmap_high_shared_remap(HIGH_FIXED_GDT
,
717 (vm_offset_t
) master_gdt
, 1);
718 cdi
->cdi_gdt
.ptr
= (void *)haddr
;
719 kprintf("GDT: 0x%x, ",haddr
);
720 /* map ldt up high */
721 haddr
= pmap_high_shared_remap(HIGH_FIXED_LDT_BEGIN
,
722 (vm_offset_t
) master_ldt
,
723 HIGH_FIXED_LDT_END
- HIGH_FIXED_LDT_BEGIN
+ 1);
724 cdi
->cdi_ldt
= (struct fake_descriptor
*)haddr
;
725 kprintf("LDT: 0x%x, ",haddr
);
726 /* put new ldt addr into gdt */
727 struct fake_descriptor temp_fake_desc
;
728 temp_fake_desc
= ldt_desc_pattern
;
729 temp_fake_desc
.offset
= (vm_offset_t
) haddr
;
730 fix_desc(&temp_fake_desc
, 1);
732 *(struct fake_descriptor
*) &master_gdt
[sel_idx(KERNEL_LDT
)] = temp_fake_desc
;
733 *(struct fake_descriptor
*) &master_gdt
[sel_idx(USER_LDT
)] = temp_fake_desc
;
735 /* map idt up high */
736 haddr
= pmap_high_shared_remap(HIGH_FIXED_IDT
,
737 (vm_offset_t
) master_idt
, 1);
738 cdi
->cdi_idt
.ptr
= (void *)haddr
;
739 kprintf("IDT: 0x%x, ", haddr
);
740 /* remap ktss up high and put new high addr into gdt */
741 haddr
= pmap_high_shared_remap(HIGH_FIXED_KTSS
,
742 (vm_offset_t
) &master_ktss
, 1);
744 temp_fake_desc
= tss_desc_pattern
;
745 temp_fake_desc
.offset
= (vm_offset_t
) haddr
;
746 fix_desc(&temp_fake_desc
, 1);
747 *(struct fake_descriptor
*) &master_gdt
[sel_idx(KERNEL_TSS
)] = temp_fake_desc
;
748 kprintf("KTSS: 0x%x, ",haddr
);
750 /* remap dbtss up high and put new high addr into gdt */
751 haddr
= pmap_high_shared_remap(HIGH_FIXED_DBTSS
,
752 (vm_offset_t
) &master_dbtss
, 1);
753 temp_fake_desc
= tss_desc_pattern
;
754 temp_fake_desc
.offset
= (vm_offset_t
) haddr
;
755 fix_desc(&temp_fake_desc
, 1);
756 *(struct fake_descriptor
*)&master_gdt
[sel_idx(DEBUG_TSS
)] = temp_fake_desc
;
757 ttss
= (struct i386_tss
*)haddr
;
758 kprintf("DBTSS: 0x%x, ",haddr
);
759 #endif /* MACH_KDB */
761 /* remap dftss up high and put new high addr into gdt */
762 haddr
= pmap_high_shared_remap(HIGH_FIXED_DFTSS
,
763 (vm_offset_t
) &master_dftss
, 1);
764 temp_fake_desc
= tss_desc_pattern
;
765 temp_fake_desc
.offset
= (vm_offset_t
) haddr
;
766 fix_desc(&temp_fake_desc
, 1);
767 *(struct fake_descriptor
*) &master_gdt
[sel_idx(DF_TSS
)] = temp_fake_desc
;
768 kprintf("DFTSS: 0x%x\n",haddr
);
770 /* remap mctss up high and put new high addr into gdt */
771 haddr
= pmap_high_shared_remap(HIGH_FIXED_DFTSS
,
772 (vm_offset_t
) &master_mctss
, 1);
773 temp_fake_desc
= tss_desc_pattern
;
774 temp_fake_desc
.offset
= (vm_offset_t
) haddr
;
775 fix_desc(&temp_fake_desc
, 1);
776 *(struct fake_descriptor
*) &master_gdt
[sel_idx(MC_TSS
)] = temp_fake_desc
;
777 kprintf("MCTSS: 0x%x\n",haddr
);
779 cpu_desc_load(&cpu_data_master
);
784 * Bootstrap the system enough to run with virtual memory.
785 * Map the kernel's code and data, and allocate the system page table.
786 * Called with mapping OFF. Page_size must already be set.
791 __unused vm_offset_t load_start
,
799 vm_last_addr
= VM_MAX_KERNEL_ADDRESS
; /* Set the highest address
802 * The kernel's pmap is statically allocated so we don't
803 * have to use pmap_create, which is unlikely to work
804 * correctly at this part of the boot sequence.
808 kernel_pmap
= &kernel_pmap_store
;
809 kernel_pmap
->ref_count
= 1;
810 kernel_pmap
->nx_enabled
= FALSE
;
811 kernel_pmap
->pm_task_map
= TASK_MAP_32BIT
;
812 kernel_pmap
->pm_obj
= (vm_object_t
) NULL
;
813 kernel_pmap
->dirbase
= (pd_entry_t
*)((unsigned int)IdlePTD
| KERNBASE
);
814 kernel_pmap
->pdirbase
= (pmap_paddr_t
)((int)IdlePTD
);
815 pdpt
= (pd_entry_t
*)((unsigned int)IdlePDPT
| KERNBASE
);
816 kernel_pmap
->pm_pdpt
= pdpt
;
817 kernel_pmap
->pm_cr3
= (pmap_paddr_t
)((int)IdlePDPT
);
820 va
= (vm_offset_t
)kernel_pmap
->dirbase
;
821 /* setup self referential mapping(s) */
822 for (i
= 0; i
< NPGPTD
; i
++, pdpt
++) {
824 pa
= (pmap_paddr_t
) kvtophys((vm_offset_t
)(va
+ i386_ptob(i
)));
826 (pd_entry_t
*) (kernel_pmap
->dirbase
+ PTDPTDI
+ i
),
827 (pa
& PG_FRAME
) | INTEL_PTE_VALID
| INTEL_PTE_RW
| INTEL_PTE_REF
|
828 INTEL_PTE_MOD
| INTEL_PTE_WIRED
) ;
829 pmap_store_pte(pdpt
, pa
| INTEL_PTE_VALID
);
833 /* 32-bit and legacy support depends on IA32e mode being disabled */
837 lo_kernel_cr3
= kernel_pmap
->pm_cr3
;
838 current_cpu_datap()->cpu_kernel_cr3
= (addr64_t
) kernel_pmap
->pm_cr3
;
840 /* save the value we stuff into created pmaps to share the gdts etc */
841 high_shared_pde
= *pmap_pde(kernel_pmap
, HIGH_MEM_BASE
);
842 /* make sure G bit is on for high shared pde entry */
843 high_shared_pde
|= INTEL_PTE_GLOBAL
;
845 pmap_store_pte(pmap_pde(kernel_pmap
, HIGH_MEM_BASE
), high_shared_pde
);
849 OSAddAtomic(NKPT
, &inuse_ptepages_count
);
850 OSAddAtomic64(NKPT
, &alloc_ptepages_count
);
851 bootstrap_wired_pages
= NKPT
;
853 virtual_avail
= (vm_offset_t
)VADDR(KPTDI
,0) + (vm_offset_t
)first_avail
;
854 virtual_end
= (vm_offset_t
)(VM_MAX_KERNEL_ADDRESS
);
857 * Reserve some special page table entries/VA space for temporary
863 #define SYSMAP(c, p, v, n) \
864 v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n)
866 for (i
=0; i
<PMAP_NWINDOWS
; i
++) {
868 (current_cpu_datap()->cpu_pmap
->mapwindow
[i
].prv_CMAP
),
869 (current_cpu_datap()->cpu_pmap
->mapwindow
[i
].prv_CADDR
),
871 *current_cpu_datap()->cpu_pmap
->mapwindow
[i
].prv_CMAP
= 0;
874 /* DMAP user for debugger */
875 SYSMAP(caddr_t
, DMAP1
, DADDR1
, 1);
876 SYSMAP(caddr_t
, DMAP2
, DADDR2
, 1); /* XXX temporary - can remove */
880 if (PE_parse_boot_argn("npvhash", &npvhash
, sizeof (npvhash
))) {
881 if (0 != ((npvhash
+1) & npvhash
)) {
882 kprintf("invalid hash %d, must be ((2^N)-1), using default %d\n",npvhash
,NPVHASH
);
888 printf("npvhash=%d\n",npvhash
);
890 simple_lock_init(&kernel_pmap
->lock
, 0);
891 simple_lock_init(&pv_hashed_free_list_lock
, 0);
892 simple_lock_init(&pv_hashed_kern_free_list_lock
, 0);
893 simple_lock_init(&pv_hash_table_lock
,0);
895 pmap_init_high_shared();
897 pde_mapped_size
= PDE_MAPPED_SIZE
;
900 pdpt_entry_t
*ppdpt
= IdlePDPT
;
901 pdpt_entry_t
*ppdpt64
= (pdpt_entry_t
*)IdlePDPT64
;
902 pdpt_entry_t
*ppml4
= (pdpt_entry_t
*)IdlePML4
;
903 int istate
= ml_set_interrupts_enabled(FALSE
);
906 * Clone a new 64-bit 3rd-level page table directory, IdlePML4,
907 * with page bits set for the correct IA-32e operation and so that
908 * the legacy-mode IdlePDPT is retained for slave processor start-up.
909 * This is necessary due to the incompatible use of page bits between
910 * 64-bit and legacy modes.
912 kernel_pmap
->pm_cr3
= (pmap_paddr_t
)((int)IdlePML4
); /* setup in start.s for us */
913 kernel_pmap
->pm_pml4
= IdlePML4
;
914 kernel_pmap
->pm_pdpt
= (pd_entry_t
*)
915 ((unsigned int)IdlePDPT64
| KERNBASE
);
916 #define PAGE_BITS INTEL_PTE_VALID|INTEL_PTE_RW|INTEL_PTE_USER|INTEL_PTE_REF
917 pmap_store_pte(kernel_pmap
->pm_pml4
,
918 (uint32_t)IdlePDPT64
| PAGE_BITS
);
919 pmap_store_pte((ppdpt64
+0), *(ppdpt
+0) | PAGE_BITS
);
920 pmap_store_pte((ppdpt64
+1), *(ppdpt
+1) | PAGE_BITS
);
921 pmap_store_pte((ppdpt64
+2), *(ppdpt
+2) | PAGE_BITS
);
922 pmap_store_pte((ppdpt64
+3), *(ppdpt
+3) | PAGE_BITS
);
925 * The kernel is also mapped in the uber-sapce at the 4GB starting
926 * 0xFFFFFF80:00000000. This is the highest entry in the 4th-level.
928 pmap_store_pte((ppml4
+KERNEL_UBER_PML4_INDEX
), *(ppml4
+0));
930 kernel64_cr3
= (addr64_t
) kernel_pmap
->pm_cr3
;
932 /* Re-initialize descriptors and prepare to switch modes */
933 cpu_desc_init64(&cpu_data_master
);
934 current_cpu_datap()->cpu_is64bit
= TRUE
;
935 current_cpu_datap()->cpu_active_cr3
= kernel64_cr3
;
937 pde_mapped_size
= 512*4096 ;
939 ml_set_interrupts_enabled(istate
);
942 /* Sets 64-bit mode if required. */
943 cpu_mode_init(&cpu_data_master
);
944 /* Update in-kernel CPUID information if we're now in 64-bit mode */
948 kernel_pmap
->pm_hold
= (vm_offset_t
)kernel_pmap
->pm_pml4
;
950 kprintf("Kernel virtual space from 0x%x to 0x%x.\n",
951 VADDR(KPTDI
,0), virtual_end
);
952 printf("PAE enabled\n");
954 printf("64 bit mode enabled\n");kprintf("64 bit mode enabled\n"); }
956 kprintf("Available physical space from 0x%llx to 0x%llx\n",
957 avail_start
, avail_end
);
960 * By default for 64-bit users loaded at 4GB, share kernel mapping.
961 * But this may be overridden by the -no_shared_cr3 boot-arg.
963 if (PE_parse_boot_argn("-no_shared_cr3", &no_shared_cr3
, sizeof (no_shared_cr3
))) {
964 kprintf("Shared kernel address space disabled\n");
968 if (PE_parse_boot_argn("-pmap_trace", &pmap_trace
, sizeof (pmap_trace
))) {
969 kprintf("Kernel traces for pmap operations enabled\n");
971 #endif /* PMAP_TRACES */
979 *startp
= virtual_avail
;
984 * Initialize the pmap module.
985 * Called by vm_init, to initialize any structures that the pmap
986 * system needs to map virtual memory.
992 vm_map_offset_t vaddr
;
998 * Allocate memory for the pv_head_table and its lock bits,
999 * the modify bit array, and the pte_page table.
1003 * zero bias all these arrays now instead of off avail_start
1004 * so we cover all memory
1007 npages
= (long)i386_btop(avail_end
);
1008 s
= (vm_size_t
) (sizeof(struct pv_rooted_entry
) * npages
1009 + (sizeof (struct pv_hashed_entry_t
*) * (npvhash
+1))
1010 + pv_lock_table_size(npages
)
1011 + pv_hash_lock_table_size((npvhash
+1))
1015 if (kernel_memory_allocate(kernel_map
, &addr
, s
, 0,
1016 KMA_KOBJECT
| KMA_PERMANENT
)
1020 memset((char *)addr
, 0, s
);
1026 if (0 == npvhash
) panic("npvhash not initialized");
1030 * Allocate the structures first to preserve word-alignment.
1032 pv_head_table
= (pv_rooted_entry_t
) addr
;
1033 addr
= (vm_offset_t
) (pv_head_table
+ npages
);
1035 pv_hash_table
= (pv_hashed_entry_t
*)addr
;
1036 addr
= (vm_offset_t
) (pv_hash_table
+ (npvhash
+ 1));
1038 pv_lock_table
= (char *) addr
;
1039 addr
= (vm_offset_t
) (pv_lock_table
+ pv_lock_table_size(npages
));
1041 pv_hash_lock_table
= (char *) addr
;
1042 addr
= (vm_offset_t
) (pv_hash_lock_table
+ pv_hash_lock_table_size((npvhash
+1)));
1044 pmap_phys_attributes
= (char *) addr
;
1049 pmap_memory_region_t
*pmptr
= pmap_memory_regions
;
1051 last_pn
= (ppnum_t
)i386_btop(avail_end
);
1053 for (i
= 0; i
< pmap_memory_region_count
; i
++, pmptr
++) {
1054 if (pmptr
->type
== kEfiConventionalMemory
) {
1056 for (pn
= pmptr
->base
; pn
<= pmptr
->end
; pn
++) {
1058 pmap_phys_attributes
[pn
] |= PHYS_MANAGED
;
1060 if (pn
> last_managed_page
)
1061 last_managed_page
= pn
;
1064 pmap_phys_attributes
[pn
] |= PHYS_NOENCRYPT
;
1065 else if (pn
>= lowest_hi
&& pn
<= highest_hi
)
1066 pmap_phys_attributes
[pn
] |= PHYS_NOENCRYPT
;
1073 ppn
= pmap_find_phys(kernel_pmap
, vaddr
);
1075 pmap_phys_attributes
[ppn
] |= PHYS_NOENCRYPT
;
1081 * Create the zone of physical maps,
1082 * and of the physical-to-virtual entries.
1084 s
= (vm_size_t
) sizeof(struct pmap
);
1085 pmap_zone
= zinit(s
, 400*s
, 4096, "pmap"); /* XXX */
1086 zone_change(pmap_zone
, Z_NOENCRYPT
, TRUE
);
1088 s
= (vm_size_t
) sizeof(struct pv_hashed_entry
);
1089 pv_hashed_list_zone
= zinit(s
, 10000*s
/* Expandable zone */,
1090 4096 * 4 /* LCM i386 */, "pv_list");
1091 zone_change(pv_hashed_list_zone
, Z_NOENCRYPT
, TRUE
);
1094 pdpt_zone
= zinit(s
, 400*s
, 4096, "pdpt"); /* XXX */
1095 zone_change(pdpt_zone
, Z_NOENCRYPT
, TRUE
);
1097 kptobj
= &kptobj_object_store
;
1098 _vm_object_allocate((vm_object_size_t
)(NPGPTD
*NPTDPG
), kptobj
);
1099 kernel_pmap
->pm_obj
= kptobj
;
1101 /* create pv entries for kernel pages mapped by low level
1102 startup code. these have to exist so we can pmap_remove()
1103 e.g. kext pages from the middle of our addr space */
1105 vaddr
= (vm_map_offset_t
)0;
1106 for (ppn
= 0; ppn
< i386_btop(avail_start
) ; ppn
++ ) {
1107 pv_rooted_entry_t pv_e
;
1109 pv_e
= pai_to_pvh(ppn
);
1112 pv_e
->pmap
= kernel_pmap
;
1113 queue_init(&pv_e
->qlink
);
1116 pmap_initialized
= TRUE
;
1118 max_preemption_latency_tsc
= tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS
, tscFCvtn2t
);
1123 #define DBG(x...) kprintf("DBG: " x)
1129 * Called once VM is fully initialized so that we can release unused
1130 * sections of low memory to the general pool.
1131 * Also complete the set-up of identity-mapped sections of the kernel:
1132 * 1) write-protect kernel text
1133 * 2) map kernel text using large pages if possible
1134 * 3) read and write-protect page zero (for K32)
1135 * 4) map the global page at the appropriate virtual address.
1137 * Use of large pages
1138 * ------------------
1139 * To effectively map and write-protect all kernel text pages, the text
1140 * must be 2M-aligned at the base, and the data section above must also be
1141 * 2M-aligned. That is, there's padding below and above. This is achieved
1142 * through linker directives. Large pages are used only if this alignment
1143 * exists (and not overriden by the -kernel_text_page_4K boot-arg). The
1148 * sdata: ================== 2Meg
1152 * etext: ------------------
1160 * stext: ================== 2Meg
1164 * eHIB: ------------------
1168 * Prior to changing the mapping from 4K to 2M, the zero-padding pages
1169 * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
1170 * 4K pages covering [stext,etext] are coalesced as 2M large pages.
1171 * The now unused level-1 PTE pages are also freed.
1173 extern uint32_t pmap_reserved_ranges
;
1175 pmap_lowmem_finalize(void)
1180 /* Check the kernel is linked at the expected base address */
1181 if (i386_btop(kvtophys((vm_offset_t
) &IdlePML4
)) !=
1182 I386_KERNEL_IMAGE_BASE_PAGE
)
1183 panic("pmap_lowmem_finalize() unexpected kernel base address");
1186 * Update wired memory statistics for early boot pages
1188 PMAP_ZINFO_PALLOC(bootstrap_wired_pages
* PAGE_SIZE
);
1191 * Free all pages in pmap regions below the base:
1193 * We can't free all the pages to VM that EFI reports available.
1194 * Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
1195 * There's also a size miscalculation here: pend is one page less
1196 * than it should be but this is not fixed to be backwards
1198 * Due to this current EFI limitation, we take only the first
1199 * entry in the memory region table. However, the loop is retained
1200 * (with the intended termination criteria commented out) in the
1201 * hope that some day we can free all low-memory ranges.
1204 // pmap_memory_regions[i].end <= I386_KERNEL_IMAGE_BASE_PAGE;
1205 i
< 1 && (pmap_reserved_ranges
== 0);
1207 vm_offset_t pbase
= (vm_offset_t
)i386_ptob(pmap_memory_regions
[i
].base
);
1208 vm_offset_t pend
= (vm_offset_t
)i386_ptob(pmap_memory_regions
[i
].end
);
1209 // vm_offset_t pend = i386_ptob(pmap_memory_regions[i].end+1);
1211 DBG("ml_static_mfree(%p,%p) for pmap region %d\n",
1212 (void *) ml_static_ptovirt(pbase
),
1213 (void *) (pend
- pbase
), i
);
1214 ml_static_mfree(ml_static_ptovirt(pbase
), pend
- pbase
);
1218 * If text and data are both 2MB-aligned,
1219 * we can map text with large-pages,
1220 * unless the -kernel_text_ps_4K boot-arg overrides.
1222 if ((stext
& I386_LPGMASK
) == 0 && (sdata
& I386_LPGMASK
) == 0) {
1223 kprintf("Kernel text is 2MB aligned");
1224 kernel_text_ps_4K
= FALSE
;
1225 if (PE_parse_boot_argn("-kernel_text_ps_4K",
1227 sizeof (kernel_text_ps_4K
)))
1228 kprintf(" but will be mapped with 4K pages\n");
1230 kprintf(" and will be mapped with 2M pages\n");
1233 (void) PE_parse_boot_argn("wpkernel", &wpkernel
, sizeof (wpkernel
));
1235 kprintf("Kernel text %p-%p to be write-protected\n",
1236 (void *) stext
, (void *) etext
);
1241 * Scan over text if mappings are to be changed:
1242 * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0
1243 * - Change to large-pages if possible and not overriden.
1245 if (kernel_text_ps_4K
&& wpkernel
) {
1247 for (myva
= stext
; myva
< etext
; myva
+= PAGE_SIZE
) {
1250 ptep
= pmap_pte(kernel_pmap
, (vm_map_offset_t
)myva
);
1252 pmap_store_pte(ptep
, *ptep
& ~INTEL_PTE_RW
);
1256 if (!kernel_text_ps_4K
) {
1260 * Release zero-filled page padding used for 2M-alignment.
1262 DBG("ml_static_mfree(%p,%p) for padding below text\n",
1263 (void *) eHIB
, (void *) (stext
- eHIB
));
1264 ml_static_mfree(eHIB
, stext
- eHIB
);
1265 DBG("ml_static_mfree(%p,%p) for padding above text\n",
1266 (void *) etext
, (void *) (sdata
- etext
));
1267 ml_static_mfree(etext
, sdata
- etext
);
1270 * Coalesce text pages into large pages.
1272 for (myva
= stext
; myva
< sdata
; myva
+= I386_LPGBYTES
) {
1274 vm_offset_t pte_phys
;
1278 pdep
= pmap_pde(kernel_pmap
, (vm_map_offset_t
)myva
);
1279 ptep
= pmap_pte(kernel_pmap
, (vm_map_offset_t
)myva
);
1280 DBG("myva: %p pdep: %p ptep: %p\n",
1281 (void *) myva
, (void *) pdep
, (void *) ptep
);
1282 if ((*ptep
& INTEL_PTE_VALID
) == 0)
1284 pte_phys
= (vm_offset_t
)(*ptep
& PG_FRAME
);
1285 pde
= *pdep
& PTMASK
; /* page attributes from pde */
1286 pde
|= INTEL_PTE_PS
; /* make it a 2M entry */
1287 pde
|= pte_phys
; /* take page frame from pte */
1290 pde
&= ~INTEL_PTE_RW
;
1291 DBG("pmap_store_pte(%p,0x%llx)\n",
1293 pmap_store_pte(pdep
, pde
);
1296 * Free the now-unused level-1 pte.
1297 * Note: ptep is a virtual address to the pte in the
1298 * recursive map. We can't use this address to free
1299 * the page. Instead we need to compute its address
1300 * in the Idle PTEs in "low memory".
1302 vm_offset_t vm_ptep
= (vm_offset_t
) KPTphys
1303 + (pte_phys
>> PTPGSHIFT
);
1304 DBG("ml_static_mfree(%p,0x%x) for pte\n",
1305 (void *) vm_ptep
, PAGE_SIZE
);
1306 ml_static_mfree(vm_ptep
, PAGE_SIZE
);
1309 /* Change variable read by sysctl machdep.pmap */
1310 pmap_kernel_text_ps
= I386_LPGBYTES
;
1313 /* no matter what, kernel page zero is not accessible */
1314 pmap_store_pte(pmap_pte(kernel_pmap
, 0), INTEL_PTE_INVALID
);
1316 /* map lowmem global page into fixed addr */
1317 pt_entry_t
*pte
= NULL
;
1318 if (0 == (pte
= pmap_pte(kernel_pmap
,
1319 VM_MIN_KERNEL_LOADED_ADDRESS
+ 0x2000)))
1320 panic("lowmem pte");
1321 /* make sure it is defined on page boundary */
1322 assert(0 == ((vm_offset_t
) &lowGlo
& PAGE_MASK
));
1323 pmap_store_pte(pte
, kvtophys((vm_offset_t
)&lowGlo
)
1333 #define managed_page(x) ( (unsigned int)x <= last_managed_page && (pmap_phys_attributes[x] & PHYS_MANAGED) )
1336 * this function is only used for debugging fron the vm layer
1342 pv_rooted_entry_t pv_h
;
1346 assert(pn
!= vm_page_fictitious_addr
);
1348 if (!pmap_initialized
)
1351 if (pn
== vm_page_guard_addr
)
1354 pai
= ppn_to_pai(pn
);
1355 if (!managed_page(pai
))
1357 pv_h
= pai_to_pvh(pn
);
1358 result
= (pv_h
->pmap
== PMAP_NULL
);
1365 vm_map_offset_t va_start
,
1366 vm_map_offset_t va_end
)
1368 vm_map_offset_t offset
;
1371 if (pmap
== PMAP_NULL
) {
1376 * Check the resident page count
1377 * - if it's zero, the pmap is completely empty.
1378 * This short-circuit test prevents a virtual address scan which is
1379 * painfully slow for 64-bit spaces.
1380 * This assumes the count is correct
1381 * .. the debug kernel ought to be checking perhaps by page table walk.
1383 if (pmap
->stats
.resident_count
== 0)
1386 for (offset
= va_start
;
1388 offset
+= PAGE_SIZE_64
) {
1389 phys_page
= pmap_find_phys(pmap
, offset
);
1391 if (pmap
!= kernel_pmap
&&
1392 pmap
->pm_task_map
== TASK_MAP_32BIT
&&
1393 offset
>= HIGH_MEM_BASE
) {
1395 * The "high_shared_pde" is used to share
1396 * the entire top-most 2MB of address space
1397 * between the kernel and all 32-bit tasks.
1398 * So none of this can be removed from 32-bit
1400 * Let's pretend there's nothing up
1405 kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
1406 "page %d at 0x%llx\n",
1407 pmap
, va_start
, va_end
, phys_page
, offset
);
1417 * Create and return a physical map.
1419 * If the size specified for the map
1420 * is zero, the map is an actual physical
1421 * map, and may be referenced by the
1424 * If the size specified is non-zero,
1425 * the map will be used in software only, and
1426 * is bounded by that size.
1438 pml4_entry_t
*pml4p
;
1443 PMAP_TRACE(PMAP_CODE(PMAP__CREATE
) | DBG_FUNC_START
,
1444 (int) (sz
>>32), (int) sz
, (int) is_64bit
, 0, 0);
1446 size
= (vm_size_t
) sz
;
1449 * A software use-only map doesn't even need a map.
1456 p
= (pmap_t
) zalloc(pmap_zone
);
1458 panic("pmap_create zalloc");
1460 /* init counts now since we'll be bumping some */
1461 simple_lock_init(&p
->lock
, 0);
1462 p
->stats
.resident_count
= 0;
1463 p
->stats
.resident_max
= 0;
1464 p
->stats
.wired_count
= 0;
1467 p
->pm_shared
= FALSE
;
1469 assert(!is_64bit
|| cpu_64bit
);
1470 p
->pm_task_map
= is_64bit
? TASK_MAP_64BIT
: TASK_MAP_32BIT
;;
1473 /* legacy 32 bit setup */
1474 /* in the legacy case the pdpt layer is hardwired to 4 entries and each
1475 * entry covers 1GB of addr space */
1476 if (KERN_SUCCESS
!= kmem_alloc_kobject(kernel_map
, (vm_offset_t
*)(&p
->dirbase
), NBPTD
))
1477 panic("pmap_create kmem_alloc_kobject");
1478 p
->pm_hold
= (vm_offset_t
)zalloc(pdpt_zone
);
1479 if ((vm_offset_t
)NULL
== p
->pm_hold
) {
1480 panic("pdpt zalloc");
1482 pdpt
= (pdpt_entry_t
*) (( p
->pm_hold
+ 31) & ~31);
1483 p
->pm_cr3
= (pmap_paddr_t
)kvtophys((vm_offset_t
)pdpt
);
1484 if (NULL
== (p
->pm_obj
= vm_object_allocate((vm_object_size_t
)(NPGPTD
*NPTDPG
))))
1485 panic("pmap_create vm_object_allocate");
1487 memset((char *)p
->dirbase
, 0, NBPTD
);
1489 va
= (vm_offset_t
)p
->dirbase
;
1490 p
->pdirbase
= kvtophys(va
);
1492 PMAP_ZINFO_SALLOC(NBPTD
);
1494 template = INTEL_PTE_VALID
;
1495 for (i
= 0; i
< NPGPTD
; i
++, pdpt
++ ) {
1497 pa
= (pmap_paddr_t
) kvtophys((vm_offset_t
)(va
+ i386_ptob(i
)));
1498 pmap_store_pte(pdpt
, pa
| template);
1501 /* map the high shared pde */
1503 pmap_store_pte(pmap_pde(p
, HIGH_MEM_BASE
), high_shared_pde
);
1509 /* alloc the pml4 page in kernel vm */
1510 if (KERN_SUCCESS
!= kmem_alloc_kobject(kernel_map
, (vm_offset_t
*)(&p
->pm_hold
), PAGE_SIZE
))
1511 panic("pmap_create kmem_alloc_kobject pml4");
1513 memset((char *)p
->pm_hold
, 0, PAGE_SIZE
);
1514 p
->pm_cr3
= (pmap_paddr_t
)kvtophys((vm_offset_t
)p
->pm_hold
);
1516 OSAddAtomic(1, &inuse_ptepages_count
);
1517 OSAddAtomic64(1, &alloc_ptepages_count
);
1518 PMAP_ZINFO_SALLOC(PAGE_SIZE
);
1520 /* allocate the vm_objs to hold the pdpt, pde and pte pages */
1522 if (NULL
== (p
->pm_obj_pml4
= vm_object_allocate((vm_object_size_t
)(NPML4PGS
))))
1523 panic("pmap_create pdpt obj");
1525 if (NULL
== (p
->pm_obj_pdpt
= vm_object_allocate((vm_object_size_t
)(NPDPTPGS
))))
1526 panic("pmap_create pdpt obj");
1528 if (NULL
== (p
->pm_obj
= vm_object_allocate((vm_object_size_t
)(NPDEPGS
))))
1529 panic("pmap_create pte obj");
1531 /* uber space points to uber mapped kernel */
1533 pml4p
= pmap64_pml4(p
, 0ULL);
1534 pmap_store_pte((pml4p
+KERNEL_UBER_PML4_INDEX
),*kernel_pmap
->pm_pml4
);
1538 while ((pdp
= pmap64_pde(p
, (uint64_t)HIGH_MEM_BASE
)) == PD_ENTRY_NULL
) {
1540 pmap_expand_pdpt(p
, (uint64_t)HIGH_MEM_BASE
); /* need room for another pde entry */
1543 pmap_store_pte(pdp
, high_shared_pde
);
1548 PMAP_TRACE(PMAP_CODE(PMAP__CREATE
) | DBG_FUNC_START
,
1549 (int) p
, is_64bit
, 0, 0, 0);
1555 * The following routines implement the shared address optmization for 64-bit
1556 * users with a 4GB page zero.
1558 * pmap_set_4GB_pagezero()
1559 * is called in the exec and fork paths to mirror the kernel's
1560 * mapping in the bottom 4G of the user's pmap. The task mapping changes
1561 * from TASK_MAP_64BIT to TASK_MAP_64BIT_SHARED. This routine returns
1562 * without doing anything if the -no_shared_cr3 boot-arg is set.
1564 * pmap_clear_4GB_pagezero()
1565 * is called in the exec/exit paths to undo this mirror. The task mapping
1566 * reverts to TASK_MAP_64BIT. In addition, we switch to the kernel's
1567 * CR3 by calling pmap_load_kernel_cr3().
1569 * pmap_load_kernel_cr3()
1570 * loads cr3 with the kernel's page table. In addition to being called
1571 * by pmap_clear_4GB_pagezero(), it is used both prior to teardown and
1572 * when we go idle in the context of a shared map.
1574 * Further notes on per-cpu data used:
1576 * cpu_kernel_cr3 is the cr3 for the kernel's pmap.
1577 * This is loaded in a trampoline on entering the kernel
1578 * from a 32-bit user (or non-shared-cr3 64-bit user).
1579 * cpu_task_cr3 is the cr3 for the current thread.
1580 * This is loaded in a trampoline as we exit the kernel.
1581 * cpu_active_cr3 reflects the cr3 currently loaded.
1582 * However, the low order bit is set when the
1583 * processor is idle or interrupts are disabled
1584 * while the system pmap lock is held. It is used by
1586 * cpu_task_map indicates whether the task cr3 belongs to
1587 * a 32-bit, a 64-bit or a 64-bit shared map.
1588 * The latter allows the avoidance of the cr3 load
1589 * on kernel entry and exit.
1590 * cpu_tlb_invalid set TRUE when a tlb flush is requested.
1591 * If the cr3 is "inactive" (the cpu is idle or the
1592 * system-wide pmap lock is held) this not serviced by
1593 * an IPI but at time when the cr3 becomes "active".
1597 pmap_set_4GB_pagezero(pmap_t p
)
1599 pdpt_entry_t
*user_pdptp
;
1600 pdpt_entry_t
*kern_pdptp
;
1602 assert(p
->pm_task_map
!= TASK_MAP_32BIT
);
1604 /* Kernel-shared cr3 may be disabled by boot arg. */
1609 * Set the bottom 4 3rd-level pte's to be the kernel's.
1612 while ((user_pdptp
= pmap64_pdpt(p
, 0x0)) == PDPT_ENTRY_NULL
) {
1614 pmap_expand_pml4(p
, 0x0);
1617 kern_pdptp
= kernel_pmap
->pm_pdpt
;
1618 pmap_store_pte(user_pdptp
+0, *(kern_pdptp
+0));
1619 pmap_store_pte(user_pdptp
+1, *(kern_pdptp
+1));
1620 pmap_store_pte(user_pdptp
+2, *(kern_pdptp
+2));
1621 pmap_store_pte(user_pdptp
+3, *(kern_pdptp
+3));
1622 p
->pm_task_map
= TASK_MAP_64BIT_SHARED
;
1627 pmap_clear_4GB_pagezero(pmap_t p
)
1629 pdpt_entry_t
*user_pdptp
;
1632 if (p
->pm_task_map
!= TASK_MAP_64BIT_SHARED
)
1637 p
->pm_task_map
= TASK_MAP_64BIT
;
1639 istate
= ml_set_interrupts_enabled(FALSE
);
1640 if (current_cpu_datap()->cpu_task_map
== TASK_MAP_64BIT_SHARED
)
1641 current_cpu_datap()->cpu_task_map
= TASK_MAP_64BIT
;
1642 pmap_load_kernel_cr3();
1644 user_pdptp
= pmap64_pdpt(p
, 0x0);
1645 pmap_store_pte(user_pdptp
+0, 0);
1646 pmap_store_pte(user_pdptp
+1, 0);
1647 pmap_store_pte(user_pdptp
+2, 0);
1648 pmap_store_pte(user_pdptp
+3, 0);
1650 ml_set_interrupts_enabled(istate
);
1656 pmap_load_kernel_cr3(void)
1658 uint64_t kernel_cr3
;
1660 assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
1663 * Reload cr3 with the true kernel cr3.
1665 kernel_cr3
= current_cpu_datap()->cpu_kernel_cr3
;
1666 set64_cr3(kernel_cr3
);
1667 current_cpu_datap()->cpu_active_cr3
= kernel_cr3
;
1668 current_cpu_datap()->cpu_tlb_invalid
= FALSE
;
1669 __asm__
volatile("mfence");
1673 * Retire the given physical map from service.
1674 * Should only be called if the map contains
1675 * no valid mappings.
1687 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY
) | DBG_FUNC_START
,
1688 (int) p
, 0, 0, 0, 0);
1696 * If some cpu is not using the physical pmap pointer that it
1697 * is supposed to be (see set_dirbase), we might be using the
1698 * pmap that is being destroyed! Make sure we are
1699 * physically on the right pmap:
1703 0xFFFFFFFFFFFFF000ULL
);
1709 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY
) | DBG_FUNC_END
,
1710 (int) p
, 1, 0, 0, 0);
1711 return; /* still in use */
1715 * Free the memory maps, then the
1719 OSAddAtomic(-p
->pm_obj
->resident_page_count
, &inuse_ptepages_count
);
1720 PMAP_ZINFO_PFREE(p
->pm_obj
->resident_page_count
* PAGE_SIZE
);
1722 kmem_free(kernel_map
, (vm_offset_t
)p
->dirbase
, NBPTD
);
1723 PMAP_ZINFO_SFREE(NBPTD
);
1725 zfree(pdpt_zone
, (void *)p
->pm_hold
);
1727 vm_object_deallocate(p
->pm_obj
);
1730 int inuse_ptepages
= 0;
1732 /* free 64 bit mode structs */
1733 kmem_free(kernel_map
, (vm_offset_t
)p
->pm_hold
, PAGE_SIZE
);
1734 PMAP_ZINFO_SFREE(PAGE_SIZE
);
1736 inuse_ptepages
+= p
->pm_obj_pml4
->resident_page_count
;
1737 vm_object_deallocate(p
->pm_obj_pml4
);
1739 inuse_ptepages
+= p
->pm_obj_pdpt
->resident_page_count
;
1740 vm_object_deallocate(p
->pm_obj_pdpt
);
1742 inuse_ptepages
+= p
->pm_obj
->resident_page_count
;
1743 vm_object_deallocate(p
->pm_obj
);
1745 OSAddAtomic(-(inuse_ptepages
+1), &inuse_ptepages_count
);
1746 PMAP_ZINFO_PFREE(inuse_ptepages
* PAGE_SIZE
);
1749 zfree(pmap_zone
, p
);
1751 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY
) | DBG_FUNC_END
,
1757 * Add a reference to the specified pmap.
1765 if (p
!= PMAP_NULL
) {
1773 * Remove phys addr if mapped in specified map
1777 pmap_remove_some_phys(
1778 __unused pmap_t map
,
1779 __unused ppnum_t pn
)
1782 /* Implement to support working set code */
1787 * Set the physical protection on the
1788 * specified range of this map as requested.
1789 * Will not increase permissions.
1794 vm_map_offset_t sva
,
1795 vm_map_offset_t eva
,
1798 register pt_entry_t
*pde
;
1799 register pt_entry_t
*spte
, *epte
;
1800 vm_map_offset_t lva
;
1801 vm_map_offset_t orig_sva
;
1807 if (map
== PMAP_NULL
)
1810 if (prot
== VM_PROT_NONE
) {
1811 pmap_remove(map
, sva
, eva
);
1815 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT
) | DBG_FUNC_START
,
1817 (int) (sva
>>32), (int) sva
,
1818 (int) (eva
>>32), (int) eva
);
1820 if ( (prot
& VM_PROT_EXECUTE
) || !nx_enabled
|| !map
->nx_enabled
)
1829 lva
= (sva
+ pde_mapped_size
) & ~(pde_mapped_size
-1);
1832 pde
= pmap_pde(map
, sva
);
1833 if (pde
&& (*pde
& INTEL_PTE_VALID
)) {
1834 spte
= (pt_entry_t
*)pmap_pte(map
, (sva
& ~(pde_mapped_size
-1)));
1835 spte
= &spte
[ptenum(sva
)];
1836 epte
= &spte
[intel_btop(lva
-sva
)];
1838 while (spte
< epte
) {
1840 if (*spte
& INTEL_PTE_VALID
) {
1842 if (prot
& VM_PROT_WRITE
)
1843 pmap_update_pte(spte
, *spte
, (*spte
| INTEL_PTE_WRITE
));
1845 pmap_update_pte(spte
, *spte
, (*spte
& ~INTEL_PTE_WRITE
));
1848 pmap_update_pte(spte
, *spte
, (*spte
| INTEL_PTE_NX
));
1850 pmap_update_pte(spte
, *spte
, (*spte
& ~INTEL_PTE_NX
));
1861 PMAP_UPDATE_TLBS(map
, orig_sva
, eva
);
1866 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT
) | DBG_FUNC_END
,
1871 /* Map a (possibly) autogenned block */
1880 __unused
unsigned int flags
)
1884 for (page
= 0; page
< size
; page
++) {
1885 pmap_enter(pmap
, va
, pa
, prot
, attr
, TRUE
);
1892 * Routine: pmap_extract
1894 * Extract the physical page address associated
1895 * with the given map/virtual_address pair.
1896 * Change to shim for backwards compatibility but will not
1897 * work for 64 bit systems. Some old drivers that we cannot
1903 register pmap_t pmap
,
1904 vm_map_offset_t vaddr
)
1909 paddr
= (vm_offset_t
)0;
1910 ppn
= pmap_find_phys(pmap
, vaddr
);
1913 paddr
= ((vm_offset_t
)i386_ptob(ppn
)) | ((vm_offset_t
)vaddr
& INTEL_OFFMASK
);
1921 vm_map_offset_t vaddr
)
1923 register vm_page_t m
;
1924 register pmap_paddr_t pa
;
1928 pml4_entry_t
*pml4p
;
1930 if (kernel_pmap
== map
) panic("expand kernel pml4");
1933 pml4p
= pmap64_pml4(map
, vaddr
);
1935 if (PML4_ENTRY_NULL
== pml4p
) panic("pmap_expand_pml4 no pml4p");
1938 * Allocate a VM page for the pml4 page
1940 while ((m
= vm_page_grab()) == VM_PAGE_NULL
)
1944 * put the page into the pmap's obj list so it
1945 * can be found later.
1949 i
= pml4idx(map
, vaddr
);
1956 vm_page_lockspin_queues();
1958 vm_page_unlock_queues();
1960 OSAddAtomic(1, &inuse_ptepages_count
);
1961 OSAddAtomic64(1, &alloc_ptepages_count
);
1962 PMAP_ZINFO_PALLOC(PAGE_SIZE
);
1964 /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */
1965 vm_object_lock(map
->pm_obj_pml4
);
1969 * See if someone else expanded us first
1971 if (pmap64_pdpt(map
, vaddr
) != PDPT_ENTRY_NULL
) {
1973 vm_object_unlock(map
->pm_obj_pml4
);
1977 OSAddAtomic(-1, &inuse_ptepages_count
);
1978 PMAP_ZINFO_PFREE(PAGE_SIZE
);
1981 pmap_set_noencrypt(pn
);
1984 if (0 != vm_page_lookup(map
->pm_obj_pml4
, (vm_object_offset_t
)i
)) {
1985 panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
1986 map
, map
->pm_obj_pml4
, vaddr
, i
);
1989 vm_page_insert(m
, map
->pm_obj_pml4
, (vm_object_offset_t
)i
);
1990 vm_object_unlock(map
->pm_obj_pml4
);
1993 * Set the page directory entry for this page table.
1995 pml4p
= pmap64_pml4(map
, vaddr
); /* refetch under lock */
1997 pmap_store_pte(pml4p
, pa_to_pte(pa
)
2011 vm_map_offset_t vaddr
)
2013 register vm_page_t m
;
2014 register pmap_paddr_t pa
;
2018 pdpt_entry_t
*pdptp
;
2020 if (kernel_pmap
== map
) panic("expand kernel pdpt");
2023 while ((pdptp
= pmap64_pdpt(map
, vaddr
)) == PDPT_ENTRY_NULL
) {
2025 pmap_expand_pml4(map
, vaddr
); /* need room for another pdpt entry */
2031 * Allocate a VM page for the pdpt page
2033 while ((m
= vm_page_grab()) == VM_PAGE_NULL
)
2037 * put the page into the pmap's obj list so it
2038 * can be found later.
2042 i
= pdptidx(map
, vaddr
);
2049 vm_page_lockspin_queues();
2051 vm_page_unlock_queues();
2053 OSAddAtomic(1, &inuse_ptepages_count
);
2054 OSAddAtomic64(1, &alloc_ptepages_count
);
2055 PMAP_ZINFO_PALLOC(PAGE_SIZE
);
2057 /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */
2058 vm_object_lock(map
->pm_obj_pdpt
);
2062 * See if someone else expanded us first
2064 if (pmap64_pde(map
, vaddr
) != PD_ENTRY_NULL
) {
2066 vm_object_unlock(map
->pm_obj_pdpt
);
2070 OSAddAtomic(-1, &inuse_ptepages_count
);
2071 PMAP_ZINFO_PFREE(PAGE_SIZE
);
2074 pmap_set_noencrypt(pn
);
2077 if (0 != vm_page_lookup(map
->pm_obj_pdpt
, (vm_object_offset_t
)i
)) {
2078 panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
2079 map
, map
->pm_obj_pdpt
, vaddr
, i
);
2082 vm_page_insert(m
, map
->pm_obj_pdpt
, (vm_object_offset_t
)i
);
2083 vm_object_unlock(map
->pm_obj_pdpt
);
2086 * Set the page directory entry for this page table.
2088 pdptp
= pmap64_pdpt(map
, vaddr
); /* refetch under lock */
2090 pmap_store_pte(pdptp
, pa_to_pte(pa
)
2104 * Routine: pmap_expand
2106 * Expands a pmap to be able to map the specified virtual address.
2108 * Allocates new virtual memory for the P0 or P1 portion of the
2109 * pmap, then re-maps the physical pages that were in the old
2110 * pmap to be in the new pmap.
2112 * Must be called with the pmap system and the pmap unlocked,
2113 * since these must be unlocked to use vm_allocate or vm_deallocate.
2114 * Thus it must be called in a loop that checks whether the map
2115 * has been expanded enough.
2116 * (We won't loop forever, since page tables aren't shrunk.)
2121 vm_map_offset_t vaddr
)
2124 register vm_page_t m
;
2125 register pmap_paddr_t pa
;
2131 * if not the kernel map (while we are still compat kernel mode)
2132 * and we are 64 bit, propagate expand upwards
2135 if (cpu_64bit
&& (map
!= kernel_pmap
)) {
2137 while ((pdp
= pmap64_pde(map
, vaddr
)) == PD_ENTRY_NULL
) {
2139 pmap_expand_pdpt(map
, vaddr
); /* need room for another pde entry */
2146 * Allocate a VM page for the pde entries.
2148 while ((m
= vm_page_grab()) == VM_PAGE_NULL
)
2152 * put the page into the pmap's obj list so it
2153 * can be found later.
2157 i
= pdeidx(map
, vaddr
);
2164 vm_page_lockspin_queues();
2166 vm_page_unlock_queues();
2168 OSAddAtomic(1, &inuse_ptepages_count
);
2169 OSAddAtomic64(1, &alloc_ptepages_count
);
2170 PMAP_ZINFO_PALLOC(PAGE_SIZE
);
2172 /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */
2173 vm_object_lock(map
->pm_obj
);
2177 * See if someone else expanded us first
2180 if (pmap_pte(map
, vaddr
) != PT_ENTRY_NULL
) {
2182 vm_object_unlock(map
->pm_obj
);
2186 OSAddAtomic(-1, &inuse_ptepages_count
);
2187 PMAP_ZINFO_PFREE(PAGE_SIZE
);
2190 pmap_set_noencrypt(pn
);
2193 if (0 != vm_page_lookup(map
->pm_obj
, (vm_object_offset_t
)i
)) {
2194 panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
2195 map
, map
->pm_obj
, vaddr
, i
);
2198 vm_page_insert(m
, map
->pm_obj
, (vm_object_offset_t
)i
);
2199 vm_object_unlock(map
->pm_obj
);
2202 * refetch while locked
2205 pdp
= pmap_pde(map
, vaddr
);
2208 * Set the page directory entry for this page table.
2210 pmap_store_pte(pdp
, pa_to_pte(pa
)
2222 * pmap_sync_page_data_phys(ppnum_t pa)
2224 * Invalidates all of the instruction cache on a physical page and
2225 * pushes any dirty data from the data cache for the same physical page
2226 * Not required in i386.
2229 pmap_sync_page_data_phys(__unused ppnum_t pa
)
2235 * pmap_sync_page_attributes_phys(ppnum_t pa)
2237 * Write back and invalidate all cachelines on a physical page.
2240 pmap_sync_page_attributes_phys(ppnum_t pa
)
2242 cache_flush_page_phys(pa
);
2247 #ifdef CURRENTLY_UNUSED_AND_UNTESTED
2253 * Routine: pmap_collect
2255 * Garbage collects the physical map system for
2256 * pages which are no longer used.
2257 * Success need not be guaranteed -- that is, there
2258 * may well be pages which are not referenced, but
2259 * others may be collected.
2261 * Called by the pageout daemon when pages are scarce.
void
pmap_collect(
	pmap_t		p)
{
	register pt_entry_t	*pdp, *ptp;
	pt_entry_t		*eptp;
	int			wired;

	if (p == PMAP_NULL)
		return;

	if (p == kernel_pmap)
		return;

	/*
	 *	Garbage collect map.
	 */
	PMAP_LOCK(p);

	for (pdp = (pt_entry_t *)p->dirbase;
	     pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
	     pdp++)
	{
	   if (*pdp & INTEL_PTE_VALID) {
	      if (*pdp & INTEL_PTE_REF) {
		pmap_store_pte(pdp, *pdp & ~INTEL_PTE_REF);
	      } else {
		ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
		eptp = ptp + NPTEPG;

		/*
		 * If the pte page has any wired mappings, we cannot
		 * free it.
		 */
		wired = 0;
		{
		    register pt_entry_t *ptep;
		    for (ptep = ptp; ptep < eptp; ptep++) {
			if (iswired(*ptep)) {
			    wired = 1;
			    break;
			}
		    }
		}
		if (!wired) {
		    /*
		     * Remove the virtual addresses mapped by this pte page.
		     */
		    pmap_remove_range(p,
				pdetova(pdp - (pt_entry_t *)p->dirbase),
				ptp,
				eptp);

		    /*
		     * Invalidate the page directory pointer.
		     */
		    pmap_store_pte(pdp, 0x0);

		    PMAP_UNLOCK(p);

		    /*
		     * And free the pte page itself.
		     */
		    {
			register vm_page_t m;

			vm_object_lock(p->pm_obj);

			m = vm_page_lookup(p->pm_obj, (vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]));
			if (m == VM_PAGE_NULL)
			    panic("pmap_collect: pte page not in object");

			vm_object_unlock(p->pm_obj);

			VM_PAGE_FREE(m);

			OSAddAtomic(-1, &inuse_ptepages_count);
			PMAP_ZINFO_PFREE(PAGE_SIZE);
		    }

		    PMAP_LOCK(p);
		}
	      }
	   }
	}

	PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
	PMAP_UNLOCK(p);
	return;
}
#endif /* CURRENTLY_UNUSED_AND_UNTESTED */
void
pmap_copy_page(ppnum_t src, ppnum_t dst)
{
	bcopy_phys((addr64_t)i386_ptob(src),
		   (addr64_t)i386_ptob(dst),
		   PAGE_SIZE);
}
/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(
	__unused pmap_t			pmap,
	__unused vm_map_offset_t	start_addr,
	__unused vm_map_offset_t	end_addr,
	__unused boolean_t		pageable)
{
#ifdef	lint
	pmap++; start_addr++; end_addr++; pageable++;
#endif	/* lint */
}
void
invalidate_icache(__unused vm_offset_t	addr,
		  __unused unsigned	cnt,
		  __unused int		phys)
{
	return;
}

void
flush_dcache(__unused vm_offset_t	addr,
	     __unused unsigned		count,
	     __unused int		phys)
{
	return;
}
#if CONFIG_DTRACE
/*
 * Constrain DTrace copyin/copyout actions
 */
extern kern_return_t dtrace_copyio_preflight(addr64_t);
extern kern_return_t dtrace_copyio_postflight(addr64_t);

kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
{
	thread_t thread = current_thread();

	if (current_map() == kernel_map)
		return KERN_FAILURE;
	else if (thread->machine.specFlags & CopyIOActive)
		return KERN_FAILURE;
	else
		return KERN_SUCCESS;
}

kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
{
	return KERN_SUCCESS;
}
#endif /* CONFIG_DTRACE */
#if	MACH_KDB

/* show phys page mappings and attributes */

extern void	db_show_page(pmap_paddr_t pa);

void
db_show_page(pmap_paddr_t pa)
{
	pv_entry_t	pv_h;
	int		pai;
	char		attr;

	pai = pa_index(pa);
	pv_h = pai_to_pvh(pai);

	attr = pmap_phys_attributes[pai];
	printf("phys page %llx ", pa);
	if (attr & PHYS_MODIFIED)
		printf("modified, ");
	if (attr & PHYS_REFERENCED)
		printf("referenced, ");
	if (pv_h->pmap || pv_h->next)
		printf(" mapped at\n");
	else
		printf(" not mapped\n");
	for (; pv_h; pv_h = pv_h->next)
		printf("%llx in pmap %p\n", pv_h->va, pv_h->pmap);
}

#endif /* MACH_KDB */
#if	MACH_KDB
void db_kvtophys(vm_offset_t);
void db_show_vaddrs(pt_entry_t *);

/*
 *	print out the results of kvtophys(arg)
 */
void
db_kvtophys(
	vm_offset_t	vaddr)
{
	db_printf("0x%qx", kvtophys(vaddr));
}

/*
 *	Walk the page tables.
 */
void
db_show_vaddrs(
	pt_entry_t	*dirbase)
{
	pt_entry_t	*ptep, *pdep, tmp;
	unsigned int	x, y, pdecnt, ptecnt;

	if (dirbase == 0) {
		dirbase = kernel_pmap->dirbase;
	}
	if (dirbase == 0) {
		db_printf("need a dirbase...\n");
		return;
	}
	dirbase = (pt_entry_t *) (int) ((unsigned long) dirbase & ~INTEL_OFFMASK);

	db_printf("dirbase: 0x%x\n", dirbase);

	pdecnt = ptecnt = 0;
	pdep = &dirbase[0];
	for (y = 0; y < NPDEPG; y++, pdep++) {
		if (((tmp = *pdep) & INTEL_PTE_VALID) == 0) {
			continue;
		}
		pdecnt++;
		ptep = (pt_entry_t *) ((unsigned long)(*pdep) & ~INTEL_OFFMASK);
		db_printf("dir[%4d]: 0x%x\n", y, *pdep);
		for (x = 0; x < NPTEPG; x++, ptep++) {
			if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) {
				continue;
			}
			ptecnt++;
			db_printf("   tab[%4d]: 0x%x, va=0x%x, pa=0x%x\n",
				x,
				*ptep,
				(y << 22) | (x << 12),
				*ptep & ~INTEL_OFFMASK);
		}
	}

	db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt);
}
#endif /* MACH_KDB */
#include <mach_vm_debug.h>
#if	MACH_VM_DEBUG
#include <vm/vm_debug.h>

int
pmap_list_resident_pages(
	__unused pmap_t		pmap,
	__unused vm_offset_t	*listp,
	__unused int		space)
{
	return 0;
}
#endif /* MACH_VM_DEBUG */
/* temporary workaround */
boolean_t
coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
{
	pt_entry_t	*ptep;

	ptep = pmap_pte(map->pmap, va);
	if (0 == ptep)
		return FALSE;
	return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED));
}
boolean_t
phys_page_exists(ppnum_t pn)
{
	assert(pn != vm_page_fictitious_addr);

	if (!pmap_initialized)
		return FALSE;

	if (pn == vm_page_guard_addr)
		return FALSE;

	if (!managed_page(ppn_to_pai(pn)))
		return FALSE;

	return TRUE;
}
void
pmap_commpage32_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt)
{
	int		i;
	pt_entry_t	*opte, *npte;
	pt_entry_t	pte;
	spl_t		s;

	for (i = 0; i < cnt; i++) {
		s = splhigh();
		opte = pmap_pte(kernel_pmap, (vm_map_offset_t)kernel_commpage);
		if (0 == opte)
			panic("kernel_commpage");
		pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
		pte &= ~INTEL_PTE_WRITE;	// ensure read only
		npte = pmap_pte(kernel_pmap, (vm_map_offset_t)user_commpage);
		if (0 == npte)
			panic("user_commpage");
		pmap_store_pte(npte, pte);
		splx(s);
		kernel_commpage += INTEL_PGBYTES;
		user_commpage += INTEL_PGBYTES;
	}
}
#define PMAP_COMMPAGE64_CNT  (_COMM_PAGE64_AREA_USED/PAGE_SIZE)
pt_entry_t pmap_commpage64_ptes[PMAP_COMMPAGE64_CNT];

void
pmap_commpage64_init(vm_offset_t kernel_commpage, __unused vm_map_offset_t user_commpage, int cnt)
{
	int		i;
	pt_entry_t	*kptep;

	PMAP_LOCK(kernel_pmap);

	for (i = 0; i < cnt; i++) {
		kptep = pmap_pte(kernel_pmap, (uint64_t)kernel_commpage + (i*PAGE_SIZE));
		if ((0 == kptep) || (0 == (*kptep & INTEL_PTE_VALID)))
			panic("pmap_commpage64_init pte");
		pmap_commpage64_ptes[i] = ((*kptep & ~INTEL_PTE_WRITE) | INTEL_PTE_USER);
	}
	PMAP_UNLOCK(kernel_pmap);
}
static cpu_pmap_t		cpu_pmap_master;

struct cpu_pmap *
pmap_cpu_alloc(boolean_t is_boot_cpu)
{
	int			ret;
	int			i;
	cpu_pmap_t		*cp;
	vm_offset_t		address;
	vm_map_address_t	mapaddr;
	vm_map_entry_t		entry;
	pt_entry_t		*pte;

	if (is_boot_cpu) {
		cp = &cpu_pmap_master;
	} else {
		/*
		 * The per-cpu pmap data structure itself.
		 */
		ret = kmem_alloc(kernel_map,
				 (vm_offset_t *) &cp, sizeof(cpu_pmap_t));
		if (ret != KERN_SUCCESS) {
			printf("pmap_cpu_alloc() failed ret=%d\n", ret);
			return NULL;
		}
		bzero((void *)cp, sizeof(cpu_pmap_t));

		/*
		 * The temporary windows used for copy/zero - see loose_ends.c
		 */
		ret = vm_map_find_space(kernel_map,
		    &mapaddr, PMAP_NWINDOWS*PAGE_SIZE, (vm_map_offset_t)0, 0, &entry);
		if (ret != KERN_SUCCESS) {
			printf("pmap_cpu_alloc() "
			       "vm_map_find_space ret=%d\n", ret);
			pmap_cpu_free(cp);
			return NULL;
		}
		address = (vm_offset_t)mapaddr;

		for (i = 0; i < PMAP_NWINDOWS; i++, address += PAGE_SIZE) {
			while ((pte = pmap_pte(kernel_pmap, (vm_map_offset_t)address)) == 0)
				pmap_expand(kernel_pmap, (vm_map_offset_t)address);
			cp->mapwindow[i].prv_CADDR = (caddr_t) address;
			cp->mapwindow[i].prv_CMAP = pte;
		}
		vm_map_unlock(kernel_map);
	}

	cp->pdpt_window_index = PMAP_PDPT_FIRST_WINDOW;
	cp->pde_window_index = PMAP_PDE_FIRST_WINDOW;
	cp->pte_window_index = PMAP_PTE_FIRST_WINDOW;

	return cp;
}
void
pmap_cpu_free(struct cpu_pmap *cp)
{
	if (cp != NULL && cp != &cpu_pmap_master) {
		kfree((void *) cp, sizeof(cpu_pmap_t));
	}
}
mapwindow_t *
pmap_get_mapwindow(pt_entry_t pentry)
{
	mapwindow_t	*mp;
	int		i;

	assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
	/* fold in cache attributes for this physical page */
	pentry |= pmap_get_cache_attributes(i386_btop(pte_to_pa(pentry)));
	/*
	 * Note: 0th map reserved for pmap_pte()
	 */
	for (i = PMAP_NWINDOWS_FIRSTFREE; i < PMAP_NWINDOWS; i++) {
		mp = &current_cpu_datap()->cpu_pmap->mapwindow[i];

		if (*mp->prv_CMAP == 0) {
			pmap_store_pte(mp->prv_CMAP, pentry);

			invlpg((uintptr_t)mp->prv_CADDR);

			return (mp);
		}
	}
	panic("pmap_get_mapwindow: no windows available");

	return NULL;
}
void
pmap_put_mapwindow(mapwindow_t *mp)
{
	pmap_store_pte(mp->prv_CMAP, 0);
}
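/*
 * Illustrative sketch (not part of the original source): the temporary
 * windows are used in get/put pairs -- build a PTE for the physical page
 * of interest, map it through a free window, access it via prv_CADDR,
 * then release the window.  The helper name and `buf' are placeholders;
 * the PTE bits shown are an assumed plain valid/writable kernel mapping.
 */
#if 0	/* example only */
static void
pmap_mapwindow_example(ppnum_t pn, void *buf)
{
	mapwindow_t	*map;

	map = pmap_get_mapwindow((pt_entry_t)(pa_to_pte(i386_ptob(pn)) |
					      INTEL_PTE_VALID | INTEL_PTE_WRITE));
	/* copy one page of data out through the temporary mapping */
	bcopy((const void *)map->prv_CADDR, buf, PAGE_SIZE);
	pmap_put_mapwindow(map);
}
#endif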
void
pmap_switch(pmap_t tpmap)
{
	spl_t	s;

	s = splhigh();		/* Make sure interrupts are disabled */
	set_dirbase(tpmap, current_thread());
	splx(s);
}
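/*
 * Illustrative sketch (not part of the original source): a context-switch
 * path would activate the incoming thread's address space roughly as
 * below.  `new_thread' and the helper name are placeholders; the
 * task->map->pmap chain is the same one used by pmap_dump_wrap() later
 * in this file.
 */
#if 0	/* example only */
static void
pmap_switch_example(thread_t new_thread)
{
	pmap_switch(new_thread->task->map->pmap);
}
#endif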
/*
 * disable no-execute capability on
 * the specified pmap
 */
void pmap_disable_NX(pmap_t pmap) {
	pmap->nx_enabled = 0;
}
void
pt_fake_zone_init(int zone_index)
{
	pt_fake_zone_index = zone_index;
}

void
pt_fake_zone_info(int *count,
		  vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
		  uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
	*count      = inuse_ptepages_count;
	*cur_size   = PAGE_SIZE * inuse_ptepages_count;
	*max_size   = PAGE_SIZE * (inuse_ptepages_count + vm_page_inactive_count + vm_page_active_count + vm_page_free_count);
	*elem_size  = PAGE_SIZE;
	*alloc_size = PAGE_SIZE;
	*sum_size   = alloc_ptepages_count * PAGE_SIZE;

	*collectable = 1;
	*exhaustable = 0;
	*caller_acct = 1;
}
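/*
 * Worked example (not part of the original source): with PAGE_SIZE of
 * 4096 bytes and, say, inuse_ptepages_count == 1024, pt_fake_zone_info()
 * reports *cur_size == 1024 * 4096 == 4 MB of page-table memory in use,
 * while *sum_size reflects the cumulative page allocations counted by
 * alloc_ptepages_count.
 */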
vm_offset_t pmap_cpu_high_map_vaddr(int cpu, enum high_cpu_types e)
{
	enum high_fixed_addresses	a;

	a = e + HIGH_CPU_END * cpu;
	return pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
}

vm_offset_t pmap_high_map_vaddr(enum high_cpu_types e)
{
	return pmap_cpu_high_map_vaddr(cpu_number(), e);
}

vm_offset_t pmap_high_map(pt_entry_t pte, enum high_cpu_types e)
{
	enum high_fixed_addresses	a;
	vm_offset_t			vaddr;

	a = e + HIGH_CPU_END * cpu_number();
	vaddr = (vm_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
	pmap_store_pte(pte_unique_base + a, pte);

	/* TLB flush for this page for this cpu */
	invlpg((uintptr_t)vaddr);

	return vaddr;
}
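/*
 * Worked example (not part of the original source): the per-cpu high
 * fixed slots are laid out as HIGH_CPU_END consecutive entries per cpu,
 * so for cpu 2 and type `e' the slot index is a = e + HIGH_CPU_END * 2,
 * and the mapping address is pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a).
 */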
void
pmap_cpuset_NMIPI(cpu_set cpu_mask) {
	unsigned int	cpu, cpu_bit;
	uint64_t	deadline;

	for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
		if (cpu_mask & cpu_bit)
			cpu_NMI_interrupt(cpu);
	}
	deadline = mach_absolute_time() + (((uint64_t)LockTimeOut) * 3);
	while (mach_absolute_time() < deadline)
		cpu_pause();
}
/*
 * Called with pmap locked, we:
 *  - scan through per-cpu data to see which other cpus need to flush
 *  - send an IPI to each non-idle cpu to be flushed
 *  - wait for all to signal back that they are inactive or we see that
 *    they are in an interrupt handler or at a safe point
 *  - flush the local tlb if active for this pmap
 *  - return ... the caller will unlock the pmap
 */
void
pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv)
{
	unsigned int	cpu;
	unsigned int	cpu_bit;
	cpu_set		cpus_to_signal;
	unsigned int	my_cpu = cpu_number();
	pmap_paddr_t	pmap_cr3 = pmap->pm_cr3;
	boolean_t	flush_self = FALSE;
	uint64_t	deadline;

	assert((processor_avail_count < 2) ||
	       (ml_get_interrupts_enabled() && get_preemption_level() != 0));

	/*
	 * Scan other cpus for matching active or task CR3.
	 * For idle cpus (with no active map) we mark them invalid but
	 * don't signal -- they'll check as they go busy.
	 * Note: for the kernel pmap we look for 64-bit shared address maps.
	 */
	cpus_to_signal = 0;
	for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
		if (!cpu_datap(cpu)->cpu_running)
			continue;
		if ((cpu_datap(cpu)->cpu_task_cr3 == pmap_cr3) ||
		    (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) ||
		    (pmap->pm_shared) ||
		    ((pmap == kernel_pmap) &&
		     (!CPU_CR3_IS_ACTIVE(cpu) ||
		      cpu_datap(cpu)->cpu_task_map == TASK_MAP_64BIT_SHARED))) {
			if (cpu == my_cpu) {
				flush_self = TRUE;
				continue;
			}
			cpu_datap(cpu)->cpu_tlb_invalid = TRUE;
			__asm__ volatile("mfence");

			if (CPU_CR3_IS_ACTIVE(cpu)) {
				cpus_to_signal |= cpu_bit;
				i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
			}
		}
	}

	PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START,
			    (uintptr_t) pmap, cpus_to_signal, flush_self, startv, 0);

	if (cpus_to_signal) {
		cpu_set	cpus_to_respond = cpus_to_signal;

		deadline = mach_absolute_time() + LockTimeOut;
		/*
		 * Wait for those other cpus to acknowledge
		 */
		while (cpus_to_respond != 0) {
			long orig_acks = 0;

			for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
				if ((cpus_to_respond & cpu_bit) != 0) {
					if (!cpu_datap(cpu)->cpu_running ||
					    cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
					    !CPU_CR3_IS_ACTIVE(cpu)) {
						cpus_to_respond &= ~cpu_bit;
					}
					cpu_pause();
				}
				if (cpus_to_respond == 0)
					break;
			}

			if (cpus_to_respond && (mach_absolute_time() > deadline)) {
				if (machine_timeout_suspended())
					continue;
				pmap_tlb_flush_timeout = TRUE;
				orig_acks = NMIPI_acks;
				pmap_cpuset_NMIPI(cpus_to_respond);

				panic("TLB invalidation IPI timeout: "
				      "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%lx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
				      cpus_to_respond, orig_acks, NMIPI_acks);
			}
		}
	}

	/*
	 * Flush local tlb if required.
	 * We need this flush even if the pmap being changed
	 * is the user map... in case we do a copyin/out
	 * before returning to user mode.
	 */
	if (flush_self)
		flush_tlb();

	if ((pmap == kernel_pmap) && (flush_self != TRUE)) {
		panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
	}

	PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END,
			    (uintptr_t) pmap, cpus_to_signal, startv, endv, 0);
}
void
process_pmap_updates(void)
{
	assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);

	flush_tlb();

	current_cpu_datap()->cpu_tlb_invalid = FALSE;
	__asm__ volatile("mfence");
}

void
pmap_update_interrupt(void)
{
	PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
		   0, 0, 0, 0, 0);

	process_pmap_updates();

	PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
		   0, 0, 0, 0, 0);
}
2957 kprintf("pmap 0x%x\n",p
);
2959 kprintf(" pm_cr3 0x%llx\n",p
->pm_cr3
);
2960 kprintf(" pm_pml4 0x%x\n",p
->pm_pml4
);
2961 kprintf(" pm_pdpt 0x%x\n",p
->pm_pdpt
);
2963 kprintf(" pml4[0] 0x%llx\n",*p
->pm_pml4
);
2965 kprintf(" pdpt[%d] 0x%llx\n",i
, p
->pm_pdpt
[i
]);
2968 void pmap_dump_wrap(void)
2970 pmap_dump(current_cpu_datap()->cpu_active_thread
->task
->map
->pmap
);
void
dump_4GB_pdpt(pmap_t p)
{
	spl_t		spl;
	pdpt_entry_t	*user_pdptp;
	pdpt_entry_t	*kern_pdptp;
	pdpt_entry_t	*pml4p;

	spl = splhigh();
	while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) {
		splx(spl);
		pmap_expand_pml4(p, 0x0);
		spl = splhigh();
	}
	kern_pdptp = kernel_pmap->pm_pdpt;
	if (kern_pdptp == NULL)
		panic("kern_pdptp == NULL");
	kprintf("dump_4GB_pdpt(%p)\n"
		"kern_pdptp=%p (phys=0x%016llx)\n"
		"\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n"
		"user_pdptp=%p (phys=0x%016llx)\n"
		"\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n",
		p, kern_pdptp, kvtophys(kern_pdptp),
		kern_pdptp+0, *(kern_pdptp+0),
		kern_pdptp+1, *(kern_pdptp+1),
		kern_pdptp+2, *(kern_pdptp+2),
		kern_pdptp+3, *(kern_pdptp+3),
		kern_pdptp+4, *(kern_pdptp+4),
		user_pdptp, kvtophys(user_pdptp),
		user_pdptp+0, *(user_pdptp+0),
		user_pdptp+1, *(user_pdptp+1),
		user_pdptp+2, *(user_pdptp+2),
		user_pdptp+3, *(user_pdptp+3),
		user_pdptp+4, *(user_pdptp+4));
	kprintf("user pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n",
		p->pm_cr3, p->pm_hold, p->pm_pml4);
	pml4p = (pdpt_entry_t *)p->pm_hold;
	if (pml4p == NULL)
		panic("user pml4p == NULL");
	kprintf("\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n",
		pml4p, *pml4p,
		pml4p+KERNEL_UBER_PML4_INDEX, *(pml4p+KERNEL_UBER_PML4_INDEX));
	kprintf("kern pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n",
		kernel_pmap->pm_cr3, kernel_pmap->pm_hold, kernel_pmap->pm_pml4);
	pml4p = (pdpt_entry_t *)kernel_pmap->pm_hold;
	if (pml4p == NULL)
		panic("kern pml4p == NULL");
	kprintf("\t 0x%08x: 0x%016llx\n"
		"\t 0x%08x: 0x%016llx\n",
		pml4p, *pml4p,
		pml4p+511, *(pml4p+511));
	splx(spl);
}

void dump_4GB_pdpt_thread(thread_t tp)
{
	dump_4GB_pdpt(tp->map->pmap);
}