/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Authors:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Machine-dependent structures for the physical map module.
 */
#ifdef	KERNEL_PRIVATE
#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_	1

#ifndef	ASSEMBLER

#include <platforms.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/lock.h>
#include <mach/branch_predicates.h>

#include <i386/proc_reg.h>

#include <i386/pal_routines.h>
/*
 *	Define the generic in terms of the specific
 */

#define	INTEL_PGBYTES		I386_PGBYTES
#define	INTEL_PGSHIFT		I386_PGSHIFT
#define	intel_btop(x)		i386_btop(x)
#define	intel_ptob(x)		i386_ptob(x)
#define	intel_round_page(x)	i386_round_page(x)
#define	intel_trunc_page(x)	i386_trunc_page(x)
#define	trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define	round_intel_to_vm(x)	round_i386_to_vm(x)
#define	vm_to_intel(x)		vm_to_i386(x)

/*
 *	i386/i486/i860 Page Table Entry
 */

#endif	/* ASSEMBLER */
#define NPGPTD		4
#define PDESHIFT	21
#define PTEMASK		0x1ff
#define PTESHIFT	12

#define INITPT_SEG_BASE		0x100000
#define INITGDT_SEG_BASE	0x106000
#define SLEEP_SEG_BASE		0x107000

#define LOW_4GB_MASK	((vm_offset_t)0x00000000FFFFFFFFUL)

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#define INTEL_OFFMASK	(I386_PGBYTES - 1)
#define INTEL_LOFFMASK	(I386_LPGBYTES - 1)
#define PG_FRAME	0x000FFFFFFFFFF000ULL
#define NPTEPG		(PAGE_SIZE/(sizeof (pt_entry_t)))
#define NPTDPG		(PAGE_SIZE/(sizeof (pd_entry_t)))

#define NBPTD		(NPGPTD << PAGE_SHIFT)
#define NPDEPTD		(NBPTD / (sizeof (pd_entry_t)))
#define NPDEPG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define NBPDE		(1 << PDESHIFT)
#define PDEMASK		(NBPDE - 1)

#define PTE_PER_PAGE	512	/* number of PTE's per page on any level */
/* cleanly define parameters for all the page table levels */
typedef uint64_t	pml4_entry_t;
#define NPML4PG		(PAGE_SIZE/(sizeof (pml4_entry_t)))
#define PML4SHIFT	39
#define PML4PGSHIFT	9
#define NBPML4		(1ULL << PML4SHIFT)
#define PML4MASK	(NBPML4-1)
#define PML4_ENTRY_NULL	((pml4_entry_t *) 0)

typedef uint64_t	pdpt_entry_t;
#define NPDPTPG		(PAGE_SIZE/(sizeof (pdpt_entry_t)))
#define PDPTSHIFT	30
#define PDPTPGSHIFT	9
#define NBPDPT		(1 << PDPTSHIFT)
#define PDPTMASK	(NBPDPT-1)
#define PDPT_ENTRY_NULL	((pdpt_entry_t *) 0)

typedef uint64_t	pd_entry_t;
#define NPDPG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define PDSHIFT		21
#define NBPD		(1 << PDSHIFT)
#define PDMASK		(NBPD-1)
#define PD_ENTRY_NULL	((pd_entry_t *) 0)

typedef uint64_t	pt_entry_t;
#define NPTPG		(PAGE_SIZE/(sizeof (pt_entry_t)))
#define PTSHIFT		12
#define NBPT		(1 << PTSHIFT)
#define PTMASK		(NBPT-1)
#define PT_ENTRY_NULL	((pt_entry_t *) 0)

typedef uint64_t	pmap_paddr_t;
#define PMAP_ASSERT 1
#ifdef PMAP_ASSERT
#define pmap_assert(ex) ((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))

#define pmap_assert2(ex, fmt, args...)					\
	do {								\
		if (!(ex)) {						\
			kprintf("Assertion %s failed (%s:%d, caller %p) " fmt , #ex, __FILE__, __LINE__, __builtin_return_address(0), ##args); \
			panic("Assertion %s failed (%s:%d, caller %p) " fmt , #ex, __FILE__, __LINE__, __builtin_return_address(0), ##args); \
		}							\
	} while(0)
#else
#define pmap_assert(ex)
#define pmap_assert2(ex, fmt, args...)
#endif
#ifdef __x86_64__
#define SUPERPAGE_NBASEPAGES 512
#else
#define SUPERPAGE_NBASEPAGES 1	/* we don't support superpages on i386 */
#endif
/*
 * Atomic 64-bit store of a page table entry.
 */
static inline void
pmap_store_pte(pt_entry_t *entryp, pt_entry_t value)
{
#ifdef __i386__
	/*
	 * Load the new value into %ecx:%ebx
	 * Load the old value into %edx:%eax
	 * Compare-exchange-8bytes at address entryp (loaded in %edi)
	 * If the compare succeeds, the new value will have been stored.
	 * Otherwise, the old value changed and reloaded, so try again.
	 */
	__asm__ volatile(
		"	movl	(%0), %%eax	\n\t"
		"	movl	4(%0), %%edx	\n\t"
		"1:				\n\t"
		"	cmpxchg8b (%0)		\n\t"
		"	jnz 1b"
		:
		: "D" (entryp),
		  "b" ((uint32_t)value),
		  "c" ((uint32_t)(value >> 32))
		: "eax", "edx", "memory");
#else
	/*
	 * In the 32-bit kernel a compare-and-exchange loop was
	 * required to provide atomicity. For K64, life is easier:
	 * a single aligned 64-bit store is atomic.
	 */
	*entryp = value;
#endif
}
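/*
 * Example (illustrative only): the i386 loop above is roughly the
 * following C, written here with the libkern OSCompareAndSwap64()
 * primitive. The inline assembly form is used so the 64-bit store is
 * a single atomic cmpxchg8b even on the 32-bit kernel:
 *
 *	pt_entry_t old;
 *	do {
 *		old = *entryp;
 *	} while (!OSCompareAndSwap64(old, value, entryp));
 */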
/*
 * Atomic 64-bit compare and exchange of a page table entry.
 */
static inline boolean_t
pmap_cmpx_pte(pt_entry_t *entryp, pt_entry_t old, pt_entry_t new)
{
	boolean_t ret;

#ifdef __i386__
	/*
	 * Load the old value into %edx:%eax
	 * Load the new value into %ecx:%ebx
	 * Compare-exchange-8bytes at address entryp (loaded in %edi)
	 * If the compare succeeds, the new value is stored, return TRUE.
	 * Otherwise, no swap is made, return FALSE.
	 */
	__asm__ volatile(
		"	lock; cmpxchg8b (%1)	\n\t"
		"	setz	%%al		\n\t"
		"	movzbl	%%al,%0"
		: "=a" (ret)
		: "D" (entryp),
		  "a" ((uint32_t)old),
		  "d" ((uint32_t)(old >> 32)),
		  "b" ((uint32_t)new),
		  "c" ((uint32_t)(new >> 32))
		: "memory");
#else
	/*
	 * Load the old value into %rax
	 * Load the new value into another register
	 * Compare-exchange-quad at address entryp
	 * If the compare succeeds, the new value is stored, return TRUE.
	 * Otherwise, no swap is made, return FALSE.
	 */
	__asm__ volatile(
		"	lock; cmpxchgq %2,(%3)	\n\t"
		"	setz	%%al		\n\t"
		"	movzbl	%%al,%0"
		: "=a" (ret)
		: "a" (old),
		  "r" (new),
		  "r" (entryp)
		: "memory");
#endif
	return ret;
}
#define pmap_update_pte(entryp, old, new) \
	while (!pmap_cmpx_pte((entryp), (old), (new)))
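/*
 * Example (illustrative only): atomically revoke write permission on a
 * pte. Both "old" and "new" are re-evaluated on every retry, since
 * pmap_update_pte() is a macro wrapping pmap_cmpx_pte():
 *
 *	pmap_update_pte(ptep, *ptep, *ptep & ~(pt_entry_t)INTEL_PTE_WRITE);
 */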
/* in 64 bit spaces, the number of each type of page in the page tables */
#define NPML4PGS	(1ULL * (PAGE_SIZE/(sizeof (pml4_entry_t))))
#define NPDPTPGS	(NPML4PGS * (PAGE_SIZE/(sizeof (pdpt_entry_t))))
#define NPDEPGS		(NPDPTPGS * (PAGE_SIZE/(sizeof (pd_entry_t))))
#define NPTEPGS		(NPDEPGS * (PAGE_SIZE/(sizeof (pt_entry_t))))
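/*
 * Example (illustrative only): with 4K pages and 8-byte entries, each
 * table page holds 512 entries, so NPML4PGS = 512, NPDPTPGS = 512^2,
 * NPDEPGS = 512^3 and NPTEPGS = 512^4, i.e. the worst-case number of
 * pages at each level for a fully populated 48-bit address space.
 */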
/*
 * The 64-bit kernel is remapped in uber-space, which is at the base
 * of the highest 4th-level directory (KERNEL_UBER_PML4_INDEX). That
 * is, 512GB from the top of virtual space (or zero).
 */
#define KERNEL_UBER_PML4_INDEX	511
#define KERNEL_UBER_BASE	(0ULL - NBPML4)
#define KERNEL_UBER_BASE_HI32	((uint32_t)(KERNEL_UBER_BASE >> 32))
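/*
 * Worked example (illustrative only): NBPML4 = 1ULL << 39 = 512GB, so
 * KERNEL_UBER_BASE = 0 - 2^39 = 0xFFFFFF8000000000, the start of PML4
 * slot 511, and KERNEL_UBER_BASE_HI32 = 0xFFFFFF80.
 */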
#define KERNEL_PML4_INDEX	511
#define KERNEL_KEXTS_INDEX	510	/* Home of KEXTs - the basement */
#define KERNEL_PHYSMAP_INDEX	509	/* virtual to physical map */
#define KERNEL_BASE		(0ULL - NBPML4)
#define KERNEL_BASEMENT		(KERNEL_BASE - NBPML4)
#define	VM_WIMG_COPYBACK	VM_MEM_COHERENT
#define	VM_WIMG_DEFAULT		VM_MEM_COHERENT
#define	VM_WIMG_IO		(VM_MEM_COHERENT | \
				 VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define	VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define	VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
#define	VADDR(pdi, pti)		((vm_offset_t)(((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))
#define	VADDR64(pmi, pdi, pti)	((vm_offset_t)(((pmi)<<PML4SHIFT)|((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))

#define KVADDR(pmi, pdpi, pdi, pti)	  \
	((vm_offset_t)			  \
	((uint64_t) -1	  << 47)	| \
	((uint64_t)(pmi)  << PML4SHIFT) | \
	((uint64_t)(pdpi) << PDPTSHIFT) | \
	((uint64_t)(pdi)  << PDESHIFT)  | \
	((uint64_t)(pti)  << PTESHIFT))
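/*
 * Worked example (illustrative only): KVADDR(511, 0, 0, 0) =
 * 0xFFFF800000000000 | (511ULL << 39) = 0xFFFFFF8000000000.
 * The "(uint64_t) -1 << 47" term replicates bit 47 through bits
 * 48-63, producing a canonical kernel address.
 */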
/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 */
#define KVA_PAGES	1024

#ifndef	NKPT
#define	NKPT	500	/* actual number of kernel page tables */
#endif
#ifndef	NKPDE
#define NKPDE	(KVA_PAGES - 1)	/* addressable number of page tables/pde's */
#endif
enum high_cpu_types {
	HIGH_CPU_LDT_BEGIN,
	HIGH_CPU_LDT_END = HIGH_CPU_LDT_BEGIN + (LDTSZ / 512) - 1,
	HIGH_CPU_END
};

enum high_fixed_addresses {
	HIGH_FIXED_TRAMPS,	/* must be first */
	HIGH_FIXED_TRAMPS_END,
	HIGH_FIXED_LDT_BEGIN,
	HIGH_FIXED_LDT_END = HIGH_FIXED_LDT_BEGIN + (LDTSZ / 512) - 1,
	HIGH_FIXED_CPUS_BEGIN,
	HIGH_FIXED_CPUS_END = HIGH_FIXED_CPUS_BEGIN + (HIGH_CPU_END * MAX_CPUS) - 1,
};
/* XXX64 below PTDI values need cleanup */
/*
 *	The *PTDI values control the layout of virtual memory
 */
#define	KPTDI		(0x000)		/* start of kernel virtual pde's */
#define	PTDPTDI		(0x7F4)		/* ptd entry that points to ptd! */
#define	APTDPTDI	(0x7F8)		/* alt ptd entry that points to APTD */
#define	UMAXPTDI	(0x7F8)		/* ptd entry for user space end */
#define	UMAXPTEOFF	(NPTEPG)	/* pte entry for user space end */

#define KERNBASE	VADDR(KPTDI,0)
/*
 *	Convert address offset to directory address
 *	containing the page table pointer - legacy
 */
/*#define pmap_pde(m,v)	(&((m)->dirbase[(vm_offset_t)(v) >> PDESHIFT]))*/

#define HIGH_MEM_BASE	((uint32_t)( -NBPDE) )	/* shared gdt etc seg addr */ /* XXX64 ?? */
#define pmap_index_to_virt(x)	(HIGH_MEM_BASE | ((unsigned)(x) << PAGE_SHIFT))
/*
 *	Convert address offset to page descriptor index
 */
#define pdptnum(pmap, a)	(((vm_offset_t)(a) >> PDPTSHIFT) & PDPTMASK)
#define pdenum(pmap, a)		(((vm_offset_t)(a) >> PDESHIFT) & PDEMASK)
#define PMAP_INVALID_PDPTNUM	(~0ULL)

#define pdeidx(pmap, a)		(((a) >> PDSHIFT)   & ((1ULL<<(48 - PDSHIFT)) - 1))
#define pdptidx(pmap, a)	(((a) >> PDPTSHIFT) & ((1ULL<<(48 - PDPTSHIFT)) - 1))
#define pml4idx(pmap, a)	(((a) >> PML4SHIFT) & ((1ULL<<(48 - PML4SHIFT)) - 1))
/*
 *	Convert page descriptor index to user virtual address
 */
#define pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index
 */
#define ptenum(a)	(((vm_offset_t)(a) >> PTESHIFT) & PTEMASK)
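/*
 * Example (illustrative only): for va = 0x40201000,
 * pdenum(pmap, va) = (va >> 21) & PDEMASK = 0x201 and
 * ptenum(va) = (va >> 12) & PTEMASK = 0x1, i.e. page-directory
 * entry 0x201, page-table entry 1, page offset 0.
 */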
/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define INTEL_PTE_VALID		0x00000001
#define INTEL_PTE_WRITE		0x00000002
#define INTEL_PTE_RW		0x00000002
#define INTEL_PTE_USER		0x00000004
#define INTEL_PTE_WTHRU		0x00000008
#define INTEL_PTE_NCACHE	0x00000010
#define INTEL_PTE_REF		0x00000020
#define INTEL_PTE_MOD		0x00000040
#define INTEL_PTE_PS		0x00000080
#define INTEL_PTE_PTA		0x00000080
#define INTEL_PTE_GLOBAL	0x00000100
#define INTEL_PTE_WIRED		0x00000200
#define INTEL_PDPTE_NESTED	0x00000400
#define INTEL_PTE_PFN		PG_FRAME

#define INTEL_PTE_NX		(1ULL << 63)

#define INTEL_PTE_INVALID	0
/* This is conservative, but suffices */
#define INTEL_PTE_RSVD		((1ULL << 10) | (1ULL << 11) | (0x1FFULL << 54))
#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)	/* XXX */
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)	/* XXX */
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)

#define pte_kernel_rw(p)	((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_RW))
#define pte_kernel_ro(p)	((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID))
#define pte_user_rw(p)		((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER|INTEL_PTE_RW))
#define pte_user_ro(p)		((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER))
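/*
 * Example (illustrative only): for a page frame at physical 0x1234000,
 * pte_kernel_rw(0x1234000) = 0x1234000 | INTEL_PTE_VALID |
 * INTEL_PTE_RW = 0x1234003, i.e. a present, writable,
 * supervisor-only mapping.
 */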
#define PMAP_DEFAULT_CACHE	0
#define PMAP_INHIBIT_CACHE	1
#define PMAP_GUARDED_CACHE	2
#define PMAP_ACTIVATE_CACHE	4
#define PMAP_NO_GUARD_CACHE	8

#include <sys/queue.h>
/*
 * Address of current and alternate address space page table maps
 * and directories.
 */

#ifdef __i386__
extern pt_entry_t	PTmap[], APTmap[], Upte;
extern pd_entry_t	PTD[], APTD[], PTDpde[], APTDpde[], Upde;
extern pmap_paddr_t	lo_kernel_cr3;
extern pdpt_entry_t	*IdlePDPT64;
#else
extern pt_entry_t	*PTmap;
#endif
extern boolean_t	no_shared_cr3;
extern addr64_t		kernel64_cr3;
extern pd_entry_t	*IdlePTD;	/* physical addr of "Idle" state PTD */
extern pdpt_entry_t	IdlePDPT[];
extern pml4_entry_t	IdlePML4[];

extern uint64_t		pmap_pv_hashlist_walks;
extern uint64_t		pmap_pv_hashlist_cnts;
extern uint32_t		pmap_pv_hashlist_max;
extern uint32_t		pmap_kernel_text_ps;
/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */

#define	vtopte(va)	(PTmap + i386_btop((vm_offset_t)va))

#define ID_MAP_VTOP(x)	((void *)(((uint64_t)(x)) & LOW_4GB_MASK))

#define PHYSMAP_BASE	KVADDR(KERNEL_PHYSMAP_INDEX,0,0,0)
#define PHYSMAP_PTOV(x)	((void *)(((uint64_t)(x)) + PHYSMAP_BASE))
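/*
 * Example (illustrative only): because the physmap in PML4 slot
 * KERNEL_PHYSMAP_INDEX maps physical memory 1:1 at a fixed offset,
 * PHYSMAP_PTOV(0x1234000) is simply 0x1234000 + PHYSMAP_BASE,
 * letting the 64-bit kernel read any physical page without setting
 * up a transient mapping.
 */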
typedef volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

struct md_page {
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};
#include <vm/vm_page.h>

/*
 *	For each vm_page_t, there is a list of all currently
 *	valid virtual mappings of that page.  An entry is
 *	a pv_entry_t; the list is the pv_table.
 */

struct pmap {
	decl_simple_lock_data(,lock)	/* lock on map */
	pmap_paddr_t	pm_cr3;		/* physical addr */
	pd_entry_t	*dirbase;	/* page directory pointer */
#ifdef __i386__
	pmap_paddr_t	pdirbase;	/* phys. address of dirbase */
	vm_offset_t	pm_hold;	/* true pdpt zalloc addr */
#endif
	vm_object_t	pm_obj;		/* object to hold pde's */
	task_map_t	pm_task_map;
	pdpt_entry_t	*pm_pdpt;	/* KVA of 3rd level page */
	pml4_entry_t	*pm_pml4;	/* VKA of top level */
	vm_object_t	pm_obj_pdpt;	/* holds pdpt pages */
	vm_object_t	pm_obj_pml4;	/* holds pml4 pages */
#define	PMAP_PCID_MAX_CPUS	(48)	/* Must be a multiple of 8 */
	pcid_t		pmap_pcid_cpus[PMAP_PCID_MAX_CPUS];
	volatile uint8_t pmap_pcid_coherency_vector[PMAP_PCID_MAX_CPUS];
	struct pmap_statistics	stats;	/* map statistics */
	int		ref_count;	/* reference count */
};
#if NCOPY_WINDOWS > 0
#define PMAP_PDPT_FIRST_WINDOW	0
#define PMAP_PDPT_NWINDOWS	4
#define PMAP_PDE_FIRST_WINDOW	(PMAP_PDPT_NWINDOWS)
#define PMAP_PDE_NWINDOWS	4
#define PMAP_PTE_FIRST_WINDOW	(PMAP_PDE_FIRST_WINDOW + PMAP_PDE_NWINDOWS)
#define PMAP_PTE_NWINDOWS	4

#define PMAP_NWINDOWS_FIRSTFREE	(PMAP_PTE_FIRST_WINDOW + PMAP_PTE_NWINDOWS)
#define PMAP_WINDOW_SIZE	8
#define PMAP_NWINDOWS		(PMAP_NWINDOWS_FIRSTFREE + PMAP_WINDOW_SIZE)

typedef struct {
	pt_entry_t	*prv_CMAP;
	caddr_t		prv_CADDR;
} mapwindow_t;
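/*
 * Example (illustrative only): the 4+4+4 dedicated windows above give
 * PMAP_NWINDOWS_FIRSTFREE = 12; adding the PMAP_WINDOW_SIZE = 8
 * general-purpose windows yields PMAP_NWINDOWS = 20 mapwindow slots
 * per cpu.
 */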
typedef struct cpu_pmap {
	int			pdpt_window_index;
	int			pde_window_index;
	int			pte_window_index;
	mapwindow_t		mapwindow[PMAP_NWINDOWS];
} cpu_pmap_t;


extern mapwindow_t	*pmap_get_mapwindow(pt_entry_t pentry);
extern void		pmap_put_mapwindow(mapwindow_t *map);
#endif
typedef struct pmap_memory_regions {
	ppnum_t		base;
	ppnum_t		end;
	ppnum_t		alloc;
	uint32_t	type;
} pmap_memory_region_t;

extern unsigned pmap_memory_region_count;
extern unsigned pmap_memory_region_current;

#define PMAP_MEMORY_REGIONS_SIZE 128

extern pmap_memory_region_t pmap_memory_regions[];
#include <i386/pmap_pcid.h>
static inline void
set_dirbase(pmap_t tpmap, __unused thread_t thread) {
	int ccpu = cpu_number();
	cpu_datap(ccpu)->cpu_task_cr3 = tpmap->pm_cr3;
	cpu_datap(ccpu)->cpu_task_map = tpmap->pm_task_map;
	/*
	 * Switch cr3 if necessary
	 * - unless running with no_shared_cr3 debugging mode
	 *   and we're not on the kernel's cr3 (after pre-empted copyio)
	 */
	if (__probable(!no_shared_cr3)) {
		if (get_cr3_base() != tpmap->pm_cr3) {
			if (pmap_pcid_ncpus) {
				pmap_pcid_activate(tpmap, ccpu);
			}
			else
				set_cr3_raw(tpmap->pm_cr3);
		}
	} else {
		if (get_cr3_base() != cpu_datap(ccpu)->cpu_kernel_cr3)
			set_cr3_raw(cpu_datap(ccpu)->cpu_kernel_cr3);
	}
}
/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void		process_pmap_updates(void);
extern void		pmap_update_interrupt(void);
/*
 *	Machine dependent routines that are used only for i386/i486/i860.
 */

extern addr64_t		(kvtophys)(
				vm_offset_t	addr);

extern void		pmap_expand(
				pmap_t		pmap,
				vm_map_offset_t	addr);
#if	!defined(__x86_64__)
extern pt_entry_t	*pmap_pte(
				struct pmap	*pmap,
				vm_map_offset_t	addr);

extern pd_entry_t	*pmap_pde(
				struct pmap	*pmap,
				vm_map_offset_t	addr);

extern pd_entry_t	*pmap64_pde(
				struct pmap	*pmap,
				vm_map_offset_t	addr);

extern pdpt_entry_t	*pmap64_pdpt(
				struct pmap	*pmap,
				vm_map_offset_t	addr);
#endif

extern vm_offset_t	pmap_map(
				vm_offset_t	virt,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	prot,
				unsigned int	flags);

extern vm_offset_t	pmap_map_bd(
				vm_offset_t	virt,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	prot,
				unsigned int	flags);

extern void		pmap_bootstrap(
				vm_offset_t	load_start,
				boolean_t	IA32e);

extern boolean_t	pmap_valid_page(
				ppnum_t	pn);

extern int		pmap_list_resident_pages(
				struct pmap	*pmap,
				vm_offset_t	*listp,
				int		space);
extern void		x86_filter_TLB_coherency_interrupts(boolean_t);
#ifdef __i386__
extern void		pmap_commpage32_init(
				vm_offset_t	kernel,
				vm_offset_t	user,
				int		count);
extern void		pmap_commpage64_init(
				vm_offset_t	kernel,
				vm_map_offset_t	user,
				int		count);
#endif
/*
 * Get cache attributes (as pagetable bits) for the specified phys page
 */
extern unsigned	pmap_get_cache_attributes(ppnum_t);
#if NCOPY_WINDOWS > 0
extern struct cpu_pmap	*pmap_cpu_alloc(
				boolean_t	is_boot_cpu);
extern void		pmap_cpu_free(
				struct cpu_pmap	*cp);
#endif

extern void		pmap_map_block(
				pmap_t		pmap,
				addr64_t	va,
				ppnum_t		pa,
				uint32_t	size,
				vm_prot_t	prot,
				int		attr,
				unsigned int	flags);

extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t	pmap_find_phys(pmap_t map, addr64_t va);

extern void pmap_cpu_init(void);
extern void pmap_disable_NX(pmap_t pmap);
#ifdef __i386__
extern void pmap_set_4GB_pagezero(pmap_t pmap);
extern void pmap_clear_4GB_pagezero(pmap_t pmap);
extern void pmap_load_kernel_cr3(void);
extern vm_offset_t pmap_cpu_high_map_vaddr(int, enum high_cpu_types);
extern vm_offset_t pmap_high_map_vaddr(enum high_cpu_types);
extern vm_offset_t pmap_high_map(pt_entry_t, enum high_cpu_types);
extern vm_offset_t pmap_cpu_high_shared_remap(int, enum high_cpu_types, vm_offset_t, int);
extern vm_offset_t pmap_high_shared_remap(enum high_fixed_addresses, vm_offset_t, int);
#endif

extern void pt_fake_zone_init(int);
extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
			      uint64_t *, int *, int *, int *);
extern void pmap_pagetable_corruption_msg_log(int (*)(const char * fmt, ...)__printflike(1,2));
#include <kern/spl.h>
#define PMAP_ACTIVATE_MAP(map, thread)	{				\
	register pmap_t		tpmap;					\
									\
	tpmap = vm_map_pmap(map);					\
	set_dirbase(tpmap, thread);					\
}
#if   defined(__i386__)
#define PMAP_DEACTIVATE_MAP(map, thread)				\
	if (vm_map_pmap(map)->pm_task_map == TASK_MAP_64BIT_SHARED)	\
		pmap_load_kernel_cr3();
#elif defined(__x86_64__)
#define PMAP_DEACTIVATE_MAP(map, thread)				\
	pmap_assert(pmap_pcid_ncpus ? (pcid_for_pmap_cpu_tuple(map->pmap, cpu_number()) == (get_cr3_raw() & 0xFFF)) : TRUE);
#else
#define PMAP_DEACTIVATE_MAP(map, thread)
#endif
#if   defined(__i386__)

#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	spl_t		spl;						\
	pt_entry_t	*kpdp;						\
	pt_entry_t	*updp;						\
	int		i;						\
	int		need_flush;					\
									\
	need_flush = 0;							\
	spl = splhigh();						\
	if ((old_th->map != new_th->map) || (new_th->task != old_th->task)) { \
		PMAP_DEACTIVATE_MAP(old_th->map, old_th);		\
		PMAP_ACTIVATE_MAP(new_th->map, new_th);			\
	}								\
	kpdp = current_cpu_datap()->cpu_copywindow_pdp;			\
	for (i = 0; i < NCOPY_WINDOWS; i++) {				\
		if (new_th->machine.copy_window[i].user_base != (user_addr_t)-1) { \
			updp = pmap_pde(new_th->map->pmap,		\
			      new_th->machine.copy_window[i].user_base);\
			pmap_store_pte(kpdp, updp ? *updp : 0);		\
		}							\
		kpdp++;							\
	}								\
	splx(spl);							\
	if (new_th->machine.copyio_state == WINDOWS_OPENED)		\
		need_flush = 1;						\
	else								\
		new_th->machine.copyio_state = WINDOWS_DIRTY;		\
	if (new_th->machine.physwindow_pte) {				\
		pmap_store_pte((current_cpu_datap()->cpu_physwindow_ptep), \
				new_th->machine.physwindow_pte);	\
		if (need_flush == 0)					\
			invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base);\
	}								\
	if (need_flush)							\
		flush_tlb();						\
}
#else /* __x86_64__ */
#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
									\
	pmap_assert(ml_get_interrupts_enabled() == FALSE);		\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, old_th);		\
		PMAP_ACTIVATE_MAP(new_th->map, new_th);			\
	}								\
}
#endif /* __i386__ */
#if NCOPY_WINDOWS > 0
#define PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, th);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, th);					\
	splx(spl);							\
	inval_copy_windows(th);						\
}
#else
#define PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, th);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, th);					\
	splx(spl);							\
}
#endif
/*
 * Marking the current cpu's cr3 inactive is achieved by setting its lsb.
 * Marking the current cpu's cr3 active once more involves clearing this bit.
 * Note that valid page tables are page-aligned and so the bottom 12 bits
 * are normally zero, modulo PCID.
 * We can only mark the current cpu active/inactive but we can test any cpu.
 */
#define CPU_CR3_MARK_INACTIVE()						\
	current_cpu_datap()->cpu_active_cr3 |= 1

#define CPU_CR3_MARK_ACTIVE()						\
	current_cpu_datap()->cpu_active_cr3 &= ~1

#define CPU_CR3_IS_ACTIVE(cpu)						\
	((cpu_datap(cpu)->cpu_active_cr3 & 1) == 0)

#define CPU_GET_ACTIVE_CR3(cpu)						\
	(cpu_datap(cpu)->cpu_active_cr3 & ~1)

#define CPU_GET_TASK_CR3(cpu)						\
	(cpu_datap(cpu)->cpu_task_cr3)
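/*
 * Example (illustrative only): an active cr3 of 0x1234000 reads
 * 0x1234001 after CPU_CR3_MARK_INACTIVE(). Since cr3 values are
 * page-aligned, bit 0 is never set in a real cr3, so it is free to
 * flag "this cpu may have a stale TLB, skip the shootdown IPI", and
 * CPU_GET_ACTIVE_CR3() masks the bit off again before use.
 */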
/*
 * Mark this cpu idle, and remove it from the active set,
 * since it is not actively using any pmap.  Signal_cpus
 * will notice that it is idle, and avoid signaling it,
 * but will queue the update request for when the cpu
 * becomes active.
 */
#if   defined(__x86_64__)
#define MARK_CPU_IDLE(my_cpu)	{					\
	assert(ml_get_interrupts_enabled() == FALSE);			\
	CPU_CR3_MARK_INACTIVE();					\
	__asm__ volatile("mfence");					\
}
#else /* __i386__ native */
#define MARK_CPU_IDLE(my_cpu)	{					\
	assert(ml_get_interrupts_enabled() == FALSE);			\
	/*								\
	 * Mark this cpu idle, and remove it from the active set,	\
	 * since it is not actively using any pmap.  Signal_cpus	\
	 * will notice that it is idle, and avoid signaling it,	\
	 * but will queue the update request for when the cpu		\
	 * becomes active.						\
	 */								\
	if (!cpu_mode_is64bit() || no_shared_cr3)			\
		process_pmap_updates();					\
	else								\
		pmap_load_kernel_cr3();					\
	CPU_CR3_MARK_INACTIVE();					\
	__asm__ volatile("mfence");					\
}
#endif /* __i386__ */
#define MARK_CPU_ACTIVE(my_cpu) {					\
									\
	assert(ml_get_interrupts_enabled() == FALSE);			\
	/*								\
	 * If a kernel_pmap update was requested while this cpu	\
	 * was idle, process it as if we got the interrupt.		\
	 * Before doing so, remove this cpu from the idle set.		\
	 * Since we do not grab any pmap locks while we flush		\
	 * our TLB, another cpu may start an update operation		\
	 * before we finish.  Removing this cpu from the idle		\
	 * set assures that we will receive another update		\
	 * interrupt if this happens.					\
	 */								\
	CPU_CR3_MARK_ACTIVE();						\
	__asm__ volatile("mfence");					\
									\
	if (current_cpu_datap()->cpu_tlb_invalid)			\
		process_pmap_updates();					\
}
#define PMAP_CONTEXT(pmap, thread)

#define pmap_kernel_va(VA)	\
	((((vm_offset_t) (VA)) >= vm_min_kernel_address) && \
	 (((vm_offset_t) (VA)) <= vm_max_kernel_address))
#define pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define pmap_resident_max(pmap)		((pmap)->stats.resident_max)
#define	pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define	pmap_attribute(pmap,addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#define	pmap_attribute_cache_sync(addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#define MACHINE_PMAP_IS_EMPTY 1
extern boolean_t pmap_is_empty(pmap_t		pmap,
			       vm_map_offset_t	start,
			       vm_map_offset_t	end);

#define MACHINE_BOOTSTRAPPTD 1	/* Static bootstrap page-tables */
#endif	/* ASSEMBLER */

#endif	/* _PMAP_MACHINE_ */

#endif	/* KERNEL_PRIVATE */