]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/pmap.h
xnu-792.13.8.tar.gz
[apple/xnu.git] / osfmk / i386 / pmap.h
CommitLineData
5d5c5d0d 1
1c79356b 2/*
5d5c5d0d 3 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
1c79356b 4 *
8ad349bb 5 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
1c79356b 6 *
8ad349bb
A
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the
11 * License may not be used to create, or enable the creation or
12 * redistribution of, unlawful or unlicensed copies of an Apple operating
13 * system, or to circumvent, violate, or enable the circumvention or
14 * violation of, any terms of an Apple operating system software license
15 * agreement.
16 *
17 * Please obtain a copy of the License at
18 * http://www.opensource.apple.com/apsl/ and read it before using this
19 * file.
20 *
21 * The Original Code and all software distributed under the License are
22 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
23 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
24 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
26 * Please see the License for the specific language governing rights and
27 * limitations under the License.
28 *
29 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
1c79356b
A
30 */
31/*
32 * @OSF_COPYRIGHT@
33 */
34/*
35 * Mach Operating System
36 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
37 * All Rights Reserved.
38 *
39 * Permission to use, copy, modify and distribute this software and its
40 * documentation is hereby granted, provided that both the copyright
41 * notice and this permission notice appear in all copies of the
42 * software, derivative works or modified versions, and any portions
43 * thereof, and that both notices appear in supporting documentation.
44 *
45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
47 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
48 *
49 * Carnegie Mellon requests users of this software to return to
50 *
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science
53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890
55 *
56 * any improvements or extensions that they make and grant Carnegie Mellon
57 * the rights to redistribute these changes.
58 */
59/*
60 */
61
62/*
63 * File: pmap.h
64 *
65 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
66 * Date: 1985
67 *
68 * Machine-dependent structures for the physical map module.
69 */
5d5c5d0d 70#ifdef KERNEL_PRIVATE
1c79356b
A
71#ifndef _PMAP_MACHINE_
72#define _PMAP_MACHINE_ 1
73
74#ifndef ASSEMBLER
75
76#include <platforms.h>
1c79356b
A
77
78#include <mach/kern_return.h>
79#include <mach/machine/vm_types.h>
80#include <mach/vm_prot.h>
81#include <mach/vm_statistics.h>
82#include <mach/machine/vm_param.h>
83#include <kern/kern_types.h>
91447636 84#include <kern/thread.h>
1c79356b 85#include <kern/lock.h>
5d5c5d0d
A
86
87#include <i386/mp.h>
88#include <i386/proc_reg.h>
1c79356b
A
89
90/*
91 * Define the generic in terms of the specific
92 */
93
94#define INTEL_PGBYTES I386_PGBYTES
95#define INTEL_PGSHIFT I386_PGSHIFT
96#define intel_btop(x) i386_btop(x)
97#define intel_ptob(x) i386_ptob(x)
98#define intel_round_page(x) i386_round_page(x)
99#define intel_trunc_page(x) i386_trunc_page(x)
100#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
101#define round_intel_to_vm(x) round_i386_to_vm(x)
102#define vm_to_intel(x) vm_to_i386(x)
103
104/*
105 * i386/i486/i860 Page Table Entry
106 */
107
1c79356b
A
108#endif /* ASSEMBLER */
109
91447636
A
110#define NPGPTD 4
111#define PDESHIFT 21
112#define PTEMASK 0x1ff
113#define PTEINDX 3
5d5c5d0d 114
91447636
A
115#define PTESHIFT 12
116
117#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
118#define PTESIZE sizeof(pt_entry_t) /* for assembly files */
119
120#define INTEL_OFFMASK (I386_PGBYTES - 1)
5d5c5d0d 121#define PG_FRAME 0x000FFFFFFFFFF000ULL
91447636 122#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
5d5c5d0d 123#define NPTDPG (PAGE_SIZE/(sizeof (pd_entry_t)))
1c79356b 124
91447636
A
125#define NBPTD (NPGPTD << PAGE_SHIFT)
126#define NPDEPTD (NBPTD / (sizeof (pd_entry_t)))
127#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t)))
128#define NBPDE (1 << PDESHIFT)
129#define PDEMASK (NBPDE - 1)
9bccf70c 130
5d5c5d0d
A
/*
 * Parameters for every level of the page-table hierarchy.
 * All entries are 64-bit quantities (PAE / EM64T format).
 */
typedef uint64_t	pml4_entry_t;
#define NPML4PG		(PAGE_SIZE/(sizeof (pml4_entry_t)))
#define PML4SHIFT	39
#define PML4PGSHIFT	9
#define NBPML4		(1ULL << PML4SHIFT)
#define PML4MASK	(NBPML4-1)
#define PML4_ENTRY_NULL	((pml4_entry_t *) 0)

typedef uint64_t	pdpt_entry_t;
#define NPDPTPG		(PAGE_SIZE/(sizeof (pdpt_entry_t)))
#define PDPTSHIFT	30
#define PDPTPGSHIFT	9
#define NBPDPT		(1 << PDPTSHIFT)
#define PDPTMASK	(NBPDPT-1)
#define PDPT_ENTRY_NULL	((pdpt_entry_t *) 0)

typedef uint64_t	pd_entry_t;
#define NPDPG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define PDSHIFT		21
#define PDPGSHIFT	9
#define NBPD		(1 << PDSHIFT)
#define PDMASK		(NBPD-1)
#define PD_ENTRY_NULL	((pd_entry_t *) 0)

typedef uint64_t	pt_entry_t;
#define NPTPG		(PAGE_SIZE/(sizeof (pt_entry_t)))
#define PTSHIFT		12
#define PTPGSHIFT	9
#define NBPT		(1 << PTSHIFT)
#define PTMASK		(NBPT-1)
#define PT_ENTRY_NULL	((pt_entry_t *) 0)

/* A physical address, wide enough for >32-bit (PAE) physical space. */
typedef uint64_t	pmap_paddr_t;
165
166/*
167 * Atomic 64-bit store of a page table entry.
168 */
169static inline void
170pmap_store_pte(pt_entry_t *entryp, pt_entry_t value)
171{
172 /*
173 * Load the new value into %ecx:%ebx
174 * Load the old value into %edx:%eax
175 * Compare-exchange-8bytes at address entryp (loaded in %edi)
176 * If the compare succeeds, the new value will have been stored.
177 * Otherwise, the old value changed and reloaded, so try again.
178 */
179 asm volatile(
180 " movl (%0), %%eax \n\t"
181 " movl 4(%0), %%edx \n\t"
182 "1: \n\t"
183 " cmpxchg8b (%0) \n\t"
184 " jnz 1b"
185 :
186 : "D" (entryp),
187 "b" ((uint32_t)value),
188 "c" ((uint32_t)(value >> 32))
189 : "eax", "edx", "memory");
190}
191
192/* in 64 bit spaces, the number of each type of page in the page tables */
193#define NPML4PGS (1ULL * (PAGE_SIZE/(sizeof (pml4_entry_t))))
194#define NPDPTPGS (NPML4PGS * (PAGE_SIZE/(sizeof (pdpt_entry_t))))
195#define NPDEPGS (NPDPTPGS * (PAGE_SIZE/(sizeof (pd_entry_t))))
196#define NPTEPGS (NPDEPGS * (PAGE_SIZE/(sizeof (pt_entry_t))))
197
198/*
 199 * The 64-bit kernel is remapped in uber-space which is at the base of
200 * the highest 4th-level directory (KERNEL_UBER_PML4_INDEX). That is,
201 * 512GB from the top of virtual space (or zero).
202 */
203#define KERNEL_UBER_PML4_INDEX 511
204#define KERNEL_UBER_BASE (0ULL - NBPML4)
205#define KERNEL_UBER_BASE_HI32 ((uint32_t)(KERNEL_UBER_BASE >> 32))
206
55e303ae 207#define VM_WIMG_COPYBACK VM_MEM_COHERENT
9bccf70c 208#define VM_WIMG_DEFAULT VM_MEM_COHERENT
55e303ae
A
209/* ?? intel ?? */
210#define VM_WIMG_IO (VM_MEM_COHERENT | \
211 VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
212#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
213/* write combining mode, aka store gather */
214#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
9bccf70c 215
5d5c5d0d
A
/*
 * Pte related macros: assemble a virtual address from page-table
 * indexes (pde index / pte index, plus the pml4 index for the
 * 64-bit form).
 */
#define VADDR(pdi, pti)		((vm_offset_t)(((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))
/*
 * Fixed: the original expansion referenced undefined PLM4SHIFT
 * (the constant defined above is PML4SHIFT) and was missing the '|'
 * between the pml4 and pde terms, leaving unbalanced parentheses.
 */
#define VADDR64(pmi, pdi, pti)	((vm_offset_t)(((pmi)<<PML4SHIFT)|((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))
221
1c79356b 222/*
91447636
A
223 * Size of Kernel address space. This is the number of page table pages
224 * (4MB each) to use for the kernel. 256 pages == 1 Gigabyte.
225 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
1c79356b 226 */
91447636 227#ifndef KVA_PAGES
5d5c5d0d 228#define KVA_PAGES 1024
91447636 229#endif
1c79356b 230
91447636 231#ifndef NKPT
91447636 232#define NKPT 500 /* actual number of kernel page tables */
91447636
A
233#endif
234#ifndef NKPDE
235#define NKPDE (KVA_PAGES - 1) /* addressable number of page tables/pde's */
236#endif
237
5d5c5d0d
A
238
239enum high_cpu_types {
240 HIGH_CPU_ISS0,
241 HIGH_CPU_ISS1,
242 HIGH_CPU_DESC,
243 HIGH_CPU_LDT_BEGIN,
244 HIGH_CPU_LDT_END = HIGH_CPU_LDT_BEGIN + (LDTSZ / 512) - 1,
245 HIGH_CPU_END
246};
247
248enum high_fixed_addresses {
249 HIGH_FIXED_TRAMPS, /* must be first */
250 HIGH_FIXED_TRAMPS_END,
251 HIGH_FIXED_GDT,
252 HIGH_FIXED_IDT,
253 HIGH_FIXED_LDT_BEGIN,
254 HIGH_FIXED_LDT_END = HIGH_FIXED_LDT_BEGIN + (LDTSZ / 512) - 1,
255 HIGH_FIXED_KTSS,
256 HIGH_FIXED_DFTSS,
257 HIGH_FIXED_DBTSS,
258 HIGH_FIXED_CPUS_BEGIN,
259 HIGH_FIXED_CPUS_END = HIGH_FIXED_CPUS_BEGIN + (HIGH_CPU_END * MAX_CPUS) - 1,
260};
261
262
263/* XXX64 below PTDI values need cleanup */
91447636
A
264/*
265 * The *PTDI values control the layout of virtual memory
266 *
267 */
5d5c5d0d 268#define KPTDI (0x000)/* start of kernel virtual pde's */
91447636
A
269#define PTDPTDI (0x7F4) /* ptd entry that points to ptd! */
270#define APTDPTDI (0x7F8) /* alt ptd entry that points to APTD */
5d5c5d0d 271#define UMAXPTDI (0x7F8) /* ptd entry for user space end */
8ad349bb 272#define UMAXPTEOFF (NPTEPG) /* pte entry for user space end */
91447636
A
273
274#define KERNBASE VADDR(KPTDI,0)
1c79356b 275
5d5c5d0d
A
276/*
277 * Convert address offset to directory address
278 * containing the page table pointer - legacy
279 */
280/*#define pmap_pde(m,v) (&((m)->dirbase[(vm_offset_t)(v) >> PDESHIFT]))*/
281
282#define HIGH_MEM_BASE ((uint32_t)( -NBPDE) ) /* shared gdt etc seg addr */ /* XXX64 ?? */
283#define pmap_index_to_virt(x) (HIGH_MEM_BASE | ((unsigned)(x) << PAGE_SHIFT))
284
1c79356b
A
285/*
286 * Convert address offset to page descriptor index
287 */
5d5c5d0d 288#define pdenum(pmap, a) (((vm_offset_t)(a) >> PDESHIFT) & PDEMASK)
91447636 289
5d5c5d0d
A
290#define pdeidx(pmap, a) (((a) >> PDSHIFT) & ((1ULL<<(48 - PDSHIFT)) -1))
291#define pdptidx(pmap, a) (((a) >> PDPTSHIFT) & ((1ULL<<(48 - PDPTSHIFT)) -1))
292#define pml4idx(pmap, a) (((a) >> PML4SHIFT) & ((1ULL<<(48 - PML4SHIFT)) -1))
1c79356b
A
293
294/*
295 * Convert page descriptor index to user virtual address
296 */
297#define pdetova(a) ((vm_offset_t)(a) << PDESHIFT)
298
299/*
300 * Convert address offset to page table index
301 */
5d5c5d0d 302#define ptenum(a) (((vm_offset_t)(a) >> PTESHIFT) & PTEMASK)
1c79356b 303
1c79356b
A
304/*
305 * Hardware pte bit definitions (to be used directly on the ptes
306 * without using the bit fields).
307 */
308
309#define INTEL_PTE_VALID 0x00000001
310#define INTEL_PTE_WRITE 0x00000002
91447636 311#define INTEL_PTE_RW 0x00000002
1c79356b
A
312#define INTEL_PTE_USER 0x00000004
313#define INTEL_PTE_WTHRU 0x00000008
314#define INTEL_PTE_NCACHE 0x00000010
315#define INTEL_PTE_REF 0x00000020
316#define INTEL_PTE_MOD 0x00000040
91447636
A
317#define INTEL_PTE_PS 0x00000080
318#define INTEL_PTE_GLOBAL 0x00000100
1c79356b 319#define INTEL_PTE_WIRED 0x00000200
5d5c5d0d 320#define INTEL_PTE_PFN PG_FRAME
55e303ae 321#define INTEL_PTE_PTA 0x00000080
1c79356b 322
5d5c5d0d
A
323#define INTEL_PTE_NX (1ULL << 63)
324
325#define INTEL_PTE_INVALID 0
326
91447636
A
327#define pa_to_pte(a) ((a) & INTEL_PTE_PFN) /* XXX */
328#define pte_to_pa(p) ((p) & INTEL_PTE_PFN) /* XXX */
1c79356b
A
329#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
330
5d5c5d0d
A
/*
 * Build pte values for kernel/user mappings from a physical address;
 * pa_to_pte() masks the address to the page-frame bits first.
 */
#define pte_kernel_rw(p)	((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID | INTEL_PTE_RW))
#define pte_kernel_ro(p)	((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID))
/*
 * Fixed: pte_user_rw expanded to "((pt_entry)t)(...))" -- the cast's
 * closing paren was misplaced inside the type name, leaving an
 * undefined identifier and unbalanced parentheses.
 */
#define pte_user_rw(p)		((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID | INTEL_PTE_USER | INTEL_PTE_RW))
#define pte_user_ro(p)		((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID | INTEL_PTE_USER))
335
9bccf70c
A
336#define PMAP_DEFAULT_CACHE 0
337#define PMAP_INHIBIT_CACHE 1
338#define PMAP_GUARDED_CACHE 2
339#define PMAP_ACTIVATE_CACHE 4
340#define PMAP_NO_GUARD_CACHE 8
341
342
91447636
A
343#ifndef ASSEMBLER
344
345#include <sys/queue.h>
346
1c79356b 347/*
91447636
A
348 * Address of current and alternate address space page table maps
349 * and directories.
1c79356b 350 */
1c79356b 351
91447636
A
352extern pt_entry_t PTmap[], APTmap[], Upte;
353extern pd_entry_t PTD[], APTD[], PTDpde[], APTDpde[], Upde;
354
355extern pd_entry_t *IdlePTD; /* physical address of "Idle" state directory */
91447636 356extern pdpt_entry_t *IdlePDPT;
5d5c5d0d
A
357
358extern pmap_paddr_t lo_kernel_cr3;
359
360extern pml4_entry_t *IdlePML4;
361extern pdpt_entry_t *IdlePDPT64;
362extern addr64_t kernel64_cr3;
363extern boolean_t no_shared_cr3;
91447636
A
364
365/*
366 * virtual address to page table entry and
367 * to physical address. Likewise for alternate address space.
368 * Note: these work recursively, thus vtopte of a pte will give
369 * the corresponding pde that in turn maps it.
370 */
5d5c5d0d 371#define vtopte(va) (PTmap + i386_btop((vm_offset_t)va))
91447636
A
372
373
1c79356b
A
374typedef volatile long cpu_set; /* set of CPUs - must be <= 32 */
375 /* changed by other processors */
91447636
A
376struct md_page {
377 int pv_list_count;
378 TAILQ_HEAD(,pv_entry) pv_list;
379};
380
381#include <vm/vm_page.h>
382
383/*
384 * For each vm_page_t, there is a list of all currently
385 * valid virtual mappings of that page. An entry is
386 * a pv_entry_t; the list is the pv_table.
387 */
1c79356b
A
388
389struct pmap {
5d5c5d0d
A
390 pd_entry_t *dirbase; /* page directory pointer */
391 pmap_paddr_t pdirbase; /* phys. address of dirbase */
392 vm_object_t pm_obj; /* object to hold pde's */
1c79356b 393 int ref_count; /* reference count */
5d5c5d0d
A
394 int nx_enabled;
395 boolean_t pm_64bit;
396 boolean_t pm_kernel_cr3;
1c79356b
A
397 decl_simple_lock_data(,lock) /* lock on map */
398 struct pmap_statistics stats; /* map statistics */
91447636 399 vm_offset_t pm_hold; /* true pdpt zalloc addr */
5d5c5d0d
A
400 pmap_paddr_t pm_cr3; /* physical addr */
401 pdpt_entry_t *pm_pdpt; /* KVA of 3rd level page */
402 pml4_entry_t *pm_pml4; /* VKA of top level */
403 vm_object_t pm_obj_pdpt; /* holds pdpt pages */
404 vm_object_t pm_obj_pml4; /* holds pml4 pages */
405 vm_object_t pm_obj_top; /* holds single top level page */
1c79356b
A
406};
407
5d5c5d0d
A
408
409#define PMAP_PDPT_FIRST_WINDOW 0
410#define PMAP_PDPT_NWINDOWS 4
411#define PMAP_PDE_FIRST_WINDOW (PMAP_PDPT_NWINDOWS)
412#define PMAP_PDE_NWINDOWS 4
413#define PMAP_PTE_FIRST_WINDOW (PMAP_PDE_FIRST_WINDOW + PMAP_PDE_NWINDOWS)
414#define PMAP_PTE_NWINDOWS 4
415
416#define PMAP_NWINDOWS_FIRSTFREE (PMAP_PTE_FIRST_WINDOW + PMAP_PTE_NWINDOWS)
417#define PMAP_WINDOW_SIZE 8
418#define PMAP_NWINDOWS (PMAP_NWINDOWS_FIRSTFREE + PMAP_WINDOW_SIZE)
419
91447636
A
420typedef struct {
421 pt_entry_t *prv_CMAP;
422 caddr_t prv_CADDR;
423} mapwindow_t;
424
425typedef struct cpu_pmap {
5d5c5d0d
A
426 int pdpt_window_index;
427 int pde_window_index;
428 int pte_window_index;
91447636 429 mapwindow_t mapwindow[PMAP_NWINDOWS];
91447636
A
430} cpu_pmap_t;
431
5d5c5d0d
A
432
433extern mapwindow_t *pmap_get_mapwindow(pt_entry_t pentry);
91447636
A
434
435typedef struct pmap_memory_regions {
436 ppnum_t base;
437 ppnum_t end;
438 ppnum_t alloc;
439 uint32_t type;
440} pmap_memory_region_t;
441
442unsigned pmap_memory_region_count;
443unsigned pmap_memory_region_current;
444
5d5c5d0d 445#define PMAP_MEMORY_REGIONS_SIZE 128
91447636
A
446
447extern pmap_memory_region_t pmap_memory_regions[];
448
5d5c5d0d
A
449static inline void set_dirbase(pmap_t tpmap, __unused int tcpu) {
450 current_cpu_datap()->cpu_task_cr3 = (pmap_paddr_t)((tpmap)->pm_cr3);
451 current_cpu_datap()->cpu_task_map = tpmap->pm_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;
1c79356b
A
452}
453
1c79356b
A
454/*
455 * External declarations for PMAP_ACTIVATE.
456 */
457
5d5c5d0d 458extern void process_pmap_updates(void);
1c79356b 459extern void pmap_update_interrupt(void);
1c79356b
A
460
461/*
462 * Machine dependent routines that are used only for i386/i486/i860.
463 */
1c79356b 464
5d5c5d0d 465extern addr64_t (kvtophys)(
1c79356b
A
466 vm_offset_t addr);
467
468extern pt_entry_t *pmap_pte(
469 struct pmap *pmap,
5d5c5d0d
A
470 vm_map_offset_t addr);
471
472extern pd_entry_t *pmap_pde(
473 struct pmap *pmap,
474 vm_map_offset_t addr);
475
476extern pd_entry_t *pmap64_pde(
477 struct pmap *pmap,
478 vm_map_offset_t addr);
479
480extern pdpt_entry_t *pmap64_pdpt(
481 struct pmap *pmap,
482 vm_map_offset_t addr);
1c79356b
A
483
484extern vm_offset_t pmap_map(
485 vm_offset_t virt,
5d5c5d0d
A
486 vm_map_offset_t start,
487 vm_map_offset_t end,
488 vm_prot_t prot,
489 unsigned int flags);
1c79356b
A
490
491extern vm_offset_t pmap_map_bd(
492 vm_offset_t virt,
5d5c5d0d
A
493 vm_map_offset_t start,
494 vm_map_offset_t end,
495 vm_prot_t prot,
496 unsigned int flags);
1c79356b
A
497
498extern void pmap_bootstrap(
5d5c5d0d
A
499 vm_offset_t load_start,
500 boolean_t IA32e);
1c79356b
A
501
502extern boolean_t pmap_valid_page(
91447636 503 ppnum_t pn);
1c79356b
A
504
505extern int pmap_list_resident_pages(
506 struct pmap *pmap,
507 vm_offset_t *listp,
508 int space);
509
5d5c5d0d 510extern void pmap_commpage32_init(
91447636
A
511 vm_offset_t kernel,
512 vm_offset_t user,
513 int count);
5d5c5d0d
A
514extern void pmap_commpage64_init(
515 vm_offset_t kernel,
516 vm_map_offset_t user,
517 int count);
518
91447636
A
519extern struct cpu_pmap *pmap_cpu_alloc(
520 boolean_t is_boot_cpu);
521extern void pmap_cpu_free(
522 struct cpu_pmap *cp);
5d5c5d0d
A
523
524extern void pmap_map_block(
525 pmap_t pmap,
526 addr64_t va,
527 ppnum_t pa,
528 uint32_t size,
529 vm_prot_t prot,
530 int attr,
531 unsigned int flags);
91447636 532
1c79356b
A
533extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
534extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
55e303ae 535extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
91447636
A
536extern void pmap_sync_page_data_phys(ppnum_t pa);
537extern void pmap_sync_page_attributes_phys(ppnum_t pa);
1c79356b 538
5d5c5d0d
A
539extern kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size);
540extern kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr);
541extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
542extern void pmap_unmap_sharedpage(pmap_t pmap);
543extern void pmap_disable_NX(pmap_t pmap);
544extern void pmap_set_4GB_pagezero(pmap_t pmap);
545extern void pmap_clear_4GB_pagezero(pmap_t pmap);
546extern void pmap_load_kernel_cr3(void);
547extern vm_offset_t pmap_cpu_high_map_vaddr(int, enum high_cpu_types);
548extern vm_offset_t pmap_high_map_vaddr(enum high_cpu_types);
549extern vm_offset_t pmap_high_map(pt_entry_t, enum high_cpu_types);
550extern vm_offset_t pmap_cpu_high_shared_remap(int, enum high_cpu_types, vm_offset_t, int);
551extern vm_offset_t pmap_high_shared_remap(enum high_fixed_addresses, vm_offset_t, int);
552
553extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *, int *, int *);
554
555
556
1c79356b
A
557/*
558 * Macros for speed.
559 */
560
1c79356b
A
561
562#include <kern/spl.h>
563
55e303ae
A
564#if defined(PMAP_ACTIVATE_KERNEL)
565#undef PMAP_ACTIVATE_KERNEL
566#undef PMAP_DEACTIVATE_KERNEL
567#undef PMAP_ACTIVATE_USER
568#undef PMAP_DEACTIVATE_USER
569#endif
570
1c79356b 571
5d5c5d0d
A
572#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
573 spl_t spl; \
574 \
575 spl = splhigh(); \
576 if (current_cpu_datap()->cpu_tlb_invalid) \
577 process_pmap_updates(); \
578 splx(spl); \
1c79356b
A
579}
580
5d5c5d0d
A
581#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
582 spl_t spl; \
583 \
584 spl = splhigh(); \
585 process_pmap_updates(); \
586 splx(spl); \
1c79356b
A
587}
588
5d5c5d0d 589
1c79356b 590#define PMAP_ACTIVATE_MAP(map, my_cpu) { \
55e303ae 591 register pmap_t tpmap; \
5d5c5d0d
A
592 \
593 tpmap = vm_map_pmap(map); \
594 set_dirbase(tpmap, my_cpu); \
1c79356b
A
595}
596
597#define PMAP_DEACTIVATE_MAP(map, my_cpu)
598
5d5c5d0d
A
599#define PMAP_ACTIVATE_USER(th, my_cpu) { \
600 spl_t spl; \
601 \
602 spl = splhigh(); \
603 PMAP_ACTIVATE_MAP(th->map, my_cpu) \
604 splx(spl); \
1c79356b
A
605}
606
91447636 607#define PMAP_DEACTIVATE_USER(th, my_cpu)
1c79356b 608
5d5c5d0d 609
1c79356b
A
610#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) { \
611 spl_t spl; \
5d5c5d0d
A
612 pt_entry_t *kpdp; \
613 pt_entry_t *updp; \
614 int i; \
615 int need_flush; \
616 \
617 need_flush = 0; \
618 spl = splhigh(); \
1c79356b 619 if (old_th->map != new_th->map) { \
1c79356b
A
620 PMAP_DEACTIVATE_MAP(old_th->map, my_cpu); \
621 PMAP_ACTIVATE_MAP(new_th->map, my_cpu); \
1c79356b 622 } \
5d5c5d0d
A
623 kpdp = current_cpu_datap()->cpu_copywindow_pdp; \
624 for (i = 0; i < NCOPY_WINDOWS; i++) { \
625 if (new_th->machine.copy_window[i].user_base != (user_addr_t)-1) { \
626 updp = pmap_pde(new_th->map->pmap, \
627 new_th->machine.copy_window[i].user_base);\
628 *kpdp = updp ? *updp : 0; \
629 } \
630 kpdp++; \
631 } \
632 splx(spl); \
633 if (new_th->machine.copyio_state == WINDOWS_OPENED) \
634 need_flush = 1; \
635 else \
636 new_th->machine.copyio_state = WINDOWS_DIRTY; \
637 if (new_th->machine.physwindow_pte) { \
638 *(current_cpu_datap()->cpu_physwindow_ptep) = \
639 new_th->machine.physwindow_pte; \
640 if (need_flush == 0) \
641 invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base);\
642 } \
643 if (need_flush) \
644 flush_tlb(); \
1c79356b
A
645}
646
647#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \
648 spl_t spl; \
649 \
5d5c5d0d 650 spl = splhigh(); \
1c79356b
A
651 PMAP_DEACTIVATE_MAP(th->map, my_cpu); \
652 th->map = new_map; \
653 PMAP_ACTIVATE_MAP(th->map, my_cpu); \
654 splx(spl); \
5d5c5d0d 655 inval_copy_windows(th); \
1c79356b
A
656}
657
5d5c5d0d
A
658/*
659 * Marking the current cpu's cr3 inactive is achieved by setting its lsb.
 660 * Marking the current cpu's cr3 active once more involves clearing this bit.
661 * Note that valid page tables are page-aligned and so the bottom 12 bits
 662 * are normally zero.
663 * We can only mark the current cpu active/inactive but we can test any cpu.
664 */
665#define CPU_CR3_MARK_INACTIVE() \
666 current_cpu_datap()->cpu_active_cr3 |= 1
667
668#define CPU_CR3_MARK_ACTIVE() \
669 current_cpu_datap()->cpu_active_cr3 &= ~1
670
671#define CPU_CR3_IS_ACTIVE(cpu) \
672 ((cpu_datap(cpu)->cpu_active_cr3 & 1) == 0)
673
1c79356b
A
674#define MARK_CPU_IDLE(my_cpu) { \
675 /* \
676 * Mark this cpu idle, and remove it from the active set, \
677 * since it is not actively using any pmap. Signal_cpus \
678 * will notice that it is idle, and avoid signaling it, \
679 * but will queue the update request for when the cpu \
680 * becomes active. \
681 */ \
682 int s = splhigh(); \
5d5c5d0d
A
683 if (!cpu_mode_is64bit() || no_shared_cr3) \
684 process_pmap_updates(); \
685 else \
686 pmap_load_kernel_cr3(); \
687 CPU_CR3_MARK_INACTIVE(); \
688 __asm__ volatile("mfence"); \
1c79356b 689 splx(s); \
1c79356b
A
690}
691
5d5c5d0d 692#define MARK_CPU_ACTIVE(my_cpu) { \
1c79356b
A
693 \
694 int s = splhigh(); \
695 /* \
696 * If a kernel_pmap update was requested while this cpu \
697 * was idle, process it as if we got the interrupt. \
698 * Before doing so, remove this cpu from the idle set. \
699 * Since we do not grab any pmap locks while we flush \
700 * our TLB, another cpu may start an update operation \
701 * before we finish. Removing this cpu from the idle \
702 * set assures that we will receive another update \
703 * interrupt if this happens. \
704 */ \
5d5c5d0d
A
705 CPU_CR3_MARK_ACTIVE(); \
706 __asm__ volatile("mfence"); \
55e303ae 707 \
5d5c5d0d
A
708 if (current_cpu_datap()->cpu_tlb_invalid) \
709 process_pmap_updates(); \
1c79356b 710 splx(s); \
1c79356b
A
711}
712
1c79356b
A
713#define PMAP_CONTEXT(pmap, thread)
714
715#define pmap_kernel_va(VA) \
5d5c5d0d
A
716 ((((vm_offset_t) (VA)) >= vm_min_kernel_address) && \
717 (((vm_offset_t) (VA)) <= vm_max_kernel_address))
718
1c79356b
A
719
720#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
1c79356b
A
721#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
722#define pmap_attribute(pmap,addr,size,attr,value) \
723 (KERN_INVALID_ADDRESS)
9bccf70c
A
724#define pmap_attribute_cache_sync(addr,size,attr,value) \
725 (KERN_INVALID_ADDRESS)
765c9de3 726
1c79356b
A
727#endif /* ASSEMBLER */
728
5d5c5d0d 729
1c79356b 730#endif /* _PMAP_MACHINE_ */
5d5c5d0d
A
731
732
733#endif /* KERNEL_PRIVATE */