/*
 *
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_ 1

#include <mach_assert.h>

#include <arm/proc_reg.h>
#if defined(__arm64__)
#include <arm64/proc_reg.h>
#endif

/*
 * Machine-dependent structures for the physical map module.
 */

#ifndef ASSEMBLER

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>

/* Base address for low globals. */
#define LOW_GLOBAL_BASE_ADDRESS	0xfffffff000000000ULL

/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__
#define KERNEL_PMAP_HEAP_RANGE_START	VM_MIN_KERNEL_AND_KEXT_ADDRESS
#else
#define KERNEL_PMAP_HEAP_RANGE_START	LOW_GLOBAL_BASE_ADDRESS
#endif

#if defined(__arm64__)

typedef uint64_t	tt_entry_t;	/* translation table entry type */
#define TT_ENTRY_NULL	((tt_entry_t *) 0)

typedef uint64_t	pt_entry_t;	/* page table entry type */
#define PT_ENTRY_NULL	((pt_entry_t *) 0)

typedef uint64_t	pmap_paddr_t;	/* physical address (not ppnum_t) */

#elif defined(__arm__)

typedef uint32_t	tt_entry_t;	/* translation table entry type */
#define TT_ENTRY_NULL	((tt_entry_t *) 0)

typedef uint32_t	pt_entry_t;	/* page table entry type */
#define PT_ENTRY_NULL	((pt_entry_t *) 0)

typedef uint32_t	pmap_paddr_t;	/* physical address (not ppnum_t) */

#else
#error unknown arch
#endif


/* superpages */
#define SUPERPAGE_NBASEPAGES	1	/* No superpage support */

/*
 * Convert addresses to pages and vice versa.
 * No rounding is used.
 */
#define arm_atop(x)	(((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x)	(((vm_map_address_t)(x)) << ARM_PGSHIFT)
/*
 * Round off or truncate to the nearest page. These will work
 * for either addresses or counts (i.e. 1 byte rounds to 1 page).
 */
#define arm_round_page(x)	\
	((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x)	(((vm_map_address_t)(x)) & ~ARM_PGMASK)
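
/*
 * Illustrative example, assuming 4 KB hardware pages (ARM_PGSHIFT == 12,
 * ARM_PGMASK == 0xFFF):
 *
 *	arm_round_page(0x1001) == 0x2000   (one byte past a boundary rounds up)
 *	arm_trunc_page(0x1fff) == 0x1000   (truncated down to the boundary)
 *	arm_atop(0x3000)       == 3        (address to page number)
 *	arm_ptoa(3)            == 0x3000   (page number to address)
 */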

/* Convert address offset to page table index */
#define ptenum(a)	((((a) & ARM_TT_LEAF_INDEX_MASK) >> ARM_TT_LEAF_SHIFT))

/*
 * For setups where the kernel page size does not match the hardware
 * page size (the kernel page size must presumably be a multiple of
 * the hardware page size), we will need to determine what the page
 * ratio is.
 */
#define PAGE_RATIO		((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4	(PAGE_RATIO == 4)
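
/*
 * Illustrative example, assuming a 16 KB kernel page size (PAGE_SHIFT == 14)
 * over 4 KB hardware pages (ARM_PGSHIFT == 12): PAGE_RATIO is
 * (1 << 14) >> 12 == 4, so TEST_PAGE_RATIO_4 is true and each kernel page
 * is backed by four hardware page table entries.
 */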

#if (__ARM_VMSA__ <= 7)
#define NTTES	(ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES	((ARM_PGBYTES / 4) / sizeof(pt_entry_t))
#else
#define NTTES	(ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES	(ARM_PGBYTES / sizeof(pt_entry_t))
#endif
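
/*
 * Illustrative example, assuming 4 KB pages: both entry types are 4 bytes
 * on ARMv7 (__ARM_VMSA__ <= 7), so NTTES == 1024 translation table entries
 * per page, while NPTES == 256 matches the 1 KB, 256-entry L2 page tables
 * of the short-descriptor format (four L2 tables fit in one 4 KB page,
 * hence the division by 4).
 */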

extern void flush_mmu_tlb(void);
extern void flush_core_tlb(void);
#if defined(__arm64__)
extern void flush_mmu_tlb_allentries(uint64_t, uint64_t);
extern void flush_mmu_tlb_entry(uint64_t);
extern void flush_mmu_tlb_entries(uint64_t, uint64_t);
extern void flush_mmu_tlb_asid(uint64_t);
extern void flush_core_tlb_asid(uint64_t);
/*
 * TLBI appears to only deal in 4KB page addresses, so give
 * it an explicit shift of 12.
 */
#define TLBI_ADDR_SIZE	44
#define TLBI_ADDR_MASK	((1ULL << TLBI_ADDR_SIZE) - 1)
#define TLBI_ADDR_SHIFT	(12)
#define tlbi_addr(x)	(((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)

#define TLBI_ASID_SHIFT	48
#define TLBI_ASID_SIZE	16
#define TLBI_ASID_MASK	(((1ULL << TLBI_ASID_SIZE) - 1) << TLBI_ASID_SHIFT)
#define tlbi_asid(x)	((((uint64_t)(x)) << TLBI_ASID_SHIFT) & TLBI_ASID_MASK)
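
/*
 * Illustrative example: for the 4 KB-aligned address 0x12345000,
 * tlbi_addr(0x12345000) == 0x12345 (the address shifted down by 12 and
 * masked to 44 bits), and tlbi_asid(5) == 0x0005000000000000 (the ASID
 * placed in bits 63:48 of the TLBI operand).
 */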
#else
extern void flush_mmu_tlb_entry(uint32_t);
extern void flush_mmu_tlb_entries(uint32_t, uint32_t);
extern void flush_mmu_tlb_mva_entries(uint32_t);
extern void flush_mmu_tlb_asid(uint32_t);
extern void flush_core_tlb_asid(uint32_t);
#endif
extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);

#if defined(__arm64__)
extern uint64_t get_mmu_control(void);
extern void set_mmu_control(uint64_t);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
#else
extern uint32_t get_mmu_control(void);
extern void set_mmu_control(uint32_t);
extern uint32_t get_aux_control(void);
extern void set_aux_control(uint32_t);
extern void set_mmu_ttb(pmap_paddr_t);
extern void set_mmu_ttb_alternate(pmap_paddr_t);
extern void set_context_id(uint32_t);
#endif

extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);
#if (__ARM_VMSA__ <= 7)
/* Convert address offset to translation table index */
#define ttenum(a)	((a) >> ARM_TT_L1_SHIFT)

/* Convert translation table index to user virtual address */
#define tteitova(a)	((a) << ARM_TT_L1_SHIFT)

#define pa_to_suptte(a)		((a) & ARM_TTE_SUPER_L1_MASK)
#define suptte_to_pa(p)		((p) & ARM_TTE_SUPER_L1_MASK)

#define pa_to_sectte(a)		((a) & ARM_TTE_BLOCK_L1_MASK)
#define sectte_to_pa(p)		((p) & ARM_TTE_BLOCK_L1_MASK)

#define pa_to_tte(a)		((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p)		((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a)		((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p)		((p) & ARM_PTE_PAGE_MASK)
#define pte_increment_pa(p)	((p) += ptoa(1))

#define ARM_NESTING_SIZE_MIN	((PAGE_SIZE/0x1000)*4*ARM_TT_L1_SIZE)
#define ARM_NESTING_SIZE_MAX	((256*ARM_TT_L1_SIZE))

#else

/* Convert address offset to translation table index */
#define ttel0num(a)	(((a) & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a)	(((a) & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a)	(((a) & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)

#define pa_to_tte(a)		((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p)		((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a)		((a) & ARM_PTE_MASK)
#define pte_to_pa(p)		((p) & ARM_PTE_MASK)
#define pte_to_ap(p)		(((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p)	((p) += ptoa(1))

#define ARM_NESTING_SIZE_MIN	((PAGE_SIZE/ARM_PGBYTES)*ARM_TT_L2_SIZE)
#define ARM_NESTING_SIZE_MAX	(0x0000000010000000ULL)

#define TLBFLUSH_SIZE	(ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE))

#endif /* __ARM_VMSA__ <= 7 */

#define PMAP_GC_INFLIGHT	1
#define PMAP_GC_WAIT		2

/*
 * Convert translation/page table entry to kernel virtual address
 */
#define ttetokv(a)	(phystokv(tte_to_pa(a)))
#define ptetokv(a)	(phystokv(pte_to_pa(a)))

struct pmap {
	tt_entry_t		*tte;		/* translation table entries */
	pmap_paddr_t		ttep;		/* translation table physical */
	vm_map_address_t	min;		/* min address in pmap */
	vm_map_address_t	max;		/* max address in pmap */
	unsigned int		asid;		/* address space id */
	unsigned int		vasid;		/* virtual address space id */
	unsigned int		stamp;		/* creation stamp */
	unsigned int		wired;		/* wired bits */
	volatile uint32_t	ref_count;	/* pmap reference count */
	unsigned int		cpu_ref;	/* number of cpus using pmap */
	unsigned int		gc_status;	/* gc status */
	ledger_t		ledger;		/* ledger tracking phys mappings */
	decl_simple_lock_data(,lock)		/* lock on map */
	struct pmap_statistics	stats;		/* map statistics */
	queue_chain_t		pmaps;		/* global list of pmaps */
	tt_entry_t		*tt_entry_free;	/* free translation table entries */
	tt_entry_t		*prev_tte;	/* previous translation table */
	unsigned int		tte_index_max;	/* max tte index in translation table entries */
	boolean_t		nx_enabled;	/* no execute */
	boolean_t		nested;		/* is nested */
	boolean_t		is_64bit;	/* is 64bit */
	struct pmap		*nested_pmap;	/* nested pmap */
	vm_map_address_t	nested_region_grand_addr;
	vm_map_address_t	nested_region_subord_addr;
	vm_map_offset_t		nested_region_size;
	unsigned int		*nested_region_asid_bitmap;
	unsigned int		nested_region_asid_bitmap_size;

#if (__ARM_VMSA__ <= 7)
	decl_simple_lock_data(,tt1_lock)	/* lock on tt1 */
#endif
#if MACH_ASSERT
	int			pmap_pid;
	char			pmap_procname[17];
#endif /* MACH_ASSERT */
#if DEVELOPMENT || DEBUG
	boolean_t		footprint_suspended;
	boolean_t		footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */
};

/* typedef struct pmap *pmap_t; */
#define PMAP_NULL	((pmap_t) 0)


/*
 * WIMG control
 */
#define VM_MEM_INNER		0x10
#define VM_MEM_EARLY_ACK	0x20

#define VM_WIMG_DEFAULT		(VM_MEM_COHERENT)
#define VM_WIMG_COPYBACK	(VM_MEM_COHERENT)
#define VM_WIMG_INNERWBACK	(VM_MEM_COHERENT | VM_MEM_INNER)
#define VM_WIMG_IO		(VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_POSTED		(VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED | VM_MEM_EARLY_ACK)
#define VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
#define VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
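
/*
 * Note (not from the original header): the WIMG names are inherited from
 * the PowerPC storage attribute bits (Write-through, caching-Inhibited,
 * Memory coherent, Guarded). For example, VM_WIMG_IO combines coherent,
 * non-cacheable, and guarded, the usual choice for device registers,
 * while VM_WIMG_POSTED adds VM_MEM_EARLY_ACK for posted (early-
 * acknowledged) device writes.
 */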


#if VM_DEBUG
extern int pmap_list_resident_pages(
	pmap_t		pmap,
	vm_offset_t	*listp,
	int		space
	);
#else /* #if VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* #if VM_DEBUG */

extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);

/* globals shared between arm_vm_init and pmap */
extern tt_entry_t *cpu_tte;	/* first CPU's translation table (shared with kernel pmap) */
extern pmap_paddr_t cpu_ttep;	/* physical translation table addr */

#if __arm64__
extern void *ropagetable_begin;
extern void *ropagetable_end;
#endif

#if __arm64__
extern tt_entry_t *invalid_tte;	/* global invalid translation table */
extern pmap_paddr_t invalid_ttep;	/* physical invalid translation table addr */
#endif

#define PMAP_CONTEXT(pmap, thread)

/*
 * Platform-dependent prototypes
 */
extern void pmap_switch_user_ttb(pmap_t pmap);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_collect(pmap_t pmap);
extern void pmap_gc(void);
#if defined(__arm64__)
extern vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va);
#endif

/*
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) {			\
	th->map = new_map;					\
	pmap_set_pmap(vm_map_pmap(new_map), th);		\
}

#define pmap_kernel()						\
	(kernel_pmap)

#define pmap_compressed(pmap)					\
	((pmap)->stats.compressed)

#define pmap_resident_count(pmap)				\
	((pmap)->stats.resident_count)

#define pmap_resident_max(pmap)					\
	((pmap)->stats.resident_max)

#define MACRO_NOOP

#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)	\
	MACRO_NOOP

#define pmap_pageable(pmap, start, end, pageable)		\
	MACRO_NOOP

#define pmap_kernel_va(VA)					\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value)		\
	(KERN_INVALID_ADDRESS)

#define copyinmsg(from, to, cnt)				\
	copyin(from, to, cnt)

#define copyoutmsg(from, to, cnt)				\
	copyout(from, to, cnt)

extern pmap_paddr_t kvtophys(vm_offset_t va);

extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd(vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);

#define PMAP_MAP_BD_DEVICE	0x1
#define PMAP_MAP_BD_WCOMB	0x2
#define PMAP_MAP_BD_POSTED	0x3
#define PMAP_MAP_BD_MASK	0x3

extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);

extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);
extern void pmap_init_pte_static_page(pmap_t, pt_entry_t *, pmap_paddr_t);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern vm_map_address_t pmap_create_sharedpage(void);
extern void pmap_insert_sharedpage(pmap_t pmap);
extern void pmap_protect_sharedpage(void);

extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);

extern void pt_fake_zone_init(int);
extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
			      uint64_t *, int *, int *, int *);

extern boolean_t pmap_valid_page(ppnum_t pn);

#define MACHINE_PMAP_IS_EMPTY	1
extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);

#define ARM_PMAP_MAX_OFFSET_DEFAULT	0x01
#define ARM_PMAP_MAX_OFFSET_MIN		0x02
#define ARM_PMAP_MAX_OFFSET_MAX		0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE	0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO	0x10

#define ASID_SHIFT	(11)				/* Shift for the maximum virtual ASID value (2048) */
#define MAX_ASID	(1 << ASID_SHIFT)		/* Max supported ASIDs (can be virtual) */
#define ARM_ASID_SHIFT	(8)				/* Shift for the maximum ARM ASID value (256) */
#define ARM_MAX_ASID	(1 << ARM_ASID_SHIFT)		/* Max ASIDs supported by the hardware */
#define ASID_VIRT_BITS	(ASID_SHIFT - ARM_ASID_SHIFT)	/* The number of virtual bits in a virtual ASID */
#define NBBY	8
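
/*
 * Worked arithmetic for the values above: ASID_SHIFT == 11 gives
 * MAX_ASID == 2048 virtual ASIDs, ARM_ASID_SHIFT == 8 gives
 * ARM_MAX_ASID == 256 hardware ASIDs, and ASID_VIRT_BITS == 3, i.e.
 * up to 2^3 == 8 virtual ASIDs can share each hardware ASID.
 */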

extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);

boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);

/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX			0
#define ARM_FORCE_FAST_FAULT_INDEX		1
#define MAPPING_FREE_PRIME_INDEX		2
#define MAPPING_REPLENISH_INDEX			3
#define PHYS_ATTRIBUTE_CLEAR_INDEX		4
#define PHYS_ATTRIBUTE_SET_INDEX		5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX	6
#define PMAP_CHANGE_WIRING_INDEX		7
#define PMAP_CREATE_INDEX			8
#define PMAP_DESTROY_INDEX			9
#define PMAP_ENTER_OPTIONS_INDEX		10
#define PMAP_EXTRACT_INDEX			11
#define PMAP_FIND_PHYS_INDEX			12
#define PMAP_INSERT_SHAREDPAGE_INDEX		13
#define PMAP_IS_EMPTY_INDEX			14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX		15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX	16
#define PMAP_NEST_INDEX				17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX		18
#define PMAP_PROTECT_OPTIONS_INDEX		19
#define PMAP_QUERY_PAGE_INFO_INDEX		20
#define PMAP_QUERY_RESIDENT_INDEX		21
#define PMAP_REFERENCE_INDEX			22
#define PMAP_REMOVE_OPTIONS_INDEX		23
#define PMAP_RETURN_INDEX			24
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX		25
#define PMAP_SET_NESTED_INDEX			26
#define PMAP_SET_PROCESS_INDEX			27
#define PMAP_SWITCH_INDEX			28
#define PMAP_SWITCH_USER_TTB_INDEX		29
#define PMAP_UNHINT_KV_ADDR_INDEX		30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX	31
#define PMAP_UNNEST_OPTIONS_INDEX		32
#define PMAP_FOOTPRINT_SUSPEND_INDEX		33
#define PMAP_CPU_DATA_INIT_INDEX		34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX	35

#define MAX_PMAP_INDEX				36

#define PMAP_INVALID_CPU_NUM			(~0U)

struct pmap_cpu_data {
	pmap_t		cpu_user_pmap;
	unsigned int	cpu_number;
	unsigned int	cpu_user_pmap_stamp;

	/*
	 * This supports overloading of ARM ASIDs by the pmap. The field needs
	 * to be wide enough to cover all the virtual bits in a virtual ASID.
	 * With 256 physical ASIDs, 8-bit fields let us support up to 65536
	 * virtual ASIDs, minus all that would map on to 0 (as 0 is a global
	 * ASID).
	 *
	 * If we were to use bitfield shenanigans here, we could save a bit of
	 * memory by only having enough bits to support MAX_ASID. However, such
	 * an implementation would be more error prone.
	 */
	uint8_t cpu_asid_high_bits[ARM_MAX_ASID];
};
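
/*
 * Sketch of how cpu_asid_high_bits can be used (an assumed illustration,
 * not code from this header): the low ARM_ASID_SHIFT bits of a virtual
 * ASID select the hardware ASID, and the remaining high bits are compared
 * against the per-CPU record. A mismatch means this hardware ASID was last
 * used here by a different virtual ASID, so its stale TLB entries must be
 * flushed before reuse:
 *
 *	uint32_t vasid     = pmap->vasid;
 *	uint8_t  hw_asid   = vasid & (ARM_MAX_ASID - 1);
 *	uint8_t  high_bits = (uint8_t)(vasid >> ARM_ASID_SHIFT);
 *	if (cpu_data->cpu_asid_high_bits[hw_asid] != high_bits) {
 *		cpu_data->cpu_asid_high_bits[hw_asid] = high_bits;
 *		flush_core_tlb_asid(tlbi_asid(hw_asid));  (arm64 operand form)
 *	}
 */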

typedef struct pmap_cpu_data pmap_cpu_data_t;

/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t *pmap_get_cpu_data(void);

#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA

extern kern_return_t pmap_return(boolean_t do_panic, boolean_t do_recurse);

#endif /* #ifndef ASSEMBLER */

#endif /* #ifndef _ARM_PMAP_H_ */