/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_ 1

#include <mach_assert.h>

#include <arm/proc_reg.h>
#if defined(__arm64__)
#include <arm64/proc_reg.h>
#endif
/*
 * Machine-dependent structures for the physical map module.
 */

#ifndef ASSEMBLER

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#if __ARM_KERNEL_PROTECT__
/*
 * For __ARM_KERNEL_PROTECT__, we need twice as many ASIDs to support having
 * unique EL0 and EL1 ASIDs for each pmap.
 */
#define ASID_SHIFT		(12)			/* Shift for the maximum virtual ASID value (4096) */
#else /* __ARM_KERNEL_PROTECT__ */
#define ASID_SHIFT		(11)			/* Shift for the maximum virtual ASID value (2048) */
#endif /* __ARM_KERNEL_PROTECT__ */
#define MAX_ASID		(1 << ASID_SHIFT)	/* Max supported ASIDs (can be virtual) */
#define ARM_ASID_SHIFT		(8)			/* Shift for the maximum ARM ASID value (256) */
#define ARM_MAX_ASID		(1 << ARM_ASID_SHIFT)	/* Max ASIDs supported by the hardware */
#define ASID_VIRT_BITS		(ASID_SHIFT - ARM_ASID_SHIFT)	/* The number of virtual bits in a virtual ASID */
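
/*
 * Illustration (not part of the interface): under this scheme, a virtual
 * ASID can be viewed as a hardware ASID in its low ARM_ASID_SHIFT bits,
 * with ASID_VIRT_BITS of software-managed bits above it. The variable
 * names below are hypothetical:
 *
 *	hw_asid   = vasid & (ARM_MAX_ASID - 1);	// low 8 bits go to the MMU
 *	high_bits = vasid >> ARM_ASID_SHIFT;	// tracked by software
 */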
struct pmap_cpu_data {
	unsigned int cpu_number;
	unsigned int cpu_user_pmap_stamp;

	/*
	 * This supports overloading of ARM ASIDs by the pmap. The field needs
	 * to be wide enough to cover all the virtual bits in a virtual ASID.
	 * With 256 physical ASIDs, 8-bit fields let us support up to 65536
	 * Virtual ASIDs, minus all that would map on to 0 (as 0 is a global
	 * ASID).
	 *
	 * If we were to use bitfield shenanigans here, we could save a bit of
	 * memory by only having enough bits to support MAX_ASID. However, such
	 * an implementation would be more error prone.
	 */
	uint8_t cpu_asid_high_bits[ARM_MAX_ASID];
};
typedef struct pmap_cpu_data pmap_cpu_data_t;
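
/*
 * A sketch of one plausible use, inferred from the comment above and not a
 * definitive implementation: on context switch, record the high bits of the
 * active virtual ASID per hardware ASID, so a later switch can detect when
 * a hardware ASID is being reused by a different virtual ASID and flush it:
 *
 *	uint8_t new_high = vasid >> ARM_ASID_SHIFT;
 *	if (cpu_data->cpu_asid_high_bits[hw_asid] != new_high) {
 *		cpu_data->cpu_asid_high_bits[hw_asid] = new_high;
 *		flush_core_tlb_asid(hw_asid);	// stale entries belong to another virtual ASID
 *	}
 */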
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>

/* Base address for low globals. */
#define LOW_GLOBAL_BASE_ADDRESS		0xfffffff000000000ULL
/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__
#define KERNEL_PMAP_HEAP_RANGE_START	VM_MIN_KERNEL_AND_KEXT_ADDRESS
#else
#define KERNEL_PMAP_HEAP_RANGE_START	LOW_GLOBAL_BASE_ADDRESS
#endif
#if defined(__arm64__)

typedef uint64_t	tt_entry_t;	/* translation table entry type */
#define TT_ENTRY_NULL	((tt_entry_t *) 0)

typedef uint64_t	pt_entry_t;	/* page table entry type */
#define PT_ENTRY_NULL	((pt_entry_t *) 0)

typedef uint64_t	pmap_paddr_t;	/* physical address (not ppnum_t) */

#elif defined(__arm__)

typedef uint32_t	tt_entry_t;	/* translation table entry type */
#define TT_ENTRY_NULL	((tt_entry_t *) 0)

typedef uint32_t	pt_entry_t;	/* page table entry type */
#define PT_ENTRY_NULL	((pt_entry_t *) 0)

typedef uint32_t	pmap_paddr_t;	/* physical address (not ppnum_t) */

#endif

#define SUPERPAGE_NBASEPAGES	1	/* No superpage support */
/*
 * Convert addresses to pages and vice versa.
 * No rounding is used.
 */
#define arm_atop(x)		(((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x)		(((vm_map_address_t)(x)) << ARM_PGSHIFT)

/*
 * Round off or truncate to the nearest page. These will work
 * for either addresses or counts. (i.e. 1 byte rounds to 1 page
 * bytes.)
 */
#define arm_round_page(x)	((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x)	(((vm_map_address_t)(x)) & ~ARM_PGMASK)
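
/*
 * Worked example (assuming 4K hardware pages, i.e. ARM_PGSHIFT == 12 and
 * ARM_PGMASK == 0xFFF):
 *
 *	arm_round_page(0x1001) == 0x2000	// 1 byte past a page rounds up
 *	arm_trunc_page(0x1FFF) == 0x1000	// truncates to the page base
 *	arm_atop(0x2000)       == 2		// address to page number
 */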
/* Convert address offset to page table index */
#define ptenum(a)		((((a) & ARM_TT_LEAF_INDEX_MASK) >> ARM_TT_LEAF_SHIFT))
/*
 * For setups where the kernel page size does not match the hardware
 * page size (presumably, the kernel page size must be a multiple of
 * the hardware page size), we will need to determine what the page
 * ratio is.
 */
#define PAGE_RATIO		((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4	(PAGE_RATIO == 4)
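
/*
 * For instance, with a 16K kernel page size (PAGE_SHIFT == 14) on 4K
 * hardware pages (ARM_PGSHIFT == 12):
 *
 *	PAGE_RATIO == ((1 << 14) >> 12) == 4	// TEST_PAGE_RATIO_4 is true
 */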
#if (__ARM_VMSA__ <= 7)
#define NTTES	(ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES	((ARM_PGBYTES / 4) / sizeof(pt_entry_t))
#else
#define NTTES	(ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES	(ARM_PGBYTES / sizeof(pt_entry_t))
#endif
extern void flush_mmu_tlb(void);
extern void flush_core_tlb(void);
#if defined(__arm64__)
extern void flush_mmu_tlb_allentries(uint64_t, uint64_t);
extern void flush_mmu_tlb_entry(uint64_t);
extern void flush_mmu_tlb_entries(uint64_t, uint64_t);
extern void flush_mmu_tlb_asid(uint64_t);
extern void flush_core_tlb_asid(uint64_t);

#define tlbi_addr(x)	(((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
#define tlbi_asid(x)	(((uint64_t)(x) << TLBI_ASID_SHIFT) & TLBI_ASID_MASK)
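
/*
 * Illustration (hypothetical variables): a TLBI operand combines the
 * shifted ASID with the VA page bits, so a single-entry invalidate for one
 * address space might be formed as:
 *
 *	uint64_t operand = tlbi_asid(asid) | tlbi_addr(va);
 *	flush_mmu_tlb_entry(operand);	// invalidate one VA in one ASID
 */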
#else
extern void flush_mmu_tlb_entry(uint32_t);
extern void flush_mmu_tlb_entries(uint32_t, uint32_t);
extern void flush_mmu_tlb_mva_entries(uint32_t);
extern void flush_mmu_tlb_asid(uint32_t);
extern void flush_core_tlb_asid(uint32_t);
#endif

extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);
#if defined(__arm64__)
extern uint64_t get_mmu_control(void);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
#else
extern uint32_t get_mmu_control(void);
extern void set_mmu_control(uint32_t);
extern uint32_t get_aux_control(void);
extern void set_aux_control(uint32_t);
extern void set_mmu_ttb(pmap_paddr_t);
extern void set_mmu_ttb_alternate(pmap_paddr_t);
extern void set_context_id(uint32_t);
#endif

extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);
#if (__ARM_VMSA__ <= 7)
/* Convert address offset to translation table index */
#define ttenum(a)		((a) >> ARM_TT_L1_SHIFT)

/* Convert translation table index to user virtual address */
#define tteitova(a)		((a) << ARM_TT_L1_SHIFT)

#define pa_to_suptte(a)		((a) & ARM_TTE_SUPER_L1_MASK)
#define suptte_to_pa(p)		((p) & ARM_TTE_SUPER_L1_MASK)

#define pa_to_sectte(a)		((a) & ARM_TTE_BLOCK_L1_MASK)
#define sectte_to_pa(p)		((p) & ARM_TTE_BLOCK_L1_MASK)

#define pa_to_tte(a)		((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p)		((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a)		((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p)		((p) & ARM_PTE_PAGE_MASK)
#define pte_increment_pa(p)	((p) += ptoa(1))

#define ARM_NESTING_SIZE_MIN	((PAGE_SIZE/0x1000)*4*ARM_TT_L1_SIZE)
#define ARM_NESTING_SIZE_MAX	((256*ARM_TT_L1_SIZE))
#else

/* Convert address offset to translation table index */
#define ttel0num(a)	((a & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a)	((a & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a)	((a & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)

#define pa_to_tte(a)		((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p)		((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a)		((a) & ARM_PTE_MASK)
#define pte_to_pa(p)		((p) & ARM_PTE_MASK)
#define pte_to_ap(p)		(((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p)	((p) += ptoa(1))

#define ARM_NESTING_SIZE_MIN	((PAGE_SIZE/ARM_PGBYTES)*ARM_TT_L2_SIZE)
#define ARM_NESTING_SIZE_MAX	(0x0000000010000000ULL)

#define TLBFLUSH_SIZE	(ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE))

#endif /* __ARM_VMSA__ <= 7 */
#define PMAP_GC_INFLIGHT	1
#define PMAP_GC_WAIT		2

/*
 * Convert translation/page table entry to kernel virtual address.
 */
#define ttetokv(a)	(phystokv(tte_to_pa(a)))
#define ptetokv(a)	(phystokv(pte_to_pa(a)))
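
/*
 * Usage sketch (assuming phystokv() maps a physical address into the
 * kernel's physmap window): given a valid leaf PTE, ptetokv() yields a
 * kernel virtual address for the frame it maps:
 *
 *	pt_entry_t pte = ...;			// a valid, present PTE
 *	void *kva = (void *)ptetokv(pte);	// KVA of the mapped page
 */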
struct pmap {
	tt_entry_t		*tte;			/* translation table entries */
	pmap_paddr_t		ttep;			/* translation table physical */
	vm_map_address_t	min;			/* min address in pmap */
	vm_map_address_t	max;			/* max address in pmap */
	unsigned int		asid;			/* address space id */
	unsigned int		vasid;			/* virtual address space id */
	unsigned int		stamp;			/* creation stamp */
	unsigned int		wired;			/* wired bits */
	volatile uint32_t	ref_count;		/* pmap reference count */
	unsigned int		cpu_ref;		/* number of cpus using pmap */
	unsigned int		gc_status;		/* gc status */
	ledger_t		ledger;			/* ledger tracking phys mappings */
	decl_simple_lock_data(,lock)			/* lock on map */
	struct pmap_statistics	stats;			/* map statistics */
	queue_chain_t		pmaps;			/* global list of pmaps */
	tt_entry_t		*tt_entry_free;		/* free translation table entries */
	tt_entry_t		*prev_tte;		/* previous translation table */
	unsigned int		tte_index_max;		/* max tte index in translation table entries */
	boolean_t		nx_enabled;		/* no execute */
	boolean_t		nested;			/* is nested */
	boolean_t		is_64bit;		/* is 64bit */
	struct pmap		*nested_pmap;		/* nested pmap */
	vm_map_address_t	nested_region_grand_addr;
	vm_map_address_t	nested_region_subord_addr;
	vm_map_offset_t		nested_region_size;
	unsigned int		*nested_region_asid_bitmap;
	unsigned int		nested_region_asid_bitmap_size;

#if (__ARM_VMSA__ <= 7)
	decl_simple_lock_data(,tt1_lock)		/* lock on tt1 */
#endif
#if MACH_ASSERT
	boolean_t		pmap_stats_assert;
	char			pmap_procname[17];
#endif /* MACH_ASSERT */
#if DEVELOPMENT || DEBUG
	boolean_t		footprint_suspended;
	boolean_t		footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */
};
/* typedef struct pmap *pmap_t; */
#define PMAP_NULL	((pmap_t) 0)

#define VM_MEM_INNER		0x10
#define VM_MEM_EARLY_ACK	0x20

#define VM_WIMG_DEFAULT		(VM_MEM_COHERENT)
#define VM_WIMG_COPYBACK	(VM_MEM_COHERENT)
#define VM_WIMG_INNERWBACK	(VM_MEM_COHERENT | VM_MEM_INNER)
#define VM_WIMG_IO		(VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_POSTED		(VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED | VM_MEM_EARLY_ACK)
#define VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
#define VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
#if VM_DEBUG
extern int pmap_list_resident_pages(
	pmap_t		pmap,
	vm_offset_t	*listp,
	int		space);
#else /* #if VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* #if VM_DEBUG */
extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);

/* globals shared between arm_vm_init and pmap */
extern tt_entry_t *cpu_tte;		/* first CPU's translation table (shared with kernel pmap) */
extern pmap_paddr_t cpu_ttep;		/* physical translation table addr */

extern void *ropagetable_begin;
extern void *ropagetable_end;

extern tt_entry_t *invalid_tte;		/* global invalid translation table */
extern pmap_paddr_t invalid_ttep;	/* physical invalid translation table addr */
#define PMAP_CONTEXT(pmap, thread)

/*
 * Platform-dependent prototypes.
 */
extern void pmap_switch_user_ttb(pmap_t pmap);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_collect(pmap_t pmap);
extern void pmap_gc(void);
#if defined(__arm64__)
extern vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va);
#endif
/*
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	th->map = new_map;						\
	pmap_set_pmap(vm_map_pmap(new_map), th);			\
}

#define pmap_kernel()							\
	(kernel_pmap)

#define pmap_compressed(pmap)						\
	((pmap)->stats.compressed)

#define pmap_resident_count(pmap)					\
	((pmap)->stats.resident_count)

#define pmap_resident_max(pmap)						\
	((pmap)->stats.resident_max)

#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)

#define pmap_pageable(pmap, start, end, pageable)

#define pmap_kernel_va(VA)						\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value)			\
	(KERN_INVALID_ADDRESS)

#define copyinmsg(from, to, cnt)					\
	copyin(from, to, cnt)

#define copyoutmsg(from, to, cnt)					\
	copyout(from, to, cnt)
extern pmap_paddr_t kvtophys(vm_offset_t va);

extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd(vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);
#define PMAP_MAP_BD_DEVICE	0x1
#define PMAP_MAP_BD_WCOMB	0x2
#define PMAP_MAP_BD_POSTED	0x3
#define PMAP_MAP_BD_MASK	0x3

extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);
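
/*
 * Usage sketch (hypothetical variables, assuming sa/ea name the start and
 * end of the physical range to map): block-map a device register range
 * during bootstrap, selecting the mapping type via the PMAP_MAP_BD_*
 * option bits:
 *
 *	vm_map_address_t end_va = pmap_map_bd_with_options(va, pa_start,
 *	    pa_end, VM_PROT_READ | VM_PROT_WRITE, PMAP_MAP_BD_DEVICE);
 */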
extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);
extern void pmap_init_pte_static_page(pmap_t, pt_entry_t *, pmap_paddr_t);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern vm_map_address_t pmap_create_sharedpage(void);
extern void pmap_insert_sharedpage(pmap_t pmap);
extern void pmap_protect_sharedpage(void);

extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);
extern void pt_fake_zone_init(int);
extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
			      uint64_t *, int *, int *, int *);

extern boolean_t pmap_valid_page(ppnum_t pn);

#define MACHINE_PMAP_IS_EMPTY 1
extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
#define ARM_PMAP_MAX_OFFSET_DEFAULT	0x01
#define ARM_PMAP_MAX_OFFSET_MIN		0x02
#define ARM_PMAP_MAX_OFFSET_MAX		0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE	0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO	0x10

extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
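
/*
 * Usage sketch: query the maximum user VA offset for a 64-bit address
 * space under the device-specific default policy:
 *
 *	vm_map_offset_t max_va = pmap_max_offset(TRUE, ARM_PMAP_MAX_OFFSET_DEVICE);
 */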
boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);
/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX			0
#define ARM_FORCE_FAST_FAULT_INDEX		1
#define MAPPING_FREE_PRIME_INDEX		2
#define MAPPING_REPLENISH_INDEX			3
#define PHYS_ATTRIBUTE_CLEAR_INDEX		4
#define PHYS_ATTRIBUTE_SET_INDEX		5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX	6
#define PMAP_CHANGE_WIRING_INDEX		7
#define PMAP_CREATE_INDEX			8
#define PMAP_DESTROY_INDEX			9
#define PMAP_ENTER_OPTIONS_INDEX		10
#define PMAP_EXTRACT_INDEX			11
#define PMAP_FIND_PHYS_INDEX			12
#define PMAP_INSERT_SHAREDPAGE_INDEX		13
#define PMAP_IS_EMPTY_INDEX			14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX		15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX	16
#define PMAP_NEST_INDEX				17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX		18
#define PMAP_PROTECT_OPTIONS_INDEX		19
#define PMAP_QUERY_PAGE_INFO_INDEX		20
#define PMAP_QUERY_RESIDENT_INDEX		21
#define PMAP_REFERENCE_INDEX			22
#define PMAP_REMOVE_OPTIONS_INDEX		23
#define PMAP_RETURN_INDEX			24
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX		25
#define PMAP_SET_NESTED_INDEX			26
#define PMAP_SET_PROCESS_INDEX			27
#define PMAP_SWITCH_INDEX			28
#define PMAP_SWITCH_USER_TTB_INDEX		29
#define PMAP_UNHINT_KV_ADDR_INDEX		30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX	31
#define PMAP_UNNEST_OPTIONS_INDEX		32
#define PMAP_FOOTPRINT_SUSPEND_INDEX		33
#define PMAP_CPU_DATA_INIT_INDEX		34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX	35

#define MAX_PMAP_INDEX				36
#define PMAP_INVALID_CPU_NUM (~0U)

/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t *pmap_get_cpu_data(void);

#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA

extern kern_return_t pmap_return(boolean_t do_panic, boolean_t do_recurse);
#endif /* #ifndef ASSEMBLER */

#if __ARM_KERNEL_PROTECT__
/*
 * The exception vector mappings start at the middle of the kernel page table
 * range (so that the EL0 mapping can be located at the base of the range).
 */
#define ARM_KERNEL_PROTECT_EXCEPTION_START ((~((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)) + 1ULL)
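
/*
 * Derivation note: (~x) + 1 is the two's-complement negation of x, so the
 * expression above evaluates to
 *
 *	0 - ((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)
 *
 * i.e. an address half of the root translation table span below the top of
 * the 64-bit address space: the midpoint of the kernel page table range.
 */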
#endif /* __ARM_KERNEL_PROTECT__ */

#endif /* #ifndef _ARM_PMAP_H_ */