/*
 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_debug.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/lowglobals.h>

#include <pexpert/arm/boot.h>

#include <libkern/kernel_mach_header.h>
/*
 * Denotes the end of xnu.
 */
extern void *last_kernel_symbol;
vm_offset_t vm_kernel_base;
vm_offset_t vm_kernel_top;
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;
vm_offset_t vm_kernel_slid_base;
vm_offset_t vm_kernel_slid_top;
vm_offset_t vm_kext_base;
vm_offset_t vm_kext_top;
vm_offset_t vm_prelink_stext;
vm_offset_t vm_prelink_etext;
vm_offset_t vm_prelink_sinfo;
vm_offset_t vm_prelink_einfo;
vm_offset_t vm_slinkedit;
vm_offset_t vm_elinkedit;
vm_offset_t vm_prelink_sdata;
vm_offset_t vm_prelink_edata;
unsigned long gVirtBase, gPhysBase, gPhysSize;	/* Used by <mach/arm/vm_param.h> */

vm_offset_t mem_size;				/* Size of actual physical memory present
						 * minus any performance buffer and possibly
						 * limited by mem_limit in bytes */
uint64_t mem_actual;				/* The "One True" physical memory size
						 * actually, it's the highest physical
						 * address + 1 */
uint64_t max_mem;				/* Size of physical memory (bytes), adjusted
						 * by maxmem */
uint64_t sane_size;				/* Memory size to use for defaults
						 * calculations */
addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS;	/* Highest kernel
						 * virtual address known
						 * to the VM system */
static vm_offset_t	segTEXTB;
static unsigned long	segSizeTEXT;
static vm_offset_t	segDATAB;
static unsigned long	segSizeDATA;
static vm_offset_t	segLINKB;
static unsigned long	segSizeLINK;
static vm_offset_t	segKLDB;
static unsigned long	segSizeKLD;
static vm_offset_t	segLASTB;
static unsigned long	segSizeLAST;
static vm_offset_t	sectCONSTB;
static unsigned long	sectSizeCONST;

vm_offset_t	segPRELINKTEXTB;
unsigned long	segSizePRELINKTEXT;
vm_offset_t	segPRELINKINFOB;
unsigned long	segSizePRELINKINFO;
static kernel_segment_command_t *segDATA;
static boolean_t doconstro = TRUE;

vm_offset_t end_kern, etext, sdata, edata;
/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Page_size must already be set.
 *
 * Parameters:
 * first_avail: first available physical page -
 *              after kernel page tables
 * avail_start: PA of first physical page
 * avail_end:   PA of last physical page
 */
vm_offset_t	first_avail;
vm_offset_t	static_memory_end;
pmap_paddr_t	avail_start, avail_end;
#define MEM_SIZE_MAX	0x40000000
extern vm_offset_t ExceptionVectorsBase;	/* the code we want to load there */
/* The translation tables have to be 16KB aligned */
#define round_x_table(x) \
	(((pmap_paddr_t)(x) + (ARM_PGBYTES<<2) - 1) & ~((ARM_PGBYTES<<2) - 1))
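/*
 * Worked example of the macro above (assuming 4 KB pages, i.e.
 * ARM_PGBYTES == 0x1000, so ARM_PGBYTES << 2 == 0x4000):
 *   round_x_table(0x80001234) == (0x80001234 + 0x3FFF) & ~0x3FFF
 *                             == 0x80004000
 */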
static void
arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va,
                            int pte_prot_APX, int pte_prot_XN)
{
	if (va & ARM_TT_L1_PT_OFFMASK) { /* ragged edge hanging over a ARM_TT_L1_PT_SIZE boundary */
		va &= (~ARM_TT_L1_PT_OFFMASK);

		tt_entry_t *tte = &cpu_tte[ttenum(va)];
		tt_entry_t tmplate = *tte;
		pmap_paddr_t pa;
		pt_entry_t *ppte, ptmp;
		unsigned int i;

		pa = va - gVirtBase + gPhysBase;

		if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
			/* pick up the existing page table. */
			ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
		} else {
			/* TTE must be reincarnated COARSE. */
			ppte = (pt_entry_t *)phystokv(avail_start);
			avail_start += ARM_PGBYTES;

			pmap_init_pte_static_page(kernel_pmap, ppte, pa);

			for (i = 0; i < 4; ++i)
				tte[i] = pa_to_tte(kvtophys((vm_offset_t)ppte) + (i * 0x400)) | ARM_TTE_TYPE_TABLE;
		}

		/* Apply the desired protections to the specified page range */
		for (i = 0; i < (ARM_PGBYTES / sizeof(*ppte)); i++) {
			if (start <= va && va < _end) {

				ptmp = pa | ARM_PTE_AF | ARM_PTE_SH | ARM_PTE_TYPE;
				ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
				ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);

				if (pte_prot_XN)
					ptmp = ptmp | ARM_PTE_NX;

				ppte[i] = ptmp;
			}

			va += ARM_PGBYTES;
			pa += ARM_PGBYTES;
		}
	}
}
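/*
 * Note on the "reincarnated COARSE" path above (a reading of the code, not
 * original commentary): a short-descriptor L1 entry covers 1 MB, so page-level
 * protection needs an L2 coarse table of 256 four-byte entries (1 KB). One
 * 4 KB page therefore holds four coarse tables, which is why the loop points
 * four consecutive L1 entries at offsets 0x0/0x400/0x800/0xC00 of that page.
 */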
static void
arm_vm_page_granular_prot(vm_offset_t start, unsigned long size,
                          int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int forceCoarse)
{
	vm_offset_t _end = start + size;
	vm_offset_t align_start = (start + ARM_TT_L1_PT_OFFMASK) & ~ARM_TT_L1_PT_OFFMASK;
	vm_offset_t align_end = _end & ~ARM_TT_L1_PT_OFFMASK;

	arm_vm_page_granular_helper(start, _end, start, pte_prot_APX, pte_prot_XN);

	while (align_start < align_end) {
		if (forceCoarse) {
			arm_vm_page_granular_helper(align_start, align_end, align_start + 1,
			                            pte_prot_APX, pte_prot_XN);
		} else {
			tt_entry_t *tte = &cpu_tte[ttenum(align_start)];
			for (int i = 0; i < 4; ++i) {
				tt_entry_t tmplate = tte[i];

				tmplate = (tmplate & ~ARM_TTE_BLOCK_APMASK) | ARM_TTE_BLOCK_AP(pte_prot_APX);
				tmplate = (tmplate & ~ARM_TTE_BLOCK_NX_MASK);
				if (tte_prot_XN)
					tmplate = tmplate | ARM_TTE_BLOCK_NX;

				tte[i] = tmplate;
			}
		}
		align_start += ARM_TT_L1_PT_SIZE;
	}

	arm_vm_page_granular_helper(start, _end, _end, pte_prot_APX, pte_prot_XN);
}
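/*
 * Illustrative trace (hypothetical range, assuming ARM_TT_L1_PT_SIZE ==
 * 0x400000): for start == 0x80100000 and size == 0x900000, the three phases
 * above act on:
 *   1. [0x80100000, 0x80400000)  leading edge, page granularity (helper)
 *   2. [0x80400000, 0x80800000)  whole 4 MB units, section entries rewritten
 *                                in place (or pushed to coarse if forceCoarse)
 *   3. [0x80800000, 0x80A00000)  trailing edge, page granularity (helper)
 */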
static void
arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, int forceCoarse)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RONA, 1, forceCoarse);
}

static void
arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, int forceCoarse)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RONA, 0, forceCoarse);
}

static void
arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, int forceCoarse)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RWNA, 1, forceCoarse);
}

static void
arm_vm_page_granular_RWX(vm_offset_t start, unsigned long size, int forceCoarse)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RWNA, 0, forceCoarse);
}
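/*
 * Summary of the wrappers above in terms of the (tte_prot_XN, pte_prot_APX,
 * pte_prot_XN) triple they forward to arm_vm_page_granular_prot():
 *   RNX  -> (1, AP_RONA, 1)   kernel read-only,  non-executable
 *   ROX  -> (0, AP_RONA, 0)   kernel read-only,  executable
 *   RWNX -> (1, AP_RWNA, 1)   kernel read-write, non-executable
 *   RWX  -> (0, AP_RWNA, 0)   kernel read-write, executable
 */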
void
arm_vm_prot_init(boot_args * args)
{
#if __ARM_PTE_PHYSMAP__
	boolean_t force_coarse_physmap = TRUE;
#else
	boolean_t force_coarse_physmap = FALSE;
#endif
	/*
	 * Enforce W^X protections on segments that have been identified so far. This will be
	 * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions().
	 */

	/*
	 * Protection on kernel text is loose here to allow shenanigans early on (e.g. copying
	 * exception vectors) and storing an address into "error_buffer" (see arm_init.c).
	 * These protections are tightened in arm_vm_prot_finalize().
	 */
	arm_vm_page_granular_RWX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	if (doconstro) {
		/*
		 * We map __DATA with 3 calls, so that the __const section can have its
		 * protections changed independently of the rest of the __DATA segment.
		 */
		arm_vm_page_granular_RWNX(segDATAB, sectCONSTB - segDATAB, FALSE);
		arm_vm_page_granular_RNX(sectCONSTB, sectSizeCONST, FALSE);
		arm_vm_page_granular_RWNX(sectCONSTB + sectSizeCONST, (segDATAB + segSizeDATA) - (sectCONSTB + sectSizeCONST), FALSE);
	} else {
		/* If we aren't protecting const, just map DATA as a single blob. */
		arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, FALSE);
	}
	arm_vm_page_granular_ROX(segKLDB, segSizeKLD, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this
	arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions
	arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT,
	                          end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary
	arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, force_coarse_physmap); // Device Tree, RAM Disk (if present), bootArgs
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData), ARM_PGBYTES * 8, FALSE); // boot_tte, cpu_tte
	/*
	 * FIXME: Any page table pages that arm_vm_page_granular_* created with ROX entries in the range
	 * phystokv(args->topOfKernelData) to phystokv(prot_avail_start) should themselves be
	 * write protected in the static mapping of that range.
	 * [Page table pages whose page table entries grant execute (X) privileges should themselves be
	 * marked read-only. This aims to thwart attacks that replace the X entries with vectors to evil code
	 * (relying on some thread of execution to eventually arrive at what previously was a trusted routine).]
	 */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 8, ARM_PGBYTES, FALSE); /* Excess physMem over 1MB */
	arm_vm_page_granular_RWX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* refined in finalize */
	/* Map the remainder of xnu owned memory. */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 10,
	                          static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap); /* rest of physmem */
	/*
	 * Special case write protection for the mapping of ExceptionVectorsBase (EVB) at 0xFFFF0000.
	 * Recall that start.s handcrafted a page table page for the EVB mapping.
	 */
	pmap_paddr_t p = (pmap_paddr_t)(args->topOfKernelData) + (ARM_PGBYTES * 9);
	pt_entry_t *ppte = (pt_entry_t *)phystokv(p);

	int idx = (HIGH_EXC_VECTORS & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT;
	pt_entry_t ptmp = ppte[idx];
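	/*
	 * Index math, worked through (assuming 4 KB pages, ARM_TT_L2_SHIFT == 12
	 * and an 8-bit L2 index field): HIGH_EXC_VECTORS == 0xFFFF0000 gives
	 * idx == (0xFFFF0000 & 0x000FF000) >> 12 == 0xF0, i.e. entry 240 of the
	 * handcrafted page table.
	 */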
	ptmp = (ptmp & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA);

	ppte[idx] = ptmp;
}
void
arm_vm_prot_finalize(boot_args * args)
{
	/*
	 * Naively we could have:
	 * arm_vm_page_granular_ROX(segTEXTB, segSizeTEXT, FALSE);
	 * but, at present, that would miss a 1 MB boundary at the beginning of the segment and
	 * so would force a (wasteful) coarse page (e.g. when gVirtBase is 0x80000000, segTEXTB is 0x80001000).
	 */
	arm_vm_page_granular_ROX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* commpage, EVB */

#ifndef __ARM_L1_PTW__
	FlushPoC_Dcache();
#endif
	flush_mmu_tlb();
}
void
arm_vm_init(uint64_t memory_size, boot_args * args)
{
	vm_map_address_t va, off, off_end;
	tt_entry_t *tte, *tte_limit;
	pmap_paddr_t boot_ttep;
	tt_entry_t *boot_tte;
	uint32_t mem_segments;
	kernel_section_t *sectDCONST;
	/*
	 * Get the virtual and physical memory base from boot_args.
	 */
	gVirtBase = args->virtBase;
	gPhysBase = args->physBase;
	gPhysSize = args->memSize;
	mem_size = args->memSize;
	if ((memory_size != 0) && (mem_size > memory_size))
		mem_size = memory_size;
	if (mem_size > MEM_SIZE_MAX)
		mem_size = MEM_SIZE_MAX;
	static_memory_end = gVirtBase + mem_size;
	/* Calculate the number of ~256MB segments of memory */
	mem_segments = (mem_size + 0x0FFFFFFF) >> 28;
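	/*
	 * Example: mem_size == 512 MB (0x20000000) gives
	 * mem_segments == (0x20000000 + 0x0FFFFFFF) >> 28 == 2.
	 */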
	/*
	 * Copy the boot mmu tt to create system mmu tt.
	 * System mmu tt start after the boot mmu tt.
	 * Determine translation table base virtual address: - aligned at end
	 * of executable.
	 */
	boot_ttep = args->topOfKernelData;
	boot_tte = (tt_entry_t *) phystokv(boot_ttep);

	cpu_ttep = boot_ttep + ARM_PGBYTES * 4;
	cpu_tte = (tt_entry_t *) phystokv(cpu_ttep);

	bcopy(boot_tte, cpu_tte, ARM_PGBYTES * 4);
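	/*
	 * Sizing note (inferred from the constants): a short-descriptor L1 table
	 * holds 4096 four-byte entries, each mapping 1 MB, so the ARM_PGBYTES * 4
	 * bytes copied here are the full 16 KB L1 table covering 4 GB of VA.
	 */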
	/*
	 * Clear out any V==P mappings that may have been established in e.g. start.s
	 */
	tte = &cpu_tte[ttenum(gPhysBase)];
	tte_limit = &cpu_tte[ttenum(gPhysBase + gPhysSize)];

	/* Hands off [gVirtBase, gVirtBase + gPhysSize) please. */
	if (gPhysBase < gVirtBase) {
		if (gPhysBase + gPhysSize > gVirtBase)
			tte_limit = &cpu_tte[ttenum(gVirtBase)];
	} else {
		if (gPhysBase < gVirtBase + gPhysSize)
			tte = &cpu_tte[ttenum(gVirtBase + gPhysSize)];
	}

	while (tte < tte_limit) {
		*tte = ARM_TTE_TYPE_FAULT;
		tte++;
	}
	/* Skip 6 pages (four L1 + two L2 entries) */
	avail_start = cpu_ttep + ARM_PGBYTES * 6;
	avail_end = gPhysBase + mem_size;
	/*
	 * Now retrieve addresses for end, edata, and etext
	 * from MACH-O headers for the currently running 32-bit kernel.
	 */
	segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
	segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
	segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
	segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
	segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);
	segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
	segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);

	etext = (vm_offset_t) segTEXTB + segSizeTEXT;
	sdata = (vm_offset_t) segDATAB;
	edata = (vm_offset_t) segDATAB + segSizeDATA;
	end_kern = round_page(getlastaddr()); /* Force end to next page */
	/*
	 * Special handling for the __DATA,__const *section*.
	 * A page of padding named lastkerneldataconst is at the end of the __DATA,__const
	 * so we can safely truncate the size. __DATA,__const is also aligned, but
	 * just in case we will round that to a page, too.
	 */
	segDATA = getsegbynamefromheader(&_mh_execute_header, "__DATA");
	sectDCONST = getsectbynamefromheader(&_mh_execute_header, "__DATA", "__const");
	sectCONSTB = sectDCONST->addr;
	sectSizeCONST = sectDCONST->size;

	/* doconstro is true by default, but we allow a boot-arg to disable it */
	(void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));
	if (doconstro) {
		extern vm_offset_t _lastkerneldataconst;
		extern vm_size_t _lastkerneldataconst_padsize;
		vm_offset_t sdataconst = sectCONSTB;

		/* this should already be aligned, but so that we can protect we round */
		sectCONSTB = round_page(sectCONSTB);

		/* make sure lastkerneldataconst is really last and the right size */
		if ((_lastkerneldataconst == sdataconst + sectSizeCONST - _lastkerneldataconst_padsize) &&
		    (_lastkerneldataconst_padsize >= PAGE_SIZE)) {
			sectSizeCONST = trunc_page(sectSizeCONST);
		} else {
			/* otherwise see if next section is aligned then protect up to it */
			kernel_section_t *next_sect = nextsect(segDATA, sectDCONST);

			if (next_sect && ((next_sect->addr & PAGE_MASK) == 0)) {
				sectSizeCONST = next_sect->addr - sectCONSTB;
			} else {
				/* lastly just go ahead and truncate so we try to protect something */
				sectSizeCONST = trunc_page(sectSizeCONST);
			}
		}

		/* if the resulting range makes no sense, give up on protecting const */
		if ((sectSizeCONST == 0) || (sectCONSTB < sdata) || ((sectCONSTB + sectSizeCONST) >= edata)) {
			doconstro = FALSE;
		}
	}
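	/*
	 * Rationale for the truncation above (a reading of the code, not original
	 * commentary): when the one-page _lastkerneldataconst pad really is the
	 * tail of __DATA,__const, any bytes dropped by trunc_page() are padding,
	 * so write-protecting only the whole pages that remain loses nothing.
	 */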
#ifndef __ARM_L1_PTW__
	FlushPoC_Dcache();
#endif
	set_mmu_ttb(cpu_ttep);
	set_mmu_ttb_alternate(cpu_ttep);
	flush_mmu_tlb();
#if __arm__ && __ARM_USER_PROTECT__
	{
		unsigned int ttbr0_val, ttbr1_val, ttbcr_val;
		thread_t thread = current_thread();
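		/*
		 * CP15 c2 holds the translation-table state: opcode2 0 is TTBR0
		 * (user tables), 1 is TTBR1 (kernel tables), 2 is TTBCR. The values
		 * are stashed in the thread's machine state, presumably so the
		 * context-switch path can restore them later.
		 */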
		__asm__ volatile("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
		__asm__ volatile("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		__asm__ volatile("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val));
		thread->machine.uptw_ttb = ttbr0_val;
		thread->machine.kptw_ttb = ttbr1_val;
		thread->machine.uptw_ttc = ttbcr_val;
	}
#endif
	vm_prelink_stext = segPRELINKTEXTB;
	vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
	vm_prelink_sinfo = segPRELINKINFOB;
	vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
	vm_slinkedit = segLINKB;
	vm_elinkedit = segLINKB + segSizeLINK;
	sane_size = mem_size - (avail_start - gPhysBase);
	vm_kernel_slide = gVirtBase - 0x80000000;
	vm_kernel_stext = segTEXTB;
	vm_kernel_etext = segTEXTB + segSizeTEXT;
	vm_kernel_base = gVirtBase;
	vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
	vm_kext_base = segPRELINKTEXTB;
	vm_kext_top = vm_kext_base + segSizePRELINKTEXT;
	vm_kernel_slid_base = segTEXTB;
	vm_kernel_slid_top = vm_kext_top;
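	/*
	 * Note (inferred from the expression above): 0x80000000 is the kernel's
	 * unslid link-time base on this platform, so the slide is just the delta
	 * between the boot-time virtual base and that address (zero when unslid).
	 */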
	pmap_bootstrap((gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000);
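	/*
	 * Worked example: with gVirtBase == 0x80000000 and MEM_SIZE_MAX == 1 GB,
	 * (0x80000000 + 0x40000000 + 0x3FFFFF) & 0xFFC00000 == 0xC0000000, i.e.
	 * the first 4 MB boundary at or above the top of the static mapping.
	 */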
	arm_vm_prot_init(args);
	/*
	 * To avoid recursing while trying to init the vm_page and object mechanisms,
	 * pre-initialize kernel pmap page table pages to cover this address range:
	 * 2MB + FrameBuffer size + 3MB for each 256MB segment
	 */
	off_end = (2 + (mem_segments * 3)) << 20;
	off_end += (unsigned int) round_page(args->Video.v_height * args->Video.v_rowBytes);
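	/*
	 * Example: a 512 MB device (mem_segments == 2) yields
	 * off_end == (2 + 2 * 3) << 20 == 8 MB, plus the page-rounded size of the
	 * framebuffer; the loop below covers that range one 4 MB L1 unit at a time.
	 */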
	for (off = 0, va = (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) {
		pt_entry_t *ptp;
		pmap_paddr_t ptp_phys;

		ptp = (pt_entry_t *) phystokv(avail_start);
		ptp_phys = (pmap_paddr_t)avail_start;
		avail_start += ARM_PGBYTES;
		pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE);
		tte = &cpu_tte[ttenum(va + off)];
		*tte     = pa_to_tte(ptp_phys)         | ARM_TTE_TYPE_TABLE;
		*(tte+1) = pa_to_tte(ptp_phys + 0x400) | ARM_TTE_TYPE_TABLE;
		*(tte+2) = pa_to_tte(ptp_phys + 0x800) | ARM_TTE_TYPE_TABLE;
		*(tte+3) = pa_to_tte(ptp_phys + 0xC00) | ARM_TTE_TYPE_TABLE;
	}
	avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;
	first_avail = avail_start;
	patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
}