/*
 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_debug.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/lowglobals.h>

#include <pexpert/arm/boot.h>
#include <pexpert/device_tree.h>

#include <libkern/kernel_mach_header.h>

/*
 * Denotes the end of xnu.
 */
extern void *last_kernel_symbol;

vm_offset_t vm_kernel_base;
vm_offset_t vm_kernel_top;
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;
vm_offset_t vm_kernel_slid_base;
vm_offset_t vm_kernel_slid_top;
vm_offset_t vm_kext_base;
vm_offset_t vm_kext_top;
vm_offset_t vm_prelink_stext;
vm_offset_t vm_prelink_etext;
vm_offset_t vm_prelink_sinfo;
vm_offset_t vm_prelink_einfo;
vm_offset_t vm_slinkedit;
vm_offset_t vm_elinkedit;
vm_offset_t vm_prelink_sdata;
vm_offset_t vm_prelink_edata;

vm_offset_t vm_kernel_builtinkmod_text;
vm_offset_t vm_kernel_builtinkmod_text_end;

unsigned long gVirtBase, gPhysBase, gPhysSize;	/* Used by <mach/arm/vm_param.h> */

vm_offset_t mem_size;		/* Size of actual physical memory present
				 * minus any performance buffer and possibly
				 * limited by mem_limit in bytes */
uint64_t mem_actual;		/* The "One True" physical memory size
				 * actually, it's the highest physical
				 * address + 1 */
uint64_t max_mem;		/* Size of physical memory (bytes), adjusted
				 * by maxmem */
uint64_t sane_size;		/* Memory size to use for defaults
				 * calculations */
addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS;	/* Highest kernel
						 * virtual address known
						 * to the VM system */

vm_offset_t segEXTRADATA;
unsigned long segSizeEXTRADATA;
vm_offset_t segLOWESTTEXT;
static vm_offset_t segTEXTB;
static unsigned long segSizeTEXT;
static vm_offset_t segDATAB;
static unsigned long segSizeDATA;
static vm_offset_t segLINKB;
static unsigned long segSizeLINK;
static vm_offset_t segKLDB;
static unsigned long segSizeKLD;
static vm_offset_t segLASTB;
static unsigned long segSizeLAST;
static vm_offset_t sectCONSTB;
static unsigned long sectSizeCONST;
vm_offset_t segBOOTDATAB;
unsigned long segSizeBOOTDATA;
extern vm_offset_t intstack_low_guard;
extern vm_offset_t intstack_high_guard;
extern vm_offset_t fiqstack_high_guard;

vm_offset_t segPRELINKTEXTB;
unsigned long segSizePRELINKTEXT;
vm_offset_t segPRELINKINFOB;
unsigned long segSizePRELINKINFO;

static kernel_segment_command_t *segDATA;
static boolean_t doconstro = TRUE;

vm_offset_t end_kern, etext, sdata, edata;

/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Page_size must already be set.
 *
 * Parameters:
 * first_avail: first available physical page -
 *              after kernel page tables
 * avail_start: PA of first physical page
 * avail_end:   PA of last physical page
 */
vm_offset_t first_avail;
vm_offset_t static_memory_end;
pmap_paddr_t avail_start, avail_end;

#define MEM_SIZE_MAX 0x40000000

extern vm_offset_t ExceptionVectorsBase;	/* the code we want to load there */

/* The translation tables have to be 16KB aligned */
#define round_x_table(x) \
	(((pmap_paddr_t)(x) + (ARM_PGBYTES<<2) - 1) & ~((ARM_PGBYTES<<2) - 1))
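
/*
 * Worked example of the rounding above, assuming 4KB pages (ARM_PGBYTES ==
 * 0x1000, so ARM_PGBYTES<<2 == 0x4000, i.e. 16KB): round_x_table(0x80004001)
 * == (0x80004001 + 0x3FFF) & ~0x3FFF == 0x80008000, while an already-aligned
 * 0x80004000 maps to itself. This is the standard round-up-to-alignment
 * idiom for power-of-two alignments.
 */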

vm_map_address_t
phystokv(pmap_paddr_t pa)
{
	return pa - gPhysBase + gVirtBase;
}
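
/*
 * phystokv() relies on the early static mapping being a single linear
 * window: virtual == physical + (gVirtBase - gPhysBase). E.g. with
 * gPhysBase 0x40000000 and gVirtBase 0x80000000 (illustrative values, not
 * guaranteed by this file), phystokv(0x40001000) == 0x80001000. The same
 * offset is applied in reverse below wherever a PA is derived from a VA
 * (pa = va - gVirtBase + gPhysBase).
 */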

static void
arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va,
                            int pte_prot_APX, int pte_prot_XN)
{
	if (va & ARM_TT_L1_PT_OFFMASK) { /* ragged edge hanging over a ARM_TT_L1_PT_SIZE boundary */
		va &= (~ARM_TT_L1_PT_OFFMASK);
		tt_entry_t *tte = &cpu_tte[ttenum(va)];
		tt_entry_t tmplate = *tte;
		pmap_paddr_t pa;
		pt_entry_t *ppte, ptmp;
		unsigned int i;

		pa = va - gVirtBase + gPhysBase;

		if (pa >= avail_end) {
			return;
		}

		if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
			/* pick up the existing page table. */
			ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
		} else {
			/* TTE must be reincarnated COARSE. */
			ppte = (pt_entry_t *)phystokv(avail_start);
			pmap_paddr_t l2table = avail_start;
			avail_start += ARM_PGBYTES;
			bzero(ppte, ARM_PGBYTES);

			for (i = 0; i < 4; ++i) {
				tte[i] = pa_to_tte(l2table + (i * 0x400)) | ARM_TTE_TYPE_TABLE;
			}
		}

		vm_offset_t len = _end - va;
		if ((pa + len) > avail_end) {
			_end -= (pa + len - avail_end);
		}
		assert((start - gVirtBase + gPhysBase) >= gPhysBase);

		/* Apply the desired protections to the specified page range */
		for (i = 0; i < (ARM_PGBYTES / sizeof(*ppte)); i++) {
			if (start <= va && va < _end) {
				ptmp = pa | ARM_PTE_AF | ARM_PTE_SH | ARM_PTE_TYPE;
				ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
				ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);
				if (pte_prot_XN) {
					ptmp = ptmp | ARM_PTE_NX;
				}

				ppte[i] = ptmp;
			}

			va += ARM_PGBYTES;
			pa += ARM_PGBYTES;
		}
	}
}
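
/*
 * A note on the "reincarnated COARSE" path above: with 32-bit ARM short
 * descriptors, one L1 entry covers 1MB and a coarse L2 table is 1KB (256
 * 4-byte entries, each mapping a 4KB page). The helper therefore carves one
 * ARM_PGBYTES page into four 1KB L2 tables at offsets 0x000/0x400/0x800/0xC00
 * and points four consecutive L1 entries at them, converting a whole 4MB
 * ARM_TT_L1_PT_SIZE region from section mappings to page mappings in one go.
 */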

static void
arm_vm_page_granular_prot(vm_offset_t start, unsigned long size,
                          int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int force_page_granule)
{
	vm_offset_t _end = start + size;
	vm_offset_t align_start = (start + ARM_TT_L1_PT_OFFMASK) & ~ARM_TT_L1_PT_OFFMASK;
	vm_offset_t align_end = _end & ~ARM_TT_L1_PT_OFFMASK;

	arm_vm_page_granular_helper(start, _end, start, pte_prot_APX, pte_prot_XN);

	while (align_start < align_end) {
		if (force_page_granule) {
			arm_vm_page_granular_helper(align_start, align_end, align_start + 1,
			                            pte_prot_APX, pte_prot_XN);
		} else {
			tt_entry_t *tte = &cpu_tte[ttenum(align_start)];
			for (int i = 0; i < 4; ++i) {
				tt_entry_t tmplate = tte[i];

				tmplate = (tmplate & ~ARM_TTE_BLOCK_APMASK) | ARM_TTE_BLOCK_AP(pte_prot_APX);
				tmplate = (tmplate & ~ARM_TTE_BLOCK_NX_MASK);
				if (tte_prot_XN) {
					tmplate = tmplate | ARM_TTE_BLOCK_NX;
				}

				tte[i] = tmplate;
			}
		}
		align_start += ARM_TT_L1_PT_SIZE;
	}

	arm_vm_page_granular_helper(start, _end, _end, pte_prot_APX, pte_prot_XN);
}
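
/*
 * Strategy recap: the first helper call handles the ragged front edge (start
 * up to the first 4MB boundary) at 4KB page granularity, the while loop
 * walks the 4MB-aligned middle either rewriting the 1MB section entries in
 * place (cheap) or, when force_page_granule is set, breaking them into page
 * tables as well, and the final helper call handles the ragged tail end.
 */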

static void
arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RONA, 1, force_page_granule);
}

static void
arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RONA, 0, force_page_granule);
}

static void
arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RWNA, 1, force_page_granule);
}

static void
arm_vm_page_granular_RWX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RWNA, 0, force_page_granule);
}
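
/*
 * Naming key for the wrappers above: R/RW come from the AP encoding (AP_RONA
 * = kernel read-only, AP_RWNA = kernel read-write, no user access in either
 * case), and X/NX from the execute-never arguments (tte_prot_XN and
 * pte_prot_XN are 1 to set the XN bits, 0 to leave the mapping executable).
 */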

void
arm_vm_prot_init(boot_args * args)
{
#if __ARM_PTE_PHYSMAP__
	boolean_t force_coarse_physmap = TRUE;
#else
	boolean_t force_coarse_physmap = FALSE;
#endif

	/*
	 * Enforce W^X protections on segments that have been identified so far. This will be
	 * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions()
	 */

	/*
	 * Protection on kernel text is loose here to allow shenanigans early on (e.g. copying exception vectors)
	 * and storing an address into "error_buffer" (see arm_init.c) !?!
	 * These protections are tightened in arm_vm_prot_finalize()
	 */
	arm_vm_page_granular_RWX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	if (doconstro) {
		/*
		 * We map __DATA with 3 calls, so that the __const section can have its
		 * protections changed independently of the rest of the __DATA segment.
		 */
		arm_vm_page_granular_RWNX(segDATAB, sectCONSTB - segDATAB, FALSE);
		arm_vm_page_granular_RNX(sectCONSTB, sectSizeCONST, FALSE);
		arm_vm_page_granular_RWNX(sectCONSTB + sectSizeCONST, (segDATAB + segSizeDATA) - (sectCONSTB + sectSizeCONST), FALSE);
	} else {
		/* If we aren't protecting const, just map DATA as a single blob. */
		arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, FALSE);
	}

	arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&fiqstack_high_guard, PAGE_MAX_SIZE, TRUE);

	arm_vm_page_granular_ROX(segKLDB, segSizeKLD, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this
	arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions
	arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT,
	                          end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary
	arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, force_coarse_physmap); // Device Tree, RAM Disk (if present), bootArgs, trust caches
	arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, FALSE); // tighter trust cache protection
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData), ARM_PGBYTES * 8, FALSE); // boot_tte, cpu_tte

	/*
	 * FIXME: Any page table pages that arm_vm_page_granular_* created with ROX entries in the range
	 * phystokv(args->topOfKernelData) to phystokv(prot_avail_start) should themselves be
	 * write protected in the static mapping of that range.
	 * [Page table pages whose page table entries grant execute (X) privileges should themselves be
	 * marked read-only. This aims to thwart attacks that replace the X entries with vectors to evil code
	 * (relying on some thread of execution to eventually arrive at what previously was a trusted routine).]
	 */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 8, ARM_PGBYTES, FALSE); /* Excess physMem over 1MB */
	arm_vm_page_granular_RWX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* refined in finalize */

	/* Map the remainder of xnu owned memory. */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 10,
	                          static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap); /* rest of physmem */

	/*
	 * Special case write protection for the mapping of ExceptionVectorsBase (EVB) at 0xFFFF0000.
	 * Recall that start.s handcrafted a page table page for the EVB mapping.
	 */
	pmap_paddr_t p = (pmap_paddr_t)(args->topOfKernelData) + (ARM_PGBYTES * 9);
	pt_entry_t *ppte = (pt_entry_t *)phystokv(p);
	pmap_init_pte_page(kernel_pmap, ppte, HIGH_EXC_VECTORS & ~ARM_TT_L1_PT_OFFMASK, 2, TRUE, FALSE);

	int idx = (HIGH_EXC_VECTORS & ARM_TT_L1_PT_OFFMASK) >> ARM_TT_L2_SHIFT;
	pt_entry_t ptmp = ppte[idx];

	ptmp = (ptmp & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA);

	ppte[idx] = ptmp;
}
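
/*
 * Arithmetic for the EVB index above: the ARM high exception vectors live at
 * HIGH_EXC_VECTORS (0xFFFF0000). Assuming the usual 4KB-page constants
 * (ARM_TT_L1_PT_OFFMASK == 0x3FFFFF, ARM_TT_L2_SHIFT == 12), idx ==
 * (0xFFFF0000 & 0x3FFFFF) >> 12 == 0x3F0, i.e. entry 1008 of the 1024 PTEs
 * in that handcrafted page-table page. Making that single PTE AP_RONA keeps
 * the vector page from being rewritten to hijack exception entry.
 */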

void
arm_vm_prot_finalize(boot_args * args)
{
	cpu_stack_alloc(&BootCpuData);
	ml_static_mfree(segBOOTDATAB, segSizeBOOTDATA);
	/*
	 * Naively we could have:
	 * arm_vm_page_granular_ROX(segTEXTB, segSizeTEXT, FALSE);
	 * but, at present, that would miss a 1Mb boundary at the beginning of the segment and
	 * so would force a (wasteful) coarse page (e.g. when gVirtBase is 0x80000000, segTEXTB is 0x80001000).
	 */
	arm_vm_page_granular_ROX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* commpage, EVB */

	flush_mmu_tlb();
}

/* used in the chosen/memory-map node, populated by iBoot. */
typedef struct MemoryMapFileInfo {
	vm_offset_t paddr;	/* fields inferred from the TrustCache lookup below */
	size_t length;
} MemoryMapFileInfo;

void
arm_vm_init(uint64_t memory_size, boot_args * args)
{
	vm_map_address_t va, off, off_end;
	tt_entry_t *tte, *tte_limit;
	pmap_paddr_t boot_ttep;
	tt_entry_t *boot_tte;
	uint32_t mem_segments;
	kernel_section_t *sectDCONST;

	/*
	 * Get the virtual and physical memory base from boot_args.
	 */
	gVirtBase = args->virtBase;
	gPhysBase = args->physBase;
	gPhysSize = args->memSize;
	mem_size = args->memSize;
	if ((memory_size != 0) && (mem_size > memory_size)) {
		mem_size = memory_size;
	}
	if (mem_size > MEM_SIZE_MAX) {
		mem_size = MEM_SIZE_MAX;
	}
	static_memory_end = gVirtBase + mem_size;

	/* Calculate the number of ~256MB segments of memory */
	mem_segments = (mem_size + 0x0FFFFFFF) >> 28;
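
	/*
	 * E.g. a 512MB device: (0x20000000 + 0x0FFFFFFF) >> 28 == 2 segments;
	 * the rounding constant makes any partial 256MB chunk count as a full
	 * segment. mem_segments sizes the page-table preheat loop below.
	 */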

	/*
	 * Copy the boot mmu tt to create the system mmu tt.
	 * The system mmu tt starts after the boot mmu tt.
	 * Determine translation table base virtual address: aligned at end
	 * of executable.
	 */
	boot_ttep = args->topOfKernelData;
	boot_tte = (tt_entry_t *) phystokv(boot_ttep);

	cpu_ttep = boot_ttep + ARM_PGBYTES * 4;
	cpu_tte = (tt_entry_t *) phystokv(cpu_ttep);

	bcopy(boot_tte, cpu_tte, ARM_PGBYTES * 4);

	/*
	 * Clear out any V==P mappings that may have been established in e.g. start.s
	 */
	tte = &cpu_tte[ttenum(gPhysBase)];
	tte_limit = &cpu_tte[ttenum(gPhysBase + gPhysSize)];

	/* Hands off [gVirtBase, gVirtBase + gPhysSize) please. */
	if (gPhysBase < gVirtBase) {
		if (gPhysBase + gPhysSize > gVirtBase) {
			tte_limit = &cpu_tte[ttenum(gVirtBase)];
		}
	} else {
		if (gPhysBase < gVirtBase + gPhysSize) {
			tte = &cpu_tte[ttenum(gVirtBase + gPhysSize)];
		}
	}

	while (tte < tte_limit) {
		*tte = ARM_TTE_TYPE_FAULT;
		tte++;
	}

	/* Skip 6 pages (four L1 + two L2 entries) */
	avail_start = cpu_ttep + ARM_PGBYTES * 6;
	avail_end = gPhysBase + mem_size;
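
	/*
	 * Resulting physical layout at args->topOfKernelData (in ARM_PGBYTES
	 * pages), as implied by the offsets used here and in arm_vm_prot_init():
	 *   pages 0-3:  boot_tte (16KB boot L1 table, from start.s)
	 *   pages 4-7:  cpu_tte  (16KB system L1 table, copied above)
	 *   page  8:    spare ("excess physMem" page)
	 *   page  9:    handcrafted L2 page for the exception vectors/commpage
	 *   page 10+:   general allocation, beginning at avail_start
	 */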

	/*
	 * Now retrieve addresses for end, edata, and etext
	 * from Mach-O headers for the currently running 32-bit kernel.
	 */
	segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
	segLOWESTTEXT = segTEXTB;
	segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
	segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
	segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
	segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);
	segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
	segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);
	segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA);

	segSizeEXTRADATA = 0;

	DTEntry memory_map;
	MemoryMapFileInfo *trustCacheRange;
	unsigned int trustCacheRangeSize;
	int err;

	err = DTLookupEntry(NULL, "chosen/memory-map", &memory_map);
	assert(err == kSuccess);

	err = DTGetProperty(memory_map, "TrustCache", (void**)&trustCacheRange, &trustCacheRangeSize);
	if (err == kSuccess) {
		assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo));

		segEXTRADATA = phystokv(trustCacheRange->paddr);
		segSizeEXTRADATA = trustCacheRange->length;
	}

	etext = (vm_offset_t) segTEXTB + segSizeTEXT;
	sdata = (vm_offset_t) segDATAB;
	edata = (vm_offset_t) segDATAB + segSizeDATA;
	end_kern = round_page(getlastaddr());	/* Force end to next page */

	/*
	 * Special handling for the __DATA,__const *section*.
	 * A page of padding named lastkerneldataconst is at the end of the __DATA,__const
	 * so we can safely truncate the size. __DATA,__const is also aligned, but
	 * just in case we will round that to a page, too.
	 */
	segDATA = getsegbynamefromheader(&_mh_execute_header, "__DATA");
	sectDCONST = getsectbynamefromheader(&_mh_execute_header, "__DATA", "__const");
	sectCONSTB = sectDCONST->addr;
	sectSizeCONST = sectDCONST->size;

	if (doconstro) {
		extern vm_offset_t _lastkerneldataconst;
		extern vm_size_t _lastkerneldataconst_padsize;
		vm_offset_t sdataconst = sectCONSTB;

		/* this should already be aligned, but so that we can protect we round */
		sectCONSTB = round_page(sectCONSTB);

		/* make sure lastkerneldataconst is really last and the right size */
		if ((_lastkerneldataconst == sdataconst + sectSizeCONST - _lastkerneldataconst_padsize) &&
		    (_lastkerneldataconst_padsize >= PAGE_SIZE)) {
			sectSizeCONST = trunc_page(sectSizeCONST);
		} else {
			/* otherwise see if next section is aligned then protect up to it */
			kernel_section_t *next_sect = nextsect(segDATA, sectDCONST);

			if (next_sect && ((next_sect->addr & PAGE_MASK) == 0)) {
				sectSizeCONST = next_sect->addr - sectCONSTB;
			} else {
				/* lastly just go ahead and truncate so we try to protect something */
				sectSizeCONST = trunc_page(sectSizeCONST);
			}
		}
	}

	/* disable __const protection if the resulting range is empty or out of bounds */
	if ((sectSizeCONST == 0) || (sectCONSTB < sdata) || (sectCONSTB + sectSizeCONST) >= edata) {
		doconstro = FALSE;
	}

	vm_prelink_stext = segPRELINKTEXTB;
	vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
	vm_prelink_sinfo = segPRELINKINFOB;
	vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
	vm_slinkedit = segLINKB;
	vm_elinkedit = segLINKB + segSizeLINK;

	sane_size = mem_size - (avail_start - gPhysBase);
	max_mem = mem_size;
	vm_kernel_slide = gVirtBase - VM_KERNEL_LINK_ADDRESS;
	vm_kernel_stext = segTEXTB;
	vm_kernel_etext = segTEXTB + segSizeTEXT;
	vm_kernel_base = gVirtBase;
	vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
	vm_kext_base = segPRELINKTEXTB;
	vm_kext_top = vm_kext_base + segSizePRELINKTEXT;
	vm_kernel_slid_base = segTEXTB;
	vm_kernel_slid_top = vm_kext_top;
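
	/*
	 * These globals are what the rest of xnu consults for KASLR-aware
	 * address checks and for unsliding pointers in diagnostics (e.g. the
	 * VM_KERNEL_UNSLIDE family of macros keys off vm_kernel_slide and the
	 * slid base/top bounds).
	 */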

	pmap_bootstrap((gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000);

	arm_vm_prot_init(args);
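
	/*
	 * The pmap_bootstrap() argument rounds the first VA beyond the static
	 * MEM_SIZE_MAX window up to a 4MB boundary (0x3FFFFF and 0xFFC00000
	 * are the 4MB round-up mask pair). E.g. with gVirtBase 0x80000000:
	 * (0x80000000 + 0x40000000 + 0x3FFFFF) & 0xFFC00000 == 0xC0000000.
	 */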

	/*
	 * To avoid recursing while trying to init the vm_page and object
	 * mechanisms, pre-initialize kernel pmap page table pages to cover
	 * this address range: 2MB + FrameBuffer size + 3MB for each 256MB segment
	 */
	off_end = (2 + (mem_segments * 3)) << 20;
	off_end += (unsigned int) round_page(args->Video.v_height * args->Video.v_rowBytes);

	for (off = 0, va = (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) {
		pt_entry_t *ptp;
		pmap_paddr_t ptp_phys;

		ptp = (pt_entry_t *) phystokv(avail_start);
		ptp_phys = (pmap_paddr_t)avail_start;
		avail_start += ARM_PGBYTES;
		pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE, TRUE);
		tte = &cpu_tte[ttenum(va + off)];
		*tte = pa_to_tte((ptp_phys)) | ARM_TTE_TYPE_TABLE;
		*(tte + 1) = pa_to_tte((ptp_phys + 0x400)) | ARM_TTE_TYPE_TABLE;
		*(tte + 2) = pa_to_tte((ptp_phys + 0x800)) | ARM_TTE_TYPE_TABLE;
		*(tte + 3) = pa_to_tte((ptp_phys + 0xC00)) | ARM_TTE_TYPE_TABLE;
	}
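
	/*
	 * Each iteration above consumes exactly one physical page from
	 * avail_start and, as in arm_vm_page_granular_helper(), splits it into
	 * four 1KB coarse tables wired into four consecutive L1 slots, so one
	 * page preheats one full ARM_TT_L1_PT_SIZE (4MB) stride of kernel VA.
	 */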

	set_mmu_ttb(cpu_ttep);
	set_mmu_ttb_alternate(cpu_ttep);
	flush_mmu_tlb();

#if __arm__ && __ARM_USER_PROTECT__
	{
		unsigned int ttbr0_val, ttbr1_val, ttbcr_val;
		thread_t thread = current_thread();

		/* Cache the current MMU translation-table registers in the
		 * boot thread's machine state so context switches can restore
		 * them. */
		__asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
		__asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		__asm__ volatile ("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val));
		thread->machine.uptw_ttb = ttbr0_val;
		thread->machine.kptw_ttb = ttbr1_val;
		thread->machine.uptw_ttc = ttbcr_val;
	}
#endif

	avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;

	first_avail = avail_start;
	patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
}