/*
 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_debug.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>

#include <arm/misc_protos.h>
#include <arm/lowglobals.h>

#include <pexpert/arm/boot.h>
#include <pexpert/device_tree.h>

#include <libkern/kernel_mach_header.h>
/*
 * Denotes the end of xnu.
 */
extern void *last_kernel_symbol;
vm_offset_t vm_kernel_base;
vm_offset_t vm_kernel_top;
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;
vm_offset_t vm_kernel_slid_base;
vm_offset_t vm_kernel_slid_top;
vm_offset_t vm_kext_base;
vm_offset_t vm_kext_top;
vm_offset_t vm_prelink_stext;
vm_offset_t vm_prelink_etext;
vm_offset_t vm_prelink_sinfo;
vm_offset_t vm_prelink_einfo;
vm_offset_t vm_slinkedit;
vm_offset_t vm_elinkedit;
vm_offset_t vm_prelink_sdata;
vm_offset_t vm_prelink_edata;

vm_offset_t vm_kernel_builtinkmod_text;
vm_offset_t vm_kernel_builtinkmod_text_end;
unsigned long gVirtBase, gPhysBase, gPhysSize;    /* Used by <mach/arm/vm_param.h> */

vm_offset_t mem_size;                             /* Size of actual physical memory present,
                                                   * minus any performance buffer and possibly
                                                   * limited by mem_limit, in bytes */
uint64_t    mem_actual;                           /* The "One True" physical memory size;
                                                   * actually, it's the highest physical
                                                   * address + 1 */
uint64_t    max_mem;                              /* kernel/vm managed memory, adjusted by maxmem */
uint64_t    max_mem_actual;                       /* Actual size of physical memory (bytes), adjusted
                                                   * by the maxmem boot-arg */
uint64_t    sane_size;                            /* Memory size to use for defaults
                                                   * calculations */
addr64_t    vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel
                                                   * virtual address known
                                                   * to the VM system */
vm_offset_t segEXTRADATA;
unsigned long segSizeEXTRADATA;
vm_offset_t segLOWESTTEXT;
vm_offset_t segLOWEST;
static vm_offset_t segTEXTB;
static unsigned long segSizeTEXT;
static vm_offset_t segDATAB;
static unsigned long segSizeDATA;
vm_offset_t segLINKB;
static unsigned long segSizeLINK;
static vm_offset_t segKLDB;
static unsigned long segSizeKLD;
static vm_offset_t segLASTB;
static vm_offset_t segLASTDATACONSTB;
static unsigned long segSizeLASTDATACONST;
static unsigned long segSizeLAST;
static vm_offset_t sectCONSTB;
static unsigned long sectSizeCONST;
vm_offset_t segBOOTDATAB;
unsigned long segSizeBOOTDATA;

extern vm_offset_t intstack_low_guard;
extern vm_offset_t intstack_high_guard;
extern vm_offset_t fiqstack_high_guard;

vm_offset_t segPRELINKTEXTB;
unsigned long segSizePRELINKTEXT;
vm_offset_t segPRELINKINFOB;
unsigned long segSizePRELINKINFO;

vm_offset_t segLOWESTKC;
vm_offset_t segHIGHESTKC;
vm_offset_t segLOWESTROKC;
vm_offset_t segHIGHESTROKC;
vm_offset_t segLOWESTAuxKC;
vm_offset_t segHIGHESTAuxKC;
vm_offset_t segLOWESTROAuxKC;
vm_offset_t segHIGHESTROAuxKC;
vm_offset_t segLOWESTRXAuxKC;
vm_offset_t segHIGHESTRXAuxKC;
vm_offset_t segHIGHESTNLEAuxKC;

static kernel_segment_command_t *segDATA;
static boolean_t doconstro = TRUE;

vm_offset_t end_kern, etext, sdata, edata;
/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Page_size must already be set.
 *
 * Parameters:
 * first_avail: first available physical page -
 *              after kernel page tables
 * avail_start: PA of first physical page
 * avail_end:   PA of last physical page
 */
vm_offset_t first_avail;
vm_offset_t static_memory_end;
pmap_paddr_t avail_start, avail_end;
#define MEM_SIZE_MAX 0x40000000 /* 1GB cap applied to mem_size below */

extern vm_offset_t ExceptionVectorsBase; /* the code we want to load there */
/* The translation tables have to be 16KB aligned */
#define round_x_table(x) \
	(((pmap_paddr_t)(x) + (ARM_PGBYTES<<2) - 1) & ~((ARM_PGBYTES<<2) - 1))
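
/*
 * Worked example of the rounding above, assuming 4KB pages (illustrative
 * values only): ARM_PGBYTES << 2 == 0x4000, so the macro adds 0x3FFF and
 * masks with ~0x3FFF. round_x_table(0x80004001) == 0x80008000, while an
 * already 16KB-aligned 0x80008000 is returned unchanged.
 */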
vm_map_address_t
phystokv(pmap_paddr_t pa)
{
	return pa - gPhysBase + gVirtBase;
}
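
/*
 * Example of the linear (static) translation above, with illustrative base
 * addresses: if gPhysBase == 0x40000000 and gVirtBase == 0x80000000, then
 * phystokv(0x40123000) == 0x80123000. The static kernel mapping is a single
 * constant offset, so the inverse is simply va - gVirtBase + gPhysBase.
 */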
/*
 * arm_vm_page_granular_helper rewrites, at 4KB page granularity, the
 * protections for the ragged (non-section-aligned) edge of [start, _end)
 * that contains va, allocating a coarse L2 table if the region is still
 * mapped only by L1 entries.
 */
static void
arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va,
    int pte_prot_APX, int pte_prot_XN)
{
	if (va & ARM_TT_L1_PT_OFFMASK) { /* ragged edge hanging over an ARM_TT_L1_PT_SIZE boundary */
		va &= (~ARM_TT_L1_PT_OFFMASK);
		tt_entry_t *tte = &cpu_tte[ttenum(va)];
		tt_entry_t tmplate = *tte;
		pmap_paddr_t pa;
		pt_entry_t *ppte, ptmp;
		unsigned int i;

		pa = va - gVirtBase + gPhysBase;

		if (pa >= avail_end) {
			return;
		}

		if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
			/* pick up the existing page table. */
			ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
		} else {
			/* TTE must be reincarnated COARSE. */
			ppte = (pt_entry_t *)phystokv(avail_start);
			pmap_paddr_t l2table = avail_start;
			avail_start += ARM_PGBYTES;
			bzero(ppte, ARM_PGBYTES);

			for (i = 0; i < 4; ++i) {
				tte[i] = pa_to_tte(l2table + (i * 0x400)) | ARM_TTE_TYPE_TABLE;
			}
		}

		vm_offset_t len = _end - va;
		if ((pa + len) > avail_end) {
			_end -= (pa + len - avail_end);
		}
		assert((start - gVirtBase + gPhysBase) >= gPhysBase);

		/* Apply the desired protections to the specified page range */
		for (i = 0; i < (ARM_PGBYTES / sizeof(*ppte)); i++) {
			if (start <= va && va < _end) {
				ptmp = pa | ARM_PTE_AF | ARM_PTE_SH | ARM_PTE_TYPE;
				ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
				ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);
				if (pte_prot_XN) {
					ptmp = ptmp | ARM_PTE_NX;
				}

				*ppte = ptmp;
			}
			ppte++;
			va += ARM_PGBYTES;
			pa += ARM_PGBYTES;
		}
	}
}
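
/*
 * Geometry note (ARMv7 short-descriptor format, 4KB pages assumed): one L1
 * entry covers 1MB, and a coarse L2 table holds 256 entries x 4 bytes = 1KB.
 * Four consecutive L2 tables therefore share a single 4KB page, which is why
 * the reincarnation path above points tte[0..3] at successive 0x400-byte
 * offsets within one freshly allocated page.
 */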
/*
 * arm_vm_page_granular_prot applies protections to [start, start + size):
 * ragged edges are handled at page granularity via the helper, and the
 * aligned middle is rewritten as L1 block entries unless force_page_granule
 * demands page granularity throughout.
 */
static void
arm_vm_page_granular_prot(vm_offset_t start, unsigned long size,
    int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int force_page_granule)
{
	vm_offset_t _end = start + size;
	vm_offset_t align_start = (start + ARM_TT_L1_PT_OFFMASK) & ~ARM_TT_L1_PT_OFFMASK;
	vm_offset_t align_end = _end & ~ARM_TT_L1_PT_OFFMASK;

	arm_vm_page_granular_helper(start, _end, start, pte_prot_APX, pte_prot_XN);

	while (align_start < align_end) {
		if (force_page_granule) {
			arm_vm_page_granular_helper(align_start, align_end, align_start + 1,
			    pte_prot_APX, pte_prot_XN);
		} else {
			tt_entry_t *tte = &cpu_tte[ttenum(align_start)];
			for (int i = 0; i < 4; ++i) {
				tt_entry_t tmplate = tte[i];

				tmplate = (tmplate & ~ARM_TTE_BLOCK_APMASK) | ARM_TTE_BLOCK_AP(pte_prot_APX);
				tmplate = (tmplate & ~ARM_TTE_BLOCK_NX_MASK);
				if (tte_prot_XN) {
					tmplate = tmplate | ARM_TTE_BLOCK_NX;
				}

				tte[i] = tmplate;
			}
		}
		align_start += ARM_TT_L1_PT_SIZE;
	}

	arm_vm_page_granular_helper(start, _end, _end, pte_prot_APX, pte_prot_XN);
}
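
/*
 * Sketch of the head/middle/tail split above, with illustrative addresses
 * and assuming ARM_TT_L1_PT_SIZE == 4MB: for [0x80123000, 0x80923000), the
 * first helper call covers the ragged head up to 0x80400000, the while loop
 * rewrites the aligned middle 0x80400000-0x80800000 as block entries, and
 * the final helper call covers the ragged tail from 0x80800000.
 */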
static inline void
arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RONA, 1, force_page_granule);
}

static inline void
arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RONA, 0, force_page_granule);
}

static inline void
arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RWNA, 1, force_page_granule);
}

static inline void
arm_vm_page_granular_RWX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RWNA, 0, force_page_granule);
}
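
/*
 * Naming summary for the wrappers above: the suffix encodes the resulting
 * kernel protection. R/RW comes from the AP encoding (AP_RONA = read-only,
 * AP_RWNA = read-write, both with no user access), and X/NX from whether
 * the XN arguments are passed as 0 (executable) or 1 (non-executable) at
 * both the TTE and PTE level.
 */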
void
arm_vm_prot_init(boot_args * args)
{
#if __ARM_PTE_PHYSMAP__
	boolean_t force_coarse_physmap = TRUE;
#else
	boolean_t force_coarse_physmap = FALSE;
#endif

	/*
	 * Enforce W^X protections on segments that have been identified so far. This will be
	 * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions()
	 */

	/*
	 * Protection on kernel text is loose here to allow shenanigans early on (e.g. copying exception vectors)
	 * and storing an address into "error_buffer" (see arm_init.c) !?!
	 * These protections are tightened in arm_vm_prot_finalize()
	 */
	arm_vm_page_granular_RWX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	if (doconstro) {
		/*
		 * We map __DATA with 3 calls, so that the __const section can have its
		 * protections changed independently of the rest of the __DATA segment.
		 */
		arm_vm_page_granular_RWNX(segDATAB, sectCONSTB - segDATAB, FALSE);
		arm_vm_page_granular_RNX(sectCONSTB, sectSizeCONST, FALSE);
		arm_vm_page_granular_RWNX(sectCONSTB + sectSizeCONST, (segDATAB + segSizeDATA) - (sectCONSTB + sectSizeCONST), FALSE);
	} else {
		/* If we aren't protecting const, just map DATA as a single blob. */
		arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, FALSE);
	}

	arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&fiqstack_high_guard, PAGE_MAX_SIZE, TRUE);
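
	/*
	 * Note on the three guard mappings above: the interrupt and FIQ stack
	 * guard pages are made read-only and non-executable so that a stack
	 * overflow or underflow faults immediately instead of silently
	 * corrupting adjacent memory.
	 */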
	arm_vm_page_granular_ROX(segKLDB, segSizeKLD, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this
	if (segLASTDATACONSTB) {
		arm_vm_page_granular_RWNX(segLASTDATACONSTB, segSizeLASTDATACONST, FALSE); // __LASTDATA_CONST may be empty, but we cannot assume this
	}
	arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions
	arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT,
	    end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary
	arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, force_coarse_physmap); // Device Tree, RAM Disk (if present), bootArgs, trust caches
	arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, FALSE); // tighter trust cache protection
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData), ARM_PGBYTES * 8, FALSE); // boot_tte, cpu_tte
	/*
	 * FIXME: Any page table pages that arm_vm_page_granular_* created with ROX entries in the range
	 * phystokv(args->topOfKernelData) to phystokv(prot_avail_start) should themselves be
	 * write protected in the static mapping of that range.
	 * [Page table pages whose page table entries grant execute (X) privileges should themselves be
	 * marked read-only. This aims to thwart attacks that replace the X entries with vectors to evil code
	 * (relying on some thread of execution to eventually arrive at what previously was a trusted routine).]
	 */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 8, ARM_PGBYTES, FALSE); /* Excess physMem over 1MB */
	arm_vm_page_granular_RWX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* refined in finalize */

	/* Map the remainder of xnu owned memory. */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 10,
	    static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap); /* rest of physmem */
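
	/*
	 * Index arithmetic for the high-vectors fixup below (values assume the
	 * 4KB-page short-descriptor layout used throughout this file):
	 * HIGH_EXC_VECTORS is 0xFFFF0000, so the page table page installed by
	 * pmap_init_pte_page() maps the 4MB region starting at 0xFFC00000, and
	 * idx = (0xFFFF0000 & ARM_TT_L1_PT_OFFMASK) >> ARM_TT_L2_SHIFT
	 *     = 0x003F0000 >> 12 = 1008, the PTE for the vector page itself.
	 */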
	/*
	 * Special case write protection for the mapping of ExceptionVectorsBase (EVB) at 0xFFFF0000.
	 * Recall that start.s handcrafted a page table page for EVB mapping
	 */
	pmap_paddr_t p = (pmap_paddr_t)(args->topOfKernelData) + (ARM_PGBYTES * 9);
	pt_entry_t *ppte = (pt_entry_t *)phystokv(p);
	pmap_init_pte_page(kernel_pmap, ppte, HIGH_EXC_VECTORS & ~ARM_TT_L1_PT_OFFMASK, 2, TRUE);

	int idx = (HIGH_EXC_VECTORS & ARM_TT_L1_PT_OFFMASK) >> ARM_TT_L2_SHIFT;
	pt_entry_t ptmp = ppte[idx];

	ptmp = (ptmp & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA);

	ppte[idx] = ptmp;
}
void
arm_vm_prot_finalize(boot_args * args)
{
	cpu_stack_alloc(&BootCpuData);
	ml_static_mfree(segBOOTDATAB, segSizeBOOTDATA);

	/*
	 * Naively we could have:
	 * arm_vm_page_granular_ROX(segTEXTB, segSizeTEXT, FALSE);
	 * but, at present, that would miss a 1Mb boundary at the beginning of the segment and
	 * so would force a (wasteful) coarse page (e.g. when gVirtBase is 0x80000000, segTEXTB is 0x80001000).
	 */
	arm_vm_page_granular_ROX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* commpage, EVB */
}
/* used in the chosen/memory-map node, populated by iBoot. */
typedef struct MemoryMapFileInfo {
	vm_offset_t paddr;
	size_t length;
} MemoryMapFileInfo;
void
arm_vm_init(uint64_t memory_size, boot_args * args)
{
	vm_map_address_t va, off, off_end;
	tt_entry_t *tte, *tte_limit;
	pmap_paddr_t boot_ttep;
	tt_entry_t *boot_tte;
	uint32_t mem_segments;
	kernel_section_t *sectDCONST;

	/*
	 * Get the virtual and physical memory base from boot_args.
	 */
	gVirtBase = args->virtBase;
	gPhysBase = args->physBase;
	gPhysSize = args->memSize;
	mem_size = args->memSize;
	mem_actual = args->memSizeActual ? args->memSizeActual : mem_size;
	if (mem_size > MEM_SIZE_MAX) {
		mem_size = MEM_SIZE_MAX;
	}
	if ((memory_size != 0) && (mem_size > memory_size)) {
		mem_size = memory_size;
		max_mem_actual = memory_size;
	} else {
		max_mem_actual = mem_actual;
	}

	static_memory_end = gVirtBase + mem_size;
	/* Calculate the number of ~256MB segments of memory */
	mem_segments = (mem_size + 0x0FFFFFFF) >> 28;
	/*
	 * Copy the boot mmu tt to create system mmu tt.
	 * System mmu tt starts after the boot mmu tt.
	 * Determine translation table base virtual address: - aligned at end
	 * of executable.
	 */
	boot_ttep = args->topOfKernelData;
	boot_tte = (tt_entry_t *) phystokv(boot_ttep);

	cpu_ttep = boot_ttep + ARM_PGBYTES * 4;
	cpu_tte = (tt_entry_t *) phystokv(cpu_ttep);

	bcopy(boot_tte, cpu_tte, ARM_PGBYTES * 4);
	/*
	 * Clear out any V==P mappings that may have been established in e.g. start.s
	 */
	tte = &cpu_tte[ttenum(gPhysBase)];
	tte_limit = &cpu_tte[ttenum(gPhysBase + gPhysSize)];

	/* Hands off [gVirtBase, gVirtBase + gPhysSize) please. */
	if (gPhysBase < gVirtBase) {
		if (gPhysBase + gPhysSize > gVirtBase) {
			tte_limit = &cpu_tte[ttenum(gVirtBase)];
		}
	} else {
		if (gPhysBase < gVirtBase + gPhysSize) {
			tte = &cpu_tte[ttenum(gVirtBase + gPhysSize)];
		}
	}

	while (tte < tte_limit) {
		*tte = ARM_TTE_TYPE_FAULT;
		tte++;
	}
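
	/*
	 * Clipping example for the V==P teardown above (illustrative values):
	 * with gPhysBase == 0x40000000, gPhysSize == 0x20000000, and
	 * gVirtBase == 0x80000000, the ranges do not overlap and the whole
	 * [0x40000000, 0x60000000) identity window is invalidated. Had
	 * gVirtBase fallen inside that window, tte/tte_limit would be clipped
	 * so the live kernel mapping is never touched.
	 */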
	/* Skip 6 pages (four L1 + two L2 entries) */
	avail_start = cpu_ttep + ARM_PGBYTES * 6;
	avail_end = gPhysBase + mem_size;
	/*
	 * Now retrieve addresses for end, edata, and etext
	 * from MACH-O headers for the currently running 32 bit kernel.
	 */
	segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
	segLOWESTTEXT = segTEXTB;
	segLOWEST = segLOWESTTEXT;
	segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
	segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
	segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
	segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);
	segLASTDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LASTDATA_CONST", &segSizeLASTDATACONST);
	segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
	segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);
	segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA);
	segSizeEXTRADATA = 0;

	DTEntry memory_map;
	MemoryMapFileInfo const *trustCacheRange;
	unsigned int trustCacheRangeSize;
	int err;

	err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
	assert(err == kSuccess);

	err = SecureDTGetProperty(memory_map, "TrustCache", (const void**)&trustCacheRange, &trustCacheRangeSize);
	if (err == kSuccess) {
		assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo));

		segEXTRADATA = phystokv(trustCacheRange->paddr);
		segSizeEXTRADATA = trustCacheRange->length;
	}
	etext = (vm_offset_t) segTEXTB + segSizeTEXT;
	sdata = (vm_offset_t) segDATAB;
	edata = (vm_offset_t) segDATAB + segSizeDATA;
	end_kern = round_page(getlastaddr()); /* Force end to next page */

	/*
	 * Special handling for the __DATA,__const *section*.
	 * A page of padding named lastkerneldataconst is at the end of the __DATA,__const
	 * so we can safely truncate the size. __DATA,__const is also aligned, but
	 * just in case we will round that to a page, too.
	 */
	segDATA = getsegbynamefromheader(&_mh_execute_header, "__DATA");
	sectDCONST = getsectbynamefromheader(&_mh_execute_header, "__DATA", "__const");
	sectCONSTB = sectDCONST->addr;
	sectSizeCONST = sectDCONST->size;
	if (doconstro) {
		extern vm_offset_t _lastkerneldataconst;
		extern vm_size_t _lastkerneldataconst_padsize;
		vm_offset_t sdataconst = sectCONSTB;

		/* this should already be aligned, but so that we can protect we round */
		sectCONSTB = round_page(sectCONSTB);

		/* make sure lastkerneldataconst is really last and the right size */
		if ((_lastkerneldataconst == sdataconst + sectSizeCONST - _lastkerneldataconst_padsize) &&
		    (_lastkerneldataconst_padsize >= PAGE_SIZE)) {
			sectSizeCONST = trunc_page(sectSizeCONST);
		} else {
			/* otherwise see if next section is aligned then protect up to it */
			kernel_section_t *next_sect = nextsect(segDATA, sectDCONST);

			if (next_sect && ((next_sect->addr & PAGE_MASK) == 0)) {
				sectSizeCONST = next_sect->addr - sectCONSTB;
			} else {
				/* lastly just go ahead and truncate so we try to protect something */
				sectSizeCONST = trunc_page(sectSizeCONST);
			}
		}

		/* if the computed range is degenerate or escapes __DATA, give up on const protection */
		if ((sectSizeCONST == 0) || (sectCONSTB < sdata) || (sectCONSTB + sectSizeCONST) >= edata) {
			doconstro = FALSE;
		}
	}
	vm_prelink_stext = segPRELINKTEXTB;
	vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
	vm_prelink_sinfo = segPRELINKINFOB;
	vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
	vm_slinkedit = segLINKB;
	vm_elinkedit = segLINKB + segSizeLINK;

	sane_size = mem_size - (avail_start - gPhysBase);

	vm_kernel_slide = gVirtBase - VM_KERNEL_LINK_ADDRESS;
	vm_kernel_stext = segTEXTB;
	vm_kernel_etext = segTEXTB + segSizeTEXT;
	vm_kernel_base = gVirtBase;
	vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
	vm_kext_base = segPRELINKTEXTB;
	vm_kext_top = vm_kext_base + segSizePRELINKTEXT;
	vm_kernel_slid_base = segTEXTB;
	vm_kernel_slid_top = vm_kext_top;
	pmap_bootstrap((gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000);
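
	/*
	 * The expression above rounds the first VA past the 1GB static window
	 * up to a 4MB boundary; e.g. with gVirtBase == 0x80000000 (illustrative),
	 * (0x80000000 + 0x40000000 + 0x3FFFFF) & 0xFFC00000 == 0xC0000000, so
	 * pmap-managed virtual space begins right after the static mapping.
	 */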
	arm_vm_prot_init(args);

	vm_page_kernelcache_count = (unsigned int) (atop_64(end_kern - segLOWEST));
	/*
	 * To avoid recursing while trying to init the vm_page and object mechanisms,
	 * pre-initialize kernel pmap page table pages to cover this address range:
	 * 2MB + FrameBuffer size + 3MB for each 256MB segment
	 */
	off_end = (2 + (mem_segments * 3)) << 20;
	off_end += (unsigned int) round_page(args->Video.v_height * args->Video.v_rowBytes);

	for (off = 0, va = (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) {
		pt_entry_t *ptp;
		pmap_paddr_t ptp_phys;

		ptp = (pt_entry_t *) phystokv(avail_start);
		ptp_phys = (pmap_paddr_t)avail_start;
		avail_start += ARM_PGBYTES;
		bzero(ptp, ARM_PGBYTES);
		pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE);
		tte = &cpu_tte[ttenum(va + off)];
		*tte = pa_to_tte((ptp_phys)) | ARM_TTE_TYPE_TABLE;
		*(tte + 1) = pa_to_tte((ptp_phys + 0x400)) | ARM_TTE_TYPE_TABLE;
		*(tte + 2) = pa_to_tte((ptp_phys + 0x800)) | ARM_TTE_TYPE_TABLE;
		*(tte + 3) = pa_to_tte((ptp_phys + 0xC00)) | ARM_TTE_TYPE_TABLE;
	}
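
	/*
	 * Sizing example for the loop above (illustrative): on a 512MB device,
	 * mem_segments == 2, so off_end starts at (2 + 6) << 20 == 8MB, plus
	 * the rounded framebuffer size. Each iteration consumes one 4KB page
	 * from avail_start and wires it in as four coarse L2 tables covering
	 * one 4MB stretch (ARM_TT_L1_PT_SIZE) of kernel virtual space.
	 */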
	set_mmu_ttb(cpu_ttep);
	set_mmu_ttb_alternate(cpu_ttep);

#if __arm__ && __ARM_USER_PROTECT__
	{
		/* Cache the live TTBR0/TTBR1 values in the boot thread's machine state. */
		unsigned int ttbr0_val, ttbr1_val;
		thread_t thread = current_thread();

		__asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
		__asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		thread->machine.uptw_ttb = ttbr0_val;
		thread->machine.kptw_ttb = ttbr1_val;
	}
#endif
	avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;

	first_avail = avail_start;
	patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
}