/*
 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_debug.h>
#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/lowglobals.h>

#include <pexpert/arm/boot.h>
#include <pexpert/device_tree.h>

#include <libkern/kernel_mach_header.h>
/*
 * Denotes the end of xnu.
 */
extern void *last_kernel_symbol;
vm_offset_t vm_kernel_base;
vm_offset_t vm_kernel_top;
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;
vm_offset_t vm_kernel_slid_base;
vm_offset_t vm_kernel_slid_top;
vm_offset_t vm_kext_base;
vm_offset_t vm_kext_top;
vm_offset_t vm_prelink_stext;
vm_offset_t vm_prelink_etext;
vm_offset_t vm_prelink_sinfo;
vm_offset_t vm_prelink_einfo;
vm_offset_t vm_slinkedit;
vm_offset_t vm_elinkedit;
vm_offset_t vm_prelink_sdata;
vm_offset_t vm_prelink_edata;

vm_offset_t vm_kernel_builtinkmod_text;
vm_offset_t vm_kernel_builtinkmod_text_end;
unsigned long gVirtBase, gPhysBase, gPhysSize;  /* Used by <mach/arm/vm_param.h> */

vm_offset_t mem_size;       /* Size of actual physical memory present
                             * minus any performance buffer and possibly
                             * limited by mem_limit in bytes */
uint64_t    mem_actual;     /* The "One True" physical memory size
                             * actually, it's the highest physical
                             * address + 1 */
uint64_t    max_mem;        /* kernel/vm managed memory, adjusted by maxmem */
uint64_t    max_mem_actual; /* Actual size of physical memory (bytes), adjusted
                             * by the maxmem boot-arg */
uint64_t    sane_size;      /* Memory size to use for defaults calculations */
addr64_t    vm_last_addr = VM_MAX_KERNEL_ADDRESS;  /* Highest kernel
                                                    * virtual address known
                                                    * to the VM system */
vm_offset_t segEXTRADATA;
unsigned long segSizeEXTRADATA;
vm_offset_t segLOWESTTEXT;
vm_offset_t segLOWEST;
static vm_offset_t segTEXTB;
static unsigned long segSizeTEXT;
static vm_offset_t segDATAB;
static unsigned long segSizeDATA;
vm_offset_t segLINKB;
static unsigned long segSizeLINK;
static vm_offset_t segKLDB;
static unsigned long segSizeKLD;
static vm_offset_t segKLDDATAB;
static unsigned long segSizeKLDDATA;
static vm_offset_t segLASTB;
static vm_offset_t segLASTDATACONSTB;
static unsigned long segSizeLASTDATACONST;
static unsigned long segSizeLAST;
static vm_offset_t sectCONSTB;
static unsigned long sectSizeCONST;
vm_offset_t segBOOTDATAB;
unsigned long segSizeBOOTDATA;

extern vm_offset_t intstack_low_guard;
extern vm_offset_t intstack_high_guard;
extern vm_offset_t fiqstack_high_guard;
vm_offset_t segPRELINKTEXTB;
unsigned long segSizePRELINKTEXT;
vm_offset_t segPRELINKINFOB;
unsigned long segSizePRELINKINFO;

vm_offset_t segLOWESTKC;
vm_offset_t segHIGHESTKC;
vm_offset_t segLOWESTROKC;
vm_offset_t segHIGHESTROKC;
vm_offset_t segLOWESTAuxKC;
vm_offset_t segHIGHESTAuxKC;
vm_offset_t segLOWESTROAuxKC;
vm_offset_t segHIGHESTROAuxKC;
vm_offset_t segLOWESTRXAuxKC;
vm_offset_t segHIGHESTRXAuxKC;
vm_offset_t segHIGHESTNLEAuxKC;
static kernel_segment_command_t *segDATA;
static boolean_t doconstro = TRUE;

vm_offset_t end_kern, etext, sdata, edata;
/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Page_size must already be set.
 *
 * first_avail:  first available physical page -
 *               after kernel page tables
 * avail_start:  PA of first physical page
 * avail_end:    PA of last physical page
 */
vm_offset_t first_avail;
vm_offset_t static_memory_end;
pmap_paddr_t avail_start, avail_end;
#define MEM_SIZE_MAX 0x40000000

extern vm_offset_t ExceptionVectorsBase;        /* the code we want to load there */

/* The translation tables have to be 16KB aligned */
#define round_x_table(x) \
	(((pmap_paddr_t)(x) + (ARM_PGBYTES<<2) - 1) & ~((ARM_PGBYTES<<2) - 1))
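
/*
 * Illustrative sketch (not part of the original source), assuming 4KB ARM pages:
 * ARM_PGBYTES << 2 is 0x4000 (16KB), so round_x_table() rounds its argument up
 * to the next 16KB boundary, e.g. round_x_table(0x80000001) == 0x80004000 while
 * an already aligned value such as 0x80004000 is returned unchanged.
 */
#if 0
_Static_assert(round_x_table(0x80000001ULL) == 0x80004000ULL,
    "translation table bases must round up to a 16KB boundary");
#endif
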
vm_map_address_t
phystokv(pmap_paddr_t pa)
{
	return pa - gPhysBase + gVirtBase;
}
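
/*
 * Sketch for illustration only (not in the original source): phystokv() simply
 * rebases a physical address into the kernel's static virtual window, so the
 * inverse mapping for an address inside that window would be the hypothetical
 * helper below.
 */
#if 0
/* assumes va lies within the static mapping [gVirtBase, gVirtBase + mem_size) */
static pmap_paddr_t
example_kvtophys(vm_offset_t va)
{
	return (pmap_paddr_t)(va - gVirtBase + gPhysBase);
}
#endif
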
static void
arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va,
    int pte_prot_APX, int pte_prot_XN)
{
	if (va & ARM_TT_L1_PT_OFFMASK) { /* ragged edge hanging over a ARM_TT_L1_PT_SIZE boundary */
		va &= (~ARM_TT_L1_PT_OFFMASK);
		tt_entry_t *tte = &cpu_tte[ttenum(va)];
		tt_entry_t tmplate = *tte;
		pmap_paddr_t pa;
		pt_entry_t *ppte, ptmp;
		unsigned int i;

		pa = va - gVirtBase + gPhysBase;

		if (pa >= avail_end) {
			return;
		}

		if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
			/* pick up the existing page table. */
			ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
		} else {
			/* TTE must be reincarnated COARSE. */
			ppte = (pt_entry_t *)phystokv(avail_start);
			pmap_paddr_t l2table = avail_start;
			avail_start += ARM_PGBYTES;
			bzero(ppte, ARM_PGBYTES);

			for (i = 0; i < 4; ++i) {
				tte[i] = pa_to_tte(l2table + (i * 0x400)) | ARM_TTE_TYPE_TABLE;
			}
		}

		vm_offset_t len = _end - va;
		if ((pa + len) > avail_end) {
			_end -= (pa + len - avail_end);
		}
		assert((start - gVirtBase + gPhysBase) >= gPhysBase);

		/* Apply the desired protections to the specified page range */
		for (i = 0; i < (ARM_PGBYTES / sizeof(*ppte)); i++) {
			if (start <= va && va < _end) {
				ptmp = pa | ARM_PTE_AF | ARM_PTE_SH | ARM_PTE_TYPE;
				ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
				ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);
				if (pte_prot_XN) {
					ptmp = ptmp | ARM_PTE_NX;
				}

				ppte[i] = ptmp;
			}

			va += ARM_PGBYTES;
			pa += ARM_PGBYTES;
		}
	}
}
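
/*
 * Worked example added for illustration (not in the original source), assuming
 * the usual 32-bit short-descriptor layout with 4KB pages: one coarse L2 table
 * of 0x400 bytes holds 256 four-byte entries and therefore maps 256 * 4KB = 1MB,
 * so the helper above carves a single ARM_PGBYTES page into four 0x400-byte L2
 * tables (i * 0x400) to cover the four consecutive 1MB L1 sections that make up
 * one ARM_TT_L1_PT_SIZE region.
 */
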
static void
arm_vm_page_granular_prot(vm_offset_t start, unsigned long size,
    int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int force_page_granule)
{
	vm_offset_t _end = start + size;
	vm_offset_t align_start = (start + ARM_TT_L1_PT_OFFMASK) & ~ARM_TT_L1_PT_OFFMASK;
	vm_offset_t align_end = _end & ~ARM_TT_L1_PT_OFFMASK;

	arm_vm_page_granular_helper(start, _end, start, pte_prot_APX, pte_prot_XN);

	while (align_start < align_end) {
		if (force_page_granule) {
			arm_vm_page_granular_helper(align_start, align_end, align_start + 1,
			    pte_prot_APX, pte_prot_XN);
		} else {
			tt_entry_t *tte = &cpu_tte[ttenum(align_start)];
			for (int i = 0; i < 4; ++i) {
				tt_entry_t tmplate = tte[i];

				tmplate = (tmplate & ~ARM_TTE_BLOCK_APMASK) | ARM_TTE_BLOCK_AP(pte_prot_APX);
				tmplate = (tmplate & ~ARM_TTE_BLOCK_NX_MASK);
				if (tte_prot_XN) {
					tmplate = tmplate | ARM_TTE_BLOCK_NX;
				}

				tte[i] = tmplate;
			}
		}
		align_start += ARM_TT_L1_PT_SIZE;
	}

	arm_vm_page_granular_helper(start, _end, _end, pte_prot_APX, pte_prot_XN);
}
static void
arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RONA, 1, force_page_granule);
}

static void
arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RONA, 0, force_page_granule);
}

static void
arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RWNA, 1, force_page_granule);
}

static void
arm_vm_page_granular_RWX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RWNA, 0, force_page_granule);
}
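
/*
 * Note added for illustration (not in the original source): each wrapper pins
 * the (tte_prot_XN, pte_prot_APX, pte_prot_XN) triple passed to
 * arm_vm_page_granular_prot(). For example, a hypothetical call such as
 *
 *     arm_vm_page_granular_RNX(guard_page_va, PAGE_MAX_SIZE, TRUE);
 *
 * requests kernel read-only (AP_RONA), execute-never mappings for that range,
 * forcing page granularity so neighbouring data in the same 1MB section keeps
 * its own protections.
 */
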
void
arm_vm_prot_init(boot_args * args)
{
#if __ARM_PTE_PHYSMAP__
	boolean_t force_coarse_physmap = TRUE;
#else
	boolean_t force_coarse_physmap = FALSE;
#endif

	/*
	 * Enforce W^X protections on segments that have been identified so far. This will be
	 * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions()
	 */

	/*
	 * Protection on kernel text is loose here to allow shenanigans early on (e.g. copying exception vectors)
	 * and storing an address into "error_buffer" (see arm_init.c) !?!
	 * These protections are tightened in arm_vm_prot_finalize()
	 */
	arm_vm_page_granular_RWX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	if (doconstro) {
		/*
		 * We map __DATA with 3 calls, so that the __const section can have its
		 * protections changed independently of the rest of the __DATA segment.
		 */
		arm_vm_page_granular_RWNX(segDATAB, sectCONSTB - segDATAB, FALSE);
		arm_vm_page_granular_RNX(sectCONSTB, sectSizeCONST, FALSE);
		arm_vm_page_granular_RWNX(sectCONSTB + sectSizeCONST, (segDATAB + segSizeDATA) - (sectCONSTB + sectSizeCONST), FALSE);
	} else {
		/* If we aren't protecting const, just map DATA as a single blob. */
		arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, FALSE);
	}

	arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&fiqstack_high_guard, PAGE_MAX_SIZE, TRUE);

	arm_vm_page_granular_ROX(segKLDB, segSizeKLD, force_coarse_physmap);
	arm_vm_page_granular_RNX(segKLDDATAB, segSizeKLDDATA, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this
	if (segLASTDATACONSTB) {
		arm_vm_page_granular_RWNX(segLASTDATACONSTB, segSizeLASTDATACONST, FALSE); // __LASTDATA_CONST may be empty, but we cannot assume this
	}
	arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions
	arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT,
	    end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary
	arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, force_coarse_physmap); // Device Tree, RAM Disk (if present), bootArgs, trust caches
	arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, FALSE); // tighter trust cache protection
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData), ARM_PGBYTES * 8, FALSE); // boot_tte, cpu_tte

	/*
	 * FIXME: Any page table pages that arm_vm_page_granular_* created with ROX entries in the range
	 * phystokv(args->topOfKernelData) to phystokv(prot_avail_start) should themselves be
	 * write protected in the static mapping of that range.
	 * [Page table pages whose page table entries grant execute (X) privileges should themselves be
	 * marked read-only. This aims to thwart attacks that replace the X entries with vectors to evil code
	 * (relying on some thread of execution to eventually arrive at what previously was a trusted routine).]
	 */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 8, ARM_PGBYTES, FALSE); /* Excess physMem over 1MB */
	arm_vm_page_granular_RWX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* refined in finalize */

	/* Map the remainder of xnu owned memory. */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 10,
	    static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap); /* rest of physmem */

	/*
	 * Special case write protection for the mapping of ExceptionVectorsBase (EVB) at 0xFFFF0000.
	 * Recall that start.s handcrafted a page table page for EVB mapping
	 */
	pmap_paddr_t p = (pmap_paddr_t)(args->topOfKernelData) + (ARM_PGBYTES * 9);
	pt_entry_t *ppte = (pt_entry_t *)phystokv(p);
	pmap_init_pte_page(kernel_pmap, ppte, HIGH_EXC_VECTORS & ~ARM_TT_L1_PT_OFFMASK, 2, TRUE);

	int idx = (HIGH_EXC_VECTORS & ARM_TT_L1_PT_OFFMASK) >> ARM_TT_L2_SHIFT;
	pt_entry_t ptmp = ppte[idx];

	ptmp = (ptmp & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA);

	ppte[idx] = ptmp;
}
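
/*
 * Added illustration (not in the original source): the rewrite above leaves the
 * EVB page-table entry with kernel read-only permissions, so a debug-only check
 * placed just before the end of arm_vm_prot_init() could assert, for example:
 *
 *     assert((ppte[idx] & ARM_PTE_APMASK) == ARM_PTE_AP(AP_RONA));
 *
 * i.e. the access-permission field of that entry now encodes AP_RONA.
 */
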
void
arm_vm_prot_finalize(boot_args * args)
{
	cpu_stack_alloc(&BootCpuData);
	ml_static_mfree(segBOOTDATAB, segSizeBOOTDATA);
	/*
	 * Naively we could have:
	 * arm_vm_page_granular_ROX(segTEXTB, segSizeTEXT, FALSE);
	 * but, at present, that would miss a 1Mb boundary at the beginning of the segment and
	 * so would force a (wasteful) coarse page (e.g. when gVirtBase is 0x80000000, segTEXTB is 0x80001000).
	 */
	arm_vm_page_granular_ROX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* commpage, EVB */
}
/* used in the chosen/memory-map node, populated by iBoot. */
typedef struct MemoryMapFileInfo {
	vm_offset_t paddr;
	size_t length;
} MemoryMapFileInfo;
void
arm_vm_init(uint64_t memory_size, boot_args * args)
{
	vm_map_address_t va, off, off_end;
	tt_entry_t *tte, *tte_limit;
	pmap_paddr_t boot_ttep;
	tt_entry_t *boot_tte;
	uint32_t mem_segments;
	kernel_section_t *sectDCONST;

	/*
	 * Get the virtual and physical memory base from boot_args.
	 */
	gVirtBase = args->virtBase;
	gPhysBase = args->physBase;
	gPhysSize = args->memSize;
	mem_size = args->memSize;
	mem_actual = args->memSizeActual ? args->memSizeActual : mem_size;
	if (mem_size > MEM_SIZE_MAX) {
		mem_size = MEM_SIZE_MAX;
	}
	if ((memory_size != 0) && (mem_size > memory_size)) {
		mem_size = memory_size;
		max_mem_actual = memory_size;
	} else {
		max_mem_actual = mem_actual;
	}

	static_memory_end = gVirtBase + mem_size;

	/* Calculate the number of ~256MB segments of memory */
	mem_segments = (mem_size + 0x0FFFFFFF) >> 28;

	/*
	 * Copy the boot mmu tt to create system mmu tt.
	 * System mmu tt start after the boot mmu tt.
	 * Determine translation table base virtual address: - aligned at end
	 * of executable.
	 */
	boot_ttep = args->topOfKernelData;
	boot_tte = (tt_entry_t *) phystokv(boot_ttep);

	cpu_ttep = boot_ttep + ARM_PGBYTES * 4;
	cpu_tte = (tt_entry_t *) phystokv(cpu_ttep);

	bcopy(boot_tte, cpu_tte, ARM_PGBYTES * 4);

	/*
	 * Clear out any V==P mappings that may have been established in e.g. start.s
	 */
	tte = &cpu_tte[ttenum(gPhysBase)];
	tte_limit = &cpu_tte[ttenum(gPhysBase + gPhysSize)];

	/* Hands off [gVirtBase, gVirtBase + gPhysSize) please. */
	if (gPhysBase < gVirtBase) {
		if (gPhysBase + gPhysSize > gVirtBase) {
			tte_limit = &cpu_tte[ttenum(gVirtBase)];
		}
	} else {
		if (gPhysBase < gVirtBase + gPhysSize) {
			tte = &cpu_tte[ttenum(gVirtBase + gPhysSize)];
		}
	}

	while (tte < tte_limit) {
		*tte = ARM_TTE_TYPE_FAULT;
		tte++;
	}
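
	/*
	 * Worked example added for illustration (not in the original source): if iBoot
	 * reported gPhysBase = 0x10000000, gVirtBase = 0x80000000 and gPhysSize =
	 * 0x20000000, the identity (V==P) range [0x10000000, 0x30000000) does not
	 * overlap the kernel's static window starting at gVirtBase, so every TTE in
	 * that range is set to ARM_TTE_TYPE_FAULT; the two comparisons above only
	 * trim the loop bounds when the two ranges do overlap.
	 */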

	/* Skip 6 pages (four L1 + two L2 entries) */
	avail_start = cpu_ttep + ARM_PGBYTES * 6;
	avail_end = gPhysBase + mem_size;

	/*
	 * Now retrieve addresses for end, edata, and etext
	 * from MACH-O headers for the currently running 32 bit kernel.
	 */
	segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
	segLOWESTTEXT = segTEXTB;
	segLOWEST = segLOWESTTEXT;
	segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
	segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
	segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
	segKLDDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLDDATA", &segSizeKLDDATA);
	segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);
	segLASTDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LASTDATA_CONST", &segSizeLASTDATACONST);
	segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
	segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);
	segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA);

	segSizeEXTRADATA = 0;

	DTEntry memory_map;
	MemoryMapFileInfo const *trustCacheRange;
	unsigned int trustCacheRangeSize;
	int err;

	err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
	assert(err == kSuccess);

	err = SecureDTGetProperty(memory_map, "TrustCache", (const void**)&trustCacheRange, &trustCacheRangeSize);
	if (err == kSuccess) {
		assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo));

		segEXTRADATA = phystokv(trustCacheRange->paddr);
		segSizeEXTRADATA = trustCacheRange->length;
	}

	etext = (vm_offset_t) segTEXTB + segSizeTEXT;
	sdata = (vm_offset_t) segDATAB;
	edata = (vm_offset_t) segDATAB + segSizeDATA;
	end_kern = round_page(getlastaddr()); /* Force end to next page */
	/*
	 * Special handling for the __DATA,__const *section*.
	 * A page of padding named lastkerneldataconst is at the end of the __DATA,__const
	 * so we can safely truncate the size. __DATA,__const is also aligned, but
	 * just in case we will round that to a page, too.
	 */
	segDATA = getsegbynamefromheader(&_mh_execute_header, "__DATA");
	sectDCONST = getsectbynamefromheader(&_mh_execute_header, "__DATA", "__const");
	sectCONSTB = sectDCONST->addr;
	sectSizeCONST = sectDCONST->size;

	extern vm_offset_t _lastkerneldataconst;
	extern vm_size_t _lastkerneldataconst_padsize;
	vm_offset_t sdataconst = sectCONSTB;

	/* this should already be aligned, but so that we can protect we round */
	sectCONSTB = round_page(sectCONSTB);

	/* make sure lastkerneldataconst is really last and the right size */
	if ((_lastkerneldataconst == sdataconst + sectSizeCONST - _lastkerneldataconst_padsize) &&
	    (_lastkerneldataconst_padsize >= PAGE_SIZE)) {
		sectSizeCONST = trunc_page(sectSizeCONST);
	} else {
		/* otherwise see if next section is aligned then protect up to it */
		kernel_section_t *next_sect = nextsect(segDATA, sectDCONST);

		if (next_sect && ((next_sect->addr & PAGE_MASK) == 0)) {
			sectSizeCONST = next_sect->addr - sectCONSTB;
		} else {
			/* lastly just go ahead and truncate so we try to protect something */
			sectSizeCONST = trunc_page(sectSizeCONST);
		}
	}

	/* if the computed range looks wrong, skip the separate __const protection */
	if ((sectSizeCONST == 0) || (sectCONSTB < sdata) || (sectCONSTB + sectSizeCONST) >= edata) {
		doconstro = FALSE;
	}
	vm_prelink_stext = segPRELINKTEXTB;
	vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
	vm_prelink_sinfo = segPRELINKINFOB;
	vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
	vm_slinkedit = segLINKB;
	vm_elinkedit = segLINKB + segSizeLINK;

	sane_size = mem_size - (avail_start - gPhysBase);

	vm_kernel_slide = gVirtBase - VM_KERNEL_LINK_ADDRESS;
	vm_kernel_stext = segTEXTB;
	vm_kernel_etext = segTEXTB + segSizeTEXT;
	vm_kernel_base = gVirtBase;
	vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
	vm_kext_base = segPRELINKTEXTB;
	vm_kext_top = vm_kext_base + segSizePRELINKTEXT;
	vm_kernel_slid_base = segTEXTB;
	vm_kernel_slid_top = vm_kext_top;

	pmap_bootstrap((gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000);

	arm_vm_prot_init(args);

	vm_page_kernelcache_count = (unsigned int) (atop_64(end_kern - segLOWEST));
	/*
	 * To avoid recursing while trying to init the vm_page and object mechanisms,
	 * pre-initialize kernel pmap page table pages to cover this address range:
	 * 2MB + FrameBuffer size + 3MB for each 256MB segment
	 */
	off_end = (2 + (mem_segments * 3)) << 20;
	off_end += (unsigned int) round_page(args->Video.v_height * args->Video.v_rowBytes);
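
	/*
	 * Worked example added for illustration (not in the original source): with
	 * mem_size capped at 512MB, mem_segments is 2, so off_end starts at
	 * (2 + 2 * 3) << 20 = 8MB; a 640x1136 display with 2560-byte rows then adds
	 * round_page(1136 * 2560), roughly 2.8MB, of framebuffer coverage on top.
	 */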
	for (off = 0, va = (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) {
		pt_entry_t *ptp;
		pmap_paddr_t ptp_phys;

		ptp = (pt_entry_t *) phystokv(avail_start);
		ptp_phys = (pmap_paddr_t)avail_start;
		avail_start += ARM_PGBYTES;
		bzero(ptp, ARM_PGBYTES);
		pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE);
		tte = &cpu_tte[ttenum(va + off)];
		*tte = pa_to_tte((ptp_phys)) | ARM_TTE_TYPE_TABLE;
		*(tte + 1) = pa_to_tte((ptp_phys + 0x400)) | ARM_TTE_TYPE_TABLE;
		*(tte + 2) = pa_to_tte((ptp_phys + 0x800)) | ARM_TTE_TYPE_TABLE;
		*(tte + 3) = pa_to_tte((ptp_phys + 0xC00)) | ARM_TTE_TYPE_TABLE;
	}
	set_mmu_ttb(cpu_ttep);
	set_mmu_ttb_alternate(cpu_ttep);

#if __arm__ && __ARM_USER_PROTECT__
	{
		unsigned int ttbr0_val, ttbr1_val;
		thread_t thread = current_thread();

		__asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
		__asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		thread->machine.uptw_ttb = ttbr0_val;
		thread->machine.kptw_ttb = ttbr1_val;
	}
#endif

	avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;

	first_avail = avail_start;
	patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
}