/*
 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_debug.h>
#include <mach_kdp.h>
#include <debug.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>
#include <arm/misc_protos.h>
#include <arm/lowglobals.h>

#include <pexpert/arm/boot.h>
#include <pexpert/device_tree.h>

#include <libkern/kernel_mach_header.h>

/*
 * Denotes the end of xnu.
 */
extern void *last_kernel_symbol;

/*
 * KASLR parameters
 */
vm_offset_t vm_kernel_base;
vm_offset_t vm_kernel_top;
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;
vm_offset_t vm_kernel_slid_base;
vm_offset_t vm_kernel_slid_top;
vm_offset_t vm_kext_base;
vm_offset_t vm_kext_top;
vm_offset_t vm_prelink_stext;
vm_offset_t vm_prelink_etext;
vm_offset_t vm_prelink_sinfo;
vm_offset_t vm_prelink_einfo;
vm_offset_t vm_slinkedit;
vm_offset_t vm_elinkedit;
vm_offset_t vm_prelink_sdata;
vm_offset_t vm_prelink_edata;

vm_offset_t vm_kernel_builtinkmod_text;
vm_offset_t vm_kernel_builtinkmod_text_end;

unsigned long gVirtBase, gPhysBase, gPhysSize; /* Used by <mach/arm/vm_param.h> */

vm_offset_t mem_size;       /* Size of actual physical memory present
                             * minus any performance buffer and possibly
                             * limited by mem_limit in bytes */
uint64_t mem_actual;        /* The "One True" physical memory size;
                             * actually, it's the highest physical
                             * address + 1 */
uint64_t max_mem;           /* kernel/vm managed memory, adjusted by maxmem */
uint64_t max_mem_actual;    /* Actual size of physical memory (bytes), adjusted
                             * by the maxmem boot-arg */
uint64_t sane_size;         /* Memory size to use for defaults
                             * calculations */
addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel
                                                * virtual address known
                                                * to the VM system */

vm_offset_t segEXTRADATA;
unsigned long segSizeEXTRADATA;
vm_offset_t segLOWESTTEXT;
vm_offset_t segLOWEST;
static vm_offset_t segTEXTB;
static unsigned long segSizeTEXT;
static vm_offset_t segDATAB;
static unsigned long segSizeDATA;
vm_offset_t segLINKB;
static unsigned long segSizeLINK;
static vm_offset_t segKLDB;
static unsigned long segSizeKLD;
static vm_offset_t segKLDDATAB;
static unsigned long segSizeKLDDATA;
static vm_offset_t segLASTB;
static vm_offset_t segLASTDATACONSTB;
static unsigned long segSizeLASTDATACONST;
static unsigned long segSizeLAST;
static vm_offset_t sectCONSTB;
static unsigned long sectSizeCONST;
vm_offset_t segBOOTDATAB;
unsigned long segSizeBOOTDATA;
extern vm_offset_t intstack_low_guard;
extern vm_offset_t intstack_high_guard;
extern vm_offset_t fiqstack_high_guard;

vm_offset_t segPRELINKTEXTB;
unsigned long segSizePRELINKTEXT;
vm_offset_t segPRELINKINFOB;
unsigned long segSizePRELINKINFO;

vm_offset_t segLOWESTKC;
vm_offset_t segHIGHESTKC;
vm_offset_t segLOWESTROKC;
vm_offset_t segHIGHESTROKC;
vm_offset_t segLOWESTAuxKC;
vm_offset_t segHIGHESTAuxKC;
vm_offset_t segLOWESTROAuxKC;
vm_offset_t segHIGHESTROAuxKC;
vm_offset_t segLOWESTRXAuxKC;
vm_offset_t segHIGHESTRXAuxKC;
vm_offset_t segHIGHESTNLEAuxKC;

static kernel_segment_command_t *segDATA;
static boolean_t doconstro = TRUE;

vm_offset_t end_kern, etext, sdata, edata;

/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Page_size must already be set.
 *
 * Parameters:
 * first_avail: first available physical page -
 *              after kernel page tables
 * avail_start: PA of first physical page
 * avail_end:   PA of last physical page
 */
vm_offset_t first_avail;
vm_offset_t static_memory_end;
pmap_paddr_t avail_start, avail_end;

#define MEM_SIZE_MAX 0x40000000

extern vm_offset_t ExceptionVectorsBase; /* the code we want to load there */

/* The translation tables have to be 16KB aligned */
#define round_x_table(x) \
    (((pmap_paddr_t)(x) + (ARM_PGBYTES<<2) - 1) & ~((ARM_PGBYTES<<2) - 1))

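/*
 * Note: with the ARMv7 short-descriptor format used here, an L1
 * translation table holds 4096 32-bit entries (one per 1MB of virtual
 * address space), i.e. 16KB, which is exactly the four ARM_PGBYTES
 * (4KB) pages that the (ARM_PGBYTES<<2) rounding above accounts for.
 */
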
vm_map_address_t
phystokv(pmap_paddr_t pa)
{
    return pa - gPhysBase + gVirtBase;
}

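/*
 * Note: phystokv() assumes the kernel's static region is one linear
 * VA->PA window (VA = PA - gPhysBase + gVirtBase), so it is only
 * meaningful for physical addresses inside that window; this is why
 * arm_vm_page_granular_helper() below bails out once pa >= avail_end.
 */
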
static void
arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va,
    int pte_prot_APX, int pte_prot_XN)
{
    if (va & ARM_TT_L1_PT_OFFMASK) { /* ragged edge hanging over an ARM_TT_L1_PT_SIZE boundary */
        va &= (~ARM_TT_L1_PT_OFFMASK);
        tt_entry_t *tte = &cpu_tte[ttenum(va)];
        tt_entry_t tmplate = *tte;
        pmap_paddr_t pa;
        pt_entry_t *ppte, ptmp;
        unsigned int i;

        pa = va - gVirtBase + gPhysBase;

        if (pa >= avail_end) {
            return;
        }

        assert(_end >= va);

        if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
            /* pick up the existing page table. */
            ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
        } else {
            /* TTE must be reincarnated COARSE. */
            ppte = (pt_entry_t *)phystokv(avail_start);
            pmap_paddr_t l2table = avail_start;
            avail_start += ARM_PGBYTES;
            bzero(ppte, ARM_PGBYTES);

            for (i = 0; i < 4; ++i) {
                tte[i] = pa_to_tte(l2table + (i * 0x400)) | ARM_TTE_TYPE_TABLE;
            }
        }

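        /*
         * Note: a short-descriptor coarse (L2) table is 256 32-bit entries
         * (1KB) covering 1MB, so the single ARM_PGBYTES page allocated in
         * the else-branch above holds the four consecutive L2 tables (at
         * 0x400-byte strides) that back one ARM_TT_L1_PT_SIZE region.
         */
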
        vm_offset_t len = _end - va;
        if ((pa + len) > avail_end) {
            _end -= (pa + len - avail_end);
        }
        assert((start - gVirtBase + gPhysBase) >= gPhysBase);

        /* Apply the desired protections to the specified page range */
        for (i = 0; i < (ARM_PGBYTES / sizeof(*ppte)); i++) {
            if (start <= va && va < _end) {
                ptmp = pa | ARM_PTE_AF | ARM_PTE_SH | ARM_PTE_TYPE;
                ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
                ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);
                if (pte_prot_XN) {
                    ptmp = ptmp | ARM_PTE_NX;
                }

                ppte[i] = ptmp;
            }

            va += ARM_PGBYTES;
            pa += ARM_PGBYTES;
        }
    }
}

static void
arm_vm_page_granular_prot(vm_offset_t start, unsigned long size,
    int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int force_page_granule)
{
    vm_offset_t _end = start + size;
    vm_offset_t align_start = (start + ARM_TT_L1_PT_OFFMASK) & ~ARM_TT_L1_PT_OFFMASK;
    vm_offset_t align_end = _end & ~ARM_TT_L1_PT_OFFMASK;

    arm_vm_page_granular_helper(start, _end, start, pte_prot_APX, pte_prot_XN);

    while (align_start < align_end) {
        if (force_page_granule) {
            arm_vm_page_granular_helper(align_start, align_end, align_start + 1,
                pte_prot_APX, pte_prot_XN);
        } else {
            tt_entry_t *tte = &cpu_tte[ttenum(align_start)];
            for (int i = 0; i < 4; ++i) {
                tt_entry_t tmplate = tte[i];

                tmplate = (tmplate & ~ARM_TTE_BLOCK_APMASK) | ARM_TTE_BLOCK_AP(pte_prot_APX);
                tmplate = (tmplate & ~ARM_TTE_BLOCK_NX_MASK);
                if (tte_prot_XN) {
                    tmplate = tmplate | ARM_TTE_BLOCK_NX;
                }

                tte[i] = tmplate;
            }
        }
        align_start += ARM_TT_L1_PT_SIZE;
    }

    arm_vm_page_granular_helper(start, _end, _end, pte_prot_APX, pte_prot_XN);
}

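/*
 * Note: arm_vm_page_granular_prot() above applies the unaligned head and
 * tail of the range at 4KB PTE granularity via the helper, while the
 * ARM_TT_L1_PT_SIZE-aligned middle keeps its existing 1MB block (section)
 * mappings, rewritten four L1 entries at a time, unless force_page_granule
 * requests page granularity throughout.
 */
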
static inline void
arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
    arm_vm_page_granular_prot(start, size, 1, AP_RONA, 1, force_page_granule);
}

static inline void
arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, int force_page_granule)
{
    arm_vm_page_granular_prot(start, size, 0, AP_RONA, 0, force_page_granule);
}

static inline void
arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
    arm_vm_page_granular_prot(start, size, 1, AP_RWNA, 1, force_page_granule);
}

static inline void
arm_vm_page_granular_RWX(vm_offset_t start, unsigned long size, int force_page_granule)
{
    arm_vm_page_granular_prot(start, size, 0, AP_RWNA, 0, force_page_granule);
}

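/*
 * Note: the wrappers above all select kernel-only access permissions:
 *   RNX  = read-only,  non-executable (AP_RONA, XN set)
 *   ROX  = read-only,  executable     (AP_RONA, XN clear)
 *   RWNX = read-write, non-executable (AP_RWNA, XN set)
 *   RWX  = read-write, executable     (AP_RWNA, XN clear)
 * AP_RONA and AP_RWNA both deny all user-mode access.
 */
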
void
arm_vm_prot_init(boot_args * args)
{
#if __ARM_PTE_PHYSMAP__
    boolean_t force_coarse_physmap = TRUE;
#else
    boolean_t force_coarse_physmap = FALSE;
#endif
    /*
     * Enforce W^X protections on segments that have been identified so far. This will be
     * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions()
     */

    /*
     * Protection on kernel text is loose here to allow shenanigans early on (e.g. copying exception vectors)
     * and storing an address into "error_buffer" (see arm_init.c) !?!
     * These protections are tightened in arm_vm_prot_finalize()
     */
    arm_vm_page_granular_RWX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

    if (doconstro) {
        /*
         * We map __DATA with 3 calls, so that the __const section can have its
         * protections changed independently of the rest of the __DATA segment.
         */
        arm_vm_page_granular_RWNX(segDATAB, sectCONSTB - segDATAB, FALSE);
        arm_vm_page_granular_RNX(sectCONSTB, sectSizeCONST, FALSE);
        arm_vm_page_granular_RWNX(sectCONSTB + sectSizeCONST, (segDATAB + segSizeDATA) - (sectCONSTB + sectSizeCONST), FALSE);
    } else {
        /* If we aren't protecting const, just map DATA as a single blob. */
        arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, FALSE);
    }
    arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, TRUE);
    arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, TRUE);
    arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, TRUE);
    arm_vm_page_granular_RNX((vm_offset_t)&fiqstack_high_guard, PAGE_MAX_SIZE, TRUE);

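    /*
     * Note: the interrupt and FIQ stack guard pages above are mapped
     * read-only and non-executable at page granularity so that a stack
     * overflow faults immediately instead of silently corrupting the
     * adjacent data.
     */
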
    arm_vm_page_granular_ROX(segKLDB, segSizeKLD, force_coarse_physmap);
    arm_vm_page_granular_RNX(segKLDDATAB, segSizeKLDDATA, force_coarse_physmap);
    arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, force_coarse_physmap);
    arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this
    if (segLASTDATACONSTB) {
        arm_vm_page_granular_RWNX(segLASTDATACONSTB, segSizeLASTDATACONST, FALSE); // __LASTDATA_CONST may be empty, but we cannot assume this
    }
    arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions
    arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT,
        end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary
    arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, force_coarse_physmap); // Device Tree, RAM Disk (if present), bootArgs, trust caches
    arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, FALSE); // tighter trust cache protection
    arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData), ARM_PGBYTES * 8, FALSE); // boot_tte, cpu_tte

    /*
     * FIXME: Any page table pages that arm_vm_page_granular_* created with ROX entries in the range
     * phystokv(args->topOfKernelData) to phystokv(prot_avail_start) should themselves be
     * write protected in the static mapping of that range.
     * [Page table pages whose page table entries grant execute (X) privileges should themselves be
     * marked read-only. This aims to thwart attacks that replace the X entries with vectors to evil code
     * (relying on some thread of execution to eventually arrive at what previously was a trusted routine).]
     */
    arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 8, ARM_PGBYTES, FALSE); /* Excess physMem over 1MB */
    arm_vm_page_granular_RWX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* refined in finalize */

    /* Map the remainder of xnu owned memory. */
    arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 10,
        static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap); /* rest of physmem */

    /*
     * Special case write protection for the mapping of ExceptionVectorsBase (EVB) at 0xFFFF0000.
     * Recall that start.s handcrafted a page table page for EVB mapping
     */
    pmap_paddr_t p = (pmap_paddr_t)(args->topOfKernelData) + (ARM_PGBYTES * 9);
    pt_entry_t *ppte = (pt_entry_t *)phystokv(p);
    pmap_init_pte_page(kernel_pmap, ppte, HIGH_EXC_VECTORS & ~ARM_TT_L1_PT_OFFMASK, 2, TRUE);

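    /*
     * Note: HIGH_EXC_VECTORS is the ARM high exception vector base
     * (0xFFFF0000). The page table page initialized above covers the
     * ARM_TT_L1_PT_SIZE region around it; the index computed below
     * (offset within that region >> ARM_TT_L2_SHIFT) selects the one
     * 4KB page holding the vectors so that it alone is made read-only.
     */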
    int idx = (HIGH_EXC_VECTORS & ARM_TT_L1_PT_OFFMASK) >> ARM_TT_L2_SHIFT;
    pt_entry_t ptmp = ppte[idx];

    ptmp = (ptmp & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA);

    ppte[idx] = ptmp;
}

void
arm_vm_prot_finalize(boot_args * args)
{
    cpu_stack_alloc(&BootCpuData);
    ml_static_mfree(segBOOTDATAB, segSizeBOOTDATA);
    /*
     * Naively we could have:
     * arm_vm_page_granular_ROX(segTEXTB, segSizeTEXT, FALSE);
     * but, at present, that would miss a 1MB boundary at the beginning of the segment and
     * so would force a (wasteful) coarse page (e.g. when gVirtBase is 0x80000000, segTEXTB is 0x80001000).
     */
    arm_vm_page_granular_ROX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

    arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* commpage, EVB */

    flush_mmu_tlb();
}

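/*
 * Note: arm_vm_prot_finalize() edits live translation table entries, so
 * the trailing flush_mmu_tlb() is what actually retires any stale TLB
 * entries that still carry the looser boot-time permissions.
 */
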
/* used in the chosen/memory-map node, populated by iBoot. */
typedef struct MemoryMapFileInfo {
    vm_offset_t paddr;
    size_t length;
} MemoryMapFileInfo;


void
arm_vm_init(uint64_t memory_size, boot_args * args)
{
    vm_map_address_t va, off, off_end;
    tt_entry_t *tte, *tte_limit;
    pmap_paddr_t boot_ttep;
    tt_entry_t *boot_tte;
    uint32_t mem_segments;
    kernel_section_t *sectDCONST;

    /*
     * Get the virtual and physical memory base from boot_args.
     */
    gVirtBase = args->virtBase;
    gPhysBase = args->physBase;
    gPhysSize = args->memSize;
    mem_size = args->memSize;
    mem_actual = args->memSizeActual ? args->memSizeActual : mem_size;
    if (mem_size > MEM_SIZE_MAX) {
        mem_size = MEM_SIZE_MAX;
    }
    if ((memory_size != 0) && (mem_size > memory_size)) {
        mem_size = memory_size;
        max_mem_actual = memory_size;
    } else {
        max_mem_actual = mem_actual;
    }

    static_memory_end = gVirtBase + mem_size;

    /* Calculate the number of ~256MB segments of memory */
    mem_segments = (mem_size + 0x0FFFFFFF) >> 28;

    /*
     * Copy the boot MMU translation table to create the system MMU
     * translation table, which starts right after the boot table.
     * The translation table base virtual address is aligned at the
     * end of the executable.
     */
    boot_ttep = args->topOfKernelData;
    boot_tte = (tt_entry_t *) phystokv(boot_ttep);

    cpu_ttep = boot_ttep + ARM_PGBYTES * 4;
    cpu_tte = (tt_entry_t *) phystokv(cpu_ttep);

    bcopy(boot_tte, cpu_tte, ARM_PGBYTES * 4);

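    /*
     * Note (derived from this file and arm_vm_prot_init()): the pages at
     * args->topOfKernelData appear to be laid out as pages 0-3 boot L1
     * table, pages 4-7 kernel (cpu_tte) L1 table, page 8 excess physmem,
     * page 9 the handcrafted commpage/exception-vector page table page,
     * with avail_start beginning at page 10.
     */
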
    /*
     * Clear out any V==P mappings that may have been established in e.g. start.s
     */
    tte = &cpu_tte[ttenum(gPhysBase)];
    tte_limit = &cpu_tte[ttenum(gPhysBase + gPhysSize)];

    /* Hands off [gVirtBase, gVirtBase + gPhysSize) please. */
    if (gPhysBase < gVirtBase) {
        if (gPhysBase + gPhysSize > gVirtBase) {
            tte_limit = &cpu_tte[ttenum(gVirtBase)];
        }
    } else {
        if (gPhysBase < gVirtBase + gPhysSize) {
            tte = &cpu_tte[ttenum(gVirtBase + gPhysSize)];
        }
    }

    while (tte < tte_limit) {
        *tte = ARM_TTE_TYPE_FAULT;
        tte++;
    }

    /* Skip 6 pages (four L1 + two L2 entries) */
    avail_start = cpu_ttep + ARM_PGBYTES * 6;
    avail_end = gPhysBase + mem_size;

    /*
     * Now retrieve addresses for end, edata, and etext
     * from Mach-O headers for the currently running 32-bit kernel.
     */
    segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
    segLOWESTTEXT = segTEXTB;
    segLOWEST = segLOWESTTEXT;
    segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
    segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
    segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
    segKLDDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLDDATA", &segSizeKLDDATA);
    segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);
    segLASTDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LASTDATA_CONST", &segSizeLASTDATACONST);
    segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
    segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);
    segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA);

    segEXTRADATA = 0;
    segSizeEXTRADATA = 0;

    DTEntry memory_map;
    MemoryMapFileInfo const *trustCacheRange;
    unsigned int trustCacheRangeSize;
    int err;

    err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
    assert(err == kSuccess);

    err = SecureDTGetProperty(memory_map, "TrustCache", (const void**)&trustCacheRange, &trustCacheRangeSize);
    if (err == kSuccess) {
        assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo));

        segEXTRADATA = phystokv(trustCacheRange->paddr);
        segSizeEXTRADATA = trustCacheRange->length;
    }

    etext = (vm_offset_t) segTEXTB + segSizeTEXT;
    sdata = (vm_offset_t) segDATAB;
    edata = (vm_offset_t) segDATAB + segSizeDATA;
    end_kern = round_page(getlastaddr()); /* Force end to next page */

    /*
     * Special handling for the __DATA,__const *section*.
     * A page of padding named lastkerneldataconst is at the end of the __DATA,__const
     * so we can safely truncate the size. __DATA,__const is also aligned, but
     * just in case we will round that to a page, too.
     */
    segDATA = getsegbynamefromheader(&_mh_execute_header, "__DATA");
    sectDCONST = getsectbynamefromheader(&_mh_execute_header, "__DATA", "__const");
    sectCONSTB = sectDCONST->addr;
    sectSizeCONST = sectDCONST->size;

    if (doconstro) {
        extern vm_offset_t _lastkerneldataconst;
        extern vm_size_t _lastkerneldataconst_padsize;
        vm_offset_t sdataconst = sectCONSTB;

        /* this should already be aligned, but round it so we can protect it */
        sectCONSTB = round_page(sectCONSTB);

        /* make sure lastkerneldataconst is really last and the right size */
        if ((_lastkerneldataconst == sdataconst + sectSizeCONST - _lastkerneldataconst_padsize) &&
            (_lastkerneldataconst_padsize >= PAGE_SIZE)) {
            sectSizeCONST = trunc_page(sectSizeCONST);
        } else {
            /* otherwise see if the next section is aligned, then protect up to it */
            kernel_section_t *next_sect = nextsect(segDATA, sectDCONST);

            if (next_sect && ((next_sect->addr & PAGE_MASK) == 0)) {
                sectSizeCONST = next_sect->addr - sectCONSTB;
            } else {
                /* lastly just go ahead and truncate so we try to protect something */
                sectSizeCONST = trunc_page(sectSizeCONST);
            }
        }

        /* sanity check */
        if ((sectSizeCONST == 0) || (sectCONSTB < sdata) || (sectCONSTB + sectSizeCONST) >= edata) {
            doconstro = FALSE;
        }
    }

    vm_set_page_size();

    vm_prelink_stext = segPRELINKTEXTB;
    vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
    vm_prelink_sinfo = segPRELINKINFOB;
    vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
    vm_slinkedit = segLINKB;
    vm_elinkedit = segLINKB + segSizeLINK;

    sane_size = mem_size - (avail_start - gPhysBase);
    max_mem = mem_size;
    vm_kernel_slide = gVirtBase - VM_KERNEL_LINK_ADDRESS;
    vm_kernel_stext = segTEXTB;
    vm_kernel_etext = segTEXTB + segSizeTEXT;
    vm_kernel_base = gVirtBase;
    vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
    vm_kext_base = segPRELINKTEXTB;
    vm_kext_top = vm_kext_base + segSizePRELINKTEXT;
    vm_kernel_slid_base = segTEXTB;
    vm_kernel_slid_top = vm_kext_top;

    pmap_bootstrap((gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000);

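    /*
     * Note: (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000 rounds the
     * end of the 1GB static region up to the next 4MB boundary, i.e. the
     * first virtual address past the statically mapped window; the same
     * expression seeds the page-table pre-initialization loop below.
     */
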
    arm_vm_prot_init(args);

    vm_page_kernelcache_count = (unsigned int) (atop_64(end_kern - segLOWEST));

    /*
     * To avoid recursing while trying to init the vm_page and object
     * mechanisms, pre-initialize kernel pmap page table pages to cover
     * this address range:
     * 2MB + FrameBuffer size + 3MB for each 256MB segment
     */
    off_end = (2 + (mem_segments * 3)) << 20;
    off_end += (unsigned int) round_page(args->Video.v_height * args->Video.v_rowBytes);

    for (off = 0, va = (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) {
        pt_entry_t *ptp;
        pmap_paddr_t ptp_phys;

        ptp = (pt_entry_t *) phystokv(avail_start);
        ptp_phys = (pmap_paddr_t)avail_start;
        avail_start += ARM_PGBYTES;
        bzero(ptp, ARM_PGBYTES);
        pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE);
        tte = &cpu_tte[ttenum(va + off)];
        *tte = pa_to_tte((ptp_phys)) | ARM_TTE_TYPE_TABLE;
        *(tte + 1) = pa_to_tte((ptp_phys + 0x400)) | ARM_TTE_TYPE_TABLE;
        *(tte + 2) = pa_to_tte((ptp_phys + 0x800)) | ARM_TTE_TYPE_TABLE;
        *(tte + 3) = pa_to_tte((ptp_phys + 0xC00)) | ARM_TTE_TYPE_TABLE;
    }

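    /*
     * Note: each pass of the loop above takes one ARM_PGBYTES page from
     * avail_start, carves it into four 1KB coarse L2 tables, and installs
     * them in four consecutive L1 slots, growing the kernel page tables
     * by ARM_TT_L1_PT_SIZE (4MB) per iteration.
     */
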
    set_mmu_ttb(cpu_ttep);
    set_mmu_ttb_alternate(cpu_ttep);
    flush_mmu_tlb();
#if __arm__ && __ARM_USER_PROTECT__
    {
        unsigned int ttbr0_val, ttbr1_val;
        thread_t thread = current_thread();

        __asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
        __asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
        thread->machine.uptw_ttb = ttbr0_val;
        thread->machine.kptw_ttb = ttbr1_val;
    }
#endif
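
    /*
     * Note: with __ARM_USER_PROTECT__, each thread caches its own user
     * (TTBR0) and kernel (TTBR1) table bases; the MRC p15, c2 reads in
     * the block above seed the boot thread's copies from the registers
     * that were just programmed.
     */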
    avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;

    first_avail = avail_start;
    patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
}