apple/xnu (xnu-4903.241.1) / osfmk / arm / arm_vm_init.c
1 /*
2 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
3 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 *
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
26 *
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 */
29 #include <mach_debug.h>
30 #include <mach_kdp.h>
31 #include <debug.h>
32
33 #include <mach/vm_types.h>
34 #include <mach/vm_param.h>
35 #include <mach/thread_status.h>
36 #include <kern/misc_protos.h>
37 #include <kern/assert.h>
38 #include <kern/cpu_number.h>
39 #include <kern/thread.h>
40 #include <vm/vm_map.h>
41 #include <vm/vm_page.h>
42 #include <vm/pmap.h>
43
44 #include <arm/proc_reg.h>
45 #include <arm/caches_internal.h>
46 #include <arm/cpu_data_internal.h>
47 #include <arm/pmap.h>
48 #include <arm/misc_protos.h>
49 #include <arm/lowglobals.h>
50
51 #include <pexpert/arm/boot.h>
52 #include <pexpert/device_tree.h>
53
54 #include <libkern/kernel_mach_header.h>
55
56 /*
57 * Denotes the end of xnu.
58 */
59 extern void *last_kernel_symbol;
60
61 /*
62 * KASLR parameters
63 */
64 vm_offset_t vm_kernel_base;
65 vm_offset_t vm_kernel_top;
66 vm_offset_t vm_kernel_stext;
67 vm_offset_t vm_kernel_etext;
68 vm_offset_t vm_kernel_slide;
69 vm_offset_t vm_kernel_slid_base;
70 vm_offset_t vm_kernel_slid_top;
71 vm_offset_t vm_kext_base;
72 vm_offset_t vm_kext_top;
73 vm_offset_t vm_prelink_stext;
74 vm_offset_t vm_prelink_etext;
75 vm_offset_t vm_prelink_sinfo;
76 vm_offset_t vm_prelink_einfo;
77 vm_offset_t vm_slinkedit;
78 vm_offset_t vm_elinkedit;
79 vm_offset_t vm_prelink_sdata;
80 vm_offset_t vm_prelink_edata;
81
82 vm_offset_t vm_kernel_builtinkmod_text;
83 vm_offset_t vm_kernel_builtinkmod_text_end;
84
85 unsigned long gVirtBase, gPhysBase, gPhysSize; /* Used by <mach/arm/vm_param.h> */
86
87 vm_offset_t mem_size; /* Size of actual physical memory present
88 * minus any performance buffer and possibly
89 * limited by mem_limit in bytes */
90 uint64_t mem_actual; /* The "One True" physical memory size
91 * actually, it's the highest physical
92 * address + 1 */
93 uint64_t max_mem; /* Size of physical memory (bytes), adjusted
94 * by maxmem */
95 uint64_t sane_size; /* Memory size to use for defaults
96 * calculations */
97 addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel
98 * virtual address known
99 * to the VM system */
100
101 vm_offset_t segEXTRADATA;
102 unsigned long segSizeEXTRADATA;
103 vm_offset_t segLOWESTTEXT;
104 static vm_offset_t segTEXTB;
105 static unsigned long segSizeTEXT;
106 static vm_offset_t segDATAB;
107 static unsigned long segSizeDATA;
108 static vm_offset_t segLINKB;
109 static unsigned long segSizeLINK;
110 static vm_offset_t segKLDB;
111 static unsigned long segSizeKLD;
112 static vm_offset_t segLASTB;
113 static unsigned long segSizeLAST;
114 static vm_offset_t sectCONSTB;
115 static unsigned long sectSizeCONST;
116 vm_offset_t segBOOTDATAB;
117 unsigned long segSizeBOOTDATA;
118 extern vm_offset_t intstack_low_guard;
119 extern vm_offset_t intstack_high_guard;
120 extern vm_offset_t fiqstack_high_guard;
121
122 vm_offset_t segPRELINKTEXTB;
123 unsigned long segSizePRELINKTEXT;
124 vm_offset_t segPRELINKINFOB;
125 unsigned long segSizePRELINKINFO;
126
127 static kernel_segment_command_t *segDATA;
128 static boolean_t doconstro = TRUE;
129
130 vm_offset_t end_kern, etext, sdata, edata;
131
132 /*
133 * Bootstrap the system enough to run with virtual memory.
134 * Map the kernel's code and data, and allocate the system page table.
135 * Page_size must already be set.
136 *
137 * Parameters:
138 * first_avail: first available physical page -
139 * after kernel page tables
140 * avail_start: PA of first physical page
141 * avail_end : PA of last physical page
142 */
143 vm_offset_t first_avail;
144 vm_offset_t static_memory_end;
145 pmap_paddr_t avail_start, avail_end;
146
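/* Upper bound (1 GB) on the statically mapped memory size; mem_size is clamped to this in arm_vm_init(). */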
147 #define MEM_SIZE_MAX 0x40000000
148
149 extern vm_offset_t ExceptionVectorsBase; /* the code we want to load there */
150
151 /* The translation tables have to be 16KB aligned */
152 #define round_x_table(x) \
153 (((pmap_paddr_t)(x) + (ARM_PGBYTES<<2) - 1) & ~((ARM_PGBYTES<<2) - 1))
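/* e.g. round_x_table(0x80345000) == 0x80348000 when ARM_PGBYTES is 4KB (i.e. 16KB alignment) */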
154
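/*
 * phystokv: convert a physical address to its kernel virtual address in the
 * kernel's static mapping, using the gPhysBase/gVirtBase offsets recorded
 * from boot_args in arm_vm_init().
 */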
155 vm_map_address_t
156 phystokv(pmap_paddr_t pa)
157 {
158 return (pa - gPhysBase + gVirtBase);
159 }
160
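/*
 * arm_vm_page_granular_helper: apply page (PTE) granular protections to the
 * ragged portion of [start, _end) around 'va', i.e. the part that does not
 * cover a whole ARM_TT_L1_PT_SIZE region.  If the covering TTEs are not yet
 * COARSE page-table entries, a page of L2 tables is carved out of avail_start
 * and the four TTEs are repointed at its four 1KB sub-tables (0x400 apart)
 * before the individual PTE protections are applied.
 */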
161 static void
162 arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va,
163 int pte_prot_APX, int pte_prot_XN)
164 {
165 if (va & ARM_TT_L1_PT_OFFMASK) { /* ragged edge hanging over an ARM_TT_L1_PT_SIZE boundary */
166 va &= (~ARM_TT_L1_PT_OFFMASK);
167 tt_entry_t *tte = &cpu_tte[ttenum(va)];
168 tt_entry_t tmplate = *tte;
169 pmap_paddr_t pa;
170 pt_entry_t *ppte, ptmp;
171 unsigned int i;
172
173 pa = va - gVirtBase + gPhysBase;
174
175 if (pa >= avail_end)
176 return;
177
178 assert(_end >= va);
179
180 if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
181 /* pick up the existing page table. */
182 ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
183 } else {
184 /* TTE must be reincarnated COARSE. */
185 ppte = (pt_entry_t *)phystokv(avail_start);
186 avail_start += ARM_PGBYTES;
187 bzero(ppte, ARM_PGBYTES);
188
189 for (i = 0; i < 4; ++i)
190 tte[i] = pa_to_tte(kvtophys((vm_offset_t)ppte) + (i * 0x400)) | ARM_TTE_TYPE_TABLE;
191 }
192
193 vm_offset_t len = _end - va;
194 if ((pa + len) > avail_end)
195 _end -= (pa + len - avail_end);
196 assert((start - gVirtBase + gPhysBase) >= gPhysBase);
197
198 /* Apply the desired protections to the specified page range */
199 for (i = 0; i < (ARM_PGBYTES / sizeof(*ppte)); i++) {
200 if (start <= va && va < _end) {
201
202 ptmp = pa | ARM_PTE_AF | ARM_PTE_SH | ARM_PTE_TYPE;
203 ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
204 ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);
205 if (pte_prot_XN)
206 ptmp = ptmp | ARM_PTE_NX;
207
208 ppte[i] = ptmp;
209 }
210
211 va += ARM_PGBYTES;
212 pa += ARM_PGBYTES;
213 }
214 }
215 }
216
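/*
 * arm_vm_page_granular_prot: apply the requested protections to
 * [start, start + size).  Whole ARM_TT_L1_PT_SIZE regions are normally updated
 * at section (block) granularity by rewriting their TTEs directly; the
 * unaligned edges, and every region when force_page_granule is set, are pushed
 * down to page granularity via arm_vm_page_granular_helper().
 */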
217 static void
218 arm_vm_page_granular_prot(vm_offset_t start, unsigned long size,
219 int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int force_page_granule)
220 {
221 vm_offset_t _end = start + size;
222 vm_offset_t align_start = (start + ARM_TT_L1_PT_OFFMASK) & ~ARM_TT_L1_PT_OFFMASK;
223 vm_offset_t align_end = _end & ~ARM_TT_L1_PT_OFFMASK;
224
225 arm_vm_page_granular_helper(start, _end, start, pte_prot_APX, pte_prot_XN);
226
227 while (align_start < align_end) {
228 if (force_page_granule) {
229 arm_vm_page_granular_helper(align_start, align_end, align_start + 1,
230 pte_prot_APX, pte_prot_XN);
231 } else {
232 tt_entry_t *tte = &cpu_tte[ttenum(align_start)];
233 for (int i = 0; i < 4; ++i) {
234 tt_entry_t tmplate = tte[i];
235
236 tmplate = (tmplate & ~ARM_TTE_BLOCK_APMASK) | ARM_TTE_BLOCK_AP(pte_prot_APX);
237 tmplate = (tmplate & ~ARM_TTE_BLOCK_NX_MASK);
238 if (tte_prot_XN)
239 tmplate = tmplate | ARM_TTE_BLOCK_NX;
240
241 tte[i] = tmplate;
242 }
243 }
244 align_start += ARM_TT_L1_PT_SIZE;
245 }
246
247 arm_vm_page_granular_helper(start, _end, _end, pte_prot_APX, pte_prot_XN);
248 }
249
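/*
 * Convenience wrappers over arm_vm_page_granular_prot():
 *   RNX  = kernel read-only,  no-execute
 *   ROX  = kernel read-only,  executable
 *   RWNX = kernel read/write, no-execute
 *   RWX  = kernel read/write, executable
 */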
250 static inline void
251 arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, int force_page_granule)
252 {
253 arm_vm_page_granular_prot(start, size, 1, AP_RONA, 1, force_page_granule);
254 }
255
256 static inline void
257 arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, int force_page_granule)
258 {
259 arm_vm_page_granular_prot(start, size, 0, AP_RONA, 0, force_page_granule);
260 }
261
262 static inline void
263 arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, int force_page_granule)
264 {
265 arm_vm_page_granular_prot(start, size, 1, AP_RWNA, 1, force_page_granule);
266 }
267
268 static inline void
269 arm_vm_page_granular_RWX(vm_offset_t start, unsigned long size, int force_page_granule)
270 {
271 arm_vm_page_granular_prot(start, size, 0, AP_RWNA, 0, force_page_granule);
272 }
273
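/*
 * arm_vm_prot_init: establish the initial W^X protections for the kernel's
 * segments and for the bootstrap data that follows them, based on the segment
 * ranges discovered in arm_vm_init() and the layout described by boot_args.
 * Several ranges are intentionally left looser than their final state; they
 * are tightened in arm_vm_prot_finalize().
 */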
274 void
275 arm_vm_prot_init(boot_args * args)
276 {
277 #if __ARM_PTE_PHYSMAP__
278 boolean_t force_coarse_physmap = TRUE;
279 #else
280 boolean_t force_coarse_physmap = FALSE;
281 #endif
282 /*
283 * Enforce W^X protections on segments that have been identified so far. This will be
284 * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions()
285 */
286
287 /*
288 * Protection on kernel text is loose here to allow shenanigans early on (e.g. copying exception vectors)
289 * and storing an address into "error_buffer" (see arm_init.c).
290 * These protections are tightened in arm_vm_prot_finalize().
291 */
292 arm_vm_page_granular_RWX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);
293
294 if (doconstro) {
295 /*
296 * We map __DATA with 3 calls, so that the __const section can have its
297 * protections changed independently of the rest of the __DATA segment.
298 */
299 arm_vm_page_granular_RWNX(segDATAB, sectCONSTB - segDATAB, FALSE);
300 arm_vm_page_granular_RNX(sectCONSTB, sectSizeCONST, FALSE);
301 arm_vm_page_granular_RWNX(sectCONSTB + sectSizeCONST, (segDATAB + segSizeDATA) - (sectCONSTB + sectSizeCONST), FALSE);
302 } else {
303 /* If we aren't protecting const, just map DATA as a single blob. */
304 arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, FALSE);
305 }
306 arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, TRUE);
307 arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, TRUE);
308 arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, TRUE);
309 arm_vm_page_granular_RNX((vm_offset_t)&fiqstack_high_guard, PAGE_MAX_SIZE, TRUE);
310
311 arm_vm_page_granular_ROX(segKLDB, segSizeKLD, force_coarse_physmap);
312 arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, force_coarse_physmap);
313 arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this
314 arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions
315 arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT,
316 end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary
317 arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, force_coarse_physmap); // Device Tree, RAM Disk (if present), bootArgs, trust caches
318 arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, FALSE); // tighter trust cache protection
319 arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData), ARM_PGBYTES * 8, FALSE); // boot_tte, cpu_tte
320
321 /*
322 * FIXME: Any page table pages that arm_vm_page_granular_* created with ROX entries in the range
323 * phystokv(args->topOfKernelData) to phystokv(prot_avail_start) should themselves be
324 * write protected in the static mapping of that range.
325 * [Page table pages whose page table entries grant execute (X) privileges should themselves be
326 * marked read-only. This aims to thwart attacks that replace the X entries with vectors to evil code
327 * (relying on some thread of execution to eventually arrive at what previously was a trusted routine).]
328 */
329 arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 8, ARM_PGBYTES, FALSE); /* Excess physMem over 1MB */
330 arm_vm_page_granular_RWX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* refined in finalize */
331
332 /* Map the remainder of xnu owned memory. */
333 arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 10,
334 static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap); /* rest of physmem */
335
336 /*
337 * Special case write protection for the mapping of ExceptionVectorsBase (EVB) at 0xFFFF0000.
338 * Recall that start.s handcrafted a page table page for EVB mapping
339 */
340 pmap_paddr_t p = (pmap_paddr_t)(args->topOfKernelData) + (ARM_PGBYTES * 9);
341 pt_entry_t *ppte = (pt_entry_t *)phystokv(p);
342
343 int idx = (HIGH_EXC_VECTORS & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT;
344 pt_entry_t ptmp = ppte[idx];
345
346 ptmp = (ptmp & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA);
347
348 ppte[idx] = ptmp;
349 }
350
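/*
 * arm_vm_prot_finalize: called once early boot no longer needs writable kernel
 * text.  Allocates the boot CPU's stacks, returns __BOOTDATA to the VM,
 * re-protects kernel text as read-only/executable, tightens the page table
 * page that start.s handcrafted for the commpage/EVB mapping to RW no-execute,
 * and flushes the caches/TLB so the new attributes take effect.
 */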
351 void
352 arm_vm_prot_finalize(boot_args * args)
353 {
354 cpu_stack_alloc(&BootCpuData);
355 ml_static_mfree(segBOOTDATAB, segSizeBOOTDATA);
356 /*
357 * Naively we could have:
358 * arm_vm_page_granular_ROX(segTEXTB, segSizeTEXT, FALSE);
359 * but, at present, that would miss a 1MB boundary at the beginning of the segment and
360 * so would force a (wasteful) coarse page (e.g. when gVirtBase is 0x80000000, segTEXTB is 0x80001000).
361 */
362 arm_vm_page_granular_ROX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);
363
364 arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* commpage, EVB */
365
366 #ifndef __ARM_L1_PTW__
367 FlushPoC_Dcache();
368 #endif
369 flush_mmu_tlb();
370 }
371
372 /* used in the chosen/memory-map node, populated by iBoot. */
373 typedef struct MemoryMapFileInfo {
374 vm_offset_t paddr;
375 size_t length;
376 } MemoryMapFileInfo;
377
378
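/*
 * arm_vm_init: bootstrap the kernel's view of memory.  Records the virtual and
 * physical bases from boot_args, builds the system translation tables from the
 * boot tables that start.s left at topOfKernelData, discovers the kernel's own
 * Mach-O segment layout, publishes the KASLR/prelink globals, calls
 * pmap_bootstrap(), applies the initial protections, and pre-populates enough
 * kernel page table pages for the VM subsystem to start up without recursing.
 */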
379 void
380 arm_vm_init(uint64_t memory_size, boot_args * args)
381 {
382 vm_map_address_t va, off, off_end;
383 tt_entry_t *tte, *tte_limit;
384 pmap_paddr_t boot_ttep;
385 tt_entry_t *boot_tte;
386 uint32_t mem_segments;
387 kernel_section_t *sectDCONST;
388
389 /*
390 * Get the virtual and physical memory base from boot_args.
391 */
392 gVirtBase = args->virtBase;
393 gPhysBase = args->physBase;
394 gPhysSize = args->memSize;
395 mem_size = args->memSize;
396 if ((memory_size != 0) && (mem_size > memory_size))
397 mem_size = memory_size;
398 if (mem_size > MEM_SIZE_MAX )
399 mem_size = MEM_SIZE_MAX;
400 static_memory_end = gVirtBase + mem_size;
401
402 /* Calculate the number of ~256MB segments of memory */
403 mem_segments = (mem_size + 0x0FFFFFFF) >> 28;
404
405 /*
406 * Copy the boot mmu tt to create system mmu tt.
407 * The system mmu tt starts right after the boot mmu tt.
408 * The translation table base virtual address is aligned at the end
409 * of the executable.
410 */
411 boot_ttep = args->topOfKernelData;
412 boot_tte = (tt_entry_t *) phystokv(boot_ttep);
413
414 cpu_ttep = boot_ttep + ARM_PGBYTES * 4;
415 cpu_tte = (tt_entry_t *) phystokv(cpu_ttep);
416
417 bcopy(boot_tte, cpu_tte, ARM_PGBYTES * 4);
418
419 /*
420 * Clear out any V==P mappings that may have been established in e.g. start.s
421 */
422 tte = &cpu_tte[ttenum(gPhysBase)];
423 tte_limit = &cpu_tte[ttenum(gPhysBase + gPhysSize)];
424
425 /* Hands off [gVirtBase, gVirtBase + gPhysSize) please. */
426 if (gPhysBase < gVirtBase) {
427 if (gPhysBase + gPhysSize > gVirtBase)
428 tte_limit = &cpu_tte[ttenum(gVirtBase)];
429 } else {
430 if (gPhysBase < gVirtBase + gPhysSize)
431 tte = &cpu_tte[ttenum(gVirtBase + gPhysSize)];
432 }
433
434 while (tte < tte_limit) {
435 *tte = ARM_TTE_TYPE_FAULT;
436 tte++;
437 }
438
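/*
 * Physical layout after topOfKernelData appears to be (one 4KB page each):
 *   pages 0-3: boot L1 translation table (boot_tte)
 *   pages 4-7: system L1 translation table (cpu_tte, copied just above)
 *   pages 8-9: the two L2 table pages left by start.s; page 9 holds the
 *              handcrafted commpage/EVB mappings (see arm_vm_prot_init)
 * which is why the first reusable physical page is cpu_ttep + 6 pages.
 */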
439 /* Skip 6 pages (four L1 + two L2 entries) */
440 avail_start = cpu_ttep + ARM_PGBYTES * 6;
441 avail_end = gPhysBase + mem_size;
442
443 /*
444 * Now retrieve addresses for end, edata, and etext
445 * from the Mach-O headers for the currently running 32-bit kernel.
446 */
447 segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
448 segLOWESTTEXT = segTEXTB;
449 segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
450 segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
451 segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
452 segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);
453 segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
454 segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);
455 segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA);
456
457 segEXTRADATA = 0;
458 segSizeEXTRADATA = 0;
459
460 DTEntry memory_map;
461 MemoryMapFileInfo *trustCacheRange;
462 unsigned int trustCacheRangeSize;
463 int err;
464
465 err = DTLookupEntry(NULL, "chosen/memory-map", &memory_map);
466 assert(err == kSuccess);
467
468 err = DTGetProperty(memory_map, "TrustCache", (void**)&trustCacheRange, &trustCacheRangeSize);
469 if (err == kSuccess) {
470 assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo));
471
472 segEXTRADATA = phystokv(trustCacheRange->paddr);
473 segSizeEXTRADATA = trustCacheRange->length;
474 }
475
476 etext = (vm_offset_t) segTEXTB + segSizeTEXT;
477 sdata = (vm_offset_t) segDATAB;
478 edata = (vm_offset_t) segDATAB + segSizeDATA;
479 end_kern = round_page(getlastaddr()); /* Force end to next page */
480
481 /*
482 * Special handling for the __DATA,__const *section*.
483 * A page of padding named lastkerneldataconst is at the end of the __DATA,__const
484 * so we can safely truncate the size. __DATA,__const is also aligned, but
485 * just in case we will round that to a page, too.
486 */
487 segDATA = getsegbynamefromheader(&_mh_execute_header, "__DATA");
488 sectDCONST = getsectbynamefromheader(&_mh_execute_header, "__DATA", "__const");
489 sectCONSTB = sectDCONST->addr;
490 sectSizeCONST = sectDCONST->size;
491
492 #if !SECURE_KERNEL
493 /* doconstro is true by default, but we allow a boot-arg to disable it */
494 (void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));
495 #endif
496
497 if (doconstro) {
498 extern vm_offset_t _lastkerneldataconst;
499 extern vm_size_t _lastkerneldataconst_padsize;
500 vm_offset_t sdataconst = sectCONSTB;
501
502 /* this should already be aligned, but round it so that we can protect it */
503 sectCONSTB = round_page(sectCONSTB);
504
505 /* make sure lastkerneldataconst is really last and the right size */
506 if ((_lastkerneldataconst == sdataconst + sectSizeCONST - _lastkerneldataconst_padsize) &&
507 (_lastkerneldataconst_padsize >= PAGE_SIZE)) {
508 sectSizeCONST = trunc_page(sectSizeCONST);
509 } else {
510 /* otherwise see if next section is aligned then protect up to it */
511 kernel_section_t *next_sect = nextsect(segDATA, sectDCONST);
512
513 if (next_sect && ((next_sect->addr & PAGE_MASK) == 0)) {
514 sectSizeCONST = next_sect->addr - sectCONSTB;
515 } else {
516 /* lastly just go ahead and truncate so we try to protect something */
517 sectSizeCONST = trunc_page(sectSizeCONST);
518 }
519 }
520
521 /* sanity check */
522 if ((sectSizeCONST == 0) || (sectCONSTB < sdata) || (sectCONSTB + sectSizeCONST) >= edata) {
523 doconstro = FALSE;
524 }
525 }
526
527 vm_set_page_size();
528
529 #ifndef __ARM_L1_PTW__
530 FlushPoC_Dcache();
531 #endif
532 set_mmu_ttb(cpu_ttep);
533 set_mmu_ttb_alternate(cpu_ttep);
534 flush_mmu_tlb();
535 #if __arm__ && __ARM_USER_PROTECT__
536 {
537 unsigned int ttbr0_val, ttbr1_val, ttbcr_val;
538 thread_t thread = current_thread();
539
540 __asm__ volatile("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
541 __asm__ volatile("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
542 __asm__ volatile("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val));
543 thread->machine.uptw_ttb = ttbr0_val;
544 thread->machine.kptw_ttb = ttbr1_val;
545 thread->machine.uptw_ttc = ttbcr_val;
546 }
547 #endif
548 vm_prelink_stext = segPRELINKTEXTB;
549 vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
550 vm_prelink_sinfo = segPRELINKINFOB;
551 vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
552 vm_slinkedit = segLINKB;
553 vm_elinkedit = segLINKB + segSizeLINK;
554
555 sane_size = mem_size - (avail_start - gPhysBase);
556 max_mem = mem_size;
557 vm_kernel_slide = gVirtBase-VM_KERNEL_LINK_ADDRESS;
558 vm_kernel_stext = segTEXTB;
559 vm_kernel_etext = segTEXTB + segSizeTEXT;
560 vm_kernel_base = gVirtBase;
561 vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
562 vm_kext_base = segPRELINKTEXTB;
563 vm_kext_top = vm_kext_base + segSizePRELINKTEXT;
564 vm_kernel_slid_base = segTEXTB;
565 vm_kernel_slid_top = vm_kext_top;
566
567 pmap_bootstrap((gVirtBase+MEM_SIZE_MAX+0x3FFFFF) & 0xFFC00000);
568
569 arm_vm_prot_init(args);
570
571 /*
572 * To avoid recursing while trying to init the vm_page and object * mechanisms,
573 * pre-initialize kernel pmap page table pages to cover this address range:
574 * 2MB + FrameBuffer size + 3MB for each 256MB segment
575 */
576 off_end = (2 + (mem_segments * 3)) << 20;
577 off_end += (unsigned int) round_page(args->Video.v_height * args->Video.v_rowBytes);
578
579 for (off = 0, va = (gVirtBase+MEM_SIZE_MAX+0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) {
580 pt_entry_t *ptp;
581 pmap_paddr_t ptp_phys;
582
583 ptp = (pt_entry_t *) phystokv(avail_start);
584 ptp_phys = (pmap_paddr_t)avail_start;
585 avail_start += ARM_PGBYTES;
586 pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE);
587 tte = &cpu_tte[ttenum(va + off)];
588 *tte = pa_to_tte(ptp_phys) | ARM_TTE_TYPE_TABLE;
589 *(tte+1) = pa_to_tte(ptp_phys + 0x400) | ARM_TTE_TYPE_TABLE;
590 *(tte+2) = pa_to_tte(ptp_phys + 0x800) | ARM_TTE_TYPE_TABLE;
591 *(tte+3) = pa_to_tte(ptp_phys + 0xC00) | ARM_TTE_TYPE_TABLE;
592 }
593
594 avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;
595
596 first_avail = avail_start;
597 patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
598 }
599