/* apple/xnu (xnu-7195.50.7.100.1): osfmk/arm/arm_vm_init.c */
/*
 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_debug.h>
#include <mach_kdp.h>
#include <debug.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>
#include <arm/misc_protos.h>
#include <arm/lowglobals.h>

#include <pexpert/arm/boot.h>
#include <pexpert/device_tree.h>

#include <libkern/kernel_mach_header.h>
/*
 * Denotes the end of xnu.
 */
extern void *last_kernel_symbol;

/*
 * KASLR parameters
 */
vm_offset_t vm_kernel_base;
vm_offset_t vm_kernel_top;
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;
vm_offset_t vm_kernel_slid_base;
vm_offset_t vm_kernel_slid_top;
vm_offset_t vm_kext_base;
vm_offset_t vm_kext_top;
vm_offset_t vm_prelink_stext;
vm_offset_t vm_prelink_etext;
vm_offset_t vm_prelink_sinfo;
vm_offset_t vm_prelink_einfo;
vm_offset_t vm_slinkedit;
vm_offset_t vm_elinkedit;
vm_offset_t vm_prelink_sdata;
vm_offset_t vm_prelink_edata;

vm_offset_t vm_kernel_builtinkmod_text;
vm_offset_t vm_kernel_builtinkmod_text_end;

unsigned long gVirtBase, gPhysBase, gPhysSize;  /* Used by <mach/arm/vm_param.h> */

vm_offset_t mem_size;       /* Size of actual physical memory present
                             * minus any performance buffer and possibly
                             * limited by mem_limit in bytes */
uint64_t mem_actual;        /* The "One True" physical memory size
                             * actually, it's the highest physical
                             * address + 1 */
uint64_t max_mem;           /* kernel/vm managed memory, adjusted by maxmem */
uint64_t max_mem_actual;    /* Actual size of physical memory (bytes), adjusted
                             * by the maxmem boot-arg */
uint64_t sane_size;         /* Memory size to use for defaults
                             * calculations */
addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS;  /* Highest kernel
                                                 * virtual address known
                                                 * to the VM system */

vm_offset_t segEXTRADATA;
unsigned long segSizeEXTRADATA;
vm_offset_t segLOWESTTEXT;
vm_offset_t segLOWEST;
static vm_offset_t segTEXTB;
static unsigned long segSizeTEXT;
static vm_offset_t segDATAB;
static unsigned long segSizeDATA;
vm_offset_t segLINKB;
static unsigned long segSizeLINK;
static vm_offset_t segKLDB;
static unsigned long segSizeKLD;
static vm_offset_t segLASTB;
static vm_offset_t segLASTDATACONSTB;
static unsigned long segSizeLASTDATACONST;
static unsigned long segSizeLAST;
static vm_offset_t sectCONSTB;
static unsigned long sectSizeCONST;
vm_offset_t segBOOTDATAB;
unsigned long segSizeBOOTDATA;
extern vm_offset_t intstack_low_guard;
extern vm_offset_t intstack_high_guard;
extern vm_offset_t fiqstack_high_guard;

vm_offset_t segPRELINKTEXTB;
unsigned long segSizePRELINKTEXT;
vm_offset_t segPRELINKINFOB;
unsigned long segSizePRELINKINFO;

vm_offset_t segLOWESTKC;
vm_offset_t segHIGHESTKC;
vm_offset_t segLOWESTROKC;
vm_offset_t segHIGHESTROKC;
vm_offset_t segLOWESTAuxKC;
vm_offset_t segHIGHESTAuxKC;
vm_offset_t segLOWESTROAuxKC;
vm_offset_t segHIGHESTROAuxKC;
vm_offset_t segLOWESTRXAuxKC;
vm_offset_t segHIGHESTRXAuxKC;
vm_offset_t segHIGHESTNLEAuxKC;

static kernel_segment_command_t *segDATA;
static boolean_t doconstro = TRUE;

vm_offset_t end_kern, etext, sdata, edata;

/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Page_size must already be set.
 *
 * Parameters:
 * first_avail: first available physical page -
 *              after kernel page tables
 * avail_start: PA of first physical page
 * avail_end:   PA of last physical page
 */
vm_offset_t first_avail;
vm_offset_t static_memory_end;
pmap_paddr_t avail_start, avail_end;

#define MEM_SIZE_MAX 0x40000000

extern vm_offset_t ExceptionVectorsBase; /* the code we want to load there */

/* The translation tables have to be 16KB aligned */
#define round_x_table(x) \
    (((pmap_paddr_t)(x) + (ARM_PGBYTES << 2) - 1) & ~((ARM_PGBYTES << 2) - 1))
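/*
 * Assuming 4KB pages (ARM_PGBYTES == 4096), ARM_PGBYTES << 2 is 16KB,
 * which matches the alignment the ARMv7 short-descriptor L1 translation
 * table requires (4096 entries * 4 bytes each).
 */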

vm_map_address_t
phystokv(pmap_paddr_t pa)
{
    return pa - gPhysBase + gVirtBase;
}
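/*
 * Illustrative (values hypothetical): with gPhysBase == 0x40000000 and
 * gVirtBase == 0x80000000, phystokv(0x40123000) == 0x80123000; the static
 * kernel window is a single linear offset between PA and KVA.
 */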

static void
arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va,
    int pte_prot_APX, int pte_prot_XN)
{
    if (va & ARM_TT_L1_PT_OFFMASK) { /* ragged edge hanging over an ARM_TT_L1_PT_SIZE boundary */
        va &= (~ARM_TT_L1_PT_OFFMASK);
        tt_entry_t *tte = &cpu_tte[ttenum(va)];
        tt_entry_t tmplate = *tte;
        pmap_paddr_t pa;
        pt_entry_t *ppte, ptmp;
        unsigned int i;

        pa = va - gVirtBase + gPhysBase;

        if (pa >= avail_end) {
            return;
        }

        assert(_end >= va);

        if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
            /* pick up the existing page table. */
            ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
        } else {
            /* TTE must be reincarnated COARSE. */
            ppte = (pt_entry_t *)phystokv(avail_start);
            pmap_paddr_t l2table = avail_start;
            avail_start += ARM_PGBYTES;
            bzero(ppte, ARM_PGBYTES);

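            /*
             * ARMv7 coarse (L2) tables are 1KB each (256 four-byte entries,
             * mapping 1MB), so one 4KB page holds four of them; the loop
             * below points four consecutive L1 entries at successive 1KB
             * slices to cover a full ARM_TT_L1_PT_SIZE region (assuming
             * 4KB pages).
             */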
            for (i = 0; i < 4; ++i) {
                tte[i] = pa_to_tte(l2table + (i * 0x400)) | ARM_TTE_TYPE_TABLE;
            }
        }

        vm_offset_t len = _end - va;
        if ((pa + len) > avail_end) {
            _end -= (pa + len - avail_end);
        }
        assert((start - gVirtBase + gPhysBase) >= gPhysBase);

        /* Apply the desired protections to the specified page range */
        for (i = 0; i < (ARM_PGBYTES / sizeof(*ppte)); i++) {
            if (start <= va && va < _end) {
                ptmp = pa | ARM_PTE_AF | ARM_PTE_SH | ARM_PTE_TYPE;
                ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
                ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);
                if (pte_prot_XN) {
                    ptmp = ptmp | ARM_PTE_NX;
                }

                ppte[i] = ptmp;
            }

            va += ARM_PGBYTES;
            pa += ARM_PGBYTES;
        }
    }
}

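/*
 * Apply protections over [start, start + size): the helper calls handle the
 * unaligned head and tail at 4KB page granularity, while the loop walks the
 * interior in ARM_TT_L1_PT_SIZE strides, rewriting block/section TTEs in
 * place unless force_page_granule demands page tables throughout.
 */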
static void
arm_vm_page_granular_prot(vm_offset_t start, unsigned long size,
    int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int force_page_granule)
{
    vm_offset_t _end = start + size;
    vm_offset_t align_start = (start + ARM_TT_L1_PT_OFFMASK) & ~ARM_TT_L1_PT_OFFMASK;
    vm_offset_t align_end = _end & ~ARM_TT_L1_PT_OFFMASK;

    arm_vm_page_granular_helper(start, _end, start, pte_prot_APX, pte_prot_XN);

    while (align_start < align_end) {
        if (force_page_granule) {
            arm_vm_page_granular_helper(align_start, align_end, align_start + 1,
                pte_prot_APX, pte_prot_XN);
        } else {
            tt_entry_t *tte = &cpu_tte[ttenum(align_start)];
            for (int i = 0; i < 4; ++i) {
                tt_entry_t tmplate = tte[i];

                tmplate = (tmplate & ~ARM_TTE_BLOCK_APMASK) | ARM_TTE_BLOCK_AP(pte_prot_APX);
                tmplate = (tmplate & ~ARM_TTE_BLOCK_NX_MASK);
                if (tte_prot_XN) {
                    tmplate = tmplate | ARM_TTE_BLOCK_NX;
                }

                tte[i] = tmplate;
            }
        }
        align_start += ARM_TT_L1_PT_SIZE;
    }

    arm_vm_page_granular_helper(start, _end, _end, pte_prot_APX, pte_prot_XN);
}

static inline void
arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
    arm_vm_page_granular_prot(start, size, 1, AP_RONA, 1, force_page_granule);
}

static inline void
arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, int force_page_granule)
{
    arm_vm_page_granular_prot(start, size, 0, AP_RONA, 0, force_page_granule);
}

static inline void
arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
    arm_vm_page_granular_prot(start, size, 1, AP_RWNA, 1, force_page_granule);
}

static inline void
arm_vm_page_granular_RWX(vm_offset_t start, unsigned long size, int force_page_granule)
{
    arm_vm_page_granular_prot(start, size, 0, AP_RWNA, 0, force_page_granule);
}

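/*
 * Summary of the wrappers above (the AP values are kernel-only, no user
 * access):
 *
 *   wrapper  tte_prot_XN  pte_prot_APX  pte_prot_XN  resulting mapping
 *   RNX      1            AP_RONA       1            read-only,  no-execute
 *   ROX      0            AP_RONA       0            read-only,  executable
 *   RWNX     1            AP_RWNA       1            read-write, no-execute
 *   RWX      0            AP_RWNA       0            read-write, executable
 */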
void
arm_vm_prot_init(boot_args * args)
{
#if __ARM_PTE_PHYSMAP__
    boolean_t force_coarse_physmap = TRUE;
#else
    boolean_t force_coarse_physmap = FALSE;
#endif
    /*
     * Enforce W^X protections on segments that have been identified so far. This will be
     * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions()
     */

    /*
     * Protection on kernel text is loose here to allow shenanigans early on (e.g. copying exception vectors)
     * and storing an address into "error_buffer" (see arm_init.c) !?!
     * These protections are tightened in arm_vm_prot_finalize()
     */
    arm_vm_page_granular_RWX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

    if (doconstro) {
        /*
         * We map __DATA with 3 calls, so that the __const section can have its
         * protections changed independently of the rest of the __DATA segment.
         */
        arm_vm_page_granular_RWNX(segDATAB, sectCONSTB - segDATAB, FALSE);
        arm_vm_page_granular_RNX(sectCONSTB, sectSizeCONST, FALSE);
        arm_vm_page_granular_RWNX(sectCONSTB + sectSizeCONST, (segDATAB + segSizeDATA) - (sectCONSTB + sectSizeCONST), FALSE);
    } else {
        /* If we aren't protecting const, just map DATA as a single blob. */
        arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, FALSE);
    }
    arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, TRUE);
    arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, TRUE);
    arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, TRUE);
    arm_vm_page_granular_RNX((vm_offset_t)&fiqstack_high_guard, PAGE_MAX_SIZE, TRUE);

    arm_vm_page_granular_ROX(segKLDB, segSizeKLD, force_coarse_physmap);
    arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, force_coarse_physmap);
    arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this
    if (segLASTDATACONSTB) {
        arm_vm_page_granular_RWNX(segLASTDATACONSTB, segSizeLASTDATACONST, FALSE); // __LASTDATA_CONST may be empty, but we cannot assume this
    }
    arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions
    arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT,
        end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary
    arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, force_coarse_physmap); // Device Tree, RAM Disk (if present), bootArgs, trust caches
    arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, FALSE); // tighter trust cache protection
    arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData), ARM_PGBYTES * 8, FALSE); // boot_tte, cpu_tte

    /*
     * FIXME: Any page table pages that arm_vm_page_granular_* created with ROX entries in the range
     * phystokv(args->topOfKernelData) to phystokv(prot_avail_start) should themselves be
     * write protected in the static mapping of that range.
     * [Page table pages whose page table entries grant execute (X) privileges should themselves be
     * marked read-only. This aims to thwart attacks that replace the X entries with vectors to evil code
     * (relying on some thread of execution to eventually arrive at what previously was a trusted routine).]
     */
    arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 8, ARM_PGBYTES, FALSE); /* Excess physMem over 1MB */
    arm_vm_page_granular_RWX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* refined in finalize */

    /* Map the remainder of xnu owned memory. */
    arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 10,
        static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap); /* rest of physmem */
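    /*
     * Layout of the pages at args->topOfKernelData, as used above (inferred
     * from the avail_start arithmetic in arm_vm_init()): pages 0-3 hold the
     * boot L1 table, pages 4-7 the cpu L1 table, pages 8-9 two L2 tables
     * (page 9 being the handcrafted table for the exception vectors), and
     * general-purpose memory begins at page 10.
     */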

    /*
     * Special case write protection for the mapping of ExceptionVectorsBase (EVB) at 0xFFFF0000.
     * Recall that start.s handcrafted a page table page for EVB mapping
     */
    pmap_paddr_t p = (pmap_paddr_t)(args->topOfKernelData) + (ARM_PGBYTES * 9);
    pt_entry_t *ppte = (pt_entry_t *)phystokv(p);
    pmap_init_pte_page(kernel_pmap, ppte, HIGH_EXC_VECTORS & ~ARM_TT_L1_PT_OFFMASK, 2, TRUE);

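    /*
     * Worked example (assuming ARM_TT_L1_PT_OFFMASK == 0x3FFFFF and
     * ARM_TT_L2_SHIFT == 12): idx = (0xFFFF0000 & 0x3FFFFF) >> 12 = 0x3F0,
     * i.e. entry 1008 of the 1024-entry page-table page spanning
     * 0xFFC00000-0xFFFFFFFF; that single PTE is then made read-only below.
     */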
    int idx = (HIGH_EXC_VECTORS & ARM_TT_L1_PT_OFFMASK) >> ARM_TT_L2_SHIFT;
    pt_entry_t ptmp = ppte[idx];

    ptmp = (ptmp & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA);

    ppte[idx] = ptmp;
}

void
arm_vm_prot_finalize(boot_args * args)
{
    cpu_stack_alloc(&BootCpuData);
    ml_static_mfree(segBOOTDATAB, segSizeBOOTDATA);
    /*
     * Naively we could have:
     * arm_vm_page_granular_ROX(segTEXTB, segSizeTEXT, FALSE);
     * but, at present, that would miss a 1MB boundary at the beginning of the segment and
     * so would force a (wasteful) coarse page (e.g. when gVirtBase is 0x80000000, segTEXTB is 0x80001000).
     */
    arm_vm_page_granular_ROX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

    arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* commpage, EVB */

    flush_mmu_tlb();
}

/* used in the chosen/memory-map node, populated by iBoot. */
typedef struct MemoryMapFileInfo {
    vm_offset_t paddr;
    size_t length;
} MemoryMapFileInfo;


void
arm_vm_init(uint64_t memory_size, boot_args * args)
{
    vm_map_address_t va, off, off_end;
    tt_entry_t *tte, *tte_limit;
    pmap_paddr_t boot_ttep;
    tt_entry_t *boot_tte;
    uint32_t mem_segments;
    kernel_section_t *sectDCONST;

    /*
     * Get the virtual and physical memory base from boot_args.
     */
    gVirtBase = args->virtBase;
    gPhysBase = args->physBase;
    gPhysSize = args->memSize;
    mem_size = args->memSize;
    mem_actual = args->memSizeActual ? args->memSizeActual : mem_size;
    if (mem_size > MEM_SIZE_MAX) {
        mem_size = MEM_SIZE_MAX;
    }
    if ((memory_size != 0) && (mem_size > memory_size)) {
        mem_size = memory_size;
        max_mem_actual = memory_size;
    } else {
        max_mem_actual = mem_actual;
    }

    static_memory_end = gVirtBase + mem_size;

    /* Calculate the number of ~256MB segments of memory */
    mem_segments = (mem_size + 0x0FFFFFFF) >> 28;
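    /*
     * e.g. mem_size == 512MB gives mem_segments == 2; this later sizes the
     * page-table preallocation loop near the end of this function.
     */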

    /*
     * Copy the boot MMU translation table to create the system MMU
     * translation table, which starts right after the boot table.
     * The translation table base virtual address is aligned at the
     * end of the executable.
     */
    boot_ttep = args->topOfKernelData;
    boot_tte = (tt_entry_t *) phystokv(boot_ttep);

    cpu_ttep = boot_ttep + ARM_PGBYTES * 4;
    cpu_tte = (tt_entry_t *) phystokv(cpu_ttep);

    bcopy(boot_tte, cpu_tte, ARM_PGBYTES * 4);
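    /*
     * ARM_PGBYTES * 4 is 16KB (assuming 4KB pages): the full 4096-entry
     * short-descriptor L1 table, each entry covering 1MB of the 4GB
     * address space.
     */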

    /*
     * Clear out any V==P mappings that may have been established in e.g. start.s
     */
    tte = &cpu_tte[ttenum(gPhysBase)];
    tte_limit = &cpu_tte[ttenum(gPhysBase + gPhysSize)];

    /* Hands off [gVirtBase, gVirtBase + gPhysSize) please. */
    if (gPhysBase < gVirtBase) {
        if (gPhysBase + gPhysSize > gVirtBase) {
            tte_limit = &cpu_tte[ttenum(gVirtBase)];
        }
    } else {
        if (gPhysBase < gVirtBase + gPhysSize) {
            tte = &cpu_tte[ttenum(gVirtBase + gPhysSize)];
        }
    }

    while (tte < tte_limit) {
        *tte = ARM_TTE_TYPE_FAULT;
        tte++;
    }

    /* Skip 6 pages (four L1 + two L2 entries) */
    avail_start = cpu_ttep + ARM_PGBYTES * 6;
    avail_end = gPhysBase + mem_size;
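    /*
     * avail_start now sits at args->topOfKernelData + 10 pages (4 boot L1 +
     * 4 cpu L1 + 2 L2), matching the ARM_PGBYTES * 10 offset that
     * arm_vm_prot_init() uses when mapping the rest of physical memory.
     */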

    /*
     * Now retrieve addresses for end, edata, and etext
     * from the Mach-O headers of the currently running 32-bit kernel.
     */
    segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
    segLOWESTTEXT = segTEXTB;
    segLOWEST = segLOWESTTEXT;
    segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
    segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
    segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
    segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);
    segLASTDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LASTDATA_CONST", &segSizeLASTDATACONST);
    segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
    segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);
    segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA);

    segEXTRADATA = 0;
    segSizeEXTRADATA = 0;

    DTEntry memory_map;
    MemoryMapFileInfo const *trustCacheRange;
    unsigned int trustCacheRangeSize;
    int err;

    err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
    assert(err == kSuccess);

    err = SecureDTGetProperty(memory_map, "TrustCache", (const void**)&trustCacheRange, &trustCacheRangeSize);
    if (err == kSuccess) {
        assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo));

        segEXTRADATA = phystokv(trustCacheRange->paddr);
        segSizeEXTRADATA = trustCacheRange->length;
    }

    etext = (vm_offset_t) segTEXTB + segSizeTEXT;
    sdata = (vm_offset_t) segDATAB;
    edata = (vm_offset_t) segDATAB + segSizeDATA;
    end_kern = round_page(getlastaddr()); /* Force end to next page */

    /*
     * Special handling for the __DATA,__const *section*.
     * A page of padding named lastkerneldataconst sits at the end of
     * __DATA,__const, so we can safely truncate the size. __DATA,__const is
     * also aligned, but just in case we will round that to a page, too.
     */
    segDATA = getsegbynamefromheader(&_mh_execute_header, "__DATA");
    sectDCONST = getsectbynamefromheader(&_mh_execute_header, "__DATA", "__const");
    sectCONSTB = sectDCONST->addr;
    sectSizeCONST = sectDCONST->size;

    if (doconstro) {
        extern vm_offset_t _lastkerneldataconst;
        extern vm_size_t _lastkerneldataconst_padsize;
        vm_offset_t sdataconst = sectCONSTB;

        /* this should already be page-aligned, but round so that we can protect it */
        sectCONSTB = round_page(sectCONSTB);

        /* make sure lastkerneldataconst is really last and the right size */
        if ((_lastkerneldataconst == sdataconst + sectSizeCONST - _lastkerneldataconst_padsize) &&
            (_lastkerneldataconst_padsize >= PAGE_SIZE)) {
            sectSizeCONST = trunc_page(sectSizeCONST);
        } else {
            /* otherwise see if the next section is aligned, then protect up to it */
            kernel_section_t *next_sect = nextsect(segDATA, sectDCONST);

            if (next_sect && ((next_sect->addr & PAGE_MASK) == 0)) {
                sectSizeCONST = next_sect->addr - sectCONSTB;
            } else {
                /* lastly just go ahead and truncate so we try to protect something */
                sectSizeCONST = trunc_page(sectSizeCONST);
            }
        }

        /* sanity check */
        if ((sectSizeCONST == 0) || (sectCONSTB < sdata) || (sectCONSTB + sectSizeCONST) >= edata) {
            doconstro = FALSE;
        }
    }


    vm_set_page_size();

    vm_prelink_stext = segPRELINKTEXTB;
    vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
    vm_prelink_sinfo = segPRELINKINFOB;
    vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
    vm_slinkedit = segLINKB;
    vm_elinkedit = segLINKB + segSizeLINK;

    sane_size = mem_size - (avail_start - gPhysBase);
    max_mem = mem_size;
    vm_kernel_slide = gVirtBase - VM_KERNEL_LINK_ADDRESS;
    vm_kernel_stext = segTEXTB;
    vm_kernel_etext = segTEXTB + segSizeTEXT;
    vm_kernel_base = gVirtBase;
    vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
    vm_kext_base = segPRELINKTEXTB;
    vm_kext_top = vm_kext_base + segSizePRELINKTEXT;
    vm_kernel_slid_base = segTEXTB;
    vm_kernel_slid_top = vm_kext_top;

    pmap_bootstrap((gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000);
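    /*
     * Illustrative (gVirtBase hypothetical): with gVirtBase == 0x80000000,
     * (0x80000000 + 0x40000000 + 0x3FFFFF) & 0xFFC00000 == 0xC0000000,
     * i.e. the first 4MB-aligned VA above the 1GB static window becomes
     * the pmap's virtual allocation base.
     */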

    arm_vm_prot_init(args);

    vm_page_kernelcache_count = (unsigned int) (atop_64(end_kern - segLOWEST));

    /*
     * To avoid recursing while trying to init the vm_page and object
     * mechanisms, pre-initialize kernel pmap page table pages to cover
     * this address range:
     * 2MB + FrameBuffer size + 3MB for each 256MB segment
     */
    off_end = (2 + (mem_segments * 3)) << 20;
    off_end += (unsigned int) round_page(args->Video.v_height * args->Video.v_rowBytes);
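    /*
     * e.g. (hypothetical numbers) mem_segments == 2 and a 1MB framebuffer
     * give off_end = (2 + 6)MB + 1MB = 9MB of VA to pre-cover; each loop
     * iteration below wires one ARM_TT_L1_PT_SIZE (4MB) stride with a fresh
     * page holding four coarse tables.
     */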

    for (off = 0, va = (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) {
        pt_entry_t *ptp;
        pmap_paddr_t ptp_phys;

        ptp = (pt_entry_t *) phystokv(avail_start);
        ptp_phys = (pmap_paddr_t)avail_start;
        avail_start += ARM_PGBYTES;
        bzero(ptp, ARM_PGBYTES);
        pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE);
        tte = &cpu_tte[ttenum(va + off)];
        *tte = pa_to_tte((ptp_phys)) | ARM_TTE_TYPE_TABLE;
        *(tte + 1) = pa_to_tte((ptp_phys + 0x400)) | ARM_TTE_TYPE_TABLE;
        *(tte + 2) = pa_to_tte((ptp_phys + 0x800)) | ARM_TTE_TYPE_TABLE;
        *(tte + 3) = pa_to_tte((ptp_phys + 0xC00)) | ARM_TTE_TYPE_TABLE;
    }

    set_mmu_ttb(cpu_ttep);
    set_mmu_ttb_alternate(cpu_ttep);
    flush_mmu_tlb();
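    /*
     * With __ARM_USER_PROTECT__, each thread tracks its own TTBR0 (user)
     * and TTBR1 (kernel) translation table base values; seed the boot
     * thread from the live registers (MRC p15, c2, c0, 0/1 reads
     * TTBR0/TTBR1 on ARMv7).
     */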
#if __arm__ && __ARM_USER_PROTECT__
    {
        unsigned int ttbr0_val, ttbr1_val;
        thread_t thread = current_thread();

        __asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
        __asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
        thread->machine.uptw_ttb = ttbr0_val;
        thread->machine.kptw_ttb = ttbr1_val;
    }
#endif
    avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;

    first_avail = avail_start;
    patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
}