/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <pexpert/arm64/cyclone.h>
#include <pexpert/arm64/hurricane.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include "assym.s"

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */


.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
        mov     x1, lr
        bl      EXT(pinst_set_vbar)
        mov     lr, x1
#else
        msr     VBAR_EL1, x0
#endif
.endmacro

.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
        mov     x0, x1
        mov     x1, lr
        bl      _pinst_set_tcr
        mov     lr, x1
#else
        msr     TCR_EL1, x1
#endif
.endmacro

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
        mov     x1, lr
        bl      _pinst_set_ttbr1
        mov     lr, x1
#else
        msr     TTBR1_EL1, x0
#endif
.endmacro

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
        mov     x1, lr

        // This may abort, do so on SP1
        bl      _pinst_spsel_1

        bl      _pinst_set_sctlr
        msr     SPSel, #0                       // Back to SP0
        mov     lr, x1
#else
        msr     SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro
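
/*
 * Note on the MSR_* macros above: when KERNEL_INTEGRITY_KTRR is enabled,
 * these system-register writes are routed through the pinst_* helpers
 * (protected-instruction routines expected to live in the KTRR-covered
 * text region) instead of a direct MSR. Because the 'bl' clobbers lr,
 * the caller's lr is stashed in x1 around the call, so users of these
 * macros must treat x0/x1 as scratch.
 */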

/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *  x19 - Reset handler data pointer
 *  x20 - Boot args pointer
 *  x21 - CPU data pointer
 */
        .text
        .align 12
        .globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
        // Preserve x0 for start_first_cpu, if called

        // Unlock the core for debugging
        msr     OSLAR_EL1, xzr

#if !(defined(KERNEL_INTEGRITY_KTRR))
        // Set low reset vector before attempting any loads
        adrp    x0, EXT(LowExceptionVectorBase)@page
        add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
        msr     VBAR_EL1, x0
#endif


#if defined(KERNEL_INTEGRITY_KTRR)
        /*
         * Set KTRR registers immediately after wake/resume
         *
         * During power on reset, XNU stashed the kernel text region range values
         * into __DATA,__const which should be protected by AMCC RoRgn at this point.
         * Read this data and program/lock KTRR registers accordingly.
         * If either value is zero, we're debugging the kernel, so skip programming KTRR.
         */

        // load stashed rorgn_begin
        adrp    x17, EXT(rorgn_begin)@page
        add     x17, x17, EXT(rorgn_begin)@pageoff
        ldr     x17, [x17]
        // if rorgn_begin is zero, we're debugging. skip enabling ktrr
        cbz     x17, 1f

        // load stashed rorgn_end
        adrp    x19, EXT(rorgn_end)@page
        add     x19, x19, EXT(rorgn_end)@pageoff
        ldr     x19, [x19]
        cbz     x19, 1f

        // program and lock down KTRR
        // subtract one page from rorgn_end to make pinst insns NX
        msr     ARM64_REG_KTRR_LOWER_EL1, x17
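        // Note: #(1 << (ARM_PTE_SHIFT-12)), lsl #12 below evaluates to one page
        // (1 << ARM_PTE_SHIFT); it is written in this split form so that it fits
        // the shifted 12-bit immediate encoding of SUB.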
        sub     x19, x19, #(1 << (ARM_PTE_SHIFT-12)), lsl #12
        msr     ARM64_REG_KTRR_UPPER_EL1, x19
        mov     x17, #1
        msr     ARM64_REG_KTRR_LOCK_EL1, x17

1:
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

        // Process reset handlers
        adrp    x19, EXT(ResetHandlerData)@page         // Get address of the reset handler data
        add     x19, x19, EXT(ResetHandlerData)@pageoff
        mrs     x15, MPIDR_EL1                          // Load MPIDR to get CPU number
        and     x0, x15, #0xFF                          // CPU number is in MPIDR Affinity Level 0
        ldr     x1, [x19, CPU_DATA_ENTRIES]             // Load start of data entries
        add     x3, x1, MAX_CPUS * 16                   // end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
        ldr     x21, [x1, CPU_DATA_PADDR]               // Load physical CPU data address
        cbz     x21, Lnext_cpu_data_entry
        ldr     w2, [x21, CPU_PHYS_ID]                  // Load ccc cpu phys id
        cmp     x0, x2                                  // Compare cpu data phys cpu and MPIDR_EL1 phys cpu
        b.eq    Lfound_cpu_data_entry                   // Branch if match
Lnext_cpu_data_entry:
        add     x1, x1, #16                             // Increment to the next cpu data entry
        cmp     x1, x3
        b.eq    Lskip_cpu_reset_handler                 // Not found
        b       Lcheck_cpu_data_entry                   // loop
Lfound_cpu_data_entry:
        adrp    x20, EXT(const_boot_args)@page
        add     x20, x20, EXT(const_boot_args)@pageoff
        ldr     x0, [x21, CPU_RESET_HANDLER]            // Call CPU reset handler
        cbz     x0, Lskip_cpu_reset_handler

        // Validate that our handler is one of the two expected handlers
        adrp    x2, EXT(resume_idle_cpu)@page
        add     x2, x2, EXT(resume_idle_cpu)@pageoff
        cmp     x0, x2
        beq     1f
        adrp    x2, EXT(start_cpu)@page
        add     x2, x2, EXT(start_cpu)@pageoff
        cmp     x0, x2
        bne     Lskip_cpu_reset_handler
1:



#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
        /*
         * Populate TPIDR_EL1 (in case the CPU takes an exception while
         * turning on the MMU).
         */
        ldr     x13, [x21, CPU_ACTIVE_THREAD]
        msr     TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

        blr     x0
Lskip_cpu_reset_handler:
        b       .                                       // Hang if the handler is NULL or returns

        .align 3
        .globl EXT(ResetHandlerData)
LEXT(ResetHandlerData)
        .space  (rhdSize_NUM),0                         // (filled with 0s)

        .align 3
        .global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
        .global EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
        .space  (stSize_NUM),0
#endif


/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point. Reset vector = (__start & ~0xfff)
 */
        .align 3
        .globl EXT(_start)
LEXT(_start)
        b       EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
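/*
 * Layout note (architectural): an AArch64 vector table has 16 entries of
 * 128 bytes each (hence the .align 7 between stubs and the aligned base),
 * grouped as Current EL with SP0, Current EL with SPx, Lower EL (AArch64),
 * and Lower EL (AArch32); within each group the entries are Synchronous,
 * IRQ, FIQ, and SError. Each stub below simply spins.
 */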
        .align 12, 0
        .global EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
        /* EL1 SP 0 */
        b       .
        .align 7
        b       .
        .align 7
        b       .
        .align 7
        b       .
        /* EL1 SP1 */
        .align 7
        b       .
        .align 7
        b       .
        .align 7
        b       .
        .align 7
        b       .
        /* EL0 64 */
        .align 7
        b       .
        .align 7
        b       .
        .align 7
        b       .
        .align 7
        b       .
        /* EL0 32 */
        .align 7
        b       .
        .align 7
        b       .
        .align 7
        b       .
        .align 7
        b       .
        .align 12, 0

#if defined(KERNEL_INTEGRITY_KTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
        .align ARM_PGSHIFT
        .globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)
#endif /* defined(KERNEL_INTEGRITY_KTRR)*/
        .align 2
        .globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
        adrp    lr, EXT(arm_init_idle_cpu)@page
        add     lr, lr, EXT(arm_init_idle_cpu)@pageoff
        b       start_cpu

        .align 2
        .globl EXT(start_cpu)
LEXT(start_cpu)
        adrp    lr, EXT(arm_init_cpu)@page
        add     lr, lr, EXT(arm_init_cpu)@pageoff
        b       start_cpu

        .align 2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR)
        // This is done right away in reset vector for pre-KTRR devices
        // Set low reset vector now that we are in the KTRR-free zone
        adrp    x0, EXT(LowExceptionVectorBase)@page
        add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
        MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR)*/

        // x20 set to BootArgs phys address
        // x21 set to cpu data phys address
        msr     DAIFSet, #(DAIFSC_ALL)                  // Disable all interrupts

        // Get the kernel memory parameters from the boot args
        ldr     x22, [x20, BA_VIRT_BASE]                // Get the kernel virt base
        ldr     x23, [x20, BA_PHYS_BASE]                // Get the kernel phys base
        ldr     x24, [x20, BA_MEM_SIZE]                 // Get the physical memory size
        ldr     x25, [x20, BA_TOP_OF_KERNEL_DATA]       // Get the top of the kernel data

        // Set TPIDRRO_EL0 with the CPU number
        ldr     x0, [x21, CPU_NUMBER_GS]
        msr     TPIDRRO_EL0, x0

        // Set the exception stack pointer
        ldr     x0, [x21, CPU_EXCEPSTACK_TOP]


        // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)
        mov     x1, lr
        bl      _pinst_spsel_1
        mov     lr, x1
#else
        msr     SPSel, #1
#endif
        mov     sp, x0

        // Set the interrupt stack pointer
        ldr     x0, [x21, CPU_INTSTACK_TOP]
        msr     SPSel, #0
        mov     sp, x0

        // Convert lr to KVA
        add     lr, lr, x22
        sub     lr, lr, x23

        b       common_start

/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 * arg0 - Virtual address
 * arg1 - L1 table address
 * arg2 - L2 table address
 * arg3 - Scratch register
 * arg4 - Scratch register
 * arg5 - Scratch register
 */
.macro create_l1_table_entry
        and     $3, $0, #(ARM_TT_L1_INDEX_MASK)
        lsr     $3, $3, #(ARM_TT_L1_SHIFT)              // Get index in L1 table for L2 table
        lsl     $3, $3, #(TTE_SHIFT)                    // Convert index into pointer offset
        add     $3, $1, $3                              // Get L1 entry pointer
        mov     $4, #(ARM_TTE_BOOT_TABLE)               // Get L1 table entry template
        and     $5, $2, #(ARM_TTE_TABLE_MASK)           // Get address bits of L2 table
        orr     $5, $4, $5                              // Create table entry for L2 table
        str     $5, [$3]                                // Write entry to L1 table
.endmacro
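/*
 * Usage note: the arguments are positional ($0-$5). As used below in
 * start_first_cpu, for example:
 *     create_l1_table_entry x0, x1, x2, x10, x11, x12
 * computes the L1 slot covering the virtual address in x0 within the L1
 * table at x1 and points it at the L2 table at x2, using x10-x12 as scratch.
 */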

/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 * arg0 - Virtual address
 * arg1 - Physical address
 * arg2 - L2 table address
 * arg3 - Number of entries
 * arg4 - Scratch register
 * arg5 - Scratch register
 * arg6 - Scratch register
 * arg7 - Scratch register
 */
.macro create_l2_block_entries
        and     $4, $0, #(ARM_TT_L2_INDEX_MASK)
        lsr     $4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)       // Get index in L2 table for block entry
        lsl     $4, $4, #(TTE_SHIFT)                    // Convert index into pointer offset
        add     $4, $2, $4                              // Get L2 entry pointer
        mov     $5, #(ARM_TTE_BOOT_BLOCK)               // Get L2 block entry template
        and     $6, $1, #(ARM_TTE_BLOCK_L2_MASK)        // Get address bits of block mapping
        orr     $6, $5, $6
        mov     $5, $3
        mov     $7, #(ARM_TT_L2_SIZE)
1:
        str     $6, [$4], #(1 << TTE_SHIFT)             // Write entry to L2 table and advance
        add     $6, $6, $7                              // Increment the output address
        subs    $5, $5, #1                              // Decrement the number of entries
        b.ne    1b
.endmacro
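/*
 * Roughly, in C-like pseudocode (illustrative only):
 *     tte  = &l2_table[(va & ARM_TT_L2_INDEX_MASK) >> ARM_TTE_BLOCK_L2_SHIFT];
 *     tmpl = ARM_TTE_BOOT_BLOCK | (pa & ARM_TTE_BLOCK_L2_MASK);
 *     for (i = 0; i < count; i++, tmpl += ARM_TT_L2_SIZE)
 *         *tte++ = tmpl;
 * i.e. 'count' consecutive block entries of ARM_TT_L2_SIZE bytes each, starting
 * at the slot that covers 'va' and mapping physical memory upward from 'pa'.
 */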

/*
 * _start_first_cpu
 * Cold boot init routine. Called from __start
 *   x0 - Boot args
 */
        .align 2
        .globl EXT(start_first_cpu)
LEXT(start_first_cpu)

        // Unlock the core for debugging
        msr     OSLAR_EL1, xzr
        mov     x20, x0
        mov     x21, xzr

        // Set low reset vector before attempting any loads
        adrp    x0, EXT(LowExceptionVectorBase)@page
        add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
        MSR_VBAR_EL1_X0



        // Get the kernel memory parameters from the boot args
        ldr     x22, [x20, BA_VIRT_BASE]                // Get the kernel virt base
        ldr     x23, [x20, BA_PHYS_BASE]                // Get the kernel phys base
        ldr     x24, [x20, BA_MEM_SIZE]                 // Get the physical memory size
        ldr     x25, [x20, BA_TOP_OF_KERNEL_DATA]       // Get the top of the kernel data

        // Set CPU number to 0
        msr     TPIDRRO_EL0, x21

        // Set up exception stack pointer
        adrp    x0, EXT(excepstack_top)@page            // Load top of exception stack
        add     x0, x0, EXT(excepstack_top)@pageoff
        add     x0, x0, x22                             // Convert to KVA
        sub     x0, x0, x23

        // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)
        bl      _pinst_spsel_1
#else
        msr     SPSel, #1
#endif

        mov     sp, x0

        // Set up interrupt stack pointer
        adrp    x0, EXT(intstack_top)@page              // Load top of irq stack
        add     x0, x0, EXT(intstack_top)@pageoff
        add     x0, x0, x22                             // Convert to KVA
        sub     x0, x0, x23
        msr     SPSel, #0                               // Set SP_EL0 to interrupt stack
        mov     sp, x0

        // Load address to the C init routine into link register
        adrp    lr, EXT(arm_init)@page
        add     lr, lr, EXT(arm_init)@pageoff
        add     lr, lr, x22                             // Convert to KVA
        sub     lr, lr, x23

        /*
         * Set up the bootstrap page tables with a single block entry for the V=P
         * mapping, a single block entry for the trampolined kernel address (KVA),
         * and all else invalid. This requires four pages:
         *   Page 1 - V=P L1 table
         *   Page 2 - V=P L2 table
         *   Page 3 - KVA L1 table
         *   Page 4 - KVA L2 table
         */
#if __ARM64_TWO_LEVEL_PMAP__
        /*
         * If we are using a two level scheme, we don't need the L1 entries, so:
         *   Page 1 - V=P L2 table
         *   Page 2 - KVA L2 table
         */
#endif

        // Invalidate all entries in the bootstrap page tables
        mov     x0, #(ARM_TTE_EMPTY)                    // Load invalid entry template
        mov     x1, x25                                 // Start at top of kernel
        mov     x2, #(TTE_PGENTRIES)                    // Load number of entries per page
#if __ARM64_TWO_LEVEL_PMAP__
        lsl     x2, x2, #1                              // Shift by 1 for num entries on 2 pages
#else
        lsl     x2, x2, #2                              // Shift by 2 for num entries on 4 pages
#endif
        sub     x2, x2, #1                              // Subtract one to terminate on last entry
Linvalidate_bootstrap:                                  // do {
        str     x0, [x1], #(1 << TTE_SHIFT)             //   Invalidate and advance
        subs    x2, x2, #1                              //   entries--
        b.ne    Linvalidate_bootstrap                   // } while (entries != 0)

        /* Load addresses for page table construction macros
         *  x0 - Physical base (used to identify V=P section to set up)
         *  x1 - V=P L1 table base
         *  x2 - V=P L2 table base
         *  x3 - KVA L1 table base
         *  x4 - KVA L2 table base
         *  x5 - Mem size in entries (up to 1GB)
         */

        /*
         * In order to reclaim memory on targets where TZ0 (or some other entity)
         * must be located at the base of memory, iBoot may set the virtual and
         * physical base addresses to immediately follow whatever lies at the
         * base of physical memory.
         *
         * If the base address belongs to TZ0, it may be dangerous for xnu to map
         * it (as it may be prefetched, despite being technically inaccessible).
         * In order to avoid this issue while keeping the mapping code simple, we
         * may continue to use block mappings, but we will only map xnu's mach
         * header to the end of memory.
         *
         * Given that iBoot guarantees that the unslid kernelcache base address
         * will begin on an L2 boundary, this should prevent us from accidentally
         * mapping TZ0.
         */
        adrp    x0, EXT(_mh_execute_header)@page        // Use xnu's mach header as the start address
        add     x0, x0, EXT(_mh_execute_header)@pageoff
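        // Net effect: the V=P mapping built below starts at the Mach-O header's
        // physical address in x0 rather than at the kernel physical base in x23,
        // so anything below the kernelcache (e.g. TZ0) is left unmapped by the
        // bootstrap tables.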
#if __ARM64_TWO_LEVEL_PMAP__
        /*
         * We don't need the L1 entries in this case, so skip them.
         */
        mov     x2, x25                                 // Load V=P L2 table address
        add     x4, x2, PGBYTES                         // Load KVA L2 table address
#else
        mov     x1, x25                                 // Load V=P L1 table address
        add     x2, x1, PGBYTES                         // Load V=P L2 table address
        add     x3, x2, PGBYTES                         // Load KVA L1 table address
        add     x4, x3, PGBYTES                         // Load KVA L2 table address
#endif
        /*
         * We must adjust the amount we wish to map in order to account for the
         * memory preceding xnu's mach header.
         */
        sub     x5, x0, x23                             // Map from the mach header up to the end of our memory
        sub     x5, x24, x5
        lsr     x5, x5, #(ARM_TT_L2_SHIFT)
        mov     x6, #(TTE_PGENTRIES)                    // Load number of L2 entries per page
        cmp     x5, x6                                  // If memsize requires more than 1 page of entries
        csel    x5, x5, x6, lt                          // ... round down to a single page (first 1GB)
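        // i.e. x5 = min(entries needed, TTE_PGENTRIES): each bootstrap L2 table is
        // a single page, so at most one page's worth of block entries is created
        // here; the remainder of memory is expected to be mapped later in C
        // (see arm_vm_init).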

#if !__ARM64_TWO_LEVEL_PMAP__
        /* Create entry for L2 table in V=P L1 table
         * create_l1_table_entry(V=P, L1 table, L2 table, scratch1, scratch2, scratch3)
         */
        create_l1_table_entry x0, x1, x2, x10, x11, x12
#endif

        /* Create block entry in V=P L2 table
         * create_l2_block_entries(V=P virt, V=P phys, L2 table, num_ents, scratch1, scratch2, scratch3)
         */
        create_l2_block_entries x0, x0, x2, x5, x10, x11, x12, x13

#if !__ARM64_TWO_LEVEL_PMAP__
        /* Create entry for L2 table in KVA L1 table
         * create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3)
         */
        create_l1_table_entry x22, x3, x4, x10, x11, x12
#endif

        /* Create block entries in KVA L2 table
         * create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3)
         */
        create_l2_block_entries x22, x23, x4, x5, x10, x11, x12, x13

        /* Ensure TTEs are visible */
        dsb     ish

        b       common_start

/*
 * Begin common CPU initialization
 *
 * Register state:
 *  x20 - PA of boot args
 *  x21 - zero on cold boot, PA of cpu data on warm reset
 *  x22 - Kernel virtual base
 *  x23 - Kernel physical base
 *  x24 - Physical memory size
 *  x25 - PA of the end of the kernel
 *  lr  - KVA of C init routine
 *  sp  - SP_EL0 selected
 *
 *  SP_EL0 - KVA of CPU's interrupt stack
 *  SP_EL1 - KVA of CPU's exception stack
 *  TPIDRRO_EL0 - CPU number
 */
common_start:
        // Set the translation control register.
        adrp    x0, EXT(sysreg_restore)@page            // Load TCR value from the system register restore structure
        add     x0, x0, EXT(sysreg_restore)@pageoff
        ldr     x1, [x0, SR_RESTORE_TCR_EL1]
        MSR_TCR_EL1_X1

        /* Set up translation table base registers.
         *  TTBR0 - V=P table @ top of kernel
         *  TTBR1 - KVA table @ top of kernel + 2 pages
         */
#if defined(KERNEL_INTEGRITY_KTRR)
        /* Note that for KTRR configurations, the V=P map will be modified by
         * arm_vm_init.c.
         */
#endif
        and     x0, x25, #(TTBR_BADDR_MASK)
#if __ARM_KERNEL_PROTECT__
        /* We start out with a kernel ASID. */
        orr     x0, x0, #(1 << TTBR_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */
        msr     TTBR0_EL1, x0
#if __ARM64_TWO_LEVEL_PMAP__
        /*
         * If we're using a two level pmap, we'll only need a
         * single page per bootstrap pmap.
         */
        mov     x12, #1
#else
        /*
         * If we're using a three level pmap, we'll need two
         * pages per bootstrap pmap.
         */
        mov     x12, #2
#endif
        add     x0, x25, x12, lsl PGSHIFT
        and     x0, x0, #(TTBR_BADDR_MASK)
        MSR_TTBR1_EL1_X0
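        // The "+ 2 pages" above skips the V=P L1/L2 pair laid out by start_first_cpu
        // (top of kernel data: V=P L1, V=P L2, KVA L1, KVA L2), so TTBR1 lands on
        // the KVA L1 table; with a two-level scheme there is only a single page
        // per bootstrap pmap, hence x12 = 1.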

        // Set up MAIR attr0 for normal memory, attr1 for device memory
        mov     x0, xzr
        mov     x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
        orr     x0, x0, x1
        mov     x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
        orr     x0, x0, x1
        mov     x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
        orr     x0, x0, x1
        mov     x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
        orr     x0, x0, x1
        mov     x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
        orr     x0, x0, x1
        mov     x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
        orr     x0, x0, x1
        msr     MAIR_EL1, x0
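        // MAIR_EL1 packs eight 8-bit memory-attribute encodings; MAIR_ATTR_SHIFT(n)
        // selects the byte for attribute index n, and block/page TTEs select an
        // attribute by index (AttrIndx), so the CACHE_ATTRINDX_* indices programmed
        // here must match the ones the pmap encodes into its TTEs.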

        // Disable interrupts
        msr     DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)

#if defined(APPLEHURRICANE)

        // <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
        // Needs to be done before MMU is enabled
        mrs     x12, ARM64_REG_HID5
        and     x12, x12, (~ARM64_REG_HID5_CrdEdbSnpRsvd_mask)
        orr     x12, x12, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE
        msr     ARM64_REG_HID5, x12

#endif


#ifndef __ARM_IC_NOALIAS_ICACHE__
        /* Invalidate the TLB and icache on systems that do not guarantee that the
         * caches are invalidated on reset.
         */
        tlbi    vmalle1
        ic      iallu
#endif

        /* If x21 is not 0, then this is either the start_cpu path or
         * the resume_idle_cpu path. cpu_ttep should already be
         * populated, so just switch to the kernel_pmap now.
         */

        cbz     x21, 1f
        adrp    x0, EXT(cpu_ttep)@page
        add     x0, x0, EXT(cpu_ttep)@pageoff
        ldr     x0, [x0]
        MSR_TTBR1_EL1_X0
1:

        // Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
        /* If this is not the first reset of the boot CPU, the alternate mapping
         * for the exception vectors will be set up, so use it. Otherwise, use
         * the mapping located in the kernelcache.
         */
        MOV64   x0, ARM_KERNEL_PROTECT_EXCEPTION_START

        cbnz    x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
        adrp    x0, EXT(ExceptionVectorsBase)@page      // Load exception vectors base address
        add     x0, x0, EXT(ExceptionVectorsBase)@pageoff
        add     x0, x0, x22                             // Convert exception vector address to KVA
        sub     x0, x0, x23
1:
        MSR_VBAR_EL1_X0


        // Enable caches and MMU
        mov     x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
        mov     x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)
        orr     x0, x0, x1
        MSR_SCTLR_EL1_X0
        isb     sy
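        // The MMU is now enabled, but execution continues on the V=P (identity)
        // block mapping installed via TTBR0; the switch to kernel virtual addresses
        // happens when we return through lr (already converted to a KVA above) or
        // branch to the trampoline below.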

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
        /* Watchtower
         *
         * If we have a Watchtower monitor it will set up CPACR_EL1 for us; touching
         * it here would trap to EL3.
         */

        // Enable NEON
        mov     x0, #(CPACR_FPEN_ENABLE)
        msr     CPACR_EL1, x0
#endif

        // Clear thread pointer
        mov     x0, #0
        msr     TPIDR_EL1, x0                           // Set thread register

#if defined(APPLE_ARM64_ARCH_FAMILY)
        // Initialization common to all Apple targets
        ARM64_IS_PCORE x15
        ARM64_READ_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
        orr     x12, x12, ARM64_REG_HID4_DisDcMVAOps
        orr     x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
        ARM64_WRITE_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
#endif // APPLE_ARM64_ARCH_FAMILY

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
        //
        // Cyclone/Typhoon-Specific initialization
        // For tunable summary, see <rdar://problem/13503621>
        //

        //
        // Disable LSP flush with context switch to work around bug in LSP
        // that can cause Cyclone to wedge when CONTEXTIDR is written.
        // <rdar://problem/12387704>
        //

        mrs     x12, ARM64_REG_HID0
        orr     x12, x12, ARM64_REG_HID0_LoopBuffDisb
        msr     ARM64_REG_HID0, x12

        mrs     x12, ARM64_REG_HID1
        orr     x12, x12, ARM64_REG_HID1_rccDisStallInactiveIexCtl
#if defined(APPLECYCLONE)
        orr     x12, x12, ARM64_REG_HID1_disLspFlushWithContextSwitch
#endif
        msr     ARM64_REG_HID1, x12

        mrs     x12, ARM64_REG_HID3
        orr     x12, x12, ARM64_REG_HID3_DisXmonSnpEvictTriggerL2StarvationMode
        msr     ARM64_REG_HID3, x12

        mrs     x12, ARM64_REG_HID5
        and     x12, x12, (~ARM64_REG_HID5_DisHwpLd)
        and     x12, x12, (~ARM64_REG_HID5_DisHwpSt)
        msr     ARM64_REG_HID5, x12

        // Change the default memcache data set ID from 0 to 15 for all agents
        mrs     x12, ARM64_REG_HID8
        orr     x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
#if ARM64_BOARD_CONFIG_T7001
        orr     x12, x12, ARM64_REG_HID8_DataSetID2_VALUE
#endif // ARM64_BOARD_CONFIG_T7001
        msr     ARM64_REG_HID8, x12
        isb     sy
#endif // APPLECYCLONE || APPLETYPHOON

#if defined(APPLETWISTER)

        // rdar://problem/36112905: Set CYC_CFG:skipInit to pull in isAlive by one DCLK
        // to work around potential hang. Must only be applied to Maui C0.
        mrs     x12, MIDR_EL1
        ubfx    x13, x12, #MIDR_EL1_PNUM_SHIFT, #12
        cmp     x13, #4                                 // Part number 4 => Maui, 5 => Malta/Elba
        bne     Lskip_isalive
        ubfx    x13, x12, #MIDR_EL1_VAR_SHIFT, #4
        cmp     x13, #2                                 // variant 2 => Maui C0
        b.lt    Lskip_isalive

        mrs     x12, ARM64_REG_CYC_CFG
        orr     x12, x12, ARM64_REG_CYC_CFG_skipInit
        msr     ARM64_REG_CYC_CFG, x12

Lskip_isalive:

        mrs     x12, ARM64_REG_HID11
        and     x12, x12, (~ARM64_REG_HID11_DisFillC1BubOpt)
        msr     ARM64_REG_HID11, x12

        // Change the default memcache data set ID from 0 to 15 for all agents
        mrs     x12, ARM64_REG_HID8
        orr     x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
        orr     x12, x12, (ARM64_REG_HID8_DataSetID2_VALUE | ARM64_REG_HID8_DataSetID3_VALUE)
        msr     ARM64_REG_HID8, x12

        // Use 4-cycle MUL latency to avoid denormal stalls
        mrs     x12, ARM64_REG_HID7
        orr     x12, x12, #ARM64_REG_HID7_disNexFastFmul
        msr     ARM64_REG_HID7, x12

        // disable reporting of TLB-multi-hit-error
        // <rdar://problem/22163216>
        mrs     x12, ARM64_REG_LSU_ERR_STS
        and     x12, x12, (~ARM64_REG_LSU_ERR_STS_L1DTlbMultiHitEN)
        msr     ARM64_REG_LSU_ERR_STS, x12

        isb     sy
#endif // APPLETWISTER

#if defined(APPLEHURRICANE)

        // IC prefetch configuration
        // <rdar://problem/23019425>
        mrs     x12, ARM64_REG_HID0
        and     x12, x12, (~ARM64_REG_HID0_ICPrefDepth_bmsk)
        orr     x12, x12, (1 << ARM64_REG_HID0_ICPrefDepth_bshift)
        orr     x12, x12, ARM64_REG_HID0_ICPrefLimitOneBrn
        msr     ARM64_REG_HID0, x12

        // disable reporting of TLB-multi-hit-error
        // <rdar://problem/22163216>
        mrs     x12, ARM64_REG_LSU_ERR_CTL
        and     x12, x12, (~ARM64_REG_LSU_ERR_CTL_L1DTlbMultiHitEN)
        msr     ARM64_REG_LSU_ERR_CTL, x12

        // disable crypto fusion across decode groups
        // <rdar://problem/27306424>
        mrs     x12, ARM64_REG_HID1
        orr     x12, x12, ARM64_REG_HID1_disAESFuseAcrossGrp
        msr     ARM64_REG_HID1, x12

#if defined(ARM64_BOARD_CONFIG_T8011)
        // Clear DisDcZvaCmdOnly
        // Per Myst A0/B0 tunables document
        // <rdar://problem/27627428> Myst: Confirm ACC Per-CPU Tunables
        mrs     x12, ARM64_REG_HID3
        and     x12, x12, ~ARM64_REG_HID3_DisDcZvaCmdOnly
        msr     ARM64_REG_HID3, x12

        mrs     x12, ARM64_REG_EHID3
        and     x12, x12, ~ARM64_REG_EHID3_DisDcZvaCmdOnly
        msr     ARM64_REG_EHID3, x12
#endif /* defined(ARM64_BOARD_CONFIG_T8011) */

#endif // APPLEHURRICANE


        // If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
        cbnz    x21, Ltrampoline

        // Set KVA of boot args as first arg
        add     x0, x20, x22
        sub     x0, x0, x23

#if KASAN
        mov     x20, x0
        mov     x21, lr

        // x0: boot args
        // x1: KVA page table phys base
        mrs     x1, TTBR1_EL1
        bl      _kasan_bootstrap

        mov     x0, x20
        mov     lr, x21
#endif

        // Return to arm_init()
        ret

Ltrampoline:
        // Load VA of the trampoline
        adrp    x0, arm_init_tramp@page
        add     x0, x0, arm_init_tramp@pageoff
        add     x0, x0, x22
        sub     x0, x0, x23

        // Branch to the trampoline
        br      x0

/*
 * V=P to KVA trampoline.
 *  x0 - KVA of cpu data pointer
 */
        .text
        .align 2
arm_init_tramp:
        /* On a warm boot, the full kernel translation table is initialized in
         * addition to the bootstrap tables. The layout is as follows:
         *
         *  +--Top of Memory--+
         *          ...
         *  |                 |
         *  |  Primary Kernel |
         *  |   Trans. Table  |
         *  |                 |
         *  +--Top + 5 pages--+
         *  |                 |
         *  |  Invalid Table  |
         *  |                 |
         *  +--Top + 4 pages--+
         *  |                 |
         *  |    KVA Table    |
         *  |                 |
         *  +--Top + 2 pages--+
         *  |                 |
         *  |    V=P Table    |
         *  |                 |
         *  +--Top of Kernel--+
         *  |                 |
         *  |  Kernel Mach-O  |
         *  |                 |
         *          ...
         *  +---Kernel Base---+
         */


        adrp    x0, EXT(invalid_ttep)@page
        add     x0, x0, EXT(invalid_ttep)@pageoff
        ldr     x0, [x0]
#if __ARM_KERNEL_PROTECT__
        /* We start out with a kernel ASID. */
        orr     x0, x0, #(1 << TTBR_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */

        msr     TTBR0_EL1, x0

        // Convert CPU data PA to VA and set as first argument
        add     x0, x21, x22
        sub     x0, x0, x23
        mov     x1, #0

        // Make sure that the TLB flush happens after the registers are set!
        isb     sy

        // Synchronize system for TTBR updates
        tlbi    vmalle1
        dsb     sy
        isb     sy
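        // With TTBR0 now pointing at the invalid table and the TLB flushed, the
        // bootstrap V=P translations are gone; the ret below returns through lr to
        // the C init routine (arm_init_cpu or arm_init_idle_cpu) at its kernel
        // virtual address.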

        /* Return to arm_init_cpu() or arm_init_idle_cpu() via lr */
        ret

//#include "globals_asm.h"

/* vim: set ts=4: */