/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include "assym.s"
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */



.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_vbar)
	mov	lr, x1
#else
	msr	VBAR_EL1, x0
#endif
.endmacro

.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x0, x1
	mov	x1, lr
	bl	EXT(pinst_set_tcr)
	mov	lr, x1
#else
	msr	TCR_EL1, x1
#endif
.endmacro

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_ttbr1)
	mov	lr, x1
#else
	msr	TTBR1_EL1, x0
#endif
.endmacro

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr

	// This may abort, do so on SP1
	bl	EXT(pinst_spsel_1)

	bl	EXT(pinst_set_sctlr)
	msr	SPSel, #0		// Back to SP0
	mov	lr, x1
#else
	msr	SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro

/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *	x19 - Reset handler data pointer
 *	x20 - Boot args pointer
 *	x21 - CPU data pointer
 */
	.text
	.align 12
	.globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
	/*
	 * On reset, both RVBAR_EL1 and VBAR_EL1 point here. SPSel.SP is 1,
	 * so on reset the CPU will jump to offset 0x0 and on exceptions
	 * the CPU will jump to offset 0x200, 0x280, 0x300, or 0x380.
	 * In order for both the reset vector and exception vectors to
	 * coexist in the same space, the reset code is moved to the end
	 * of the exception vector area.
	 */
	b	EXT(reset_vector)

	/* EL1 SP1: These vectors trap errors during early startup on non-boot CPUs. */
	.align	9
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
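	/*
	 * The .align 9 above places the first spin stub at offset 0x200 from
	 * LowResetVectorBase, and each following .align 7 advances by 0x80, so
	 * the four stubs land at the 0x200/0x280/0x300/0x380 exception offsets
	 * described in the comment above.
	 */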

	.align	7
	.globl EXT(reset_vector)
LEXT(reset_vector)
	// Preserve x0 for start_first_cpu, if called
	// Unlock the core for debugging
	msr	OSLAR_EL1, xzr
	msr	DAIFSet, #(DAIFSC_ALL)				// Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR))
	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	msr	VBAR_EL1, x0
#endif


#if defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Set KTRR registers immediately after wake/resume
	 *
	 * During power on reset, XNU stashed the kernel text region range values
	 * into __DATA,__const which should be protected by AMCC RoRgn at this point.
	 * Read this data and program/lock KTRR registers accordingly.
	 * If either value is zero, we're debugging the kernel, so skip programming KTRR.
	 */

	/* spin until bootstrap core has completed machine lockdown */
	adrp	x17, EXT(lockdown_done)@page
1:
	ldr	x18, [x17, EXT(lockdown_done)@pageoff]
	cbz	x18, 1b

	// load stashed rorgn_begin
	adrp	x17, EXT(rorgn_begin)@page
	add	x17, x17, EXT(rorgn_begin)@pageoff
	ldr	x17, [x17]
	// if rorgn_begin is zero, we're debugging. skip enabling ktrr
	cbz	x17, Lskip_ktrr

	// load stashed rorgn_end
	adrp	x19, EXT(rorgn_end)@page
	add	x19, x19, EXT(rorgn_end)@pageoff
	ldr	x19, [x19]
	cbz	x19, Lskip_ktrr

	// program and lock down KTRR
	// subtract one page from rorgn_end to make pinst insns NX
	msr	ARM64_REG_KTRR_LOWER_EL1, x17
	sub	x19, x19, #(1 << (ARM_PTE_SHIFT-12)), lsl #12
	msr	ARM64_REG_KTRR_UPPER_EL1, x19
	mov	x17, #1
	msr	ARM64_REG_KTRR_LOCK_EL1, x17
Lskip_ktrr:
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
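	/*
	 * ARM64_REG_KTRR_LOWER/UPPER_EL1 above bound the protected kernel text
	 * range and ARM64_REG_KTRR_LOCK_EL1 locks that configuration; once
	 * locked, it is not expected to be writable again until the core resets.
	 */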

	// Process reset handlers
	adrp	x19, EXT(ResetHandlerData)@page			// Get address of the reset handler data
	add	x19, x19, EXT(ResetHandlerData)@pageoff
	mrs	x15, MPIDR_EL1					// Load MPIDR to get CPU number
	and	x0, x15, #0xFF					// CPU number is in MPIDR Affinity Level 0
	ldr	x1, [x19, CPU_DATA_ENTRIES]			// Load start of data entries
	add	x3, x1, MAX_CPUS * 16				// end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
	ldr	x21, [x1, CPU_DATA_PADDR]			// Load physical CPU data address
	cbz	x21, Lnext_cpu_data_entry
	ldr	w2, [x21, CPU_PHYS_ID]				// Load ccc cpu phys id
	cmp	x0, x2						// Compare cpu data phys cpu and MPIDR_EL1 phys cpu
	b.eq	Lfound_cpu_data_entry				// Branch if match
Lnext_cpu_data_entry:
	add	x1, x1, #16					// Increment to the next cpu data entry
	cmp	x1, x3
	b.eq	Lskip_cpu_reset_handler				// Not found
	b	Lcheck_cpu_data_entry				// loop
Lfound_cpu_data_entry:
	adrp	x20, EXT(const_boot_args)@page
	add	x20, x20, EXT(const_boot_args)@pageoff
	ldr	x0, [x21, CPU_RESET_HANDLER]			// Call CPU reset handler
	cbz	x0, Lskip_cpu_reset_handler

	// Validate that our handler is one of the two expected handlers
	adrp	x2, EXT(resume_idle_cpu)@page
	add	x2, x2, EXT(resume_idle_cpu)@pageoff
	cmp	x0, x2
	beq	1f
	adrp	x2, EXT(start_cpu)@page
	add	x2, x2, EXT(start_cpu)@pageoff
	cmp	x0, x2
	bne	Lskip_cpu_reset_handler
1:
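	// Accepting only the two known entry points above means a corrupted or
	// unexpected CPU_RESET_HANDLER value falls through to
	// Lskip_cpu_reset_handler and parks the CPU instead of being executed.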



#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Populate TPIDR_EL1 (in case the CPU takes an exception while
	 * turning on the MMU).
	 */
	ldr	x13, [x21, CPU_ACTIVE_THREAD]
	msr	TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

	blr	x0
Lskip_cpu_reset_handler:
	b	.						// Hang if the handler is NULL or returns

	.align 3
	.global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
	.global EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
	.space	(stSize_NUM),0
#endif

	.section __DATA_CONST,__const
	.align	3
	.globl	EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space	(rhdSize_NUM),0					// (filled with 0s)
	.text


/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point. Reset vector = (__start & ~0xfff)
 */
	.align 3
	.globl EXT(_start)
LEXT(_start)
	b	EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
	.align 12, 0
	.global	EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
	/* EL1 SP 0 */
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	/* EL1 SP1 */
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	/* EL0 64 */
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	/* EL0 32 */
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align 12, 0

#if defined(KERNEL_INTEGRITY_KTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
.align ARM_PGSHIFT
.globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)

#endif /* defined(KERNEL_INTEGRITY_KTRR)*/
	.align 2
	.globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
	adrp	lr, EXT(arm_init_idle_cpu)@page
	add	lr, lr, EXT(arm_init_idle_cpu)@pageoff
	b	start_cpu

	.align 2
	.globl EXT(start_cpu)
LEXT(start_cpu)
	adrp	lr, EXT(arm_init_cpu)@page
	add	lr, lr, EXT(arm_init_cpu)@pageoff
	b	start_cpu

	.align 2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR)
	// This is done right away in reset vector for pre-KTRR devices
	// Set low reset vector now that we are in the KTRR-free zone
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR)*/

	// x20 set to BootArgs phys address
	// x21 set to cpu data phys address

	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]			// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]			// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]				// Get the physical memory size
	ldr	x25, [x20, BA_TOP_OF_KERNEL_DATA]		// Get the top of the kernel data
	ldr	x26, [x20, BA_BOOT_FLAGS]			// Get the kernel boot flags


	// Set TPIDRRO_EL0 with the CPU number
	ldr	x0, [x21, CPU_NUMBER_GS]
	msr	TPIDRRO_EL0, x0

	// Set the exception stack pointer
	ldr	x0, [x21, CPU_EXCEPSTACK_TOP]


	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_spsel_1)
	mov	lr, x1
#else
	msr	SPSel, #1
#endif
	mov	sp, x0

	// Set the interrupt stack pointer
	ldr	x0, [x21, CPU_INTSTACK_TOP]
	msr	SPSel, #0
	mov	sp, x0

	// Convert lr to KVA
	add	lr, lr, x22
	sub	lr, lr, x23

	b	common_start

/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - L1 table address
 *   arg2 - L2 table address
 *   arg3 - Scratch register
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 */
.macro create_l1_table_entry
	and	$3, $0, #(ARM_TT_L1_INDEX_MASK)
	lsr	$3, $3, #(ARM_TT_L1_SHIFT)			// Get index in L1 table for L2 table
	lsl	$3, $3, #(TTE_SHIFT)				// Convert index into pointer offset
	add	$3, $1, $3					// Get L1 entry pointer
	mov	$4, #(ARM_TTE_BOOT_TABLE)			// Get L1 table entry template
	and	$5, $2, #(ARM_TTE_TABLE_MASK)			// Get address bits of L2 table
	orr	$5, $4, $5					// Create table entry for L2 table
	str	$5, [$3]					// Write entry to L1 table
.endmacro
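	// In the macro above, ARM_TT_L1_INDEX_MASK/ARM_TT_L1_SHIFT extract the
	// L1 index from the VA; shifting it left by TTE_SHIFT turns the index
	// into a byte offset, since each translation table descriptor is 8 bytes.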

/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - Physical address
 *   arg2 - L2 table address
 *   arg3 - Number of entries
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 *   arg6 - Scratch register
 *   arg7 - Scratch register
 */
.macro create_l2_block_entries
	and	$4, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)		// Get index in L2 table for block entry
	lsl	$4, $4, #(TTE_SHIFT)				// Convert index into pointer offset
	add	$4, $2, $4					// Get L2 entry pointer
	mov	$5, #(ARM_TTE_BOOT_BLOCK)			// Get L2 block entry template
	and	$6, $1, #(ARM_TTE_BLOCK_L2_MASK)		// Get address bits of block mapping
	orr	$6, $5, $6
	mov	$5, $3
	mov	$7, #(ARM_TT_L2_SIZE)
1:
	str	$6, [$4], #(1 << TTE_SHIFT)			// Write entry to L2 table and advance
	add	$6, $6, $7					// Increment the output address
	subs	$5, $5, #1					// Decrement the number of entries
	b.ne	1b
.endmacro
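	// Each iteration of the loop above maps another ARM_TT_L2_SIZE bytes
	// (one L2 block) by advancing both the descriptor pointer and the output
	// address template, so the requested entries cover a physically
	// contiguous range.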

/*
 * arg0 - virtual start address
 * arg1 - physical start address
 * arg2 - number of entries to map
 * arg3 - L1 table address
 * arg4 - free space pointer
 * arg5 - scratch (entries mapped per loop)
 * arg6 - scratch
 * arg7 - scratch
 * arg8 - scratch
 * arg9 - scratch
 */
.macro create_bootstrap_mapping
	/* calculate entries left in this page */
	and	$5, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$5, $5, #(ARM_TT_L2_SHIFT)
	mov	$6, #(TTE_PGENTRIES)
	sub	$5, $6, $5

	/* allocate an L2 table */
3:	add	$4, $4, PGBYTES

	/* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
	create_l1_table_entry	$0, $3, $4, $6, $7, $8

	/* determine how many entries to map this loop - the smaller of entries
	 * remaining in page and total entries left */
	cmp	$2, $5
	csel	$5, $2, $5, lt

	/* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3) */
	create_l2_block_entries	$0, $1, $4, $5, $6, $7, $8, $9

	/* subtract entries just mapped and bail out if we're done */
	subs	$2, $2, $5
	beq	2f

	/* entries left to map - advance base pointers */
	add	$0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
	add	$1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

	mov	$5, #(TTE_PGENTRIES)				/* subsequent loops map (up to) a whole L2 page */
	b	3b
2:
.endmacro
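	// The macro above walks the VA range one L2 table page at a time: it
	// takes the next page from the free-space pointer ($4), wires it into
	// the L1 table, fills as many block entries as fit (bounded by what is
	// left to map), then advances the VA/PA cursors and repeats.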

/*
 * _start_first_cpu
 * Cold boot init routine.  Called from __start
 *   x0 - Boot args
 */
	.align 2
	.globl EXT(start_first_cpu)
LEXT(start_first_cpu)

	// Unlock the core for debugging
	msr	OSLAR_EL1, xzr
	msr	DAIFSet, #(DAIFSC_ALL)				// Disable all interrupts

	mov	x20, x0
	mov	x21, #0

	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0


	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]			// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]			// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]				// Get the physical memory size
	ldr	x25, [x20, BA_TOP_OF_KERNEL_DATA]		// Get the top of the kernel data
	ldr	x26, [x20, BA_BOOT_FLAGS]			// Get the kernel boot flags

	// Clear the register that will be used to store the userspace thread pointer and CPU number.
	// We may not actually be booting from ordinal CPU 0, so this register will be updated
	// in ml_parse_cpu_topology(), which happens later in bootstrap.
	msr	TPIDRRO_EL0, x21

	// Set up exception stack pointer
	adrp	x0, EXT(excepstack_top)@page			// Load top of exception stack
	add	x0, x0, EXT(excepstack_top)@pageoff
	add	x0, x0, x22					// Convert to KVA
	sub	x0, x0, x23

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_spsel_1)
#else
	msr	SPSel, #1
#endif

	mov	sp, x0

	// Set up interrupt stack pointer
	adrp	x0, EXT(intstack_top)@page			// Load top of irq stack
	add	x0, x0, EXT(intstack_top)@pageoff
	add	x0, x0, x22					// Convert to KVA
	sub	x0, x0, x23
	msr	SPSel, #0					// Set SP_EL0 to interrupt stack
	mov	sp, x0

	// Load address to the C init routine into link register
	adrp	lr, EXT(arm_init)@page
	add	lr, lr, EXT(arm_init)@pageoff
	add	lr, lr, x22					// Convert to KVA
	sub	lr, lr, x23

	/*
	 * Set up the bootstrap page tables with a single block entry for the V=P
	 * mapping, a single block entry for the trampolined kernel address (KVA),
	 * and all else invalid. This requires four pages:
	 *	Page 1 - V=P L1 table
	 *	Page 2 - V=P L2 table
	 *	Page 3 - KVA L1 table
	 *	Page 4 - KVA L2 table
	 */

	// Invalidate all entries in the bootstrap page tables
	mov	x0, #(ARM_TTE_EMPTY)				// Load invalid entry template
	mov	x1, x25						// Start at top of kernel
	mov	x2, #(TTE_PGENTRIES)				// Load number of entries per page
	lsl	x2, x2, #2					// Shift by 2 for num entries on 4 pages

Linvalidate_bootstrap:						// do {
	str	x0, [x1], #(1 << TTE_SHIFT)			//   Invalidate and advance
	subs	x2, x2, #1					//   entries--
	b.ne	Linvalidate_bootstrap				// } while (entries != 0)

	/*
	 * In order to reclaim memory on targets where TZ0 (or some other entity)
	 * must be located at the base of memory, iBoot may set the virtual and
	 * physical base addresses to immediately follow whatever lies at the
	 * base of physical memory.
	 *
	 * If the base address belongs to TZ0, it may be dangerous for xnu to map
	 * it (as it may be prefetched, despite being technically inaccessible).
	 * In order to avoid this issue while keeping the mapping code simple, we
	 * may continue to use block mappings, but we will only map xnu's mach
	 * header to the end of memory.
	 *
	 * Given that iBoot guarantees that the unslid kernelcache base address
	 * will begin on an L2 boundary, this should prevent us from accidentally
	 * mapping TZ0.
	 */
	adrp	x0, EXT(_mh_execute_header)@page		// Use xnu's mach header as the start address
	add	x0, x0, EXT(_mh_execute_header)@pageoff

	/*
	 * Adjust physical and virtual base addresses to account for physical
	 * memory preceding the xnu Mach-O header
	 *   x22 - Kernel virtual base
	 *   x23 - Kernel physical base
	 *   x24 - Physical memory size
	 */
	sub	x18, x0, x23
	sub	x24, x24, x18
	add	x22, x22, x18
	add	x23, x23, x18
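	// x18 now holds the distance from the original physical base to the
	// Mach-O header; sliding both bases up by that amount and shrinking the
	// memory size makes the mappings below start at the kernel image rather
	// than at whatever (e.g. TZ0) sits at the base of memory.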

	/*
	 * x0  - V=P virtual cursor
	 * x4  - V=P physical cursor
	 * x14 - KVA virtual cursor
	 * x15 - KVA physical cursor
	 */
	mov	x4, x0
	mov	x14, x22
	mov	x15, x23

	/*
	 * Allocate L1 tables
	 * x1 - V=P L1 page
	 * x3 - KVA L1 page
	 * x2 - free mem pointer from which we allocate a variable number of L2
	 * pages. The maximum number of bootstrap page table pages is limited to
	 * BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the worst-case
	 * slide, we need 1xL1 and up to 3xL2 pages (1GB mapped per L1 entry), so
	 * 8 total pages for V=P and KVA.
	 */
	mov	x1, x25
	add	x3, x1, PGBYTES
	mov	x2, x3

	/*
	 * Set up the V=P bootstrap mapping
	 * x5 - total number of L2 entries to allocate
	 */
	lsr	x5, x24, #(ARM_TT_L2_SHIFT)
	/* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
	create_bootstrap_mapping x0, x4, x5, x1, x2, x6, x10, x11, x12, x13

	/* Set up the KVA bootstrap mapping */
	lsr	x5, x24, #(ARM_TT_L2_SHIFT)
	create_bootstrap_mapping x14, x15, x5, x3, x2, x9, x10, x11, x12, x13

	/* Ensure TTEs are visible */
	dsb	ish


	b	common_start

/*
 * Begin common CPU initialization
 *
 * Register state:
 *	x20 - PA of boot args
 *	x21 - zero on cold boot, PA of cpu data on warm reset
 *	x22 - Kernel virtual base
 *	x23 - Kernel physical base
 *	x25 - PA of the end of the kernel
 *	lr  - KVA of C init routine
 *	sp  - SP_EL0 selected
 *
 *	SP_EL0 - KVA of CPU's interrupt stack
 *	SP_EL1 - KVA of CPU's exception stack
 *	TPIDRRO_EL0 - CPU number
 */
common_start:
	// Set the translation control register.
	adrp	x0, EXT(sysreg_restore)@page			// Load TCR value from the system register restore structure
	add	x0, x0, EXT(sysreg_restore)@pageoff
	ldr	x1, [x0, SR_RESTORE_TCR_EL1]
	MSR_TCR_EL1_X1
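	// TCR_EL1 is programmed from the sysreg_restore template, so cold boot
	// and warm resume (both of which pass through common_start) end up with
	// the same translation-control settings.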

	/* Set up translation table base registers.
	 *	TTBR0 - V=P table @ top of kernel
	 *	TTBR1 - KVA table @ top of kernel + 1 page
	 */
#if defined(KERNEL_INTEGRITY_KTRR)
	/* Note that for KTRR configurations, the V=P map will be modified by
	 * arm_vm_init.c.
	 */
#endif
	and	x0, x25, #(TTBR_BADDR_MASK)
	mov	x19, lr
	bl	EXT(set_mmu_ttb)
	mov	lr, x19
	add	x0, x25, PGBYTES
	and	x0, x0, #(TTBR_BADDR_MASK)
	MSR_TTBR1_EL1_X0

	// Set up MAIR attr0 for normal memory, attr1 for device memory
	mov	x0, xzr
	mov	x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
	orr	x0, x0, x1
	mov	x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
	orr	x0, x0, x1
	mov	x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
	orr	x0, x0, x1
	mov	x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
	orr	x0, x0, x1
	mov	x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_REORDERED))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED))
	orr	x0, x0, x1
	msr	MAIR_EL1, x0
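	// MAIR_EL1 holds eight 8-bit memory-attribute fields; each page/block
	// descriptor selects one of them via its AttrIndx bits, which is what the
	// CACHE_ATTRINDX_* indices above correspond to.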

#if defined(APPLEHURRICANE)

	// <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
	// Needs to be done before MMU is enabled
	mrs	x12, ARM64_REG_HID5
	and	x12, x12, (~ARM64_REG_HID5_CrdEdbSnpRsvd_mask)
	orr	x12, x12, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE
	msr	ARM64_REG_HID5, x12

#endif

#if defined(BCM2837)
	// Set up timer interrupt routing; must be done before MMU is enabled
	mrs	x15, MPIDR_EL1					// Load MPIDR to get CPU number
	and	x15, x15, #0xFF					// CPU number is in MPIDR Affinity Level 0
	mov	x0, #0x4000
	lsl	x0, x0, #16
	add	x0, x0, #0x0040					// x0: 0x4000004X Core Timers interrupt control
	add	x0, x0, x15, lsl #2
	mov	w1, #0xF0					// x1: 0xF0 Route to Core FIQs
	str	w1, [x0]
	isb	sy
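	// The store above hits the BCM2837 (Raspberry Pi 3) per-core timer
	// interrupt control register at 0x4000_0040 + 4 * core; writing 0xF0
	// routes that core's four timer interrupts to FIQ.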
#endif



#ifndef __ARM_IC_NOALIAS_ICACHE__
	/* Invalidate the TLB and icache on systems that do not guarantee that the
	 * caches are invalidated on reset.
	 */
	tlbi	vmalle1
	ic	iallu
#endif

	/* If x21 is not 0, then this is either the start_cpu path or
	 * the resume_idle_cpu path.  cpu_ttep should already be
	 * populated, so just switch to the kernel_pmap now.
	 */

	cbz	x21, 1f
	adrp	x0, EXT(cpu_ttep)@page
	add	x0, x0, EXT(cpu_ttep)@pageoff
	ldr	x0, [x0]
	MSR_TTBR1_EL1_X0
1:

	// Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
	/* If this is not the first reset of the boot CPU, the alternate mapping
	 * for the exception vectors will be set up, so use it.  Otherwise, we
	 * should use the mapping located in the kernelcache mapping.
	 */
	MOV64	x0, ARM_KERNEL_PROTECT_EXCEPTION_START

	cbnz	x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
	adrp	x0, EXT(ExceptionVectorsBase)@page		// Load exception vectors base address
	add	x0, x0, EXT(ExceptionVectorsBase)@pageoff
	add	x0, x0, x22					// Convert exception vector address to KVA
	sub	x0, x0, x23
1:
	MSR_VBAR_EL1_X0

1:
#ifdef HAS_APPLE_PAC
#ifdef __APSTS_SUPPORTED__
	mrs	x0, ARM64_REG_APSTS_EL1
	and	x1, x0, #(APSTS_EL1_MKEYVld)
	cbz	x1, 1b						// Poll APSTS_EL1.MKEYVld
	mrs	x0, ARM64_REG_APCTL_EL1
	orr	x0, x0, #(APCTL_EL1_AppleMode)
	orr	x0, x0, #(APCTL_EL1_KernKeyEn)
	and	x0, x0, #~(APCTL_EL1_EnAPKey0)
	msr	ARM64_REG_APCTL_EL1, x0
#else
	mrs	x0, ARM64_REG_APCTL_EL1
	and	x1, x0, #(APCTL_EL1_MKEYVld)
	cbz	x1, 1b						// Poll APCTL_EL1.MKEYVld
	orr	x0, x0, #(APCTL_EL1_AppleMode)
	orr	x0, x0, #(APCTL_EL1_KernKeyEn)
	msr	ARM64_REG_APCTL_EL1, x0
#endif /* APSTS_SUPPORTED */

	/* ISB necessary to ensure APCTL_EL1_AppleMode logic enabled before proceeding */
	isb	sy
	/* Load static kernel key diversification values */
	ldr	x0, =KERNEL_ROP_ID
	/* set ROP key. must write at least once to pickup mkey per boot diversification */
	msr	APIBKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APIBKeyHi_EL1, x0
	add	x0, x0, #1
	msr	APDBKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APDBKeyHi_EL1, x0
	add	x0, x0, #1
	msr	ARM64_REG_KERNELKEYLO_EL1, x0
	add	x0, x0, #1
	msr	ARM64_REG_KERNELKEYHI_EL1, x0
	/* set JOP key. must write at least once to pickup mkey per boot diversification */
	add	x0, x0, #1
	msr	APIAKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APIAKeyHi_EL1, x0
	add	x0, x0, #1
	msr	APDAKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APDAKeyHi_EL1, x0
	/* set G key */
	add	x0, x0, #1
	msr	APGAKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APGAKeyHi_EL1, x0
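	/*
	 * The key registers above are seeded with simple static increments of
	 * KERNEL_ROP_ID; per the comments, the hardware mixes each write with
	 * the per-boot master key, so the effective keys still differ on every
	 * boot.
	 */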

	// Enable caches, MMU, ROP and JOP
	mov	x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
	mov	x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)
	orr	x0, x0, x1
	orr	x0, x0, #(SCTLR_PACIB_ENABLED)			/* IB is ROP */

#if DEBUG || DEVELOPMENT
	and	x2, x26, BA_BOOT_FLAGS_DISABLE_JOP
#if __APCFG_SUPPORTED__
	// for APCFG systems, JOP keys are always on for EL1 unless ELXENKEY is cleared.
	// JOP keys for EL0 will be toggled on the first time we pmap_switch to a pmap that has JOP enabled
	cbz	x2, Lenable_mmu
	mrs	x3, APCFG_EL1
	and	x3, x3, #~(APCFG_EL1_ELXENKEY)
	msr	APCFG_EL1, x3
#else /* __APCFG_SUPPORTED__ */
	cbnz	x2, Lenable_mmu
#endif /* __APCFG_SUPPORTED__ */
#endif /* DEBUG || DEVELOPMENT */

#if !__APCFG_SUPPORTED__
	MOV64	x1, SCTLR_JOP_KEYS_ENABLED
	orr	x0, x0, x1
#endif /* !__APCFG_SUPPORTED__ */
Lenable_mmu:
#else /* HAS_APPLE_PAC */

	// Enable caches and MMU
	mov	x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
	mov	x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)
	orr	x0, x0, x1
#endif /* HAS_APPLE_PAC */
	MSR_SCTLR_EL1_X0
	isb	sy

	MOV32	x1, SCTLR_EL1_DEFAULT
#if HAS_APPLE_PAC
	orr	x1, x1, #(SCTLR_PACIB_ENABLED)
#if !__APCFG_SUPPORTED__
	MOV64	x2, SCTLR_JOP_KEYS_ENABLED
#if (DEBUG || DEVELOPMENT)
	// Ignore the JOP bits, since we can't predict at compile time whether BA_BOOT_FLAGS_DISABLE_JOP is set
	bic	x0, x0, x2
#else
	orr	x1, x1, x2
#endif /* (DEBUG || DEVELOPMENT) */
#endif /* !__APCFG_SUPPORTED__ */
#endif /* HAS_APPLE_PAC */
	cmp	x0, x1
	bne	.
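	// Sanity check: x1 is rebuilt to the SCTLR value we intended to program
	// (with the JOP bits masked out of x0 on DEBUG/DEVELOPMENT, where they
	// depend on a boot-arg); if the value just written does not match, spin
	// here rather than continue on a misconfigured CPU.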

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
	/* Watchtower
	 *
	 * If we have a Watchtower monitor it will set up CPACR_EL1 for us, touching
	 * it here would trap to EL3.
	 */

	// Enable NEON
	mov	x0, #(CPACR_FPEN_ENABLE)
	msr	CPACR_EL1, x0
#endif

	// Clear thread pointer
	mov	x0, #0
	msr	TPIDR_EL1, x0					// Set thread register

#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Initialization common to all Apple targets
	ARM64_IS_PCORE x15
	ARM64_READ_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
	orr	x12, x12, ARM64_REG_HID4_DisDcMVAOps
	orr	x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
	ARM64_WRITE_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
#endif // APPLE_ARM64_ARCH_FAMILY

#if defined(APPLETYPHOON)
	//
	// Typhoon-Specific initialization
	// For tunable summary, see <rdar://problem/13503621>
	//

	//
	// Disable LSP flush with context switch to work around bug in LSP
	// that can cause Typhoon to wedge when CONTEXTIDR is written.
	// <rdar://problem/12387704>
	//

	mrs	x12, ARM64_REG_HID0
	orr	x12, x12, ARM64_REG_HID0_LoopBuffDisb
	msr	ARM64_REG_HID0, x12

	mrs	x12, ARM64_REG_HID1
	orr	x12, x12, ARM64_REG_HID1_rccDisStallInactiveIexCtl
	msr	ARM64_REG_HID1, x12

	mrs	x12, ARM64_REG_HID3
	orr	x12, x12, ARM64_REG_HID3_DisXmonSnpEvictTriggerL2StarvationMode
	msr	ARM64_REG_HID3, x12

	mrs	x12, ARM64_REG_HID5
	and	x12, x12, (~ARM64_REG_HID5_DisHwpLd)
	and	x12, x12, (~ARM64_REG_HID5_DisHwpSt)
	msr	ARM64_REG_HID5, x12

	// Change the default memcache data set ID from 0 to 15 for all agents
	mrs	x12, ARM64_REG_HID8
	orr	x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
#if ARM64_BOARD_CONFIG_T7001
	orr	x12, x12, ARM64_REG_HID8_DataSetID2_VALUE
#endif // ARM64_BOARD_CONFIG_T7001
	msr	ARM64_REG_HID8, x12
	isb	sy
#endif // APPLETYPHOON

#if defined(APPLETWISTER)

	// rdar://problem/36112905: Set CYC_CFG:skipInit to pull in isAlive by one DCLK
	// to work around potential hang.  Must only be applied to Maui C0.
	mrs	x12, MIDR_EL1
	ubfx	x13, x12, #MIDR_EL1_PNUM_SHIFT, #12
	cmp	x13, #4						// Part number 4 => Maui, 5 => Malta/Elba
	bne	Lskip_isalive
	ubfx	x13, x12, #MIDR_EL1_VAR_SHIFT, #4
	cmp	x13, #2						// variant 2 => Maui C0
	b.lt	Lskip_isalive

	mrs	x12, ARM64_REG_CYC_CFG
	orr	x12, x12, ARM64_REG_CYC_CFG_skipInit
	msr	ARM64_REG_CYC_CFG, x12

Lskip_isalive:

	mrs	x12, ARM64_REG_HID11
	and	x12, x12, (~ARM64_REG_HID11_DisFillC1BubOpt)
	msr	ARM64_REG_HID11, x12

	// Change the default memcache data set ID from 0 to 15 for all agents
	mrs	x12, ARM64_REG_HID8
	orr	x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
	orr	x12, x12, (ARM64_REG_HID8_DataSetID2_VALUE | ARM64_REG_HID8_DataSetID3_VALUE)
	msr	ARM64_REG_HID8, x12

	// Use 4-cycle MUL latency to avoid denormal stalls
	mrs	x12, ARM64_REG_HID7
	orr	x12, x12, #ARM64_REG_HID7_disNexFastFmul
	msr	ARM64_REG_HID7, x12

	// disable reporting of TLB-multi-hit-error
	// <rdar://problem/22163216>
	mrs	x12, ARM64_REG_LSU_ERR_STS
	and	x12, x12, (~ARM64_REG_LSU_ERR_STS_L1DTlbMultiHitEN)
	msr	ARM64_REG_LSU_ERR_STS, x12

	isb	sy
#endif // APPLETWISTER

#if defined(APPLEHURRICANE)

	// IC prefetch configuration
	// <rdar://problem/23019425>
	mrs	x12, ARM64_REG_HID0
	and	x12, x12, (~ARM64_REG_HID0_ICPrefDepth_bmsk)
	orr	x12, x12, (1 << ARM64_REG_HID0_ICPrefDepth_bshift)
	orr	x12, x12, ARM64_REG_HID0_ICPrefLimitOneBrn
	msr	ARM64_REG_HID0, x12

	// disable reporting of TLB-multi-hit-error
	// <rdar://problem/22163216>
	mrs	x12, ARM64_REG_LSU_ERR_CTL
	and	x12, x12, (~ARM64_REG_LSU_ERR_CTL_L1DTlbMultiHitEN)
	msr	ARM64_REG_LSU_ERR_CTL, x12

	// disable crypto fusion across decode groups
	// <rdar://problem/27306424>
	mrs	x12, ARM64_REG_HID1
	orr	x12, x12, ARM64_REG_HID1_disAESFuseAcrossGrp
	msr	ARM64_REG_HID1, x12

#if defined(ARM64_BOARD_CONFIG_T8011)
	// Clear DisDcZvaCmdOnly
	// Per Myst A0/B0 tunables document
	// <rdar://problem/27627428> Myst: Confirm ACC Per-CPU Tunables
	mrs	x12, ARM64_REG_HID3
	and	x12, x12, ~ARM64_REG_HID3_DisDcZvaCmdOnly
	msr	ARM64_REG_HID3, x12

	mrs	x12, ARM64_REG_EHID3
	and	x12, x12, ~ARM64_REG_EHID3_DisDcZvaCmdOnly
	msr	ARM64_REG_EHID3, x12
#endif /* defined(ARM64_BOARD_CONFIG_T8011) */

#endif // APPLEHURRICANE

#if defined(APPLEMONSOON)

	/***** Tunables that apply to all skye cores, all chip revs *****/

	// <rdar://problem/28512310> SW WAR/eval: WKdm write ack lost when bif_wke_colorWrAck_XXaH asserts concurrently for both colors
	mrs	x12, ARM64_REG_HID8
	orr	x12, x12, #ARM64_REG_HID8_WkeForceStrictOrder
	msr	ARM64_REG_HID8, x12

	// Skip if not E-core
	ARM64_IS_PCORE x15
	cbnz	x15, Lskip_skye_ecore_only

	/***** Tunables that only apply to skye e-cores, all chip revs *****/

	// <rdar://problem/30423928>: Atomic launch eligibility is erroneously taken away when a store at SMB gets invalidated
	mrs	x12, ARM64_REG_EHID11
	and	x12, x12, ~(ARM64_REG_EHID11_SmbDrainThresh_mask)
	msr	ARM64_REG_EHID11, x12

Lskip_skye_ecore_only:

	SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x12, MONSOON_CPU_VERSION_B0, Lskip_skye_a0_workarounds

	// Skip if not E-core
	cbnz	x15, Lskip_skye_a0_ecore_only

	/***** Tunables that only apply to skye e-cores, chip revs < B0 *****/

	// Disable downstream fill bypass logic
	// <rdar://problem/28545159> [Tunable] Skye - L2E fill bypass collision from both pipes to ecore
	mrs	x12, ARM64_REG_EHID5
	orr	x12, x12, ARM64_REG_EHID5_DisFillByp
	msr	ARM64_REG_EHID5, x12

	// Disable forwarding of return addresses to the NFP
	// <rdar://problem/30387067> Skye: FED incorrectly taking illegal va exception
	mrs	x12, ARM64_REG_EHID0
	orr	x12, x12, ARM64_REG_EHID0_nfpRetFwdDisb
	msr	ARM64_REG_EHID0, x12

Lskip_skye_a0_ecore_only:

	/***** Tunables that apply to all skye cores, chip revs < B0 *****/

	// Disable clock divider gating
	// <rdar://problem/30854420> [Tunable/Errata][cpu_1p_1e] [CPGV2] ACC power down issue when link FSM switches from GO_DN to CANCEL and at the same time upStreamDrain request is set.
	mrs	x12, ARM64_REG_HID6
	orr	x12, x12, ARM64_REG_HID6_DisClkDivGating
	msr	ARM64_REG_HID6, x12

	// Disable clock dithering
	// <rdar://problem/29022199> [Tunable] Skye A0: Linux: LLC PIO Errors
	mrs	x12, ARM64_REG_ACC_OVRD
	orr	x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
	msr	ARM64_REG_ACC_OVRD, x12

	mrs	x12, ARM64_REG_ACC_EBLK_OVRD
	orr	x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
	msr	ARM64_REG_ACC_EBLK_OVRD, x12

Lskip_skye_a0_workarounds:

	SKIP_IF_CPU_VERSION_LESS_THAN x12, MONSOON_CPU_VERSION_B0, Lskip_skye_post_a1_workarounds

	/***** Tunables that apply to all skye cores, chip revs >= B0 *****/

	// <rdar://problem/32512836>: Disable refcount syncing between E and P
	mrs	x12, ARM64_REG_CYC_OVRD
	and	x12, x12, ~ARM64_REG_CYC_OVRD_dsblSnoopTime_mask
	orr	x12, x12, ARM64_REG_CYC_OVRD_dsblSnoopPTime
	msr	ARM64_REG_CYC_OVRD, x12

Lskip_skye_post_a1_workarounds:

#endif /* defined(APPLEMONSOON) */



	// If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
	cbnz	x21, Ltrampoline

	// Set KVA of boot args as first arg
	add	x0, x20, x22
	sub	x0, x0, x23

#if KASAN
	mov	x20, x0
	mov	x21, lr

	// x0: boot args
	// x1: KVA page table phys base
	mrs	x1, TTBR1_EL1
	bl	EXT(kasan_bootstrap)

	mov	x0, x20
	mov	lr, x21
#endif
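	// kasan_bootstrap() is called here, before the first instrumented C code,
	// so that KASan's shadow mappings can be set up ahead of arm_init();
	// x20/x21 preserve the boot args pointer and return address around the call.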

	// Return to arm_init()
	ret

Ltrampoline:
	// Load VA of the trampoline
	adrp	x0, arm_init_tramp@page
	add	x0, x0, arm_init_tramp@pageoff
	add	x0, x0, x22
	sub	x0, x0, x23

	// Branch to the trampoline
	br	x0

/*
 * V=P to KVA trampoline.
 *	x0 - KVA of cpu data pointer
 */
	.text
	.align 2
arm_init_tramp:
	/* On a warm boot, the full kernel translation table is initialized in
	 * addition to the bootstrap tables. The layout is as follows:
	 *
	 *  +--Top of Memory--+
	 *         ...
	 *  |                 |
	 *  |  Primary Kernel |
	 *  |   Trans. Table  |
	 *  |                 |
	 *  +--Top + 5 pages--+
	 *  |                 |
	 *  |  Invalid Table  |
	 *  |                 |
	 *  +--Top + 4 pages--+
	 *  |                 |
	 *  |    KVA Table    |
	 *  |                 |
	 *  +--Top + 2 pages--+
	 *  |                 |
	 *  |    V=P Table    |
	 *  |                 |
	 *  +--Top of Kernel--+
	 *  |                 |
	 *  |  Kernel Mach-O  |
	 *  |                 |
	 *         ...
	 *  +---Kernel Base---+
	 */


	mov	x19, lr
	// Convert CPU data PA to VA and set as first argument
	mov	x0, x21
	bl	EXT(phystokv)

	mov	lr, x19

	/* Return to arm_init() */
	ret

//#include "globals_asm.h"

/* vim: set ts=4: */