/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include "assym.s"
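// assym.s is generated at build time; it provides the structure offsets and
// constants (BA_*, CPU_*, SR_RESTORE_*, rhdSize_NUM, stSize_NUM, ...) that
// this file references below.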

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */

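/*
 * The MSR_* macros below wrap writes to security-critical system registers
 * (VBAR_EL1, TCR_EL1, TTBR1_EL1, SCTLR_EL1). When KERNEL_INTEGRITY_KTRR is
 * defined, each write is funneled through a pinst_* helper (presumably kept
 * in the KTRR-protected text region) instead of a direct msr; lr is stashed
 * in x1 around the call because bl clobbers lr. Without KTRR the msr is
 * issued directly.
 */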
.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    mov x1, lr
    bl EXT(pinst_set_vbar)
    mov lr, x1
#else
    msr VBAR_EL1, x0
#endif
.endmacro

.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
    mov x0, x1
    mov x1, lr
    bl _pinst_set_tcr
    mov lr, x1
#else
    msr TCR_EL1, x1
#endif
.endmacro

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    mov x1, lr
    bl _pinst_set_ttbr1
    mov lr, x1
#else
    msr TTBR1_EL1, x0
#endif
.endmacro

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    mov x1, lr

    // This may abort, do so on SP1
    bl _pinst_spsel_1

    bl _pinst_set_sctlr
    msr SPSel, #0                               // Back to SP0
    mov lr, x1
#else
    msr SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro

/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *   x19 - Reset handler data pointer
 *   x20 - Boot args pointer
 *   x21 - CPU data pointer
 */
    .text
    .align 12
    .globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
    // Preserve x0 for start_first_cpu, if called

    // Unlock the core for debugging
    msr OSLAR_EL1, xzr
    msr DAIFSet, #(DAIFSC_ALL)                  // Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR))
    // Set low reset vector before attempting any loads
    adrp x0, EXT(LowExceptionVectorBase)@page
    add x0, x0, EXT(LowExceptionVectorBase)@pageoff
    msr VBAR_EL1, x0
#endif


#if defined(KERNEL_INTEGRITY_KTRR)
/*
 * Set KTRR registers immediately after wake/resume
 *
 * During power-on reset, XNU stashed the kernel text region range values
 * into __DATA,__const, which should be protected by the AMCC RoRgn at this
 * point. Read this data and program/lock the KTRR registers accordingly.
 * If either value is zero, we're debugging the kernel, so skip programming
 * KTRR.
 */


    // load stashed rorgn_begin
    adrp x17, EXT(rorgn_begin)@page
    add x17, x17, EXT(rorgn_begin)@pageoff
    ldr x17, [x17]
    // if rorgn_begin is zero, we're debugging. skip enabling ktrr
    cbz x17, Lskip_ktrr

    // load stashed rorgn_end
    adrp x19, EXT(rorgn_end)@page
    add x19, x19, EXT(rorgn_end)@pageoff
    ldr x19, [x19]
    cbz x19, Lskip_ktrr

    // program and lock down KTRR
    // subtract one page from rorgn_end to make pinst insns NX
    msr ARM64_REG_KTRR_LOWER_EL1, x17
    sub x19, x19, #(1 << (ARM_PTE_SHIFT-12)), lsl #12
    msr ARM64_REG_KTRR_UPPER_EL1, x19
    mov x17, #1
    msr ARM64_REG_KTRR_LOCK_EL1, x17
Lskip_ktrr:
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

    // Process reset handlers
    adrp x19, EXT(ResetHandlerData)@page        // Get address of the reset handler data
    add x19, x19, EXT(ResetHandlerData)@pageoff
    mrs x15, MPIDR_EL1                          // Load MPIDR to get CPU number
    and x0, x15, #0xFF                          // CPU number is in MPIDR Affinity Level 0
    ldr x1, [x19, CPU_DATA_ENTRIES]             // Load start of data entries
    add x3, x1, MAX_CPUS * 16                   // end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
    ldr x21, [x1, CPU_DATA_PADDR]               // Load physical CPU data address
    cbz x21, Lnext_cpu_data_entry
    ldr w2, [x21, CPU_PHYS_ID]                  // Load ccc cpu phys id
    cmp x0, x2                                  // Compare cpu data phys cpu and MPIDR_EL1 phys cpu
    b.eq Lfound_cpu_data_entry                  // Branch if match
Lnext_cpu_data_entry:
    add x1, x1, #16                             // Increment to the next cpu data entry
    cmp x1, x3
    b.eq Lskip_cpu_reset_handler                // Not found
    b Lcheck_cpu_data_entry                     // loop
Lfound_cpu_data_entry:
    adrp x20, EXT(const_boot_args)@page
    add x20, x20, EXT(const_boot_args)@pageoff
    ldr x0, [x21, CPU_RESET_HANDLER]            // Call CPU reset handler
    cbz x0, Lskip_cpu_reset_handler

    // Validate that our handler is one of the two expected handlers
    adrp x2, EXT(resume_idle_cpu)@page
    add x2, x2, EXT(resume_idle_cpu)@pageoff
    cmp x0, x2
    beq 1f
    adrp x2, EXT(start_cpu)@page
    add x2, x2, EXT(start_cpu)@pageoff
    cmp x0, x2
    bne Lskip_cpu_reset_handler
1:



#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
    /*
     * Populate TPIDR_EL1 (in case the CPU takes an exception while
     * turning on the MMU).
     */
    ldr x13, [x21, CPU_ACTIVE_THREAD]
    msr TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

    blr x0
Lskip_cpu_reset_handler:
    b .                                         // Hang if the handler is NULL or returns

    .align 3
    .globl EXT(ResetHandlerData)
LEXT(ResetHandlerData)
    .space (rhdSize_NUM),0                      // (filled with 0s)

    .align 3
    .global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
    .global EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
    .space (stSize_NUM),0
#endif


/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point. Reset vector = (__start & ~0xfff)
 */
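/*
 * Because the reset vector is (__start & ~0xfff), _start is expected to land
 * in the same 4KB page as LowResetVectorBase above.
 */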
    .align 3
    .globl EXT(_start)
LEXT(_start)
    b EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
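/*
 * The table below has the standard AArch64 shape: sixteen slots spaced 128
 * bytes apart (.align 7), four each for Current EL with SP0, Current EL with
 * SPx, Lower EL (AArch64), and Lower EL (AArch32). Each slot simply spins
 * (b .) so ELR_EL1/ESR_EL1/FAR_EL1 remain intact for inspection.
 */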
    .align 12, 0
    .global EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
    /* EL1 SP 0 */
    b .
    .align 7
    b .
    .align 7
    b .
    .align 7
    b .
    /* EL1 SP1 */
    .align 7
    b .
    .align 7
    b .
    .align 7
    b .
    .align 7
    b .
    /* EL0 64 */
    .align 7
    b .
    .align 7
    b .
    .align 7
    b .
    .align 7
    b .
    /* EL0 32 */
    .align 7
    b .
    .align 7
    b .
    .align 7
    b .
    .align 7
    b .
    .align 12, 0

#if defined(KERNEL_INTEGRITY_KTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
    .align ARM_PGSHIFT
    .globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
    .align 2
    .globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
    adrp lr, EXT(arm_init_idle_cpu)@page
    add lr, lr, EXT(arm_init_idle_cpu)@pageoff
    b start_cpu

    .align 2
    .globl EXT(start_cpu)
LEXT(start_cpu)
    adrp lr, EXT(arm_init_cpu)@page
    add lr, lr, EXT(arm_init_cpu)@pageoff
    b start_cpu

    .align 2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR)
    // This is done right away in reset vector for pre-KTRR devices
    // Set low reset vector now that we are in the KTRR-free zone
    adrp x0, EXT(LowExceptionVectorBase)@page
    add x0, x0, EXT(LowExceptionVectorBase)@pageoff
    MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

    // x20 set to BootArgs phys address
    // x21 set to cpu data phys address

    // Get the kernel memory parameters from the boot args
    ldr x22, [x20, BA_VIRT_BASE]                // Get the kernel virt base
    ldr x23, [x20, BA_PHYS_BASE]                // Get the kernel phys base
    ldr x24, [x20, BA_MEM_SIZE]                 // Get the physical memory size
    ldr x25, [x20, BA_TOP_OF_KERNEL_DATA]       // Get the top of the kernel data
    ldr x26, [x20, BA_BOOT_FLAGS]               // Get the kernel boot flags

    // Set TPIDRRO_EL0 with the CPU number
    ldr x0, [x21, CPU_NUMBER_GS]
    msr TPIDRRO_EL0, x0

    // Set the exception stack pointer
    ldr x0, [x21, CPU_EXCEPSTACK_TOP]


    // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)
    mov x1, lr
    bl _pinst_spsel_1
    mov lr, x1
#else
    msr SPSel, #1
#endif
    mov sp, x0

    // Set the interrupt stack pointer
    ldr x0, [x21, CPU_INTSTACK_TOP]
    msr SPSel, #0
    mov sp, x0

    // Convert lr to KVA
    add lr, lr, x22
    sub lr, lr, x23

    b common_start

/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 * arg0 - Virtual address
 * arg1 - L1 table address
 * arg2 - L2 table address
 * arg3 - Scratch register
 * arg4 - Scratch register
 * arg5 - Scratch register
 */
.macro create_l1_table_entry
    and $3, $0, #(ARM_TT_L1_INDEX_MASK)
    lsr $3, $3, #(ARM_TT_L1_SHIFT)              // Get index in L1 table for L2 table
    lsl $3, $3, #(TTE_SHIFT)                    // Convert index into pointer offset
    add $3, $1, $3                              // Get L1 entry pointer
    mov $4, #(ARM_TTE_BOOT_TABLE)               // Get L1 table entry template
    and $5, $2, #(ARM_TTE_TABLE_MASK)           // Get address bits of L2 table
    orr $5, $4, $5                              // Create table entry for L2 table
    str $5, [$3]                                // Write entry to L1 table
.endmacro
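
/*
 * For reference, a rough C sketch of what create_l1_table_entry does
 * (illustrative names; assumes 8-byte TTEs, i.e. TTE_SHIFT == 3):
 *
 *   uint64_t *l1_entry = (uint64_t *)l1_table +
 *       ((va & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
 *   *l1_entry = ARM_TTE_BOOT_TABLE | (l2_table_pa & ARM_TTE_TABLE_MASK);
 */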

/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 * arg0 - Virtual address
 * arg1 - Physical address
 * arg2 - L2 table address
 * arg3 - Number of entries
 * arg4 - Scratch register
 * arg5 - Scratch register
 * arg6 - Scratch register
 * arg7 - Scratch register
 */
.macro create_l2_block_entries
    and $4, $0, #(ARM_TT_L2_INDEX_MASK)
    lsr $4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)       // Get index in L2 table for block entry
    lsl $4, $4, #(TTE_SHIFT)                    // Convert index into pointer offset
    add $4, $2, $4                              // Get L2 entry pointer
    mov $5, #(ARM_TTE_BOOT_BLOCK)               // Get L2 block entry template
    and $6, $1, #(ARM_TTE_BLOCK_L2_MASK)        // Get address bits of block mapping
    orr $6, $5, $6
    mov $5, $3
    mov $7, #(ARM_TT_L2_SIZE)
1:
    str $6, [$4], #(1 << TTE_SHIFT)             // Write entry to L2 table and advance
    add $6, $6, $7                              // Increment the output address
    subs $5, $5, #1                             // Decrement the number of entries
    b.ne 1b
.endmacro
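
/*
 * Rough C sketch of create_l2_block_entries (illustrative names; assumes
 * 8-byte TTEs):
 *
 *   uint64_t *tte = (uint64_t *)l2_table +
 *       ((va & ARM_TT_L2_INDEX_MASK) >> ARM_TTE_BLOCK_L2_SHIFT);
 *   uint64_t entry = ARM_TTE_BOOT_BLOCK | (pa & ARM_TTE_BLOCK_L2_MASK);
 *   for (uint64_t i = 0; i < num_entries; i++, entry += ARM_TT_L2_SIZE)
 *       *tte++ = entry;
 */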

/*
 * arg0 - virtual start address
 * arg1 - physical start address
 * arg2 - number of entries to map
 * arg3 - L1 table address
 * arg4 - free space pointer
 * arg5 - scratch (entries mapped per loop)
 * arg6 - scratch
 * arg7 - scratch
 * arg8 - scratch
 * arg9 - scratch
 */
.macro create_bootstrap_mapping
    /* calculate entries left in this page */
    and $5, $0, #(ARM_TT_L2_INDEX_MASK)
    lsr $5, $5, #(ARM_TT_L2_SHIFT)
    mov $6, #(TTE_PGENTRIES)
    sub $5, $6, $5

    /* allocate an L2 table */
3:  add $4, $4, PGBYTES

    /* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
    create_l1_table_entry $0, $3, $4, $6, $7, $8

    /* determine how many entries to map on this pass: the smaller of the
     * entries remaining in this page and the total entries left */
    cmp $2, $5
    csel $5, $2, $5, lt

    /* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3) */
    create_l2_block_entries $0, $1, $4, $5, $6, $7, $8, $9

    /* subtract entries just mapped and bail out if we're done */
    subs $2, $2, $5
    beq 2f

    /* entries left to map - advance base pointers */
    add $0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
    add $1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

    mov $5, #(TTE_PGENTRIES)                    /* subsequent loops map (up to) a whole L2 page */
    b 3b
2:
.endmacro
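
/*
 * Rough C sketch of create_bootstrap_mapping (illustrative names): the range
 * is walked one L2 table page at a time, carving each new L2 table out of the
 * free-space pointer before pointing an L1 entry at it:
 *
 *   while (num_entries != 0) {
 *       uint64_t n = TTE_PGENTRIES -
 *           ((va & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
 *       if (n > num_entries)
 *           n = num_entries;
 *       free_ptr += PGBYTES;                  // allocate the next L2 table
 *       create_l1_table_entry(va, l1_table, free_ptr);
 *       create_l2_block_entries(va, pa, free_ptr, n);
 *       num_entries -= n;
 *       va += n << ARM_TT_L2_SHIFT;
 *       pa += n << ARM_TT_L2_SHIFT;
 *   }
 */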

/*
 * _start_first_cpu
 * Cold boot init routine. Called from __start
 * x0 - Boot args
 */
    .align 2
    .globl EXT(start_first_cpu)
LEXT(start_first_cpu)

    // Unlock the core for debugging
    msr OSLAR_EL1, xzr
    msr DAIFSet, #(DAIFSC_ALL)                  // Disable all interrupts
    mov x20, x0
    mov x21, #0

    // Set low reset vector before attempting any loads
    adrp x0, EXT(LowExceptionVectorBase)@page
    add x0, x0, EXT(LowExceptionVectorBase)@pageoff
    MSR_VBAR_EL1_X0


    // Get the kernel memory parameters from the boot args
    ldr x22, [x20, BA_VIRT_BASE]                // Get the kernel virt base
    ldr x23, [x20, BA_PHYS_BASE]                // Get the kernel phys base
    ldr x24, [x20, BA_MEM_SIZE]                 // Get the physical memory size
    ldr x25, [x20, BA_TOP_OF_KERNEL_DATA]       // Get the top of the kernel data
    ldr x26, [x20, BA_BOOT_FLAGS]               // Get the kernel boot flags

    // Clear the register that will be used to store the userspace thread pointer and CPU number.
    // We may not actually be booting from ordinal CPU 0, so this register will be updated
    // in ml_parse_cpu_topology(), which happens later in bootstrap.
    msr TPIDRRO_EL0, x21

    // Set up exception stack pointer
    adrp x0, EXT(excepstack_top)@page           // Load top of exception stack
    add x0, x0, EXT(excepstack_top)@pageoff
    add x0, x0, x22                             // Convert to KVA
    sub x0, x0, x23

    // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)
    bl _pinst_spsel_1
#else
    msr SPSel, #1
#endif

    mov sp, x0

    // Set up interrupt stack pointer
    adrp x0, EXT(intstack_top)@page             // Load top of irq stack
    add x0, x0, EXT(intstack_top)@pageoff
    add x0, x0, x22                             // Convert to KVA
    sub x0, x0, x23
    msr SPSel, #0                               // Set SP_EL0 to interrupt stack
    mov sp, x0

    // Load address to the C init routine into link register
    adrp lr, EXT(arm_init)@page
    add lr, lr, EXT(arm_init)@pageoff
    add lr, lr, x22                             // Convert to KVA
    sub lr, lr, x23

/*
 * Set up the bootstrap page tables with a single block entry for the V=P
 * mapping, a single block entry for the trampolined kernel address (KVA),
 * and all else invalid. This requires four pages:
 *   Page 1 - V=P L1 table
 *   Page 2 - V=P L2 table
 *   Page 3 - KVA L1 table
 *   Page 4 - KVA L2 table
 */
#if __ARM64_TWO_LEVEL_PMAP__
/*
 * If we are using a two level scheme, we don't need the L1 entries, so:
 *   Page 1 - V=P L2 table
 *   Page 2 - KVA L2 table
 */
#endif

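    // The bootstrap tables are carved out of the memory immediately above the
    // kernel image (x25 = BA_TOP_OF_KERNEL_DATA); the loop below clears them
    // before any entries are written.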
    // Invalidate all entries in the bootstrap page tables
    mov x0, #(ARM_TTE_EMPTY)                    // Load invalid entry template
    mov x1, x25                                 // Start at top of kernel
    mov x2, #(TTE_PGENTRIES)                    // Load number of entries per page
#if __ARM64_TWO_LEVEL_PMAP__
    lsl x2, x2, #1                              // Shift by 1 for num entries on 2 pages
#else
    lsl x2, x2, #2                              // Shift by 2 for num entries on 4 pages
#endif
Linvalidate_bootstrap:                          // do {
    str x0, [x1], #(1 << TTE_SHIFT)             //   Invalidate and advance
    subs x2, x2, #1                             //   entries--
    b.ne Linvalidate_bootstrap                  // } while (entries != 0)

/*
 * In order to reclaim memory on targets where TZ0 (or some other entity)
 * must be located at the base of memory, iBoot may set the virtual and
 * physical base addresses to immediately follow whatever lies at the
 * base of physical memory.
 *
 * If the base address belongs to TZ0, it may be dangerous for xnu to map
 * it (as it may be prefetched, despite being technically inaccessible).
 * In order to avoid this issue while keeping the mapping code simple, we
 * may continue to use block mappings, but we will only map from xnu's
 * Mach-O header to the end of memory.
 *
 * Given that iBoot guarantees that the unslid kernelcache base address
 * will begin on an L2 boundary, this should prevent us from accidentally
 * mapping TZ0.
 */
    adrp x0, EXT(_mh_execute_header)@page       // Use xnu's mach header as the start address
    add x0, x0, EXT(_mh_execute_header)@pageoff

/*
 * Adjust the physical and virtual base addresses to account for the
 * physical memory preceding xnu's Mach-O header.
 * x22 - Kernel virtual base
 * x23 - Kernel physical base
 * x24 - Physical memory size
 */
    sub x18, x0, x23
    sub x24, x24, x18
    add x22, x22, x18
    add x23, x23, x18

/*
 * x0  - V=P virtual cursor
 * x4  - V=P physical cursor
 * x14 - KVA virtual cursor
 * x15 - KVA physical cursor
 */
    mov x4, x0
    mov x14, x22
    mov x15, x23

/*
 * Allocate L1 tables
 * x1 - V=P L1 page
 * x3 - KVA L1 page
 * x2 - free mem pointer from which we allocate a variable number of L2
 * pages. The maximum number of bootstrap page table pages is limited to
 * BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the worst-case
 * slide, we need 1xL1 and up to 3xL2 pages (1GB mapped per L1 entry), so
 * 8 total pages for V=P and KVA.
 */
    mov x1, x25
    add x3, x1, PGBYTES
    mov x2, x3

/*
 * Setup the V=P bootstrap mapping
 * x5 - total number of L2 entries to allocate
 */
    lsr x5, x24, #(ARM_TT_L2_SHIFT)
    /* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
    create_bootstrap_mapping x0, x4, x5, x1, x2, x6, x10, x11, x12, x13

    /* Setup the KVA bootstrap mapping */
    lsr x5, x24, #(ARM_TT_L2_SHIFT)
    create_bootstrap_mapping x14, x15, x5, x3, x2, x9, x10, x11, x12, x13

    /* Ensure TTEs are visible */
    dsb ish

    b common_start

/*
 * Begin common CPU initialization
 *
 * Register state:
 *   x20 - PA of boot args
 *   x21 - zero on cold boot, PA of cpu data on warm reset
 *   x22 - Kernel virtual base
 *   x23 - Kernel physical base
 *   x25 - PA of the end of the kernel
 *   lr  - KVA of C init routine
 *   sp  - SP_EL0 selected
 *
 *   SP_EL0      - KVA of CPU's interrupt stack
 *   SP_EL1      - KVA of CPU's exception stack
 *   TPIDRRO_EL0 - CPU number
 */
common_start:
    // Set the translation control register.
    adrp x0, EXT(sysreg_restore)@page           // Load TCR value from the system register restore structure
    add x0, x0, EXT(sysreg_restore)@pageoff
    ldr x1, [x0, SR_RESTORE_TCR_EL1]
    MSR_TCR_EL1_X1

    /* Set up translation table base registers.
     *  TTBR0 - V=P table @ top of kernel
     *  TTBR1 - KVA table @ top of kernel + 1 page
     */
#if defined(KERNEL_INTEGRITY_KTRR)
    /* Note that for KTRR configurations, the V=P map will be modified by
     * arm_vm_init.c.
     */
#endif
    and x0, x25, #(TTBR_BADDR_MASK)
    mov x19, lr
    bl EXT(set_mmu_ttb)
    mov lr, x19
    add x0, x25, PGBYTES
    and x0, x0, #(TTBR_BADDR_MASK)
    MSR_TTBR1_EL1_X0

    // Program MAIR_EL1 with one memory-attribute encoding per cache attribute
    // index used by the kernel (normal cacheable variants plus device types)
    mov x0, xzr
    mov x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
    orr x0, x0, x1
    mov x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
    orr x0, x0, x1
    mov x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
    orr x0, x0, x1
    mov x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
    orr x0, x0, x1
    mov x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
    orr x0, x0, x1
    mov x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
    orr x0, x0, x1
    msr MAIR_EL1, x0

#if defined(APPLEHURRICANE)

    // <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
    // Needs to be done before MMU is enabled
    mrs x12, ARM64_REG_HID5
    and x12, x12, (~ARM64_REG_HID5_CrdEdbSnpRsvd_mask)
    orr x12, x12, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE
    msr ARM64_REG_HID5, x12

#endif

#if defined(BCM2837)
    // Setup timer interrupt routing; must be done before MMU is enabled
    mrs x15, MPIDR_EL1                          // Load MPIDR to get CPU number
    and x15, x15, #0xFF                         // CPU number is in MPIDR Affinity Level 0
    mov x0, #0x4000
    lsl x0, x0, #16
    add x0, x0, #0x0040                         // x0: 0x4000004X Core Timers interrupt control
    add x0, x0, x15, lsl #2
    mov w1, #0xF0                               // x1: 0xF0 Route to Core FIQs
    str w1, [x0]
    isb sy
#endif


#ifndef __ARM_IC_NOALIAS_ICACHE__
    /* Invalidate the TLB and icache on systems that do not guarantee that the
     * caches are invalidated on reset.
     */
    tlbi vmalle1
    ic iallu
#endif

    /* If x21 is not 0, then this is either the start_cpu path or
     * the resume_idle_cpu path. cpu_ttep should already be
     * populated, so just switch to the kernel_pmap now.
     */

    cbz x21, 1f
    adrp x0, EXT(cpu_ttep)@page
    add x0, x0, EXT(cpu_ttep)@pageoff
    ldr x0, [x0]
    MSR_TTBR1_EL1_X0
1:

    // Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
    /* If this is not the first reset of the boot CPU, the alternate mapping
     * for the exception vectors will be set up, so use it. Otherwise, we
     * should use the mapping located in the kernelcache mapping.
     */
    MOV64 x0, ARM_KERNEL_PROTECT_EXCEPTION_START

    cbnz x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
    adrp x0, EXT(ExceptionVectorsBase)@page     // Load exception vectors base address
    add x0, x0, EXT(ExceptionVectorsBase)@pageoff
    add x0, x0, x22                             // Convert exception vector address to KVA
    sub x0, x0, x23
1:
    MSR_VBAR_EL1_X0


    // Enable caches and MMU
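    // SCTLR_EL1_DEFAULT is assembled from two 16-bit halves because an
    // arbitrary 32-bit constant generally cannot be encoded in a single
    // mov immediate.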
    mov x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
    mov x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)
    orr x0, x0, x1
    MSR_SCTLR_EL1_X0
    isb sy

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
/* Watchtower
 *
 * If we have a Watchtower monitor, it will set up CPACR_EL1 for us;
 * touching it here would trap to EL3.
 */

    // Enable NEON
    mov x0, #(CPACR_FPEN_ENABLE)
    msr CPACR_EL1, x0
#endif

    // Clear thread pointer
    mov x0, #0
    msr TPIDR_EL1, x0                           // Set thread register

#if defined(APPLE_ARM64_ARCH_FAMILY)
    // Initialization common to all Apple targets
    ARM64_IS_PCORE x15
    ARM64_READ_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
    orr x12, x12, ARM64_REG_HID4_DisDcMVAOps
    orr x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
    ARM64_WRITE_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
#endif // APPLE_ARM64_ARCH_FAMILY

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
    //
    // Cyclone/Typhoon-Specific initialization
    // For tunable summary, see <rdar://problem/13503621> Alcatraz/H6: Confirm Cyclone CPU tunables have been set
    //

    //
    // Disable LSP flush with context switch to work around bug in LSP
    // that can cause Cyclone to wedge when CONTEXTIDR is written.
    // <rdar://problem/12387704> Innsbruck11A175: panic(cpu 0 caller 0xffffff800024e30c): "wait queue deadlock - wq=0xffffff805a7a63c0, cpu=0\n"
    //

    mrs x12, ARM64_REG_HID0
    orr x12, x12, ARM64_REG_HID0_LoopBuffDisb
    msr ARM64_REG_HID0, x12

    mrs x12, ARM64_REG_HID1
    orr x12, x12, ARM64_REG_HID1_rccDisStallInactiveIexCtl
#if defined(APPLECYCLONE)
    orr x12, x12, ARM64_REG_HID1_disLspFlushWithContextSwitch
#endif
    msr ARM64_REG_HID1, x12

    mrs x12, ARM64_REG_HID3
    orr x12, x12, ARM64_REG_HID3_DisXmonSnpEvictTriggerL2StarvationMode
    msr ARM64_REG_HID3, x12

    mrs x12, ARM64_REG_HID5
    and x12, x12, (~ARM64_REG_HID5_DisHwpLd)
    and x12, x12, (~ARM64_REG_HID5_DisHwpSt)
    msr ARM64_REG_HID5, x12

    // Change the default memcache data set ID from 0 to 15 for all agents
    mrs x12, ARM64_REG_HID8
    orr x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
#if ARM64_BOARD_CONFIG_T7001
    orr x12, x12, ARM64_REG_HID8_DataSetID2_VALUE
#endif // ARM64_BOARD_CONFIG_T7001
    msr ARM64_REG_HID8, x12
    isb sy
#endif // APPLECYCLONE || APPLETYPHOON

#if defined(APPLETWISTER)

    // rdar://problem/36112905: Set CYC_CFG:skipInit to pull in isAlive by one DCLK
    // to work around potential hang. Must only be applied to Maui C0.
    mrs x12, MIDR_EL1
    ubfx x13, x12, #MIDR_EL1_PNUM_SHIFT, #12
    cmp x13, #4                                 // Part number 4 => Maui, 5 => Malta/Elba
    bne Lskip_isalive
    ubfx x13, x12, #MIDR_EL1_VAR_SHIFT, #4
    cmp x13, #2                                 // variant 2 => Maui C0
    b.lt Lskip_isalive

    mrs x12, ARM64_REG_CYC_CFG
    orr x12, x12, ARM64_REG_CYC_CFG_skipInit
    msr ARM64_REG_CYC_CFG, x12

Lskip_isalive:

    mrs x12, ARM64_REG_HID11
    and x12, x12, (~ARM64_REG_HID11_DisFillC1BubOpt)
    msr ARM64_REG_HID11, x12

    // Change the default memcache data set ID from 0 to 15 for all agents
    mrs x12, ARM64_REG_HID8
    orr x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
    orr x12, x12, (ARM64_REG_HID8_DataSetID2_VALUE | ARM64_REG_HID8_DataSetID3_VALUE)
    msr ARM64_REG_HID8, x12

    // Use 4-cycle MUL latency to avoid denormal stalls
    mrs x12, ARM64_REG_HID7
    orr x12, x12, #ARM64_REG_HID7_disNexFastFmul
    msr ARM64_REG_HID7, x12

    // disable reporting of TLB-multi-hit-error
    // <rdar://problem/22163216>
    mrs x12, ARM64_REG_LSU_ERR_STS
    and x12, x12, (~ARM64_REG_LSU_ERR_STS_L1DTlbMultiHitEN)
    msr ARM64_REG_LSU_ERR_STS, x12

    isb sy
#endif // APPLETWISTER

#if defined(APPLEHURRICANE)

    // IC prefetch configuration
    // <rdar://problem/23019425>
    mrs x12, ARM64_REG_HID0
    and x12, x12, (~ARM64_REG_HID0_ICPrefDepth_bmsk)
    orr x12, x12, (1 << ARM64_REG_HID0_ICPrefDepth_bshift)
    orr x12, x12, ARM64_REG_HID0_ICPrefLimitOneBrn
    msr ARM64_REG_HID0, x12

    // disable reporting of TLB-multi-hit-error
    // <rdar://problem/22163216>
    mrs x12, ARM64_REG_LSU_ERR_CTL
    and x12, x12, (~ARM64_REG_LSU_ERR_CTL_L1DTlbMultiHitEN)
    msr ARM64_REG_LSU_ERR_CTL, x12

    // disable crypto fusion across decode groups
    // <rdar://problem/27306424>
    mrs x12, ARM64_REG_HID1
    orr x12, x12, ARM64_REG_HID1_disAESFuseAcrossGrp
    msr ARM64_REG_HID1, x12

#if defined(ARM64_BOARD_CONFIG_T8011)
    // Clear DisDcZvaCmdOnly
    // Per Myst A0/B0 tunables document
    // <rdar://problem/27627428> Myst: Confirm ACC Per-CPU Tunables
    mrs x12, ARM64_REG_HID3
    and x12, x12, ~ARM64_REG_HID3_DisDcZvaCmdOnly
    msr ARM64_REG_HID3, x12

    mrs x12, ARM64_REG_EHID3
    and x12, x12, ~ARM64_REG_EHID3_DisDcZvaCmdOnly
    msr ARM64_REG_EHID3, x12
#endif /* defined(ARM64_BOARD_CONFIG_T8011) */

#endif // APPLEHURRICANE

#if defined(APPLEMONSOON)

    /***** Tunables that apply to all skye cores, all chip revs *****/

    // <rdar://problem/28512310> SW WAR/eval: WKdm write ack lost when bif_wke_colorWrAck_XXaH asserts concurrently for both colors
    mrs x12, ARM64_REG_HID8
    orr x12, x12, #ARM64_REG_HID8_WkeForceStrictOrder
    msr ARM64_REG_HID8, x12

    // Skip if not E-core
    ARM64_IS_PCORE x15
    cbnz x15, Lskip_skye_ecore_only

    /***** Tunables that only apply to skye e-cores, all chip revs *****/

    // <rdar://problem/30423928>: Atomic launch eligibility is erroneously taken away when a store at SMB gets invalidated
    mrs x12, ARM64_REG_EHID11
    and x12, x12, ~(ARM64_REG_EHID11_SmbDrainThresh_mask)
    msr ARM64_REG_EHID11, x12

Lskip_skye_ecore_only:

    SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x12, MONSOON_CPU_VERSION_B0, Lskip_skye_a0_workarounds

    // Skip if not E-core
    cbnz x15, Lskip_skye_a0_ecore_only

    /***** Tunables that only apply to skye e-cores, chip revs < B0 *****/

    // Disable downstream fill bypass logic
    // <rdar://problem/28545159> [Tunable] Skye - L2E fill bypass collision from both pipes to ecore
    mrs x12, ARM64_REG_EHID5
    orr x12, x12, ARM64_REG_EHID5_DisFillByp
    msr ARM64_REG_EHID5, x12

    // Disable forwarding of return addresses to the NFP
    // <rdar://problem/30387067> Skye: FED incorrectly taking illegal va exception
    mrs x12, ARM64_REG_EHID0
    orr x12, x12, ARM64_REG_EHID0_nfpRetFwdDisb
    msr ARM64_REG_EHID0, x12

Lskip_skye_a0_ecore_only:

    /***** Tunables that apply to all skye cores, chip revs < B0 *****/

    // Disable clock divider gating
    // <rdar://problem/30854420> [Tunable/Errata][cpu_1p_1e] [CPGV2] ACC power down issue when link FSM switches from GO_DN to CANCEL and at the same time upStreamDrain request is set.
    mrs x12, ARM64_REG_HID6
    orr x12, x12, ARM64_REG_HID6_DisClkDivGating
    msr ARM64_REG_HID6, x12

    // Disable clock dithering
    // <rdar://problem/29022199> [Tunable] Skye A0: Linux: LLC PIO Errors
    mrs x12, ARM64_REG_ACC_OVRD
    orr x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
    msr ARM64_REG_ACC_OVRD, x12

    mrs x12, ARM64_REG_ACC_EBLK_OVRD
    orr x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
    msr ARM64_REG_ACC_EBLK_OVRD, x12

Lskip_skye_a0_workarounds:

    SKIP_IF_CPU_VERSION_LESS_THAN x12, MONSOON_CPU_VERSION_B0, Lskip_skye_post_a1_workarounds

    /***** Tunables that apply to all skye cores, chip revs >= B0 *****/

    // <rdar://problem/32512836>: Disable refcount syncing between E and P
    mrs x12, ARM64_REG_CYC_OVRD
    and x12, x12, ~ARM64_REG_CYC_OVRD_dsblSnoopTime_mask
    orr x12, x12, ARM64_REG_CYC_OVRD_dsblSnoopPTime
    msr ARM64_REG_CYC_OVRD, x12

Lskip_skye_post_a1_workarounds:

#endif /* defined(APPLEMONSOON) */


    // If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
    cbnz x21, Ltrampoline

    // Set KVA of boot args as first arg
    add x0, x20, x22
    sub x0, x0, x23

#if KASAN
    mov x20, x0
    mov x21, lr

    // x0: boot args
    // x1: KVA page table phys base
    mrs x1, TTBR1_EL1
    bl _kasan_bootstrap

    mov x0, x20
    mov lr, x21
#endif

    // Return to arm_init()
    ret

Ltrampoline:
    // Load VA of the trampoline
    adrp x0, arm_init_tramp@page
    add x0, x0, arm_init_tramp@pageoff
    add x0, x0, x22
    sub x0, x0, x23

    // Branch to the trampoline
    br x0

/*
 * V=P to KVA trampoline.
 * x0 - KVA of cpu data pointer
 */
    .text
    .align 2
arm_init_tramp:
    /* On a warm boot, the full kernel translation table is initialized in
     * addition to the bootstrap tables. The layout is as follows:
     *
     *  +--Top of Memory--+
     *    ...
     *  |                 |
     *  | Primary Kernel  |
     *  |  Trans. Table   |
     *  |                 |
     *  +--Top + 5 pages--+
     *  |                 |
     *  |  Invalid Table  |
     *  |                 |
     *  +--Top + 4 pages--+
     *  |                 |
     *  |    KVA Table    |
     *  |                 |
     *  +--Top + 2 pages--+
     *  |                 |
     *  |    V=P Table    |
     *  |                 |
     *  +--Top of Kernel--+
     *  |                 |
     *  |  Kernel Mach-O  |
     *  |                 |
     *    ...
     *  +---Kernel Base---+
     */

    mov x19, lr
    // Convert CPU data PA to VA and set as first argument
    mov x0, x21
    bl EXT(phystokv)

    mov lr, x19

    /* Return to arm_init() */
    ret

//#include "globals_asm.h"

/* vim: set ts=4: */