/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include "assym.s"
#include <arm64/tunables/tunables.s>
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */



.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_vbar)
	mov	lr, x1
#else
	msr	VBAR_EL1, x0
#endif
.endmacro

.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x0, x1
	mov	x1, lr
	bl	EXT(pinst_set_tcr)
	mov	lr, x1
#else
	msr	TCR_EL1, x1
#endif
.endmacro

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_ttbr1)
	mov	lr, x1
#else
	msr	TTBR1_EL1, x0
#endif
.endmacro

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr

	// This may abort, do so on SP1
	bl	EXT(pinst_spsel_1)

	bl	EXT(pinst_set_sctlr)
	msr	SPSel, #0	// Back to SP0
	mov	lr, x1
#else
	msr	SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro

/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *	x19 - Reset handler data pointer
 *	x20 - Boot args pointer
 *	x21 - CPU data pointer
 */
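/*
 * Illustrative sketch only (not part of the build): an approximate C rendering
 * of the lookup performed below, based on the CPU_DATA_ENTRIES, CPU_DATA_PADDR,
 * CPU_PHYS_ID and CPU_RESET_HANDLER offsets generated into assym.s. Field and
 * type names here are hypothetical.
 *
 *	struct cpu_data_entry *entry = rhd->cpu_data_entries;	// x19 -> x1
 *	for (unsigned i = 0; i < MAX_CPUS; i++, entry++) {
 *		cpu_data_t *cdp = (cpu_data_t *)entry->cpu_data_paddr;
 *		if (cdp == NULL || cdp->cpu_phys_id != my_phys_id)
 *			continue;			// Lnext_cpu_data_entry
 *		if (cdp->cpu_reset_handler == (uintptr_t)resume_idle_cpu ||
 *		    cdp->cpu_reset_handler == (uintptr_t)start_cpu)
 *			((void (*)(void))cdp->cpu_reset_handler)();
 *		break;
 *	}
 *	for (;;) ;	// Lskip_cpu_reset_handler: hang if nothing matched or the handler returned
 */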
	.text
	.align	12
	.globl	EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
	/*
	 * On reset, both RVBAR_EL1 and VBAR_EL1 point here. SPSel.SP is 1,
	 * so on reset the CPU will jump to offset 0x0 and on exceptions
	 * the CPU will jump to offset 0x200, 0x280, 0x300, or 0x380.
	 * In order for both the reset vector and exception vectors to
	 * coexist in the same space, the reset code is moved to the end
	 * of the exception vector area.
	 */
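	/*
	 * For reference, the architectural VBAR_EL1 layout (one 0x80-byte slot
	 * per vector):
	 *	0x000-0x180	Current EL with SP0	(sync, IRQ, FIQ, SError)
	 *	0x200-0x380	Current EL with SPx	(sync, IRQ, FIQ, SError) <- used here, since SPSel.SP == 1
	 *	0x400-0x580	Lower EL, AArch64
	 *	0x600-0x780	Lower EL, AArch32
	 */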
	b	EXT(reset_vector)

	/* EL1 SP1: These vectors trap errors during early startup on non-boot CPUs. */
	.align	9
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.

	.align	7
	.globl	EXT(reset_vector)
LEXT(reset_vector)
	// Preserve x0 for start_first_cpu, if called
	// Unlock the core for debugging
	msr	OSLAR_EL1, xzr
	msr	DAIFSet, #(DAIFSC_ALL)		// Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	msr	VBAR_EL1, x0
#endif



	// Process reset handlers
	adrp	x19, EXT(ResetHandlerData)@page	// Get address of the reset handler data
	add	x19, x19, EXT(ResetHandlerData)@pageoff
	mrs	x15, MPIDR_EL1			// Load MPIDR to get CPU number
#if HAS_CLUSTER
	and	x0, x15, #0xFFFF		// CPU number in Affinity0, cluster ID in Affinity1
#else
	and	x0, x15, #0xFF			// CPU number is in MPIDR Affinity Level 0
#endif
	ldr	x1, [x19, CPU_DATA_ENTRIES]	// Load start of data entries
	add	x3, x1, MAX_CPUS * 16		// end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
	ldr	x21, [x1, CPU_DATA_PADDR]	// Load physical CPU data address
	cbz	x21, Lnext_cpu_data_entry
	ldr	w2, [x21, CPU_PHYS_ID]		// Load the cpu phys id
	cmp	x0, x2				// Compare cpu data phys cpu and MPIDR_EL1 phys cpu
	b.eq	Lfound_cpu_data_entry		// Branch if match
Lnext_cpu_data_entry:
	add	x1, x1, #16			// Increment to the next cpu data entry
	cmp	x1, x3
	b.eq	Lskip_cpu_reset_handler		// Not found
	b	Lcheck_cpu_data_entry		// loop
Lfound_cpu_data_entry:
	adrp	x20, EXT(const_boot_args)@page
	add	x20, x20, EXT(const_boot_args)@pageoff
	ldr	x0, [x21, CPU_RESET_HANDLER]	// Call CPU reset handler
	cbz	x0, Lskip_cpu_reset_handler

	// Validate that our handler is one of the two expected handlers
	adrp	x2, EXT(resume_idle_cpu)@page
	add	x2, x2, EXT(resume_idle_cpu)@pageoff
	cmp	x0, x2
	beq	1f
	adrp	x2, EXT(start_cpu)@page
	add	x2, x2, EXT(start_cpu)@pageoff
	cmp	x0, x2
	bne	Lskip_cpu_reset_handler
1:

#if HAS_BP_RET
	bl	EXT(set_bp_ret)
#endif

#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Populate TPIDR_EL1 (in case the CPU takes an exception while
	 * turning on the MMU).
	 */
	ldr	x13, [x21, CPU_ACTIVE_THREAD]
	msr	TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

	blr	x0
Lskip_cpu_reset_handler:
	b	.				// Hang if the handler is NULL or returns

	.align	3
	.global	EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
	.global	EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
	.space	(stSize_NUM),0
#endif

	.section __DATA_CONST,__const
	.align	3
	.globl	EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space	(rhdSize_NUM),0			// (filled with 0s)
	.text


/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point. Reset vector = (__start & ~0xfff)
 */
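/*
 * Illustrative arithmetic only (not part of the build): since LowResetVectorBase
 * is 4 KiB aligned and, per the comment above, __start lies in that same page,
 * iBoot can recover the reset vector from the entry point alone, e.g. in C:
 *
 *	uint64_t reset_vector_pa = kernel_entry_pa & ~0xfffULL;
 */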
	.align	3
	.globl	EXT(_start)
LEXT(_start)
	b	EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
	.align	12, 0
	.global	EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
	/* EL1 SP 0 */
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	/* EL1 SP1 */
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	/* EL0 64 */
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	/* EL0 32 */
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	7
	b	.
	.align	12, 0

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
.align ARM_PGSHIFT
.globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
	.align	2
	.globl	EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
	adrp	lr, EXT(arm_init_idle_cpu)@page
	add	lr, lr, EXT(arm_init_idle_cpu)@pageoff
	b	start_cpu

	.align	2
	.globl	EXT(start_cpu)
LEXT(start_cpu)
	adrp	lr, EXT(arm_init_cpu)@page
	add	lr, lr, EXT(arm_init_cpu)@pageoff
	b	start_cpu

	.align	2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	// This is done right away in reset vector for pre-KTRR devices
	// Set low reset vector now that we are in the KTRR-free zone
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

	// x20 set to BootArgs phys address
	// x21 set to cpu data phys address

	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]	// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]	// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]		// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr	x26, [x20, BA_BOOT_FLAGS]	// Get the kernel boot flags


	// Set TPIDRRO_EL0 with the CPU number
	ldr	x0, [x21, CPU_NUMBER_GS]
	msr	TPIDRRO_EL0, x0

	// Set the exception stack pointer
	ldr	x0, [x21, CPU_EXCEPSTACK_TOP]


	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov	x1, lr
	bl	EXT(pinst_spsel_1)
	mov	lr, x1
#else
	msr	SPSel, #1
#endif
	mov	sp, x0

	// Set the interrupt stack pointer
	ldr	x0, [x21, CPU_INTSTACK_TOP]
	msr	SPSel, #0
	mov	sp, x0

	// Convert lr to KVA
	add	lr, lr, x22
	sub	lr, lr, x23

	b	common_start

/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 * arg0 - Virtual address
 * arg1 - L1 table address
 * arg2 - L2 table address
 * arg3 - Scratch register
 * arg4 - Scratch register
 * arg5 - Scratch register
 */
.macro create_l1_table_entry
	and	$3, $0, #(ARM_TT_L1_INDEX_MASK)
	lsr	$3, $3, #(ARM_TT_L1_SHIFT)	// Get index in L1 table for L2 table
	lsl	$3, $3, #(TTE_SHIFT)		// Convert index into pointer offset
	add	$3, $1, $3			// Get L1 entry pointer
	mov	$4, #(ARM_TTE_BOOT_TABLE)	// Get L1 table entry template
	and	$5, $2, #(ARM_TTE_TABLE_MASK)	// Get address bits of L2 table
	orr	$5, $4, $5			// Create table entry for L2 table
	str	$5, [$3]			// Write entry to L1 table
.endmacro
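/*
 * Illustrative sketch only (not assembled): an approximate C equivalent of
 * create_l1_table_entry, assuming TTE_SHIFT == 3 (8-byte translation table
 * entries). The tt_entry_t name is used here only for illustration.
 *
 *	static inline void
 *	create_l1_table_entry_c(uint64_t va, tt_entry_t *l1, uint64_t l2_pa)
 *	{
 *		uint64_t idx = (va & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT;
 *		l1[idx] = ARM_TTE_BOOT_TABLE | (l2_pa & ARM_TTE_TABLE_MASK);
 *	}
 */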

/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 * arg0 - Virtual address
 * arg1 - Physical address
 * arg2 - L2 table address
 * arg3 - Number of entries
 * arg4 - Scratch register
 * arg5 - Scratch register
 * arg6 - Scratch register
 * arg7 - Scratch register
 */
.macro create_l2_block_entries
	and	$4, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)	// Get index in L2 table for block entry
	lsl	$4, $4, #(TTE_SHIFT)			// Convert index into pointer offset
	add	$4, $2, $4				// Get L2 entry pointer
	mov	$5, #(ARM_TTE_BOOT_BLOCK)		// Get L2 block entry template
	and	$6, $1, #(ARM_TTE_BLOCK_L2_MASK)	// Get address bits of block mapping
	orr	$6, $5, $6
	mov	$5, $3
	mov	$7, #(ARM_TT_L2_SIZE)
1:
	str	$6, [$4], #(1 << TTE_SHIFT)		// Write entry to L2 table and advance
	add	$6, $6, $7				// Increment the output address
	subs	$5, $5, #1				// Decrement the number of entries
	b.ne	1b
.endmacro
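/*
 * Illustrative sketch only (not assembled): an approximate C equivalent of
 * create_l2_block_entries, again assuming 8-byte entries. The caller
 * guarantees that count does not run past the end of the L2 page.
 *
 *	static inline void
 *	create_l2_block_entries_c(uint64_t va, uint64_t pa, tt_entry_t *l2, uint64_t count)
 *	{
 *		uint64_t idx = (va & ARM_TT_L2_INDEX_MASK) >> ARM_TTE_BLOCK_L2_SHIFT;
 *		tt_entry_t tte = ARM_TTE_BOOT_BLOCK | (pa & ARM_TTE_BLOCK_L2_MASK);
 *
 *		for (uint64_t i = 0; i < count; i++) {
 *			l2[idx + i] = tte;
 *			tte += ARM_TT_L2_SIZE;	// advance the output address by one block
 *		}
 *	}
 */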

/*
 * arg0 - virtual start address
 * arg1 - physical start address
 * arg2 - number of entries to map
 * arg3 - L1 table address
 * arg4 - free space pointer
 * arg5 - scratch (entries mapped per loop)
 * arg6 - scratch
 * arg7 - scratch
 * arg8 - scratch
 * arg9 - scratch
 */
.macro create_bootstrap_mapping
	/* calculate entries left in this page */
	and	$5, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$5, $5, #(ARM_TT_L2_SHIFT)
	mov	$6, #(TTE_PGENTRIES)
	sub	$5, $6, $5

	/* allocate an L2 table */
3:	add	$4, $4, PGBYTES

	/* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
	create_l1_table_entry	$0, $3, $4, $6, $7, $8

	/* determine how many entries to map this loop - the smaller of entries
	 * remaining in page and total entries left */
	cmp	$2, $5
	csel	$5, $2, $5, lt
	/* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3, scratch4) */
	create_l2_block_entries	$0, $1, $4, $5, $6, $7, $8, $9

	/* subtract entries just mapped and bail out if we're done */
	subs	$2, $2, $5
	beq	2f

	/* entries left to map - advance base pointers */
	add	$0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
	add	$1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

	mov	$5, #(TTE_PGENTRIES)	/* subsequent loops map (up to) a whole L2 page */
	b	3b
2:
.endmacro
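/*
 * Illustrative sketch only (not assembled): an approximate C equivalent of
 * create_bootstrap_mapping, reusing the hypothetical helpers sketched above.
 * "alloc_cursor" stands in for the free-space pointer (arg4).
 *
 *	static void
 *	create_bootstrap_mapping_c(uint64_t va, uint64_t pa, uint64_t entries,
 *	    tt_entry_t *l1, uint64_t alloc_cursor)
 *	{
 *		// entries remaining in the L2 page that covers va
 *		uint64_t chunk = TTE_PGENTRIES -
 *		    ((va & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
 *
 *		while (entries != 0) {
 *			alloc_cursor += PGBYTES;		// allocate the next L2 table
 *			create_l1_table_entry_c(va, l1, alloc_cursor);
 *			if (chunk > entries)
 *				chunk = entries;
 *			// V=P at this point, so the physical address is directly usable
 *			create_l2_block_entries_c(va, pa, (tt_entry_t *)alloc_cursor, chunk);
 *			entries -= chunk;
 *			va += chunk << ARM_TT_L2_SHIFT;
 *			pa += chunk << ARM_TT_L2_SHIFT;
 *			chunk = TTE_PGENTRIES;			// later loops map up to a whole L2 page
 *		}
 *	}
 */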

/*
 * _start_first_cpu
 * Cold boot init routine. Called from __start
 * x0 - Boot args
 */
	.align	2
	.globl	EXT(start_first_cpu)
LEXT(start_first_cpu)

	// Unlock the core for debugging
	msr	OSLAR_EL1, xzr
	msr	DAIFSet, #(DAIFSC_ALL)		// Disable all interrupts

	mov	x20, x0
	mov	x21, #0

	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0


	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]	// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]	// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]		// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr	x26, [x20, BA_BOOT_FLAGS]	// Get the kernel boot flags

	// Clear the register that will be used to store the userspace thread pointer and CPU number.
	// We may not actually be booting from ordinal CPU 0, so this register will be updated
	// in ml_parse_cpu_topology(), which happens later in bootstrap.
	msr	TPIDRRO_EL0, x21

	// Set up exception stack pointer
	adrp	x0, EXT(excepstack_top)@page	// Load top of exception stack
	add	x0, x0, EXT(excepstack_top)@pageoff
	add	x0, x0, x22			// Convert to KVA
	sub	x0, x0, x23

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl	EXT(pinst_spsel_1)
#else
	msr	SPSel, #1
#endif

	mov	sp, x0

	// Set up interrupt stack pointer
	adrp	x0, EXT(intstack_top)@page	// Load top of irq stack
	add	x0, x0, EXT(intstack_top)@pageoff
	add	x0, x0, x22			// Convert to KVA
	sub	x0, x0, x23
	msr	SPSel, #0			// Set SP_EL0 to interrupt stack
	mov	sp, x0

	// Load address to the C init routine into link register
	adrp	lr, EXT(arm_init)@page
	add	lr, lr, EXT(arm_init)@pageoff
	add	lr, lr, x22			// Convert to KVA
	sub	lr, lr, x23

	/*
	 * Set up the bootstrap page tables with a single block entry for the V=P
	 * mapping, a single block entry for the trampolined kernel address (KVA),
	 * and all else invalid. This requires four pages:
	 *	Page 1 - V=P L1 table
	 *	Page 2 - V=P L2 table
	 *	Page 3 - KVA L1 table
	 *	Page 4 - KVA L2 table
	 */
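	/*
	 * Illustrative sketch only (not assembled): an approximate C equivalent
	 * of the invalidation loop below, which clears all four bootstrap table
	 * pages.
	 *
	 *	tt_entry_t *tt = (tt_entry_t *)bootstrap_pagetables;
	 *	for (unsigned i = 0; i < 4 * TTE_PGENTRIES; i++)
	 *		tt[i] = ARM_TTE_EMPTY;
	 */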

	// Invalidate all entries in the bootstrap page tables
	mov	x0, #(ARM_TTE_EMPTY)		// Load invalid entry template
	mov	x1, x25				// Start at V=P pagetable root
	mov	x2, #(TTE_PGENTRIES)		// Load number of entries per page
	lsl	x2, x2, #2			// Shift by 2 for num entries on 4 pages

Linvalidate_bootstrap:				// do {
	str	x0, [x1], #(1 << TTE_SHIFT)	// Invalidate and advance
	subs	x2, x2, #1			// entries--
	b.ne	Linvalidate_bootstrap		// } while (entries != 0)

	/*
	 * In order to reclaim memory on targets where TZ0 (or some other entity)
	 * must be located at the base of memory, iBoot may set the virtual and
	 * physical base addresses to immediately follow whatever lies at the
	 * base of physical memory.
	 *
	 * If the base address belongs to TZ0, it may be dangerous for xnu to map
	 * it (as it may be prefetched, despite being technically inaccessible).
	 * In order to avoid this issue while keeping the mapping code simple, we
	 * may continue to use block mappings, but we will only map the kernelcache
	 * mach header to the end of memory.
	 *
	 * Given that iBoot guarantees that the unslid kernelcache base address
	 * will begin on an L2 boundary, this should prevent us from accidentally
	 * mapping TZ0.
	 */
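	/*
	 * Illustrative sketch only (not assembled): an approximate C rendering of
	 * the header probe below. The 0x18, 0x20 and 0x38 offsets are the 64-bit
	 * Mach-O header's flags field, its first load command, and that command's
	 * vmaddr, respectively.
	 *
	 *	kernel_mach_header_t *mh = &_mh_execute_header;
	 *	uint64_t base;
	 *	if (mh->flags & MH_DYLIB_IN_CACHE) {
	 *		struct segment_command_64 *seg = (struct segment_command_64 *)(mh + 1);
	 *		if (seg->cmd != LC_SEGMENT_64)
	 *			for (;;) ;			// unexpected layout: spin
	 *		uint64_t slide = (uint64_t)mh - seg->vmaddr;
	 *		base = VM_KERNEL_LINK_ADDRESS + slide;	// kernelcache base
	 *	} else {
	 *		base = (uint64_t)mh;			// base is the kernel mach header itself
	 *	}
	 */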
	adrp	x0, EXT(_mh_execute_header)@page	// address of kernel mach header
	add	x0, x0, EXT(_mh_execute_header)@pageoff
	ldr	w1, [x0, #0x18]			// load mach_header->flags
	tbz	w1, #0x1f, Lkernelcache_base_found	// if MH_DYLIB_IN_CACHE unset, base is kernel mach header
	ldr	w1, [x0, #0x20]			// load first segment cmd (offset sizeof(kernel_mach_header_t))
	cmp	w1, #0x19			// must be LC_SEGMENT_64
	bne	.
	ldr	x1, [x0, #0x38]			// load first segment vmaddr
	sub	x1, x0, x1			// compute slide
	MOV64	x0, VM_KERNEL_LINK_ADDRESS
	add	x0, x0, x1			// base is kernel link address + slide

Lkernelcache_base_found:
	/*
	 * Adjust physical and virtual base addresses to account for physical
	 * memory preceding the xnu Mach-O header
	 *	x22 - Kernel virtual base
	 *	x23 - Kernel physical base
	 *	x24 - Physical memory size
	 */
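	/*
	 * Illustrative sketch only (not assembled): the adjustment below in C,
	 * with "base" as computed above.
	 *
	 *	uint64_t adjust = base - phys_base;	// bytes preceding the kernelcache
	 *	mem_size  -= adjust;
	 *	virt_base += adjust;
	 *	phys_base += adjust;
	 */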
	sub	x18, x0, x23
	sub	x24, x24, x18
	add	x22, x22, x18
	add	x23, x23, x18

	/*
	 * x0  - V=P virtual cursor
	 * x4  - V=P physical cursor
	 * x14 - KVA virtual cursor
	 * x15 - KVA physical cursor
	 */
	mov	x4, x0
	mov	x14, x22
	mov	x15, x23

	/*
	 * Allocate L1 tables
	 * x1 - V=P L1 page
	 * x3 - KVA L1 page
	 * x2 - free mem pointer from which we allocate a variable number of L2
	 * pages. The maximum number of bootstrap page table pages is limited to
	 * BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the worst-case
	 * slide, we need 1xL1 and up to 3xL2 pages (1GB mapped per L1 entry), so
	 * 8 total pages for V=P and KVA.
	 */
	mov	x1, x25
	add	x3, x1, PGBYTES
	mov	x2, x3

	/*
	 * Setup the V=P bootstrap mapping
	 * x5 - total number of L2 entries to allocate
	 */
	lsr	x5, x24, #(ARM_TT_L2_SHIFT)
	/* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
	create_bootstrap_mapping	x0, x4, x5, x1, x2, x6, x10, x11, x12, x13

	/* Setup the KVA bootstrap mapping */
	lsr	x5, x24, #(ARM_TT_L2_SHIFT)
	create_bootstrap_mapping	x14, x15, x5, x3, x2, x9, x10, x11, x12, x13

	/* Ensure TTEs are visible */
	dsb	ish


	b	common_start

/*
 * Begin common CPU initialization
 *
 * Register state:
 * x20 - PA of boot args
 * x21 - zero on cold boot, PA of cpu data on warm reset
 * x22 - Kernel virtual base
 * x23 - Kernel physical base
 * x25 - PA of the V=P pagetable root
 * lr  - KVA of C init routine
 * sp  - SP_EL0 selected
 *
 * SP_EL0 - KVA of CPU's interrupt stack
 * SP_EL1 - KVA of CPU's exception stack
 * TPIDRRO_EL0 - CPU number
 */
common_start:

#if HAS_NEX_PG
	mov	x19, lr
	bl	EXT(set_nex_pg)
	mov	lr, x19
#endif

	// Set the translation control register.
	adrp	x0, EXT(sysreg_restore)@page	// Load TCR value from the system register restore structure
	add	x0, x0, EXT(sysreg_restore)@pageoff
	ldr	x1, [x0, SR_RESTORE_TCR_EL1]
	MSR_TCR_EL1_X1

	/* Set up translation table base registers.
	 *	TTBR0 - V=P table @ top of kernel
	 *	TTBR1 - KVA table @ top of kernel + 1 page
	 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Note that for KTRR configurations, the V=P map will be modified by
	 * arm_vm_init.c.
	 */
#endif
	and	x0, x25, #(TTBR_BADDR_MASK)
	mov	x19, lr
	bl	EXT(set_mmu_ttb)
	mov	lr, x19
	add	x0, x25, PGBYTES
	and	x0, x0, #(TTBR_BADDR_MASK)
	MSR_TTBR1_EL1_X0

	// Set up MAIR attr0 for normal memory, attr1 for device memory
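	/*
	 * Illustrative sketch only (not assembled): the MAIR_EL1 value composed
	 * below, assuming MAIR_ATTR_SHIFT(n) expands to (8 * n), i.e. one
	 * attribute byte per CACHE_ATTRINDX_ index.
	 *
	 *	uint64_t mair = 0;
	 *	mair |= (uint64_t)MAIR_WRITEBACK      << (8 * CACHE_ATTRINDX_WRITEBACK);
	 *	mair |= (uint64_t)MAIR_INNERWRITEBACK << (8 * CACHE_ATTRINDX_INNERWRITEBACK);
	 *	mair |= (uint64_t)MAIR_DISABLE        << (8 * CACHE_ATTRINDX_DISABLE);
	 *	// ... and likewise for WRITETHRU, WRITECOMB and the POSTED variants.
	 */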
	mov	x0, xzr
	mov	x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
	orr	x0, x0, x1
	mov	x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
	orr	x0, x0, x1
	mov	x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
	orr	x0, x0, x1
	mov	x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
	orr	x0, x0, x1
	mov	x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_REORDERED))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED))
	orr	x0, x0, x1
	msr	MAIR_EL1, x0
	isb
	tlbi	vmalle1
	dsb	ish

#if defined(APPLEHURRICANE)
	// <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
	// Needs to be done before MMU is enabled
	HID_INSERT_BITS	HID5, ARM64_REG_HID5_CrdEdbSnpRsvd_mask, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE, x12
#endif

#if defined(BCM2837)
	// Setup timer interrupt routing; must be done before MMU is enabled
	mrs	x15, MPIDR_EL1			// Load MPIDR to get CPU number
	and	x15, x15, #0xFF			// CPU number is in MPIDR Affinity Level 0
	mov	x0, #0x4000
	lsl	x0, x0, #16
	add	x0, x0, #0x0040			// x0: 0x4000004X Core Timers interrupt control
	add	x0, x0, x15, lsl #2
	mov	w1, #0xF0			// x1: 0xF0 Route to Core FIQs
	str	w1, [x0]
	isb	sy
#endif

#ifndef __ARM_IC_NOALIAS_ICACHE__
	/* Invalidate the TLB and icache on systems that do not guarantee that the
	 * caches are invalidated on reset.
	 */
	tlbi	vmalle1
	ic	iallu
#endif

	/* If x21 is not 0, then this is either the start_cpu path or
	 * the resume_idle_cpu path. cpu_ttep should already be
	 * populated, so just switch to the kernel_pmap now.
	 */

	cbz	x21, 1f
	adrp	x0, EXT(cpu_ttep)@page
	add	x0, x0, EXT(cpu_ttep)@pageoff
	ldr	x0, [x0]
	MSR_TTBR1_EL1_X0
1:

	// Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
	/* If this is not the first reset of the boot CPU, the alternate mapping
	 * for the exception vectors will be set up, so use it. Otherwise, we
	 * should use the mapping located in the kernelcache mapping.
	 */
	MOV64	x0, ARM_KERNEL_PROTECT_EXCEPTION_START

	cbnz	x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
	adrp	x0, EXT(ExceptionVectorsBase)@page	// Load exception vectors base address
	add	x0, x0, EXT(ExceptionVectorsBase)@pageoff
	add	x0, x0, x22			// Convert exception vector address to KVA
	sub	x0, x0, x23
1:
	MSR_VBAR_EL1_X0

1:
#ifdef HAS_APPLE_PAC

	// Enable caches, MMU, ROP and JOP
	MOV64	x0, SCTLR_EL1_DEFAULT
	orr	x0, x0, #(SCTLR_PACIB_ENABLED)	/* IB is ROP */

	MOV64	x1, SCTLR_JOP_KEYS_ENABLED
	orr	x0, x0, x1
#else /* HAS_APPLE_PAC */

	// Enable caches and MMU
	MOV64	x0, SCTLR_EL1_DEFAULT
#endif /* HAS_APPLE_PAC */
	MSR_SCTLR_EL1_X0
	isb	sy

	MOV64	x1, SCTLR_EL1_DEFAULT
#if HAS_APPLE_PAC
	orr	x1, x1, #(SCTLR_PACIB_ENABLED)
	MOV64	x2, SCTLR_JOP_KEYS_ENABLED
	orr	x1, x1, x2
#endif /* HAS_APPLE_PAC */
	cmp	x0, x1
	bne	.

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
	/* Watchtower
	 *
	 * If we have a Watchtower monitor it will set up CPACR_EL1 for us; touching
	 * it here would trap to EL3.
	 */

	// Enable NEON
	mov	x0, #(CPACR_FPEN_ENABLE)
	msr	CPACR_EL1, x0
#endif

	// Clear thread pointer
	msr	TPIDR_EL1, xzr			// Set thread register


#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Initialization common to all non-virtual Apple targets
	ARM64_IS_PCORE x15
	ARM64_READ_EP_SPR x15, x12, S3_0_C15_C4_1, S3_0_C15_C4_0
	orr	x12, x12, ARM64_REG_HID4_DisDcMVAOps
	orr	x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
	ARM64_WRITE_EP_SPR x15, x12, S3_0_C15_C4_1, S3_0_C15_C4_0
#endif // APPLE_ARM64_ARCH_FAMILY

	// Read MIDR before start of per-SoC tunables
	mrs	x12, MIDR_EL1

	APPLY_TUNABLES x12, x13



#if HAS_CLUSTER
	// Unmask external IRQs if we're restarting from non-retention WFI
	mrs	x9, CPU_OVRD
	and	x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	msr	CPU_OVRD, x9
#endif

	// If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
	cbnz	x21, Ltrampoline

	// Set KVA of boot args as first arg
	add	x0, x20, x22
	sub	x0, x0, x23

#if KASAN
	mov	x20, x0
	mov	x21, lr

	// x0: boot args
	// x1: KVA page table phys base
	mrs	x1, TTBR1_EL1
	bl	EXT(kasan_bootstrap)

	mov	x0, x20
	mov	lr, x21
#endif

	// Return to arm_init()
	ret

Ltrampoline:
	// Load VA of the trampoline
	adrp	x0, arm_init_tramp@page
	add	x0, x0, arm_init_tramp@pageoff
	add	x0, x0, x22
	sub	x0, x0, x23

	// Branch to the trampoline
	br	x0

/*
 * V=P to KVA trampoline.
 *	x0 - KVA of cpu data pointer
 */
	.text
	.align	2
arm_init_tramp:
	/* On a warm boot, the full kernel translation table is initialized in
	 * addition to the bootstrap tables. The layout is as follows:
	 *
	 *  +--Top of Memory--+
	 *  ...
	 *  |                 |
	 *  | Primary Kernel  |
	 *  |  Trans. Table   |
	 *  |                 |
	 *  +--Top + 5 pages--+
	 *  |                 |
	 *  |  Invalid Table  |
	 *  |                 |
	 *  +--Top + 4 pages--+
	 *  |                 |
	 *  |    KVA Table    |
	 *  |                 |
	 *  +--Top + 2 pages--+
	 *  |                 |
	 *  |    V=P Table    |
	 *  |                 |
	 *  +--Top of Kernel--+
	 *  |                 |
	 *  |  Kernel Mach-O  |
	 *  |                 |
	 *  ...
	 *  +---Kernel Base---+
	 */


	mov	x19, lr
#if defined(HAS_VMSA_LOCK)
	bl	EXT(vmsa_lock)
#endif
	// Convert CPU data PA to VA and set as first argument
	mov	x0, x21
	bl	EXT(phystokv)

	mov	lr, x19

	/* Return to arm_init() */
	ret

//#include "globals_asm.h"

/* vim: set ts=4: */