[apple/xnu.git] / osfmk / arm64 / locore.s
1/*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/proc_reg.h>
31#include <pexpert/arm64/board_config.h>
32#include <mach/exception_types.h>
33#include <mach_kdp.h>
34#include <config_dtrace.h>
35#include "assym.s"
36
37#if __ARM_KERNEL_PROTECT__
38#include <arm/pmap.h>
39#endif
40
41
42/*
43 * INIT_SAVED_STATE_FLAVORS
44 *
45 * Initializes the saved state flavors of a new saved state structure
46 * arg0 - saved state pointer
47 * arg1 - 32-bit scratch reg
48 * arg2 - 32-bit scratch reg
49 */
50.macro INIT_SAVED_STATE_FLAVORS
51 mov $1, ARM_SAVED_STATE64 // Set saved state to 64-bit flavor
52 mov $2, ARM_SAVED_STATE64_COUNT
53 stp $1, $2, [$0, SS_FLAVOR]
54 mov $1, ARM_NEON_SAVED_STATE64 // Set neon state to 64-bit flavor
55 str $1, [$0, NS_FLAVOR]
56 mov $1, ARM_NEON_SAVED_STATE64_COUNT
57 str $1, [$0, NS_COUNT]
58.endmacro
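/*
 * As a rough illustration of what this macro stamps into a freshly created
 * save area, a hedged C sketch follows. The structs below are simplified
 * stand-ins (not the kernel's actual arm_context_t definition); the field
 * order mirrors the SS_FLAVOR, NS_FLAVOR and NS_COUNT offsets used above.
 *
 *	#include <stdint.h>
 *
 *	struct ss_header   { uint32_t flavor; uint32_t count; };	// at SS_FLAVOR
 *	struct neon_header { uint32_t flavor; uint32_t count; };	// at NS_FLAVOR
 *
 *	static void
 *	init_saved_state_flavors(struct ss_header *ss, struct neon_header *ns)
 *	{
 *		ss->flavor = ARM_SAVED_STATE64;			// 64-bit GPR flavor
 *		ss->count  = ARM_SAVED_STATE64_COUNT;
 *		ns->flavor = ARM_NEON_SAVED_STATE64;		// 64-bit NEON flavor
 *		ns->count  = ARM_NEON_SAVED_STATE64_COUNT;
 *	}
 */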
59
60
61/*
62 * SPILL_REGISTERS
63 *
64 * Spills the current set of registers (excluding x0 and x1) to the specified
65 * save area.
66 * x0 - Address of the save area
67 */
68.macro SPILL_REGISTERS
69 stp x2, x3, [x0, SS64_X2] // Save remaining GPRs
70 stp x4, x5, [x0, SS64_X4]
71 stp x6, x7, [x0, SS64_X6]
72 stp x8, x9, [x0, SS64_X8]
73 stp x10, x11, [x0, SS64_X10]
74 stp x12, x13, [x0, SS64_X12]
75 stp x14, x15, [x0, SS64_X14]
76 stp x16, x17, [x0, SS64_X16]
77 stp x18, x19, [x0, SS64_X18]
78 stp x20, x21, [x0, SS64_X20]
79 stp x22, x23, [x0, SS64_X22]
80 stp x24, x25, [x0, SS64_X24]
81 stp x26, x27, [x0, SS64_X26]
82 str x28, [x0, SS64_X28]
83
84 /* Save arm_neon_saved_state64 */
85
86 stp q0, q1, [x0, NS64_Q0]
87 stp q2, q3, [x0, NS64_Q2]
88 stp q4, q5, [x0, NS64_Q4]
89 stp q6, q7, [x0, NS64_Q6]
90 stp q8, q9, [x0, NS64_Q8]
91 stp q10, q11, [x0, NS64_Q10]
92 stp q12, q13, [x0, NS64_Q12]
93 stp q14, q15, [x0, NS64_Q14]
94 stp q16, q17, [x0, NS64_Q16]
95 stp q18, q19, [x0, NS64_Q18]
96 stp q20, q21, [x0, NS64_Q20]
97 stp q22, q23, [x0, NS64_Q22]
98 stp q24, q25, [x0, NS64_Q24]
99 stp q26, q27, [x0, NS64_Q26]
100 stp q28, q29, [x0, NS64_Q28]
101 stp q30, q31, [x0, NS64_Q30]
102
103 mrs lr, ELR_EL1 // Get exception link register
104 mrs x23, SPSR_EL1 // Load CPSR into var reg x23
105 mrs x24, FPSR
106 mrs x25, FPCR
107
108 str lr, [x0, SS64_PC] // Save ELR to PCB
109 str w23, [x0, SS64_CPSR] // Save CPSR to PCB
110 str w24, [x0, NS64_FPSR]
111 str w25, [x0, NS64_FPCR]
112
113 mrs x20, FAR_EL1
114 mrs x21, ESR_EL1
115 str x20, [x0, SS64_FAR]
116 str w21, [x0, SS64_ESR]
117.endmacro
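/*
 * Together with the x0/x1/fp/lr/sp values saved by the vector stubs below,
 * this macro completes the 64-bit machine state. A hedged C sketch of the
 * data captured (a simplified layout for illustration, not the kernel's
 * exact arm_context_t):
 *
 *	#include <stdint.h>
 *
 *	struct spilled_state64 {
 *		uint64_t	x[29];		// x0-x28 (x0/x1 saved by the caller)
 *		uint64_t	fp, lr, sp;	// saved by the vector macros
 *		uint64_t	pc;		// from ELR_EL1
 *		uint32_t	cpsr;		// from SPSR_EL1
 *		uint64_t	far;		// from FAR_EL1
 *		uint32_t	esr;		// from ESR_EL1
 *		__uint128_t	q[32];		// q0-q31
 *		uint32_t	fpsr, fpcr;
 *	};
 */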
118
119
120#define CBF_DISABLE 0
121#define CBF_ENABLE 1
122
123.macro COMPARE_BRANCH_FUSION
124#if defined(APPLE_ARM64_ARCH_FAMILY)
125 mrs $1, ARM64_REG_HID1
126 .if $0 == CBF_DISABLE
127 orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
128 .else
129 mov $2, ARM64_REG_HID1_disCmpBrFusion
130 bic $1, $1, $2
131 .endif
132 msr ARM64_REG_HID1, $1
133 .if $0 == CBF_DISABLE
134 isb sy
135 .endif
136#endif
137.endmacro
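/*
 * This is a read-modify-write of the implementation-defined HID1 chicken bit
 * that disables compare/branch fusion. In C-like terms (a sketch only; the
 * read_hid1/write_hid1/isb helpers are hypothetical, since the HID registers
 * are only reachable via MRS/MSR from assembly):
 *
 *	uint64_t hid1 = read_hid1();
 *	if (mode == CBF_DISABLE)
 *		hid1 |= ARM64_REG_HID1_disCmpBrFusion;
 *	else
 *		hid1 &= ~ARM64_REG_HID1_disCmpBrFusion;
 *	write_hid1(hid1);
 *	if (mode == CBF_DISABLE)
 *		isb();					// isb sy
 */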
138
139/*
140 * MAP_KERNEL
141 *
142 * Restores the kernel EL1 mappings, if necessary.
143 *
144 * This may mutate x18.
145 */
146.macro MAP_KERNEL
147#if __ARM_KERNEL_PROTECT__
148 /* Switch to the kernel ASID (low bit set) for the task. */
149 mrs x18, TTBR0_EL1
150 orr x18, x18, #(1 << TTBR_ASID_SHIFT)
151 msr TTBR0_EL1, x18
152
153 /*
154 * We eschew some barriers on Apple CPUs, as relative ordering of writes
155 * to the TTBRs and writes to the TCR should be ensured by the
156 * microarchitecture.
157 */
158#if !defined(APPLE_ARM64_ARCH_FAMILY)
159 isb sy
160#endif
161
162 /*
163 * Update the TCR to map the kernel now that we are using the kernel
164 * ASID.
165 */
166 MOV64 x18, TCR_EL1_BOOT
167 msr TCR_EL1, x18
168 isb sy
169#endif /* __ARM_KERNEL_PROTECT__ */
170.endmacro
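/*
 * Conceptually, the sequence above is (a hedged sketch; the register accessor
 * names are illustrative stand-ins for the MRS/MSR instructions):
 *
 *	uint64_t ttbr0 = read_ttbr0_el1();
 *	ttbr0 |= (1ULL << TTBR_ASID_SHIFT);	// kernel ASID has the low bit set
 *	write_ttbr0_el1(ttbr0);
 *	// non-Apple cores need an isb() here to order the TTBR and TCR writes
 *	write_tcr_el1(TCR_EL1_BOOT);		// TCR value that maps the kernel
 *	isb();
 */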
171
172/*
173 * BRANCH_TO_KVA_VECTOR
174 *
175 * Branches to the requested long exception vector in the kernelcache.
176 * arg0 - The label to branch to
 177 * arg1 - The index of the label in exc_vectors_table
178 *
179 * This may mutate x18.
180 */
181.macro BRANCH_TO_KVA_VECTOR
182#if __ARM_KERNEL_PROTECT__
183 /*
184 * Find the kernelcache table for the exception vectors by accessing
185 * the per-CPU data.
186 */
187 mrs x18, TPIDR_EL1
188 ldr x18, [x18, ACT_CPUDATAP]
189 ldr x18, [x18, CPU_EXC_VECTORS]
190
191 /*
192 * Get the handler for this exception and jump to it.
193 */
194 ldr x18, [x18, #($1 << 3)]
195 br x18
196#else
197 b $0
198#endif /* __ARM_KERNEL_PROTECT__ */
199.endmacro
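/*
 * With __ARM_KERNEL_PROTECT__, each short vector stub indirects through a
 * per-CPU table of handler addresses rather than branching directly. A hedged
 * C sketch of the lookup (the cpu data type and field names below follow the
 * ACT_CPUDATAP/CPU_EXC_VECTORS offsets used here and are assumptions):
 *
 *	typedef void (*exc_handler_t)(void);
 *
 *	static exc_handler_t
 *	lookup_long_vector(struct thread *th, unsigned int index)
 *	{
 *		struct cpu_data *cdp = th->cpu_data_ptr;	// ACT_CPUDATAP
 *		exc_handler_t *table = cdp->cpu_exc_vectors;	// CPU_EXC_VECTORS
 *		return table[index];				// index << 3 bytes in
 *	}
 */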
200
201#if __ARM_KERNEL_PROTECT__
 202	.text
203 .align 3
204 .globl EXT(exc_vectors_table)
205LEXT(exc_vectors_table)
206 /* Table of exception handlers. */
207 .quad Lel1_sp0_synchronous_vector_long
208 .quad Lel1_sp0_irq_vector_long
209 .quad Lel1_sp0_fiq_vector_long
210 .quad Lel1_sp0_serror_vector_long
211 .quad Lel1_sp1_synchronous_vector_long
212 .quad Lel1_sp1_irq_vector_long
213 .quad Lel1_sp1_fiq_vector_long
214 .quad Lel1_sp1_serror_vector_long
215 .quad Lel0_synchronous_vector_64_long
216 .quad Lel0_irq_vector_64_long
217 .quad Lel0_fiq_vector_64_long
218 .quad Lel0_serror_vector_64_long
219#endif /* __ARM_KERNEL_PROTECT__ */
220
 221	.text
222#if __ARM_KERNEL_PROTECT__
223 /*
 224 * We need this to be on a page boundary so that we can avoid mapping
225 * other text along with it. As this must be on the VM page boundary
226 * (due to how the coredumping code currently works), this will be a
227 * 16KB page boundary.
228 */
229 .align 14
230#else
 231	.align 12
 232#endif /* __ARM_KERNEL_PROTECT__ */
233 .globl EXT(ExceptionVectorsBase)
234LEXT(ExceptionVectorsBase)
235Lel1_sp0_synchronous_vector:
236 BRANCH_TO_KVA_VECTOR Lel1_sp0_synchronous_vector_long, 0
237
238 .text
239 .align 7
240Lel1_sp0_irq_vector:
241 BRANCH_TO_KVA_VECTOR Lel1_sp0_irq_vector_long, 1
242
243 .text
244 .align 7
245Lel1_sp0_fiq_vector:
246 BRANCH_TO_KVA_VECTOR Lel1_sp0_fiq_vector_long, 2
247
248 .text
249 .align 7
250Lel1_sp0_serror_vector:
251 BRANCH_TO_KVA_VECTOR Lel1_sp0_serror_vector_long, 3
252
253 .text
254 .align 7
255Lel1_sp1_synchronous_vector:
256 BRANCH_TO_KVA_VECTOR Lel1_sp1_synchronous_vector_long, 4
257
258 .text
259 .align 7
260Lel1_sp1_irq_vector:
261 BRANCH_TO_KVA_VECTOR Lel1_sp1_irq_vector_long, 5
262
263 .text
264 .align 7
265Lel1_sp1_fiq_vector:
266 BRANCH_TO_KVA_VECTOR Lel1_sp1_fiq_vector_long, 6
267
268 .text
269 .align 7
270Lel1_sp1_serror_vector:
 271	BRANCH_TO_KVA_VECTOR Lel1_sp1_serror_vector_long, 7
272
273 .text
274 .align 7
275Lel0_synchronous_vector_64:
276 MAP_KERNEL
277 BRANCH_TO_KVA_VECTOR Lel0_synchronous_vector_64_long, 8
278
279 .text
280 .align 7
281Lel0_irq_vector_64:
282 MAP_KERNEL
283 BRANCH_TO_KVA_VECTOR Lel0_irq_vector_64_long, 9
284
285 .text
286 .align 7
287Lel0_fiq_vector_64:
288 MAP_KERNEL
289 BRANCH_TO_KVA_VECTOR Lel0_fiq_vector_64_long, 10
290
291 .text
292 .align 7
293Lel0_serror_vector_64:
294 MAP_KERNEL
295 BRANCH_TO_KVA_VECTOR Lel0_serror_vector_64_long, 11
296
297 /* Fill out the rest of the page */
298 .align 12
299
300/*********************************
301 * END OF EXCEPTION VECTORS PAGE *
302 *********************************/
303
304.macro EL1_SP0_VECTOR
305 msr SPSel, #0 // Switch to SP0
306 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
307 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
308 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
309 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
310 stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
311 INIT_SAVED_STATE_FLAVORS sp, w0, w1
312 mov x0, sp // Copy saved state pointer to x0
313.endmacro
314
315Lel1_sp0_synchronous_vector_long:
316 sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack
317 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack
318 mrs x1, ESR_EL1 // Get the exception syndrome
319 /* If the stack pointer is corrupt, it will manifest either as a data abort
320 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
321 * these quickly by testing bit 5 of the exception class.
322 */
323 tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
324 mrs x0, SP_EL0 // Get SP_EL0
325 stp fp, lr, [sp, SS64_FP] // Save fp, lr to the stack
326 str x0, [sp, SS64_SP] // Save sp to the stack
327 bl check_kernel_stack
328 ldp fp, lr, [sp, SS64_FP] // Restore fp, lr
329Lkernel_stack_valid:
330 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
331 add sp, sp, ARM_CONTEXT_SIZE // Restore SP1
332 EL1_SP0_VECTOR
333 adrp x1, fleh_synchronous@page // Load address for fleh
334 add x1, x1, fleh_synchronous@pageoff
335 b fleh_dispatch64
336
 337Lel1_sp0_irq_vector_long:
338 EL1_SP0_VECTOR
339 mrs x1, TPIDR_EL1
340 ldr x1, [x1, ACT_CPUDATAP]
341 ldr x1, [x1, CPU_ISTACKPTR]
342 mov sp, x1
343 adrp x1, fleh_irq@page // Load address for fleh
344 add x1, x1, fleh_irq@pageoff
345 b fleh_dispatch64
346
 347Lel1_sp0_fiq_vector_long:
348 // ARM64_TODO write optimized decrementer
349 EL1_SP0_VECTOR
350 mrs x1, TPIDR_EL1
351 ldr x1, [x1, ACT_CPUDATAP]
352 ldr x1, [x1, CPU_ISTACKPTR]
353 mov sp, x1
354 adrp x1, fleh_fiq@page // Load address for fleh
355 add x1, x1, fleh_fiq@pageoff
356 b fleh_dispatch64
357
 358Lel1_sp0_serror_vector_long:
359 EL1_SP0_VECTOR
360 adrp x1, fleh_serror@page // Load address for fleh
361 add x1, x1, fleh_serror@pageoff
362 b fleh_dispatch64
363
364.macro EL1_SP1_VECTOR
365 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
366 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
367 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
368 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
369 INIT_SAVED_STATE_FLAVORS sp, w0, w1
370 stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
371 mov x0, sp // Copy saved state pointer to x0
372.endmacro
373
 374Lel1_sp1_synchronous_vector_long:
375#if defined(KERNEL_INTEGRITY_KTRR)
376 b check_ktrr_sctlr_trap
377Lel1_sp1_synchronous_vector_continue:
378#endif
379 EL1_SP1_VECTOR
380 adrp x1, fleh_synchronous_sp1@page
381 add x1, x1, fleh_synchronous_sp1@pageoff
382 b fleh_dispatch64
383
 384Lel1_sp1_irq_vector_long:
385 EL1_SP1_VECTOR
386 adrp x1, fleh_irq_sp1@page
387 add x1, x1, fleh_irq_sp1@pageoff
388 b fleh_dispatch64
389
 390Lel1_sp1_fiq_vector_long:
391 EL1_SP1_VECTOR
392 adrp x1, fleh_fiq_sp1@page
393 add x1, x1, fleh_fiq_sp1@pageoff
394 b fleh_dispatch64
395
 396Lel1_sp1_serror_vector_long:
397 EL1_SP1_VECTOR
398 adrp x1, fleh_serror_sp1@page
399 add x1, x1, fleh_serror_sp1@pageoff
400 b fleh_dispatch64
401
402.macro EL0_64_VECTOR
 403	mov	x18, xzr			// Zero x18 to avoid leaking data to user SS
404 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
405 mrs x0, TPIDR_EL1 // Load the thread register
406 mrs x1, SP_EL0 // Load the user stack pointer
407 add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
408 ldr x0, [x0] // Load the user context pointer
409 str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB
410 msr SP_EL0, x0 // Copy the user PCB pointer to SP0
411 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
412 msr SPSel, #0 // Switch to SP0
413 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
414 stp fp, lr, [sp, SS64_FP] // Save fp and lr to the user PCB
415 mov fp, xzr // Clear the fp and lr for the
416 mov lr, xzr // debugger stack frame
417 mov x0, sp // Copy the user PCB pointer to x0
418.endmacro
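/*
 * In outline, the EL0 entry path above does the following (a hedged C sketch;
 * the structure and field names track the ACT_CONTEXT and SS64_ offsets used
 * here and are assumptions, not kernel definitions):
 *
 *	struct arm_context *pcb = current_thread()->context;	// ACT_CONTEXT
 *	pcb->ss.sp   = read_sp_el0();		// user stack pointer, SS64_SP
 *	pcb->ss.x[0] = saved_x0;		// SS64_X0, staged on the exception stack
 *	pcb->ss.x[1] = saved_x1;
 *	pcb->ss.fp   = fp;			// SS64_FP
 *	pcb->ss.lr   = lr;
 *	fp = lr = 0;				// terminate debugger backtraces here
 *	// SP now points at the PCB, so SPILL_REGISTERS can finish the save
 */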
419
420
421Lel0_synchronous_vector_64_long:
422 EL0_64_VECTOR
423 mrs x1, TPIDR_EL1 // Load the thread register
424 ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
425 mov sp, x1 // Set the stack pointer to the kernel stack
426 adrp x1, fleh_synchronous@page // Load address for fleh
427 add x1, x1, fleh_synchronous@pageoff
428 b fleh_dispatch64
429
 430Lel0_irq_vector_64_long:
431 EL0_64_VECTOR
432 mrs x1, TPIDR_EL1
433 ldr x1, [x1, ACT_CPUDATAP]
434 ldr x1, [x1, CPU_ISTACKPTR]
435 mov sp, x1 // Set the stack pointer to the kernel stack
436 adrp x1, fleh_irq@page // load address for fleh
437 add x1, x1, fleh_irq@pageoff
438 b fleh_dispatch64
439
 440Lel0_fiq_vector_64_long:
441 EL0_64_VECTOR
442 mrs x1, TPIDR_EL1
443 ldr x1, [x1, ACT_CPUDATAP]
444 ldr x1, [x1, CPU_ISTACKPTR]
445 mov sp, x1 // Set the stack pointer to the kernel stack
446 adrp x1, fleh_fiq@page // load address for fleh
447 add x1, x1, fleh_fiq@pageoff
448 b fleh_dispatch64
449
 450Lel0_serror_vector_64_long:
451 EL0_64_VECTOR
452 mrs x1, TPIDR_EL1 // Load the thread register
453 ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
454 mov sp, x1 // Set the stack pointer to the kernel stack
455 adrp x1, fleh_serror@page // load address for fleh
456 add x1, x1, fleh_serror@pageoff
457 b fleh_dispatch64
458
459
460/*
461 * check_kernel_stack
462 *
463 * Verifies that the kernel stack is aligned and mapped within an expected
464 * stack address range. Note: happens before saving registers (in case we can't
465 * save to kernel stack).
466 *
467 * Expects:
468 * {x0, x1, sp} - saved
469 * x0 - SP_EL0
470 * x1 - Exception syndrome
471 * sp - Saved state
472 */
473 .text
474 .align 2
475check_kernel_stack:
476 stp x2, x3, [sp, SS64_X2] // Save {x2-x3}
477 and x1, x1, #ESR_EC_MASK // Mask the exception class
478 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
479 cmp x1, x2 // If we have a stack alignment exception
480 b.eq Lcorrupt_stack // ...the stack is definitely corrupted
481 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
482 cmp x1, x2 // If we have a data abort, we need to
483 b.ne Lvalid_stack // ...validate the stack pointer
484 mrs x1, TPIDR_EL1 // Get thread pointer
485Ltest_kstack:
486 ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
487 sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
488 cmp x0, x2 // if (SP_EL0 >= kstack top)
489 b.ge Ltest_istack // jump to istack test
490 cmp x0, x3 // if (SP_EL0 > kstack bottom)
491 b.gt Lvalid_stack // stack pointer valid
492Ltest_istack:
493 ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
494 ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
495 sub x3, x2, PGBYTES // Find bottom of istack
496 cmp x0, x2 // if (SP_EL0 >= istack top)
497 b.ge Ltest_fiqstack // jump to fiqstack test
498 cmp x0, x3 // if (SP_EL0 > istack bottom)
499 b.gt Lvalid_stack // stack pointer valid
500Ltest_fiqstack:
501 ldr x2, [x1, CPU_FIQSTACK_TOP] // Get top of fiqstack
502 sub x3, x2, PGBYTES // Find bottom of fiqstack
503 cmp x0, x2 // if (SP_EL0 >= fiqstack top)
504 b.ge Lcorrupt_stack // corrupt stack pointer
505 cmp x0, x3 // if (SP_EL0 > fiqstack bottom)
506 b.gt Lvalid_stack // stack pointer valid
507Lcorrupt_stack:
508 INIT_SAVED_STATE_FLAVORS sp, w0, w1
509 mov x0, sp // Copy exception frame pointer to x0
510 adrp x1, fleh_invalid_stack@page // Load address for fleh
511 add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
512 ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
513 b fleh_dispatch64
514Lvalid_stack:
515 ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
516 ret
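/*
 * The range checks above amount to the following test (a hedged C sketch;
 * the helper and field names are illustrative):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool
 *	kernel_sp_plausible(uint64_t sp, struct thread *th, struct cpu_data *cdp)
 *	{
 *		uint64_t kstack_top = th->kstackptr;		// TH_KSTACKPTR
 *		if (sp < kstack_top && sp > kstack_top - KERNEL_STACK_SIZE)
 *			return true;				// on the kernel stack
 *		uint64_t istack_top = cdp->intstack_top;	// CPU_INTSTACK_TOP
 *		if (sp < istack_top && sp > istack_top - PAGE_SIZE)
 *			return true;				// on the interrupt stack
 *		uint64_t fiqstack_top = cdp->fiqstack_top;	// CPU_FIQSTACK_TOP
 *		if (sp < fiqstack_top && sp > fiqstack_top - PAGE_SIZE)
 *			return true;				// on the FIQ stack
 *		return false;					// treated as corrupt
 *	}
 *
 * A data abort with SP outside all three ranges, or any SP-alignment fault,
 * is routed to fleh_invalid_stack.
 */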
517
518#if defined(KERNEL_INTEGRITY_KTRR)
519 .text
520 .align 2
521check_ktrr_sctlr_trap:
522/* We may abort on an instruction fetch on reset when enabling the MMU by
523 * writing SCTLR_EL1 because the page containing the privileged instruction is
524 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
525 * would otherwise panic unconditionally. Check for the condition and return
526 * safe execution to the caller on behalf of the faulting function.
527 *
528 * Expected register state:
529 * x22 - Kernel virtual base
530 * x23 - Kernel physical base
531 */
532 sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack
533 stp x0, x1, [sp, SS64_X0] // Stash x0, x1
534 mrs x0, ESR_EL1 // Check ESR for instr. fetch abort
535 and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC
536 movz w1, #0x8600, lsl #16
537 movk w1, #0x0000
538 cmp x0, x1
539 mrs x0, ELR_EL1 // Check for expected abort address
540 adrp x1, _pinst_set_sctlr_trap_addr@page
541 add x1, x1, _pinst_set_sctlr_trap_addr@pageoff
542 sub x1, x1, x22 // Convert to physical address
543 add x1, x1, x23
544 ccmp x0, x1, #0, eq
545 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
546 add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
547 b.ne Lel1_sp1_synchronous_vector_continue
548 msr ELR_EL1, lr // Return to caller
549 eret
550#endif /* defined(KERNEL_INTEGRITY_KTRR)*/
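/*
 * The test above can be read as follows (a hedged C sketch; the accessor
 * names are illustrative, and 0x86000000 is an instruction abort taken at the
 * current EL with the IL bit set and the ISS.IFSC field masked off):
 *
 *	uint64_t esr = read_esr_el1() & ~0x3fULL;	// drop ESR.ISS.IFSC
 *	uint64_t elr = read_elr_el1();
 *	uint64_t trap_pa = pinst_set_sctlr_trap_addr
 *	    - kernel_virt_base + kernel_phys_base;	// x22/x23 from boot
 *	if (esr == 0x86000000 && elr == trap_pa)
 *		return_to_lr();				// msr ELR_EL1, lr; eret
 *	else
 *		continue_to_sp1_synchronous_handler();
 */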
551
552/* 64-bit first level exception handler dispatcher.
553 * Completes register context saving and branches to FLEH.
554 * Expects:
555 * {x0, x1, fp, lr, sp} - saved
556 * x0 - arm_context_t
557 * x1 - address of FLEH
558 * fp - previous stack frame if EL1
559 * lr - unused
560 * sp - kernel stack
561 */
562 .text
563 .align 2
564fleh_dispatch64:
565 /* Save arm_saved_state64 */
566 SPILL_REGISTERS
567
568 /* If exception is from userspace, zero unused registers */
569 and x23, x23, #(PSR64_MODE_EL_MASK)
570 cmp x23, #(PSR64_MODE_EL0)
 571	bne	1f
572
573 mov x2, xzr
574 mov x3, xzr
575 mov x4, xzr
576 mov x5, xzr
577 mov x6, xzr
578 mov x7, xzr
579 mov x8, xzr
580 mov x9, xzr
581 mov x10, xzr
582 mov x11, xzr
583 mov x12, xzr
584 mov x13, xzr
585 mov x14, xzr
586 mov x15, xzr
587 mov x16, xzr
588 mov x17, xzr
589 mov x18, xzr
590 mov x19, xzr
591 mov x20, xzr
592 /* x21, x22 cleared in common case below */
593 mov x23, xzr
594 mov x24, xzr
595 mov x25, xzr
596 mov x26, xzr
597 mov x27, xzr
598 mov x28, xzr
599 /* fp/lr already cleared by EL0_64_VECTOR */
6001:
601
602 mov x21, x0 // Copy arm_context_t pointer to x21
603 mov x22, x1 // Copy handler routine to x22
604
605
606#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
607 tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
608 b.ne 1f // kernel mode, so skip precise time update
609 PUSH_FRAME
610 bl EXT(timer_state_event_user_to_kernel)
611 POP_FRAME
612 mov x0, x21 // Reload arm_context_t pointer
6131:
614#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
615
616 /* Dispatch to FLEH */
617
618 br x22
619
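/*
 * In outline, the dispatcher above does (a hedged C sketch; the helper names
 * are illustrative, while timer_state_event_user_to_kernel is the real C
 * entry point called below):
 *
 *	spill_registers(context);			// SPILL_REGISTERS
 *	bool from_el0 =
 *	    (context->ss.cpsr & PSR64_MODE_EL_MASK) == PSR64_MODE_EL0;
 *	if (from_el0)
 *		zero_scratch_gprs();		// scrub user-controlled register values
 *	arm_context_t *saved_ctx = context;		// kept in x21
 *	void (*fleh)(arm_context_t *) = handler;	// kept in x22
 *	if (from_el0)
 *		timer_state_event_user_to_kernel();	// precise user/kernel time
 *	fleh(saved_ctx);		// br x22; each fleh reloads ESR/FAR itself
 */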
620
621 .text
622 .align 2
623fleh_synchronous:
624 mrs x1, ESR_EL1 // Load exception syndrome
625 mrs x2, FAR_EL1 // Load fault address
626
627 /* At this point, the LR contains the value of ELR_EL1. In the case of an
628 * instruction prefetch abort, this will be the faulting pc, which we know
629 * to be invalid. This will prevent us from backtracing through the
630 * exception if we put it in our stack frame, so we load the LR from the
631 * exception saved state instead.
632 */
633 and w3, w1, #(ESR_EC_MASK)
634 lsr w3, w3, #(ESR_EC_SHIFT)
635 mov w4, #(ESR_EC_IABORT_EL1)
636 cmp w3, w4
637 b.eq Lfleh_sync_load_lr
638Lvalid_link_register:
639
640 PUSH_FRAME
641 bl EXT(sleh_synchronous)
642 POP_FRAME
643
644
645 b exception_return_dispatch
646
647Lfleh_sync_load_lr:
648 ldr lr, [x0, SS64_LR]
649 b Lvalid_link_register
650
651/* Shared prologue code for fleh_irq and fleh_fiq.
 652 * Does any interrupt bookkeeping we may want to do
653 * before invoking the handler proper.
654 * Expects:
655 * x0 - arm_context_t
656 * x23 - CPSR
657 * fp - Undefined live value (we may push a frame)
658 * lr - Undefined live value (we may push a frame)
659 * sp - Interrupt stack for the current CPU
660 */
661.macro BEGIN_INTERRUPT_HANDLER
662 mrs x22, TPIDR_EL1
663 ldr x23, [x22, ACT_CPUDATAP] // Get current cpu
664 /* Update IRQ count */
665 ldr w1, [x23, CPU_STAT_IRQ]
666 add w1, w1, #1 // Increment count
667 str w1, [x23, CPU_STAT_IRQ] // Update IRQ count
668 ldr w1, [x23, CPU_STAT_IRQ_WAKE]
669 add w1, w1, #1 // Increment count
670 str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
671 /* Increment preempt count */
672 ldr w1, [x22, ACT_PREEMPT_CNT]
673 add w1, w1, #1
674 str w1, [x22, ACT_PREEMPT_CNT]
675 /* Store context in int state */
676 str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state
677.endmacro
678
679/* Shared epilogue code for fleh_irq and fleh_fiq.
680 * Cleans up after the prologue, and may do a bit more
681 * bookkeeping (kdebug related).
682 * Expects:
683 * x22 - Live TPIDR_EL1 value (thread address)
684 * x23 - Address of the current CPU data structure
 685 * w24 - 0 if kdebug is disabled, nonzero otherwise
686 * fp - Undefined live value (we may push a frame)
687 * lr - Undefined live value (we may push a frame)
688 * sp - Interrupt stack for the current CPU
689 */
690.macro END_INTERRUPT_HANDLER
691 /* Clear int context */
692 str xzr, [x23, CPU_INT_STATE]
693 /* Decrement preempt count */
694 ldr w0, [x22, ACT_PREEMPT_CNT]
695 cbnz w0, 1f // Detect underflow
696 b preempt_underflow
6971:
698 sub w0, w0, #1
699 str w0, [x22, ACT_PREEMPT_CNT]
700 /* Switch back to kernel stack */
701 ldr x0, [x22, TH_KSTACKPTR]
702 mov sp, x0
703.endmacro
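/*
 * The prologue/epilogue pair corresponds roughly to (a hedged C sketch; the
 * field names follow the CPU_STAT_IRQ, ACT_PREEMPT_CNT and CPU_INT_STATE
 * offsets used above and are assumptions):
 *
 *	// BEGIN_INTERRUPT_HANDLER
 *	cdp->cpu_stat_irq++;			// total IRQ count
 *	cdp->cpu_stat_irq_wake++;		// IRQs since the last wake
 *	th->preempt_count++;			// no preemption inside the handler
 *	cdp->cpu_int_state = context;		// expose saved state to the handler
 *
 *	// ... sleh_irq() / sleh_fiq() runs on the interrupt stack ...
 *
 *	// END_INTERRUPT_HANDLER
 *	cdp->cpu_int_state = NULL;
 *	if (th->preempt_count == 0)
 *		panic("preemption count underflow");	// preempt_underflow
 *	th->preempt_count--;
 *	set_sp(th->kstackptr);			// back onto the kernel stack
 */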
704
705 .text
706 .align 2
707fleh_irq:
708 BEGIN_INTERRUPT_HANDLER
709 PUSH_FRAME
710 bl EXT(sleh_irq)
711 POP_FRAME
712 END_INTERRUPT_HANDLER
713
714
715 b exception_return_dispatch
716
717 .text
718 .align 2
719 .global EXT(fleh_fiq_generic)
720LEXT(fleh_fiq_generic)
721 PANIC_UNIMPLEMENTED
722
723 .text
724 .align 2
725fleh_fiq:
726 BEGIN_INTERRUPT_HANDLER
727 PUSH_FRAME
728 bl EXT(sleh_fiq)
729 POP_FRAME
730 END_INTERRUPT_HANDLER
731
732
733 b exception_return_dispatch
734
735 .text
736 .align 2
737fleh_serror:
738 mrs x1, ESR_EL1 // Load exception syndrome
739 mrs x2, FAR_EL1 // Load fault address
740
741 PUSH_FRAME
742 bl EXT(sleh_serror)
743 POP_FRAME
744
745
746 b exception_return_dispatch
747
748/*
749 * Register state saved before we get here.
750 */
751 .text
752 .align 2
753fleh_invalid_stack:
754 mrs x1, ESR_EL1 // Load exception syndrome
755 str x1, [x0, SS64_ESR]
756 mrs x2, FAR_EL1 // Load fault address
757 str x2, [x0, SS64_FAR]
758 PUSH_FRAME
759 bl EXT(sleh_invalid_stack) // Shouldn't return!
760 b .
761
762 .text
763 .align 2
764fleh_synchronous_sp1:
765 mrs x1, ESR_EL1 // Load exception syndrome
766 str x1, [x0, SS64_ESR]
767 mrs x2, FAR_EL1 // Load fault address
768 str x2, [x0, SS64_FAR]
769 PUSH_FRAME
770 bl EXT(sleh_synchronous_sp1)
771 b .
772
773 .text
774 .align 2
775fleh_irq_sp1:
776 mov x1, x0
777 adr x0, Lsp1_irq_str
778 b EXT(panic_with_thread_kernel_state)
779Lsp1_irq_str:
780 .asciz "IRQ exception taken while SP1 selected"
781
782 .text
783 .align 2
784fleh_fiq_sp1:
785 mov x1, x0
786 adr x0, Lsp1_fiq_str
787 b EXT(panic_with_thread_kernel_state)
788Lsp1_fiq_str:
789 .asciz "FIQ exception taken while SP1 selected"
790
791 .text
792 .align 2
793fleh_serror_sp1:
794 mov x1, x0
795 adr x0, Lsp1_serror_str
796 b EXT(panic_with_thread_kernel_state)
797Lsp1_serror_str:
798 .asciz "Asynchronous exception taken while SP1 selected"
799
800 .text
801 .align 2
802exception_return_dispatch:
803 ldr w0, [x21, SS_FLAVOR] // x0 = (threadIs64Bit) ? ss_64.cpsr : ss_32.cpsr
804 cmp x0, ARM_SAVED_STATE64
805 ldr w1, [x21, SS64_CPSR]
806 ldr w2, [x21, SS32_CPSR]
807 csel w0, w1, w2, eq
808 tbnz w0, PSR64_MODE_EL_SHIFT, return_to_kernel // Test for low bit of EL, return to kernel if set
809 b return_to_user
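/*
 * Equivalent selection logic in C (a hedged sketch; the saved-state accessors
 * are illustrative):
 *
 *	uint32_t flavor = ctx->ss.flavor;			// SS_FLAVOR
 *	uint32_t cpsr = (flavor == ARM_SAVED_STATE64)
 *	    ? ctx->ss.uss.ss_64.cpsr				// SS64_CPSR
 *	    : ctx->ss.uss.ss_32.cpsr;				// SS32_CPSR
 *	if (cpsr & (1u << PSR64_MODE_EL_SHIFT))
 *		return_to_kernel();				// came from EL1
 *	else
 *		return_to_user();				// came from EL0
 */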
810
811 .text
812 .align 2
813return_to_kernel:
814 tbnz w0, #DAIF_IRQF_SHIFT, Lkernel_skip_ast_taken // Skip AST check if IRQ disabled
815 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
816 mrs x0, TPIDR_EL1 // Load thread pointer
817 ldr w1, [x0, ACT_PREEMPT_CNT] // Load preemption count
818 cbnz x1, Lkernel_skip_ast_taken // If preemption disabled, skip AST check
819 ldr x1, [x0, ACT_CPUDATAP] // Get current CPU data pointer
820 ldr x2, [x1, CPU_PENDING_AST] // Get ASTs
821 tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken
822 b.eq Lkernel_skip_ast_taken
823 mov sp, x21 // Switch to thread stack for preemption
824 PUSH_FRAME
825 bl EXT(ast_taken_kernel) // Handle AST_URGENT
826 POP_FRAME
827Lkernel_skip_ast_taken:
828 b exception_return
829
830 .text
831 .globl EXT(thread_bootstrap_return)
832LEXT(thread_bootstrap_return)
833#if CONFIG_DTRACE
834 bl EXT(dtrace_thread_bootstrap)
835#endif
836 b EXT(thread_exception_return)
837
838 .text
839 .globl EXT(thread_exception_return)
840LEXT(thread_exception_return)
841 mrs x0, TPIDR_EL1
842 add x21, x0, ACT_CONTEXT
843 ldr x21, [x21]
844
845 //
846 // Fall Through to return_to_user from thread_exception_return.
847 // Note that if we move return_to_user or insert a new routine
848 // below thread_exception_return, the latter will need to change.
849 //
850 .text
851return_to_user:
852check_user_asts:
853 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
854 mrs x3, TPIDR_EL1 // Load thread pointer
855
856 movn w2, #0
857 str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
858
859 ldr w0, [x3, TH_RWLOCK_CNT]
 860	cbz	w0, 1f					// Detect unbalanced RW lock/unlock
861 b rwlock_count_notzero
8621:
863
864 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
865 ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
866 cbnz x0, user_take_ast // If pending ASTs, go service them
867
868#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
869 PUSH_FRAME
870 bl EXT(timer_state_event_kernel_to_user)
871 POP_FRAME
872 mrs x3, TPIDR_EL1 // Reload thread pointer
873#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
874
875#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
876 /* Watchtower
877 *
878 * Here we attempt to enable NEON access for EL0. If the last entry into the
879 * kernel from user-space was due to an IRQ, the monitor will have disabled
880 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
881 * check in with the monitor in order to reenable NEON for EL0 in exchange
882 * for routing IRQs through the monitor (2). This way the monitor will
883 * always 'own' either IRQs or EL0 NEON.
884 *
885 * If Watchtower is disabled or we did not enter the kernel through an IRQ
886 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
887 * here.
888 *
889 * EL0 user ________ IRQ ______
890 * EL1 xnu \ ______________________ CPACR_EL1 __/
891 * EL3 monitor \_/ \___/
892 *
893 * (1) (2)
894 */
895
896 mov x0, #(CPACR_FPEN_ENABLE)
897 msr CPACR_EL1, x0
898#endif
899
900 /* Establish this thread's debug state as the live state on the selected CPU. */
901 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
902 ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
903 ldr x0, [x3, ACT_DEBUGDATA]
904 orr x1, x1, x0 // Thread debug state and live debug state both NULL?
905 cbnz x1, user_set_debug_state_and_return // If one or the other non-null, go set debug state
906
907 //
908 // Fall through from return_to_user to exception_return.
909 // Note that if we move exception_return or add a new routine below
910 // return_to_user, the latter will have to change.
911 //
912
913
914exception_return:
915 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
916 mrs x3, TPIDR_EL1 // Load thread pointer
917 mov sp, x21 // Reload the pcb pointer
918
919 /* ARM64_TODO Reserve x18 until we decide what to do with it */
920 ldr x0, [x3, TH_CTH_DATA] // Load cthread data pointer
921 str x0, [sp, SS64_X18] // and use it to trash x18
922
923#if __ARM_KERNEL_PROTECT__
924 /*
925 * If we are going to eret to userspace, we must return through the EL0
926 * eret mapping.
927 */
928 ldr w1, [sp, SS64_CPSR] // Load CPSR
929 tbnz w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1
930
931 /* We need to switch to the EL0 mapping of this code to eret to EL0. */
932 adrp x0, EXT(ExceptionVectorsBase)@page // Load vector base
933 adrp x1, Lexception_return_restore_registers@page // Load target PC
934 add x1, x1, Lexception_return_restore_registers@pageoff
935 MOV64 x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address
936 sub x1, x1, x0 // Calculate delta
937 add x0, x2, x1 // Convert KVA to EL0 vector address
938 br x0
939
940Lskip_el0_eret_mapping:
941#endif /* __ARM_KERNEL_PROTECT__ */
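/*
 * The address computation above is, in effect (a hedged sketch):
 *
 *	uintptr_t delta = (uintptr_t)&Lexception_return_restore_registers
 *	    - (uintptr_t)&ExceptionVectorsBase;
 *	uintptr_t el0_alias = ARM_KERNEL_PROTECT_EXCEPTION_START + delta;
 *	branch_to(el0_alias);		// continue in the EL0-visible alias mapping
 *
 * so that the final register restore and eret execute from the mapping that
 * remains visible to EL0 after the kernel is unmapped below.
 */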
942
943Lexception_return_restore_registers:
944 /* Restore special register state */
945 ldr x0, [sp, SS64_PC] // Get the return address
946 ldr w1, [sp, SS64_CPSR] // Get the return CPSR
947 ldr w2, [sp, NS64_FPSR]
948 ldr w3, [sp, NS64_FPCR]
949
950 msr ELR_EL1, x0 // Load the return address into ELR
951 msr SPSR_EL1, x1 // Load the return CPSR into SPSR
952 msr FPSR, x2
953 msr FPCR, x3 // Synchronized by ERET
954
955 mov x0, sp // x0 = &pcb
956
957 /* Restore arm_neon_saved_state64 */
958 ldp q0, q1, [x0, NS64_Q0]
959 ldp q2, q3, [x0, NS64_Q2]
960 ldp q4, q5, [x0, NS64_Q4]
961 ldp q6, q7, [x0, NS64_Q6]
962 ldp q8, q9, [x0, NS64_Q8]
963 ldp q10, q11, [x0, NS64_Q10]
964 ldp q12, q13, [x0, NS64_Q12]
965 ldp q14, q15, [x0, NS64_Q14]
966 ldp q16, q17, [x0, NS64_Q16]
967 ldp q18, q19, [x0, NS64_Q18]
968 ldp q20, q21, [x0, NS64_Q20]
969 ldp q22, q23, [x0, NS64_Q22]
970 ldp q24, q25, [x0, NS64_Q24]
971 ldp q26, q27, [x0, NS64_Q26]
972 ldp q28, q29, [x0, NS64_Q28]
973 ldp q30, q31, [x0, NS64_Q30]
974
975 /* Restore arm_saved_state64 */
976
977 // Skip x0, x1 - we're using them
978 ldp x2, x3, [x0, SS64_X2]
979 ldp x4, x5, [x0, SS64_X4]
980 ldp x6, x7, [x0, SS64_X6]
981 ldp x8, x9, [x0, SS64_X8]
982 ldp x10, x11, [x0, SS64_X10]
983 ldp x12, x13, [x0, SS64_X12]
984 ldp x14, x15, [x0, SS64_X14]
985 ldp x16, x17, [x0, SS64_X16]
986 ldp x18, x19, [x0, SS64_X18]
987 ldp x20, x21, [x0, SS64_X20]
988 ldp x22, x23, [x0, SS64_X22]
989 ldp x24, x25, [x0, SS64_X24]
990 ldp x26, x27, [x0, SS64_X26]
991 ldr x28, [x0, SS64_X28]
992 ldp fp, lr, [x0, SS64_FP]
993
994 // Restore stack pointer and our last two GPRs
995 ldr x1, [x0, SS64_SP]
996 mov sp, x1
997
998#if __ARM_KERNEL_PROTECT__
999 ldr w18, [x0, SS64_CPSR] // Stash CPSR
1000#endif /* __ARM_KERNEL_PROTECT__ */
1001
1002 ldp x0, x1, [x0, SS64_X0] // Restore the GPRs
1003
1004#if __ARM_KERNEL_PROTECT__
1005 /* If we are going to eret to userspace, we must unmap the kernel. */
1006 tbnz w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1007
1008 /* Update TCR to unmap the kernel. */
1009 MOV64 x18, TCR_EL1_USER
1010 msr TCR_EL1, x18
1011
1012 /*
1013 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1014 * each other due to the microarchitecture.
1015 */
1016#if !defined(APPLE_ARM64_ARCH_FAMILY)
1017 isb sy
1018#endif
1019
1020 /* Switch to the user ASID (low bit clear) for the task. */
1021 mrs x18, TTBR0_EL1
1022 bic x18, x18, #(1 << TTBR_ASID_SHIFT)
1023 msr TTBR0_EL1, x18
1024 mov x18, xzr
1025
1026 /* We don't need an ISB here, as the eret is synchronizing. */
1027Lskip_ttbr1_switch:
1028#endif /* __ARM_KERNEL_PROTECT__ */
1029
1030 eret
1031
1032user_take_ast:
1033 PUSH_FRAME
1034 bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation
1035 POP_FRAME
1036 mrs x3, TPIDR_EL1 // Reload thread pointer
1037 b check_user_asts // Now try again
1038
1039user_set_debug_state_and_return:
1040 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1041 isb // Synchronize context
1042 PUSH_FRAME
1043 bl EXT(arm_debug_set) // Establish thread debug state in live regs
1044 POP_FRAME
1045 isb
1046 mrs x3, TPIDR_EL1 // Reload thread pointer
1047 b exception_return // And continue
1048
1049 .text
1050 .align 2
1051preempt_underflow:
1052 mrs x0, TPIDR_EL1
1053 str x0, [sp, #-16]! // We'll print thread pointer
1054 adr x0, L_underflow_str // Format string
1055 CALL_EXTERN panic // Game over
1056
1057L_underflow_str:
1058 .asciz "Preemption count negative on thread %p"
1059.align 2
1060
1061 .text
1062 .align 2
1063rwlock_count_notzero:
1064 mrs x0, TPIDR_EL1
1065 str x0, [sp, #-16]! // We'll print thread pointer
1066 ldr w0, [x0, TH_RWLOCK_CNT]
1067 str w0, [sp, #8]
1068 adr x0, L_rwlock_count_notzero_str // Format string
1069 CALL_EXTERN panic // Game over
1070
1071L_rwlock_count_notzero_str:
1072 .asciz "RW lock count not 0 on thread %p (%u)"
1073.align 2
1074
1075#if __ARM_KERNEL_PROTECT__
1076 /*
1077 * This symbol denotes the end of the exception vector/eret range; we page
1078 * align it so that we can avoid mapping other text in the EL0 exception
1079 * vector mapping.
1080 */
1081 .text
1082 .align 14
1083 .globl EXT(ExceptionVectorsEnd)
1084LEXT(ExceptionVectorsEnd)
1085#endif /* __ARM_KERNEL_PROTECT__ */
1086
1087 .text
1088 .align 2
1089 .globl EXT(ml_panic_trap_to_debugger)
1090LEXT(ml_panic_trap_to_debugger)
1091 ret
1092
1093/* ARM64_TODO Is globals_asm.h needed? */
1094//#include "globals_asm.h"
1095
1096/* vim: set ts=4: */