/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <config_dtrace.h>
#include "assym.s"
#include <arm64/exception_asm.h>
#include "dwarf_unwind.h"

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif

#if XNU_MONITOR
/*
 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
 *
 * Checks if an exception was taken from the PPL, and if so, trampolines back
 * into the PPL.
 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
 *         exception was taken while in the PPL.
 */
.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
	cmp	x26, xzr
	b.eq	1f

	/* Return to the PPL. */
	mov	x15, #0
	mov	w10, #PPL_STATE_EXCEPTION
#error "XPRR configuration error"
1:
.endmacro


#endif /* XNU_MONITOR */

#define CBF_DISABLE 0
#define CBF_ENABLE 1

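/*
 * COMPARE_BRANCH_FUSION
 *
 * Sets or clears the HID1 bit that disables compare/branch instruction
 * fusion on Apple-designed cores (a no-op on other CPU families).
 *   $0 - CBF_DISABLE or CBF_ENABLE
 *   $1, $2 - caller-provided scratch registers
 */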
.macro COMPARE_BRANCH_FUSION
#if defined(APPLE_ARM64_ARCH_FAMILY)
	mrs	$1, HID1
	.if $0 == CBF_DISABLE
	orr	$1, $1, ARM64_REG_HID1_disCmpBrFusion
	.else
	mov	$2, ARM64_REG_HID1_disCmpBrFusion
	bic	$1, $1, $2
	.endif
	msr	HID1, $1
	.if $0 == CBF_DISABLE
	isb	sy
	.endif
#endif
.endmacro

/*
 * MAP_KERNEL
 *
 * Restores the kernel EL1 mappings, if necessary.
 *
 * This may mutate x18.
 */
.macro MAP_KERNEL
#if __ARM_KERNEL_PROTECT__
	/* Switch to the kernel ASID (low bit set) for the task. */
	mrs	x18, TTBR0_EL1
	orr	x18, x18, #(1 << TTBR_ASID_SHIFT)
	msr	TTBR0_EL1, x18

	/*
	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
	 * to the TTBRs and writes to the TCR should be ensured by the
	 * microarchitecture.
	 */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
	isb	sy
#endif

	/*
	 * Update the TCR to map the kernel now that we are using the kernel
	 * ASID.
	 */
	MOV64	x18, TCR_EL1_BOOT
	msr	TCR_EL1, x18
	isb	sy
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro

/*
 * BRANCH_TO_KVA_VECTOR
 *
 * Branches to the requested long exception vector in the kernelcache.
 *   arg0 - The label to branch to
 *   arg1 - The index of the label in exc_vectors_table
 *
 * This may mutate x18.
 */
.macro BRANCH_TO_KVA_VECTOR
#if __ARM_KERNEL_PROTECT__
	/*
	 * Find the kernelcache table for the exception vectors by accessing
	 * the per-CPU data.
	 */
	mrs	x18, TPIDR_EL1
	ldr	x18, [x18, ACT_CPUDATAP]
	ldr	x18, [x18, CPU_EXC_VECTORS]

	/*
	 * Get the handler for this exception and jump to it.
	 */
	ldr	x18, [x18, #($1 << 3)]
	br	x18
#else
	b	$0
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro

/*
 * CHECK_KERNEL_STACK
 *
 * Verifies that the kernel stack is aligned and mapped within an expected
 * stack address range. Note: happens before saving registers (in case we can't
 * save to kernel stack).
 *
 * Expects:
 *	{x0, x1} - saved
 *	x1 - Exception syndrome
 *	sp - Saved state
 *
 * Seems like we need an unused argument to the macro for the \@ syntax to work
 *
 */
.macro CHECK_KERNEL_STACK unused
	stp	x2, x3, [sp, #-16]!			// Save {x2-x3}
	and	x1, x1, #ESR_EC_MASK			// Mask the exception class
	mov	x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
	cmp	x1, x2					// If we have a stack alignment exception
	b.eq	Lcorrupt_stack_\@			// ...the stack is definitely corrupted
	mov	x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
	cmp	x1, x2					// If we have a data abort, we need to
	b.ne	Lvalid_stack_\@				// ...validate the stack pointer
	mrs	x0, SP_EL0				// Get SP_EL0
	mrs	x1, TPIDR_EL1				// Get thread pointer
Ltest_kstack_\@:
	ldr	x2, [x1, TH_KSTACKPTR]			// Get top of kernel stack
	sub	x3, x2, KERNEL_STACK_SIZE		// Find bottom of kernel stack
	cmp	x0, x2					// if (SP_EL0 >= kstack top)
	b.ge	Ltest_istack_\@				//    jump to istack test
	cmp	x0, x3					// if (SP_EL0 > kstack bottom)
	b.gt	Lvalid_stack_\@				//    stack pointer valid
Ltest_istack_\@:
	ldr	x1, [x1, ACT_CPUDATAP]			// Load the cpu data ptr
	ldr	x2, [x1, CPU_INTSTACK_TOP]		// Get top of istack
	sub	x3, x2, INTSTACK_SIZE_NUM		// Find bottom of istack
	cmp	x0, x2					// if (SP_EL0 >= istack top)
	b.ge	Lcorrupt_stack_\@			//    corrupt stack pointer
	cmp	x0, x3					// if (SP_EL0 > istack bottom)
	b.gt	Lvalid_stack_\@				//    stack pointer valid
Lcorrupt_stack_\@:
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
	sub	sp, sp, ARM_CONTEXT_SIZE		// Allocate exception frame
	stp	x0, x1, [sp, SS64_X0]			// Save x0, x1 to the exception frame
	stp	x2, x3, [sp, SS64_X2]			// Save x2, x3 to the exception frame
	mrs	x0, SP_EL0				// Get SP_EL0
	str	x0, [sp, SS64_SP]			// Save sp to the exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov	x0, sp					// Copy exception frame pointer to x0
	adrp	x1, fleh_invalid_stack@page		// Load address for fleh
	add	x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
	b	fleh_dispatch64
Lvalid_stack_\@:
	ldp	x2, x3, [sp], #16			// Restore {x2-x3}
.endmacro


#if __ARM_KERNEL_PROTECT__
	.section __DATA_CONST,__const
	.align 3
	.globl EXT(exc_vectors_table)
LEXT(exc_vectors_table)
	/* Table of exception handlers.
	 * These handlers sometimes contain deadloops.
	 * It's nice to have symbols for them when debugging. */
	.quad el1_sp0_synchronous_vector_long
	.quad el1_sp0_irq_vector_long
	.quad el1_sp0_fiq_vector_long
	.quad el1_sp0_serror_vector_long
	.quad el1_sp1_synchronous_vector_long
	.quad el1_sp1_irq_vector_long
	.quad el1_sp1_fiq_vector_long
	.quad el1_sp1_serror_vector_long
	.quad el0_synchronous_vector_64_long
	.quad el0_irq_vector_64_long
	.quad el0_fiq_vector_64_long
	.quad el0_serror_vector_64_long
#endif /* __ARM_KERNEL_PROTECT__ */

	.text
231#if __ARM_KERNEL_PROTECT__
232 /*
233 * We need this to be on a page boundary so that we may avoiding mapping
234 * other text along with it. As this must be on the VM page boundary
235 * (due to how the coredumping code currently works), this will be a
236 * 16KB page boundary.
237 */
238 .align 14
239#else
5ba3f43e 240 .align 12
5c9f4661 241#endif /* __ARM_KERNEL_PROTECT__ */
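/*
 * ExceptionVectorsBase is the table installed as the EL1 vector base. Each
 * stub below is spaced 128 bytes apart (.align 7), in the architectural
 * order: current EL with SP0, current EL with SPx, then lower EL using
 * AArch64. Each stub either branches straight to its long handler or, with
 * __ARM_KERNEL_PROTECT__, bounces through exc_vectors_table via
 * BRANCH_TO_KVA_VECTOR.
 */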
	.globl EXT(ExceptionVectorsBase)
LEXT(ExceptionVectorsBase)
Lel1_sp0_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0

	.text
	.align 7
Lel1_sp0_irq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1

	.text
	.align 7
Lel1_sp0_fiq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2

	.text
	.align 7
Lel1_sp0_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3

	.text
	.align 7
Lel1_sp1_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4

	.text
	.align 7
Lel1_sp1_irq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5

	.text
	.align 7
Lel1_sp1_fiq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6

	.text
	.align 7
Lel1_sp1_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7

	.text
	.align 7
Lel0_synchronous_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8

	.text
	.align 7
Lel0_irq_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9

	.text
	.align 7
Lel0_fiq_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10

	.text
	.align 7
Lel0_serror_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11

	/* Fill out the rest of the page */
	.align 12

/*********************************
 * END OF EXCEPTION VECTORS PAGE *
 *********************************/


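/*
 * EL1_SP0_VECTOR
 *
 * Common prologue for exceptions taken from EL1 while running on SP0:
 * switches back to SP0, allocates an ARM context frame on the current
 * (kernel) stack, saves x0/x1 and the original SP into it, initializes the
 * saved-state flavor, and leaves x0 pointing at the saved state.
 */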
.macro EL1_SP0_VECTOR
	msr	SPSel, #0				// Switch to SP0
	sub	sp, sp, ARM_CONTEXT_SIZE		// Create exception frame
	stp	x0, x1, [sp, SS64_X0]			// Save x0, x1 to exception frame
	add	x0, sp, ARM_CONTEXT_SIZE		// Calculate the original stack pointer
	str	x0, [sp, SS64_SP]			// Save stack pointer to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov	x0, sp					// Copy saved state pointer to x0
.endmacro

el1_sp0_synchronous_vector_long:
	stp	x0, x1, [sp, #-16]!			// Save x0 and x1 to the exception stack
	mrs	x1, ESR_EL1				// Get the exception syndrome
	/* If the stack pointer is corrupt, it will manifest either as a data abort
	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
	 * these quickly by testing bit 5 of the exception class.
	 */
	tbz	x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
	CHECK_KERNEL_STACK
Lkernel_stack_valid:
	ldp	x0, x1, [sp], #16			// Restore x0 and x1 from the exception stack
	EL1_SP0_VECTOR
	adrp	x1, EXT(fleh_synchronous)@page		// Load address for fleh
	add	x1, x1, EXT(fleh_synchronous)@pageoff
	b	fleh_dispatch64

el1_sp0_irq_vector_long:
	EL1_SP0_VECTOR
	SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_irq)@page			// Load address for fleh
	add	x1, x1, EXT(fleh_irq)@pageoff
	b	fleh_dispatch64

el1_sp0_fiq_vector_long:
	// ARM64_TODO write optimized decrementer
	EL1_SP0_VECTOR
	SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_fiq)@page			// Load address for fleh
	add	x1, x1, EXT(fleh_fiq)@pageoff
	b	fleh_dispatch64

el1_sp0_serror_vector_long:
	EL1_SP0_VECTOR
	adrp	x1, EXT(fleh_serror)@page		// Load address for fleh
	add	x1, x1, EXT(fleh_serror)@pageoff
	b	fleh_dispatch64

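/*
 * EL1_SP1_VECTOR
 *
 * Prologue for exceptions taken from EL1 while already on SP1 (the per-CPU
 * exception stack): allocates an ARM context frame in place, saves x0/x1 and
 * the original SP, initializes the saved-state flavor, and leaves x0 pointing
 * at the saved state. Unlike EL1_SP0_VECTOR, it stays on SP1.
 */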
.macro EL1_SP1_VECTOR
	sub	sp, sp, ARM_CONTEXT_SIZE		// Create exception frame
	stp	x0, x1, [sp, SS64_X0]			// Save x0, x1 to exception frame
	add	x0, sp, ARM_CONTEXT_SIZE		// Calculate the original stack pointer
	str	x0, [sp, SS64_SP]			// Save stack pointer to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov	x0, sp					// Copy saved state pointer to x0
.endmacro

el1_sp1_synchronous_vector_long:
	b	check_exception_stack
Lel1_sp1_synchronous_valid_stack:
#if defined(KERNEL_INTEGRITY_KTRR)
	b	check_ktrr_sctlr_trap
Lel1_sp1_synchronous_vector_continue:
#endif
	EL1_SP1_VECTOR
	adrp	x1, fleh_synchronous_sp1@page
	add	x1, x1, fleh_synchronous_sp1@pageoff
	b	fleh_dispatch64

el1_sp1_irq_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_irq_sp1@page
	add	x1, x1, fleh_irq_sp1@pageoff
	b	fleh_dispatch64

el1_sp1_fiq_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_fiq_sp1@page
	add	x1, x1, fleh_fiq_sp1@pageoff
	b	fleh_dispatch64

el1_sp1_serror_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_serror_sp1@page
	add	x1, x1, fleh_serror_sp1@pageoff
	b	fleh_dispatch64


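/*
 * EL0_64_VECTOR
 *
 * Prologue for exceptions taken from 64-bit EL0: stashes x0/x1 on the
 * exception stack, saves the user stack pointer into the thread's PCB,
 * points SP0 at the PCB, then saves x0/x1 into it. On exit, x0 holds the
 * PCB (saved state) pointer and x1 holds TPIDR_EL1 (the thread pointer).
 */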
.macro EL0_64_VECTOR
	stp	x0, x1, [sp, #-16]!			// Save x0 and x1 to the exception stack
#if __ARM_KERNEL_PROTECT__
	mov	x18, #0					// Zero x18 to avoid leaking data to user SS
#endif
	mrs	x0, TPIDR_EL1				// Load the thread register
	mrs	x1, SP_EL0				// Load the user stack pointer
	add	x0, x0, ACT_CONTEXT			// Calculate where we store the user context pointer
	ldr	x0, [x0]				// Load the user context pointer
	str	x1, [x0, SS64_SP]			// Store the user stack pointer in the user PCB
	msr	SP_EL0, x0				// Copy the user PCB pointer to SP0
	ldp	x0, x1, [sp], #16			// Restore x0 and x1 from the exception stack
	msr	SPSel, #0				// Switch to SP0
	stp	x0, x1, [sp, SS64_X0]			// Save x0, x1 to the user PCB
	mrs	x1, TPIDR_EL1				// Load the thread register


	mov	x0, sp					// Copy the user PCB pointer to x0
	// x1 contains thread register
.endmacro


el0_synchronous_vector_64_long:
	EL0_64_VECTOR sync
	SWITCH_TO_KERN_STACK
	adrp	x1, EXT(fleh_synchronous)@page		// Load address for fleh
	add	x1, x1, EXT(fleh_synchronous)@pageoff
	b	fleh_dispatch64

el0_irq_vector_64_long:
	EL0_64_VECTOR irq
	SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_irq)@page			// load address for fleh
	add	x1, x1, EXT(fleh_irq)@pageoff
	b	fleh_dispatch64

el0_fiq_vector_64_long:
	EL0_64_VECTOR fiq
	SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_fiq)@page			// load address for fleh
	add	x1, x1, EXT(fleh_fiq)@pageoff
	b	fleh_dispatch64

el0_serror_vector_64_long:
	EL0_64_VECTOR serror
	SWITCH_TO_KERN_STACK
	adrp	x1, EXT(fleh_serror)@page		// load address for fleh
	add	x1, x1, EXT(fleh_serror)@pageoff
	b	fleh_dispatch64


/*
 * check_exception_stack
 *
 * Verifies that the stack pointer at SP1 is within the exception stack.
 * If not, will simply hang as we have no more stack to fall back on.
 */

	.text
	.align 2
check_exception_stack:
	mrs	x18, TPIDR_EL1				// Get thread pointer
	cbz	x18, Lvalid_exception_stack		// Thread context may not be set early in boot
	ldr	x18, [x18, ACT_CPUDATAP]
	cbz	x18, .					// If thread context is set, cpu data should be too
	ldr	x18, [x18, CPU_EXCEPSTACK_TOP]
	cmp	sp, x18
	b.gt	.					// Hang if above exception stack top
	sub	x18, x18, EXCEPSTACK_SIZE_NUM		// Find bottom of exception stack
	cmp	sp, x18
	b.lt	.					// Hang if below exception stack bottom
Lvalid_exception_stack:
	mov	x18, #0
	b	Lel1_sp1_synchronous_valid_stack


#if defined(KERNEL_INTEGRITY_KTRR)
	.text
	.align 2
check_ktrr_sctlr_trap:
/* We may abort on an instruction fetch on reset when enabling the MMU by
 * writing SCTLR_EL1 because the page containing the privileged instruction is
 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
 * would otherwise panic unconditionally. Check for the condition and return
 * safe execution to the caller on behalf of the faulting function.
 *
 * Expected register state:
 *   x22 - Kernel virtual base
 *   x23 - Kernel physical base
 */
	sub	sp, sp, ARM_CONTEXT_SIZE		// Make some space on the stack
	stp	x0, x1, [sp, SS64_X0]			// Stash x0, x1
	mrs	x0, ESR_EL1				// Check ESR for instr. fetch abort
	and	x0, x0, #0xffffffffffffffc0		// Mask off ESR.ISS.IFSC
	movz	w1, #0x8600, lsl #16
	movk	w1, #0x0000
	cmp	x0, x1
	mrs	x0, ELR_EL1				// Check for expected abort address
	adrp	x1, _pinst_set_sctlr_trap_addr@page
	add	x1, x1, _pinst_set_sctlr_trap_addr@pageoff
	sub	x1, x1, x22				// Convert to physical address
	add	x1, x1, x23
	ccmp	x0, x1, #0, eq
	ldp	x0, x1, [sp, SS64_X0]			// Restore x0, x1
	add	sp, sp, ARM_CONTEXT_SIZE		// Clean up stack
	b.ne	Lel1_sp1_synchronous_vector_continue
	msr	ELR_EL1, lr				// Return to caller
	ERET_CONTEXT_SYNCHRONIZING
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

/* 64-bit first level exception handler dispatcher.
 * Completes register context saving and branches to FLEH.
 * Expects:
 *   {x0, x1, sp} - saved
 *   x0 - arm_context_t
 *   x1 - address of FLEH
 *   fp - previous stack frame if EL1
 *   lr - unused
 *   sp - kernel stack
 */
	.text
	.align 2
fleh_dispatch64:
	/* Save arm_saved_state64 */
	SPILL_REGISTERS KERNEL_MODE

	/* If exception is from userspace, zero unused registers */
	and	x23, x23, #(PSR64_MODE_EL_MASK)
	cmp	x23, #(PSR64_MODE_EL0)
	bne	1f

	SANITIZE_FPCR x25, x2, 2			// x25 is set to current FPCR by SPILL_REGISTERS
2:
	mov	x2, #0
	mov	x3, #0
	mov	x4, #0
	mov	x5, #0
	mov	x6, #0
	mov	x7, #0
	mov	x8, #0
	mov	x9, #0
	mov	x10, #0
	mov	x11, #0
	mov	x12, #0
	mov	x13, #0
	mov	x14, #0
	mov	x15, #0
	mov	x16, #0
	mov	x17, #0
	mov	x18, #0
	mov	x19, #0
	mov	x20, #0
	/* x21, x22 cleared in common case below */
	mov	x23, #0
	mov	x24, #0
	mov	x25, #0
#if !XNU_MONITOR
	mov	x26, #0
#endif
	mov	x27, #0
	mov	x28, #0
	mov	fp, #0
	mov	lr, #0
1:

	mov	x21, x0					// Copy arm_context_t pointer to x21
	mov	x22, x1					// Copy handler routine to x22

#if XNU_MONITOR
	/* Zero x26 to indicate that this should not return to the PPL. */
	mov	x26, #0
#endif

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
	tst	x23, PSR64_MODE_EL_MASK			// If any EL MODE bits are set, we're coming from
	b.ne	1f					// kernel mode, so skip precise time update
	PUSH_FRAME
	bl	EXT(timer_state_event_user_to_kernel)
	POP_FRAME
	mov	x0, x21					// Reload arm_context_t pointer
1:
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

	/* Dispatch to FLEH */

	br	x22


	.text
	.align 2
	.global EXT(fleh_synchronous)
LEXT(fleh_synchronous)

UNWIND_PROLOGUE
UNWIND_DIRECTIVES

	mrs	x1, ESR_EL1				// Load exception syndrome
	mrs	x2, FAR_EL1				// Load fault address

	/* At this point, the LR contains the value of ELR_EL1. In the case of an
	 * instruction prefetch abort, this will be the faulting pc, which we know
	 * to be invalid. This will prevent us from backtracing through the
	 * exception if we put it in our stack frame, so we load the LR from the
	 * exception saved state instead.
	 */
	and	w3, w1, #(ESR_EC_MASK)
	lsr	w3, w3, #(ESR_EC_SHIFT)
	mov	w4, #(ESR_EC_IABORT_EL1)
	cmp	w3, w4
	b.eq	Lfleh_sync_load_lr
Lvalid_link_register:

	PUSH_FRAME
	bl	EXT(sleh_synchronous)
	POP_FRAME

#if XNU_MONITOR
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov	x28, xzr				// Don't need to check PFZ if there are ASTs
	b	exception_return_dispatch

Lfleh_sync_load_lr:
	ldr	lr, [x0, SS64_LR]
	b	Lvalid_link_register
UNWIND_EPILOGUE

/* Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
 * before invoking the handler proper.
 * Expects:
 *   x0 - arm_context_t
 *   x23 - CPSR
 *   fp - Undefined live value (we may push a frame)
 *   lr - Undefined live value (we may push a frame)
 *   sp - Interrupt stack for the current CPU
 */
.macro BEGIN_INTERRUPT_HANDLER
	mrs	x22, TPIDR_EL1
	ldr	x23, [x22, ACT_CPUDATAP]		// Get current cpu
	/* Update IRQ count */
	ldr	w1, [x23, CPU_STAT_IRQ]
	add	w1, w1, #1				// Increment count
	str	w1, [x23, CPU_STAT_IRQ]			// Update IRQ count
	ldr	w1, [x23, CPU_STAT_IRQ_WAKE]
	add	w1, w1, #1				// Increment count
	str	w1, [x23, CPU_STAT_IRQ_WAKE]		// Update post-wake IRQ count
	/* Increment preempt count */
	ldr	w1, [x22, ACT_PREEMPT_CNT]
	add	w1, w1, #1
	str	w1, [x22, ACT_PREEMPT_CNT]
	/* Store context in int state */
	str	x0, [x23, CPU_INT_STATE]		// Saved context in cpu_int_state
.endmacro

/* Shared epilogue code for fleh_irq and fleh_fiq.
 * Cleans up after the prologue, and may do a bit more
 * bookkeeping (kdebug related).
 * Expects:
 *   x22 - Live TPIDR_EL1 value (thread address)
 *   x23 - Address of the current CPU data structure
 *   w24 - 0 if kdebug is disabled, nonzero otherwise
 *   fp - Undefined live value (we may push a frame)
 *   lr - Undefined live value (we may push a frame)
 *   sp - Interrupt stack for the current CPU
 */
.macro END_INTERRUPT_HANDLER
	/* Clear int context */
	str	xzr, [x23, CPU_INT_STATE]
	/* Decrement preempt count */
	ldr	w0, [x22, ACT_PREEMPT_CNT]
	cbnz	w0, 1f					// Detect underflow
	b	preempt_underflow
1:
	sub	w0, w0, #1
	str	w0, [x22, ACT_PREEMPT_CNT]
	/* Switch back to kernel stack */
	ldr	x0, [x22, TH_KSTACKPTR]
	mov	sp, x0
.endmacro

	.text
	.align 2
	.global EXT(fleh_irq)
LEXT(fleh_irq)
	BEGIN_INTERRUPT_HANDLER
	PUSH_FRAME
	bl	EXT(sleh_irq)
	POP_FRAME
	END_INTERRUPT_HANDLER

#if XNU_MONITOR
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov	x28, #1					// Set a bit to check PFZ if there are ASTs
	b	exception_return_dispatch

	.text
	.align 2
	.global EXT(fleh_fiq_generic)
LEXT(fleh_fiq_generic)
	PANIC_UNIMPLEMENTED

	.text
	.align 2
	.global EXT(fleh_fiq)
LEXT(fleh_fiq)
	BEGIN_INTERRUPT_HANDLER
	PUSH_FRAME
	bl	EXT(sleh_fiq)
	POP_FRAME
	END_INTERRUPT_HANDLER

#if XNU_MONITOR
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov	x28, #1					// Set a bit to check PFZ if there are ASTs
	b	exception_return_dispatch

	.text
	.align 2
	.global EXT(fleh_serror)
LEXT(fleh_serror)
	mrs	x1, ESR_EL1				// Load exception syndrome
	mrs	x2, FAR_EL1				// Load fault address

	PUSH_FRAME
	bl	EXT(sleh_serror)
	POP_FRAME

#if XNU_MONITOR
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov	x28, xzr				// Don't need to check PFZ if there are ASTs
	b	exception_return_dispatch

/*
 * Register state saved before we get here.
 */
	.text
	.align 2
fleh_invalid_stack:
	mrs	x1, ESR_EL1				// Load exception syndrome
	str	x1, [x0, SS64_ESR]
	mrs	x2, FAR_EL1				// Load fault address
	str	x2, [x0, SS64_FAR]
	PUSH_FRAME
	bl	EXT(sleh_invalid_stack)			// Shouldn't return!
	b	.

	.text
	.align 2
fleh_synchronous_sp1:
	mrs	x1, ESR_EL1				// Load exception syndrome
	str	x1, [x0, SS64_ESR]
	mrs	x2, FAR_EL1				// Load fault address
	str	x2, [x0, SS64_FAR]
	PUSH_FRAME
	bl	EXT(sleh_synchronous_sp1)
	b	.

	.text
	.align 2
fleh_irq_sp1:
	mov	x1, x0
	adr	x0, Lsp1_irq_str
	b	EXT(panic_with_thread_kernel_state)
Lsp1_irq_str:
	.asciz "IRQ exception taken while SP1 selected"

	.text
	.align 2
fleh_fiq_sp1:
	mov	x1, x0
	adr	x0, Lsp1_fiq_str
	b	EXT(panic_with_thread_kernel_state)
Lsp1_fiq_str:
	.asciz "FIQ exception taken while SP1 selected"

	.text
	.align 2
fleh_serror_sp1:
	mov	x1, x0
	adr	x0, Lsp1_serror_str
	b	EXT(panic_with_thread_kernel_state)
Lsp1_serror_str:
	.asciz "Asynchronous exception taken while SP1 selected"

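/*
 * exception_return_dispatch
 *
 * Common tail for the fleh_* handlers: examines the saved CPSR in the
 * machine context (x21) and returns to kernel mode if the exception was
 * taken from EL1, or to user mode if it was taken from EL0.
 */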
	.text
	.align 2
exception_return_dispatch:
	ldr	w0, [x21, SS64_CPSR]
	tst	w0, PSR64_MODE_EL_MASK
	b.ne	EXT(return_to_kernel)			// return to kernel if M[3:2] > 0
	b	return_to_user

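/*
 * return_to_kernel
 *
 * Return path for exceptions taken from EL1. If IRQs were unmasked in the
 * interrupted context and preemption is not disabled, handles any urgent
 * AST (on the interrupted thread's stack) before restoring state via
 * exception_return. Expects x21 to point at the saved machine context and
 * w0 to hold the saved CPSR.
 */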
	.text
	.align 2
	.global EXT(return_to_kernel)
LEXT(return_to_kernel)
	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return	// Skip AST check if IRQ disabled
	mrs	x3, TPIDR_EL1				// Load thread pointer
	ldr	w1, [x3, ACT_PREEMPT_CNT]		// Load preemption count
	msr	DAIFSet, #DAIFSC_ALL			// Disable exceptions
	cbnz	x1, exception_return_unint_tpidr_x3	// If preemption disabled, skip AST check
	ldr	x1, [x3, ACT_CPUDATAP]			// Get current CPU data pointer
	ldr	x2, [x1, CPU_PENDING_AST]		// Get ASTs
	tst	x2, AST_URGENT				// If no urgent ASTs, skip ast_taken
	b.eq	exception_return_unint_tpidr_x3
	mov	sp, x21					// Switch to thread stack for preemption
	PUSH_FRAME
	bl	EXT(ast_taken_kernel)			// Handle AST_URGENT
	POP_FRAME
	b	exception_return

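/*
 * thread_bootstrap_return
 *
 * First return-to-user path for a newly created thread: optionally notifies
 * dtrace, then joins the common return path via arm64_thread_exception_return.
 */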
	.text
	.globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	bl	EXT(dtrace_thread_bootstrap)
#endif
	b	EXT(arm64_thread_exception_return)

	.text
	.globl EXT(arm64_thread_exception_return)
LEXT(arm64_thread_exception_return)
	mrs	x0, TPIDR_EL1
	add	x21, x0, ACT_CONTEXT
	ldr	x21, [x21]
	mov	x28, xzr

	//
	// Fall Through to return_to_user from arm64_thread_exception_return.
	// Note that if we move return_to_user or insert a new routine
	// below arm64_thread_exception_return, the latter will need to change.
	//
	.text
/* x21 is always the machine context pointer when we get here
 * x28 is a bit indicating whether or not we should check if pc is in pfz */
return_to_user:
check_user_asts:
	mrs	x3, TPIDR_EL1				// Load thread pointer

	movn	w2, #0
	str	w2, [x3, TH_IOTIER_OVERRIDE]		// Reset IO tier override to -1 before returning to user

#if MACH_ASSERT
	ldr	w0, [x3, TH_RWLOCK_CNT]
	cbnz	w0, rwlock_count_notzero		// Detect unbalanced RW lock/unlock

	ldr	w0, [x3, ACT_PREEMPT_CNT]
	cbnz	w0, preempt_count_notzero		// Detect unbalanced enable/disable preemption
#endif
	ldr	w0, [x3, TH_TMP_ALLOC_CNT]
	cbnz	w0, tmp_alloc_count_nozero		// Detect KHEAP_TEMP leaks

	msr	DAIFSet, #DAIFSC_ALL			// Disable exceptions
	ldr	x4, [x3, ACT_CPUDATAP]			// Get current CPU data pointer
	ldr	x0, [x4, CPU_PENDING_AST]		// Get ASTs
	cbz	x0, no_asts				// If no asts, skip ahead

	cbz	x28, user_take_ast			// If we don't need to check PFZ, just handle asts

	/* At this point, we have ASTs and we need to check whether we are running in the
	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
	 * the PFZ since we don't want to handle getting a signal or getting suspended
	 * while holding a spinlock in userspace.
	 *
	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
	 * to use it to indicate to userspace to come back to take a delayed
	 * preemption, at which point the ASTs will be handled. */
	mov	x28, xzr				// Clear the "check PFZ" bit so that we don't do this again
	mov	x19, x0					// Save x0 since it will be clobbered by commpage_is_in_pfz64

	ldr	x0, [x21, SS64_PC]			// Load pc from machine state
	bl	EXT(commpage_is_in_pfz64)		// pc in pfz?
	cbz	x0, restore_and_check_ast		// No, deal with other asts

	mov	x0, #1
	str	x0, [x21, SS64_X15]			// Mark x15 for userspace to take delayed preemption
	mov	x0, x19					// restore x0 to asts
	b	no_asts					// pretend we have no asts

restore_and_check_ast:
	mov	x0, x19					// restore x0
	b	user_take_ast				// Service pending asts
no_asts:


#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
	mov	x19, x3					// Preserve thread pointer across function call
	PUSH_FRAME
	bl	EXT(timer_state_event_kernel_to_user)
	POP_FRAME
	mov	x3, x19
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
	/* Watchtower
	 *
	 * Here we attempt to enable NEON access for EL0. If the last entry into the
	 * kernel from user-space was due to an IRQ, the monitor will have disabled
	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
	 * check in with the monitor in order to reenable NEON for EL0 in exchange
	 * for routing IRQs through the monitor (2). This way the monitor will
	 * always 'own' either IRQs or EL0 NEON.
	 *
	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
	 * here.
	 *
	 * EL0 user ________ IRQ ______
	 * EL1 xnu \ ______________________ CPACR_EL1 __/
	 * EL3 monitor \_/ \___/
	 *
	 *             (1)                       (2)
	 */

	mov	x0, #(CPACR_FPEN_ENABLE)
	msr	CPACR_EL1, x0
#endif

	/* Establish this thread's debug state as the live state on the selected CPU. */
	ldr	x4, [x3, ACT_CPUDATAP]			// Get current CPU data pointer
	ldr	x1, [x4, CPU_USER_DEBUG]		// Get Debug context
	ldr	x0, [x3, ACT_DEBUGDATA]
	cmp	x0, x1
	beq	L_skip_user_set_debug_state		// Skip if the thread debug state already matches the live CPU debug state

#if defined(APPLELIGHTNING)
/* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */

	ARM64_IS_PCORE x12				// if we're not a pCORE, also do nothing
	cbz	x12, 1f

	mrs	x12, HID1				// if any debug session ever existed, set forceNexL3ClkOn
	orr	x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
	msr	HID1, x12
1:

#endif

	PUSH_FRAME
	bl	EXT(arm_debug_set)			// Establish thread debug state in live regs
	POP_FRAME
	mrs	x3, TPIDR_EL1				// Reload thread pointer
L_skip_user_set_debug_state:


	b	exception_return_unint_tpidr_x3

	//
	// Fall through from return_to_user to exception_return.
	// Note that if we move exception_return or add a new routine below
	// return_to_user, the latter will have to change.
	//

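/*
 * exception_return and friends
 *
 * exception_return masks interrupts and falls into exception_return_unint;
 * exception_return_unint expects interrupts to already be masked; the
 * _tpidr_x3 variant additionally expects x3 to hold TPIDR_EL1. All of them
 * expect x21 to point at the machine context to be restored.
 */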
exception_return:
	msr	DAIFSet, #DAIFSC_ALL			// Disable exceptions
exception_return_unint:
	mrs	x3, TPIDR_EL1				// Load thread pointer
exception_return_unint_tpidr_x3:
	mov	sp, x21					// Reload the pcb pointer

exception_return_unint_tpidr_x3_dont_trash_x18:


#if __ARM_KERNEL_PROTECT__
	/*
	 * If we are going to eret to userspace, we must return through the EL0
	 * eret mapping.
	 */
	ldr	w1, [sp, SS64_CPSR]			// Load CPSR
	tbnz	w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1

	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
	adrp	x0, EXT(ExceptionVectorsBase)@page	// Load vector base
	adrp	x1, Lexception_return_restore_registers@page	// Load target PC
	add	x1, x1, Lexception_return_restore_registers@pageoff
	MOV64	x2, ARM_KERNEL_PROTECT_EXCEPTION_START	// Load EL0 vector address
	sub	x1, x1, x0				// Calculate delta
	add	x0, x2, x1				// Convert KVA to EL0 vector address
	br	x0

Lskip_el0_eret_mapping:
#endif /* __ARM_KERNEL_PROTECT__ */

Lexception_return_restore_registers:
	mov	x0, sp					// x0 = &pcb
	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
	AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24, el0_state_allowed=1

/* Restore special register state */
	ldr	w3, [sp, NS64_FPSR]
	ldr	w4, [sp, NS64_FPCR]

	msr	ELR_EL1, x1				// Load the return address into ELR
	msr	SPSR_EL1, x2				// Load the return CPSR into SPSR
	msr	FPSR, x3
	mrs	x5, FPCR
	CMSR FPCR, x5, x4, 1
1:


	/* Restore arm_neon_saved_state64 */
	ldp	q0, q1, [x0, NS64_Q0]
	ldp	q2, q3, [x0, NS64_Q2]
	ldp	q4, q5, [x0, NS64_Q4]
	ldp	q6, q7, [x0, NS64_Q6]
	ldp	q8, q9, [x0, NS64_Q8]
	ldp	q10, q11, [x0, NS64_Q10]
	ldp	q12, q13, [x0, NS64_Q12]
	ldp	q14, q15, [x0, NS64_Q14]
	ldp	q16, q17, [x0, NS64_Q16]
	ldp	q18, q19, [x0, NS64_Q18]
	ldp	q20, q21, [x0, NS64_Q20]
	ldp	q22, q23, [x0, NS64_Q22]
	ldp	q24, q25, [x0, NS64_Q24]
	ldp	q26, q27, [x0, NS64_Q26]
	ldp	q28, q29, [x0, NS64_Q28]
	ldp	q30, q31, [x0, NS64_Q30]

	/* Restore arm_saved_state64 */

	// Skip x0, x1 - we're using them
	ldp	x2, x3, [x0, SS64_X2]
	ldp	x4, x5, [x0, SS64_X4]
	ldp	x6, x7, [x0, SS64_X6]
	ldp	x8, x9, [x0, SS64_X8]
	ldp	x10, x11, [x0, SS64_X10]
	ldp	x12, x13, [x0, SS64_X12]
	ldp	x14, x15, [x0, SS64_X14]
	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
	ldp	x18, x19, [x0, SS64_X18]
	ldp	x20, x21, [x0, SS64_X20]
	ldp	x22, x23, [x0, SS64_X22]
	ldp	x24, x25, [x0, SS64_X24]
	ldp	x26, x27, [x0, SS64_X26]
	ldr	x28, [x0, SS64_X28]
	ldr	fp, [x0, SS64_FP]
	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0

	// Restore stack pointer and our last two GPRs
	ldr	x1, [x0, SS64_SP]
	mov	sp, x1

#if __ARM_KERNEL_PROTECT__
	ldr	w18, [x0, SS64_CPSR]			// Stash CPSR
#endif /* __ARM_KERNEL_PROTECT__ */

	ldp	x0, x1, [x0, SS64_X0]			// Restore the GPRs

#if __ARM_KERNEL_PROTECT__
	/* If we are going to eret to userspace, we must unmap the kernel. */
	tbnz	w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch

	/* Update TCR to unmap the kernel. */
	MOV64	x18, TCR_EL1_USER
	msr	TCR_EL1, x18

	/*
	 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
	 * each other due to the microarchitecture.
	 */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
	isb	sy
#endif

	/* Switch to the user ASID (low bit clear) for the task. */
	mrs	x18, TTBR0_EL1
	bic	x18, x18, #(1 << TTBR_ASID_SHIFT)
	msr	TTBR0_EL1, x18
	mov	x18, #0

	/* We don't need an ISB here, as the eret is synchronizing. */
Lskip_ttbr1_switch:
#endif /* __ARM_KERNEL_PROTECT__ */

	ERET_CONTEXT_SYNCHRONIZING

user_take_ast:
	PUSH_FRAME
	bl	EXT(ast_taken_user)			// Handle all ASTs, may return via continuation
	POP_FRAME
	b	check_user_asts				// Now try again

	.text
	.align 2
preempt_underflow:
	mrs	x0, TPIDR_EL1
	str	x0, [sp, #-16]!				// We'll print thread pointer
	adr	x0, L_underflow_str			// Format string
	CALL_EXTERN panic				// Game over

L_underflow_str:
	.asciz "Preemption count negative on thread %p"
.align 2

#if MACH_ASSERT
	.text
	.align 2
rwlock_count_notzero:
	mrs	x0, TPIDR_EL1
	str	x0, [sp, #-16]!				// We'll print thread pointer
	ldr	w0, [x0, TH_RWLOCK_CNT]
	str	w0, [sp, #8]
	adr	x0, L_rwlock_count_notzero_str		// Format string
	CALL_EXTERN panic				// Game over

L_rwlock_count_notzero_str:
	.asciz "RW lock count not 0 on thread %p (%u)"

	.text
	.align 2
preempt_count_notzero:
	mrs	x0, TPIDR_EL1
	str	x0, [sp, #-16]!				// We'll print thread pointer
	ldr	w0, [x0, ACT_PREEMPT_CNT]
	str	w0, [sp, #8]
	adr	x0, L_preempt_count_notzero_str		// Format string
	CALL_EXTERN panic				// Game over

L_preempt_count_notzero_str:
	.asciz "preemption count not 0 on thread %p (%u)"
#endif /* MACH_ASSERT */

	.text
	.align 2
tmp_alloc_count_nozero:
	mrs	x0, TPIDR_EL1
	CALL_EXTERN kheap_temp_leak_panic

#if __ARM_KERNEL_PROTECT__
	/*
	 * This symbol denotes the end of the exception vector/eret range; we page
	 * align it so that we can avoid mapping other text in the EL0 exception
	 * vector mapping.
	 */
	.text
	.align 14
	.globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)
#endif /* __ARM_KERNEL_PROTECT__ */

#if XNU_MONITOR

/*
 * Functions to preflight the fleh handlers when the PPL has taken an exception;
 * mostly concerned with setting up state for the normal fleh code.
 */
fleh_synchronous_from_ppl:
	/* Save x0. */
	mov	x15, x0

	/* Grab the ESR. */
	mrs	x1, ESR_EL1				// Get the exception syndrome

	/* If the stack pointer is corrupt, it will manifest either as a data abort
	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
	 * these quickly by testing bit 5 of the exception class.
	 */
	tbz	x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
	mrs	x0, SP_EL0				// Get SP_EL0

	/* Perform high level checks for stack corruption. */
	and	x1, x1, #ESR_EC_MASK			// Mask the exception class
	mov	x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
	cmp	x1, x2					// If we have a stack alignment exception
	b.eq	Lcorrupt_ppl_stack			// ...the stack is definitely corrupted
	mov	x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
	cmp	x1, x2					// If we have a data abort, we need to
	b.ne	Lvalid_ppl_stack			// ...validate the stack pointer

Ltest_pstack:
	/* Bounds check the PPL stack. */
	adrp	x10, EXT(pmap_stacks_start)@page
	ldr	x10, [x10, #EXT(pmap_stacks_start)@pageoff]
	adrp	x11, EXT(pmap_stacks_end)@page
	ldr	x11, [x11, #EXT(pmap_stacks_end)@pageoff]
	cmp	x0, x10
	b.lo	Lcorrupt_ppl_stack
	cmp	x0, x11
	b.hi	Lcorrupt_ppl_stack

Lvalid_ppl_stack:
	/* Restore x0. */
	mov	x0, x15

	/* Switch back to the kernel stack. */
	msr	SPSel, #0
	GET_PMAP_CPU_DATA x5, x6, x7
	ldr	x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov	sp, x6

	/* Hand off to the synch handler. */
	b	EXT(fleh_synchronous)

Lcorrupt_ppl_stack:
	/* Restore x0. */
	mov	x0, x15

	/* Hand off to the invalid stack handler. */
	b	fleh_invalid_stack

fleh_fiq_from_ppl:
	SWITCH_TO_INT_STACK
	b	EXT(fleh_fiq)

fleh_irq_from_ppl:
	SWITCH_TO_INT_STACK
	b	EXT(fleh_irq)

fleh_serror_from_ppl:
	GET_PMAP_CPU_DATA x5, x6, x7
	ldr	x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov	sp, x6
	b	EXT(fleh_serror)




	// x15: ppl call number
	// w10: ppl_state
	// x20: gxf_enter caller's DAIF
	.globl EXT(ppl_trampoline_start)
LEXT(ppl_trampoline_start)


#error "XPRR configuration error"
	cmp	x14, x21
	b.ne	Lppl_fail_dispatch

	/* Verify the request ID. */
	cmp	x15, PMAP_COUNT
	b.hs	Lppl_fail_dispatch

	GET_PMAP_CPU_DATA x12, x13, x14

	/* Mark this CPU as being in the PPL. */
	ldr	w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	cmp	w9, #PPL_STATE_KERNEL
	b.eq	Lppl_mark_cpu_as_dispatching

	/* Check to see if we are trying to trap from within the PPL. */
	cmp	w9, #PPL_STATE_DISPATCH
	b.eq	Lppl_fail_dispatch_ppl


	/* Ensure that we are returning from an exception. */
	cmp	w9, #PPL_STATE_EXCEPTION
	b.ne	Lppl_fail_dispatch

	// where is w10 set?
	// in CHECK_EXCEPTION_RETURN_DISPATCH_PPL
	cmp	w10, #PPL_STATE_EXCEPTION
	b.ne	Lppl_fail_dispatch

	/* This is an exception return; set the CPU to the dispatching state. */
	mov	w9, #PPL_STATE_DISPATCH
	str	w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Find the save area, and return to the saved PPL context. */
	ldr	x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
	mov	sp, x0
	b	EXT(return_to_ppl)

Lppl_mark_cpu_as_dispatching:
	cmp	w10, #PPL_STATE_KERNEL
	b.ne	Lppl_fail_dispatch

	/* Mark the CPU as dispatching. */
	mov	w13, #PPL_STATE_DISPATCH
	str	w13, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Switch to the regular PPL stack. */
	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
	ldr	x9, [x12, PMAP_CPU_DATA_PPL_STACK]

	// SP0 is thread stack here
	mov	x21, sp
	// SP0 is now PPL stack
	mov	sp, x9

	/* Save the old stack pointer off in case we need it. */
	str	x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

	/* Get the handler for the request */
	adrp	x9, EXT(ppl_handler_table)@page
	add	x9, x9, EXT(ppl_handler_table)@pageoff
	add	x9, x9, x15, lsl #3
	ldr	x10, [x9]

	/* Branch to the code that will invoke the PPL request. */
	b	EXT(ppl_dispatch)

Lppl_fail_dispatch_ppl:
	/* Switch back to the kernel stack. */
	ldr	x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov	sp, x10

Lppl_fail_dispatch:
	/* Indicate that we failed. */
	mov	x15, #PPL_EXIT_BAD_CALL

	/* Move the DAIF bits into the expected register. */
	mov	x10, x20

	/* Return to kernel mode. */
	b	ppl_return_to_kernel_mode

Lppl_dispatch_exit:
	/* Indicate that we are cleanly exiting the PPL. */
	mov	x15, #PPL_EXIT_DISPATCH

	/* Switch back to the original (kernel thread) stack. */
	mov	sp, x21

	/* Move the saved DAIF bits. */
	mov	x10, x20

	/* Clear the old stack pointer. */
	str	xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

	/*
	 * Mark the CPU as no longer being in the PPL. We spin if our state
	 * machine is broken.
	 */
	ldr	w9, [x12, PMAP_CPU_DATA_PPL_STATE]
	cmp	w9, #PPL_STATE_DISPATCH
	b.ne	.
	mov	w9, #PPL_STATE_KERNEL
	str	w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Return to the kernel. */
	b	ppl_return_to_kernel_mode


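/*
 * ppl_exit
 *
 * Common PPL-to-kernel exit path. Expects x15 to hold a PPL_EXIT_* reason,
 * x10 the DAIF state to restore, and x0 the return value (for dispatch
 * exits). Exception exits jump to the saved fleh handler in x25; panic and
 * bad-call exits are routed to the panic machinery.
 */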
	.text
ppl_exit:
	/*
	 * If we are dealing with an exception, hand off to the first level
	 * exception handler.
	 */
	cmp	x15, #PPL_EXIT_EXCEPTION
	b.eq	Ljump_to_fleh_handler

	/* Restore the original AIF state. */
	REENABLE_DAIF x10

	/* If this was a panic call from the PPL, reinvoke panic. */
	cmp	x15, #PPL_EXIT_PANIC_CALL
	b.eq	Ljump_to_panic_trap_to_debugger

	/* Load the preemption count. */
	mrs	x10, TPIDR_EL1
	ldr	w12, [x10, ACT_PREEMPT_CNT]

	/* Detect underflow */
	cbnz	w12, Lno_preempt_underflow
	b	preempt_underflow
Lno_preempt_underflow:

	/* Lower the preemption count. */
	sub	w12, w12, #1
	str	w12, [x10, ACT_PREEMPT_CNT]

	/* Skip ASTs if the preemption count is not zero. */
	cbnz	x12, Lppl_skip_ast_taken

	/* Skip the AST check if interrupts are disabled. */
	mrs	x1, DAIF
	tst	x1, #DAIF_IRQF
	b.ne	Lppl_skip_ast_taken

	/* Disable interrupts. */
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)

	/* If there is no urgent AST, skip the AST. */
	ldr	x12, [x10, ACT_CPUDATAP]
	ldr	x14, [x12, CPU_PENDING_AST]
	tst	x14, AST_URGENT
	b.eq	Lppl_defer_ast_taken

	/* Stash our return value and return reason. */
	mov	x20, x0
	mov	x21, x15

	/* Handle the AST. */
	bl	EXT(ast_taken_kernel)

	/* Restore the return value and the return reason. */
	mov	x15, x21
	mov	x0, x20

Lppl_defer_ast_taken:
	/* Reenable interrupts. */
	msr	DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)

Lppl_skip_ast_taken:
	/* Pop the stack frame. */
	ldp	x29, x30, [sp, #0x10]
	ldp	x20, x21, [sp], #0x20

	/* Check to see if this was a bad request. */
	cmp	x15, #PPL_EXIT_BAD_CALL
	b.eq	Lppl_bad_call

	/* Return. */
	ARM64_STACK_EPILOG

	.align 2
Ljump_to_fleh_handler:
	br	x25

	.align 2
Ljump_to_panic_trap_to_debugger:
	b	EXT(panic_trap_to_debugger)

Lppl_bad_call:
	/* Panic. */
	adrp	x0, Lppl_bad_call_panic_str@page
	add	x0, x0, Lppl_bad_call_panic_str@pageoff
	b	EXT(panic)

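/*
 * ppl_dispatch
 *
 * Invokes a PPL handler from the trampoline. On entry, x10 holds the handler
 * address loaded from ppl_handler_table, x9 the address of that table entry
 * (used as the diversifier for blraa), and x20 the caller's DAIF bits to
 * restore while the handler runs.
 */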
	.text
	.align 2
	.globl EXT(ppl_dispatch)
LEXT(ppl_dispatch)
	/*
	 * Save a couple of important registers (implementation detail; x12 has
	 * the PPL per-CPU data address; x13 is not actually interesting).
	 */
	stp	x12, x13, [sp, #-0x10]!

	/* Restore the original AIF state. */
	REENABLE_DAIF x20

	/*
	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
	 * but the exception vectors will deal with this properly.
	 */

	/* Invoke the PPL method. */
#ifdef HAS_APPLE_PAC
	blraa	x10, x9
#else
	blr	x10
#endif

	/* Disable AIF. */
	msr	DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)

	/* Restore those important registers. */
	ldp	x12, x13, [sp], #0x10

	/* Mark this as a regular return, and hand off to the return path. */
	b	Lppl_dispatch_exit

1468 .align 2
1469 .globl EXT(ppl_bootstrap_dispatch)
1470LEXT(ppl_bootstrap_dispatch)
1471 /* Verify the PPL request. */
1472 cmp x15, PMAP_COUNT
1473 b.hs Lppl_fail_bootstrap_dispatch
1474
1475 /* Get the requested PPL routine. */
1476 adrp x9, EXT(ppl_handler_table)@page
1477 add x9, x9, EXT(ppl_handler_table)@pageoff
f427ee49
A
1478 add x9, x9, x15, lsl #3
1479 ldr x10, [x9]
c6bf4f31
A
1480
1481 /* Invoke the requested PPL routine. */
1482#ifdef HAS_APPLE_PAC
f427ee49 1483 blraa x10, x9
c6bf4f31
A
1484#else
1485 blr x10
1486#endif
1487 /* Stash off the return value */
1488 mov x20, x0
1489 /* Drop the preemption count */
1490 bl EXT(_enable_preemption)
1491 mov x0, x20
1492
1493 /* Pop the stack frame. */
1494 ldp x29, x30, [sp, #0x10]
1495 ldp x20, x21, [sp], #0x20
1496#if __has_feature(ptrauth_returns)
1497 retab
1498#else
1499 ret
1500#endif
1501
1502Lppl_fail_bootstrap_dispatch:
1503 /* Pop our stack frame and panic. */
1504 ldp x29, x30, [sp, #0x10]
1505 ldp x20, x21, [sp], #0x20
1506#if __has_feature(ptrauth_returns)
1507 autibsp
1508#endif
1509 adrp x0, Lppl_bad_call_panic_str@page
1510 add x0, x0, Lppl_bad_call_panic_str@pageoff
1511 b EXT(panic)
1512
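/*
 * ml_panic_trap_to_debugger
 *
 * Called on the panic path. If the PPL is locked down and this CPU is in
 * PPL dispatch, marks the CPU as panicking, switches back to the saved
 * kernel stack pointer, and exits the PPL with PPL_EXIT_PANIC_CALL;
 * otherwise it restores DAIF and returns.
 */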
	.text
	.align 2
	.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
	mrs	x10, DAIF
	msr	DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)

	adrp	x12, EXT(pmap_ppl_locked_down)@page
	ldr	w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
	cbz	w12, Lnot_in_ppl_dispatch

	LOAD_PMAP_CPU_DATA x11, x12, x13

	ldr	w12, [x11, PMAP_CPU_DATA_PPL_STATE]
	cmp	w12, #PPL_STATE_DISPATCH
	b.ne	Lnot_in_ppl_dispatch

	/* Indicate (for the PPL->kernel transition) that we are panicking. */
	mov	x15, #PPL_EXIT_PANIC_CALL

	/* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
	ldr	x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov	sp, x12

	mrs	x10, DAIF
	mov	w13, #PPL_STATE_PANIC
	str	w13, [x11, PMAP_CPU_DATA_PPL_STATE]

	/* Now we are ready to exit the PPL. */
	b	ppl_return_to_kernel_mode
Lnot_in_ppl_dispatch:
	REENABLE_DAIF x10
	ret

	.data
Lppl_bad_call_panic_str:
	.asciz "ppl_dispatch: failed due to bad arguments/state"
#else /* XNU_MONITOR */
	.text
	.align 2
	.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
	ret
#endif /* XNU_MONITOR */

/* ARM64_TODO Is globals_asm.h needed? */
//#include "globals_asm.h"

/* vim: set ts=4: */