1 /*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <machine/asm.h>
30 #include <arm64/machine_routines_asm.h>
31 #include <arm64/proc_reg.h>
32 #include <pexpert/arm64/board_config.h>
33 #include <mach/exception_types.h>
34 #include <mach_kdp.h>
35 #include <config_dtrace.h>
36 #include "assym.s"
37 #include <arm64/exception_asm.h>
38 #include <arm64/pac_asm.h>
39 #include "dwarf_unwind.h"
40
41 #if __ARM_KERNEL_PROTECT__
42 #include <arm/pmap.h>
43 #endif
44
45 #if XNU_MONITOR
46 /*
47 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
48 *
49 * Checks if an exception was taken from the PPL, and if so, trampolines back
50 * into the PPL.
51 * x26 - 0 if the exception was taken while in the kernel, 1 if the
52 * exception was taken while in the PPL.
53 */
54 .macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
55 cmp x26, xzr
56 b.eq 1f
57
58 /* Return to the PPL. */
59 mov x15, #0
60 mov w10, #PPL_STATE_EXCEPTION
61 #if __APRR_SUPPORTED__
62 b Ldisable_aif_and_enter_ppl
63 #else
64 #error "XPRR configuration error"
65 #endif /* __APRR_SUPPORTED__ */
66 1:
67 .endmacro
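/*
 * Rough sketch of the macro above (illustrative only, not actual code):
 *
 *     if (x26 != 0) {                          // exception was originally taken in the PPL
 *         x15 = 0;                             // no PPL call number for an exception return
 *         w10 = PPL_STATE_EXCEPTION;           // state the trampoline expects to see
 *         goto Ldisable_aif_and_enter_ppl;     // trampoline back into the PPL
 *     }
 *     // x26 == 0: the exception came from the kernel; continue the normal return path
 */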
68
69 #if __APRR_SUPPORTED__
70 /*
71 * EL1_SP0_VECTOR_PPL_CHECK
72 *
73 * Check to see if the exception was taken by the kernel or the PPL. Falls
74 * through if kernel, hands off to the given label if PPL. Expects to run on
75 * SP1.
76 * arg0 - Label to go to if this was a PPL exception.
77 */
78 .macro EL1_SP0_VECTOR_PPL_CHECK
79 sub sp, sp, ARM_CONTEXT_SIZE
80 stp x0, x1, [sp, SS64_X0]
81 mrs x0, APRR_EL1
82 MOV64 x1, APRR_EL1_DEFAULT
83 cmp x0, x1
84 b.ne $0
85 ldp x0, x1, [sp, SS64_X0]
86 add sp, sp, ARM_CONTEXT_SIZE
87 .endmacro
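/*
 * Rough sketch of the check above (illustrative only):
 *
 *     if (APRR_EL1 != APRR_EL1_DEFAULT)   // not the kernel's APRR configuration
 *         goto arg0;                      // treat as a PPL exception (x0/x1 stay spilled on SP1)
 *     // otherwise x0/x1 and SP1 are restored and we fall through to the kernel vector
 */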
88
89 #define STAY_ON_SP1 0
90 #define SWITCH_TO_SP0 1
91
92 #define INVOKE_PREFLIGHT 0
93 #define NO_INVOKE_PREFLIGHT 1
94
95 /*
96 * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
97 *
98 * Verify whether an exception came from the PPL or from the kernel. If it came
99 * from the PPL, save off the PPL state and transition out of the PPL.
100 * arg0 - Label to go to if this was a kernel exception
101 * arg1 - Label to go to (after leaving the PPL) if this was a PPL exception
102 * arg2 - Indicates if this should switch back to SP0
103 * x0 - xPRR_EL1_BR1 read by EL1_SP0_VECTOR_PPL_CHECK
104 */
105 .macro EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
106 /* Spill some more registers. */
107 stp x2, x3, [sp, SS64_X2]
108
109 /*
110 * Check if the PPL is locked down; if not, we can treat this as a
111 * kernel exception.
112 */
113 adrp x1, EXT(pmap_ppl_locked_down)@page
114 ldr w1, [x1, #EXT(pmap_ppl_locked_down)@pageoff]
115 cbz x1, 2f
116
117 /* Ensure that APRR_EL1 is actually in PPL mode. */
118 MOV64 x1, APRR_EL1_PPL
119 cmp x0, x1
120 b.ne .
121
122 /*
123 * Check if the CPU is in the PPL; if not we can treat this as a
124 * kernel exception.
125 */
126 GET_PMAP_CPU_DATA x3, x1, x2
127 ldr w1, [x3, PMAP_CPU_DATA_PPL_STATE]
128 cmp x1, #PPL_STATE_KERNEL
129 b.eq 2f
130
131 /* Ensure that the CPU is in the expected PPL state. */
132 cmp x1, #PPL_STATE_DISPATCH
133 b.ne .
134
135 /* Mark the CPU as dealing with an exception. */
136 mov x1, #PPL_STATE_EXCEPTION
137 str w1, [x3, PMAP_CPU_DATA_PPL_STATE]
138
139 /* Load the bounds of the PPL trampoline. */
140 adrp x0, EXT(ppl_no_exception_start)@page
141 add x0, x0, EXT(ppl_no_exception_start)@pageoff
142 adrp x1, EXT(ppl_no_exception_end)@page
143 add x1, x1, EXT(ppl_no_exception_end)@pageoff
144
145 /*
146 * Ensure that the exception did not occur in the trampoline. If it
147 * did, we are either being attacked or our state machine is
148 * horrifically broken.
149 */
150 mrs x2, ELR_EL1
151 cmp x2, x0
152 b.lo 1f
153 cmp x2, x1
154 b.hi 1f
155
156 /* We might be under attack; spin. */
157 b .
158
159 1:
160 /* Get the PPL save area. */
161 mov x1, x3
162 ldr x0, [x3, PMAP_CPU_DATA_SAVE_AREA]
163
164 /* Save our x0, x1 state. */
165 ldp x2, x3, [sp, SS64_X0]
166 stp x2, x3, [x0, SS64_X0]
167
168 /* Restore SP1 to its original state. */
169 mov x3, sp
170 add sp, sp, ARM_CONTEXT_SIZE
171
172 .if $2 == SWITCH_TO_SP0
173 /* Switch back to SP0. */
174 msr SPSel, #0
175 mov x2, sp
176 .else
177 /* Load the SP0 value. */
178 mrs x2, SP_EL0
179 .endif
180
181 /* Save off the stack pointer. */
182 str x2, [x0, SS64_SP]
183
184 INIT_SAVED_STATE_FLAVORS x0, w1, w2
185
186 /* Save the context that was interrupted. */
187 ldp x2, x3, [x3, SS64_X2]
188 SPILL_REGISTERS PPL_MODE
189
190 /*
191 * Stash the function we wish to be invoked to deal with the exception;
192 * usually this is some preflight function for the fleh_* handler.
193 */
194 adrp x25, $1@page
195 add x25, x25, $1@pageoff
196
197 /*
198 * Indicate that this is a PPL exception, and that we should return to
199 * the PPL.
200 */
201 mov x26, #1
202
203 /* Transition back to kernel mode. */
204 mov x15, #PPL_EXIT_EXCEPTION
205 b ppl_return_to_kernel_mode
206 2:
207 /* Restore SP1 state. */
208 ldp x2, x3, [sp, SS64_X2]
209 ldp x0, x1, [sp, SS64_X0]
210 add sp, sp, ARM_CONTEXT_SIZE
211
212 /* Go to the specified label (usually the original exception vector). */
213 b $0
214 .endmacro
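/*
 * Rough sketch of the macro above (illustrative only, not actual code):
 *
 *     if (!pmap_ppl_locked_down)                          goto arg0;   // plain kernel exception
 *     if (APRR_EL1 != APRR_EL1_PPL)                       spin();      // must be in PPL mode here
 *     if (cpu->ppl_state == PPL_STATE_KERNEL)             goto arg0;   // CPU not in the PPL
 *     if (cpu->ppl_state != PPL_STATE_DISPATCH)           spin();      // unexpected state
 *     cpu->ppl_state = PPL_STATE_EXCEPTION;
 *     if (ELR_EL1 is within ppl_no_exception_start..end)  spin();      // fault in the trampoline
 *     save the interrupted context into the PPL save area (SPILL_REGISTERS);
 *     x25 = arg1;                      // preflight handler to run once back in the kernel
 *     x26 = 1;                         // mark this as a PPL exception (return to PPL later)
 *     x15 = PPL_EXIT_EXCEPTION;
 *     goto ppl_return_to_kernel_mode;
 */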
215 #endif /* __APRR_SUPPORTED__ */
216
217 #endif /* XNU_MONITOR */
218
219 #define CBF_DISABLE 0
220 #define CBF_ENABLE 1
221
222 .macro COMPARE_BRANCH_FUSION
223 #if defined(APPLE_ARM64_ARCH_FAMILY)
224 mrs $1, ARM64_REG_HID1
225 .if $0 == CBF_DISABLE
226 orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
227 .else
228 mov $2, ARM64_REG_HID1_disCmpBrFusion
229 bic $1, $1, $2
230 .endif
231 msr ARM64_REG_HID1, $1
232 .if $0 == CBF_DISABLE
233 isb sy
234 .endif
235 #endif
236 .endmacro
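/*
 * Illustrative usage (the scratch registers shown here are arbitrary):
 *
 *     COMPARE_BRANCH_FUSION CBF_DISABLE, x9, x10   // set HID1.disCmpBrFusion, then isb
 *     COMPARE_BRANCH_FUSION CBF_ENABLE,  x9, x10   // clear HID1.disCmpBrFusion
 */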
237
238 /*
239 * MAP_KERNEL
240 *
241 * Restores the kernel EL1 mappings, if necessary.
242 *
243 * This may mutate x18.
244 */
245 .macro MAP_KERNEL
246 #if __ARM_KERNEL_PROTECT__
247 /* Switch to the kernel ASID (low bit set) for the task. */
248 mrs x18, TTBR0_EL1
249 orr x18, x18, #(1 << TTBR_ASID_SHIFT)
250 msr TTBR0_EL1, x18
251
252 /*
253 * We eschew some barriers on Apple CPUs, as relative ordering of writes
254 * to the TTBRs and writes to the TCR should be ensured by the
255 * microarchitecture.
256 */
257 #if !defined(APPLE_ARM64_ARCH_FAMILY)
258 isb sy
259 #endif
260
261 /*
262 * Update the TCR to map the kernel now that we are using the kernel
263 * ASID.
264 */
265 MOV64 x18, TCR_EL1_BOOT
266 msr TCR_EL1, x18
267 isb sy
268 #endif /* __ARM_KERNEL_PROTECT__ */
269 .endmacro
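/*
 * Rough sketch of the macro above (illustrative only):
 *
 *     TTBR0_EL1 |= (1 << TTBR_ASID_SHIFT);   // select the kernel ASID (low bit set)
 *     TCR_EL1 = TCR_EL1_BOOT;                // boot-time TCR maps the kernel again
 *     isb();
 */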
270
271 /*
272 * BRANCH_TO_KVA_VECTOR
273 *
274 * Branches to the requested long exception vector in the kernelcache.
275 * arg0 - The label to branch to
276 * arg1 - The index of the label in exc_vectors_tables
277 *
278 * This may mutate x18.
279 */
280 .macro BRANCH_TO_KVA_VECTOR
281 #if __ARM_KERNEL_PROTECT__
282 /*
283 * Find the kernelcache table for the exception vectors by accessing
284 * the per-CPU data.
285 */
286 mrs x18, TPIDR_EL1
287 ldr x18, [x18, ACT_CPUDATAP]
288 ldr x18, [x18, CPU_EXC_VECTORS]
289
290 /*
291 * Get the handler for this exception and jump to it.
292 */
293 ldr x18, [x18, #($1 << 3)]
294 br x18
295 #else
296 b $0
297 #endif /* __ARM_KERNEL_PROTECT__ */
298 .endmacro
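/*
 * Note: each entry in the per-CPU exception vector table is a .quad, so the
 * index is scaled by 8 ("$1 << 3" above) to form the byte offset of the handler.
 */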
299
300 /*
301 * CHECK_KERNEL_STACK
302 *
303 * Verifies that the kernel stack is aligned and mapped within an expected
304 * stack address range. Note: happens before saving registers (in case we can't
305 * save to kernel stack).
306 *
307 * Expects:
308 * {x0, x1} - saved
309 * x1 - Exception syndrome
310 * sp - Saved state
311 *
312 * Note: an unused macro argument appears to be required for the \@ local-label syntax to work.
313 *
314 */
315 .macro CHECK_KERNEL_STACK unused
316 stp x2, x3, [sp, #-16]! // Save {x2-x3}
317 and x1, x1, #ESR_EC_MASK // Mask the exception class
318 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
319 cmp x1, x2 // If we have a stack alignment exception
320 b.eq Lcorrupt_stack_\@ // ...the stack is definitely corrupted
321 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
322 cmp x1, x2 // If we have a data abort, we need to
323 b.ne Lvalid_stack_\@ // ...validate the stack pointer
324 mrs x0, SP_EL0 // Get SP_EL0
325 mrs x1, TPIDR_EL1 // Get thread pointer
326 Ltest_kstack_\@:
327 ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
328 sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
329 cmp x0, x2 // if (SP_EL0 >= kstack top)
330 b.ge Ltest_istack_\@ // jump to istack test
331 cmp x0, x3 // if (SP_EL0 > kstack bottom)
332 b.gt Lvalid_stack_\@ // stack pointer valid
333 Ltest_istack_\@:
334 ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
335 ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
336 sub x3, x2, INTSTACK_SIZE_NUM // Find bottom of istack
337 cmp x0, x2 // if (SP_EL0 >= istack top)
338 b.ge Lcorrupt_stack_\@ // corrupt stack pointer
339 cmp x0, x3 // if (SP_EL0 > istack bottom)
340 b.gt Lvalid_stack_\@ // stack pointer valid
341 Lcorrupt_stack_\@:
342 ldp x2, x3, [sp], #16
343 ldp x0, x1, [sp], #16
344 sub sp, sp, ARM_CONTEXT_SIZE // Allocate exception frame
345 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the exception frame
346 stp x2, x3, [sp, SS64_X2] // Save x2, x3 to the exception frame
347 mrs x0, SP_EL0 // Get SP_EL0
348 str x0, [sp, SS64_SP] // Save sp to the exception frame
349 INIT_SAVED_STATE_FLAVORS sp, w0, w1
350 mov x0, sp // Copy exception frame pointer to x0
351 adrp x1, fleh_invalid_stack@page // Load address for fleh
352 add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
353 b fleh_dispatch64
354 Lvalid_stack_\@:
355 ldp x2, x3, [sp], #16 // Restore {x2-x3}
356 .endmacro
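/*
 * Rough sketch of the validation above (illustrative only):
 *
 *     if (EC == ESR_EC_SP_ALIGN)                    goto corrupt;
 *     if (EC != ESR_EC_DABORT_EL1)                  goto valid;
 *     sp0 = SP_EL0;
 *     if (kstack_bottom < sp0 && sp0 < kstack_top)  goto valid;
 *     if (istack_bottom < sp0 && sp0 < istack_top)  goto valid;
 * corrupt:
 *     build an exception frame on the current (exception) stack and
 *     dispatch to fleh_invalid_stack;
 */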
357
358
359 #if __ARM_KERNEL_PROTECT__
360 .section __DATA_CONST,__const
361 .align 3
362 .globl EXT(exc_vectors_table)
363 LEXT(exc_vectors_table)
364 /* Table of exception handlers.
365 * These handlers sometimes contain deadloops.
366 * It's nice to have symbols for them when debugging. */
367 .quad el1_sp0_synchronous_vector_long
368 .quad el1_sp0_irq_vector_long
369 .quad el1_sp0_fiq_vector_long
370 .quad el1_sp0_serror_vector_long
371 .quad el1_sp1_synchronous_vector_long
372 .quad el1_sp1_irq_vector_long
373 .quad el1_sp1_fiq_vector_long
374 .quad el1_sp1_serror_vector_long
375 .quad el0_synchronous_vector_64_long
376 .quad el0_irq_vector_64_long
377 .quad el0_fiq_vector_64_long
378 .quad el0_serror_vector_64_long
379 #endif /* __ARM_KERNEL_PROTECT__ */
380
381 .text
382 #if __ARM_KERNEL_PROTECT__
383 /*
384 * We need this to be on a page boundary so that we can avoid mapping
385 * other text along with it. As this must be on the VM page boundary
386 * (due to how the coredumping code currently works), this will be a
387 * 16KB page boundary.
388 */
389 .align 14
390 #else
391 .align 12
392 #endif /* __ARM_KERNEL_PROTECT__ */
393 .globl EXT(ExceptionVectorsBase)
394 LEXT(ExceptionVectorsBase)
395 Lel1_sp0_synchronous_vector:
396 BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0
397
398 .text
399 .align 7
400 Lel1_sp0_irq_vector:
401 BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
402
403 .text
404 .align 7
405 Lel1_sp0_fiq_vector:
406 BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2
407
408 .text
409 .align 7
410 Lel1_sp0_serror_vector:
411 BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3
412
413 .text
414 .align 7
415 Lel1_sp1_synchronous_vector:
416 BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4
417
418 .text
419 .align 7
420 Lel1_sp1_irq_vector:
421 BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5
422
423 .text
424 .align 7
425 Lel1_sp1_fiq_vector:
426 BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6
427
428 .text
429 .align 7
430 Lel1_sp1_serror_vector:
431 BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7
432
433 .text
434 .align 7
435 Lel0_synchronous_vector_64:
436 MAP_KERNEL
437 BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8
438
439 .text
440 .align 7
441 Lel0_irq_vector_64:
442 MAP_KERNEL
443 BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9
444
445 .text
446 .align 7
447 Lel0_fiq_vector_64:
448 MAP_KERNEL
449 BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10
450
451 .text
452 .align 7
453 Lel0_serror_vector_64:
454 MAP_KERNEL
455 BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11
456
457 /* Fill out the rest of the page */
458 .align 12
459
460 /*********************************
461 * END OF EXCEPTION VECTORS PAGE *
462 *********************************/
463
464
465
466 .macro EL1_SP0_VECTOR
467 msr SPSel, #0 // Switch to SP0
468 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
469 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
470 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
471 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
472 INIT_SAVED_STATE_FLAVORS sp, w0, w1
473 mov x0, sp // Copy saved state pointer to x0
474 .endmacro
475
476 el1_sp0_synchronous_vector_long:
477 #if XNU_MONITOR && __APRR_SUPPORTED__
478 /*
479 * We do not have enough space for new instructions in this vector, so
480 * jump to outside code to check if this exception was taken in the PPL.
481 */
482 b el1_sp0_synchronous_vector_ppl_check
483 Lel1_sp0_synchronous_vector_kernel:
484 #endif
485 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
486 mrs x1, ESR_EL1 // Get the exception syndrome
487 /* If the stack pointer is corrupt, it will manifest either as a data abort
488 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
489 * these quickly by testing bit 5 of the exception class.
490 */
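/* Both of those exception classes have bit 5 of the EC set (0x25 = 0b100101,
 * 0x26 = 0b100110), so a clear bit 5 lets us skip CHECK_KERNEL_STACK entirely;
 * classes with bit 5 set that are neither 0x25 nor 0x26 are filtered out
 * inside the macro itself.
 */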
491 tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
492 CHECK_KERNEL_STACK
493 Lkernel_stack_valid:
494 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
495 EL1_SP0_VECTOR
496 adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
497 add x1, x1, EXT(fleh_synchronous)@pageoff
498 b fleh_dispatch64
499
500 el1_sp0_irq_vector_long:
501 #if XNU_MONITOR && __APRR_SUPPORTED__
502 EL1_SP0_VECTOR_PPL_CHECK el1_sp0_irq_vector_not_in_kernel_mode
503 Lel1_sp0_irq_vector_kernel:
504 #endif
505 EL1_SP0_VECTOR
506 SWITCH_TO_INT_STACK
507 adrp x1, EXT(fleh_irq)@page // Load address for fleh
508 add x1, x1, EXT(fleh_irq)@pageoff
509 b fleh_dispatch64
510
511 el1_sp0_fiq_vector_long:
512 // ARM64_TODO write optimized decrementer
513 #if XNU_MONITOR && __APRR_SUPPORTED__
514 EL1_SP0_VECTOR_PPL_CHECK el1_sp0_fiq_vector_not_in_kernel_mode
515 Lel1_sp0_fiq_vector_kernel:
516 #endif
517 EL1_SP0_VECTOR
518 SWITCH_TO_INT_STACK
519 adrp x1, EXT(fleh_fiq)@page // Load address for fleh
520 add x1, x1, EXT(fleh_fiq)@pageoff
521 b fleh_dispatch64
522
523 el1_sp0_serror_vector_long:
524 #if XNU_MONITOR && __APRR_SUPPORTED__
525 EL1_SP0_VECTOR_PPL_CHECK el1_sp0_serror_vector_not_in_kernel_mode
526 Lel1_sp0_serror_vector_kernel:
527 #endif
528 EL1_SP0_VECTOR
529 adrp x1, EXT(fleh_serror)@page // Load address for fleh
530 add x1, x1, EXT(fleh_serror)@pageoff
531 b fleh_dispatch64
532
533 .macro EL1_SP1_VECTOR
534 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
535 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
536 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
537 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
538 INIT_SAVED_STATE_FLAVORS sp, w0, w1
539 mov x0, sp // Copy saved state pointer to x0
540 .endmacro
541
542 el1_sp1_synchronous_vector_long:
543 b check_exception_stack
544 Lel1_sp1_synchronous_valid_stack:
545 #if defined(KERNEL_INTEGRITY_KTRR)
546 b check_ktrr_sctlr_trap
547 Lel1_sp1_synchronous_vector_continue:
548 #endif
549 EL1_SP1_VECTOR
550 adrp x1, fleh_synchronous_sp1@page
551 add x1, x1, fleh_synchronous_sp1@pageoff
552 b fleh_dispatch64
553
554 el1_sp1_irq_vector_long:
555 EL1_SP1_VECTOR
556 adrp x1, fleh_irq_sp1@page
557 add x1, x1, fleh_irq_sp1@pageoff
558 b fleh_dispatch64
559
560 el1_sp1_fiq_vector_long:
561 EL1_SP1_VECTOR
562 adrp x1, fleh_fiq_sp1@page
563 add x1, x1, fleh_fiq_sp1@pageoff
564 b fleh_dispatch64
565
566 el1_sp1_serror_vector_long:
567 EL1_SP1_VECTOR
568 adrp x1, fleh_serror_sp1@page
569 add x1, x1, fleh_serror_sp1@pageoff
570 b fleh_dispatch64
571
572 #if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
573 /**
574 * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
575 * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
576 */
577 #define SCTLR_EL1_INITIAL (SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
578 #define SCTLR_EL1_EXPECTED ((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
579 #endif
580
581 .macro EL0_64_VECTOR
582 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
583 #if __ARM_KERNEL_PROTECT__
584 mov x18, #0 // Zero x18 to avoid leaking data to user SS
585 #endif
586 #if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
587 // enable JOP for kernel
588 mrs x0, SCTLR_EL1
589 tbnz x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
590 // if (!jop_running) {
591 MOV64 x1, SCTLR_JOP_KEYS_ENABLED
592 orr x0, x0, x1
593 msr SCTLR_EL1, x0
594 isb sy
595 MOV64 x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
596 cmp x0, x1
597 bne .
598 // }
599 1:
600 #endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
601 mrs x0, TPIDR_EL1 // Load the thread register
602 mrs x1, SP_EL0 // Load the user stack pointer
603 add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
604 ldr x0, [x0] // Load the user context pointer
605 str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB
606 msr SP_EL0, x0 // Copy the user PCB pointer to SP0
607 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
608 msr SPSel, #0 // Switch to SP0
609 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
610 mrs x1, TPIDR_EL1 // Load the thread register
611
612
613 mov x0, sp // Copy the user PCB pointer to x0
614 // x1 contains thread register
615 .endmacro
616
617
618 el0_synchronous_vector_64_long:
619 EL0_64_VECTOR sync
620 SWITCH_TO_KERN_STACK
621 adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
622 add x1, x1, EXT(fleh_synchronous)@pageoff
623 b fleh_dispatch64
624
625 el0_irq_vector_64_long:
626 EL0_64_VECTOR irq
627 SWITCH_TO_INT_STACK
628 adrp x1, EXT(fleh_irq)@page // load address for fleh
629 add x1, x1, EXT(fleh_irq)@pageoff
630 b fleh_dispatch64
631
632 el0_fiq_vector_64_long:
633 EL0_64_VECTOR fiq
634 SWITCH_TO_INT_STACK
635 adrp x1, EXT(fleh_fiq)@page // load address for fleh
636 add x1, x1, EXT(fleh_fiq)@pageoff
637 b fleh_dispatch64
638
639 el0_serror_vector_64_long:
640 EL0_64_VECTOR serror
641 SWITCH_TO_KERN_STACK
642 adrp x1, EXT(fleh_serror)@page // load address for fleh
643 add x1, x1, EXT(fleh_serror)@pageoff
644 b fleh_dispatch64
645
646 #if XNU_MONITOR && __APRR_SUPPORTED__
647 el1_sp0_synchronous_vector_ppl_check:
648 EL1_SP0_VECTOR_PPL_CHECK el1_sp0_synchronous_vector_not_in_kernel_mode
649
650 /* Jump back to the primary exception vector if we fell through. */
651 b Lel1_sp0_synchronous_vector_kernel
652 #endif
653
654 /*
655 * check_exception_stack
656 *
657 * Verifies that the stack pointer at SP1 is within the exception stack.
658 * If not, simply hangs, as we have no more stack to fall back on.
659 */
660
661 .text
662 .align 2
663 check_exception_stack:
664 mrs x18, TPIDR_EL1 // Get thread pointer
665 cbz x18, Lvalid_exception_stack // Thread context may not be set early in boot
666 ldr x18, [x18, ACT_CPUDATAP]
667 cbz x18, . // If thread context is set, cpu data should be too
668 ldr x18, [x18, CPU_EXCEPSTACK_TOP]
669 cmp sp, x18
670 b.gt . // Hang if above exception stack top
671 sub x18, x18, EXCEPSTACK_SIZE_NUM // Find bottom of exception stack
672 cmp sp, x18
673 b.lt . // Hang if below exception stack bottom
674 Lvalid_exception_stack:
675 mov x18, #0
676 b Lel1_sp1_synchronous_valid_stack
677
678
679 #if defined(KERNEL_INTEGRITY_KTRR)
680 .text
681 .align 2
682 check_ktrr_sctlr_trap:
683 /* We may abort on an instruction fetch on reset when enabling the MMU by
684 * writing SCTLR_EL1 because the page containing the privileged instruction is
685 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
686 * would otherwise panic unconditionally. Check for the condition and return
687 * safe execution to the caller on behalf of the faulting function.
688 *
689 * Expected register state:
690 * x22 - Kernel virtual base
691 * x23 - Kernel physical base
692 */
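/*
 * Rough sketch of the check below (illustrative only):
 *
 *     if ((ESR_EL1 & ~IFSC_MASK) == 0x86000000 &&                  // EL1 instruction abort (EC 0x21, IL set)
 *         ELR_EL1 == _pinst_set_sctlr_trap_addr - x22 + x23) {     // i.e. its physical address
 *         ELR_EL1 = lr;                                            // resume at the faulting routine's caller
 *         eret;
 *     }
 *     // otherwise continue to the normal SP1 synchronous handler
 */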
693 sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack
694 stp x0, x1, [sp, SS64_X0] // Stash x0, x1
695 mrs x0, ESR_EL1 // Check ESR for instr. fetch abort
696 and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC
697 movz w1, #0x8600, lsl #16
698 movk w1, #0x0000
699 cmp x0, x1
700 mrs x0, ELR_EL1 // Check for expected abort address
701 adrp x1, _pinst_set_sctlr_trap_addr@page
702 add x1, x1, _pinst_set_sctlr_trap_addr@pageoff
703 sub x1, x1, x22 // Convert to physical address
704 add x1, x1, x23
705 ccmp x0, x1, #0, eq
706 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
707 add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
708 b.ne Lel1_sp1_synchronous_vector_continue
709 msr ELR_EL1, lr // Return to caller
710 ERET_CONTEXT_SYNCHRONIZING
711 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
712
713 /* 64-bit first level exception handler dispatcher.
714 * Completes register context saving and branches to FLEH.
715 * Expects:
716 * {x0, x1, sp} - saved
717 * x0 - arm_context_t
718 * x1 - address of FLEH
719 * fp - previous stack frame if EL1
720 * lr - unused
721 * sp - kernel stack
722 */
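/*
 * Rough sketch of fleh_dispatch64 (illustrative only):
 *
 *     spill the remaining registers into the arm_context_t at x0;
 *     if (exception came from EL0) zero the live scratch GPRs (their user
 *         values are already saved) so stale user data is not carried along;
 *     x21 = saved context;  x22 = FLEH address;
 *     if (came from EL0) timer_state_event_user_to_kernel();
 *     branch to the FLEH in x22;
 */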
723 .text
724 .align 2
725 fleh_dispatch64:
726 /* Save arm_saved_state64 */
727 SPILL_REGISTERS KERNEL_MODE
728
729 /* If exception is from userspace, zero unused registers */
730 and x23, x23, #(PSR64_MODE_EL_MASK)
731 cmp x23, #(PSR64_MODE_EL0)
732 bne 1f
733
734 SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS
735 2:
736 mov x2, #0
737 mov x3, #0
738 mov x4, #0
739 mov x5, #0
740 mov x6, #0
741 mov x7, #0
742 mov x8, #0
743 mov x9, #0
744 mov x10, #0
745 mov x11, #0
746 mov x12, #0
747 mov x13, #0
748 mov x14, #0
749 mov x15, #0
750 mov x16, #0
751 mov x17, #0
752 mov x18, #0
753 mov x19, #0
754 mov x20, #0
755 /* x21, x22 cleared in common case below */
756 mov x23, #0
757 mov x24, #0
758 mov x25, #0
759 #if !XNU_MONITOR
760 mov x26, #0
761 #endif
762 mov x27, #0
763 mov x28, #0
764 mov fp, #0
765 mov lr, #0
766 1:
767
768 mov x21, x0 // Copy arm_context_t pointer to x21
769 mov x22, x1 // Copy handler routine to x22
770
771 #if XNU_MONITOR
772 /* Zero x26 to indicate that this should not return to the PPL. */
773 mov x26, #0
774 #endif
775
776 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
777 tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
778 b.ne 1f // kernel mode, so skip precise time update
779 PUSH_FRAME
780 bl EXT(timer_state_event_user_to_kernel)
781 POP_FRAME
782 mov x0, x21 // Reload arm_context_t pointer
783 1:
784 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
785
786 /* Dispatch to FLEH */
787
788 br x22
789
790
791 .text
792 .align 2
793 .global EXT(fleh_synchronous)
794 LEXT(fleh_synchronous)
795
796 UNWIND_PROLOGUE
797 UNWIND_DIRECTIVES
798
799 mrs x1, ESR_EL1 // Load exception syndrome
800 mrs x2, FAR_EL1 // Load fault address
801
802 /* At this point, the LR contains the value of ELR_EL1. In the case of an
803 * instruction prefetch abort, this will be the faulting pc, which we know
804 * to be invalid. This will prevent us from backtracing through the
805 * exception if we put it in our stack frame, so we load the LR from the
806 * exception saved state instead.
807 */
808 and w3, w1, #(ESR_EC_MASK)
809 lsr w3, w3, #(ESR_EC_SHIFT)
810 mov w4, #(ESR_EC_IABORT_EL1)
811 cmp w3, w4
812 b.eq Lfleh_sync_load_lr
813 Lvalid_link_register:
814
815 PUSH_FRAME
816 bl EXT(sleh_synchronous)
817 POP_FRAME
818
819 #if XNU_MONITOR
820 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
821 #endif
822
823 mov x28, xzr // Don't need to check PFZ if there are ASTs
824 b exception_return_dispatch
825
826 Lfleh_sync_load_lr:
827 ldr lr, [x0, SS64_LR]
828 b Lvalid_link_register
829 UNWIND_EPILOGUE
830
831 /* Shared prologue code for fleh_irq and fleh_fiq.
832 * Does any interrupt bookkeeping we may want to do
833 * before invoking the handler proper.
834 * Expects:
835 * x0 - arm_context_t
836 * x23 - CPSR
837 * fp - Undefined live value (we may push a frame)
838 * lr - Undefined live value (we may push a frame)
839 * sp - Interrupt stack for the current CPU
840 */
841 .macro BEGIN_INTERRUPT_HANDLER
842 mrs x22, TPIDR_EL1
843 ldr x23, [x22, ACT_CPUDATAP] // Get current cpu
844 /* Update IRQ count */
845 ldr w1, [x23, CPU_STAT_IRQ]
846 add w1, w1, #1 // Increment count
847 str w1, [x23, CPU_STAT_IRQ] // Update IRQ count
848 ldr w1, [x23, CPU_STAT_IRQ_WAKE]
849 add w1, w1, #1 // Increment count
850 str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
851 /* Increment preempt count */
852 ldr w1, [x22, ACT_PREEMPT_CNT]
853 add w1, w1, #1
854 str w1, [x22, ACT_PREEMPT_CNT]
855 /* Store context in int state */
856 str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state
857 .endmacro
858
859 /* Shared epilogue code for fleh_irq and fleh_fiq.
860 * Cleans up after the prologue, and may do a bit more
861 * bookkeeping (kdebug related).
862 * Expects:
863 * x22 - Live TPIDR_EL1 value (thread address)
864 * x23 - Address of the current CPU data structure
865 * w24 - 0 if kdebug is disabled, nonzero otherwise
866 * fp - Undefined live value (we may push a frame)
867 * lr - Undefined live value (we may push a frame)
868 * sp - Interrupt stack for the current CPU
869 */
870 .macro END_INTERRUPT_HANDLER
871 /* Clear int context */
872 str xzr, [x23, CPU_INT_STATE]
873 /* Decrement preempt count */
874 ldr w0, [x22, ACT_PREEMPT_CNT]
875 cbnz w0, 1f // Detect underflow
876 b preempt_underflow
877 1:
878 sub w0, w0, #1
879 str w0, [x22, ACT_PREEMPT_CNT]
880 /* Switch back to kernel stack */
881 ldr x0, [x22, TH_KSTACKPTR]
882 mov sp, x0
883 .endmacro
884
885 .text
886 .align 2
887 .global EXT(fleh_irq)
888 LEXT(fleh_irq)
889 BEGIN_INTERRUPT_HANDLER
890 PUSH_FRAME
891 bl EXT(sleh_irq)
892 POP_FRAME
893 END_INTERRUPT_HANDLER
894
895 #if XNU_MONITOR
896 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
897 #endif
898
899 mov x28, #1 // Set a bit to check PFZ if there are ASTs
900 b exception_return_dispatch
901
902 .text
903 .align 2
904 .global EXT(fleh_fiq_generic)
905 LEXT(fleh_fiq_generic)
906 PANIC_UNIMPLEMENTED
907
908 .text
909 .align 2
910 .global EXT(fleh_fiq)
911 LEXT(fleh_fiq)
912 BEGIN_INTERRUPT_HANDLER
913 PUSH_FRAME
914 bl EXT(sleh_fiq)
915 POP_FRAME
916 END_INTERRUPT_HANDLER
917
918 #if XNU_MONITOR
919 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
920 #endif
921
922 mov x28, #1 // Set a bit to check PFZ if there are ASTs
923 b exception_return_dispatch
924
925 .text
926 .align 2
927 .global EXT(fleh_serror)
928 LEXT(fleh_serror)
929 mrs x1, ESR_EL1 // Load exception syndrome
930 mrs x2, FAR_EL1 // Load fault address
931
932 PUSH_FRAME
933 bl EXT(sleh_serror)
934 POP_FRAME
935
936 #if XNU_MONITOR
937 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
938 #endif
939
940 mov x28, xzr // Don't need to check PFZ if there are ASTs
941 b exception_return_dispatch
942
943 /*
944 * Register state saved before we get here.
945 */
946 .text
947 .align 2
948 fleh_invalid_stack:
949 mrs x1, ESR_EL1 // Load exception syndrome
950 str x1, [x0, SS64_ESR]
951 mrs x2, FAR_EL1 // Load fault address
952 str x2, [x0, SS64_FAR]
953 PUSH_FRAME
954 bl EXT(sleh_invalid_stack) // Shouldn't return!
955 b .
956
957 .text
958 .align 2
959 fleh_synchronous_sp1:
960 mrs x1, ESR_EL1 // Load exception syndrome
961 str x1, [x0, SS64_ESR]
962 mrs x2, FAR_EL1 // Load fault address
963 str x2, [x0, SS64_FAR]
964 PUSH_FRAME
965 bl EXT(sleh_synchronous_sp1)
966 b .
967
968 .text
969 .align 2
970 fleh_irq_sp1:
971 mov x1, x0
972 adr x0, Lsp1_irq_str
973 b EXT(panic_with_thread_kernel_state)
974 Lsp1_irq_str:
975 .asciz "IRQ exception taken while SP1 selected"
976
977 .text
978 .align 2
979 fleh_fiq_sp1:
980 mov x1, x0
981 adr x0, Lsp1_fiq_str
982 b EXT(panic_with_thread_kernel_state)
983 Lsp1_fiq_str:
984 .asciz "FIQ exception taken while SP1 selected"
985
986 .text
987 .align 2
988 fleh_serror_sp1:
989 mov x1, x0
990 adr x0, Lsp1_serror_str
991 b EXT(panic_with_thread_kernel_state)
992 Lsp1_serror_str:
993 .asciz "Asynchronous exception taken while SP1 selected"
994
995 .text
996 .align 2
997 exception_return_dispatch:
998 ldr w0, [x21, SS64_CPSR]
999 tst w0, PSR64_MODE_EL_MASK
1000 b.ne EXT(return_to_kernel) // return to kernel if M[3:2] > 0
1001 b return_to_user
1002
1003 .text
1004 .align 2
1005 .global EXT(return_to_kernel)
1006 LEXT(return_to_kernel)
1007 tbnz w0, #DAIF_IRQF_SHIFT, exception_return // Skip AST check if IRQ disabled
1008 mrs x3, TPIDR_EL1 // Load thread pointer
1009 ldr w1, [x3, ACT_PREEMPT_CNT] // Load preemption count
1010 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
1011 cbnz x1, exception_return_unint_tpidr_x3 // If preemption disabled, skip AST check
1012 ldr x1, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1013 ldr x2, [x1, CPU_PENDING_AST] // Get ASTs
1014 tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken
1015 b.eq exception_return_unint_tpidr_x3
1016 mov sp, x21 // Switch to thread stack for preemption
1017 PUSH_FRAME
1018 bl EXT(ast_taken_kernel) // Handle AST_URGENT
1019 POP_FRAME
1020 b exception_return
1021
1022 .text
1023 .globl EXT(thread_bootstrap_return)
1024 LEXT(thread_bootstrap_return)
1025 #if CONFIG_DTRACE
1026 bl EXT(dtrace_thread_bootstrap)
1027 #endif
1028 b EXT(arm64_thread_exception_return)
1029
1030 .text
1031 .globl EXT(arm64_thread_exception_return)
1032 LEXT(arm64_thread_exception_return)
1033 mrs x0, TPIDR_EL1
1034 add x21, x0, ACT_CONTEXT
1035 ldr x21, [x21]
1036 mov x28, xzr
1037
1038 //
1039 // Fall through to return_to_user from arm64_thread_exception_return.
1040 // Note that if return_to_user is moved, or a new routine is inserted
1041 // below arm64_thread_exception_return, this fall-through will need to change.
1042 //
1043 .text
1044 /* x21 is always the machine context pointer when we get here
1045 * x28 is a bit indicating whether or not we should check if pc is in pfz */
1046 return_to_user:
1047 check_user_asts:
1048 mrs x3, TPIDR_EL1 // Load thread pointer
1049
1050 movn w2, #0
1051 str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
1052
1053 #if MACH_ASSERT
1054 ldr w0, [x3, TH_RWLOCK_CNT]
1055 cbnz w0, rwlock_count_notzero // Detect unbalanced RW lock/unlock
1056
1057 ldr w0, [x3, ACT_PREEMPT_CNT]
1058 cbnz w0, preempt_count_notzero // Detect unbalanced enable/disable preemption
1059 #endif
1060 ldr w0, [x3, TH_TMP_ALLOC_CNT]
1061 cbnz w0, tmp_alloc_count_nozero // Detect KHEAP_TEMP leaks
1062
1063 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
1064 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1065 ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
1066 cbz x0, no_asts // If no asts, skip ahead
1067
1068 cbz x28, user_take_ast // If we don't need to check PFZ, just handle asts
1069
1070 /* At this point, we have ASTs and we need to check whether we are running in the
1071 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
1072 * the PFZ since we don't want to handle getting a signal or getting suspended
1073 * while holding a spinlock in userspace.
1074 *
1075 * If userspace was in the PFZ, we know (via coordination with the PFZ code
1076 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
1077 * to use it to indicate to userspace to come back to take a delayed
1078 * preemption, at which point the ASTs will be handled. */
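/* Rough sketch of the PFZ handling below (illustrative only):
 *
 *     if (commpage_is_in_pfz64(saved_state->pc)) {
 *         saved_state->x15 = 1;    // ask userspace to request a delayed preemption
 *         // leave the ASTs pending and return to user as if none were set
 *     } else {
 *         ast_taken_user();        // service the pending ASTs now
 *     }
 */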
1079 mov x28, xzr // Clear the "check PFZ" bit so that we don't do this again
1080 mov x19, x0 // Save x0 since it will be clobbered by commpage_is_in_pfz64
1081
1082 ldr x0, [x21, SS64_PC] // Load pc from machine state
1083 bl EXT(commpage_is_in_pfz64) // pc in pfz?
1084 cbz x0, restore_and_check_ast // No, deal with other asts
1085
1086 mov x0, #1
1087 str x0, [x21, SS64_X15] // Mark x15 for userspace to take delayed preemption
1088 mov x0, x19 // restore x0 to asts
1089 b no_asts // pretend we have no asts
1090
1091 restore_and_check_ast:
1092 mov x0, x19 // restore x0
1093 b user_take_ast // Service pending asts
1094 no_asts:
1095
1096
1097 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
1098 mov x19, x3 // Preserve thread pointer across function call
1099 PUSH_FRAME
1100 bl EXT(timer_state_event_kernel_to_user)
1101 POP_FRAME
1102 mov x3, x19
1103 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
1104
1105 #if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
1106 /* Watchtower
1107 *
1108 * Here we attempt to enable NEON access for EL0. If the last entry into the
1109 * kernel from user-space was due to an IRQ, the monitor will have disabled
1110 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
1111 * check in with the monitor in order to reenable NEON for EL0 in exchange
1112 * for routing IRQs through the monitor (2). This way the monitor will
1113 * always 'own' either IRQs or EL0 NEON.
1114 *
1115 * If Watchtower is disabled or we did not enter the kernel through an IRQ
1116 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
1117 * here.
1118 *
1119 * EL0 user ________ IRQ ______
1120 * EL1 xnu \ ______________________ CPACR_EL1 __/
1121 * EL3 monitor \_/ \___/
1122 *
1123 * (1) (2)
1124 */
1125
1126 mov x0, #(CPACR_FPEN_ENABLE)
1127 msr CPACR_EL1, x0
1128 #endif
1129
1130 /* Establish this thread's debug state as the live state on the selected CPU. */
1131 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1132 ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
1133 ldr x0, [x3, ACT_DEBUGDATA]
1134 cmp x0, x1
1135 beq L_skip_user_set_debug_state // Skip if the live CPU debug state already matches the thread's debug state
1136
1137 #if defined(APPLELIGHTNING)
1138 /* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */
1139
1140 ARM64_IS_PCORE x12 // if we're not a pCORE, also do nothing
1141 cbz x12, 1f
1142
1143 #endif
1144
1145 #if defined(APPLELIGHTNING) || defined(APPLEFIRESTORM)
1146
1147 mrs x12, ARM64_REG_HID1 // if any debug session ever existed, set forceNexL3ClkOn
1148 orr x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
1149 msr ARM64_REG_HID1, x12
1150 1:
1151
1152 #endif
1153
1154 PUSH_FRAME
1155 bl EXT(arm_debug_set) // Establish thread debug state in live regs
1156 POP_FRAME
1157 mrs x3, TPIDR_EL1 // Reload thread pointer
1158 L_skip_user_set_debug_state:
1159
1160
1161 b exception_return_unint_tpidr_x3
1162
1163 //
1164 // Fall through from return_to_user to exception_return.
1165 // Note that if exception_return is moved, or a new routine is added below
1166 // return_to_user, this fall-through will have to change.
1167 //
1168
1169 exception_return:
1170 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
1171 exception_return_unint:
1172 mrs x3, TPIDR_EL1 // Load thread pointer
1173 exception_return_unint_tpidr_x3:
1174 mov sp, x21 // Reload the pcb pointer
1175
1176 exception_return_unint_tpidr_x3_dont_trash_x18:
1177
1178
1179 #if __ARM_KERNEL_PROTECT__
1180 /*
1181 * If we are going to eret to userspace, we must return through the EL0
1182 * eret mapping.
1183 */
1184 ldr w1, [sp, SS64_CPSR] // Load CPSR
1185 tbnz w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1
1186
1187 /* We need to switch to the EL0 mapping of this code to eret to EL0. */
1188 adrp x0, EXT(ExceptionVectorsBase)@page // Load vector base
1189 adrp x1, Lexception_return_restore_registers@page // Load target PC
1190 add x1, x1, Lexception_return_restore_registers@pageoff
1191 MOV64 x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address
1192 sub x1, x1, x0 // Calculate delta
1193 add x0, x2, x1 // Convert KVA to EL0 vector address
1194 br x0
1195
1196 Lskip_el0_eret_mapping:
1197 #endif /* __ARM_KERNEL_PROTECT__ */
1198
1199 Lexception_return_restore_registers:
1200 mov x0, sp // x0 = &pcb
1201 // Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
1202 AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24, el0_state_allowed=1
1203
1204 /* Restore special register state */
1205 ldr w3, [sp, NS64_FPSR]
1206 ldr w4, [sp, NS64_FPCR]
1207
1208 msr ELR_EL1, x1 // Load the return address into ELR
1209 msr SPSR_EL1, x2 // Load the return CPSR into SPSR
1210 msr FPSR, x3
1211 mrs x5, FPCR
1212 CMSR FPCR, x5, x4, 1
1213 1:
1214
1215 #if defined(HAS_APPLE_PAC)
1216 // if (eret to userspace) {
1217 and x2, x2, #(PSR64_MODE_EL_MASK)
1218 cmp x2, #(PSR64_MODE_EL0)
1219 bne Ldone_reconfigure_jop
1220 // thread_t thread = current_thread();
1221 // bool disable_jop;
1222 // if (arm_user_jop_disabled()) {
1223 // /* if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on) */
1224 // disable_jop = true;
1225 // } else {
1226 // disable_jop = thread->machine.disable_user_jop;
1227 // }
1228 mrs x2, TPIDR_EL1
1229 ldrb w1, [x2, TH_DISABLE_USER_JOP]
1230 cbz w1, Lenable_jop
1231 // if (disable_jop) {
1232 // if (cpu does not have discrete JOP-at-EL1 bit) {
1233 // disable_sctlr_jop_keys();
1234 // }
1235 // } else {
1236 // if (cpu does not have fast A-key switching) {
1237 // reprogram_jop_keys(thread->machine.jop_pid);
1238 // }
1239 // }
1240 // }
1241 Ldisable_jop:
1242 #if !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
1243 MOV64 x1, SCTLR_JOP_KEYS_ENABLED
1244 mrs x4, SCTLR_EL1
1245 bic x4, x4, x1
1246 msr SCTLR_EL1, x4
1247 MOV64 x1, SCTLR_EL1_EXPECTED
1248 cmp x4, x1
1249 bne .
1250 #endif /* !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
1251 b Ldone_reconfigure_jop
1252 Lenable_jop:
1253 #if HAS_PAC_SLOW_A_KEY_SWITCHING
1254 IF_PAC_FAST_A_KEY_SWITCHING Ldone_reconfigure_jop, x1
1255 ldr x1, [x2, TH_JOP_PID]
1256 ldr x2, [x2, ACT_CPUDATAP]
1257 REPROGRAM_JOP_KEYS Ldone_reconfigure_jop, x1, x2, x3
1258 #if defined(__ARM_ARCH_8_5__)
1259 /**
1260 * The new keys will be used after eret to userspace, so explicit sync is
1261 * required iff eret is non-synchronizing.
1262 */
1263 isb sy
1264 #endif /* defined(__ARM_ARCH_8_5__) */
1265 #endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
1266 Ldone_reconfigure_jop:
1267 #endif /* defined(HAS_APPLE_PAC) */
1268
1269 /* Restore arm_neon_saved_state64 */
1270 ldp q0, q1, [x0, NS64_Q0]
1271 ldp q2, q3, [x0, NS64_Q2]
1272 ldp q4, q5, [x0, NS64_Q4]
1273 ldp q6, q7, [x0, NS64_Q6]
1274 ldp q8, q9, [x0, NS64_Q8]
1275 ldp q10, q11, [x0, NS64_Q10]
1276 ldp q12, q13, [x0, NS64_Q12]
1277 ldp q14, q15, [x0, NS64_Q14]
1278 ldp q16, q17, [x0, NS64_Q16]
1279 ldp q18, q19, [x0, NS64_Q18]
1280 ldp q20, q21, [x0, NS64_Q20]
1281 ldp q22, q23, [x0, NS64_Q22]
1282 ldp q24, q25, [x0, NS64_Q24]
1283 ldp q26, q27, [x0, NS64_Q26]
1284 ldp q28, q29, [x0, NS64_Q28]
1285 ldp q30, q31, [x0, NS64_Q30]
1286
1287 /* Restore arm_saved_state64 */
1288
1289 // Skip x0, x1 - we're using them
1290 ldp x2, x3, [x0, SS64_X2]
1291 ldp x4, x5, [x0, SS64_X4]
1292 ldp x6, x7, [x0, SS64_X6]
1293 ldp x8, x9, [x0, SS64_X8]
1294 ldp x10, x11, [x0, SS64_X10]
1295 ldp x12, x13, [x0, SS64_X12]
1296 ldp x14, x15, [x0, SS64_X14]
1297 // Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1298 ldp x18, x19, [x0, SS64_X18]
1299 ldp x20, x21, [x0, SS64_X20]
1300 ldp x22, x23, [x0, SS64_X22]
1301 ldp x24, x25, [x0, SS64_X24]
1302 ldp x26, x27, [x0, SS64_X26]
1303 ldr x28, [x0, SS64_X28]
1304 ldr fp, [x0, SS64_FP]
1305 // Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1306
1307 // Restore stack pointer and our last two GPRs
1308 ldr x1, [x0, SS64_SP]
1309 mov sp, x1
1310
1311 #if __ARM_KERNEL_PROTECT__
1312 ldr w18, [x0, SS64_CPSR] // Stash CPSR
1313 #endif /* __ARM_KERNEL_PROTECT__ */
1314
1315 ldp x0, x1, [x0, SS64_X0] // Restore the GPRs
1316
1317 #if __ARM_KERNEL_PROTECT__
1318 /* If we are going to eret to userspace, we must unmap the kernel. */
1319 tbnz w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1320
1321 /* Update TCR to unmap the kernel. */
1322 MOV64 x18, TCR_EL1_USER
1323 msr TCR_EL1, x18
1324
1325 /*
1326 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1327 * each other due to the microarchitecture.
1328 */
1329 #if !defined(APPLE_ARM64_ARCH_FAMILY)
1330 isb sy
1331 #endif
1332
1333 /* Switch to the user ASID (low bit clear) for the task. */
1334 mrs x18, TTBR0_EL1
1335 bic x18, x18, #(1 << TTBR_ASID_SHIFT)
1336 msr TTBR0_EL1, x18
1337 mov x18, #0
1338
1339 /* We don't need an ISB here, as the eret is synchronizing. */
1340 Lskip_ttbr1_switch:
1341 #endif /* __ARM_KERNEL_PROTECT__ */
1342
1343 ERET_CONTEXT_SYNCHRONIZING
1344
1345 user_take_ast:
1346 PUSH_FRAME
1347 bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation
1348 POP_FRAME
1349 b check_user_asts // Now try again
1350
1351 .text
1352 .align 2
1353 preempt_underflow:
1354 mrs x0, TPIDR_EL1
1355 str x0, [sp, #-16]! // We'll print thread pointer
1356 adr x0, L_underflow_str // Format string
1357 CALL_EXTERN panic // Game over
1358
1359 L_underflow_str:
1360 .asciz "Preemption count negative on thread %p"
1361 .align 2
1362
1363 #if MACH_ASSERT
1364 .text
1365 .align 2
1366 rwlock_count_notzero:
1367 mrs x0, TPIDR_EL1
1368 str x0, [sp, #-16]! // We'll print thread pointer
1369 ldr w0, [x0, TH_RWLOCK_CNT]
1370 str w0, [sp, #8]
1371 adr x0, L_rwlock_count_notzero_str // Format string
1372 CALL_EXTERN panic // Game over
1373
1374 L_rwlock_count_notzero_str:
1375 .asciz "RW lock count not 0 on thread %p (%u)"
1376
1377 .text
1378 .align 2
1379 preempt_count_notzero:
1380 mrs x0, TPIDR_EL1
1381 str x0, [sp, #-16]! // We'll print thread pointer
1382 ldr w0, [x0, ACT_PREEMPT_CNT]
1383 str w0, [sp, #8]
1384 adr x0, L_preempt_count_notzero_str // Format string
1385 CALL_EXTERN panic // Game over
1386
1387 L_preempt_count_notzero_str:
1388 .asciz "preemption count not 0 on thread %p (%u)"
1389 #endif /* MACH_ASSERT */
1390
1391 .text
1392 .align 2
1393 tmp_alloc_count_nozero:
1394 mrs x0, TPIDR_EL1
1395 CALL_EXTERN kheap_temp_leak_panic
1396
1397 #if __ARM_KERNEL_PROTECT__
1398 /*
1399 * This symbol denotes the end of the exception vector/eret range; we page
1400 * align it so that we can avoid mapping other text in the EL0 exception
1401 * vector mapping.
1402 */
1403 .text
1404 .align 14
1405 .globl EXT(ExceptionVectorsEnd)
1406 LEXT(ExceptionVectorsEnd)
1407 #endif /* __ARM_KERNEL_PROTECT__ */
1408
1409 #if XNU_MONITOR
1410 #if __APRR_SUPPORTED__
1411 .text
1412 .align 2
1413 el1_sp0_synchronous_vector_not_in_kernel_mode:
1414 EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1
1415
1416 .text
1417 .align 2
1418 el1_sp0_fiq_vector_not_in_kernel_mode:
1419 EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0
1420
1421 .text
1422 .align 2
1423 el1_sp0_irq_vector_not_in_kernel_mode:
1424 EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0
1425
1426 .text
1427 .align 2
1428 el1_sp0_serror_vector_not_in_kernel_mode:
1429 EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0
1430 #endif /* __APRR_SUPPORTED__ */
1431
1432 /*
1433 * Functions to preflight the fleh handlers when the PPL has taken an exception;
1434 * mostly concerned with setting up state for the normal fleh code.
1435 */
1436 fleh_synchronous_from_ppl:
1437 /* Save x0. */
1438 mov x15, x0
1439
1440 /* Grab the ESR. */
1441 mrs x1, ESR_EL1 // Get the exception syndrome
1442
1443 /* If the stack pointer is corrupt, it will manifest either as a data abort
1444 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
1445 * these quickly by testing bit 5 of the exception class.
1446 */
1447 tbz x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
1448 mrs x0, SP_EL0 // Get SP_EL0
1449
1450 /* Perform high level checks for stack corruption. */
1451 and x1, x1, #ESR_EC_MASK // Mask the exception class
1452 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
1453 cmp x1, x2 // If we have a stack alignment exception
1454 b.eq Lcorrupt_ppl_stack // ...the stack is definitely corrupted
1455 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
1456 cmp x1, x2 // If we have a data abort, we need to
1457 b.ne Lvalid_ppl_stack // ...validate the stack pointer
1458
1459 Ltest_pstack:
1460 /* Bounds check the PPL stack. */
1461 adrp x10, EXT(pmap_stacks_start)@page
1462 ldr x10, [x10, #EXT(pmap_stacks_start)@pageoff]
1463 adrp x11, EXT(pmap_stacks_end)@page
1464 ldr x11, [x11, #EXT(pmap_stacks_end)@pageoff]
1465 cmp x0, x10
1466 b.lo Lcorrupt_ppl_stack
1467 cmp x0, x11
1468 b.hi Lcorrupt_ppl_stack
1469
1470 Lvalid_ppl_stack:
1471 /* Restore x0. */
1472 mov x0, x15
1473
1474 /* Switch back to the kernel stack. */
1475 msr SPSel, #0
1476 GET_PMAP_CPU_DATA x5, x6, x7
1477 ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1478 mov sp, x6
1479
1480 /* Hand off to the synch handler. */
1481 b EXT(fleh_synchronous)
1482
1483 Lcorrupt_ppl_stack:
1484 /* Restore x0. */
1485 mov x0, x15
1486
1487 /* Hand off to the invalid stack handler. */
1488 b fleh_invalid_stack
1489
1490 fleh_fiq_from_ppl:
1491 SWITCH_TO_INT_STACK
1492 b EXT(fleh_fiq)
1493
1494 fleh_irq_from_ppl:
1495 SWITCH_TO_INT_STACK
1496 b EXT(fleh_irq)
1497
1498 fleh_serror_from_ppl:
1499 GET_PMAP_CPU_DATA x5, x6, x7
1500 ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1501 mov sp, x6
1502 b EXT(fleh_serror)
1503
1504
1505 #if XNU_MONITOR && __APRR_SUPPORTED__
1506 /*
1507 * aprr_ppl_enter
1508 *
1509 * Invokes the PPL
1510 * x15 - The index of the requested PPL function.
1511 */
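/*
 * Rough sketch of the entry flow below (illustrative only):
 *
 *     preemption_count++;
 *     if (!pmap_ppl_locked_down)
 *         return ppl_bootstrap_dispatch(x15);   // PPL not locked down yet; call directly
 *     w10 = PPL_STATE_KERNEL;
 *     goto Ldisable_aif_and_enter_ppl;          // mask AIF, switch APRR, run the trampoline
 */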
1512 .text
1513 .align 2
1514 .globl EXT(aprr_ppl_enter)
1515 LEXT(aprr_ppl_enter)
1516 /* Push a frame. */
1517 ARM64_STACK_PROLOG
1518 stp x20, x21, [sp, #-0x20]!
1519 stp x29, x30, [sp, #0x10]
1520 add x29, sp, #0x10
1521
1522 /* Increase the preemption count. */
1523 mrs x10, TPIDR_EL1
1524 ldr w12, [x10, ACT_PREEMPT_CNT]
1525 add w12, w12, #1
1526 str w12, [x10, ACT_PREEMPT_CNT]
1527
1528 /* Is the PPL currently locked down? */
1529 adrp x13, EXT(pmap_ppl_locked_down)@page
1530 add x13, x13, EXT(pmap_ppl_locked_down)@pageoff
1531 ldr w14, [x13]
1532 cmp w14, wzr
1533
1534 /* If not, just perform the call in the current context. */
1535 b.eq EXT(ppl_bootstrap_dispatch)
1536
1537 mov w10, #PPL_STATE_KERNEL
1538 b Ldisable_aif_and_enter_ppl
1539
1540 /* We align this to land the next few instructions on their own page. */
1541 .section __PPLTRAMP,__text,regular,pure_instructions
1542 .align 14
1543 .space (16*1024)-(4*8) // 8 insns
1544
1545 /*
1546 * This label is used by exception handlers that are trying to return
1547 * to the PPL.
1548 */
1549 Ldisable_aif_and_enter_ppl:
1550 /* We must trampoline to the PPL context; disable AIF. */
1551 mrs x20, DAIF
1552 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1553
1554 .globl EXT(ppl_no_exception_start)
1555 LEXT(ppl_no_exception_start)
1556 /* Switch APRR_EL1 to PPL mode. */
1557 MOV64 x14, APRR_EL1_PPL
1558 msr APRR_EL1, x14
1559
1560 /* This ISB should be the last instruction on a page. */
1561 // TODO: can we static assert this?
1562 isb
1563 #endif /* XNU_MONITOR && __APRR_SUPPORTED__ */
1564
1565
1566 // x15: ppl call number
1567 // w10: ppl_state
1568 // x20: gxf_enter caller's DAIF
1569 .globl EXT(ppl_trampoline_start)
1570 LEXT(ppl_trampoline_start)
1571
1572 #if __APRR_SUPPORTED__
1573 /* Squash AIF AGAIN, because someone may have attacked us. */
1574 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1575 #endif /* __APRR_SUPPORTED__ */
1576
1577 #if __APRR_SUPPORTED__
1578 /* Verify the state of APRR_EL1. */
1579 MOV64 x14, APRR_EL1_PPL
1580 mrs x21, APRR_EL1
1581 #else /* __APRR_SUPPORTED__ */
1582 #error "XPRR configuration error"
1583 #endif /* __APRR_SUPPORTED__ */
1584 cmp x14, x21
1585 b.ne Lppl_fail_dispatch
1586
1587 /* Verify the request ID. */
1588 cmp x15, PMAP_COUNT
1589 b.hs Lppl_fail_dispatch
1590
1591 GET_PMAP_CPU_DATA x12, x13, x14
1592
1593 /* Mark this CPU as being in the PPL. */
1594 ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1595
1596 cmp w9, #PPL_STATE_KERNEL
1597 b.eq Lppl_mark_cpu_as_dispatching
1598
1599 /* Check to see if we are trying to trap from within the PPL. */
1600 cmp w9, #PPL_STATE_DISPATCH
1601 b.eq Lppl_fail_dispatch_ppl
1602
1603
1604 /* Ensure that we are returning from an exception. */
1605 cmp w9, #PPL_STATE_EXCEPTION
1606 b.ne Lppl_fail_dispatch
1607
1608 // w10 holds the caller's PPL state: PPL_STATE_EXCEPTION when set by
1609 // CHECK_EXCEPTION_RETURN_DISPATCH_PPL, or PPL_STATE_KERNEL when set by aprr_ppl_enter
1610 cmp w10, #PPL_STATE_EXCEPTION
1611 b.ne Lppl_fail_dispatch
1612
1613 /* This is an exception return; set the CPU to the dispatching state. */
1614 mov w9, #PPL_STATE_DISPATCH
1615 str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1616
1617 /* Find the save area, and return to the saved PPL context. */
1618 ldr x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
1619 mov sp, x0
1620 #if __APRR_SUPPORTED__
1621 b Lexception_return_restore_registers
1622 #else
1623 b EXT(return_to_ppl)
1624 #endif /* __APRR_SUPPORTED__ */
1625
1626 Lppl_mark_cpu_as_dispatching:
1627 cmp w10, #PPL_STATE_KERNEL
1628 b.ne Lppl_fail_dispatch
1629
1630 /* Mark the CPU as dispatching. */
1631 mov w13, #PPL_STATE_DISPATCH
1632 str w13, [x12, PMAP_CPU_DATA_PPL_STATE]
1633
1634 /* Switch to the regular PPL stack. */
1635 // TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
1636 ldr x9, [x12, PMAP_CPU_DATA_PPL_STACK]
1637
1638 // SP0 is thread stack here
1639 mov x21, sp
1640 // SP0 is now PPL stack
1641 mov sp, x9
1642
1643 /* Save the old stack pointer off in case we need it. */
1644 str x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1645
1646 /* Get the handler for the request */
1647 adrp x9, EXT(ppl_handler_table)@page
1648 add x9, x9, EXT(ppl_handler_table)@pageoff
1649 add x9, x9, x15, lsl #3
1650 ldr x10, [x9]
1651
1652 /* Branch to the code that will invoke the PPL request. */
1653 b EXT(ppl_dispatch)
1654
1655 Lppl_fail_dispatch_ppl:
1656 /* Switch back to the kernel stack. */
1657 ldr x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1658 mov sp, x10
1659
1660 Lppl_fail_dispatch:
1661 /* Indicate that we failed. */
1662 mov x15, #PPL_EXIT_BAD_CALL
1663
1664 /* Move the DAIF bits into the expected register. */
1665 mov x10, x20
1666
1667 /* Return to kernel mode. */
1668 b ppl_return_to_kernel_mode
1669
1670 Lppl_dispatch_exit:
1671 /* Indicate that we are cleanly exiting the PPL. */
1672 mov x15, #PPL_EXIT_DISPATCH
1673
1674 /* Switch back to the original (kernel thread) stack. */
1675 mov sp, x21
1676
1677 /* Move the saved DAIF bits. */
1678 mov x10, x20
1679
1680 /* Clear the old stack pointer. */
1681 str xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1682
1683 /*
1684 * Mark the CPU as no longer being in the PPL. We spin if our state
1685 * machine is broken.
1686 */
1687 ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1688 cmp w9, #PPL_STATE_DISPATCH
1689 b.ne .
1690 mov w9, #PPL_STATE_KERNEL
1691 str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1692
1693 /* Return to the kernel. */
1694 b ppl_return_to_kernel_mode
1695
1696 #if __APRR_SUPPORTED__
1697 /* We align this to land the next few instructions on their own page. */
1698 .align 14
1699 .space (16*1024)-(4*5) // 5 insns
1700
1701 ppl_return_to_kernel_mode:
1702 /* Switch APRR_EL1 back to the kernel mode. */
1703 // must be 5 instructions
1704 MOV64 x14, APRR_EL1_DEFAULT
1705 msr APRR_EL1, x14
1706
1707 .globl EXT(ppl_trampoline_end)
1708 LEXT(ppl_trampoline_end)
1709
1710 /* This should be the first instruction on a page. */
1711 isb
1712
1713 .globl EXT(ppl_no_exception_end)
1714 LEXT(ppl_no_exception_end)
1715 b ppl_exit
1716 #endif /* __APRR_SUPPORTED__ */
1717
1718
1719 .text
1720 ppl_exit:
1721 /*
1722 * If we are dealing with an exception, hand off to the first level
1723 * exception handler.
1724 */
1725 cmp x15, #PPL_EXIT_EXCEPTION
1726 b.eq Ljump_to_fleh_handler
1727
1728 /* Restore the original AIF state. */
1729 REENABLE_DAIF x10
1730
1731 /* If this was a panic call from the PPL, reinvoke panic. */
1732 cmp x15, #PPL_EXIT_PANIC_CALL
1733 b.eq Ljump_to_panic_trap_to_debugger
1734
1735 /* Load the preemption count. */
1736 mrs x10, TPIDR_EL1
1737 ldr w12, [x10, ACT_PREEMPT_CNT]
1738
1739 /* Detect underflow */
1740 cbnz w12, Lno_preempt_underflow
1741 b preempt_underflow
1742 Lno_preempt_underflow:
1743
1744 /* Lower the preemption count. */
1745 sub w12, w12, #1
1746 str w12, [x10, ACT_PREEMPT_CNT]
1747
1748 /* Skip ASTs if the preemption count is not zero. */
1749 cbnz x12, Lppl_skip_ast_taken
1750
1751 /* Skip the AST check if interrupts are disabled. */
1752 mrs x1, DAIF
1753 tst x1, #DAIF_IRQF
1754 b.ne Lppl_skip_ast_taken
1755
1756 /* Disable interrupts. */
1757 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)
1758
1759 /* If there is no urgent AST, skip the AST. */
1760 ldr x12, [x10, ACT_CPUDATAP]
1761 ldr x14, [x12, CPU_PENDING_AST]
1762 tst x14, AST_URGENT
1763 b.eq Lppl_defer_ast_taken
1764
1765 /* Stash our return value and return reason. */
1766 mov x20, x0
1767 mov x21, x15
1768
1769 /* Handle the AST. */
1770 bl EXT(ast_taken_kernel)
1771
1772 /* Restore the return value and the return reason. */
1773 mov x15, x21
1774 mov x0, x20
1775
1776 Lppl_defer_ast_taken:
1777 /* Reenable interrupts. */
1778 msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
1779
1780 Lppl_skip_ast_taken:
1781 /* Pop the stack frame. */
1782 ldp x29, x30, [sp, #0x10]
1783 ldp x20, x21, [sp], #0x20
1784
1785 /* Check to see if this was a bad request. */
1786 cmp x15, #PPL_EXIT_BAD_CALL
1787 b.eq Lppl_bad_call
1788
1789 /* Return. */
1790 ARM64_STACK_EPILOG
1791
1792 .align 2
1793 Ljump_to_fleh_handler:
1794 br x25
1795
1796 .align 2
1797 Ljump_to_panic_trap_to_debugger:
1798 b EXT(panic_trap_to_debugger)
1799
1800 Lppl_bad_call:
1801 /* Panic. */
1802 adrp x0, Lppl_bad_call_panic_str@page
1803 add x0, x0, Lppl_bad_call_panic_str@pageoff
1804 b EXT(panic)
1805
1806 .text
1807 .align 2
1808 .globl EXT(ppl_dispatch)
1809 LEXT(ppl_dispatch)
1810 /*
1811 * Save a couple of important registers (implementation detail; x12 has
1812 * the PPL per-CPU data address; x13 is not actually interesting).
1813 */
1814 stp x12, x13, [sp, #-0x10]!
1815
1816 /* Restore the original AIF state. */
1817 REENABLE_DAIF x20
1818
1819 /*
1820 * Note that if the method is NULL, we'll blow up with a prefetch abort,
1821 * but the exception vectors will deal with this properly.
1822 */
1823
1824 /* Invoke the PPL method. */
1825 #ifdef HAS_APPLE_PAC
1826 blraa x10, x9
1827 #else
1828 blr x10
1829 #endif
1830
1831 /* Disable AIF. */
1832 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1833
1834 /* Restore those important registers. */
1835 ldp x12, x13, [sp], #0x10
1836
1837 /* Mark this as a regular return, and hand off to the return path. */
1838 b Lppl_dispatch_exit
1839
1840 .text
1841 .align 2
1842 .globl EXT(ppl_bootstrap_dispatch)
1843 LEXT(ppl_bootstrap_dispatch)
1844 /* Verify the PPL request. */
1845 cmp x15, PMAP_COUNT
1846 b.hs Lppl_fail_bootstrap_dispatch
1847
1848 /* Get the requested PPL routine. */
1849 adrp x9, EXT(ppl_handler_table)@page
1850 add x9, x9, EXT(ppl_handler_table)@pageoff
1851 add x9, x9, x15, lsl #3
1852 ldr x10, [x9]
1853
1854 /* Invoke the requested PPL routine. */
1855 #ifdef HAS_APPLE_PAC
1856 blraa x10, x9
1857 #else
1858 blr x10
1859 #endif
1860 /* Stash off the return value */
1861 mov x20, x0
1862 /* Drop the preemption count */
1863 bl EXT(_enable_preemption)
1864 mov x0, x20
1865
1866 /* Pop the stack frame. */
1867 ldp x29, x30, [sp, #0x10]
1868 ldp x20, x21, [sp], #0x20
1869 #if __has_feature(ptrauth_returns)
1870 retab
1871 #else
1872 ret
1873 #endif
1874
1875 Lppl_fail_bootstrap_dispatch:
1876 /* Pop our stack frame and panic. */
1877 ldp x29, x30, [sp, #0x10]
1878 ldp x20, x21, [sp], #0x20
1879 #if __has_feature(ptrauth_returns)
1880 autibsp
1881 #endif
1882 adrp x0, Lppl_bad_call_panic_str@page
1883 add x0, x0, Lppl_bad_call_panic_str@pageoff
1884 b EXT(panic)
1885
1886 .text
1887 .align 2
1888 .globl EXT(ml_panic_trap_to_debugger)
1889 LEXT(ml_panic_trap_to_debugger)
1890 mrs x10, DAIF
1891 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1892
1893 adrp x12, EXT(pmap_ppl_locked_down)@page
1894 ldr w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
1895 cbz w12, Lnot_in_ppl_dispatch
1896
1897 LOAD_PMAP_CPU_DATA x11, x12, x13
1898
1899 ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE]
1900 cmp w12, #PPL_STATE_DISPATCH
1901 b.ne Lnot_in_ppl_dispatch
1902
1903 /* Indicate (for the PPL->kernel transition) that we are panicking. */
1904 mov x15, #PPL_EXIT_PANIC_CALL
1905
1906 /* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
1907 ldr x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
1908 mov sp, x12
1909
1910 mrs x10, DAIF
1911 mov w13, #PPL_STATE_PANIC
1912 str w13, [x11, PMAP_CPU_DATA_PPL_STATE]
1913
1914 /* Now we are ready to exit the PPL. */
1915 b ppl_return_to_kernel_mode
1916 Lnot_in_ppl_dispatch:
1917 REENABLE_DAIF x10
1918 ret
1919
1920 .data
1921 Lppl_bad_call_panic_str:
1922 .asciz "ppl_dispatch: failed due to bad arguments/state"
1923 #else /* XNU_MONITOR */
1924 .text
1925 .align 2
1926 .globl EXT(ml_panic_trap_to_debugger)
1927 LEXT(ml_panic_trap_to_debugger)
1928 ret
1929 #endif /* XNU_MONITOR */
1930
1931 /* ARM64_TODO Is globals_asm.h needed? */
1932 //#include "globals_asm.h"
1933
1934 /* vim: set ts=4: */