1 /*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <machine/asm.h>
30 #include <arm64/hv/hv_regs.h>
31 #include <arm64/machine_routines_asm.h>
32 #include <arm64/proc_reg.h>
33 #include <pexpert/arm64/board_config.h>
34 #include <mach/exception_types.h>
35 #include <mach_kdp.h>
36 #include <config_dtrace.h>
37 #include "assym.s"
38 #include <arm64/exception_asm.h>
39 #include <arm64/pac_asm.h>
40 #include "dwarf_unwind.h"
41
42 #if __ARM_KERNEL_PROTECT__
43 #include <arm/pmap.h>
44 #endif
45
46 #if XNU_MONITOR
47 /*
48 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
49 *
50 * Checks if an exception was taken from the PPL, and if so, trampolines back
51 * into the PPL.
52 * x26 - 0 if the exception was taken while in the kernel, 1 if the
53 * exception was taken while in the PPL.
54 */
55 .macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
56 cmp x26, xzr
57 b.eq 1f
58
59 /* Return to the PPL. */
60 mov x15, #0
61 mov w10, #PPL_STATE_EXCEPTION
62 #if __APRR_SUPPORTED__
63 b Ldisable_aif_and_enter_ppl
64 #else
65 #error "XPRR configuration error"
66 #endif /* __APRR_SUPPORTED__ */
67 1:
68 .endmacro
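/*
 * Illustrative sketch only (not assembled): the macro above behaves roughly
 * like the following pseudocode, with register names standing in for the
 * values they carry at this point.
 *
 *   if (x26 != 0) {                        // exception was taken in the PPL
 *       x15 = 0;                           // no PPL call number
 *       w10 = PPL_STATE_EXCEPTION;         // state expected on re-entry
 *       goto Ldisable_aif_and_enter_ppl;   // trampoline back into the PPL
 *   }
 *   // otherwise fall through and return to the kernel normally
 */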
69
70 #if __APRR_SUPPORTED__
71 /*
72 * EL1_SP0_VECTOR_PPL_CHECK
73 *
74 * Check to see if the exception was taken by the kernel or the PPL. Falls
75 * through if kernel, hands off to the given label if PPL. Expects to run on
76 * SP1.
77 * arg0 - Label to go to if this was a PPL exception.
78 */
79 .macro EL1_SP0_VECTOR_PPL_CHECK
80 sub sp, sp, ARM_CONTEXT_SIZE
81 stp x0, x1, [sp, SS64_X0]
82 mrs x0, APRR_EL1
83 MOV64 x1, APRR_EL1_DEFAULT
84 cmp x0, x1
85 b.ne $0
86 ldp x0, x1, [sp, SS64_X0]
87 add sp, sp, ARM_CONTEXT_SIZE
88 .endmacro
89
90 #define STAY_ON_SP1 0
91 #define SWITCH_TO_SP0 1
92
93 #define INVOKE_PREFLIGHT 0
94 #define NO_INVOKE_PREFLIGHT 1
95
96 /*
97 * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
98 *
99 * Verify whether an exception came from the PPL or from the kernel. If it came
100 * from the PPL, save off the PPL state and transition out of the PPL.
101 * arg0 - Label to go to if this was a kernel exception
102 * arg1 - Label to go to (after leaving the PPL) if this was a PPL exception
103 * arg2 - Indicates if this should switch back to SP0
104 * x0 - xPRR_EL1_BR1 read by EL1_SP0_VECTOR_PPL_CHECK
105 */
106 .macro EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
107 /* Spill some more registers. */
108 stp x2, x3, [sp, SS64_X2]
109
110 /*
111 * Check if the PPL is locked down; if not, we can treat this as a
112 * kernel execption.
113 */
114 adrp x1, EXT(pmap_ppl_locked_down)@page
115 ldr w1, [x1, #EXT(pmap_ppl_locked_down)@pageoff]
116 cbz x1, 2f
117
118 /* Ensure that APRR_EL1 is actually in PPL mode. */
119 MOV64 x1, APRR_EL1_PPL
120 cmp x0, x1
121 b.ne .
122
123 /*
124 * Check if the CPU is in the PPL; if not we can treat this as a
125 * kernel exception.
126 */
127 GET_PMAP_CPU_DATA x3, x1, x2
128 ldr w1, [x3, PMAP_CPU_DATA_PPL_STATE]
129 cmp x1, #PPL_STATE_KERNEL
130 b.eq 2f
131
132 /* Ensure that the CPU is in the expected PPL state. */
133 cmp x1, #PPL_STATE_DISPATCH
134 b.ne .
135
136 /* Mark the CPU as dealing with an exception. */
137 mov x1, #PPL_STATE_EXCEPTION
138 str w1, [x3, PMAP_CPU_DATA_PPL_STATE]
139
140 /* Load the bounds of the PPL trampoline. */
141 adrp x0, EXT(ppl_no_exception_start)@page
142 add x0, x0, EXT(ppl_no_exception_start)@pageoff
143 adrp x1, EXT(ppl_no_exception_end)@page
144 add x1, x1, EXT(ppl_no_exception_end)@pageoff
145
146 /*
147 * Ensure that the exception did not occur in the trampoline. If it
148 * did, we are either being attacked or our state machine is
149 * horrifically broken.
150 */
151 mrs x2, ELR_EL1
152 cmp x2, x0
153 b.lo 1f
154 cmp x2, x1
155 b.hi 1f
156
157 /* We might be under attack; spin. */
158 b .
159
160 1:
161 /* Get the PPL save area. */
162 mov x1, x3
163 ldr x0, [x3, PMAP_CPU_DATA_SAVE_AREA]
164
165 /* Save our x0, x1 state. */
166 ldp x2, x3, [sp, SS64_X0]
167 stp x2, x3, [x0, SS64_X0]
168
169 /* Restore SP1 to its original state. */
170 mov x3, sp
171 add sp, sp, ARM_CONTEXT_SIZE
172
173 .if $2 == SWITCH_TO_SP0
174 /* Switch back to SP0. */
175 msr SPSel, #0
176 mov x2, sp
177 .else
178 /* Load the SP0 value. */
179 mrs x2, SP_EL0
180 .endif
181
182 /* Save off the stack pointer. */
183 str x2, [x0, SS64_SP]
184
185 INIT_SAVED_STATE_FLAVORS x0, w1, w2
186
187 /* Save the context that was interrupted. */
188 ldp x2, x3, [x3, SS64_X2]
189 SPILL_REGISTERS PPL_MODE
190
191 /*
192 * Stash the function we wish to be invoked to deal with the exception;
193 * usually this is some preflight function for the fleh_* handler.
194 */
195 adrp x25, $1@page
196 add x25, x25, $1@pageoff
197
198 /*
199 * Indicate that this is a PPL exception, and that we should return to
200 * the PPL.
201 */
202 mov x26, #1
203
204 /* Transition back to kernel mode. */
205 mov x15, #PPL_EXIT_EXCEPTION
206 b ppl_return_to_kernel_mode
207 2:
208 /* Restore SP1 state. */
209 ldp x2, x3, [sp, SS64_X2]
210 ldp x0, x1, [sp, SS64_X0]
211 add sp, sp, ARM_CONTEXT_SIZE
212
213 /* Go to the specified label (usually the original exception vector). */
214 b $0
215 .endmacro
216 #endif /* __APRR_SUPPORTED__ */
217
218 #endif /* XNU_MONITOR */
219
220 #define CBF_DISABLE 0
221 #define CBF_ENABLE 1
222
223 .macro COMPARE_BRANCH_FUSION
224 #if defined(APPLE_ARM64_ARCH_FAMILY)
225 mrs $1, ARM64_REG_HID1
226 .if $0 == CBF_DISABLE
227 orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
228 .else
229 mov $2, ARM64_REG_HID1_disCmpBrFusion
230 bic $1, $1, $2
231 .endif
232 msr ARM64_REG_HID1, $1
233 .if $0 == CBF_DISABLE
234 isb sy
235 .endif
236 #endif
237 .endmacro
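/*
 * Hypothetical invocation (the register choice here is illustrative, not
 * taken from any actual caller of this macro):
 *
 *   COMPARE_BRANCH_FUSION CBF_DISABLE, x9, x10    // $1/$2 are scratch registers
 */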
238
239 /*
240 * MAP_KERNEL
241 *
242 * Restores the kernel EL1 mappings, if necessary.
243 *
244 * This may mutate x18.
245 */
246 .macro MAP_KERNEL
247 #if __ARM_KERNEL_PROTECT__
248 /* Switch to the kernel ASID (low bit set) for the task. */
249 mrs x18, TTBR0_EL1
250 orr x18, x18, #(1 << TTBR_ASID_SHIFT)
251 msr TTBR0_EL1, x18
252
253 /*
254 * We eschew some barriers on Apple CPUs, as relative ordering of writes
255 * to the TTBRs and writes to the TCR should be ensured by the
256 * microarchitecture.
257 */
258 #if !defined(APPLE_ARM64_ARCH_FAMILY)
259 isb sy
260 #endif
261
262 /*
263 * Update the TCR to map the kernel now that we are using the kernel
264 * ASID.
265 */
266 MOV64 x18, TCR_EL1_BOOT
267 msr TCR_EL1, x18
268 isb sy
269 #endif /* __ARM_KERNEL_PROTECT__ */
270 .endmacro
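/*
 * Rough pseudocode for MAP_KERNEL under __ARM_KERNEL_PROTECT__ (illustrative
 * only):
 *
 *   TTBR0_EL1 |= (1ULL << TTBR_ASID_SHIFT);   // select the kernel ASID
 *   TCR_EL1 = TCR_EL1_BOOT;                   // TCR value that maps the kernel
 *   isb();                                    // synchronize before continuing
 */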
271
272 /*
273 * BRANCH_TO_KVA_VECTOR
274 *
275 * Branches to the requested long exception vector in the kernelcache.
276 * arg0 - The label to branch to
277 * arg1 - The index of the label in exc_vectors_tables
278 *
279 * This may mutate x18.
280 */
281 .macro BRANCH_TO_KVA_VECTOR
282 #if __ARM_KERNEL_PROTECT__
283 /*
284 * Find the kernelcache table for the exception vectors by accessing
285 * the per-CPU data.
286 */
287 mrs x18, TPIDR_EL1
288 ldr x18, [x18, ACT_CPUDATAP]
289 ldr x18, [x18, CPU_EXC_VECTORS]
290
291 /*
292 * Get the handler for this exception and jump to it.
293 */
294 ldr x18, [x18, #($1 << 3)]
295 br x18
296 #else
297 b $0
298 #endif /* __ARM_KERNEL_PROTECT__ */
299 .endmacro
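/*
 * Illustrative pseudocode (field names approximate the assym.s offsets):
 *
 *   #if __ARM_KERNEL_PROTECT__
 *       table = thread->cpu_data->cpu_exc_vectors;   // per-CPU handler table
 *       goto *table[arg1];                           // ldr x18, [x18, #(arg1 << 3)]; br x18
 *   #else
 *       goto arg0;                                   // direct branch to the label
 *   #endif
 */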
300
301 /*
302 * CHECK_KERNEL_STACK
303 *
304 * Verifies that the kernel stack is aligned and mapped within an expected
305 * stack address range. Note: happens before saving registers (in case we can't
306 * save to kernel stack).
307 *
308 * Expects:
309 * {x0, x1} - saved
310 * x1 - Exception syndrome
311 * sp - Saved state
312 *
313 * Seems like we need an unused argument to the macro for the \@ syntax to work
314 *
315 */
316 .macro CHECK_KERNEL_STACK unused
317 stp x2, x3, [sp, #-16]! // Save {x2-x3}
318 and x1, x1, #ESR_EC_MASK // Mask the exception class
319 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
320 cmp x1, x2 // If we have a stack alignment exception
321 b.eq Lcorrupt_stack_\@ // ...the stack is definitely corrupted
322 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
323 cmp x1, x2 // If we have a data abort, we need to
324 b.ne Lvalid_stack_\@ // ...validate the stack pointer
325 mrs x0, SP_EL0 // Get SP_EL0
326 mrs x1, TPIDR_EL1 // Get thread pointer
327 Ltest_kstack_\@:
328 ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
329 sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
330 cmp x0, x2 // if (SP_EL0 >= kstack top)
331 b.ge Ltest_istack_\@ // jump to istack test
332 cmp x0, x3 // if (SP_EL0 > kstack bottom)
333 b.gt Lvalid_stack_\@ // stack pointer valid
334 Ltest_istack_\@:
335 ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
336 ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
337 sub x3, x2, INTSTACK_SIZE_NUM // Find bottom of istack
338 cmp x0, x2 // if (SP_EL0 >= istack top)
339 b.ge Lcorrupt_stack_\@ // corrupt stack pointer
340 cmp x0, x3 // if (SP_EL0 > istack bottom)
341 b.gt Lvalid_stack_\@ // stack pointer valid
342 Lcorrupt_stack_\@:
343 ldp x2, x3, [sp], #16
344 ldp x0, x1, [sp], #16
345 sub sp, sp, ARM_CONTEXT_SIZE // Allocate exception frame
346 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the exception frame
347 stp x2, x3, [sp, SS64_X2] // Save x2, x3 to the exception frame
348 mrs x0, SP_EL0 // Get SP_EL0
349 str x0, [sp, SS64_SP] // Save sp to the exception frame
350 INIT_SAVED_STATE_FLAVORS sp, w0, w1
351 mov x0, sp // Copy exception frame pointer to x0
352 adrp x1, fleh_invalid_stack@page // Load address for fleh
353 add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
354 b fleh_dispatch64
355 Lvalid_stack_\@:
356 ldp x2, x3, [sp], #16 // Restore {x2-x3}
357 .endmacro
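/*
 * For reference, a C-like sketch of the check above (illustrative only; the
 * names approximate the assym.s offsets actually used):
 *
 *   ec = esr & ESR_EC_MASK;
 *   if (ec == (ESR_EC_SP_ALIGN << ESR_EC_SHIFT)) goto corrupt;
 *   if (ec != (ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)) goto valid;
 *   sp0 = SP_EL0;
 *   ktop = thread->kstackptr;        kbot = ktop - KERNEL_STACK_SIZE;
 *   if (sp0 < ktop && sp0 > kbot) goto valid;
 *   itop = cpu_data->intstack_top;   ibot = itop - INTSTACK_SIZE;
 *   if (sp0 < itop && sp0 > ibot) goto valid;
 * corrupt:
 *   build an exception frame on SP1 and branch to fleh_invalid_stack;
 * valid:
 *   fall through and handle the exception normally;
 */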
358
359
360 #if __ARM_KERNEL_PROTECT__
361 .section __DATA_CONST,__const
362 .align 3
363 .globl EXT(exc_vectors_table)
364 LEXT(exc_vectors_table)
365 /* Table of exception handlers.
366 * These handlers sometimes contain deadloops.
367 * It's nice to have symbols for them when debugging. */
368 .quad el1_sp0_synchronous_vector_long
369 .quad el1_sp0_irq_vector_long
370 .quad el1_sp0_fiq_vector_long
371 .quad el1_sp0_serror_vector_long
372 .quad el1_sp1_synchronous_vector_long
373 .quad el1_sp1_irq_vector_long
374 .quad el1_sp1_fiq_vector_long
375 .quad el1_sp1_serror_vector_long
376 .quad el0_synchronous_vector_64_long
377 .quad el0_irq_vector_64_long
378 .quad el0_fiq_vector_64_long
379 .quad el0_serror_vector_64_long
380 #endif /* __ARM_KERNEL_PROTECT__ */
381
382 .text
383 #if __ARM_KERNEL_PROTECT__
384 /*
385 * We need this to be on a page boundary so that we can avoid mapping
386 * other text along with it. As this must be on the VM page boundary
387 * (due to how the coredumping code currently works), this will be a
388 * 16KB page boundary.
389 */
390 .align 14
391 #else
392 .align 12
393 #endif /* __ARM_KERNEL_PROTECT__ */
394 .globl EXT(ExceptionVectorsBase)
395 LEXT(ExceptionVectorsBase)
396 Lel1_sp0_synchronous_vector:
397 BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0
398
399 .text
400 .align 7
401 Lel1_sp0_irq_vector:
402 BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
403
404 .text
405 .align 7
406 Lel1_sp0_fiq_vector:
407 BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2
408
409 .text
410 .align 7
411 Lel1_sp0_serror_vector:
412 BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3
413
414 .text
415 .align 7
416 Lel1_sp1_synchronous_vector:
417 BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4
418
419 .text
420 .align 7
421 Lel1_sp1_irq_vector:
422 BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5
423
424 .text
425 .align 7
426 Lel1_sp1_fiq_vector:
427 BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6
428
429 .text
430 .align 7
431 Lel1_sp1_serror_vector:
432 BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7
433
434 .text
435 .align 7
436 Lel0_synchronous_vector_64:
437 MAP_KERNEL
438 BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8
439
440 .text
441 .align 7
442 Lel0_irq_vector_64:
443 MAP_KERNEL
444 BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9
445
446 .text
447 .align 7
448 Lel0_fiq_vector_64:
449 MAP_KERNEL
450 BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10
451
452 .text
453 .align 7
454 Lel0_serror_vector_64:
455 MAP_KERNEL
456 BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11
457
458 /* Fill out the rest of the page */
459 .align 12
460
461 /*********************************
462 * END OF EXCEPTION VECTORS PAGE *
463 *********************************/
464
465
466
467 .macro EL1_SP0_VECTOR
468 msr SPSel, #0 // Switch to SP0
469 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
470 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
471 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
472 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
473 INIT_SAVED_STATE_FLAVORS sp, w0, w1
474 mov x0, sp // Copy saved state pointer to x0
475 .endmacro
476
477 el1_sp0_synchronous_vector_long:
478 #if XNU_MONITOR && __APRR_SUPPORTED__
479 /*
480 * We do not have enough space for new instructions in this vector, so
481 * jump to outside code to check if this exception was taken in the PPL.
482 */
483 b el1_sp0_synchronous_vector_ppl_check
484 Lel1_sp0_synchronous_vector_kernel:
485 #endif
486 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
487 mrs x1, ESR_EL1 // Get the exception syndrome
488 /* If the stack pointer is corrupt, it will manifest either as a data abort
489 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
490 * these quickly by testing bit 5 of the exception class.
491 */
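/* (Encoding note: ESR_EC_DABORT_EL1 is 0x25 = 0b100101 and ESR_EC_SP_ALIGN is
 * 0x26 = 0b100110. Both have bit 5 of the EC field set, so a single tbz on
 * ESR bit (ESR_EC_SHIFT + 5) cheaply skips the stack check for exception
 * classes that cannot indicate a corrupt stack; CHECK_KERNEL_STACK then
 * re-checks the exact class.)
 */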
492 tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
493 CHECK_KERNEL_STACK
494 Lkernel_stack_valid:
495 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
496 EL1_SP0_VECTOR
497 adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
498 add x1, x1, EXT(fleh_synchronous)@pageoff
499 b fleh_dispatch64
500
501 el1_sp0_irq_vector_long:
502 #if XNU_MONITOR && __APRR_SUPPORTED__
503 EL1_SP0_VECTOR_PPL_CHECK el1_sp0_irq_vector_not_in_kernel_mode
504 Lel1_sp0_irq_vector_kernel:
505 #endif
506 EL1_SP0_VECTOR
507 SWITCH_TO_INT_STACK
508 adrp x1, EXT(fleh_irq)@page // Load address for fleh
509 add x1, x1, EXT(fleh_irq)@pageoff
510 b fleh_dispatch64
511
512 el1_sp0_fiq_vector_long:
513 // ARM64_TODO write optimized decrementer
514 #if XNU_MONITOR && __APRR_SUPPORTED__
515 EL1_SP0_VECTOR_PPL_CHECK el1_sp0_fiq_vector_not_in_kernel_mode
516 Lel1_sp0_fiq_vector_kernel:
517 #endif
518 EL1_SP0_VECTOR
519 SWITCH_TO_INT_STACK
520 adrp x1, EXT(fleh_fiq)@page // Load address for fleh
521 add x1, x1, EXT(fleh_fiq)@pageoff
522 b fleh_dispatch64
523
524 el1_sp0_serror_vector_long:
525 #if XNU_MONITOR && __APRR_SUPPORTED__
526 EL1_SP0_VECTOR_PPL_CHECK el1_sp0_serror_vector_not_in_kernel_mode
527 Lel1_sp0_serror_vector_kernel:
528 #endif
529 EL1_SP0_VECTOR
530 adrp x1, EXT(fleh_serror)@page // Load address for fleh
531 add x1, x1, EXT(fleh_serror)@pageoff
532 b fleh_dispatch64
533
534 .macro EL1_SP1_VECTOR
535 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
536 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
537 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
538 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
539 INIT_SAVED_STATE_FLAVORS sp, w0, w1
540 mov x0, sp // Copy saved state pointer to x0
541 .endmacro
542
543 el1_sp1_synchronous_vector_long:
544 b check_exception_stack
545 Lel1_sp1_synchronous_valid_stack:
546 #if defined(KERNEL_INTEGRITY_KTRR)
547 b check_ktrr_sctlr_trap
548 Lel1_sp1_synchronous_vector_continue:
549 #endif
550 EL1_SP1_VECTOR
551 adrp x1, fleh_synchronous_sp1@page
552 add x1, x1, fleh_synchronous_sp1@pageoff
553 b fleh_dispatch64
554
555 el1_sp1_irq_vector_long:
556 EL1_SP1_VECTOR
557 adrp x1, fleh_irq_sp1@page
558 add x1, x1, fleh_irq_sp1@pageoff
559 b fleh_dispatch64
560
561 el1_sp1_fiq_vector_long:
562 EL1_SP1_VECTOR
563 adrp x1, fleh_fiq_sp1@page
564 add x1, x1, fleh_fiq_sp1@pageoff
565 b fleh_dispatch64
566
567 el1_sp1_serror_vector_long:
568 EL1_SP1_VECTOR
569 adrp x1, fleh_serror_sp1@page
570 add x1, x1, fleh_serror_sp1@pageoff
571 b fleh_dispatch64
572
573 #if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
574 /**
575 * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
576 * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
577 */
578 #define SCTLR_EL1_INITIAL (SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
579 #define SCTLR_EL1_EXPECTED ((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
580 #endif
581
582 .macro EL0_64_VECTOR
583 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
584 #if __ARM_KERNEL_PROTECT__
585 mov x18, #0 // Zero x18 to avoid leaking data to user SS
586 #endif
587 #if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
588 // enable JOP for kernel
589 mrs x0, SCTLR_EL1
590 tbnz x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
591 // if (!jop_running) {
592 MOV64 x1, SCTLR_JOP_KEYS_ENABLED
593 orr x0, x0, x1
594 msr SCTLR_EL1, x0
595 isb sy
596 MOV64 x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
597 cmp x0, x1
598 bne .
599 // }
600 1:
601 #endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
602 mrs x0, TPIDR_EL1 // Load the thread register
603 mrs x1, SP_EL0 // Load the user stack pointer
604 add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
605 ldr x0, [x0] // Load the user context pointer
606 str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB
607 msr SP_EL0, x0 // Copy the user PCB pointer to SP0
608 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
609 msr SPSel, #0 // Switch to SP0
610 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
611 mrs x1, TPIDR_EL1 // Load the thread register
612
613
614 mov x0, sp // Copy the user PCB pointer to x0
615 // x1 contains thread register
616 .endmacro
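/*
 * Illustrative pseudocode for the EL0 prologue above (not assembled; field
 * names are approximations of the assym.s offsets):
 *
 *   // (on some PAC configurations, first re-enable the kernel JOP keys in SCTLR_EL1)
 *   thread_t th = TPIDR_EL1;
 *   arm_context_t *pcb = *(arm_context_t **)((char *)th + ACT_CONTEXT);
 *   pcb->ss.ss_64.sp = SP_EL0;        // stash the user stack pointer
 *   SP_EL0 = pcb;                     // SP0 now points at the user PCB
 *   pcb->ss.ss_64.x[0] = saved_x0;    // spill the first two GPRs
 *   pcb->ss.ss_64.x[1] = saved_x1;
 *   // on exit from the macro: x0 = pcb, x1 = th
 */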
617
618
619 el0_synchronous_vector_64_long:
620 EL0_64_VECTOR sync
621 SWITCH_TO_KERN_STACK
622 adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
623 add x1, x1, EXT(fleh_synchronous)@pageoff
624 b fleh_dispatch64
625
626 el0_irq_vector_64_long:
627 EL0_64_VECTOR irq
628 SWITCH_TO_INT_STACK
629 adrp x1, EXT(fleh_irq)@page // load address for fleh
630 add x1, x1, EXT(fleh_irq)@pageoff
631 b fleh_dispatch64
632
633 el0_fiq_vector_64_long:
634 EL0_64_VECTOR fiq
635 SWITCH_TO_INT_STACK
636 adrp x1, EXT(fleh_fiq)@page // load address for fleh
637 add x1, x1, EXT(fleh_fiq)@pageoff
638 b fleh_dispatch64
639
640 el0_serror_vector_64_long:
641 EL0_64_VECTOR serror
642 SWITCH_TO_KERN_STACK
643 adrp x1, EXT(fleh_serror)@page // load address for fleh
644 add x1, x1, EXT(fleh_serror)@pageoff
645 b fleh_dispatch64
646
647 #if XNU_MONITOR && __APRR_SUPPORTED__
648 el1_sp0_synchronous_vector_ppl_check:
649 EL1_SP0_VECTOR_PPL_CHECK el1_sp0_synchronous_vector_not_in_kernel_mode
650
651 /* Jump back to the primary exception vector if we fell through. */
652 b Lel1_sp0_synchronous_vector_kernel
653 #endif
654
655 /*
656 * check_exception_stack
657 *
658 * Verifies that the stack pointer at SP1 is within the exception stack.
659 * If not, simply hangs, as we have no more stack to fall back on.
660 */
661
662 .text
663 .align 2
664 check_exception_stack:
665 mrs x18, TPIDR_EL1 // Get thread pointer
666 cbz x18, Lvalid_exception_stack // Thread context may not be set early in boot
667 ldr x18, [x18, ACT_CPUDATAP]
668 cbz x18, . // If thread context is set, cpu data should be too
669 ldr x18, [x18, CPU_EXCEPSTACK_TOP]
670 cmp sp, x18
671 b.gt . // Hang if above exception stack top
672 sub x18, x18, EXCEPSTACK_SIZE_NUM // Find bottom of exception stack
673 cmp sp, x18
674 b.lt . // Hang if below exception stack bottom
675 Lvalid_exception_stack:
676 mov x18, #0
677 b Lel1_sp1_synchronous_valid_stack
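/*
 * Rough equivalent (illustrative only; field names approximate):
 *
 *   if (TPIDR_EL1 == NULL) goto valid;             // too early in boot to check
 *   top = cpu_data->excepstack_top;
 *   if (sp > top || sp < (top - EXCEPSTACK_SIZE))  // outside the exception stack
 *       for (;;) ;                                 // hang; no stack left to fall back on
 * valid:
 *   x18 = 0; continue with the SP1 synchronous handler;
 */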
678
679
680 #if defined(KERNEL_INTEGRITY_KTRR)
681 .text
682 .align 2
683 check_ktrr_sctlr_trap:
684 /* We may abort on an instruction fetch on reset when enabling the MMU by
685 * writing SCTLR_EL1 because the page containing the privileged instruction is
686 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
687 * would otherwise panic unconditionally. Check for the condition and return
688 * safe execution to the caller on behalf of the faulting function.
689 *
690 * Expected register state:
691 * x22 - Kernel virtual base
692 * x23 - Kernel physical base
693 */
694 sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack
695 stp x0, x1, [sp, SS64_X0] // Stash x0, x1
696 mrs x0, ESR_EL1 // Check ESR for instr. fetch abort
697 and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC
698 movz w1, #0x8600, lsl #16
699 movk w1, #0x0000
700 cmp x0, x1
701 mrs x0, ELR_EL1 // Check for expected abort address
702 adrp x1, _pinst_set_sctlr_trap_addr@page
703 add x1, x1, _pinst_set_sctlr_trap_addr@pageoff
704 sub x1, x1, x22 // Convert to physical address
705 add x1, x1, x23
706 ccmp x0, x1, #0, eq
707 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
708 add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
709 b.ne Lel1_sp1_synchronous_vector_continue
710 msr ELR_EL1, lr // Return to caller
711 ERET_CONTEXT_SYNCHRONIZING
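/*
 * (For reference: the movz/movk pair above builds w1 = 0x86000000, i.e. an
 * ESR value with EC = 0x21 (instruction abort taken from the current EL) and
 * IL = 1, after the preceding `and` has masked off ISS.IFSC. An illustrative
 * reading of the whole check, where physaddr_of() stands for the x22/x23
 * KVA-to-physical rebasing done inline:
 *
 *   if ((ESR_EL1 & ~0x3fULL) == 0x86000000 &&
 *       ELR_EL1 == physaddr_of(_pinst_set_sctlr_trap_addr))
 *       { ELR_EL1 = lr; eret; }            // return to the caller of the pinst routine
 *   else
 *       continue to fleh_synchronous_sp1;  // normal SP1 synchronous handling
 * )
 */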
712 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
713
714 /* 64-bit first level exception handler dispatcher.
715 * Completes register context saving and branches to FLEH.
716 * Expects:
717 * {x0, x1, sp} - saved
718 * x0 - arm_context_t
719 * x1 - address of FLEH
720 * fp - previous stack frame if EL1
721 * lr - unused
722 * sp - kernel stack
723 */
724 .text
725 .align 2
726 fleh_dispatch64:
727 /* Save arm_saved_state64 */
728 SPILL_REGISTERS KERNEL_MODE
729
730 /* If exception is from userspace, zero unused registers */
731 and x23, x23, #(PSR64_MODE_EL_MASK)
732 cmp x23, #(PSR64_MODE_EL0)
733 bne 1f
734
735 SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS
736 2:
737 mov x2, #0
738 mov x3, #0
739 mov x4, #0
740 mov x5, #0
741 mov x6, #0
742 mov x7, #0
743 mov x8, #0
744 mov x9, #0
745 mov x10, #0
746 mov x11, #0
747 mov x12, #0
748 mov x13, #0
749 mov x14, #0
750 mov x15, #0
751 mov x16, #0
752 mov x17, #0
753 mov x18, #0
754 mov x19, #0
755 mov x20, #0
756 /* x21, x22 cleared in common case below */
757 mov x23, #0
758 mov x24, #0
759 mov x25, #0
760 #if !XNU_MONITOR
761 mov x26, #0
762 #endif
763 mov x27, #0
764 mov x28, #0
765 mov fp, #0
766 mov lr, #0
767 1:
768
769 mov x21, x0 // Copy arm_context_t pointer to x21
770 mov x22, x1 // Copy handler routine to x22
771
772 #if XNU_MONITOR
773 /* Zero x26 to indicate that this should not return to the PPL. */
774 mov x26, #0
775 #endif
776
777 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
778 tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
779 b.ne 1f // kernel mode, so skip precise time update
780 PUSH_FRAME
781 bl EXT(timer_state_event_user_to_kernel)
782 POP_FRAME
783 mov x0, x21 // Reload arm_context_t pointer
784 1:
785 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
786
787 /* Dispatch to FLEH */
788
789 br x22
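/*
 * Dispatch summary (illustrative pseudocode):
 *
 *   spill_remaining_registers();                      // SPILL_REGISTERS KERNEL_MODE
 *   if (came_from_EL0) {
 *       sanitize_fpcr();
 *       zero_registers_not_needed_for_dispatch();     // avoid leaking kernel values
 *   }
 *   x21 = saved_state; x22 = fleh_handler;
 *   if (came_from_EL0) timer_state_event_user_to_kernel();
 *   goto *fleh_handler;                               // fleh_synchronous / fleh_irq / ...
 */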
790
791
792 .text
793 .align 2
794 .global EXT(fleh_synchronous)
795 LEXT(fleh_synchronous)
796
797 UNWIND_PROLOGUE
798 UNWIND_DIRECTIVES
799
800 mrs x1, ESR_EL1 // Load exception syndrome
801 mrs x2, FAR_EL1 // Load fault address
802
803 /* At this point, the LR contains the value of ELR_EL1. In the case of an
804 * instruction prefetch abort, this will be the faulting pc, which we know
805 * to be invalid. This will prevent us from backtracing through the
806 * exception if we put it in our stack frame, so we load the LR from the
807 * exception saved state instead.
808 */
809 and w3, w1, #(ESR_EC_MASK)
810 lsr w3, w3, #(ESR_EC_SHIFT)
811 mov w4, #(ESR_EC_IABORT_EL1)
812 cmp w3, w4
813 b.eq Lfleh_sync_load_lr
814 Lvalid_link_register:
815
816 PUSH_FRAME
817 bl EXT(sleh_synchronous)
818 POP_FRAME
819
820 #if XNU_MONITOR
821 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
822 #endif
823
824 mov x28, xzr // Don't need to check PFZ if there are ASTs
825 b exception_return_dispatch
826
827 Lfleh_sync_load_lr:
828 ldr lr, [x0, SS64_LR]
829 b Lvalid_link_register
830 UNWIND_EPILOGUE
831
832 /* Shared prologue code for fleh_irq and fleh_fiq.
833 * Does any interrupt bookkeeping we may want to do
834 * before invoking the handler proper.
835 * Expects:
836 * x0 - arm_context_t
837 * x23 - CPSR
838 * fp - Undefined live value (we may push a frame)
839 * lr - Undefined live value (we may push a frame)
840 * sp - Interrupt stack for the current CPU
841 */
842 .macro BEGIN_INTERRUPT_HANDLER
843 mrs x22, TPIDR_EL1
844 ldr x23, [x22, ACT_CPUDATAP] // Get current cpu
845 /* Update IRQ count */
846 ldr w1, [x23, CPU_STAT_IRQ]
847 add w1, w1, #1 // Increment count
848 str w1, [x23, CPU_STAT_IRQ] // Update IRQ count
849 ldr w1, [x23, CPU_STAT_IRQ_WAKE]
850 add w1, w1, #1 // Increment count
851 str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
852 /* Increment preempt count */
853 ldr w1, [x22, ACT_PREEMPT_CNT]
854 add w1, w1, #1
855 str w1, [x22, ACT_PREEMPT_CNT]
856 /* Store context in int state */
857 str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state
858 .endmacro
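/*
 * C-like sketch of the prologue above (illustrative; field names approximate
 * the assym.s offsets):
 *
 *   thread_t th = TPIDR_EL1;                  // x22
 *   cpu_data_t *cdp = th->machine.CpuDatap;   // x23
 *   cdp->cpu_stat_irq++;                      // IRQ count
 *   cdp->cpu_stat_irq_wake++;                 // post-wake IRQ count
 *   th->machine.preemption_count++;
 *   cdp->cpu_int_state = saved_state;         // x0
 */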
859
860 /* Shared epilogue code for fleh_irq and fleh_fiq.
861 * Cleans up after the prologue, and may do a bit more
862 * bookkeeping (kdebug related).
863 * Expects:
864 * x22 - Live TPIDR_EL1 value (thread address)
865 * x23 - Address of the current CPU data structure
866 * w24 - 0 if kdebug is disabled, nonzero otherwise
867 * fp - Undefined live value (we may push a frame)
868 * lr - Undefined live value (we may push a frame)
869 * sp - Interrupt stack for the current CPU
870 */
871 .macro END_INTERRUPT_HANDLER
872 /* Clear int context */
873 str xzr, [x23, CPU_INT_STATE]
874 /* Decrement preempt count */
875 ldr w0, [x22, ACT_PREEMPT_CNT]
876 cbnz w0, 1f // Detect underflow
877 b preempt_underflow
878 1:
879 sub w0, w0, #1
880 str w0, [x22, ACT_PREEMPT_CNT]
881 /* Switch back to kernel stack */
882 ldr x0, [x22, TH_KSTACKPTR]
883 mov sp, x0
884 .endmacro
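/*
 * Illustrative pseudocode for the epilogue above:
 *
 *   cdp->cpu_int_state = NULL;
 *   if (th->machine.preemption_count == 0) preempt_underflow();   // panics
 *   th->machine.preemption_count--;
 *   sp = th->machine.kstackptr;               // back to the kernel stack
 */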
885
886 .text
887 .align 2
888 .global EXT(fleh_irq)
889 LEXT(fleh_irq)
890 BEGIN_INTERRUPT_HANDLER
891 PUSH_FRAME
892 bl EXT(sleh_irq)
893 POP_FRAME
894 END_INTERRUPT_HANDLER
895
896 #if XNU_MONITOR
897 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
898 #endif
899
900 mov x28, #1 // Set a bit to check PFZ if there are ASTs
901 b exception_return_dispatch
902
903 .text
904 .align 2
905 .global EXT(fleh_fiq_generic)
906 LEXT(fleh_fiq_generic)
907 PANIC_UNIMPLEMENTED
908
909 .text
910 .align 2
911 .global EXT(fleh_fiq)
912 LEXT(fleh_fiq)
913 BEGIN_INTERRUPT_HANDLER
914 PUSH_FRAME
915 bl EXT(sleh_fiq)
916 POP_FRAME
917 END_INTERRUPT_HANDLER
918
919 #if XNU_MONITOR
920 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
921 #endif
922
923 mov x28, #1 // Set a bit to check PFZ if there are ASTs
924 b exception_return_dispatch
925
926 .text
927 .align 2
928 .global EXT(fleh_serror)
929 LEXT(fleh_serror)
930 mrs x1, ESR_EL1 // Load exception syndrome
931 mrs x2, FAR_EL1 // Load fault address
932
933 PUSH_FRAME
934 bl EXT(sleh_serror)
935 POP_FRAME
936
937 #if XNU_MONITOR
938 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
939 #endif
940
941 mov x28, xzr // Don't need to check PFZ if there are ASTs
942 b exception_return_dispatch
943
944 /*
945 * Register state saved before we get here.
946 */
947 .text
948 .align 2
949 fleh_invalid_stack:
950 mrs x1, ESR_EL1 // Load exception syndrome
951 str x1, [x0, SS64_ESR]
952 mrs x2, FAR_EL1 // Load fault address
953 str x2, [x0, SS64_FAR]
954 PUSH_FRAME
955 bl EXT(sleh_invalid_stack) // Shouldn't return!
956 b .
957
958 .text
959 .align 2
960 fleh_synchronous_sp1:
961 mrs x1, ESR_EL1 // Load exception syndrome
962 str x1, [x0, SS64_ESR]
963 mrs x2, FAR_EL1 // Load fault address
964 str x2, [x0, SS64_FAR]
965 PUSH_FRAME
966 bl EXT(sleh_synchronous_sp1)
967 b .
968
969 .text
970 .align 2
971 fleh_irq_sp1:
972 mov x1, x0
973 adr x0, Lsp1_irq_str
974 b EXT(panic_with_thread_kernel_state)
975 Lsp1_irq_str:
976 .asciz "IRQ exception taken while SP1 selected"
977
978 .text
979 .align 2
980 fleh_fiq_sp1:
981 mov x1, x0
982 adr x0, Lsp1_fiq_str
983 b EXT(panic_with_thread_kernel_state)
984 Lsp1_fiq_str:
985 .asciz "FIQ exception taken while SP1 selected"
986
987 .text
988 .align 2
989 fleh_serror_sp1:
990 mov x1, x0
991 adr x0, Lsp1_serror_str
992 b EXT(panic_with_thread_kernel_state)
993 Lsp1_serror_str:
994 .asciz "Asynchronous exception taken while SP1 selected"
995
996 .text
997 .align 2
998 exception_return_dispatch:
999 ldr w0, [x21, SS64_CPSR]
1000 tst w0, PSR64_MODE_EL_MASK
1001 b.ne EXT(return_to_kernel) // return to kernel if M[3:2] > 0
1002 b return_to_user
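/*
 * Illustrative pseudocode:
 *
 *   if ((saved_state->cpsr & PSR64_MODE_EL_MASK) != 0)
 *       return_to_kernel();    // exception was taken at EL1
 *   else
 *       return_to_user();
 */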
1003
1004 .text
1005 .align 2
1006 .global EXT(return_to_kernel)
1007 LEXT(return_to_kernel)
1008 tbnz w0, #DAIF_IRQF_SHIFT, exception_return // Skip AST check if IRQ disabled
1009 mrs x3, TPIDR_EL1 // Load thread pointer
1010 ldr w1, [x3, ACT_PREEMPT_CNT] // Load preemption count
1011 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
1012 cbnz x1, exception_return_unint_tpidr_x3 // If preemption disabled, skip AST check
1013 ldr x1, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1014 ldr x2, [x1, CPU_PENDING_AST] // Get ASTs
1015 tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken
1016 b.eq exception_return_unint_tpidr_x3
1017 mov sp, x21 // Switch to thread stack for preemption
1018 PUSH_FRAME
1019 bl EXT(ast_taken_kernel) // Handle AST_URGENT
1020 POP_FRAME
1021 b exception_return
1022
1023 .text
1024 .globl EXT(thread_bootstrap_return)
1025 LEXT(thread_bootstrap_return)
1026 #if CONFIG_DTRACE
1027 bl EXT(dtrace_thread_bootstrap)
1028 #endif
1029 b EXT(arm64_thread_exception_return)
1030
1031 .text
1032 .globl EXT(arm64_thread_exception_return)
1033 LEXT(arm64_thread_exception_return)
1034 mrs x0, TPIDR_EL1
1035 add x21, x0, ACT_CONTEXT
1036 ldr x21, [x21]
1037 mov x28, xzr
1038
1039 //
1040 // Fall Through to return_to_user from arm64_thread_exception_return.
1041 // Note that if we move return_to_user or insert a new routine
1042 // below arm64_thread_exception_return, the latter will need to change.
1043 //
1044 .text
1045 /* x21 is always the machine context pointer when we get here
1046 * x28 is a bit indicating whether or not we should check if pc is in pfz */
1047 return_to_user:
1048 check_user_asts:
1049 mrs x3, TPIDR_EL1 // Load thread pointer
1050
1051 movn w2, #0
1052 str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
1053
1054 #if MACH_ASSERT
1055 ldr w0, [x3, TH_RWLOCK_CNT]
1056 cbnz w0, rwlock_count_notzero // Detect unbalanced RW lock/unlock
1057
1058 ldr w0, [x3, ACT_PREEMPT_CNT]
1059 cbnz w0, preempt_count_notzero // Detect unbalanced enable/disable preemption
1060 #endif
1061 ldr w0, [x3, TH_TMP_ALLOC_CNT]
1062 cbnz w0, tmp_alloc_count_nozero // Detect KHEAP_TEMP leaks
1063
1064 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
1065 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1066 ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
1067 cbz x0, no_asts // If no asts, skip ahead
1068
1069 cbz x28, user_take_ast // If we don't need to check PFZ, just handle asts
1070
1071 /* At this point, we have ASTs and we need to check whether we are running in the
1072 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
1073 * the PFZ since we don't want to handle getting a signal or getting suspended
1074 * while holding a spinlock in userspace.
1075 *
1076 * If userspace was in the PFZ, we know (via coordination with the PFZ code
1077 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
1078 * to use it to indicate to userspace to come back to take a delayed
1079 * preemption, at which point the ASTs will be handled. */
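/*
 * Illustrative pseudocode for the PFZ handling below:
 *
 *   check_pfz = false;                              // x28 = 0: only check once
 *   if (commpage_is_in_pfz64(saved_state->pc)) {
 *       saved_state->x[15] = 1;                     // ask userspace to take a delayed preemption
 *       goto no_asts;                               // act as if no ASTs were pending
 *   }
 *   goto user_take_ast;                             // otherwise service the ASTs now
 */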
1080 mov x28, xzr // Clear the "check PFZ" bit so that we don't do this again
1081 mov x19, x0 // Save x0 since it will be clobbered by commpage_is_in_pfz64
1082
1083 ldr x0, [x21, SS64_PC] // Load pc from machine state
1084 bl EXT(commpage_is_in_pfz64) // pc in pfz?
1085 cbz x0, restore_and_check_ast // No, deal with other asts
1086
1087 mov x0, #1
1088 str x0, [x21, SS64_X15] // Mark x15 for userspace to take delayed preemption
1089 mov x0, x19 // restore x0 to asts
1090 b no_asts // pretend we have no asts
1091
1092 restore_and_check_ast:
1093 mov x0, x19 // restore x0
1094 b user_take_ast // Service pending asts
1095 no_asts:
1096
1097
1098 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
1099 mov x19, x3 // Preserve thread pointer across function call
1100 PUSH_FRAME
1101 bl EXT(timer_state_event_kernel_to_user)
1102 POP_FRAME
1103 mov x3, x19
1104 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
1105
1106 #if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
1107 /* Watchtower
1108 *
1109 * Here we attempt to enable NEON access for EL0. If the last entry into the
1110 * kernel from user-space was due to an IRQ, the monitor will have disabled
1111 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
1112 * check in with the monitor in order to reenable NEON for EL0 in exchange
1113 * for routing IRQs through the monitor (2). This way the monitor will
1114 * always 'own' either IRQs or EL0 NEON.
1115 *
1116 * If Watchtower is disabled or we did not enter the kernel through an IRQ
1117 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
1118 * here.
1119 *
1120 * EL0 user ________ IRQ ______
1121 * EL1 xnu \ ______________________ CPACR_EL1 __/
1122 * EL3 monitor \_/ \___/
1123 *
1124 * (1) (2)
1125 */
1126
1127 mov x0, #(CPACR_FPEN_ENABLE)
1128 msr CPACR_EL1, x0
1129 #endif
1130
1131 /* Establish this thread's debug state as the live state on the selected CPU. */
1132 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1133 ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
1134 ldr x0, [x3, ACT_DEBUGDATA]
1135 cmp x0, x1
1136 beq L_skip_user_set_debug_state // If active CPU debug state does not match thread debug state, apply thread state
1137
1138 #if defined(APPLELIGHTNING)
1139 /* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */
1140
1141 ARM64_IS_PCORE x12 // if we're not a pCORE, also do nothing
1142 cbz x12, 1f
1143
1144 #endif
1145
1146 #if defined(APPLELIGHTNING)
1147
1148 mrs x12, ARM64_REG_HID1 // if any debug session ever existed, set forceNexL3ClkOn
1149 orr x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
1150 msr ARM64_REG_HID1, x12
1151 1:
1152
1153 #endif
1154
1155 PUSH_FRAME
1156 bl EXT(arm_debug_set) // Establish thread debug state in live regs
1157 POP_FRAME
1158 mrs x3, TPIDR_EL1 // Reload thread pointer
1159 L_skip_user_set_debug_state:
1160
1161
1162 b exception_return_unint_tpidr_x3
1163
1164 //
1165 // Fall through from return_to_user to exception_return.
1166 // Note that if we move exception_return or add a new routine below
1167 // return_to_user, the latter will have to change.
1168 //
1169
1170 exception_return:
1171 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
1172 exception_return_unint:
1173 mrs x3, TPIDR_EL1 // Load thread pointer
1174 exception_return_unint_tpidr_x3:
1175 mov sp, x21 // Reload the pcb pointer
1176
1177 exception_return_unint_tpidr_x3_dont_trash_x18:
1178
1179
1180 #if __ARM_KERNEL_PROTECT__
1181 /*
1182 * If we are going to eret to userspace, we must return through the EL0
1183 * eret mapping.
1184 */
1185 ldr w1, [sp, SS64_CPSR] // Load CPSR
1186 tbnz w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1
1187
1188 /* We need to switch to the EL0 mapping of this code to eret to EL0. */
1189 adrp x0, EXT(ExceptionVectorsBase)@page // Load vector base
1190 adrp x1, Lexception_return_restore_registers@page // Load target PC
1191 add x1, x1, Lexception_return_restore_registers@pageoff
1192 MOV64 x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address
1193 sub x1, x1, x0 // Calculate delta
1194 add x0, x2, x1 // Convert KVA to EL0 vector address
1195 br x0
1196
1197 Lskip_el0_eret_mapping:
1198 #endif /* __ARM_KERNEL_PROTECT__ */
1199
1200 Lexception_return_restore_registers:
1201 mov x0, sp // x0 = &pcb
1202 // Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
1203 AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24, el0_state_allowed=1
1204
1205 /* Restore special register state */
1206 ldr w3, [sp, NS64_FPSR]
1207 ldr w4, [sp, NS64_FPCR]
1208
1209 msr ELR_EL1, x1 // Load the return address into ELR
1210 msr SPSR_EL1, x2 // Load the return CPSR into SPSR
1211 msr FPSR, x3
1212 mrs x5, FPCR
1213 CMSR FPCR, x5, x4, 1
1214 1:
1215
1216 #if defined(HAS_APPLE_PAC)
1217 // if (eret to userspace) {
1218 and x2, x2, #(PSR64_MODE_EL_MASK)
1219 cmp x2, #(PSR64_MODE_EL0)
1220 bne Ldone_reconfigure_jop
1221 // thread_t thread = current_thread();
1222 // bool disable_jop;
1223 // if (arm_user_jop_disabled()) {
1224 // /* if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on) */
1225 // disable_jop = true;
1226 // } else {
1227 // disable_jop = thread->machine.disable_user_jop;
1228 // }
1229 #if DEVELOPMENT || DEBUG
1230 adrp x4, EXT(const_boot_args)@page
1231 add x4, x4, EXT(const_boot_args)@pageoff
1232 ldr x4, [x4, BA_BOOT_FLAGS]
1233 and x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP
1234 cbnz x1, Ldisable_jop
1235 #endif
1236 mrs x2, TPIDR_EL1
1237 ldrb w1, [x2, TH_DISABLE_USER_JOP]
1238 cbz w1, Lenable_jop
1239 // if (disable_jop) {
1240 // if (cpu does not have discrete JOP-at-EL1 bit) {
1241 // disable_sctlr_jop_keys();
1242 // }
1243 // } else {
1244 // if (cpu does not have fast A-key switching) {
1245 // reprogram_jop_keys(thread->machine.jop_pid);
1246 // }
1247 // }
1248 // }
1249 Ldisable_jop:
1250 #if !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
1251 MOV64 x1, SCTLR_JOP_KEYS_ENABLED
1252 mrs x4, SCTLR_EL1
1253 bic x4, x4, x1
1254 msr SCTLR_EL1, x4
1255 MOV64 x1, SCTLR_EL1_EXPECTED
1256 cmp x4, x1
1257 bne .
1258 #endif /* !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
1259 b Ldone_reconfigure_jop
1260 Lenable_jop:
1261 #if HAS_PAC_SLOW_A_KEY_SWITCHING
1262 IF_PAC_FAST_A_KEY_SWITCHING Ldone_reconfigure_jop, x1
1263 ldr x1, [x2, TH_JOP_PID]
1264 ldr x2, [x2, ACT_CPUDATAP]
1265 REPROGRAM_JOP_KEYS Ldone_reconfigure_jop, x1, x2, x3
1266 #endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
1267 Ldone_reconfigure_jop:
1268 #endif /* defined(HAS_APPLE_PAC) */
1269
1270 /* Restore arm_neon_saved_state64 */
1271 ldp q0, q1, [x0, NS64_Q0]
1272 ldp q2, q3, [x0, NS64_Q2]
1273 ldp q4, q5, [x0, NS64_Q4]
1274 ldp q6, q7, [x0, NS64_Q6]
1275 ldp q8, q9, [x0, NS64_Q8]
1276 ldp q10, q11, [x0, NS64_Q10]
1277 ldp q12, q13, [x0, NS64_Q12]
1278 ldp q14, q15, [x0, NS64_Q14]
1279 ldp q16, q17, [x0, NS64_Q16]
1280 ldp q18, q19, [x0, NS64_Q18]
1281 ldp q20, q21, [x0, NS64_Q20]
1282 ldp q22, q23, [x0, NS64_Q22]
1283 ldp q24, q25, [x0, NS64_Q24]
1284 ldp q26, q27, [x0, NS64_Q26]
1285 ldp q28, q29, [x0, NS64_Q28]
1286 ldp q30, q31, [x0, NS64_Q30]
1287
1288 /* Restore arm_saved_state64 */
1289
1290 // Skip x0, x1 - we're using them
1291 ldp x2, x3, [x0, SS64_X2]
1292 ldp x4, x5, [x0, SS64_X4]
1293 ldp x6, x7, [x0, SS64_X6]
1294 ldp x8, x9, [x0, SS64_X8]
1295 ldp x10, x11, [x0, SS64_X10]
1296 ldp x12, x13, [x0, SS64_X12]
1297 ldp x14, x15, [x0, SS64_X14]
1298 // Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1299 ldp x18, x19, [x0, SS64_X18]
1300 ldp x20, x21, [x0, SS64_X20]
1301 ldp x22, x23, [x0, SS64_X22]
1302 ldp x24, x25, [x0, SS64_X24]
1303 ldp x26, x27, [x0, SS64_X26]
1304 ldr x28, [x0, SS64_X28]
1305 ldr fp, [x0, SS64_FP]
1306 // Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1307
1308 // Restore stack pointer and our last two GPRs
1309 ldr x1, [x0, SS64_SP]
1310 mov sp, x1
1311
1312 #if __ARM_KERNEL_PROTECT__
1313 ldr w18, [x0, SS64_CPSR] // Stash CPSR
1314 #endif /* __ARM_KERNEL_PROTECT__ */
1315
1316 ldp x0, x1, [x0, SS64_X0] // Restore the GPRs
1317
1318 #if __ARM_KERNEL_PROTECT__
1319 /* If we are going to eret to userspace, we must unmap the kernel. */
1320 tbnz w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1321
1322 /* Update TCR to unmap the kernel. */
1323 MOV64 x18, TCR_EL1_USER
1324 msr TCR_EL1, x18
1325
1326 /*
1327 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1328 * each other due to the microarchitecture.
1329 */
1330 #if !defined(APPLE_ARM64_ARCH_FAMILY)
1331 isb sy
1332 #endif
1333
1334 /* Switch to the user ASID (low bit clear) for the task. */
1335 mrs x18, TTBR0_EL1
1336 bic x18, x18, #(1 << TTBR_ASID_SHIFT)
1337 msr TTBR0_EL1, x18
1338 mov x18, #0
1339
1340 /* We don't need an ISB here, as the eret is synchronizing. */
1341 Lskip_ttbr1_switch:
1342 #endif /* __ARM_KERNEL_PROTECT__ */
1343
1344 ERET_CONTEXT_SYNCHRONIZING
1345
1346 user_take_ast:
1347 PUSH_FRAME
1348 bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation
1349 POP_FRAME
1350 b check_user_asts // Now try again
1351
1352 .text
1353 .align 2
1354 preempt_underflow:
1355 mrs x0, TPIDR_EL1
1356 str x0, [sp, #-16]! // We'll print thread pointer
1357 adr x0, L_underflow_str // Format string
1358 CALL_EXTERN panic // Game over
1359
1360 L_underflow_str:
1361 .asciz "Preemption count negative on thread %p"
1362 .align 2
1363
1364 #if MACH_ASSERT
1365 .text
1366 .align 2
1367 rwlock_count_notzero:
1368 mrs x0, TPIDR_EL1
1369 str x0, [sp, #-16]! // We'll print thread pointer
1370 ldr w0, [x0, TH_RWLOCK_CNT]
1371 str w0, [sp, #8]
1372 adr x0, L_rwlock_count_notzero_str // Format string
1373 CALL_EXTERN panic // Game over
1374
1375 L_rwlock_count_notzero_str:
1376 .asciz "RW lock count not 0 on thread %p (%u)"
1377
1378 .text
1379 .align 2
1380 preempt_count_notzero:
1381 mrs x0, TPIDR_EL1
1382 str x0, [sp, #-16]! // We'll print thread pointer
1383 ldr w0, [x0, ACT_PREEMPT_CNT]
1384 str w0, [sp, #8]
1385 adr x0, L_preempt_count_notzero_str // Format string
1386 CALL_EXTERN panic // Game over
1387
1388 L_preempt_count_notzero_str:
1389 .asciz "preemption count not 0 on thread %p (%u)"
1390 #endif /* MACH_ASSERT */
1391
1392 .text
1393 .align 2
1394 tmp_alloc_count_nozero:
1395 mrs x0, TPIDR_EL1
1396 CALL_EXTERN kheap_temp_leak_panic
1397
1398 #if __ARM_KERNEL_PROTECT__
1399 /*
1400 * This symbol denotes the end of the exception vector/eret range; we page
1401 * align it so that we can avoid mapping other text in the EL0 exception
1402 * vector mapping.
1403 */
1404 .text
1405 .align 14
1406 .globl EXT(ExceptionVectorsEnd)
1407 LEXT(ExceptionVectorsEnd)
1408 #endif /* __ARM_KERNEL_PROTECT__ */
1409
1410 #if XNU_MONITOR
1411 #if __APRR_SUPPORTED__
1412 .text
1413 .align 2
1414 el1_sp0_synchronous_vector_not_in_kernel_mode:
1415 EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1
1416
1417 .text
1418 .align 2
1419 el1_sp0_fiq_vector_not_in_kernel_mode:
1420 EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0
1421
1422 .text
1423 .align 2
1424 el1_sp0_irq_vector_not_in_kernel_mode:
1425 EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0
1426
1427 .text
1428 .align 2
1429 el1_sp0_serror_vector_not_in_kernel_mode:
1430 EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0
1431 #endif /* __APRR_SUPPORTED__ */
1432
1433 /*
1434 * Functions to preflight the fleh handlers when the PPL has taken an exception;
1435 * mostly concerned with setting up state for the normal fleh code.
1436 */
1437 fleh_synchronous_from_ppl:
1438 /* Save x0. */
1439 mov x15, x0
1440
1441 /* Grab the ESR. */
1442 mrs x1, ESR_EL1 // Get the exception syndrome
1443
1444 /* If the stack pointer is corrupt, it will manifest either as a data abort
1445 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
1446 * these quickly by testing bit 5 of the exception class.
1447 */
1448 tbz x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
1449 mrs x0, SP_EL0 // Get SP_EL0
1450
1451 /* Perform high level checks for stack corruption. */
1452 and x1, x1, #ESR_EC_MASK // Mask the exception class
1453 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
1454 cmp x1, x2 // If we have a stack alignment exception
1455 b.eq Lcorrupt_ppl_stack // ...the stack is definitely corrupted
1456 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
1457 cmp x1, x2 // If we have a data abort, we need to
1458 b.ne Lvalid_ppl_stack // ...validate the stack pointer
1459
1460 Ltest_pstack:
1461 /* Bounds check the PPL stack. */
1462 adrp x10, EXT(pmap_stacks_start)@page
1463 ldr x10, [x10, #EXT(pmap_stacks_start)@pageoff]
1464 adrp x11, EXT(pmap_stacks_end)@page
1465 ldr x11, [x11, #EXT(pmap_stacks_end)@pageoff]
1466 cmp x0, x10
1467 b.lo Lcorrupt_ppl_stack
1468 cmp x0, x11
1469 b.hi Lcorrupt_ppl_stack
1470
1471 Lvalid_ppl_stack:
1472 /* Restore x0. */
1473 mov x0, x15
1474
1475 /* Switch back to the kernel stack. */
1476 msr SPSel, #0
1477 GET_PMAP_CPU_DATA x5, x6, x7
1478 ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1479 mov sp, x6
1480
1481 /* Hand off to the synch handler. */
1482 b EXT(fleh_synchronous)
1483
1484 Lcorrupt_ppl_stack:
1485 /* Restore x0. */
1486 mov x0, x15
1487
1488 /* Hand off to the invalid stack handler. */
1489 b fleh_invalid_stack
1490
1491 fleh_fiq_from_ppl:
1492 SWITCH_TO_INT_STACK
1493 b EXT(fleh_fiq)
1494
1495 fleh_irq_from_ppl:
1496 SWITCH_TO_INT_STACK
1497 b EXT(fleh_irq)
1498
1499 fleh_serror_from_ppl:
1500 GET_PMAP_CPU_DATA x5, x6, x7
1501 ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1502 mov sp, x6
1503 b EXT(fleh_serror)
1504
1505
1506 #if XNU_MONITOR && __APRR_SUPPORTED__
1507 /*
1508 * aprr_ppl_enter
1509 *
1510 * Invokes the PPL
1511 * x15 - The index of the requested PPL function.
1512 */
1513 .text
1514 .align 2
1515 .globl EXT(aprr_ppl_enter)
1516 LEXT(aprr_ppl_enter)
1517 /* Push a frame. */
1518 ARM64_STACK_PROLOG
1519 stp x20, x21, [sp, #-0x20]!
1520 stp x29, x30, [sp, #0x10]
1521 add x29, sp, #0x10
1522
1523 /* Increase the preemption count. */
1524 mrs x10, TPIDR_EL1
1525 ldr w12, [x10, ACT_PREEMPT_CNT]
1526 add w12, w12, #1
1527 str w12, [x10, ACT_PREEMPT_CNT]
1528
1529 /* Is the PPL currently locked down? */
1530 adrp x13, EXT(pmap_ppl_locked_down)@page
1531 add x13, x13, EXT(pmap_ppl_locked_down)@pageoff
1532 ldr w14, [x13]
1533 cmp w14, wzr
1534
1535 /* If not, just perform the call in the current context. */
1536 b.eq EXT(ppl_bootstrap_dispatch)
1537
1538 mov w10, #PPL_STATE_KERNEL
1539 b Ldisable_aif_and_enter_ppl
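/*
 * Illustrative pseudocode for aprr_ppl_enter (not assembled):
 *
 *   push_frame();
 *   current_thread()->machine.preemption_count++;
 *   if (!pmap_ppl_locked_down)
 *       return ppl_bootstrap_dispatch();       // PPL not locked down yet: plain call
 *   w10 = PPL_STATE_KERNEL;
 *   goto Ldisable_aif_and_enter_ppl;           // mask AIF, switch APRR_EL1, dispatch
 */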
1540
1541 /* We align this to land the next few instructions on their own page. */
1542 .section __PPLTRAMP,__text,regular,pure_instructions
1543 .align 14
1544 .space (16*1024)-(4*8) // 8 insns
1545
1546 /*
1547 * This label is used by exception handlers that are trying to return
1548 * to the PPL.
1549 */
1550 Ldisable_aif_and_enter_ppl:
1551 /* We must trampoline to the PPL context; disable AIF. */
1552 mrs x20, DAIF
1553 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1554
1555 .globl EXT(ppl_no_exception_start)
1556 LEXT(ppl_no_exception_start)
1557 /* Switch APRR_EL1 to PPL mode. */
1558 MOV64 x14, APRR_EL1_PPL
1559 msr APRR_EL1, x14
1560
1561 /* This ISB should be the last instruction on a page. */
1562 // TODO: can we static assert this?
1563 isb
1564 #endif /* XNU_MONITOR && __APRR_SUPPORTED__ */
1565
1566
1567 // x15: ppl call number
1568 // w10: ppl_state
1569 // x20: gxf_enter caller's DAIF
1570 .globl EXT(ppl_trampoline_start)
1571 LEXT(ppl_trampoline_start)
1572
1573 #if __APRR_SUPPORTED__
1574 /* Squash AIF AGAIN, because someone may have attacked us. */
1575 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1576 #endif /* __APRR_SUPPORTED__ */
1577
1578 #if __APRR_SUPPORTED__
1579 /* Verify the state of APRR_EL1. */
1580 MOV64 x14, APRR_EL1_PPL
1581 mrs x21, APRR_EL1
1582 #else /* __APRR_SUPPORTED__ */
1583 #error "XPRR configuration error"
1584 #endif /* __APRR_SUPPORTED__ */
1585 cmp x14, x21
1586 b.ne Lppl_fail_dispatch
1587
1588 /* Verify the request ID. */
1589 cmp x15, PMAP_COUNT
1590 b.hs Lppl_fail_dispatch
1591
1592 GET_PMAP_CPU_DATA x12, x13, x14
1593
1594 /* Mark this CPU as being in the PPL. */
1595 ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1596
1597 cmp w9, #PPL_STATE_KERNEL
1598 b.eq Lppl_mark_cpu_as_dispatching
1599
1600 /* Check to see if we are trying to trap from within the PPL. */
1601 cmp w9, #PPL_STATE_DISPATCH
1602 b.eq Lppl_fail_dispatch_ppl
1603
1604
1605 /* Ensure that we are returning from an exception. */
1606 cmp w9, #PPL_STATE_EXCEPTION
1607 b.ne Lppl_fail_dispatch
1608
1609 // where is w10 set?
1610 // in CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1611 cmp w10, #PPL_STATE_EXCEPTION
1612 b.ne Lppl_fail_dispatch
1613
1614 /* This is an exception return; set the CPU to the dispatching state. */
1615 mov w9, #PPL_STATE_DISPATCH
1616 str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1617
1618 /* Find the save area, and return to the saved PPL context. */
1619 ldr x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
1620 mov sp, x0
1621 #if __APRR_SUPPORTED__
1622 b Lexception_return_restore_registers
1623 #else
1624 b EXT(return_to_ppl)
1625 #endif /* __APRR_SUPPORTED__ */
1626
1627 Lppl_mark_cpu_as_dispatching:
1628 cmp w10, #PPL_STATE_KERNEL
1629 b.ne Lppl_fail_dispatch
1630
1631 /* Mark the CPU as dispatching. */
1632 mov w13, #PPL_STATE_DISPATCH
1633 str w13, [x12, PMAP_CPU_DATA_PPL_STATE]
1634
1635 /* Switch to the regular PPL stack. */
1636 // TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
1637 ldr x9, [x12, PMAP_CPU_DATA_PPL_STACK]
1638
1639 // SP0 is thread stack here
1640 mov x21, sp
1641 // SP0 is now PPL stack
1642 mov sp, x9
1643
1644 /* Save the old stack pointer off in case we need it. */
1645 str x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1646
1647 /* Get the handler for the request */
1648 adrp x9, EXT(ppl_handler_table)@page
1649 add x9, x9, EXT(ppl_handler_table)@pageoff
1650 add x9, x9, x15, lsl #3
1651 ldr x10, [x9]
1652
1653 /* Branch to the code that will invoke the PPL request. */
1654 b EXT(ppl_dispatch)
1655
1656 Lppl_fail_dispatch_ppl:
1657 /* Switch back to the kernel stack. */
1658 ldr x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1659 mov sp, x10
1660
1661 Lppl_fail_dispatch:
1662 /* Indicate that we failed. */
1663 mov x15, #PPL_EXIT_BAD_CALL
1664
1665 /* Move the DAIF bits into the expected register. */
1666 mov x10, x20
1667
1668 /* Return to kernel mode. */
1669 b ppl_return_to_kernel_mode
1670
1671 Lppl_dispatch_exit:
1672 /* Indicate that we are cleanly exiting the PPL. */
1673 mov x15, #PPL_EXIT_DISPATCH
1674
1675 /* Switch back to the original (kernel thread) stack. */
1676 mov sp, x21
1677
1678 /* Move the saved DAIF bits. */
1679 mov x10, x20
1680
1681 /* Clear the old stack pointer. */
1682 str xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1683
1684 /*
1685 * Mark the CPU as no longer being in the PPL. We spin if our state
1686 * machine is broken.
1687 */
1688 ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1689 cmp w9, #PPL_STATE_DISPATCH
1690 b.ne .
1691 mov w9, #PPL_STATE_KERNEL
1692 str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1693
1694 /* Return to the kernel. */
1695 b ppl_return_to_kernel_mode
1696
1697 #if __APRR_SUPPORTED__
1698 /* We align this to land the next few instructions on their own page. */
1699 .align 14
1700 .space (16*1024)-(4*5) // 5 insns
1701
1702 ppl_return_to_kernel_mode:
1703 /* Switch APRR_EL1 back to the kernel mode. */
1704 // must be 5 instructions
1705 MOV64 x14, APRR_EL1_DEFAULT
1706 msr APRR_EL1, x14
1707
1708 .globl EXT(ppl_trampoline_end)
1709 LEXT(ppl_trampoline_end)
1710
1711 /* This should be the first instruction on a page. */
1712 isb
1713
1714 .globl EXT(ppl_no_exception_end)
1715 LEXT(ppl_no_exception_end)
1716 b ppl_exit
1717 #endif /* __APRR_SUPPORTED__ */
1718
1719
1720 .text
1721 ppl_exit:
1722 /*
1723 * If we are dealing with an exception, hand off to the first level
1724 * exception handler.
1725 */
1726 cmp x15, #PPL_EXIT_EXCEPTION
1727 b.eq Ljump_to_fleh_handler
1728
1729 /* Restore the original AIF state. */
1730 REENABLE_DAIF x10
1731
1732 /* If this was a panic call from the PPL, reinvoke panic. */
1733 cmp x15, #PPL_EXIT_PANIC_CALL
1734 b.eq Ljump_to_panic_trap_to_debugger
1735
1736 /* Load the preemption count. */
1737 mrs x10, TPIDR_EL1
1738 ldr w12, [x10, ACT_PREEMPT_CNT]
1739
1740 /* Detect underflow */
1741 cbnz w12, Lno_preempt_underflow
1742 b preempt_underflow
1743 Lno_preempt_underflow:
1744
1745 /* Lower the preemption count. */
1746 sub w12, w12, #1
1747 str w12, [x10, ACT_PREEMPT_CNT]
1748
1749 /* Skip ASTs if the preemption count is not zero. */
1750 cbnz x12, Lppl_skip_ast_taken
1751
1752 /* Skip the AST check if interrupts are disabled. */
1753 mrs x1, DAIF
1754 tst x1, #DAIF_IRQF
1755 b.ne Lppl_skip_ast_taken
1756
1757 /* Disable interrupts. */
1758 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)
1759
1760 /* If there is no urgent AST, skip the AST. */
1761 ldr x12, [x10, ACT_CPUDATAP]
1762 ldr x14, [x12, CPU_PENDING_AST]
1763 tst x14, AST_URGENT
1764 b.eq Lppl_defer_ast_taken
1765
1766 /* Stash our return value and return reason. */
1767 mov x20, x0
1768 mov x21, x15
1769
1770 /* Handle the AST. */
1771 bl EXT(ast_taken_kernel)
1772
1773 /* Restore the return value and the return reason. */
1774 mov x15, x21
1775 mov x0, x20
1776
1777 Lppl_defer_ast_taken:
1778 /* Reenable interrupts. */
1779 msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
1780
1781 Lppl_skip_ast_taken:
1782 /* Pop the stack frame. */
1783 ldp x29, x30, [sp, #0x10]
1784 ldp x20, x21, [sp], #0x20
1785
1786 /* Check to see if this was a bad request. */
1787 cmp x15, #PPL_EXIT_BAD_CALL
1788 b.eq Lppl_bad_call
1789
1790 /* Return. */
1791 ARM64_STACK_EPILOG
1792
1793 .align 2
1794 Ljump_to_fleh_handler:
1795 br x25
1796
1797 .align 2
1798 Ljump_to_panic_trap_to_debugger:
1799 b EXT(panic_trap_to_debugger)
1800
1801 Lppl_bad_call:
1802 /* Panic. */
1803 adrp x0, Lppl_bad_call_panic_str@page
1804 add x0, x0, Lppl_bad_call_panic_str@pageoff
1805 b EXT(panic)
1806
1807 .text
1808 .align 2
1809 .globl EXT(ppl_dispatch)
1810 LEXT(ppl_dispatch)
1811 /*
1812 * Save a couple of important registers (implementation detail; x12 has
1813 * the PPL per-CPU data address; x13 is not actually interesting).
1814 */
1815 stp x12, x13, [sp, #-0x10]!
1816
1817 /* Restore the original AIF state. */
1818 REENABLE_DAIF x20
1819
1820 /*
1821 * Note that if the method is NULL, we'll blow up with a prefetch abort,
1822 * but the exception vectors will deal with this properly.
1823 */
1824
1825 /* Invoke the PPL method. */
1826 #ifdef HAS_APPLE_PAC
1827 blraa x10, x9
1828 #else
1829 blr x10
1830 #endif
1831
1832 /* Disable AIF. */
1833 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1834
1835 /* Restore those important registers. */
1836 ldp x12, x13, [sp], #0x10
1837
1838 /* Mark this as a regular return, and hand off to the return path. */
1839 b Lppl_dispatch_exit
1840
1841 .text
1842 .align 2
1843 .globl EXT(ppl_bootstrap_dispatch)
1844 LEXT(ppl_bootstrap_dispatch)
1845 /* Verify the PPL request. */
1846 cmp x15, PMAP_COUNT
1847 b.hs Lppl_fail_bootstrap_dispatch
1848
1849 /* Get the requested PPL routine. */
1850 adrp x9, EXT(ppl_handler_table)@page
1851 add x9, x9, EXT(ppl_handler_table)@pageoff
1852 add x9, x9, x15, lsl #3
1853 ldr x10, [x9]
1854
1855 /* Invoke the requested PPL routine. */
1856 #ifdef HAS_APPLE_PAC
1857 blraa x10, x9
1858 #else
1859 blr x10
1860 #endif
1861 /* Stash off the return value */
1862 mov x20, x0
1863 /* Drop the preemption count */
1864 bl EXT(_enable_preemption)
1865 mov x0, x20
1866
1867 /* Pop the stack frame. */
1868 ldp x29, x30, [sp, #0x10]
1869 ldp x20, x21, [sp], #0x20
1870 #if __has_feature(ptrauth_returns)
1871 retab
1872 #else
1873 ret
1874 #endif
1875
1876 Lppl_fail_bootstrap_dispatch:
1877 /* Pop our stack frame and panic. */
1878 ldp x29, x30, [sp, #0x10]
1879 ldp x20, x21, [sp], #0x20
1880 #if __has_feature(ptrauth_returns)
1881 autibsp
1882 #endif
1883 adrp x0, Lppl_bad_call_panic_str@page
1884 add x0, x0, Lppl_bad_call_panic_str@pageoff
1885 b EXT(panic)
1886
1887 .text
1888 .align 2
1889 .globl EXT(ml_panic_trap_to_debugger)
1890 LEXT(ml_panic_trap_to_debugger)
1891 mrs x10, DAIF
1892 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1893
1894 adrp x12, EXT(pmap_ppl_locked_down)@page
1895 ldr w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
1896 cbz w12, Lnot_in_ppl_dispatch
1897
1898 LOAD_PMAP_CPU_DATA x11, x12, x13
1899
1900 ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE]
1901 cmp w12, #PPL_STATE_DISPATCH
1902 b.ne Lnot_in_ppl_dispatch
1903
1904 /* Indicate (for the PPL->kernel transition) that we are panicking. */
1905 mov x15, #PPL_EXIT_PANIC_CALL
1906
1907 /* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
1908 ldr x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
1909 mov sp, x12
1910
1911 // We want interrupts to stay masked after exiting the PPL when calling into panic to halt the system.
1912 // x10 is used by ppl_return_to_kernel_mode to restore the desired DAIF state after GEXIT.
1913 mrs x10, DAIF
1914 mov w13, #PPL_STATE_PANIC
1915 str w13, [x11, PMAP_CPU_DATA_PPL_STATE]
1916
1917 /* Now we are ready to exit the PPL. */
1918 b ppl_return_to_kernel_mode
1919 Lnot_in_ppl_dispatch:
1920 REENABLE_DAIF x10
1921 ret
1922
1923 .data
1924 Lppl_bad_call_panic_str:
1925 .asciz "ppl_dispatch: failed due to bad arguments/state"
1926 #else /* XNU_MONITOR */
1927 .text
1928 .align 2
1929 .globl EXT(ml_panic_trap_to_debugger)
1930 LEXT(ml_panic_trap_to_debugger)
1931 ret
1932 #endif /* XNU_MONITOR */
1933
1934 /* ARM64_TODO Is globals_asm.h needed? */
1935 //#include "globals_asm.h"
1936
1937 /* vim: set ts=4: */