1 /*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <machine/asm.h>
30 #include <arm64/machine_routines_asm.h>
31 #include <arm64/proc_reg.h>
32 #include <pexpert/arm64/board_config.h>
33 #include <mach/exception_types.h>
34 #include <mach_kdp.h>
35 #include <config_dtrace.h>
36 #include "assym.s"
37 #include <arm64/exception_asm.h>
38 #include "dwarf_unwind.h"
39
40 #if __ARM_KERNEL_PROTECT__
41 #include <arm/pmap.h>
42 #endif
43
44 #if XNU_MONITOR
45 /*
46 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
47 *
48 * Checks if an exception was taken from the PPL, and if so, trampolines back
49 * into the PPL.
50 * x26 - 0 if the exception was taken while in the kernel, 1 if the
51 * exception was taken while in the PPL.
52 */
53 .macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
54 cmp x26, xzr
55 b.eq 1f
56
57 /* Return to the PPL. */
58 mov x15, #0
59 mov w10, #PPL_STATE_EXCEPTION
60 #error "XPRR configuration error"
61 1:
62 .endmacro
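/*
 * XNU_MONITOR (the PPL) is not enabled in the public open-source kernel
 * configurations, so this macro is not normally assembled; the #error above
 * appears to stand in for an XPRR/GXF-specific return-to-PPL sequence that is
 * only present in configurations which actually support the PPL.
 */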
63
64
65 #endif /* XNU_MONITOR */
66
67 #define CBF_DISABLE 0
68 #define CBF_ENABLE 1
69
70 .macro COMPARE_BRANCH_FUSION
71 #if defined(APPLE_ARM64_ARCH_FAMILY)
72 mrs $1, ARM64_REG_HID1
73 .if $0 == CBF_DISABLE
74 orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
75 .else
76 mov $2, ARM64_REG_HID1_disCmpBrFusion
77 bic $1, $1, $2
78 .endif
79 msr ARM64_REG_HID1, $1
80 .if $0 == CBF_DISABLE
81 isb sy
82 .endif
83 #endif
84 .endmacro
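/*
 * COMPARE_BRANCH_FUSION takes three arguments: $0 is CBF_ENABLE or CBF_DISABLE,
 * and $1/$2 are scratch general-purpose registers. It read-modify-writes the
 * disCmpBrFusion chicken bit in the implementation-defined HID1 register on
 * Apple cores, and issues an ISB only when disabling, presumably so the new
 * setting takes effect before any subsequent compare+branch sequences.
 */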
85
86 /*
87 * MAP_KERNEL
88 *
89 * Restores the kernel EL1 mappings, if necessary.
90 *
91 * This may mutate x18.
92 */
93 .macro MAP_KERNEL
94 #if __ARM_KERNEL_PROTECT__
95 /* Switch to the kernel ASID (low bit set) for the task. */
96 mrs x18, TTBR0_EL1
97 orr x18, x18, #(1 << TTBR_ASID_SHIFT)
98 msr TTBR0_EL1, x18
99
100 /*
101 * We eschew some barriers on Apple CPUs, as relative ordering of writes
102 * to the TTBRs and writes to the TCR should be ensured by the
103 * microarchitecture.
104 */
105 #if !defined(APPLE_ARM64_ARCH_FAMILY)
106 isb sy
107 #endif
108
109 /*
110 * Update the TCR to map the kernel now that we are using the kernel
111 * ASID.
112 */
113 MOV64 x18, TCR_EL1_BOOT
114 msr TCR_EL1, x18
115 isb sy
116 #endif /* __ARM_KERNEL_PROTECT__ */
117 .endmacro
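/*
 * In short: MAP_KERNEL is invoked from the EL0 vector stubs below when
 * __ARM_KERNEL_PROTECT__ is configured. It flips TTBR0_EL1 to the kernel ASID
 * (low ASID bit set) and reloads TCR_EL1 with TCR_EL1_BOOT, which is presumed
 * to re-enable the kernel's TTBR1 translations that were turned off while
 * running user code; the exception return path below restores TCR_EL1_USER and
 * the user ASID.
 */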
118
119 /*
120 * BRANCH_TO_KVA_VECTOR
121 *
122 * Branches to the requested long exception vector in the kernelcache.
123 * arg0 - The label to branch to
124 * arg1 - The index of the label in exc_vectors_tables
125 *
126 * This may mutate x18.
127 */
128 .macro BRANCH_TO_KVA_VECTOR
129 #if __ARM_KERNEL_PROTECT__
130 /*
131 * Find the kernelcache table for the exception vectors by accessing
132 * the per-CPU data.
133 */
134 mrs x18, TPIDR_EL1
135 ldr x18, [x18, ACT_CPUDATAP]
136 ldr x18, [x18, CPU_EXC_VECTORS]
137
138 /*
139 * Get the handler for this exception and jump to it.
140 */
141 ldr x18, [x18, #($1 << 3)]
142 br x18
143 #else
144 b $0
145 #endif /* __ARM_KERNEL_PROTECT__ */
146 .endmacro
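/*
 * With __ARM_KERNEL_PROTECT__, the stubs in the vectors page may be executing
 * through the fixed EL0 alias mapping of that page rather than at its kernel
 * virtual address, so a PC-relative branch to the long handlers would not land
 * in the right place. Instead, each stub loads the handler's absolute kernel
 * virtual address from the per-CPU copy of the table (CPU_EXC_VECTORS) and
 * branches indirectly.
 */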
147
148 /*
149 * CHECK_KERNEL_STACK
150 *
151 * Verifies that the kernel stack is aligned and mapped within an expected
152 * stack address range. Note: happens before saving registers (in case we can't
153 * save to kernel stack).
154 *
155 * Expects:
156 * {x0, x1} - saved
157 * x1 - Exception syndrome
158 * sp - Saved state
159 *
160 * Seems like we need an unused argument to the macro for the \@ syntax to work
161 *
162 */
163 .macro CHECK_KERNEL_STACK unused
164 stp x2, x3, [sp, #-16]! // Save {x2-x3}
165 and x1, x1, #ESR_EC_MASK // Mask the exception class
166 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
167 cmp x1, x2 // If we have a stack alignment exception
168 b.eq Lcorrupt_stack_\@ // ...the stack is definitely corrupted
169 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
170 cmp x1, x2 // If we have a data abort, we need to
171 b.ne Lvalid_stack_\@ // ...validate the stack pointer
172 mrs x0, SP_EL0 // Get SP_EL0
173 mrs x1, TPIDR_EL1 // Get thread pointer
174 Ltest_kstack_\@:
175 ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
176 sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
177 cmp x0, x2 // if (SP_EL0 >= kstack top)
178 b.ge Ltest_istack_\@ // jump to istack test
179 cmp x0, x3 // if (SP_EL0 > kstack bottom)
180 b.gt Lvalid_stack_\@ // stack pointer valid
181 Ltest_istack_\@:
182 ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
183 ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
184 sub x3, x2, INTSTACK_SIZE_NUM // Find bottom of istack
185 cmp x0, x2 // if (SP_EL0 >= istack top)
186 b.ge Lcorrupt_stack_\@ // corrupt stack pointer
187 cmp x0, x3 // if (SP_EL0 > istack bottom)
188 b.gt Lvalid_stack_\@ // stack pointer valid
189 Lcorrupt_stack_\@:
190 ldp x2, x3, [sp], #16
191 ldp x0, x1, [sp], #16
192 sub sp, sp, ARM_CONTEXT_SIZE // Allocate exception frame
193 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the exception frame
194 stp x2, x3, [sp, SS64_X2] // Save x2, x3 to the exception frame
195 mrs x0, SP_EL0 // Get SP_EL0
196 str x0, [sp, SS64_SP] // Save sp to the exception frame
197 INIT_SAVED_STATE_FLAVORS sp, w0, w1
198 mov x0, sp // Copy exception frame pointer to x0
199 adrp x1, fleh_invalid_stack@page // Load address for fleh
200 add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
201 b fleh_dispatch64
202 Lvalid_stack_\@:
203 ldp x2, x3, [sp], #16 // Restore {x2-x3}
204 .endmacro
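/*
 * Roughly, the check above implements the following (a sketch only; the real
 * comparisons are the signed ones coded above):
 *
 *	if (EC == ESR_EC_SP_ALIGN)
 *		goto corrupt;
 *	if (EC != ESR_EC_DABORT_EL1)
 *		goto valid;
 *	if (kstack_bottom < SP_EL0 && SP_EL0 < kstack_top)
 *		goto valid;
 *	if (istack_bottom < SP_EL0 && SP_EL0 < istack_top)
 *		goto valid;
 * corrupt:
 *	// build an exception frame on the current (SP1) stack, record the
 *	// untrusted SP_EL0 in it, and dispatch to fleh_invalid_stack
 */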
205
206
207 #if __ARM_KERNEL_PROTECT__
208 .section __DATA_CONST,__const
209 .align 3
210 .globl EXT(exc_vectors_table)
211 LEXT(exc_vectors_table)
212 /* Table of exception handlers.
213 * These handlers sometimes contain deadloops.
214 * It's nice to have symbols for them when debugging. */
215 .quad el1_sp0_synchronous_vector_long
216 .quad el1_sp0_irq_vector_long
217 .quad el1_sp0_fiq_vector_long
218 .quad el1_sp0_serror_vector_long
219 .quad el1_sp1_synchronous_vector_long
220 .quad el1_sp1_irq_vector_long
221 .quad el1_sp1_fiq_vector_long
222 .quad el1_sp1_serror_vector_long
223 .quad el0_synchronous_vector_64_long
224 .quad el0_irq_vector_64_long
225 .quad el0_fiq_vector_64_long
226 .quad el0_serror_vector_64_long
227 #endif /* __ARM_KERNEL_PROTECT__ */
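/*
 * The order of the entries in exc_vectors_table must match the index passed as
 * the second argument to each BRANCH_TO_KVA_VECTOR invocation below (0 through
 * 11), since the per-CPU copy of the table is indexed by that value (<< 3) at
 * exception time.
 */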
228
229 .text
230 #if __ARM_KERNEL_PROTECT__
231 /*
232  * We need this to be on a page boundary so that we can avoid mapping
233  * other text along with it. As this must be on a VM page boundary
234 * (due to how the coredumping code currently works), this will be a
235 * 16KB page boundary.
236 */
237 .align 14
238 #else
239 .align 12
240 #endif /* __ARM_KERNEL_PROTECT__ */
241 .globl EXT(ExceptionVectorsBase)
242 LEXT(ExceptionVectorsBase)
243 Lel1_sp0_synchronous_vector:
244 BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0
245
246 .text
247 .align 7
248 Lel1_sp0_irq_vector:
249 BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
250
251 .text
252 .align 7
253 Lel1_sp0_fiq_vector:
254 BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2
255
256 .text
257 .align 7
258 Lel1_sp0_serror_vector:
259 BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3
260
261 .text
262 .align 7
263 Lel1_sp1_synchronous_vector:
264 BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4
265
266 .text
267 .align 7
268 Lel1_sp1_irq_vector:
269 BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5
270
271 .text
272 .align 7
273 Lel1_sp1_fiq_vector:
274 BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6
275
276 .text
277 .align 7
278 Lel1_sp1_serror_vector:
279 BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7
280
281 .text
282 .align 7
283 Lel0_synchronous_vector_64:
284 MAP_KERNEL
285 BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8
286
287 .text
288 .align 7
289 Lel0_irq_vector_64:
290 MAP_KERNEL
291 BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9
292
293 .text
294 .align 7
295 Lel0_fiq_vector_64:
296 MAP_KERNEL
297 BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10
298
299 .text
300 .align 7
301 Lel0_serror_vector_64:
302 MAP_KERNEL
303 BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11
304
305 /* Fill out the rest of the page */
306 .align 12
307
308 /*********************************
309 * END OF EXCEPTION VECTORS PAGE *
310 *********************************/
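/*
 * The page above provides the AArch64 vector layout required by VBAR_EL1: four
 * groups (current EL with SP0, current EL with SPx, lower EL using AArch64,
 * lower EL using AArch32) of four entries each (Synchronous, IRQ, FIQ, SError),
 * spaced 0x80 bytes apart (hence the .align 7 between stubs). Only the first
 * twelve are populated here; the AArch32 group is left as fill.
 */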
311
312
313
314 .macro EL1_SP0_VECTOR
315 msr SPSel, #0 // Switch to SP0
316 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
317 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
318 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
319 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
320 INIT_SAVED_STATE_FLAVORS sp, w0, w1
321 mov x0, sp // Copy saved state pointer to x0
322 .endmacro
323
324 el1_sp0_synchronous_vector_long:
325 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
326 mrs x1, ESR_EL1 // Get the exception syndrome
327 /* If the stack pointer is corrupt, it will manifest either as a data abort
328 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
329 * these quickly by testing bit 5 of the exception class.
330 */
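/* Concretely: ESR_EC_DABORT_EL1 is 0x25 (0b100101) and ESR_EC_SP_ALIGN is 0x26
 * (0b100110); both have bit 5 set, so the tbz below skips CHECK_KERNEL_STACK
 * whenever that bit is clear. The test is loose: other exception classes with
 * bit 5 set (e.g. instruction aborts, breakpoints) also take the slower path,
 * but CHECK_KERNEL_STACK only treats 0x25/0x26 as possible stack corruption,
 * so that is harmless.
 */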
331 tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
332 CHECK_KERNEL_STACK
333 Lkernel_stack_valid:
334 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
335 EL1_SP0_VECTOR
336 adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
337 add x1, x1, EXT(fleh_synchronous)@pageoff
338 b fleh_dispatch64
339
340 el1_sp0_irq_vector_long:
341 EL1_SP0_VECTOR
342 SWITCH_TO_INT_STACK
343 adrp x1, EXT(fleh_irq)@page // Load address for fleh
344 add x1, x1, EXT(fleh_irq)@pageoff
345 b fleh_dispatch64
346
347 el1_sp0_fiq_vector_long:
348 // ARM64_TODO write optimized decrementer
349 EL1_SP0_VECTOR
350 SWITCH_TO_INT_STACK
351 adrp x1, EXT(fleh_fiq)@page // Load address for fleh
352 add x1, x1, EXT(fleh_fiq)@pageoff
353 b fleh_dispatch64
354
355 el1_sp0_serror_vector_long:
356 EL1_SP0_VECTOR
357 adrp x1, EXT(fleh_serror)@page // Load address for fleh
358 add x1, x1, EXT(fleh_serror)@pageoff
359 b fleh_dispatch64
360
361 .macro EL1_SP1_VECTOR
362 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
363 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
364 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
365 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
366 INIT_SAVED_STATE_FLAVORS sp, w0, w1
367 mov x0, sp // Copy saved state pointer to x0
368 .endmacro
369
370 el1_sp1_synchronous_vector_long:
371 b check_exception_stack
372 Lel1_sp1_synchronous_valid_stack:
373 #if defined(KERNEL_INTEGRITY_KTRR)
374 b check_ktrr_sctlr_trap
375 Lel1_sp1_synchronous_vector_continue:
376 #endif
377 EL1_SP1_VECTOR
378 adrp x1, fleh_synchronous_sp1@page
379 add x1, x1, fleh_synchronous_sp1@pageoff
380 b fleh_dispatch64
381
382 el1_sp1_irq_vector_long:
383 EL1_SP1_VECTOR
384 adrp x1, fleh_irq_sp1@page
385 add x1, x1, fleh_irq_sp1@pageoff
386 b fleh_dispatch64
387
388 el1_sp1_fiq_vector_long:
389 EL1_SP1_VECTOR
390 adrp x1, fleh_fiq_sp1@page
391 add x1, x1, fleh_fiq_sp1@pageoff
392 b fleh_dispatch64
393
394 el1_sp1_serror_vector_long:
395 EL1_SP1_VECTOR
396 adrp x1, fleh_serror_sp1@page
397 add x1, x1, fleh_serror_sp1@pageoff
398 b fleh_dispatch64
399
400
401 .macro EL0_64_VECTOR
402 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
403 #if __ARM_KERNEL_PROTECT__
404 mov x18, #0 // Zero x18 to avoid leaking data to user SS
405 #endif
406 mrs x0, TPIDR_EL1 // Load the thread register
407 mrs x1, SP_EL0 // Load the user stack pointer
408 add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
409 ldr x0, [x0] // Load the user context pointer
410 str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB
411 msr SP_EL0, x0 // Copy the user PCB pointer to SP0
412 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
413 msr SPSel, #0 // Switch to SP0
414 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
415 mrs x1, TPIDR_EL1 // Load the thread register
416
417
418 mov x0, sp // Copy the user PCB pointer to x0
419 // x1 contains thread register
420 .endmacro
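/*
 * In short: on an exception from EL0 the macro above re-points SP_EL0 at the
 * thread's saved-state area (loaded via ACT_CONTEXT), after first stashing the
 * user stack pointer into that area's SS64_SP slot; x0/x1 are temporarily saved
 * on the SP1 stack while this is done. On exit, sp (SP0) is the PCB, x0 points
 * at it, and x1 holds the thread pointer, ready for SPILL_REGISTERS in
 * fleh_dispatch64.
 */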
421
422
423 el0_synchronous_vector_64_long:
424 EL0_64_VECTOR sync
425 SWITCH_TO_KERN_STACK
426 adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
427 add x1, x1, EXT(fleh_synchronous)@pageoff
428 b fleh_dispatch64
429
430 el0_irq_vector_64_long:
431 EL0_64_VECTOR irq
432 SWITCH_TO_INT_STACK
433 adrp x1, EXT(fleh_irq)@page // load address for fleh
434 add x1, x1, EXT(fleh_irq)@pageoff
435 b fleh_dispatch64
436
437 el0_fiq_vector_64_long:
438 EL0_64_VECTOR fiq
439 SWITCH_TO_INT_STACK
440 adrp x1, EXT(fleh_fiq)@page // load address for fleh
441 add x1, x1, EXT(fleh_fiq)@pageoff
442 b fleh_dispatch64
443
444 el0_serror_vector_64_long:
445 EL0_64_VECTOR serror
446 SWITCH_TO_KERN_STACK
447 adrp x1, EXT(fleh_serror)@page // load address for fleh
448 add x1, x1, EXT(fleh_serror)@pageoff
449 b fleh_dispatch64
450
451
452 /*
453 * check_exception_stack
454 *
455  * Verifies that the stack pointer at SP1 is within the exception stack.
456  * If not, we simply hang, as there is no other stack to fall back on.
457 */
458
459 .text
460 .align 2
461 check_exception_stack:
462 mrs x18, TPIDR_EL1 // Get thread pointer
463 cbz x18, Lvalid_exception_stack // Thread context may not be set early in boot
464 ldr x18, [x18, ACT_CPUDATAP]
465 cbz x18, . // If thread context is set, cpu data should be too
466 ldr x18, [x18, CPU_EXCEPSTACK_TOP]
467 cmp sp, x18
468 b.gt . // Hang if above exception stack top
469 sub x18, x18, EXCEPSTACK_SIZE_NUM // Find bottom of exception stack
470 cmp sp, x18
471 b.lt . // Hang if below exception stack bottom
472 Lvalid_exception_stack:
473 mov x18, #0
474 b Lel1_sp1_synchronous_valid_stack
475
476
477 #if defined(KERNEL_INTEGRITY_KTRR)
478 .text
479 .align 2
480 check_ktrr_sctlr_trap:
481 /* We may abort on an instruction fetch on reset when enabling the MMU by
482 * writing SCTLR_EL1 because the page containing the privileged instruction is
483 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
484 * would otherwise panic unconditionally. Check for the condition and return
485 * safe execution to the caller on behalf of the faulting function.
486 *
487 * Expected register state:
488 * x22 - Kernel virtual base
489 * x23 - Kernel physical base
490 */
491 sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack
492 stp x0, x1, [sp, SS64_X0] // Stash x0, x1
493 mrs x0, ESR_EL1 // Check ESR for instr. fetch abort
494 and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC
495 movz w1, #0x8600, lsl #16
496 movk w1, #0x0000
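	// w1 == 0x86000000: EC == 0x21 (instruction abort taken without a change
	// in EL) with IL (bit 25) set and a zero ISS; the IFSC bits were already
	// masked off of x0 above, so the comparison matches any fault status code.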
497 cmp x0, x1
498 mrs x0, ELR_EL1 // Check for expected abort address
499 adrp x1, _pinst_set_sctlr_trap_addr@page
500 add x1, x1, _pinst_set_sctlr_trap_addr@pageoff
501 sub x1, x1, x22 // Convert to physical address
502 add x1, x1, x23
503 ccmp x0, x1, #0, eq
504 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
505 add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
506 b.ne Lel1_sp1_synchronous_vector_continue
507 msr ELR_EL1, lr // Return to caller
508 ERET_CONTEXT_SYNCHRONIZING
509 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
510
511 /* 64-bit first level exception handler dispatcher.
512 * Completes register context saving and branches to FLEH.
513 * Expects:
514 * {x0, x1, sp} - saved
515 * x0 - arm_context_t
516 * x1 - address of FLEH
517 * fp - previous stack frame if EL1
518 * lr - unused
519 * sp - kernel stack
520 */
521 .text
522 .align 2
523 fleh_dispatch64:
524 /* Save arm_saved_state64 */
525 SPILL_REGISTERS KERNEL_MODE
526
527 /* If exception is from userspace, zero unused registers */
528 and x23, x23, #(PSR64_MODE_EL_MASK)
529 cmp x23, #(PSR64_MODE_EL0)
530 bne 1f
531
532 SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS
533 2:
534 mov x2, #0
535 mov x3, #0
536 mov x4, #0
537 mov x5, #0
538 mov x6, #0
539 mov x7, #0
540 mov x8, #0
541 mov x9, #0
542 mov x10, #0
543 mov x11, #0
544 mov x12, #0
545 mov x13, #0
546 mov x14, #0
547 mov x15, #0
548 mov x16, #0
549 mov x17, #0
550 mov x18, #0
551 mov x19, #0
552 mov x20, #0
553 /* x21, x22 cleared in common case below */
554 mov x23, #0
555 mov x24, #0
556 mov x25, #0
557 #if !XNU_MONITOR
558 mov x26, #0
559 #endif
560 mov x27, #0
561 mov x28, #0
562 mov fp, #0
563 mov lr, #0
564 1:
565
566 mov x21, x0 // Copy arm_context_t pointer to x21
567 mov x22, x1 // Copy handler routine to x22
568
569 #if XNU_MONITOR
570 /* Zero x26 to indicate that this should not return to the PPL. */
571 mov x26, #0
572 #endif
573
574 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
575 tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
576 b.ne 1f // kernel mode, so skip precise time update
577 PUSH_FRAME
578 bl EXT(timer_state_event_user_to_kernel)
579 POP_FRAME
580 mov x0, x21 // Reload arm_context_t pointer
581 1:
582 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
583
584 /* Dispatch to FLEH */
585
586 br x22
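/*
 * Register conventions on entry to the fleh_* routines dispatched above:
 *   x0, x21 - pointer to the saved state (arm_context_t); x21 survives the
 *             C calls and is used by the exception return path
 *   x22     - the fleh routine itself (consumed by the br)
 *   x23     - EL bits of the interrupted CPSR, zeroed if we came from EL0
 *   x26     - PPL-return flag (XNU_MONITOR configurations only)
 *   x28     - set by each fleh routine to indicate whether the PFZ check is
 *             needed before returning to user space
 * The wholesale zeroing above on entry from EL0 presumably keeps stale,
 * user-controlled register values from influencing kernel code.
 */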
587
588
589 .text
590 .align 2
591 .global EXT(fleh_synchronous)
592 LEXT(fleh_synchronous)
593
594 UNWIND_PROLOGUE
595 UNWIND_DIRECTIVES
596
597 mrs x1, ESR_EL1 // Load exception syndrome
598 mrs x2, FAR_EL1 // Load fault address
599
600 /* At this point, the LR contains the value of ELR_EL1. In the case of an
601 * instruction prefetch abort, this will be the faulting pc, which we know
602 * to be invalid. This will prevent us from backtracing through the
603 * exception if we put it in our stack frame, so we load the LR from the
604 * exception saved state instead.
605 */
606 and w3, w1, #(ESR_EC_MASK)
607 lsr w3, w3, #(ESR_EC_SHIFT)
608 mov w4, #(ESR_EC_IABORT_EL1)
609 cmp w3, w4
610 b.eq Lfleh_sync_load_lr
611 Lvalid_link_register:
612
613 PUSH_FRAME
614 bl EXT(sleh_synchronous)
615 POP_FRAME
616
617 #if XNU_MONITOR
618 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
619 #endif
620
621 mov x28, xzr // Don't need to check PFZ if there are ASTs
622 b exception_return_dispatch
623
624 Lfleh_sync_load_lr:
625 ldr lr, [x0, SS64_LR]
626 b Lvalid_link_register
627 UNWIND_EPILOGUE
628
629 /* Shared prologue code for fleh_irq and fleh_fiq.
630  * Does any interrupt bookkeeping we may want to do
631 * before invoking the handler proper.
632 * Expects:
633 * x0 - arm_context_t
634 * x23 - CPSR
635 * fp - Undefined live value (we may push a frame)
636 * lr - Undefined live value (we may push a frame)
637 * sp - Interrupt stack for the current CPU
638 */
639 .macro BEGIN_INTERRUPT_HANDLER
640 mrs x22, TPIDR_EL1
641 ldr x23, [x22, ACT_CPUDATAP] // Get current cpu
642 /* Update IRQ count */
643 ldr w1, [x23, CPU_STAT_IRQ]
644 add w1, w1, #1 // Increment count
645 str w1, [x23, CPU_STAT_IRQ] // Update IRQ count
646 ldr w1, [x23, CPU_STAT_IRQ_WAKE]
647 add w1, w1, #1 // Increment count
648 str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
649 /* Increment preempt count */
650 ldr w1, [x22, ACT_PREEMPT_CNT]
651 add w1, w1, #1
652 str w1, [x22, ACT_PREEMPT_CNT]
653 /* Store context in int state */
654 str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state
655 .endmacro
656
657 /* Shared epilogue code for fleh_irq and fleh_fiq.
658 * Cleans up after the prologue, and may do a bit more
659 * bookkeeping (kdebug related).
660 * Expects:
661 * x22 - Live TPIDR_EL1 value (thread address)
662 * x23 - Address of the current CPU data structure
663  * w24 - 0 if kdebug is disabled, nonzero otherwise
664 * fp - Undefined live value (we may push a frame)
665 * lr - Undefined live value (we may push a frame)
666 * sp - Interrupt stack for the current CPU
667 */
668 .macro END_INTERRUPT_HANDLER
669 /* Clear int context */
670 str xzr, [x23, CPU_INT_STATE]
671 /* Decrement preempt count */
672 ldr w0, [x22, ACT_PREEMPT_CNT]
673 cbnz w0, 1f // Detect underflow
674 b preempt_underflow
675 1:
676 sub w0, w0, #1
677 str w0, [x22, ACT_PREEMPT_CNT]
678 /* Switch back to kernel stack */
679 ldr x0, [x22, TH_KSTACKPTR]
680 mov sp, x0
681 .endmacro
682
683 .text
684 .align 2
685 .global EXT(fleh_irq)
686 LEXT(fleh_irq)
687 BEGIN_INTERRUPT_HANDLER
688 PUSH_FRAME
689 bl EXT(sleh_irq)
690 POP_FRAME
691 END_INTERRUPT_HANDLER
692
693 #if XNU_MONITOR
694 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
695 #endif
696
697 mov x28, #1 // Set a bit to check PFZ if there are ASTs
698 b exception_return_dispatch
699
700 .text
701 .align 2
702 .global EXT(fleh_fiq_generic)
703 LEXT(fleh_fiq_generic)
704 PANIC_UNIMPLEMENTED
705
706 .text
707 .align 2
708 .global EXT(fleh_fiq)
709 LEXT(fleh_fiq)
710 BEGIN_INTERRUPT_HANDLER
711 PUSH_FRAME
712 bl EXT(sleh_fiq)
713 POP_FRAME
714 END_INTERRUPT_HANDLER
715
716 #if XNU_MONITOR
717 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
718 #endif
719
720 mov x28, #1 // Set a bit to check PFZ if there are ASTs
721 b exception_return_dispatch
722
723 .text
724 .align 2
725 .global EXT(fleh_serror)
726 LEXT(fleh_serror)
727 mrs x1, ESR_EL1 // Load exception syndrome
728 mrs x2, FAR_EL1 // Load fault address
729
730 PUSH_FRAME
731 bl EXT(sleh_serror)
732 POP_FRAME
733
734 #if XNU_MONITOR
735 CHECK_EXCEPTION_RETURN_DISPATCH_PPL
736 #endif
737
738 mov x28, xzr // Don't need to check PFZ if there are ASTs
739 b exception_return_dispatch
740
741 /*
742 * Register state saved before we get here.
743 */
744 .text
745 .align 2
746 fleh_invalid_stack:
747 mrs x1, ESR_EL1 // Load exception syndrome
748 str x1, [x0, SS64_ESR]
749 mrs x2, FAR_EL1 // Load fault address
750 str x2, [x0, SS64_FAR]
751 PUSH_FRAME
752 bl EXT(sleh_invalid_stack) // Shouldn't return!
753 b .
754
755 .text
756 .align 2
757 fleh_synchronous_sp1:
758 mrs x1, ESR_EL1 // Load exception syndrome
759 str x1, [x0, SS64_ESR]
760 mrs x2, FAR_EL1 // Load fault address
761 str x2, [x0, SS64_FAR]
762 PUSH_FRAME
763 bl EXT(sleh_synchronous_sp1)
764 b .
765
766 .text
767 .align 2
768 fleh_irq_sp1:
769 mov x1, x0
770 adr x0, Lsp1_irq_str
771 b EXT(panic_with_thread_kernel_state)
772 Lsp1_irq_str:
773 .asciz "IRQ exception taken while SP1 selected"
774
775 .text
776 .align 2
777 fleh_fiq_sp1:
778 mov x1, x0
779 adr x0, Lsp1_fiq_str
780 b EXT(panic_with_thread_kernel_state)
781 Lsp1_fiq_str:
782 .asciz "FIQ exception taken while SP1 selected"
783
784 .text
785 .align 2
786 fleh_serror_sp1:
787 mov x1, x0
788 adr x0, Lsp1_serror_str
789 b EXT(panic_with_thread_kernel_state)
790 Lsp1_serror_str:
791 .asciz "Asynchronous exception taken while SP1 selected"
792
793 .text
794 .align 2
795 exception_return_dispatch:
796 ldr w0, [x21, SS64_CPSR]
797 tst w0, PSR64_MODE_EL_MASK
798 b.ne EXT(return_to_kernel) // return to kernel if M[3:2] > 0
799 b return_to_user
800
801 .text
802 .align 2
803 .global EXT(return_to_kernel)
804 LEXT(return_to_kernel)
805 tbnz w0, #DAIF_IRQF_SHIFT, exception_return // Skip AST check if IRQ disabled
806 mrs x3, TPIDR_EL1 // Load thread pointer
807 ldr w1, [x3, ACT_PREEMPT_CNT] // Load preemption count
808 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
809 cbnz x1, exception_return_unint_tpidr_x3 // If preemption disabled, skip AST check
810 ldr x1, [x3, ACT_CPUDATAP] // Get current CPU data pointer
811 ldr x2, [x1, CPU_PENDING_AST] // Get ASTs
812 tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken
813 b.eq exception_return_unint_tpidr_x3
814 mov sp, x21 // Switch to thread stack for preemption
815 PUSH_FRAME
816 bl EXT(ast_taken_kernel) // Handle AST_URGENT
817 POP_FRAME
818 b exception_return
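/*
 * In other words, a kernel-mode exception return only calls ast_taken_kernel
 * when all of the following hold: the interrupted context had IRQs enabled,
 * the thread's preemption count is zero, and AST_URGENT is pending for this
 * CPU. Otherwise we fall straight into the restore path with exceptions
 * masked.
 */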
819
820 .text
821 .globl EXT(thread_bootstrap_return)
822 LEXT(thread_bootstrap_return)
823 #if CONFIG_DTRACE
824 bl EXT(dtrace_thread_bootstrap)
825 #endif
826 b EXT(arm64_thread_exception_return)
827
828 .text
829 .globl EXT(arm64_thread_exception_return)
830 LEXT(arm64_thread_exception_return)
831 mrs x0, TPIDR_EL1
832 add x21, x0, ACT_CONTEXT
833 ldr x21, [x21]
834 mov x28, xzr
835
836 //
837 // Fall through to return_to_user from arm64_thread_exception_return.
838 // Note that if return_to_user is moved, or a new routine is inserted
839 // below arm64_thread_exception_return, this fall-through must be updated.
840 //
841 .text
842 /* x21 is always the machine context pointer when we get here
843 * x28 is a bit indicating whether or not we should check if pc is in pfz */
844 return_to_user:
845 check_user_asts:
846 mrs x3, TPIDR_EL1 // Load thread pointer
847
848 movn w2, #0
849 str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
850
851 #if MACH_ASSERT
852 ldr w0, [x3, TH_RWLOCK_CNT]
853 cbnz w0, rwlock_count_notzero // Detect unbalanced RW lock/unlock
854
855 ldr w0, [x3, ACT_PREEMPT_CNT]
856 cbnz w0, preempt_count_notzero // Detect unbalanced enable/disable preemption
857 #endif
858 ldr w0, [x3, TH_TMP_ALLOC_CNT]
859 cbnz w0, tmp_alloc_count_nozero // Detect KHEAP_TEMP leaks
860
861 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
862 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
863 ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
864 cbz x0, no_asts // If no asts, skip ahead
865
866 cbz x28, user_take_ast // If we don't need to check PFZ, just handle asts
867
868 /* At this point, we have ASTs and we need to check whether we are running in the
869 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
870 * the PFZ since we don't want to handle getting a signal or getting suspended
871 * while holding a spinlock in userspace.
872 *
873 * If userspace was in the PFZ, we know (via coordination with the PFZ code
874 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
875 * to use it to indicate to userspace to come back to take a delayed
876 * preemption, at which point the ASTs will be handled. */
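/* A sketch of the AST/PFZ decision implemented in this stretch of code (not
 * new behavior):
 *
 *	if (pending_asts == 0)
 *		goto no_asts;
 *	if (!check_pfz)                          // x28 == 0
 *		goto user_take_ast;
 *	if (commpage_is_in_pfz64(saved_pc)) {
 *		saved_state->x15 = 1;            // ask userspace to take a
 *		goto no_asts;                    // delayed preemption instead
 *	}
 *	goto user_take_ast;
 */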
877 mov x28, xzr // Clear the "check PFZ" bit so that we don't do this again
878 mov x19, x0 // Save x0 since it will be clobbered by commpage_is_in_pfz64
879
880 ldr x0, [x21, SS64_PC] // Load pc from machine state
881 bl EXT(commpage_is_in_pfz64) // pc in pfz?
882 cbz x0, restore_and_check_ast // No, deal with other asts
883
884 mov x0, #1
885 str x0, [x21, SS64_X15] // Mark x15 for userspace to take delayed preemption
886 mov x0, x19 // restore x0 to asts
887 b no_asts // pretend we have no asts
888
889 restore_and_check_ast:
890 mov x0, x19 // restore x0
891 b user_take_ast // Service pending asts
892 no_asts:
893
894
895 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
896 mov x19, x3 // Preserve thread pointer across function call
897 PUSH_FRAME
898 bl EXT(timer_state_event_kernel_to_user)
899 POP_FRAME
900 mov x3, x19
901 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
902
903 #if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
904 /* Watchtower
905 *
906 * Here we attempt to enable NEON access for EL0. If the last entry into the
907 * kernel from user-space was due to an IRQ, the monitor will have disabled
908 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
909 * check in with the monitor in order to reenable NEON for EL0 in exchange
910 * for routing IRQs through the monitor (2). This way the monitor will
911 * always 'own' either IRQs or EL0 NEON.
912 *
913 * If Watchtower is disabled or we did not enter the kernel through an IRQ
914 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
915 * here.
916 *
917 * EL0 user ________ IRQ ______
918 * EL1 xnu \ ______________________ CPACR_EL1 __/
919 * EL3 monitor \_/ \___/
920 *
921 * (1) (2)
922 */
923
924 mov x0, #(CPACR_FPEN_ENABLE)
925 msr CPACR_EL1, x0
926 #endif
927
928 /* Establish this thread's debug state as the live state on the selected CPU. */
929 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
930 ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
931 ldr x0, [x3, ACT_DEBUGDATA]
932 cmp x0, x1
933 beq L_skip_user_set_debug_state // If the live CPU debug state already matches this thread's, skip applying it
934
935 #if defined(APPLELIGHTNING)
936 /* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */
937
938 ARM64_IS_PCORE x12 // if we're not a pCORE, also do nothing
939 cbz x12, 1f
940
941 #endif
942
943 #if defined(APPLELIGHTNING) || defined(APPLEFIRESTORM)
944
945 mrs x12, ARM64_REG_HID1 // if any debug session ever existed, set forceNexL3ClkOn
946 orr x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
947 msr ARM64_REG_HID1, x12
948 1:
949
950 #endif
951
952 PUSH_FRAME
953 bl EXT(arm_debug_set) // Establish thread debug state in live regs
954 POP_FRAME
955 mrs x3, TPIDR_EL1 // Reload thread pointer
956 L_skip_user_set_debug_state:
957
958
959 b exception_return_unint_tpidr_x3
960
961 //
962 // Fall through from return_to_user to exception_return.
963 // Note that if exception_return is moved, or a new routine is added below
964 // return_to_user, this fall-through will have to change.
965 //
966
967 exception_return:
968 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
969 exception_return_unint:
970 mrs x3, TPIDR_EL1 // Load thread pointer
971 exception_return_unint_tpidr_x3:
972 mov sp, x21 // Reload the pcb pointer
973
974 exception_return_unint_tpidr_x3_dont_trash_x18:
975
976
977 #if __ARM_KERNEL_PROTECT__
978 /*
979 * If we are going to eret to userspace, we must return through the EL0
980 * eret mapping.
981 */
982 ldr w1, [sp, SS64_CPSR] // Load CPSR
983 tbnz w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1
984
985 /* We need to switch to the EL0 mapping of this code to eret to EL0. */
986 adrp x0, EXT(ExceptionVectorsBase)@page // Load vector base
987 adrp x1, Lexception_return_restore_registers@page // Load target PC
988 add x1, x1, Lexception_return_restore_registers@pageoff
989 MOV64 x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address
990 sub x1, x1, x0 // Calculate delta
991 add x0, x2, x1 // Convert KVA to EL0 vector address
992 br x0
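	/*
	 * i.e. x0 = ARM_KERNEL_PROTECT_EXCEPTION_START
	 *          + (Lexception_return_restore_registers - ExceptionVectorsBase),
	 * the same restore code reached through the fixed EL0 alias mapping of the
	 * vectors/eret page, which stays mapped after the kernel is unmapped below.
	 */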
993
994 Lskip_el0_eret_mapping:
995 #endif /* __ARM_KERNEL_PROTECT__ */
996
997 Lexception_return_restore_registers:
998 mov x0, sp // x0 = &pcb
999 // Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
1000 AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24, el0_state_allowed=1
1001
1002 /* Restore special register state */
1003 ldr w3, [sp, NS64_FPSR]
1004 ldr w4, [sp, NS64_FPCR]
1005
1006 msr ELR_EL1, x1 // Load the return address into ELR
1007 msr SPSR_EL1, x2 // Load the return CPSR into SPSR
1008 msr FPSR, x3
1009 mrs x5, FPCR
1010 CMSR FPCR, x5, x4, 1
1011 1:
1012
1013
1014 /* Restore arm_neon_saved_state64 */
1015 ldp q0, q1, [x0, NS64_Q0]
1016 ldp q2, q3, [x0, NS64_Q2]
1017 ldp q4, q5, [x0, NS64_Q4]
1018 ldp q6, q7, [x0, NS64_Q6]
1019 ldp q8, q9, [x0, NS64_Q8]
1020 ldp q10, q11, [x0, NS64_Q10]
1021 ldp q12, q13, [x0, NS64_Q12]
1022 ldp q14, q15, [x0, NS64_Q14]
1023 ldp q16, q17, [x0, NS64_Q16]
1024 ldp q18, q19, [x0, NS64_Q18]
1025 ldp q20, q21, [x0, NS64_Q20]
1026 ldp q22, q23, [x0, NS64_Q22]
1027 ldp q24, q25, [x0, NS64_Q24]
1028 ldp q26, q27, [x0, NS64_Q26]
1029 ldp q28, q29, [x0, NS64_Q28]
1030 ldp q30, q31, [x0, NS64_Q30]
1031
1032 /* Restore arm_saved_state64 */
1033
1034 // Skip x0, x1 - we're using them
1035 ldp x2, x3, [x0, SS64_X2]
1036 ldp x4, x5, [x0, SS64_X4]
1037 ldp x6, x7, [x0, SS64_X6]
1038 ldp x8, x9, [x0, SS64_X8]
1039 ldp x10, x11, [x0, SS64_X10]
1040 ldp x12, x13, [x0, SS64_X12]
1041 ldp x14, x15, [x0, SS64_X14]
1042 // Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1043 ldp x18, x19, [x0, SS64_X18]
1044 ldp x20, x21, [x0, SS64_X20]
1045 ldp x22, x23, [x0, SS64_X22]
1046 ldp x24, x25, [x0, SS64_X24]
1047 ldp x26, x27, [x0, SS64_X26]
1048 ldr x28, [x0, SS64_X28]
1049 ldr fp, [x0, SS64_FP]
1050 // Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1051
1052 // Restore stack pointer and our last two GPRs
1053 ldr x1, [x0, SS64_SP]
1054 mov sp, x1
1055
1056 #if __ARM_KERNEL_PROTECT__
1057 ldr w18, [x0, SS64_CPSR] // Stash CPSR
1058 #endif /* __ARM_KERNEL_PROTECT__ */
1059
1060 ldp x0, x1, [x0, SS64_X0] // Restore the GPRs
1061
1062 #if __ARM_KERNEL_PROTECT__
1063 /* If we are going to eret to userspace, we must unmap the kernel. */
1064 tbnz w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1065
1066 /* Update TCR to unmap the kernel. */
1067 MOV64 x18, TCR_EL1_USER
1068 msr TCR_EL1, x18
1069
1070 /*
1071 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1072 * each other due to the microarchitecture.
1073 */
1074 #if !defined(APPLE_ARM64_ARCH_FAMILY)
1075 isb sy
1076 #endif
1077
1078 /* Switch to the user ASID (low bit clear) for the task. */
1079 mrs x18, TTBR0_EL1
1080 bic x18, x18, #(1 << TTBR_ASID_SHIFT)
1081 msr TTBR0_EL1, x18
1082 mov x18, #0
1083
1084 /* We don't need an ISB here, as the eret is synchronizing. */
1085 Lskip_ttbr1_switch:
1086 #endif /* __ARM_KERNEL_PROTECT__ */
1087
1088 ERET_CONTEXT_SYNCHRONIZING
1089
1090 user_take_ast:
1091 PUSH_FRAME
1092 bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation
1093 POP_FRAME
1094 b check_user_asts // Now try again
1095
1096 .text
1097 .align 2
1098 preempt_underflow:
1099 mrs x0, TPIDR_EL1
1100 str x0, [sp, #-16]! // We'll print thread pointer
1101 adr x0, L_underflow_str // Format string
1102 CALL_EXTERN panic // Game over
1103
1104 L_underflow_str:
1105 .asciz "Preemption count negative on thread %p"
1106 .align 2
1107
1108 #if MACH_ASSERT
1109 .text
1110 .align 2
1111 rwlock_count_notzero:
1112 mrs x0, TPIDR_EL1
1113 str x0, [sp, #-16]! // We'll print thread pointer
1114 ldr w0, [x0, TH_RWLOCK_CNT]
1115 str w0, [sp, #8]
1116 adr x0, L_rwlock_count_notzero_str // Format string
1117 CALL_EXTERN panic // Game over
1118
1119 L_rwlock_count_notzero_str:
1120 .asciz "RW lock count not 0 on thread %p (%u)"
1121
1122 .text
1123 .align 2
1124 preempt_count_notzero:
1125 mrs x0, TPIDR_EL1
1126 str x0, [sp, #-16]! // We'll print thread pointer
1127 ldr w0, [x0, ACT_PREEMPT_CNT]
1128 str w0, [sp, #8]
1129 adr x0, L_preempt_count_notzero_str // Format string
1130 CALL_EXTERN panic // Game over
1131
1132 L_preempt_count_notzero_str:
1133 .asciz "preemption count not 0 on thread %p (%u)"
1134 #endif /* MACH_ASSERT */
1135
1136 .text
1137 .align 2
1138 tmp_alloc_count_nozero:
1139 mrs x0, TPIDR_EL1
1140 CALL_EXTERN kheap_temp_leak_panic
1141
1142 #if __ARM_KERNEL_PROTECT__
1143 /*
1144 * This symbol denotes the end of the exception vector/eret range; we page
1145 * align it so that we can avoid mapping other text in the EL0 exception
1146 * vector mapping.
1147 */
1148 .text
1149 .align 14
1150 .globl EXT(ExceptionVectorsEnd)
1151 LEXT(ExceptionVectorsEnd)
1152 #endif /* __ARM_KERNEL_PROTECT__ */
1153
1154 #if XNU_MONITOR
1155
1156 /*
1157 * Functions to preflight the fleh handlers when the PPL has taken an exception;
1158 * mostly concerned with setting up state for the normal fleh code.
1159 */
1160 fleh_synchronous_from_ppl:
1161 /* Save x0. */
1162 mov x15, x0
1163
1164 /* Grab the ESR. */
1165 mrs x1, ESR_EL1 // Get the exception syndrome
1166
1167 /* If the stack pointer is corrupt, it will manifest either as a data abort
1168 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
1169 * these quickly by testing bit 5 of the exception class.
1170 */
1171 tbz x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
1172 mrs x0, SP_EL0 // Get SP_EL0
1173
1174 /* Perform high level checks for stack corruption. */
1175 and x1, x1, #ESR_EC_MASK // Mask the exception class
1176 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
1177 cmp x1, x2 // If we have a stack alignment exception
1178 b.eq Lcorrupt_ppl_stack // ...the stack is definitely corrupted
1179 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
1180 cmp x1, x2 // If we have a data abort, we need to
1181 b.ne Lvalid_ppl_stack // ...validate the stack pointer
1182
1183 Ltest_pstack:
1184 /* Bounds check the PPL stack. */
1185 adrp x10, EXT(pmap_stacks_start)@page
1186 ldr x10, [x10, #EXT(pmap_stacks_start)@pageoff]
1187 adrp x11, EXT(pmap_stacks_end)@page
1188 ldr x11, [x11, #EXT(pmap_stacks_end)@pageoff]
1189 cmp x0, x10
1190 b.lo Lcorrupt_ppl_stack
1191 cmp x0, x11
1192 b.hi Lcorrupt_ppl_stack
1193
1194 Lvalid_ppl_stack:
1195 /* Restore x0. */
1196 mov x0, x15
1197
1198 /* Switch back to the kernel stack. */
1199 msr SPSel, #0
1200 GET_PMAP_CPU_DATA x5, x6, x7
1201 ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1202 mov sp, x6
1203
1204 /* Hand off to the synch handler. */
1205 b EXT(fleh_synchronous)
1206
1207 Lcorrupt_ppl_stack:
1208 /* Restore x0. */
1209 mov x0, x15
1210
1211 /* Hand off to the invalid stack handler. */
1212 b fleh_invalid_stack
1213
1214 fleh_fiq_from_ppl:
1215 SWITCH_TO_INT_STACK
1216 b EXT(fleh_fiq)
1217
1218 fleh_irq_from_ppl:
1219 SWITCH_TO_INT_STACK
1220 b EXT(fleh_irq)
1221
1222 fleh_serror_from_ppl:
1223 GET_PMAP_CPU_DATA x5, x6, x7
1224 ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1225 mov sp, x6
1226 b EXT(fleh_serror)
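/*
 * Common theme of the *_from_ppl stubs above: before handing off to the normal
 * fleh_* entry points, they move sp off the PPL stack, either onto the kernel
 * stack pointer saved in the pmap per-CPU data (synchronous/SError) or onto the
 * interrupt stack (IRQ/FIQ), validating the PPL stack bounds first in the
 * synchronous case.
 */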
1227
1228
1229
1230
1231 // x15: ppl call number
1232 // w10: ppl_state
1233 // x20: gxf_enter caller's DAIF
1234 .globl EXT(ppl_trampoline_start)
1235 LEXT(ppl_trampoline_start)
1236
1237
1238 #error "XPRR configuration error"
1239 cmp x14, x21
1240 b.ne Lppl_fail_dispatch
1241
1242 /* Verify the request ID. */
1243 cmp x15, PMAP_COUNT
1244 b.hs Lppl_fail_dispatch
1245
1246 GET_PMAP_CPU_DATA x12, x13, x14
1247
1248 /* Mark this CPU as being in the PPL. */
1249 ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1250
1251 cmp w9, #PPL_STATE_KERNEL
1252 b.eq Lppl_mark_cpu_as_dispatching
1253
1254 /* Check to see if we are trying to trap from within the PPL. */
1255 cmp w9, #PPL_STATE_DISPATCH
1256 b.eq Lppl_fail_dispatch_ppl
1257
1258
1259 /* Ensure that we are returning from an exception. */
1260 cmp w9, #PPL_STATE_EXCEPTION
1261 b.ne Lppl_fail_dispatch
1262
1263 // w10 carries the caller's ppl_state; on the exception-return path it is
1264 // set to PPL_STATE_EXCEPTION by CHECK_EXCEPTION_RETURN_DISPATCH_PPL.
1265 cmp w10, #PPL_STATE_EXCEPTION
1266 b.ne Lppl_fail_dispatch
1267
1268 /* This is an exception return; set the CPU to the dispatching state. */
1269 mov w9, #PPL_STATE_DISPATCH
1270 str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1271
1272 /* Find the save area, and return to the saved PPL context. */
1273 ldr x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
1274 mov sp, x0
1275 b EXT(return_to_ppl)
1276
1277 Lppl_mark_cpu_as_dispatching:
1278 cmp w10, #PPL_STATE_KERNEL
1279 b.ne Lppl_fail_dispatch
1280
1281 /* Mark the CPU as dispatching. */
1282 mov w13, #PPL_STATE_DISPATCH
1283 str w13, [x12, PMAP_CPU_DATA_PPL_STATE]
1284
1285 /* Switch to the regular PPL stack. */
1286 // TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
1287 ldr x9, [x12, PMAP_CPU_DATA_PPL_STACK]
1288
1289 // SP0 is thread stack here
1290 mov x21, sp
1291 // SP0 is now PPL stack
1292 mov sp, x9
1293
1294 /* Save the old stack pointer off in case we need it. */
1295 str x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1296
1297 /* Get the handler for the request */
1298 adrp x9, EXT(ppl_handler_table)@page
1299 add x9, x9, EXT(ppl_handler_table)@pageoff
1300 add x9, x9, x15, lsl #3
1301 ldr x10, [x9]
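	/* i.e. x10 = ppl_handler_table[x15]: an 8-byte function pointer indexed by
	 * the PPL call number, which was range-checked against PMAP_COUNT above. */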
1302
1303 /* Branch to the code that will invoke the PPL request. */
1304 b EXT(ppl_dispatch)
1305
1306 Lppl_fail_dispatch_ppl:
1307 /* Switch back to the kernel stack. */
1308 ldr x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1309 mov sp, x10
1310
1311 Lppl_fail_dispatch:
1312 /* Indicate that we failed. */
1313 mov x15, #PPL_EXIT_BAD_CALL
1314
1315 /* Move the DAIF bits into the expected register. */
1316 mov x10, x20
1317
1318 /* Return to kernel mode. */
1319 b ppl_return_to_kernel_mode
1320
1321 Lppl_dispatch_exit:
1322 /* Indicate that we are cleanly exiting the PPL. */
1323 mov x15, #PPL_EXIT_DISPATCH
1324
1325 /* Switch back to the original (kernel thread) stack. */
1326 mov sp, x21
1327
1328 /* Move the saved DAIF bits. */
1329 mov x10, x20
1330
1331 /* Clear the old stack pointer. */
1332 str xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1333
1334 /*
1335 * Mark the CPU as no longer being in the PPL. We spin if our state
1336 * machine is broken.
1337 */
1338 ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1339 cmp w9, #PPL_STATE_DISPATCH
1340 b.ne .
1341 mov w9, #PPL_STATE_KERNEL
1342 str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1343
1344 /* Return to the kernel. */
1345 b ppl_return_to_kernel_mode
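/*
 * A sketch of the per-CPU PPL state machine implemented above:
 *
 *	PPL_STATE_KERNEL    -> PPL_STATE_DISPATCH   on a fresh PPL call
 *	PPL_STATE_EXCEPTION -> PPL_STATE_DISPATCH   when resuming a saved PPL
 *	                                            context after an exception
 *	PPL_STATE_DISPATCH  -> PPL_STATE_KERNEL     on a clean exit
 *
 * A call attempted while the CPU is already in PPL_STATE_DISPATCH fails with
 * PPL_EXIT_BAD_CALL, and an exit from any state other than PPL_STATE_DISPATCH
 * spins.
 */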
1346
1347
1348
1349 .text
1350 ppl_exit:
1351 /*
1352 * If we are dealing with an exception, hand off to the first level
1353 * exception handler.
1354 */
1355 cmp x15, #PPL_EXIT_EXCEPTION
1356 b.eq Ljump_to_fleh_handler
1357
1358 /* Restore the original AIF state. */
1359 REENABLE_DAIF x10
1360
1361 /* If this was a panic call from the PPL, reinvoke panic. */
1362 cmp x15, #PPL_EXIT_PANIC_CALL
1363 b.eq Ljump_to_panic_trap_to_debugger
1364
1365 /* Load the preemption count. */
1366 mrs x10, TPIDR_EL1
1367 ldr w12, [x10, ACT_PREEMPT_CNT]
1368
1369 /* Detect underflow */
1370 cbnz w12, Lno_preempt_underflow
1371 b preempt_underflow
1372 Lno_preempt_underflow:
1373
1374 /* Lower the preemption count. */
1375 sub w12, w12, #1
1376 str w12, [x10, ACT_PREEMPT_CNT]
1377
1378 /* Skip ASTs if the preemption count is not zero. */
1379 cbnz x12, Lppl_skip_ast_taken
1380
1381 /* Skip the AST check if interrupts are disabled. */
1382 mrs x1, DAIF
1383 tst x1, #DAIF_IRQF
1384 b.ne Lppl_skip_ast_taken
1385
1386 /* Disable interrupts. */
1387 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)
1388
1389 /* If there is no urgent AST, skip the AST. */
1390 ldr x12, [x10, ACT_CPUDATAP]
1391 ldr x14, [x12, CPU_PENDING_AST]
1392 tst x14, AST_URGENT
1393 b.eq Lppl_defer_ast_taken
1394
1395 /* Stash our return value and return reason. */
1396 mov x20, x0
1397 mov x21, x15
1398
1399 /* Handle the AST. */
1400 bl EXT(ast_taken_kernel)
1401
1402 /* Restore the return value and the return reason. */
1403 mov x15, x21
1404 mov x0, x20
1405
1406 Lppl_defer_ast_taken:
1407 /* Reenable interrupts. */
1408 msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
1409
1410 Lppl_skip_ast_taken:
1411 /* Pop the stack frame. */
1412 ldp x29, x30, [sp, #0x10]
1413 ldp x20, x21, [sp], #0x20
1414
1415 /* Check to see if this was a bad request. */
1416 cmp x15, #PPL_EXIT_BAD_CALL
1417 b.eq Lppl_bad_call
1418
1419 /* Return. */
1420 ARM64_STACK_EPILOG
1421
1422 .align 2
1423 Ljump_to_fleh_handler:
1424 br x25
1425
1426 .align 2
1427 Ljump_to_panic_trap_to_debugger:
1428 b EXT(panic_trap_to_debugger)
1429
1430 Lppl_bad_call:
1431 /* Panic. */
1432 adrp x0, Lppl_bad_call_panic_str@page
1433 add x0, x0, Lppl_bad_call_panic_str@pageoff
1434 b EXT(panic)
1435
1436 .text
1437 .align 2
1438 .globl EXT(ppl_dispatch)
1439 LEXT(ppl_dispatch)
1440 /*
1441 * Save a couple of important registers (implementation detail; x12 has
1442 * the PPL per-CPU data address; x13 is not actually interesting).
1443 */
1444 stp x12, x13, [sp, #-0x10]!
1445
1446 /* Restore the original AIF state. */
1447 REENABLE_DAIF x20
1448
1449 /*
1450 * Note that if the method is NULL, we'll blow up with a prefetch abort,
1451 * but the exception vectors will deal with this properly.
1452 */
1453
1454 /* Invoke the PPL method. */
1455 #ifdef HAS_APPLE_PAC
1456 blraa x10, x9
1457 #else
1458 blr x10
1459 #endif
1460
1461 /* Disable AIF. */
1462 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1463
1464 /* Restore those important registers. */
1465 ldp x12, x13, [sp], #0x10
1466
1467 /* Mark this as a regular return, and hand off to the return path. */
1468 b Lppl_dispatch_exit
1469
1470 .text
1471 .align 2
1472 .globl EXT(ppl_bootstrap_dispatch)
1473 LEXT(ppl_bootstrap_dispatch)
1474 /* Verify the PPL request. */
1475 cmp x15, PMAP_COUNT
1476 b.hs Lppl_fail_bootstrap_dispatch
1477
1478 /* Get the requested PPL routine. */
1479 adrp x9, EXT(ppl_handler_table)@page
1480 add x9, x9, EXT(ppl_handler_table)@pageoff
1481 add x9, x9, x15, lsl #3
1482 ldr x10, [x9]
1483
1484 /* Invoke the requested PPL routine. */
1485 #ifdef HAS_APPLE_PAC
1486 blraa x10, x9
1487 #else
1488 blr x10
1489 #endif
1490 /* Stash off the return value */
1491 mov x20, x0
1492 /* Drop the preemption count */
1493 bl EXT(_enable_preemption)
1494 mov x0, x20
1495
1496 /* Pop the stack frame. */
1497 ldp x29, x30, [sp, #0x10]
1498 ldp x20, x21, [sp], #0x20
1499 #if __has_feature(ptrauth_returns)
1500 retab
1501 #else
1502 ret
1503 #endif
1504
1505 Lppl_fail_bootstrap_dispatch:
1506 /* Pop our stack frame and panic. */
1507 ldp x29, x30, [sp, #0x10]
1508 ldp x20, x21, [sp], #0x20
1509 #if __has_feature(ptrauth_returns)
1510 autibsp
1511 #endif
1512 adrp x0, Lppl_bad_call_panic_str@page
1513 add x0, x0, Lppl_bad_call_panic_str@pageoff
1514 b EXT(panic)
1515
1516 .text
1517 .align 2
1518 .globl EXT(ml_panic_trap_to_debugger)
1519 LEXT(ml_panic_trap_to_debugger)
1520 mrs x10, DAIF
1521 msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
1522
1523 adrp x12, EXT(pmap_ppl_locked_down)@page
1524 ldr w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
1525 cbz w12, Lnot_in_ppl_dispatch
1526
1527 LOAD_PMAP_CPU_DATA x11, x12, x13
1528
1529 ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE]
1530 cmp w12, #PPL_STATE_DISPATCH
1531 b.ne Lnot_in_ppl_dispatch
1532
1533 /* Indicate (for the PPL->kernel transition) that we are panicking. */
1534 mov x15, #PPL_EXIT_PANIC_CALL
1535
1536 /* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
1537 ldr x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
1538 mov sp, x12
1539
1540 mrs x10, DAIF
1541 mov w13, #PPL_STATE_PANIC
1542 str w13, [x11, PMAP_CPU_DATA_PPL_STATE]
1543
1544 /* Now we are ready to exit the PPL. */
1545 b ppl_return_to_kernel_mode
1546 Lnot_in_ppl_dispatch:
1547 REENABLE_DAIF x10
1548 ret
1549
1550 .data
1551 Lppl_bad_call_panic_str:
1552 .asciz "ppl_dispatch: failed due to bad arguments/state"
1553 #else /* XNU_MONITOR */
1554 .text
1555 .align 2
1556 .globl EXT(ml_panic_trap_to_debugger)
1557 LEXT(ml_panic_trap_to_debugger)
1558 ret
1559 #endif /* XNU_MONITOR */
1560
1561 /* ARM64_TODO Is globals_asm.h needed? */
1562 //#include "globals_asm.h"
1563
1564 /* vim: set ts=4: */