Commit | Line | Data |
---|---|---|
5ba3f43e A |
1 | /* |
2 | * Copyright (c) 2011-2013 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #include <machine/asm.h> | |
cb323159 | 30 | #include <arm64/machine_routines_asm.h> |
5ba3f43e A |
31 | #include <arm64/proc_reg.h> |
32 | #include <pexpert/arm64/board_config.h> | |
33 | #include <mach/exception_types.h> | |
34 | #include <mach_kdp.h> | |
35 | #include <config_dtrace.h> | |
36 | #include "assym.s" | |
cb323159 | 37 | #include <arm64/exception_asm.h> |
5ba3f43e | 38 | |
5c9f4661 A |
39 | #if __ARM_KERNEL_PROTECT__ |
40 | #include <arm/pmap.h> | |
41 | #endif | |
42 | ||
c6bf4f31 A |
43 | #if XNU_MONITOR |
44 | /* | |
45 | * CHECK_EXCEPTION_RETURN_DISPATCH_PPL | |
46 | * | |
47 | * Checks if an exception was taken from the PPL, and if so, trampolines back | |
48 | * into the PPL. | |
49 | * x26 - 0 if the exception was taken while in the kernel, 1 if the | |
50 | * exception was taken while in the PPL. | |
51 | */ | |
52 | .macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL | |
53 | cmp x26, xzr | |
54 | b.eq 1f | |
55 | ||
56 | /* Return to the PPL. */ | |
57 | mov x15, #0 | |
58 | mov w10, #PPL_STATE_EXCEPTION | |
59 | #if __APRR_SUPPORTED__ | |
60 | b Ldisable_aif_and_enter_ppl | |
61 | #else | |
62 | #error "XPRR configuration error" | |
63 | #endif /* __APRR_SUPPORTED__ */ | |
64 | 1: | |
65 | .endmacro | |
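/*
 * Usage note: this macro is expanded immediately after each bl EXT(sleh_*)
 * call in fleh_synchronous, fleh_irq, fleh_fiq and fleh_serror below, so an
 * exception taken while in the PPL re-enters the PPL instead of falling
 * through to exception_return_dispatch.
 */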
66 | ||
67 | #if __APRR_SUPPORTED__ | |
68 | /* | |
69 | * EL1_SP0_VECTOR_PPL_CHECK | |
70 | * | |
71 | * Check to see if the exception was taken by the kernel or the PPL. Falls | |
72 | * through if kernel, hands off to the given label if PPL. Expects to run on | |
73 | * SP1. | |
74 | * arg0 - Label to go to if this was a PPL exception. | |
75 | */ | |
76 | .macro EL1_SP0_VECTOR_PPL_CHECK | |
77 | sub sp, sp, ARM_CONTEXT_SIZE | |
78 | stp x0, x1, [sp, SS64_X0] | |
79 | mrs x0, APRR_EL1 | |
80 | MOV64 x1, APRR_EL1_DEFAULT | |
81 | cmp x0, x1 | |
82 | b.ne $0 | |
83 | ldp x0, x1, [sp, SS64_X0] | |
84 | add sp, sp, ARM_CONTEXT_SIZE | |
85 | .endmacro | |
86 | ||
87 | #define STAY_ON_SP1 0 | |
88 | #define SWITCH_TO_SP0 1 | |
89 | ||
90 | #define INVOKE_PREFLIGHT 0 | |
91 | #define NO_INVOKE_PREFLIGHT 1 | |
92 | ||
93 | /* | |
94 | * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE | |
95 | * | |
96 | * Verify whether an exception came from the PPL or from the kernel. If it came | |
97 | * from the PPL, save off the PPL state and transition out of the PPL. | |
98 | * arg0 - Label to go to if this was a kernel exception | |
99 | * arg1 - Label to go to (after leaving the PPL) if this was a PPL exception | |
100 | * arg2 - Indicates if this should switch back to SP0 | |
101 | * x0 - xPRR_EL1_BR1 read by EL1_SP0_VECTOR_PPL_CHECK | |
102 | */ | |
103 | .macro EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE | |
104 | /* Spill some more registers. */ | |
105 | stp x2, x3, [sp, SS64_X2] | |
106 | ||
107 | /* | |
108 | * Check if the PPL is locked down; if not, we can treat this as a | |
109 | * kernel execption. | |
110 | */ | |
111 | adrp x1, EXT(pmap_ppl_locked_down)@page | |
112 | ldr w1, [x1, #EXT(pmap_ppl_locked_down)@pageoff] | |
113 | cbz x1, 2f | |
114 | ||
115 | /* Ensure that APRR_EL1 is actually in PPL mode. */ | |
116 | MOV64 x1, APRR_EL1_PPL | |
117 | cmp x0, x1 | |
118 | b.ne . | |
119 | ||
120 | /* | |
121 | * Check if the CPU is in the PPL; if not we can treat this as a | |
122 | * kernel exception. | |
123 | */ | |
124 | GET_PMAP_CPU_DATA x3, x1, x2 | |
125 | ldr w1, [x3, PMAP_CPU_DATA_PPL_STATE] | |
126 | cmp x1, #PPL_STATE_KERNEL | |
127 | b.eq 2f | |
128 | ||
129 | /* Ensure that the CPU is in the expected PPL state. */ | |
130 | cmp x1, #PPL_STATE_DISPATCH | |
131 | b.ne . | |
132 | ||
133 | /* Mark the CPU as dealing with an exception. */ | |
134 | mov x1, #PPL_STATE_EXCEPTION | |
135 | str w1, [x3, PMAP_CPU_DATA_PPL_STATE] | |
136 | ||
137 | /* Load the bounds of the PPL trampoline. */ | |
138 | adrp x0, EXT(ppl_no_exception_start)@page | |
139 | add x0, x0, EXT(ppl_no_exception_start)@pageoff | |
140 | adrp x1, EXT(ppl_no_exception_end)@page | |
141 | add x1, x1, EXT(ppl_no_exception_end)@pageoff | |
142 | ||
143 | /* | |
144 | * Ensure that the exception did not occur in the trampoline. If it | |
145 | * did, we are either being attacked or our state machine is | |
146 | * horrifically broken. | |
147 | */ | |
148 | mrs x2, ELR_EL1 | |
149 | cmp x2, x0 | |
150 | b.lo 1f | |
151 | cmp x2, x1 | |
152 | b.hi 1f | |
153 | ||
154 | /* We might be under attack; spin. */ | |
155 | b . | |
156 | ||
157 | 1: | |
158 | /* Get the PPL save area. */ | |
159 | mov x1, x3 | |
160 | ldr x0, [x3, PMAP_CPU_DATA_SAVE_AREA] | |
161 | ||
162 | /* Save our x0, x1 state. */ | |
163 | ldp x2, x3, [sp, SS64_X0] | |
164 | stp x2, x3, [x0, SS64_X0] | |
165 | ||
166 | /* Restore SP1 to its original state. */ | |
167 | mov x3, sp | |
168 | add sp, sp, ARM_CONTEXT_SIZE | |
169 | ||
170 | .if $2 == SWITCH_TO_SP0 | |
171 | /* Switch back to SP0. */ | |
172 | msr SPSel, #0 | |
173 | mov x2, sp | |
174 | .else | |
175 | /* Load the SP0 value. */ | |
176 | mrs x2, SP_EL0 | |
177 | .endif | |
178 | ||
179 | /* Save off the stack pointer. */ | |
180 | str x2, [x0, SS64_SP] | |
181 | ||
182 | INIT_SAVED_STATE_FLAVORS x0, w1, w2 | |
183 | ||
184 | /* Save the context that was interrupted. */ | |
185 | ldp x2, x3, [x3, SS64_X2] | |
186 | stp fp, lr, [x0, SS64_FP] | |
187 | SPILL_REGISTERS KERNEL_MODE | |
188 | ||
189 | /* | |
190 | * Stash the function we wish to be invoked to deal with the exception; | |
191 | * usually this is some preflight function for the fleh_* handler. | |
192 | */ | |
193 | adrp x25, $1@page | |
194 | add x25, x25, $1@pageoff | |
195 | ||
196 | /* | |
197 | * Indicate that this is a PPL exception, and that we should return to | |
198 | * the PPL. | |
199 | */ | |
200 | mov x26, #1 | |
201 | ||
202 | /* Transition back to kernel mode. */ | |
203 | mov x15, #PPL_EXIT_EXCEPTION | |
204 | b ppl_return_to_kernel_mode | |
205 | 2: | |
206 | /* Restore SP1 state. */ | |
207 | ldp x2, x3, [sp, SS64_X2] | |
208 | ldp x0, x1, [sp, SS64_X0] | |
209 | add sp, sp, ARM_CONTEXT_SIZE | |
210 | ||
211 | /* Go to the specified label (usually the original exception vector). */ | |
212 | b $0 | |
213 | .endmacro | |
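/*
 * This macro is instantiated by the el1_sp0_*_vector_not_in_kernel_mode
 * stubs near the end of this file, each pairing a Lel1_sp0_*_vector_kernel
 * label (arg0) with the matching fleh_*_from_ppl preflight routine (arg1).
 */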
214 | #endif /* __APRR_SUPPORTED__ */ | |
215 | ||
216 | #endif /* XNU_MONITOR */ | |
5ba3f43e | 217 | |
5ba3f43e A |
218 | #define CBF_DISABLE 0 |
219 | #define CBF_ENABLE 1 | |
220 | ||
221 | .macro COMPARE_BRANCH_FUSION | |
222 | #if defined(APPLE_ARM64_ARCH_FAMILY) | |
223 | mrs $1, ARM64_REG_HID1 | |
224 | .if $0 == CBF_DISABLE | |
225 | orr $1, $1, ARM64_REG_HID1_disCmpBrFusion | |
226 | .else | |
227 | mov $2, ARM64_REG_HID1_disCmpBrFusion | |
228 | bic $1, $1, $2 | |
229 | .endif | |
230 | msr ARM64_REG_HID1, $1 | |
231 | .if $0 == CBF_DISABLE | |
232 | isb sy | |
233 | .endif | |
234 | #endif | |
235 | .endmacro | |
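/*
 * Illustrative use (hypothetical call site; the scratch registers are chosen
 * by the caller):
 *     COMPARE_BRANCH_FUSION CBF_DISABLE, x0, x1
 *     ...
 *     COMPARE_BRANCH_FUSION CBF_ENABLE, x0, x1
 * $1 holds the HID1 value read-modify-written above; $2 is only used as a
 * scratch mask in the CBF_ENABLE case.
 */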
236 | ||
5c9f4661 A |
237 | /* |
238 | * MAP_KERNEL | |
239 | * | |
240 | * Restores the kernel EL1 mappings, if necessary. | |
241 | * | |
242 | * This may mutate x18. | |
243 | */ | |
244 | .macro MAP_KERNEL | |
245 | #if __ARM_KERNEL_PROTECT__ | |
246 | /* Switch to the kernel ASID (low bit set) for the task. */ | |
247 | mrs x18, TTBR0_EL1 | |
248 | orr x18, x18, #(1 << TTBR_ASID_SHIFT) | |
249 | msr TTBR0_EL1, x18 | |
250 | ||
251 | /* | |
252 | * We eschew some barriers on Apple CPUs, as relative ordering of writes | |
253 | * to the TTBRs and writes to the TCR should be ensured by the | |
254 | * microarchitecture. | |
255 | */ | |
256 | #if !defined(APPLE_ARM64_ARCH_FAMILY) | |
257 | isb sy | |
258 | #endif | |
259 | ||
260 | /* | |
261 | * Update the TCR to map the kernel now that we are using the kernel | |
262 | * ASID. | |
263 | */ | |
264 | MOV64 x18, TCR_EL1_BOOT | |
265 | msr TCR_EL1, x18 | |
266 | isb sy | |
267 | #endif /* __ARM_KERNEL_PROTECT__ */ | |
268 | .endmacro | |
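/*
 * The inverse transition (user ASID and TCR_EL1_USER) is performed on the
 * exception return path, in the __ARM_KERNEL_PROTECT__ block just before the
 * eret at the end of Lexception_return_restore_registers below.
 */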
269 | ||
270 | /* | |
271 | * BRANCH_TO_KVA_VECTOR | |
272 | * | |
273 | * Branches to the requested long exception vector in the kernelcache. | |
274 | * arg0 - The label to branch to | |
275 | * arg1 - The index of the label in exc_vectors_tables | |
276 | * | |
277 | * This may mutate x18. | |
278 | */ | |
279 | .macro BRANCH_TO_KVA_VECTOR | |
280 | #if __ARM_KERNEL_PROTECT__ | |
281 | /* | |
282 | * Find the kernelcache table for the exception vectors by accessing | |
283 | * the per-CPU data. | |
284 | */ | |
285 | mrs x18, TPIDR_EL1 | |
286 | ldr x18, [x18, ACT_CPUDATAP] | |
287 | ldr x18, [x18, CPU_EXC_VECTORS] | |
288 | ||
289 | /* | |
290 | * Get the handler for this exception and jump to it. | |
291 | */ | |
292 | ldr x18, [x18, #($1 << 3)] | |
293 | br x18 | |
294 | #else | |
295 | b $0 | |
296 | #endif /* __ARM_KERNEL_PROTECT__ */ | |
297 | .endmacro | |
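/*
 * arg1 indexes exc_vectors_table (defined below); each entry is a .quad,
 * hence the "$1 << 3" byte offset used above.
 */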
298 | ||
299 | #if __ARM_KERNEL_PROTECT__ | |
a39ff7e2 | 300 | .text |
5c9f4661 A |
301 | .align 3 |
302 | .globl EXT(exc_vectors_table) | |
303 | LEXT(exc_vectors_table) | |
cb323159 A |
304 | /* Table of exception handlers. |
305 | * These handlers sometimes contain deadloops. | |
306 | * It's nice to have symbols for them when debugging. */ | |
307 | .quad el1_sp0_synchronous_vector_long | |
308 | .quad el1_sp0_irq_vector_long | |
309 | .quad el1_sp0_fiq_vector_long | |
310 | .quad el1_sp0_serror_vector_long | |
311 | .quad el1_sp1_synchronous_vector_long | |
312 | .quad el1_sp1_irq_vector_long | |
313 | .quad el1_sp1_fiq_vector_long | |
314 | .quad el1_sp1_serror_vector_long | |
315 | .quad el0_synchronous_vector_64_long | |
316 | .quad el0_irq_vector_64_long | |
317 | .quad el0_fiq_vector_64_long | |
318 | .quad el0_serror_vector_64_long | |
5c9f4661 A |
319 | #endif /* __ARM_KERNEL_PROTECT__ */ |
320 | ||
5ba3f43e | 321 | .text |
5c9f4661 A |
322 | #if __ARM_KERNEL_PROTECT__ |
323 | /* | |
323 | * We need this to be on a page boundary so that we may avoid mapping | |
325 | * other text along with it. As this must be on the VM page boundary | |
326 | * (due to how the coredumping code currently works), this will be a | |
327 | * 16KB page boundary. | |
328 | */ | |
329 | .align 14 | |
330 | #else | |
5ba3f43e | 331 | .align 12 |
5c9f4661 | 332 | #endif /* __ARM_KERNEL_PROTECT__ */ |
5ba3f43e A |
333 | .globl EXT(ExceptionVectorsBase) |
334 | LEXT(ExceptionVectorsBase) | |
335 | Lel1_sp0_synchronous_vector: | |
cb323159 | 336 | BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0 |
5c9f4661 A |
337 | |
338 | .text | |
339 | .align 7 | |
340 | Lel1_sp0_irq_vector: | |
cb323159 | 341 | BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1 |
5c9f4661 A |
342 | |
343 | .text | |
344 | .align 7 | |
345 | Lel1_sp0_fiq_vector: | |
cb323159 | 346 | BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2 |
5c9f4661 A |
347 | |
348 | .text | |
349 | .align 7 | |
350 | Lel1_sp0_serror_vector: | |
cb323159 | 351 | BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3 |
5c9f4661 A |
352 | |
353 | .text | |
354 | .align 7 | |
355 | Lel1_sp1_synchronous_vector: | |
cb323159 | 356 | BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4 |
5c9f4661 A |
357 | |
358 | .text | |
359 | .align 7 | |
360 | Lel1_sp1_irq_vector: | |
cb323159 | 361 | BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5 |
5c9f4661 A |
362 | |
363 | .text | |
364 | .align 7 | |
365 | Lel1_sp1_fiq_vector: | |
cb323159 | 366 | BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6 |
5c9f4661 A |
367 | |
368 | .text | |
369 | .align 7 | |
370 | Lel1_sp1_serror_vector: | |
cb323159 | 371 | BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7 |
5c9f4661 A |
372 | |
373 | .text | |
374 | .align 7 | |
375 | Lel0_synchronous_vector_64: | |
376 | MAP_KERNEL | |
cb323159 | 377 | BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8 |
5c9f4661 A |
378 | |
379 | .text | |
380 | .align 7 | |
381 | Lel0_irq_vector_64: | |
382 | MAP_KERNEL | |
cb323159 | 383 | BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9 |
5c9f4661 A |
384 | |
385 | .text | |
386 | .align 7 | |
387 | Lel0_fiq_vector_64: | |
388 | MAP_KERNEL | |
cb323159 | 389 | BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10 |
5c9f4661 A |
390 | |
391 | .text | |
392 | .align 7 | |
393 | Lel0_serror_vector_64: | |
394 | MAP_KERNEL | |
cb323159 | 395 | BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11 |
5c9f4661 A |
396 | |
397 | /* Fill out the rest of the page */ | |
398 | .align 12 | |
399 | ||
400 | /********************************* | |
401 | * END OF EXCEPTION VECTORS PAGE * | |
402 | *********************************/ | |
403 | ||
404 | .macro EL1_SP0_VECTOR | |
405 | msr SPSel, #0 // Switch to SP0 | |
406 | sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame | |
407 | stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame | |
408 | add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer | |
409 | str x0, [sp, SS64_SP] // Save stack pointer to exception frame | |
410 | stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame | |
411 | INIT_SAVED_STATE_FLAVORS sp, w0, w1 | |
412 | mov x0, sp // Copy saved state pointer to x0 | |
413 | .endmacro | |
414 | ||
cb323159 | 415 | el1_sp0_synchronous_vector_long: |
c6bf4f31 A |
416 | #if XNU_MONITOR && __APRR_SUPPORTED__ |
417 | /* | |
418 | * We do not have enough space for new instructions in this vector, so | |
419 | * jump to outside code to check if this exception was taken in the PPL. | |
420 | */ | |
421 | b el1_sp0_synchronous_vector_ppl_check | |
422 | Lel1_sp0_synchronous_vector_kernel: | |
423 | #endif | |
5ba3f43e A |
424 | sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack |
425 | stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack | |
426 | mrs x1, ESR_EL1 // Get the exception syndrome | |
427 | /* If the stack pointer is corrupt, it will manifest either as a data abort | |
428 | * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check | |
429 | * these quickly by testing bit 5 of the exception class. | |
430 | */ | |
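/* Both syndromes have bit 5 of the exception class set (EC >= 0x20), so this
 * is a cheap superset filter; check_kernel_stack does the precise match. */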
431 | tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid | |
432 | mrs x0, SP_EL0 // Get SP_EL0 | |
433 | stp fp, lr, [sp, SS64_FP] // Save fp, lr to the stack | |
434 | str x0, [sp, SS64_SP] // Save sp to the stack | |
435 | bl check_kernel_stack | |
436 | ldp fp, lr, [sp, SS64_FP] // Restore fp, lr | |
437 | Lkernel_stack_valid: | |
438 | ldp x0, x1, [sp, SS64_X0] // Restore x0, x1 | |
439 | add sp, sp, ARM_CONTEXT_SIZE // Restore SP1 | |
440 | EL1_SP0_VECTOR | |
cb323159 A |
441 | adrp x1, EXT(fleh_synchronous)@page // Load address for fleh |
442 | add x1, x1, EXT(fleh_synchronous)@pageoff | |
5ba3f43e A |
443 | b fleh_dispatch64 |
444 | ||
cb323159 | 445 | el1_sp0_irq_vector_long: |
c6bf4f31 A |
446 | #if XNU_MONITOR && __APRR_SUPPORTED__ |
447 | EL1_SP0_VECTOR_PPL_CHECK el1_sp0_irq_vector_not_in_kernel_mode | |
448 | Lel1_sp0_irq_vector_kernel: | |
449 | #endif | |
5ba3f43e A |
450 | EL1_SP0_VECTOR |
451 | mrs x1, TPIDR_EL1 | |
452 | ldr x1, [x1, ACT_CPUDATAP] | |
453 | ldr x1, [x1, CPU_ISTACKPTR] | |
454 | mov sp, x1 | |
cb323159 A |
455 | adrp x1, EXT(fleh_irq)@page // Load address for fleh |
456 | add x1, x1, EXT(fleh_irq)@pageoff | |
5ba3f43e A |
457 | b fleh_dispatch64 |
458 | ||
cb323159 | 459 | el1_sp0_fiq_vector_long: |
5ba3f43e | 460 | // ARM64_TODO write optimized decrementer |
c6bf4f31 A |
461 | #if XNU_MONITOR && __APRR_SUPPORTED__ |
462 | EL1_SP0_VECTOR_PPL_CHECK el1_sp0_fiq_vector_not_in_kernel_mode | |
463 | Lel1_sp0_fiq_vector_kernel: | |
464 | #endif | |
5ba3f43e A |
465 | EL1_SP0_VECTOR |
466 | mrs x1, TPIDR_EL1 | |
467 | ldr x1, [x1, ACT_CPUDATAP] | |
468 | ldr x1, [x1, CPU_ISTACKPTR] | |
469 | mov sp, x1 | |
cb323159 A |
470 | adrp x1, EXT(fleh_fiq)@page // Load address for fleh |
471 | add x1, x1, EXT(fleh_fiq)@pageoff | |
5ba3f43e A |
472 | b fleh_dispatch64 |
473 | ||
cb323159 | 474 | el1_sp0_serror_vector_long: |
c6bf4f31 A |
475 | #if XNU_MONITOR && __APRR_SUPPORTED__ |
476 | EL1_SP0_VECTOR_PPL_CHECK el1_sp0_serror_vector_not_in_kernel_mode | |
477 | Lel1_sp0_serror_vector_kernel: | |
478 | #endif | |
5ba3f43e | 479 | EL1_SP0_VECTOR |
cb323159 A |
480 | adrp x1, EXT(fleh_serror)@page // Load address for fleh |
481 | add x1, x1, EXT(fleh_serror)@pageoff | |
5ba3f43e A |
482 | b fleh_dispatch64 |
483 | ||
484 | .macro EL1_SP1_VECTOR | |
485 | sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame | |
486 | stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame | |
487 | add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer | |
488 | str x0, [sp, SS64_SP] // Save stack pointer to exception frame | |
489 | INIT_SAVED_STATE_FLAVORS sp, w0, w1 | |
490 | stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame | |
491 | mov x0, sp // Copy saved state pointer to x0 | |
492 | .endmacro | |
493 | ||
cb323159 | 494 | el1_sp1_synchronous_vector_long: |
d9a64523 A |
495 | b check_exception_stack |
496 | Lel1_sp1_synchronous_valid_stack: | |
5ba3f43e A |
497 | #if defined(KERNEL_INTEGRITY_KTRR) |
498 | b check_ktrr_sctlr_trap | |
499 | Lel1_sp1_synchronous_vector_continue: | |
500 | #endif | |
501 | EL1_SP1_VECTOR | |
502 | adrp x1, fleh_synchronous_sp1@page | |
503 | add x1, x1, fleh_synchronous_sp1@pageoff | |
504 | b fleh_dispatch64 | |
505 | ||
cb323159 | 506 | el1_sp1_irq_vector_long: |
5ba3f43e A |
507 | EL1_SP1_VECTOR |
508 | adrp x1, fleh_irq_sp1@page | |
509 | add x1, x1, fleh_irq_sp1@pageoff | |
510 | b fleh_dispatch64 | |
511 | ||
cb323159 | 512 | el1_sp1_fiq_vector_long: |
5ba3f43e A |
513 | EL1_SP1_VECTOR |
514 | adrp x1, fleh_fiq_sp1@page | |
515 | add x1, x1, fleh_fiq_sp1@pageoff | |
516 | b fleh_dispatch64 | |
517 | ||
cb323159 | 518 | el1_sp1_serror_vector_long: |
5ba3f43e A |
519 | EL1_SP1_VECTOR |
520 | adrp x1, fleh_serror_sp1@page | |
521 | add x1, x1, fleh_serror_sp1@pageoff | |
522 | b fleh_dispatch64 | |
523 | ||
cb323159 A |
524 | #if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) |
525 | /** | |
526 | * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1. | |
527 | * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start. | |
528 | */ | |
529 | #define SCTLR_EL1_INITIAL (SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED) | |
530 | #define SCTLR_EL1_EXPECTED ((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED) | |
531 | #endif | |
532 | ||
5ba3f43e | 533 | .macro EL0_64_VECTOR |
d9a64523 | 534 | mov x18, #0 // Zero x18 to avoid leaking data to user SS |
5ba3f43e | 535 | stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack |
cb323159 A |
536 | #if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) |
537 | // enable JOP for kernel | |
538 | adrp x0, EXT(const_boot_args)@page | |
539 | add x0, x0, EXT(const_boot_args)@pageoff | |
540 | ldr x0, [x0, BA_BOOT_FLAGS] | |
541 | and x0, x0, BA_BOOT_FLAGS_DISABLE_JOP | |
542 | cbnz x0, 1f | |
543 | // if disable jop is set, don't touch SCTLR (it's already off) | |
544 | // if (!boot_args->kernel_jop_disable) { | |
545 | mrs x0, SCTLR_EL1 | |
546 | tbnz x0, SCTLR_PACIA_ENABLED_SHIFT, 1f | |
547 | // turn on jop for kernel if it isn't already on | |
548 | // if (!jop_running) { | |
549 | MOV64 x1, SCTLR_JOP_KEYS_ENABLED | |
550 | orr x0, x0, x1 | |
551 | msr SCTLR_EL1, x0 | |
552 | isb sy | |
553 | MOV64 x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED | |
554 | cmp x0, x1 | |
555 | bne . | |
556 | // } | |
557 | // } | |
558 | 1: | |
559 | #endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */ | |
5ba3f43e A |
560 | mrs x0, TPIDR_EL1 // Load the thread register |
561 | mrs x1, SP_EL0 // Load the user stack pointer | |
562 | add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer | |
563 | ldr x0, [x0] // Load the user context pointer | |
564 | str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB | |
565 | msr SP_EL0, x0 // Copy the user PCB pointer to SP0 | |
566 | ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack | |
567 | msr SPSel, #0 // Switch to SP0 | |
568 | stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB | |
569 | stp fp, lr, [sp, SS64_FP] // Save fp and lr to the user PCB | |
d9a64523 A |
570 | mov fp, #0 // Clear the fp and lr for the |
571 | mov lr, #0 // debugger stack frame | |
5ba3f43e A |
572 | mov x0, sp // Copy the user PCB pointer to x0 |
573 | .endmacro | |
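/*
 * Note: at the end of this macro, SP0 (and hence sp) points at the thread's
 * saved-state area rather than a stack; each el0_*_vector_64_long below loads
 * a proper kernel or interrupt stack into sp before branching to
 * fleh_dispatch64.
 */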
574 | ||
5c9f4661 | 575 | |
cb323159 | 576 | el0_synchronous_vector_64_long: |
5ba3f43e A |
577 | EL0_64_VECTOR |
578 | mrs x1, TPIDR_EL1 // Load the thread register | |
579 | ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1 | |
580 | mov sp, x1 // Set the stack pointer to the kernel stack | |
cb323159 A |
581 | adrp x1, EXT(fleh_synchronous)@page // Load address for fleh |
582 | add x1, x1, EXT(fleh_synchronous)@pageoff | |
5ba3f43e A |
583 | b fleh_dispatch64 |
584 | ||
cb323159 | 585 | el0_irq_vector_64_long: |
5ba3f43e A |
586 | EL0_64_VECTOR |
587 | mrs x1, TPIDR_EL1 | |
588 | ldr x1, [x1, ACT_CPUDATAP] | |
589 | ldr x1, [x1, CPU_ISTACKPTR] | |
590 | mov sp, x1 // Set the stack pointer to the kernel stack | |
cb323159 A |
591 | adrp x1, EXT(fleh_irq)@page // load address for fleh |
592 | add x1, x1, EXT(fleh_irq)@pageoff | |
5ba3f43e A |
593 | b fleh_dispatch64 |
594 | ||
cb323159 | 595 | el0_fiq_vector_64_long: |
5ba3f43e A |
596 | EL0_64_VECTOR |
597 | mrs x1, TPIDR_EL1 | |
598 | ldr x1, [x1, ACT_CPUDATAP] | |
599 | ldr x1, [x1, CPU_ISTACKPTR] | |
600 | mov sp, x1 // Set the stack pointer to the kernel stack | |
cb323159 A |
601 | adrp x1, EXT(fleh_fiq)@page // load address for fleh |
602 | add x1, x1, EXT(fleh_fiq)@pageoff | |
5ba3f43e A |
603 | b fleh_dispatch64 |
604 | ||
cb323159 | 605 | el0_serror_vector_64_long: |
5ba3f43e A |
606 | EL0_64_VECTOR |
607 | mrs x1, TPIDR_EL1 // Load the thread register | |
608 | ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1 | |
609 | mov sp, x1 // Set the stack pointer to the kernel stack | |
cb323159 A |
610 | adrp x1, EXT(fleh_serror)@page // load address for fleh |
611 | add x1, x1, EXT(fleh_serror)@pageoff | |
5ba3f43e A |
612 | b fleh_dispatch64 |
613 | ||
c6bf4f31 A |
614 | #if XNU_MONITOR && __APRR_SUPPORTED__ |
615 | el1_sp0_synchronous_vector_ppl_check: | |
616 | EL1_SP0_VECTOR_PPL_CHECK el1_sp0_synchronous_vector_not_in_kernel_mode | |
617 | ||
618 | /* Jump back to the primary exception vector if we fell through. */ | |
619 | b Lel1_sp0_synchronous_vector_kernel | |
620 | #endif | |
5ba3f43e | 621 | |
d9a64523 A |
622 | /* |
623 | * check_exception_stack | |
624 | * | |
625 | * Verifies that the stack pointer at SP1 is within the exception stack. | |
626 | * If not, we simply hang, as we have no more stack to fall back on. | |
627 | */ | |
628 | ||
629 | .text | |
630 | .align 2 | |
631 | check_exception_stack: | |
632 | mrs x18, TPIDR_EL1 // Get thread pointer | |
633 | cbz x18, Lvalid_exception_stack // Thread context may not be set early in boot | |
634 | ldr x18, [x18, ACT_CPUDATAP] | |
635 | cbz x18, . // If thread context is set, cpu data should be too | |
636 | ldr x18, [x18, CPU_EXCEPSTACK_TOP] | |
637 | cmp sp, x18 | |
638 | b.gt . // Hang if above exception stack top | |
639 | sub x18, x18, EXCEPSTACK_SIZE_NUM // Find bottom of exception stack | |
640 | cmp sp, x18 | |
641 | b.lt . // Hang if below exception stack bottom | |
642 | Lvalid_exception_stack: | |
643 | mov x18, #0 | |
644 | b Lel1_sp1_synchronous_valid_stack | |
645 | ||
5ba3f43e A |
646 | /* |
647 | * check_kernel_stack | |
648 | * | |
649 | * Verifies that the kernel stack is aligned and mapped within an expected | |
650 | * stack address range. Note: happens before saving registers (in case we can't | |
651 | * save to kernel stack). | |
652 | * | |
653 | * Expects: | |
654 | * {x0, x1, sp} - saved | |
655 | * x0 - SP_EL0 | |
656 | * x1 - Exception syndrome | |
657 | * sp - Saved state | |
658 | */ | |
659 | .text | |
660 | .align 2 | |
661 | check_kernel_stack: | |
662 | stp x2, x3, [sp, SS64_X2] // Save {x2-x3} | |
663 | and x1, x1, #ESR_EC_MASK // Mask the exception class | |
664 | mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT) | |
665 | cmp x1, x2 // If we have a stack alignment exception | |
666 | b.eq Lcorrupt_stack // ...the stack is definitely corrupted | |
667 | mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT) | |
668 | cmp x1, x2 // If we have a data abort, we need to | |
669 | b.ne Lvalid_stack // ...validate the stack pointer | |
670 | mrs x1, TPIDR_EL1 // Get thread pointer | |
671 | Ltest_kstack: | |
672 | ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack | |
673 | sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack | |
674 | cmp x0, x2 // if (SP_EL0 >= kstack top) | |
675 | b.ge Ltest_istack // jump to istack test | |
676 | cmp x0, x3 // if (SP_EL0 > kstack bottom) | |
677 | b.gt Lvalid_stack // stack pointer valid | |
678 | Ltest_istack: | |
679 | ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr | |
680 | ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack | |
d9a64523 | 681 | sub x3, x2, INTSTACK_SIZE_NUM // Find bottom of istack |
5ba3f43e | 682 | cmp x0, x2 // if (SP_EL0 >= istack top) |
5ba3f43e | 683 | b.ge Lcorrupt_stack // corrupt stack pointer |
d9a64523 | 684 | cmp x0, x3 // if (SP_EL0 > istack bottom) |
5ba3f43e A |
685 | b.gt Lvalid_stack // stack pointer valid |
686 | Lcorrupt_stack: | |
687 | INIT_SAVED_STATE_FLAVORS sp, w0, w1 | |
688 | mov x0, sp // Copy exception frame pointer to x0 | |
689 | adrp x1, fleh_invalid_stack@page // Load address for fleh | |
690 | add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there | |
691 | ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3} | |
692 | b fleh_dispatch64 | |
693 | Lvalid_stack: | |
694 | ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3} | |
695 | ret | |
696 | ||
697 | #if defined(KERNEL_INTEGRITY_KTRR) | |
698 | .text | |
699 | .align 2 | |
700 | check_ktrr_sctlr_trap: | |
701 | /* We may abort on an instruction fetch on reset when enabling the MMU by | |
702 | * writing SCTLR_EL1 because the page containing the privileged instruction is | |
703 | * not executable at EL1 (due to KTRR). The abort happens only on SP1 which | |
704 | * would otherwise panic unconditionally. Check for the condition and return | |
705 | * safe execution to the caller on behalf of the faulting function. | |
706 | * | |
707 | * Expected register state: | |
708 | * x22 - Kernel virtual base | |
709 | * x23 - Kernel physical base | |
710 | */ | |
711 | sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack | |
712 | stp x0, x1, [sp, SS64_X0] // Stash x0, x1 | |
713 | mrs x0, ESR_EL1 // Check ESR for instr. fetch abort | |
714 | and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC | |
715 | movz w1, #0x8600, lsl #16 // Expected ESR: instr. fetch abort from EL1 (EC 0x21), IL set |
716 | movk w1, #0x0000 // Low halfword is zero (ESR.ISS.IFSC already masked off above) |
717 | cmp x0, x1 | |
718 | mrs x0, ELR_EL1 // Check for expected abort address | |
719 | adrp x1, _pinst_set_sctlr_trap_addr@page | |
720 | add x1, x1, _pinst_set_sctlr_trap_addr@pageoff | |
721 | sub x1, x1, x22 // Convert to physical address | |
722 | add x1, x1, x23 | |
723 | ccmp x0, x1, #0, eq | |
724 | ldp x0, x1, [sp, SS64_X0] // Restore x0, x1 | |
725 | add sp, sp, ARM_CONTEXT_SIZE // Clean up stack | |
726 | b.ne Lel1_sp1_synchronous_vector_continue | |
727 | msr ELR_EL1, lr // Return to caller | |
728 | eret | |
c6bf4f31 | 729 | #endif /* defined(KERNEL_INTEGRITY_KTRR) */ |
5ba3f43e A |
730 | |
731 | /* 64-bit first level exception handler dispatcher. | |
732 | * Completes register context saving and branches to FLEH. | |
733 | * Expects: | |
734 | * {x0, x1, fp, lr, sp} - saved | |
735 | * x0 - arm_context_t | |
736 | * x1 - address of FLEH | |
737 | * fp - previous stack frame if EL1 | |
738 | * lr - unused | |
739 | * sp - kernel stack | |
740 | */ | |
741 | .text | |
742 | .align 2 | |
743 | fleh_dispatch64: | |
744 | /* Save arm_saved_state64 */ | |
cb323159 | 745 | SPILL_REGISTERS KERNEL_MODE |
5ba3f43e | 746 | |
a39ff7e2 A |
747 | /* If exception is from userspace, zero unused registers */ |
748 | and x23, x23, #(PSR64_MODE_EL_MASK) | |
749 | cmp x23, #(PSR64_MODE_EL0) | |
5ba3f43e | 750 | bne 1f |
a39ff7e2 | 751 | |
d9a64523 A |
752 | mov x2, #0 |
753 | mov x3, #0 | |
754 | mov x4, #0 | |
755 | mov x5, #0 | |
756 | mov x6, #0 | |
757 | mov x7, #0 | |
758 | mov x8, #0 | |
759 | mov x9, #0 | |
760 | mov x10, #0 | |
761 | mov x11, #0 | |
762 | mov x12, #0 | |
763 | mov x13, #0 | |
764 | mov x14, #0 | |
765 | mov x15, #0 | |
766 | mov x16, #0 | |
767 | mov x17, #0 | |
768 | mov x18, #0 | |
769 | mov x19, #0 | |
770 | mov x20, #0 | |
a39ff7e2 | 771 | /* x21, x22 cleared in common case below */ |
d9a64523 A |
772 | mov x23, #0 |
773 | mov x24, #0 | |
774 | mov x25, #0 | |
c6bf4f31 | 775 | #if !XNU_MONITOR |
d9a64523 | 776 | mov x26, #0 |
c6bf4f31 | 777 | #endif |
d9a64523 A |
778 | mov x27, #0 |
779 | mov x28, #0 | |
a39ff7e2 | 780 | /* fp/lr already cleared by EL0_64_VECTOR */ |
5ba3f43e A |
781 | 1: |
782 | ||
783 | mov x21, x0 // Copy arm_context_t pointer to x21 | |
784 | mov x22, x1 // Copy handler routine to x22 | |
785 | ||
c6bf4f31 A |
786 | #if XNU_MONITOR |
787 | /* Zero x26 to indicate that this should not return to the PPL. */ | |
788 | mov x26, #0 | |
789 | #endif | |
5ba3f43e A |
790 | |
791 | #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME | |
792 | tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from | |
793 | b.ne 1f // kernel mode, so skip precise time update | |
794 | PUSH_FRAME | |
795 | bl EXT(timer_state_event_user_to_kernel) | |
796 | POP_FRAME | |
797 | mov x0, x21 // Reload arm_context_t pointer | |
798 | 1: | |
799 | #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ | |
800 | ||
801 | /* Dispatch to FLEH */ | |
802 | ||
803 | br x22 | |
804 | ||
805 | ||
806 | .text | |
807 | .align 2 | |
cb323159 A |
808 | .global EXT(fleh_synchronous) |
809 | LEXT(fleh_synchronous) | |
5ba3f43e A |
810 | mrs x1, ESR_EL1 // Load exception syndrome |
811 | mrs x2, FAR_EL1 // Load fault address | |
812 | ||
813 | /* At this point, the LR contains the value of ELR_EL1. In the case of an | |
814 | * instruction prefetch abort, this will be the faulting pc, which we know | |
815 | * to be invalid. This will prevent us from backtracing through the | |
816 | * exception if we put it in our stack frame, so we load the LR from the | |
817 | * exception saved state instead. | |
818 | */ | |
819 | and w3, w1, #(ESR_EC_MASK) | |
820 | lsr w3, w3, #(ESR_EC_SHIFT) | |
821 | mov w4, #(ESR_EC_IABORT_EL1) | |
822 | cmp w3, w4 | |
823 | b.eq Lfleh_sync_load_lr | |
824 | Lvalid_link_register: | |
825 | ||
826 | PUSH_FRAME | |
827 | bl EXT(sleh_synchronous) | |
828 | POP_FRAME | |
829 | ||
c6bf4f31 A |
830 | #if XNU_MONITOR |
831 | CHECK_EXCEPTION_RETURN_DISPATCH_PPL | |
832 | #endif | |
5ba3f43e A |
833 | |
834 | b exception_return_dispatch | |
835 | ||
836 | Lfleh_sync_load_lr: | |
837 | ldr lr, [x0, SS64_LR] | |
838 | b Lvalid_link_register | |
839 | ||
840 | /* Shared prologue code for fleh_irq and fleh_fiq. | |
841 | * Does any interrupt bookkeeping we may want to do | |
842 | * before invoking the handler proper. | |
843 | * Expects: | |
844 | * x0 - arm_context_t | |
845 | * x23 - CPSR | |
846 | * fp - Undefined live value (we may push a frame) | |
847 | * lr - Undefined live value (we may push a frame) | |
848 | * sp - Interrupt stack for the current CPU | |
849 | */ | |
850 | .macro BEGIN_INTERRUPT_HANDLER | |
851 | mrs x22, TPIDR_EL1 | |
852 | ldr x23, [x22, ACT_CPUDATAP] // Get current cpu | |
853 | /* Update IRQ count */ | |
854 | ldr w1, [x23, CPU_STAT_IRQ] | |
855 | add w1, w1, #1 // Increment count | |
856 | str w1, [x23, CPU_STAT_IRQ] // Update IRQ count | |
857 | ldr w1, [x23, CPU_STAT_IRQ_WAKE] | |
858 | add w1, w1, #1 // Increment count | |
859 | str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count | |
860 | /* Increment preempt count */ | |
861 | ldr w1, [x22, ACT_PREEMPT_CNT] | |
862 | add w1, w1, #1 | |
863 | str w1, [x22, ACT_PREEMPT_CNT] | |
864 | /* Store context in int state */ | |
865 | str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state | |
866 | .endmacro | |
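/*
 * Note: raising ACT_PREEMPT_CNT here keeps the interrupted thread from being
 * preempted while its saved context is parked in CPU_INT_STATE;
 * END_INTERRUPT_HANDLER clears that pointer and drops the count again.
 */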
867 | ||
868 | /* Shared epilogue code for fleh_irq and fleh_fiq. | |
869 | * Cleans up after the prologue, and may do a bit more | |
870 | * bookkeeping (kdebug related). | |
871 | * Expects: | |
872 | * x22 - Live TPIDR_EL1 value (thread address) | |
873 | * x23 - Address of the current CPU data structure | |
874 | * w24 - 0 if kdebug is disabled, nonzero otherwise | |
875 | * fp - Undefined live value (we may push a frame) | |
876 | * lr - Undefined live value (we may push a frame) | |
877 | * sp - Interrupt stack for the current CPU | |
878 | */ | |
879 | .macro END_INTERRUPT_HANDLER | |
880 | /* Clear int context */ | |
881 | str xzr, [x23, CPU_INT_STATE] | |
882 | /* Decrement preempt count */ | |
883 | ldr w0, [x22, ACT_PREEMPT_CNT] | |
884 | cbnz w0, 1f // Detect underflow | |
885 | b preempt_underflow | |
886 | 1: | |
887 | sub w0, w0, #1 | |
888 | str w0, [x22, ACT_PREEMPT_CNT] | |
889 | /* Switch back to kernel stack */ | |
890 | ldr x0, [x22, TH_KSTACKPTR] | |
891 | mov sp, x0 | |
892 | .endmacro | |
893 | ||
894 | .text | |
895 | .align 2 | |
cb323159 A |
896 | .global EXT(fleh_irq) |
897 | LEXT(fleh_irq) | |
5ba3f43e A |
898 | BEGIN_INTERRUPT_HANDLER |
899 | PUSH_FRAME | |
900 | bl EXT(sleh_irq) | |
901 | POP_FRAME | |
902 | END_INTERRUPT_HANDLER | |
903 | ||
c6bf4f31 A |
904 | #if XNU_MONITOR |
905 | CHECK_EXCEPTION_RETURN_DISPATCH_PPL | |
906 | #endif | |
5ba3f43e A |
907 | |
908 | b exception_return_dispatch | |
909 | ||
910 | .text | |
911 | .align 2 | |
912 | .global EXT(fleh_fiq_generic) | |
913 | LEXT(fleh_fiq_generic) | |
914 | PANIC_UNIMPLEMENTED | |
915 | ||
916 | .text | |
917 | .align 2 | |
cb323159 A |
918 | .global EXT(fleh_fiq) |
919 | LEXT(fleh_fiq) | |
5ba3f43e A |
920 | BEGIN_INTERRUPT_HANDLER |
921 | PUSH_FRAME | |
922 | bl EXT(sleh_fiq) | |
923 | POP_FRAME | |
924 | END_INTERRUPT_HANDLER | |
925 | ||
c6bf4f31 A |
926 | #if XNU_MONITOR |
927 | CHECK_EXCEPTION_RETURN_DISPATCH_PPL | |
928 | #endif | |
5ba3f43e A |
929 | |
930 | b exception_return_dispatch | |
931 | ||
932 | .text | |
933 | .align 2 | |
cb323159 A |
934 | .global EXT(fleh_serror) |
935 | LEXT(fleh_serror) | |
5ba3f43e A |
936 | mrs x1, ESR_EL1 // Load exception syndrome |
937 | mrs x2, FAR_EL1 // Load fault address | |
938 | ||
939 | PUSH_FRAME | |
940 | bl EXT(sleh_serror) | |
941 | POP_FRAME | |
942 | ||
c6bf4f31 A |
943 | #if XNU_MONITOR |
944 | CHECK_EXCEPTION_RETURN_DISPATCH_PPL | |
945 | #endif | |
5ba3f43e A |
946 | |
947 | b exception_return_dispatch | |
948 | ||
949 | /* | |
950 | * Register state saved before we get here. | |
951 | */ | |
952 | .text | |
953 | .align 2 | |
954 | fleh_invalid_stack: | |
955 | mrs x1, ESR_EL1 // Load exception syndrome | |
956 | str x1, [x0, SS64_ESR] | |
957 | mrs x2, FAR_EL1 // Load fault address | |
958 | str x2, [x0, SS64_FAR] | |
959 | PUSH_FRAME | |
960 | bl EXT(sleh_invalid_stack) // Shouldn't return! | |
961 | b . | |
962 | ||
963 | .text | |
964 | .align 2 | |
965 | fleh_synchronous_sp1: | |
966 | mrs x1, ESR_EL1 // Load exception syndrome | |
967 | str x1, [x0, SS64_ESR] | |
968 | mrs x2, FAR_EL1 // Load fault address | |
969 | str x2, [x0, SS64_FAR] | |
970 | PUSH_FRAME | |
971 | bl EXT(sleh_synchronous_sp1) | |
972 | b . | |
973 | ||
974 | .text | |
975 | .align 2 | |
976 | fleh_irq_sp1: | |
977 | mov x1, x0 | |
978 | adr x0, Lsp1_irq_str | |
979 | b EXT(panic_with_thread_kernel_state) | |
980 | Lsp1_irq_str: | |
981 | .asciz "IRQ exception taken while SP1 selected" | |
982 | ||
983 | .text | |
984 | .align 2 | |
985 | fleh_fiq_sp1: | |
986 | mov x1, x0 | |
987 | adr x0, Lsp1_fiq_str | |
988 | b EXT(panic_with_thread_kernel_state) | |
989 | Lsp1_fiq_str: | |
990 | .asciz "FIQ exception taken while SP1 selected" | |
991 | ||
992 | .text | |
993 | .align 2 | |
994 | fleh_serror_sp1: | |
995 | mov x1, x0 | |
996 | adr x0, Lsp1_serror_str | |
997 | b EXT(panic_with_thread_kernel_state) | |
998 | Lsp1_serror_str: | |
999 | .asciz "Asynchronous exception taken while SP1 selected" | |
1000 | ||
1001 | .text | |
1002 | .align 2 | |
1003 | exception_return_dispatch: | |
cb323159 A |
1004 | ldr w0, [x21, SS64_CPSR] |
1005 | tst w0, PSR64_MODE_EL_MASK | |
1006 | b.ne return_to_kernel // return to kernel if M[3:2] > 0 | |
5ba3f43e A |
1007 | b return_to_user |
1008 | ||
1009 | .text | |
1010 | .align 2 | |
1011 | return_to_kernel: | |
cb323159 A |
1012 | tbnz w0, #DAIF_IRQF_SHIFT, exception_return // Skip AST check if IRQ disabled |
1013 | mrs x3, TPIDR_EL1 // Load thread pointer | |
1014 | ldr w1, [x3, ACT_PREEMPT_CNT] // Load preemption count | |
1015 | msr DAIFSet, #DAIFSC_ALL // Disable exceptions | |
1016 | cbnz x1, exception_return_unint_tpidr_x3 // If preemption disabled, skip AST check | |
1017 | ldr x1, [x3, ACT_CPUDATAP] // Get current CPU data pointer | |
1018 | ldr x2, [x1, CPU_PENDING_AST] // Get ASTs | |
1019 | tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken | |
1020 | b.eq exception_return_unint_tpidr_x3 | |
1021 | mov sp, x21 // Switch to thread stack for preemption | |
5ba3f43e | 1022 | PUSH_FRAME |
cb323159 | 1023 | bl EXT(ast_taken_kernel) // Handle AST_URGENT |
5ba3f43e | 1024 | POP_FRAME |
5ba3f43e A |
1025 | b exception_return |
1026 | ||
1027 | .text | |
1028 | .globl EXT(thread_bootstrap_return) | |
1029 | LEXT(thread_bootstrap_return) | |
1030 | #if CONFIG_DTRACE | |
1031 | bl EXT(dtrace_thread_bootstrap) | |
1032 | #endif | |
1033 | b EXT(thread_exception_return) | |
1034 | ||
1035 | .text | |
1036 | .globl EXT(thread_exception_return) | |
1037 | LEXT(thread_exception_return) | |
1038 | mrs x0, TPIDR_EL1 | |
1039 | add x21, x0, ACT_CONTEXT | |
1040 | ldr x21, [x21] | |
1041 | ||
1042 | // | |
1043 | // Fall Through to return_to_user from thread_exception_return. | |
1044 | // Note that if we move return_to_user or insert a new routine | |
1045 | // below thread_exception_return, the latter will need to change. | |
1046 | // | |
1047 | .text | |
1048 | return_to_user: | |
1049 | check_user_asts: | |
5ba3f43e A |
1050 | mrs x3, TPIDR_EL1 // Load thread pointer |
1051 | ||
1052 | movn w2, #0 | |
1053 | str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user | |
1054 | ||
cb323159 | 1055 | #if MACH_ASSERT |
5ba3f43e | 1056 | ldr w0, [x3, TH_RWLOCK_CNT] |
cb323159 | 1057 | cbz w0, 1f // Detect unbalanced RW lock/unlock |
5ba3f43e A |
1058 | b rwlock_count_notzero |
1059 | 1: | |
cb323159 A |
1060 | ldr w0, [x3, ACT_PREEMPT_CNT] |
1061 | cbz w0, 1f | |
1062 | b preempt_count_notzero | |
1063 | 1: | |
1064 | #endif | |
5ba3f43e | 1065 | |
cb323159 A |
1066 | msr DAIFSet, #DAIFSC_ALL // Disable exceptions |
1067 | ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer | |
1068 | ldr x0, [x4, CPU_PENDING_AST] // Get ASTs | |
1069 | cbnz x0, user_take_ast // If pending ASTs, go service them | |
5ba3f43e A |
1070 | |
1071 | #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME | |
cb323159 | 1072 | mov x19, x3 // Preserve thread pointer across function call |
5ba3f43e A |
1073 | PUSH_FRAME |
1074 | bl EXT(timer_state_event_kernel_to_user) | |
1075 | POP_FRAME | |
cb323159 | 1076 | mov x3, x19 |
5ba3f43e A |
1077 | #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ |
1078 | ||
1079 | #if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT) | |
1080 | /* Watchtower | |
1081 | * | |
1082 | * Here we attempt to enable NEON access for EL0. If the last entry into the | |
1083 | * kernel from user-space was due to an IRQ, the monitor will have disabled | |
1084 | * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to | |
1085 | * check in with the monitor in order to reenable NEON for EL0 in exchange | |
1086 | * for routing IRQs through the monitor (2). This way the monitor will | |
1087 | * always 'own' either IRQs or EL0 NEON. | |
1088 | * | |
1089 | * If Watchtower is disabled or we did not enter the kernel through an IRQ | |
1090 | * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3 | |
1091 | * here. | |
1092 | * | |
1093 | * EL0 user ________ IRQ ______ | |
1094 | * EL1 xnu \ ______________________ CPACR_EL1 __/ | |
1095 | * EL3 monitor \_/ \___/ | |
1096 | * | |
1097 | * (1) (2) | |
1098 | */ | |
1099 | ||
1100 | mov x0, #(CPACR_FPEN_ENABLE) | |
1101 | msr CPACR_EL1, x0 | |
1102 | #endif | |
1103 | ||
1104 | /* Establish this thread's debug state as the live state on the selected CPU. */ | |
1105 | ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer | |
1106 | ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context | |
1107 | ldr x0, [x3, ACT_DEBUGDATA] | |
1108 | orr x1, x1, x0 // Thread debug state and live debug state both NULL? | |
1109 | cbnz x1, user_set_debug_state_and_return // If one or the other non-null, go set debug state | |
cb323159 | 1110 | b exception_return_unint_tpidr_x3 |
5ba3f43e A |
1111 | |
1112 | // | |
1113 | // Fall through from return_to_user to exception_return. | |
1114 | // Note that if we move exception_return or add a new routine below | |
1115 | // return_to_user, the latter will have to change. | |
1116 | // | |
1117 | ||
5ba3f43e | 1118 | exception_return: |
a39ff7e2 | 1119 | msr DAIFSet, #DAIFSC_ALL // Disable exceptions |
cb323159 | 1120 | exception_return_unint: |
a39ff7e2 | 1121 | mrs x3, TPIDR_EL1 // Load thread pointer |
cb323159 | 1122 | exception_return_unint_tpidr_x3: |
a39ff7e2 | 1123 | mov sp, x21 // Reload the pcb pointer |
5ba3f43e A |
1124 | |
1125 | /* ARM64_TODO Reserve x18 until we decide what to do with it */ | |
94ff46dc | 1126 | str xzr, [sp, SS64_X18] |
5ba3f43e | 1127 | |
5c9f4661 A |
1128 | #if __ARM_KERNEL_PROTECT__ |
1129 | /* | |
1130 | * If we are going to eret to userspace, we must return through the EL0 | |
1131 | * eret mapping. | |
1132 | */ | |
1133 | ldr w1, [sp, SS64_CPSR] // Load CPSR | |
1134 | tbnz w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1 | |
1135 | ||
1136 | /* We need to switch to the EL0 mapping of this code to eret to EL0. */ | |
1137 | adrp x0, EXT(ExceptionVectorsBase)@page // Load vector base | |
1138 | adrp x1, Lexception_return_restore_registers@page // Load target PC | |
1139 | add x1, x1, Lexception_return_restore_registers@pageoff | |
1140 | MOV64 x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address | |
1141 | sub x1, x1, x0 // Calculate delta | |
1142 | add x0, x2, x1 // Convert KVA to EL0 vector address | |
1143 | br x0 | |
1144 | ||
1145 | Lskip_el0_eret_mapping: | |
1146 | #endif /* __ARM_KERNEL_PROTECT__ */ | |
1147 | ||
5ba3f43e | 1148 | Lexception_return_restore_registers: |
5ba3f43e | 1149 | mov x0, sp // x0 = &pcb |
cb323159 A |
1150 | // Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2 |
1151 | AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24 | |
1152 | ||
1153 | /* Restore special register state */ | |
1154 | ldr w3, [sp, NS64_FPSR] | |
1155 | ldr w4, [sp, NS64_FPCR] | |
1156 | ||
1157 | msr ELR_EL1, x1 // Load the return address into ELR | |
1158 | msr SPSR_EL1, x2 // Load the return CPSR into SPSR | |
1159 | msr FPSR, x3 | |
1160 | msr FPCR, x4 // Synchronized by ERET | |
1161 | ||
1162 | #if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) | |
1163 | /* if eret to userspace, disable JOP */ | |
1164 | tbnz w2, PSR64_MODE_EL_SHIFT, Lskip_disable_jop | |
1165 | adrp x4, EXT(const_boot_args)@page | |
1166 | add x4, x4, EXT(const_boot_args)@pageoff | |
1167 | ldr x4, [x4, BA_BOOT_FLAGS] | |
1168 | and x1, x4, BA_BOOT_FLAGS_DISABLE_JOP | |
1169 | cbnz x1, Lskip_disable_jop // if global JOP disabled, don't touch SCTLR (kernel JOP is already off) | |
1170 | and x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP | |
1171 | cbnz x1, Ldisable_jop // if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on) | |
1172 | mrs x2, TPIDR_EL1 | |
1173 | ldr x2, [x2, TH_DISABLE_USER_JOP] | |
1174 | cbz x2, Lskip_disable_jop // if thread has JOP enabled, leave it on (kernel running with JOP on) | |
1175 | Ldisable_jop: | |
1176 | MOV64 x1, SCTLR_JOP_KEYS_ENABLED | |
1177 | mrs x4, SCTLR_EL1 | |
1178 | bic x4, x4, x1 | |
1179 | msr SCTLR_EL1, x4 | |
1180 | MOV64 x1, SCTLR_EL1_EXPECTED | |
1181 | cmp x4, x1 | |
1182 | bne . | |
1183 | Lskip_disable_jop: | |
1184 | #endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)*/ | |
5ba3f43e A |
1185 | |
1186 | /* Restore arm_neon_saved_state64 */ | |
1187 | ldp q0, q1, [x0, NS64_Q0] | |
1188 | ldp q2, q3, [x0, NS64_Q2] | |
1189 | ldp q4, q5, [x0, NS64_Q4] | |
1190 | ldp q6, q7, [x0, NS64_Q6] | |
1191 | ldp q8, q9, [x0, NS64_Q8] | |
1192 | ldp q10, q11, [x0, NS64_Q10] | |
1193 | ldp q12, q13, [x0, NS64_Q12] | |
1194 | ldp q14, q15, [x0, NS64_Q14] | |
1195 | ldp q16, q17, [x0, NS64_Q16] | |
1196 | ldp q18, q19, [x0, NS64_Q18] | |
1197 | ldp q20, q21, [x0, NS64_Q20] | |
1198 | ldp q22, q23, [x0, NS64_Q22] | |
1199 | ldp q24, q25, [x0, NS64_Q24] | |
1200 | ldp q26, q27, [x0, NS64_Q26] | |
1201 | ldp q28, q29, [x0, NS64_Q28] | |
1202 | ldp q30, q31, [x0, NS64_Q30] | |
1203 | ||
1204 | /* Restore arm_saved_state64 */ | |
1205 | ||
1206 | // Skip x0, x1 - we're using them | |
1207 | ldp x2, x3, [x0, SS64_X2] | |
1208 | ldp x4, x5, [x0, SS64_X4] | |
1209 | ldp x6, x7, [x0, SS64_X6] | |
1210 | ldp x8, x9, [x0, SS64_X8] | |
1211 | ldp x10, x11, [x0, SS64_X10] | |
1212 | ldp x12, x13, [x0, SS64_X12] | |
1213 | ldp x14, x15, [x0, SS64_X14] | |
cb323159 | 1214 | // Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0 |
5ba3f43e A |
1215 | ldp x18, x19, [x0, SS64_X18] |
1216 | ldp x20, x21, [x0, SS64_X20] | |
1217 | ldp x22, x23, [x0, SS64_X22] | |
1218 | ldp x24, x25, [x0, SS64_X24] | |
1219 | ldp x26, x27, [x0, SS64_X26] | |
1220 | ldr x28, [x0, SS64_X28] | |
cb323159 A |
1221 | ldr fp, [x0, SS64_FP] |
1222 | // Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0 | |
5ba3f43e A |
1223 | |
1224 | // Restore stack pointer and our last two GPRs | |
1225 | ldr x1, [x0, SS64_SP] | |
1226 | mov sp, x1 | |
5c9f4661 A |
1227 | |
1228 | #if __ARM_KERNEL_PROTECT__ | |
1229 | ldr w18, [x0, SS64_CPSR] // Stash CPSR | |
1230 | #endif /* __ARM_KERNEL_PROTECT__ */ | |
1231 | ||
5ba3f43e A |
1232 | ldp x0, x1, [x0, SS64_X0] // Restore the GPRs |
1233 | ||
5c9f4661 A |
1234 | #if __ARM_KERNEL_PROTECT__ |
1235 | /* If we are going to eret to userspace, we must unmap the kernel. */ | |
1236 | tbnz w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch | |
1237 | ||
1238 | /* Update TCR to unmap the kernel. */ | |
1239 | MOV64 x18, TCR_EL1_USER | |
1240 | msr TCR_EL1, x18 | |
1241 | ||
1242 | /* | |
1243 | * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to | |
1244 | * each other due to the microarchitecture. | |
1245 | */ | |
1246 | #if !defined(APPLE_ARM64_ARCH_FAMILY) | |
1247 | isb sy | |
1248 | #endif | |
1249 | ||
1250 | /* Switch to the user ASID (low bit clear) for the task. */ | |
1251 | mrs x18, TTBR0_EL1 | |
1252 | bic x18, x18, #(1 << TTBR_ASID_SHIFT) | |
1253 | msr TTBR0_EL1, x18 | |
d9a64523 | 1254 | mov x18, #0 |
5c9f4661 A |
1255 | |
1256 | /* We don't need an ISB here, as the eret is synchronizing. */ | |
1257 | Lskip_ttbr1_switch: | |
1258 | #endif /* __ARM_KERNEL_PROTECT__ */ | |
1259 | ||
5ba3f43e A |
1260 | eret |
1261 | ||
1262 | user_take_ast: | |
1263 | PUSH_FRAME | |
1264 | bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation | |
1265 | POP_FRAME | |
5ba3f43e A |
1266 | b check_user_asts // Now try again |
1267 | ||
1268 | user_set_debug_state_and_return: | |
cb323159 | 1269 | |
c6bf4f31 A |
1270 | #if defined(APPLELIGHTNING) |
1271 | /* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */ | |
1272 | ||
1273 | ARM64_IS_PCORE x12 // if we're not a pCORE, also do nothing | |
1274 | cbz x12, 1f | |
1275 | ||
1276 | mrs x12, ARM64_REG_HID1 // if any debug session ever existed, set forceNexL3ClkOn | |
1277 | orr x12, x12, ARM64_REG_HID1_forceNexL3ClkOn | |
1278 | msr ARM64_REG_HID1, x12 | |
1279 | 1: | |
1280 | ||
1281 | #endif | |
cb323159 | 1282 | |
5ba3f43e A |
1283 | ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer |
1284 | isb // Synchronize context | |
1285 | PUSH_FRAME | |
1286 | bl EXT(arm_debug_set) // Establish thread debug state in live regs | |
1287 | POP_FRAME | |
1288 | isb | |
cb323159 | 1289 | b exception_return_unint // Continue, reloading the thread pointer |
5ba3f43e A |
1290 | |
1291 | .text | |
1292 | .align 2 | |
1293 | preempt_underflow: | |
1294 | mrs x0, TPIDR_EL1 | |
1295 | str x0, [sp, #-16]! // We'll print thread pointer | |
1296 | adr x0, L_underflow_str // Format string | |
1297 | CALL_EXTERN panic // Game over | |
1298 | ||
1299 | L_underflow_str: | |
1300 | .asciz "Preemption count negative on thread %p" | |
1301 | .align 2 | |
1302 | ||
cb323159 | 1303 | #if MACH_ASSERT |
5ba3f43e A |
1304 | .text |
1305 | .align 2 | |
1306 | rwlock_count_notzero: | |
1307 | mrs x0, TPIDR_EL1 | |
1308 | str x0, [sp, #-16]! // We'll print thread pointer | |
1309 | ldr w0, [x0, TH_RWLOCK_CNT] | |
1310 | str w0, [sp, #8] | |
1311 | adr x0, L_rwlock_count_notzero_str // Format string | |
1312 | CALL_EXTERN panic // Game over | |
1313 | ||
1314 | L_rwlock_count_notzero_str: | |
1315 | .asciz "RW lock count not 0 on thread %p (%u)" | |
cb323159 A |
1316 | |
1317 | .text | |
1318 | .align 2 | |
1319 | preempt_count_notzero: | |
1320 | mrs x0, TPIDR_EL1 | |
1321 | str x0, [sp, #-16]! // We'll print thread pointer | |
1322 | ldr w0, [x0, ACT_PREEMPT_CNT] | |
1323 | str w0, [sp, #8] | |
1324 | adr x0, L_preempt_count_notzero_str // Format string | |
1325 | CALL_EXTERN panic // Game over | |
1326 | ||
1327 | L_preempt_count_notzero_str: | |
1328 | .asciz "preemption count not 0 on thread %p (%u)" | |
1329 | #endif /* MACH_ASSERT */ | |
1330 | ||
5ba3f43e A |
1331 | .align 2 |
1332 | ||
5c9f4661 A |
1333 | #if __ARM_KERNEL_PROTECT__ |
1334 | /* | |
1335 | * This symbol denotes the end of the exception vector/eret range; we page | |
1336 | * align it so that we can avoid mapping other text in the EL0 exception | |
1337 | * vector mapping. | |
1338 | */ | |
1339 | .text | |
1340 | .align 14 | |
1341 | .globl EXT(ExceptionVectorsEnd) | |
1342 | LEXT(ExceptionVectorsEnd) | |
1343 | #endif /* __ARM_KERNEL_PROTECT__ */ | |
1344 | ||
c6bf4f31 A |
1345 | #if XNU_MONITOR |
1346 | #if __APRR_SUPPORTED__ | |
1347 | .text | |
1348 | .align 2 | |
1349 | el1_sp0_synchronous_vector_not_in_kernel_mode: | |
1350 | EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1 | |
1351 | ||
1352 | .text | |
1353 | .align 2 | |
1354 | el1_sp0_fiq_vector_not_in_kernel_mode: | |
1355 | EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0 | |
1356 | ||
1357 | .text | |
1358 | .align 2 | |
1359 | el1_sp0_irq_vector_not_in_kernel_mode: | |
1360 | EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0 | |
1361 | ||
1362 | .text | |
1363 | .align 2 | |
1364 | el1_sp0_serror_vector_not_in_kernel_mode: | |
1365 | EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0 | |
1366 | #endif /* __APRR_SUPPORTED__ */ | |
1367 | ||
1368 | /* | |
1369 | * Functions to preflight the fleh handlers when the PPL has taken an exception; | |
1370 | * mostly concerned with setting up state for the normal fleh code. | |
1371 | */ | |
1372 | fleh_synchronous_from_ppl: | |
1373 | /* Save x0. */ | |
1374 | mov x15, x0 | |
1375 | ||
1376 | /* Grab the ESR. */ | |
1377 | mrs x1, ESR_EL1 // Get the exception syndrome | |
1378 | ||
1379 | /* If the stack pointer is corrupt, it will manifest either as a data abort | |
1380 | * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check | |
1381 | * these quickly by testing bit 5 of the exception class. | |
1382 | */ | |
1383 | tbz x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack | |
1384 | mrs x0, SP_EL0 // Get SP_EL0 | |
1385 | ||
1386 | /* Perform high level checks for stack corruption. */ | |
1387 | and x1, x1, #ESR_EC_MASK // Mask the exception class | |
1388 | mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT) | |
1389 | cmp x1, x2 // If we have a stack alignment exception | |
1390 | b.eq Lcorrupt_ppl_stack // ...the stack is definitely corrupted | |
1391 | mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT) | |
1392 | cmp x1, x2 // If we have a data abort, we need to | |
1393 | b.ne Lvalid_ppl_stack // ...validate the stack pointer | |
1394 | ||
1395 | Ltest_pstack: | |
1396 | /* Bounds check the PPL stack. */ | |
1397 | adrp x10, EXT(pmap_stacks_start)@page | |
1398 | ldr x10, [x10, #EXT(pmap_stacks_start)@pageoff] | |
1399 | adrp x11, EXT(pmap_stacks_end)@page | |
1400 | ldr x11, [x11, #EXT(pmap_stacks_end)@pageoff] | |
1401 | cmp x0, x10 | |
1402 | b.lo Lcorrupt_ppl_stack | |
1403 | cmp x0, x11 | |
1404 | b.hi Lcorrupt_ppl_stack | |
1405 | ||
1406 | Lvalid_ppl_stack: | |
1407 | /* Restore x0. */ | |
1408 | mov x0, x15 | |
1409 | ||
1410 | /* Switch back to the kernel stack. */ | |
1411 | msr SPSel, #0 | |
1412 | GET_PMAP_CPU_DATA x5, x6, x7 | |
1413 | ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP] | |
1414 | mov sp, x6 | |
1415 | ||
1416 | /* Hand off to the synch handler. */ | |
1417 | b EXT(fleh_synchronous) | |
1418 | ||
1419 | Lcorrupt_ppl_stack: | |
1420 | /* Restore x0. */ | |
1421 | mov x0, x15 | |
1422 | ||
1423 | /* Hand off to the invalid stack handler. */ | |
1424 | b fleh_invalid_stack | |
1425 | ||
1426 | fleh_fiq_from_ppl: | |
1427 | mrs x1, TPIDR_EL1 | |
1428 | ldr x1, [x1, ACT_CPUDATAP] | |
1429 | ldr x1, [x1, CPU_ISTACKPTR] | |
1430 | mov sp, x1 | |
1431 | b EXT(fleh_fiq) | |
1432 | ||
1433 | fleh_irq_from_ppl: | |
1434 | mrs x1, TPIDR_EL1 | |
1435 | ldr x1, [x1, ACT_CPUDATAP] | |
1436 | ldr x1, [x1, CPU_ISTACKPTR] | |
1437 | mov sp, x1 | |
1438 | b EXT(fleh_irq) | |
1439 | ||
1440 | fleh_serror_from_ppl: | |
1441 | GET_PMAP_CPU_DATA x5, x6, x7 | |
1442 | ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP] | |
1443 | mov sp, x6 | |
1444 | b EXT(fleh_serror) | |
1445 | ||
1446 | /* | |
1447 | * REENABLE_DAIF | |
1448 | * | |
1449 | * Restores the DAIF bits to their original state (well, the AIF bits at least). | |
1450 | * arg0 - DAIF bits (read from the DAIF interface) to restore | |
1451 | */ | |
1452 | .macro REENABLE_DAIF | |
1453 | /* AIF enable. */ | |
1454 | tst $0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF) | |
1455 | b.eq 3f | |
1456 | ||
1457 | /* IF enable. */ | |
1458 | tst $0, #(DAIF_IRQF | DAIF_FIQF) | |
1459 | b.eq 2f | |
1460 | ||
1461 | /* A enable. */ | |
1462 | tst $0, #(DAIF_ASYNCF) | |
1463 | b.eq 1f | |
1464 | ||
1465 | /* Enable nothing. */ | |
1466 | b 4f | |
1467 | ||
1468 | /* A enable. */ | |
1469 | 1: | |
1470 | msr DAIFClr, #(DAIFSC_ASYNCF) | |
1471 | b 4f | |
1472 | ||
1473 | /* IF enable. */ | |
1474 | 2: | |
1475 | msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF) | |
1476 | b 4f | |
1477 | ||
1478 | /* AIF enable. */ | |
1479 | 3: | |
1480 | msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) | |
1481 | ||
1482 | /* Done! */ | |
1483 | 4: | |
1484 | .endmacro | |
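/*
 * arg0 is expected to be a DAIF value captured earlier with "mrs xN, DAIF"
 * (compare x20, saved in Ldisable_aif_and_enter_ppl below as the caller's
 * DAIF); the macro coarsely re-enables whichever of the A/I/F masks were
 * clear at capture time.
 */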
1485 | ||
1486 | ||
1487 | #if XNU_MONITOR && __APRR_SUPPORTED__ | |
1488 | /* | |
1489 | * aprr_ppl_enter | |
1490 | * | |
1491 | * Invokes the PPL | |
1492 | * x15 - The index of the requested PPL function. | |
1493 | */ | |
1494 | .text | |
1495 | .align 2 | |
1496 | .globl EXT(aprr_ppl_enter) | |
1497 | LEXT(aprr_ppl_enter) | |
1498 | /* Push a frame. */ | |
1499 | ARM64_STACK_PROLOG | |
1500 | stp x20, x21, [sp, #-0x20]! | |
1501 | stp x29, x30, [sp, #0x10] | |
1502 | add x29, sp, #0x10 | |
1503 | ||
1504 | /* Increase the preemption count. */ | |
1505 | mrs x10, TPIDR_EL1 | |
1506 | ldr w12, [x10, ACT_PREEMPT_CNT] | |
1507 | add w12, w12, #1 | |
1508 | str w12, [x10, ACT_PREEMPT_CNT] | |
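/* The matching decrement happens on the exit path (ppl_exit or ppl_bootstrap_dispatch). */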
1509 | ||
1510 | /* Is the PPL currently locked down? */ | |
1511 | adrp x13, EXT(pmap_ppl_locked_down)@page | |
1512 | add x13, x13, EXT(pmap_ppl_locked_down)@pageoff | |
1513 | ldr w14, [x13] | |
1514 | cmp w14, wzr | |
1515 | ||
1516 | /* If not, just perform the call in the current context. */ | |
1517 | b.eq EXT(ppl_bootstrap_dispatch) | |
1518 | ||
1519 | mov w10, #PPL_STATE_KERNEL | |
1520 | b Ldisable_aif_and_enter_ppl | |
1521 | ||
1522 | /* We align this to land the next few instructions on their own page. */ | |
1523 | .section __PPLTRAMP,__text,regular,pure_instructions | |
1524 | .align 14 | |
1525 | .space (16*1024)-(4*8) // 8 insns | |
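/*
 * The pad sizes this page so that the eight instructions that follow (the
 * mrs/msr at Ldisable_aif_and_enter_ppl, the MOV64 expansion, the APRR_EL1
 * write, and the trailing ISB) end exactly at the 16KB page boundary.
 */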
1526 | ||
1527 | /* | |
1528 | * This label is used by exception handlers that are trying to return | |
1529 | * to the PPL. | |
1530 | */ | |
1531 | Ldisable_aif_and_enter_ppl: | |
1532 | /* We must trampoline to the PPL context; disable AIF. */ | |
1533 | mrs x20, DAIF | |
1534 | msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) | |
1535 | ||
1536 | .globl EXT(ppl_no_exception_start) | |
1537 | LEXT(ppl_no_exception_start) | |
1538 | /* Switch APRR_EL1 to PPL mode. */ | |
1539 | MOV64 x14, APRR_EL1_PPL | |
1540 | msr APRR_EL1, x14 | |
1541 | ||
1542 | /* This ISB should be the last instruction on a page. */ | |
1543 | // TODO: can we static assert this? | |
1544 | isb | |
1545 | #endif /* XNU_MONITOR && __APRR_SUPPORTED__ */ | |
1546 | ||
1547 | ||
1548 | // x15: ppl call number | |
1549 | // w10: ppl_state | |
1550 | // x20: gxf_enter caller's DAIF | |
1551 | .globl EXT(ppl_trampoline_start) | |
1552 | LEXT(ppl_trampoline_start) | |
1553 | ||
1554 | #if __APRR_SUPPORTED__ | |
1555 | /* Mask AIF again, in case an attacker branched into the trampoline with exceptions unmasked. */ | |
1556 | msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) | |
1557 | #endif /* __APRR_SUPPORTED__ */ | |
1558 | ||
1559 | #if __APRR_SUPPORTED__ | |
1560 | /* Verify the state of APRR_EL1. */ | |
1561 | MOV64 x14, APRR_EL1_PPL | |
1562 | mrs x21, APRR_EL1 | |
1563 | #else /* __APRR_SUPPORTED__ */ | |
1564 | #error "XPRR configuration error" | |
1565 | #endif /* __APRR_SUPPORTED__ */ | |
1566 | cmp x14, x21 | |
1567 | b.ne Lppl_fail_dispatch | |
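/*
 * A mismatch means we reached the trampoline without the APRR_EL1 switch
 * above (e.g. a direct branch into this page), so refuse to dispatch.
 */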
1568 | ||
1569 | /* Verify the request ID. */ | |
1570 | cmp x15, PMAP_COUNT | |
1571 | b.hs Lppl_fail_dispatch | |
1572 | ||
1573 | /* Get the PPL CPU data structure. */ | |
1574 | GET_PMAP_CPU_DATA x12, x13, x14 | |
1575 | ||
1576 | /* Mark this CPU as being in the PPL. */ | |
1577 | ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE] | |
1578 | ||
1579 | cmp w9, #PPL_STATE_KERNEL | |
1580 | b.eq Lppl_mark_cpu_as_dispatching | |
1581 | ||
1582 | /* Check to see if we are trying to trap from within the PPL. */ | |
1583 | cmp w9, #PPL_STATE_DISPATCH | |
1584 | b.eq Lppl_fail_dispatch_ppl | |
1585 | ||
1586 | ||
1587 | /* Ensure that we are returning from an exception. */ | |
1588 | cmp w9, #PPL_STATE_EXCEPTION | |
1589 | b.ne Lppl_fail_dispatch | |
1590 | ||
1591 | // w10 carries the entry reason; CHECK_EXCEPTION_RETURN_DISPATCH_PPL | |
1592 | // sets it to PPL_STATE_EXCEPTION when trampolining back from an exception. | |
1593 | cmp w10, #PPL_STATE_EXCEPTION | |
1594 | b.ne Lppl_fail_dispatch | |
1595 | ||
1596 | /* This is an exception return; set the CPU to the dispatching state. */ | |
1597 | mov w9, #PPL_STATE_DISPATCH | |
1598 | str w9, [x12, PMAP_CPU_DATA_PPL_STATE] | |
1599 | ||
1600 | /* Find the save area, and return to the saved PPL context. */ | |
1601 | ldr x0, [x12, PMAP_CPU_DATA_SAVE_AREA] | |
1602 | mov sp, x0 | |
1603 | #if __APRR_SUPPORTED__ | |
1604 | b Lexception_return_restore_registers | |
1605 | #else | |
1606 | b EXT(return_to_ppl) | |
1607 | #endif /* __APRR_SUPPORTED__ */ | |
1608 | ||
1609 | Lppl_mark_cpu_as_dispatching: | |
1610 | cmp w10, #PPL_STATE_KERNEL | |
1611 | b.ne Lppl_fail_dispatch | |
1612 | ||
1613 | /* Mark the CPU as dispatching. */ | |
1614 | mov w13, #PPL_STATE_DISPATCH | |
1615 | str w13, [x12, PMAP_CPU_DATA_PPL_STATE] | |
1616 | ||
1617 | /* Get the handler for the request */ | |
1618 | adrp x9, EXT(ppl_handler_table)@page | |
1619 | add x9, x9, EXT(ppl_handler_table)@pageoff | |
1620 | ldr x10, [x9, x15, lsl #3] | |
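/* ppl_handler_table is an array of 8-byte entries indexed by the PPL call number in x15. */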
1621 | ||
1622 | /* Switch to the regular PPL stack. */ | |
1623 | // TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler | |
1624 | ldr x9, [x12, PMAP_CPU_DATA_PPL_STACK] | |
1625 | ||
1626 | // SP0 is thread stack here | |
1627 | mov x21, sp | |
1628 | // SP0 is now PPL stack | |
1629 | mov sp, x9 | |
1630 | ||
1631 | ||
1632 | /* Save the old stack pointer off in case we need it. */ | |
1633 | str x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP] | |
1634 | ||
1635 | /* Branch to the code that will invoke the PPL request. */ | |
1636 | b EXT(ppl_dispatch) | |
1637 | ||
1638 | Lppl_fail_dispatch_ppl: | |
1639 | /* Switch back to the kernel stack. */ | |
1640 | ldr x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP] | |
1641 | mov sp, x10 | |
1642 | ||
1643 | Lppl_fail_dispatch: | |
1644 | /* Indicate that we failed. */ | |
1645 | mov x15, #PPL_EXIT_BAD_CALL | |
1646 | ||
1647 | /* Move the DAIF bits into the expected register. */ | |
1648 | mov x10, x20 | |
1649 | ||
1650 | /* Return to kernel mode. */ | |
1651 | b ppl_return_to_kernel_mode | |
1652 | ||
1653 | Lppl_dispatch_exit: | |
1654 | /* Indicate that we are cleanly exiting the PPL. */ | |
1655 | mov x15, #PPL_EXIT_DISPATCH | |
1656 | ||
1657 | /* Switch back to the original (kernel thread) stack. */ | |
1658 | mov sp, x21 | |
1659 | ||
1660 | /* Move the saved DAIF bits. */ | |
1661 | mov x10, x20 | |
1662 | ||
1663 | /* Clear the old stack pointer. */ | |
1664 | str xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP] | |
1665 | ||
1666 | /* | |
1667 | * Mark the CPU as no longer being in the PPL. We spin if our state | |
1668 | * machine is broken. | |
1669 | */ | |
1670 | ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE] | |
1671 | cmp w9, #PPL_STATE_DISPATCH | |
1672 | b.ne . | |
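/* "." is the current address, so a broken state machine spins here forever. */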
1673 | mov w9, #PPL_STATE_KERNEL | |
1674 | str w9, [x12, PMAP_CPU_DATA_PPL_STATE] | |
1675 | ||
1676 | /* Return to the kernel. */ | |
1677 | b ppl_return_to_kernel_mode | |
1678 | ||
1679 | #if __APRR_SUPPORTED__ | |
1680 | /* We align this to land the next few instructions on their own page. */ | |
1681 | .align 14 | |
1682 | .space (16*1024)-(4*5) // 5 insns | |
1683 | ||
1684 | ppl_return_to_kernel_mode: | |
1685 | /* Switch APRR_EL1 back to the kernel mode. */ | |
1686 | // must be 5 instructions | |
1687 | MOV64 x14, APRR_EL1_DEFAULT | |
1688 | msr APRR_EL1, x14 | |
1689 | ||
1690 | .globl EXT(ppl_trampoline_end) | |
1691 | LEXT(ppl_trampoline_end) | |
1692 | ||
1693 | /* This should be the first instruction on a page. */ | |
1694 | isb | |
1695 | ||
1696 | .globl EXT(ppl_no_exception_end) | |
1697 | LEXT(ppl_no_exception_end) | |
1698 | b ppl_exit | |
1699 | #endif /* __APRR_SUPPORTED__ */ | |
1700 | ||
1701 | ||
1702 | .text | |
1703 | ppl_exit: | |
1704 | /* | |
1705 | * If we are dealing with an exception, hand off to the first level | |
1706 | * exception handler. | |
1707 | */ | |
1708 | cmp x15, #PPL_EXIT_EXCEPTION | |
1709 | b.eq Ljump_to_fleh_handler | |
1710 | ||
1711 | /* Restore the original AIF state. */ | |
1712 | REENABLE_DAIF x10 | |
1713 | ||
1714 | /* If this was a panic call from the PPL, reinvoke panic. */ | |
1715 | cmp x15, #PPL_EXIT_PANIC_CALL | |
1716 | b.eq Ljump_to_panic_trap_to_debugger | |
1717 | ||
1718 | /* Load the preemption count. */ | |
1719 | mrs x10, TPIDR_EL1 | |
1720 | ldr w12, [x10, ACT_PREEMPT_CNT] | |
1721 | ||
1722 | /* Detect underflow */ | |
1723 | cbnz w12, Lno_preempt_underflow | |
1724 | b preempt_underflow | |
1725 | Lno_preempt_underflow: | |
1726 | ||
1727 | /* Lower the preemption count. */ | |
1728 | sub w12, w12, #1 | |
1729 | str w12, [x10, ACT_PREEMPT_CNT] | |
1730 | ||
1731 | /* Skip ASTs if the preemption count is not zero. */ | |
1732 | cbnz x12, Lppl_skip_ast_taken | |
1733 | ||
1734 | /* Skip the AST check if interrupts are disabled. */ | |
1735 | mrs x1, DAIF | |
1736 | tst x1, #DAIF_IRQF | |
1737 | b.ne Lppl_skip_ast_taken | |
1738 | ||
1739 | /* Disable interrupts. */ | |
1740 | msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) | |
1741 | ||
1742 | /* If there is no urgent AST, skip the AST handling. */ | |
1743 | ldr x12, [x10, ACT_CPUDATAP] | |
1744 | ldr x14, [x12, CPU_PENDING_AST] | |
1745 | tst x14, AST_URGENT | |
1746 | b.eq Lppl_defer_ast_taken | |
1747 | ||
1748 | /* Stash our return value and return reason. */ | |
1749 | mov x20, x0 | |
1750 | mov x21, x15 | |
1751 | ||
1752 | /* Handle the AST. */ | |
1753 | bl EXT(ast_taken_kernel) | |
1754 | ||
1755 | /* Restore the return value and the return reason. */ | |
1756 | mov x15, x21 | |
1757 | mov x0, x20 | |
1758 | ||
1759 | Lppl_defer_ast_taken: | |
1760 | /* Reenable interrupts. */ | |
1761 | msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF) | |
1762 | ||
1763 | Lppl_skip_ast_taken: | |
1764 | /* Pop the stack frame. */ | |
1765 | ldp x29, x30, [sp, #0x10] | |
1766 | ldp x20, x21, [sp], #0x20 | |
1767 | ||
1768 | /* Check to see if this was a bad request. */ | |
1769 | cmp x15, #PPL_EXIT_BAD_CALL | |
1770 | b.eq Lppl_bad_call | |
1771 | ||
1772 | /* Return. */ | |
1773 | ARM64_STACK_EPILOG | |
1774 | ||
1775 | .align 2 | |
1776 | Ljump_to_fleh_handler: | |
1777 | br x25 | |
1778 | ||
1779 | .align 2 | |
1780 | Ljump_to_panic_trap_to_debugger: | |
1781 | b EXT(panic_trap_to_debugger) | |
1782 | ||
1783 | Lppl_bad_call: | |
1784 | /* Panic. */ | |
1785 | adrp x0, Lppl_bad_call_panic_str@page | |
1786 | add x0, x0, Lppl_bad_call_panic_str@pageoff | |
1787 | b EXT(panic) | |
1788 | ||
1789 | .text | |
1790 | .align 2 | |
1791 | .globl EXT(ppl_dispatch) | |
1792 | LEXT(ppl_dispatch) | |
1793 | /* | |
1794 | * Save registers that must survive the call: x12 holds the PPL per-CPU | |
1795 | * data address; x13 is stored only to keep the push 16-byte aligned. | |
1796 | */ | |
1797 | stp x12, x13, [sp, #-0x10]! | |
1798 | ||
1799 | /* Restore the original AIF state. */ | |
1800 | REENABLE_DAIF x20 | |
1801 | ||
1802 | /* | |
1803 | * Note that if the method is NULL, we'll blow up with a prefetch abort, | |
1804 | * but the exception vectors will deal with this properly. | |
1805 | */ | |
1806 | ||
1807 | /* Invoke the PPL method. */ | |
1808 | #ifdef HAS_APPLE_PAC | |
1809 | blraaz x10 | |
1810 | #else | |
1811 | blr x10 | |
1812 | #endif | |
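/*
 * blraaz is a pointer-authenticated branch-with-link: it authenticates x10
 * with the IA key and a zero modifier before branching, so a forged handler
 * pointer does not survive the jump.
 */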
1813 | ||
1814 | /* Disable AIF. */ | |
1815 | msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) | |
1816 | ||
1817 | /* Restore those important registers. */ | |
1818 | ldp x12, x13, [sp], #0x10 | |
1819 | ||
1820 | /* Mark this as a regular return, and hand off to the return path. */ | |
1821 | b Lppl_dispatch_exit | |
1822 | ||
1823 | .text | |
1824 | .align 2 | |
1825 | .globl EXT(ppl_bootstrap_dispatch) | |
1826 | LEXT(ppl_bootstrap_dispatch) | |
1827 | /* Verify the PPL request. */ | |
1828 | cmp x15, PMAP_COUNT | |
1829 | b.hs Lppl_fail_bootstrap_dispatch | |
1830 | ||
1831 | /* Get the requested PPL routine. */ | |
1832 | adrp x9, EXT(ppl_handler_table)@page | |
1833 | add x9, x9, EXT(ppl_handler_table)@pageoff | |
1834 | ldr x10, [x9, x15, lsl #3] | |
1835 | ||
1836 | /* Invoke the requested PPL routine. */ | |
1837 | #ifdef HAS_APPLE_PAC | |
1838 | blraaz x10 | |
1839 | #else | |
1840 | blr x10 | |
1841 | #endif | |
1842 | /* Stash off the return value */ | |
1843 | mov x20, x0 | |
1844 | /* Drop the preemption count */ | |
1845 | bl EXT(_enable_preemption) | |
1846 | mov x0, x20 | |
1847 | ||
1848 | /* Pop the stack frame. */ | |
1849 | ldp x29, x30, [sp, #0x10] | |
1850 | ldp x20, x21, [sp], #0x20 | |
1851 | #if __has_feature(ptrauth_returns) | |
1852 | retab | |
1853 | #else | |
1854 | ret | |
1855 | #endif | |
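/*
 * retab authenticates the return address with the IB key before returning,
 * pairing with the signing done by ARM64_STACK_PROLOG at function entry.
 */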
1856 | ||
1857 | Lppl_fail_bootstrap_dispatch: | |
1858 | /* Pop our stack frame and panic. */ | |
1859 | ldp x29, x30, [sp, #0x10] | |
1860 | ldp x20, x21, [sp], #0x20 | |
1861 | #if __has_feature(ptrauth_returns) | |
1862 | autibsp | |
1863 | #endif | |
1864 | adrp x0, Lppl_bad_call_panic_str@page | |
1865 | add x0, x0, Lppl_bad_call_panic_str@pageoff | |
1866 | b EXT(panic) | |
1867 | ||
1868 | .text | |
1869 | .align 2 | |
1870 | .globl EXT(ml_panic_trap_to_debugger) | |
1871 | LEXT(ml_panic_trap_to_debugger) | |
1872 | #if 0 | |
1873 | // TODO: why would we ever want to turn interrupts back on after going down panic path? | |
1874 | /* Grab the current AIF state, and disable AIF. */ | |
1875 | mrs x10, DAIF | |
1876 | #endif | |
1877 | msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) | |
1878 | ||
1879 | // We want interrupts to stay masked after exiting the PPL while we call into panic to halt the system. | |
1880 | // x10 is used in ppl_return_to_kernel_mode to restore the desired DAIF state after GEXIT. | |
1881 | mrs x10, DAIF | |
1882 | ||
1883 | /* Indicate (for the PPL->kernel transition) that we are panicking. */ | |
1884 | mov x15, #PPL_EXIT_PANIC_CALL | |
1885 | ||
1886 | /* Get the PPL per-CPU data. */ | |
1887 | GET_PMAP_CPU_DATA x11, x12, x13 | |
1888 | ||
1889 | /* Restore the old stack pointer, as we can't push onto the PPL stack after we exit the PPL. */ | |
1890 | ldr x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP] | |
1891 | mov sp, x12 | |
1892 | ||
1893 | /* | |
1894 | * Mark this CPU as being in the PPL. Halt and catch fire if our state | |
1895 | * machine appears to be broken. | |
1896 | */ | |
1897 | ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE] | |
1898 | cmp w12, #PPL_STATE_DISPATCH | |
1899 | b.ne . | |
1900 | mov w13, #PPL_STATE_PANIC | |
1901 | str w13, [x11, PMAP_CPU_DATA_PPL_STATE] | |
1902 | ||
1903 | /* Now we are ready to exit the PPL. */ | |
1904 | b ppl_return_to_kernel_mode | |
1905 | ||
1906 | .data | |
1907 | Lppl_bad_call_panic_str: | |
1908 | .asciz "ppl_dispatch: failed due to bad arguments/state" | |
1909 | #else /* XNU_MONITOR */ | |
5ba3f43e A |
1910 | .text |
1911 | .align 2 | |
1912 | .globl EXT(ml_panic_trap_to_debugger) | |
1913 | LEXT(ml_panic_trap_to_debugger) | |
1914 | ret | |
c6bf4f31 | 1915 | #endif /* XNU_MONITOR */ |
5ba3f43e A |
1916 | |
1917 | /* ARM64_TODO Is globals_asm.h needed? */ | |
1918 | //#include "globals_asm.h" | |
1919 | ||
1920 | /* vim: set ts=4: */ |