1/*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/machine_routines_asm.h>
31#include <arm64/proc_reg.h>
32#include <pexpert/arm64/board_config.h>
33#include <mach/exception_types.h>
34#include <mach_kdp.h>
35#include <config_dtrace.h>
36#include "assym.s"
37#include <arm64/exception_asm.h>
38
39#if __ARM_KERNEL_PROTECT__
40#include <arm/pmap.h>
41#endif
42
43
44#define CBF_DISABLE 0
45#define CBF_ENABLE 1
46
47.macro COMPARE_BRANCH_FUSION
48#if defined(APPLE_ARM64_ARCH_FAMILY)
49 mrs $1, ARM64_REG_HID1
50 .if $0 == CBF_DISABLE
51 orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
52 .else
53 mov $2, ARM64_REG_HID1_disCmpBrFusion
54 bic $1, $1, $2
55 .endif
56 msr ARM64_REG_HID1, $1
57 .if $0 == CBF_DISABLE
58 isb sy
59 .endif
60#endif
61.endmacro
62
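/*
 * Illustrative only (this invocation does not appear in this file): the macro
 * takes a CBF_* constant plus one or two scratch registers, which it clobbers,
 * e.g.
 *
 *     COMPARE_BRANCH_FUSION CBF_ENABLE, x15, x16
 *
 * On builds without APPLE_ARM64_ARCH_FAMILY the macro expands to nothing.
 */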
63/*
64 * MAP_KERNEL
65 *
66 * Restores the kernel EL1 mappings, if necessary.
67 *
68 * This may mutate x18.
69 */
70.macro MAP_KERNEL
71#if __ARM_KERNEL_PROTECT__
72 /* Switch to the kernel ASID (low bit set) for the task. */
73 mrs x18, TTBR0_EL1
74 orr x18, x18, #(1 << TTBR_ASID_SHIFT)
75 msr TTBR0_EL1, x18
76
77 /*
78 * We eschew some barriers on Apple CPUs, as relative ordering of writes
79 * to the TTBRs and writes to the TCR should be ensured by the
80 * microarchitecture.
81 */
82#if !defined(APPLE_ARM64_ARCH_FAMILY)
83 isb sy
84#endif
85
86 /*
87 * Update the TCR to map the kernel now that we are using the kernel
88 * ASID.
89 */
90 MOV64 x18, TCR_EL1_BOOT
91 msr TCR_EL1, x18
92 isb sy
93#endif /* __ARM_KERNEL_PROTECT__ */
94.endmacro
95
96/*
97 * BRANCH_TO_KVA_VECTOR
98 *
99 * Branches to the requested long exception vector in the kernelcache.
100 * arg0 - The label to branch to
101 * arg1 - The index of the label in exc_vectors_tables
102 *
103 * This may mutate x18.
104 */
105.macro BRANCH_TO_KVA_VECTOR
106#if __ARM_KERNEL_PROTECT__
107 /*
108 * Find the kernelcache table for the exception vectors by accessing
109 * the per-CPU data.
110 */
111 mrs x18, TPIDR_EL1
112 ldr x18, [x18, ACT_CPUDATAP]
113 ldr x18, [x18, CPU_EXC_VECTORS]
114
115 /*
116 * Get the handler for this exception and jump to it.
117 */
118 ldr x18, [x18, #($1 << 3)]
119 br x18
120#else
121 b $0
122#endif /* __ARM_KERNEL_PROTECT__ */
123.endmacro
124
125#if __ARM_KERNEL_PROTECT__
126 .text
127 .align 3
128 .globl EXT(exc_vectors_table)
129LEXT(exc_vectors_table)
130 /* Table of exception handlers.
131 * These handlers sometimes contain deadloops.
132 * It's nice to have symbols for them when debugging. */
133 .quad el1_sp0_synchronous_vector_long
134 .quad el1_sp0_irq_vector_long
135 .quad el1_sp0_fiq_vector_long
136 .quad el1_sp0_serror_vector_long
137 .quad el1_sp1_synchronous_vector_long
138 .quad el1_sp1_irq_vector_long
139 .quad el1_sp1_fiq_vector_long
140 .quad el1_sp1_serror_vector_long
141 .quad el0_synchronous_vector_64_long
142 .quad el0_irq_vector_64_long
143 .quad el0_fiq_vector_64_long
144 .quad el0_serror_vector_64_long
145#endif /* __ARM_KERNEL_PROTECT__ */
146
147 .text
148#if __ARM_KERNEL_PROTECT__
149 /*
150 * We need this to be on a page boundary so that we can avoid mapping
151 * other text along with it. As this must be on the VM page boundary
152 * (due to how the coredumping code currently works), this will be a
153 * 16KB page boundary.
154 */
155 .align 14
156#else
157 .align 12
158#endif /* __ARM_KERNEL_PROTECT__ */
159 .globl EXT(ExceptionVectorsBase)
160LEXT(ExceptionVectorsBase)
161Lel1_sp0_synchronous_vector:
162 BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0
163
164 .text
165 .align 7
166Lel1_sp0_irq_vector:
167 BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
168
169 .text
170 .align 7
171Lel1_sp0_fiq_vector:
172 BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2
173
174 .text
175 .align 7
176Lel1_sp0_serror_vector:
177 BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3
178
179 .text
180 .align 7
181Lel1_sp1_synchronous_vector:
182 BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4
183
184 .text
185 .align 7
186Lel1_sp1_irq_vector:
187 BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5
188
189 .text
190 .align 7
191Lel1_sp1_fiq_vector:
192 BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6
193
194 .text
195 .align 7
196Lel1_sp1_serror_vector:
197 BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7
198
199 .text
200 .align 7
201Lel0_synchronous_vector_64:
202 MAP_KERNEL
203 BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8
204
205 .text
206 .align 7
207Lel0_irq_vector_64:
208 MAP_KERNEL
209 BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9
210
211 .text
212 .align 7
213Lel0_fiq_vector_64:
214 MAP_KERNEL
215 BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10
216
217 .text
218 .align 7
219Lel0_serror_vector_64:
220 MAP_KERNEL
221 BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11
222
223 /* Fill out the rest of the page */
224 .align 12
225
226/*********************************
227 * END OF EXCEPTION VECTORS PAGE *
228 *********************************/
229
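/*
 * EL1_SP0_VECTOR
 *
 * Saves the minimal context for an exception taken from EL1 while on SP0:
 * switches back to SP0, carves an exception frame out of that stack, stores
 * x0, x1, fp, lr and the original stack pointer, and leaves the saved-state
 * pointer in x0 for fleh_dispatch64.
 */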
230.macro EL1_SP0_VECTOR
231 msr SPSel, #0 // Switch to SP0
232 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
233 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
234 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
235 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
236 stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
237 INIT_SAVED_STATE_FLAVORS sp, w0, w1
238 mov x0, sp // Copy saved state pointer to x0
239.endmacro
240
241el1_sp0_synchronous_vector_long:
242 sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack
243 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack
244 mrs x1, ESR_EL1 // Get the exception syndrome
245 /* If the stack pointer is corrupt, it will manifest either as a data abort
246 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). Both of these
247 * exception classes have bit 5 set, so we can quickly rule them out by
248 * testing bit 5 of the exception class. */
249 tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
250 mrs x0, SP_EL0 // Get SP_EL0
251 stp fp, lr, [sp, SS64_FP] // Save fp, lr to the stack
252 str x0, [sp, SS64_SP] // Save sp to the stack
253 bl check_kernel_stack
254 ldp fp, lr, [sp, SS64_FP] // Restore fp, lr
255Lkernel_stack_valid:
256 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
257 add sp, sp, ARM_CONTEXT_SIZE // Restore SP1
258 EL1_SP0_VECTOR
259 adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
260 add x1, x1, EXT(fleh_synchronous)@pageoff
261 b fleh_dispatch64
262
263el1_sp0_irq_vector_long:
264 EL1_SP0_VECTOR
265 mrs x1, TPIDR_EL1
266 ldr x1, [x1, ACT_CPUDATAP]
267 ldr x1, [x1, CPU_ISTACKPTR]
268 mov sp, x1
269 adrp x1, EXT(fleh_irq)@page // Load address for fleh
270 add x1, x1, EXT(fleh_irq)@pageoff
271 b fleh_dispatch64
272
273el1_sp0_fiq_vector_long:
274 // ARM64_TODO write optimized decrementer
275 EL1_SP0_VECTOR
276 mrs x1, TPIDR_EL1
277 ldr x1, [x1, ACT_CPUDATAP]
278 ldr x1, [x1, CPU_ISTACKPTR]
279 mov sp, x1
280 adrp x1, EXT(fleh_fiq)@page // Load address for fleh
281 add x1, x1, EXT(fleh_fiq)@pageoff
282 b fleh_dispatch64
283
284el1_sp0_serror_vector_long:
285 EL1_SP0_VECTOR
286 adrp x1, EXT(fleh_serror)@page // Load address for fleh
287 add x1, x1, EXT(fleh_serror)@pageoff
288 b fleh_dispatch64
289
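/*
 * EL1_SP1_VECTOR
 *
 * Like EL1_SP0_VECTOR, but for exceptions taken while on SP1: builds the
 * exception frame on the current (SP1) stack without switching stack
 * pointers, and leaves the saved-state pointer in x0 for fleh_dispatch64.
 */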
290.macro EL1_SP1_VECTOR
291 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
292 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
293 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
294 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
295 INIT_SAVED_STATE_FLAVORS sp, w0, w1
296 stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
297 mov x0, sp // Copy saved state pointer to x0
298.endmacro
299
300el1_sp1_synchronous_vector_long:
301 b check_exception_stack
302Lel1_sp1_synchronous_valid_stack:
303#if defined(KERNEL_INTEGRITY_KTRR)
304 b check_ktrr_sctlr_trap
305Lel1_sp1_synchronous_vector_continue:
306#endif
307 EL1_SP1_VECTOR
308 adrp x1, fleh_synchronous_sp1@page
309 add x1, x1, fleh_synchronous_sp1@pageoff
310 b fleh_dispatch64
311
312el1_sp1_irq_vector_long:
313 EL1_SP1_VECTOR
314 adrp x1, fleh_irq_sp1@page
315 add x1, x1, fleh_irq_sp1@pageoff
316 b fleh_dispatch64
317
318el1_sp1_fiq_vector_long:
319 EL1_SP1_VECTOR
320 adrp x1, fleh_fiq_sp1@page
321 add x1, x1, fleh_fiq_sp1@pageoff
322 b fleh_dispatch64
323
324el1_sp1_serror_vector_long:
325 EL1_SP1_VECTOR
326 adrp x1, fleh_serror_sp1@page
327 add x1, x1, fleh_serror_sp1@pageoff
328 b fleh_dispatch64
329
330#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
331/**
332 * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
333 * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
334 */
335#define SCTLR_EL1_INITIAL (SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
336#define SCTLR_EL1_EXPECTED ((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
337#endif
338
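/*
 * EL0_64_VECTOR
 *
 * Entry path for 64-bit EL0 exceptions: saves x0, x1, fp, lr and the user
 * stack pointer into the thread's user saved state (PCB), clears fp/lr for
 * the debugger, and leaves the PCB pointer in x0 for fleh_dispatch64. On CPUs
 * with Apple PAC but without APCFG/APSTS, it also re-enables the kernel JOP
 * keys here unless JOP is disabled via boot-args.
 */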
339.macro EL0_64_VECTOR
340 mov x18, #0 // Zero x18 to avoid leaking data to user SS
341 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
342#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
343 // enable JOP for kernel
344 adrp x0, EXT(const_boot_args)@page
345 add x0, x0, EXT(const_boot_args)@pageoff
346 ldr x0, [x0, BA_BOOT_FLAGS]
347 and x0, x0, BA_BOOT_FLAGS_DISABLE_JOP
348 cbnz x0, 1f
349 // if disable jop is set, don't touch SCTLR (it's already off)
350 // if (!boot_args->kernel_jop_disable) {
351 mrs x0, SCTLR_EL1
352 tbnz x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
353 // turn on jop for kernel if it isn't already on
354 // if (!jop_running) {
355 MOV64 x1, SCTLR_JOP_KEYS_ENABLED
356 orr x0, x0, x1
357 msr SCTLR_EL1, x0
358 isb sy
359 MOV64 x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
360 cmp x0, x1
361 bne .
362 // }
363 // }
3641:
365#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
366 mrs x0, TPIDR_EL1 // Load the thread register
367 mrs x1, SP_EL0 // Load the user stack pointer
368 add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
369 ldr x0, [x0] // Load the user context pointer
370 str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB
371 msr SP_EL0, x0 // Copy the user PCB pointer to SP0
372 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
373 msr SPSel, #0 // Switch to SP0
374 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
375 stp fp, lr, [sp, SS64_FP] // Save fp and lr to the user PCB
376 mov fp, #0 // Clear the fp and lr for the
377 mov lr, #0 // debugger stack frame
378 mov x0, sp // Copy the user PCB pointer to x0
379.endmacro
380
381
382el0_synchronous_vector_64_long:
383 EL0_64_VECTOR
384 mrs x1, TPIDR_EL1 // Load the thread register
385 ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
386 mov sp, x1 // Set the stack pointer to the kernel stack
387 adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
388 add x1, x1, EXT(fleh_synchronous)@pageoff
389 b fleh_dispatch64
390
391el0_irq_vector_64_long:
392 EL0_64_VECTOR
393 mrs x1, TPIDR_EL1
394 ldr x1, [x1, ACT_CPUDATAP]
395 ldr x1, [x1, CPU_ISTACKPTR]
396 mov sp, x1 // Set the stack pointer to the kernel stack
397 adrp x1, EXT(fleh_irq)@page // load address for fleh
398 add x1, x1, EXT(fleh_irq)@pageoff
399 b fleh_dispatch64
400
401el0_fiq_vector_64_long:
402 EL0_64_VECTOR
403 mrs x1, TPIDR_EL1
404 ldr x1, [x1, ACT_CPUDATAP]
405 ldr x1, [x1, CPU_ISTACKPTR]
406 mov sp, x1 // Set the stack pointer to the kernel stack
407 adrp x1, EXT(fleh_fiq)@page // load address for fleh
408 add x1, x1, EXT(fleh_fiq)@pageoff
409 b fleh_dispatch64
410
411el0_serror_vector_64_long:
412 EL0_64_VECTOR
413 mrs x1, TPIDR_EL1 // Load the thread register
414 ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
415 mov sp, x1 // Set the stack pointer to the kernel stack
416 adrp x1, EXT(fleh_serror)@page // load address for fleh
417 add x1, x1, EXT(fleh_serror)@pageoff
418 b fleh_dispatch64
419
420
421/*
422 * check_exception_stack
423 *
424 * Verifies that the stack pointer at SP1 is within the exception stack.
425 * If not, we simply hang, as we have no more stack to fall back on.
426 */
427
428 .text
429 .align 2
430check_exception_stack:
431 mrs x18, TPIDR_EL1 // Get thread pointer
432 cbz x18, Lvalid_exception_stack // Thread context may not be set early in boot
433 ldr x18, [x18, ACT_CPUDATAP]
434 cbz x18, . // If thread context is set, cpu data should be too
435 ldr x18, [x18, CPU_EXCEPSTACK_TOP]
436 cmp sp, x18
437 b.gt . // Hang if above exception stack top
438 sub x18, x18, EXCEPSTACK_SIZE_NUM // Find bottom of exception stack
439 cmp sp, x18
440 b.lt . // Hang if below exception stack bottom
441Lvalid_exception_stack:
442 mov x18, #0
443 b Lel1_sp1_synchronous_valid_stack
444
445/*
446 * check_kernel_stack
447 *
448 * Verifies that the kernel stack is aligned and mapped within an expected
449 * stack address range. Note: happens before saving registers (in case we can't
450 * save to kernel stack).
451 *
452 * Expects:
453 * {x0, x1, sp} - saved
454 * x0 - SP_EL0
455 * x1 - Exception syndrome
456 * sp - Saved state
457 */
458 .text
459 .align 2
460check_kernel_stack:
461 stp x2, x3, [sp, SS64_X2] // Save {x2-x3}
462 and x1, x1, #ESR_EC_MASK // Mask the exception class
463 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
464 cmp x1, x2 // If we have a stack alignment exception
465 b.eq Lcorrupt_stack // ...the stack is definitely corrupted
466 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
467 cmp x1, x2 // If we have a data abort, we need to
468 b.ne Lvalid_stack // ...validate the stack pointer
469 mrs x1, TPIDR_EL1 // Get thread pointer
470Ltest_kstack:
471 ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
472 sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
473 cmp x0, x2 // if (SP_EL0 >= kstack top)
474 b.ge Ltest_istack // jump to istack test
475 cmp x0, x3 // if (SP_EL0 > kstack bottom)
476 b.gt Lvalid_stack // stack pointer valid
477Ltest_istack:
478 ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
479 ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
480 sub x3, x2, INTSTACK_SIZE_NUM // Find bottom of istack
481 cmp x0, x2 // if (SP_EL0 >= istack top)
482 b.ge Lcorrupt_stack // corrupt stack pointer
483 cmp x0, x3 // if (SP_EL0 > istack bottom)
484 b.gt Lvalid_stack // stack pointer valid
485Lcorrupt_stack:
486 INIT_SAVED_STATE_FLAVORS sp, w0, w1
487 mov x0, sp // Copy exception frame pointer to x0
488 adrp x1, fleh_invalid_stack@page // Load address for fleh
489 add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
490 ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
491 b fleh_dispatch64
492Lvalid_stack:
493 ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
494 ret
495
496#if defined(KERNEL_INTEGRITY_KTRR)
497 .text
498 .align 2
499check_ktrr_sctlr_trap:
500/* We may abort on an instruction fetch on reset when enabling the MMU by
501 * writing SCTLR_EL1 because the page containing the privileged instruction is
502 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
503 * would otherwise panic unconditionally. Check for the condition and return
504 * safe execution to the caller on behalf of the faulting function.
505 *
506 * Expected register state:
507 * x22 - Kernel virtual base
508 * x23 - Kernel physical base
509 */
510 sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack
511 stp x0, x1, [sp, SS64_X0] // Stash x0, x1
512 mrs x0, ESR_EL1 // Check ESR for instr. fetch abort
513 and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC
514 movz w1, #0x8600, lsl #16
515 movk w1, #0x0000
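// w1 == 0x86000000: EC == 0x21 (instruction abort without a change in EL), IL set, remaining ISS bits zero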
516 cmp x0, x1
517 mrs x0, ELR_EL1 // Check for expected abort address
518 adrp x1, _pinst_set_sctlr_trap_addr@page
519 add x1, x1, _pinst_set_sctlr_trap_addr@pageoff
520 sub x1, x1, x22 // Convert to physical address
521 add x1, x1, x23
522 ccmp x0, x1, #0, eq
523 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
524 add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
525 b.ne Lel1_sp1_synchronous_vector_continue
526 msr ELR_EL1, lr // Return to caller
527 eret
528#endif /* defined(KERNEL_INTEGRITY_KTRR)*/
529
530/* 64-bit first level exception handler dispatcher.
531 * Completes register context saving and branches to FLEH.
532 * Expects:
533 * {x0, x1, fp, lr, sp} - saved
534 * x0 - arm_context_t
535 * x1 - address of FLEH
536 * fp - previous stack frame if EL1
537 * lr - unused
538 * sp - kernel stack
539 */
540 .text
541 .align 2
542fleh_dispatch64:
543 /* Save arm_saved_state64 */
544 SPILL_REGISTERS KERNEL_MODE
545
546 /* If exception is from userspace, zero unused registers */
547 and x23, x23, #(PSR64_MODE_EL_MASK)
548 cmp x23, #(PSR64_MODE_EL0)
549 bne 1f
550
551 mov x2, #0
552 mov x3, #0
553 mov x4, #0
554 mov x5, #0
555 mov x6, #0
556 mov x7, #0
557 mov x8, #0
558 mov x9, #0
559 mov x10, #0
560 mov x11, #0
561 mov x12, #0
562 mov x13, #0
563 mov x14, #0
564 mov x15, #0
565 mov x16, #0
566 mov x17, #0
567 mov x18, #0
568 mov x19, #0
569 mov x20, #0
570 /* x21, x22 cleared in common case below */
571 mov x23, #0
572 mov x24, #0
573 mov x25, #0
574 mov x26, #0
575 mov x27, #0
576 mov x28, #0
577 /* fp/lr already cleared by EL0_64_VECTOR */
5781:
579
580 mov x21, x0 // Copy arm_context_t pointer to x21
581 mov x22, x1 // Copy handler routine to x22
582
583
584#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
585 tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
586 b.ne 1f // kernel mode, so skip precise time update
587 PUSH_FRAME
588 bl EXT(timer_state_event_user_to_kernel)
589 POP_FRAME
590 mov x0, x21 // Reload arm_context_t pointer
5911:
592#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
593
594 /* Dispatch to FLEH */
595
596 br x22
597
598
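/*
 * fleh_synchronous
 *
 * First level handler for synchronous exceptions, reached via
 * fleh_dispatch64. Expects x0 = saved state pointer; loads ESR_EL1 and
 * FAR_EL1 into x1/x2, hands all three to sleh_synchronous, and then runs the
 * exception-return dispatch.
 */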
599 .text
600 .align 2
601 .global EXT(fleh_synchronous)
602LEXT(fleh_synchronous)
603 mrs x1, ESR_EL1 // Load exception syndrome
604 mrs x2, FAR_EL1 // Load fault address
605
606 /* At this point, the LR contains the value of ELR_EL1. In the case of an
607 * instruction prefetch abort, this will be the faulting pc, which we know
608 * to be invalid. This will prevent us from backtracing through the
609 * exception if we put it in our stack frame, so we load the LR from the
610 * exception saved state instead.
611 */
612 and w3, w1, #(ESR_EC_MASK)
613 lsr w3, w3, #(ESR_EC_SHIFT)
614 mov w4, #(ESR_EC_IABORT_EL1)
615 cmp w3, w4
616 b.eq Lfleh_sync_load_lr
617Lvalid_link_register:
618
619 PUSH_FRAME
620 bl EXT(sleh_synchronous)
621 POP_FRAME
622
623
624 b exception_return_dispatch
625
626Lfleh_sync_load_lr:
627 ldr lr, [x0, SS64_LR]
628 b Lvalid_link_register
629
630/* Shared prologue code for fleh_irq and fleh_fiq.
631 * Does any interrupt bookkeeping we may want to do
632 * before invoking the handler proper.
633 * Expects:
634 * x0 - arm_context_t
635 * x23 - CPSR
636 * fp - Undefined live value (we may push a frame)
637 * lr - Undefined live value (we may push a frame)
638 * sp - Interrupt stack for the current CPU
639 */
640.macro BEGIN_INTERRUPT_HANDLER
641 mrs x22, TPIDR_EL1
642 ldr x23, [x22, ACT_CPUDATAP] // Get current cpu
643 /* Update IRQ count */
644 ldr w1, [x23, CPU_STAT_IRQ]
645 add w1, w1, #1 // Increment count
646 str w1, [x23, CPU_STAT_IRQ] // Update IRQ count
647 ldr w1, [x23, CPU_STAT_IRQ_WAKE]
648 add w1, w1, #1 // Increment count
649 str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
650 /* Increment preempt count */
651 ldr w1, [x22, ACT_PREEMPT_CNT]
652 add w1, w1, #1
653 str w1, [x22, ACT_PREEMPT_CNT]
654 /* Store context in int state */
655 str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state
656.endmacro
657
658/* Shared epilogue code for fleh_irq and fleh_fiq.
659 * Cleans up after the prologue, and may do a bit more
660 * bookkeeping (kdebug related).
661 * Expects:
662 * x22 - Live TPIDR_EL1 value (thread address)
663 * x23 - Address of the current CPU data structure
664 * w24 - 0 if kdebug is disabled, nonzero otherwise
665 * fp - Undefined live value (we may push a frame)
666 * lr - Undefined live value (we may push a frame)
667 * sp - Interrupt stack for the current CPU
668 */
669.macro END_INTERRUPT_HANDLER
670 /* Clear int context */
671 str xzr, [x23, CPU_INT_STATE]
672 /* Decrement preempt count */
673 ldr w0, [x22, ACT_PREEMPT_CNT]
674 cbnz w0, 1f // Detect underflow
675 b preempt_underflow
6761:
677 sub w0, w0, #1
678 str w0, [x22, ACT_PREEMPT_CNT]
679 /* Switch back to kernel stack */
680 ldr x0, [x22, TH_KSTACKPTR]
681 mov sp, x0
682.endmacro
683
684 .text
685 .align 2
686 .global EXT(fleh_irq)
687LEXT(fleh_irq)
688 BEGIN_INTERRUPT_HANDLER
689 PUSH_FRAME
690 bl EXT(sleh_irq)
691 POP_FRAME
692 END_INTERRUPT_HANDLER
693
694
695 b exception_return_dispatch
696
697 .text
698 .align 2
699 .global EXT(fleh_fiq_generic)
700LEXT(fleh_fiq_generic)
701 PANIC_UNIMPLEMENTED
702
703 .text
704 .align 2
705 .global EXT(fleh_fiq)
706LEXT(fleh_fiq)
707 BEGIN_INTERRUPT_HANDLER
708 PUSH_FRAME
709 bl EXT(sleh_fiq)
710 POP_FRAME
711 END_INTERRUPT_HANDLER
712
713
714 b exception_return_dispatch
715
716 .text
717 .align 2
718 .global EXT(fleh_serror)
719LEXT(fleh_serror)
720 mrs x1, ESR_EL1 // Load exception syndrome
721 mrs x2, FAR_EL1 // Load fault address
722
723 PUSH_FRAME
724 bl EXT(sleh_serror)
725 POP_FRAME
726
727
728 b exception_return_dispatch
729
730/*
731 * Register state saved before we get here.
732 */
733 .text
734 .align 2
735fleh_invalid_stack:
736 mrs x1, ESR_EL1 // Load exception syndrome
737 str x1, [x0, SS64_ESR]
738 mrs x2, FAR_EL1 // Load fault address
739 str x2, [x0, SS64_FAR]
740 PUSH_FRAME
741 bl EXT(sleh_invalid_stack) // Shouldn't return!
742 b .
743
744 .text
745 .align 2
746fleh_synchronous_sp1:
747 mrs x1, ESR_EL1 // Load exception syndrome
748 str x1, [x0, SS64_ESR]
749 mrs x2, FAR_EL1 // Load fault address
750 str x2, [x0, SS64_FAR]
751 PUSH_FRAME
752 bl EXT(sleh_synchronous_sp1)
753 b .
754
755 .text
756 .align 2
757fleh_irq_sp1:
758 mov x1, x0
759 adr x0, Lsp1_irq_str
760 b EXT(panic_with_thread_kernel_state)
761Lsp1_irq_str:
762 .asciz "IRQ exception taken while SP1 selected"
763
764 .text
765 .align 2
766fleh_fiq_sp1:
767 mov x1, x0
768 adr x0, Lsp1_fiq_str
769 b EXT(panic_with_thread_kernel_state)
770Lsp1_fiq_str:
771 .asciz "FIQ exception taken while SP1 selected"
772
773 .text
774 .align 2
775fleh_serror_sp1:
776 mov x1, x0
777 adr x0, Lsp1_serror_str
778 b EXT(panic_with_thread_kernel_state)
779Lsp1_serror_str:
780 .asciz "Asynchronous exception taken while SP1 selected"
781
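/*
 * exception_return_dispatch
 *
 * Examines the saved CPSR in the context pointed to by x21 and routes the
 * exception return to either return_to_kernel or return_to_user.
 */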
782 .text
783 .align 2
784exception_return_dispatch:
785 ldr w0, [x21, SS64_CPSR]
786 tst w0, PSR64_MODE_EL_MASK
787 b.ne return_to_kernel // return to kernel if M[3:2] > 0
788 b return_to_user
789
790 .text
791 .align 2
792return_to_kernel:
793 tbnz w0, #DAIF_IRQF_SHIFT, exception_return // Skip AST check if IRQ disabled
794 mrs x3, TPIDR_EL1 // Load thread pointer
795 ldr w1, [x3, ACT_PREEMPT_CNT] // Load preemption count
796 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
797 cbnz x1, exception_return_unint_tpidr_x3 // If preemption disabled, skip AST check
798 ldr x1, [x3, ACT_CPUDATAP] // Get current CPU data pointer
799 ldr x2, [x1, CPU_PENDING_AST] // Get ASTs
800 tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken
801 b.eq exception_return_unint_tpidr_x3
802 mov sp, x21 // Switch to thread stack for preemption
803 PUSH_FRAME
804 bl EXT(ast_taken_kernel) // Handle AST_URGENT
805 POP_FRAME
806 b exception_return
807
808 .text
809 .globl EXT(thread_bootstrap_return)
810LEXT(thread_bootstrap_return)
811#if CONFIG_DTRACE
812 bl EXT(dtrace_thread_bootstrap)
813#endif
814 b EXT(thread_exception_return)
815
816 .text
817 .globl EXT(thread_exception_return)
818LEXT(thread_exception_return)
819 mrs x0, TPIDR_EL1
820 add x21, x0, ACT_CONTEXT
821 ldr x21, [x21]
822
823 //
824 // Fall Through to return_to_user from thread_exception_return.
825 // Note that if we move return_to_user or insert a new routine
826 // below thread_exception_return, the latter will need to change.
827 //
828 .text
829return_to_user:
830check_user_asts:
831 mrs x3, TPIDR_EL1 // Load thread pointer
832
833 movn w2, #0
834 str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
835
836#if MACH_ASSERT
837 ldr w0, [x3, TH_RWLOCK_CNT]
838 cbz w0, 1f // Detect unbalanced RW lock/unlock
839 b rwlock_count_notzero
8401:
841 ldr w0, [x3, ACT_PREEMPT_CNT]
842 cbz w0, 1f
843 b preempt_count_notzero
8441:
845#endif
846
847 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
848 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
849 ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
850 cbnz x0, user_take_ast // If pending ASTs, go service them
851
852#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
853 mov x19, x3 // Preserve thread pointer across function call
854 PUSH_FRAME
855 bl EXT(timer_state_event_kernel_to_user)
856 POP_FRAME
857 mov x3, x19
858#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
859
860#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
861 /* Watchtower
862 *
863 * Here we attempt to enable NEON access for EL0. If the last entry into the
864 * kernel from user-space was due to an IRQ, the monitor will have disabled
865 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
866 * check in with the monitor in order to reenable NEON for EL0 in exchange
867 * for routing IRQs through the monitor (2). This way the monitor will
868 * always 'own' either IRQs or EL0 NEON.
869 *
870 * If Watchtower is disabled or we did not enter the kernel through an IRQ
871 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
872 * here.
873 *
874 * EL0 user ________ IRQ ______
875 * EL1 xnu \ ______________________ CPACR_EL1 __/
876 * EL3 monitor \_/ \___/
877 *
878 * (1) (2)
879 */
880
881 mov x0, #(CPACR_FPEN_ENABLE)
882 msr CPACR_EL1, x0
883#endif
884
885 /* Establish this thread's debug state as the live state on the selected CPU. */
886 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
887 ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
888 ldr x0, [x3, ACT_DEBUGDATA]
889 orr x1, x1, x0 // Thread debug state and live debug state both NULL?
890 cbnz x1, user_set_debug_state_and_return // If one or the other non-null, go set debug state
891 b exception_return_unint_tpidr_x3
892
893 //
894 // Fall through from return_to_user to exception_return.
895 // Note that if we move exception_return or add a new routine below
896 // return_to_user, the latter will have to change.
897 //
898
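/*
 * Three entry points into the restore path:
 *   exception_return                - masks exceptions, then falls through
 *   exception_return_unint          - entered with exceptions already masked
 *   exception_return_unint_tpidr_x3 - entered with exceptions masked and the
 *                                     thread pointer already loaded in x3
 * All of them restore register state from the PCB pointed to by x21.
 */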
899exception_return:
900 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
901exception_return_unint:
902 mrs x3, TPIDR_EL1 // Load thread pointer
903exception_return_unint_tpidr_x3:
904 mov sp, x21 // Reload the pcb pointer
905
906 /* ARM64_TODO Reserve x18 until we decide what to do with it */
907 str xzr, [sp, SS64_X18]
908
909#if __ARM_KERNEL_PROTECT__
910 /*
911 * If we are going to eret to userspace, we must return through the EL0
912 * eret mapping.
913 */
914 ldr w1, [sp, SS64_CPSR] // Load CPSR
915 tbnz w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1
916
917 /* We need to switch to the EL0 mapping of this code to eret to EL0. */
918 adrp x0, EXT(ExceptionVectorsBase)@page // Load vector base
919 adrp x1, Lexception_return_restore_registers@page // Load target PC
920 add x1, x1, Lexception_return_restore_registers@pageoff
921 MOV64 x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address
922 sub x1, x1, x0 // Calculate delta
923 add x0, x2, x1 // Convert KVA to EL0 vector address
924 br x0
925
926Lskip_el0_eret_mapping:
927#endif /* __ARM_KERNEL_PROTECT__ */
928
929Lexception_return_restore_registers:
930 mov x0, sp // x0 = &pcb
931 // Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
932 AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24
933
934/* Restore special register state */
935 ldr w3, [sp, NS64_FPSR]
936 ldr w4, [sp, NS64_FPCR]
937
938 msr ELR_EL1, x1 // Load the return address into ELR
939 msr SPSR_EL1, x2 // Load the return CPSR into SPSR
940 msr FPSR, x3
941 msr FPCR, x4 // Synchronized by ERET
942
943#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
944 /* if eret to userspace, disable JOP */
945 tbnz w2, PSR64_MODE_EL_SHIFT, Lskip_disable_jop
946 adrp x4, EXT(const_boot_args)@page
947 add x4, x4, EXT(const_boot_args)@pageoff
948 ldr x4, [x4, BA_BOOT_FLAGS]
949 and x1, x4, BA_BOOT_FLAGS_DISABLE_JOP
950 cbnz x1, Lskip_disable_jop // if global JOP disabled, don't touch SCTLR (kernel JOP is already off)
951 and x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP
952 cbnz x1, Ldisable_jop // if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on)
953 mrs x2, TPIDR_EL1
954 ldr x2, [x2, TH_DISABLE_USER_JOP]
955 cbz x2, Lskip_disable_jop // if thread has JOP enabled, leave it on (kernel running with JOP on)
956Ldisable_jop:
957 MOV64 x1, SCTLR_JOP_KEYS_ENABLED
958 mrs x4, SCTLR_EL1
959 bic x4, x4, x1
960 msr SCTLR_EL1, x4
961 MOV64 x1, SCTLR_EL1_EXPECTED
962 cmp x4, x1
963 bne .
964Lskip_disable_jop:
965#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)*/
966
967 /* Restore arm_neon_saved_state64 */
968 ldp q0, q1, [x0, NS64_Q0]
969 ldp q2, q3, [x0, NS64_Q2]
970 ldp q4, q5, [x0, NS64_Q4]
971 ldp q6, q7, [x0, NS64_Q6]
972 ldp q8, q9, [x0, NS64_Q8]
973 ldp q10, q11, [x0, NS64_Q10]
974 ldp q12, q13, [x0, NS64_Q12]
975 ldp q14, q15, [x0, NS64_Q14]
976 ldp q16, q17, [x0, NS64_Q16]
977 ldp q18, q19, [x0, NS64_Q18]
978 ldp q20, q21, [x0, NS64_Q20]
979 ldp q22, q23, [x0, NS64_Q22]
980 ldp q24, q25, [x0, NS64_Q24]
981 ldp q26, q27, [x0, NS64_Q26]
982 ldp q28, q29, [x0, NS64_Q28]
983 ldp q30, q31, [x0, NS64_Q30]
984
985 /* Restore arm_saved_state64 */
986
987 // Skip x0, x1 - we're using them
988 ldp x2, x3, [x0, SS64_X2]
989 ldp x4, x5, [x0, SS64_X4]
990 ldp x6, x7, [x0, SS64_X6]
991 ldp x8, x9, [x0, SS64_X8]
992 ldp x10, x11, [x0, SS64_X10]
993 ldp x12, x13, [x0, SS64_X12]
994 ldp x14, x15, [x0, SS64_X14]
995 // Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
996 ldp x18, x19, [x0, SS64_X18]
997 ldp x20, x21, [x0, SS64_X20]
998 ldp x22, x23, [x0, SS64_X22]
999 ldp x24, x25, [x0, SS64_X24]
1000 ldp x26, x27, [x0, SS64_X26]
1001 ldr x28, [x0, SS64_X28]
1002 ldr fp, [x0, SS64_FP]
1003 // Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1004
1005 // Restore stack pointer and our last two GPRs
1006 ldr x1, [x0, SS64_SP]
1007 mov sp, x1
1008
1009#if __ARM_KERNEL_PROTECT__
1010 ldr w18, [x0, SS64_CPSR] // Stash CPSR
1011#endif /* __ARM_KERNEL_PROTECT__ */
1012
1013 ldp x0, x1, [x0, SS64_X0] // Restore the GPRs
1014
1015#if __ARM_KERNEL_PROTECT__
1016 /* If we are going to eret to userspace, we must unmap the kernel. */
1017 tbnz w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1018
1019 /* Update TCR to unmap the kernel. */
1020 MOV64 x18, TCR_EL1_USER
1021 msr TCR_EL1, x18
1022
1023 /*
1024 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1025 * each other due to the microarchitecture.
1026 */
1027#if !defined(APPLE_ARM64_ARCH_FAMILY)
1028 isb sy
1029#endif
1030
1031 /* Switch to the user ASID (low bit clear) for the task. */
1032 mrs x18, TTBR0_EL1
1033 bic x18, x18, #(1 << TTBR_ASID_SHIFT)
1034 msr TTBR0_EL1, x18
1035 mov x18, #0
1036
1037 /* We don't need an ISB here, as the eret is synchronizing. */
1038Lskip_ttbr1_switch:
1039#endif /* __ARM_KERNEL_PROTECT__ */
1040
1041 eret
1042
1043user_take_ast:
1044 PUSH_FRAME
1045 bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation
1046 POP_FRAME
1047 b check_user_asts // Now try again
1048
1049user_set_debug_state_and_return:
1050
1051
1052 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1053 isb // Synchronize context
1054 PUSH_FRAME
1055 bl EXT(arm_debug_set) // Establish thread debug state in live regs
1056 POP_FRAME
1057 isb
1058 b exception_return_unint // Continue, reloading the thread pointer
1059
1060 .text
1061 .align 2
1062preempt_underflow:
1063 mrs x0, TPIDR_EL1
1064 str x0, [sp, #-16]! // We'll print thread pointer
1065 adr x0, L_underflow_str // Format string
1066 CALL_EXTERN panic // Game over
1067
1068L_underflow_str:
1069 .asciz "Preemption count negative on thread %p"
1070.align 2
1071
1072#if MACH_ASSERT
1073 .text
1074 .align 2
1075rwlock_count_notzero:
1076 mrs x0, TPIDR_EL1
1077 str x0, [sp, #-16]! // We'll print thread pointer
1078 ldr w0, [x0, TH_RWLOCK_CNT]
1079 str w0, [sp, #8]
1080 adr x0, L_rwlock_count_notzero_str // Format string
1081 CALL_EXTERN panic // Game over
1082
1083L_rwlock_count_notzero_str:
1084 .asciz "RW lock count not 0 on thread %p (%u)"
1085
1086 .text
1087 .align 2
1088preempt_count_notzero:
1089 mrs x0, TPIDR_EL1
1090 str x0, [sp, #-16]! // We'll print thread pointer
1091 ldr w0, [x0, ACT_PREEMPT_CNT]
1092 str w0, [sp, #8]
1093 adr x0, L_preempt_count_notzero_str // Format string
1094 CALL_EXTERN panic // Game over
1095
1096L_preempt_count_notzero_str:
1097 .asciz "preemption count not 0 on thread %p (%u)"
1098#endif /* MACH_ASSERT */
1099
1100.align 2
1101
1102#if __ARM_KERNEL_PROTECT__
1103 /*
1104 * This symbol denotes the end of the exception vector/eret range; we page
1105 * align it so that we can avoid mapping other text in the EL0 exception
1106 * vector mapping.
1107 */
1108 .text
1109 .align 14
1110 .globl EXT(ExceptionVectorsEnd)
1111LEXT(ExceptionVectorsEnd)
1112#endif /* __ARM_KERNEL_PROTECT__ */
1113
1114 .text
1115 .align 2
1116 .globl EXT(ml_panic_trap_to_debugger)
1117LEXT(ml_panic_trap_to_debugger)
1118 ret
1119
1120/* ARM64_TODO Is globals_asm.h needed? */
1121//#include "globals_asm.h"
1122
1123/* vim: set ts=4: */