1 /*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <machine/asm.h>
30 #include <arm64/proc_reg.h>
31 #include <pexpert/arm64/board_config.h>
32 #include <mach/exception_types.h>
33 #include <mach_kdp.h>
34 #include <config_dtrace.h>
35 #include "assym.s"
36
37 #if __ARM_KERNEL_PROTECT__
38 #include <arm/pmap.h>
39 #endif
40
41
42 /*
43 * INIT_SAVED_STATE_FLAVORS
44 *
45 * Initializes the saved state flavors of a new saved state structure
46 * arg0 - saved state pointer
47 * arg1 - 32-bit scratch reg
48 * arg2 - 32-bit scratch reg
49 */
50 .macro INIT_SAVED_STATE_FLAVORS
51 mov $1, ARM_SAVED_STATE64 // Set saved state to 64-bit flavor
52 mov $2, ARM_SAVED_STATE64_COUNT
53 stp $1, $2, [$0, SS_FLAVOR]
54 mov $1, ARM_NEON_SAVED_STATE64 // Set neon state to 64-bit flavor
55 str $1, [$0, NS_FLAVOR]
56 mov $1, ARM_NEON_SAVED_STATE64_COUNT
57 str $1, [$0, NS_COUNT]
58 .endmacro
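/* For illustration only (not assembled): the invocation used elsewhere in this
 * file, INIT_SAVED_STATE_FLAVORS sp, w0, w1, expands to roughly:
 *
 *     mov w0, ARM_SAVED_STATE64
 *     mov w1, ARM_SAVED_STATE64_COUNT
 *     stp w0, w1, [sp, SS_FLAVOR]
 *     mov w0, ARM_NEON_SAVED_STATE64
 *     str w0, [sp, NS_FLAVOR]
 *     mov w0, ARM_NEON_SAVED_STATE64_COUNT
 *     str w0, [sp, NS_COUNT]
 */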
59
60
61 /*
62 * SPILL_REGISTERS
63 *
64 * Spills the current set of registers (excluding x0 and x1) to the specified
65 * save area.
66 * x0 - Address of the save area
67 */
68 .macro SPILL_REGISTERS
69 stp x2, x3, [x0, SS64_X2] // Save remaining GPRs
70 stp x4, x5, [x0, SS64_X4]
71 stp x6, x7, [x0, SS64_X6]
72 stp x8, x9, [x0, SS64_X8]
73 stp x10, x11, [x0, SS64_X10]
74 stp x12, x13, [x0, SS64_X12]
75 stp x14, x15, [x0, SS64_X14]
76 stp x16, x17, [x0, SS64_X16]
77 stp x18, x19, [x0, SS64_X18]
78 stp x20, x21, [x0, SS64_X20]
79 stp x22, x23, [x0, SS64_X22]
80 stp x24, x25, [x0, SS64_X24]
81 stp x26, x27, [x0, SS64_X26]
82 str x28, [x0, SS64_X28]
83
84 /* Save arm_neon_saved_state64 */
85
86 stp q0, q1, [x0, NS64_Q0]
87 stp q2, q3, [x0, NS64_Q2]
88 stp q4, q5, [x0, NS64_Q4]
89 stp q6, q7, [x0, NS64_Q6]
90 stp q8, q9, [x0, NS64_Q8]
91 stp q10, q11, [x0, NS64_Q10]
92 stp q12, q13, [x0, NS64_Q12]
93 stp q14, q15, [x0, NS64_Q14]
94 stp q16, q17, [x0, NS64_Q16]
95 stp q18, q19, [x0, NS64_Q18]
96 stp q20, q21, [x0, NS64_Q20]
97 stp q22, q23, [x0, NS64_Q22]
98 stp q24, q25, [x0, NS64_Q24]
99 stp q26, q27, [x0, NS64_Q26]
100 stp q28, q29, [x0, NS64_Q28]
101 stp q30, q31, [x0, NS64_Q30]
102
103 mrs lr, ELR_EL1 // Get exception link register
104 mrs x23, SPSR_EL1 // Load CPSR into var reg x23
105 mrs x24, FPSR
106 mrs x25, FPCR
107
108 str lr, [x0, SS64_PC] // Save ELR to PCB
109 str w23, [x0, SS64_CPSR] // Save CPSR to PCB
110 str w24, [x0, NS64_FPSR]
111 str w25, [x0, NS64_FPCR]
112
113 mrs x20, FAR_EL1
114 mrs x21, ESR_EL1
115 str x20, [x0, SS64_FAR]
116 str w21, [x0, SS64_ESR]
117 .endmacro
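/* Beyond filling in the save area, this macro leaves lr = ELR_EL1,
 * x23 = SPSR_EL1, x24 = FPSR, x25 = FPCR, x20 = FAR_EL1 and x21 = ESR_EL1
 * live in registers; fleh_dispatch64 below relies on x23 still holding the
 * original CPSR when it tests PSR64_MODE_EL_MASK.
 */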
118
119
120 #define CBF_DISABLE 0
121 #define CBF_ENABLE 1
122
123 .macro COMPARE_BRANCH_FUSION
124 #if defined(APPLE_ARM64_ARCH_FAMILY)
125 mrs $1, ARM64_REG_HID1
126 .if $0 == CBF_DISABLE
127 orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
128 .else
129 mov $2, ARM64_REG_HID1_disCmpBrFusion
130 bic $1, $1, $2
131 .endif
132 msr ARM64_REG_HID1, $1
133 .if $0 == CBF_DISABLE
134 isb sy
135 .endif
136 #endif
137 .endmacro
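/* For illustration only (not assembled): a hypothetical invocation such as
 * COMPARE_BRANCH_FUSION CBF_DISABLE, x2, x3 would, on APPLE_ARM64_ARCH_FAMILY
 * parts, expand to roughly:
 *
 *     mrs x2, ARM64_REG_HID1
 *     orr x2, x2, ARM64_REG_HID1_disCmpBrFusion
 *     msr ARM64_REG_HID1, x2
 *     isb sy
 *
 * The CBF_ENABLE form instead clears the same HID1 bit (using the second
 * scratch register for the mask) and omits the trailing isb.
 */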
138
139 /*
140 * MAP_KERNEL
141 *
142 * Restores the kernel EL1 mappings, if necessary.
143 *
144 * This may mutate x18.
145 */
146 .macro MAP_KERNEL
147 #if __ARM_KERNEL_PROTECT__
148 /* Switch to the kernel ASID (low bit set) for the task. */
149 mrs x18, TTBR0_EL1
150 orr x18, x18, #(1 << TTBR_ASID_SHIFT)
151 msr TTBR0_EL1, x18
152
153 /*
154 * We eschew some barriers on Apple CPUs, as relative ordering of writes
155 * to the TTBRs and writes to the TCR should be ensured by the
156 * microarchitecture.
157 */
158 #if !defined(APPLE_ARM64_ARCH_FAMILY)
159 isb sy
160 #endif
161
162 /*
163 * Update the TCR to map the kernel now that we are using the kernel
164 * ASID.
165 */
166 MOV64 x18, TCR_EL1_BOOT
167 msr TCR_EL1, x18
168 isb sy
169 #endif /* __ARM_KERNEL_PROTECT__ */
170 .endmacro
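/* The inverse of this operation (restoring TCR_EL1_USER and clearing the ASID
 * low bit in TTBR0_EL1) is performed on the way back out; see the
 * __ARM_KERNEL_PROTECT__ block just before the eret at the end of
 * exception_return below.
 */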
171
172 /*
173 * BRANCH_TO_KVA_VECTOR
174 *
175 * Branches to the requested long exception vector in the kernelcache.
176 * arg0 - The label to branch to
177 * arg1 - The index of the label in exc_vectors_tables
178 *
179 * This may mutate x18.
180 */
181 .macro BRANCH_TO_KVA_VECTOR
182 #if __ARM_KERNEL_PROTECT__
183 /*
184 * Find the kernelcache table for the exception vectors by accessing
185 * the per-CPU data.
186 */
187 mrs x18, TPIDR_EL1
188 ldr x18, [x18, ACT_CPUDATAP]
189 ldr x18, [x18, CPU_EXC_VECTORS]
190
191 /*
192 * Get the handler for this exception and jump to it.
193 */
194 ldr x18, [x18, #($1 << 3)]
195 br x18
196 #else
197 b $0
198 #endif /* __ARM_KERNEL_PROTECT__ */
199 .endmacro
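/* The '$1 << 3' above scales the vector index by the size of a .quad entry:
 * index 1, for example, selects the second 8-byte slot of exc_vectors_table,
 * i.e. Lel1_sp0_irq_vector_long.
 */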
200
201 #if __ARM_KERNEL_PROTECT__
202 .data
203 .align 3
204 .globl EXT(exc_vectors_table)
205 LEXT(exc_vectors_table)
206 /* Table of exception handlers. */
207 .quad Lel1_sp0_synchronous_vector_long
208 .quad Lel1_sp0_irq_vector_long
209 .quad Lel1_sp0_fiq_vector_long
210 .quad Lel1_sp0_serror_vector_long
211 .quad Lel1_sp1_synchronous_vector_long
212 .quad Lel1_sp1_irq_vector_long
213 .quad Lel1_sp1_fiq_vector_long
214 .quad Lel1_sp1_serror_vector_long
215 .quad Lel0_synchronous_vector_64_long
216 .quad Lel0_irq_vector_64_long
217 .quad Lel0_fiq_vector_64_long
218 .quad Lel0_serror_vector_64_long
219 #endif /* __ARM_KERNEL_PROTECT__ */
220
221 .text
222 #if __ARM_KERNEL_PROTECT__
223 /*
224 * We need this to be on a page boundary so that we may avoid mapping
225 * other text along with it. As this must be on a VM page boundary
226 * (due to how the coredumping code currently works), this will be a
227 * 16KB page boundary.
228 */
229 .align 14
230 #else
231 .align 12
232 #endif /* __ARM_KERNEL_PROTECT__ */
233 .globl EXT(ExceptionVectorsBase)
234 LEXT(ExceptionVectorsBase)
235 Lel1_sp0_synchronous_vector:
236 BRANCH_TO_KVA_VECTOR Lel1_sp0_synchronous_vector_long, 0
237
238 .text
239 .align 7
240 Lel1_sp0_irq_vector:
241 BRANCH_TO_KVA_VECTOR Lel1_sp0_irq_vector_long, 1
242
243 .text
244 .align 7
245 Lel1_sp0_fiq_vector:
246 BRANCH_TO_KVA_VECTOR Lel1_sp0_fiq_vector_long, 2
247
248 .text
249 .align 7
250 Lel1_sp0_serror_vector:
251 BRANCH_TO_KVA_VECTOR Lel1_sp0_serror_vector_long, 3
252
253 .text
254 .align 7
255 Lel1_sp1_synchronous_vector:
256 BRANCH_TO_KVA_VECTOR Lel1_sp1_synchronous_vector_long, 4
257
258 .text
259 .align 7
260 Lel1_sp1_irq_vector:
261 BRANCH_TO_KVA_VECTOR Lel1_sp1_irq_vector_long, 5
262
263 .text
264 .align 7
265 Lel1_sp1_fiq_vector:
266 BRANCH_TO_KVA_VECTOR Lel1_sp1_fiq_vector_long, 6
267
268 .text
269 .align 7
270 Lel1_sp1_serror_vector:
271 BRANCH_TO_KVA_VECTOR Lel1_sp1_serror_vector_long, 7
272
273 .text
274 .align 7
275 Lel0_synchronous_vector_64:
276 MAP_KERNEL
277 BRANCH_TO_KVA_VECTOR Lel0_synchronous_vector_64_long, 8
278
279 .text
280 .align 7
281 Lel0_irq_vector_64:
282 MAP_KERNEL
283 BRANCH_TO_KVA_VECTOR Lel0_irq_vector_64_long, 9
284
285 .text
286 .align 7
287 Lel0_fiq_vector_64:
288 MAP_KERNEL
289 BRANCH_TO_KVA_VECTOR Lel0_fiq_vector_64_long, 10
290
291 .text
292 .align 7
293 Lel0_serror_vector_64:
294 MAP_KERNEL
295 BRANCH_TO_KVA_VECTOR Lel0_serror_vector_64_long, 11
296
297 /* Fill out the rest of the page */
298 .align 12
299
300 /*********************************
301 * END OF EXCEPTION VECTORS PAGE *
302 *********************************/
303
304 .macro EL1_SP0_VECTOR
305 msr SPSel, #0 // Switch to SP0
306 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
307 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
308 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
309 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
310 stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
311 INIT_SAVED_STATE_FLAVORS sp, w0, w1
312 mov x0, sp // Copy saved state pointer to x0
313 .endmacro
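/* In short: on exit from EL1_SP0_VECTOR, sp (now selecting SP0) and x0 both
 * point to a freshly carved ARM_CONTEXT_SIZE exception frame holding the
 * original x0, x1, sp, fp and lr, with the saved state flavors initialized;
 * the Lel1_sp0_* handlers below then load the fleh routine address into x1
 * (switching to the interrupt stack first where needed) and branch to
 * fleh_dispatch64.
 */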
314
315 Lel1_sp0_synchronous_vector_long:
316 sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack
317 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack
318 mrs x1, ESR_EL1 // Get the exception syndrome
319 /* If the stack pointer is corrupt, it will manifest either as a data abort
320 * (syndrome 0x25) or a misaligned stack pointer (syndrome 0x26). We can check
321 * these quickly by testing bit 5 of the exception class.
322 */
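/* Both of those exception classes (0x25 = 0b100101, 0x26 = 0b100110) have bit
 * 5 of the EC field set, so a single tbz is a cheap filter: if that bit is
 * clear the exception is neither of these and the stack check is skipped;
 * check_kernel_stack classifies the syndrome precisely otherwise.
 */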
323 tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
324 mrs x0, SP_EL0 // Get SP_EL0
325 stp fp, lr, [sp, SS64_FP] // Save fp, lr to the stack
326 str x0, [sp, SS64_SP] // Save sp to the stack
327 bl check_kernel_stack
328 ldp fp, lr, [sp, SS64_FP] // Restore fp, lr
329 Lkernel_stack_valid:
330 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
331 add sp, sp, ARM_CONTEXT_SIZE // Restore SP1
332 EL1_SP0_VECTOR
333 adrp x1, fleh_synchronous@page // Load address for fleh
334 add x1, x1, fleh_synchronous@pageoff
335 b fleh_dispatch64
336
337 Lel1_sp0_irq_vector_long:
338 EL1_SP0_VECTOR
339 mrs x1, TPIDR_EL1
340 ldr x1, [x1, ACT_CPUDATAP]
341 ldr x1, [x1, CPU_ISTACKPTR]
342 mov sp, x1
343 adrp x1, fleh_irq@page // Load address for fleh
344 add x1, x1, fleh_irq@pageoff
345 b fleh_dispatch64
346
347 Lel1_sp0_fiq_vector_long:
348 // ARM64_TODO write optimized decrementer
349 EL1_SP0_VECTOR
350 mrs x1, TPIDR_EL1
351 ldr x1, [x1, ACT_CPUDATAP]
352 ldr x1, [x1, CPU_ISTACKPTR]
353 mov sp, x1
354 adrp x1, fleh_fiq@page // Load address for fleh
355 add x1, x1, fleh_fiq@pageoff
356 b fleh_dispatch64
357
358 Lel1_sp0_serror_vector_long:
359 EL1_SP0_VECTOR
360 adrp x1, fleh_serror@page // Load address for fleh
361 add x1, x1, fleh_serror@pageoff
362 b fleh_dispatch64
363
364 .macro EL1_SP1_VECTOR
365 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
366 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
367 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
368 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
369 INIT_SAVED_STATE_FLAVORS sp, w0, w1
370 stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
371 mov x0, sp // Copy saved state pointer to x0
372 .endmacro
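/* Unlike EL1_SP0_VECTOR, this variant stays on SP1 (there is no SPSel switch),
 * since it services exceptions taken while SP1 was already selected; the
 * fleh_*_sp1 handlers below treat that condition as fatal, with the
 * KERNEL_INTEGRITY_KTRR SCTLR trap as the one recoverable case.
 */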
373
374 Lel1_sp1_synchronous_vector_long:
375 #if defined(KERNEL_INTEGRITY_KTRR)
376 b check_ktrr_sctlr_trap
377 Lel1_sp1_synchronous_vector_continue:
378 #endif
379 EL1_SP1_VECTOR
380 adrp x1, fleh_synchronous_sp1@page
381 add x1, x1, fleh_synchronous_sp1@pageoff
382 b fleh_dispatch64
383
384 Lel1_sp1_irq_vector_long:
385 EL1_SP1_VECTOR
386 adrp x1, fleh_irq_sp1@page
387 add x1, x1, fleh_irq_sp1@pageoff
388 b fleh_dispatch64
389
390 Lel1_sp1_fiq_vector_long:
391 EL1_SP1_VECTOR
392 adrp x1, fleh_fiq_sp1@page
393 add x1, x1, fleh_fiq_sp1@pageoff
394 b fleh_dispatch64
395
396 Lel1_sp1_serror_vector_long:
397 EL1_SP1_VECTOR
398 adrp x1, fleh_serror_sp1@page
399 add x1, x1, fleh_serror_sp1@pageoff
400 b fleh_dispatch64
401
402 .macro EL0_64_VECTOR
403 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
404 mrs x0, TPIDR_EL1 // Load the thread register
405 mrs x1, SP_EL0 // Load the user stack pointer
406 add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
407 ldr x0, [x0] // Load the user context pointer
408 str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB
409 msr SP_EL0, x0 // Copy the user PCB pointer to SP0
410 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
411 msr SPSel, #0 // Switch to SP0
412 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
413 stp fp, lr, [sp, SS64_FP] // Save fp and lr to the user PCB
414 mov fp, xzr // Clear the fp and lr for the
415 mov lr, xzr // debugger stack frame
416 mov x0, sp // Copy the user PCB pointer to x0
417 .endmacro
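/* In short: on entry from EL0 the user stack pointer is captured from SP_EL0
 * into the PCB, and SP_EL0 is then repurposed to hold the PCB pointer itself,
 * so that after the 'msr SPSel, #0' the remaining stores address the user
 * saved-state area directly through sp.
 */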
418
419
420 Lel0_synchronous_vector_64_long:
421 EL0_64_VECTOR
422 mrs x1, TPIDR_EL1 // Load the thread register
423 ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
424 mov sp, x1 // Set the stack pointer to the kernel stack
425 adrp x1, fleh_synchronous@page // Load address for fleh
426 add x1, x1, fleh_synchronous@pageoff
427 b fleh_dispatch64
428
429 Lel0_irq_vector_64_long:
430 EL0_64_VECTOR
431 mrs x1, TPIDR_EL1
432 ldr x1, [x1, ACT_CPUDATAP]
433 ldr x1, [x1, CPU_ISTACKPTR]
434 mov sp, x1 // Set the stack pointer to the kernel stack
435 adrp x1, fleh_irq@page // load address for fleh
436 add x1, x1, fleh_irq@pageoff
437 b fleh_dispatch64
438
439 Lel0_fiq_vector_64_long:
440 EL0_64_VECTOR
441 mrs x1, TPIDR_EL1
442 ldr x1, [x1, ACT_CPUDATAP]
443 ldr x1, [x1, CPU_ISTACKPTR]
444 mov sp, x1 // Set the stack pointer to the kernel stack
445 adrp x1, fleh_fiq@page // load address for fleh
446 add x1, x1, fleh_fiq@pageoff
447 b fleh_dispatch64
448
449 Lel0_serror_vector_64_long:
450 EL0_64_VECTOR
451 mrs x1, TPIDR_EL1 // Load the thread register
452 ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
453 mov sp, x1 // Set the stack pointer to the kernel stack
454 adrp x1, fleh_serror@page // load address for fleh
455 add x1, x1, fleh_serror@pageoff
456 b fleh_dispatch64
457
458
459 /*
460 * check_kernel_stack
461 *
462 * Verifies that the kernel stack is aligned and mapped within an expected
463 * stack address range. Note: this runs before registers are saved (in case
464 * we cannot save to the kernel stack).
465 *
466 * Expects:
467 * {x0, x1, sp} - saved
468 * x0 - SP_EL0
469 * x1 - Exception syndrome
470 * sp - Saved state
471 */
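/* In outline (a sketch of the checks below, not generated code), SP_EL0 is
 * accepted if it lies strictly within any one of:
 *
 *     (kstack_top - KERNEL_STACK_SIZE, kstack_top)  the thread kernel stack
 *     (istack_top - PGBYTES, istack_top)            the CPU interrupt stack
 *     (fiqstack_top - PGBYTES, fiqstack_top)        the CPU FIQ stack
 *
 * where the *_top names are stand-ins for TH_KSTACKPTR, CPU_INTSTACK_TOP and
 * CPU_FIQSTACK_TOP. Anything else on a data abort is treated as corrupt and
 * dispatched to fleh_invalid_stack; an SP alignment fault is corrupt by
 * definition.
 */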
472 .text
473 .align 2
474 check_kernel_stack:
475 stp x2, x3, [sp, SS64_X2] // Save {x2-x3}
476 and x1, x1, #ESR_EC_MASK // Mask the exception class
477 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
478 cmp x1, x2 // If we have a stack alignment exception
479 b.eq Lcorrupt_stack // ...the stack is definitely corrupted
480 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
481 cmp x1, x2 // If we have a data abort, we need to
482 b.ne Lvalid_stack // ...validate the stack pointer
483 mrs x1, TPIDR_EL1 // Get thread pointer
484 Ltest_kstack:
485 ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
486 sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
487 cmp x0, x2 // if (SP_EL0 >= kstack top)
488 b.ge Ltest_istack // jump to istack test
489 cmp x0, x3 // if (SP_EL0 > kstack bottom)
490 b.gt Lvalid_stack // stack pointer valid
491 Ltest_istack:
492 ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
493 ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
494 sub x3, x2, PGBYTES // Find bottom of istack
495 cmp x0, x2 // if (SP_EL0 >= istack top)
496 b.ge Ltest_fiqstack // jump to fiqstack test
497 cmp x0, x3 // if (SP_EL0 > istack bottom)
498 b.gt Lvalid_stack // stack pointer valid
499 Ltest_fiqstack:
500 ldr x2, [x1, CPU_FIQSTACK_TOP] // Get top of fiqstack
501 sub x3, x2, PGBYTES // Find bottom of fiqstack
502 cmp x0, x2 // if (SP_EL0 >= fiqstack top)
503 b.ge Lcorrupt_stack // corrupt stack pointer
504 cmp x0, x3 // if (SP_EL0 > fiqstack bottom)
505 b.gt Lvalid_stack // stack pointer valid
506 Lcorrupt_stack:
507 INIT_SAVED_STATE_FLAVORS sp, w0, w1
508 mov x0, sp // Copy exception frame pointer to x0
509 adrp x1, fleh_invalid_stack@page // Load address for fleh
510 add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
511 ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
512 b fleh_dispatch64
513 Lvalid_stack:
514 ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
515 ret
516
517 #if defined(KERNEL_INTEGRITY_KTRR)
518 .text
519 .align 2
520 check_ktrr_sctlr_trap:
521 /* We may abort on an instruction fetch on reset when enabling the MMU by
522 * writing SCTLR_EL1, because the page containing the privileged instruction is
523 * not executable at EL1 (due to KTRR). The abort is taken only on SP1, whose
524 * handler would otherwise panic unconditionally. Check for this condition and
525 * return safe execution to the caller on behalf of the faulting function.
526 *
527 * Expected register state:
528 * x22 - Kernel virtual base
529 * x23 - Kernel physical base
530 */
531 sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack
532 stp x0, x1, [sp, SS64_X0] // Stash x0, x1
533 mrs x0, ESR_EL1 // Check ESR for instr. fetch abort
534 and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC
535 movz w1, #0x8600, lsl #16
536 movk w1, #0x0000
537 cmp x0, x1
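/* The constant compared against is 0x86000000: EC = 0x21 (instruction abort
 * taken without a change in exception level; 0x21 << 26 = 0x84000000) with the
 * IL bit (1 << 25) set and a zero ISS, so the comparison succeeds only when the
 * masked ESR describes exactly such an abort.
 */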
538 mrs x0, ELR_EL1 // Check for expected abort address
539 adrp x1, _pinst_set_sctlr_trap_addr@page
540 add x1, x1, _pinst_set_sctlr_trap_addr@pageoff
541 sub x1, x1, x22 // Convert to physical address
542 add x1, x1, x23
543 ccmp x0, x1, #0, eq
544 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
545 add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
546 b.ne Lel1_sp1_synchronous_vector_continue
547 msr ELR_EL1, lr // Return to caller
548 eret
549 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
550
551 /* 64-bit first level exception handler dispatcher.
552 * Completes register context saving and branches to FLEH.
553 * Expects:
554 * {x0, x1, fp, lr, sp} - saved
555 * x0 - arm_context_t
556 * x1 - address of FLEH
557 * fp - previous stack frame if EL1
558 * lr - unused
559 * sp - kernel stack
560 */
561 .text
562 .align 2
563 fleh_dispatch64:
564 /* Save arm_saved_state64 */
565 SPILL_REGISTERS
566
567 /* If exception is from userspace, zero lr */
568 ldr w21, [x0, SS64_CPSR]
569 and x21, x21, #(PSR64_MODE_EL_MASK)
570 cmp x21, #(PSR64_MODE_EL0)
571 b.ne 1f
572 mov lr, #0
573 1:
574
575 mov x21, x0 // Copy arm_context_t pointer to x21
576 mov x22, x1 // Copy handler routine to x22
577
578
579 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
580 tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
581 b.ne 1f // kernel mode, so skip precise time update
582 PUSH_FRAME
583 bl EXT(timer_state_event_user_to_kernel)
584 POP_FRAME
585 mov x0, x21 // Reload arm_context_t pointer
586 1:
587 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
588
589 /* Dispatch to FLEH */
590
591 br x22
592
593
594 .text
595 .align 2
596 fleh_synchronous:
597 mrs x1, ESR_EL1 // Load exception syndrome
598 mrs x2, FAR_EL1 // Load fault address
599
600 /* At this point, the LR contains the value of ELR_EL1. In the case of an
601 * instruction prefetch abort, this will be the faulting pc, which we know
602 * to be invalid. This will prevent us from backtracing through the
603 * exception if we put it in our stack frame, so we load the LR from the
604 * exception saved state instead.
605 */
606 and w3, w1, #(ESR_EC_MASK)
607 lsr w3, w3, #(ESR_EC_SHIFT)
608 mov w4, #(ESR_EC_IABORT_EL1)
609 cmp w3, w4
610 b.eq Lfleh_sync_load_lr
611 Lvalid_link_register:
612
613 PUSH_FRAME
614 bl EXT(sleh_synchronous)
615 POP_FRAME
616
617
618 b exception_return_dispatch
619
620 Lfleh_sync_load_lr:
621 ldr lr, [x0, SS64_LR]
622 b Lvalid_link_register
623
624 /* Shared prologue code for fleh_irq and fleh_fiq.
625 * Does any interrupt bookkeeping we may want to do
626 * before invoking the handler proper.
627 * Expects:
628 * x0 - arm_context_t
629 * x23 - CPSR
630 * fp - Undefined live value (we may push a frame)
631 * lr - Undefined live value (we may push a frame)
632 * sp - Interrupt stack for the current CPU
633 */
634 .macro BEGIN_INTERRUPT_HANDLER
635 mrs x22, TPIDR_EL1
636 ldr x23, [x22, ACT_CPUDATAP] // Get current cpu
637 /* Update IRQ count */
638 ldr w1, [x23, CPU_STAT_IRQ]
639 add w1, w1, #1 // Increment count
640 str w1, [x23, CPU_STAT_IRQ] // Update IRQ count
641 ldr w1, [x23, CPU_STAT_IRQ_WAKE]
642 add w1, w1, #1 // Increment count
643 str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
644 /* Increment preempt count */
645 ldr w1, [x22, ACT_PREEMPT_CNT]
646 add w1, w1, #1
647 str w1, [x22, ACT_PREEMPT_CNT]
648 /* Store context in int state */
649 str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state
650 .endmacro
651
652 /* Shared epilogue code for fleh_irq and fleh_fiq.
653 * Cleans up after the prologue, and may do a bit more
654 * bookkeeping (kdebug related).
655 * Expects:
656 * x22 - Live TPIDR_EL1 value (thread address)
657 * x23 - Address of the current CPU data structure
658 * w24 - 0 if kdebug is disabled, nonzero otherwise
659 * fp - Undefined live value (we may push a frame)
660 * lr - Undefined live value (we may push a frame)
661 * sp - Interrupt stack for the current CPU
662 */
663 .macro END_INTERRUPT_HANDLER
664 /* Clear int context */
665 str xzr, [x23, CPU_INT_STATE]
666 /* Decrement preempt count */
667 ldr w0, [x22, ACT_PREEMPT_CNT]
668 cbnz w0, 1f // Detect underflow
669 b preempt_underflow
670 1:
671 sub w0, w0, #1
672 str w0, [x22, ACT_PREEMPT_CNT]
673 /* Switch back to kernel stack */
674 ldr x0, [x22, TH_KSTACKPTR]
675 mov sp, x0
676 .endmacro
677
678 .text
679 .align 2
680 fleh_irq:
681 BEGIN_INTERRUPT_HANDLER
682 PUSH_FRAME
683 bl EXT(sleh_irq)
684 POP_FRAME
685 END_INTERRUPT_HANDLER
686
687
688 b exception_return_dispatch
689
690 .text
691 .align 2
692 .global EXT(fleh_fiq_generic)
693 LEXT(fleh_fiq_generic)
694 PANIC_UNIMPLEMENTED
695
696 .text
697 .align 2
698 fleh_fiq:
699 BEGIN_INTERRUPT_HANDLER
700 PUSH_FRAME
701 bl EXT(sleh_fiq)
702 POP_FRAME
703 END_INTERRUPT_HANDLER
704
705
706 b exception_return_dispatch
707
708 .text
709 .align 2
710 fleh_serror:
711 mrs x1, ESR_EL1 // Load exception syndrome
712 mrs x2, FAR_EL1 // Load fault address
713
714 PUSH_FRAME
715 bl EXT(sleh_serror)
716 POP_FRAME
717
718
719 b exception_return_dispatch
720
721 /*
722 * Register state saved before we get here.
723 */
724 .text
725 .align 2
726 fleh_invalid_stack:
727 mrs x1, ESR_EL1 // Load exception syndrome
728 str x1, [x0, SS64_ESR]
729 mrs x2, FAR_EL1 // Load fault address
730 str x2, [x0, SS64_FAR]
731 PUSH_FRAME
732 bl EXT(sleh_invalid_stack) // Shouldn't return!
733 b .
734
735 .text
736 .align 2
737 fleh_synchronous_sp1:
738 mrs x1, ESR_EL1 // Load exception syndrome
739 str x1, [x0, SS64_ESR]
740 mrs x2, FAR_EL1 // Load fault address
741 str x2, [x0, SS64_FAR]
742 PUSH_FRAME
743 bl EXT(sleh_synchronous_sp1)
744 b .
745
746 .text
747 .align 2
748 fleh_irq_sp1:
749 mov x1, x0
750 adr x0, Lsp1_irq_str
751 b EXT(panic_with_thread_kernel_state)
752 Lsp1_irq_str:
753 .asciz "IRQ exception taken while SP1 selected"
754
755 .text
756 .align 2
757 fleh_fiq_sp1:
758 mov x1, x0
759 adr x0, Lsp1_fiq_str
760 b EXT(panic_with_thread_kernel_state)
761 Lsp1_fiq_str:
762 .asciz "FIQ exception taken while SP1 selected"
763
764 .text
765 .align 2
766 fleh_serror_sp1:
767 mov x1, x0
768 adr x0, Lsp1_serror_str
769 b EXT(panic_with_thread_kernel_state)
770 Lsp1_serror_str:
771 .asciz "Asynchronous exception taken while SP1 selected"
772
773 .text
774 .align 2
775 exception_return_dispatch:
776 ldr w0, [x21, SS_FLAVOR] // Load the saved state flavor
777 cmp x0, ARM_SAVED_STATE64 // Is this a 64-bit saved state?
778 ldr w1, [x21, SS64_CPSR] // Speculatively load the 64-bit CPSR...
779 ldr w2, [x21, SS32_CPSR] // ...and the 32-bit CPSR
780 csel w0, w1, w2, eq // w0 = (64-bit state) ? ss_64.cpsr : ss_32.cpsr
781 tbnz w0, PSR64_MODE_EL_SHIFT, return_to_kernel // Test for low bit of EL, return to kernel if set
782 b return_to_user
783
784 .text
785 .align 2
786 return_to_kernel:
787 tbnz w0, #DAIF_IRQF_SHIFT, Lkernel_skip_ast_taken // Skip AST check if IRQ disabled
788 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
789 mrs x0, TPIDR_EL1 // Load thread pointer
790 ldr w1, [x0, ACT_PREEMPT_CNT] // Load preemption count
791 cbnz x1, Lkernel_skip_ast_taken // If preemption disabled, skip AST check
792 ldr x1, [x0, ACT_CPUDATAP] // Get current CPU data pointer
793 ldr x2, [x1, CPU_PENDING_AST] // Get ASTs
794 tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken
795 b.eq Lkernel_skip_ast_taken
796 mov sp, x21 // Switch to thread stack for preemption
797 PUSH_FRAME
798 bl EXT(ast_taken_kernel) // Handle AST_URGENT
799 POP_FRAME
800 Lkernel_skip_ast_taken:
801 b exception_return
802
803 .text
804 .globl EXT(thread_bootstrap_return)
805 LEXT(thread_bootstrap_return)
806 #if CONFIG_DTRACE
807 bl EXT(dtrace_thread_bootstrap)
808 #endif
809 b EXT(thread_exception_return)
810
811 .text
812 .globl EXT(thread_exception_return)
813 LEXT(thread_exception_return)
814 mrs x0, TPIDR_EL1
815 add x21, x0, ACT_CONTEXT
816 ldr x21, [x21]
817
818 //
819 // Fall through to return_to_user from thread_exception_return.
820 // Note that if return_to_user is moved, or a new routine is inserted
821 // below thread_exception_return, this fall-through will need to change.
822 //
823 .text
824 return_to_user:
825 check_user_asts:
826 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
827 mrs x3, TPIDR_EL1 // Load thread pointer
828
829 movn w2, #0
830 str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
831
832 ldr w0, [x3, TH_RWLOCK_CNT]
833 cbz w0, 1f // Detect unbalanced RW lock/unlock
834 b rwlock_count_notzero
835 1:
836
837 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
838 ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
839 cbnz x0, user_take_ast // If pending ASTs, go service them
840
841 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
842 PUSH_FRAME
843 bl EXT(timer_state_event_kernel_to_user)
844 POP_FRAME
845 mrs x3, TPIDR_EL1 // Reload thread pointer
846 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
847
848 #if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
849 /* Watchtower
850 *
851 * Here we attempt to enable NEON access for EL0. If the last entry into the
852 * kernel from user-space was due to an IRQ, the monitor will have disabled
853 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
854 * check in with the monitor in order to reenable NEON for EL0 in exchange
855 * for routing IRQs through the monitor (2). This way the monitor will
856 * always 'own' either IRQs or EL0 NEON.
857 *
858 * If Watchtower is disabled or we did not enter the kernel through an IRQ
859 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
860 * here.
861 *
862 * EL0 user ________ IRQ ______
863 * EL1 xnu \ ______________________ CPACR_EL1 __/
864 * EL3 monitor \_/ \___/
865 *
866 * (1) (2)
867 */
868
869 mov x0, #(CPACR_FPEN_ENABLE)
870 msr CPACR_EL1, x0
871 #endif
872
873 /* Establish this thread's debug state as the live state on the selected CPU. */
874 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
875 ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
876 ldr x0, [x3, ACT_DEBUGDATA]
877 orr x1, x1, x0 // Thread debug state and live debug state both NULL?
878 cbnz x1, user_set_debug_state_and_return // If one or the other non-null, go set debug state
879
880 //
881 // Fall through from return_to_user to exception_return.
882 // Note that if exception_return is moved, or a new routine is added below
883 // return_to_user, this fall-through will have to change.
884 //
885
886
887 exception_return:
888 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
889 mrs x3, TPIDR_EL1 // Load thread pointer
890 mov sp, x21 // Reload the pcb pointer
891
892 /* ARM64_TODO Reserve x18 until we decide what to do with it */
893 ldr x0, [x3, TH_CTH_DATA] // Load cthread data pointer
894 str x0, [sp, SS64_X18] // and use it to trash x18
895
896 #if __ARM_KERNEL_PROTECT__
897 /*
898 * If we are going to eret to userspace, we must return through the EL0
899 * eret mapping.
900 */
901 ldr w1, [sp, SS64_CPSR] // Load CPSR
902 tbnz w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1
903
904 /* We need to switch to the EL0 mapping of this code to eret to EL0. */
905 adrp x0, EXT(ExceptionVectorsBase)@page // Load vector base
906 adrp x1, Lexception_return_restore_registers@page // Load target PC
907 add x1, x1, Lexception_return_restore_registers@pageoff
908 MOV64 x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address
909 sub x1, x1, x0 // Calculate delta
910 add x0, x2, x1 // Convert KVA to EL0 vector address
911 br x0
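/* In effect x0 = ARM_KERNEL_PROTECT_EXCEPTION_START +
 * (Lexception_return_restore_registers - ExceptionVectorsBase); because
 * ExceptionVectorsBase is page aligned, its @page value is the symbol itself,
 * so the subtraction yields the plain offset into the vectors/eret region.
 */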
912
913 Lskip_el0_eret_mapping:
914 #endif /* __ARM_KERNEL_PROTECT__ */
915
916 Lexception_return_restore_registers:
917 /* Restore special register state */
918 ldr x0, [sp, SS64_PC] // Get the return address
919 ldr w1, [sp, SS64_CPSR] // Get the return CPSR
920 ldr w2, [sp, NS64_FPSR]
921 ldr w3, [sp, NS64_FPCR]
922
923 msr ELR_EL1, x0 // Load the return address into ELR
924 msr SPSR_EL1, x1 // Load the return CPSR into SPSR
925 msr FPSR, x2
926 msr FPCR, x3 // Synchronized by ERET
927
928 mov x0, sp // x0 = &pcb
929
930 /* Restore arm_neon_saved_state64 */
931 ldp q0, q1, [x0, NS64_Q0]
932 ldp q2, q3, [x0, NS64_Q2]
933 ldp q4, q5, [x0, NS64_Q4]
934 ldp q6, q7, [x0, NS64_Q6]
935 ldp q8, q9, [x0, NS64_Q8]
936 ldp q10, q11, [x0, NS64_Q10]
937 ldp q12, q13, [x0, NS64_Q12]
938 ldp q14, q15, [x0, NS64_Q14]
939 ldp q16, q17, [x0, NS64_Q16]
940 ldp q18, q19, [x0, NS64_Q18]
941 ldp q20, q21, [x0, NS64_Q20]
942 ldp q22, q23, [x0, NS64_Q22]
943 ldp q24, q25, [x0, NS64_Q24]
944 ldp q26, q27, [x0, NS64_Q26]
945 ldp q28, q29, [x0, NS64_Q28]
946 ldp q30, q31, [x0, NS64_Q30]
947
948 /* Restore arm_saved_state64 */
949
950 // Skip x0, x1 - we're using them
951 ldp x2, x3, [x0, SS64_X2]
952 ldp x4, x5, [x0, SS64_X4]
953 ldp x6, x7, [x0, SS64_X6]
954 ldp x8, x9, [x0, SS64_X8]
955 ldp x10, x11, [x0, SS64_X10]
956 ldp x12, x13, [x0, SS64_X12]
957 ldp x14, x15, [x0, SS64_X14]
958 ldp x16, x17, [x0, SS64_X16]
959 ldp x18, x19, [x0, SS64_X18]
960 ldp x20, x21, [x0, SS64_X20]
961 ldp x22, x23, [x0, SS64_X22]
962 ldp x24, x25, [x0, SS64_X24]
963 ldp x26, x27, [x0, SS64_X26]
964 ldr x28, [x0, SS64_X28]
965 ldp fp, lr, [x0, SS64_FP]
966
967 // Restore stack pointer and our last two GPRs
968 ldr x1, [x0, SS64_SP]
969 mov sp, x1
970
971 #if __ARM_KERNEL_PROTECT__
972 ldr w18, [x0, SS64_CPSR] // Stash CPSR
973 #endif /* __ARM_KERNEL_PROTECT__ */
974
975 ldp x0, x1, [x0, SS64_X0] // Restore the GPRs
976
977 #if __ARM_KERNEL_PROTECT__
978 /* If we are going to eret to userspace, we must unmap the kernel. */
979 tbnz w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
980
981 /* Update TCR to unmap the kernel. */
982 MOV64 x18, TCR_EL1_USER
983 msr TCR_EL1, x18
984
985 /*
986 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
987 * each other due to the microarchitecture.
988 */
989 #if !defined(APPLE_ARM64_ARCH_FAMILY)
990 isb sy
991 #endif
992
993 /* Switch to the user ASID (low bit clear) for the task. */
994 mrs x18, TTBR0_EL1
995 bic x18, x18, #(1 << TTBR_ASID_SHIFT)
996 msr TTBR0_EL1, x18
997 mov x18, xzr
998
999 /* We don't need an ISB here, as the eret is synchronizing. */
1000 Lskip_ttbr1_switch:
1001 #endif /* __ARM_KERNEL_PROTECT__ */
1002
1003 eret
1004
1005 user_take_ast:
1006 PUSH_FRAME
1007 bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation
1008 POP_FRAME
1009 mrs x3, TPIDR_EL1 // Reload thread pointer
1010 b check_user_asts // Now try again
1011
1012 user_set_debug_state_and_return:
1013 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1014 isb // Synchronize context
1015 PUSH_FRAME
1016 bl EXT(arm_debug_set) // Establish thread debug state in live regs
1017 POP_FRAME
1018 isb
1019 mrs x3, TPIDR_EL1 // Reload thread pointer
1020 b exception_return // And continue
1021
1022 .text
1023 .align 2
1024 preempt_underflow:
1025 mrs x0, TPIDR_EL1
1026 str x0, [sp, #-16]! // We'll print thread pointer
1027 adr x0, L_underflow_str // Format string
1028 CALL_EXTERN panic // Game over
1029
1030 L_underflow_str:
1031 .asciz "Preemption count negative on thread %p"
1032 .align 2
1033
1034 .text
1035 .align 2
1036 rwlock_count_notzero:
1037 mrs x0, TPIDR_EL1
1038 str x0, [sp, #-16]! // We'll print thread pointer
1039 ldr w0, [x0, TH_RWLOCK_CNT]
1040 str w0, [sp, #8]
1041 adr x0, L_rwlock_count_notzero_str // Format string
1042 CALL_EXTERN panic // Game over
1043
1044 L_rwlock_count_notzero_str:
1045 .asciz "RW lock count not 0 on thread %p (%u)"
1046 .align 2
1047
1048 #if __ARM_KERNEL_PROTECT__
1049 /*
1050 * This symbol denotes the end of the exception vector/eret range; we page
1051 * align it so that we can avoid mapping other text in the EL0 exception
1052 * vector mapping.
1053 */
1054 .text
1055 .align 14
1056 .globl EXT(ExceptionVectorsEnd)
1057 LEXT(ExceptionVectorsEnd)
1058 #endif /* __ARM_KERNEL_PROTECT__ */
1059
1060 .text
1061 .align 2
1062 .globl EXT(ml_panic_trap_to_debugger)
1063 LEXT(ml_panic_trap_to_debugger)
1064 ret
1065
1066 /* ARM64_TODO Is globals_asm.h needed? */
1067 //#include "globals_asm.h"
1068
1069 /* vim: set ts=4: */