1 /*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <machine/asm.h>
30 #include <arm64/proc_reg.h>
31 #include <pexpert/arm64/board_config.h>
32 #include <mach/exception_types.h>
33 #include <mach_kdp.h>
34 #include <config_dtrace.h>
35 #include "assym.s"
36
37 #if __ARM_KERNEL_PROTECT__
38 #include <arm/pmap.h>
39 #endif
40
41
42 /*
43 * INIT_SAVED_STATE_FLAVORS
44 *
45 * Initializes the saved state flavors of a new saved state structure
46 * arg0 - saved state pointer
47 * arg1 - 32-bit scratch reg
48 * arg2 - 32-bit scratch reg
49 */
50 .macro INIT_SAVED_STATE_FLAVORS
51 mov $1, ARM_SAVED_STATE64 // Set saved state to 64-bit flavor
52 mov $2, ARM_SAVED_STATE64_COUNT
53 stp $1, $2, [$0, SS_FLAVOR]
54 mov $1, ARM_NEON_SAVED_STATE64 // Set neon state to 64-bit flavor
55 str $1, [$0, NS_FLAVOR]
56 mov $1, ARM_NEON_SAVED_STATE64_COUNT
57 str $1, [$0, NS_COUNT]
58 .endmacro
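/*
 * For illustration, the vector macros later in this file invoke this with
 * the frame pointer in sp and w0/w1 as the 32-bit scratch registers:
 *
 *	INIT_SAVED_STATE_FLAVORS sp, w0, w1
 *
 * which stamps the frame's flavor/count fields for the 64-bit saved state
 * and NEON state.
 */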
59
60
61 /*
62 * SPILL_REGISTERS
63 *
64 * Spills the current set of registers (excluding x0 and x1) to the specified
65 * save area.
66 * x0 - Address of the save area
67 */
68 .macro SPILL_REGISTERS
69 stp x2, x3, [x0, SS64_X2] // Save remaining GPRs
70 stp x4, x5, [x0, SS64_X4]
71 stp x6, x7, [x0, SS64_X6]
72 stp x8, x9, [x0, SS64_X8]
73 stp x10, x11, [x0, SS64_X10]
74 stp x12, x13, [x0, SS64_X12]
75 stp x14, x15, [x0, SS64_X14]
76 stp x16, x17, [x0, SS64_X16]
77 stp x18, x19, [x0, SS64_X18]
78 stp x20, x21, [x0, SS64_X20]
79 stp x22, x23, [x0, SS64_X22]
80 stp x24, x25, [x0, SS64_X24]
81 stp x26, x27, [x0, SS64_X26]
82 str x28, [x0, SS64_X28]
83
84 /* Save arm_neon_saved_state64 */
85
86 stp q0, q1, [x0, NS64_Q0]
87 stp q2, q3, [x0, NS64_Q2]
88 stp q4, q5, [x0, NS64_Q4]
89 stp q6, q7, [x0, NS64_Q6]
90 stp q8, q9, [x0, NS64_Q8]
91 stp q10, q11, [x0, NS64_Q10]
92 stp q12, q13, [x0, NS64_Q12]
93 stp q14, q15, [x0, NS64_Q14]
94 stp q16, q17, [x0, NS64_Q16]
95 stp q18, q19, [x0, NS64_Q18]
96 stp q20, q21, [x0, NS64_Q20]
97 stp q22, q23, [x0, NS64_Q22]
98 stp q24, q25, [x0, NS64_Q24]
99 stp q26, q27, [x0, NS64_Q26]
100 stp q28, q29, [x0, NS64_Q28]
101 stp q30, q31, [x0, NS64_Q30]
102
103 mrs lr, ELR_EL1 // Get exception link register
104 mrs x23, SPSR_EL1 // Load CPSR into var reg x23
105 mrs x24, FPSR
106 mrs x25, FPCR
107
108
109 str lr, [x0, SS64_PC] // Save ELR to PCB
110 str w23, [x0, SS64_CPSR] // Save CPSR to PCB
111 str w24, [x0, NS64_FPSR]
112 str w25, [x0, NS64_FPCR]
113
114 mrs x20, FAR_EL1
115 mrs x21, ESR_EL1
116 str x20, [x0, SS64_FAR]
117 str w21, [x0, SS64_ESR]
118 .endmacro
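/*
 * Note that, as a side effect, this macro leaves several system register
 * values live in GPRs after it finishes: ELR_EL1 in lr, SPSR_EL1 in x23,
 * FPSR/FPCR in x24/x25, and FAR_EL1/ESR_EL1 in x20/x21. fleh_dispatch64
 * below relies on x23 still holding the saved CPSR when it tests the EL
 * mode bits.
 */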
119
120
121 #define CBF_DISABLE 0
122 #define CBF_ENABLE 1
123
124 .macro COMPARE_BRANCH_FUSION
125 #if defined(APPLE_ARM64_ARCH_FAMILY)
126 mrs $1, ARM64_REG_HID1
127 .if $0 == CBF_DISABLE
128 orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
129 .else
130 mov $2, ARM64_REG_HID1_disCmpBrFusion
131 bic $1, $1, $2
132 .endif
133 msr ARM64_REG_HID1, $1
134 .if $0 == CBF_DISABLE
135 isb sy
136 .endif
137 #endif
138 .endmacro
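/*
 * Illustrative use only (the real call sites are outside this file), with
 * x12/x13 chosen arbitrarily here as free scratch registers:
 *
 *	COMPARE_BRANCH_FUSION CBF_DISABLE, x12, x13
 *
 * Disabling takes the orr path and is followed by an isb; enabling clears
 * the same HID1 bit via bic and needs no barrier here.
 */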
139
140 /*
141 * MAP_KERNEL
142 *
143 * Restores the kernel EL1 mappings, if necessary.
144 *
145 * This may mutate x18.
146 */
147 .macro MAP_KERNEL
148 #if __ARM_KERNEL_PROTECT__
149 /* Switch to the kernel ASID (low bit set) for the task. */
150 mrs x18, TTBR0_EL1
151 orr x18, x18, #(1 << TTBR_ASID_SHIFT)
152 msr TTBR0_EL1, x18
153
154 /*
155 * We eschew some barriers on Apple CPUs, as relative ordering of writes
156 * to the TTBRs and writes to the TCR should be ensured by the
157 * microarchitecture.
158 */
159 #if !defined(APPLE_ARM64_ARCH_FAMILY)
160 isb sy
161 #endif
162
163 /*
164 * Update the TCR to map the kernel now that we are using the kernel
165 * ASID.
166 */
167 MOV64 x18, TCR_EL1_BOOT
168 msr TCR_EL1, x18
169 isb sy
170 #endif /* __ARM_KERNEL_PROTECT__ */
171 .endmacro
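/*
 * The ASID convention under __ARM_KERNEL_PROTECT__: each task owns a pair
 * of ASIDs, with the low bit clear for the user mapping and set for the
 * kernel mapping. MAP_KERNEL sets the bit and restores TCR_EL1_BOOT on
 * entry; the eret path in exception_return does the inverse (TCR_EL1_USER,
 * low bit cleared) before returning to EL0.
 */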
172
173 /*
174 * BRANCH_TO_KVA_VECTOR
175 *
176 * Branches to the requested long exception vector in the kernelcache.
177 * arg0 - The label to branch to
178  * arg1 - The index of the label in exc_vectors_table
179 *
180 * This may mutate x18.
181 */
182 .macro BRANCH_TO_KVA_VECTOR
183 #if __ARM_KERNEL_PROTECT__
184 /*
185 * Find the kernelcache table for the exception vectors by accessing
186 * the per-CPU data.
187 */
188 mrs x18, TPIDR_EL1
189 ldr x18, [x18, ACT_CPUDATAP]
190 ldr x18, [x18, CPU_EXC_VECTORS]
191
192 /*
193 * Get the handler for this exception and jump to it.
194 */
195 ldr x18, [x18, #($1 << 3)]
196 br x18
197 #else
198 b $0
199 #endif /* __ARM_KERNEL_PROTECT__ */
200 .endmacro
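/*
 * arg1 is scaled by 8 ($1 << 3) because exc_vectors_table is a table of
 * .quad entries; arg1 must therefore match the label's position in that
 * table (0 through 11, in the order the table is laid out below).
 */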
201
202 #if __ARM_KERNEL_PROTECT__
203 .text
204 .align 3
205 .globl EXT(exc_vectors_table)
206 LEXT(exc_vectors_table)
207 /* Table of exception handlers. */
208 .quad Lel1_sp0_synchronous_vector_long
209 .quad Lel1_sp0_irq_vector_long
210 .quad Lel1_sp0_fiq_vector_long
211 .quad Lel1_sp0_serror_vector_long
212 .quad Lel1_sp1_synchronous_vector_long
213 .quad Lel1_sp1_irq_vector_long
214 .quad Lel1_sp1_fiq_vector_long
215 .quad Lel1_sp1_serror_vector_long
216 .quad Lel0_synchronous_vector_64_long
217 .quad Lel0_irq_vector_64_long
218 .quad Lel0_fiq_vector_64_long
219 .quad Lel0_serror_vector_64_long
220 #endif /* __ARM_KERNEL_PROTECT__ */
221
222 .text
223 #if __ARM_KERNEL_PROTECT__
224 /*
225  * We need this to be on a page boundary so that we can avoid mapping
226 * other text along with it. As this must be on the VM page boundary
227 * (due to how the coredumping code currently works), this will be a
228 * 16KB page boundary.
229 */
230 .align 14
231 #else
232 .align 12
233 #endif /* __ARM_KERNEL_PROTECT__ */
234 .globl EXT(ExceptionVectorsBase)
235 LEXT(ExceptionVectorsBase)
236 Lel1_sp0_synchronous_vector:
237 BRANCH_TO_KVA_VECTOR Lel1_sp0_synchronous_vector_long, 0
238
239 .text
240 .align 7
241 Lel1_sp0_irq_vector:
242 BRANCH_TO_KVA_VECTOR Lel1_sp0_irq_vector_long, 1
243
244 .text
245 .align 7
246 Lel1_sp0_fiq_vector:
247 BRANCH_TO_KVA_VECTOR Lel1_sp0_fiq_vector_long, 2
248
249 .text
250 .align 7
251 Lel1_sp0_serror_vector:
252 BRANCH_TO_KVA_VECTOR Lel1_sp0_serror_vector_long, 3
253
254 .text
255 .align 7
256 Lel1_sp1_synchronous_vector:
257 BRANCH_TO_KVA_VECTOR Lel1_sp1_synchronous_vector_long, 4
258
259 .text
260 .align 7
261 Lel1_sp1_irq_vector:
262 BRANCH_TO_KVA_VECTOR Lel1_sp1_irq_vector_long, 5
263
264 .text
265 .align 7
266 Lel1_sp1_fiq_vector:
267 BRANCH_TO_KVA_VECTOR Lel1_sp1_fiq_vector_long, 6
268
269 .text
270 .align 7
271 Lel1_sp1_serror_vector:
272 	BRANCH_TO_KVA_VECTOR	Lel1_sp1_serror_vector_long, 7
273
274 .text
275 .align 7
276 Lel0_synchronous_vector_64:
277 MAP_KERNEL
278 BRANCH_TO_KVA_VECTOR Lel0_synchronous_vector_64_long, 8
279
280 .text
281 .align 7
282 Lel0_irq_vector_64:
283 MAP_KERNEL
284 BRANCH_TO_KVA_VECTOR Lel0_irq_vector_64_long, 9
285
286 .text
287 .align 7
288 Lel0_fiq_vector_64:
289 MAP_KERNEL
290 BRANCH_TO_KVA_VECTOR Lel0_fiq_vector_64_long, 10
291
292 .text
293 .align 7
294 Lel0_serror_vector_64:
295 MAP_KERNEL
296 BRANCH_TO_KVA_VECTOR Lel0_serror_vector_64_long, 11
297
298 /* Fill out the rest of the page */
299 .align 12
300
301 /*********************************
302 * END OF EXCEPTION VECTORS PAGE *
303 *********************************/
304
305 .macro EL1_SP0_VECTOR
306 msr SPSel, #0 // Switch to SP0
307 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
308 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
309 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
310 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
311 stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
312 INIT_SAVED_STATE_FLAVORS sp, w0, w1
313 mov x0, sp // Copy saved state pointer to x0
314 .endmacro
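/*
 * On exit from this macro, sp and x0 both point at the freshly built
 * exception frame on the interrupted context's SP0 stack; each vector
 * below then loads the address of its first level handler into x1 (and,
 * for IRQ/FIQ, switches sp to the interrupt stack) before branching to
 * fleh_dispatch64.
 */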
315
316 Lel1_sp0_synchronous_vector_long:
317 sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack
318 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack
319 mrs x1, ESR_EL1 // Get the exception syndrome
320 /* If the stack pointer is corrupt, it will manifest either as a data abort
321 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
322 * these quickly by testing bit 5 of the exception class.
323 */
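	/* Bit 5 of the EC is the 0x20 bit: both a same-EL data abort (EC 0x25)
	 * and an SP alignment fault (EC 0x26) have it set, while the common
	 * trap/syscall classes below 0x20 do not, so this is only a cheap
	 * first-pass filter; check_kernel_stack compares the exact EC values.
	 */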
324 tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
325 mrs x0, SP_EL0 // Get SP_EL0
326 stp fp, lr, [sp, SS64_FP] // Save fp, lr to the stack
327 str x0, [sp, SS64_SP] // Save sp to the stack
328 bl check_kernel_stack
329 ldp fp, lr, [sp, SS64_FP] // Restore fp, lr
330 Lkernel_stack_valid:
331 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
332 add sp, sp, ARM_CONTEXT_SIZE // Restore SP1
333 EL1_SP0_VECTOR
334 adrp x1, fleh_synchronous@page // Load address for fleh
335 add x1, x1, fleh_synchronous@pageoff
336 b fleh_dispatch64
337
338 Lel1_sp0_irq_vector_long:
339 EL1_SP0_VECTOR
340 mrs x1, TPIDR_EL1
341 ldr x1, [x1, ACT_CPUDATAP]
342 ldr x1, [x1, CPU_ISTACKPTR]
343 mov sp, x1
344 adrp x1, fleh_irq@page // Load address for fleh
345 add x1, x1, fleh_irq@pageoff
346 b fleh_dispatch64
347
348 Lel1_sp0_fiq_vector_long:
349 // ARM64_TODO write optimized decrementer
350 EL1_SP0_VECTOR
351 mrs x1, TPIDR_EL1
352 ldr x1, [x1, ACT_CPUDATAP]
353 ldr x1, [x1, CPU_ISTACKPTR]
354 mov sp, x1
355 adrp x1, fleh_fiq@page // Load address for fleh
356 add x1, x1, fleh_fiq@pageoff
357 b fleh_dispatch64
358
359 Lel1_sp0_serror_vector_long:
360 EL1_SP0_VECTOR
361 adrp x1, fleh_serror@page // Load address for fleh
362 add x1, x1, fleh_serror@pageoff
363 b fleh_dispatch64
364
365 .macro EL1_SP1_VECTOR
366 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
367 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
368 add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
369 str x0, [sp, SS64_SP] // Save stack pointer to exception frame
370 INIT_SAVED_STATE_FLAVORS sp, w0, w1
371 stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
372 mov x0, sp // Copy saved state pointer to x0
373 .endmacro
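/*
 * Same shape as EL1_SP0_VECTOR, except that we stay on SP1: these vectors
 * fire when the SP0 stack itself may be unusable, so the frame is built on
 * the per-CPU exception stack instead (validated by check_exception_stack
 * in the synchronous case).
 */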
374
375 Lel1_sp1_synchronous_vector_long:
376 b check_exception_stack
377 Lel1_sp1_synchronous_valid_stack:
378 #if defined(KERNEL_INTEGRITY_KTRR)
379 b check_ktrr_sctlr_trap
380 Lel1_sp1_synchronous_vector_continue:
381 #endif
382 EL1_SP1_VECTOR
383 adrp x1, fleh_synchronous_sp1@page
384 add x1, x1, fleh_synchronous_sp1@pageoff
385 b fleh_dispatch64
386
387 Lel1_sp1_irq_vector_long:
388 EL1_SP1_VECTOR
389 adrp x1, fleh_irq_sp1@page
390 add x1, x1, fleh_irq_sp1@pageoff
391 b fleh_dispatch64
392
393 Lel1_sp1_fiq_vector_long:
394 EL1_SP1_VECTOR
395 adrp x1, fleh_fiq_sp1@page
396 add x1, x1, fleh_fiq_sp1@pageoff
397 b fleh_dispatch64
398
399 Lel1_sp1_serror_vector_long:
400 EL1_SP1_VECTOR
401 adrp x1, fleh_serror_sp1@page
402 add x1, x1, fleh_serror_sp1@pageoff
403 b fleh_dispatch64
404
405 .macro EL0_64_VECTOR
406 mov x18, #0 // Zero x18 to avoid leaking data to user SS
407 stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
408 mrs x0, TPIDR_EL1 // Load the thread register
409 mrs x1, SP_EL0 // Load the user stack pointer
410 add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
411 ldr x0, [x0] // Load the user context pointer
412 str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB
413 msr SP_EL0, x0 // Copy the user PCB pointer to SP0
414 ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
415 msr SPSel, #0 // Switch to SP0
416 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
417 stp fp, lr, [sp, SS64_FP] // Save fp and lr to the user PCB
418 mov fp, #0 // Clear the fp and lr for the
419 mov lr, #0 // debugger stack frame
420 mov x0, sp // Copy the user PCB pointer to x0
421 .endmacro
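/*
 * The trick here is that SP_EL0 is repurposed to carry the user PCB
 * pointer: the user's stack pointer is stashed in the PCB first, SP_EL0 is
 * then pointed at the PCB, and after the switch to SPSel #0 the remaining
 * stores land directly in the user saved state. fp and lr are zeroed so
 * that the debugger's stack frame chain terminates here rather than
 * pointing at user frames.
 */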
422
423
424 Lel0_synchronous_vector_64_long:
425 EL0_64_VECTOR
426 mrs x1, TPIDR_EL1 // Load the thread register
427 ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
428 mov sp, x1 // Set the stack pointer to the kernel stack
429 adrp x1, fleh_synchronous@page // Load address for fleh
430 add x1, x1, fleh_synchronous@pageoff
431 b fleh_dispatch64
432
433 Lel0_irq_vector_64_long:
434 EL0_64_VECTOR
435 mrs x1, TPIDR_EL1
436 ldr x1, [x1, ACT_CPUDATAP]
437 ldr x1, [x1, CPU_ISTACKPTR]
438 mov sp, x1 // Set the stack pointer to the kernel stack
439 adrp x1, fleh_irq@page // load address for fleh
440 add x1, x1, fleh_irq@pageoff
441 b fleh_dispatch64
442
443 Lel0_fiq_vector_64_long:
444 EL0_64_VECTOR
445 mrs x1, TPIDR_EL1
446 ldr x1, [x1, ACT_CPUDATAP]
447 ldr x1, [x1, CPU_ISTACKPTR]
448 mov sp, x1 // Set the stack pointer to the kernel stack
449 adrp x1, fleh_fiq@page // load address for fleh
450 add x1, x1, fleh_fiq@pageoff
451 b fleh_dispatch64
452
453 Lel0_serror_vector_64_long:
454 EL0_64_VECTOR
455 mrs x1, TPIDR_EL1 // Load the thread register
456 ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
457 mov sp, x1 // Set the stack pointer to the kernel stack
458 adrp x1, fleh_serror@page // load address for fleh
459 add x1, x1, fleh_serror@pageoff
460 b fleh_dispatch64
461
462
463 /*
464 * check_exception_stack
465 *
466  * Verifies that the stack pointer at SP1 is within the exception stack.
467  * If not, we simply hang, as we have no more stack to fall back on.
468 */
469
470 .text
471 .align 2
472 check_exception_stack:
473 mrs x18, TPIDR_EL1 // Get thread pointer
474 cbz x18, Lvalid_exception_stack // Thread context may not be set early in boot
475 ldr x18, [x18, ACT_CPUDATAP]
476 	cbz		x18, .					// If thread context is set, cpu data should be too; hang if it is not
477 ldr x18, [x18, CPU_EXCEPSTACK_TOP]
478 cmp sp, x18
479 b.gt . // Hang if above exception stack top
480 sub x18, x18, EXCEPSTACK_SIZE_NUM // Find bottom of exception stack
481 cmp sp, x18
482 b.lt . // Hang if below exception stack bottom
483 Lvalid_exception_stack:
484 mov x18, #0
485 b Lel1_sp1_synchronous_valid_stack
486
487 /*
488 * check_kernel_stack
489 *
490 * Verifies that the kernel stack is aligned and mapped within an expected
491 * stack address range. Note: happens before saving registers (in case we can't
492 * save to kernel stack).
493 *
494 * Expects:
495 * {x0, x1, sp} - saved
496 * x0 - SP_EL0
497 * x1 - Exception syndrome
498 * sp - Saved state
499 */
500 .text
501 .align 2
502 check_kernel_stack:
503 stp x2, x3, [sp, SS64_X2] // Save {x2-x3}
504 and x1, x1, #ESR_EC_MASK // Mask the exception class
505 mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
506 cmp x1, x2 // If we have a stack alignment exception
507 b.eq Lcorrupt_stack // ...the stack is definitely corrupted
508 mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
509 cmp x1, x2 // If we have a data abort, we need to
510 b.ne Lvalid_stack // ...validate the stack pointer
511 mrs x1, TPIDR_EL1 // Get thread pointer
512 Ltest_kstack:
513 ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
514 sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
515 cmp x0, x2 // if (SP_EL0 >= kstack top)
516 b.ge Ltest_istack // jump to istack test
517 cmp x0, x3 // if (SP_EL0 > kstack bottom)
518 b.gt Lvalid_stack // stack pointer valid
519 Ltest_istack:
520 ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
521 ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
522 sub x3, x2, INTSTACK_SIZE_NUM // Find bottom of istack
523 cmp x0, x2 // if (SP_EL0 >= istack top)
524 b.ge Lcorrupt_stack // corrupt stack pointer
525 cmp x0, x3 // if (SP_EL0 > istack bottom)
526 b.gt Lvalid_stack // stack pointer valid
527 Lcorrupt_stack:
528 INIT_SAVED_STATE_FLAVORS sp, w0, w1
529 mov x0, sp // Copy exception frame pointer to x0
530 adrp x1, fleh_invalid_stack@page // Load address for fleh
531 add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
532 ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
533 b fleh_dispatch64
534 Lvalid_stack:
535 ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
536 ret
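/*
 * In rough pseudocode (names here are purely illustrative): an SP alignment
 * fault is always treated as a corrupt stack, and for a data abort SP_EL0
 * is accepted only if
 *
 *	(kstack_bottom < SP_EL0 < kstack_top) ||
 *	(istack_bottom < SP_EL0 < istack_top)
 *
 * where the tops come from TH_KSTACKPTR and CPU_INTSTACK_TOP and the
 * bottoms are KERNEL_STACK_SIZE and INTSTACK_SIZE_NUM below them; a data
 * abort outside both ranges is handed to fleh_invalid_stack.
 */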
537
538 #if defined(KERNEL_INTEGRITY_KTRR)
539 .text
540 .align 2
541 check_ktrr_sctlr_trap:
542 /* We may abort on an instruction fetch on reset when enabling the MMU by
543 * writing SCTLR_EL1 because the page containing the privileged instruction is
544 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
545 * would otherwise panic unconditionally. Check for the condition and return
546 * safe execution to the caller on behalf of the faulting function.
547 *
548 * Expected register state:
549 * x22 - Kernel virtual base
550 * x23 - Kernel physical base
551 */
552 sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack
553 stp x0, x1, [sp, SS64_X0] // Stash x0, x1
554 mrs x0, ESR_EL1 // Check ESR for instr. fetch abort
555 and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC
556 	movz	w1, #0x8600, lsl #16		// x1 = 0x86000000: EC 0x21 (instruction abort from EL1), IL set
557 	movk	w1, #0x0000					// (IFSC bits were already masked off of x0 above)
558 cmp x0, x1
559 mrs x0, ELR_EL1 // Check for expected abort address
560 adrp x1, _pinst_set_sctlr_trap_addr@page
561 add x1, x1, _pinst_set_sctlr_trap_addr@pageoff
562 sub x1, x1, x22 // Convert to physical address
563 add x1, x1, x23
564 ccmp x0, x1, #0, eq
565 ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
566 add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
567 b.ne Lel1_sp1_synchronous_vector_continue
568 msr ELR_EL1, lr // Return to caller
569 eret
570 #endif /* defined(KERNEL_INTEGRITY_KTRR)*/
571
572 /* 64-bit first level exception handler dispatcher.
573 * Completes register context saving and branches to FLEH.
574 * Expects:
575 * {x0, x1, fp, lr, sp} - saved
576 * x0 - arm_context_t
577 * x1 - address of FLEH
578 * fp - previous stack frame if EL1
579 * lr - unused
580 * sp - kernel stack
581 */
582 .text
583 .align 2
584 fleh_dispatch64:
585 /* Save arm_saved_state64 */
586 SPILL_REGISTERS
587
588 /* If exception is from userspace, zero unused registers */
589 and x23, x23, #(PSR64_MODE_EL_MASK)
590 cmp x23, #(PSR64_MODE_EL0)
591 bne 1f
592
593 mov x2, #0
594 mov x3, #0
595 mov x4, #0
596 mov x5, #0
597 mov x6, #0
598 mov x7, #0
599 mov x8, #0
600 mov x9, #0
601 mov x10, #0
602 mov x11, #0
603 mov x12, #0
604 mov x13, #0
605 mov x14, #0
606 mov x15, #0
607 mov x16, #0
608 mov x17, #0
609 mov x18, #0
610 mov x19, #0
611 mov x20, #0
612 /* x21, x22 cleared in common case below */
613 mov x23, #0
614 mov x24, #0
615 mov x25, #0
616 mov x26, #0
617 mov x27, #0
618 mov x28, #0
619 /* fp/lr already cleared by EL0_64_VECTOR */
620 1:
621
622 mov x21, x0 // Copy arm_context_t pointer to x21
623 mov x22, x1 // Copy handler routine to x22
624
625
626 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
627 tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
628 b.ne 1f // kernel mode, so skip precise time update
629 PUSH_FRAME
630 bl EXT(timer_state_event_user_to_kernel)
631 POP_FRAME
632 mov x0, x21 // Reload arm_context_t pointer
633 1:
634 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
635
636 /* Dispatch to FLEH */
637
638 br x22
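
/*
 * Every fleh_* routine is reached with x0 and x21 both pointing at the
 * saved arm_context_t. x21 is callee-saved under the AAPCS64, so it is
 * still valid after the sleh_* C handlers return, which is what
 * exception_return_dispatch and exception_return depend on.
 */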
639
640
641 .text
642 .align 2
643 fleh_synchronous:
644 mrs x1, ESR_EL1 // Load exception syndrome
645 mrs x2, FAR_EL1 // Load fault address
646
647 /* At this point, the LR contains the value of ELR_EL1. In the case of an
648 * instruction prefetch abort, this will be the faulting pc, which we know
649 * to be invalid. This will prevent us from backtracing through the
650 * exception if we put it in our stack frame, so we load the LR from the
651 * exception saved state instead.
652 */
653 and w3, w1, #(ESR_EC_MASK)
654 lsr w3, w3, #(ESR_EC_SHIFT)
655 mov w4, #(ESR_EC_IABORT_EL1)
656 cmp w3, w4
657 b.eq Lfleh_sync_load_lr
658 Lvalid_link_register:
659
660 PUSH_FRAME
661 bl EXT(sleh_synchronous)
662 POP_FRAME
663
664
665 b exception_return_dispatch
666
667 Lfleh_sync_load_lr:
668 ldr lr, [x0, SS64_LR]
669 b Lvalid_link_register
670
671 /* Shared prologue code for fleh_irq and fleh_fiq.
672  * Does any interrupt bookkeeping we may want to do
673 * before invoking the handler proper.
674 * Expects:
675 * x0 - arm_context_t
676 * x23 - CPSR
677 * fp - Undefined live value (we may push a frame)
678 * lr - Undefined live value (we may push a frame)
679 * sp - Interrupt stack for the current CPU
680 */
681 .macro BEGIN_INTERRUPT_HANDLER
682 mrs x22, TPIDR_EL1
683 ldr x23, [x22, ACT_CPUDATAP] // Get current cpu
684 /* Update IRQ count */
685 ldr w1, [x23, CPU_STAT_IRQ]
686 add w1, w1, #1 // Increment count
687 str w1, [x23, CPU_STAT_IRQ] // Update IRQ count
688 ldr w1, [x23, CPU_STAT_IRQ_WAKE]
689 add w1, w1, #1 // Increment count
690 str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
691 /* Increment preempt count */
692 ldr w1, [x22, ACT_PREEMPT_CNT]
693 add w1, w1, #1
694 str w1, [x22, ACT_PREEMPT_CNT]
695 /* Store context in int state */
696 str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state
697 .endmacro
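/*
 * x22 (thread pointer) and x23 (cpu data pointer) are callee-saved
 * registers, so they survive the bl to the sleh_* handler and are still
 * live when END_INTERRUPT_HANDLER runs its epilogue.
 */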
698
699 /* Shared epilogue code for fleh_irq and fleh_fiq.
700 * Cleans up after the prologue, and may do a bit more
701 * bookkeeping (kdebug related).
702 * Expects:
703 * x22 - Live TPIDR_EL1 value (thread address)
704 * x23 - Address of the current CPU data structure
705  * w24 - 0 if kdebug is disabled, nonzero otherwise
706 * fp - Undefined live value (we may push a frame)
707 * lr - Undefined live value (we may push a frame)
708 * sp - Interrupt stack for the current CPU
709 */
710 .macro END_INTERRUPT_HANDLER
711 /* Clear int context */
712 str xzr, [x23, CPU_INT_STATE]
713 /* Decrement preempt count */
714 ldr w0, [x22, ACT_PREEMPT_CNT]
715 cbnz w0, 1f // Detect underflow
716 b preempt_underflow
717 1:
718 sub w0, w0, #1
719 str w0, [x22, ACT_PREEMPT_CNT]
720 /* Switch back to kernel stack */
721 ldr x0, [x22, TH_KSTACKPTR]
722 mov sp, x0
723 .endmacro
724
725 .text
726 .align 2
727 fleh_irq:
728 BEGIN_INTERRUPT_HANDLER
729 PUSH_FRAME
730 bl EXT(sleh_irq)
731 POP_FRAME
732 END_INTERRUPT_HANDLER
733
734
735 b exception_return_dispatch
736
737 .text
738 .align 2
739 .global EXT(fleh_fiq_generic)
740 LEXT(fleh_fiq_generic)
741 PANIC_UNIMPLEMENTED
742
743 .text
744 .align 2
745 fleh_fiq:
746 BEGIN_INTERRUPT_HANDLER
747 PUSH_FRAME
748 bl EXT(sleh_fiq)
749 POP_FRAME
750 END_INTERRUPT_HANDLER
751
752
753 b exception_return_dispatch
754
755 .text
756 .align 2
757 fleh_serror:
758 mrs x1, ESR_EL1 // Load exception syndrome
759 mrs x2, FAR_EL1 // Load fault address
760
761 PUSH_FRAME
762 bl EXT(sleh_serror)
763 POP_FRAME
764
765
766 b exception_return_dispatch
767
768 /*
769 * Register state saved before we get here.
770 */
771 .text
772 .align 2
773 fleh_invalid_stack:
774 mrs x1, ESR_EL1 // Load exception syndrome
775 str x1, [x0, SS64_ESR]
776 mrs x2, FAR_EL1 // Load fault address
777 str x2, [x0, SS64_FAR]
778 PUSH_FRAME
779 bl EXT(sleh_invalid_stack) // Shouldn't return!
780 b .
781
782 .text
783 .align 2
784 fleh_synchronous_sp1:
785 mrs x1, ESR_EL1 // Load exception syndrome
786 str x1, [x0, SS64_ESR]
787 mrs x2, FAR_EL1 // Load fault address
788 str x2, [x0, SS64_FAR]
789 PUSH_FRAME
790 bl EXT(sleh_synchronous_sp1)
791 b .
792
793 .text
794 .align 2
795 fleh_irq_sp1:
796 mov x1, x0
797 adr x0, Lsp1_irq_str
798 b EXT(panic_with_thread_kernel_state)
799 Lsp1_irq_str:
800 .asciz "IRQ exception taken while SP1 selected"
801
802 .text
803 .align 2
804 fleh_fiq_sp1:
805 mov x1, x0
806 adr x0, Lsp1_fiq_str
807 b EXT(panic_with_thread_kernel_state)
808 Lsp1_fiq_str:
809 .asciz "FIQ exception taken while SP1 selected"
810
811 .text
812 .align 2
813 fleh_serror_sp1:
814 mov x1, x0
815 adr x0, Lsp1_serror_str
816 b EXT(panic_with_thread_kernel_state)
817 Lsp1_serror_str:
818 .asciz "Asynchronous exception taken while SP1 selected"
819
820 .text
821 .align 2
822 exception_return_dispatch:
823 	ldr		w0, [x21, SS_FLAVOR]		// Load the saved state flavor
824 	cmp		x0, ARM_SAVED_STATE64		// 64-bit or 32-bit saved state?
825 	ldr		w1, [x21, SS64_CPSR]
826 	ldr		w2, [x21, SS32_CPSR]
827 	csel	w0, w1, w2, eq				// w0 = (64-bit flavor) ? ss_64.cpsr : ss_32.cpsr
828 tbnz w0, PSR64_MODE_EL_SHIFT, return_to_kernel // Test for low bit of EL, return to kernel if set
829 b return_to_user
830
831 .text
832 .align 2
833 return_to_kernel:
834 tbnz w0, #DAIF_IRQF_SHIFT, Lkernel_skip_ast_taken // Skip AST check if IRQ disabled
835 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
836 mrs x0, TPIDR_EL1 // Load thread pointer
837 ldr w1, [x0, ACT_PREEMPT_CNT] // Load preemption count
838 cbnz x1, Lkernel_skip_ast_taken // If preemption disabled, skip AST check
839 ldr x1, [x0, ACT_CPUDATAP] // Get current CPU data pointer
840 ldr x2, [x1, CPU_PENDING_AST] // Get ASTs
841 tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken
842 b.eq Lkernel_skip_ast_taken
843 mov sp, x21 // Switch to thread stack for preemption
844 PUSH_FRAME
845 bl EXT(ast_taken_kernel) // Handle AST_URGENT
846 POP_FRAME
847 Lkernel_skip_ast_taken:
848 b exception_return
849
850 .text
851 .globl EXT(thread_bootstrap_return)
852 LEXT(thread_bootstrap_return)
853 #if CONFIG_DTRACE
854 bl EXT(dtrace_thread_bootstrap)
855 #endif
856 b EXT(thread_exception_return)
857
858 .text
859 .globl EXT(thread_exception_return)
860 LEXT(thread_exception_return)
861 mrs x0, TPIDR_EL1
862 add x21, x0, ACT_CONTEXT
863 ldr x21, [x21]
864
865 //
866 // Fall Through to return_to_user from thread_exception_return.
867 // Note that if we move return_to_user or insert a new routine
868 // below thread_exception_return, the latter will need to change.
869 //
870 .text
871 return_to_user:
872 check_user_asts:
873 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
874 mrs x3, TPIDR_EL1 // Load thread pointer
875
876 movn w2, #0
877 str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
878
879 ldr w0, [x3, TH_RWLOCK_CNT]
880 	cbz		w0, 1f					// Detect unbalanced RW lock/unlock
881 b rwlock_count_notzero
882 1:
883
884 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
885 ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
886 cbnz x0, user_take_ast // If pending ASTs, go service them
887
888 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
889 PUSH_FRAME
890 bl EXT(timer_state_event_kernel_to_user)
891 POP_FRAME
892 mrs x3, TPIDR_EL1 // Reload thread pointer
893 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
894
895 #if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
896 /* Watchtower
897 *
898 * Here we attempt to enable NEON access for EL0. If the last entry into the
899 * kernel from user-space was due to an IRQ, the monitor will have disabled
900 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
901 * check in with the monitor in order to reenable NEON for EL0 in exchange
902 * for routing IRQs through the monitor (2). This way the monitor will
903 * always 'own' either IRQs or EL0 NEON.
904 *
905 * If Watchtower is disabled or we did not enter the kernel through an IRQ
906 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
907 * here.
908 *
909 * EL0 user ________ IRQ ______
910 * EL1 xnu \ ______________________ CPACR_EL1 __/
911 * EL3 monitor \_/ \___/
912 *
913 * (1) (2)
914 */
915
916 mov x0, #(CPACR_FPEN_ENABLE)
917 msr CPACR_EL1, x0
918 #endif
919
920 /* Establish this thread's debug state as the live state on the selected CPU. */
921 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
922 ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
923 ldr x0, [x3, ACT_DEBUGDATA]
924 orr x1, x1, x0 // Thread debug state and live debug state both NULL?
925 cbnz x1, user_set_debug_state_and_return // If one or the other non-null, go set debug state
926
927 //
928 // Fall through from return_to_user to exception_return.
929 // Note that if we move exception_return or add a new routine below
930 // return_to_user, the latter will have to change.
931 //
932
933 exception_return:
934 msr DAIFSet, #DAIFSC_ALL // Disable exceptions
935 mrs x3, TPIDR_EL1 // Load thread pointer
936 mov sp, x21 // Reload the pcb pointer
937
938 /* ARM64_TODO Reserve x18 until we decide what to do with it */
939 ldr x0, [x3, TH_CTH_DATA] // Load cthread data pointer
940 str x0, [sp, SS64_X18] // and use it to trash x18
941
942 #if __ARM_KERNEL_PROTECT__
943 /*
944 * If we are going to eret to userspace, we must return through the EL0
945 * eret mapping.
946 */
947 ldr w1, [sp, SS64_CPSR] // Load CPSR
948 tbnz w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1
949
950 /* We need to switch to the EL0 mapping of this code to eret to EL0. */
951 adrp x0, EXT(ExceptionVectorsBase)@page // Load vector base
952 adrp x1, Lexception_return_restore_registers@page // Load target PC
953 add x1, x1, Lexception_return_restore_registers@pageoff
954 MOV64 x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address
955 sub x1, x1, x0 // Calculate delta
956 add x0, x2, x1 // Convert KVA to EL0 vector address
957 br x0
958
959 Lskip_el0_eret_mapping:
960 #endif /* __ARM_KERNEL_PROTECT__ */
961
962 Lexception_return_restore_registers:
963 /* Restore special register state */
964 ldr x0, [sp, SS64_PC] // Get the return address
965 ldr w1, [sp, SS64_CPSR] // Get the return CPSR
966 ldr w2, [sp, NS64_FPSR]
967 ldr w3, [sp, NS64_FPCR]
968
969 msr ELR_EL1, x0 // Load the return address into ELR
970 msr SPSR_EL1, x1 // Load the return CPSR into SPSR
971 msr FPSR, x2
972 msr FPCR, x3 // Synchronized by ERET
973
974 mov x0, sp // x0 = &pcb
975
976 /* Restore arm_neon_saved_state64 */
977 ldp q0, q1, [x0, NS64_Q0]
978 ldp q2, q3, [x0, NS64_Q2]
979 ldp q4, q5, [x0, NS64_Q4]
980 ldp q6, q7, [x0, NS64_Q6]
981 ldp q8, q9, [x0, NS64_Q8]
982 ldp q10, q11, [x0, NS64_Q10]
983 ldp q12, q13, [x0, NS64_Q12]
984 ldp q14, q15, [x0, NS64_Q14]
985 ldp q16, q17, [x0, NS64_Q16]
986 ldp q18, q19, [x0, NS64_Q18]
987 ldp q20, q21, [x0, NS64_Q20]
988 ldp q22, q23, [x0, NS64_Q22]
989 ldp q24, q25, [x0, NS64_Q24]
990 ldp q26, q27, [x0, NS64_Q26]
991 ldp q28, q29, [x0, NS64_Q28]
992 ldp q30, q31, [x0, NS64_Q30]
993
994 /* Restore arm_saved_state64 */
995
996 // Skip x0, x1 - we're using them
997 ldp x2, x3, [x0, SS64_X2]
998 ldp x4, x5, [x0, SS64_X4]
999 ldp x6, x7, [x0, SS64_X6]
1000 ldp x8, x9, [x0, SS64_X8]
1001 ldp x10, x11, [x0, SS64_X10]
1002 ldp x12, x13, [x0, SS64_X12]
1003 ldp x14, x15, [x0, SS64_X14]
1004 ldp x16, x17, [x0, SS64_X16]
1005 ldp x18, x19, [x0, SS64_X18]
1006 ldp x20, x21, [x0, SS64_X20]
1007 ldp x22, x23, [x0, SS64_X22]
1008 ldp x24, x25, [x0, SS64_X24]
1009 ldp x26, x27, [x0, SS64_X26]
1010 ldr x28, [x0, SS64_X28]
1011 ldp fp, lr, [x0, SS64_FP]
1012
1013 // Restore stack pointer and our last two GPRs
1014 ldr x1, [x0, SS64_SP]
1015 mov sp, x1
1016
1017 #if __ARM_KERNEL_PROTECT__
1018 ldr w18, [x0, SS64_CPSR] // Stash CPSR
1019 #endif /* __ARM_KERNEL_PROTECT__ */
1020
1021 ldp x0, x1, [x0, SS64_X0] // Restore the GPRs
1022
1023 #if __ARM_KERNEL_PROTECT__
1024 /* If we are going to eret to userspace, we must unmap the kernel. */
1025 tbnz w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1026
1027 /* Update TCR to unmap the kernel. */
1028 MOV64 x18, TCR_EL1_USER
1029 msr TCR_EL1, x18
1030
1031 /*
1032 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1033 * each other due to the microarchitecture.
1034 */
1035 #if !defined(APPLE_ARM64_ARCH_FAMILY)
1036 isb sy
1037 #endif
1038
1039 /* Switch to the user ASID (low bit clear) for the task. */
1040 mrs x18, TTBR0_EL1
1041 bic x18, x18, #(1 << TTBR_ASID_SHIFT)
1042 msr TTBR0_EL1, x18
1043 mov x18, #0
1044
1045 /* We don't need an ISB here, as the eret is synchronizing. */
1046 Lskip_ttbr1_switch:
1047 #endif /* __ARM_KERNEL_PROTECT__ */
1048
1049 eret
1050
1051 user_take_ast:
1052 PUSH_FRAME
1053 bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation
1054 POP_FRAME
1055 mrs x3, TPIDR_EL1 // Reload thread pointer
1056 b check_user_asts // Now try again
1057
1058 user_set_debug_state_and_return:
1059 ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
1060 isb // Synchronize context
1061 PUSH_FRAME
1062 bl EXT(arm_debug_set) // Establish thread debug state in live regs
1063 POP_FRAME
1064 isb
1065 mrs x3, TPIDR_EL1 // Reload thread pointer
1066 b exception_return // And continue
1067
1068 .text
1069 .align 2
1070 preempt_underflow:
1071 mrs x0, TPIDR_EL1
1072 str x0, [sp, #-16]! // We'll print thread pointer
1073 adr x0, L_underflow_str // Format string
1074 CALL_EXTERN panic // Game over
1075
1076 L_underflow_str:
1077 .asciz "Preemption count negative on thread %p"
1078 .align 2
1079
1080 .text
1081 .align 2
1082 rwlock_count_notzero:
1083 mrs x0, TPIDR_EL1
1084 str x0, [sp, #-16]! // We'll print thread pointer
1085 ldr w0, [x0, TH_RWLOCK_CNT]
1086 str w0, [sp, #8]
1087 adr x0, L_rwlock_count_notzero_str // Format string
1088 CALL_EXTERN panic // Game over
1089
1090 L_rwlock_count_notzero_str:
1091 .asciz "RW lock count not 0 on thread %p (%u)"
1092 .align 2
1093
1094 #if __ARM_KERNEL_PROTECT__
1095 /*
1096 * This symbol denotes the end of the exception vector/eret range; we page
1097 * align it so that we can avoid mapping other text in the EL0 exception
1098 * vector mapping.
1099 */
1100 .text
1101 .align 14
1102 .globl EXT(ExceptionVectorsEnd)
1103 LEXT(ExceptionVectorsEnd)
1104 #endif /* __ARM_KERNEL_PROTECT__ */
1105
1106 .text
1107 .align 2
1108 .globl EXT(ml_panic_trap_to_debugger)
1109 LEXT(ml_panic_trap_to_debugger)
1110 ret
1111
1112 /* ARM64_TODO Is globals_asm.h needed? */
1113 //#include "globals_asm.h"
1114
1115 /* vim: set ts=4: */