/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <config_dtrace.h>
#include "assym.s"


/*
 * INIT_SAVED_STATE_FLAVORS
 *
 * Initializes the saved state flavors of a new saved state structure
 * arg0 - saved state pointer
 * arg1 - 32-bit scratch reg
 * arg2 - 32-bit scratch reg
 */
.macro INIT_SAVED_STATE_FLAVORS
    mov $1, ARM_SAVED_STATE64 // Set saved state to 64-bit flavor
    mov $2, ARM_SAVED_STATE64_COUNT
    stp $1, $2, [$0, SS_FLAVOR]
    mov $1, ARM_NEON_SAVED_STATE64 // Set neon state to 64-bit flavor
    str $1, [$0, NS_FLAVOR]
    mov $1, ARM_NEON_SAVED_STATE64_COUNT
    str $1, [$0, NS_COUNT]
.endmacro
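/* Note: the single stp of the flavor/count pair above assumes the count word
 * sits immediately after SS_FLAVOR in the saved-state header (the symbolic
 * offsets come from assym.s). */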

.macro EL1_SP0_VECTOR
    msr SPSel, #0 // Switch to SP0
    sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
    stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
    add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
    str x0, [sp, SS64_SP] // Save stack pointer to exception frame
    stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
    INIT_SAVED_STATE_FLAVORS sp, w0, w1
    mov x0, sp // Copy saved state pointer to x0
.endmacro
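/* On exit from EL1_SP0_VECTOR: sp selects SP0 and points at the exception
 * frame carved out of the interrupted stack, x0 aliases that saved-state
 * area, and the original sp/fp/lr have already been captured in it.
 * x2-x28 and the NEON state are spilled later by SPILL_REGISTERS. */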

/*
 * SPILL_REGISTERS
 *
 * Spills the current set of registers (excluding x0 and x1) to the specified
 * save area.
 * x0 - Address of the save area
 */
.macro SPILL_REGISTERS
    stp x2, x3, [x0, SS64_X2] // Save remaining GPRs
    stp x4, x5, [x0, SS64_X4]
    stp x6, x7, [x0, SS64_X6]
    stp x8, x9, [x0, SS64_X8]
    stp x10, x11, [x0, SS64_X10]
    stp x12, x13, [x0, SS64_X12]
    stp x14, x15, [x0, SS64_X14]
    stp x16, x17, [x0, SS64_X16]
    stp x18, x19, [x0, SS64_X18]
    stp x20, x21, [x0, SS64_X20]
    stp x22, x23, [x0, SS64_X22]
    stp x24, x25, [x0, SS64_X24]
    stp x26, x27, [x0, SS64_X26]
    str x28, [x0, SS64_X28]

    /* Save arm_neon_saved_state64 */

    stp q0, q1, [x0, NS64_Q0]
    stp q2, q3, [x0, NS64_Q2]
    stp q4, q5, [x0, NS64_Q4]
    stp q6, q7, [x0, NS64_Q6]
    stp q8, q9, [x0, NS64_Q8]
    stp q10, q11, [x0, NS64_Q10]
    stp q12, q13, [x0, NS64_Q12]
    stp q14, q15, [x0, NS64_Q14]
    stp q16, q17, [x0, NS64_Q16]
    stp q18, q19, [x0, NS64_Q18]
    stp q20, q21, [x0, NS64_Q20]
    stp q22, q23, [x0, NS64_Q22]
    stp q24, q25, [x0, NS64_Q24]
    stp q26, q27, [x0, NS64_Q26]
    stp q28, q29, [x0, NS64_Q28]
    stp q30, q31, [x0, NS64_Q30]

    mrs lr, ELR_EL1 // Get exception link register
    mrs x23, SPSR_EL1 // Load CPSR into var reg x23
    mrs x24, FPSR
    mrs x25, FPCR

    str lr, [x0, SS64_PC] // Save ELR to PCB
    str w23, [x0, SS64_CPSR] // Save CPSR to PCB
    str w24, [x0, NS64_FPSR]
    str w25, [x0, NS64_FPCR]

    mrs x20, FAR_EL1
    mrs x21, ESR_EL1
    str x20, [x0, SS64_FAR]
    str w21, [x0, SS64_ESR]
.endmacro
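/* On exit from SPILL_REGISTERS: lr holds the saved ELR_EL1, x20/x21 hold
 * FAR_EL1/ESR_EL1, and x23/x24/x25 hold the saved CPSR/FPSR/FPCR. x23 in
 * particular is consumed later by fleh_dispatch64's user/kernel time check. */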


#define CBF_DISABLE 0
#define CBF_ENABLE 1

.macro COMPARE_BRANCH_FUSION
#if defined(APPLE_ARM64_ARCH_FAMILY)
    mrs $1, ARM64_REG_HID1
    .if $0 == CBF_DISABLE
    orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
    .else
    mov $2, ARM64_REG_HID1_disCmpBrFusion
    bic $1, $1, $2
    .endif
    msr ARM64_REG_HID1, $1
    .if $0 == CBF_DISABLE
    isb sy
    .endif
#endif
.endmacro

    .text
    .align 12
    .globl EXT(ExceptionVectorsBase)
LEXT(ExceptionVectorsBase)
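/* The architectural vector table: entries are 128 bytes apart (each vector
 * below is padded to that stride with .align 7), and the base itself is page
 * aligned via .align 12 so it can be programmed into VBAR_EL1. */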
Lel1_sp0_synchronous_vector:
    sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack
    stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack
    mrs x1, ESR_EL1 // Get the exception syndrome
    /* If the stack pointer is corrupt, it will manifest either as a data abort
     * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
     * these quickly by testing bit 5 of the exception class.
     */
    tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
    mrs x0, SP_EL0 // Get SP_EL0
    stp fp, lr, [sp, SS64_FP] // Save fp, lr to the stack
    str x0, [sp, SS64_SP] // Save sp to the stack
    bl check_kernel_stack
    ldp fp, lr, [sp, SS64_FP] // Restore fp, lr
Lkernel_stack_valid:
    ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
    add sp, sp, ARM_CONTEXT_SIZE // Restore SP1
    EL1_SP0_VECTOR
    adrp x1, fleh_synchronous@page // Load address for fleh
    add x1, x1, fleh_synchronous@pageoff
    b fleh_dispatch64

    .text
    .align 7
Lel1_sp0_irq_vector:
    EL1_SP0_VECTOR
    mrs x1, TPIDR_EL1
    ldr x1, [x1, ACT_CPUDATAP]
    ldr x1, [x1, CPU_ISTACKPTR]
    mov sp, x1
    adrp x1, fleh_irq@page // Load address for fleh
    add x1, x1, fleh_irq@pageoff
    b fleh_dispatch64

    .text
    .align 7
Lel1_sp0_fiq_vector:
    // ARM64_TODO write optimized decrementer
    EL1_SP0_VECTOR
    mrs x1, TPIDR_EL1
    ldr x1, [x1, ACT_CPUDATAP]
    ldr x1, [x1, CPU_ISTACKPTR]
    mov sp, x1
    adrp x1, fleh_fiq@page // Load address for fleh
    add x1, x1, fleh_fiq@pageoff
    b fleh_dispatch64

    .text
    .align 7
Lel1_sp0_serror_vector:
    EL1_SP0_VECTOR
    adrp x1, fleh_serror@page // Load address for fleh
    add x1, x1, fleh_serror@pageoff
    b fleh_dispatch64

.macro EL1_SP1_VECTOR
    sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
    stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
    add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
    str x0, [sp, SS64_SP] // Save stack pointer to exception frame
    INIT_SAVED_STATE_FLAVORS sp, w0, w1
    stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
    mov x0, sp // Copy saved state pointer to x0
.endmacro

    .text
    .align 7
Lel1_sp1_synchronous_vector:
#if defined(KERNEL_INTEGRITY_KTRR)
    b check_ktrr_sctlr_trap
Lel1_sp1_synchronous_vector_continue:
#endif
    EL1_SP1_VECTOR
    adrp x1, fleh_synchronous_sp1@page
    add x1, x1, fleh_synchronous_sp1@pageoff
    b fleh_dispatch64

    .text
    .align 7
Lel1_sp1_irq_vector:
    EL1_SP1_VECTOR
    adrp x1, fleh_irq_sp1@page
    add x1, x1, fleh_irq_sp1@pageoff
    b fleh_dispatch64

    .text
    .align 7
Lel1_sp1_fiq_vector:
    EL1_SP1_VECTOR
    adrp x1, fleh_fiq_sp1@page
    add x1, x1, fleh_fiq_sp1@pageoff
    b fleh_dispatch64

    .text
    .align 7
Lel1_sp1_serror_vector:
    EL1_SP1_VECTOR
    adrp x1, fleh_serror_sp1@page
    add x1, x1, fleh_serror_sp1@pageoff
    b fleh_dispatch64

.macro EL0_64_VECTOR
    stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
    mrs x0, TPIDR_EL1 // Load the thread register
    mrs x1, SP_EL0 // Load the user stack pointer
    add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
    ldr x0, [x0] // Load the user context pointer
    str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB
    msr SP_EL0, x0 // Copy the user PCB pointer to SP0
    ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
    msr SPSel, #0 // Switch to SP0
    stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
    stp fp, lr, [sp, SS64_FP] // Save fp and lr to the user PCB
    mov fp, xzr // Clear the fp and lr for the
    mov lr, xzr // debugger stack frame
    mov x0, sp // Copy the user PCB pointer to x0
.endmacro
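/* EL0_64_VECTOR relies on SP_EL0 being free to clobber while we run on the
 * EL1 exception stack (SP1): the user's real stack pointer is stashed in the
 * PCB first, SP_EL0 is then repointed at the PCB itself, and the SPSel switch
 * lets the remaining stores land directly in the user saved-state area. */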

    .text
    .align 7
Lel0_synchronous_vector_64:
    EL0_64_VECTOR
    mrs x1, TPIDR_EL1 // Load the thread register
    ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
    mov sp, x1 // Set the stack pointer to the kernel stack
    adrp x1, fleh_synchronous@page // Load address for fleh
    add x1, x1, fleh_synchronous@pageoff
    b fleh_dispatch64

    .text
    .align 7
Lel0_irq_vector_64:
    EL0_64_VECTOR
    mrs x1, TPIDR_EL1
    ldr x1, [x1, ACT_CPUDATAP]
    ldr x1, [x1, CPU_ISTACKPTR]
    mov sp, x1 // Set the stack pointer to the kernel stack
    adrp x1, fleh_irq@page // load address for fleh
    add x1, x1, fleh_irq@pageoff
    b fleh_dispatch64

    .text
    .align 7
Lel0_fiq_vector_64:
    EL0_64_VECTOR
    mrs x1, TPIDR_EL1
    ldr x1, [x1, ACT_CPUDATAP]
    ldr x1, [x1, CPU_ISTACKPTR]
    mov sp, x1 // Set the stack pointer to the kernel stack
    adrp x1, fleh_fiq@page // load address for fleh
    add x1, x1, fleh_fiq@pageoff
    b fleh_dispatch64

    .text
    .align 7
Lel0_serror_vector_64:
    EL0_64_VECTOR
    mrs x1, TPIDR_EL1 // Load the thread register
    ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
    mov sp, x1 // Set the stack pointer to the kernel stack
    adrp x1, fleh_serror@page // load address for fleh
    add x1, x1, fleh_serror@pageoff
    b fleh_dispatch64

/* Fill out the rest of the page */
    .align 12

/*********************************
 * END OF EXCEPTION VECTORS PAGE *
 *********************************/


/*
 * check_kernel_stack
 *
 * Verifies that the kernel stack is aligned and mapped within an expected
 * stack address range. Note: happens before saving registers (in case we can't
 * save to kernel stack).
 *
 * Expects:
 * {x0, x1, sp} - saved
 * x0 - SP_EL0
 * x1 - Exception syndrome
 * sp - Saved state
 */
    .text
    .align 2
check_kernel_stack:
    stp x2, x3, [sp, SS64_X2] // Save {x2-x3}
    and x1, x1, #ESR_EC_MASK // Mask the exception class
    mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
    cmp x1, x2 // If we have a stack alignment exception
    b.eq Lcorrupt_stack // ...the stack is definitely corrupted
    mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
    cmp x1, x2 // If we have a data abort, we need to
    b.ne Lvalid_stack // ...validate the stack pointer
    mrs x1, TPIDR_EL1 // Get thread pointer
Ltest_kstack:
    ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
    sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
    cmp x0, x2 // if (SP_EL0 >= kstack top)
    b.ge Ltest_istack // jump to istack test
    cmp x0, x3 // if (SP_EL0 > kstack bottom)
    b.gt Lvalid_stack // stack pointer valid
Ltest_istack:
    ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
    ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
    sub x3, x2, PGBYTES // Find bottom of istack
    cmp x0, x2 // if (SP_EL0 >= istack top)
    b.ge Ltest_fiqstack // jump to fiqstack test
    cmp x0, x3 // if (SP_EL0 > istack bottom)
    b.gt Lvalid_stack // stack pointer valid
Ltest_fiqstack:
    ldr x2, [x1, CPU_FIQSTACK_TOP] // Get top of fiqstack
    sub x3, x2, PGBYTES // Find bottom of fiqstack
    cmp x0, x2 // if (SP_EL0 >= fiqstack top)
    b.ge Lcorrupt_stack // corrupt stack pointer
    cmp x0, x3 // if (SP_EL0 > fiqstack bottom)
    b.gt Lvalid_stack // stack pointer valid
Lcorrupt_stack:
    INIT_SAVED_STATE_FLAVORS sp, w0, w1
    mov x0, sp // Copy exception frame pointer to x0
    adrp x1, fleh_invalid_stack@page // Load address for fleh
    add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
    ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
    b fleh_dispatch64
Lvalid_stack:
    ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
    ret
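/*
 * Equivalent validation logic as a C sketch (illustrative only; the real
 * bounds come from the thread and cpu_data fields referenced above):
 *
 *	bool
 *	stack_valid(uint64_t sp)
 *	{
 *		return (kstack_top - KERNEL_STACK_SIZE < sp && sp < kstack_top) ||
 *		       (istack_top - PGBYTES < sp && sp < istack_top) ||
 *		       (fiqstack_top - PGBYTES < sp && sp < fiqstack_top);
 *	}
 */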

#if defined(KERNEL_INTEGRITY_KTRR)
    .text
    .align 2
check_ktrr_sctlr_trap:
/* We may abort on an instruction fetch on reset when enabling the MMU by
 * writing SCTLR_EL1 because the page containing the privileged instruction is
 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
 * would otherwise panic unconditionally. Check for the condition and return
 * safe execution to the caller on behalf of the faulting function.
 *
 * Expected register state:
 * x22 - Kernel virtual base
 * x23 - Kernel physical base
 */
    sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack
    stp x0, x1, [sp, SS64_X0] // Stash x0, x1
    mrs x0, ESR_EL1 // Check ESR for instr. fetch abort
    and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC
    movz w1, #0x8600, lsl #16
    movk w1, #0x0000
    cmp x0, x1
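    // Note: 0x86000000 is an ESR value with EC == ESR_EC_IABORT_EL1 (an
    // instruction abort taken without a change in Exception level) and the
    // ISS status bits clear, matching what remains after IFSC is masked off.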
    mrs x0, ELR_EL1 // Check for expected abort address
    adrp x1, _pinst_set_sctlr_trap_addr@page
    add x1, x1, _pinst_set_sctlr_trap_addr@pageoff
    sub x1, x1, x22 // Convert to physical address
    add x1, x1, x23
    ccmp x0, x1, #0, eq
    ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
    add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
    b.ne Lel1_sp1_synchronous_vector_continue
    msr ELR_EL1, lr // Return to caller
    eret
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

/* 64-bit first level exception handler dispatcher.
 * Completes register context saving and branches to FLEH.
 * Expects:
 * {x0, x1, fp, lr, sp} - saved
 * x0 - arm_context_t
 * x1 - address of FLEH
 * fp - previous stack frame if EL1
 * lr - unused
 * sp - kernel stack
 */
    .text
    .align 2
fleh_dispatch64:
    /* Save arm_saved_state64 */
    SPILL_REGISTERS

    /* If exception is from userspace, zero lr */
    ldr w21, [x0, SS64_CPSR]
    and x21, x21, #(PSR64_MODE_EL_MASK)
    cmp x21, #(PSR64_MODE_EL0)
    bne 1f
    mov lr, #0
1:

    mov x21, x0 // Copy arm_context_t pointer to x21
    mov x22, x1 // Copy handler routine to x22


#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
    b.ne 1f // kernel mode, so skip precise time update
    PUSH_FRAME
    bl EXT(timer_state_event_user_to_kernel)
    POP_FRAME
    mov x0, x21 // Reload arm_context_t pointer
1:
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

    /* Dispatch to FLEH */

    br x22
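/* Register state handed to every FLEH: x0 and sp as documented above, plus
 * x21 = arm_context_t pointer, x22 = the FLEH being entered, x23 = saved
 * CPSR, and lr zeroed if the exception came from EL0. */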


    .text
    .align 2
fleh_synchronous:
    mrs x1, ESR_EL1 // Load exception syndrome
    mrs x2, FAR_EL1 // Load fault address

    /* At this point, the LR contains the value of ELR_EL1. In the case of an
     * instruction prefetch abort, this will be the faulting pc, which we know
     * to be invalid. This will prevent us from backtracing through the
     * exception if we put it in our stack frame, so we load the LR from the
     * exception saved state instead.
     */
    and w3, w1, #(ESR_EC_MASK)
    lsr w3, w3, #(ESR_EC_SHIFT)
    mov w4, #(ESR_EC_IABORT_EL1)
    cmp w3, w4
    b.eq Lfleh_sync_load_lr
Lvalid_link_register:

    PUSH_FRAME
    bl EXT(sleh_synchronous)
    POP_FRAME


    b exception_return_dispatch

Lfleh_sync_load_lr:
    ldr lr, [x0, SS64_LR]
    b Lvalid_link_register

/* Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
 * before invoking the handler proper.
 * Expects:
 * x0 - arm_context_t
 * x23 - CPSR
 * fp - Undefined live value (we may push a frame)
 * lr - Undefined live value (we may push a frame)
 * sp - Interrupt stack for the current CPU
 */
.macro BEGIN_INTERRUPT_HANDLER
    mrs x22, TPIDR_EL1
    ldr x23, [x22, ACT_CPUDATAP] // Get current cpu
    /* Update IRQ count */
    ldr w1, [x23, CPU_STAT_IRQ]
    add w1, w1, #1 // Increment count
    str w1, [x23, CPU_STAT_IRQ] // Update IRQ count
    ldr w1, [x23, CPU_STAT_IRQ_WAKE]
    add w1, w1, #1 // Increment count
    str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
    /* Increment preempt count */
    ldr w1, [x22, ACT_PREEMPT_CNT]
    add w1, w1, #1
    str w1, [x22, ACT_PREEMPT_CNT]
    /* Store context in int state */
    str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state
.endmacro

/* Shared epilogue code for fleh_irq and fleh_fiq.
 * Cleans up after the prologue, and may do a bit more
 * bookkeeping (kdebug related).
 * Expects:
 * x22 - Live TPIDR_EL1 value (thread address)
 * x23 - Address of the current CPU data structure
 * w24 - 0 if kdebug is disabled, nonzero otherwise
 * fp - Undefined live value (we may push a frame)
 * lr - Undefined live value (we may push a frame)
 * sp - Interrupt stack for the current CPU
 */
.macro END_INTERRUPT_HANDLER
    /* Clear int context */
    str xzr, [x23, CPU_INT_STATE]
    /* Decrement preempt count */
    ldr w0, [x22, ACT_PREEMPT_CNT]
    cbnz w0, 1f // Detect underflow
    b preempt_underflow
1:
    sub w0, w0, #1
    str w0, [x22, ACT_PREEMPT_CNT]
    /* Switch back to kernel stack */
    ldr x0, [x22, TH_KSTACKPTR]
    mov sp, x0
.endmacro
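/* Note that END_INTERRUPT_HANDLER leaves sp on the thread's kernel stack, so
 * the return path that follows each interrupt handler no longer runs on the
 * per-CPU interrupt stack. */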

    .text
    .align 2
fleh_irq:
    BEGIN_INTERRUPT_HANDLER
    PUSH_FRAME
    bl EXT(sleh_irq)
    POP_FRAME
    END_INTERRUPT_HANDLER


    b exception_return_dispatch

    .text
    .align 2
    .global EXT(fleh_fiq_generic)
LEXT(fleh_fiq_generic)
    PANIC_UNIMPLEMENTED

    .text
    .align 2
fleh_fiq:
    BEGIN_INTERRUPT_HANDLER
    PUSH_FRAME
    bl EXT(sleh_fiq)
    POP_FRAME
    END_INTERRUPT_HANDLER


    b exception_return_dispatch

    .text
    .align 2
fleh_serror:
    mrs x1, ESR_EL1 // Load exception syndrome
    mrs x2, FAR_EL1 // Load fault address

    PUSH_FRAME
    bl EXT(sleh_serror)
    POP_FRAME


    b exception_return_dispatch

/*
 * Register state saved before we get here.
 */
    .text
    .align 2
fleh_invalid_stack:
    mrs x1, ESR_EL1 // Load exception syndrome
    str x1, [x0, SS64_ESR]
    mrs x2, FAR_EL1 // Load fault address
    str x2, [x0, SS64_FAR]
    PUSH_FRAME
    bl EXT(sleh_invalid_stack) // Shouldn't return!
    b .

    .text
    .align 2
fleh_synchronous_sp1:
    mrs x1, ESR_EL1 // Load exception syndrome
    str x1, [x0, SS64_ESR]
    mrs x2, FAR_EL1 // Load fault address
    str x2, [x0, SS64_FAR]
    PUSH_FRAME
    bl EXT(sleh_synchronous_sp1)
    b .

    .text
    .align 2
fleh_irq_sp1:
    mov x1, x0
    adr x0, Lsp1_irq_str
    b EXT(panic_with_thread_kernel_state)
Lsp1_irq_str:
    .asciz "IRQ exception taken while SP1 selected"

    .text
    .align 2
fleh_fiq_sp1:
    mov x1, x0
    adr x0, Lsp1_fiq_str
    b EXT(panic_with_thread_kernel_state)
Lsp1_fiq_str:
    .asciz "FIQ exception taken while SP1 selected"

    .text
    .align 2
fleh_serror_sp1:
    mov x1, x0
    adr x0, Lsp1_serror_str
    b EXT(panic_with_thread_kernel_state)
Lsp1_serror_str:
    .asciz "Asynchronous exception taken while SP1 selected"

    .text
    .align 2
exception_return_dispatch:
    ldr w0, [x21, SS_FLAVOR] // x0 = (threadIs64Bit) ? ss_64.cpsr : ss_32.cpsr
    cmp x0, ARM_SAVED_STATE64
    ldr w1, [x21, SS64_CPSR]
    ldr w2, [x21, SS32_CPSR]
    csel w0, w1, w2, eq
    tbnz w0, PSR64_MODE_EL_SHIFT, return_to_kernel // Test for low bit of EL, return to kernel if set
    b return_to_user
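/*
 * The dispatch above, as a C sketch (illustrative only; field names follow
 * the ss_64/ss_32 naming used in the comment above):
 *
 *	uint32_t cpsr = (saved_state->flavor == ARM_SAVED_STATE64)
 *	    ? saved_state->ss_64.cpsr : saved_state->ss_32.cpsr;
 *	if (cpsr & (1 << PSR64_MODE_EL_SHIFT))
 *		return_to_kernel();     // exception taken from EL1
 *	else
 *		return_to_user();
 */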

    .text
    .align 2
return_to_kernel:
    tbnz w0, #DAIF_IRQF_SHIFT, Lkernel_skip_ast_taken // Skip AST check if IRQ disabled
    msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
    mrs x0, TPIDR_EL1 // Load thread pointer
    ldr w1, [x0, ACT_PREEMPT_CNT] // Load preemption count
    cbnz x1, Lkernel_skip_ast_taken // If preemption disabled, skip AST check
    ldr x1, [x0, ACT_CPUDATAP] // Get current CPU data pointer
    ldr x2, [x1, CPU_PENDING_AST] // Get ASTs
    tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken
    b.eq Lkernel_skip_ast_taken
    mov sp, x21 // Switch to thread stack for preemption
    PUSH_FRAME
    bl EXT(ast_taken_kernel) // Handle AST_URGENT
    POP_FRAME
Lkernel_skip_ast_taken:
    b exception_return

    .text
    .globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
    bl EXT(dtrace_thread_bootstrap)
#endif
    b EXT(thread_exception_return)

    .text
    .globl EXT(thread_exception_return)
LEXT(thread_exception_return)
    mrs x0, TPIDR_EL1
    add x21, x0, ACT_CONTEXT
    ldr x21, [x21]

//
// Fall Through to return_to_user from thread_exception_return.
// Note that if we move return_to_user or insert a new routine
// below thread_exception_return, the latter will need to change.
//
    .text
return_to_user:
check_user_asts:
    msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
    mrs x3, TPIDR_EL1 // Load thread pointer

    movn w2, #0
    str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user

    ldr w0, [x3, TH_RWLOCK_CNT]
    cbz w0, 1f // Detect unbalanced RW lock/unlock
    b rwlock_count_notzero
1:

    ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
    ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
    cbnz x0, user_take_ast // If pending ASTs, go service them

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    PUSH_FRAME
    bl EXT(timer_state_event_kernel_to_user)
    POP_FRAME
    mrs x3, TPIDR_EL1 // Reload thread pointer
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
    /* Watchtower
     *
     * Here we attempt to enable NEON access for EL0. If the last entry into the
     * kernel from user-space was due to an IRQ, the monitor will have disabled
     * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
     * check in with the monitor in order to reenable NEON for EL0 in exchange
     * for routing IRQs through the monitor (2). This way the monitor will
     * always 'own' either IRQs or EL0 NEON.
     *
     * If Watchtower is disabled or we did not enter the kernel through an IRQ
     * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
     * here.
     *
     * EL0 user ________ IRQ ______
     * EL1 xnu \ ______________________ CPACR_EL1 __/
     * EL3 monitor \_/ \___/
     *
     * (1) (2)
     */

    mov x0, #(CPACR_FPEN_ENABLE)
    msr CPACR_EL1, x0
#endif

    /* Establish this thread's debug state as the live state on the selected CPU. */
    ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
    ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
    ldr x0, [x3, ACT_DEBUGDATA]
    orr x1, x1, x0 // Thread debug state and live debug state both NULL?
    cbnz x1, user_set_debug_state_and_return // If one or the other non-null, go set debug state

//
// Fall through from return_to_user to exception_return.
// Note that if we move exception_return or add a new routine below
// return_to_user, the latter will have to change.
//


exception_return:
    msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts
    mrs x3, TPIDR_EL1 // Load thread pointer
    mov sp, x21 // Reload the pcb pointer

    /* ARM64_TODO Reserve x18 until we decide what to do with it */
    ldr x0, [x3, TH_CTH_DATA] // Load cthread data pointer
    str x0, [sp, SS64_X18] // and use it to trash x18

Lexception_return_restore_registers:
    /* Restore special register state */
    ldr x0, [sp, SS64_PC] // Get the return address
    ldr w1, [sp, SS64_CPSR] // Get the return CPSR
    ldr w2, [sp, NS64_FPSR]
    ldr w3, [sp, NS64_FPCR]

    msr ELR_EL1, x0 // Load the return address into ELR
    msr SPSR_EL1, x1 // Load the return CPSR into SPSR
    msr FPSR, x2
    msr FPCR, x3 // Synchronized by ERET

    mov x0, sp // x0 = &pcb

    /* Restore arm_neon_saved_state64 */
    ldp q0, q1, [x0, NS64_Q0]
    ldp q2, q3, [x0, NS64_Q2]
    ldp q4, q5, [x0, NS64_Q4]
    ldp q6, q7, [x0, NS64_Q6]
    ldp q8, q9, [x0, NS64_Q8]
    ldp q10, q11, [x0, NS64_Q10]
    ldp q12, q13, [x0, NS64_Q12]
    ldp q14, q15, [x0, NS64_Q14]
    ldp q16, q17, [x0, NS64_Q16]
    ldp q18, q19, [x0, NS64_Q18]
    ldp q20, q21, [x0, NS64_Q20]
    ldp q22, q23, [x0, NS64_Q22]
    ldp q24, q25, [x0, NS64_Q24]
    ldp q26, q27, [x0, NS64_Q26]
    ldp q28, q29, [x0, NS64_Q28]
    ldp q30, q31, [x0, NS64_Q30]

    /* Restore arm_saved_state64 */

    // Skip x0, x1 - we're using them
    ldp x2, x3, [x0, SS64_X2]
    ldp x4, x5, [x0, SS64_X4]
    ldp x6, x7, [x0, SS64_X6]
    ldp x8, x9, [x0, SS64_X8]
    ldp x10, x11, [x0, SS64_X10]
    ldp x12, x13, [x0, SS64_X12]
    ldp x14, x15, [x0, SS64_X14]
    ldp x16, x17, [x0, SS64_X16]
    ldp x18, x19, [x0, SS64_X18]
    ldp x20, x21, [x0, SS64_X20]
    ldp x22, x23, [x0, SS64_X22]
    ldp x24, x25, [x0, SS64_X24]
    ldp x26, x27, [x0, SS64_X26]
    ldr x28, [x0, SS64_X28]
    ldp fp, lr, [x0, SS64_FP]

    // Restore stack pointer and our last two GPRs
    ldr x1, [x0, SS64_SP]
    mov sp, x1
    ldp x0, x1, [x0, SS64_X0] // Restore the GPRs

    eret
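/* The restore sequence deliberately keeps x0 as the saved-state base until
 * the very end: sp is switched to the target stack first, and only then does
 * the final ldp reload x0/x1 themselves, leaving nothing clobbered before
 * the eret. */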

user_take_ast:
    PUSH_FRAME
    bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation
    POP_FRAME
    mrs x3, TPIDR_EL1 // Reload thread pointer
    b check_user_asts // Now try again

user_set_debug_state_and_return:
    ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
    isb // Synchronize context
    PUSH_FRAME
    bl EXT(arm_debug_set) // Establish thread debug state in live regs
    POP_FRAME
    isb
    mrs x3, TPIDR_EL1 // Reload thread pointer
    b exception_return // And continue

    .text
    .align 2
preempt_underflow:
    mrs x0, TPIDR_EL1
    str x0, [sp, #-16]! // We'll print thread pointer
    adr x0, L_underflow_str // Format string
    CALL_EXTERN panic // Game over

L_underflow_str:
    .asciz "Preemption count negative on thread %p"
    .align 2

    .text
    .align 2
rwlock_count_notzero:
    mrs x0, TPIDR_EL1
    str x0, [sp, #-16]! // We'll print thread pointer
    ldr w0, [x0, TH_RWLOCK_CNT]
    str w0, [sp, #8]
    adr x0, L_rwlock_count_notzero_str // Format string
    CALL_EXTERN panic // Game over

L_rwlock_count_notzero_str:
    .asciz "RW lock count not 0 on thread %p (%u)"
    .align 2

    .text
    .align 2
    .globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
    ret

/* ARM64_TODO Is globals_asm.h needed? */
//#include "globals_asm.h"

/* vim: set ts=4: */