2 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
57 #include <machine/asm.h>
58 #include <arm/proc_reg.h>
59 #include <pexpert/arm/board_config.h>
60 #include <mach/exception_types.h>
62 #include <mach_assert.h>
63 #include <config_dtrace.h>
66 #define TRACE_SYSCALL 0
69 * Copied to low physical memory in arm_init,
70 * so the kernel must be linked virtually at
71 * 0xc0001000 or higher to leave space for it.
/*
 * Low exception vectors + boot-CPU reset path.
 * Copied to low physical memory during arm_init (see file comment above),
 * so everything here must be position-independent (adr, not ldr =addr).
 * NOTE(review): this excerpt is missing lines; the #endif below has no
 * visible matching #if, and some labels referenced here are not visible.
 */
76 .globl EXT(ExceptionLowVectorsBase)
78 LEXT(ExceptionLowVectorsBase)
79 adr pc, Lreset_low_vector
84 b . // Address Exception
/* Scratch area zeroed at build time; reset code stores private data here. */
87 LEXT(ResetPrivateData)
88 .space (480),0 // (filled with 0s)
89 // ExceptionLowVectorsBase + 0x200
/* Locate ResetHandlerData PC-relatively and pull out the assist handler. */
91 adr r4, EXT(ResetHandlerData)
92 ldr r0, [r4, ASSIST_RESET_HANDLER]
95 adr r4, EXT(ResetHandlerData)
96 ldr r1, [r4, CPU_DATA_ENTRIES]
97 ldr r1, [r1, CPU_DATA_PADDR]
98 ldr r5, [r1, CPU_RESET_ASSIST]
101 adr r4, EXT(ResetHandlerData)
102 ldr r0, [r4, BOOT_ARGS]
103 ldr r1, [r4, CPU_DATA_ENTRIES]
106 // physical cpu number is stored in MPIDR Affinity level 0
107 mrc p15, 0, r6, c0, c0, 5 // Read MPIDR
108 and r6, r6, #0xFF // Extract Affinity level 0
110 #error missing Who Am I implementation
114 #endif /* __ARM_SMP__ */
115 // physical cpu number matches cpu number
117 //#error cpu_data_entry is not 16bytes in size
/* Index cpu_data_entries by (cpu# * 16) to find this CPU's reset handler. */
119 lsl r6, r6, #4 // Get CpuDataEntry offset
120 add r1, r1, r6 // Get cpu_data_entry pointer
121 ldr r1, [r1, CPU_DATA_PADDR]
122 ldr r5, [r1, CPU_RESET_HANDLER]
124 blxne r5 // Branch to cpu reset handler
125 b . // Unexpected reset
/* Zero-filled block patched at boot with boot args / cpu data pointers. */
126 .globl EXT(ResetHandlerData)
127 LEXT(ResetHandlerData)
128 .space (rhdSize_NUM),0 // (filled with 0s)
131 .globl EXT(ExceptionLowVectorsEnd)
132 LEXT(ExceptionLowVectorsEnd)
/*
 * Runtime exception vector page.  Each vector slot is an `adr pc, ...`
 * branch to a per-exception stub below; each stub uses the banked SP of
 * the exception mode purely as a scratch register (no context is live in
 * it yet) to walk TPIDRPRW -> cpu_data -> exception-vector table and
 * jump to the patched handler at a fixed word offset.
 * NOTE(review): several stub labels (reset, swi, irq, decirq) are missing
 * from this excerpt, and the __ARM_TIME__ conditionals are unbalanced.
 */
136 .globl EXT(ExceptionVectorsBase)
138 LEXT(ExceptionVectorsBase)
140 adr pc, Lexc_reset_vector
141 adr pc, Lexc_undefined_inst_vector
142 adr pc, Lexc_swi_vector
143 adr pc, Lexc_prefetch_abort_vector
144 adr pc, Lexc_data_abort_vector
145 adr pc, Lexc_address_exception_vector
146 adr pc, Lexc_irq_vector
148 adr pc, Lexc_decirq_vector
149 #else /* ! __ARM_TIME__ */
151 #endif /* __ARM_TIME__ */
/* Stub: undefined instruction -> table slot [4]. */
158 Lexc_undefined_inst_vector:
159 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
160 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
161 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
162 ldr pc, [sp, #4] // Branch to exception handler
/* Stub (label missing in excerpt, presumably Lexc_swi_vector) -> slot [8]. */
164 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
165 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
166 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
167 ldr pc, [sp, #8] // Branch to exception handler
168 Lexc_prefetch_abort_vector:
169 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
170 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
171 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
172 ldr pc, [sp, #0xC] // Branch to exception handler
173 Lexc_data_abort_vector:
174 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
175 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
176 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
177 ldr pc, [sp, #0x10] // Branch to exception handler
178 Lexc_address_exception_vector:
179 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
180 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
181 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
182 ldr pc, [sp, #0x14] // Branch to exception handler
/* Stub (label missing in excerpt, presumably Lexc_irq_vector) -> slot [0x18]. */
184 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
185 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
186 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
187 ldr pc, [sp, #0x18] // Branch to exception handler
/* Stub (label missing in excerpt) -> slot [0x1C]. */
190 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
191 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
192 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
193 ldr pc, [sp, #0x1C] // Branch to exception handler
194 #else /* ! __ARM_TIME__ */
199 #endif /* __ARM_TIME__ */
/* Pad the vector page out to a full 4KB page. */
201 .fill 984, 4, 0 // Push to the 4KB page boundary
203 .globl EXT(ExceptionVectorsEnd)
204 LEXT(ExceptionVectorsEnd)
208 * Targets for the exception vectors; we patch these during boot (to allow
209 * for position independent code without complicating the vectors; see start.s).
/* One word per exception class; the stubs above index this table. */
211 .globl EXT(ExceptionVectorsTable)
212 LEXT(ExceptionVectorsTable)
215 Lundefined_inst_vector:
219 Lprefetch_abort_vector:
223 Laddress_exception_vector:
232 * First Level Exception Handlers
236 .globl EXT(fleh_reset)
241 * First Level Exception Handler for Undefined Instruction.
245 .globl EXT(fleh_undef)
/*
 * NOTE(review): the macro definitions below are fragmentary in this
 * excerpt (several .macro/.endmacro lines are missing); $0/$1 are the
 * macro arguments in this assembler's syntax.
 */
248 * Ensures the stack is safely aligned, usually in preparation for an external branch
249 * arg0: temp register for storing the stack offset
250 * arg1: temp register for storing the previous stack pointer
254 * For armv7k ABI, the stack needs to be 16-byte aligned
256 #if __BIGGEST_ALIGNMENT__ > 4
257 and $0, sp, #0x0F // sp mod 16-bytes
258 cmp $0, #4 // need space for the sp on the stack
259 addlt $0, $0, #0x10 // make room if needed, but keep stack aligned
260 mov $1, sp // get current sp
261 sub sp, sp, $0 // align stack
262 str $1, [sp] // store previous sp on stack
267 * Restores the stack pointer to its previous value following an ALIGN_STACK call
270 #if __BIGGEST_ALIGNMENT__ > 4
276 * Checks that cpu is currently in the expected mode, panics if not.
277 * arg0: the expected mode, should be one of the PSR_*_MODE defines
/* Uses banked sp as scratch; safe because no context has been saved yet. */
279 .macro VERIFY_EXCEPTION_MODE
280 mrs sp, cpsr // Read cpsr
281 and sp, sp, #PSR_MODE_MASK // Extract current mode
282 cmp sp, $0 // Check specified mode
284 bne EXT(ExceptionVectorPanic)
288 * Checks previous processor mode. If usermode, will execute the code
289 * following the macro to handle the userspace exception. Otherwise,
290 * will branch to a ELSE_IF_KERNELMODE_EXCEPTION call with the same
292 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
294 .macro IF_USERMODE_EXCEPTION
296 and sp, sp, #PSR_MODE_MASK // Is it from user?
297 cmp sp, #PSR_USER_MODE
299 cmp sp, #PSR_IRQ_MODE
301 cmp sp, #PSR_FIQ_MODE
308 * Handles an exception taken from kernelmode (IRQ/FIQ/SVC/etc).
309 * Places the processor into the correct mode and executes the
310 * code following the macro to handle the kernel exception.
311 * Intended to be paired with a prior call to IF_USERMODE_EXCEPTION.
312 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
/* Re-enter the pre-exception mode with IRQs masked before handling. */
314 .macro ELSE_IF_KERNELMODE_EXCEPTION
316 cpsid i, #PSR_IRQ_MODE
319 cpsid i, #PSR_FIQ_MODE
322 cpsid i, #PSR_SVC_MODE
/*
 * fleh_undef body: first-level undefined-instruction handler.
 * User path: save context to the thread PCB, switch to SVC on the kernel
 * stack, save VFP state, then call sleh_undef.  Kernel path: build an
 * arm_saved_state frame on the SVC stack (consumed by fbt_invop) and
 * call sleh_undef from there.  NOTE(review): lines are missing from this
 * excerpt, so some #if blocks appear unterminated.
 */
327 VERIFY_EXCEPTION_MODE PSR_UND_MODE
328 mrs sp, spsr // For check the previous mode
329 tst sp, #PSR_TF // Is it Thumb?
332 IF_USERMODE_EXCEPTION undef
333 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
334 add sp, sp, ACT_PCBDATA // Get current thread PCB pointer
/* '^' form stores the USER-mode banked r13/r14 into the PCB. */
336 stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
337 mov r7, #0 // Zero the frame pointer
340 mov r0, sp // Store arm_saved_state pointer
343 str lr, [sp, SS_PC] // Save user mode pc register
346 str r4, [sp, SS_CPSR] // Save user mode cpsr
/* Switch to SVC mode on the thread's kernel stack for the C handler. */
348 cpsid i, #PSR_SVC_MODE
349 mrs r3, cpsr // Read cpsr
350 msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
351 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
352 ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
353 #if __ARM_USER_PROTECT__
354 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
355 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
356 mov r3, #0 // Load kernel asid
357 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
362 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace
364 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
365 bl EXT(timer_state_event_user_to_kernel)
366 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
370 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
371 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
372 mov r3, #FPSCR_DEFAULT // Load up the default FPSCR value...
373 fmxr fpscr, r3 // And shove it into FPSCR
374 add r1, r9, ACT_UVFP // Reload the pointer to the save state
375 add r0, r9, ACT_PCBDATA // Reload the VFP save state argument
377 mov r1, #0 // Clear the VFP save state argument
378 add r0, r9, ACT_PCBDATA // Reload arm_saved_state pointer
381 bl EXT(sleh_undef) // Call second level handler
382 // sleh will enable interrupt
385 ELSE_IF_KERNELMODE_EXCEPTION undef
387 * We have a kernel stack already, and I will use it to save contexts
391 // We need a frame for backtracing. The LR here is the LR of supervisor mode, not the location where the exception
392 // took place. We'll store that later after we switch to undef mode and pull out the LR from there.
394 // This frame is consumed by fbt_invop. Any changes with the size or location of this frame will probably require
395 // changes in fbt_invop also.
396 stmfd sp!, { r7, lr }
399 sub sp, sp, EXC_CTX_SIZE // Reserve for arm_saved_state
401 stmia sp, {r0-r12} // Save on supervisor mode stack
405 add r7, sp, EXC_CTX_SIZE // Save frame pointer
409 str r4, [sp, SS_PC] // Save complete
411 str r4, [sp, SS_CPSR]
418 r7 - frame pointer state
423 ldr r0, [ip, SS_PC] // Get the exception pc to store later
426 add ip, ip, EXC_CTX_SIZE // Send stack pointer to debugger
431 str ip, [sp, SS_SP] // for accessing local variable
435 sub ip, ip, EXC_CTX_SIZE
438 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
/* VFP save area sits above the arm_saved_state, aligned to VSS_ALIGN_NUM. */
439 add r0, sp, SS_SIZE // Get vfp state pointer
440 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
441 add r0, VSS_ALIGN // Get the actual vfp save area
442 mov r5, r0 // Stash the save area in another register
443 bl EXT(vfp_save) // Save the current VFP state to the stack
444 mov r1, r5 // Load the VFP save area argument
445 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
446 fmxr fpscr, r4 // And shove it into FPSCR
448 mov r1, #0 // Clear the facility context argument
450 #if __ARM_USER_PROTECT__
451 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
452 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
455 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
457 mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
458 mov r3, #0 // Load kernel asid
459 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
462 mov r0, sp // Argument
465 bl EXT(sleh_undef) // Call second level handler
/* Restore the pre-exception translation-table/ASID state on return. */
468 #if __ARM_USER_PROTECT__
469 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
470 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
473 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
476 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
477 ldr r11, [r9, ACT_ASID] // Load thread asid
479 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
486 * First Level Exception Handler for Software Interrupt
488 * We assert that only user level can use the "SWI" instruction for a system
489 * call on development kernels, and assume it's true on release.
491 * System call number is stored in r12.
492 * System call arguments are stored in r0 to r6 and r8 (we skip r7)
/*
 * Entry: borrow ABT-mode banked sp to preserve ip across the mode probe,
 * then check whether the SWI came from the kernel (panic) or user.
 * NOTE(review): excerpt is missing lines; conditional branches between
 * these fragments are not all visible.
 */
500 cpsid i, #PSR_ABT_MODE
501 mov sp, ip // Save ip
502 cpsid i, #PSR_SVC_MODE
503 mrs ip, spsr // Check the previous mode
505 cpsid i, #PSR_ABT_MODE
506 mov ip, sp // Restore ip
507 cpsid i, #PSR_SVC_MODE
510 /* Only user mode can use SWI. Panic if the kernel tries. */
512 sub sp, sp, EXC_CTX_SIZE
514 add r0, sp, EXC_CTX_SIZE
516 str r0, [sp, SS_SP] // Save supervisor mode sp
517 str lr, [sp, SS_LR] // Save supervisor mode lr
520 adr r0, L_kernel_swi_panic_str // Load panic messages and panic()
/* User SWI: save full user context into the thread PCB. */
525 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
526 add sp, sp, ACT_PCBDATA // Get User PCB
529 /* Check for special mach_absolute_time trap value.
530 * This is intended to be a super-lightweight call to ml_get_timebase(), which
531 * is handrolled assembly and does not use the stack, thus not requiring us to setup a kernel stack. */
534 stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
535 mov r7, #0 // Zero the frame pointer
537 mov r8, sp // Store arm_saved_state pointer
/* srsia stores (lr, spsr) of the SVC return into the PCB slots. */
539 srsia sp, #PSR_SVC_MODE
540 mrs r3, cpsr // Read cpsr
541 msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
542 sub r9, sp, ACT_PCBDATA_PC
544 ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
545 mov r11, r12 // save the syscall vector in a nontrashed register
548 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
549 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
550 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
551 fmxr fpscr, r4 // And shove it into FPSCR
553 #if __ARM_USER_PROTECT__
554 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
555 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
556 mov r3, #0 // Load kernel asid
557 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
562 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling SWI from userspace
564 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
565 bl EXT(timer_state_event_user_to_kernel)
566 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
567 add r8, r9, ACT_PCBDATA // Reload arm_saved_state pointer
569 ldr r10, [r9, ACT_TASK] // Load the current task
571 /* enable interrupts */
572 cpsie i // Enable IRQ
/* r11 == -4 is the fast mach_continuous_time trap. */
574 cmp r11, #-4 // Special value for mach_continuous_time
575 beq fleh_swi_trap_mct
/*
 * SWI dispatch: trace, update kauth cred, then split negative (Mach)
 * vs positive (Unix) syscall numbers, index mach_trap_table, optionally
 * munge arguments (armv7k 16-byte-aligned ABI), call the trap, and store
 * the (possibly 64-bit) return value back into the saved register state.
 */
582 /* trace the syscall */
584 bl EXT(syscall_trace)
587 bl EXT(mach_kauth_cred_uthread_update)
588 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
590 rsbs r5, r11, #0 // make the syscall positive (if negative)
591 ble fleh_swi_unix // positive syscalls are unix (note reverse logic here)
594 /* note that mach_syscall_trace can modify r9, so increment the thread
595 * syscall count before the call : */
596 ldr r2, [r9, TH_MACH_SYSCALLS]
598 str r2, [r9, TH_MACH_SYSCALLS]
/* r6 = &mach_trap_table[r5]; entry size varies with build configuration. */
600 LOAD_ADDR(r1, mach_trap_table) // load mach_trap_table
601 #if MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 12
602 add r11, r5, r5, lsl #1 // syscall * 3
603 add r6, r1, r11, lsl #2 // trap_table + syscall * 12
604 #elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 16
605 add r6, r1, r5, lsl #4 // trap_table + syscall * 16
606 #elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 20
607 add r11, r5, r5, lsl #2 // syscall * 5
608 add r6, r1, r11, lsl #2 // trap_table + syscall * 20
610 #error mach_trap_t size unhandled (see MACH_TRAP_TABLE_ENTRY_SIZE)!
614 LOAD_ADDR(r4, kdebug_enable)
617 movne r0, r8 // ready the reg state pointer as an arg to the call
618 movne r1, r5 // syscall number as 2nd arg
619 COND_EXTERN_BLNE(mach_syscall_trace)
621 adr lr, fleh_swi_exit // any calls from here on out will return to our exit path
622 cmp r5, MACH_TRAP_TABLE_COUNT // check syscall number range
623 bge fleh_swi_mach_error
626 * For arm32 ABI where 64-bit types are aligned to even registers and
627 * 64-bits on stack, we need to unpack registers differently. So
628 * we use the mungers for marshalling in arguments from user space.
629 * Currently this is just ARMv7k.
631 #if __BIGGEST_ALIGNMENT__ > 4
632 sub sp, #0x40 // allocate buffer and keep stack 128-bit aligned
633 // it should be big enough for all syscall arguments
634 ldr r11, [r6, #8] // get mach_trap_table[call_number].mach_trap_arg_munge32
635 teq r11, #0 // check if we have a munger
637 movne r0, r8 // ready the reg state pointer as an arg to the call
638 movne r1, sp // stack will hold arguments buffer
639 blxne r11 // call munger to get arguments from userspace
640 adr lr, fleh_swi_exit // any calls from here on out will return to our exit path
642 bne fleh_swi_mach_error // exit if the munger returned non-zero status
/* Reject traps whose function pointer is kern_invalid. */
645 ldr r1, [r6, #4] // load the syscall vector
647 LOAD_ADDR(r2, kern_invalid) // test to make sure the trap is not kern_invalid
649 beq fleh_swi_mach_error
651 #if __BIGGEST_ALIGNMENT__ > 4
652 mov r0, sp // argument buffer on stack
653 bx r1 // call the syscall handler
655 mov r0, r8 // ready the reg state pointer as an arg to the call
656 bx r1 // call the syscall handler
/* fleh_swi_exit path: store return value(s) and unwind to user. */
660 str r1, [r8, #4] // top of 64-bit return
662 str r0, [r8] // save the return value
666 COND_EXTERN_BLNE(mach_syscall_trace_exit)
669 bl EXT(syscall_trace_exit)
673 bl EXT(throttle_lowpri_io) // throttle_lowpri_io(1);
675 bl EXT(thread_exception_return)
/* fleh_swi_mach_error: raise the bad-trap exception. */
682 bl EXT(exception_triage)
/*
 * Unix syscall path: bump the per-thread unix syscall count and set up
 * args (saved state, thread, uthread, proc).  The pc-relative computed
 * branch below dispatches the negative "cache/cthread" fast traps.
 */
687 ldr r1, [r9, TH_UNIX_SYSCALLS]
688 mov r0, r8 // reg state structure is arg
690 str r1, [r9, TH_UNIX_SYSCALLS]
691 mov r1, r9 // current thread in arg1
692 ldr r2, [r9, TH_UTHREAD] // current uthread in arg2
693 ldr r3, [r10, TASK_BSD_INFO] // current proc in arg3
/* Jump table: pc + r3*4 selects one of the fast-trap branches below. */
700 addls pc, pc, r3, LSL#2
702 b icache_invalidate_trap
704 b thread_set_cthread_trap
705 b thread_get_cthread_trap
/*
 * Fast cache traps.  Both clamp the requested range to VM_MAX_ADDRESS,
 * install cache_trap_jmp as the thread recovery routine (so a fault on
 * the user range is caught), temporarily switch to the user TTB/ASID
 * under __ARM_USER_PROTECT__, do the cache maintenance, then restore
 * the kernel translation state and the saved recovery routine.
 */
707 icache_invalidate_trap:
709 cmp r3, VM_MAX_ADDRESS
710 subhi r3, r3, #1<<MMU_CLINE
712 adr r11, cache_trap_jmp
713 ldr r6, [r9, TH_RECOVER] // Save existing recovery routine
714 str r11, [r9, TH_RECOVER]
715 #if __ARM_USER_PROTECT__
716 ldr r5, [r9, ACT_UPTW_TTB] // Load thread ttb
717 mcr p15, 0, r5, c2, c0, 0 // Set TTBR0
718 ldr r5, [r9, ACT_ASID] // Load thread asid
719 mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
/* Clean D-cache then invalidate I-cache over the user range. */
725 bl EXT(CleanPoU_DcacheRegion)
728 bl EXT(InvalidatePoU_IcacheRegion)
729 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
730 #if __ARM_USER_PROTECT__
731 ldr r4, [r9, ACT_KPTW_TTB] // Load kernel ttb
732 mcr p15, 0, r4, c2, c0, 0 // Set TTBR0
733 mov r4, #0 // Load kernel asid
734 mcr p15, 0, r4, c13, c0, 1 // Set CONTEXTIDR
737 str r6, [r9, TH_RECOVER]
738 bl EXT(thread_exception_return)
/* Second trap (label missing in excerpt; flushes D-cache via syscall). */
743 cmp r3, VM_MAX_ADDRESS
744 subhi r3, r3, #1<<MMU_CLINE
746 adr r11, cache_trap_jmp
747 ldr r4, [r9, TH_RECOVER] // Save existing recovery routine
748 str r11, [r9, TH_RECOVER]
749 #if __ARM_USER_PROTECT__
750 ldr r6, [r9, ACT_UPTW_TTB] // Load thread ttb
751 mcr p15, 0, r6, c2, c0, 0 // Set TTBR0
752 ldr r5, [r9, ACT_ASID] // Load thread asid
753 mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
756 bl EXT(flush_dcache_syscall)
757 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
758 #if __ARM_USER_PROTECT__
759 ldr r5, [r9, ACT_KPTW_TTB] // Load kernel ttb
760 mcr p15, 0, r5, c2, c0, 0 // Set TTBR0
761 mov r5, #0 // Load kernel asid
762 mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
765 str r4, [r9, TH_RECOVER]
766 bl EXT(thread_exception_return)
/*
 * cthread (TLS pointer) fast traps, cache-trap fault recovery path
 * (raises EXC_BAD_ACCESS / KERN_INVALID_ADDRESS with the faulting pc
 * backed up by one instruction), and the mach_continuous_time fast trap.
 */
769 thread_set_cthread_trap:
770 bl EXT(thread_set_cthread_self)
771 bl EXT(thread_exception_return)
774 thread_get_cthread_trap:
775 bl EXT(thread_get_cthread_self)
776 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
777 add r1, r9, ACT_PCBDATA // Get User PCB
778 str r0, [r1, SS_R0] // set return value
779 bl EXT(thread_exception_return)
/* Recovery target for faults taken during the cache traps above. */
783 #if __ARM_USER_PROTECT__
784 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
785 ldr r5, [r9, ACT_KPTW_TTB] // Load kernel ttb
786 mcr p15, 0, r5, c2, c0, 0 // Set TTBR0
787 mov r5, #0 // Load kernel asid
788 mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
791 mrc p15, 0, r3, c6, c0 // Read Fault Address
793 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
794 add r0, r9, ACT_PCBDATA // Get User PCB
795 ldr r1, [r0, SS_PC] // Save user mode pc register as pc
796 sub r1, r1, #4 // Backtrack current pc
797 str r1, [r0, SS_PC] // pc at cache assist swi
798 str r3, [r0, SS_VADDR] // Fault Address
799 mov r0, #EXC_BAD_ACCESS
800 mov r2, KERN_INVALID_ADDRESS
807 bl EXT(exception_triage)
/* mach_continuous_time fast trap: 64-bit result into saved r0/r1. */
811 bl EXT(mach_continuous_time)
812 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
813 add r9, r9, ACT_PCBDATA_R0 // Get User register state
814 stmia r9, {r0, r1} // set 64-bit return value
815 bl EXT(thread_exception_return)
/* mach_absolute_time fast trap: no kernel stack needed. */
820 bl EXT(ml_get_timebase) // ml_get_timebase() (64-bit return)
823 movs pc, lr // Return to user
826 L_kernel_swi_panic_str:
827 .asciz "fleh_swi: took SWI from kernel mode\n"
831 * First Level Exception Handler for Prefetching Abort.
835 .globl EXT(fleh_prefabt)
/*
 * User path: save context + IFAR/IFSR to the PCB, switch to SVC on the
 * kernel stack, save VFP, call sleh_abort(state, T_PREFETCH_ABT).
 * Kernel path: build the frame on the existing kernel stack instead.
 */
838 VERIFY_EXCEPTION_MODE PSR_ABT_MODE
841 IF_USERMODE_EXCEPTION prefabt
842 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
843 add sp, sp, ACT_PCBDATA // Get User PCB
845 stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
846 mov r7, #0 // Zero the frame pointer
848 mov r0, sp // Store arm_saved_state pointer
850 str lr, [sp, SS_PC] // Save user mode pc register as pc
851 mrc p15, 0, r1, c6, c0, 2 // Read IFAR
852 str r1, [sp, SS_VADDR] // and fault address of pcb
854 mrc p15, 0, r5, c5, c0, 1 // Read Fault Status
855 str r5, [sp, SS_STATUS] // Save fault status register to pcb
858 str r4, [sp, SS_CPSR] // Save user mode cpsr
860 cpsid i, #PSR_SVC_MODE
861 mrs r3, cpsr // Read cpsr
862 msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
863 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
864 ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
867 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
868 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
869 mov r3, #FPSCR_DEFAULT // Load up the default FPSCR value...
870 fmxr fpscr, r3 // And shove it into FPSCR
872 #if __ARM_USER_PROTECT__
873 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
874 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
875 mov r3, #0 // Load kernel asid
876 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
881 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace
883 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
884 bl EXT(timer_state_event_user_to_kernel)
885 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
888 add r0, r9, ACT_PCBDATA // Reload arm_saved_state pointer
889 mov r1, T_PREFETCH_ABT // Pass abort type
890 bl EXT(sleh_abort) // Call second level handler
891 // Sleh will enable interrupt
894 ELSE_IF_KERNELMODE_EXCEPTION prefabt
896 * We have a kernel stack already, and I will use it to save contexts:
898 * | VFP saved state |
899 * |------------------|
900 * | ARM saved state |
901 * SP ------------------
905 sub sp, sp, EXC_CTX_SIZE
907 add r0, sp, EXC_CTX_SIZE
909 str r0, [sp, SS_SP] // Save supervisor mode sp
910 str lr, [sp, SS_LR] // Save supervisor mode lr
912 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
915 add r0, sp, SS_SIZE // Get vfp state pointer
916 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
917 add r0, VSS_ALIGN // Get the actual vfp save area
918 bl EXT(vfp_save) // Save the current VFP state to the stack
919 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
920 fmxr fpscr, r4 // And shove it into FPSCR
922 #if __ARM_USER_PROTECT__
923 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
924 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
927 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
929 mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
930 mov r3, #0 // Load kernel asid
931 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
936 str r4, [sp, SS_PC] // Save pc
938 mrc p15, 0, r5, c6, c0, 2 // Read IFAR
939 str r5, [sp, SS_VADDR] // and fault address of pcb
940 mrc p15, 0, r5, c5, c0, 1 // Read (instruction) Fault Status
941 str r5, [sp, SS_STATUS] // Save fault status register to pcb
944 str r4, [sp, SS_CPSR]
948 mov r1, T_PREFETCH_ABT // Pass abort type
949 bl EXT(sleh_abort) // Call second level handler
/* Restore pre-exception translation state; falls into load_and_go_sys. */
952 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
953 #if __ARM_USER_PROTECT__
954 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
957 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
960 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
961 ldr r11, [r9, ACT_ASID] // Load thread asid
963 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
971 * First Level Exception Handler for Data Abort
975 .globl EXT(fleh_dataabt)
/*
 * Mirrors fleh_prefabt but reads DFSR/DFAR (c5/c6 c0 op2=0) and passes
 * T_DATA_ABT to sleh_abort.  User path saves to the PCB; kernel path
 * saves to the existing kernel stack.
 */
978 VERIFY_EXCEPTION_MODE PSR_ABT_MODE
980 IF_USERMODE_EXCEPTION dataabt
981 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
982 add sp, sp, ACT_PCBDATA // Get User PCB
984 stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
985 mov r7, #0 // Zero the frame pointer
988 mov r0, sp // Store arm_saved_state pointer
991 str lr, [sp, SS_PC] // Save user mode pc register
994 str r4, [sp, SS_CPSR] // Save user mode cpsr
996 mrc p15, 0, r5, c5, c0 // Read Fault Status
997 mrc p15, 0, r6, c6, c0 // Read Fault Address
998 str r5, [sp, SS_STATUS] // Save fault status register to pcb
999 str r6, [sp, SS_VADDR] // Save fault address to pcb
1001 cpsid i, #PSR_SVC_MODE
1002 mrs r3, cpsr // Read cpsr
1003 msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
1004 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1005 ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
1008 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1009 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
1010 mov r3, #FPSCR_DEFAULT // Load up the default FPSCR value...
1011 fmxr fpscr, r3 // And shove it into FPSCR
1013 #if __ARM_USER_PROTECT__
1014 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1015 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1016 mov r3, #0 // Load kernel asid
1017 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1022 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace
1024 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1025 bl EXT(timer_state_event_user_to_kernel)
1026 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1029 add r0, r9, ACT_PCBDATA // Reload arm_saved_state pointer
1030 mov r1, T_DATA_ABT // Pass abort type
1031 bl EXT(sleh_abort) // Call second level handler
1032 // Sleh will enable irq
1035 ELSE_IF_KERNELMODE_EXCEPTION dataabt
1037 * We have a kernel stack already, and I will use it to save contexts:
1038 * ------------------
1039 * | VFP saved state |
1040 * |------------------|
1041 * | ARM saved state |
1042 * SP ------------------
1046 sub sp, sp, EXC_CTX_SIZE
1048 add r0, sp, EXC_CTX_SIZE
1050 str r0, [sp, SS_SP] // Save supervisor mode sp
1051 str lr, [sp, SS_LR] // Save supervisor mode lr
1053 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1056 add r0, sp, SS_SIZE // Get vfp state pointer
1057 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1058 add r0, VSS_ALIGN // Get the actual vfp save area
1059 bl EXT(vfp_save) // Save the current VFP state to the stack
1060 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1061 fmxr fpscr, r4 // And shove it into FPSCR
1067 str r4, [sp, SS_CPSR]
1069 #if __ARM_USER_PROTECT__
1070 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1071 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1074 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1076 mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
1077 mov r3, #0 // Load kernel asid
1078 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1081 mrc p15, 0, r5, c5, c0 // Read Fault Status
1082 mrc p15, 0, r6, c6, c0 // Read Fault Address
1083 str r5, [sp, SS_STATUS] // Save fault status register to pcb
1084 str r6, [sp, SS_VADDR] // Save fault address to pcb
1086 mov r0, sp // Argument
1088 mov r1, T_DATA_ABT // Pass abort type
1089 bl EXT(sleh_abort) // Call second level handler
/* Restore pre-exception translation state before the common return path. */
1092 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1093 #if __ARM_USER_PROTECT__
1094 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
1097 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
1100 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
1101 ldr r11, [r9, ACT_ASID] // Load thread asid
1103 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
/*
 * Common return-to-kernel path (load_and_go_sys): if the interrupted
 * context had IRQs enabled, preemption count is zero, and an AST_URGENT
 * is pending, handle the AST first (with a fake frame for backtracing),
 * then restore VFP and the full register context and return with
 * `movs pc, lr` (which also restores CPSR from SPSR).
 */
1108 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1110 ldr r4, [sp, SS_CPSR] // Load saved cpsr
1111 tst r4, #PSR_IRQF // Test IRQ set
1112 bne lags1 // Branch if IRQ disabled
1114 cpsid i // Disable IRQ
1115 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1116 movs r2, r2 // Test if null
1117 ldr r8, [r9, ACT_CPUDATAP] // Get current cpu
1118 bne lags1 // Branch if count not null
1119 ldr r5, [r8, CPU_PENDING_AST] // Get ASTs
1120 ands r5, r5, AST_URGENT // Get the requests we do honor
1121 beq lags1 // Branch if no ASTs
1122 #if __ARM_USER_PROTECT__
1123 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1124 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1127 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1129 mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
1130 mov r3, #0 // Load kernel asid
1131 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1134 ldr lr, [sp, SS_LR] // Restore the link register
1135 stmfd sp!, {r7, lr} // Push a fake frame
1138 bl EXT(ast_taken_kernel) // Handle AST_URGENT
1141 ldmfd sp!, {r7, lr} // Pop the fake frame
1142 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1143 ldr r8, [r9, ACT_CPUDATAP] // Get current cpu
1144 #if __ARM_USER_PROTECT__
1145 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
1148 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
1151 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
1152 ldr r11, [r9, ACT_ASID] // Load thread asid
1154 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
/* lags1: restore the saved context and return to the interrupted mode. */
1160 mov ip, sp // Save pointer to contexts for abort mode
1161 ldr sp, [ip, SS_SP] // Restore stack pointer
1163 cpsid if, #PSR_ABT_MODE
1167 ldr r4, [sp, SS_CPSR]
1168 msr spsr_cxsf, r4 // Restore spsr
1170 clrex // clear exclusive memory tag
1171 #if __ARM_ENABLE_WFE_
1176 add r0, sp, SS_SIZE // Get vfp state pointer
1177 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1178 add r0, VSS_ALIGN // Get the actual vfp save area
1179 bl EXT(vfp_load) // Load the desired VFP state from the stack
1182 ldr lr, [sp, SS_PC] // Restore lr
1184 ldmia sp, {r0-r12} // Restore other registers
1186 movs pc, lr // Return to sys (svc, irq, fiq)
1189 * First Level Exception Handler for address exception
1194 .globl EXT(fleh_addrexc)
1201 * First Level Exception Handler for IRQ
1202 * Current mode : IRQ
1203 * IRQ and FIQ are always disabled while running in the IRQ handler
1204 * We do not permit nested interrupts.
1206 * Saving area: from user : PCB.
1207 * from kernel : interrupt stack.
1212 .globl EXT(fleh_irq)
1217 cpsie a // Re-enable async aborts
1220 tst sp, #0x0f // From user? or kernel? (test mode bits of the saved SPSR)
// --- IRQ taken from user mode: save the full user context into the thread PCB ---
1224 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW (current thread pointer)
1225 add sp, sp, ACT_PCBDATA // Get User PCB
1226 stmia sp, {r0-r12, sp, lr}^ // Save user r0-r12 and the user-banked sp/lr
1227 mov r7, #0 // Zero the frame pointer
1231 str r4, [sp, SS_CPSR] // Save the interrupted mode's CPSR (r4 = saved SPSR)
1232 mov r5, sp // Saved context in r5
1233 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1234 ldr r6, [r9, ACT_CPUDATAP] // Get current cpu
1235 ldr sp, [r6, CPU_ISTACKPTR] // Set interrupt stack
1236 cpsid i, #PSR_SVC_MODE // Switch to SVC mode (IRQ masked) to set its banked sp
1237 ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack
1238 cpsid i, #PSR_IRQ_MODE // Back to IRQ mode, IRQ still masked
1241 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1242 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
1243 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1244 fmxr fpscr, r4 // And shove it into FPSCR
1246 #if __ARM_USER_PROTECT__
1247 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1248 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1249 mov r3, #0 // Load kernel asid
1250 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1253 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1254 bl EXT(timer_state_event_user_to_kernel) // Account the user->kernel transition
1255 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1257 #if CONFIG_TELEMETRY
1258 LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
1263 mov r1, #0 // (not a PMI record)
1264 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1265 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
// --- IRQ taken from kernel mode: save context on the supervisor stack ---
1272 cpsid i, #PSR_SVC_MODE // Switch to SVC mode to use the interrupted kernel stack
1274 sub sp, sp, EXC_CTX_SIZE // Carve out an exception context frame
1276 add r0, sp, EXC_CTX_SIZE // r0 = pre-frame supervisor sp
1278 str r0, [sp, SS_SP] // Save supervisor mode sp
1279 str lr, [sp, SS_LR] // Save supervisor mode lr
1281 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1284 add r0, sp, SS_SIZE // Get vfp state pointer
1285 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1286 add r0, VSS_ALIGN // Get the actual vfp save area
1287 bl EXT(vfp_save) // Save the current VFP state to the stack
1288 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1289 fmxr fpscr, r4 // And shove it into FPSCR
1291 #if __ARM_USER_PROTECT__
1292 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0 (kept in r10 to restore on exit)
1293 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1294 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1295 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR (kept in r11 to restore on exit)
1296 mov r3, #0 // Load kernel asid
1297 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1300 mov r5, sp // Saved context in r5
1302 cpsid i, #PSR_IRQ_MODE // Back to IRQ mode to read the banked lr/spsr
1304 str lr, [r5, SS_PC] // Save LR as the return PC
1306 str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode
1308 ldr sp, [r9, ACT_CPUDATAP] // Get current cpu
1309 ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack
1311 #if CONFIG_TELEMETRY
1312 LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
1317 mov r1, #0 // (not a PMI record)
1318 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1319 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
// --- Common path: dispatch the interrupt with preemption disabled ---
1324 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1325 add r2, r2, #1 // Increment count
1326 str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
1328 LOAD_ADDR(r8, kdebug_enable) // r8 = &kdebug_enable for the trace checks below
1332 COND_EXTERN_BLNE(interrupt_trace) // Emit interrupt-entry tracepoint if enabled
1334 bl EXT(interrupt_stats) // Record interrupt statistics
1335 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1336 ldr r4, [r9, ACT_CPUDATAP] // Get current cpu
1337 str r5, [r4, CPU_INT_STATE] // Saved context in cpu_int_state
1338 ldr r3, [r4, CPU_STAT_IRQ] // Get IRQ count
1339 add r3, r3, #1 // Increment count
1340 str r3, [r4, CPU_STAT_IRQ] // Update IRQ count
1341 ldr r3, [r4, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count
1342 add r3, r3, #1 // Increment count
1343 str r3, [r4, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
1344 ldr r0, [r4, INTERRUPT_TARGET] // Arg 0: registered handler target
1345 ldr r1, [r4, INTERRUPT_REFCON] // Arg 1: registered handler refcon
1346 ldr r2, [r4, INTERRUPT_NUB] // Arg 2: interrupt nub
1347 ldr r3, [r4, INTERRUPT_SOURCE] // Arg 3: interrupt source
1348 ldr r5, [r4, INTERRUPT_HANDLER] // Call second level exception handler
1352 COND_EXTERN_BLNE(interrupt_trace_exit) // Emit interrupt-exit tracepoint if enabled
1354 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
// --- Fold the current timebase into the kernel entropy buffer ---
1355 bl EXT(ml_get_timebase) // get current timebase
1356 LOAD_ADDR(r3, EntropyData) // r3 = &EntropyData
1357 ldr r2, [r3, ENTROPY_INDEX_PTR] // Current write cursor into the entropy buffer
1358 add r1, r3, ENTROPY_DATA_SIZE // End-of-buffer sentinel
1361 addge r2, r3, ENTROPY_BUFFER // Wrap the cursor to the buffer start when past the end
1363 eor r0, r0, r4, ROR #9 // Mix timebase into the previous entropy word
1364 str r0, [r2] // Update entropy buffer entry
1365 str r2, [r3, ENTROPY_INDEX_PTR] // Store the advanced cursor
// --- Dispatch done: drop the preemption disable and return ---
1369 ldr r4, [r9, ACT_CPUDATAP] // Get current cpu
1370 str r5, [r4, CPU_INT_STATE] // Clear cpu_int_state
1371 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1373 cmp r2, #0 // verify positive count
1377 adr r0, L_preemption_count_zero_str // Panic message for preemption-count underflow
1382 sub r2, r2, #1 // Decrement count
1383 str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
1385 mrs r0, spsr // For check the previous mode
1387 cpsid i, #PSR_SVC_MODE // Switch to SVC mode for the return path
1389 tst r0, #0x0f // Check if the previous is from user
1390 ldreq sp, [r9, TH_KSTACKPTR] // ...If so, reload the kernel stack pointer
1391 beq load_and_go_user // ...and return
1393 #if __ARM_USER_PROTECT__
1394 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
1397 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
1400 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
1401 ldr r11, [r9, ACT_ASID] // Load thread asid
1403 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
1409 L_preemption_count_zero_str:
1410 .ascii "locore.s: preemption count is zero \000"
1413 * First Level Exception Handler for DEC
1414 * Current mode : IRQ
1415 * IRQ and FIQ are always disabled while running in this handler
1416 * We do not permit nested interrupts.
1418 * Saving area: from user : PCB.
1419 * from kernel : interrupt stack.
1424 .globl EXT(fleh_decirq)
1429 cpsie af // Re-enable async aborts/FIQ
1432 tst sp, #0x0f // From user? or kernel? (test mode bits of the saved SPSR)
1433 bne fleh_decirq_kernel // Non-zero mode bits => interrupted the kernel
// --- Decrementer IRQ from user mode: save the user context into the thread PCB ---
1436 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW (current thread pointer)
1437 add sp, sp, ACT_PCBDATA // Get User PCB
1438 stmia sp, {r0-r12, sp, lr}^ // Save user r0-r12 and the user-banked sp/lr
1439 mov r7, #0 // Zero the frame pointer
1443 str r4, [sp, SS_CPSR] // Save the interrupted mode's CPSR (r4 = saved SPSR)
1444 mov r5, sp // Saved context in r5
1445 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1446 ldr r6, [r9, ACT_CPUDATAP] // Get current cpu
1447 ldr sp, [r6, CPU_ISTACKPTR] // Set interrupt stack
1448 cpsid i, #PSR_SVC_MODE // Switch to SVC mode to set its banked sp
1449 ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack
1450 cpsid i, #PSR_IRQ_MODE // Back to IRQ mode, IRQ still masked
1453 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1454 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
1455 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1456 fmxr fpscr, r4 // And shove it into FPSCR
1458 #if __ARM_USER_PROTECT__
1459 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1460 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1461 mov r3, #0 // Load kernel asid
1462 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1465 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1466 bl EXT(timer_state_event_user_to_kernel) // Account the user->kernel transition
1467 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1469 #if CONFIG_TELEMETRY
1470 LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
1475 mov r1, #0 // (not a PMI record)
1476 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1477 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1481 b fleh_decirq_handler // Join the common dispatch path
// --- Decrementer IRQ from kernel mode: save context on the supervisor stack ---
1484 cpsid i, #PSR_SVC_MODE // Switch to SVC mode to use the interrupted kernel stack
1486 sub sp, sp, EXC_CTX_SIZE // Carve out an exception context frame
1488 add r0, sp, EXC_CTX_SIZE // r0 = pre-frame supervisor sp
1490 str r0, [sp, SS_SP] // Save supervisor mode sp
1491 str lr, [sp, SS_LR] // Save supervisor mode lr
1493 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1496 add r0, sp, SS_SIZE // Get vfp state pointer
1497 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1498 add r0, VSS_ALIGN // Get the actual vfp save area
1499 bl EXT(vfp_save) // Save the current VFP state to the stack
1500 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1501 fmxr fpscr, r4 // And shove it into FPSCR
1503 #if __ARM_USER_PROTECT__
1504 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0 (kept in r10 to restore on exit)
1505 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1506 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1507 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR (kept in r11 to restore on exit)
1508 mov r3, #0 // Load kernel asid
1509 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1512 mov r5, sp // Saved context in r5
1514 cpsid i, #PSR_IRQ_MODE // Back to IRQ mode to read the banked lr/spsr
1516 str lr, [r5, SS_PC] // Save LR as the return PC
1518 str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode
1520 ldr sp, [r9, ACT_CPUDATAP] // Get current cpu
1521 ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack
1523 #if CONFIG_TELEMETRY
1524 LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
1529 mov r1, #0 // (not a pmi record)
1530 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1531 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
// --- Common path: dispatch the decrementer with preemption disabled ---
1535 fleh_decirq_handler:
1536 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1537 add r2, r2, #1 // Increment count
1538 str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
1539 ldr r2, [r9, ACT_CPUDATAP] // Get current cpu
1540 str r5, [r2, CPU_INT_STATE] // Saved context in cpu_int_state
1541 ldr r3, [r2, CPU_STAT_IRQ] // Get IRQ count
1542 add r3, r3, #1 // Increment count
1543 str r3, [r2, CPU_STAT_IRQ] // Update IRQ count
1544 ldr r3, [r2, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count
1545 add r3, r3, #1 // Increment count
1546 str r3, [r2, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
1548 LOAD_ADDR(r4, kdebug_enable) // r4 = &kdebug_enable for the trace checks below
1551 movne r0, r5 // Pass saved context
1552 COND_EXTERN_BLNE(interrupt_trace) // Emit interrupt-entry tracepoint if enabled
1554 bl EXT(interrupt_stats) // Record interrupt statistics
1556 bl EXT(rtclock_intr) // Call second level exception handler
1559 COND_EXTERN_BLNE(interrupt_trace_exit) // Emit interrupt-exit tracepoint if enabled
1562 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1568 * First Level Exception Handler for FIQ
1569 * Current mode : FIQ
1570 * IRQ and FIQ are always disabled while running in FIQ handler
1571 * We do not permit nested interrupts.
1573 * Saving area: from user : PCB.
1574 * from kernel : interrupt stack.
1576 * We have 7 added shadow registers in FIQ mode for fast services.
1577 * So all we have to save is just 8 general registers and LR.
1578 * But if the current thread was running in user mode before the FIQ interrupt,
1579 * all user registers are saved for the ast handler routine.
1583 .globl EXT(fleh_fiq_generic)
1585 LEXT(fleh_fiq_generic)
1586 str r11, [r10] // Clear the FIQ source
// Maintain the software 64-bit timebase and decrementer using the FIQ-banked
// registers (r8 = cpu data pointer, r10/r11 = FIQ source, r12/r13 = scratch).
1588 ldr r13, [r8, CPU_TIMEBASE_LOW] // Load TBL
1589 adds r13, r13, #1 // Increment TBL
1590 str r13, [r8, CPU_TIMEBASE_LOW] // Store TBL
1591 ldreq r13, [r8, CPU_TIMEBASE_HIGH] // Load TBU (only when TBL wrapped to zero)
1592 addeq r13, r13, #1 // Increment TBU
1593 streq r13, [r8, CPU_TIMEBASE_HIGH] // Store TBU
1594 subs r12, r12, #1 // Decrement, DEC
1595 str r12, [r8, CPU_DECREMENTER] // Store DEC
1596 subspl pc, lr, #4 // Return unless DEC < 0 (fall through to fleh_dec on expiry)
1601 .globl EXT(fleh_dec)
1603 mrs sp, spsr // Get the spsr
1605 tst sp, #0x0f // From user? or kernel?
// --- Decrementer expired in user mode: save the user context into the thread PCB ---
1609 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
1610 add sp, sp, ACT_PCBDATA // Get User PCB
1612 stmia sp, {r0-r12, sp, lr}^ // Save user r0-r12 and the user-banked sp/lr
1613 mov r7, #0 // Zero the frame pointer
1618 str r4, [sp, SS_CPSR] // Save the interrupted mode's CPSR (r4 = saved SPSR)
1620 sub sp, sp, ACT_PCBDATA // Back from the PCB to the thread pointer
1621 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu
1622 ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack
1624 cpsid i, #PSR_SVC_MODE // Switch to SVC mode to set its banked sp
1625 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1626 ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack
1629 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1630 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
1631 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1632 fmxr fpscr, r4 // And shove it into FPSCR
1634 #if __ARM_USER_PROTECT__
1635 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1636 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1637 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1638 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
1639 mov r3, #0 // Load kernel asid
1640 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1643 mov r0, #1 // Mark this as coming from user context
// --- Decrementer expired in kernel mode ---
1648 tst sp, #PSR_IRQF // Test for IRQ masked
1649 bne 3f // We're on the cpu_signal path
1651 cpsid if, #PSR_SVC_MODE // Switch to SVC mode to use the interrupted kernel stack
1653 sub sp, sp, EXC_CTX_SIZE // Carve out an exception context frame
1655 add r0, sp, EXC_CTX_SIZE // r0 = pre-frame supervisor sp
1657 str r0, [sp, SS_SP] // Save supervisor mode sp
1658 str lr, [sp, SS_LR] // Save supervisor mode lr
1660 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1663 add r0, sp, SS_SIZE // Get vfp state pointer
1664 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1665 add r0, VSS_ALIGN // Get the actual vfp save area
1666 bl EXT(vfp_save) // Save the current VFP state to the stack
1667 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1668 fmxr fpscr, r4 // And shove it into FPSCR
1670 #if __ARM_USER_PROTECT__
1671 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0 (kept in r10 to restore on exit)
1672 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1673 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1674 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR (kept in r11 to restore on exit)
1675 mov r3, #0 // Load kernel asid
1676 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1679 mov r5, sp // Saved context in r5
1681 cpsid if, #PSR_FIQ_MODE // Back to FIQ mode to read the banked lr/spsr
1683 mrc p15, 0, r1, c13, c0, 4 // Read TPIDRPRW
1685 str lr, [r5, SS_PC] // Save LR as the return PC
1687 str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode
1689 ldr r6, [r1, ACT_CPUDATAP] // Get current cpu
1690 ldr r6, [r6, CPU_ISTACKPTR] // Set interrupt stack
1692 mov r0, #0 // Mark this as coming from kernel context
1696 /* cpu_signal path: decrementer fired while IRQ was masked */
1697 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
1698 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu
1699 ldr sp, [sp, CPU_FIQSTACKPTR] // Set fiq stack
1700 sub sp, sp, EXC_CTX_SIZE // Carve out an exception context frame
1704 str r4, [sp, SS_CPSR] // Save the interrupted mode's CPSR (r4 = saved SPSR)
1705 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1708 add r0, sp, SS_SIZE // Get vfp state pointer
1709 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1710 add r0, VSS_ALIGN // Get the actual vfp save area
1711 bl EXT(vfp_save) // Save the current VFP state to the stack
1712 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1713 fmxr fpscr, r4 // And shove it into FPSCR
1715 #if __ARM_USER_PROTECT__
1716 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0 (kept in r10 to restore on exit)
1717 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1718 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1719 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR (kept in r11 to restore on exit)
1720 mov r3, #0 // Load kernel asid
1721 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1726 mov r0, r8 // Get current cpu in arg 0 (r8 = FIQ-banked cpu data pointer)
1727 mov r1, SIGPdec // Decrementer signal in arg1
1730 bl EXT(cpu_signal) // Call cpu_signal
// --- cpu_signal path exit: restore VFP/MMU state and return from FIQ ---
1733 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1736 add r0, sp, SS_SIZE // Get vfp state pointer
1737 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1738 add r0, VSS_ALIGN // Get the actual vfp save area
1739 bl EXT(vfp_load) // Load the desired VFP state from the stack
1742 clrex // clear exclusive memory tag
1743 #if __ARM_ENABLE_WFE_
1746 #if __ARM_USER_PROTECT__
1747 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0 (restore the saved translation base)
1748 mcr p15, 0, r11, c13, c0, 1 // Set CONTEXTIDR (restore the saved asid)
1752 ldmia sp, {r0-r12} // Restore saved registers
1753 movs pc, lr // Return from fiq
// --- Decrementer-as-interrupt path: switch to IRQ mode and dispatch rtclock ---
1756 cpsid i, #PSR_IRQ_MODE
1758 mov sp, r6 // Restore the stack pointer
1760 msr spsr_cxsf, r4 // Restore the spsr
1761 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1762 add r2, r2, #1 // Increment count
1763 str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
1764 ldr r4, [r9, ACT_CPUDATAP] // Get current cpu
1765 str r5, [r4, CPU_INT_STATE] // Saved context in cpu_int_state
1766 ldr r3, [r4, CPU_STAT_IRQ] // Get IRQ count
1767 add r3, r3, #1 // Increment count
1768 str r3, [r4, CPU_STAT_IRQ] // Update IRQ count
1769 ldr r3, [r4, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count
1770 add r3, r3, #1 // Increment count
1771 str r3, [r4, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
1772 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1775 mov r8, r0 // Stash our "from_user" boolean value
1776 bl EXT(timer_state_event_user_to_kernel) // Account the user->kernel transition
1777 mov r0, r8 // Restore our "from_user" value
1778 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1781 #if CONFIG_TELEMETRY
1782 LOAD_ADDR(r4, telemetry_needs_record) // Check if a telemetry record was requested...
1786 mov r1, #0 // (not a PMI record)
1787 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1788 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1793 LOAD_ADDR(r4, kdebug_enable) // r4 = &kdebug_enable for the trace checks below
1796 ldrne r1, [r9, ACT_CPUDATAP] // Get current cpu
1797 ldrne r0, [r1, CPU_INT_STATE] // Pass saved context if tracing
1798 COND_EXTERN_BLNE(interrupt_trace) // Emit interrupt-entry tracepoint if enabled
1800 bl EXT(interrupt_stats) // Record interrupt statistics
1802 bl EXT(rtclock_intr) // Call second level exception handler
1805 COND_EXTERN_BLNE(interrupt_trace_exit) // Emit interrupt-exit tracepoint if enabled
1809 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1814 * void thread_syscall_return(kern_return_t r0)
1819 .globl EXT(thread_syscall_return)
1821 LEXT(thread_syscall_return)
// Deposit the kernel return value into the saved user r0, trace the mach
// syscall exit when kdebug is enabled, then continue to load_and_go_user.
1822 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1823 add r1, r9, ACT_PCBDATA // Get User PCB
1824 str r0, [r1, SS_R0] // set return value
1826 LOAD_ADDR(r4, kdebug_enable) // Skip tracing when kdebug is disabled
1829 beq load_and_go_user
1830 ldr r12, [r1, SS_R12] // Load syscall number
1831 rsbs r1, r12, #0 // make the syscall positive (mach trap numbers are negative)
1832 COND_EXTERN_BLGT(mach_syscall_trace_exit) // Trace only genuine mach traps
1837 * void thread_exception_return(void)
1838 * void thread_bootstrap_return(void)
1842 .globl EXT(thread_exception_return)
1843 .globl EXT(thread_bootstrap_return)
1845 LEXT(thread_bootstrap_return)
1847 bl EXT(dtrace_thread_bootstrap) // Notify dtrace of the new thread, then fall through
1851 LEXT(thread_exception_return)
1855 * Restore user mode states and go back to user mode
1857 cpsid i // Disable irq
1858 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1861 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
// Drain any pending ASTs before leaving the kernel; ast_taken_user may
// block or redeliver, so loop until none remain.
1863 ldr r8, [r9, ACT_CPUDATAP] // Get current cpu
1864 ldr r5, [r8, CPU_PENDING_AST] // Get ASTs
1865 cmp r5, #0 // Test if ASTs pending
1866 beq return_to_user_now // Branch if no ASTs
1868 bl EXT(ast_taken_user) // Handle all ASTs (may continue via thread_exception_return)
1870 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1871 b load_and_go_user // Loop back
1877 * Assert that the preemption level is zero prior to the return to user space
1879 ldr r1, [r9, ACT_PREEMPT_CNT] // Load preemption count
1881 beq 0f // Continue if zero, or...
1882 adr r0, L_lagu_panic_str // Load the panic string...
1883 blx EXT(panic) // Finally, panic
1885 ldr r2, [r9, TH_RWLOCK_CNT] // Load RW lock count
1887 beq 0f // Continue if zero, or...
1888 adr r0, L_lagu_rwlock_cnt_panic_str // Load the panic string...
1889 mov r1, r9 // Thread argument for panic string
1890 blx EXT(panic) // Finally, panic
1894 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1895 bl EXT(timer_state_event_kernel_to_user) // Account the kernel->user transition
1896 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1897 ldr r8, [r9, ACT_CPUDATAP] // Get current cpu data
1898 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
1899 #if __ARM_DEBUG__ >= 6
1900 ldr r0, [r9, ACT_DEBUGDATA] // Thread's debug state
1901 ldr r6, [r8, CPU_USER_DEBUG] // Debug state currently installed on this cpu
1902 cmp r0, r6 // test if debug registers need to be changed
1904 bl EXT(arm_debug_set) // argument is already in r0
1905 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1909 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1910 bl EXT(vfp_load) // Load the desired VFP state from ACT_UVFP
1912 add r0, r9, ACT_PCBDATA // Get User PCB
1913 ldr r4, [r0, SS_CPSR] // Get saved cpsr
1914 and r3, r4, #PSR_MODE_MASK // Extract current mode
1915 cmp r3, #PSR_USER_MODE // Check user mode
1917 bne EXT(ExceptionVectorPanic) // Refuse to "return" into a privileged mode
1919 msr spsr_cxsf, r4 // Restore spsr(user mode cpsr)
1920 mov sp, r0 // Get User PCB
1922 clrex // clear exclusive memory tag
1923 #if __ARM_ENABLE_WFE_
1926 #if __ARM_USER_PROTECT__
1927 ldr r3, [r9, ACT_UPTW_TTB] // Load thread ttb
1928 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1929 ldr r2, [r9, ACT_ASID] // Load thread asid
1930 mcr p15, 0, r2, c13, c0, 1 // Set CONTEXTIDR
1933 ldr lr, [sp, SS_PC] // Restore user mode pc
1934 ldmia sp, {r0-r12, sp, lr}^ // Restore the other user mode registers
1935 nop // Hardware problem: spacer after a user-banked ldm^ before the exception return
1936 movs pc, lr // Return to user
1940 .asciz "load_and_go_user: preemption_level %d"
1944 L_lagu_rwlock_cnt_panic_str:
1945 .asciz "load_and_go_user: RW lock count not 0 on thread %p (%u)"
1950 .ascii "Exception Vector: Illegal Mode: 0x%08X\n\000"
1955 .globl EXT(ExceptionVectorPanic)
1957 LEXT(ExceptionVectorPanic)
// Panic path taken when an exception return would enter a non-user mode:
// switch to SVC mode and panic with the illegal CPSR value.
1958 cpsid i, #PSR_SVC_MODE
1961 adr r0, L_evimpanic_str // Panic format string (CPSR passed as argument)
// Generate the LOAD_ADDR support definitions for symbols referenced above.
1965 #include "globals_asm.h"
1967 LOAD_ADDR_GEN_DEF(mach_trap_table)
1968 LOAD_ADDR_GEN_DEF(kern_invalid)
1970 /* vim: set ts=4: */