2 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
57 #include <machine/asm.h>
58 #include <arm/proc_reg.h>
59 #include <pexpert/arm/board_config.h>
60 #include <mach/exception_types.h>
62 #include <mach_assert.h>
63 #include <config_dtrace.h>
66 #define TRACE_SYSCALL 0
69 * Copied to low physical memory in arm_init,
70 * so the kernel must be linked virtually at
71 * 0xc0001000 or higher to leave space for it.
/*
 * Low exception vectors + reset-handler scaffolding.
 * Per the comment above, this region is copied to low physical memory in
 * arm_init; only the reset slot visibly dispatches (via Lreset_low_vector),
 * the other visible slot spins ("b .").  ResetHandlerData is a zero-filled
 * block (rhdSize_NUM bytes) that boot code fills in; the reset path reads
 * BOOT_ARGS / CPU_DATA_ENTRIES / per-cpu reset-handler pointers out of it.
 * NOTE(review): this view of the file is fragmentary -- several original
 * lines are elided (e.g. the remaining low-vector slots, Lreset_low_vector
 * itself, and the #if __ARM_SMP__ that pairs with the #endif below).
 * Confirm any change against the complete file.
 */
76 .globl EXT(ExceptionLowVectorsBase)
78 LEXT(ExceptionLowVectorsBase)
79 adr pc, Lreset_low_vector
84 b . // Address Exception
87 LEXT(ResetPrivateData)
88 .space (480),0 // (filled with 0s)
89 // ExceptionLowVectorsBase + 0x200
// Assist-reset path: fetch the assist reset handler out of ResetHandlerData.
91 adr r4, EXT(ResetHandlerData)
92 ldr r0, [r4, ASSIST_RESET_HANDLER]
95 adr r4, EXT(ResetHandlerData)
96 ldr r1, [r4, CPU_DATA_ENTRIES]
97 ldr r1, [r1, CPU_DATA_PADDR]
98 ldr r5, [r1, CPU_RESET_ASSIST]
// Normal reset path: locate this CPU's cpu_data_entry and reset handler.
101 adr r4, EXT(ResetHandlerData)
102 ldr r0, [r4, BOOT_ARGS]
103 ldr r1, [r4, CPU_DATA_ENTRIES]
106 // physical cpu number is stored in MPIDR Affinity level 0
107 mrc p15, 0, r6, c0, c0, 5 // Read MPIDR
108 and r6, r6, #0xFF // Extract Affinity level 0
110 #error missing Who Am I implementation
114 #endif /* __ARM_SMP__ */
115 // physical cpu number matches cpu number
117 //#error cpu_data_entry is not 16bytes in size
// Index into the cpu_data_entries array: entry size is 16 bytes (r6 << 4).
119 lsl r6, r6, #4 // Get CpuDataEntry offset
120 add r1, r1, r6 // Get cpu_data_entry pointer
121 ldr r1, [r1, CPU_DATA_PADDR]
122 ldr r5, [r1, CPU_RESET_HANDLER]
// NOTE(review): the instruction that sets the condition for blxne (a test
// of r5 against zero, presumably) is elided from this view -- confirm.
124 blxne r5 // Branch to cpu reset handler
125 b . // Unexpected reset
126 .globl EXT(ResetHandlerData)
127 LEXT(ResetHandlerData)
128 .space (rhdSize_NUM),0 // (filled with 0s)
131 .globl EXT(ExceptionLowVectorsEnd)
132 LEXT(ExceptionLowVectorsEnd)
/*
 * High exception vector page.
 * Eight 4-byte slots, each an "adr pc, <stub>".  Each stub reuses the banked
 * sp of the taking mode as scratch: it reads the thread pointer (TPIDRPRW),
 * walks thread -> cpu data (ACT_CPUDATAP) -> patched handler table
 * (CPU_EXC_VECTORS), and jumps through a fixed word offset:
 *   #0x04 undefined instruction   #0x08 swi        #0x0C prefetch abort
 *   #0x10 data abort              #0x14 address exception
 *   #0x18 irq                     #0x1C decirq/fiq (under __ARM_TIME__ guard)
 * The vector slots are position-sensitive; do not insert instructions here.
 * NOTE(review): several labels (e.g. the one for the #0x08 swi stub) and the
 * guard lines pairing with the #else/#endif below are elided from this view.
 */
136 .globl EXT(ExceptionVectorsBase)
138 LEXT(ExceptionVectorsBase)
140 adr pc, Lexc_reset_vector
141 adr pc, Lexc_undefined_inst_vector
142 adr pc, Lexc_swi_vector
143 adr pc, Lexc_prefetch_abort_vector
144 adr pc, Lexc_data_abort_vector
145 adr pc, Lexc_address_exception_vector
146 adr pc, Lexc_irq_vector
148 adr pc, Lexc_decirq_vector
149 #else /* ! __ARM_TIME__ */
151 #endif /* __ARM_TIME__ */
158 Lexc_undefined_inst_vector:
159 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
160 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
161 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
162 ldr pc, [sp, #4] // Branch to exception handler
// NOTE(review): label for this stub (offset #8, the swi slot) elided here.
164 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
165 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
166 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
167 ldr pc, [sp, #8] // Branch to exception handler
168 Lexc_prefetch_abort_vector:
169 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
170 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
171 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
172 ldr pc, [sp, #0xC] // Branch to exception handler
173 Lexc_data_abort_vector:
174 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
175 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
176 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
177 ldr pc, [sp, #0x10] // Branch to exception handler
178 Lexc_address_exception_vector:
179 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
180 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
181 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
182 ldr pc, [sp, #0x14] // Branch to exception handler
// NOTE(review): label for the irq stub (offset #0x18) elided here.
184 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
185 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
186 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
187 ldr pc, [sp, #0x18] // Branch to exception handler
// NOTE(review): label for the decirq stub (offset #0x1C) elided here.
190 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
191 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
192 ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
193 ldr pc, [sp, #0x1C] // Branch to exception handler
194 #else /* ! __ARM_TIME__ */
199 #endif /* __ARM_TIME__ */
201 .fill 984, 4, 0 // Push to the 4KB page boundary
203 .globl EXT(ExceptionVectorsEnd)
204 LEXT(ExceptionVectorsEnd)
/*
 * ExceptionVectorsTable: the handler-address table the vector stubs above
 * load through (CPU_EXC_VECTORS).  Entries are patched during boot (see the
 * comment below) so the vector page itself stays position independent.
 * NOTE(review): several entry labels (reset, swi, data abort, irq, decirq)
 * are elided from this view; only three labels are visible here.
 */
208 * Targets for the exception vectors; we patch these during boot (to allow
209 * for position independent code without complicating the vectors; see start.s).
211 .globl EXT(ExceptionVectorsTable)
212 LEXT(ExceptionVectorsTable)
215 Lundefined_inst_vector:
219 Lprefetch_abort_vector:
223 Laddress_exception_vector:
/*
 * fleh_undef: First Level Exception Handler for Undefined Instruction.
 * Entered in UND mode with banked sp/lr, so sp is usable as scratch until
 * the faulting context is saved.
 *
 * User path: save {r0-r12, sp, lr}^ plus pc/cpsr into the thread PCB,
 * switch to SVC mode on the thread's kernel stack (TH_KSTACKPTR), save user
 * VFP state to ACT_UVFP, reset the IO-tier override, then call sleh_undef
 * (which re-enables interrupts).
 *
 * Kernel path (undef_from_kernel): return to the originating mode (IRQ or
 * SVC), build an arm_saved_state frame + aligned VFP save area on that
 * mode's existing stack, bounce through UND mode to capture the true
 * exception pc/spsr, then call sleh_undef on the frame.
 *
 * NOTE(review): this view is fragmentary -- the LEXT(fleh_undef) label, the
 * Thumb-adjustment branch target, several #endif lines, and parts of the
 * mode-bounce sequence are elided.  Confirm against the complete file
 * before modifying; instruction order here is ABI/mode critical.
 */
232 * First Level Exception Handlers
236 .globl EXT(fleh_reset)
241 * First Level Exception Handler for Undefined Instruction.
245 .globl EXT(fleh_undef)
// Classify the faulting context: Thumb vs ARM, user vs kernel.
248 mrs sp, spsr // Check the previous mode
249 tst sp, #PSR_TF // Is it Thumb?
252 tst sp, #0x0f // Is it from user?
253 bne undef_from_kernel
// --- User path: save the user context into the thread PCB ---
256 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
257 add sp, sp, ACT_PCBDATA // Get current thread PCB pointer
259 stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
260 mov r7, #0 // Zero the frame pointer
263 mov r0, sp // Store arm_saved_state pointer
266 str lr, [sp, SS_PC] // Save user mode pc register
269 str r4, [sp, SS_CPSR] // Save user mode cpsr
// Switch to SVC mode and pick up the kernel stack for this thread.
271 mrs r4, cpsr // Read cpsr
272 cpsid i, #PSR_SVC_MODE
273 mrs r3, cpsr // Read cpsr
274 msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
275 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
276 ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
277 #if __ARM_USER_PROTECT__
278 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
279 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
280 mov r3, #0 // Load kernel asid
281 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
// Sanity check: we must have arrived here from UND mode, else panic.
284 and r0, r4, #PSR_MODE_MASK // Extract current mode
285 cmp r0, #PSR_UND_MODE // Check undef mode
286 bne EXT(ExceptionVectorPanic)
289 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace
291 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
292 bl EXT(timer_state_event_user_to_kernel)
293 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
// Save user VFP state and reset FPSCR to the kernel default.
297 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
298 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
299 mov r3, #FPSCR_DEFAULT // Load up the default FPSCR value...
300 fmxr fpscr, r3 // And shove it into FPSCR
301 add r1, r9, ACT_UVFP // Reload the pointer to the save state
302 add r0, r9, ACT_PCBDATA // Reload the VFP save state argument
304 mov r1, #0 // Clear the VFP save state argument
305 add r0, r9, ACT_PCBDATA // Reload arm_saved_state pointer
308 bl EXT(sleh_undef) // Call second level handler
309 // sleh will enable interrupt
// --- Kernel path: undefined instruction taken while in the kernel ---
313 mrs sp, cpsr // Read cpsr
314 and sp, sp, #PSR_MODE_MASK // Extract current mode
315 cmp sp, #PSR_UND_MODE // Check undef mode
317 bne EXT(ExceptionVectorPanic)
318 mrs sp, spsr // Check the previous mode
321 * We have a kernel stack already, and I will use it to save contexts
327 * See if we came here from IRQ or SVC mode, and go back to that mode
330 and sp, sp, #PSR_MODE_MASK
331 cmp sp, #PSR_IRQ_MODE
332 bne undef_from_kernel_svc
334 cpsid i, #PSR_IRQ_MODE
338 undef_from_kernel_svc:
339 cpsid i, #PSR_SVC_MODE
343 // We need a frame for backtracing. The LR here is the LR of supervisor mode, not the location where the exception
344 // took place. We'll store that later after we switch to undef mode and pull out the LR from there.
346 // This frame is consumed by fbt_invop. Any changes with the size or location of this frame will probably require
347 // changes in fbt_invop also.
348 stmfd sp!, { r7, lr }
351 sub sp, sp, EXC_CTX_SIZE // Reserve for arm_saved_state
353 stmia sp, {r0-r12} // Save on supervisor mode stack
357 add r7, sp, EXC_CTX_SIZE // Save frame pointer
// Bounce to UND mode to capture the banked exception lr/spsr into the frame.
360 mov ip, sp // Stack transfer
362 cpsid i, #PSR_UND_MODE
364 str lr, [ip, SS_PC] // Save complete
366 str r4, [ip, SS_CPSR]
370 * Go back to previous mode for mode specific regs
372 and r4, r4, #PSR_MODE_MASK
373 cmp r4, #PSR_IRQ_MODE
374 bne handle_undef_from_svc
376 cpsid i, #PSR_IRQ_MODE
380 handle_undef_from_svc:
381 cpsid i, #PSR_SVC_MODE
388 r7 - frame pointer state
393 ldr r0, [ip, SS_PC] // Get the exception pc to store later
396 add ip, ip, EXC_CTX_SIZE // Send stack pointer to debugger
401 str ip, [sp, SS_SP] // for accessing local variable
405 sub ip, ip, EXC_CTX_SIZE
// Save in-kernel VFP state into an aligned area above the saved state.
408 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
409 add r0, sp, SS_SIZE // Get vfp state pointer
410 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
411 add r0, VSS_ALIGN // Get the actual vfp save area
412 mov r5, r0 // Stash the save area in another register
413 bl EXT(vfp_save) // Save the current VFP state to the stack
414 mov r1, r5 // Load the VFP save area argument
415 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
416 fmxr fpscr, r4 // And shove it into FPSCR
418 mov r1, #0 // Clear the facility context argument
420 #if __ARM_USER_PROTECT__
421 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
422 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
425 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
427 mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
428 mov r3, #0 // Load kernel asid
429 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
432 mov r0, sp // Argument
435 * For armv7k ABI, the stack needs to be 16-byte aligned
437 #if __BIGGEST_ALIGNMENT__ > 4
438 and r1, sp, #0x0F // sp mod 16-bytes
439 cmp r1, #4 // need space for the sp on the stack
440 addlt r1, r1, #0x10 // make room if needed, but keep stack aligned
441 mov r2, sp // get current sp
442 sub sp, sp, r1 // align stack
443 str r2, [sp] // store previous sp on stack
446 bl EXT(sleh_undef) // Call second level handler
448 #if __BIGGEST_ALIGNMENT__ > 4
449 ldr sp, [sp] // restore stack
// Restore the pre-exception translation-table/ASID state saved above.
452 #if __ARM_USER_PROTECT__
453 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
454 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
457 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
460 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
461 ldr r11, [r9, ACT_ASID] // Load thread asid
463 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
/*
 * fleh_swi: First Level Exception Handler for Software Interrupt (syscall).
 * Syscall number arrives in r12; arguments in r0-r6 and r8 (r7 is skipped,
 * per the header comment).  Kernel-mode SWI panics.
 *
 * User path: save context to the PCB, switch to SVC on the kernel stack,
 * save user VFP state, then dispatch:
 *   - r12 == -4  -> mach_continuous_time fast trap
 *   - r12 == -3  (per the mach_absolute_time comment) -> ml_get_timebase,
 *     no kernel stack needed
 *   - negative   -> Mach trap via mach_trap_table (with ARMv7k argument
 *     mungers when __BIGGEST_ALIGNMENT__ > 4)
 *   - positive   -> unix syscall via unix_syscall
 *   - small cache/cthread assist traps dispatched through the pc-relative
 *     jump table below (icache invalidate, dcache flush, set/get cthread)
 *
 * NOTE(review): this view is fragmentary -- LEXT(fleh_swi), fleh_swi_exit,
 * several labels (fleh_swi_trap*, cache_trap_error/jmp, dcache flush trap
 * entry) and multiple guard lines are elided.  Register roles that ARE
 * visible: r8 = arm_saved_state, r9 = thread (TPIDRPRW), r10 = task,
 * r11 = saved syscall number.
 */
470 * First Level Exception Handler for Software Interrupt
472 * We assert that only user level can use the "SWI" instruction for a system
473 * call on development kernels, and assume it's true on release.
475 * System call number is stored in r12.
476 * System call arguments are stored in r0 to r6 and r8 (we skip r7)
// Preserve ip across a probe of the previous mode, using ABT's banked sp.
484 cpsid i, #PSR_ABT_MODE
485 mov sp, ip // Save ip
486 cpsid i, #PSR_SVC_MODE
487 mrs ip, spsr // Check the previous mode
489 cpsid i, #PSR_ABT_MODE
490 mov ip, sp // Restore ip
491 cpsid i, #PSR_SVC_MODE
494 /* Only user mode can use SWI. Panic if the kernel tries. */
496 sub sp, sp, EXC_CTX_SIZE
498 add r0, sp, EXC_CTX_SIZE
500 str r0, [sp, SS_SP] // Save supervisor mode sp
501 str lr, [sp, SS_LR] // Save supervisor mode lr
503 adr r0, L_kernel_swi_panic_str // Load panic messages and panic()
// --- User SWI path ---
508 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
509 add sp, sp, ACT_PCBDATA // Get User PCB
512 /* Check for special mach_absolute_time trap value.
513 * This is intended to be a super-lightweight call to ml_get_timebase(), which
514 * is handrolled assembly and does not use the stack, thus not requiring us to setup a kernel stack. */
517 stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
518 mov r7, #0 // Zero the frame pointer
520 mov r8, sp // Store arm_saved_state pointer
522 srsia sp, #PSR_SVC_MODE
523 mrs r3, cpsr // Read cpsr
524 msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
// Recover the thread pointer from the PCB address (sp points past the pc slot).
525 sub r9, sp, ACT_PCBDATA_PC
527 ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
528 mov r11, r12 // save the syscall vector in a nontrashed register
531 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
532 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
533 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
534 fmxr fpscr, r4 // And shove it into FPSCR
536 #if __ARM_USER_PROTECT__
537 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
538 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
539 mov r3, #0 // Load kernel asid
540 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
545 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling SWI from userspace
547 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
548 bl EXT(timer_state_event_user_to_kernel)
549 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
550 add r8, r9, ACT_PCBDATA // Reload arm_saved_state pointer
552 ldr r10, [r9, ACT_TASK] // Load the current task
554 /* enable interrupts */
555 cpsie i // Enable IRQ
557 cmp r11, #-4 // Special value for mach_continuous_time
558 beq fleh_swi_trap_mct
565 /* trace the syscall */
567 bl EXT(syscall_trace)
570 bl EXT(mach_kauth_cred_uthread_update)
571 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
// Negative numbers are Mach traps, positive are unix syscalls.
573 rsbs r5, r11, #0 // make the syscall positive (if negative)
574 ble fleh_swi_unix // positive syscalls are unix (note reverse logic here)
577 /* note that mach_syscall_trace can modify r9, so increment the thread
578 * syscall count before the call : */
579 ldr r2, [r9, TH_MACH_SYSCALLS]
581 str r2, [r9, TH_MACH_SYSCALLS]
// Compute r6 = &mach_trap_table[r5] for each supported entry size.
583 LOAD_ADDR(r1, mach_trap_table) // load mach_trap_table
584 #if MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 12
585 add r11, r5, r5, lsl #1 // syscall * 3
586 add r6, r1, r11, lsl #2 // trap_table + syscall * 12
587 #elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 16
588 add r6, r1, r5, lsl #4 // trap_table + syscall * 16
589 #elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 20
590 add r11, r5, r5, lsl #2 // syscall * 5
591 add r6, r1, r11, lsl #2 // trap_table + syscall * 20
593 #error mach_trap_t size unhandled (see MACH_TRAP_TABLE_ENTRY_SIZE)!
597 LOAD_ADDR(r4, kdebug_enable)
600 movne r0, r8 // ready the reg state pointer as an arg to the call
601 movne r1, r5 // syscall number as 2nd arg
602 COND_EXTERN_BLNE(mach_syscall_trace)
604 adr lr, fleh_swi_exit // any calls from here on out will return to our exit path
605 cmp r5, MACH_TRAP_TABLE_COUNT // check syscall number range
606 bge fleh_swi_mach_error
609 * For arm32 ABI where 64-bit types are aligned to even registers and
610 * 64-bits on stack, we need to unpack registers differently. So
611 * we use the mungers for marshalling in arguments from user space.
612 * Currently this is just ARMv7k.
614 #if __BIGGEST_ALIGNMENT__ > 4
615 sub sp, #0x40 // allocate buffer and keep stack 128-bit aligned
616 // it should be big enough for all syscall arguments
617 ldr r11, [r6, #8] // get mach_trap_table[call_number].mach_trap_arg_munge32
618 teq r11, #0 // check if we have a munger
620 movne r0, r8 // ready the reg state pointer as an arg to the call
621 movne r1, sp // stack will hold arguments buffer
622 blxne r11 // call munger to get arguments from userspace
623 adr lr, fleh_swi_exit // any calls from here on out will return to our exit path
625 bne fleh_swi_mach_error // exit if the munger returned non-zero status
628 ldr r1, [r6, #4] // load the syscall vector
630 LOAD_ADDR(r2, kern_invalid) // test to make sure the trap is not kern_invalid
632 beq fleh_swi_mach_error
// Tail-call the trap handler; lr was pointed at fleh_swi_exit above.
634 #if __BIGGEST_ALIGNMENT__ > 4
635 mov r0, sp // argument buffer on stack
636 bx r1 // call the syscall handler
638 mov r0, r8 // ready the reg state pointer as an arg to the call
639 bx r1 // call the syscall handler
// --- Exit path: store the (64-bit capable) return value into saved state ---
643 str r1, [r8, #4] // top of 64-bit return
645 str r0, [r8] // save the return value
649 COND_EXTERN_BLNE(mach_syscall_trace_exit)
652 bl EXT(syscall_trace_exit)
656 bl EXT(throttle_lowpri_io) // throttle_lowpri_io(1);
658 bl EXT(thread_exception_return)
665 bl EXT(exception_triage)
// --- Unix syscall path: bump count, build args, call unix_syscall ---
670 ldr r1, [r9, TH_UNIX_SYSCALLS]
671 mov r0, r8 // reg state structure is arg
673 str r1, [r9, TH_UNIX_SYSCALLS]
674 mov r1, r9 // current thread in arg1
675 ldr r2, [r9, TH_UTHREAD] // current uthread in arg2
676 ldr r3, [r10, TASK_BSD_INFO] // current proc in arg3
// pc-relative jump table dispatch for the cache/cthread assist traps below.
683 addls pc, pc, r3, LSL#2
685 b icache_invalidate_trap
687 b thread_set_cthread_trap
688 b thread_get_cthread_trap
690 icache_invalidate_trap:
// Clamp the region end to VM_MAX_ADDRESS, then run the clean/invalidate
// with a recovery routine installed (cache_trap_jmp) in case of a fault.
692 cmp r3, VM_MAX_ADDRESS
693 subhi r3, r3, #1<<MMU_CLINE
695 adr r11, cache_trap_jmp
696 ldr r6, [r9, TH_RECOVER] // Save existing recovery routine
697 str r11, [r9, TH_RECOVER]
698 #if __ARM_USER_PROTECT__
699 ldr r5, [r9, ACT_UPTW_TTB] // Load thread ttb
700 mcr p15, 0, r5, c2, c0, 0 // Set TTBR0
701 ldr r5, [r9, ACT_ASID] // Load thread asid
702 mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
708 bl EXT(CleanPoU_DcacheRegion)
711 bl EXT(InvalidatePoU_IcacheRegion)
712 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
713 #if __ARM_USER_PROTECT__
714 ldr r4, [r9, ACT_KPTW_TTB] // Load kernel ttb
715 mcr p15, 0, r4, c2, c0, 0 // Set TTBR0
716 mov r4, #0 // Load kernel asid
717 mcr p15, 0, r4, c13, c0, 1 // Set CONTEXTIDR
720 str r6, [r9, TH_RECOVER]
721 bl EXT(thread_exception_return)
// NOTE(review): label for the dcache-flush trap entry is elided here;
// the body mirrors icache_invalidate_trap but calls flush_dcache_syscall.
726 cmp r3, VM_MAX_ADDRESS
727 subhi r3, r3, #1<<MMU_CLINE
729 adr r11, cache_trap_jmp
730 ldr r4, [r9, TH_RECOVER] // Save existing recovery routine
731 str r11, [r9, TH_RECOVER]
732 #if __ARM_USER_PROTECT__
733 ldr r6, [r9, ACT_UPTW_TTB] // Load thread ttb
734 mcr p15, 0, r6, c2, c0, 0 // Set TTBR0
735 ldr r5, [r9, ACT_ASID] // Load thread asid
736 mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
739 bl EXT(flush_dcache_syscall)
740 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
741 #if __ARM_USER_PROTECT__
742 ldr r5, [r9, ACT_KPTW_TTB] // Load kernel ttb
743 mcr p15, 0, r5, c2, c0, 0 // Set TTBR0
744 mov r5, #0 // Load kernel asid
745 mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
748 str r4, [r9, TH_RECOVER]
749 bl EXT(thread_exception_return)
752 thread_set_cthread_trap:
753 bl EXT(thread_set_cthread_self)
754 bl EXT(thread_exception_return)
757 thread_get_cthread_trap:
758 bl EXT(thread_get_cthread_self)
759 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
760 add r1, r9, ACT_PCBDATA // Get User PCB
761 str r0, [r1, SS_R0] // set return value
762 bl EXT(thread_exception_return)
// --- Cache-trap fault recovery: raise EXC_BAD_ACCESS for the user pc ---
766 #if __ARM_USER_PROTECT__
767 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
768 ldr r5, [r9, ACT_KPTW_TTB] // Load kernel ttb
769 mcr p15, 0, r5, c2, c0, 0 // Set TTBR0
770 mov r5, #0 // Load kernel asid
771 mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
774 mrc p15, 0, r3, c6, c0 // Read Fault Address
776 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
777 add r0, r9, ACT_PCBDATA // Get User PCB
778 ldr r1, [r0, SS_PC] // Save user mode pc register as pc
779 sub r1, r1, #4 // Backtrack current pc
780 str r1, [r0, SS_PC] // pc at cache assist swi
781 str r3, [r0, SS_VADDR] // Fault Address
782 mov r0, #EXC_BAD_ACCESS
783 mov r2, KERN_INVALID_ADDRESS
789 bl EXT(exception_triage)
// --- mach_continuous_time fast trap: 64-bit result into saved r0/r1 ---
793 bl EXT(mach_continuous_time)
794 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
795 add r9, r9, ACT_PCBDATA_R0 // Get User register state
796 stmia r9, {r0, r1} // set 64-bit return value
797 bl EXT(thread_exception_return)
// --- mach_absolute_time fast trap: no kernel stack, return directly ---
802 bl EXT(ml_get_timebase) // ml_get_timebase() (64-bit return)
805 movs pc, lr // Return to user
808 L_kernel_swi_panic_str:
809 .asciz "fleh_swi: took SWI from kernel mode\n"
/*
 * fleh_prefabt: First Level Exception Handler for Prefetch Abort.
 * Entered in ABT mode; sp is banked scratch until the context is saved.
 *
 * User path: save context + IFAR (fault address) + IFSR (fault status)
 * into the PCB, switch to SVC on the kernel stack, save user VFP state,
 * then call sleh_abort with T_PREFETCH_ABT (sleh re-enables interrupts).
 *
 * Kernel path (prefabt_from_kernel): build arm_saved_state + aligned VFP
 * area on the SVC stack, bounce to ABT mode to capture lr/IFAR/IFSR/spsr
 * into the frame, then call sleh_abort with T_PREFETCH_ABT.
 *
 * NOTE(review): fragmentary view -- LEXT(fleh_prefabt), the
 * prefabt_from_kernel label, and several guard/epilogue lines are elided;
 * confirm against the complete file before modifying.
 */
813 * First Level Exception Handler for Prefetching Abort.
817 .globl EXT(fleh_prefabt)
822 mrs sp, spsr // For check the previous mode
823 tst sp, #0x0f // Is it from user?
824 bne prefabt_from_kernel
// --- User path ---
827 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
828 add sp, sp, ACT_PCBDATA // Get User PCB
830 stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
831 mov r7, #0 // Zero the frame pointer
833 mov r0, sp // Store arm_saved_state pointer
835 str lr, [sp, SS_PC] // Save user mode pc register as pc
836 mrc p15, 0, r1, c6, c0, 2 // Read IFAR
837 str r1, [sp, SS_VADDR] // and fault address of pcb
839 mrc p15, 0, r5, c5, c0, 1 // Read Fault Status
840 str r5, [sp, SS_STATUS] // Save fault status register to pcb
843 str r4, [sp, SS_CPSR] // Save user mode cpsr
// Switch to SVC mode on the thread's kernel stack.
845 mrs r4, cpsr // Read cpsr
846 cpsid i, #PSR_SVC_MODE
847 mrs r3, cpsr // Read cpsr
848 msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
849 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
850 ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
853 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
854 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
855 mov r3, #FPSCR_DEFAULT // Load up the default FPSCR value...
856 fmxr fpscr, r3 // And shove it into FPSCR
858 #if __ARM_USER_PROTECT__
859 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
860 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
861 mov r3, #0 // Load kernel asid
862 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
// Sanity check: must have come from ABT mode, else panic.
865 and r0, r4, #PSR_MODE_MASK // Extract current mode
866 cmp r0, #PSR_ABT_MODE // Check abort mode
867 bne EXT(ExceptionVectorPanic)
870 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace
872 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
873 bl EXT(timer_state_event_user_to_kernel)
874 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
877 add r0, r9, ACT_PCBDATA // Reload arm_saved_state pointer
878 mov r1, T_PREFETCH_ABT // Pass abort type
879 bl EXT(sleh_abort) // Call second level handler
880 // Sleh will enable interrupt
// --- Kernel path: prefetch abort taken while in the kernel ---
884 mrs sp, cpsr // Read cpsr
885 and sp, sp, #PSR_MODE_MASK // Extract current mode
886 cmp sp, #PSR_ABT_MODE // Check abort mode
888 bne EXT(ExceptionVectorPanic)
889 mrs sp, spsr // Check the previous mode
892 * We have a kernel stack already, and I will use it to save contexts:
894 * | VFP saved state |
895 * |------------------|
896 * | ARM saved state |
897 * SP ------------------
901 cpsid i, #PSR_SVC_MODE
903 sub sp, sp, EXC_CTX_SIZE
905 add r0, sp, EXC_CTX_SIZE
907 str r0, [sp, SS_SP] // Save supervisor mode sp
908 str lr, [sp, SS_LR] // Save supervisor mode lr
910 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
913 add r0, sp, SS_SIZE // Get vfp state pointer
914 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
915 add r0, VSS_ALIGN // Get the actual vfp save area
916 bl EXT(vfp_save) // Save the current VFP state to the stack
917 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
918 fmxr fpscr, r4 // And shove it into FPSCR
920 #if __ARM_USER_PROTECT__
921 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
922 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
925 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
927 mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
928 mov r3, #0 // Load kernel asid
929 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
// Bounce to ABT mode to capture the banked lr and fault registers.
934 cpsid i, #PSR_ABT_MODE
936 str lr, [ip, SS_PC] // Save pc to pc and
938 mrc p15, 0, r5, c6, c0, 2 // Read IFAR
939 str r5, [ip, SS_VADDR] // and fault address of pcb
940 mrc p15, 0, r5, c5, c0, 1 // Read (instruction) Fault Status
941 str r5, [ip, SS_STATUS] // Save fault status register to pcb
944 str r4, [ip, SS_CPSR]
946 cpsid i, #PSR_SVC_MODE
951 * For armv7k ABI, the stack needs to be 16-byte aligned
953 #if __BIGGEST_ALIGNMENT__ > 4
954 and r1, sp, #0x0F // sp mod 16-bytes
955 cmp r1, #4 // need space for the sp on the stack
956 addlt r1, r1, #0x10 // make room if needed, but keep stack aligned
957 mov r2, sp // get current sp
958 sub sp, sp, r1 // align stack
959 str r2, [sp] // store previous sp on stack
962 mov r1, T_PREFETCH_ABT // Pass abort type
963 bl EXT(sleh_abort) // Call second level handler
965 #if __BIGGEST_ALIGNMENT__ > 4
966 ldr sp, [sp] // restore stack
969 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
970 #if __ARM_USER_PROTECT__
971 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
974 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
977 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
978 ldr r11, [r9, ACT_ASID] // Load thread asid
980 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
/*
 * fleh_dataabt: First Level Exception Handler for Data Abort.
 * Entered in ABT mode; structurally parallel to fleh_prefabt, but reads
 * DFSR/DFAR (c5,c0 / c6,c0) instead of IFSR/IFAR and passes T_DATA_ABT
 * to sleh_abort.
 *
 * User path: save context + fault status/address into the PCB, switch to
 * SVC on the kernel stack, save user VFP state, call sleh_abort.
 * Kernel path (dataabt_from_kernel): build arm_saved_state + aligned VFP
 * area on the SVC stack, bounce to ABT mode for the banked lr/spsr, record
 * fault status/address, then call sleh_abort.
 *
 * NOTE(review): fragmentary view -- LEXT(fleh_dataabt), parts of the
 * ABT-mode bounce (the ip setup and lr capture), and several guard lines
 * are elided; confirm against the complete file before modifying.
 */
988 * First Level Exception Handler for Data Abort
992 .globl EXT(fleh_dataabt)
997 mrs sp, spsr // For check the previous mode
998 tst sp, #0x0f // Is it from kernel?
999 bne dataabt_from_kernel
// --- User path ---
1002 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
1003 add sp, sp, ACT_PCBDATA // Get User PCB
1005 stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
1006 mov r7, #0 // Zero the frame pointer
1009 mov r0, sp // Store arm_saved_state pointer
1012 str lr, [sp, SS_PC] // Save user mode pc register
1015 str r4, [sp, SS_CPSR] // Save user mode cpsr
1017 mrc p15, 0, r5, c5, c0 // Read Fault Status
1018 mrc p15, 0, r6, c6, c0 // Read Fault Address
1019 str r5, [sp, SS_STATUS] // Save fault status register to pcb
1020 str r6, [sp, SS_VADDR] // Save fault address to pcb
// Switch to SVC mode on the thread's kernel stack.
1022 mrs r4, cpsr // Read cpsr
1023 cpsid i, #PSR_SVC_MODE
1024 mrs r3, cpsr // Read cpsr
1025 msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
1026 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1027 ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
1030 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1031 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
1032 mov r3, #FPSCR_DEFAULT // Load up the default FPSCR value...
1033 fmxr fpscr, r3 // And shove it into FPSCR
1035 #if __ARM_USER_PROTECT__
1036 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1037 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1038 mov r3, #0 // Load kernel asid
1039 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
// Sanity check: must have come from ABT mode, else panic.
1042 and r0, r4, #PSR_MODE_MASK // Extract current mode
1043 cmp r0, #PSR_ABT_MODE // Check abort mode
1044 bne EXT(ExceptionVectorPanic)
1047 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace
1049 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1050 bl EXT(timer_state_event_user_to_kernel)
1051 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1054 add r0, r9, ACT_PCBDATA // Reload arm_saved_state pointer
1055 mov r1, T_DATA_ABT // Pass abort type
1056 bl EXT(sleh_abort) // Call second level handler
1057 // Sleh will enable irq
// --- Kernel path: data abort taken while in the kernel ---
1060 dataabt_from_kernel:
1061 mrs sp, cpsr // Read cpsr
1062 and sp, sp, #PSR_MODE_MASK // Extract current mode
1063 cmp sp, #PSR_ABT_MODE // Check abort mode
1065 bne EXT(ExceptionVectorPanic)
1066 mrs sp, spsr // Check the previous mode
1069 * We have a kernel stack already, and I will use it to save contexts:
1070 * ------------------
1071 * | VFP saved state |
1072 * |------------------|
1073 * | ARM saved state |
1074 * SP ------------------
1078 cpsid i, #PSR_SVC_MODE
1080 sub sp, sp, EXC_CTX_SIZE
1082 add r0, sp, EXC_CTX_SIZE
1084 str r0, [sp, SS_SP] // Save supervisor mode sp
1085 str lr, [sp, SS_LR] // Save supervisor mode lr
1087 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1090 add r0, sp, SS_SIZE // Get vfp state pointer
1091 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1092 add r0, VSS_ALIGN // Get the actual vfp save area
1093 bl EXT(vfp_save) // Save the current VFP state to the stack
1094 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1095 fmxr fpscr, r4 // And shove it into FPSCR
// Bounce to ABT mode for the banked exception state (capture elided here).
1100 cpsid i, #PSR_ABT_MODE
1104 str r4, [ip, SS_CPSR]
1106 cpsid i, #PSR_SVC_MODE
1108 #if __ARM_USER_PROTECT__
1109 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1110 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1113 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1115 mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
1116 mov r3, #0 // Load kernel asid
1117 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1120 mrc p15, 0, r5, c5, c0 // Read Fault Status
1121 mrc p15, 0, r6, c6, c0 // Read Fault Address
1122 str r5, [sp, SS_STATUS] // Save fault status register to pcb
1123 str r6, [sp, SS_VADDR] // Save fault address to pcb
1125 mov r0, sp // Argument
1128 * For armv7k ABI, the stack needs to be 16-byte aligned
1130 #if __BIGGEST_ALIGNMENT__ > 4
1131 and r1, sp, #0x0F // sp mod 16-bytes
1132 cmp r1, #4 // need space for the sp on the stack
1133 addlt r1, r1, #0x10 // make room if needed, but keep stack aligned
1134 mov r2, sp // get current sp
1135 sub sp, sp, r1 // align stack
1136 str r2, [sp] // store previous sp on stack
1139 mov r1, T_DATA_ABT // Pass abort type
1140 bl EXT(sleh_abort) // Call second level handler
1142 #if __BIGGEST_ALIGNMENT__ > 4
1143 ldr sp, [sp] // restore stack (removed align padding)
1146 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1147 #if __ARM_USER_PROTECT__
1148 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
1151 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
1154 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
1155 ldr r11, [r9, ACT_ASID] // Load thread asid
1157 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
/*
 * Return-to-kernel path (exception exit toward svc/irq/fiq).
 * If the interrupted context had IRQs enabled, and the preemption count is
 * zero, honor any pending AST_URGENT by calling ast_taken_kernel() (with a
 * fake {r7, lr} frame and armv7k stack alignment); otherwise fall through
 * (lags1, defined outside this view) to context restore: reload VFP state
 * from the aligned area above the frame, restore GPRs/spsr, and return via
 * "movs pc, lr".
 * NOTE(review): fragmentary view -- the entry label of this path, lags1:,
 * and several restore lines are elided; confirm against the complete file.
 */
1162 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1164 ldr r4, [sp, SS_CPSR] // Load saved cpsr
1165 tst r4, #PSR_IRQF // Test IRQ set
1166 bne lags1 // Branch if IRQ disabled
1168 cpsid i // Disable IRQ
1169 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1170 movs r2, r2 // Test if null
1171 ldr r8, [r9, ACT_CPUDATAP] // Get current cpu
1172 bne lags1 // Branch if count not null
1173 ldr r5, [r8, CPU_PENDING_AST] // Get ASTs
1174 ands r5, r5, AST_URGENT // Get the requests we do honor
1175 beq lags1 // Branch if no ASTs
1176 #if __ARM_USER_PROTECT__
1177 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1178 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1181 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1183 mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
1184 mov r3, #0 // Load kernel asid
1185 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1188 ldr lr, [sp, SS_LR] // Restore the link register
1189 stmfd sp!, {r7, lr} // Push a fake frame
1191 /* TODO: Should this be setting r7? I think so. */
1192 mov r7, sp // Set the frame pointer
1194 #if __BIGGEST_ALIGNMENT__ > 4
1195 and r2, sp, #0x0F // sp mod 16-bytes
1196 cmp r2, #4 // need space for the sp on the stack
1197 addlt r2, r2, #0x10 // make room if needed, but keep stack aligned
1198 mov r3, sp // get current sp
1199 sub sp, sp, r2 // align stack
1200 str r3, [sp] // store previous sp on stack
1203 bl EXT(ast_taken_kernel) // Handle AST_URGENT
1205 #if __BIGGEST_ALIGNMENT__ > 4
1210 ldmfd sp!, {r7, lr} // Pop the fake frame
1211 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1212 ldr r8, [r9, ACT_CPUDATAP] // Get current cpu
1213 #if __ARM_USER_PROTECT__
1214 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
1217 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
1220 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
1221 ldr r11, [r9, ACT_ASID] // Load thread asid
1223 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
// --- Context restore: unwind the saved frame and return to the taker ---
1229 mov ip, sp // Save pointer to contexts for abort mode
1230 ldr sp, [ip, SS_SP] // Restore stack pointer
1232 cpsid if, #PSR_ABT_MODE
1236 ldr r4, [sp, SS_CPSR]
1237 msr spsr_cxsf, r4 // Restore spsr
1239 clrex // clear exclusive memory tag
1240 #if __ARM_ENABLE_WFE_
1245 add r0, sp, SS_SIZE // Get vfp state pointer
1246 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1247 add r0, VSS_ALIGN // Get the actual vfp save area
1248 bl EXT(vfp_load) // Load the desired VFP state from the stack
1251 ldr lr, [sp, SS_PC] // Restore lr
1253 ldmia sp, {r0-r12} // Restore other registers
1255 movs pc, lr // Return to sys (svc, irq, fiq)
1258 * First Level Exception Handler for address exception
1263 .globl EXT(fleh_addrexc)
1270 * First Level Exception Handler for IRQ
1271 * Current mode : IRQ
1272 * IRQ and FIQ are always disabled while running in FIQ handler
1273 * We do not permit nested interrupt.
1275 * Saving area: from user : PCB.
1276 * from kernel : interrupt stack.
1281 .globl EXT(fleh_irq)
1286 cpsie a // Re-enable async aborts
1289 tst sp, #0x0f // From user? or kernel?
1293 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
1294 add sp, sp, ACT_PCBDATA // Get User PCB
1295 stmia sp, {r0-r12, sp, lr}^
1296 mov r7, #0 // Zero the frame pointer
1300 str r4, [sp, SS_CPSR]
1301 mov r5, sp // Saved context in r5
1302 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1303 ldr r6, [r9, ACT_CPUDATAP] // Get current cpu
1304 ldr sp, [r6, CPU_ISTACKPTR] // Set interrupt stack
1305 cpsid i, #PSR_SVC_MODE
1306 ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack
1307 cpsid i, #PSR_IRQ_MODE
1310 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1311 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
1312 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1313 fmxr fpscr, r4 // And shove it into FPSCR
1315 #if __ARM_USER_PROTECT__
1316 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1317 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1318 mov r3, #0 // Load kernel asid
1319 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1322 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1323 bl EXT(timer_state_event_user_to_kernel)
1324 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1326 #if CONFIG_TELEMETRY
1327 LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
1332 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1333 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1340 cpsid i, #PSR_SVC_MODE
1342 sub sp, sp, EXC_CTX_SIZE
1344 add r0, sp, EXC_CTX_SIZE
1346 str r0, [sp, SS_SP] // Save supervisor mode sp
1347 str lr, [sp, SS_LR] // Save supervisor mode lr
1349 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1352 add r0, sp, SS_SIZE // Get vfp state pointer
1353 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1354 add r0, VSS_ALIGN // Get the actual vfp save area
1355 bl EXT(vfp_save) // Save the current VFP state to the stack
1356 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1357 fmxr fpscr, r4 // And shove it into FPSCR
1359 #if __ARM_USER_PROTECT__
1360 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1361 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1362 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1363 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
1364 mov r3, #0 // Load kernel asid
1365 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1368 mov r5, sp // Saved context in r5
1370 cpsid i, #PSR_IRQ_MODE
1372 str lr, [r5, SS_PC] // Save LR as the return PC
1374 str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode
1376 ldr sp, [r9, ACT_CPUDATAP] // Get current cpu
1377 ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack
1379 #if CONFIG_TELEMETRY
1380 LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
1385 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1386 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1391 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1392 add r2, r2, #1 // Increment count
1393 str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
1395 LOAD_ADDR(r8, kdebug_enable)
1399 COND_EXTERN_BLNE(interrupt_trace)
1401 bl EXT(interrupt_stats) // Record interrupt statistics
1402 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1403 ldr r4, [r9, ACT_CPUDATAP] // Get current cpu
1404 str r5, [r4, CPU_INT_STATE] // Saved context in cpu_int_state
1405 ldr r3, [r4, CPU_STAT_IRQ] // Get IRQ count
1406 add r3, r3, #1 // Increment count
1407 str r3, [r4, CPU_STAT_IRQ] // Update IRQ count
1408 ldr r3, [r4, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count
1409 add r3, r3, #1 // Increment count
1410 str r3, [r4, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
1411 ldr r0, [r4, INTERRUPT_TARGET]
1412 ldr r1, [r4, INTERRUPT_REFCON]
1413 ldr r2, [r4, INTERRUPT_NUB]
1414 ldr r3, [r4, INTERRUPT_SOURCE]
1415 ldr r5, [r4, INTERRUPT_HANDLER] // Call second level exception handler
1419 COND_EXTERN_BLNE(interrupt_trace_exit)
1421 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1422 bl EXT(ml_get_timebase) // get current timebase
1423 LOAD_ADDR(r3, EntropyData)
1424 ldr r2, [r3, ENTROPY_INDEX_PTR]
1425 add r1, r3, ENTROPY_DATA_SIZE
1428 addge r2, r3, ENTROPY_BUFFER
1430 eor r0, r0, r4, ROR #9
1431 str r0, [r2] // Update gEntropie
1432 str r2, [r3, ENTROPY_INDEX_PTR]
1436 ldr r4, [r9, ACT_CPUDATAP] // Get current cpu
1437 str r5, [r4, CPU_INT_STATE] // Clear cpu_int_state
1438 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1440 cmp r2, #0 // verify positive count
1444 adr r0, L_preemption_count_zero_str
1449 sub r2, r2, #1 // Decrement count
1450 str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
1452 mrs r0, spsr // For check the previous mode
1454 cpsid i, #PSR_SVC_MODE
1456 tst r0, #0x0f // Check if the previous is from user
1457 ldreq sp, [r9, TH_KSTACKPTR] // ...If so, reload the kernel stack pointer
1458 beq load_and_go_user // ...and return
1460 #if __ARM_USER_PROTECT__
1461 ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
1464 ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
1467 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
1468 ldr r11, [r9, ACT_ASID] // Load thread asid
1470 mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
1476 L_preemption_count_zero_str:
1477 .ascii "locore.s: preemption count is zero \000"
1480 * First Level Exception Handler for DEC
1481 * Current mode : IRQ
1482 * IRQ and FIQ are always disabled while running in FIQ handler
1483 * We do not permit nested interrupt.
1485 * Saving area: from user : PCB.
1486 * from kernel : interrupt stack.
1491 .globl EXT(fleh_decirq)
// Decrementer interrupt delivered as an IRQ. Structure mirrors fleh_irq:
// save context (PCB for user, supervisor stack for kernel), then dispatch
// to rtclock_intr at fleh_decirq_handler.
1496 cpsie af // Re-enable async aborts/FIQ
1499 tst sp, #0x0f // From user? or kernel?
1500 bne fleh_decirq_kernel
// --- Taken from user mode: save user state into the PCB ---
1503 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
1504 add sp, sp, ACT_PCBDATA // Get User PCB
1505 stmia sp, {r0-r12, sp, lr}^
1506 mov r7, #0 // Zero the frame pointer
1510 str r4, [sp, SS_CPSR]
1511 mov r5, sp // Saved context in r5
1512 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1513 ldr r6, [r9, ACT_CPUDATAP] // Get current cpu
1514 ldr sp, [r6, CPU_ISTACKPTR] // Set interrupt stack
1515 cpsid i, #PSR_SVC_MODE
1516 ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack
1517 cpsid i, #PSR_IRQ_MODE
1520 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1521 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
1522 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1523 fmxr fpscr, r4 // And shove it into FPSCR
1525 #if __ARM_USER_PROTECT__
1526 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1527 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1528 mov r3, #0 // Load kernel asid
1529 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1532 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1533 bl EXT(timer_state_event_user_to_kernel)
1534 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1536 #if CONFIG_TELEMETRY
1537 LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
1542 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1543 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1547 b fleh_decirq_handler
// --- Taken from kernel mode: build an exception frame on the supervisor stack ---
1550 cpsid i, #PSR_SVC_MODE
1552 sub sp, sp, EXC_CTX_SIZE
1554 add r0, sp, EXC_CTX_SIZE
1556 str r0, [sp, SS_SP] // Save supervisor mode sp
1557 str lr, [sp, SS_LR] // Save supervisor mode lr
1559 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1562 add r0, sp, SS_SIZE // Get vfp state pointer
1563 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1564 add r0, VSS_ALIGN // Get the actual vfp save area
1565 bl EXT(vfp_save) // Save the current VFP state to the stack
1566 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1567 fmxr fpscr, r4 // And shove it into FPSCR
1569 #if __ARM_USER_PROTECT__
1570 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1571 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1572 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1573 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
1574 mov r3, #0 // Load kernel asid
1575 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1578 mov r5, sp // Saved context in r5
// Hop back to IRQ mode to capture the interrupted context's banked lr/spsr.
1580 cpsid i, #PSR_IRQ_MODE
1582 str lr, [r5, SS_PC] // Save LR as the return PC
1584 str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode
1586 ldr sp, [r9, ACT_CPUDATAP] // Get current cpu
1587 ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack
1589 #if CONFIG_TELEMETRY
1590 LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
1595 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1596 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
// --- Common dispatch: bump preemption count, record stats, call rtclock_intr ---
1600 fleh_decirq_handler:
1601 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1602 add r2, r2, #1 // Increment count
1603 str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
1604 ldr r2, [r9, ACT_CPUDATAP] // Get current cpu
1605 str r5, [r2, CPU_INT_STATE] // Saved context in cpu_int_state
1606 ldr r3, [r2, CPU_STAT_IRQ] // Get IRQ count
1607 add r3, r3, #1 // Increment count
1608 str r3, [r2, CPU_STAT_IRQ] // Update IRQ count
1609 ldr r3, [r2, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count
1610 add r3, r3, #1 // Increment count
1611 str r3, [r2, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
1613 LOAD_ADDR(r4, kdebug_enable)
1616 movne r0, r5 // Pass saved context
1617 COND_EXTERN_BLNE(interrupt_trace)
1619 bl EXT(interrupt_stats) // Record interrupt statistics
1621 bl EXT(rtclock_intr) // Call second level exception handler
1624 COND_EXTERN_BLNE(interrupt_trace_exit)
1627 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1633 * First Level Exception Handler for FIQ
1634 * Current mode : FIQ
1635 * IRQ and FIQ are always disabled while running in FIQ handler
1636 * We do not permit nested interrupt.
1638 * Saving area: from user : PCB.
1639 * from kernel : interrupt stack.
1641 * We have 7 added shadow registers in FIQ mode for fast services.
1642 * So all we have to save is just the 8 general registers and LR.
1643 * But if the current thread was running on user mode before the FIQ interrupt,
1644 * all user registers must be saved for the AST handler routine.
1648 .globl EXT(fleh_fiq_generic)
1650 LEXT(fleh_fiq_generic)
// Fast path: emulate the timebase/decrementer entirely in banked FIQ
// registers. NOTE(review): assumes the FIQ shadow registers were preloaded
// (r8 = per-cpu data; r10 = FIQ source address; r11 = value that clears
// the source; r12/r13 used as scratch) — confirm against FIQ init code.
1651 str r11, [r10] // Clear the FIQ source
1653 ldr r13, [r8, CPU_TIMEBASE_LOW] // Load TBL
1654 adds r13, r13, #1 // Increment TBL
1655 str r13, [r8, CPU_TIMEBASE_LOW] // Store TBL
// Carry from the low-word increment propagates into the high word.
1656 ldreq r13, [r8, CPU_TIMEBASE_HIGH] // Load TBU
1657 addeq r13, r13, #1 // Increment TBU
1658 streq r13, [r8, CPU_TIMEBASE_HIGH] // Store TBU
1659 subs r12, r12, #1 // Decrement, DEC
1660 str r12, [r8, CPU_DECREMENTER] // Store DEC
1661 subspl pc, lr, #4 // Return unless DEC < 0
1666 .globl EXT(fleh_dec)
// Decrementer expiry path entered from the FIQ handler. Saves context
// (user PCB / supervisor stack / FIQ stack depending on origin), then
// either signals the decrementer via cpu_signal or calls rtclock_intr.
1668 mrs sp, spsr // Get the spsr
1670 tst sp, #0x0f // From user? or kernel?
// --- Taken from user mode: save user state into the PCB ---
1674 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
1675 add sp, sp, ACT_PCBDATA // Get User PCB
1677 stmia sp, {r0-r12, sp, lr}^
1678 mov r7, #0 // Zero the frame pointer
1683 str r4, [sp, SS_CPSR]
1685 sub sp, sp, ACT_PCBDATA // Get User PCB
1686 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu
1687 ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack
1689 cpsid i, #PSR_SVC_MODE
1690 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1691 ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack
1694 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1695 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
1696 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1697 fmxr fpscr, r4 // And shove it into FPSCR
1699 #if __ARM_USER_PROTECT__
1700 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1701 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1702 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1703 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
1704 mov r3, #0 // Load kernel asid
1705 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1708 mov r0, #1 // Mark this as coming from user context
// --- Taken from kernel mode ---
// If the interrupted kernel context already had IRQ masked we are on the
// cpu_signal path (label 3, below); otherwise build a frame on the
// supervisor stack just like the IRQ handlers.
1713 tst sp, #PSR_IRQF // Test for IRQ masked
1714 bne 3f // We're on the cpu_signal path
1716 cpsid if, #PSR_SVC_MODE
1718 sub sp, sp, EXC_CTX_SIZE
1720 add r0, sp, EXC_CTX_SIZE
1722 str r0, [sp, SS_SP] // Save supervisor mode sp
1723 str lr, [sp, SS_LR] // Save supervisor mode lr
1725 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1728 add r0, sp, SS_SIZE // Get vfp state pointer
1729 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1730 add r0, VSS_ALIGN // Get the actual vfp save area
1731 bl EXT(vfp_save) // Save the current VFP state to the stack
1732 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1733 fmxr fpscr, r4 // And shove it into FPSCR
1735 #if __ARM_USER_PROTECT__
1736 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1737 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1738 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1739 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
1740 mov r3, #0 // Load kernel asid
1741 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1744 mov r5, sp // Saved context in r5
// Capture the interrupted context's banked lr/spsr from FIQ mode.
1746 cpsid if, #PSR_FIQ_MODE
1748 mrc p15, 0, r1, c13, c0, 4 // Read TPIDRPRW
1750 str lr, [r5, SS_PC] // Save LR as the return PC
1752 str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode
1754 ldr r6, [r1, ACT_CPUDATAP] // Get current cpu
1755 ldr r6, [r6, CPU_ISTACKPTR] // Set interrupt stack
1757 mov r0, #0 // Mark this as coming from kernel context
1761 /* cpu_signal path */
// Runs on the dedicated FIQ stack: save registers, call cpu_signal with
// SIGPdec, restore, and return directly to the interrupted context.
1762 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
1763 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu
1764 ldr sp, [sp, CPU_FIQSTACKPTR] // Set fiq stack
1765 sub sp, sp, EXC_CTX_SIZE
1769 str r4, [sp, SS_CPSR]
1770 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1773 add r0, sp, SS_SIZE // Get vfp state pointer
1774 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1775 add r0, VSS_ALIGN // Get the actual vfp save area
1776 bl EXT(vfp_save) // Save the current VFP state to the stack
1777 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1778 fmxr fpscr, r4 // And shove it into FPSCR
1780 #if __ARM_USER_PROTECT__
1781 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1782 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1783 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1784 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
1785 mov r3, #0 // Load kernel asid
1786 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1789 mov r0, r8 // Get current cpu in arg 0
1790 mov r1, SIGPdec // Decrementer signal in arg1
1793 bl EXT(cpu_signal) // Call cpu_signal
1795 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1798 add r0, sp, SS_SIZE // Get vfp state pointer
1799 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1800 add r0, VSS_ALIGN // Get the actual vfp save area
1801 bl EXT(vfp_load) // Load the desired VFP state from the stack
1804 clrex // clear exclusive memory tag
1805 #if __ARM_ENABLE_WFE_
1808 #if __ARM_USER_PROTECT__
1809 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
1810 mcr p15, 0, r11, c13, c0, 1 // Set CONTEXTIDR
1814 ldmia sp, {r0-r12} // Restore saved registers
1815 movs pc, lr // Return from fiq
// --- Normal decrementer handling: dispatch rtclock_intr in IRQ/SVC context ---
1818 cpsid i, #PSR_IRQ_MODE
1820 mov sp, r6 // Restore the stack pointer
1821 msr spsr_cxsf, r4 // Restore the spsr
1822 ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
1823 add r2, r2, #1 // Increment count
1824 str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
1825 ldr r4, [r9, ACT_CPUDATAP] // Get current cpu
1826 str r5, [r4, CPU_INT_STATE]
1827 ldr r3, [r4, CPU_STAT_IRQ] // Get IRQ count
1828 add r3, r3, #1 // Increment count
1829 str r3, [r4, CPU_STAT_IRQ] // Update IRQ count
1830 ldr r3, [r4, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count
1831 add r3, r3, #1 // Increment count
1832 str r3, [r4, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
1833 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
// r0 (1 = from user, 0 = from kernel) must survive the timer call.
1836 mov r8, r0 // Stash our "from_user" boolean value
1837 bl EXT(timer_state_event_user_to_kernel)
1838 mov r0, r8 // Restore our "from_user" value
1839 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1842 #if CONFIG_TELEMETRY
1843 LOAD_ADDR(r4, telemetry_needs_record) // Check if a telemetry record was requested...
1847 bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
1848 mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1853 LOAD_ADDR(r4, kdebug_enable)
1856 ldrne r1, [r9, ACT_CPUDATAP] // Get current cpu
1857 ldrne r0, [r1, CPU_INT_STATE]
1858 COND_EXTERN_BLNE(interrupt_trace)
1860 bl EXT(interrupt_stats) // Record interrupt statistics
1862 bl EXT(rtclock_intr) // Call second level exception handler
1865 COND_EXTERN_BLNE(interrupt_trace_exit)
1868 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1873 * void thread_syscall_return(kern_return_t r0)
// Store the syscall return value into the thread's saved user context,
// optionally emit a kdebug syscall-exit trace, then fall into the
// return-to-user path (load_and_go_user).
1878 .globl EXT(thread_syscall_return)
1880 LEXT(thread_syscall_return)
1881 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1882 add r1, r9, ACT_PCBDATA // Get User PCB
1883 str r0, [r1, SS_R0] // set return value
1885 LOAD_ADDR(r4, kdebug_enable)
1888 beq load_and_go_user
// Mach trap numbers are negative; negate to trace positive trap numbers.
1889 ldr r12, [r1, SS_R12] // Load syscall number
1890 rsbs r1, r12, #0 // make the syscall positive (if negative)
1891 COND_EXTERN_BLGT(mach_syscall_trace_exit)
1896 * void thread_exception_return(void)
1897 * void thread_bootstrap_return(void)
// Common return-to-user entry points. thread_bootstrap_return additionally
// notifies dtrace, then falls through into thread_exception_return, which
// handles pending ASTs before dropping to user mode.
1901 .globl EXT(thread_exception_return)
1902 .globl EXT(thread_bootstrap_return)
1904 LEXT(thread_bootstrap_return)
1906 bl EXT(dtrace_thread_bootstrap)
1910 LEXT(thread_exception_return)
1914 * Restore user mode states and go back to user mode
1916 cpsid i // Disable irq
1917 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1920 str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
// Check for pending ASTs; if none, skip straight to the user restore path.
1922 ldr r8, [r9, ACT_CPUDATAP] // Get current cpu
1923 ldr r5, [r8, CPU_PENDING_AST] // Get ASTs
1924 cmp r5, #0 // Test if ASTs pending
1925 beq return_to_user_now // Branch if no ASTs
// Realign the stack to 16 bytes around the ast_taken_user call, stashing
// the original sp on the stack so it can be recovered afterwards.
1927 #if __BIGGEST_ALIGNMENT__ > 4
1928 and r2, sp, #0x0F // sp mod 16-bytes
1929 cmp r2, #4 // need space for the sp on the stack
1930 addlt r2, r2, #0x10 // make room if needed, but keep stack aligned
1931 mov r3, sp // get current sp
1932 sub sp, sp, r2 // align stack
1933 str r3, [sp] // store previous sp on stack
1936 bl EXT(ast_taken_user) // Handle all ASTs (may continue via thread_exception_return)
1938 #if __BIGGEST_ALIGNMENT__ > 4
1939 ldr sp, [sp] // Restore the stack pointer
1942 mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
1943 b load_and_go_user // Loop back
1949 * Assert that the preemption level is zero prior to the return to user space
// Sanity checks before dropping to user mode: panic if the preemption
// count or the thread's RW lock count is nonzero.
1951 ldr r1, [r9, ACT_PREEMPT_CNT] // Load preemption count
1953 beq 0f // Continue if zero, or...
1954 adr r0, L_lagu_panic_str // Load the panic string...
1955 blx EXT(panic) // Finally, panic
1957 ldr r2, [r9, TH_RWLOCK_CNT] // Load RW lock count
1959 beq 0f // Continue if zero, or...
1960 adr r0, L_lagu_rwlock_cnt_panic_str // Load the panic string...
1961 mov r1, r9 // Thread argument for panic string
1962 blx EXT(panic) // Finally, panic
1966 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1967 bl EXT(timer_state_event_kernel_to_user)
1968 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1969 ldr r8, [r9, ACT_CPUDATAP] // Get current cpu data
1970 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
// Install the thread's debug register state if it differs from what the
// cpu currently has loaded.
1971 #if __ARM_DEBUG__ >= 6
1972 ldr r0, [r9, ACT_DEBUGDATA]
1973 ldr r6, [r8, CPU_USER_DEBUG]
1974 cmp r0, r6 // test if debug registers need to be changed
1976 bl EXT(arm_debug_set) // argument is already in r0
1977 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1981 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1982 bl EXT(vfp_load) // Load the desired VFP state from ACT_UVFP
// The saved cpsr must denote user mode; anything else means the saved
// context is corrupt, so panic rather than return with elevated privilege.
1984 add r0, r9, ACT_PCBDATA // Get User PCB
1985 ldr r4, [r0, SS_CPSR] // Get saved cpsr
1986 and r3, r4, #PSR_MODE_MASK // Extract current mode
1987 cmp r3, #PSR_USER_MODE // Check user mode
1989 bne EXT(ExceptionVectorPanic)
1991 msr spsr_cxsf, r4 // Restore spsr(user mode cpsr)
1992 mov sp, r0 // Get User PCB
1994 clrex // clear exclusive memory tag
1995 #if __ARM_ENABLE_WFE_
// Switch the MMU back to the thread's user translation table and ASID.
1998 #if __ARM_USER_PROTECT__
1999 ldr r3, [r9, ACT_UPTW_TTB] // Load thread ttb
2000 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
2001 ldr r2, [r9, ACT_ASID] // Load thread asid
2002 mcr p15, 0, r2, c13, c0, 1 // Set CONTEXTIDR
2005 ldr lr, [sp, SS_PC] // Restore user mode pc
2006 ldmia sp, {r0-r12, sp, lr}^ // Restore the other user mode registers
2007 nop // Hardware problem
2008 movs pc, lr // Return to user
2012 .asciz "load_and_go_user: preemption_level %d"
2016 L_lagu_rwlock_cnt_panic_str:
2017 .asciz "load_and_go_user: RW lock count not 0 on thread %p (%u)"
2022 .ascii "Exception Vector: Illegal Mode: 0x%08X\n\000"
// Panic path taken when a saved context's cpsr does not denote user mode
// (see the PSR_MODE_MASK check in load_and_go_user).
2027 .globl EXT(ExceptionVectorPanic)
2029 LEXT(ExceptionVectorPanic)
2030 cpsid i, #PSR_SVC_MODE
2032 adr r0, L_evimpanic_str
2036 #include "globals_asm.h"
2038 LOAD_ADDR_GEN_DEF(mach_trap_table)
2039 LOAD_ADDR_GEN_DEF(kern_invalid)
2041 /* vim: set ts=4: */