/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <arm/caches_asm.h>
#include <arm/thread.h>
#include <arm/trap.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <config_dtrace.h>
#include "assym.s"
#include "dwarf_unwind.h"

#define TRACE_SYSCALL 0

/*
 * Copied to low physical memory in arm_init,
 * so the kernel must be linked virtually at
 * 0xc0001000 or higher to leave space for it.
 */
    .syntax unified
    .text
    .align 12
    .globl EXT(ExceptionLowVectorsBase)

LEXT(ExceptionLowVectorsBase)
    adr     pc, Lreset_low_vector
    b       .                           // Undef
    b       .                           // SWI
    b       .                           // Prefetch Abort
    b       .                           // Data Abort
    b       .                           // Address Exception
    b       .                           // IRQ
    b       .                           // FIQ/DEC
LEXT(ResetPrivateData)
    .space  (480),0                     // (filled with 0s)
    // ExceptionLowVectorsBase + 0x200
Lreset_low_vector:
    adr     r4, EXT(ResetHandlerData)
    ldr     r0, [r4, ASSIST_RESET_HANDLER]
    movs    r0, r0
    blxne   r0
    adr     r4, EXT(ResetHandlerData)
    ldr     r1, [r4, CPU_DATA_ENTRIES]
    ldr     r1, [r1, CPU_DATA_PADDR]
    ldr     r5, [r1, CPU_RESET_ASSIST]
    movs    r5, r5
    blxne   r5
    adr     r4, EXT(ResetHandlerData)
    ldr     r0, [r4, BOOT_ARGS]
    ldr     r1, [r4, CPU_DATA_ENTRIES]
#if defined(ARMA7)
    // physical cpu number is stored in MPIDR Affinity level 0
    mrc     p15, 0, r6, c0, c0, 5       // Read MPIDR
    and     r6, r6, #0xFF               // Extract Affinity level 0
#else
#error missing Who Am I implementation
#endif
    // physical cpu number matches cpu number
//#if cdeSize != 16
//#error cpu_data_entry is not 16bytes in size
//#endif
    lsl     r6, r6, #4                  // Get CpuDataEntry offset
    add     r1, r1, r6                  // Get cpu_data_entry pointer
    ldr     r1, [r1, CPU_DATA_PADDR]
    ldr     r5, [r1, CPU_RESET_HANDLER]
    movs    r5, r5
    blxne   r5                          // Branch to cpu reset handler
    b       .                           // Unexpected reset
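/*
 * Worked example of the per-cpu lookup above, assuming the 16-byte
 * cpu_data_entry size asserted (in commented-out form) above: for physical
 * cpu 2, MPIDR[7:0] = 2, so r6 = 2 << 4 = 32, and r1 lands on the third
 * entry of the cpu data entry table; that entry's CPU_DATA_PADDR field then
 * supplies the physical cpu_data pointer holding that cpu's reset handler.
 */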
    .globl EXT(ResetHandlerData)
LEXT(ResetHandlerData)
    .space  (rhdSize_NUM),0             // (filled with 0s)

    .globl EXT(ExceptionLowVectorsEnd)
LEXT(ExceptionLowVectorsEnd)

    .text
    .align 12
    .globl EXT(ExceptionVectorsBase)

LEXT(ExceptionVectorsBase)
    adr     pc, Lexc_reset_vector
    adr     pc, Lexc_undefined_inst_vector
    adr     pc, Lexc_swi_vector
    adr     pc, Lexc_prefetch_abort_vector
    adr     pc, Lexc_data_abort_vector
    adr     pc, Lexc_address_exception_vector
    adr     pc, Lexc_irq_vector
#if __ARM_TIME__
    adr     pc, Lexc_decirq_vector
#else /* ! __ARM_TIME__ */
    mov     pc, r9
#endif /* __ARM_TIME__ */

Lexc_reset_vector:
    b       .
    .long   0x0
    .long   0x0
    .long   0x0

Lexc_undefined_inst_vector:
    mrc     p15, 0, sp, c13, c0, 4      // Read TPIDRPRW
    ldr     sp, [sp, ACT_CPUDATAP]      // Get current cpu data
    ldr     sp, [sp, CPU_EXC_VECTORS]   // Get exception vector table
    ldr     pc, [sp, #4]                // Branch to exception handler

Lexc_swi_vector:
    mrc     p15, 0, sp, c13, c0, 4      // Read TPIDRPRW
    ldr     sp, [sp, ACT_CPUDATAP]      // Get current cpu data
    ldr     sp, [sp, CPU_EXC_VECTORS]   // Get exception vector table
    ldr     pc, [sp, #8]                // Branch to exception handler

Lexc_prefetch_abort_vector:
    mrc     p15, 0, sp, c13, c0, 4      // Read TPIDRPRW
    ldr     sp, [sp, ACT_CPUDATAP]      // Get current cpu data
    ldr     sp, [sp, CPU_EXC_VECTORS]   // Get exception vector table
    ldr     pc, [sp, #0xC]              // Branch to exception handler

Lexc_data_abort_vector:
    mrc     p15, 0, sp, c13, c0, 4      // Read TPIDRPRW
    ldr     sp, [sp, ACT_CPUDATAP]      // Get current cpu data
    ldr     sp, [sp, CPU_EXC_VECTORS]   // Get exception vector table
    ldr     pc, [sp, #0x10]             // Branch to exception handler

Lexc_address_exception_vector:
    mrc     p15, 0, sp, c13, c0, 4      // Read TPIDRPRW
    ldr     sp, [sp, ACT_CPUDATAP]      // Get current cpu data
    ldr     sp, [sp, CPU_EXC_VECTORS]   // Get exception vector table
    ldr     pc, [sp, #0x14]             // Branch to exception handler

Lexc_irq_vector:
    mrc     p15, 0, sp, c13, c0, 4      // Read TPIDRPRW
    ldr     sp, [sp, ACT_CPUDATAP]      // Get current cpu data
    ldr     sp, [sp, CPU_EXC_VECTORS]   // Get exception vector table
    ldr     pc, [sp, #0x18]             // Branch to exception handler

#if __ARM_TIME__
Lexc_decirq_vector:
    mrc     p15, 0, sp, c13, c0, 4      // Read TPIDRPRW
    ldr     sp, [sp, ACT_CPUDATAP]      // Get current cpu data
    ldr     sp, [sp, CPU_EXC_VECTORS]   // Get exception vector table
    ldr     pc, [sp, #0x1C]             // Branch to exception handler
#else /* ! __ARM_TIME__ */
    .long   0x0
    .long   0x0
    .long   0x0
    .long   0x0
#endif /* __ARM_TIME__ */

    .fill   984, 4, 0                   // Push to the 4KB page boundary
    .globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)

/*
 * Targets for the exception vectors; we patch these during boot (to allow
 * for position independent code without complicating the vectors; see start.s).
 */
    .globl EXT(ExceptionVectorsTable)
LEXT(ExceptionVectorsTable)
Lreset_vector:
    .long   0x0
Lundefined_inst_vector:
    .long   0x0
Lswi_vector:
    .long   0x0
Lprefetch_abort_vector:
    .long   0x0
Ldata_abort_vector:
    .long   0x0
Laddress_exception_vector:
    .long   0x0
Lirq_vector:
    .long   0x0
Ldecirq_vector:
    .long   0x0

/*
 * First Level Exception Handlers
 */
    .text
    .align 2
    .globl EXT(fleh_reset)
LEXT(fleh_reset)
    b       .                           // Never return

/*
 * First Level Exception Handler for Undefined Instruction.
 */
    .text
    .align 2
    .globl EXT(fleh_undef)

/*
 * Ensures the stack is safely aligned, usually in preparation for an external branch
 * arg0: temp register for storing the stack offset
 * arg1: temp register for storing the previous stack pointer
 */
.macro ALIGN_STACK
/*
 * For armv7k ABI, the stack needs to be 16-byte aligned
 */
#if __BIGGEST_ALIGNMENT__ > 4
    and     $0, sp, #0x0F               // sp mod 16-bytes
    cmp     $0, #4                      // need space for the sp on the stack
    addlt   $0, $0, #0x10               // make room if needed, but keep stack aligned
    mov     $1, sp                      // get current sp
    sub     sp, sp, $0                  // align stack
    str     $1, [sp]                    // store previous sp on stack
#endif
.endmacro

/*
 * Restores the stack pointer to its previous value following an ALIGN_STACK call
 */
.macro UNALIGN_STACK
#if __BIGGEST_ALIGNMENT__ > 4
    ldr     sp, [sp]
#endif
.endmacro
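/*
 * A minimal usage sketch (the callee name is hypothetical, not from this
 * file): any external call made while the stack may be misaligned is
 * bracketed as
 *
 *     ALIGN_STACK r2, r3              // r2/r3 are scratch here
 *     bl      EXT(some_c_handler)     // C code may assume 16-byte alignment
 *     UNALIGN_STACK
 *
 * On targets where __BIGGEST_ALIGNMENT__ <= 4, both macros expand to nothing.
 */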
/*
 * Checks that the cpu is currently in the expected mode, panics if not.
 * arg0: the expected mode, should be one of the PSR_*_MODE defines
 */
.macro VERIFY_EXCEPTION_MODE
    mrs     sp, cpsr                    // Read cpsr
    and     sp, sp, #PSR_MODE_MASK      // Extract current mode
    cmp     sp, $0                      // Check specified mode
    movne   r0, sp
    bne     EXT(ExceptionVectorPanic)
.endmacro

/*
 * Checks the previous processor mode. If usermode, will execute the code
 * following the macro to handle the userspace exception. Otherwise,
 * will branch to an ELSE_IF_KERNELMODE_EXCEPTION call with the same
 * argument.
 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 */
.macro IF_USERMODE_EXCEPTION
    mrs     sp, spsr
    and     sp, sp, #PSR_MODE_MASK      // Is it from user?
    cmp     sp, #PSR_USER_MODE
    beq     $0_from_user
    cmp     sp, #PSR_IRQ_MODE
    beq     $0_from_irq
    cmp     sp, #PSR_FIQ_MODE
    beq     $0_from_fiq
    bne     $0_from_svc
$0_from_user:
.endmacro

/*
 * Handles an exception taken from kernelmode (IRQ/FIQ/SVC/etc).
 * Places the processor into the correct mode and executes the
 * code following the macro to handle the kernel exception.
 * Intended to be paired with a prior call to IF_USERMODE_EXCEPTION.
 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 */
.macro ELSE_IF_KERNELMODE_EXCEPTION
$0_from_irq:
    cpsid   i, #PSR_IRQ_MODE
    b       $0_from_kernel
$0_from_fiq:
    cpsid   i, #PSR_FIQ_MODE
    b       $0_from_kernel
$0_from_svc:
    cpsid   i, #PSR_SVC_MODE
$0_from_kernel:
.endmacro
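/*
 * How the two macros pair up, as a sketch (exception class 'foo' is
 * hypothetical; the real use is fleh_undef just below): the user-mode code
 * falls through from IF_USERMODE_EXCEPTION and must branch away before the
 * kernel-mode code that follows ELSE_IF_KERNELMODE_EXCEPTION.
 *
 *     IF_USERMODE_EXCEPTION foo
 *     // ... user-mode handling; ends with a branch (e.g. b load_and_go_user)
 *     ELSE_IF_KERNELMODE_EXCEPTION foo
 *     // ... kernel-mode handling, entered in the interrupted mode
 *     //     with IRQ masked by the cpsid in the macro
 */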
LEXT(fleh_undef)
    VERIFY_EXCEPTION_MODE PSR_UND_MODE
    mrs     sp, spsr                    // Check the previous mode
    tst     sp, #PSR_TF                 // Is it Thumb?
    subeq   lr, lr, #4
    subne   lr, lr, #2
    IF_USERMODE_EXCEPTION undef
    mrc     p15, 0, sp, c13, c0, 4      // Read TPIDRPRW
    add     sp, sp, ACT_PCBDATA         // Get current thread PCB pointer

    stmia   sp, {r0-r12, sp, lr}^       // Save user context on PCB
    mov     r7, #0                      // Zero the frame pointer
    nop

    mov     r0, sp                      // Store arm_saved_state pointer
                                        // for argument

    str     lr, [sp, SS_PC]             // Save user mode pc register

    mrs     r4, spsr
    str     r4, [sp, SS_CPSR]           // Save user mode cpsr

    cpsid   i, #PSR_SVC_MODE
    mrs     r3, cpsr                    // Read cpsr
    msr     spsr_cxsf, r3               // Set spsr(svc mode cpsr)
    mrc     p15, 0, r9, c13, c0, 4      // Read TPIDRPRW
    ldr     sp, [r9, TH_KSTACKPTR]      // Load kernel stack
#if __ARM_USER_PROTECT__
    ldr     r3, [r9, ACT_KPTW_TTB]      // Load kernel ttb
    mcr     p15, 0, r3, c2, c0, 0       // Set TTBR0
    mov     r3, #0                      // Load kernel asid
    mcr     p15, 0, r3, c13, c0, 1      // Set CONTEXTIDR
    isb
#endif

    mvn     r0, #0
    str     r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    bl      EXT(timer_state_event_user_to_kernel)
    mrc     p15, 0, r9, c13, c0, 4      // Read TPIDRPRW
#endif

#if __ARM_VFP__
    add     r0, r9, ACT_UVFP            // Get the address of the user VFP save area
    bl      EXT(vfp_save)               // Save the current VFP state to ACT_UVFP
    mov     r3, #FPSCR_DEFAULT          // Load up the default FPSCR value...
    fmxr    fpscr, r3                   // And shove it into FPSCR
    add     r1, r9, ACT_UVFP            // Reload the pointer to the save state
    add     r0, r9, ACT_PCBDATA         // Reload the VFP save state argument
#else
    mov     r1, #0                      // Clear the VFP save state argument
    add     r0, r9, ACT_PCBDATA         // Reload arm_saved_state pointer
#endif

    bl      EXT(sleh_undef)             // Call second level handler
                                        // sleh will enable interrupt
    b       load_and_go_user

    ELSE_IF_KERNELMODE_EXCEPTION undef
    /*
     * We have a kernel stack already, and I will use it to save contexts.
     * IRQ is disabled.
     */
#if CONFIG_DTRACE
    // We need a frame for backtracing. The LR here is the LR of supervisor mode, not the location where the exception
    // took place. We'll store that later after we switch to undef mode and pull out the LR from there.

    // This frame is consumed by fbt_invop. Any changes with the size or location of this frame will probably require
    // changes in fbt_invop also.
    stmfd   sp!, { r7, lr }
#endif

    sub     sp, sp, EXC_CTX_SIZE        // Reserve for arm_saved_state

    stmia   sp, {r0-r12}                // Save on supervisor mode stack
    str     lr, [sp, SS_LR]

#if CONFIG_DTRACE
    add     r7, sp, EXC_CTX_SIZE        // Save frame pointer
#endif

    mrs     r4, lr_und
    str     r4, [sp, SS_PC]             // Save complete
    mrs     r4, spsr_und
    str     r4, [sp, SS_CPSR]

    mov     ip, sp

/*
   sp - stack pointer
   ip - stack pointer
   r7 - frame pointer state
 */

#if CONFIG_DTRACE
    ldr     r0, [ip, SS_PC]             // Get the exception pc to store later
#endif

    add     ip, ip, EXC_CTX_SIZE        // Send stack pointer to debugger
#if CONFIG_DTRACE
    str     r0, [ip, #4]
    add     ip, ip, #8
#endif
    str     ip, [sp, SS_SP]             // for accessing local variable
#if CONFIG_DTRACE
    sub     ip, ip, #8
#endif
    sub     ip, ip, EXC_CTX_SIZE

#if __ARM_VFP__
    mrc     p15, 0, r9, c13, c0, 4      // Read TPIDRPRW
    add     r0, sp, SS_SIZE             // Get vfp state pointer
    bic     r0, #(VSS_ALIGN_NUM - 1)    // Align to arm_vfpsaved_state alignment
    add     r0, VSS_ALIGN               // Get the actual vfp save area
    mov     r5, r0                      // Stash the save area in another register
    bl      EXT(vfp_save)               // Save the current VFP state to the stack
    mov     r1, r5                      // Load the VFP save area argument
    mov     r4, #FPSCR_DEFAULT          // Load up the default FPSCR value...
    fmxr    fpscr, r4                   // And shove it into FPSCR
#else
    mov     r1, #0                      // Clear the facility context argument
#endif
#if __ARM_USER_PROTECT__
    mrc     p15, 0, r10, c2, c0, 0      // Get TTBR0
    ldr     r3, [r9, ACT_KPTW_TTB]      // Load kernel ttb
    cmp     r3, r10
    beq     1f
    mcr     p15, 0, r3, c2, c0, 0       // Set TTBR0
1:
    mrc     p15, 0, r11, c13, c0, 1     // Save CONTEXTIDR
    mov     r3, #0                      // Load kernel asid
    mcr     p15, 0, r3, c13, c0, 1      // Set CONTEXTIDR
    isb
#endif
    mov     r0, sp                      // Argument

    ALIGN_STACK r2, r3
    bl      EXT(sleh_undef)             // Call second level handler
    UNALIGN_STACK

#if __ARM_USER_PROTECT__
    mrc     p15, 0, r9, c13, c0, 4      // Read TPIDRPRW
    ldr     r0, [r9, ACT_KPTW_TTB]      // Load kernel ttb
    cmp     r10, r0
    beq     1f
    ldr     r10, [r9, ACT_UPTW_TTB]     // Load thread ttb
    cmp     r10, r0
    beq     1f
    mcr     p15, 0, r10, c2, c0, 0      // Set TTBR0
    ldr     r11, [r9, ACT_ASID]         // Load thread asid
1:
    mcr     p15, 0, r11, c13, c0, 1     // Set CONTEXTIDR
    isb
#endif
    b       load_and_go_sys

/*
 * First Level Exception Handler for Software Interrupt
 *
 * We assert that only user level can use the "SWI" instruction for a system
 * call on development kernels, and assume it's true on release.
 *
 * System call number is stored in r12.
 * System call arguments are stored in r0 to r6 and r8 (we skip r7).
 */
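/*
 * Illustrative user-side sequence for the convention described above (a
 * hedged sketch, not code from this file; the trap number N is hypothetical):
 *
 *     // arguments already marshalled into r0-r6/r8 per the ABI
 *     mov     r12, #N                 // Mach traps use negative numbers,
 *                                     // unix syscalls positive
 *     swi     #SWI_SYSCALL            // trap into fleh_swi below
 *
 * r7 is skipped for arguments because user code uses it as the frame pointer.
 */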
    .text
    .align 5
    .globl EXT(fleh_swi)

LEXT(fleh_swi)
    cpsid   i, #PSR_ABT_MODE
    mov     sp, ip                      // Save ip
    cpsid   i, #PSR_SVC_MODE
    mrs     ip, spsr                    // Check the previous mode
    tst     ip, #0x0f
    cpsid   i, #PSR_ABT_MODE
    mov     ip, sp                      // Restore ip
    cpsid   i, #PSR_SVC_MODE
    beq     swi_from_user

/* Only user mode can use SWI. Panic if the kernel tries. */
swi_from_kernel:
    sub     sp, sp, EXC_CTX_SIZE
    stmia   sp, {r0-r12}
    add     r0, sp, EXC_CTX_SIZE

    str     r0, [sp, SS_SP]             // Save supervisor mode sp
    str     lr, [sp, SS_LR]             // Save supervisor mode lr

    ALIGN_STACK r0, r1
    adr     r0, L_kernel_swi_panic_str  // Load panic messages and panic()
    blx     EXT(panic)
    b       .

swi_from_user:
    mrc     p15, 0, sp, c13, c0, 4      // Read TPIDRPRW
    add     sp, sp, ACT_PCBDATA         // Get User PCB

    /* Check for special mach_absolute_time trap value.
     * This is intended to be a super-lightweight call to ml_get_timebase(), which
     * is handrolled assembly and does not use the stack, thus not requiring us to
     * set up a kernel stack. */
    cmp     r12, #MACH_ARM_TRAP_ABSTIME
    beq     fleh_swi_trap_tb
    stmia   sp, {r0-r12, sp, lr}^       // Save user context on PCB
    mov     r7, #0                      // Zero the frame pointer
    nop
    mov     r8, sp                      // Store arm_saved_state pointer
    add     sp, sp, SS_PC
    srsia   sp, #PSR_SVC_MODE
    mrs     r3, cpsr                    // Read cpsr
    msr     spsr_cxsf, r3               // Set spsr(svc mode cpsr)
    sub     r9, sp, ACT_PCBDATA_PC

    ldr     sp, [r9, TH_KSTACKPTR]      // Load kernel stack
    mov     r11, r12                    // save the syscall vector in a nontrashed register

#if __ARM_VFP__
    add     r0, r9, ACT_UVFP            // Get the address of the user VFP save area
    bl      EXT(vfp_save)               // Save the current VFP state to ACT_UVFP
    mov     r4, #FPSCR_DEFAULT          // Load up the default FPSCR value...
    fmxr    fpscr, r4                   // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    ldr     r3, [r9, ACT_KPTW_TTB]      // Load kernel ttb
    mcr     p15, 0, r3, c2, c0, 0       // Set TTBR0
    mov     r3, #0                      // Load kernel asid
    mcr     p15, 0, r3, c13, c0, 1      // Set CONTEXTIDR
    isb
#endif

    mvn     r0, #0
    str     r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling SWI from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    bl      EXT(timer_state_event_user_to_kernel)
    mrc     p15, 0, r9, c13, c0, 4      // Read TPIDRPRW
    add     r8, r9, ACT_PCBDATA         // Reload arm_saved_state pointer
#endif
    ldr     r10, [r9, ACT_TASK]         // Load the current task

    /* enable interrupts */
    cpsie   i                           // Enable IRQ

    cmp     r11, #MACH_ARM_TRAP_CONTTIME // Special value for mach_continuous_time
    beq     fleh_swi_trap_mct

    cmp     r11, #0x80000000
    beq     fleh_swi_trap
fleh_swi_trap_ret:

#if TRACE_SYSCALL
    /* trace the syscall */
    mov     r0, r8
    bl      EXT(syscall_trace)
#endif

    bl      EXT(mach_kauth_cred_uthread_update)
    mrc     p15, 0, r9, c13, c0, 4      // Reload r9 from TPIDRPRW
    /* unix syscall? */
    rsbs    r5, r11, #0                 // make the syscall positive (if negative)
    ble     fleh_swi_unix               // positive syscalls are unix (note reverse logic here)

fleh_swi_mach:
    /* note that mach_syscall_trace can modify r9, so increment the thread
     * syscall count before the call : */
    ldr     r2, [r9, TH_MACH_SYSCALLS]
    add     r2, r2, #1
    str     r2, [r9, TH_MACH_SYSCALLS]

    LOAD_ADDR(r1, mach_trap_table)      // load mach_trap_table
#if MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 12
    add     r11, r5, r5, lsl #1         // syscall * 3
    add     r6, r1, r11, lsl #2         // trap_table + syscall * 12
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 16
    add     r6, r1, r5, lsl #4          // trap_table + syscall * 16
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 20
    add     r11, r5, r5, lsl #2         // syscall * 5
    add     r6, r1, r11, lsl #2         // trap_table + syscall * 20
#else
#error mach_trap_t size unhandled (see MACH_TRAP_TABLE_ENTRY_SIZE)!
#endif
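/*
 * Worked example of the scaling above (MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 12
 * case): the byte offset of entry n is n*12, computed without a multiply as
 * (n + 2n) * 4 — the first add forms 3n, and the second adds 3n << 2 == 12n
 * to the table base. The 16- and 20-byte cases use the same shift-and-add
 * trick (n << 4 == 16n, and (n + 4n) << 2 == 20n respectively).
 */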
#ifndef NO_KDEBUG
    LOAD_ADDR(r4, kdebug_enable)
    ldr     r4, [r4]
    movs    r4, r4
    movne   r0, r8                      // ready the reg state pointer as an arg to the call
    movne   r1, r5                      // syscall number as 2nd arg
    COND_EXTERN_BLNE(mach_syscall_trace)
#endif
    adr     lr, fleh_swi_exit           // any calls from here on out will return to our exit path
    cmp     r5, MACH_TRAP_TABLE_COUNT   // check syscall number range
    bge     fleh_swi_mach_error

/*
 * For the arm32 ABI, where 64-bit types are aligned to even registers (and
 * to 64 bits on the stack), we need to unpack registers differently. So
 * we use the mungers for marshalling in arguments from user space.
 * Currently this is just ARMv7k.
 */
#if __BIGGEST_ALIGNMENT__ > 4
    sub     sp, #0x40                   // allocate buffer and keep stack 128-bit aligned
                                        // it should be big enough for all syscall arguments
    ldr     r11, [r6, #8]               // get mach_trap_table[call_number].mach_trap_arg_munge32
    teq     r11, #0                     // check if we have a munger
    moveq   r0, #0
    movne   r0, r8                      // ready the reg state pointer as an arg to the call
    movne   r1, sp                      // stack will hold arguments buffer
    blxne   r11                         // call munger to get arguments from userspace
    adr     lr, fleh_swi_exit           // any calls from here on out will return to our exit path
    teq     r0, #0
    bne     fleh_swi_mach_error         // exit if the munger returned non-zero status
#endif
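/*
 * Why a munger is needed, as a rough sketch (the trap signature is
 * hypothetical): for a trap `kern_return_t trap(int a, uint64_t b)`, the
 * arm32 ABI promotes the 64-bit argument to an even/odd register pair, so
 * user space passes
 *
 *     r0 = a,  r1 = (alignment padding),  {r2,r3} = b
 *
 * and the munger repacks that layout into the argument buffer at sp that
 * the in-kernel handler expects, dropping the padding along the way.
 */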
    ldr     r1, [r6, #4]                // load the syscall vector

    LOAD_ADDR(r2, kern_invalid)         // test to make sure the trap is not kern_invalid
    teq     r1, r2
    beq     fleh_swi_mach_error

#if __BIGGEST_ALIGNMENT__ > 4
    mov     r0, sp                      // argument buffer on stack
    bx      r1                          // call the syscall handler
#else
    mov     r0, r8                      // ready the reg state pointer as an arg to the call
    bx      r1                          // call the syscall handler
#endif

fleh_swi_exit64:
    str     r1, [r8, #4]                // top of 64-bit return
fleh_swi_exit:
    str     r0, [r8]                    // save the return value
#ifndef NO_KDEBUG
    movs    r4, r4
    movne   r1, r5
    COND_EXTERN_BLNE(mach_syscall_trace_exit)
#endif
#if TRACE_SYSCALL
    bl      EXT(syscall_trace_exit)
#endif

    mov     r0, #1
    bl      EXT(throttle_lowpri_io)     // throttle_lowpri_io(1);

    bl      EXT(thread_exception_return)
    b       .

fleh_swi_mach_error:
    mov     r0, #EXC_SYSCALL
    sub     r1, sp, #4
    mov     r2, #1
    bl      EXT(exception_triage)
    b       .

    .align  5
fleh_swi_unix:
    ldr     r1, [r9, TH_UNIX_SYSCALLS]
    mov     r0, r8                      // reg state structure is arg
    add     r1, r1, #1
    str     r1, [r9, TH_UNIX_SYSCALLS]
    mov     r1, r9                      // current thread in arg1
    ldr     r2, [r9, TH_UTHREAD]        // current uthread in arg2
    ldr     r3, [r10, TASK_BSD_INFO]    // current proc in arg3
    bl      EXT(unix_syscall)
    b       .

fleh_swi_trap:
    ldmia   r8, {r0-r3}
    cmp     r3, #3
    addls   pc, pc, r3, LSL#2
    b       fleh_swi_trap_ret
    b       icache_invalidate_trap
    b       dcache_flush_trap
    b       thread_set_cthread_trap
    b       thread_get_cthread_trap
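/*
 * Dispatch arithmetic above, worked through: in ARM mode a pc read returns
 * the address of the current instruction + 8, i.e. the address of the
 * `b icache_invalidate_trap` slot. `addls pc, pc, r3, LSL#2` therefore lands
 * on slot r3 (r3 = 0 selects icache_invalidate_trap, r3 = 3 selects
 * thread_get_cthread_trap), while r3 > 3 skips the addls and falls into the
 * unconditional `b fleh_swi_trap_ret`.
 */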
icache_invalidate_trap:
    add     r3, r0, r1
    cmp     r3, VM_MAX_ADDRESS
    subhi   r3, r3, #1<<MMU_CLINE

#if __ARM_DEBUG__ >= 6
    ldr     r0, [r9, ACT_DEBUGDATA]
    ldr     r6, [r8, CPU_USER_DEBUG]
    cmp     r0, r6                      // test if debug registers need to be changed
    beq     1f
    bl      EXT(arm_debug_set)          // argument is already in r0
    mrc     p15, 0, r9, c13, c0, 4      // Read TPIDRPRW
1:
#endif

#if __ARM_VFP__
    add     r0, r9, ACT_UVFP            // Get the address of the user VFP save area
    bl      EXT(vfp_load)               // Load the desired VFP state from ACT_UVFP
#endif

    add     r0, r9, ACT_PCBDATA         // Get User PCB
    ldr     r4, [r0, SS_CPSR]           // Get saved cpsr
    and     r3, r4, #PSR_MODE_MASK      // Extract current mode
    cmp     r3, #PSR_USER_MODE          // Check user mode
    movne   r0, r3
    bne     EXT(ExceptionVectorPanic)

    msr     spsr_cxsf, r4               // Restore spsr(user mode cpsr)
    mov     sp, r0                      // Get User PCB

    clrex                               // clear exclusive memory tag
#if __ARM_ENABLE_WFE_
    sev
#endif
#if __ARM_USER_PROTECT__
    ldr     r3, [r9, ACT_UPTW_TTB]      // Load thread ttb
    mcr     p15, 0, r3, c2, c0, 0       // Set TTBR0
    ldr     r2, [r9, ACT_ASID]          // Load thread asid
    mcr     p15, 0, r2, c13, c0, 1
    isb
#endif
    ldr     lr, [sp, SS_PC]             // Restore user mode pc
    ldmia   sp, {r0-r12, sp, lr}^       // Restore the other user mode registers
    nop                                 // Hardware problem
    movs    pc, lr                      // Return to user

/*
 * r1: tmp alloc count
 * r9: current_thread()
 */
L_lagu_temp_alloc_cnt_panic:
    mov     r0, r9                      // Thread argument
    blx     EXT(kheap_temp_leak_panic)  // Finally, panic

#if MACH_ASSERT
/*
 * r1: current preemption count
 * r9: current_thread()
 */
L_lagu_preempt_panic:
    adr     r0, L_lagu_preempt_panic_str // Load the panic string...
    blx     EXT(panic)                  // Finally, panic

/*
 * r2: rwlock count
 * r9: current_thread()
 */
L_lagu_rwlock_cnt_panic:
    adr     r0, L_lagu_rwlock_cnt_panic_str // Load the panic string...
    mov     r1, r9                      // Thread argument for panic string
    blx     EXT(panic)                  // Finally, panic

    .align  2
L_lagu_preempt_panic_str:
    .asciz  "load_and_go_user: preemption_level %d"
    .align  2

    .align  2
L_lagu_rwlock_cnt_panic_str:
    .asciz  "load_and_go_user: RW lock count not 0 on thread %p (%u)"
    .align  2
#endif /* MACH_ASSERT */

    .align  2
L_evimpanic_str:
    .ascii  "Exception Vector: Illegal Mode: 0x%08X\n\000"
    .align  2

    .text
    .align  2
    .globl  EXT(ExceptionVectorPanic)

LEXT(ExceptionVectorPanic)
    cpsid   i, #PSR_SVC_MODE
    ALIGN_STACK r1, r2
    mov     r1, r0
    adr     r0, L_evimpanic_str
    blx     EXT(panic)
    b       .

#include "globals_asm.h"

LOAD_ADDR_GEN_DEF(mach_trap_table)
LOAD_ADDR_GEN_DEF(kern_invalid)

/* vim: set ts=4: */