/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <pexpert/arm/board_config.h>
#include <mach/arm/traps.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <mach_assert.h>
#include <config_dtrace.h>
#include "assym.s"
#include "dwarf_unwind.h"

#define TRACE_SYSCALL 0

/*
 * Copied to low physical memory in arm_init,
 * so the kernel must be linked virtually at
 * 0xc0001000 or higher to leave space for it.
 */
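/*
 * Layout note (illustrative): the eight 4-byte slots below follow the
 * standard ARM exception vector layout, one instruction per vector:
 *
 *	0x00 Reset	0x04 Undef	0x08 SWI	0x0C Prefetch Abort
 *	0x10 Data Abort	0x14 Addr Exc	0x18 IRQ	0x1C FIQ/DEC
 *
 * In this low-memory copy only Reset is wired up; the other slots spin,
 * since nothing should take exceptions through the low vectors.
 */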
	.syntax unified
	.text
	.align 12
	.globl EXT(ExceptionLowVectorsBase)

LEXT(ExceptionLowVectorsBase)
	adr	pc, Lreset_low_vector
	b	.				// Undef
	b	.				// SWI
	b	.				// Prefetch Abort
	b	.				// Data Abort
	b	.				// Address Exception
	b	.				// IRQ
	b	.				// FIQ/DEC
LEXT(ResetPrivateData)
	.space	(480),0				// (filled with 0s)
	// ExceptionLowVectorsBase + 0x200
Lreset_low_vector:
	adr	r4, EXT(ResetHandlerData)
	ldr	r0, [r4, ASSIST_RESET_HANDLER]
	movs	r0, r0
	blxne	r0
	adr	r4, EXT(ResetHandlerData)
	ldr	r1, [r4, CPU_DATA_ENTRIES]
	ldr	r1, [r1, CPU_DATA_PADDR]
	ldr	r5, [r1, CPU_RESET_ASSIST]
	movs	r5, r5
	blxne	r5
	adr	r4, EXT(ResetHandlerData)
	ldr	r0, [r4, BOOT_ARGS]
	ldr	r1, [r4, CPU_DATA_ENTRIES]
#if defined(ARMA7)
	// physical cpu number is stored in MPIDR Affinity level 0
	mrc	p15, 0, r6, c0, c0, 5		// Read MPIDR
	and	r6, r6, #0xFF			// Extract Affinity level 0
#else
#error missing Who Am I implementation
#endif
	// physical cpu number matches cpu number
//#if cdeSize != 16
//#error cpu_data_entry is not 16bytes in size
//#endif
	lsl	r6, r6, #4			// Get CpuDataEntry offset
	add	r1, r1, r6			// Get cpu_data_entry pointer
	ldr	r1, [r1, CPU_DATA_PADDR]
	ldr	r5, [r1, CPU_RESET_HANDLER]
	movs	r5, r5
	blxne	r5				// Branch to cpu reset handler
	b	.				// Unexpected reset
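/*
 * Illustrative C sketch of the per-cpu dispatch above (field names mirror
 * the assym offsets used here; this is not a definitive structure layout).
 * Each cpu_data_entry is assumed to be 16 bytes, hence the "lsl #4":
 *
 *	entry    = &ResetHandlerData.cpu_data_entries[MPIDR.Aff0];
 *	cpu_data = entry->cpu_data_paddr;
 *	if (cpu_data->cpu_reset_handler)
 *		cpu_data->cpu_reset_handler(boot_args, cpu_data);
 */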
	.globl	EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space	(rhdSize_NUM),0			// (filled with 0s)


	.globl EXT(ExceptionLowVectorsEnd)
LEXT(ExceptionLowVectorsEnd)

	.text
	.align 12
	.globl EXT(ExceptionVectorsBase)

LEXT(ExceptionVectorsBase)

	adr	pc, Lexc_reset_vector
	adr	pc, Lexc_undefined_inst_vector
	adr	pc, Lexc_swi_vector
	adr	pc, Lexc_prefetch_abort_vector
	adr	pc, Lexc_data_abort_vector
	adr	pc, Lexc_address_exception_vector
	adr	pc, Lexc_irq_vector
#if __ARM_TIME__
	adr	pc, Lexc_decirq_vector
#else /* ! __ARM_TIME__ */
	mov	pc, r9
#endif /* __ARM_TIME__ */

Lexc_reset_vector:
	b	.
	.long	0x0
	.long	0x0
	.long	0x0
Lexc_undefined_inst_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #4]			// Branch to exception handler
Lexc_swi_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #8]			// Branch to exception handler
Lexc_prefetch_abort_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0xC]			// Branch to exception handler
Lexc_data_abort_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0x10]			// Branch to exception handler
Lexc_address_exception_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0x14]			// Branch to exception handler
Lexc_irq_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0x18]			// Branch to exception handler
#if __ARM_TIME__
Lexc_decirq_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0x1C]			// Branch to exception handler
#else /* ! __ARM_TIME__ */
	.long	0x0
	.long	0x0
	.long	0x0
	.long	0x0
#endif /* __ARM_TIME__ */

	.fill	984, 4, 0			// Push to the 4KB page boundary

	.globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)


/*
 * Targets for the exception vectors; we patch these during boot (to allow
 * for position independent code without complicating the vectors; see start.s).
 */
	.globl EXT(ExceptionVectorsTable)
LEXT(ExceptionVectorsTable)
Lreset_vector:
	.long	0x0
Lundefined_inst_vector:
	.long	0x0
Lswi_vector:
	.long	0x0
Lprefetch_abort_vector:
	.long	0x0
Ldata_abort_vector:
	.long	0x0
Laddress_exception_vector:
	.long	0x0
Lirq_vector:
	.long	0x0
Ldecirq_vector:
	.long	0x0


/*
 * First Level Exception Handlers
 */
	.text
	.align 2
	.globl EXT(fleh_reset)
LEXT(fleh_reset)
	b	.				// Never return

/*
 * First Level Exception Handler for Undefined Instruction.
 */
	.text
	.align 2
	.globl EXT(fleh_undef)

/*
 * Ensures the stack is safely aligned, usually in preparation for an external branch
 * arg0: temp register for storing the stack offset
 * arg1: temp register for storing the previous stack pointer
 */
.macro ALIGN_STACK
/*
 * For armv7k ABI, the stack needs to be 16-byte aligned
 */
#if __BIGGEST_ALIGNMENT__ > 4
	and	$0, sp, #0x0F			// sp mod 16-bytes
	cmp	$0, #4				// need space for the sp on the stack
	addlt	$0, $0, #0x10			// make room if needed, but keep stack aligned
	mov	$1, sp				// get current sp
	sub	sp, sp, $0			// align stack
	str	$1, [sp]			// store previous sp on stack
#endif
.endmacro
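/*
 * Usage sketch (illustrative): bracket an external call with the pair,
 * passing two scratch registers the call sequence does not otherwise need:
 *
 *	ALIGN_STACK r2, r3
 *	bl	EXT(some_c_function)	// hypothetical callee
 *	UNALIGN_STACK
 */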

/*
 * Restores the stack pointer to its previous value following an ALIGN_STACK call
 */
.macro UNALIGN_STACK
#if __BIGGEST_ALIGNMENT__ > 4
	ldr	sp, [sp]
#endif
.endmacro

/*
 * Checks that the cpu is currently in the expected mode, panics if not.
 * arg0: the expected mode, should be one of the PSR_*_MODE defines
 */
.macro VERIFY_EXCEPTION_MODE
	mrs	sp, cpsr			// Read cpsr
	and	sp, sp, #PSR_MODE_MASK		// Extract current mode
	cmp	sp, $0				// Check specified mode
	movne	r0, sp
	bne	EXT(ExceptionVectorPanic)
.endmacro

/*
 * Checks the previous processor mode. If it was user mode, executes the code
 * following the macro to handle the userspace exception. Otherwise,
 * branches to an ELSE_IF_KERNELMODE_EXCEPTION call with the same
 * argument.
 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 */
.macro IF_USERMODE_EXCEPTION
	mrs	sp, spsr
	and	sp, sp, #PSR_MODE_MASK		// Is it from user?
	cmp	sp, #PSR_USER_MODE
	beq	$0_from_user
	cmp	sp, #PSR_IRQ_MODE
	beq	$0_from_irq
	cmp	sp, #PSR_FIQ_MODE
	beq	$0_from_fiq
	bne	$0_from_svc
$0_from_user:
.endmacro

/*
 * Handles an exception taken from kernel mode (IRQ/FIQ/SVC/etc).
 * Places the processor into the correct mode and executes the
 * code following the macro to handle the kernel exception.
 * Intended to be paired with a prior call to IF_USERMODE_EXCEPTION.
 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 */
.macro ELSE_IF_KERNELMODE_EXCEPTION
$0_from_irq:
	cpsid	i, #PSR_IRQ_MODE
	b	$0_from_kernel
$0_from_fiq:
	cpsid	i, #PSR_FIQ_MODE
	b	$0_from_kernel
$0_from_svc:
	cpsid	i, #PSR_SVC_MODE
$0_from_kernel:
.endmacro
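/*
 * Paired-usage sketch (illustrative, with a hypothetical 'foo' class):
 *
 *	IF_USERMODE_EXCEPTION foo
 *	// ... handle the exception taken from user mode ...
 *	b	load_and_go_user
 *	ELSE_IF_KERNELMODE_EXCEPTION foo
 *	// ... handle the exception taken from kernel mode ...
 *	b	load_and_go_sys
 */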

LEXT(fleh_undef)
VERIFY_EXCEPTION_MODE PSR_UND_MODE
	mrs	sp, spsr			// Check the previous mode
	tst	sp, #PSR_TF			// Is it Thumb?
	subeq	lr, lr, #4
	subne	lr, lr, #2
IF_USERMODE_EXCEPTION undef
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get current thread PCB pointer

	stmia	sp, {r0-r12, sp, lr}^		// Save user context on PCB
	mov	r7, #0				// Zero the frame pointer
	nop

	mov	r0, sp				// Store arm_saved_state pointer
						// for argument

	str	lr, [sp, SS_PC]			// Save user mode pc register

	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]		// Save user mode cpsr

	cpsid	i, #PSR_SVC_MODE
	mrs	r3, cpsr			// Read cpsr
	msr	spsr_cxsf, r3			// Set spsr(svc mode cpsr)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [r9, TH_KSTACKPTR]		// Load kernel stack
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mvn	r0, #0
	str	r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r3, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r3			// And shove it into FPSCR
	add	r1, r9, ACT_UVFP		// Reload the pointer to the save state
	add	r0, r9, ACT_PCBDATA		// Reload the VFP save state argument
#else
	mov	r1, #0				// Clear the VFP save state argument
	add	r0, r9, ACT_PCBDATA		// Reload arm_saved_state pointer
#endif

	bl	EXT(sleh_undef)			// Call second level handler
						// sleh will enable interrupts
	b	load_and_go_user

ELSE_IF_KERNELMODE_EXCEPTION undef
	/*
	 * We already have a kernel stack; use it to save the context.
	 * IRQ is disabled.
	 */
#if CONFIG_DTRACE
	// We need a frame for backtracing. The LR here is the LR of supervisor mode, not the location where the exception
	// took place. We'll store that later after we switch to undef mode and pull out the LR from there.

	// This frame is consumed by fbt_invop. Any change to the size or location of this frame will probably require
	// changes in fbt_invop as well.
	stmfd	sp!, { r7, lr }
#endif

	sub	sp, sp, EXC_CTX_SIZE		// Reserve for arm_saved_state

	stmia	sp, {r0-r12}			// Save on supervisor mode stack
	str	lr, [sp, SS_LR]

#if CONFIG_DTRACE
	add	r7, sp, EXC_CTX_SIZE		// Save frame pointer
#endif

	mrs	r4, lr_und
	str	r4, [sp, SS_PC]			// Save complete
	mrs	r4, spsr_und
	str	r4, [sp, SS_CPSR]

	mov	ip, sp

/*
 * sp - stack pointer (base of the arm_saved_state area)
 * ip - scratch copy of sp, used to compute the saved SS_SP
 * r7 - frame pointer state
 */

#if CONFIG_DTRACE
	ldr	r0, [ip, SS_PC]			// Get the exception pc to store later
#endif

	add	ip, ip, EXC_CTX_SIZE		// Send stack pointer to debugger
#if CONFIG_DTRACE
	str	r0, [ip, #4]
	add	ip, ip, #8
#endif
	str	ip, [sp, SS_SP]			// for accessing local variable
#if CONFIG_DTRACE
	sub	ip, ip, #8
#endif
	sub	ip, ip, EXC_CTX_SIZE

#if __ARM_VFP__
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	mov	r5, r0				// Stash the save area in another register
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r1, r5				// Load the VFP save area argument
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#else
	mov	r1, #0				// Clear the facility context argument
#endif
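/*
 * Layout note (illustrative): EXC_CTX_SIZE reserves room on the kernel
 * stack for the ARM saved state plus a VFP save area above it. The bic/add
 * pair places the VFP area at a VSS_ALIGN-aligned offset, roughly:
 *
 *	vfp = (void *)(((sp + SS_SIZE) & ~(VSS_ALIGN_NUM - 1)) + VSS_ALIGN);
 */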
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r3, r10
	beq	1f
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc	p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov	r0, sp				// Argument

	ALIGN_STACK r2, r3
	bl	EXT(sleh_undef)			// Call second level handler
	UNALIGN_STACK

#if __ARM_USER_PROTECT__
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif
	b	load_and_go_sys


/*
 * First Level Exception Handler for Software Interrupt
 *
 * We assert that only user level can use the "SWI" instruction for a system
 * call on development kernels, and assume it's true on release.
 *
 * System call number is stored in r12.
 * System call arguments are stored in r0 to r6 and r8 (we skip r7).
 */
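/*
 * Illustrative userspace invocation sketch (not from this file): the caller
 * places the call number in r12 (negative for a Mach trap, positive for a
 * BSD syscall, per the dispatch below) and the arguments in r0-r6/r8, then
 * issues the SWI:
 *
 *	mov	r12, #MACH_ARM_TRAP_ABSTIME	// or a syscall number
 *	swi	#0
 *	// the result comes back in r0 (r1:r0 for 64-bit returns)
 */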
	.text
	.align 5
	.globl EXT(fleh_swi)

LEXT(fleh_swi)
	cpsid	i, #PSR_ABT_MODE
	mov	sp, ip				// Save ip
	cpsid	i, #PSR_SVC_MODE
	mrs	ip, spsr			// Check the previous mode
	tst	ip, #0x0f
	cpsid	i, #PSR_ABT_MODE
	mov	ip, sp				// Restore ip
	cpsid	i, #PSR_SVC_MODE
	beq	swi_from_user

/* Only user mode can use SWI. Panic if the kernel tries. */
swi_from_kernel:
	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	ALIGN_STACK r0, r1
	adr	r0, L_kernel_swi_panic_str	// Load panic message and panic()
	blx	EXT(panic)
	b	.

swi_from_user:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB


	/* Check for the special mach_absolute_time trap value.
	 * This is intended to be a super-lightweight call to ml_get_timebase(), which
	 * is hand-rolled assembly and does not use the stack, thus not requiring us to set up a kernel stack. */
	cmp	r12, #MACH_ARM_TRAP_ABSTIME
	beq	fleh_swi_trap_tb
	stmia	sp, {r0-r12, sp, lr}^		// Save user context on PCB
	mov	r7, #0				// Zero the frame pointer
	nop
	mov	r8, sp				// Store arm_saved_state pointer
	add	sp, sp, SS_PC
	srsia	sp, #PSR_SVC_MODE
	mrs	r3, cpsr			// Read cpsr
	msr	spsr_cxsf, r3			// Set spsr(svc mode cpsr)
	sub	r9, sp, ACT_PCBDATA_PC

	ldr	sp, [r9, TH_KSTACKPTR]		// Load kernel stack
	mov	r11, r12			// Save the syscall vector in a non-trashed register

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mvn	r0, #0
	str	r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before handling SWI from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	add	r8, r9, ACT_PCBDATA		// Reload arm_saved_state pointer
#endif
	ldr	r10, [r9, ACT_TASK]		// Load the current task

	/* enable interrupts */
	cpsie	i				// Enable IRQ

	cmp	r11, #MACH_ARM_TRAP_CONTTIME	// Special value for mach_continuous_time
	beq	fleh_swi_trap_mct

	cmp	r11, #0x80000000
	beq	fleh_swi_trap
fleh_swi_trap_ret:

#if TRACE_SYSCALL
	/* trace the syscall */
	mov	r0, r8
	bl	EXT(syscall_trace)
#endif

	bl	EXT(mach_kauth_cred_uthread_update)
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	/* unix syscall? */
	rsbs	r5, r11, #0			// Make the syscall number positive (if negative)
	ble	fleh_swi_unix			// Positive syscalls are unix (note reverse logic here)

fleh_swi_mach:
	/* note that mach_syscall_trace can modify r9, so increment the thread
	 * syscall count before the call: */
	ldr	r2, [r9, TH_MACH_SYSCALLS]
	add	r2, r2, #1
	str	r2, [r9, TH_MACH_SYSCALLS]

	LOAD_ADDR(r1, mach_trap_table)		// Load mach_trap_table
#if MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 12
	add	r11, r5, r5, lsl #1		// syscall * 3
	add	r6, r1, r11, lsl #2		// trap_table + syscall * 12
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 16
	add	r6, r1, r5, lsl #4		// trap_table + syscall * 16
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 20
	add	r11, r5, r5, lsl #2		// syscall * 5
	add	r6, r1, r11, lsl #2		// trap_table + syscall * 20
#else
#error mach_trap_t size unhandled (see MACH_TRAP_TABLE_ENTRY_SIZE)!
#endif
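/*
 * Worked example (illustrative): with 12-byte entries, the entry address is
 * computed with shift-and-add instead of a multiply:
 *
 *	offset = n * 12 = (n + (n << 1)) << 2		// (n*3)*4
 *	r6     = &mach_trap_table[n];
 *
 * The dispatch below then reads the trap function pointer at entry offset
 * #4 and the 32-bit argument munger at offset #8 (offsets per the table
 * layout this file assumes).
 */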

#ifndef NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr	r4, [r4]
	movs	r4, r4
	movne	r0, r8				// Ready the reg state pointer as an arg to the call
	movne	r1, r5				// Syscall number as 2nd arg
	COND_EXTERN_BLNE(mach_syscall_trace)
#endif
	adr	lr, fleh_swi_exit		// Any calls from here on out will return to our exit path
	cmp	r5, MACH_TRAP_TABLE_COUNT	// Check syscall number range
	bge	fleh_swi_mach_error

/*
 * For the arm32 ABI where 64-bit types are aligned to even registers and
 * to 64 bits on the stack, we need to unpack registers differently. So
 * we use the mungers for marshalling in arguments from user space.
 * Currently this is just ARMv7k.
 */
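/*
 * Munger call-shape sketch (illustrative; inferred from the register setup
 * below, not a definitive prototype): the munger gets the saved register
 * state in r0 and a scratch argument buffer on the kernel stack in r1,
 * unpacks the user arguments into the buffer, and returns nonzero on
 * failure:
 *
 *	status = munge32(saved_state, args_buffer);
 *	if (status != 0)
 *		goto fleh_swi_mach_error;
 */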
#if __BIGGEST_ALIGNMENT__ > 4
	sub	sp, #0x40			// Allocate buffer and keep stack 128-bit aligned;
						// it should be big enough for all syscall arguments
	ldr	r11, [r6, #8]			// Get mach_trap_table[call_number].mach_trap_arg_munge32
	teq	r11, #0				// Check if we have a munger
	moveq	r0, #0
	movne	r0, r8				// Ready the reg state pointer as an arg to the call
	movne	r1, sp				// Stack will hold arguments buffer
	blxne	r11				// Call munger to get arguments from userspace
	adr	lr, fleh_swi_exit		// Any calls from here on out will return to our exit path
	teq	r0, #0
	bne	fleh_swi_mach_error		// Exit if the munger returned non-zero status
#endif

	ldr	r1, [r6, #4]			// Load the syscall vector

	LOAD_ADDR(r2, kern_invalid)		// Test to make sure the trap is not kern_invalid
	teq	r1, r2
	beq	fleh_swi_mach_error

#if __BIGGEST_ALIGNMENT__ > 4
	mov	r0, sp				// Argument buffer on stack
	bx	r1				// Call the syscall handler
#else
	mov	r0, r8				// Ready the reg state pointer as an arg to the call
	bx	r1				// Call the syscall handler
#endif

fleh_swi_exit64:
	str	r1, [r8, #4]			// Top of 64-bit return
fleh_swi_exit:
	str	r0, [r8]			// Save the return value
#ifndef NO_KDEBUG
	movs	r4, r4
	movne	r1, r5
	COND_EXTERN_BLNE(mach_syscall_trace_exit)
#endif
#if TRACE_SYSCALL
	bl	EXT(syscall_trace_exit)
#endif

	mov	r0, #1
	bl	EXT(throttle_lowpri_io)		// throttle_lowpri_io(1);

	bl	EXT(thread_exception_return)
	b	.

fleh_swi_mach_error:
	mov	r0, #EXC_SYSCALL
	sub	r1, sp, #4
	mov	r2, #1
	bl	EXT(exception_triage)
	b	.

	.align	5
fleh_swi_unix:
	ldr	r1, [r9, TH_UNIX_SYSCALLS]
	mov	r0, r8				// Reg state structure is arg
	add	r1, r1, #1
	str	r1, [r9, TH_UNIX_SYSCALLS]
	mov	r1, r9				// Current thread in arg1
	ldr	r2, [r9, TH_UTHREAD]		// Current uthread in arg2
	ldr	r3, [r10, TASK_BSD_INFO]	// Current proc in arg3
	bl	EXT(unix_syscall)
	b	.

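/*
 * Dispatch note (illustrative): "addls pc, pc, r3, LSL#2" is a classic ARM
 * PC-relative jump table. In ARM mode, reading pc yields the address of the
 * current instruction + 8, i.e. the instruction just past the fallthrough
 * "b fleh_swi_trap_ret"; adding r3*4 therefore lands on the r3'th branch of
 * the table (taken only when r3 <= 3, per the "ls" condition).
 */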
fleh_swi_trap:
	ldmia	r8, {r0-r3}
	cmp	r3, #3
	addls	pc, pc, r3, LSL#2
	b	fleh_swi_trap_ret
	b	icache_invalidate_trap
	b	dcache_flush_trap
	b	thread_set_cthread_trap
	b	thread_get_cthread_trap

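/*
 * Recovery note (illustrative): the two cache traps operate on user-supplied
 * address ranges, so they install cache_trap_jmp as the thread's recovery
 * routine (TH_RECOVER) before touching them. If the cache operation faults,
 * execution resumes at cache_trap_jmp, which converts the fault into an
 * EXC_BAD_ACCESS exception rather than a panic; the previous recovery
 * routine is restored afterwards.
 */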
icache_invalidate_trap:
	add	r3, r0, r1
	cmp	r3, VM_MAX_ADDRESS
	subhi	r3, r3, #1<<MMU_CLINE
	bhi	cache_trap_error
	adr	r11, cache_trap_jmp
	ldr	r6, [r9, TH_RECOVER]		// Save existing recovery routine
	str	r11, [r9, TH_RECOVER]
#if __ARM_USER_PROTECT__
	ldr	r5, [r9, ACT_UPTW_TTB]		// Load thread ttb
	mcr	p15, 0, r5, c2, c0, 0		// Set TTBR0
	ldr	r5, [r9, ACT_ASID]		// Load thread asid
	mcr	p15, 0, r5, c13, c0, 1		// Set CONTEXTIDR
	dsb	ish
	isb
#endif
	bl	EXT(InvalidatePoU_IcacheRegion)
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r4, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r4, c2, c0, 0		// Set TTBR0
	mov	r4, #0				// Load kernel asid
	mcr	p15, 0, r4, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	str	r6, [r9, TH_RECOVER]
	bl	EXT(thread_exception_return)
	b	.

dcache_flush_trap:
	add	r3, r0, r1
	cmp	r3, VM_MAX_ADDRESS
	subhi	r3, r3, #1<<MMU_CLINE
	bhi	cache_trap_error
	adr	r11, cache_trap_jmp
	ldr	r4, [r9, TH_RECOVER]		// Save existing recovery routine
	str	r11, [r9, TH_RECOVER]
#if __ARM_USER_PROTECT__
	ldr	r6, [r9, ACT_UPTW_TTB]		// Load thread ttb
	mcr	p15, 0, r6, c2, c0, 0		// Set TTBR0
	ldr	r5, [r9, ACT_ASID]		// Load thread asid
	mcr	p15, 0, r5, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	bl	EXT(flush_dcache_syscall)
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r5, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r5, c2, c0, 0		// Set TTBR0
	mov	r5, #0				// Load kernel asid
	mcr	p15, 0, r5, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	str	r4, [r9, TH_RECOVER]
	bl	EXT(thread_exception_return)
	b	.

thread_set_cthread_trap:
	bl	EXT(thread_set_cthread_self)
	bl	EXT(thread_exception_return)
	b	.

thread_get_cthread_trap:
	bl	EXT(thread_get_cthread_self)
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	add	r1, r9, ACT_PCBDATA		// Get User PCB
	str	r0, [r1, SS_R0]			// Set return value
	bl	EXT(thread_exception_return)
	b	.

cache_trap_jmp:
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	ldr	r5, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r5, c2, c0, 0		// Set TTBR0
	mov	r5, #0				// Load kernel asid
	mcr	p15, 0, r5, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mrc	p15, 0, r3, c6, c0		// Read Fault Address
cache_trap_error:
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	add	r0, r9, ACT_PCBDATA		// Get User PCB
	ldr	r1, [r0, SS_PC]			// Save user mode pc register as pc
	sub	r1, r1, #4			// Backtrack current pc
	str	r1, [r0, SS_PC]			// pc at cache assist swi
	str	r3, [r0, SS_VADDR]		// Fault Address
	mov	r0, #EXC_BAD_ACCESS
	mov	r2, KERN_INVALID_ADDRESS
	sub	sp, sp, #8
	mov	r1, sp
	str	r2, [sp]
	str	r3, [sp, #4]
	ALIGN_STACK r2, r3
	mov	r2, #2
	bl	EXT(exception_triage)
	b	.

fleh_swi_trap_mct:
	bl	EXT(mach_continuous_time)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	add	r9, r9, ACT_PCBDATA_R0		// Get User register state
	stmia	r9, {r0, r1}			// Set 64-bit return value
	bl	EXT(thread_exception_return)
	b	.

fleh_swi_trap_tb:
	str	lr, [sp, SS_PC]
	bl	EXT(ml_get_timebase)		// ml_get_timebase() (64-bit return)
	ldr	lr, [sp, SS_PC]
	nop
	movs	pc, lr				// Return to user

	.align	2
L_kernel_swi_panic_str:
	.asciz	"fleh_swi: took SWI from kernel mode\n"
	.align	2

/*
 * First Level Exception Handler for Prefetch Abort.
 */
	.text
	.align 2
	.globl EXT(fleh_prefabt)

LEXT(fleh_prefabt)
VERIFY_EXCEPTION_MODE PSR_ABT_MODE
	sub	lr, lr, #4

IF_USERMODE_EXCEPTION prefabt
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB

	stmia	sp, {r0-r12, sp, lr}^		// Save user context on PCB
	mov	r7, #0				// Zero the frame pointer
	nop
	mov	r0, sp				// Store arm_saved_state pointer
						// for argument
	str	lr, [sp, SS_PC]			// Save user mode pc register as pc
	mrc	p15, 0, r1, c6, c0, 2		// Read IFAR
	str	r1, [sp, SS_VADDR]		// and fault address of pcb

	mrc	p15, 0, r5, c5, c0, 1		// Read Fault Status
	str	r5, [sp, SS_STATUS]		// Save fault status register to pcb

	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]		// Save user mode cpsr

	cpsid	i, #PSR_SVC_MODE
	mrs	r3, cpsr			// Read cpsr
	msr	spsr_cxsf, r3			// Set spsr(svc mode cpsr)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [r9, TH_KSTACKPTR]		// Load kernel stack

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r3, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r3			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mvn	r0, #0
	str	r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif

	add	r0, r9, ACT_PCBDATA		// Reload arm_saved_state pointer
	mov	r1, T_PREFETCH_ABT		// Pass abort type
	bl	EXT(sleh_abort)			// Call second level handler
						// sleh will enable interrupts
	b	load_and_go_user

ELSE_IF_KERNELMODE_EXCEPTION prefabt

UNWIND_PROLOGUE

	/*
	 * We already have a kernel stack; use it to save the context:
	 *    ------------------
	 *   | VFP saved state  |
	 *   |------------------|
	 *   | ARM saved state  |
	 * SP ------------------
	 *
	 * IRQ is disabled.
	 */
	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r3, r10
	beq	1f
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc	p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mrs	r4, lr_abt
	str	r4, [sp, SS_PC]			// Save pc

	mrc	p15, 0, r5, c6, c0, 2		// Read IFAR
	str	r5, [sp, SS_VADDR]		// and fault address of pcb
	mrc	p15, 0, r5, c5, c0, 1		// Read (instruction) Fault Status
	str	r5, [sp, SS_STATUS]		// Save fault status register to pcb

	mrs	r4, spsr_abt
	str	r4, [sp, SS_CPSR]

	mov	r0, sp
	ALIGN_STACK r1, r2
	mov	r1, T_PREFETCH_ABT		// Pass abort type


UNWIND_DIRECTIVES

	bl	EXT(sleh_abort)			// Call second level handler
	UNALIGN_STACK

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif

	b	load_and_go_sys

UNWIND_EPILOGUE

/*
 * First Level Exception Handler for Data Abort
 */
	.text
	.align 2
	.globl EXT(fleh_dataabt)

LEXT(fleh_dataabt)
VERIFY_EXCEPTION_MODE PSR_ABT_MODE
	sub	lr, lr, #8
IF_USERMODE_EXCEPTION dataabt
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB

	stmia	sp, {r0-r12, sp, lr}^		// Save user context on PCB
	mov	r7, #0				// Zero the frame pointer
	nop

	mov	r0, sp				// Store arm_saved_state pointer
						// for argument

	str	lr, [sp, SS_PC]			// Save user mode pc register

	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]		// Save user mode cpsr

	mrc	p15, 0, r5, c5, c0		// Read Fault Status
	mrc	p15, 0, r6, c6, c0		// Read Fault Address
	str	r5, [sp, SS_STATUS]		// Save fault status register to pcb
	str	r6, [sp, SS_VADDR]		// Save fault address to pcb

	cpsid	i, #PSR_SVC_MODE
	mrs	r3, cpsr			// Read cpsr
	msr	spsr_cxsf, r3			// Set spsr(svc mode cpsr)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [r9, TH_KSTACKPTR]		// Load kernel stack

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r3, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r3			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mvn	r0, #0
	str	r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif

	add	r0, r9, ACT_PCBDATA		// Reload arm_saved_state pointer
	mov	r1, T_DATA_ABT			// Pass abort type
	bl	EXT(sleh_abort)			// Call second level handler
						// sleh will enable irq
	b	load_and_go_user

ELSE_IF_KERNELMODE_EXCEPTION dataabt

UNWIND_PROLOGUE

	/*
	 * We already have a kernel stack; use it to save the context:
	 *    ------------------
	 *   | VFP saved state  |
	 *   |------------------|
	 *   | ARM saved state  |
	 * SP ------------------
	 *
	 * IRQ is disabled.
	 */
	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif

	mrs	r4, lr_abt
	str	r4, [sp, SS_PC]
	mrs	r4, spsr_abt
	str	r4, [sp, SS_CPSR]

#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r3, r10
	beq	1f
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc	p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mrc	p15, 0, r5, c5, c0		// Read Fault Status
	mrc	p15, 0, r6, c6, c0		// Read Fault Address
	str	r5, [sp, SS_STATUS]		// Save fault status register to pcb
	str	r6, [sp, SS_VADDR]		// Save fault address to pcb

	mov	r0, sp				// Argument
	ALIGN_STACK r1, r2
	mov	r1, T_DATA_ABT			// Pass abort type

UNWIND_DIRECTIVES

	bl	EXT(sleh_abort)			// Call second level handler
	UNALIGN_STACK

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif

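/*
 * Common return path for exceptions taken from kernel mode. Rough sketch of
 * the logic below (illustrative, inferred from the code): if the interrupted
 * context had IRQs enabled, preemption is not disabled, and an urgent AST is
 * pending, handle the AST before restoring state:
 *
 *	if (!(saved_cpsr & PSR_IRQF) && preempt_cnt == 0 &&
 *	    (pending_ast & AST_URGENT))
 *		ast_taken_kernel();
 *	// then reload VFP and GPR state and return via "movs pc, lr"
 */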
load_and_go_sys:
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

	ldr	r4, [sp, SS_CPSR]		// Load saved cpsr
	tst	r4, #PSR_IRQF			// Test IRQ set
	bne	lags1				// Branch if IRQ disabled

	cpsid	i				// Disable IRQ
	ldr	r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	movs	r2, r2				// Test if null
	ldr	r8, [r9, ACT_CPUDATAP]		// Get current cpu
	bne	lags1				// Branch if count not null
	ldr	r5, [r8, CPU_PENDING_AST]	// Get ASTs
	ands	r5, r5, AST_URGENT		// Get the requests we do honor
	beq	lags1				// Branch if no ASTs
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r3, r10
	beq	1f
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc	p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	ldr	lr, [sp, SS_LR]			// Restore the link register
	stmfd	sp!, {r7, lr}			// Push a fake frame

	ALIGN_STACK r2, r3
	bl	EXT(ast_taken_kernel)		// Handle AST_URGENT
	UNALIGN_STACK

	ldmfd	sp!, {r7, lr}			// Pop the fake frame
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	ldr	r8, [r9, ACT_CPUDATAP]		// Get current cpu
#if __ARM_USER_PROTECT__
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif
lags1:
	ldr	lr, [sp, SS_LR]

	mov	ip, sp				// Save pointer to contexts for abort mode
	ldr	sp, [ip, SS_SP]			// Restore stack pointer

	cpsid	if, #PSR_ABT_MODE

	mov	sp, ip

	ldr	r4, [sp, SS_CPSR]
	msr	spsr_cxsf, r4			// Restore spsr

	clrex					// clear exclusive memory tag
#if __ARM_ENABLE_WFE_
	sev
#endif

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_load)			// Load the desired VFP state from the stack
#endif

	ldr	lr, [sp, SS_PC]			// Restore lr

	ldmia	sp, {r0-r12}			// Restore other registers

	movs	pc, lr				// Return to sys (svc, irq, fiq)

UNWIND_EPILOGUE

/*
 * First Level Exception Handler for address exception
 * Not supported
 */
	.text
	.align 2
	.globl EXT(fleh_addrexc)

LEXT(fleh_addrexc)
	b	.


/*
 * First Level Exception Handler for IRQ
 * Current mode : IRQ
 * IRQ and FIQ are always disabled while running in the IRQ handler;
 * we do not permit nested interrupts.
 *
 * Saving area: from user   : PCB.
 *              from kernel : interrupt stack.
 */

	.text
	.align 2
	.globl EXT(fleh_irq)

LEXT(fleh_irq)
	sub	lr, lr, #4

	cpsie	a				// Re-enable async aborts

	mrs	sp, spsr
	tst	sp, #0x0f			// From user? or kernel?
	bne	fleh_irq_kernel

fleh_irq_user:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB
	stmia	sp, {r0-r12, sp, lr}^
	mov	r7, #0				// Zero the frame pointer
	nop
	str	lr, [sp, SS_PC]
	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]
	mov	r5, sp				// Saved context in r5
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	r6, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [r6, CPU_ISTACKPTR]		// Set interrupt stack
	cpsid	i, #PSR_SVC_MODE
	ldr	sp, [r9, TH_KSTACKPTR]		// Set kernel stack
	cpsid	i, #PSR_IRQ_MODE

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif
#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)	// Check if a telemetry record was requested...
	mov	r0, #1
	ldr	r2, [r2]
	movs	r2, r2
	beq	1f
	mov	r1, #0				// (not a PMI record)
	bl	EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc	p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

	b	fleh_irq_handler

fleh_irq_kernel:
	cpsid	i, #PSR_SVC_MODE

	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mrc	p15, 0, r11, c13, c0, 1		// Get CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov	r5, sp				// Saved context in r5

	cpsid	i, #PSR_IRQ_MODE

	str	lr, [r5, SS_PC]			// Save LR as the return PC
	mrs	r4, spsr
	str	r4, [r5, SS_CPSR]		// Save the cpsr of the interrupted mode

	ldr	sp, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [sp, CPU_ISTACKPTR]		// Set interrupt stack

#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)	// Check if a telemetry record was requested...
	mov	r0, #0
	ldr	r2, [r2]
	movs	r2, r2
	beq	1f
	mov	r1, #0				// (not a PMI record)
	bl	EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc	p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

fleh_irq_handler:
	ldr	r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	add	r2, r2, #1			// Increment count
	str	r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count
#ifndef NO_KDEBUG
	LOAD_ADDR(r8, kdebug_enable)
	ldr	r8, [r8]
	movs	r8, r8
	movne	r0, r5
	COND_EXTERN_BLNE(interrupt_trace)
#endif
	bl	EXT(interrupt_stats)		// Record interrupt statistics
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	ldr	r4, [r9, ACT_CPUDATAP]		// Get current cpu
	str	r5, [r4, CPU_INT_STATE]		// Saved context in cpu_int_state
	ldr	r3, [r4, CPU_STAT_IRQ]		// Get IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r4, CPU_STAT_IRQ]		// Update IRQ count
	ldr	r3, [r4, CPU_STAT_IRQ_WAKE]	// Get post-wake IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r4, CPU_STAT_IRQ_WAKE]	// Update post-wake IRQ count
	ldr	r0, [r4, INTERRUPT_TARGET]
	ldr	r1, [r4, INTERRUPT_REFCON]
	ldr	r2, [r4, INTERRUPT_NUB]
	ldr	r3, [r4, INTERRUPT_SOURCE]
	ldr	r5, [r4, INTERRUPT_HANDLER]	// Call second level exception handler
	blx	r5
#ifndef NO_KDEBUG
	movs	r8, r8
	COND_EXTERN_BLNE(interrupt_trace_exit)
#endif
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	bl	EXT(ml_get_timebase)		// Get current timebase
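	/*
	 * Harvest interrupt timing into the entropy buffer. Rough C sketch
	 * of the sequence below (illustrative; field names follow the assym
	 * offsets used here):
	 *
	 *	i   = EntropyData.sample_count & EntropyData.buffer_index_mask;
	 *	EntropyData.sample_count++;
	 *	old = EntropyData.buffer[i] & EntropyData.buffer_ror_mask;
	 *	EntropyData.buffer[i] = timebase ^ ror32(old, 9);
	 */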
	LOAD_ADDR(r3, EntropyData)
	ldr	r1, [r3, ENTROPY_SAMPLE_COUNT]
	ldr	r2, [r3, ENTROPY_BUFFER_INDEX_MASK]
	add	r4, r1, 1
	and	r5, r1, r2
	str	r4, [r3, ENTROPY_SAMPLE_COUNT]
	ldr	r1, [r3, ENTROPY_BUFFER]
	ldr	r2, [r3, ENTROPY_BUFFER_ROR_MASK]
	ldr	r4, [r1, r5, lsl #2]
	and	r4, r4, r2
	eor	r0, r0, r4, ror #9
	str	r0, [r1, r5, lsl #2]

return_from_irq:
	mov	r5, #0
	ldr	r4, [r9, ACT_CPUDATAP]		// Get current cpu
	str	r5, [r4, CPU_INT_STATE]		// Clear cpu_int_state
	ldr	r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
#if MACH_ASSERT
	cmp	r2, #0				// Verify positive count
	bgt	1f
	push	{r7, lr}
	mov	r7, sp
	adr	r0, L_preemption_count_zero_str
	blx	EXT(panic)
	b	.
1:
#endif
	sub	r2, r2, #1			// Decrement count
	str	r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count

	mrs	r0, spsr			// Check the previous mode

	cpsid	i, #PSR_SVC_MODE

	tst	r0, #0x0f			// Check if the previous mode is user
	ldreq	sp, [r9, TH_KSTACKPTR]		// ...if so, reload the kernel stack pointer
	beq	load_and_go_user		// ...and return

#if __ARM_USER_PROTECT__
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif
	b	load_and_go_sys

	.align 2
L_preemption_count_zero_str:
	.ascii	"locore.s: preemption count is zero \000"
	.align 2
/*
 * First Level Exception Handler for DEC
 * Current mode : IRQ
 * IRQ and FIQ are always disabled while running in the IRQ handler;
 * we do not permit nested interrupts.
 *
 * Saving area: from user   : PCB.
 *              from kernel : interrupt stack.
 */

	.text
	.align 2
	.globl EXT(fleh_decirq)

LEXT(fleh_decirq)
	sub	lr, lr, #4

	cpsie	af				// Re-enable async aborts/FIQ

	mrs	sp, spsr
	tst	sp, #0x0f			// From user? or kernel?
	bne	fleh_decirq_kernel

fleh_decirq_user:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB
	stmia	sp, {r0-r12, sp, lr}^
	mov	r7, #0				// Zero the frame pointer
	nop
	str	lr, [sp, SS_PC]
	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]
	mov	r5, sp				// Saved context in r5
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	r6, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [r6, CPU_ISTACKPTR]		// Set interrupt stack
	cpsid	i, #PSR_SVC_MODE
	ldr	sp, [r9, TH_KSTACKPTR]		// Set kernel stack
	cpsid	i, #PSR_IRQ_MODE

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif
#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)	// Check if a telemetry record was requested...
	mov	r0, #1
	ldr	r2, [r2]
	movs	r2, r2
	beq	1f
	mov	r1, #0				// (not a PMI record)
	bl	EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc	p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

	b	fleh_decirq_handler

fleh_decirq_kernel:
	cpsid	i, #PSR_SVC_MODE

	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mrc	p15, 0, r11, c13, c0, 1		// Get CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov	r5, sp				// Saved context in r5

	cpsid	i, #PSR_IRQ_MODE

	str	lr, [r5, SS_PC]			// Save LR as the return PC
	mrs	r4, spsr
	str	r4, [r5, SS_CPSR]		// Save the cpsr of the interrupted mode

	ldr	sp, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [sp, CPU_ISTACKPTR]		// Set interrupt stack

#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)	// Check if a telemetry record was requested...
	mov	r0, #0
	ldr	r2, [r2]
	movs	r2, r2
	beq	1f
	mov	r1, #0				// (not a PMI record)
	bl	EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc	p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

fleh_decirq_handler:
	ldr	r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	add	r2, r2, #1			// Increment count
	str	r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count
	ldr	r2, [r9, ACT_CPUDATAP]		// Get current cpu
	str	r5, [r2, CPU_INT_STATE]		// Saved context in cpu_int_state
	ldr	r3, [r2, CPU_STAT_IRQ]		// Get IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r2, CPU_STAT_IRQ]		// Update IRQ count
	ldr	r3, [r2, CPU_STAT_IRQ_WAKE]	// Get post-wake IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r2, CPU_STAT_IRQ_WAKE]	// Update post-wake IRQ count
#ifndef NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr	r4, [r4]
	movs	r4, r4
	movne	r0, r5				// Pass saved context
	COND_EXTERN_BLNE(interrupt_trace)
#endif
	bl	EXT(interrupt_stats)		// Record interrupt statistics
	mov	r0, #0
	bl	EXT(rtclock_intr)		// Call second level exception handler
#ifndef NO_KDEBUG
	movs	r4, r4
	COND_EXTERN_BLNE(interrupt_trace_exit)
#endif

	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW

	b	return_from_irq


/*
 * First Level Exception Handler for FIQ
 * Current mode : FIQ
 * IRQ and FIQ are always disabled while running in the FIQ handler;
 * we do not permit nested interrupts.
 *
 * Saving area: from user   : PCB.
 *              from kernel : interrupt stack.
 *
 * FIQ mode banks seven extra (shadow) registers for fast service, so only
 * r0-r7 and LR need to be saved here. However, if the current thread was
 * running in user mode before the FIQ interrupt, all user registers are
 * saved for the AST handler routine.
 */
	.text
	.align 2
	.globl EXT(fleh_fiq_generic)

LEXT(fleh_fiq_generic)
	str	r11, [r10]			// Clear the FIQ source
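	/*
	 * Software 64-bit timebase update (illustrative sketch of the code
	 * below): "adds" sets the Z flag only when TBL wraps to zero, so the
	 * eq-conditional instructions carry the increment into TBU:
	 *
	 *	if (++cpu->timebase_low == 0)
	 *		cpu->timebase_high++;
	 *	if (--cpu->decrementer < 0)
	 *		fleh_dec();	// otherwise return from the FIQ
	 */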
1597
1598 ldr r13, [r8, CPU_TIMEBASE_LOW] // Load TBL
1599 adds r13, r13, #1 // Increment TBL
1600 str r13, [r8, CPU_TIMEBASE_LOW] // Store TBL
1601 ldreq r13, [r8, CPU_TIMEBASE_HIGH] // Load TBU
1602 addeq r13, r13, #1 // Increment TBU
1603 streq r13, [r8, CPU_TIMEBASE_HIGH] // Store TBU
1604 subs r12, r12, #1 // Decrement, DEC
1605 str r12, [r8, CPU_DECREMENTER] // Store DEC
1606 subspl pc, lr, #4 // Return unless DEC < 0
1607 b EXT(fleh_dec)
1608
1609 .text
1610 .align 2
1611 .globl EXT(fleh_dec)
1612LEXT(fleh_dec)
1613 mrs sp, spsr // Get the spsr
1614 sub lr, lr, #4
1615 tst sp, #0x0f // From user? or kernel?
1616 bne 2f
1617
1618 /* From user */
1619 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
1620 add sp, sp, ACT_PCBDATA // Get User PCB
1621
1622 stmia sp, {r0-r12, sp, lr}^
1623 mov r7, #0 // Zero the frame pointer
1624 nop
1625 str lr, [sp, SS_PC]
1626
1627 mrs r4, spsr
1628 str r4, [sp, SS_CPSR]
1629 mov r5, sp
1630 sub sp, sp, ACT_PCBDATA // Get User PCB
1631 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu
1632 ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack
1633 mov r6, sp
1634 cpsid i, #PSR_SVC_MODE
1635 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1636 ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack
1637
1638#if __ARM_VFP__
1639 add r0, r9, ACT_UVFP // Get the address of the user VFP save area
1640 bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
1641 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1642 fmxr fpscr, r4 // And shove it into FPSCR
1643#endif
1644#if __ARM_USER_PROTECT__
1645 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1646 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1647 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1648 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
1649 mov r3, #0 // Load kernel asid
1650 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1651 isb
1652#endif
1653 mov r0, #1 // Mark this as coming from user context
1654 b 4f
1655
16562:
1657 /* From kernel */
1658 tst sp, #PSR_IRQF // Test for IRQ masked
1659 bne 3f // We're on the cpu_signal path
1660
1661 cpsid if, #PSR_SVC_MODE
1662
1663 sub sp, sp, EXC_CTX_SIZE
1664 stmia sp, {r0-r12}
1665 add r0, sp, EXC_CTX_SIZE
1666
1667 str r0, [sp, SS_SP] // Save supervisor mode sp
1668 str lr, [sp, SS_LR] // Save supervisor mode lr
1669
1670 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1671
1672#if __ARM_VFP__
1673 add r0, sp, SS_SIZE // Get vfp state pointer
1674 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1675 add r0, VSS_ALIGN // Get the actual vfp save area
1676 bl EXT(vfp_save) // Save the current VFP state to the stack
1677 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1678 fmxr fpscr, r4 // And shove it into FPSCR
1679#endif
1680#if __ARM_USER_PROTECT__
1681 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1682 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1683 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1684 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
1685 mov r3, #0 // Load kernel asid
1686 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1687 isb
1688#endif
1689 mov r5, sp // Saved context in r5
1690
1691 cpsid if, #PSR_FIQ_MODE
1692
1693 mrc p15, 0, r1, c13, c0, 4 // Read TPIDRPRW
1694
1695 str lr, [r5, SS_PC] // Save LR as the return PC
1696 mrs r4, spsr
1697 str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode
1698
1699 ldr r6, [r1, ACT_CPUDATAP] // Get current cpu
1700 ldr r6, [r6, CPU_ISTACKPTR] // Set interrupt stack
1701
1702 mov r0, #0 // Mark this as coming from kernel context
1703 b 4f
1704
17053:
1706 /* cpu_signal path */
1707 mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
1708 ldr sp, [sp, ACT_CPUDATAP] // Get current cpu
1709 ldr sp, [sp, CPU_FIQSTACKPTR] // Set fiq stack
1710 sub sp, sp, EXC_CTX_SIZE
1711 stmia sp, {r0-r12}
1712 str lr, [sp, SS_PC]
1713 mrs r4, spsr
1714 str r4, [sp, SS_CPSR]
1715 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1716
1717#if __ARM_VFP__
1718 add r0, sp, SS_SIZE // Get vfp state pointer
1719 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1720 add r0, VSS_ALIGN // Get the actual vfp save area
1721 bl EXT(vfp_save) // Save the current VFP state to the stack
1722 mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
1723 fmxr fpscr, r4 // And shove it into FPSCR
1724#endif
1725#if __ARM_USER_PROTECT__
1726 mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
1727 ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
1728 mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1729 mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
1730 mov r3, #0 // Load kernel asid
1731 mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
1732 isb
1733#endif
a39ff7e2
A
1734
1735 ALIGN_STACK r0, r1
5ba3f43e
A
1736 mov r0, r8 // Get current cpu in arg 0
1737 mov r1, SIGPdec // Decrementer signal in arg1
1738 mov r2, #0
1739 mov r3, #0
1740 bl EXT(cpu_signal) // Call cpu_signal
a39ff7e2 1741 UNALIGN_STACK
5ba3f43e
A
1742
1743 mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1744
1745#if __ARM_VFP__
1746 add r0, sp, SS_SIZE // Get vfp state pointer
1747 bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
1748 add r0, VSS_ALIGN // Get the actual vfp save area
1749 bl EXT(vfp_load) // Load the desired VFP state from the stack
1750#endif
1751
1752 clrex // clear exclusive memory tag
1753#if __ARM_ENABLE_WFE_
1754 sev
1755#endif
1756#if __ARM_USER_PROTECT__
1757 mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
1758 mcr p15, 0, r11, c13, c0, 1 // Set CONTEXTIDR
1759 isb
1760#endif
1761 ldr lr, [sp, SS_PC]
1762 ldmia sp, {r0-r12} // Restore saved registers
1763 movs pc, lr // Return from fiq

4:
        cpsid   i, #PSR_IRQ_MODE
        cpsie   f
        mov     sp, r6                          // Restore the stack pointer
        ALIGN_STACK r2, r3
        msr     spsr_cxsf, r4                   // Restore the spsr
        ldr     r2, [r9, ACT_PREEMPT_CNT]       // Load preemption count
        add     r2, r2, #1                      // Increment count
        str     r2, [r9, ACT_PREEMPT_CNT]       // Update preemption count
        ldr     r4, [r9, ACT_CPUDATAP]          // Get current cpu
        str     r5, [r4, CPU_INT_STATE]         // Stash the interrupt saved context
        ldr     r3, [r4, CPU_STAT_IRQ]          // Get IRQ count
        add     r3, r3, #1                      // Increment count
        str     r3, [r4, CPU_STAT_IRQ]          // Update IRQ count
        ldr     r3, [r4, CPU_STAT_IRQ_WAKE]     // Get post-wake IRQ count
        add     r3, r3, #1                      // Increment count
        str     r3, [r4, CPU_STAT_IRQ_WAKE]     // Update post-wake IRQ count
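/*
 * The bookkeeping above, written as C; field names mirror the assym.s
 * offsets, and the struct layout is an assumption, not quoted from headers:
 *
 *     thread->preempt_count += 1;                   // ACT_PREEMPT_CNT
 *     cpu = thread->cpu_datap;                      // ACT_CPUDATAP
 *     cpu->int_state = saved_context;               // r5 from the save above
 *     cpu->stat_irq += 1;                           // CPU_STAT_IRQ
 *     cpu->stat_irq_wake += 1;                      // CPU_STAT_IRQ_WAKE
 */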
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
        movs    r0, r0                          // Test our "from_user" flag
        beq     5f                              // Skip if we came from kernel context
        mov     r8, r0                          // Stash our "from_user" boolean value
        bl      EXT(timer_state_event_user_to_kernel)
        mov     r0, r8                          // Restore our "from_user" value
        mrc     p15, 0, r9, c13, c0, 4          // Read TPIDRPRW
5:
#endif
#if CONFIG_TELEMETRY
        LOAD_ADDR(r4, telemetry_needs_record)   // Check if a telemetry record was requested...
        ldr     r4, [r4]
        movs    r4, r4
        beq     6f
        mov     r1, #0                          // (not a PMI record)
        bl      EXT(telemetry_mark_curthread)   // ...if so, mark the current thread...
        mrc     p15, 0, r9, c13, c0, 4          // ...and restore the thread pointer from TPIDRPRW
6:
#endif
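/*
 * C shape of the telemetry gate above; argument meanings follow the r0/r1
 * setup in the asm, and the exact signature should be treated as an
 * assumption:
 *
 *     if (telemetry_needs_record)
 *         telemetry_mark_curthread(from_user, FALSE);   // FALSE: not a PMI
 */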

#ifndef NO_KDEBUG
        LOAD_ADDR(r4, kdebug_enable)
        ldr     r4, [r4]
        movs    r4, r4
        ldrne   r1, [r9, ACT_CPUDATAP]          // Get current cpu
        ldrne   r0, [r1, CPU_INT_STATE]
        COND_EXTERN_BLNE(interrupt_trace)
#endif
        bl      EXT(interrupt_stats)            // Record interrupt statistics
        mov     r0, #0
        bl      EXT(rtclock_intr)               // Call second level exception handler
#ifndef NO_KDEBUG
        movs    r4, r4
        COND_EXTERN_BLNE(interrupt_trace_exit)
#endif
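/*
 * The tracing bracket above, approximately in C; argument order follows the
 * ldrne/mov register setup, and the signatures are assumptions rather than
 * quotes from the headers:
 *
 *     if (kdebug_enable)
 *         interrupt_trace(cpu->int_state, cpu);     // entry event
 *     interrupt_stats();                            // interrupt statistics
 *     rtclock_intr(0);                              // second-level handler
 *     if (kdebug_enable)
 *         interrupt_trace_exit();                   // exit event
 */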
        UNALIGN_STACK

        mrc     p15, 0, r9, c13, c0, 4          // Reload r9 from TPIDRPRW

        b       return_from_irq

/*
 * void thread_syscall_return(kern_return_t r0)
 *
 */
        .text
        .align 2
        .globl EXT(thread_syscall_return)

LEXT(thread_syscall_return)
        mrc     p15, 0, r9, c13, c0, 4          // Read TPIDRPRW
        add     r1, r9, ACT_PCBDATA             // Get User PCB
        str     r0, [r1, SS_R0]                 // Set return value
#ifndef NO_KDEBUG
        LOAD_ADDR(r4, kdebug_enable)
        ldr     r4, [r4]
        movs    r4, r4
        beq     load_and_go_user
        ldr     r12, [r1, SS_R12]               // Load syscall number
        rsbs    r1, r12, #0                     // Negate the trap number (Mach traps are negative)
        COND_EXTERN_BLGT(mach_syscall_trace_exit)
#endif
        b       load_and_go_user
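/*
 * Sketch of how a C-side Mach trap handler hands its result back through
 * this routine; the trap name below is illustrative only:
 *
 *     kern_return_t kr = example_mach_trap(args);   // hypothetical handler
 *     thread_syscall_return(kr);                    // stores kr in the saved
 *                                                   // r0, traces the exit,
 *                                                   // and does not return
 */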

/*
 * void thread_exception_return(void)
 * void thread_bootstrap_return(void)
 *
 */
        .text
        .globl EXT(thread_exception_return)
        .globl EXT(thread_bootstrap_return)

LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
        bl      EXT(dtrace_thread_bootstrap)
#endif
        // Fall through

LEXT(thread_exception_return)

load_and_go_user:
/*
 * Restore user mode state and return to user mode
 */
        cpsid   i                               // Disable irq
        mrc     p15, 0, r9, c13, c0, 4          // Read TPIDRPRW

        mvn     r0, #0
        str     r0, [r9, TH_IOTIER_OVERRIDE]    // Reset IO tier override to -1 before returning to user

        ldr     r8, [r9, ACT_CPUDATAP]          // Get current cpu
        ldr     r5, [r8, CPU_PENDING_AST]       // Get ASTs
        cmp     r5, #0                          // Test if ASTs pending
        beq     return_to_user_now              // Branch if no ASTs

        bl      EXT(ast_taken_user)             // Handle all ASTs (may continue via thread_exception_return)

        mrc     p15, 0, r9, c13, c0, 4          // Reload r9 from TPIDRPRW
        b       load_and_go_user                // Loop back
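/*
 * The AST loop above, as C-level pseudocode; the helper names are
 * illustrative, the loop structure is what the branches implement:
 *
 *     for (;;) {
 *         disable_interrupts();                     // cpsid i
 *         if (cpu->pending_ast == 0)
 *             break;                                // return_to_user_now
 *         ast_taken_user();                         // may block or redispatch
 *     }
 */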

return_to_user_now:

#if MACH_ASSERT
/*
 * Assert that the preemption level is zero prior to the return to user space
 */
        ldr     r1, [r9, ACT_PREEMPT_CNT]       // Load preemption count
        cmp     r1, #0                          // Test
        bne     L_lagu_preempt_panic            // Panic if not zero

/*
 * Assert that the RW lock count is zero prior to the return to user space
 */
        ldr     r2, [r9, TH_RWLOCK_CNT]         // Load RW lock count
        cmp     r2, #0                          // Test
        bne     L_lagu_rwlock_cnt_panic         // Panic if not zero
#endif

/*
 * Assert that we aren't leaking KHEAP_TEMP allocations prior to the return to user space
 */
        ldr     r1, [r9, TH_TMP_ALLOC_CNT]      // Load temp alloc count
        cmp     r1, #0                          // Test
        bne     L_lagu_temp_alloc_cnt_panic     // Panic if not zero

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
        bl      EXT(timer_state_event_kernel_to_user)
        mrc     p15, 0, r9, c13, c0, 4          // Read TPIDRPRW
        ldr     r8, [r9, ACT_CPUDATAP]          // Get current cpu data
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
#if __ARM_DEBUG__ >= 6
        ldr     r0, [r9, ACT_DEBUGDATA]
        ldr     r6, [r8, CPU_USER_DEBUG]
        cmp     r0, r6                          // Test if debug registers need to be changed
        beq     1f
        bl      EXT(arm_debug_set)              // Argument is already in r0
        mrc     p15, 0, r9, c13, c0, 4          // Read TPIDRPRW
1:
#endif
#if __ARM_VFP__
        add     r0, r9, ACT_UVFP                // Get the address of the user VFP save area
        bl      EXT(vfp_load)                   // Load the desired VFP state from ACT_UVFP
#endif
        add     r0, r9, ACT_PCBDATA             // Get User PCB
        ldr     r4, [r0, SS_CPSR]               // Get saved cpsr
        and     r3, r4, #PSR_MODE_MASK          // Extract current mode
        cmp     r3, #PSR_USER_MODE              // Check user mode
        movne   r0, r3
        bne     EXT(ExceptionVectorPanic)
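/*
 * The mode check above, in C terms; the constants are the proc_reg.h masks
 * used by the asm, and the PCB field name is a stand-in for the SS_CPSR
 * offset:
 *
 *     uint32_t cpsr = pcb->save_cpsr;               // SS_CPSR
 *     if ((cpsr & PSR_MODE_MASK) != PSR_USER_MODE)
 *         ExceptionVectorPanic(cpsr & PSR_MODE_MASK);
 */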

        msr     spsr_cxsf, r4                   // Restore spsr (user mode cpsr)
        mov     sp, r0                          // Point sp at the User PCB

        clrex                                   // clear exclusive memory tag
#if __ARM_ENABLE_WFE_
        sev
#endif
#if __ARM_USER_PROTECT__
        ldr     r3, [r9, ACT_UPTW_TTB]          // Load thread ttb
        mcr     p15, 0, r3, c2, c0, 0           // Set TTBR0
        ldr     r2, [r9, ACT_ASID]              // Load thread asid
        mcr     p15, 0, r2, c13, c0, 1          // Set CONTEXTIDR
        isb
#endif
        ldr     lr, [sp, SS_PC]                 // Restore user mode pc
        ldmia   sp, {r0-r12, sp, lr}^           // Restore the other user mode registers
        nop                                     // Hardware problem
        movs    pc, lr                          // Return to user

/*
 * r1: tmp alloc count
 * r9: current_thread()
 */
L_lagu_temp_alloc_cnt_panic:
        mov     r0, r9                          // Thread argument
        blx     EXT(kheap_temp_leak_panic)      // Finally, panic

#if MACH_ASSERT
/*
 * r1: current preemption count
 * r9: current_thread()
 */
L_lagu_preempt_panic:
        adr     r0, L_lagu_preempt_panic_str    // Load the panic string...
        blx     EXT(panic)                      // Finally, panic

/*
 * r2: rwlock count
 * r9: current_thread()
 */
L_lagu_rwlock_cnt_panic:
        adr     r0, L_lagu_rwlock_cnt_panic_str // Load the panic string...
        mov     r1, r9                          // Thread argument for panic string
        blx     EXT(panic)                      // Finally, panic

        .align 2
L_lagu_preempt_panic_str:
        .asciz  "load_and_go_user: preemption_level %d"
        .align 2

        .align 2
L_lagu_rwlock_cnt_panic_str:
        .asciz  "load_and_go_user: RW lock count not 0 on thread %p (%u)"
        .align 2
#endif /* MACH_ASSERT */

        .align 2
L_evimpanic_str:
        .ascii  "Exception Vector: Illegal Mode: 0x%08X\n\000"
        .align 2

        .text
        .align 2
        .globl EXT(ExceptionVectorPanic)

LEXT(ExceptionVectorPanic)
        cpsid   i, #PSR_SVC_MODE
        ALIGN_STACK r1, r2
        mov     r1, r0
        adr     r0, L_evimpanic_str
        blx     EXT(panic)
        b       .
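/*
 * Functionally, ExceptionVectorPanic is the C one-liner below; the asm
 * wrapper exists only to reach SVC mode and align the stack before calling
 * into C:
 *
 *     void ExceptionVectorPanic(unsigned int mode) {
 *         panic("Exception Vector: Illegal Mode: 0x%08X\n", mode);
 *     }
 */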

#include "globals_asm.h"

LOAD_ADDR_GEN_DEF(mach_trap_table)
LOAD_ADDR_GEN_DEF(kern_invalid)

/* vim: set ts=4: */