/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <pexpert/arm/board_config.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <mach_assert.h>
#include <config_dtrace.h>
#include "assym.s"

#define TRACE_SYSCALL 0

/*
 * Copied to low physical memory in arm_init,
 * so the kernel must be linked virtually at
 * 0xc0001000 or higher to leave space for it.
 */
	.syntax unified
	.text
	.align 12
	.globl EXT(ExceptionLowVectorsBase)

LEXT(ExceptionLowVectorsBase)
	adr	pc, Lreset_low_vector
	b	.				// Undef
	b	.				// SWI
	b	.				// Prefetch Abort
	b	.				// Data Abort
	b	.				// Address Exception
	b	.				// IRQ
	b	.				// FIQ/DEC
LEXT(ResetPrivateData)
	.space	(480),0				// (filled with 0s)
	// ExceptionLowVectorsBase + 0x200
Lreset_low_vector:
	adr	r4, EXT(ResetHandlerData)
	ldr	r0, [r4, ASSIST_RESET_HANDLER]
	movs	r0, r0
	blxne	r0
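	// Note: "movs rN, rN" simply sets the condition flags from the handler
	// pointer, so the blxne that follows calls it only when it is non-NULL.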
	adr	r4, EXT(ResetHandlerData)
	ldr	r1, [r4, CPU_DATA_ENTRIES]
	ldr	r1, [r1, CPU_DATA_PADDR]
	ldr	r5, [r1, CPU_RESET_ASSIST]
	movs	r5, r5
	blxne	r5
	adr	r4, EXT(ResetHandlerData)
	ldr	r0, [r4, BOOT_ARGS]
	ldr	r1, [r4, CPU_DATA_ENTRIES]
#if __ARM_SMP__
#if defined(ARMA7)
	// physical cpu number is stored in MPIDR Affinity level 0
	mrc	p15, 0, r6, c0, c0, 5		// Read MPIDR
	and	r6, r6, #0xFF			// Extract Affinity level 0
#else
#error missing Who Am I implementation
#endif
#else
	mov	r6, #0
#endif /* __ARM_SMP__ */
	// physical cpu number matches cpu number
//#if cdeSize != 16
//#error cpu_data_entry is not 16bytes in size
//#endif
	lsl	r6, r6, #4			// Get CpuDataEntry offset
	add	r1, r1, r6			// Get cpu_data_entry pointer
	ldr	r1, [r1, CPU_DATA_PADDR]
	ldr	r5, [r1, CPU_RESET_HANDLER]
	movs	r5, r5
	blxne	r5				// Branch to cpu reset handler
	b	.				// Unexpected reset
	.globl	EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space	(rhdSize_NUM),0			// (filled with 0s)


	.globl EXT(ExceptionLowVectorsEnd)
LEXT(ExceptionLowVectorsEnd)

	.text
	.align 12
	.globl EXT(ExceptionVectorsBase)

LEXT(ExceptionVectorsBase)

	adr	pc, Lexc_reset_vector
	adr	pc, Lexc_undefined_inst_vector
	adr	pc, Lexc_swi_vector
	adr	pc, Lexc_prefetch_abort_vector
	adr	pc, Lexc_data_abort_vector
	adr	pc, Lexc_address_exception_vector
	adr	pc, Lexc_irq_vector
#if __ARM_TIME__
	adr	pc, Lexc_decirq_vector
#else /* ! __ARM_TIME__ */
	mov	pc, r9
#endif /* __ARM_TIME__ */

Lexc_reset_vector:
	b	.
	.long	0x0
	.long	0x0
	.long	0x0
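/*
 * Note: each exception mode has its own banked sp, so the stubs below can
 * use sp as a scratch register without disturbing the interrupted context.
 */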
Lexc_undefined_inst_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #4]			// Branch to exception handler
Lexc_swi_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #8]			// Branch to exception handler
Lexc_prefetch_abort_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0xC]			// Branch to exception handler
Lexc_data_abort_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0x10]			// Branch to exception handler
Lexc_address_exception_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0x14]			// Branch to exception handler
Lexc_irq_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0x18]			// Branch to exception handler
#if __ARM_TIME__
Lexc_decirq_vector:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]	// Get exception vector table
	ldr	pc, [sp, #0x1C]			// Branch to exception handler
#else /* ! __ARM_TIME__ */
	.long	0x0
	.long	0x0
	.long	0x0
	.long	0x0
#endif /* __ARM_TIME__ */

	.fill	984, 4, 0			// Push to the 4KB page boundary

	.globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)


/*
 * Targets for the exception vectors; we patch these during boot (to allow
 * for position independent code without complicating the vectors; see start.s).
 */
	.globl EXT(ExceptionVectorsTable)
LEXT(ExceptionVectorsTable)
Lreset_vector:
	.long	0x0
Lundefined_inst_vector:
	.long	0x0
Lswi_vector:
	.long	0x0
Lprefetch_abort_vector:
	.long	0x0
Ldata_abort_vector:
	.long	0x0
Laddress_exception_vector:
	.long	0x0
Lirq_vector:
	.long	0x0
Ldecirq_vector:
	.long	0x0


/*
 * First Level Exception Handlers
 */
	.text
	.align 2
	.globl EXT(fleh_reset)
LEXT(fleh_reset)
	b	.				// Never return

/*
 * First Level Exception Handler for Undefined Instruction.
 */
	.text
	.align 2
	.globl EXT(fleh_undef)

/*
 * Ensures the stack is safely aligned, usually in preparation for an external branch
 * arg0: temp register for storing the stack offset
 * arg1: temp register for storing the previous stack pointer
 */
.macro ALIGN_STACK
/*
 * For armv7k ABI, the stack needs to be 16-byte aligned
 */
#if __BIGGEST_ALIGNMENT__ > 4
	and	$0, sp, #0x0F			// sp mod 16-bytes
	cmp	$0, #4				// need space for the sp on the stack
	addlt	$0, $0, #0x10			// make room if needed, but keep stack aligned
	mov	$1, sp				// get current sp
	sub	sp, sp, $0			// align stack
	str	$1, [sp]			// store previous sp on stack
#endif
.endmacro
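/*
 * Worked example of the arithmetic above: if sp mod 16 is 4 or more, there
 * is already room for the saved sp word between the aligned boundary and the
 * old sp, so sp simply drops to the boundary; if sp mod 16 is less than 4
 * (e.g. an already aligned sp), 16 is added to the offset so the stored sp
 * word fits while the new sp stays 16-byte aligned.
 */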

/*
 * Restores the stack pointer to its previous value following an ALIGN_STACK call
 */
.macro UNALIGN_STACK
#if __BIGGEST_ALIGNMENT__ > 4
	ldr	sp, [sp]
#endif
.endmacro

/*
 * Checks that cpu is currently in the expected mode, panics if not.
 * arg0: the expected mode, should be one of the PSR_*_MODE defines
 */
.macro VERIFY_EXCEPTION_MODE
	mrs	sp, cpsr			// Read cpsr
	and	sp, sp, #PSR_MODE_MASK		// Extract current mode
	cmp	sp, $0				// Check specified mode
	movne	r0, sp
	bne	EXT(ExceptionVectorPanic)
.endmacro

/*
 * Checks the previous processor mode. If usermode, executes the code
 * following the macro to handle the userspace exception. Otherwise,
 * branches to an ELSE_IF_KERNELMODE_EXCEPTION call with the same
 * argument.
 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 */
.macro IF_USERMODE_EXCEPTION
	mrs	sp, spsr
	and	sp, sp, #PSR_MODE_MASK		// Is it from user?
	cmp	sp, #PSR_USER_MODE
	beq	$0_from_user
	cmp	sp, #PSR_IRQ_MODE
	beq	$0_from_irq
	cmp	sp, #PSR_FIQ_MODE
	beq	$0_from_fiq
	bne	$0_from_svc
$0_from_user:
.endmacro

/*
 * Handles an exception taken from kernelmode (IRQ/FIQ/SVC/etc).
 * Places the processor into the correct mode and executes the
 * code following the macro to handle the kernel exception.
 * Intended to be paired with a prior call to IF_USERMODE_EXCEPTION.
 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 */
.macro ELSE_IF_KERNELMODE_EXCEPTION
$0_from_irq:
	cpsid	i, #PSR_IRQ_MODE
	b	$0_from_kernel
$0_from_fiq:
	cpsid	i, #PSR_FIQ_MODE
	b	$0_from_kernel
$0_from_svc:
	cpsid	i, #PSR_SVC_MODE
$0_from_kernel:
.endmacro
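/*
 * Usage sketch (mirroring fleh_undef/fleh_prefabt/fleh_dataabt below):
 *
 *     IF_USERMODE_EXCEPTION myexc
 *         ... user path, ends with b load_and_go_user ...
 *     ELSE_IF_KERNELMODE_EXCEPTION myexc
 *         ... kernel path, ends with b load_and_go_sys ...
 *
 * 'myexc' is a placeholder class string; the handlers below use 'undef',
 * 'prefabt' and 'dataabt'.
 */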

LEXT(fleh_undef)
VERIFY_EXCEPTION_MODE PSR_UND_MODE
	mrs	sp, spsr			// Check the previous mode
	tst	sp, #PSR_TF			// Is it Thumb?
	subeq	lr, lr, #4
	subne	lr, lr, #2
IF_USERMODE_EXCEPTION undef
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get current thread PCB pointer

	stmia	sp, {r0-r12, sp, lr}^		// Save user context on PCB
	mov	r7, #0				// Zero the frame pointer
	nop
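	// The '^' in the stmia above stores the User-mode banked sp and lr.
	// The nop keeps the following instruction from touching banked
	// registers right after the user-bank transfer, as the architecture
	// requires.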

	mov	r0, sp				// Store arm_saved_state pointer
						// for argument

	str	lr, [sp, SS_PC]			// Save user mode pc register

	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]		// Save user mode cpsr

	cpsid	i, #PSR_SVC_MODE
	mrs	r3, cpsr			// Read cpsr
	msr	spsr_cxsf, r3			// Set spsr(svc mode cpsr)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [r9, TH_KSTACKPTR]		// Load kernel stack
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mvn	r0, #0
	str	r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r3, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r3			// And shove it into FPSCR
	add	r1, r9, ACT_UVFP		// Reload the pointer to the save state
	add	r0, r9, ACT_PCBDATA		// Reload the VFP save state argument
#else
	mov	r1, #0				// Clear the VFP save state argument
	add	r0, r9, ACT_PCBDATA		// Reload arm_saved_state pointer
#endif

	bl	EXT(sleh_undef)			// Call second level handler
						// sleh will enable interrupt
	b	load_and_go_user

ELSE_IF_KERNELMODE_EXCEPTION undef
	/*
	 * We have a kernel stack already, and we use it to save context.
	 * IRQ is disabled.
	 */
#if CONFIG_DTRACE
	// We need a frame for backtracing. The LR here is the LR of supervisor mode, not the location where the exception
	// took place. We'll store that later after we switch to undef mode and pull out the LR from there.

	// This frame is consumed by fbt_invop. Any changes to the size or location of this frame will probably require
	// changes in fbt_invop also.
	stmfd	sp!, { r7, lr }
#endif

	sub	sp, sp, EXC_CTX_SIZE		// Reserve for arm_saved_state

	stmia	sp, {r0-r12}			// Save on supervisor mode stack
	str	lr, [sp, SS_LR]

#if CONFIG_DTRACE
	add	r7, sp, EXC_CTX_SIZE		// Save frame pointer
#endif

	mrs	r4, lr_und
	str	r4, [sp, SS_PC]			// Save complete
	mrs	r4, spsr_und
	str	r4, [sp, SS_CPSR]

	mov	ip, sp

/*
 * sp - stack pointer
 * ip - stack pointer
 * r7 - frame pointer state
 */

#if CONFIG_DTRACE
	ldr	r0, [ip, SS_PC]			// Get the exception pc to store later
#endif

	add	ip, ip, EXC_CTX_SIZE		// Send stack pointer to debugger
#if CONFIG_DTRACE
	str	r0, [ip, #4]
	add	ip, ip, #8
#endif
	str	ip, [sp, SS_SP]			// for accessing local variable
#if CONFIG_DTRACE
	sub	ip, ip, #8
#endif
	sub	ip, ip, EXC_CTX_SIZE

#if __ARM_VFP__
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	mov	r5, r0				// Stash the save area in another register
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r1, r5				// Load the VFP save area argument
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#else
	mov	r1, #0				// Clear the facility context argument
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r3, r10
	beq	1f
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc	p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov	r0, sp				// Argument

	ALIGN_STACK r2, r3
	bl	EXT(sleh_undef)			// Call second level handler
	UNALIGN_STACK

#if __ARM_USER_PROTECT__
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif
	b	load_and_go_sys


/*
 * First Level Exception Handler for Software Interrupt
 *
 * We assert that only user level can use the "SWI" instruction for a system
 * call on development kernels, and assume it's true on release.
 *
 * System call number is stored in r12.
 * System call arguments are stored in r0 to r6 and r8 (we skip r7)
 *
 */
	.text
	.align 5
	.globl EXT(fleh_swi)

LEXT(fleh_swi)
	cpsid	i, #PSR_ABT_MODE
	mov	sp, ip				// Save ip
	cpsid	i, #PSR_SVC_MODE
	mrs	ip, spsr			// Check the previous mode
	tst	ip, #0x0f
	cpsid	i, #PSR_ABT_MODE
	mov	ip, sp				// Restore ip
	cpsid	i, #PSR_SVC_MODE
	beq	swi_from_user
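/*
 * The sequence above borrows the banked abort-mode sp as a scratch slot so
 * that ip can be saved, the saved spsr inspected for the previous mode, and
 * ip restored, all without touching any stack before we know whether the
 * SWI came from user or kernel mode.
 */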

/* Only user mode can use SWI. Panic if the kernel tries. */
swi_from_kernel:
	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	ALIGN_STACK r0, r1
	adr	r0, L_kernel_swi_panic_str	// Load panic message and panic()
	blx	EXT(panic)
	b	.

swi_from_user:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB


	/* Check for special mach_absolute_time trap value.
	 * This is intended to be a super-lightweight call to ml_get_timebase(), which
	 * is handrolled assembly and does not use the stack, thus not requiring us to set up a kernel stack. */
	cmp	r12, #-3
	beq	fleh_swi_trap_tb
	stmia	sp, {r0-r12, sp, lr}^		// Save user context on PCB
	mov	r7, #0				// Zero the frame pointer
	nop
	mov	r8, sp				// Store arm_saved_state pointer
	add	sp, sp, SS_PC
	srsia	sp, #PSR_SVC_MODE
	mrs	r3, cpsr			// Read cpsr
	msr	spsr_cxsf, r3			// Set spsr(svc mode cpsr)
	sub	r9, sp, ACT_PCBDATA_PC
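	// With sp pointing at the PCB's SS_PC slot, srsia stores the user
	// return address (lr) and the user cpsr (spsr) directly into the
	// saved state; subtracting ACT_PCBDATA_PC then recovers the thread
	// pointer into r9.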

	ldr	sp, [r9, TH_KSTACKPTR]		// Load kernel stack
	mov	r11, r12			// save the syscall vector in a nontrashed register

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mvn	r0, #0
	str	r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before handling SWI from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	add	r8, r9, ACT_PCBDATA		// Reload arm_saved_state pointer
#endif
	ldr	r10, [r9, ACT_TASK]		// Load the current task

	/* enable interrupts */
	cpsie	i				// Enable IRQ

	cmp	r11, #-4			// Special value for mach_continuous_time
	beq	fleh_swi_trap_mct

	cmp	r11, #0x80000000
	beq	fleh_swi_trap
fleh_swi_trap_ret:

#if TRACE_SYSCALL
	/* trace the syscall */
	mov	r0, r8
	bl	EXT(syscall_trace)
#endif

	bl	EXT(mach_kauth_cred_uthread_update)
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	/* unix syscall? */
	rsbs	r5, r11, #0			// make the syscall positive (if negative)
	ble	fleh_swi_unix			// positive syscalls are unix (note reverse logic here)

fleh_swi_mach:
	/* note that mach_syscall_trace can modify r9, so increment the thread
	 * syscall count before the call : */
	ldr	r2, [r9, TH_MACH_SYSCALLS]
	add	r2, r2, #1
	str	r2, [r9, TH_MACH_SYSCALLS]

	LOAD_ADDR(r1, mach_trap_table)		// load mach_trap_table
#if MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 12
	add	r11, r5, r5, lsl #1		// syscall * 3
	add	r6, r1, r11, lsl #2		// trap_table + syscall * 12
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 16
	add	r6, r1, r5, lsl #4		// trap_table + syscall * 16
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 20
	add	r11, r5, r5, lsl #2		// syscall * 5
	add	r6, r1, r11, lsl #2		// trap_table + syscall * 20
#else
#error mach_trap_t size unhandled (see MACH_TRAP_TABLE_ENTRY_SIZE)!
#endif

#ifndef NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr	r4, [r4]
	movs	r4, r4
	movne	r0, r8				// ready the reg state pointer as an arg to the call
	movne	r1, r5				// syscall number as 2nd arg
	COND_EXTERN_BLNE(mach_syscall_trace)
#endif
	adr	lr, fleh_swi_exit		// any calls from here on out will return to our exit path
	cmp	r5, MACH_TRAP_TABLE_COUNT	// check syscall number range
	bge	fleh_swi_mach_error

/*
 * For arm32 ABI where 64-bit types are aligned to even registers and
 * 64-bits on stack, we need to unpack registers differently. So
 * we use the mungers for marshalling in arguments from user space.
 * Currently this is just ARMv7k.
 */
#if __BIGGEST_ALIGNMENT__ > 4
	sub	sp, #0x40			// allocate buffer and keep stack 128-bit aligned
						// it should be big enough for all syscall arguments
	ldr	r11, [r6, #8]			// get mach_trap_table[call_number].mach_trap_arg_munge32
	teq	r11, #0				// check if we have a munger
	moveq	r0, #0
	movne	r0, r8				// ready the reg state pointer as an arg to the call
	movne	r1, sp				// stack will hold arguments buffer
	blxne	r11				// call munger to get arguments from userspace
	adr	lr, fleh_swi_exit		// any calls from here on out will return to our exit path
	teq	r0, #0
	bne	fleh_swi_mach_error		// exit if the munger returned non-zero status
#endif

	ldr	r1, [r6, #4]			// load the syscall vector

	LOAD_ADDR(r2, kern_invalid)		// test to make sure the trap is not kern_invalid
	teq	r1, r2
	beq	fleh_swi_mach_error

#if __BIGGEST_ALIGNMENT__ > 4
	mov	r0, sp				// argument buffer on stack
	bx	r1				// call the syscall handler
#else
	mov	r0, r8				// ready the reg state pointer as an arg to the call
	bx	r1				// call the syscall handler
#endif

fleh_swi_exit64:
	str	r1, [r8, #4]			// top of 64-bit return
fleh_swi_exit:
	str	r0, [r8]			// save the return value
#ifndef NO_KDEBUG
	movs	r4, r4
	movne	r1, r5
	COND_EXTERN_BLNE(mach_syscall_trace_exit)
#endif
#if TRACE_SYSCALL
	bl	EXT(syscall_trace_exit)
#endif

	mov	r0, #1
	bl	EXT(throttle_lowpri_io)		// throttle_lowpri_io(1);

	bl	EXT(thread_exception_return)
	b	.

fleh_swi_mach_error:
	mov	r0, #EXC_SYSCALL
	sub	r1, sp, #4
	mov	r2, #1
	bl	EXT(exception_triage)
	b	.

	.align	5
fleh_swi_unix:
	ldr	r1, [r9, TH_UNIX_SYSCALLS]
	mov	r0, r8				// reg state structure is arg
	add	r1, r1, #1
	str	r1, [r9, TH_UNIX_SYSCALLS]
	mov	r1, r9				// current thread in arg1
	ldr	r2, [r9, TH_UTHREAD]		// current uthread in arg2
	ldr	r3, [r10, TASK_BSD_INFO]	// current proc in arg3
	bl	EXT(unix_syscall)
	b	.

fleh_swi_trap:
	ldmia	r8, {r0-r3}
	cmp	r3, #3
	addls	pc, pc, r3, LSL#2
	b	fleh_swi_trap_ret
	b	icache_invalidate_trap
	b	dcache_flush_trap
	b	thread_set_cthread_trap
	b	thread_get_cthread_trap
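/*
 * Dispatch note: reading pc in ARM mode yields the address of the current
 * instruction plus 8, so "addls pc, pc, r3, LSL#2" skips the fall-through
 * branch and lands on the r3'th entry of the four-entry branch table above
 * when r3 <= 3; anything larger falls through to fleh_swi_trap_ret.
 */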

icache_invalidate_trap:
	add	r3, r0, r1
	cmp	r3, VM_MAX_ADDRESS
	subhi	r3, r3, #1<<MMU_CLINE
	bhi	cache_trap_error
	adr	r11, cache_trap_jmp
	ldr	r6, [r9, TH_RECOVER]		// Save existing recovery routine
	str	r11, [r9, TH_RECOVER]
#if __ARM_USER_PROTECT__
	ldr	r5, [r9, ACT_UPTW_TTB]		// Load thread ttb
	mcr	p15, 0, r5, c2, c0, 0		// Set TTBR0
	ldr	r5, [r9, ACT_ASID]		// Load thread asid
	mcr	p15, 0, r5, c13, c0, 1		// Set CONTEXTIDR
	dsb	ish
	isb
#endif
	mov	r4, r0
	mov	r5, r1
	bl	EXT(CleanPoU_DcacheRegion)
	mov	r0, r4
	mov	r1, r5
	bl	EXT(InvalidatePoU_IcacheRegion)
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r4, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r4, c2, c0, 0		// Set TTBR0
	mov	r4, #0				// Load kernel asid
	mcr	p15, 0, r4, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	str	r6, [r9, TH_RECOVER]
	bl	EXT(thread_exception_return)
	b	.

dcache_flush_trap:
	add	r3, r0, r1
	cmp	r3, VM_MAX_ADDRESS
	subhi	r3, r3, #1<<MMU_CLINE
	bhi	cache_trap_error
	adr	r11, cache_trap_jmp
	ldr	r4, [r9, TH_RECOVER]		// Save existing recovery routine
	str	r11, [r9, TH_RECOVER]
#if __ARM_USER_PROTECT__
	ldr	r6, [r9, ACT_UPTW_TTB]		// Load thread ttb
	mcr	p15, 0, r6, c2, c0, 0		// Set TTBR0
	ldr	r5, [r9, ACT_ASID]		// Load thread asid
	mcr	p15, 0, r5, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	bl	EXT(flush_dcache_syscall)
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r5, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r5, c2, c0, 0		// Set TTBR0
	mov	r5, #0				// Load kernel asid
	mcr	p15, 0, r5, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	str	r4, [r9, TH_RECOVER]
	bl	EXT(thread_exception_return)
	b	.

thread_set_cthread_trap:
	bl	EXT(thread_set_cthread_self)
	bl	EXT(thread_exception_return)
	b	.

thread_get_cthread_trap:
	bl	EXT(thread_get_cthread_self)
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	add	r1, r9, ACT_PCBDATA		// Get User PCB
	str	r0, [r1, SS_R0]			// set return value
	bl	EXT(thread_exception_return)
	b	.

cache_trap_jmp:
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	ldr	r5, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r5, c2, c0, 0		// Set TTBR0
	mov	r5, #0				// Load kernel asid
	mcr	p15, 0, r5, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mrc	p15, 0, r3, c6, c0		// Read Fault Address
cache_trap_error:
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	add	r0, r9, ACT_PCBDATA		// Get User PCB
	ldr	r1, [r0, SS_PC]			// Save user mode pc register as pc
	sub	r1, r1, #4			// Backtrack current pc
	str	r1, [r0, SS_PC]			// pc at cache assist swi
	str	r3, [r0, SS_VADDR]		// Fault Address
	mov	r0, #EXC_BAD_ACCESS
	mov	r2, KERN_INVALID_ADDRESS
	sub	sp, sp, #8
	mov	r1, sp
	str	r2, [sp]
	str	r3, [sp, #4]
	ALIGN_STACK r2, r3
	mov	r2, #2
	bl	EXT(exception_triage)
	b	.

fleh_swi_trap_mct:
	bl	EXT(mach_continuous_time)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	add	r9, r9, ACT_PCBDATA_R0		// Get User register state
	stmia	r9, {r0, r1}			// set 64-bit return value
	bl	EXT(thread_exception_return)
	b	.

fleh_swi_trap_tb:
	str	lr, [sp, SS_PC]
	bl	EXT(ml_get_timebase)		// ml_get_timebase() (64-bit return)
	ldr	lr, [sp, SS_PC]
	nop
	movs	pc, lr				// Return to user
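	// "movs pc, lr" also copies spsr into cpsr, so this one instruction
	// restores both the user pc and the user mode/state bits.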

	.align	2
L_kernel_swi_panic_str:
	.asciz	"fleh_swi: took SWI from kernel mode\n"
	.align	2

/*
 * First Level Exception Handler for Prefetch Abort.
 */
	.text
	.align	2
	.globl	EXT(fleh_prefabt)

LEXT(fleh_prefabt)
VERIFY_EXCEPTION_MODE PSR_ABT_MODE
	sub	lr, lr, #4

IF_USERMODE_EXCEPTION prefabt
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB

	stmia	sp, {r0-r12, sp, lr}^		// Save user context on PCB
	mov	r7, #0				// Zero the frame pointer
	nop
	mov	r0, sp				// Store arm_saved_state pointer
						// For argument
	str	lr, [sp, SS_PC]			// Save user mode pc register as pc
	mrc	p15, 0, r1, c6, c0, 2		// Read IFAR
	str	r1, [sp, SS_VADDR]		// and fault address of pcb

	mrc	p15, 0, r5, c5, c0, 1		// Read Fault Status
	str	r5, [sp, SS_STATUS]		// Save fault status register to pcb

	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]		// Save user mode cpsr

	cpsid	i, #PSR_SVC_MODE
	mrs	r3, cpsr			// Read cpsr
	msr	spsr_cxsf, r3			// Set spsr(svc mode cpsr)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [r9, TH_KSTACKPTR]		// Load kernel stack

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r3, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r3			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mvn	r0, #0
	str	r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif

	add	r0, r9, ACT_PCBDATA		// Reload arm_saved_state pointer
	mov	r1, T_PREFETCH_ABT		// Pass abort type
	bl	EXT(sleh_abort)			// Call second level handler
						// Sleh will enable interrupt
	b	load_and_go_user

ELSE_IF_KERNELMODE_EXCEPTION prefabt
	/*
	 * We have a kernel stack already, and we use it to save context:
	 *    ------------------
	 *    | VFP saved state |
	 *    |-----------------|
	 *    | ARM saved state |
	 * SP ------------------
	 *
	 * IRQ is disabled
	 */
	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r3, r10
	beq	1f
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc	p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mrs	r4, lr_abt
	str	r4, [sp, SS_PC]			// Save pc

	mrc	p15, 0, r5, c6, c0, 2		// Read IFAR
	str	r5, [sp, SS_VADDR]		// and fault address of pcb
	mrc	p15, 0, r5, c5, c0, 1		// Read (instruction) Fault Status
	str	r5, [sp, SS_STATUS]		// Save fault status register to pcb

	mrs	r4, spsr_abt
	str	r4, [sp, SS_CPSR]

	mov	r0, sp
	ALIGN_STACK r1, r2
	mov	r1, T_PREFETCH_ABT		// Pass abort type
	bl	EXT(sleh_abort)			// Call second level handler
	UNALIGN_STACK

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif

	b	load_and_go_sys

/*
 * First Level Exception Handler for Data Abort
 */
	.text
	.align	2
	.globl	EXT(fleh_dataabt)

LEXT(fleh_dataabt)
VERIFY_EXCEPTION_MODE PSR_ABT_MODE
	sub	lr, lr, #8
IF_USERMODE_EXCEPTION dataabt
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB

	stmia	sp, {r0-r12, sp, lr}^		// Save user context on PCB
	mov	r7, #0				// Zero the frame pointer
	nop

	mov	r0, sp				// Store arm_saved_state pointer
						// For argument

	str	lr, [sp, SS_PC]			// Save user mode pc register

	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]		// Save user mode cpsr

	mrc	p15, 0, r5, c5, c0		// Read Fault Status
	mrc	p15, 0, r6, c6, c0		// Read Fault Address
	str	r5, [sp, SS_STATUS]		// Save fault status register to pcb
	str	r6, [sp, SS_VADDR]		// Save fault address to pcb

	cpsid	i, #PSR_SVC_MODE
	mrs	r3, cpsr			// Read cpsr
	msr	spsr_cxsf, r3			// Set spsr(svc mode cpsr)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [r9, TH_KSTACKPTR]		// Load kernel stack

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r3, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r3			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	mvn	r0, #0
	str	r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif

	add	r0, r9, ACT_PCBDATA		// Reload arm_saved_state pointer
	mov	r1, T_DATA_ABT			// Pass abort type
	bl	EXT(sleh_abort)			// Call second level handler
						// Sleh will enable irq
	b	load_and_go_user

ELSE_IF_KERNELMODE_EXCEPTION dataabt
	/*
	 * We have a kernel stack already, and we use it to save context:
	 *    ------------------
	 *    | VFP saved state |
	 *    |-----------------|
	 *    | ARM saved state |
	 * SP ------------------
	 *
	 * IRQ is disabled
	 */
	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif

	mrs	r4, lr_abt
	str	r4, [sp, SS_PC]
	mrs	r4, spsr_abt
	str	r4, [sp, SS_CPSR]

#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r3, r10
	beq	1f
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc	p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mrc	p15, 0, r5, c5, c0		// Read Fault Status
	mrc	p15, 0, r6, c6, c0		// Read Fault Address
	str	r5, [sp, SS_STATUS]		// Save fault status register to pcb
	str	r6, [sp, SS_VADDR]		// Save fault address to pcb

	mov	r0, sp				// Argument
	ALIGN_STACK r1, r2
	mov	r1, T_DATA_ABT			// Pass abort type
	bl	EXT(sleh_abort)			// Call second level handler
	UNALIGN_STACK

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif

load_and_go_sys:
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

	ldr	r4, [sp, SS_CPSR]		// Load saved cpsr
	tst	r4, #PSR_IRQF			// Test IRQ set
	bne	lags1				// Branch if IRQ disabled

	cpsid	i				// Disable IRQ
	ldr	r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	movs	r2, r2				// Test if null
	ldr	r8, [r9, ACT_CPUDATAP]		// Get current cpu
	bne	lags1				// Branch if count not null
	ldr	r5, [r8, CPU_PENDING_AST]	// Get ASTs
	ands	r5, r5, AST_URGENT		// Get the requests we do honor
	beq	lags1				// Branch if no ASTs
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r3, r10
	beq	1f
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc	p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	ldr	lr, [sp, SS_LR]			// Restore the link register
	stmfd	sp!, {r7, lr}			// Push a fake frame
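	// The fake frame gives ast_taken_kernel a plausible r7/lr pair so
	// kernel backtraces can unwind through the AST call.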

	ALIGN_STACK r2, r3
	bl	EXT(ast_taken_kernel)		// Handle AST_URGENT
	UNALIGN_STACK

	ldmfd	sp!, {r7, lr}			// Pop the fake frame
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	ldr	r8, [r9, ACT_CPUDATAP]		// Get current cpu
#if __ARM_USER_PROTECT__
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif
lags1:
	ldr	lr, [sp, SS_LR]

	mov	ip, sp				// Save pointer to contexts for abort mode
	ldr	sp, [ip, SS_SP]			// Restore stack pointer

	cpsid	if, #PSR_ABT_MODE

	mov	sp, ip

	ldr	r4, [sp, SS_CPSR]
	msr	spsr_cxsf, r4			// Restore spsr

	clrex					// clear exclusive memory tag
#if	__ARM_ENABLE_WFE_
	sev
#endif

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_load)			// Load the desired VFP state from the stack
#endif

	ldr	lr, [sp, SS_PC]			// Restore lr

	ldmia	sp, {r0-r12}			// Restore other registers

	movs	pc, lr				// Return to sys (svc, irq, fiq)

/*
 * First Level Exception Handler for IRQ
 * Current mode : IRQ
 * IRQ and FIQ are always disabled while running in the FIQ handler.
 * We do not permit nested interrupts.
 *
 * Saving area: from user   : PCB.
 *              from kernel : interrupt stack.
 */

	.text
	.align	2
	.globl	EXT(fleh_irq)

LEXT(fleh_irq)
	sub	lr, lr, #4

	cpsie	a				// Re-enable async aborts

	mrs	sp, spsr
	tst	sp, #0x0f			// From user? or kernel?
	bne	fleh_irq_kernel

fleh_irq_user:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB
	stmia	sp, {r0-r12, sp, lr}^
	mov	r7, #0				// Zero the frame pointer
	nop
	str	lr, [sp, SS_PC]
	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]
	mov	r5, sp				// Saved context in r5
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	r6, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [r6, CPU_ISTACKPTR]		// Set interrupt stack
	cpsid	i, #PSR_SVC_MODE
	ldr	sp, [r9, TH_KSTACKPTR]		// Set kernel stack
	cpsid	i, #PSR_IRQ_MODE

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif
#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)	// Check if a telemetry record was requested...
	mov	r0, #1
	ldr	r2, [r2]
	movs	r2, r2
	beq	1f
	bl	EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc	p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

	b	fleh_irq_handler

fleh_irq_kernel:
	cpsid	i, #PSR_SVC_MODE

	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mrc	p15, 0, r11, c13, c0, 1		// Get CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov	r5, sp				// Saved context in r5

	cpsid	i, #PSR_IRQ_MODE

	str	lr, [r5, SS_PC]			// Save LR as the return PC
	mrs	r4, spsr
	str	r4, [r5, SS_CPSR]		// Save the cpsr of the interrupted mode

	ldr	sp, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [sp, CPU_ISTACKPTR]		// Set interrupt stack

#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)	// Check if a telemetry record was requested...
	mov	r0, #0
	ldr	r2, [r2]
	movs	r2, r2
	beq	1f
	bl	EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc	p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

fleh_irq_handler:
	ldr	r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	add	r2, r2, #1			// Increment count
	str	r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count
#ifndef NO_KDEBUG
	LOAD_ADDR(r8, kdebug_enable)
	ldr	r8, [r8]
	movs	r8, r8
	movne	r0, r5
	COND_EXTERN_BLNE(interrupt_trace)
#endif
	bl	EXT(interrupt_stats)		// Record interrupt statistics
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	ldr	r4, [r9, ACT_CPUDATAP]		// Get current cpu
	str	r5, [r4, CPU_INT_STATE]		// Saved context in cpu_int_state
	ldr	r3, [r4, CPU_STAT_IRQ]		// Get IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r4, CPU_STAT_IRQ]		// Update IRQ count
	ldr	r3, [r4, CPU_STAT_IRQ_WAKE]	// Get post-wake IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r4, CPU_STAT_IRQ_WAKE]	// Update post-wake IRQ count
	ldr	r0, [r4, INTERRUPT_TARGET]
	ldr	r1, [r4, INTERRUPT_REFCON]
	ldr	r2, [r4, INTERRUPT_NUB]
	ldr	r3, [r4, INTERRUPT_SOURCE]
	ldr	r5, [r4, INTERRUPT_HANDLER]	// Call second level exception handler
	blx	r5
#ifndef NO_KDEBUG
	movs	r8, r8
	COND_EXTERN_BLNE(interrupt_trace_exit)
#endif
	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	bl	EXT(ml_get_timebase)		// get current timebase
	LOAD_ADDR(r3, EntropyData)
	ldr	r2, [r3, ENTROPY_INDEX_PTR]
	add	r1, r3, ENTROPY_DATA_SIZE
	add	r2, r2, #4
	cmp	r2, r1
	addge	r2, r3, ENTROPY_BUFFER
	ldr	r4, [r2]
	eor	r0, r0, r4, ROR #9
	str	r0, [r2]			// Update entropy buffer
	str	r2, [r3, ENTROPY_INDEX_PTR]
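	// The sequence above mixes the current timebase into the entropy ring
	// buffer: the previous word is rotated and XORed with the new sample,
	// and the index pointer wraps back to the start of the buffer once it
	// reaches the end.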

return_from_irq:
	mov	r5, #0
	ldr	r4, [r9, ACT_CPUDATAP]		// Get current cpu
	str	r5, [r4, CPU_INT_STATE]		// Clear cpu_int_state
	ldr	r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
#if MACH_ASSERT
	cmp	r2, #0				// verify positive count
	bgt	1f
	push	{r7, lr}
	mov	r7, sp
	adr	r0, L_preemption_count_zero_str
	blx	EXT(panic)
	b	.
1:
#endif
	sub	r2, r2, #1			// Decrement count
	str	r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count

	mrs	r0, spsr			// Check the previous mode

	cpsid	i, #PSR_SVC_MODE

	tst	r0, #0x0f			// Check if the previous is from user
	ldreq	sp, [r9, TH_KSTACKPTR]		// ...If so, reload the kernel stack pointer
	beq	load_and_go_user		// ...and return

#if __ARM_USER_PROTECT__
	ldr	r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp	r10, r0
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr	r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif
	b	load_and_go_sys

	.align 2
L_preemption_count_zero_str:
	.ascii	"locore.s: preemption count is zero \000"
	.align 2
/*
 * First Level Exception Handler for DEC
 * Current mode : IRQ
 * IRQ and FIQ are always disabled while running in the FIQ handler.
 * We do not permit nested interrupts.
 *
 * Saving area: from user   : PCB.
 *              from kernel : interrupt stack.
 */

	.text
	.align	2
	.globl	EXT(fleh_decirq)

LEXT(fleh_decirq)
	sub	lr, lr, #4

	cpsie	af				// Re-enable async aborts/FIQ

	mrs	sp, spsr
	tst	sp, #0x0f			// From user? or kernel?
	bne	fleh_decirq_kernel

fleh_decirq_user:
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB
	stmia	sp, {r0-r12, sp, lr}^
	mov	r7, #0				// Zero the frame pointer
	nop
	str	lr, [sp, SS_PC]
	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]
	mov	r5, sp				// Saved context in r5
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	r6, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [r6, CPU_ISTACKPTR]		// Set interrupt stack
	cpsid	i, #PSR_SVC_MODE
	ldr	sp, [r9, TH_KSTACKPTR]		// Set kernel stack
	cpsid	i, #PSR_IRQ_MODE

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#endif
#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)	// Check if a telemetry record was requested...
	mov	r0, #1
	ldr	r2, [r2]
	movs	r2, r2
	beq	1f
	bl	EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc	p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

	b	fleh_decirq_handler

fleh_decirq_kernel:
	cpsid	i, #PSR_SVC_MODE

	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mrc	p15, 0, r11, c13, c0, 1		// Get CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov	r5, sp				// Saved context in r5

	cpsid	i, #PSR_IRQ_MODE

	str	lr, [r5, SS_PC]			// Save LR as the return PC
	mrs	r4, spsr
	str	r4, [r5, SS_CPSR]		// Save the cpsr of the interrupted mode

	ldr	sp, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [sp, CPU_ISTACKPTR]		// Set interrupt stack

#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)	// Check if a telemetry record was requested...
	mov	r0, #0
	ldr	r2, [r2]
	movs	r2, r2
	beq	1f
	bl	EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc	p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

fleh_decirq_handler:
	ldr	r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	add	r2, r2, #1			// Increment count
	str	r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count
	ldr	r2, [r9, ACT_CPUDATAP]		// Get current cpu
	str	r5, [r2, CPU_INT_STATE]		// Saved context in cpu_int_state
	ldr	r3, [r2, CPU_STAT_IRQ]		// Get IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r2, CPU_STAT_IRQ]		// Update IRQ count
	ldr	r3, [r2, CPU_STAT_IRQ_WAKE]	// Get post-wake IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r2, CPU_STAT_IRQ_WAKE]	// Update post-wake IRQ count
#ifndef NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr	r4, [r4]
	movs	r4, r4
	movne	r0, r5				// Pass saved context
	COND_EXTERN_BLNE(interrupt_trace)
#endif
	bl	EXT(interrupt_stats)		// Record interrupt statistics
	mov	r0, #0
	bl	EXT(rtclock_intr)		// Call second level exception handler
#ifndef NO_KDEBUG
	movs	r4, r4
	COND_EXTERN_BLNE(interrupt_trace_exit)
#endif

	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW

	b	return_from_irq


/*
 * First Level Exception Handler for FIQ
 * Current mode : FIQ
 * IRQ and FIQ are always disabled while running in the FIQ handler.
 * We do not permit nested interrupts.
 *
 * Saving area: from user   : PCB.
 *              from kernel : interrupt stack.
 *
 * FIQ mode banks seven additional shadow registers, so only r0-r7 and LR
 * need to be saved for fast service here. But if the current thread was
 * running in user mode before the FIQ interrupt, all user registers must
 * be saved for the AST handler routine.
 */
	.text
	.align	2
	.globl	EXT(fleh_fiq_generic)

LEXT(fleh_fiq_generic)
	str	r11, [r10]			// Clear the FIQ source

	ldr	r13, [r8, CPU_TIMEBASE_LOW]	// Load TBL
	adds	r13, r13, #1			// Increment TBL
	str	r13, [r8, CPU_TIMEBASE_LOW]	// Store TBL
	ldreq	r13, [r8, CPU_TIMEBASE_HIGH]	// Load TBU
	addeq	r13, r13, #1			// Increment TBU
	streq	r13, [r8, CPU_TIMEBASE_HIGH]	// Store TBU
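	// The adds above sets Z only when TBL wraps to zero, so the
	// ldreq/addeq/streq sequence propagates the carry into TBU.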
	subs	r12, r12, #1			// Decrement DEC
	str	r12, [r8, CPU_DECREMENTER]	// Store DEC
	subspl	pc, lr, #4			// Return unless DEC < 0
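	// Because the S bit is set with pc as the destination, subspl also
	// restores cpsr from spsr_fiq on this early-return path.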
	b	EXT(fleh_dec)

	.text
	.align 2
	.globl EXT(fleh_dec)
LEXT(fleh_dec)
	mrs	sp, spsr			// Get the spsr
	sub	lr, lr, #4
	tst	sp, #0x0f			// From user? or kernel?
	bne	2f

	/* From user */
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA		// Get User PCB

	stmia	sp, {r0-r12, sp, lr}^
	mov	r7, #0				// Zero the frame pointer
	nop
	str	lr, [sp, SS_PC]

	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]
	mov	r5, sp
	sub	sp, sp, ACT_PCBDATA		// Get User PCB
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [sp, CPU_ISTACKPTR]		// Set interrupt stack
	mov	r6, sp
	cpsid	i, #PSR_SVC_MODE
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [r9, TH_KSTACKPTR]		// Set kernel stack

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mrc	p15, 0, r11, c13, c0, 1		// Get CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov	r0, #1				// Mark this as coming from user context
	b	4f

2:
	/* From kernel */
	tst	sp, #PSR_IRQF			// Test for IRQ masked
	bne	3f				// We're on the cpu_signal path

	cpsid	if, #PSR_SVC_MODE

	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add	r0, sp, EXC_CTX_SIZE

	str	r0, [sp, SS_SP]			// Save supervisor mode sp
	str	lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mrc	p15, 0, r11, c13, c0, 1		// Get CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov	r5, sp				// Saved context in r5

	cpsid	if, #PSR_FIQ_MODE

	mrc	p15, 0, r1, c13, c0, 4		// Read TPIDRPRW

	str	lr, [r5, SS_PC]			// Save LR as the return PC
	mrs	r4, spsr
	str	r4, [r5, SS_CPSR]		// Save the cpsr of the interrupted mode

	ldr	r6, [r1, ACT_CPUDATAP]		// Get current cpu
	ldr	r6, [r6, CPU_ISTACKPTR]		// Set interrupt stack

	mov	r0, #0				// Mark this as coming from kernel context
	b	4f

3:
	/* cpu_signal path */
	mrc	p15, 0, sp, c13, c0, 4		// Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]		// Get current cpu
	ldr	sp, [sp, CPU_FIQSTACKPTR]	// Set fiq stack
	sub	sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	str	lr, [sp, SS_PC]
	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_save)			// Save the current VFP state to the stack
	mov	r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr	fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0		// Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	mrc	p15, 0, r11, c13, c0, 1		// Get CONTEXTIDR
	mov	r3, #0				// Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif

	ALIGN_STACK r0, r1
	mov	r0, r8				// Get current cpu in arg 0
	mov	r1, SIGPdec			// Decrementer signal in arg1
	mov	r2, #0
	mov	r3, #0
	bl	EXT(cpu_signal)			// Call cpu_signal
	UNALIGN_STACK

	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add	r0, sp, SS_SIZE			// Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN			// Get the actual vfp save area
	bl	EXT(vfp_load)			// Load the desired VFP state from the stack
#endif

	clrex					// clear exclusive memory tag
#if	__ARM_ENABLE_WFE_
	sev
#endif
#if __ARM_USER_PROTECT__
	mcr	p15, 0, r10, c2, c0, 0		// Set TTBR0
	mcr	p15, 0, r11, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	ldr	lr, [sp, SS_PC]
	ldmia	sp, {r0-r12}			// Restore saved registers
	movs	pc, lr				// Return from fiq

4:
	cpsid	i, #PSR_IRQ_MODE
	cpsie	f
	mov	sp, r6				// Restore the stack pointer
	ALIGN_STACK r2, r3
	msr	spsr_cxsf, r4			// Restore the spsr
	ldr	r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	add	r2, r2, #1			// Increment count
	str	r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count
	ldr	r4, [r9, ACT_CPUDATAP]		// Get current cpu
	str	r5, [r4, CPU_INT_STATE]
	ldr	r3, [r4, CPU_STAT_IRQ]		// Get IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r4, CPU_STAT_IRQ]		// Update IRQ count
	ldr	r3, [r4, CPU_STAT_IRQ_WAKE]	// Get post-wake IRQ count
	add	r3, r3, #1			// Increment count
	str	r3, [r4, CPU_STAT_IRQ_WAKE]	// Update post-wake IRQ count
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	movs	r0, r0
	beq	5f
	mov	r8, r0				// Stash our "from_user" boolean value
	bl	EXT(timer_state_event_user_to_kernel)
	mov	r0, r8				// Restore our "from_user" value
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
5:
#endif
#if CONFIG_TELEMETRY
	LOAD_ADDR(r4, telemetry_needs_record)	// Check if a telemetry record was requested...
	ldr	r4, [r4]
	movs	r4, r4
	beq	6f
	bl	EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc	p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
6:
#endif

#ifndef NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr	r4, [r4]
	movs	r4, r4
	ldrne	r1, [r9, ACT_CPUDATAP]		// Get current cpu
	ldrne	r0, [r1, CPU_INT_STATE]
	COND_EXTERN_BLNE(interrupt_trace)
#endif
	bl	EXT(interrupt_stats)		// Record interrupt statistics
	mov	r0, #0
	bl	EXT(rtclock_intr)		// Call second level exception handler
#ifndef NO_KDEBUG
	movs	r4, r4
	COND_EXTERN_BLNE(interrupt_trace_exit)
#endif
	UNALIGN_STACK

	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW

	b	return_from_irq

/*
 * void thread_syscall_return(kern_return_t r0)
 *
 */
	.text
	.align	2
	.globl	EXT(thread_syscall_return)

LEXT(thread_syscall_return)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	add	r1, r9, ACT_PCBDATA		// Get User PCB
	str	r0, [r1, SS_R0]			// set return value
#ifndef NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr	r4, [r4]
	movs	r4, r4
	beq	load_and_go_user
	ldr	r12, [r1, SS_R12]		// Load syscall number
	rsbs	r1, r12, #0			// make the syscall positive (if negative)
	COND_EXTERN_BLGT(mach_syscall_trace_exit)
#endif
	b	load_and_go_user

/*
 * void thread_exception_return(void)
 * void thread_bootstrap_return(void)
 *
 */
	.text
	.globl	EXT(thread_exception_return)
	.globl	EXT(thread_bootstrap_return)

LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	bl	EXT(dtrace_thread_bootstrap)
#endif
	// Fall through

LEXT(thread_exception_return)

load_and_go_user:
/*
 * Restore user mode state and return to user mode
 */
	cpsid	i				// Disable irq
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

	mvn	r0, #0
	str	r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before returning to user

	ldr	r8, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr	r5, [r8, CPU_PENDING_AST]	// Get ASTs
	cmp	r5, #0				// Test if ASTs pending
	beq	return_to_user_now		// Branch if no ASTs

	bl	EXT(ast_taken_user)		// Handle all ASTs (may continue via thread_exception_return)

	mrc	p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	b	load_and_go_user		// Loop back

return_to_user_now:

#if MACH_ASSERT
/*
 * Assert that the preemption level is zero prior to the return to user space
 */
	ldr	r1, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	movs	r1, r1				// Test
	beq	0f				// Continue if zero, or...
	adr	r0, L_lagu_panic_str		// Load the panic string...
	blx	EXT(panic)			// Finally, panic
0:
	ldr	r2, [r9, TH_RWLOCK_CNT]		// Load RW lock count
	movs	r2, r2				// Test
	beq	0f				// Continue if zero, or...
	adr	r0, L_lagu_rwlock_cnt_panic_str	// Load the panic string...
	mov	r1, r9				// Thread argument for panic string
	blx	EXT(panic)			// Finally, panic
#endif

0:
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_kernel_to_user)
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr	r8, [r9, ACT_CPUDATAP]		// Get current cpu data
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
#if __ARM_DEBUG__ >= 6
	ldr	r0, [r9, ACT_DEBUGDATA]
	ldr	r6, [r8, CPU_USER_DEBUG]
	cmp	r0, r6				// test if debug registers need to be changed
	beq	1f
	bl	EXT(arm_debug_set)		// argument is already in r0
	mrc	p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
1:
#endif
#if __ARM_VFP__
	add	r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl	EXT(vfp_load)			// Load the desired VFP state from ACT_UVFP
#endif
	add	r0, r9, ACT_PCBDATA		// Get User PCB
	ldr	r4, [r0, SS_CPSR]		// Get saved cpsr
	and	r3, r4, #PSR_MODE_MASK		// Extract current mode
	cmp	r3, #PSR_USER_MODE		// Check user mode
	movne	r0, r3
	bne	EXT(ExceptionVectorPanic)

	msr	spsr_cxsf, r4			// Restore spsr(user mode cpsr)
	mov	sp, r0				// Get User PCB

	clrex					// clear exclusive memory tag
#if	__ARM_ENABLE_WFE_
	sev
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_UPTW_TTB]		// Load thread ttb
	mcr	p15, 0, r3, c2, c0, 0		// Set TTBR0
	ldr	r2, [r9, ACT_ASID]		// Load thread asid
	mcr	p15, 0, r2, c13, c0, 1
	isb
#endif
	ldr	lr, [sp, SS_PC]			// Restore user mode pc
	ldmia	sp, {r0-r12, sp, lr}^		// Restore the other user mode registers
	nop					// Hardware problem
	movs	pc, lr				// Return to user

	.align  2
L_lagu_panic_str:
	.asciz	"load_and_go_user: preemption_level %d"
	.align  2

	.align  2
L_lagu_rwlock_cnt_panic_str:
	.asciz	"load_and_go_user: RW lock count not 0 on thread %p (%u)"
	.align  2

	.align  2
L_evimpanic_str:
	.ascii	"Exception Vector: Illegal Mode: 0x%08X\n\000"
	.align  2

	.text
	.align	2
	.globl	EXT(ExceptionVectorPanic)

LEXT(ExceptionVectorPanic)
	cpsid	i, #PSR_SVC_MODE
	ALIGN_STACK r1, r2
	mov	r1, r0
	adr	r0, L_evimpanic_str
	blx	EXT(panic)
	b	.

#include "globals_asm.h"

LOAD_ADDR_GEN_DEF(mach_trap_table)
LOAD_ADDR_GEN_DEF(kern_invalid)

/* vim: set ts=4: */