/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <pexpert/arm/board_config.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <mach_assert.h>
#include <config_dtrace.h>
#include "assym.s"

#define TRACE_SYSCALL 0

/*
 * Copied to low physical memory in arm_init,
 * so the kernel must be linked virtually at
 * 0xc0001000 or higher to leave space for it.
 */
    .syntax unified
    .text
    .align 12
    .globl EXT(ExceptionLowVectorsBase)

LEXT(ExceptionLowVectorsBase)
    adr pc, Lreset_low_vector
    b . // Undef
    b . // SWI
    b . // Prefetch Abort
    b . // Data Abort
    b . // Address Exception
    b . // IRQ
    b . // FIQ/DEC
LEXT(ResetPrivateData)
    .space (480),0 // (filled with 0s)
// ExceptionLowVectorsBase + 0x200
Lreset_low_vector:
    adr r4, EXT(ResetHandlerData)
    ldr r0, [r4, ASSIST_RESET_HANDLER]
    movs r0, r0
    blxne r0
    adr r4, EXT(ResetHandlerData)
    ldr r1, [r4, CPU_DATA_ENTRIES]
    ldr r1, [r1, CPU_DATA_PADDR]
    ldr r5, [r1, CPU_RESET_ASSIST]
    movs r5, r5
    blxne r5
    adr r4, EXT(ResetHandlerData)
    ldr r0, [r4, BOOT_ARGS]
    ldr r1, [r4, CPU_DATA_ENTRIES]
#if __ARM_SMP__
#if defined(ARMA7)
// physical cpu number is stored in MPIDR Affinity level 0
    mrc p15, 0, r6, c0, c0, 5 // Read MPIDR
    and r6, r6, #0xFF // Extract Affinity level 0
#else
#error missing Who Am I implementation
#endif
#else
    mov r6, #0
#endif /* __ARM_SMP__ */
// physical cpu number matches cpu number
//#if cdeSize != 16
//#error cpu_data_entry is not 16 bytes in size
//#endif
    lsl r6, r6, #4 // Get CpuDataEntry offset
    add r1, r1, r6 // Get cpu_data_entry pointer
    ldr r1, [r1, CPU_DATA_PADDR]
    ldr r5, [r1, CPU_RESET_HANDLER]
    movs r5, r5
    blxne r5 // Branch to cpu reset handler
    b . // Unexpected reset
    .globl EXT(ResetHandlerData)
LEXT(ResetHandlerData)
    .space (rhdSize_NUM),0 // (filled with 0s)


    .globl EXT(ExceptionLowVectorsEnd)
LEXT(ExceptionLowVectorsEnd)

    .text
    .align 12
    .globl EXT(ExceptionVectorsBase)

LEXT(ExceptionVectorsBase)

    adr pc, Lexc_reset_vector
    adr pc, Lexc_undefined_inst_vector
    adr pc, Lexc_swi_vector
    adr pc, Lexc_prefetch_abort_vector
    adr pc, Lexc_data_abort_vector
    adr pc, Lexc_address_exception_vector
    adr pc, Lexc_irq_vector
#if __ARM_TIME__
    adr pc, Lexc_decirq_vector
#else /* ! __ARM_TIME__ */
    mov pc, r9
#endif /* __ARM_TIME__ */

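/*
 * Each vector body below uses the banked sp of the exception mode as a
 * scratch register: TPIDRPRW holds the current thread pointer, from which
 * the per-cpu data and the boot-patched handler table are reached. A
 * sketch of the dispatch, using the undefined-instruction slot (#4) as an
 * example:
 *
 *   sp = TPIDRPRW;             // current thread
 *   sp = sp->cpu_data_ptr;     // ACT_CPUDATAP
 *   sp = sp->cpu_exc_vectors;  // CPU_EXC_VECTORS
 *   pc = sp[1];                // second table entry: fleh_undef
 */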
Lexc_reset_vector:
    b .
    .long 0x0
    .long 0x0
    .long 0x0
Lexc_undefined_inst_vector:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
    ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
    ldr pc, [sp, #4] // Branch to exception handler
Lexc_swi_vector:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
    ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
    ldr pc, [sp, #8] // Branch to exception handler
Lexc_prefetch_abort_vector:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
    ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
    ldr pc, [sp, #0xC] // Branch to exception handler
Lexc_data_abort_vector:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
    ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
    ldr pc, [sp, #0x10] // Branch to exception handler
Lexc_address_exception_vector:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
    ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
    ldr pc, [sp, #0x14] // Branch to exception handler
Lexc_irq_vector:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
    ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
    ldr pc, [sp, #0x18] // Branch to exception handler
#if __ARM_TIME__
Lexc_decirq_vector:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [sp, ACT_CPUDATAP] // Get current cpu data
    ldr sp, [sp, CPU_EXC_VECTORS] // Get exception vector table
    ldr pc, [sp, #0x1C] // Branch to exception handler
#else /* ! __ARM_TIME__ */
    .long 0x0
    .long 0x0
    .long 0x0
    .long 0x0
#endif /* __ARM_TIME__ */

    .fill 984, 4, 0 // Push to the 4KB page boundary

    .globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)


/*
 * Targets for the exception vectors; we patch these during boot (to allow
 * for position independent code without complicating the vectors; see start.s).
 */
    .globl EXT(ExceptionVectorsTable)
LEXT(ExceptionVectorsTable)
Lreset_vector:
    .long 0x0
Lundefined_inst_vector:
    .long 0x0
Lswi_vector:
    .long 0x0
Lprefetch_abort_vector:
    .long 0x0
Ldata_abort_vector:
    .long 0x0
Laddress_exception_vector:
    .long 0x0
Lirq_vector:
    .long 0x0
Ldecirq_vector:
    .long 0x0


/*
 * First Level Exception Handlers
 */
    .text
    .align 2
    .globl EXT(fleh_reset)
LEXT(fleh_reset)
    b . // Never return

/*
 * First Level Exception Handler for Undefined Instruction.
 */
    .text
    .align 2
    .globl EXT(fleh_undef)

/*
 * Ensures the stack is safely aligned, usually in preparation for an external branch
 * arg0: temp register for storing the stack offset
 * arg1: temp register for storing the previous stack pointer
 */
.macro ALIGN_STACK
/*
 * For armv7k ABI, the stack needs to be 16-byte aligned
 */
#if __BIGGEST_ALIGNMENT__ > 4
    and $0, sp, #0x0F // sp mod 16-bytes
    cmp $0, #4 // need space for the sp on the stack
    addlt $0, $0, #0x10 // make room if needed, but keep stack aligned
    mov $1, sp // get current sp
    sub sp, sp, $0 // align stack
    str $1, [sp] // store previous sp on stack
#endif
.endmacro
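/*
 * Worked example (illustrative): with sp == 0x...e8, sp & 0xF is 8, which
 * already leaves room for the 4-byte saved pointer, so sp drops to the
 * aligned 0x...e0 and the old sp is stored there. With sp == 0x...e0 the
 * remainder is 0 (< 4), so 16 is added to the offset and sp drops to
 * 0x...d0 instead -- still 16-byte aligned, with room for the saved sp
 * that UNALIGN_STACK (below) reloads.
 */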

/*
 * Restores the stack pointer to its previous value following an ALIGN_STACK call
 */
.macro UNALIGN_STACK
#if __BIGGEST_ALIGNMENT__ > 4
    ldr sp, [sp]
#endif
.endmacro

/*
 * Checks that the cpu is currently in the expected mode, panics if not.
 * arg0: the expected mode, should be one of the PSR_*_MODE defines
 */
.macro VERIFY_EXCEPTION_MODE
    mrs sp, cpsr // Read cpsr
    and sp, sp, #PSR_MODE_MASK // Extract current mode
    cmp sp, $0 // Check specified mode
    movne r0, sp
    bne EXT(ExceptionVectorPanic)
.endmacro

/*
 * Checks the previous processor mode. If usermode, will execute the code
 * following the macro to handle the userspace exception. Otherwise,
 * will branch to an ELSE_IF_KERNELMODE_EXCEPTION call with the same
 * argument.
 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 */
.macro IF_USERMODE_EXCEPTION
    mrs sp, spsr
    and sp, sp, #PSR_MODE_MASK // Is it from user?
    cmp sp, #PSR_USER_MODE
    beq $0_from_user
    cmp sp, #PSR_IRQ_MODE
    beq $0_from_irq
    cmp sp, #PSR_FIQ_MODE
    beq $0_from_fiq
    bne $0_from_svc
$0_from_user:
.endmacro

/*
 * Handles an exception taken from kernelmode (IRQ/FIQ/SVC/etc).
 * Places the processor into the correct mode and executes the
 * code following the macro to handle the kernel exception.
 * Intended to be paired with a prior call to IF_USERMODE_EXCEPTION.
 * arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 */
.macro ELSE_IF_KERNELMODE_EXCEPTION
$0_from_irq:
    cpsid i, #PSR_IRQ_MODE
    b $0_from_kernel
$0_from_fiq:
    cpsid i, #PSR_FIQ_MODE
    b $0_from_kernel
$0_from_svc:
    cpsid i, #PSR_SVC_MODE
$0_from_kernel:
.endmacro
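/*
 * Expansion sketch: invoked as IF_USERMODE_EXCEPTION dataabt, the pair of
 * macros generates the labels dataabt_from_user, dataabt_from_irq,
 * dataabt_from_fiq, dataabt_from_svc and dataabt_from_kernel. The code
 * between the two macro invocations handles the user case; the code after
 * ELSE_IF_KERNELMODE_EXCEPTION dataabt runs for every kernel-mode source,
 * after the processor has been switched into the interrupted mode so that
 * mode's banked sp/lr are visible.
 */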

LEXT(fleh_undef)
    VERIFY_EXCEPTION_MODE PSR_UND_MODE
    mrs sp, spsr // Check the previous mode
    tst sp, #PSR_TF // Is it Thumb?
    subeq lr, lr, #4
    subne lr, lr, #2
    IF_USERMODE_EXCEPTION undef
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    add sp, sp, ACT_PCBDATA // Get current thread PCB pointer

    stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
    mov r7, #0 // Zero the frame pointer
    nop

    mov r0, sp // Store arm_saved_state pointer
               // for argument

    str lr, [sp, SS_PC] // Save user mode pc register

    mrs r4, spsr
    str r4, [sp, SS_CPSR] // Save user mode cpsr

    cpsid i, #PSR_SVC_MODE
    mrs r3, cpsr // Read cpsr
    msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
#if __ARM_USER_PROTECT__
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
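/*
 * Note on __ARM_USER_PROTECT__: on such configurations the kernel and user
 * address spaces live under separate first-level page tables, so each
 * kernel entry from user space switches TTBR0 to the kernel tables
 * (ACT_KPTW_TTB) and CONTEXTIDR to ASID 0; the matching exit paths restore
 * the thread's tables (ACT_UPTW_TTB) and ASID before returning.
 */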

    mvn r0, #0
    str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    bl EXT(timer_state_event_user_to_kernel)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
#endif

#if __ARM_VFP__
    add r0, r9, ACT_UVFP // Get the address of the user VFP save area
    bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
    mov r3, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r3 // And shove it into FPSCR
    add r1, r9, ACT_UVFP // Reload the pointer to the save state
    add r0, r9, ACT_PCBDATA // Reload the VFP save state argument
#else
    mov r1, #0 // Clear the VFP save state argument
    add r0, r9, ACT_PCBDATA // Reload arm_saved_state pointer
#endif

    bl EXT(sleh_undef) // Call second level handler
                       // sleh will enable interrupt
    b load_and_go_user

    ELSE_IF_KERNELMODE_EXCEPTION undef
/*
 * We have a kernel stack already, and I will use it to save contexts
 * IRQ is disabled
 */
#if CONFIG_DTRACE
// We need a frame for backtracing. The LR here is the LR of supervisor mode, not the location where the exception
// took place. We'll store that later after we switch to undef mode and pull out the LR from there.

// This frame is consumed by fbt_invop. Any changes with the size or location of this frame will probably require
// changes in fbt_invop also.
    stmfd sp!, { r7, lr }
#endif

    sub sp, sp, EXC_CTX_SIZE // Reserve for arm_saved_state

    stmia sp, {r0-r12} // Save on supervisor mode stack
    str lr, [sp, SS_LR]

#if CONFIG_DTRACE
    add r7, sp, EXC_CTX_SIZE // Save frame pointer
#endif

    mrs r4, lr_und
    str r4, [sp, SS_PC] // Save the exception pc
    mrs r4, spsr_und
    str r4, [sp, SS_CPSR]

    mov ip, sp

/*
 * sp - stack pointer
 * ip - stack pointer
 * r7 - frame pointer state
 */


#if CONFIG_DTRACE
    ldr r0, [ip, SS_PC] // Get the exception pc to store later
#endif

    add ip, ip, EXC_CTX_SIZE // Send stack pointer to debugger
#if CONFIG_DTRACE
    str r0, [ip, #4]
    add ip, ip, #8
#endif
    str ip, [sp, SS_SP] // for accessing local variable
#if CONFIG_DTRACE
    sub ip, ip, #8
#endif
    sub ip, ip, EXC_CTX_SIZE

#if __ARM_VFP__
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    add r0, sp, SS_SIZE // Get vfp state pointer
    bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
    add r0, VSS_ALIGN // Get the actual vfp save area
    mov r5, r0 // Stash the save area in another register
    bl EXT(vfp_save) // Save the current VFP state to the stack
    mov r1, r5 // Load the VFP save area argument
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#else
    mov r1, #0 // Clear the facility context argument
#endif
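/*
 * Layout note: EXC_CTX_SIZE covers the ARM saved state (SS_SIZE) plus the
 * VFP save area. Because the VFP state has a stricter alignment rule, the
 * pointer just past the ARM state is rounded with bic to a VSS_ALIGN_NUM
 * boundary and then advanced by VSS_ALIGN, so the save area always sits
 * wholly above the ARM state within the reserved region.
 */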
#if __ARM_USER_PROTECT__
    mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    cmp r3, r10
    beq 1f
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1:
    mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    mov r0, sp // Argument

    ALIGN_STACK r2, r3
    bl EXT(sleh_undef) // Call second level handler
    UNALIGN_STACK

#if __ARM_USER_PROTECT__
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
    cmp r10, r0
    beq 1f
    ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
    cmp r10, r0
    beq 1f
    mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
    ldr r11, [r9, ACT_ASID] // Load thread asid
1:
    mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
    isb
#endif
    b load_and_go_sys


/*
 * First Level Exception Handler for Software Interrupt
 *
 * We assert that only user level can use the "SWI" instruction for a system
 * call on development kernels, and assume it's true on release.
 *
 * System call number is stored in r12.
 * System call arguments are stored in r0 to r6 and r8 (we skip r7)
 *
 */
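/*
 * For reference, a user-space invocation under this convention is a
 * sketch along the lines of (trap number illustrative):
 *
 *   mov  r12, #trap_number   // BSD syscalls positive, Mach traps negative
 *   svc  #0x80               // trap into fleh_swi
 *   // return value in r0 (r0/r1 for 64-bit returns)
 */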
    .text
    .align 5
    .globl EXT(fleh_swi)

LEXT(fleh_swi)
    cpsid i, #PSR_ABT_MODE
    mov sp, ip // Save ip
    cpsid i, #PSR_SVC_MODE
    mrs ip, spsr // Check the previous mode
    tst ip, #0x0f
    cpsid i, #PSR_ABT_MODE
    mov ip, sp // Restore ip
    cpsid i, #PSR_SVC_MODE
    beq swi_from_user

/* Only user mode can use SWI. Panic if the kernel tries. */
swi_from_kernel:
    sub sp, sp, EXC_CTX_SIZE
    stmia sp, {r0-r12}
    add r0, sp, EXC_CTX_SIZE

    str r0, [sp, SS_SP] // Save supervisor mode sp
    str lr, [sp, SS_LR] // Save supervisor mode lr

    ALIGN_STACK r0, r1
    adr r0, L_kernel_swi_panic_str // Load panic messages and panic()
    blx EXT(panic)
    b .

swi_from_user:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    add sp, sp, ACT_PCBDATA // Get User PCB


/* Check for the special mach_absolute_time trap value.
 * This is intended to be a super-lightweight call to ml_get_timebase(), which
 * is handrolled assembly and does not use the stack, thus not requiring us to set up a kernel stack. */
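/* Special r12 values recognized by this handler before the generic
 * mach/unix dispatch:
 *   -3          mach_absolute_time (handled right here, no kernel stack)
 *   -4          mach_continuous_time (fleh_swi_trap_mct, below)
 *   0x80000000  platform syscalls: cache maintenance and
 *               get/set_cthread (fleh_swi_trap, selector in r3)
 */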
    cmp r12, #-3
    beq fleh_swi_trap_tb
    stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
    mov r7, #0 // Zero the frame pointer
    nop
    mov r8, sp // Store arm_saved_state pointer
    add sp, sp, SS_PC
    srsia sp, #PSR_SVC_MODE
    mrs r3, cpsr // Read cpsr
    msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
    sub r9, sp, ACT_PCBDATA_PC
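/*
 * The sequence above leans on SRSIA: with sp pointing at the PCB's SS_PC
 * slot, "srsia sp, #PSR_SVC_MODE" stores the banked LR (user pc) and SPSR
 * (user cpsr) in a single instruction, finishing the register save
 * without touching a general-purpose register. Since sp was advanced by
 * ACT_PCBDATA + SS_PC from the thread pointer, subtracting ACT_PCBDATA_PC
 * recovers the thread pointer into r9.
 */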

    ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack
    mov r11, r12 // save the syscall vector in a nontrashed register

#if __ARM_VFP__
    add r0, r9, ACT_UVFP // Get the address of the user VFP save area
    bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif

    mvn r0, #0
    str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling SWI from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    bl EXT(timer_state_event_user_to_kernel)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    add r8, r9, ACT_PCBDATA // Reload arm_saved_state pointer
#endif
    ldr r10, [r9, ACT_TASK] // Load the current task

/* enable interrupts */
    cpsie i // Enable IRQ

    cmp r11, #-4 // Special value for mach_continuous_time
    beq fleh_swi_trap_mct

    cmp r11, #0x80000000
    beq fleh_swi_trap
fleh_swi_trap_ret:

#if TRACE_SYSCALL
/* trace the syscall */
    mov r0, r8
    bl EXT(syscall_trace)
#endif

    bl EXT(mach_kauth_cred_uthread_update)
    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
/* unix syscall? */
    rsbs r5, r11, #0 // make the syscall positive (if negative)
    ble fleh_swi_unix // positive syscalls are unix (note reverse logic here)

fleh_swi_mach:
/* note that mach_syscall_trace can modify r9, so increment the thread
 * syscall count before the call : */
    ldr r2, [r9, TH_MACH_SYSCALLS]
    add r2, r2, #1
    str r2, [r9, TH_MACH_SYSCALLS]

    LOAD_ADDR(r1, mach_trap_table) // load mach_trap_table
#if MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 12
    add r11, r5, r5, lsl #1 // syscall * 3
    add r6, r1, r11, lsl #2 // trap_table + syscall * 12
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 16
    add r6, r1, r5, lsl #4 // trap_table + syscall * 16
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 20
    add r11, r5, r5, lsl #2 // syscall * 5
    add r6, r1, r11, lsl #2 // trap_table + syscall * 20
#else
#error mach_trap_t size unhandled (see MACH_TRAP_TABLE_ENTRY_SIZE)!
#endif
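/*
 * The scaling above is call_number * sizeof(mach_trap_t) built from
 * shift-and-add, e.g. for the 12-byte case:
 *   r11 = r5 + (r5 << 1)   // call * 3
 *   r6  = r1 + (r11 << 2)  // table + call * 12
 * so a hypothetical call number 26 with 12-byte entries lands r6 at
 * mach_trap_table + 312.
 */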

#ifndef NO_KDEBUG
    LOAD_ADDR(r4, kdebug_enable)
    ldr r4, [r4]
    movs r4, r4
    movne r0, r8 // ready the reg state pointer as an arg to the call
    movne r1, r5 // syscall number as 2nd arg
    COND_EXTERN_BLNE(mach_syscall_trace)
#endif
    adr lr, fleh_swi_exit // any calls from here on out will return to our exit path
    cmp r5, MACH_TRAP_TABLE_COUNT // check syscall number range
    bge fleh_swi_mach_error

/*
 * For arm32 ABI where 64-bit types are aligned to even registers and
 * 64-bits on stack, we need to unpack registers differently. So
 * we use the mungers for marshalling in arguments from user space.
 * Currently this is just ARMv7k.
 */
#if __BIGGEST_ALIGNMENT__ > 4
    sub sp, #0x40 // allocate buffer and keep stack 128-bit aligned
                  // it should be big enough for all syscall arguments
    ldr r11, [r6, #8] // get mach_trap_table[call_number].mach_trap_arg_munge32
    teq r11, #0 // check if we have a munger
    moveq r0, #0
    movne r0, r8 // ready the reg state pointer as an arg to the call
    movne r1, sp // stack will hold arguments buffer
    blxne r11 // call munger to get arguments from userspace
    adr lr, fleh_swi_exit // any calls from here on out will return to our exit path
    teq r0, #0
    bne fleh_swi_mach_error // exit if the munger returned non-zero status
#endif

    ldr r1, [r6, #4] // load the syscall vector

    LOAD_ADDR(r2, kern_invalid) // test to make sure the trap is not kern_invalid
    teq r1, r2
    beq fleh_swi_mach_error

#if __BIGGEST_ALIGNMENT__ > 4
    mov r0, sp // argument buffer on stack
    bx r1 // call the syscall handler
#else
    mov r0, r8 // ready the reg state pointer as an arg to the call
    bx r1 // call the syscall handler
#endif

fleh_swi_exit64:
    str r1, [r8, #4] // top of 64-bit return
fleh_swi_exit:
    str r0, [r8] // save the return value
#ifndef NO_KDEBUG
    movs r4, r4
    movne r1, r5
    COND_EXTERN_BLNE(mach_syscall_trace_exit)
#endif
#if TRACE_SYSCALL
    bl EXT(syscall_trace_exit)
#endif

    mov r0, #1
    bl EXT(throttle_lowpri_io) // throttle_lowpri_io(1);

    bl EXT(thread_exception_return)
    b .

fleh_swi_mach_error:
    mov r0, #EXC_SYSCALL
    sub r1, sp, #4
    mov r2, #1
    bl EXT(exception_triage)
    b .

    .align 5
fleh_swi_unix:
    ldr r1, [r9, TH_UNIX_SYSCALLS]
    mov r0, r8 // reg state structure is arg
    add r1, r1, #1
    str r1, [r9, TH_UNIX_SYSCALLS]
    mov r1, r9 // current thread in arg1
    ldr r2, [r9, TH_UTHREAD] // current uthread in arg2
    ldr r3, [r10, TASK_BSD_INFO] // current proc in arg3
    bl EXT(unix_syscall)
    b .

fleh_swi_trap:
    ldmia r8, {r0-r3}
    cmp r3, #3
    addls pc, pc, r3, LSL#2
    b fleh_swi_trap_ret
    b icache_invalidate_trap
    b dcache_flush_trap
    b thread_set_cthread_trap
    b thread_get_cthread_trap
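/*
 * Dispatch note: "addls pc, pc, r3, LSL#2" is a classic ARM jump table.
 * When it executes, pc reads as that instruction's address + 8, i.e. the
 * "b icache_invalidate_trap" slot, so adding r3 * 4 selects one of the
 * four branches for r3 = 0..3; the ls condition sends any larger selector
 * through the fall-through "b fleh_swi_trap_ret" instead.
 */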

icache_invalidate_trap:
    add r3, r0, r1
    cmp r3, VM_MAX_ADDRESS
    subhi r3, r3, #1<<MMU_CLINE
    bhi cache_trap_error
    adr r11, cache_trap_jmp
    ldr r6, [r9, TH_RECOVER] // Save existing recovery routine
    str r11, [r9, TH_RECOVER]
#if __ARM_USER_PROTECT__
    ldr r5, [r9, ACT_UPTW_TTB] // Load thread ttb
    mcr p15, 0, r5, c2, c0, 0 // Set TTBR0
    ldr r5, [r9, ACT_ASID] // Load thread asid
    mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
    dsb ish
    isb
#endif
    bl EXT(InvalidatePoU_IcacheRegion)
    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
    ldr r4, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r4, c2, c0, 0 // Set TTBR0
    mov r4, #0 // Load kernel asid
    mcr p15, 0, r4, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    str r6, [r9, TH_RECOVER]
    bl EXT(thread_exception_return)
    b .

dcache_flush_trap:
    add r3, r0, r1
    cmp r3, VM_MAX_ADDRESS
    subhi r3, r3, #1<<MMU_CLINE
    bhi cache_trap_error
    adr r11, cache_trap_jmp
    ldr r4, [r9, TH_RECOVER] // Save existing recovery routine
    str r11, [r9, TH_RECOVER]
#if __ARM_USER_PROTECT__
    ldr r6, [r9, ACT_UPTW_TTB] // Load thread ttb
    mcr p15, 0, r6, c2, c0, 0 // Set TTBR0
    ldr r5, [r9, ACT_ASID] // Load thread asid
    mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    bl EXT(flush_dcache_syscall)
    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
    ldr r5, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r5, c2, c0, 0 // Set TTBR0
    mov r5, #0 // Load kernel asid
    mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    str r4, [r9, TH_RECOVER]
    bl EXT(thread_exception_return)
    b .

thread_set_cthread_trap:
    bl EXT(thread_set_cthread_self)
    bl EXT(thread_exception_return)
    b .

thread_get_cthread_trap:
    bl EXT(thread_get_cthread_self)
    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
    add r1, r9, ACT_PCBDATA // Get User PCB
    str r0, [r1, SS_R0] // set return value
    bl EXT(thread_exception_return)
    b .

cache_trap_jmp:
#if __ARM_USER_PROTECT__
    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
    ldr r5, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r5, c2, c0, 0 // Set TTBR0
    mov r5, #0 // Load kernel asid
    mcr p15, 0, r5, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    mrc p15, 0, r3, c6, c0 // Read Fault Address
cache_trap_error:
    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
    add r0, r9, ACT_PCBDATA // Get User PCB
    ldr r1, [r0, SS_PC] // Save user mode pc register as pc
    sub r1, r1, #4 // Backtrack current pc
    str r1, [r0, SS_PC] // pc at cache assist swi
    str r3, [r0, SS_VADDR] // Fault Address
    mov r0, #EXC_BAD_ACCESS
    mov r2, KERN_INVALID_ADDRESS
    sub sp, sp, #8
    mov r1, sp
    str r2, [sp]
    str r3, [sp, #4]
    ALIGN_STACK r2, r3
    mov r2, #2
    bl EXT(exception_triage)
    b .

fleh_swi_trap_mct:
    bl EXT(mach_continuous_time)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    add r9, r9, ACT_PCBDATA_R0 // Get User register state
    stmia r9, {r0, r1} // set 64-bit return value
    bl EXT(thread_exception_return)
    b .

fleh_swi_trap_tb:
    str lr, [sp, SS_PC]
    bl EXT(ml_get_timebase) // ml_get_timebase() (64-bit return)
    ldr lr, [sp, SS_PC]
    nop
    movs pc, lr // Return to user

    .align 2
L_kernel_swi_panic_str:
    .asciz "fleh_swi: took SWI from kernel mode\n"
    .align 2

/*
 * First Level Exception Handler for Prefetch Abort.
 */
    .text
    .align 2
    .globl EXT(fleh_prefabt)

LEXT(fleh_prefabt)
    VERIFY_EXCEPTION_MODE PSR_ABT_MODE
    sub lr, lr, #4

    IF_USERMODE_EXCEPTION prefabt
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    add sp, sp, ACT_PCBDATA // Get User PCB

    stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
    mov r7, #0 // Zero the frame pointer
    nop
    mov r0, sp // Store arm_saved_state pointer
               // For argument
    str lr, [sp, SS_PC] // Save user mode pc register as pc
    mrc p15, 0, r1, c6, c0, 2 // Read IFAR
    str r1, [sp, SS_VADDR] // and fault address of pcb

    mrc p15, 0, r5, c5, c0, 1 // Read Fault Status
    str r5, [sp, SS_STATUS] // Save fault status register to pcb

    mrs r4, spsr
    str r4, [sp, SS_CPSR] // Save user mode cpsr

    cpsid i, #PSR_SVC_MODE
    mrs r3, cpsr // Read cpsr
    msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack

#if __ARM_VFP__
    add r0, r9, ACT_UVFP // Get the address of the user VFP save area
    bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
    mov r3, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r3 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif

    mvn r0, #0
    str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    bl EXT(timer_state_event_user_to_kernel)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
#endif

    add r0, r9, ACT_PCBDATA // Reload arm_saved_state pointer
    mov r1, T_PREFETCH_ABT // Pass abort type
    bl EXT(sleh_abort) // Call second level handler
                       // Sleh will enable interrupt
    b load_and_go_user

    ELSE_IF_KERNELMODE_EXCEPTION prefabt
/*
 * We have a kernel stack already, and I will use it to save contexts:
 * ------------------
 * | VFP saved state |
 * |------------------|
 * | ARM saved state |
 * SP ------------------
 *
 * IRQ is disabled
 */
    sub sp, sp, EXC_CTX_SIZE
    stmia sp, {r0-r12}
    add r0, sp, EXC_CTX_SIZE

    str r0, [sp, SS_SP] // Save supervisor mode sp
    str lr, [sp, SS_LR] // Save supervisor mode lr

    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW

#if __ARM_VFP__
    add r0, sp, SS_SIZE // Get vfp state pointer
    bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
    add r0, VSS_ALIGN // Get the actual vfp save area
    bl EXT(vfp_save) // Save the current VFP state to the stack
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    cmp r3, r10
    beq 1f
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1:
    mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif

    mrs r4, lr_abt
    str r4, [sp, SS_PC] // Save pc

    mrc p15, 0, r5, c6, c0, 2 // Read IFAR
    str r5, [sp, SS_VADDR] // and fault address of pcb
    mrc p15, 0, r5, c5, c0, 1 // Read (instruction) Fault Status
    str r5, [sp, SS_STATUS] // Save fault status register to pcb

    mrs r4, spsr_abt
    str r4, [sp, SS_CPSR]

    mov r0, sp
    ALIGN_STACK r1, r2
    mov r1, T_PREFETCH_ABT // Pass abort type
    bl EXT(sleh_abort) // Call second level handler
    UNALIGN_STACK

    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
#if __ARM_USER_PROTECT__
    ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
    cmp r10, r0
    beq 1f
    ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
    cmp r10, r0
    beq 1f
    mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
    ldr r11, [r9, ACT_ASID] // Load thread asid
1:
    mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
    isb
#endif

    b load_and_go_sys


/*
 * First Level Exception Handler for Data Abort
 */
    .text
    .align 2
    .globl EXT(fleh_dataabt)

LEXT(fleh_dataabt)
    VERIFY_EXCEPTION_MODE PSR_ABT_MODE
    sub lr, lr, #8
    IF_USERMODE_EXCEPTION dataabt
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    add sp, sp, ACT_PCBDATA // Get User PCB

    stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB
    mov r7, #0 // Zero the frame pointer
    nop

    mov r0, sp // Store arm_saved_state pointer
               // For argument

    str lr, [sp, SS_PC] // Save user mode pc register

    mrs r4, spsr
    str r4, [sp, SS_CPSR] // Save user mode cpsr

    mrc p15, 0, r5, c5, c0 // Read Fault Status
    mrc p15, 0, r6, c6, c0 // Read Fault Address
    str r5, [sp, SS_STATUS] // Save fault status register to pcb
    str r6, [sp, SS_VADDR] // Save fault address to pcb

    cpsid i, #PSR_SVC_MODE
    mrs r3, cpsr // Read cpsr
    msr spsr_cxsf, r3 // Set spsr(svc mode cpsr)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [r9, TH_KSTACKPTR] // Load kernel stack

#if __ARM_VFP__
    add r0, r9, ACT_UVFP // Get the address of the user VFP save area
    bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
    mov r3, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r3 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif

    mvn r0, #0
    str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    bl EXT(timer_state_event_user_to_kernel)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
#endif

    add r0, r9, ACT_PCBDATA // Reload arm_saved_state pointer
    mov r1, T_DATA_ABT // Pass abort type
    bl EXT(sleh_abort) // Call second level handler
                       // Sleh will enable irq
    b load_and_go_user

    ELSE_IF_KERNELMODE_EXCEPTION dataabt
/*
 * We have a kernel stack already, and I will use it to save contexts:
 * ------------------
 * | VFP saved state |
 * |------------------|
 * | ARM saved state |
 * SP ------------------
 *
 * IRQ is disabled
 */
    sub sp, sp, EXC_CTX_SIZE
    stmia sp, {r0-r12}
    add r0, sp, EXC_CTX_SIZE

    str r0, [sp, SS_SP] // Save supervisor mode sp
    str lr, [sp, SS_LR] // Save supervisor mode lr

    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW

#if __ARM_VFP__
    add r0, sp, SS_SIZE // Get vfp state pointer
    bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
    add r0, VSS_ALIGN // Get the actual vfp save area
    bl EXT(vfp_save) // Save the current VFP state to the stack
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif

    mrs r4, lr_abt
    str r4, [sp, SS_PC]
    mrs r4, spsr_abt
    str r4, [sp, SS_CPSR]

#if __ARM_USER_PROTECT__
    mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    cmp r3, r10
    beq 1f
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1:
    mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    mrc p15, 0, r5, c5, c0 // Read Fault Status
    mrc p15, 0, r6, c6, c0 // Read Fault Address
    str r5, [sp, SS_STATUS] // Save fault status register to pcb
    str r6, [sp, SS_VADDR] // Save fault address to pcb

    mov r0, sp // Argument
    ALIGN_STACK r1, r2
    mov r1, T_DATA_ABT // Pass abort type
    bl EXT(sleh_abort) // Call second level handler
    UNALIGN_STACK

    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
#if __ARM_USER_PROTECT__
    ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
    cmp r10, r0
    beq 1f
    ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
    cmp r10, r0
    beq 1f
    mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
    ldr r11, [r9, ACT_ASID] // Load thread asid
1:
    mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
    isb
#endif

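/*
 * Common return path for exceptions taken from kernel mode. If the
 * interrupted context had IRQs enabled, the preemption count is zero and
 * an urgent AST is pending, ast_taken_kernel runs first; the context
 * saved on the stack (including any VFP state) is then reloaded and
 * control returns to the interrupted mode via "movs pc, lr".
 */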
load_and_go_sys:
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW

    ldr r4, [sp, SS_CPSR] // Load saved cpsr
    tst r4, #PSR_IRQF // Test IRQ set
    bne lags1 // Branch if IRQ disabled

    cpsid i // Disable IRQ
    ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
    movs r2, r2 // Test if null
    ldr r8, [r9, ACT_CPUDATAP] // Get current cpu
    bne lags1 // Branch if count not null
    ldr r5, [r8, CPU_PENDING_AST] // Get ASTs
    ands r5, r5, AST_URGENT // Get the requests we do honor
    beq lags1 // Branch if no ASTs
#if __ARM_USER_PROTECT__
    mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    cmp r3, r10
    beq 1f
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
1:
    mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    ldr lr, [sp, SS_LR] // Restore the link register
    stmfd sp!, {r7, lr} // Push a fake frame

    ALIGN_STACK r2, r3
    bl EXT(ast_taken_kernel) // Handle AST_URGENT
    UNALIGN_STACK

    ldmfd sp!, {r7, lr} // Pop the fake frame
    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
    ldr r8, [r9, ACT_CPUDATAP] // Get current cpu
#if __ARM_USER_PROTECT__
    ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
    cmp r10, r0
    beq 1f
    ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
    cmp r10, r0
    beq 1f
    mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
    ldr r11, [r9, ACT_ASID] // Load thread asid
1:
    mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
    isb
#endif
lags1:
    ldr lr, [sp, SS_LR]

    mov ip, sp // Save pointer to contexts for abort mode
    ldr sp, [ip, SS_SP] // Restore stack pointer

    cpsid if, #PSR_ABT_MODE

    mov sp, ip

    ldr r4, [sp, SS_CPSR]
    msr spsr_cxsf, r4 // Restore spsr

    clrex // clear exclusive memory tag
#if __ARM_ENABLE_WFE_
    sev
#endif

#if __ARM_VFP__
    add r0, sp, SS_SIZE // Get vfp state pointer
    bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
    add r0, VSS_ALIGN // Get the actual vfp save area
    bl EXT(vfp_load) // Load the desired VFP state from the stack
#endif

    ldr lr, [sp, SS_PC] // Restore lr

    ldmia sp, {r0-r12} // Restore other registers

    movs pc, lr // Return to sys (svc, irq, fiq)

/*
 * First Level Exception Handler for address exception
 * Not supported
 */
    .text
    .align 2
    .globl EXT(fleh_addrexc)

LEXT(fleh_addrexc)
    b .


/*
 * First Level Exception Handler for IRQ
 * Current mode : IRQ
 * IRQ and FIQ are always disabled while running in the IRQ handler.
 * We do not permit nested interrupts.
 *
 * Saving area: from user : PCB.
 *              from kernel : interrupt stack.
 */

    .text
    .align 2
    .globl EXT(fleh_irq)

LEXT(fleh_irq)
    sub lr, lr, #4

    cpsie a // Re-enable async aborts

    mrs sp, spsr
    tst sp, #0x0f // From user? or kernel?
    bne fleh_irq_kernel

fleh_irq_user:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    add sp, sp, ACT_PCBDATA // Get User PCB
    stmia sp, {r0-r12, sp, lr}^
    mov r7, #0 // Zero the frame pointer
    nop
    str lr, [sp, SS_PC]
    mrs r4, spsr
    str r4, [sp, SS_CPSR]
    mov r5, sp // Saved context in r5
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    ldr r6, [r9, ACT_CPUDATAP] // Get current cpu
    ldr sp, [r6, CPU_ISTACKPTR] // Set interrupt stack
    cpsid i, #PSR_SVC_MODE
    ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack
    cpsid i, #PSR_IRQ_MODE

#if __ARM_VFP__
    add r0, r9, ACT_UVFP // Get the address of the user VFP save area
    bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    bl EXT(timer_state_event_user_to_kernel)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
#endif
#if CONFIG_TELEMETRY
    LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
    mov r0, #1
    ldr r2, [r2]
    movs r2, r2
    beq 1f
    mov r1, #0 // (not a PMI record)
    bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
    mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1:
#endif

    b fleh_irq_handler

fleh_irq_kernel:
    cpsid i, #PSR_SVC_MODE

    sub sp, sp, EXC_CTX_SIZE
    stmia sp, {r0-r12}
    add r0, sp, EXC_CTX_SIZE

    str r0, [sp, SS_SP] // Save supervisor mode sp
    str lr, [sp, SS_LR] // Save supervisor mode lr

    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW

#if __ARM_VFP__
    add r0, sp, SS_SIZE // Get vfp state pointer
    bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
    add r0, VSS_ALIGN // Get the actual vfp save area
    bl EXT(vfp_save) // Save the current VFP state to the stack
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    mov r5, sp // Saved context in r5

    cpsid i, #PSR_IRQ_MODE

    str lr, [r5, SS_PC] // Save LR as the return PC
    mrs r4, spsr
    str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode

    ldr sp, [r9, ACT_CPUDATAP] // Get current cpu
    ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack

#if CONFIG_TELEMETRY
    LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
    mov r0, #0
    ldr r2, [r2]
    movs r2, r2
    beq 1f
    mov r1, #0 // (not a PMI record)
    bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
    mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1:
#endif

fleh_irq_handler:
    ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
    add r2, r2, #1 // Increment count
    str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
#ifndef NO_KDEBUG
    LOAD_ADDR(r8, kdebug_enable)
    ldr r8, [r8]
    movs r8, r8
    movne r0, r5
    COND_EXTERN_BLNE(interrupt_trace)
#endif
    bl EXT(interrupt_stats) // Record interrupt statistics
    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
    ldr r4, [r9, ACT_CPUDATAP] // Get current cpu
    str r5, [r4, CPU_INT_STATE] // Saved context in cpu_int_state
    ldr r3, [r4, CPU_STAT_IRQ] // Get IRQ count
    add r3, r3, #1 // Increment count
    str r3, [r4, CPU_STAT_IRQ] // Update IRQ count
    ldr r3, [r4, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count
    add r3, r3, #1 // Increment count
    str r3, [r4, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
    ldr r0, [r4, INTERRUPT_TARGET]
    ldr r1, [r4, INTERRUPT_REFCON]
    ldr r2, [r4, INTERRUPT_NUB]
    ldr r3, [r4, INTERRUPT_SOURCE]
    ldr r5, [r4, INTERRUPT_HANDLER] // Call second level exception handler
    blx r5
#ifndef NO_KDEBUG
    movs r8, r8
    COND_EXTERN_BLNE(interrupt_trace_exit)
#endif
    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
    bl EXT(ml_get_timebase) // get current timebase
    LOAD_ADDR(r3, EntropyData)
    ldr r2, [r3, ENTROPY_SAMPLE_COUNT]
    add r1, r2, 1
    str r1, [r3, ENTROPY_SAMPLE_COUNT]
    and r2, r2, ENTROPY_BUFFER_INDEX_MASK
    add r1, r3, ENTROPY_BUFFER
    ldr r4, [r1, r2, lsl #2]
    eor r0, r0, r4, ror #9
    str r0, [r1, r2, lsl #2] // Update entropy buffer entry
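/*
 * Entropy harvesting sketch: interrupt timing is a cheap source of
 * randomness, so each IRQ mixes the current timebase into a ring buffer,
 * roughly:
 *
 *   i = sample_count++ & ENTROPY_BUFFER_INDEX_MASK;
 *   buffer[i] = timebase ^ ror(buffer[i], 9);
 *
 * The rotate-and-xor keeps stirring previous samples instead of simply
 * overwriting them.
 */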

return_from_irq:
    mov r5, #0
    ldr r4, [r9, ACT_CPUDATAP] // Get current cpu
    str r5, [r4, CPU_INT_STATE] // Clear cpu_int_state
    ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
#if MACH_ASSERT
    cmp r2, #0 // verify positive count
    bgt 1f
    push {r7, lr}
    mov r7, sp
    adr r0, L_preemption_count_zero_str
    blx EXT(panic)
    b .
1:
#endif
    sub r2, r2, #1 // Decrement count
    str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count

    mrs r0, spsr // Check the previous mode

    cpsid i, #PSR_SVC_MODE

    tst r0, #0x0f // Check if the previous is from user
    ldreq sp, [r9, TH_KSTACKPTR] // ...If so, reload the kernel stack pointer
    beq load_and_go_user // ...and return

#if __ARM_USER_PROTECT__
    ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb
    cmp r10, r0
    beq 1f
    ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb
    cmp r10, r0
    beq 1f
    mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
    ldr r11, [r9, ACT_ASID] // Load thread asid
1:
    mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR
    isb
#endif
    b load_and_go_sys

    .align 2
L_preemption_count_zero_str:
    .ascii "locore.s: preemption count is zero \000"
    .align 2
/*
 * First Level Exception Handler for DEC
 * Current mode : IRQ
 * IRQ and FIQ are always disabled while running in the IRQ handler.
 * We do not permit nested interrupts.
 *
 * Saving area: from user : PCB.
 *              from kernel : interrupt stack.
 */

    .text
    .align 2
    .globl EXT(fleh_decirq)

LEXT(fleh_decirq)
    sub lr, lr, #4

    cpsie af // Re-enable async aborts/FIQ

    mrs sp, spsr
    tst sp, #0x0f // From user? or kernel?
    bne fleh_decirq_kernel

fleh_decirq_user:
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    add sp, sp, ACT_PCBDATA // Get User PCB
    stmia sp, {r0-r12, sp, lr}^
    mov r7, #0 // Zero the frame pointer
    nop
    str lr, [sp, SS_PC]
    mrs r4, spsr
    str r4, [sp, SS_CPSR]
    mov r5, sp // Saved context in r5
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    ldr r6, [r9, ACT_CPUDATAP] // Get current cpu
    ldr sp, [r6, CPU_ISTACKPTR] // Set interrupt stack
    cpsid i, #PSR_SVC_MODE
    ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack
    cpsid i, #PSR_IRQ_MODE

#if __ARM_VFP__
    add r0, r9, ACT_UVFP // Get the address of the user VFP save area
    bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    bl EXT(timer_state_event_user_to_kernel)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
#endif
#if CONFIG_TELEMETRY
    LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
    mov r0, #1
    ldr r2, [r2]
    movs r2, r2
    beq 1f
    mov r1, #0 // (not a PMI record)
    bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
    mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1:
#endif

    b fleh_decirq_handler

fleh_decirq_kernel:
    cpsid i, #PSR_SVC_MODE

    sub sp, sp, EXC_CTX_SIZE
    stmia sp, {r0-r12}
    add r0, sp, EXC_CTX_SIZE

    str r0, [sp, SS_SP] // Save supervisor mode sp
    str lr, [sp, SS_LR] // Save supervisor mode lr

    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW

#if __ARM_VFP__
    add r0, sp, SS_SIZE // Get vfp state pointer
    bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
    add r0, VSS_ALIGN // Get the actual vfp save area
    bl EXT(vfp_save) // Save the current VFP state to the stack
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    mov r5, sp // Saved context in r5

    cpsid i, #PSR_IRQ_MODE

    str lr, [r5, SS_PC] // Save LR as the return PC
    mrs r4, spsr
    str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode

    ldr sp, [r9, ACT_CPUDATAP] // Get current cpu
    ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack

#if CONFIG_TELEMETRY
    LOAD_ADDR(r2, telemetry_needs_record) // Check if a telemetry record was requested...
    mov r0, #0
    ldr r2, [r2]
    movs r2, r2
    beq 1f
    mov r1, #0 // (not a pmi record)
    bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
    mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
1:
#endif

fleh_decirq_handler:
    ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
    add r2, r2, #1 // Increment count
    str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
    ldr r2, [r9, ACT_CPUDATAP] // Get current cpu
    str r5, [r2, CPU_INT_STATE] // Saved context in cpu_int_state
    ldr r3, [r2, CPU_STAT_IRQ] // Get IRQ count
    add r3, r3, #1 // Increment count
    str r3, [r2, CPU_STAT_IRQ] // Update IRQ count
    ldr r3, [r2, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count
    add r3, r3, #1 // Increment count
    str r3, [r2, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
#ifndef NO_KDEBUG
    LOAD_ADDR(r4, kdebug_enable)
    ldr r4, [r4]
    movs r4, r4
    movne r0, r5 // Pass saved context
    COND_EXTERN_BLNE(interrupt_trace)
#endif
    bl EXT(interrupt_stats) // Record interrupt statistics
    mov r0, #0
    bl EXT(rtclock_intr) // Call second level exception handler
#ifndef NO_KDEBUG
    movs r4, r4
    COND_EXTERN_BLNE(interrupt_trace_exit)
#endif

    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW

    b return_from_irq


/*
 * First Level Exception Handler for FIQ
 * Current mode : FIQ
 * IRQ and FIQ are always disabled while running in the FIQ handler.
 * We do not permit nested interrupts.
 *
 * Saving area: from user : PCB.
 *              from kernel : interrupt stack.
 *
 * FIQ mode banks seven shadow registers for fast service, so all we
 * have to save here is the eight general registers and LR. But if the
 * current thread was running in user mode before the FIQ interrupt,
 * all user registers must be saved for the AST handler routine.
 */
    .text
    .align 2
    .globl EXT(fleh_fiq_generic)

LEXT(fleh_fiq_generic)
    str r11, [r10] // Clear the FIQ source

    ldr r13, [r8, CPU_TIMEBASE_LOW] // Load TBL
    adds r13, r13, #1 // Increment TBL
    str r13, [r8, CPU_TIMEBASE_LOW] // Store TBL
    ldreq r13, [r8, CPU_TIMEBASE_HIGH] // Load TBU
    addeq r13, r13, #1 // Increment TBU
    streq r13, [r8, CPU_TIMEBASE_HIGH] // Store TBU
    subs r12, r12, #1 // Decrement, DEC
    str r12, [r8, CPU_DECREMENTER] // Store DEC
    subspl pc, lr, #4 // Return unless DEC < 0
    b EXT(fleh_dec)
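/*
 * This handler relies on the FIQ banked registers being preloaded:
 * r8 = cpu data pointer, r10/r11 = address/value used to quiesce the FIQ
 * source, r12 = decrementer shadow. The 64-bit timebase is emulated in
 * software: "adds" sets Z when the low word wraps to zero, and the
 * eq-conditional loads/stores then carry the increment into the high
 * word. "subspl pc, lr, #4" returns from the FIQ while the decremented
 * count is still >= 0; only a negative decrementer falls through into
 * fleh_dec.
 */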

    .text
    .align 2
    .globl EXT(fleh_dec)
LEXT(fleh_dec)
    mrs sp, spsr // Get the spsr
    sub lr, lr, #4
    tst sp, #0x0f // From user? or kernel?
    bne 2f

/* From user */
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    add sp, sp, ACT_PCBDATA // Get User PCB

    stmia sp, {r0-r12, sp, lr}^
    mov r7, #0 // Zero the frame pointer
    nop
    str lr, [sp, SS_PC]

    mrs r4, spsr
    str r4, [sp, SS_CPSR]
    mov r5, sp
    sub sp, sp, ACT_PCBDATA // Get User PCB
    ldr sp, [sp, ACT_CPUDATAP] // Get current cpu
    ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack
    mov r6, sp
    cpsid i, #PSR_SVC_MODE
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack

#if __ARM_VFP__
    add r0, r9, ACT_UVFP // Get the address of the user VFP save area
    bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    mov r0, #1 // Mark this as coming from user context
    b 4f

2:
/* From kernel */
    tst sp, #PSR_IRQF // Test for IRQ masked
    bne 3f // We're on the cpu_signal path

    cpsid if, #PSR_SVC_MODE

    sub sp, sp, EXC_CTX_SIZE
    stmia sp, {r0-r12}
    add r0, sp, EXC_CTX_SIZE

    str r0, [sp, SS_SP] // Save supervisor mode sp
    str lr, [sp, SS_LR] // Save supervisor mode lr

    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW

#if __ARM_VFP__
    add r0, sp, SS_SIZE // Get vfp state pointer
    bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
    add r0, VSS_ALIGN // Get the actual vfp save area
    bl EXT(vfp_save) // Save the current VFP state to the stack
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    mov r5, sp // Saved context in r5

    cpsid if, #PSR_FIQ_MODE

    mrc p15, 0, r1, c13, c0, 4 // Read TPIDRPRW

    str lr, [r5, SS_PC] // Save LR as the return PC
    mrs r4, spsr
    str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode

    ldr r6, [r1, ACT_CPUDATAP] // Get current cpu
    ldr r6, [r6, CPU_ISTACKPTR] // Set interrupt stack

    mov r0, #0 // Mark this as coming from kernel context
    b 4f

3:
/* cpu_signal path */
    mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW
    ldr sp, [sp, ACT_CPUDATAP] // Get current cpu
    ldr sp, [sp, CPU_FIQSTACKPTR] // Set fiq stack
    sub sp, sp, EXC_CTX_SIZE
    stmia sp, {r0-r12}
    str lr, [sp, SS_PC]
    mrs r4, spsr
    str r4, [sp, SS_CPSR]
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW

#if __ARM_VFP__
    add r0, sp, SS_SIZE // Get vfp state pointer
    bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
    add r0, VSS_ALIGN // Get the actual vfp save area
    bl EXT(vfp_save) // Save the current VFP state to the stack
    mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value...
    fmxr fpscr, r4 // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
    mrc p15, 0, r10, c2, c0, 0 // Get TTBR0
    ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR
    mov r3, #0 // Load kernel asid
    mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif

    ALIGN_STACK r0, r1
    mov r0, r8 // Get current cpu in arg 0
    mov r1, SIGPdec // Decrementer signal in arg1
    mov r2, #0
    mov r3, #0
    bl EXT(cpu_signal) // Call cpu_signal
    UNALIGN_STACK

    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW

#if __ARM_VFP__
    add r0, sp, SS_SIZE // Get vfp state pointer
    bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment
    add r0, VSS_ALIGN // Get the actual vfp save area
    bl EXT(vfp_load) // Load the desired VFP state from the stack
#endif

    clrex // clear exclusive memory tag
#if __ARM_ENABLE_WFE_
    sev
#endif
#if __ARM_USER_PROTECT__
    mcr p15, 0, r10, c2, c0, 0 // Set TTBR0
    mcr p15, 0, r11, c13, c0, 1 // Set CONTEXTIDR
    isb
#endif
    ldr lr, [sp, SS_PC]
    ldmia sp, {r0-r12} // Restore saved registers
    movs pc, lr // Return from fiq

4:
    cpsid i, #PSR_IRQ_MODE
    cpsie f
    mov sp, r6 // Restore the stack pointer
    ALIGN_STACK r2, r3
    msr spsr_cxsf, r4 // Restore the spsr
    ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count
    add r2, r2, #1 // Increment count
    str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count
    ldr r4, [r9, ACT_CPUDATAP] // Get current cpu
    str r5, [r4, CPU_INT_STATE]
    ldr r3, [r4, CPU_STAT_IRQ] // Get IRQ count
    add r3, r3, #1 // Increment count
    str r3, [r4, CPU_STAT_IRQ] // Update IRQ count
    ldr r3, [r4, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count
    add r3, r3, #1 // Increment count
    str r3, [r4, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    movs r0, r0
    beq 5f
    mov r8, r0 // Stash our "from_user" boolean value
    bl EXT(timer_state_event_user_to_kernel)
    mov r0, r8 // Restore our "from_user" value
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
5:
#endif
#if CONFIG_TELEMETRY
    LOAD_ADDR(r4, telemetry_needs_record) // Check if a telemetry record was requested...
    ldr r4, [r4]
    movs r4, r4
    beq 6f
    mov r1, #0 // (not a PMI record)
    bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread...
    mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW
6:
#endif

#ifndef NO_KDEBUG
    LOAD_ADDR(r4, kdebug_enable)
    ldr r4, [r4]
    movs r4, r4
    ldrne r1, [r9, ACT_CPUDATAP] // Get current cpu
    ldrne r0, [r1, CPU_INT_STATE]
    COND_EXTERN_BLNE(interrupt_trace)
#endif
    bl EXT(interrupt_stats) // Record interrupt statistics
    mov r0, #0
    bl EXT(rtclock_intr) // Call second level exception handler
#ifndef NO_KDEBUG
    movs r4, r4
    COND_EXTERN_BLNE(interrupt_trace_exit)
#endif
    UNALIGN_STACK

    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW

    b return_from_irq

/*
 * void thread_syscall_return(kern_return_t r0)
 *
 */
    .text
    .align 2
    .globl EXT(thread_syscall_return)

LEXT(thread_syscall_return)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    add r1, r9, ACT_PCBDATA // Get User PCB
    str r0, [r1, SS_R0] // set return value
#ifndef NO_KDEBUG
    LOAD_ADDR(r4, kdebug_enable)
    ldr r4, [r4]
    movs r4, r4
    beq load_and_go_user
    ldr r12, [r1, SS_R12] // Load syscall number
    rsbs r1, r12, #0 // make the syscall positive (if negative)
    COND_EXTERN_BLGT(mach_syscall_trace_exit)
#endif
    b load_and_go_user

/*
 * void thread_exception_return(void)
 * void thread_bootstrap_return(void)
 *
 */
    .text
    .globl EXT(thread_exception_return)
    .globl EXT(thread_bootstrap_return)

LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
    bl EXT(dtrace_thread_bootstrap)
#endif
    // Fall through

LEXT(thread_exception_return)

load_and_go_user:
/*
 * Restore user mode states and go back to user mode
 */
    cpsid i // Disable irq
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW

    mvn r0, #0
    str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user

    ldr r8, [r9, ACT_CPUDATAP] // Get current cpu
    ldr r5, [r8, CPU_PENDING_AST] // Get ASTs
    cmp r5, #0 // Test if ASTs pending
    beq return_to_user_now // Branch if no ASTs

    bl EXT(ast_taken_user) // Handle all ASTs (may continue via thread_exception_return)

    mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW
    b load_and_go_user // Loop back

return_to_user_now:

#if MACH_ASSERT
/*
 * Assert that the preemption level is zero prior to the return to user space
 */
    ldr r1, [r9, ACT_PREEMPT_CNT] // Load preemption count
    movs r1, r1 // Test
    beq 0f // Continue if zero, or...
    adr r0, L_lagu_panic_str // Load the panic string...
    blx EXT(panic) // Finally, panic
0:
    ldr r2, [r9, TH_RWLOCK_CNT] // Load RW lock count
    movs r2, r2 // Test
    beq 0f // Continue if zero, or...
    adr r0, L_lagu_rwlock_cnt_panic_str // Load the panic string...
    mov r1, r9 // Thread argument for panic string
    blx EXT(panic) // Finally, panic
#endif

0:
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    bl EXT(timer_state_event_kernel_to_user)
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
    ldr r8, [r9, ACT_CPUDATAP] // Get current cpu data
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
#if __ARM_DEBUG__ >= 6
    ldr r0, [r9, ACT_DEBUGDATA]
    ldr r6, [r8, CPU_USER_DEBUG]
    cmp r0, r6 // test if debug registers need to be changed
    beq 1f
    bl EXT(arm_debug_set) // argument is already in r0
    mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW
1:
#endif
#if __ARM_VFP__
    add r0, r9, ACT_UVFP // Get the address of the user VFP save area
    bl EXT(vfp_load) // Load the desired VFP state from ACT_UVFP
#endif
    add r0, r9, ACT_PCBDATA // Get User PCB
    ldr r4, [r0, SS_CPSR] // Get saved cpsr
    and r3, r4, #PSR_MODE_MASK // Extract current mode
    cmp r3, #PSR_USER_MODE // Check user mode
    movne r0, r3
    bne EXT(ExceptionVectorPanic)

    msr spsr_cxsf, r4 // Restore spsr(user mode cpsr)
    mov sp, r0 // Get User PCB

    clrex // clear exclusive memory tag
#if __ARM_ENABLE_WFE_
    sev
#endif
#if __ARM_USER_PROTECT__
    ldr r3, [r9, ACT_UPTW_TTB] // Load thread ttb
    mcr p15, 0, r3, c2, c0, 0 // Set TTBR0
    ldr r2, [r9, ACT_ASID] // Load thread asid
    mcr p15, 0, r2, c13, c0, 1
    isb
#endif
    ldr lr, [sp, SS_PC] // Restore user mode pc
    ldmia sp, {r0-r12, sp, lr}^ // Restore the other user mode registers
    nop // Hardware problem
    movs pc, lr // Return to user

    .align 2
L_lagu_panic_str:
    .asciz "load_and_go_user: preemption_level %d"
    .align 2

    .align 2
L_lagu_rwlock_cnt_panic_str:
    .asciz "load_and_go_user: RW lock count not 0 on thread %p (%u)"
    .align 2

    .align 2
L_evimpanic_str:
    .ascii "Exception Vector: Illegal Mode: 0x%08X\n\000"
    .align 2

    .text
    .align 2
    .globl EXT(ExceptionVectorPanic)

LEXT(ExceptionVectorPanic)
    cpsid i, #PSR_SVC_MODE
    ALIGN_STACK r1, r2
    mov r1, r0
    adr r0, L_evimpanic_str
    blx EXT(panic)
    b .

#include "globals_asm.h"

LOAD_ADDR_GEN_DEF(mach_trap_table)
LOAD_ADDR_GEN_DEF(kern_invalid)

/* vim: set ts=4: */