/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
56 | ||
#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <pexpert/arm/board_config.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <mach_assert.h>
#include <config_dtrace.h>
#include "assym.s"

#define TRACE_SYSCALL 0

/*
 * Low exception vector page.
 * Copied to low physical memory in arm_init,
 * so the kernel must be linked virtually at
 * 0xc0001000 or higher to leave space for it.
 * Only the reset vector is expected to run; every other entry spins.
 */
	.syntax unified
	.text
	.align 12                               // 4KB-align the vector page
	.globl EXT(ExceptionLowVectorsBase)

LEXT(ExceptionLowVectorsBase)
	adr	pc, Lreset_low_vector           // Reset: the only live entry
	b	.                               // Undef
	b	.                               // SWI
	b	.                               // Prefetch Abort
	b	.                               // Data Abort
	b	.                               // Address Exception
	b	.                               // IRQ
	b	.                               // FIQ/DEC
LEXT(ResetPrivateData)
	.space	(480),0                         // (filled with 0s)
// ExceptionLowVectorsBase + 0x200
Lreset_low_vector:
	// Call the optional "assist" reset hook first, if one is installed.
	adr	r4, EXT(ResetHandlerData)
	ldr	r0, [r4, ASSIST_RESET_HANDLER]
	movs	r0, r0                          // NULL check (sets Z)
	blxne	r0                              // Call hook if non-NULL
	// Then the first cpu_data_entry's per-cpu reset-assist hook, if set.
	adr	r4, EXT(ResetHandlerData)
	ldr	r1, [r4, CPU_DATA_ENTRIES]
	ldr	r1, [r1, CPU_DATA_PADDR]
	ldr	r5, [r1, CPU_RESET_ASSIST]
	movs	r5, r5                          // NULL check (sets Z)
	blxne	r5                              // Call hook if non-NULL
	adr	r4, EXT(ResetHandlerData)
	ldr	r0, [r4, BOOT_ARGS]
	ldr	r1, [r4, CPU_DATA_ENTRIES]
#if __ARM_SMP__
#if defined(ARMA7)
	// physical cpu number is stored in MPIDR Affinity level 0
	mrc	p15, 0, r6, c0, c0, 5           // Read MPIDR
	and	r6, r6, #0xFF                   // Extract Affinity level 0
#else
#error missing Who Am I implementation
#endif
#else
	mov	r6, #0                          // Uniprocessor: always cpu 0
#endif /* __ARM_SMP__ */
	// physical cpu number matches cpu number
//#if cdeSize != 16
//#error cpu_data_entry is not 16bytes in size
//#endif
	lsl	r6, r6, #4                      // Get CpuDataEntry offset
	add	r1, r1, r6                      // Get cpu_data_entry pointer
	ldr	r1, [r1, CPU_DATA_PADDR]
	ldr	r5, [r1, CPU_RESET_HANDLER]
	movs	r5, r5                          // NULL check (sets Z)
	blxne	r5                              // Branch to cpu reset handler
	b	.                               // Unexpected reset

	// Storage consumed via the offsets above; zero-initialized.
	.globl	EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space	(rhdSize_NUM),0                 // (filled with 0s)


	.globl	EXT(ExceptionLowVectorsEnd)
LEXT(ExceptionLowVectorsEnd)
133 | ||
/*
 * High exception vector page.  Each entry loads pc through the per-cpu
 * exception vector table (CPU_EXC_VECTORS), using the banked exception-mode
 * sp as a scratch register (user sp/lr are untouched until saved).
 */
	.text
	.align 12                               // 4KB-align the vector page
	.globl EXT(ExceptionVectorsBase)

LEXT(ExceptionVectorsBase)

	adr	pc, Lexc_reset_vector
	adr	pc, Lexc_undefined_inst_vector
	adr	pc, Lexc_swi_vector
	adr	pc, Lexc_prefetch_abort_vector
	adr	pc, Lexc_data_abort_vector
	adr	pc, Lexc_address_exception_vector
	adr	pc, Lexc_irq_vector
#if __ARM_TIME__
	adr	pc, Lexc_decirq_vector
#else /* ! __ARM_TIME__ */
	mov	pc, r9                          // NOTE(review): jumps through r9; presumably a deliberate dead-end for FIQ — confirm
#endif /* __ARM_TIME__ */

Lexc_reset_vector:
	b	.                               // Reset is never expected here
	.long	0x0
	.long	0x0
	.long	0x0
Lexc_undefined_inst_vector:
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]          // Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]       // Get exception vector table
	ldr	pc, [sp, #4]                    // Branch to exception handler
Lexc_swi_vector:
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]          // Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]       // Get exception vector table
	ldr	pc, [sp, #8]                    // Branch to exception handler
Lexc_prefetch_abort_vector:
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]          // Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]       // Get exception vector table
	ldr	pc, [sp, #0xC]                  // Branch to exception handler
Lexc_data_abort_vector:
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]          // Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]       // Get exception vector table
	ldr	pc, [sp, #0x10]                 // Branch to exception handler
Lexc_address_exception_vector:
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]          // Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]       // Get exception vector table
	ldr	pc, [sp, #0x14]                 // Branch to exception handler
Lexc_irq_vector:
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]          // Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]       // Get exception vector table
	ldr	pc, [sp, #0x18]                 // Branch to exception handler
#if __ARM_TIME__
Lexc_decirq_vector:
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW
	ldr	sp, [sp, ACT_CPUDATAP]          // Get current cpu data
	ldr	sp, [sp, CPU_EXC_VECTORS]       // Get exception vector table
	ldr	pc, [sp, #0x1C]                 // Branch to exception handler
#else /* ! __ARM_TIME__ */
	.long	0x0
	.long	0x0
	.long	0x0
	.long	0x0
#endif /* __ARM_TIME__ */

	.fill	984, 4, 0                       // Push to the 4KB page boundary

	.globl	EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)
205 | ||
206 | ||
/*
 * Targets for the exception vectors; we patch these during boot (to allow
 * for position independent code without complicating the vectors; see start.s).
 * Slot order matches the vector page above: reset, undef, swi, prefetch
 * abort, data abort, address exception, irq, dec-irq.
 */
	.globl EXT(ExceptionVectorsTable)
LEXT(ExceptionVectorsTable)
Lreset_vector:
	.long	0x0
Lundefined_inst_vector:
	.long	0x0
Lswi_vector:
	.long	0x0
Lprefetch_abort_vector:
	.long	0x0
Ldata_abort_vector:
	.long	0x0
Laddress_exception_vector:
	.long	0x0
Lirq_vector:
	.long	0x0
Ldecirq_vector:
	.long	0x0
229 | ||
230 | ||
/*
 * First Level Exception Handlers
 */
	.text
	.align 2
	.globl EXT(fleh_reset)
LEXT(fleh_reset)
	b	.                               // Never return
239 | ||
/*
 * First Level Exception Handler for Undefined Instruction.
 * On entry (UND mode): lr = faulting instruction + 4 (ARM) or + 2 (Thumb);
 * spsr holds the pre-exception CPSR.  The banked UND-mode sp is used as a
 * scratch register here — no user register has been touched yet.
 */
	.text
	.align 2
	.globl EXT(fleh_undef)

LEXT(fleh_undef)
	mrs	sp, spsr                        // Check the previous mode
	tst	sp, #PSR_TF                     // Is it Thumb?
	subeq	lr, lr, #4                      // ARM: back up lr to the faulting instruction
	subne	lr, lr, #2                      // Thumb: 2-byte adjustment
	tst	sp, #0x0f                       // Low mode nibble zero => user mode
	bne	undef_from_kernel
254 | ||
undef_from_user:
	// Save the full user register state into the thread's PCB, switch to
	// SVC mode on the kernel stack, then hand off to sleh_undef().
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW (current thread)
	add	sp, sp, ACT_PCBDATA             // Get current thread PCB pointer

	stmia	sp, {r0-r12, sp, lr}^           // Save user context on PCB (user-bank sp/lr)
	mov	r7, #0                          // Zero the frame pointer
	nop

	mov	r0, sp                          // Store arm_saved_state pointer
						// for argument

	str	lr, [sp, SS_PC]                 // Save user mode pc register

	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]               // Save user mode cpsr

	mrs	r4, cpsr                        // Read cpsr
	cpsid	i, #PSR_SVC_MODE                // Switch to SVC mode, IRQ masked
	mrs	r3, cpsr                        // Read cpsr
	msr	spsr_cxsf, r3                   // Set spsr(svc mode cpsr)
	mrc	p15, 0, r9, c13, c0, 4          // Read TPIDRPRW
	ldr	sp, [r9, TH_KSTACKPTR]          // Load kernel stack
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]          // Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0           // Set TTBR0
	mov	r3, #0                          // Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1          // Set CONTEXTIDR
	isb
#endif
	and	r0, r4, #PSR_MODE_MASK          // Extract current mode
	cmp	r0, #PSR_UND_MODE               // Check undef mode
	bne	EXT(ExceptionVectorPanic)       // Sanity check: must have come from UND mode

	mvn	r0, #0                          // r0 = -1
	str	r0, [r9, TH_IOTIER_OVERRIDE]    // Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4          // Read TPIDRPRW (reload after the call)
#endif

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP                // Get the address of the user VFP save area
	bl	EXT(vfp_save)                   // Save the current VFP state to ACT_UVFP
	mov	r3, #FPSCR_DEFAULT              // Load up the default FPSCR value...
	fmxr	fpscr, r3                       // And shove it into FPSCR
	add	r1, r9, ACT_UVFP                // Reload the pointer to the save state
	add	r0, r9, ACT_PCBDATA             // Reload the VFP save state argument
#else
	mov	r1, #0                          // Clear the VFP save state argument
	add	r0, r9, ACT_PCBDATA             // Reload arm_saved_state pointer
#endif

	bl	EXT(sleh_undef)                 // Call second level handler
						// sleh will enable interrupt
	b	load_and_go_user
311 | ||
undef_from_kernel:
	// Kernel-mode undefined instruction.  Save context on the existing
	// (SVC or IRQ) kernel stack and call sleh_undef().  With CONFIG_DTRACE
	// the pre-exception mode is re-entered so fbt probes see correct state.
	mrs	sp, cpsr                        // Read cpsr
	and	sp, sp, #PSR_MODE_MASK          // Extract current mode
	cmp	sp, #PSR_UND_MODE               // Check undef mode
	movne	r0, sp                          // Pass unexpected mode to the panic
	bne	EXT(ExceptionVectorPanic)
	mrs	sp, spsr                        // Check the previous mode

/*
 * We have a kernel stack already, and I will use it to save contexts
 * IRQ is disabled
 */

#if CONFIG_DTRACE
/*
 * See if we came here from IRQ or SVC mode, and go back to that mode
 */

	and	sp, sp, #PSR_MODE_MASK
	cmp	sp, #PSR_IRQ_MODE
	bne	undef_from_kernel_svc

	cpsid	i, #PSR_IRQ_MODE                // Resume IRQ mode, IRQ masked
	b	handle_undef
#endif

undef_from_kernel_svc:
	cpsid	i, #PSR_SVC_MODE                // Resume SVC mode, IRQ masked

handle_undef:
#if CONFIG_DTRACE
	// We need a frame for backtracing. The LR here is the LR of supervisor mode, not the location where the exception
	// took place. We'll store that later after we switch to undef mode and pull out the LR from there.

	// This frame is consumed by fbt_invop. Any changes with the size or location of this frame will probably require
	// changes in fbt_invop also.
	stmfd	sp!, { r7, lr }
#endif

	sub	sp, sp, EXC_CTX_SIZE            // Reserve for arm_saved_state

	stmia	sp, {r0-r12}                    // Save on supervisor mode stack
	str	lr, [sp, SS_LR]

#if CONFIG_DTRACE
	add	r7, sp, EXC_CTX_SIZE            // Save frame pointer
#endif

	mov	ip, sp                          // Stack transfer

	cpsid	i, #PSR_UND_MODE                // Back to UND mode for its banked lr/spsr

	str	lr, [ip, SS_PC]                 // Save complete (exception pc)
	mrs	r4, spsr
	str	r4, [ip, SS_CPSR]               // Pre-exception cpsr

#if CONFIG_DTRACE
/*
 * Go back to previous mode for mode specific regs
 */
	and	r4, r4, #PSR_MODE_MASK
	cmp	r4, #PSR_IRQ_MODE
	bne	handle_undef_from_svc

	cpsid	i, #PSR_IRQ_MODE
	b	handle_undef2
#endif

handle_undef_from_svc:
	cpsid	i, #PSR_SVC_MODE

handle_undef2:

/*
	sp - stack pointer
	ip - stack pointer
	r7 - frame pointer state
 */


#if CONFIG_DTRACE
	ldr	r0, [ip, SS_PC]                 // Get the exception pc to store later
#endif

	add	ip, ip, EXC_CTX_SIZE            // Send stack pointer to debugger
#if CONFIG_DTRACE
	str	r0, [ip, #4]                    // Store exception pc into the DTrace frame
	add	ip, ip, #8                      // Skip over the { r7, lr } frame
#endif
	str	ip, [sp, SS_SP]                 // for accessing local variable
#if CONFIG_DTRACE
	sub	ip, ip, #8
#endif
	sub	ip, ip, EXC_CTX_SIZE

#if __ARM_VFP__
	mrc	p15, 0, r9, c13, c0, 4          // Read TPIDRPRW
	add	r0, sp, SS_SIZE                 // Get vfp state pointer
	bic	r0, #(VSS_ALIGN_NUM - 1)        // Align to arm_vfpsaved_state alignment
	add	r0, VSS_ALIGN                   // Get the actual vfp save area
	mov	r5, r0                          // Stash the save area in another register
	bl	EXT(vfp_save)                   // Save the current VFP state to the stack
	mov	r1, r5                          // Load the VFP save area argument
	mov	r4, #FPSCR_DEFAULT              // Load up the default FPSCR value...
	fmxr	fpscr, r4                       // And shove it into FPSCR
#else
	mov	r1, #0                          // Clear the facility context argument
#endif
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r10, c2, c0, 0          // Get TTBR0
	ldr	r3, [r9, ACT_KPTW_TTB]          // Load kernel ttb
	cmp	r3, r10                         // Already on the kernel ttb?
	beq	1f
	mcr	p15, 0, r3, c2, c0, 0           // Set TTBR0
1:
	mrc	p15, 0, r11, c13, c0, 1         // Save CONTEXTIDR
	mov	r3, #0                          // Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1          // Set CONTEXTIDR
	isb
#endif
	mov	r0, sp                          // Argument

/*
 * For armv7k ABI, the stack needs to be 16-byte aligned
 */
#if __BIGGEST_ALIGNMENT__ > 4
	and	r1, sp, #0x0F                   // sp mod 16-bytes
	cmp	r1, #4                          // need space for the sp on the stack
	addlt	r1, r1, #0x10                   // make room if needed, but keep stack aligned
	mov	r2, sp                          // get current sp
	sub	sp, sp, r1                      // align stack
	str	r2, [sp]                        // store previous sp on stack
#endif

	bl	EXT(sleh_undef)                 // Call second level handler

#if __BIGGEST_ALIGNMENT__ > 4
	ldr	sp, [sp]                        // restore stack
#endif

#if __ARM_USER_PROTECT__
	mrc	p15, 0, r9, c13, c0, 4          // Read TPIDRPRW
	ldr	r0, [r9, ACT_KPTW_TTB]          // Load kernel ttb
	cmp	r10, r0                         // Was already kernel ttb on entry?
	beq	1f
	ldr	r10, [r9, ACT_UPTW_TTB]         // Load thread ttb
	cmp	r10, r0
	beq	1f
	mcr	p15, 0, r10, c2, c0, 0          // Set TTBR0
	ldr	r11, [r9, ACT_ASID]             // Load thread asid
1:
	mcr	p15, 0, r11, c13, c0, 1         // set CONTEXTIDR
	isb
#endif
	b	load_and_go_sys
467 | ||
468 | ||
/*
 * First Level Exception Handler for Software Interrupt
 *
 * We assert that only user level can use the "SWI" instruction for a system
 * call on development kernels, and assume it's true on release.
 *
 * System call number is stored in r12.
 * System call arguments are stored in r0 to r6 and r8 (we skip r7)
 *
 */
	.text
	.align 5
	.globl EXT(fleh_swi)

LEXT(fleh_swi)
	// SWI enters in SVC mode.  Borrow the ABT-mode banked sp to stash ip
	// while we peek at the SVC spsr (the pre-exception CPSR).
	cpsid	i, #PSR_ABT_MODE
	mov	sp, ip                          // Save ip
	cpsid	i, #PSR_SVC_MODE
	mrs	ip, spsr                        // Check the previous mode
	tst	ip, #0x0f                       // Low mode nibble zero => user mode
	cpsid	i, #PSR_ABT_MODE
	mov	ip, sp                          // Restore ip
	cpsid	i, #PSR_SVC_MODE
	beq	swi_from_user

/* Only user mode can use SWI. Panic if the kernel tries. */
swi_from_kernel:
	sub	sp, sp, EXC_CTX_SIZE            // Reserve an arm_saved_state frame
	stmia	sp, {r0-r12}                    // Save general registers for debugging
	add	r0, sp, EXC_CTX_SIZE            // sp value at the time of the SWI

	str	r0, [sp, SS_SP]                 // Save supervisor mode sp
	str	lr, [sp, SS_LR]                 // Save supervisor mode lr

	adr	r0, L_kernel_swi_panic_str      // Load panic messages and panic()
	blx	EXT(panic)
	b	.                               // panic() does not return
506 | ||
swi_from_user:
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA             // Get User PCB


	/* Check for special mach_absolute_time trap value.
	 * This is intended to be a super-lightweight call to ml_get_timebase(), which
	 * is handrolled assembly and does not use the stack, thus not requiring us to setup a kernel stack. */
	cmp	r12, #-3
	beq	fleh_swi_trap_tb
	stmia	sp, {r0-r12, sp, lr}^           // Save user context on PCB (user-bank sp/lr)
	mov	r7, #0                          // Zero the frame pointer
	nop
	mov	r8, sp                          // Store arm_saved_state pointer
	add	sp, sp, SS_PC
	srsia	sp, #PSR_SVC_MODE               // Store return pc + spsr at SS_PC/SS_CPSR
	mrs	r3, cpsr                        // Read cpsr
	msr	spsr_cxsf, r3                   // Set spsr(svc mode cpsr)
	sub	r9, sp, ACT_PCBDATA_PC          // r9 = current thread (undo both offsets)

	ldr	sp, [r9, TH_KSTACKPTR]          // Load kernel stack
	mov	r11, r12                        // save the syscall vector in a nontrashed register

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP                // Get the address of the user VFP save area
	bl	EXT(vfp_save)                   // Save the current VFP state to ACT_UVFP
	mov	r4, #FPSCR_DEFAULT              // Load up the default FPSCR value...
	fmxr	fpscr, r4                       // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]          // Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0           // Set TTBR0
	mov	r3, #0                          // Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1          // Set CONTEXTIDR
	isb
#endif

	mvn	r0, #0                          // r0 = -1
	str	r0, [r9, TH_IOTIER_OVERRIDE]    // Reset IO tier override to -1 before handling SWI from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4          // Read TPIDRPRW (reload after the call)
	add	r8, r9, ACT_PCBDATA             // Reload arm_saved_state pointer
#endif
	ldr	r10, [r9, ACT_TASK]             // Load the current task

	/* enable interrupts */
	cpsie	i                               // Enable IRQ

	cmp	r11, #-4                        // Special value for mach_continuous_time
	beq	fleh_swi_trap_mct

	cmp	r11, #0x80000000                // Special cache/cthread trap vector
	beq	fleh_swi_trap
fleh_swi_trap_ret:

#if TRACE_SYSCALL
	/* trace the syscall */
	mov	r0, r8
	bl	EXT(syscall_trace)
#endif

	bl	EXT(mach_kauth_cred_uthread_update)
	mrc	p15, 0, r9, c13, c0, 4          // Reload r9 from TPIDRPRW
	/* unix syscall? */
	rsbs	r5, r11, #0                     // make the syscall positive (if negative)
	ble	fleh_swi_unix                   // positive syscalls are unix (note reverse logic here)
575 | ||
fleh_swi_mach:
	// Mach trap path: r5 = positive trap number, r8 = saved state,
	// r9 = current thread.
	/* note that mach_syscall_trace can modify r9, so increment the thread
	 * syscall count before the call : */
	ldr	r2, [r9, TH_MACH_SYSCALLS]
	add	r2, r2, #1
	str	r2, [r9, TH_MACH_SYSCALLS]

	LOAD_ADDR(r1, mach_trap_table)          // load mach_trap_table
#if MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 12
	add	r11, r5, r5, lsl #1             // syscall * 3
	add	r6, r1, r11, lsl #2             // trap_table + syscall * 12
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 16
	add	r6, r1, r5, lsl #4              // trap_table + syscall * 16
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 20
	add	r11, r5, r5, lsl #2             // syscall * 5
	add	r6, r1, r11, lsl #2             // trap_table + syscall * 20
#else
#error mach_trap_t size unhandled (see MACH_TRAP_TABLE_ENTRY_SIZE)!
#endif

#ifndef NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr	r4, [r4]
	movs	r4, r4                          // kdebug enabled? (r4 re-tested at exit)
	movne	r0, r8                          // ready the reg state pointer as an arg to the call
	movne	r1, r5                          // syscall number as 2nd arg
	COND_EXTERN_BLNE(mach_syscall_trace)
#endif
	adr	lr, fleh_swi_exit               // any calls from here on out will return to our exit path
	cmp	r5, MACH_TRAP_TABLE_COUNT       // check syscall number range
	bge	fleh_swi_mach_error

/*
 * For arm32 ABI where 64-bit types are aligned to even registers and
 * 64-bits on stack, we need to unpack registers differently. So
 * we use the mungers for marshalling in arguments from user space.
 * Currently this is just ARMv7k.
 */
#if __BIGGEST_ALIGNMENT__ > 4
	sub	sp, #0x40                       // allocate buffer and keep stack 128-bit aligned
						// it should be big enough for all syscall arguments
	ldr	r11, [r6, #8]                   // get mach_trap_table[call_number].mach_trap_arg_munge32
	teq	r11, #0                         // check if we have a munger
	moveq	r0, #0
	movne	r0, r8                          // ready the reg state pointer as an arg to the call
	movne	r1, sp                          // stack will hold arguments buffer
	blxne	r11                             // call munger to get arguments from userspace
	adr	lr, fleh_swi_exit               // any calls from here on out will return to our exit path
	teq	r0, #0
	bne	fleh_swi_mach_error             // exit if the munger returned non-zero status
#endif

	ldr	r1, [r6, #4]                    // load the syscall vector

	LOAD_ADDR(r2, kern_invalid)             // test to make sure the trap is not kern_invalid
	teq	r1, r2
	beq	fleh_swi_mach_error

#if __BIGGEST_ALIGNMENT__ > 4
	mov	r0, sp                          // argument buffer on stack
	bx	r1                              // call the syscall handler
#else
	mov	r0, r8                          // ready the reg state pointer as an arg to the call
	bx	r1                              // call the syscall handler
#endif

fleh_swi_exit64:
	str	r1, [r8, #4]                    // top of 64-bit return
fleh_swi_exit:
	str	r0, [r8]                        // save the return value
#ifndef NO_KDEBUG
	movs	r4, r4                          // kdebug was enabled at entry?
	movne	r1, r5                          // syscall number as 2nd arg
	COND_EXTERN_BLNE(mach_syscall_trace_exit)
#endif
#if TRACE_SYSCALL
	bl	EXT(syscall_trace_exit)
#endif

	mov	r0, #1
	bl	EXT(throttle_lowpri_io)         // throttle_lowpri_io(1);

	bl	EXT(thread_exception_return)
	b	.                               // Does not return
660 | ||
fleh_swi_mach_error:
	// Bad Mach trap: raise EXC_SYSCALL via exception_triage().
	mov	r0, #EXC_SYSCALL                // exception type
	sub	r1, sp, #4                      // code pointer (just below sp)
	mov	r2, #1                          // codeCnt = 1
	bl	EXT(exception_triage)
	b	.                               // Does not return
667 | ||
	.align	5
fleh_swi_unix:
	// BSD syscall path: bump the per-thread unix syscall count, then
	// unix_syscall(saved_state, thread, uthread, proc).
	ldr	r1, [r9, TH_UNIX_SYSCALLS]
	mov	r0, r8                          // reg state structure is arg
	add	r1, r1, #1                      // increment unix syscall count
	str	r1, [r9, TH_UNIX_SYSCALLS]
	mov	r1, r9                          // current thread in arg1
	ldr	r2, [r9, TH_UTHREAD]            // current uthread in arg2
	ldr	r3, [r10, TASK_BSD_INFO]        // current proc in arg3
	bl	EXT(unix_syscall)
	b	.                               // unix_syscall does not return
679 | ||
fleh_swi_trap:
	// 0x80000000 trap: dispatch on r3 through the branch table below
	// (pc reads as current instruction + 8, so r3 == 0 lands on the
	// first 'b' after the fall-through).
	ldmia	r8, {r0-r3}                     // Reload r0-r3 from the saved state
	cmp	r3, #3                          // r3 > 3 => not a recognized subtrap
	addls	pc, pc, r3, LSL#2               // Dispatch via the table below
	b	fleh_swi_trap_ret               // Out of range: resume normal syscall path
	b	icache_invalidate_trap          // r3 == 0
	b	dcache_flush_trap               // r3 == 1
	b	thread_set_cthread_trap         // r3 == 2
	b	thread_get_cthread_trap         // r3 == 3
689 | ||
icache_invalidate_trap:
	// r0 = user start address, r1 = length.  Clean D-cache then
	// invalidate I-cache over the range, with TH_RECOVER armed so a
	// fault lands in cache_trap_jmp.
	add	r3, r0, r1                      // r3 = end of range
	cmp	r3, VM_MAX_ADDRESS              // Range must stay within user space
	subhi	r3, r3, #1<<MMU_CLINE
	bhi	cache_trap_error
	adr	r11, cache_trap_jmp
	ldr	r6, [r9, TH_RECOVER]            // Save existing recovery routine
	str	r11, [r9, TH_RECOVER]           // Install cache_trap_jmp as recovery
#if __ARM_USER_PROTECT__
	ldr	r5, [r9, ACT_UPTW_TTB]          // Load thread ttb
	mcr	p15, 0, r5, c2, c0, 0           // Set TTBR0
	ldr	r5, [r9, ACT_ASID]              // Load thread asid
	mcr	p15, 0, r5, c13, c0, 1          // Set CONTEXTIDR
	dsb	ish
	isb
#endif
	mov	r4, r0                          // Preserve start/length across the call
	mov	r5, r1
	bl	EXT(CleanPoU_DcacheRegion)
	mov	r0, r4
	mov	r1, r5
	bl	EXT(InvalidatePoU_IcacheRegion)
	mrc	p15, 0, r9, c13, c0, 4          // Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r4, [r9, ACT_KPTW_TTB]          // Load kernel ttb
	mcr	p15, 0, r4, c2, c0, 0           // Set TTBR0
	mov	r4, #0                          // Load kernel asid
	mcr	p15, 0, r4, c13, c0, 1          // Set CONTEXTIDR
	isb
#endif
	str	r6, [r9, TH_RECOVER]            // Restore the previous recovery routine
	bl	EXT(thread_exception_return)
	b	.                               // Does not return
723 | ||
dcache_flush_trap:
	// r0 = user start address, r1 = length.  Flush the D-cache over the
	// range, with TH_RECOVER armed so a fault lands in cache_trap_jmp.
	add	r3, r0, r1                      // r3 = end of range
	cmp	r3, VM_MAX_ADDRESS              // Range must stay within user space
	subhi	r3, r3, #1<<MMU_CLINE
	bhi	cache_trap_error
	adr	r11, cache_trap_jmp
	ldr	r4, [r9, TH_RECOVER]            // Save existing recovery routine
	str	r11, [r9, TH_RECOVER]           // Install cache_trap_jmp as recovery
#if __ARM_USER_PROTECT__
	ldr	r6, [r9, ACT_UPTW_TTB]          // Load thread ttb
	mcr	p15, 0, r6, c2, c0, 0           // Set TTBR0
	ldr	r5, [r9, ACT_ASID]              // Load thread asid
	mcr	p15, 0, r5, c13, c0, 1          // Set CONTEXTIDR
	isb
#endif
	bl	EXT(flush_dcache_syscall)
	mrc	p15, 0, r9, c13, c0, 4          // Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr	r5, [r9, ACT_KPTW_TTB]          // Load kernel ttb
	mcr	p15, 0, r5, c2, c0, 0           // Set TTBR0
	mov	r5, #0                          // Load kernel asid
	mcr	p15, 0, r5, c13, c0, 1          // Set CONTEXTIDR
	isb
#endif
	str	r4, [r9, TH_RECOVER]            // Restore the previous recovery routine
	bl	EXT(thread_exception_return)
	b	.                               // Does not return
751 | ||
thread_set_cthread_trap:
	// Set the thread-local (cthread) pointer; r0 already holds the
	// user-supplied value from the trap dispatch reload.
	bl	EXT(thread_set_cthread_self)
	bl	EXT(thread_exception_return)
	b	.                               // Does not return
756 | ||
thread_get_cthread_trap:
	// Fetch the thread-local (cthread) pointer and store it as the
	// user-visible r0 return value in the PCB.
	bl	EXT(thread_get_cthread_self)
	mrc	p15, 0, r9, c13, c0, 4          // Reload r9 from TPIDRPRW
	add	r1, r9, ACT_PCBDATA             // Get User PCB
	str	r0, [r1, SS_R0]                 // set return value
	bl	EXT(thread_exception_return)
	b	.                               // Does not return
764 | ||
cache_trap_jmp:
	// Fault-recovery target for the cache traps above: a user address in
	// the range faulted.  Restore kernel translation state, then raise
	// EXC_BAD_ACCESS with { KERN_INVALID_ADDRESS, fault address }.
#if __ARM_USER_PROTECT__
	mrc	p15, 0, r9, c13, c0, 4          // Reload r9 from TPIDRPRW
	ldr	r5, [r9, ACT_KPTW_TTB]          // Load kernel ttb
	mcr	p15, 0, r5, c2, c0, 0           // Set TTBR0
	mov	r5, #0                          // Load kernel asid
	mcr	p15, 0, r5, c13, c0, 1          // Set CONTEXTIDR
	isb
#endif
	mrc	p15, 0, r3, c6, c0              // Read Fault Address
cache_trap_error:
	mrc	p15, 0, r9, c13, c0, 4          // Reload r9 from TPIDRPRW
	add	r0, r9, ACT_PCBDATA             // Get User PCB
	ldr	r1, [r0, SS_PC]                 // Save user mode pc register as pc
	sub	r1, r1, #4                      // Backtrack current pc
	str	r1, [r0, SS_PC]                 // pc at cache assist swi
	str	r3, [r0, SS_VADDR]              // Fault Address
	mov	r0, #EXC_BAD_ACCESS
	mov	r2, KERN_INVALID_ADDRESS
	sub	sp, sp, #8                      // Room for the two exception codes
	mov	r1, sp                          // r1 = code array
	str	r2, [sp]                        // code[0] = KERN_INVALID_ADDRESS
	str	r3, [sp, #4]                    // code[1] = fault address
	mov	r2, #2                          // codeCnt = 2
	bl	EXT(exception_triage)
	b	.                               // Does not return
791 | ||
fleh_swi_trap_mct:
	// Fast trap (-4): mach_continuous_time().  Store the 64-bit result
	// into the saved user r0/r1.
	bl	EXT(mach_continuous_time)
	mrc	p15, 0, r9, c13, c0, 4          // Read TPIDRPRW
	add	r9, r9, ACT_PCBDATA_R0          // Get User register state
	stmia	r9, {r0, r1}                    // set 64-bit return value
	bl	EXT(thread_exception_return)
	b	.                               // Does not return
799 | ||
fleh_swi_trap_tb:
	// Fast trap (-3): mach_absolute_time.  No kernel stack is set up
	// (ml_get_timebase is hand-rolled and stackless); return straight
	// to user mode with the 64-bit result in r0/r1.
	str	lr, [sp, SS_PC]                 // Stash return pc in the PCB
	bl	EXT(ml_get_timebase)            // ml_get_timebase() (64-bit return)
	ldr	lr, [sp, SS_PC]                 // Recover return pc
	nop
	movs	pc, lr                          // Return to user (restores cpsr from spsr)
806 | ||
	.align	2
L_kernel_swi_panic_str:                         // Message for the swi_from_kernel panic
	.asciz  "fleh_swi: took SWI from kernel mode\n"
	.align	2
811 | ||
/*
 * First Level Exception Handler for Prefetching Abort.
 * On entry (ABT mode): lr = faulting instruction + 4; spsr holds the
 * pre-exception CPSR.  Banked ABT-mode sp is used as scratch.
 */
	.text
	.align 2
	.globl EXT(fleh_prefabt)

LEXT(fleh_prefabt)
	sub	lr, lr, #4                      // Back up lr to the faulting instruction

	mrs	sp, spsr                        // For check the previous mode
	tst	sp, #0x0f                       // Is it from user? (low mode nibble zero)
	bne	prefabt_from_kernel

prefabt_from_user:
	mrc	p15, 0, sp, c13, c0, 4          // Read TPIDRPRW
	add	sp, sp, ACT_PCBDATA             // Get User PCB

	stmia	sp, {r0-r12, sp, lr}^           // Save user context on PCB (user-bank sp/lr)
	mov	r7, #0                          // Zero the frame pointer
	nop
	mov	r0, sp                          // Store arm_saved_state pointer
						// For argument
	str	lr, [sp, SS_PC]                 // Save user mode pc register as pc
	mrc	p15, 0, r1, c6, c0, 2           // Read IFAR
	str	r1, [sp, SS_VADDR]              // and fault address of pcb

	mrc	p15, 0, r5, c5, c0, 1           // Read Fault Status (IFSR)
	str	r5, [sp, SS_STATUS]             // Save fault status register to pcb

	mrs	r4, spsr
	str	r4, [sp, SS_CPSR]               // Save user mode cpsr

	mrs	r4, cpsr                        // Read cpsr
	cpsid	i, #PSR_SVC_MODE                // Switch to SVC mode, IRQ masked
	mrs	r3, cpsr                        // Read cpsr
	msr	spsr_cxsf, r3                   // Set spsr(svc mode cpsr)
	mrc	p15, 0, r9, c13, c0, 4          // Read TPIDRPRW
	ldr	sp, [r9, TH_KSTACKPTR]          // Load kernel stack

#if __ARM_VFP__
	add	r0, r9, ACT_UVFP                // Get the address of the user VFP save area
	bl	EXT(vfp_save)                   // Save the current VFP state to ACT_UVFP
	mov	r3, #FPSCR_DEFAULT              // Load up the default FPSCR value...
	fmxr	fpscr, r3                       // And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr	r3, [r9, ACT_KPTW_TTB]          // Load kernel ttb
	mcr	p15, 0, r3, c2, c0, 0           // Set TTBR0
	mov	r3, #0                          // Load kernel asid
	mcr	p15, 0, r3, c13, c0, 1          // Set CONTEXTIDR
	isb
#endif
	and	r0, r4, #PSR_MODE_MASK          // Extract current mode
	cmp	r0, #PSR_ABT_MODE               // Check abort mode
	bne	EXT(ExceptionVectorPanic)       // Sanity check: must have come from ABT mode

	mvn	r0, #0                          // r0 = -1
	str	r0, [r9, TH_IOTIER_OVERRIDE]    // Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl	EXT(timer_state_event_user_to_kernel)
	mrc	p15, 0, r9, c13, c0, 4          // Read TPIDRPRW (reload after the call)
#endif

	add	r0, r9, ACT_PCBDATA             // Reload arm_saved_state pointer
	mov	r1, T_PREFETCH_ABT              // Pass abort type
	bl	EXT(sleh_abort)                 // Call second level handler
						// Sleh will enable interrupt
	b	load_and_go_user
882 | ||
883 | prefabt_from_kernel: | |
884 | mrs sp, cpsr // Read cpsr | |
885 | and sp, sp, #PSR_MODE_MASK // Extract current mode | |
886 | cmp sp, #PSR_ABT_MODE // Check abort mode | |
887 | movne r0, sp | |
888 | bne EXT(ExceptionVectorPanic) | |
889 | mrs sp, spsr // Check the previous mode | |
890 | ||
891 | /* | |
892 | * We have a kernel stack already, and I will use it to save contexts: | |
893 | * ------------------ | |
894 | * | VFP saved state | | |
895 | * |------------------| | |
896 | * | ARM saved state | | |
897 | * SP ------------------ | |
898 | * | |
899 | * IRQ is disabled | |
900 | */ | |
901 | cpsid i, #PSR_SVC_MODE | |
902 | ||
903 | sub sp, sp, EXC_CTX_SIZE | |
904 | stmia sp, {r0-r12} | |
905 | add r0, sp, EXC_CTX_SIZE | |
906 | ||
907 | str r0, [sp, SS_SP] // Save supervisor mode sp | |
908 | str lr, [sp, SS_LR] // Save supervisor mode lr | |
909 | ||
910 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
911 | ||
912 | #if __ARM_VFP__ | |
913 | add r0, sp, SS_SIZE // Get vfp state pointer | |
914 | bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment | |
915 | add r0, VSS_ALIGN // Get the actual vfp save area | |
916 | bl EXT(vfp_save) // Save the current VFP state to the stack | |
917 | mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value... | |
918 | fmxr fpscr, r4 // And shove it into FPSCR | |
919 | #endif | |
920 | #if __ARM_USER_PROTECT__ | |
921 | mrc p15, 0, r10, c2, c0, 0 // Get TTBR0 | |
922 | ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb | |
923 | cmp r3, r10 | |
924 | beq 1f | |
925 | mcr p15, 0, r3, c2, c0, 0 // Set TTBR0 | |
926 | 1: | |
927 | mrc p15, 0, r11, c13, c0, 1 // Save CONTEXTIDR | |
928 | mov r3, #0 // Load kernel asid | |
929 | mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR | |
930 | isb | |
931 | #endif | |
932 | mov ip, sp | |
933 | ||
934 | cpsid i, #PSR_ABT_MODE | |
935 | ||
936 | str lr, [ip, SS_PC] // Save pc to pc and | |
937 | ||
938 | mrc p15, 0, r5, c6, c0, 2 // Read IFAR | |
939 | str r5, [ip, SS_VADDR] // and fault address of pcb | |
940 | mrc p15, 0, r5, c5, c0, 1 // Read (instruction) Fault Status | |
941 | str r5, [ip, SS_STATUS] // Save fault status register to pcb | |
942 | ||
943 | mrs r4, spsr | |
944 | str r4, [ip, SS_CPSR] | |
945 | ||
946 | cpsid i, #PSR_SVC_MODE | |
947 | ||
948 | mov r0, sp | |
949 | ||
950 | /* | |
951 | * For armv7k ABI, the stack needs to be 16-byte aligned | |
952 | */ | |
953 | #if __BIGGEST_ALIGNMENT__ > 4 | |
954 | and r1, sp, #0x0F // sp mod 16-bytes | |
955 | cmp r1, #4 // need space for the sp on the stack | |
956 | addlt r1, r1, #0x10 // make room if needed, but keep stack aligned | |
957 | mov r2, sp // get current sp | |
958 | sub sp, sp, r1 // align stack | |
959 | str r2, [sp] // store previous sp on stack | |
960 | #endif | |
961 | ||
962 | mov r1, T_PREFETCH_ABT // Pass abort type | |
963 | bl EXT(sleh_abort) // Call second level handler | |
964 | ||
965 | #if __BIGGEST_ALIGNMENT__ > 4 | |
966 | ldr sp, [sp] // restore stack | |
967 | #endif | |
968 | ||
969 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
970 | #if __ARM_USER_PROTECT__ | |
971 | ldr r0, [r9, ACT_KPTW_TTB] // Load kernel ttb | |
972 | cmp r10, r0 | |
973 | beq 1f | |
974 | ldr r10, [r9, ACT_UPTW_TTB] // Load thread ttb | |
975 | cmp r10, r0 | |
976 | beq 1f | |
977 | mcr p15, 0, r10, c2, c0, 0 // Set TTBR0 | |
978 | ldr r11, [r9, ACT_ASID] // Load thread asid | |
979 | 1: | |
980 | mcr p15, 0, r11, c13, c0, 1 // set CONTEXTIDR | |
981 | isb | |
982 | #endif | |
983 | ||
984 | b load_and_go_sys | |
985 | ||
986 | ||
/*
 * First Level Exception Handler for Data Abort.
 *
 * Entered in ABT mode; lr_abt points 8 bytes past the faulting
 * instruction for data aborts, so it is rewound by 8.  Structure
 * mirrors fleh_prefabt, but fault information comes from DFSR/DFAR
 * (CP15 c5/c6 opc2=0) instead of IFSR/IFAR.  Both the user and
 * kernel paths end in sleh_abort() with T_DATA_ABT.
 */
	.text
	.align 2
	.globl EXT(fleh_dataabt)

LEXT(fleh_dataabt)
	sub		lr, lr, #8			// Rewind lr to the faulting instruction (data abort offset is 8)

	mrs		sp, spsr			// For check the previous mode
	tst		sp, #0x0f			// Is it from user? (mode bits 0 => user)
	bne		dataabt_from_kernel

dataabt_from_user:
	mrc		p15, 0, sp, c13, c0, 4		// Read TPIDRPRW (current thread pointer)
	add		sp, sp, ACT_PCBDATA		// Get User PCB

	stmia		sp, {r0-r12, sp, lr}^		// Save user context on PCB (^ = user-mode banked regs)
	mov		r7, #0				// Zero the frame pointer
	nop						// Required separator after stm..^ before banked access

	mov		r0, sp				// Store arm_saved_state pointer
							// For argument

	str		lr, [sp, SS_PC]			// Save user mode pc register

	mrs		r4, spsr
	str		r4, [sp, SS_CPSR]		// Save user mode cpsr

	mrc		p15, 0, r5, c5, c0		// Read Fault Status (DFSR)
	mrc		p15, 0, r6, c6, c0		// Read Fault Address (DFAR)
	str		r5, [sp, SS_STATUS]		// Save fault status register to pcb
	str		r6, [sp, SS_VADDR]		// Save fault address to pcb

	mrs		r4, cpsr			// Read cpsr (ABT mode, for sanity check below)
	cpsid	i, #PSR_SVC_MODE			// Switch to SVC mode, IRQ disabled
	mrs		r3, cpsr			// Read cpsr
	msr		spsr_cxsf, r3			// Set spsr(svc mode cpsr)
	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr		sp, [r9, TH_KSTACKPTR]		// Load kernel stack

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl		EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov		r3, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr		fpscr, r3			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr		r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov		r3, #0				// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	and		r0, r4, #PSR_MODE_MASK		// Extract mode we took the exception in
	cmp		r0, #PSR_ABT_MODE		// Check abort mode
	bne		EXT(ExceptionVectorPanic)	// Panic if we weren't actually in ABT mode

	mvn		r0, #0
	str		r0, [r9, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before handling abort from userspace

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_user_to_kernel)
	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW (r9 clobbered by the call)
#endif

	add		r0, r9, ACT_PCBDATA		// Reload arm_saved_state pointer
	mov		r1, T_DATA_ABT			// Pass abort type
	bl		EXT(sleh_abort)			// Call second level handler
							// Sleh will enable irq
	b		load_and_go_user

dataabt_from_kernel:
	mrs		sp, cpsr			// Read cpsr
	and		sp, sp, #PSR_MODE_MASK		// Extract current mode
	cmp		sp, #PSR_ABT_MODE		// Check abort mode
	movne	r0, sp					// Pass the bogus mode to the panic path
	bne		EXT(ExceptionVectorPanic)
	mrs		sp, spsr			// Check the previous mode

	/*
	 * We have a kernel stack already, and I will use it to save contexts:
	 * ------------------
	 * | VFP saved state |
	 * |------------------|
	 * | ARM saved state  |
	 * SP ------------------
	 *
	 * IRQ is disabled
	 */
	cpsid	i, #PSR_SVC_MODE

	sub		sp, sp, EXC_CTX_SIZE		// Carve context frame off SVC stack
	stmia	sp, {r0-r12}
	add		r0, sp, EXC_CTX_SIZE		// r0 = sp value at exception time

	str		r0, [sp, SS_SP]			// Save supervisor mode sp
	str		lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add		r0, sp, SS_SIZE			// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN			// Get the actual vfp save area
	bl		EXT(vfp_save)			// Save the current VFP state to the stack
	mov		r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr		fpscr, r4			// And shove it into FPSCR
#endif

	mov		ip, sp				// ip = saved-state pointer, survives mode switch

	cpsid	i, #PSR_ABT_MODE			// Back to ABT mode to read banked lr/spsr

	str		lr, [ip, SS_PC]			// Save faulting pc
	mrs		r4, spsr
	str		r4, [ip, SS_CPSR]		// Save interrupted mode's cpsr

	cpsid	i, #PSR_SVC_MODE			// Return to SVC mode for the C call

#if __ARM_USER_PROTECT__
	mrc		p15, 0, r10, c2, c0, 0		// Get TTBR0 (r10 preserved for restore after sleh_abort)
	ldr		r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp		r3, r10
	beq		1f				// Skip write if already on kernel ttb
	mcr		p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc		p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR (restored after sleh_abort)
	mov		r3, #0				// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mrc		p15, 0, r5, c5, c0		// Read Fault Status (DFSR)
	mrc		p15, 0, r6, c6, c0		// Read Fault Address (DFAR)
	str		r5, [sp, SS_STATUS]		// Save fault status register to pcb
	str		r6, [sp, SS_VADDR]		// Save fault address to pcb

	mov		r0, sp				// Argument: arm_saved_state pointer

	/*
	 * For armv7k ABI, the stack needs to be 16-byte aligned
	 */
#if __BIGGEST_ALIGNMENT__ > 4
	and		r1, sp, #0x0F			// sp mod 16-bytes
	cmp		r1, #4				// need space for the sp on the stack
	addlt		r1, r1, #0x10			// make room if needed, but keep stack aligned
	mov		r2, sp				// get current sp
	sub		sp, sp, r1			// align stack
	str		r2, [sp]			// store previous sp on stack
#endif

	mov		r1, T_DATA_ABT			// Pass abort type
	bl		EXT(sleh_abort)			// Call second level handler

#if __BIGGEST_ALIGNMENT__ > 4
	ldr		sp, [sp]			// restore stack (removed align padding)
#endif

	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr		r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp		r10, r0				// Was the kernel ttb active at entry?
	beq		1f				// ...then nothing to restore
	ldr		r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp		r10, r0
	beq		1f
	mcr		p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr		r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr		p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif
1160 | ||
/*
 * load_and_go_sys
 *
 * Common return path to a kernel (svc/irq/fiq) context whose state was
 * saved on the SVC stack (sp -> arm_saved_state, VFP state above it).
 * If the interrupted context had IRQs enabled, preemption count is zero,
 * and an AST_URGENT is pending, ast_taken_kernel() is called first.
 * Finishes in ABT mode with an exception return (movs pc, lr).
 */
load_and_go_sys:
	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW (current thread pointer)

	ldr		r4, [sp, SS_CPSR]		// Load saved cpsr
	tst		r4, #PSR_IRQF			// Test IRQ set
	bne		lags1				// Branch if IRQ disabled (no AST check)

	cpsid	i					// Disable IRQ
	ldr		r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	movs	r2, r2					// Test if null
	ldr		r8, [r9, ACT_CPUDATAP]		// Get current cpu
	bne		lags1				// Branch if count not null (preemption disabled)
	ldr		r5, [r8, CPU_PENDING_AST]	// Get ASTs
	ands	r5, r5, AST_URGENT			// Get the requests we do honor
	beq		lags1				// Branch if no ASTs
#if __ARM_USER_PROTECT__
	mrc		p15, 0, r10, c2, c0, 0		// Get TTBR0 (saved for restore after the AST call)
	ldr		r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp		r3, r10
	beq		1f
	mcr		p15, 0, r3, c2, c0, 0		// Set TTBR0
1:
	mrc		p15, 0, r11, c13, c0, 1		// Save CONTEXTIDR
	mov		r3, #0				// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	ldr		lr, [sp, SS_LR]			// Restore the link register
	stmfd	sp!, {r7, lr}				// Push a fake frame

	/* TODO: Should this be setting r7? I think so. */
	mov		r7, sp				// Set the frame pointer

#if __BIGGEST_ALIGNMENT__ > 4
	and		r2, sp, #0x0F			// sp mod 16-bytes
	cmp		r2, #4				// need space for the sp on the stack
	addlt	r2, r2, #0x10				// make room if needed, but keep stack aligned
	mov		r3, sp				// get current sp
	sub		sp, sp, r2			// align stack
	str		r3, [sp]			// store previous sp on stack
#endif

	bl		EXT(ast_taken_kernel)		// Handle AST_URGENT

#if __BIGGEST_ALIGNMENT__ > 4
	ldr		sp, [sp]			// restore stack (remove alignment padding)
#endif


	ldmfd	sp!, {r7, lr}				// Pop the fake frame
	mrc		p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW (may have switched threads)
	ldr		r8, [r9, ACT_CPUDATAP]		// Get current cpu
#if __ARM_USER_PROTECT__
	ldr		r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp		r10, r0				// Was the kernel ttb active at entry?
	beq		1f				// ...then nothing to restore
	ldr		r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp		r10, r0
	beq		1f
	mcr		p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr		r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr		p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif
lags1:
	ldr		lr, [sp, SS_LR]			// Restore interrupted context's lr

	mov		ip, sp				// Save pointer to contexts for abort mode
	ldr		sp, [ip, SS_SP]			// Restore stack pointer

	cpsid	if, #PSR_ABT_MODE			// Use ABT mode for the final exception return

	mov		sp, ip				// ABT sp = saved-state pointer (scratch)

	ldr		r4, [sp, SS_CPSR]
	msr		spsr_cxsf, r4			// Restore spsr for the exception return

	clrex						// clear exclusive memory tag
#if __ARM_ENABLE_WFE_
	sev
#endif

#if __ARM_VFP__
	add		r0, sp, SS_SIZE			// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN			// Get the actual vfp save area
	bl		EXT(vfp_load)			// Load the desired VFP state from the stack
#endif

	ldr		lr, [sp, SS_PC]			// Restore lr (return pc)

	ldmia	sp, {r0-r12}				// Restore other registers

	movs	pc, lr					// Return to sys (svc, irq, fiq); copies spsr -> cpsr
1256 | ||
/*
 * First Level Exception Handler for address exception.
 * Not supported — spins forever if ever taken.
 */
	.text
	.align 2
	.globl EXT(fleh_addrexc)

LEXT(fleh_addrexc)
	b	.					// Hang: branch-to-self
1267 | ||
1268 | ||
/*
 * First Level Exception Handler for IRQ
 * Current mode : IRQ
 * IRQ and FIQ are always disabled while running in FIQ handler
 * We do not permit nested interrupt.
 *
 * Saving area: from user : PCB.
 *              from kernel : interrupt stack.
 *
 * After saving context, runs on the per-cpu interrupt stack, bumps the
 * preemption count, dispatches to the registered interrupt handler, and
 * mixes the timebase into the entropy pool before return_from_irq.
 */

	.text
	.align 2
	.globl EXT(fleh_irq)

LEXT(fleh_irq)
	sub		lr, lr, #4			// Rewind lr to the interrupted instruction

	cpsie	a					// Re-enable async aborts

	mrs		sp, spsr			// IRQ-banked sp used as scratch for the mode check
	tst		sp, #0x0f			// From user? or kernel?
	bne		fleh_irq_kernel

fleh_irq_user:
	mrc		p15, 0, sp, c13, c0, 4		// Read TPIDRPRW (current thread pointer)
	add		sp, sp, ACT_PCBDATA		// Get User PCB
	stmia	sp, {r0-r12, sp, lr}^			// Save user context (^ = user-mode banked regs)
	mov		r7, #0				// Zero the frame pointer
	nop						// Required separator after stm..^ before banked access
	str		lr, [sp, SS_PC]			// Save interrupted pc
	mrs		r4, spsr
	str		r4, [sp, SS_CPSR]		// Save interrupted cpsr
	mov		r5, sp				// Saved context in r5
	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr		r6, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr		sp, [r6, CPU_ISTACKPTR]		// Set interrupt stack
	cpsid	i, #PSR_SVC_MODE
	ldr		sp, [r9, TH_KSTACKPTR]		// Set kernel stack (for SVC mode)
	cpsid	i, #PSR_IRQ_MODE			// Back to IRQ mode (interrupt stack in sp)

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl		EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov		r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr		fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr		r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov		r3, #0				// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_user_to_kernel)
	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW (r9 clobbered by the call)
#endif
#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)		// Check if a telemetry record was requested...
	mov		r0, #1				// ...interrupted-from-user flag
	ldr		r2, [r2]
	movs	r2, r2
	beq		1f
	bl		EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc		p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

	b		fleh_irq_handler

fleh_irq_kernel:
	cpsid	i, #PSR_SVC_MODE			// Save context on the interrupted SVC stack

	sub		sp, sp, EXC_CTX_SIZE		// Carve context frame off SVC stack
	stmia	sp, {r0-r12}
	add		r0, sp, EXC_CTX_SIZE		// r0 = sp value at interrupt time

	str		r0, [sp, SS_SP]			// Save supervisor mode sp
	str		lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add		r0, sp, SS_SIZE			// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN			// Get the actual vfp save area
	bl		EXT(vfp_save)			// Save the current VFP state to the stack
	mov		r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr		fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc		p15, 0, r10, c2, c0, 0		// Get TTBR0 (r10/r11 preserved for restore on exit)
	ldr		r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0		// Set TTBR0
	mrc		p15, 0, r11, c13, c0, 1		// Get CONTEXTIDR
	mov		r3, #0				// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov		r5, sp				// Saved context in r5

	cpsid	i, #PSR_IRQ_MODE			// Back to IRQ mode to read banked lr/spsr

	str		lr, [r5, SS_PC]			// Save LR as the return PC
	mrs		r4, spsr
	str		r4, [r5, SS_CPSR]		// Save the cpsr of the interrupted mode

	ldr		sp, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr		sp, [sp, CPU_ISTACKPTR]		// Set interrupt stack

#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)		// Check if a telemetry record was requested...
	mov		r0, #0				// ...interrupted-from-kernel flag
	ldr		r2, [r2]
	movs	r2, r2
	beq		1f
	bl		EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc		p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

fleh_irq_handler:
	// Common dispatch: r5 = saved context, r9 = thread pointer
	ldr		r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	add		r2, r2, #1			// Increment count
	str		r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count
#ifndef NO_KDEBUG
	LOAD_ADDR(r8, kdebug_enable)
	ldr		r8, [r8]
	movs	r8, r8					// r8 != 0 => kdebug tracing on (reused at exit)
	movne	r0, r5					// Pass saved context
	COND_EXTERN_BLNE(interrupt_trace)
#endif
	bl		EXT(interrupt_stats)		// Record interrupt statistics
	mrc		p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	ldr		r4, [r9, ACT_CPUDATAP]		// Get current cpu
	str		r5, [r4, CPU_INT_STATE]		// Saved context in cpu_int_state
	ldr		r3, [r4, CPU_STAT_IRQ]		// Get IRQ count
	add		r3, r3, #1			// Increment count
	str		r3, [r4, CPU_STAT_IRQ]		// Update IRQ count
	ldr		r3, [r4, CPU_STAT_IRQ_WAKE]	// Get post-wake IRQ count
	add		r3, r3, #1			// Increment count
	str		r3, [r4, CPU_STAT_IRQ_WAKE]	// Update post-wake IRQ count
	ldr		r0, [r4, INTERRUPT_TARGET]	// Handler arguments from per-cpu data
	ldr		r1, [r4, INTERRUPT_REFCON]
	ldr		r2, [r4, INTERRUPT_NUB]
	ldr		r3, [r4, INTERRUPT_SOURCE]
	ldr		r5, [r4, INTERRUPT_HANDLER]	// Call second level exception handler
	blx		r5
#ifndef NO_KDEBUG
	movs	r8, r8					// Re-test kdebug flag captured above
	COND_EXTERN_BLNE(interrupt_trace_exit)
#endif
	mrc		p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW
	// Fold the current timebase into the entropy ring buffer
	bl		EXT(ml_get_timebase)		// get current timebase
	LOAD_ADDR(r3, EntropyData)
	ldr		r2, [r3, ENTROPY_INDEX_PTR]	// Current write pointer
	add		r1, r3, ENTROPY_DATA_SIZE	// End of the entropy buffer
	add		r2, r2, #4			// Advance pointer
	cmp		r2, r1				// Past the end?
	addge	r2, r3, ENTROPY_BUFFER			// ...then wrap to the start
	ldr		r4, [r2]
	eor		r0, r0, r4, ROR #9		// Mix timebase with the previous entry
	str		r0, [r2]			// Update entropy data
	str		r2, [r3, ENTROPY_INDEX_PTR]

return_from_irq:
	// Shared exit path (also used by fleh_decirq): r9 = thread pointer
	mov		r5, #0
	ldr		r4, [r9, ACT_CPUDATAP]		// Get current cpu
	str		r5, [r4, CPU_INT_STATE]		// Clear cpu_int_state
	ldr		r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
#if MACH_ASSERT
	cmp		r2, #0				// verify positive count
	bgt		1f
	push	{r7, lr}
	mov		r7, sp
	adr		r0, L_preemption_count_zero_str
	blx		EXT(panic)
	b		.
1:
#endif
	sub		r2, r2, #1			// Decrement count
	str		r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count

	mrs		r0, spsr			// For check the previous mode

	cpsid	i, #PSR_SVC_MODE

	tst		r0, #0x0f			// Check if the previous is from user
	ldreq	sp, [r9, TH_KSTACKPTR]			// ...If so, reload the kernel stack pointer
	beq		load_and_go_user		// ...and return

#if __ARM_USER_PROTECT__
	ldr		r0, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	cmp		r10, r0				// Was the kernel ttb active at entry?
	beq		1f				// ...then nothing to restore
	ldr		r10, [r9, ACT_UPTW_TTB]		// Load thread ttb
	cmp		r10, r0
	beq		1f
	mcr		p15, 0, r10, c2, c0, 0		// Set TTBR0
	ldr		r11, [r9, ACT_ASID]		// Load thread asid
1:
	mcr		p15, 0, r11, c13, c0, 1		// set CONTEXTIDR
	isb
#endif
	b		load_and_go_sys

	.align 2
L_preemption_count_zero_str:
	.ascii	"locore.s: preemption count is zero \000"
	.align 2
/*
 * First Level Exception Handler for DEC (decrementer interrupt)
 * Current mode : IRQ
 * IRQ and FIQ are always disabled while running in FIQ handler
 * We do not permit nested interrupt.
 *
 * Saving area: from user : PCB.
 *              from kernel : interrupt stack.
 *
 * Same context-save structure as fleh_irq, but dispatches directly to
 * rtclock_intr() instead of the per-cpu registered interrupt handler,
 * and joins fleh_irq's exit path at return_from_irq.
 */

	.text
	.align 2
	.globl EXT(fleh_decirq)

LEXT(fleh_decirq)
	sub		lr, lr, #4			// Rewind lr to the interrupted instruction

	cpsie	af					// Re-enable async aborts/FIQ

	mrs		sp, spsr			// IRQ-banked sp used as scratch for the mode check
	tst		sp, #0x0f			// From user? or kernel?
	bne		fleh_decirq_kernel

fleh_decirq_user:
	mrc		p15, 0, sp, c13, c0, 4		// Read TPIDRPRW (current thread pointer)
	add		sp, sp, ACT_PCBDATA		// Get User PCB
	stmia	sp, {r0-r12, sp, lr}^			// Save user context (^ = user-mode banked regs)
	mov		r7, #0				// Zero the frame pointer
	nop						// Required separator after stm..^ before banked access
	str		lr, [sp, SS_PC]			// Save interrupted pc
	mrs		r4, spsr
	str		r4, [sp, SS_CPSR]		// Save interrupted cpsr
	mov		r5, sp				// Saved context in r5
	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr		r6, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr		sp, [r6, CPU_ISTACKPTR]		// Set interrupt stack
	cpsid	i, #PSR_SVC_MODE
	ldr		sp, [r9, TH_KSTACKPTR]		// Set kernel stack (for SVC mode)
	cpsid	i, #PSR_IRQ_MODE			// Back to IRQ mode (interrupt stack in sp)

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP		// Get the address of the user VFP save area
	bl		EXT(vfp_save)			// Save the current VFP state to ACT_UVFP
	mov		r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr		fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr		r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0		// Set TTBR0
	mov		r3, #0				// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_user_to_kernel)
	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW (r9 clobbered by the call)
#endif
#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)		// Check if a telemetry record was requested...
	mov		r0, #1				// ...interrupted-from-user flag
	ldr		r2, [r2]
	movs	r2, r2
	beq		1f
	bl		EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc		p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

	b		fleh_decirq_handler

fleh_decirq_kernel:
	cpsid	i, #PSR_SVC_MODE			// Save context on the interrupted SVC stack

	sub		sp, sp, EXC_CTX_SIZE		// Carve context frame off SVC stack
	stmia	sp, {r0-r12}
	add		r0, sp, EXC_CTX_SIZE		// r0 = sp value at interrupt time

	str		r0, [sp, SS_SP]			// Save supervisor mode sp
	str		lr, [sp, SS_LR]			// Save supervisor mode lr

	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW

#if __ARM_VFP__
	add		r0, sp, SS_SIZE			// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)	// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN			// Get the actual vfp save area
	bl		EXT(vfp_save)			// Save the current VFP state to the stack
	mov		r4, #FPSCR_DEFAULT		// Load up the default FPSCR value...
	fmxr		fpscr, r4			// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc		p15, 0, r10, c2, c0, 0		// Get TTBR0 (r10/r11 preserved for restore on exit)
	ldr		r3, [r9, ACT_KPTW_TTB]		// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0		// Set TTBR0
	mrc		p15, 0, r11, c13, c0, 1		// Get CONTEXTIDR
	mov		r3, #0				// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1		// Set CONTEXTIDR
	isb
#endif
	mov		r5, sp				// Saved context in r5

	cpsid	i, #PSR_IRQ_MODE			// Back to IRQ mode to read banked lr/spsr

	str		lr, [r5, SS_PC]			// Save LR as the return PC
	mrs		r4, spsr
	str		r4, [r5, SS_CPSR]		// Save the cpsr of the interrupted mode

	ldr		sp, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr		sp, [sp, CPU_ISTACKPTR]		// Set interrupt stack

#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)		// Check if a telemetry record was requested...
	mov		r0, #0				// ...interrupted-from-kernel flag
	ldr		r2, [r2]
	movs	r2, r2
	beq		1f
	bl		EXT(telemetry_mark_curthread)	// ...if so, mark the current thread...
	mrc		p15, 0, r9, c13, c0, 4		// ...and restore the thread pointer from TPIDRPRW
1:
#endif

fleh_decirq_handler:
	// Common dispatch: r5 = saved context, r9 = thread pointer
	ldr		r2, [r9, ACT_PREEMPT_CNT]	// Load preemption count
	add		r2, r2, #1			// Increment count
	str		r2, [r9, ACT_PREEMPT_CNT]	// Update preemption count
	ldr		r2, [r9, ACT_CPUDATAP]		// Get current cpu
	str		r5, [r2, CPU_INT_STATE]		// Saved context in cpu_int_state
	ldr		r3, [r2, CPU_STAT_IRQ]		// Get IRQ count
	add		r3, r3, #1			// Increment count
	str		r3, [r2, CPU_STAT_IRQ]		// Update IRQ count
	ldr		r3, [r2, CPU_STAT_IRQ_WAKE]	// Get post-wake IRQ count
	add		r3, r3, #1			// Increment count
	str		r3, [r2, CPU_STAT_IRQ_WAKE]	// Update post-wake IRQ count
#ifndef NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr		r4, [r4]
	movs	r4, r4					// r4 != 0 => kdebug tracing on (reused at exit)
	movne	r0, r5				// Pass saved context
	COND_EXTERN_BLNE(interrupt_trace)
#endif
	bl		EXT(interrupt_stats)		// Record interrupt statistics
	mov		r0, #0
	bl		EXT(rtclock_intr)		// Call second level exception handler
#ifndef NO_KDEBUG
	movs	r4, r4					// Re-test kdebug flag captured above
	COND_EXTERN_BLNE(interrupt_trace_exit)
#endif

	mrc		p15, 0, r9, c13, c0, 4		// Reload r9 from TPIDRPRW

	b		return_from_irq			// Shared exit path in fleh_irq
1630 | ||
1631 | ||
/*
 * First Level Exception Handler for FIQ
 * Current mode : FIQ
 * IRQ and FIQ are always disabled while running in FIQ handler
 * We do not permit nested interrupt.
 *
 * Saving area: from user : PCB.
 *              from kernel : interrupt stack.
 *
 * We have 7 added shadow registers in FIQ mode for fast services.
 * So only we have to save is just 8 general registers and LR.
 * But if the current thread was running on user mode before the FIQ interrupt,
 * All user registers be saved for ast handler routine.
 *
 * Fast path: uses only FIQ-banked registers (r8-r13) — r8 is assumed to
 * hold the per-cpu data pointer and r10/r11 the FIQ source address/value
 * (set up elsewhere; not visible in this file chunk).  Maintains the
 * 64-bit software timebase and the decrementer, returning immediately
 * unless the decrementer went negative, in which case it falls into
 * fleh_dec for full handling.
 */
	.text
	.align 2
	.globl EXT(fleh_fiq_generic)

LEXT(fleh_fiq_generic)
	str		r11, [r10]			// Clear the FIQ source

	ldr		r13, [r8, CPU_TIMEBASE_LOW]	// Load TBL
	adds	r13, r13, #1				// Increment TBL (sets carry on wrap)
	str		r13, [r8, CPU_TIMEBASE_LOW]	// Store TBL
	ldreq	r13, [r8, CPU_TIMEBASE_HIGH]		// Load TBU (only when TBL wrapped to 0)
	addeq	r13, r13, #1				// Increment TBU
	streq	r13, [r8, CPU_TIMEBASE_HIGH]		// Store TBU
	subs	r12, r12, #1				// Decrement, DEC
	str		r12, [r8, CPU_DECREMENTER]	// Store DEC
	subspl	pc, lr, #4				// Return unless DEC < 0 (exception return, restores cpsr)
	b		EXT(fleh_dec)			// Decrementer expired: full handler
1663 | ||
1664 | .text | |
1665 | .align 2 | |
1666 | .globl EXT(fleh_dec) | |
1667 | LEXT(fleh_dec) | |
1668 | mrs sp, spsr // Get the spsr | |
1669 | sub lr, lr, #4 | |
/*
 * NOTE(review): tail of an interrupt/decrementer exception handler whose
 * entry label lies above this excerpt.  The "tst sp, #0x0f" below tests
 * mode bits, so sp presumably holds the interrupted mode's saved PSR at
 * this point -- confirm against the entry code above.  Three paths follow:
 *   - from user mode (falls through),
 *   - from kernel with IRQ unmasked (label 2),
 *   - the cpu_signal path, kernel with IRQ masked (label 3, handled
 *     entirely in FIQ mode and returned from directly);
 * the user and kernel paths rejoin at label 4.
 */
1670 | tst sp, #0x0f // From user? or kernel? | |
1671 | bne 2f | |
1672 | ||
1673 | /* From user */ | |
// Save the full user register state into the thread's PCB (ACT_PCBDATA),
// using the banked-register form of stmia ({...}^) to store user sp/lr.
1674 | mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW | |
1675 | add sp, sp, ACT_PCBDATA // Get User PCB | |
1676 | ||
1677 | stmia sp, {r0-r12, sp, lr}^ | |
1678 | mov r7, #0 // Zero the frame pointer | |
1679 | nop | |
1680 | str lr, [sp, SS_PC] | |
1681 | ||
1682 | mrs r4, spsr | |
1683 | str r4, [sp, SS_CPSR] | |
1684 | mov r5, sp | |
1685 | sub sp, sp, ACT_PCBDATA // Get User PCB | |
1686 | ldr sp, [sp, ACT_CPUDATAP] // Get current cpu | |
1687 | ldr sp, [sp, CPU_ISTACKPTR] // Set interrupt stack | |
1688 | mov r6, sp | |
1689 | cpsid i, #PSR_SVC_MODE | |
1690 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
1691 | ldr sp, [r9, TH_KSTACKPTR] // Set kernel stack | |
1692 | ||
1693 | #if __ARM_VFP__ | |
1694 | add r0, r9, ACT_UVFP // Get the address of the user VFP save area | |
1695 | bl EXT(vfp_save) // Save the current VFP state to ACT_UVFP | |
1696 | mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value... | |
1697 | fmxr fpscr, r4 // And shove it into FPSCR | |
1698 | #endif | |
1699 | #if __ARM_USER_PROTECT__ | |
// Switch to the kernel translation table and ASID; the user values are
// preserved in r10/r11 so the cpu_signal path can restore them on exit.
1700 | mrc p15, 0, r10, c2, c0, 0 // Get TTBR0 | |
1701 | ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb | |
1702 | mcr p15, 0, r3, c2, c0, 0 // Set TTBR0 | |
1703 | mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR | |
1704 | mov r3, #0 // Load kernel asid | |
1705 | mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR | |
1706 | isb | |
1707 | #endif | |
1708 | mov r0, #1 // Mark this as coming from user context | |
1709 | b 4f | |
1710 | ||
1711 | 2: | |
1712 | /* From kernel */ | |
1713 | tst sp, #PSR_IRQF // Test for IRQ masked | |
1714 | bne 3f // We're on the cpu_signal path | |
1715 | ||
// Kernel was interrupted with IRQs enabled: carve an exception context
// frame off the interrupted SVC stack and save r0-r12, sp, lr into it.
1716 | cpsid if, #PSR_SVC_MODE | |
1717 | ||
1718 | sub sp, sp, EXC_CTX_SIZE | |
1719 | stmia sp, {r0-r12} | |
1720 | add r0, sp, EXC_CTX_SIZE | |
1721 | ||
1722 | str r0, [sp, SS_SP] // Save supervisor mode sp | |
1723 | str lr, [sp, SS_LR] // Save supervisor mode lr | |
1724 | ||
1725 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
1726 | ||
1727 | #if __ARM_VFP__ | |
1728 | add r0, sp, SS_SIZE // Get vfp state pointer | |
1729 | bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment | |
1730 | add r0, VSS_ALIGN // Get the actual vfp save area | |
1731 | bl EXT(vfp_save) // Save the current VFP state to the stack | |
1732 | mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value... | |
1733 | fmxr fpscr, r4 // And shove it into FPSCR | |
1734 | #endif | |
1735 | #if __ARM_USER_PROTECT__ | |
1736 | mrc p15, 0, r10, c2, c0, 0 // Get TTBR0 | |
1737 | ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb | |
1738 | mcr p15, 0, r3, c2, c0, 0 // Set TTBR0 | |
1739 | mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR | |
1740 | mov r3, #0 // Load kernel asid | |
1741 | mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR | |
1742 | isb | |
1743 | #endif | |
1744 | mov r5, sp // Saved context in r5 | |
1745 | ||
// Briefly re-enter FIQ mode to capture the banked lr/spsr of the
// interrupted context (the exception return state lives there).
1746 | cpsid if, #PSR_FIQ_MODE | |
1747 | ||
1748 | mrc p15, 0, r1, c13, c0, 4 // Read TPIDRPRW | |
1749 | ||
1750 | str lr, [r5, SS_PC] // Save LR as the return PC | |
1751 | mrs r4, spsr | |
1752 | str r4, [r5, SS_CPSR] // Save the cpsr of the interrupted mode | |
1753 | ||
1754 | ldr r6, [r1, ACT_CPUDATAP] // Get current cpu | |
1755 | ldr r6, [r6, CPU_ISTACKPTR] // Set interrupt stack | |
1756 | ||
1757 | mov r0, #0 // Mark this as coming from kernel context | |
1758 | b 4f | |
1759 | ||
1760 | 3: | |
1761 | /* cpu_signal path */ | |
// IRQ was already masked: this FIQ is an inter-processor decrementer
// signal.  Save context on the per-cpu FIQ stack, call cpu_signal(),
// then restore and return straight to the interrupted kernel code.
// NOTE(review): r8 is used below as the current cpu pointer -- it is
// presumably set up by the entry code above this excerpt; confirm.
1762 | mrc p15, 0, sp, c13, c0, 4 // Read TPIDRPRW | |
1763 | ldr sp, [sp, ACT_CPUDATAP] // Get current cpu | |
1764 | ldr sp, [sp, CPU_FIQSTACKPTR] // Set fiq stack | |
1765 | sub sp, sp, EXC_CTX_SIZE | |
1766 | stmia sp, {r0-r12} | |
1767 | str lr, [sp, SS_PC] | |
1768 | mrs r4, spsr | |
1769 | str r4, [sp, SS_CPSR] | |
1770 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
1771 | ||
1772 | #if __ARM_VFP__ | |
1773 | add r0, sp, SS_SIZE // Get vfp state pointer | |
1774 | bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment | |
1775 | add r0, VSS_ALIGN // Get the actual vfp save area | |
1776 | bl EXT(vfp_save) // Save the current VFP state to the stack | |
1777 | mov r4, #FPSCR_DEFAULT // Load up the default FPSCR value... | |
1778 | fmxr fpscr, r4 // And shove it into FPSCR | |
1779 | #endif | |
1780 | #if __ARM_USER_PROTECT__ | |
1781 | mrc p15, 0, r10, c2, c0, 0 // Get TTBR0 | |
1782 | ldr r3, [r9, ACT_KPTW_TTB] // Load kernel ttb | |
1783 | mcr p15, 0, r3, c2, c0, 0 // Set TTBR0 | |
1784 | mrc p15, 0, r11, c13, c0, 1 // Get CONTEXTIDR | |
1785 | mov r3, #0 // Load kernel asid | |
1786 | mcr p15, 0, r3, c13, c0, 1 // Set CONTEXTIDR | |
1787 | isb | |
1788 | #endif | |
1789 | mov r0, r8 // Get current cpu in arg 0 | |
1790 | mov r1, SIGPdec // Decrementer signal in arg1 | |
1791 | mov r2, #0 | |
1792 | mov r3, #0 | |
1793 | bl EXT(cpu_signal) // Call cpu_signal | |
1794 | ||
1795 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
1796 | ||
1797 | #if __ARM_VFP__ | |
1798 | add r0, sp, SS_SIZE // Get vfp state pointer | |
1799 | bic r0, #(VSS_ALIGN_NUM - 1) // Align to arm_vfpsaved_state alignment | |
1800 | add r0, VSS_ALIGN // Get the actual vfp save area | |
1801 | bl EXT(vfp_load) // Load the desired VFP state from the stack | |
1802 | #endif | |
1803 | ||
1804 | clrex // clear exclusive memory tag | |
1805 | #if __ARM_ENABLE_WFE_ | |
1806 | sev | |
1807 | #endif | |
1808 | #if __ARM_USER_PROTECT__ | |
// Restore the TTBR0/CONTEXTIDR values captured at entry to this path.
1809 | mcr p15, 0, r10, c2, c0, 0 // Set TTBR0 | |
1810 | mcr p15, 0, r11, c13, c0, 1 // Set CONTEXTIDR | |
1811 | isb | |
1812 | #endif | |
1813 | ldr lr, [sp, SS_PC] | |
1814 | ldmia sp, {r0-r12} // Restore saved registers | |
// movs pc, lr is the ARM exception return: it also copies SPSR to CPSR.
1815 | movs pc, lr // Return from fiq | |
1816 | ||
1817 | 4: | |
/*
 * Common rejoin point for the user and kernel interrupt paths.
 * Register contract here: r0 = 1 if interrupted from user, 0 if from
 * kernel; r4 = saved SPSR; r5 = pointer to the saved register context;
 * r6 = interrupt stack pointer; r9 = current thread (from TPIDRPRW).
 * This bumps the preemption count, publishes the saved context in
 * CPU_INT_STATE, updates IRQ statistics, does the precise-time and
 * telemetry/kdebug bookkeeping, and dispatches to rtclock_intr().
 */
1818 | cpsid i, #PSR_IRQ_MODE | |
1819 | cpsie f | |
1820 | mov sp, r6 // Restore the stack pointer | |
1821 | msr spsr_cxsf, r4 // Restore the spsr | |
1822 | ldr r2, [r9, ACT_PREEMPT_CNT] // Load preemption count | |
1823 | add r2, r2, #1 // Increment count | |
1824 | str r2, [r9, ACT_PREEMPT_CNT] // Update preemption count | |
1825 | ldr r4, [r9, ACT_CPUDATAP] // Get current cpu | |
1826 | str r5, [r4, CPU_INT_STATE] | |
1827 | ldr r3, [r4, CPU_STAT_IRQ] // Get IRQ count | |
1828 | add r3, r3, #1 // Increment count | |
1829 | str r3, [r4, CPU_STAT_IRQ] // Update IRQ count | |
1830 | ldr r3, [r4, CPU_STAT_IRQ_WAKE] // Get post-wake IRQ count | |
1831 | add r3, r3, #1 // Increment count | |
1832 | str r3, [r4, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count | |
1833 | #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME | |
// Only charge a user->kernel timer transition when we came from user
// (r0 != 0); r8 temporarily preserves r0 across the call.
1834 | movs r0, r0 | |
1835 | beq 5f | |
1836 | mov r8, r0 // Stash our "from_user" boolean value | |
1837 | bl EXT(timer_state_event_user_to_kernel) | |
1838 | mov r0, r8 // Restore our "from_user" value | |
1839 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
1840 | 5: | |
1841 | #endif | |
1842 | #if CONFIG_TELEMETRY | |
1843 | LOAD_ADDR(r4, telemetry_needs_record) // Check if a telemetry record was requested... | |
1844 | ldr r4, [r4] | |
1845 | movs r4, r4 | |
1846 | beq 6f | |
1847 | bl EXT(telemetry_mark_curthread) // ...if so, mark the current thread... | |
1848 | mrc p15, 0, r9, c13, c0, 4 // ...and restore the thread pointer from TPIDRPRW | |
1849 | 6: | |
1850 | #endif | |
1851 | ||
1852 | #ifndef NO_KDEBUG | |
// r4 = kdebug_enable; it stays live (and is re-tested) across the
// rtclock_intr() call below to pair entry/exit trace events.
1853 | LOAD_ADDR(r4, kdebug_enable) | |
1854 | ldr r4, [r4] | |
1855 | movs r4, r4 | |
1856 | ldrne r1, [r9, ACT_CPUDATAP] // Get current cpu | |
1857 | ldrne r0, [r1, CPU_INT_STATE] | |
1858 | COND_EXTERN_BLNE(interrupt_trace) | |
1859 | #endif | |
1860 | bl EXT(interrupt_stats) // Record interrupt statistics | |
1861 | mov r0, #0 | |
1862 | bl EXT(rtclock_intr) // Call second level exception handler | |
1863 | #ifndef NO_KDEBUG | |
1864 | movs r4, r4 | |
1865 | COND_EXTERN_BLNE(interrupt_trace_exit) | |
1866 | #endif | |
1867 | ||
1868 | mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW | |
1869 | ||
// Common unwind (drops the preemption count and returns) is elsewhere.
1870 | b return_from_irq | |
1871 | ||
1872 | /* | |
1873 | * void thread_syscall_return(kern_return_t r0) | |
1874 | * | |
1875 | */ | |
/*
 * Stores the kern_return_t (r0) into the saved user r0 slot of the
 * current thread's PCB so it becomes the trap's return value, optionally
 * emits a mach syscall trace-exit kdebug event (only when kdebug is
 * enabled and the saved r12 -- the trap number -- is negative, i.e. a
 * mach trap), then returns to user space via load_and_go_user.
 */
1876 | .text | |
1877 | .align 2 | |
1878 | .globl EXT(thread_syscall_return) | |
1879 | ||
1880 | LEXT(thread_syscall_return) | |
1881 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
1882 | add r1, r9, ACT_PCBDATA // Get User PCB | |
1883 | str r0, [r1, SS_R0] // set return value | |
1884 | #ifndef NO_KDEBUG | |
1885 | LOAD_ADDR(r4, kdebug_enable) | |
1886 | ldr r4, [r4] | |
1887 | movs r4, r4 | |
1888 | beq load_and_go_user | |
1889 | ldr r12, [r1, SS_R12] // Load syscall number | |
// r1 = -r12; the GT condition below fires only for negative (mach) traps.
1890 | rsbs r1, r12, #0 // make the syscall positive (if negative) | |
1891 | COND_EXTERN_BLGT(mach_syscall_trace_exit) | |
1892 | #endif | |
1893 | b load_and_go_user | |
1894 | ||
1895 | /* | |
1896 | * void thread_exception_return(void) | |
1897 | * void thread_bootstrap_return(void) | |
1898 | * | |
1899 | */ | |
1900 | .text | |
1901 | .globl EXT(thread_exception_return) | |
1902 | .globl EXT(thread_bootstrap_return) | |
1903 | ||
1904 | LEXT(thread_bootstrap_return) | |
1905 | #if CONFIG_DTRACE | |
1906 | bl EXT(dtrace_thread_bootstrap) | |
1907 | #endif | |
1908 | // Fall through | |
1909 | ||
1910 | LEXT(thread_exception_return) | |
1911 | ||
/*
 * load_and_go_user:
 *   - disables IRQs and resets the thread's IO tier override to -1
 *   - loops handling pending ASTs (ast_taken_user) until none remain
 *   - (MACH_ASSERT) panics if the preemption or RW-lock counts are
 *     nonzero on the way out to user space
 *   - restores user debug and VFP state, validates that the saved CPSR
 *     really returns to user mode (else ExceptionVectorPanic), switches
 *     back to the user TTBR0/ASID when __ARM_USER_PROTECT__, and finally
 *     restores the user registers from the PCB, returning with
 *     "movs pc, lr" (which also moves SPSR into CPSR).
 */
1912 | load_and_go_user: | |
1913 | /* | |
1914 | * Restore user mode states and go back to user mode | |
1915 | */ | |
1916 | cpsid i // Disable irq | |
1917 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
1918 | ||
1919 | mvn r0, #0 | |
1920 | str r0, [r9, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user | |
1921 | ||
1922 | ldr r8, [r9, ACT_CPUDATAP] // Get current cpu | |
1923 | ldr r5, [r8, CPU_PENDING_AST] // Get ASTs | |
1924 | cmp r5, #0 // Test if ASTs pending | |
1925 | beq return_to_user_now // Branch if no ASTs | |
1926 | ||
1927 | #if __BIGGEST_ALIGNMENT__ > 4 | |
// Realign sp to 16 bytes around the call, saving the original sp in the
// slack so it can be reloaded afterwards.
1928 | and r2, sp, #0x0F // sp mod 16-bytes | |
1929 | cmp r2, #4 // need space for the sp on the stack | |
1930 | addlt r2, r2, #0x10 // make room if needed, but keep stack aligned | |
1931 | mov r3, sp // get current sp | |
1932 | sub r2, sp, r2 // align stack | |
1933 | str r3, [sp] // store previous sp on stack | |
1934 | #endif | |
1935 | ||
1936 | bl EXT(ast_taken_user) // Handle all ASTs (may continue via thread_exception_return) | |
1937 | ||
1938 | #if __BIGGEST_ALIGNMENT__ > 4 | |
1939 | ldr sp, [sp] // Restore the stack pointer | |
1940 | #endif | |
1941 | ||
1942 | mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW | |
1943 | b load_and_go_user // Loop back | |
1944 | ||
1945 | return_to_user_now: | |
1946 | ||
1947 | #if MACH_ASSERT | |
1948 | /* | |
1949 | * Assert that the preemption level is zero prior to the return to user space | |
1950 | */ | |
1951 | ldr r1, [r9, ACT_PREEMPT_CNT] // Load preemption count | |
1952 | movs r1, r1 // Test | |
1953 | beq 0f // Continue if zero, or... | |
1954 | adr r0, L_lagu_panic_str // Load the panic string... | |
1955 | blx EXT(panic) // Finally, panic | |
1956 | 0: | |
1957 | ldr r2, [r9, TH_RWLOCK_CNT] // Load RW lock count | |
1958 | movs r2, r2 // Test | |
1959 | beq 0f // Continue if zero, or... | |
1960 | adr r0, L_lagu_rwlock_cnt_panic_str // Load the panic string... | |
1961 | mov r1, r9 // Thread argument for panic string | |
1962 | blx EXT(panic) // Finally, panic | |
1963 | #endif | |
1964 | ||
1965 | 0: | |
1966 | #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME | |
1967 | bl EXT(timer_state_event_kernel_to_user) | |
1968 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
1969 | ldr r8, [r9, ACT_CPUDATAP] // Get current cpu data | |
1970 | #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ | |
1971 | #if __ARM_DEBUG__ >= 6 | |
// Install the thread's debug-register state only if it differs from
// what is currently loaded on this cpu.
1972 | ldr r0, [r9, ACT_DEBUGDATA] | |
1973 | ldr r6, [r8, CPU_USER_DEBUG] | |
1974 | cmp r0, r6 // test if debug registers need to be changed | |
1975 | beq 1f | |
1976 | bl EXT(arm_debug_set) // argument is already in r0 | |
1977 | mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW | |
1978 | 1: | |
1979 | #endif | |
1980 | #if __ARM_VFP__ | |
1981 | add r0, r9, ACT_UVFP // Get the address of the user VFP save area | |
1982 | bl EXT(vfp_load) // Load the desired VFP state from ACT_UVFP | |
1983 | #endif | |
// Sanity-check that the saved CPSR really returns to user mode; anything
// else means corrupted state, so panic via ExceptionVectorPanic.
1984 | add r0, r9, ACT_PCBDATA // Get User PCB | |
1985 | ldr r4, [r0, SS_CPSR] // Get saved cpsr | |
1986 | and r3, r4, #PSR_MODE_MASK // Extract current mode | |
1987 | cmp r3, #PSR_USER_MODE // Check user mode | |
1988 | movne r0, r3 | |
1989 | bne EXT(ExceptionVectorPanic) | |
1990 | ||
1991 | msr spsr_cxsf, r4 // Restore spsr(user mode cpsr) | |
1992 | mov sp, r0 // Get User PCB | |
1993 | ||
1994 | clrex // clear exclusive memory tag | |
1995 | #if __ARM_ENABLE_WFE_ | |
1996 | sev | |
1997 | #endif | |
1998 | #if __ARM_USER_PROTECT__ | |
1999 | ldr r3, [r9, ACT_UPTW_TTB] // Load thread ttb | |
2000 | mcr p15, 0, r3, c2, c0, 0 // Set TTBR0 | |
2001 | ldr r2, [r9, ACT_ASID] // Load thread asid | |
2002 | mcr p15, 0, r2, c13, c0, 1 | |
2003 | isb | |
2004 | #endif | |
2005 | ldr lr, [sp, SS_PC] // Restore user mode pc | |
2006 | ldmia sp, {r0-r12, sp, lr}^ // Restore the other user mode registers | |
2007 | nop // Hardware problem | |
2008 | movs pc, lr // Return to user | |
2009 | ||
// Panic format strings referenced (via adr) by the MACH_ASSERT checks in
// load_and_go_user and by ExceptionVectorPanic below.
2010 | .align 2 | |
2011 | L_lagu_panic_str: | |
2012 | .asciz "load_and_go_user: preemption_level %d" | |
2013 | .align 2 | |
2014 | ||
2015 | .align 2 | |
2016 | L_lagu_rwlock_cnt_panic_str: | |
2017 | .asciz "load_and_go_user: RW lock count not 0 on thread %p (%u)" | |
2018 | .align 2 | |
2019 | ||
2020 | .align 2 | |
2021 | L_evimpanic_str: | |
// .ascii with an explicit \000 terminator (equivalent to .asciz).
2022 | .ascii "Exception Vector: Illegal Mode: 0x%08X\n\000" | |
2023 | .align 2 | |
2024 | ||
/*
 * ExceptionVectorPanic: called when an exception return finds a saved
 * CPSR whose mode bits are not user mode.  r0 = the offending mode bits
 * (becomes the %08X argument).  Switches to SVC mode with IRQs masked,
 * calls panic(), and parks in an infinite loop should panic ever return.
 */
2025 | .text | |
2026 | .align 2 | |
2027 | .globl EXT(ExceptionVectorPanic) | |
2028 | ||
2029 | LEXT(ExceptionVectorPanic) | |
2030 | cpsid i, #PSR_SVC_MODE | |
2031 | mov r1, r0 | |
2032 | adr r0, L_evimpanic_str | |
2033 | blx EXT(panic) | |
2034 | b . | |
2035 | ||
// NOTE(review): LOAD_ADDR_GEN_DEF presumably emits the address literal
// that the LOAD_ADDR(reg, sym) uses above consume (e.g. for
// mach_trap_table / kern_invalid) -- confirm against globals_asm.h.
2036 | #include "globals_asm.h" | |
2037 | ||
2038 | LOAD_ADDR_GEN_DEF(mach_trap_table) | |
2039 | LOAD_ADDR_GEN_DEF(kern_invalid) | |
2040 | ||
2041 | /* vim: set ts=4: */