osfmk/arm/trap.c (apple/xnu, xnu-4570.71.2)
1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <kern/debug.h>
29#include <mach_kdp.h>
30#include <machine/endian.h>
31#include <mach/mach_types.h>
32#include <mach/boolean.h>
33#include <mach/vm_prot.h>
34#include <mach/vm_types.h>
35#include <mach/mach_traps.h>
36
37#include <mach/exception.h>
38#include <mach/kern_return.h>
39#include <mach/vm_param.h>
40#include <mach/message.h>
41#include <mach/machine/thread_status.h>
42
43#include <vm/vm_page.h>
44#include <vm/pmap.h>
45#include <vm/vm_fault.h>
46#include <vm/vm_kern.h>
47
48#include <kern/ast.h>
49#include <kern/thread.h>
50#include <kern/task.h>
51#include <kern/sched_prim.h>
52
53#include <sys/kdebug.h>
54
55#include <arm/trap.h>
56#include <arm/caches_internal.h>
57#include <arm/cpu_data_internal.h>
58#include <arm/machdep_call.h>
59#include <arm/machine_routines.h>
60#include <arm/misc_protos.h>
61#include <arm/setjmp.h>
62#include <arm/proc_reg.h>
63
64/*
65 * External function prototypes.
66 */
67#include <kern/syscall_sw.h>
68#include <kern/host.h>
69#include <kern/processor.h>
70
71
72#if CONFIG_DTRACE
73extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs, unsigned int instr);
74extern boolean_t dtrace_tally_fault(user_addr_t);
75
76/* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions
77 over from that file. Need to keep these in sync! */
78#define FASTTRAP_ARM_INSTR 0xe7ffdefc
79#define FASTTRAP_THUMB_INSTR 0xdefc
80
81#define FASTTRAP_ARM_RET_INSTR 0xe7ffdefb
82#define FASTTRAP_THUMB_RET_INSTR 0xdefb
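/*
 * Both forms live in the architecturally UNDEFINED encoding space (ARM UDF,
 * 0xE7Fxxxxx with bits[7:4] == 0xF; Thumb UDF, 0xDExx), so executing either
 * always raises the undefined-instruction exception handled by sleh_undef().
 */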
83
84/* See <rdar://problem/4613924> */
85perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
86#endif
87
88#define COPYIN(dst, src, size) \
89 ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \
90 copyin_kern(dst, src, size) \
91 : \
92 copyin(dst, src, size)
93
94#define COPYOUT(src, dst, size) \
95 ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \
96 copyout_kern(src, dst, size) \
97 : \
98 copyout(src, dst, size)
99
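/*
 * Illustrative sketch (not part of the original source): COPYIN/COPYOUT pick
 * the kernel-to-kernel copy routines when the saved CPSR shows the trap was
 * taken from a privileged mode, and the user copy routines otherwise, so the
 * same call site works for both cases.  Fetching the faulting instruction
 * word looks like this (assuming a local `regs' pointing at the saved state):
 */
#if 0	/* example only, not compiled */
	uint32_t instr;

	if (COPYIN((user_addr_t)(regs->pc), (char *)&instr,
	    (vm_size_t)sizeof(uint32_t)) != KERN_SUCCESS)
		goto exit;	/* the faulting pc itself is unreadable */
#endif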
100/* Second-level exception handlers forward declarations */
101void sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
102void sleh_abort(struct arm_saved_state *, int);
103static kern_return_t sleh_alignment(struct arm_saved_state *);
104static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);
105
106int sleh_alignment_count = 0;
107int trap_on_alignment_fault = 0;
108
109/*
110 * Routine: sleh_undef
111 * Function: Second level exception handler for the undefined instruction exception
112 */
113
114void
115sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
116{
117 exception_type_t exception = EXC_BAD_INSTRUCTION;
118 mach_exception_data_type_t code[2] = {EXC_ARM_UNDEFINED};
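/* code[0] identifies the exception flavor; code[1] is filled in below with the faulting instruction. */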
119 mach_msg_type_number_t codeCnt = 2;
120 thread_t thread = current_thread();
121 vm_offset_t recover;
122
123 recover = thread->recover;
124 thread->recover = 0;
125
126 getCpuDatap()->cpu_stat.undef_ex_cnt++;
127
128 /* Inherit the interrupt masks from the interrupted context */
129 if (!(regs->cpsr & PSR_INTMASK))
130 ml_set_interrupts_enabled(TRUE);
131
132#if CONFIG_DTRACE
133 if (tempDTraceTrapHook) {
134 if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
135 /*
136 * If it succeeds, we are done...
137 */
138 goto exit;
139 }
140 }
141
142 /* Check to see if we've hit a userland probe */
143 if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
144 if (regs->cpsr & PSR_TF) {
145 uint16_t instr;
146
147 if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS)
148 goto exit;
149
150 if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
151 if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
152 /* If it succeeds, we are done... */
153 goto exit;
154 }
155 } else {
156 uint32_t instr;
157
158 if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
159 goto exit;
160
161 if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
162 if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
163 /* If it succeeds, we are done... */
164 goto exit;
165 }
166 }
167 }
168#endif /* CONFIG_DTRACE */
169
170
171 if (regs->cpsr & PSR_TF) {
172 unsigned short instr;
173
174 if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
175 goto exit;
176
177 if (IS_THUMB32(instr)) {
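/*
 * First halfword of a 32-bit Thumb-2 instruction: fetch the second halfword
 * and report the combined encoding in code[1], first halfword in the upper
 * 16 bits.
 */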
178 unsigned int instr32;
179
180 instr32 = (instr<<16);
181
182 if(COPYIN((user_addr_t)(((unsigned short *) (regs->pc))+1), (char *)&instr,(vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
183 goto exit;
184
185 instr32 |= instr;
186 code[1] = instr32;
187
188#if __ARM_VFP__
189 if (IS_THUMB_VFP(instr32)) {
190 /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
191 if (!get_vfp_enabled())
192 panic("VFP was disabled (thumb); VFP should always be enabled");
193 }
194#endif
195 } else {
196 /* I don't believe we have any 16 bit VFP instructions, so just set code[1]. */
197 code[1] = instr;
198
199 if (IS_THUMB_GDB_TRAP(instr)) {
200 exception = EXC_BREAKPOINT;
201 code[0] = EXC_ARM_BREAKPOINT;
202 }
203 }
204 } else {
205 uint32_t instr;
206
207 if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
208 goto exit;
209
210 code[1] = instr;
211#if __ARM_VFP__
212 if (IS_ARM_VFP(instr)) {
213 /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
214 if (!get_vfp_enabled())
215 panic("VFP was disabled (arm); VFP should always be enabled");
216 }
217#endif
218
219 if (IS_ARM_GDB_TRAP(instr)) {
220 exception = EXC_BREAKPOINT;
221 code[0] = EXC_ARM_BREAKPOINT;
222 }
223 }
224
225 if (!((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)) {
226 boolean_t intr;
227
228 intr = ml_set_interrupts_enabled(FALSE);
229
230 if (exception == EXC_BREAKPOINT) {
231 /* Save off the context here (so that the debug logic
232 * can see the original state of this thread).
233 */
234 vm_offset_t kstackptr = current_thread()->machine.kstackptr;
235 *((arm_saved_state_t *) kstackptr) = *regs;
236
237 DebuggerCall(exception, regs);
238 (void) ml_set_interrupts_enabled(intr);
239 goto exit;
240 }
241 panic_context(exception, (void *)regs, "undefined kernel instruction\n"
242 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
243 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
244 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
245 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
246 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
247 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
248 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
249 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
250 regs->r[12], regs->sp, regs->lr, regs->pc,
251 regs->cpsr, regs->fsr, regs->far);
252
253 (void) ml_set_interrupts_enabled(intr);
254
255 } else {
256 exception_triage(exception, code, codeCnt);
257 /* NOTREACHED */
258 }
259
260exit:
261 if (recover)
262 thread->recover = recover;
263}
264
265/*
266 * Routine: sleh_abort
267 * Function: Second level exception handler for aborts (Prefetch/Data)
268 */
269
270void
271sleh_abort(struct arm_saved_state * regs, int type)
272{
273 int status;
274 int debug_status=0;
275 int spsr;
276 int exc;
277 mach_exception_data_type_t codes[2];
278 vm_map_t map;
279 vm_map_address_t vaddr;
280 vm_map_address_t fault_addr;
281 vm_prot_t fault_type;
282 kern_return_t result;
283 vm_offset_t recover;
284 thread_t thread = current_thread();
285 boolean_t intr;
286
287 recover = thread->recover;
288 thread->recover = 0;
289
290 status = regs->fsr & FSR_MASK;
291 spsr = regs->cpsr;
292
293 /* The DFSR/IFSR.ExT bit indicates an "IMPLEMENTATION DEFINED" classification.
294 * Allow a platform-level error handler to decode it.
295 */
296 if ((regs->fsr) & FSR_EXT) {
297 cpu_data_t *cdp = getCpuDatap();
298
299 if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
300 (*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, 0);
301 /* If a platform error handler is registered, expect it to panic, not fall through */
302 panic("Unexpected return from platform_error_handler");
303 }
304 }
305
306 /* Done with asynchronous handling; re-enable here so that subsequent aborts are taken as early as possible. */
307 reenable_async_aborts();
308
309 if (ml_at_interrupt_context())
310 panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);
311
312 fault_addr = vaddr = regs->far;
313
314 if (type == T_DATA_ABT) {
315 getCpuDatap()->cpu_stat.data_ex_cnt++;
316 } else { /* T_PREFETCH_ABT */
317 getCpuDatap()->cpu_stat.instr_ex_cnt++;
318 fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
319 }
320
321 if (status == FSR_DEBUG)
322 debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;
323
324 /* Inherit the interrupt masks from the interrupted context */
325 if (!(spsr & PSR_INTMASK))
326 ml_set_interrupts_enabled(TRUE);
327
328 if (type == T_DATA_ABT) {
329 /*
330 * Now that interrupts are reenabled, we can perform any needed
331 * copyin operations.
332 *
333 * Because we have reenabled interrupts, any instruction copy
334 * must be a copyin, even on UP systems.
335 */
336
337 if (regs->fsr & DFSR_WRITE) {
338 fault_type = (VM_PROT_READ | VM_PROT_WRITE);
339 /* Cache maintenance operations report their faults as write accesses; treat those as reads. */
340 /* Cache operations are issued from ARM (not Thumb) mode for now. */
341 if (!(regs->cpsr & PSR_TF)) {
342 unsigned int ins;
343
344 if(COPYIN((user_addr_t)(regs->pc), (char *)&ins,(vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
345 goto exit;
346
347 if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins))
348 fault_type = VM_PROT_READ;
349 }
350 } else {
351 fault_type = VM_PROT_READ;
352 /*
353 * The DFSR does not get the "write" bit set when a swp
354 * instruction faults, even when the faulting access is a
355 * write, so force a write fault for swp below.
356 */
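/*
 * Worked example (illustrative): "swp r0, r1, [r2]" encodes as 0xE1020091;
 * assuming ARM_SWP/ARM_SWP_MASK select the SWP/SWPB opcode pattern, the
 * check below promotes such a fault to a write fault even though the DFSR
 * reported a read.
 */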
357 if (!(regs->cpsr & PSR_TF)) {
358 unsigned int ins;
359
360 if(COPYIN((user_addr_t)(regs->pc), (char *)&ins,(vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
361 goto exit;
362
363 if ((ins & ARM_SWP_MASK) == ARM_SWP)
364 fault_type = VM_PROT_WRITE;
365 }
366 }
367 }
368
369 if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
370 /* Fault in kernel mode */
371
372 if ((status == FSR_DEBUG)
373 && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
374 && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
375 /* A watchpoint hit in kernel mode was most likely taken in a copyin/copyout that we do not want to
376 * abort. Turn off watchpoints and keep going; they are turned back on in load_and_go_user.
377 */
378 arm_debug_set(NULL);
379 goto exit;
380 }
381
382 if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {
383
384 intr = ml_set_interrupts_enabled(FALSE);
385 if (status == FSR_DEBUG) {
386 DebuggerCall(EXC_BREAKPOINT, regs);
387 (void) ml_set_interrupts_enabled(intr);
388 goto exit;
389 }
390 panic_context(EXC_BAD_ACCESS, (void*)regs, "sleh_abort: prefetch abort in kernel mode: fault_addr=0x%x\n"
391 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
392 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
393 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
394 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
395 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
396 fault_addr,
397 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
398 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
399 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
400 regs->r[12], regs->sp, regs->lr, regs->pc,
401 regs->cpsr, regs->fsr, regs->far);
402
403 (void) ml_set_interrupts_enabled(intr);
404
405 } else if (TEST_FSR_VMFAULT(status)) {
406
407#if CONFIG_DTRACE
408 if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
409 if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
410 /* Point to next instruction */
411 regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t*) (regs->pc)))) ? 2 : 4;
412 goto exit;
413 } else {
414 intr = ml_set_interrupts_enabled(FALSE);
415 panic_context(EXC_BAD_ACCESS, (void *)regs, "Unexpected page fault under dtrace_probe\n"
416 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
417 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
418 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
419 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
420 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
421 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
422 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
423 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
424 regs->r[12], regs->sp, regs->lr, regs->pc,
425 regs->cpsr, regs->fsr, regs->far);
426
427 (void) ml_set_interrupts_enabled(intr);
428
429 goto exit;
430 }
431 }
432#endif
433
434 if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL)
435 map = kernel_map;
436 else
437 map = thread->map;
438
439 /* check to see if it is just a pmap ref/modify fault */
440 result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
441 if (result == KERN_SUCCESS)
442 goto exit;
443
444 /*
445 * We have to "fault" the page in.
446 */
447 result = vm_fault(map, fault_addr,
448 fault_type,
449 FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
450 (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
451
452 if (result == KERN_SUCCESS) {
453 goto exit;
454 } else {
455 /*
456 * If a recovery point was registered, redirect execution to it.
457 */
458 if (recover != 0) {
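/*
 * Bit 0 of the recovery address carries the Thumb state of the recovery
 * handler, as in an interworking branch: strip it from the resumed pc and
 * propagate it into CPSR.T.
 */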
459 regs->pc = (register_t) (recover & ~0x1);
460 regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
461 goto exit;
462 }
463 }
464 } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
465 result = sleh_alignment(regs);
466 if (result == KERN_SUCCESS) {
467 goto exit;
468 } else {
469 intr = ml_set_interrupts_enabled(FALSE);
470
471 panic_context(EXC_BAD_ACCESS, (void *)regs, "unaligned kernel data access: pc=0x%08x fault_addr=0x%x\n"
472 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
473 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
474 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
475 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
476 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
477 regs->pc, fault_addr,
478 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
479 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
480 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
481 regs->r[12], regs->sp, regs->lr, regs->pc,
482 regs->cpsr, regs->fsr, regs->far);
483
484 (void) ml_set_interrupts_enabled(intr);
485
486 goto exit;
487 }
488
489 }
490 intr = ml_set_interrupts_enabled(FALSE);
491
492 panic_context(EXC_BAD_ACCESS, (void *)regs, "kernel abort type %d: fault_type=0x%x, fault_addr=0x%x\n"
493 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
494 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
495 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
496 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
497 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
498 type, fault_type, fault_addr,
499 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
500 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
501 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
502 regs->r[12], regs->sp, regs->lr, regs->pc,
503 regs->cpsr, regs->fsr, regs->far);
504
505 (void) ml_set_interrupts_enabled(intr);
506
507 goto exit;
508 }
509 /* Fault in user mode */
510
511 if (TEST_FSR_VMFAULT(status)) {
512 map = thread->map;
513
514#if CONFIG_DTRACE
515 if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
516 if (dtrace_tally_fault(fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
517 if (recover) {
518 regs->pc = recover;
519 } else {
520 intr = ml_set_interrupts_enabled(FALSE);
521
522 panic_context(EXC_BAD_ACCESS, (void *)regs, "copyin/out has no recovery point\n"
523 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
524 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
525 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
526 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
527 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
528 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
529 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
530 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
531 regs->r[12], regs->sp, regs->lr, regs->pc,
532 regs->cpsr, regs->fsr, regs->far);
533
534 (void) ml_set_interrupts_enabled(intr);
535 }
536 goto exit;
537 } else {
538 intr = ml_set_interrupts_enabled(FALSE);
539
540 panic_context(EXC_BAD_ACCESS, (void*)regs, "Unexpected UMW page fault under dtrace_probe\n"
541 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
542 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
543 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
544 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
545 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
546 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
547 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
548 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
549 regs->r[12], regs->sp, regs->lr, regs->pc,
550 regs->cpsr, regs->fsr, regs->far);
551
552 (void) ml_set_interrupts_enabled(intr);
553
554 goto exit;
555 }
556 }
557#endif
558
559 /* check to see if it is just a pmap ref/modify fault */
560 result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, TRUE);
561 if (result != KERN_SUCCESS) {
562 /*
563 * We have to "fault" the page in.
564 */
565 result = vm_fault(map, fault_addr, fault_type,
566 FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
567 THREAD_ABORTSAFE, NULL, 0);
568 }
569 if (result == KERN_SUCCESS || result == KERN_ABORTED) {
570 goto exception_return;
571 }
572 exc = EXC_BAD_ACCESS;
573 codes[0] = result;
574 } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
575 if (sleh_alignment(regs) == KERN_SUCCESS) {
576 goto exception_return;
577 }
578 exc = EXC_BAD_ACCESS;
579 codes[0] = EXC_ARM_DA_ALIGN;
580 } else if (status == FSR_DEBUG) {
581 exc = EXC_BREAKPOINT;
582 codes[0] = EXC_ARM_DA_DEBUG;
583 } else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
584 exc = EXC_BAD_ACCESS;
585 codes[0] = KERN_INVALID_ADDRESS;
586 } else {
587 exc = EXC_BAD_ACCESS;
588 codes[0] = KERN_FAILURE;
589 }
590
591 codes[1] = vaddr;
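/*
 * codes[0] holds the reason (a kern_return_t or EXC_ARM_* subcode) and
 * codes[1] the faulting address; exception_triage() delivers them as a
 * Mach exception to the task's handler and does not return here.
 */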
592 exception_triage(exc, codes, 2);
593 /* NOTREACHED */
594
595exception_return:
596 if (recover)
597 thread->recover = recover;
598 thread_exception_return();
599 /* NOTREACHED */
600
601exit:
602 if (recover)
603 thread->recover = recover;
604 return;
605}
606
607
608/*
609 * Routine: sleh_alignment
610 * Function: Second level exception handler for alignment data faults
611 */
612
613static kern_return_t
614sleh_alignment(struct arm_saved_state * regs)
615{
616 unsigned int status;
617 unsigned int ins;
618 unsigned int rd_index;
619 unsigned int base_index;
620 unsigned int paddr;
621 void *src;
622 unsigned int reg_list;
623 unsigned int pre;
624 unsigned int up;
625 unsigned int write_back;
626 kern_return_t rc = KERN_SUCCESS;
627
628 getCpuDatap()->cpu_stat.unaligned_cnt++;
629
630 /* Do not try to emulate in modified execution states */
631 if (regs->cpsr & (PSR_EF | PSR_JF))
632 return KERN_NOT_SUPPORTED;
633
634 /* Disallow emulation of kernel instructions */
635 if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE)
636 return KERN_NOT_SUPPORTED;
637
638
639#define ALIGN_THRESHOLD 1024
640 if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
641 (ALIGN_THRESHOLD - 1))
642 kprintf("sleh_alignment: %d more alignment faults: %d total\n",
643 ALIGN_THRESHOLD, sleh_alignment_count);
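/* i.e. one message every ALIGN_THRESHOLD (1024) alignment faults. */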
644
645 if ((trap_on_alignment_fault != 0)
646 && (sleh_alignment_count % trap_on_alignment_fault == 0))
647 return KERN_NOT_SUPPORTED;
648
649 status = regs->fsr;
650 paddr = regs->far;
651
652 if (regs->cpsr & PSR_TF) {
653 unsigned short ins16;
654
655 /* Get aborted instruction */
656#if __ARM_SMP__ || __ARM_USER_PROTECT__
657 if(COPYIN((user_addr_t)(regs->pc), (char *)&ins16,(vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
658 /* Failed to fetch the instruction; return success so the exception is re-driven */
659 return KERN_SUCCESS;
660 }
661#else
662 ins16 = *(unsigned short *) (regs->pc);
663#endif
664
665 /*
666 * Map multi-word Thumb loads and stores to their ARM
667 * equivalents.
668 * Don't worry about single-word instructions, since those are
669 * handled in hardware.
670 */
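/*
 * Worked example (illustrative, assuming the usual Thumb encodings): the
 * Thumb instruction "stmia r2!, {r0, r1}" (0xC203) has reg_list 0x03 and
 * base_index 2, and is rewritten below as 0xE8A20003, i.e. the equivalent
 * ARM "stmia r2!, {r0, r1}", which the ARM emulation path then handles.
 */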
671
672 reg_list = ins16 & 0xff;
673 if (reg_list == 0)
674 return KERN_NOT_SUPPORTED;
675
676 if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
677 ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
678 base_index = (ins16 >> 8) & 0x7;
679 ins = 0xE8800000 | (base_index << 16) | reg_list;
680 if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA)
681 ins |= (1 << 20);
682 if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
683 !(reg_list & (1 << base_index)))
684 ins |= (1 << 21);
685 } else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
686 unsigned int r = (ins16 >> 8) & 1;
687 ins = 0xE8BD0000 | (r << 15) | reg_list;
688 } else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
689 unsigned int r = (ins16 >> 8) & 1;
690 ins = 0xE92D0000 | (r << 14) | reg_list;
691 } else {
692 return KERN_NOT_SUPPORTED;
693 }
694 } else {
695 /* Get aborted instruction */
696#if __ARM_SMP__ || __ARM_USER_PROTECT__
697 if(COPYIN((user_addr_t)(regs->pc), (char *)&ins,(vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
698 /* Failed to fetch the instruction; return success so the exception is re-driven */
699 return KERN_SUCCESS;
700 }
701#else
702 ins = *(unsigned int *) (regs->pc);
703#endif
704 }
705
706 /* Don't try to emulate unconditional instructions */
707 if ((ins & 0xF0000000) == 0xF0000000)
708 return KERN_NOT_SUPPORTED;
709
710 pre = (ins >> 24) & 1;
711 up = (ins >> 23) & 1;
712 reg_list = ins & 0xffff;
713 write_back = (ins >> 21) & 1;
714 base_index = (ins >> 16) & 0xf;
715
716 if ((ins & ARM_BLK_MASK) == ARM_STM) { /* STM or LDM */
717 int reg_count = 0;
718 int waddr;
719
720 for (rd_index = 0; rd_index < 16; rd_index++) {
721 if (reg_list & (1 << rd_index))
722 reg_count++;
723 }
724
725 paddr = regs->r[base_index];
726
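/*
 * Compute the transfer window and the written-back base for the four ARM
 * block addressing modes (IA/IB/DA/DB).  For example, an LDMDB with a
 * base of 0x1000 and four registers set in reg_list transfers from
 * 0x0FF0 through 0x0FFC and, with write-back, leaves the base at 0x0FF0.
 */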
727 switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
728 /* Increment after */
729 case ARM_INCREMENT:
730 waddr = paddr + reg_count * 4;
731 break;
732
733 /* Increment before */
734 case ARM_POST_INDEXING | ARM_INCREMENT:
735 waddr = paddr + reg_count * 4;
736 paddr += 4;
737 break;
738
739 /* Decrement after */
740 case 0:
741 waddr = paddr - reg_count * 4;
742 paddr = waddr + 4;
743 break;
744
745 /* Decrement before */
746 case ARM_POST_INDEXING:
747 waddr = paddr - reg_count * 4;
748 paddr = waddr;
749 break;
750
751 default:
752 waddr = 0;
753 }
754
755 for (rd_index = 0; rd_index < 16; rd_index++) {
756 if (reg_list & (1 << rd_index)) {
757 src = &regs->r[rd_index];
758
759 if ((ins & (1 << 20)) == 0) /* STM */
760 rc = COPYOUT(src, paddr, 4);
761 else /* LDM */
762 rc = COPYIN(paddr, src, 4);
763
764 if (rc != KERN_SUCCESS)
765 break;
766
767 paddr += 4;
768 }
769 }
770
771 paddr = waddr;
772 } else {
773 rc = KERN_NOT_SUPPORTED; /* not a multi-word transfer we emulate */
774 }
775
776 if (rc == KERN_SUCCESS) {
777 if (regs->cpsr & PSR_TF)
778 regs->pc += 2;
779 else
780 regs->pc += 4;
781
782 if (write_back)
783 regs->r[base_index] = paddr;
784 }
785 return (rc);
786}
787
788
789#ifndef NO_KDEBUG
790/* XXX quell warnings */
791void syscall_trace(struct arm_saved_state * regs);
792void syscall_trace_exit(unsigned int, unsigned int);
793void mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
794void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
795void interrupt_trace(struct arm_saved_state * regs);
796void interrupt_trace_exit(void);
797
798/* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */
799void
800syscall_trace(
801 struct arm_saved_state * regs)
802{
803 kprintf("syscall: %d\n", regs->r[12]);
804}
805
806void
807syscall_trace_exit(
808 unsigned int r0,
809 unsigned int r1)
810{
811 kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
812}
813
814void
815mach_syscall_trace(
816 struct arm_saved_state * regs,
817 unsigned int call_number)
818{
819 int i, argc;
820 int kdarg[3] = {0, 0, 0};
821
822 argc = mach_trap_table[call_number].mach_trap_arg_count;
823
824 if (argc > 3)
825 argc = 3;
826
827 for (i = 0; i < argc; i++)
828 kdarg[i] = (int) regs->r[i];
829
830 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
831 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
832 kdarg[0], kdarg[1], kdarg[2], 0, 0);
833
834}
835
836void
837mach_syscall_trace_exit(
838 unsigned int retval,
839 unsigned int call_number)
840{
841 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
842 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
843 retval, 0, 0, 0, 0);
844}
845
846void
847interrupt_trace(
848 struct arm_saved_state * regs)
849{
850#define UMODE(rp) (((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)
851
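/* Kernel-mode pcs are unslid before being traced so the kdebug stream does not leak the kernel's KASLR slide; user pcs are logged as-is. */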
852 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
853 MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
854 0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
855 UMODE(regs), 0, 0);
856}
857
858void
859interrupt_trace_exit(
860 void)
861{
862 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
863 MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
864 0, 0, 0, 0, 0);
865}
866#endif
867
868/* XXX quell warnings */
869void interrupt_stats(void);
870
871/* This is called from locore.s directly. We only update per-processor interrupt counters in this function */
872void
873interrupt_stats(void)
874{
875 SCHED_STATS_INTERRUPT(current_processor());
876}
877
878static void
879panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
880{
881 panic_context(0, (void*)regs, "%s (saved state:%p)\n"
882 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
883 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
884 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
885 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
886 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
887 msg, regs,
888 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
889 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
890 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
891 regs->r[12], regs->sp, regs->lr, regs->pc,
892 regs->cpsr, regs->fsr, regs->far);
893
894}