/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * Hardware trap/fault handler.
 */

#include <cpus.h>
#include <fast_idle.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/etap_macros.h>
#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif  /* MACH_KGDB */

#include <i386/intel_read_fault.h>

#if MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif  /* MACH_KDB */

#include <string.h>

#include <i386/io_emulate.h>

/*
 * Forward declarations
 */
extern void user_page_fault_continue(
    kern_return_t kr);

extern boolean_t v86_assist(
    thread_t thread,
    struct i386_saved_state *regs);

extern boolean_t check_io_fault(
    struct i386_saved_state *regs);

extern int inst_fetch(
    int eip,
    int cs);

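/*
 * thread_syscall_return delivers a kernel return code to a user-mode
 * caller: on i386 the Mach syscall return value travels in the saved
 * EAX, after which the thread resumes user mode via
 * thread_exception_return().
 */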
void
thread_syscall_return(
    kern_return_t ret)
{
    register thread_act_t thr_act = current_act();
    register struct i386_saved_state *regs = USER_REGS(thr_act);

    regs->eax = ret;
    thread_exception_return();
    /*NOTREACHED*/
}


#if MACH_KDB
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

void
thread_kdb_return(void)
{
    register thread_act_t thr_act = current_act();
    register thread_t cur_thr = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thr_act);

    if (kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
        assert(cur_thr->mutex_count == 0);
#endif  /* MACH_LDEBUG */
        check_simple_locks();
        thread_exception_return();
        /*NOTREACHED*/
    }
}
boolean_t let_ddb_vm_fault = FALSE;

#if NCPUS > 1
extern int kdb_active[NCPUS];
#endif  /* NCPUS > 1 */

#endif  /* MACH_KDB */

void
user_page_fault_continue(
    kern_return_t kr)
{
    register thread_act_t thr_act = current_act();
    register thread_t cur_thr = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thr_act);

    if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
        if (!db_breakpoints_inserted) {
            db_set_breakpoints();
        }
        if (db_watchpoint_list &&
            db_watchpoints_inserted &&
            (regs->err & T_PF_WRITE) &&
            db_find_watchpoint(thr_act->map,
                               (vm_offset_t)regs->cr2,
                               regs))
            kdb_trap(T_WATCHPOINT, 0, regs);
#endif  /* MACH_KDB */
        thread_exception_return();
        /*NOTREACHED*/
    }

#if MACH_KDB
    if (debug_all_traps_with_kdb &&
        kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
        assert(cur_thr->mutex_count == 0);
#endif  /* MACH_LDEBUG */
        check_simple_locks();
        thread_exception_return();
        /*NOTREACHED*/
    }
#endif  /* MACH_KDB */

    i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
    /*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
    int fault_addr;
    int recover_addr;
};

extern struct recovery recover_table[];
extern struct recovery recover_table_end[];

/*
 * Recovery from a successful fault in copyout does not
 * return directly - it retries the pte check, since
 * the 386 ignores write protection in kernel mode.
 */
extern struct recovery retry_table[];
extern struct recovery retry_table_end[];
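
/*
 * Conceptually, each entry pairs the address of a faultable copyin/
 * copyout instruction with the address to resume at if it faults
 * (a sketch only; the actual tables are emitted by assembler macros
 * in locore, and the names below are illustrative, not real symbols):
 *
 *	recover_table[i].fault_addr   = &copyin_move_insn;
 *	recover_table[i].recover_addr = &copyin_fail_exit;
 *
 * kernel_trap() matches regs->eip against fault_addr and either treats
 * the fault as abort-safe or redirects EIP to recover_addr.
 */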

char *trap_type[] = {TRAP_NAMES};
int TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);

/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
boolean_t
kernel_trap(
    register struct i386_saved_state *regs)
{
    int exc;
    int code;
    int subcode;
    int interruptible;
    register int type;
    vm_map_t map;
    kern_return_t result;
    register thread_t thread;
    thread_act_t thr_act;
    etap_data_t probe_data;
    pt_entry_t *pte;
    extern vm_offset_t vm_last_phys;

    type = regs->trapno;
    code = regs->err;
    thread = current_thread();
    thr_act = current_act();

    ETAP_DATA_LOAD(probe_data[0], regs->trapno);
    ETAP_DATA_LOAD(probe_data[1], MACH_PORT_NULL);
    ETAP_DATA_LOAD(probe_data[2], MACH_PORT_NULL);
    ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
                    0,
                    thread,
                    &probe_data,
                    ETAP_DATA_ENTRY*3);

    switch (type) {
      case T_PREEMPT:
        return (TRUE);

      case T_NO_FPU:
        fpnoextflt();
        return (TRUE);

      case T_FPU_FAULT:
        fpextovrflt();
        return (TRUE);

      case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        return (TRUE);

      case T_PAGE_FAULT:
        /*
         * If the current map is a submap of the kernel map,
         * and the address is within that map, fault on that
         * map.  If the same check is done in vm_fault
         * (vm_map_lookup), we may deadlock on the kernel map
         * lock.
         */
#if MACH_KDB
        mp_disable_preemption();
        if (db_active
#if NCPUS > 1
            && kdb_active[cpu_number()]
#endif  /* NCPUS > 1 */
            && !let_ddb_vm_fault) {
            /*
             * Force kdb to handle this one.
             */
            mp_enable_preemption();
            return (FALSE);
        }
        mp_enable_preemption();
#endif  /* MACH_KDB */
        subcode = regs->cr2;    /* get faulting address */

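        /*
         * The kernel is mapped at a high linear address; a fault above
         * LINEAR_KERNEL_ADDRESS is rebased into kernel_map's virtual
         * range before the map lookup below.
         */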
        if (subcode > LINEAR_KERNEL_ADDRESS) {
            map = kernel_map;
            subcode -= LINEAR_KERNEL_ADDRESS;
        } else if (thr_act == THR_ACT_NULL || thread == THREAD_NULL)
            map = kernel_map;
        else {
            map = thr_act->map;
        }

#if MACH_KDB
        /*
         * Check for watchpoint on kernel static data.
         * vm_fault would fail in this case
         */
        if (map == kernel_map &&
            db_watchpoint_list &&
            db_watchpoints_inserted &&
            (code & T_PF_WRITE) &&
            (vm_offset_t)subcode < vm_last_phys &&
            ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
             INTEL_PTE_WRITE) == 0) {
            *pte = INTEL_PTE_VALID | INTEL_PTE_WRITE |
                pa_to_pte(trunc_page((vm_offset_t)subcode) -
                          VM_MIN_KERNEL_ADDRESS);
            result = KERN_SUCCESS;
        } else
#endif  /* MACH_KDB */
        {
            /*
             * Since the 386 ignores write protection in
             * kernel mode, always try for write permission
             * first.  If that fails and the fault was a
             * read fault, retry with read permission.
             */
            if (map == kernel_map) {
                register struct recovery *rp;

                interruptible = THREAD_UNINT;
                for (rp = recover_table; rp < recover_table_end; rp++) {
                    if (regs->eip == rp->fault_addr) {
                        interruptible = THREAD_ABORTSAFE;
                        break;
                    }
                }
            }

            result = vm_fault(map,
                              trunc_page((vm_offset_t)subcode),
                              VM_PROT_READ|VM_PROT_WRITE,
                              FALSE,
                              (map == kernel_map) ? interruptible : THREAD_ABORTSAFE);
        }
#if MACH_KDB
        if (result == KERN_SUCCESS) {
            /* Look for watchpoints */
            if (db_watchpoint_list &&
                db_watchpoints_inserted &&
                (code & T_PF_WRITE) &&
                db_find_watchpoint(map,
                                   (vm_offset_t)subcode, regs))
                kdb_trap(T_WATCHPOINT, 0, regs);
        }
        else
#endif  /* MACH_KDB */
        if ((code & T_PF_WRITE) == 0 &&
            result == KERN_PROTECTION_FAILURE)
        {
            /*
             * Must expand vm_fault by hand,
             * so that we can ask for read-only access
             * but enter a (kernel) writable mapping.
             */
            result = intel_read_fault(map,
                                      trunc_page((vm_offset_t)subcode));
        }

        if (result == KERN_SUCCESS) {
            /*
             * Certain faults require that we back up
             * the EIP.
             */
            register struct recovery *rp;

            for (rp = retry_table; rp < retry_table_end; rp++) {
                if (regs->eip == rp->fault_addr) {
                    regs->eip = rp->recover_addr;
                    break;
                }
            }
            return (TRUE);
        }

        /* fall through */

      case T_GENERAL_PROTECTION:

        /*
         * If there is a failure recovery address
         * for this fault, go there.
         */
        {
            register struct recovery *rp;

            for (rp = recover_table;
                 rp < recover_table_end;
                 rp++) {
                if (regs->eip == rp->fault_addr) {
                    regs->eip = rp->recover_addr;
                    return (TRUE);
                }
            }
        }

        /*
         * Check thread recovery address also -
         * v86 assist uses it.
         */
        if (thread->recover) {
            regs->eip = thread->recover;
            thread->recover = 0;
            return (TRUE);
        }

        /*
         * Unanticipated page-fault errors in kernel
         * should not happen.
         */
        /* fall through... */

      default:
        /*
         * ...and return failure, so that locore can call into
         * the debugger.
         */
#if MACH_KDP
        kdp_i386_trap(type, regs, result, regs->cr2);
#endif
        return (FALSE);
    }
    return (TRUE);
}

/*
 * Called if both kernel_trap() and kdb_trap() fail.
 */
void
panic_trap(
    register struct i386_saved_state *regs)
{
    int code;
    register int type;

    type = regs->trapno;
    code = regs->err;

    printf("trap type %d, code = %x, pc = %x\n",
           type, code, regs->eip);
    panic("trap");
}


/*
 * Trap from user mode.
 */
void
user_trap(
    register struct i386_saved_state *regs)
{
    int exc;
    int code;
    int subcode;
    register int type;
    vm_map_t map;
    vm_prot_t prot;
    kern_return_t result;
    register thread_act_t thr_act = current_act();
    thread_t thread = (thr_act ? thr_act->thread : THREAD_NULL);
    boolean_t kernel_act = thr_act->kernel_loaded;
    etap_data_t probe_data;

    if (regs->efl & EFL_VM) {
        /*
         * If hardware assist can handle exception,
         * continue execution.
         */
        if (v86_assist(thread, regs))
            return;
    }

    type = regs->trapno;
    code = 0;
    subcode = 0;

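    /*
     * Map the hardware trap number onto a Mach exception triple:
     * exc selects the exception class (EXC_ARITHMETIC, EXC_BREAKPOINT,
     * ...), code the i386-specific reason (EXC_I386_DIV, ...), and
     * subcode the error code or faulting address where one applies.
     */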
    switch (type) {

      case T_DIVIDE_ERROR:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_DIV;
        break;

      case T_DEBUG:
        exc = EXC_BREAKPOINT;
        code = EXC_I386_SGL;
        break;

      case T_INT3:
        exc = EXC_BREAKPOINT;
        code = EXC_I386_BPT;
        break;

      case T_OVERFLOW:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_INTO;
        break;

      case T_OUT_OF_BOUNDS:
        exc = EXC_SOFTWARE;
        code = EXC_I386_BOUND;
        break;

      case T_INVALID_OPCODE:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOP;
        break;

      case T_NO_FPU:
      case 32:          /* XXX */
        fpnoextflt();
        return;

      case T_FPU_FAULT:
        fpextovrflt();
        return;

      case 10:          /* invalid TSS == iret with NT flag set */
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVTSSFLT;
        subcode = regs->err & 0xffff;
        break;

      case T_SEGMENT_NOT_PRESENT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = regs->err & 0xffff;
        break;

      case T_STACK_FAULT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = regs->err & 0xffff;
        break;

      case T_GENERAL_PROTECTION:
        if (!(regs->efl & EFL_VM)) {
            if (check_io_fault(regs))
                return;
        }
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = regs->err & 0xffff;
        break;

      case T_PAGE_FAULT:
        subcode = regs->cr2;
        prot = VM_PROT_READ|VM_PROT_WRITE;
        if (kernel_act == FALSE) {
            if (!(regs->err & T_PF_WRITE))
                prot = VM_PROT_READ;
            (void) user_page_fault_continue(vm_fault(thr_act->map,
                trunc_page((vm_offset_t)subcode),
                prot,
                FALSE,
                THREAD_ABORTSAFE));
            /* NOTREACHED */
        }
        else {
            map = thr_act->map;     /* default map for the comparison below */
            if (subcode > LINEAR_KERNEL_ADDRESS) {
                map = kernel_map;
                subcode -= LINEAR_KERNEL_ADDRESS;
            }
            result = vm_fault(thr_act->map,
                trunc_page((vm_offset_t)subcode),
                prot,
                FALSE,
                (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE);
            if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
                /*
                 * Must expand vm_fault by hand,
                 * so that we can ask for read-only access
                 * but enter a (kernel) writable mapping.
                 */
                result = intel_read_fault(thr_act->map,
                    trunc_page((vm_offset_t)subcode));
            }
            user_page_fault_continue(result);
            /*NOTREACHED*/
        }
        break;

      case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        return;

      default:
#if MACH_KGDB
        Debugger("Unanticipated user trap");
        return;
#endif  /* MACH_KGDB */
#if MACH_KDB
        if (kdb_trap(type, regs->err, regs))
            return;
#endif  /* MACH_KDB */
        printf("user trap type %d, code = %x, pc = %x\n",
               type, regs->err, regs->eip);
        panic("user trap");
        return;
    }

#if MACH_KDB
    if (debug_all_traps_with_kdb &&
        kdb_trap(type, regs->err, regs))
        return;
#endif  /* MACH_KDB */

#if ETAP_EVENT_MONITOR
    if (thread != THREAD_NULL) {
        ETAP_DATA_LOAD(probe_data[0], regs->trapno);
        ETAP_DATA_LOAD(probe_data[1],
                       thr_act->exc_actions[exc].port);
        ETAP_DATA_LOAD(probe_data[2],
                       thr_act->task->exc_actions[exc].port);
        ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
                        0,
                        thread,
                        &probe_data,
                        ETAP_DATA_ENTRY*3);
    }
#endif  /* ETAP_EVENT_MONITOR */

    i386_exception(exc, code, subcode);
    /*NOTREACHED*/
}

/*
 * V86 mode assist for interrupt handling.
 */
boolean_t v86_assist_on = TRUE;
boolean_t v86_unsafe_ok = FALSE;
boolean_t v86_do_sti_cli = TRUE;
boolean_t v86_do_sti_immediate = FALSE;
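
/*
 * Note on the delayed-STI technique used below: when
 * v86_do_sti_immediate is FALSE, an emulated STI does not raise the
 * simulated IF at once.  Instead V86_IF_PENDING is recorded and the
 * real trace flag is set, so the next instruction single-steps back
 * into v86_assist(), which then promotes the pending bit to EFL_IF.
 * This approximates the one-instruction interrupt shadow that follows
 * a real STI.
 */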

#define V86_IRET_PENDING 0x4000

int cli_count = 0;
int sti_count = 0;

boolean_t
v86_assist(
    thread_t thread,
    register struct i386_saved_state *regs)
{
    register struct v86_assist_state *v86 = &thread->top_act->mact.pcb->ims.v86s;

/*
 * Build an 8086 address.  Use only when off is known to be 16 bits.
 */
#define Addr8086(seg,off)   ((((seg) & 0xffff) << 4) + (off))
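/* For example, Addr8086(0x1234, 0x0010) == (0x1234 << 4) + 0x10 == 0x12350. */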

#define EFL_V86_SAFE        (  EFL_OF | EFL_DF | EFL_TF \
                             | EFL_SF | EFL_ZF | EFL_AF \
                             | EFL_PF | EFL_CF )
    struct iret_32 {
        int eip;
        int cs;
        int eflags;
    };
    struct iret_16 {
        unsigned short ip;
        unsigned short cs;
        unsigned short flags;
    };
    union iret_struct {
        struct iret_32 iret_32;
        struct iret_16 iret_16;
    };

    struct int_vec {
        unsigned short ip;
        unsigned short cs;
    };

    if (!v86_assist_on)
        return FALSE;

    /*
     * If delayed STI pending, enable interrupts.
     * Turn off tracing if on only to delay STI.
     */
    if (v86->flags & V86_IF_PENDING) {
        v86->flags &= ~V86_IF_PENDING;
        v86->flags |= EFL_IF;
        if ((v86->flags & EFL_TF) == 0)
            regs->efl &= ~EFL_TF;
    }

    if (regs->trapno == T_DEBUG) {

        if (v86->flags & EFL_TF) {
            /*
             * Trace flag was also set - it has priority
             */
            return FALSE;           /* handle as single-step */
        }
        /*
         * Fall through to check for interrupts.
         */
    }
    else if (regs->trapno == T_GENERAL_PROTECTION) {
        /*
         * General protection error - must be an 8086 instruction
         * to emulate.
         */
        register int eip;
        boolean_t addr_32 = FALSE;
        boolean_t data_32 = FALSE;
        int io_port;

        /*
         * Set up error handler for bad instruction/data
         * fetches.
         */
        __asm__("movl $(addr_error), %0" : : "m" (thread->recover));

        eip = regs->eip;
        while (TRUE) {
            unsigned char opcode;

            if (eip > 0xFFFF) {
                thread->recover = 0;
                return FALSE;       /* GP fault: IP out of range */
            }

            opcode = *(unsigned char *)Addr8086(regs->cs,eip);
            eip++;
            switch (opcode) {
              case 0xf0:    /* lock */
              case 0xf2:    /* repne */
              case 0xf3:    /* repe */
              case 0x2e:    /* cs */
              case 0x36:    /* ss */
              case 0x3e:    /* ds */
              case 0x26:    /* es */
              case 0x64:    /* fs */
              case 0x65:    /* gs */
                /* ignore prefix */
                continue;

              case 0x66:    /* data size */
                data_32 = TRUE;
                continue;

              case 0x67:    /* address size */
                addr_32 = TRUE;
                continue;

              case 0xe4:    /* inb imm */
              case 0xe5:    /* inw imm */
              case 0xe6:    /* outb imm */
              case 0xe7:    /* outw imm */
                io_port = *(unsigned char *)Addr8086(regs->cs, eip);
                eip++;
                goto do_in_out;

              case 0xec:    /* inb dx */
              case 0xed:    /* inw dx */
              case 0xee:    /* outb dx */
              case 0xef:    /* outw dx */
              case 0x6c:    /* insb */
              case 0x6d:    /* insw */
              case 0x6e:    /* outsb */
              case 0x6f:    /* outsw */
                io_port = regs->edx & 0xffff;

            do_in_out:
                if (!data_32)
                    opcode |= 0x6600;   /* word IO */

                switch (emulate_io(regs, opcode, io_port)) {
                  case EM_IO_DONE:
                    /* instruction executed */
                    break;
                  case EM_IO_RETRY:
                    /* port mapped, retry instruction */
                    thread->recover = 0;
                    return TRUE;
                  case EM_IO_ERROR:
                    /* port not mapped */
                    thread->recover = 0;
                    return FALSE;
                }
                break;

              case 0xfa:    /* cli */
                if (!v86_do_sti_cli) {
                    thread->recover = 0;
                    return (FALSE);
                }

                v86->flags &= ~EFL_IF;
                    /* disable simulated interrupts */
                cli_count++;
                break;

              case 0xfb:    /* sti */
                if (!v86_do_sti_cli) {
                    thread->recover = 0;
                    return (FALSE);
                }

                if ((v86->flags & EFL_IF) == 0) {
                    if (v86_do_sti_immediate) {
                        v86->flags |= EFL_IF;
                    } else {
                        v86->flags |= V86_IF_PENDING;
                        regs->efl |= EFL_TF;
                    }
                    /* single step to set IF next inst. */
                }
                sti_count++;
                break;

              case 0x9c:    /* pushf */
              {
                int flags;
                vm_offset_t sp;
                int size;

                flags = regs->efl;
                if ((v86->flags & EFL_IF) == 0)
                    flags &= ~EFL_IF;

                if ((v86->flags & EFL_TF) == 0)
                    flags &= ~EFL_TF;
                else
                    flags |= EFL_TF;

                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;
                size = (data_32) ? 4 : 2;
                if (sp < size)
                    goto stack_error;
                sp -= size;
                if (copyout((char *)&flags,
                            (char *)Addr8086(regs->ss,sp),
                            size))
                    goto addr_error;
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;
                break;
              }

              case 0x9d:    /* popf */
              {
                vm_offset_t sp;
                int nflags;

                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;

                if (data_32) {
                    if (sp > 0xffff - sizeof(int))
                        goto stack_error;
                    nflags = *(int *)Addr8086(regs->ss,sp);
                    sp += sizeof(int);
                }
                else {
                    if (sp > 0xffff - sizeof(short))
                        goto stack_error;
                    nflags = *(unsigned short *)
                        Addr8086(regs->ss,sp);
                    sp += sizeof(short);
                }
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;

                if (v86->flags & V86_IRET_PENDING) {
                    v86->flags = nflags & (EFL_TF | EFL_IF);
                    v86->flags |= V86_IRET_PENDING;
                } else {
                    v86->flags = nflags & (EFL_TF | EFL_IF);
                }
                regs->efl = (regs->efl & ~EFL_V86_SAFE)
                          | (nflags & EFL_V86_SAFE);
                break;
              }
              case 0xcf:    /* iret */
              {
                vm_offset_t sp;
                int nflags;
                int size;
                union iret_struct iret_struct;

                v86->flags &= ~V86_IRET_PENDING;
                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;

                if (data_32) {
                    if (sp > 0xffff - sizeof(struct iret_32))
                        goto stack_error;
                    iret_struct.iret_32 =
                        *(struct iret_32 *) Addr8086(regs->ss,sp);
                    sp += sizeof(struct iret_32);
                }
                else {
                    if (sp > 0xffff - sizeof(struct iret_16))
                        goto stack_error;
                    iret_struct.iret_16 =
                        *(struct iret_16 *) Addr8086(regs->ss,sp);
                    sp += sizeof(struct iret_16);
                }
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;

                if (data_32) {
                    eip      = iret_struct.iret_32.eip;
                    regs->cs = iret_struct.iret_32.cs & 0xffff;
                    nflags   = iret_struct.iret_32.eflags;
                }
                else {
                    eip      = iret_struct.iret_16.ip;
                    regs->cs = iret_struct.iret_16.cs;
                    nflags   = iret_struct.iret_16.flags;
                }

                v86->flags = nflags & (EFL_TF | EFL_IF);
                regs->efl = (regs->efl & ~EFL_V86_SAFE)
                          | (nflags & EFL_V86_SAFE);
                break;
              }

              default:
                /*
                 * Instruction not emulated here.
                 */
                thread->recover = 0;
                return FALSE;
            }
            break;      /* exit from 'while TRUE' */
        }
        regs->eip = (regs->eip & 0xffff0000) | eip;
    }
    else {
        /*
         * Not a trap we handle.
         */
        thread->recover = 0;
        return FALSE;
    }

    if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING) == 0)) {

        struct v86_interrupt_table *int_table;
        int int_count;
        int vec;
        int i;

        int_table = (struct v86_interrupt_table *) v86->int_table;
        int_count = v86->int_count;

        vec = 0;
        for (i = 0; i < int_count; int_table++, i++) {
            if (!int_table->mask && int_table->count > 0) {
                int_table->count--;
                vec = int_table->vec;
                break;
            }
        }
        if (vec != 0) {
            /*
             * Take this interrupt
             */
            vm_offset_t sp;
            struct iret_16 iret_16;
            struct int_vec int_vec;

            sp = regs->uesp & 0xffff;
            if (sp < sizeof(struct iret_16))
                goto stack_error;
            sp -= sizeof(struct iret_16);
            iret_16.ip = regs->eip;
            iret_16.cs = regs->cs;
            iret_16.flags = regs->efl & 0xFFFF;
            if ((v86->flags & EFL_TF) == 0)
                iret_16.flags &= ~EFL_TF;
            else
                iret_16.flags |= EFL_TF;

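            /*
             * Fetch the real-mode interrupt vector: the 8086 IVT lives
             * at linear address 0, one 4-byte cs:ip pair per vector, so
             * entry 'vec' sits at sizeof(struct int_vec) * vec.
             */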
            (void) memcpy((char *) &int_vec,
                          (char *) (sizeof(struct int_vec) * vec),
                          sizeof(struct int_vec));
            if (copyout((char *)&iret_16,
                        (char *)Addr8086(regs->ss,sp),
                        sizeof(struct iret_16)))
                goto addr_error;
            regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
            regs->eip = int_vec.ip;
            regs->cs  = int_vec.cs;
            regs->efl &= ~EFL_TF;
            v86->flags &= ~(EFL_IF | EFL_TF);
            v86->flags |= V86_IRET_PENDING;
        }
    }

    thread->recover = 0;
    return TRUE;

    /*
     * On address error, report a page fault.
     * XXX report GP fault - we don't save
     * the faulting address.
     */
  addr_error:
    __asm__("addr_error:;");
    thread->recover = 0;
    return FALSE;

    /*
     * On stack address error, return stack fault (12).
     */
  stack_error:
    thread->recover = 0;
    regs->trapno = T_STACK_FAULT;
    return FALSE;
}

/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void log_thread_action (thread_t, char *);

void
i386_astintr(int preemption)
{
    int mycpu;
    ast_t mask = AST_ALL;
    spl_t s;
    thread_t self = current_thread();

    s = splsched();     /* block interrupts to check reasons */
    mp_disable_preemption();
    mycpu = cpu_number();
    if (need_ast[mycpu] & AST_I386_FP) {
        /*
         * AST was for delayed floating-point exception -
         * FP interrupt occurred while in kernel.
         * Turn off this AST reason and handle the FPU error.
         */

        ast_off(AST_I386_FP);
        mp_enable_preemption();
        splx(s);

        fpexterrflt();
    }
    else {
        /*
         * Not an FPU trap.  Handle the AST.
         * Interrupts are still blocked.
         */

#ifdef XXX
        if (preemption) {

            /*
             * We don't want to process any AST if we were in
             * kernel-mode and the current thread is in any
             * funny state (waiting and/or suspended).
             */

            thread_lock (self);

            if (thread_not_preemptable(self) || self->preempt) {
                ast_off(AST_URGENT);
                thread_unlock (self);
                mp_enable_preemption();
                splx(s);
                return;
            }
            else mask = AST_PREEMPT;
            mp_enable_preemption();

            /*
            self->preempt = TH_NOT_PREEMPTABLE;
            */

            thread_unlock (self);
        } else {
            mp_enable_preemption();
        }
#else
        mp_enable_preemption();
#endif

        ast_taken(mask, s
#if FAST_IDLE
                  , NO_IDLE_THREAD
#endif  /* FAST_IDLE */
                  );
        /*
        self->preempt = TH_PREEMPTABLE;
        */
    }
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
    int exc,
    int code,
    int subcode)
{
    spl_t s;
    exception_data_type_t codes[EXCEPTION_CODE_MAX];

    /*
     * Turn off delayed FPU error handling.
     */
    s = splsched();
    mp_disable_preemption();
    ast_off(AST_I386_FP);
    mp_enable_preemption();
    splx(s);

    codes[0] = code;        /* new exception interface */
    codes[1] = subcode;
    exception(exc, codes, 2);
    /*NOTREACHED*/
}

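/*
 * check_io_fault: called for a general-protection fault taken from user
 * mode.  If the faulting instruction is an I/O instruction it is handed
 * to emulate_io(), with 0x6600 OR'ed into the opcode when the effective
 * operand size is 16 bits ("word IO"), the same convention v86_assist()
 * uses.  Returns TRUE if the fault was handled.
 */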
boolean_t
check_io_fault(
    struct i386_saved_state *regs)
{
    int eip, opcode, io_port;
    boolean_t data_16 = FALSE;

    /*
     * Get the instruction.
     */
    eip = regs->eip;

    for (;;) {
        opcode = inst_fetch(eip, regs->cs);
        eip++;
        switch (opcode) {
          case 0x66:    /* data-size prefix */
            data_16 = TRUE;
            continue;

          case 0xf3:    /* rep prefix */
          case 0x26:    /* es */
          case 0x2e:    /* cs */
          case 0x36:    /* ss */
          case 0x3e:    /* ds */
          case 0x64:    /* fs */
          case 0x65:    /* gs */
            continue;

          case 0xE4:    /* inb imm */
          case 0xE5:    /* inl imm */
          case 0xE6:    /* outb imm */
          case 0xE7:    /* outl imm */
            /* port is immediate byte */
            io_port = inst_fetch(eip, regs->cs);
            eip++;
            break;

          case 0xEC:    /* inb dx */
          case 0xED:    /* inl dx */
          case 0xEE:    /* outb dx */
          case 0xEF:    /* outl dx */
          case 0x6C:    /* insb */
          case 0x6D:    /* insl */
          case 0x6E:    /* outsb */
          case 0x6F:    /* outsl */
            /* port is in DX register */
            io_port = regs->edx & 0xFFFF;
            break;

          default:
            return FALSE;
        }
        break;
    }

    if (data_16)
        opcode |= 0x6600;   /* word IO */

    switch (emulate_io(regs, opcode, io_port)) {
      case EM_IO_DONE:
        /* instruction executed */
        regs->eip = eip;
        return TRUE;

      case EM_IO_RETRY:
        /* port mapped, retry instruction */
        return TRUE;

      case EM_IO_ERROR:
        /* port not mapped */
        return FALSE;
    }
    return FALSE;
}

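/*
 * If an urgent AST is pending and we are at the outermost interrupt
 * level, take the preemption through the normal trap path by raising a
 * software interrupt.  Vector 0xff is assumed here to be wired in the
 * IDT so that it arrives as T_PREEMPT, which kernel_trap() handles
 * first.
 */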
void
kernel_preempt_check (void)
{
    mp_disable_preemption();
    if ((need_ast[cpu_number()] & AST_URGENT) &&
#if NCPUS > 1
        get_interrupt_level() == 1
#else   /* NCPUS > 1 */
        get_interrupt_level() == 0
#endif  /* NCPUS > 1 */
        ) {
        mp_enable_preemption_no_check();
        __asm__ volatile ("     int     $0xff");
    } else {
        mp_enable_preemption_no_check();
    }
}

#if MACH_KDB

extern void db_i386_state(struct i386_saved_state *regs);

#include <ddb/db_output.h>

void
db_i386_state(
    struct i386_saved_state *regs)
{
    db_printf("eip  %8x\n", regs->eip);
    db_printf("trap %8x\n", regs->trapno);
    db_printf("err  %8x\n", regs->err);
    db_printf("efl  %8x\n", regs->efl);
    db_printf("ebp  %8x\n", regs->ebp);
    db_printf("esp  %8x\n", regs->esp);
    db_printf("uesp %8x\n", regs->uesp);
    db_printf("cs   %8x\n", regs->cs & 0xff);
    db_printf("ds   %8x\n", regs->ds & 0xff);
    db_printf("es   %8x\n", regs->es & 0xff);
    db_printf("fs   %8x\n", regs->fs & 0xff);
    db_printf("gs   %8x\n", regs->gs & 0xff);
    db_printf("ss   %8x\n", regs->ss & 0xff);
    db_printf("eax  %8x\n", regs->eax);
    db_printf("ebx  %8x\n", regs->ebx);
    db_printf("ecx  %8x\n", regs->ecx);
    db_printf("edx  %8x\n", regs->edx);
    db_printf("esi  %8x\n", regs->esi);
    db_printf("edi  %8x\n", regs->edi);
}

#endif  /* MACH_KDB */