2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1992 NeXT, Inc.
32 * 13 May 1992 ? at NeXT
36 #include <mach/mach_types.h>
37 #include <mach/exception.h>
39 #include <kern/thread.h>
41 #include <sys/systm.h>
42 #include <sys/param.h>
43 #include <sys/proc_internal.h>
45 #include <sys/sysproto.h>
46 #include <sys/sysent.h>
47 #include <sys/ucontext.h>
49 #include <mach/thread_act.h> /* for thread_abort_safely */
50 #include <mach/thread_status.h>
52 #include <i386/eflags.h>
54 #include <i386/machine_routines.h>
57 #include <sys/kdebug.h>
64 extern boolean_t
machine_exception(int, mach_exception_code_t
,
65 mach_exception_subcode_t
, int *, mach_exception_subcode_t
*);
66 extern kern_return_t
thread_getstatus(register thread_t act
, int flavor
,
67 thread_state_t tstate
, mach_msg_type_number_t
*count
);
68 extern kern_return_t
thread_setstatus(thread_t thread
, int flavor
,
69 thread_state_t tstate
, mach_msg_type_number_t count
);
71 /* Signal handler flavors supported */
/* These definitions should match the Libc implementation */
75 #define UC_SET_ALT_STACK 0x40000000
76 #define UC_RESET_ALT_STACK 0x80000000
78 #define C_32_STK_ALIGN 16
79 #define C_64_STK_ALIGN 16
80 #define C_64_REDZONE_LEN 128
81 #define TRUNC_DOWN32(a,c) ((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
82 #define TRUNC_DOWN64(a,c) ((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
96 user32_addr_t catcher
; /* sig_t */
99 user32_addr_t sinfo
; /* siginfo32_t* */
100 user32_addr_t uctx
; /* struct ucontext32 */
104 * NOTE: Source and target may *NOT* overlap!
107 siginfo_user_to_user32(user_siginfo_t
*in
, user32_siginfo_t
*out
)
109 out
->si_signo
= in
->si_signo
;
110 out
->si_errno
= in
->si_errno
;
111 out
->si_code
= in
->si_code
;
112 out
->si_pid
= in
->si_pid
;
113 out
->si_uid
= in
->si_uid
;
114 out
->si_status
= in
->si_status
;
115 out
->si_addr
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_addr
);
116 /* following cast works for sival_int because of padding */
117 out
->si_value
.sival_ptr
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_value
.sival_ptr
);
118 out
->si_band
= in
->si_band
; /* range reduction */
119 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
123 siginfo_user_to_user64(user_siginfo_t
*in
, user64_siginfo_t
*out
)
125 out
->si_signo
= in
->si_signo
;
126 out
->si_errno
= in
->si_errno
;
127 out
->si_code
= in
->si_code
;
128 out
->si_pid
= in
->si_pid
;
129 out
->si_uid
= in
->si_uid
;
130 out
->si_status
= in
->si_status
;
131 out
->si_addr
= in
->si_addr
;
132 out
->si_value
.sival_ptr
= in
->si_value
.sival_ptr
;
133 out
->si_band
= in
->si_band
; /* range reduction */
134 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
/*
 * sendsig(p, ua_catcher, sig, mask, code)
 *
 * Deliver signal `sig` to process `p`: snapshot the interrupted thread's
 * machine state, build a signal frame (ucontext + siginfo + mcontext) on
 * the user stack — the SA_ONSTACK alternate stack when configured — and
 * redirect the thread to the user-mode trampoline (`trampact`), which
 * runs the handler and later calls sigreturn (below) to restore state.
 *
 * NOTE(review): this chunk is a garbled extraction.  Not visible here:
 * the function's return type and braces, the `goto bad;` branches after
 * each thread_getstatus()/copyout() failure, the `bad:` label itself,
 * several local declarations (thread, ut, ua_sp, ua_fp, ua_sip, ua_cr2,
 * stack_size, state, sigonstack), the union `mctx` wrapping
 * mctx32/mctx64, and assorted case labels / else markers.  Elisions are
 * flagged in place; no code tokens were changed.
 */
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
    /* NOTE(review): in the original these two live inside a union `mctx`,
     * referenced below as mctx.mctx32 / mctx.mctx64; the wrapper is
     * elided here. */
    struct mcontext32 mctx32;
    struct mcontext64 mctx64;
    user_addr_t ua_uctxp;           /* user address of the ucontext built below */
    user_addr_t ua_mctxp;           /* user address of the saved machine context */
    user_siginfo_t sinfo64;         /* kernel-format siginfo; converted per ABI at copyout */
    struct sigacts *ps = p->p_sigacts;
    int oonstack, flavor;
    user_addr_t trampact;           /* user-mode signal trampoline entry point */
    mach_msg_type_number_t state_count;
    int infostyle = UC_TRAD;        /* upgraded to UC_FLAVOR when SA_SIGINFO is set */

    thread = current_thread();
    ut = get_bsdthread_info(thread);

    /* A handler registered with SA_SIGINFO gets the full siginfo flavor. */
    if (p->p_sigacts->ps_siginfo & sigmask(sig))
        infostyle = UC_FLAVOR;

    oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;     /* already on the alt stack? */
    trampact = ps->ps_trampact[sig];
    sigonstack = (ps->ps_sigonstack & sigmask(sig));

    bzero((caddr_t)&sinfo64, sizeof(sinfo64));
    sinfo64.si_signo = sig;

    if (proc_is64bit(p)) {
        x86_thread_state64_t *tstate64;
        struct user_ucontext64 uctx64;

        /* Capture integer register state into the mcontext. */
        flavor = x86_THREAD_STATE64;
        state_count = x86_THREAD_STATE64_COUNT;
        state = (void *)&mctx.mctx64.ss;
        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
            /* NOTE(review): failure branch (`goto bad;`) elided */

        /* Capture floating-point state. */
        flavor = x86_FLOAT_STATE64;
        state_count = x86_FLOAT_STATE64_COUNT;
        state = (void *)&mctx.mctx64.fs;
        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
            /* NOTE(review): failure branch (`goto bad;`) elided */

        /* Capture exception state (holds the fault address). */
        flavor = x86_EXCEPTION_STATE64;
        state_count = x86_EXCEPTION_STATE64_COUNT;
        state = (void *)&mctx.mctx64.es;
        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
            /* NOTE(review): failure branch (`goto bad;`) elided */

        tstate64 = &mctx.mctx64.ss;

        /* figure out where our new stack lives */
        if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
            /* NOTE(review): remainder of the condition (presumably
             * `sigonstack) {`) elided — take the alternate stack */
            ua_sp = ut->uu_sigstk.ss_sp;
            stack_size = ut->uu_sigstk.ss_size;
            ut->uu_sigstk.ss_flags |= SA_ONSTACK;
            /* NOTE(review): `} else` elided — stay on the current stack */
            ua_sp = tstate64->rsp;
        ua_cr2 = mctx.mctx64.es.faultvaddr;

        /* The x86_64 ABI defines a 128-byte red zone. */
        ua_sp -= C_64_REDZONE_LEN;

        /* Carve ucontext, siginfo, and mcontext out of the user stack. */
        ua_sp -= sizeof (struct user_ucontext64);
        ua_uctxp = ua_sp;            // someone tramples the first word!

        ua_sp -= sizeof (user64_siginfo_t);
        /* NOTE(review): `ua_sip = ua_sp;` appears elided here — TODO confirm */

        ua_sp -= sizeof (struct mcontext64);
        /* NOTE(review): `ua_mctxp = ua_sp;` appears elided here — TODO confirm */

        /*
         * Align the frame and stack pointers to 16 bytes for SSE.
         * (Note that we use 'ua_fp' as the base of the stack going forward)
         */
        ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);
        /*
         * But we need to account for the return address so the alignment is
         * truly "correct" at _sigtramp
         */
        ua_fp -= sizeof(user_addr_t);

        /*
         * Build the signal context to be used by sigreturn.
         */
        bzero(&uctx64, sizeof(uctx64));

        uctx64.uc_onstack = oonstack;
        uctx64.uc_sigmask = mask;
        uctx64.uc_stack.ss_sp = ua_fp;
        uctx64.uc_stack.ss_size = stack_size;
        /* NOTE(review): presumably guarded by an on-stack test in the
         * original; the guard is elided here — TODO confirm */
        uctx64.uc_stack.ss_flags |= SS_ONSTACK;
        uctx64.uc_mcsize = sizeof(struct mcontext64);
        uctx64.uc_mcontext64 = ua_mctxp;

        if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
            /* NOTE(review): `goto bad;` elided */
        if (copyout((caddr_t)&mctx.mctx64, ua_mctxp, sizeof (struct mcontext64)))
            /* NOTE(review): `goto bad;` elided */

        /* Record the pre-signal sp/pc so siginfo can expose them. */
        sinfo64.pad[0] = tstate64->rsp;
        sinfo64.si_addr = tstate64->rip;

        /* Redirect the thread to the trampoline on the new frame. */
        tstate64->rip = trampact;
        tstate64->rsp = ua_fp;
        tstate64->rflags = get_eflags_exportmask();
        /*
         * JOE - might not need to set these
         */
        tstate64->cs = USER64_CS;
        tstate64->fs = NULL_SEG;
        tstate64->gs = USER_CTHREAD;

        /*
         * Build the argument list for the signal handler.
         * Handler should call sigreturn to get out of it
         */
        tstate64->rdi = ua_catcher;
        tstate64->rsi = infostyle;
        /* NOTE(review): an argument-register assignment (likely
         * `tstate64->rdx = sig;`) appears elided here — TODO confirm */
        tstate64->rcx = ua_sip;
        tstate64->r8 = ua_uctxp;
    /* NOTE(review): `} else {` (start of the 32-bit path) elided */
        x86_thread_state32_t *tstate32;
        struct user_ucontext32 uctx32;
        struct sigframe32 frame32;

        /* Capture integer register state. */
        flavor = x86_THREAD_STATE32;
        state_count = x86_THREAD_STATE32_COUNT;
        state = (void *)&mctx.mctx32.ss;
        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
            /* NOTE(review): failure branch (`goto bad;`) elided */

        /* Capture floating-point state. */
        flavor = x86_FLOAT_STATE32;
        state_count = x86_FLOAT_STATE32_COUNT;
        state = (void *)&mctx.mctx32.fs;
        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
            /* NOTE(review): failure branch (`goto bad;`) elided */

        /* Capture exception state. */
        flavor = x86_EXCEPTION_STATE32;
        state_count = x86_EXCEPTION_STATE32_COUNT;
        state = (void *)&mctx.mctx32.es;
        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
            /* NOTE(review): failure branch (`goto bad;`) elided */

        tstate32 = &mctx.mctx32.ss;

        /* figure out where our new stack lives */
        if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
            /* NOTE(review): remainder of condition elided — alternate stack */
            ua_sp = ut->uu_sigstk.ss_sp;
            stack_size = ut->uu_sigstk.ss_size;
            ut->uu_sigstk.ss_flags |= SA_ONSTACK;
            /* NOTE(review): `} else` elided — current stack */
            ua_sp = tstate32->esp;
        ua_cr2 = mctx.mctx32.es.faultvaddr;

        /* Carve ucontext, siginfo, mcontext, and call frame out of the stack. */
        ua_sp -= sizeof (struct user_ucontext32);
        ua_uctxp = ua_sp;            // someone tramples the first word!

        ua_sp -= sizeof (user32_siginfo_t);
        /* NOTE(review): `ua_sip = ua_sp;` appears elided here — TODO confirm */

        ua_sp -= sizeof (struct mcontext32);
        /* NOTE(review): `ua_mctxp = ua_sp;` appears elided here — TODO confirm */

        ua_sp -= sizeof (struct sigframe32);
        /* NOTE(review): `ua_fp = ua_sp;` appears elided here (ua_fp is
         * otherwise used uninitialized below) — TODO confirm */

        /*
         * Align the frame and stack pointers to 16 bytes for SSE.
         * (Note that we use 'fp' as the base of the stack going forward)
         */
        ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);
        /*
         * But we need to account for the return address so the alignment is
         * truly "correct" at _sigtramp
         */
        ua_fp -= sizeof(frame32.retaddr);

        /*
         * Build the argument list for the signal handler.
         * Handler should call sigreturn to get out of it
         */
        frame32.retaddr = -1;
        frame32.sigstyle = infostyle;
        /* NOTE(review): a `frame32.sig = sig;`-style assignment appears
         * elided here — TODO confirm */
        frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
        frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
        frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);

        if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
            /* NOTE(review): `goto bad;` elided */

        /*
         * Build the signal context to be used by sigreturn.
         */
        bzero(&uctx32, sizeof(uctx32));

        uctx32.uc_onstack = oonstack;
        uctx32.uc_sigmask = mask;
        uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
        uctx32.uc_stack.ss_size = stack_size;
        /* NOTE(review): on-stack guard elided — TODO confirm */
        uctx32.uc_stack.ss_flags |= SS_ONSTACK;
        uctx32.uc_mcsize = sizeof(struct mcontext32);
        uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);

        if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
            /* NOTE(review): `goto bad;` elided */
        if (copyout((caddr_t)&mctx.mctx32, ua_mctxp, sizeof (struct mcontext32)))
            /* NOTE(review): `goto bad;` elided */

        /* Record the pre-signal sp/pc so siginfo can expose them. */
        sinfo64.pad[0] = tstate32->esp;
        sinfo64.si_addr = tstate32->eip;

    /*
     * Fill in machine-dependent si_code / si_addr for this signal.
     * NOTE(review): the enclosing `switch (sig)` and most of its case
     * labels (SIGILL, SIGFPE, SIGBUS, SIGTRAP, SIGSEGV, default) are
     * elided from this extraction; only fragments of each case survive.
     */
    switch (ut->uu_code) {
        /* NOTE(review): case label elided */
        sinfo64.si_code = ILL_ILLOPC;
        /* NOTE(review): default label elided */
        sinfo64.si_code = ILL_NOOP;

    /* SIGFPE case: decode the FP status bits recorded in uu_subcode. */
#define FP_IE 0 /* Invalid operation */
#define FP_DE 1 /* Denormalized operand */
#define FP_ZE 2 /* Zero divide */
#define FP_OE 3 /* overflow */
#define FP_UE 4 /* underflow */
#define FP_PE 5 /* precision */
    if (ut->uu_code == EXC_I386_DIV) {
        sinfo64.si_code = FPE_INTDIV;
    /* NOTE(review): closing brace elided */
    else if (ut->uu_code == EXC_I386_INTO) {
        sinfo64.si_code = FPE_INTOVF;
    /* NOTE(review): closing brace elided */
    else if (ut->uu_subcode & (1 << FP_ZE)) {
        sinfo64.si_code = FPE_FLTDIV;
    } else if (ut->uu_subcode & (1 << FP_OE)) {
        sinfo64.si_code = FPE_FLTOVF;
    } else if (ut->uu_subcode & (1 << FP_UE)) {
        sinfo64.si_code = FPE_FLTUND;
    } else if (ut->uu_subcode & (1 << FP_PE)) {
        sinfo64.si_code = FPE_FLTRES;
    } else if (ut->uu_subcode & (1 << FP_IE)) {
        sinfo64.si_code = FPE_FLTINV;
    /* NOTE(review): `} else {` elided */
        sinfo64.si_code = FPE_NOOP;

    /* SIGBUS case (labels elided) */
        sinfo64.si_code = BUS_ADRERR;
        sinfo64.si_addr = ua_cr2;
    /* SIGTRAP case (labels elided) */
        sinfo64.si_code = TRAP_BRKPT;
    /* SIGSEGV case (labels elided) */
        sinfo64.si_addr = ua_cr2;
        switch (ut->uu_code) {
            /* NOTE(review): leading case label elided */
            /* CR2 is meaningless after GP fault */
            /* XXX namespace clash! */
            sinfo64.si_addr = 0ULL;
            /* NOTE(review): `break;` presumably elided — TODO confirm */
        case KERN_PROTECTION_FAILURE:
            sinfo64.si_code = SEGV_ACCERR;
            /* NOTE(review): `break;` presumably elided */
        case KERN_INVALID_ADDRESS:
            sinfo64.si_code = SEGV_MAPERR;
            /* NOTE(review): `break;` / default label presumably elided */
            sinfo64.si_code = FPE_NOOP;

    /* default case: all other signals carry child-status information */
        int status_and_exitcode;

        /*
         * All other signals need to fill out a minimum set of
         * information for the siginfo structure passed into
         * the signal handler, if SA_SIGINFO was specified.
         *
         * p->si_status actually contains both the status and
         * the exit code; we save it off in its own variable
         * for later breakdown.
         */
        sinfo64.si_pid = p->si_pid;
        status_and_exitcode = p->si_status;
        sinfo64.si_uid = p->si_uid;
        sinfo64.si_code = p->si_code;

        /* Refine CLD_EXITED into exited / killed / dumped. */
        if (sinfo64.si_code == CLD_EXITED) {
            if (WIFEXITED(status_and_exitcode))
                sinfo64.si_code = CLD_EXITED;
            else if (WIFSIGNALED(status_and_exitcode)) {
                if (WCOREDUMP(status_and_exitcode)) {
                    sinfo64.si_code = CLD_DUMPED;
                    status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
                /* NOTE(review): `} else {` elided */
                    sinfo64.si_code = CLD_KILLED;
                    status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
        /*
         * The recorded status contains the exit code and the
         * signal information, but the information to be passed
         * in the siginfo to the handler is supposed to only
         * contain the status, so we have to shift it out.
         */
        sinfo64.si_status = WEXITSTATUS(status_and_exitcode);

    /* Copy the finished siginfo out in the ABI-appropriate layout. */
    if (proc_is64bit(p)) {
        user64_siginfo_t sinfo64_user64;

        bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));

        siginfo_user_to_user64(&sinfo64,&sinfo64_user64);

        /* NOTE(review): `#if CONFIG_DTRACE` guard elided (its matching
         * `#endif` below survives).  Populate the per-thread siginfo
         * that the DTrace probes read. */
        bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

        ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
        ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
        ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
        ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
        ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
        /* XXX truncates faulting address to void * on K32 */
        ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);

        /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
        /* NOTE(review): enclosing `switch (sig)` elided */
        case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
            DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
            /* NOTE(review): `break;` / default presumably elided */

        /* XXX truncates catcher address to uintptr_t */
        DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
            void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */

        if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof (sinfo64_user64)))
            /* NOTE(review): `goto bad;` elided */

        /* Select the rewritten 64-bit register state for installation below. */
        flavor = x86_THREAD_STATE64;
        state_count = x86_THREAD_STATE64_COUNT;
        state = (void *)&mctx.mctx64.ss;
    /* NOTE(review): `} else {` (32-bit copyout path) elided */
        x86_thread_state32_t *tstate32;
        user32_siginfo_t sinfo32;

        bzero((caddr_t)&sinfo32, sizeof(sinfo32));

        siginfo_user_to_user32(&sinfo64,&sinfo32);

        /* NOTE(review): `#if CONFIG_DTRACE` guard elided */
        bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

        ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo;
        ut->t_dtrace_siginfo.si_code = sinfo32.si_code;
        ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid;
        ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid;
        ut->t_dtrace_siginfo.si_status = sinfo32.si_status;
        ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr);

        /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
        /* NOTE(review): enclosing `switch (sig)` elided */
        case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
            DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));

        DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
            void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */

        if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
            /* NOTE(review): `goto bad;` elided */

        /* Point the 32-bit thread at the trampoline on the new frame. */
        tstate32 = &mctx.mctx32.ss;

        tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
        tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);

        tstate32->eflags = get_eflags_exportmask();

        tstate32->cs = USER_CS;
        tstate32->ss = USER_DS;
        tstate32->ds = USER_DS;
        tstate32->es = USER_DS;
        tstate32->fs = NULL_SEG;
        tstate32->gs = USER_CTHREAD;

        flavor = x86_THREAD_STATE32;
        state_count = x86_THREAD_STATE32_COUNT;
        state = (void *)tstate32;

    /* Install the modified register state on the thread. */
    if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
        /* NOTE(review): `goto bad;` elided */
    ml_fp_setvalid(FALSE);
    /* NOTE(review): the normal-path return and the `bad:` label are
     * elided; everything below appears to be the error path — reset
     * SIGILL to its default action and deliver it so the process is
     * terminated rather than resumed with a half-built signal frame. */
    SIGACTION(p, SIGILL) = SIG_DFL;
    sig = sigmask(SIGILL);
    p->p_sigignore &= ~sig;
    p->p_sigcatch &= ~sig;
    ut->uu_sigmask &= ~sig;
    /* sendsig is called with signal lock held */
    psignal_locked(p, SIGILL);
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
/*
 * sigreturn(p, uap, retval)
 *
 * System call that unwinds a signal delivery set up by sendsig():
 * copies the ucontext (and the mcontext it points at) back in from user
 * space, restores the signal mask and machine state, and resumes the
 * interrupted context.  Also serves as the hook for toggling the
 * per-thread SA_ONSTACK flag via the UC_SET/RESET_ALT_STACK infostyles.
 *
 * NOTE(review): garbled extraction — the return type and braces,
 * `return` statements, error handling after the copyin/setstatus
 * failures, and several local declarations (error, onstack, ts, fs,
 * the union `mctx`) are not visible.  Only comments were added.
 */
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
    /* NOTE(review): wrapped in a union `mctx` in the original,
     * referenced below as mctx.mctx32 / mctx.mctx64. */
    struct mcontext32 mctx32;
    struct mcontext64 mctx64;
    thread_t thread = current_thread();
    mach_msg_type_number_t ts_count;    /* integer (thread) state */
    unsigned int ts_flavor;
    mach_msg_type_number_t fs_count;    /* floating-point state */
    unsigned int fs_flavor;

    ut = (struct uthread *)get_bsdthread_info(thread);

    /*
     * If we are being asked to change the altstack flag on the thread, we
     * just set/reset it and return (the uap->uctx is not used).
     */
    if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
        /* NOTE(review): return statement elided */
    } else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
        /* NOTE(review): return statement elided */

    if (proc_is64bit(p)) {
        struct user_ucontext64 uctx64;

        /* Pull in the user ucontext, then the mcontext it points at. */
        if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
            /* NOTE(review): error return elided */
        if ((error = copyin(uctx64.uc_mcontext64, (void *)&mctx.mctx64, sizeof (struct mcontext64))))
            /* NOTE(review): error return elided */

        onstack = uctx64.uc_onstack & 01;
        /* Restore the blocked-signal mask, stripping the un-maskable
         * signals (`sigcantmask`). */
        ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;

        ts_flavor = x86_THREAD_STATE64;
        ts_count = x86_THREAD_STATE64_COUNT;
        ts = (void *)&mctx.mctx64.ss;

        fs_flavor = x86_FLOAT_STATE64;
        fs_count = x86_FLOAT_STATE64_COUNT;
        fs = (void *)&mctx.mctx64.fs;
    /* NOTE(review): `} else {` (32-bit path) elided */
        struct user_ucontext32 uctx32;

        if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
            /* NOTE(review): error return elided */
        if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)&mctx.mctx32, sizeof (struct mcontext32))))
            /* NOTE(review): error return elided */

        onstack = uctx32.uc_onstack & 01;
        ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

        ts_flavor = x86_THREAD_STATE32;
        ts_count = x86_THREAD_STATE32_COUNT;
        ts = (void *)&mctx.mctx32.ss;

        fs_flavor = x86_FLOAT_STATE32;
        fs_count = x86_FLOAT_STATE32_COUNT;
        fs = (void *)&mctx.mctx32.fs;

    /* Propagate the saved on-stack bit back to the thread.
     * NOTE(review): the `if (onstack)` / `else` markers selecting
     * between these two statements are elided. */
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;

    /* Newly unblocked pending signals need an AST to get delivered. */
    if (ut->uu_siglist & ~ut->uu_sigmask)
        signal_setast(thread);

    /*
     * thread_set_state() does all the needed checks for the passed in
     * state before installing it.
     */
    if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS)
        /* NOTE(review): error return elided */
    ml_fp_setvalid(TRUE);

    if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS)
        /* NOTE(review): error return elided */

    /* EJUSTRETURN: registers were set directly; skip syscall return fixup. */
    return (EJUSTRETURN);
725 * machine_exception() performs MD translation
726 * of a mach exception to a unix signal and code.
732 mach_exception_code_t code
,
733 __unused mach_exception_subcode_t subcode
,
735 mach_exception_code_t
*unix_code
)
741 /* Map GP fault to SIGSEGV, otherwise defer to caller */
742 if (code
== EXC_I386_GPFLT
) {
743 *unix_signal
= SIGSEGV
;
749 case EXC_BAD_INSTRUCTION
:
750 *unix_signal
= SIGILL
;
755 *unix_signal
= SIGFPE
;
760 if (code
== EXC_I386_BOUND
) {
762 * Map #BR, the Bound Range Exceeded exception, to
765 *unix_signal
= SIGTRAP
;