2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1992 NeXT, Inc.
32 * 13 May 1992 ? at NeXT
36 #include <mach/mach_types.h>
37 #include <mach/exception.h>
39 #include <kern/thread.h>
41 #include <sys/systm.h>
42 #include <sys/param.h>
43 #include <sys/proc_internal.h>
45 #include <sys/sysproto.h>
46 #include <sys/sysent.h>
47 #include <sys/ucontext.h>
49 #include <mach/thread_act.h> /* for thread_abort_safely */
50 #include <mach/thread_status.h>
52 #include <i386/eflags.h>
54 #include <i386/machine_routines.h>
57 #include <machine/pal_routines.h>
59 #include <sys/kdebug.h>
64 extern boolean_t
machine_exception(int, mach_exception_code_t
,
65 mach_exception_subcode_t
, int *, mach_exception_subcode_t
*);
66 extern kern_return_t
thread_getstatus(register thread_t act
, int flavor
,
67 thread_state_t tstate
, mach_msg_type_number_t
*count
);
68 extern kern_return_t
thread_setstatus(thread_t thread
, int flavor
,
69 thread_state_t tstate
, mach_msg_type_number_t count
);
71 /* Signal handler flavors supported */
72 /* These defns should match the Libc implmn */
75 #define UC_SET_ALT_STACK 0x40000000
76 #define UC_RESET_ALT_STACK 0x80000000
78 #define C_32_STK_ALIGN 16
79 #define C_64_STK_ALIGN 16
80 #define C_64_REDZONE_LEN 128
81 #define TRUNC_DOWN32(a,c) ((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
82 #define TRUNC_DOWN64(a,c) ((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
96 user32_addr_t catcher
; /* sig_t */
99 user32_addr_t sinfo
; /* siginfo32_t* */
100 user32_addr_t uctx
; /* struct ucontext32 */
104 * NOTE: Source and target may *NOT* overlap!
107 siginfo_user_to_user32(user_siginfo_t
*in
, user32_siginfo_t
*out
)
109 out
->si_signo
= in
->si_signo
;
110 out
->si_errno
= in
->si_errno
;
111 out
->si_code
= in
->si_code
;
112 out
->si_pid
= in
->si_pid
;
113 out
->si_uid
= in
->si_uid
;
114 out
->si_status
= in
->si_status
;
115 out
->si_addr
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_addr
);
116 /* following cast works for sival_int because of padding */
117 out
->si_value
.sival_ptr
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_value
.sival_ptr
);
118 out
->si_band
= in
->si_band
; /* range reduction */
119 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
123 siginfo_user_to_user64(user_siginfo_t
*in
, user64_siginfo_t
*out
)
125 out
->si_signo
= in
->si_signo
;
126 out
->si_errno
= in
->si_errno
;
127 out
->si_code
= in
->si_code
;
128 out
->si_pid
= in
->si_pid
;
129 out
->si_uid
= in
->si_uid
;
130 out
->si_status
= in
->si_status
;
131 out
->si_addr
= in
->si_addr
;
132 out
->si_value
.sival_ptr
= in
->si_value
.sival_ptr
;
133 out
->si_band
= in
->si_band
; /* range reduction */
134 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
138 sendsig(struct proc
*p
, user_addr_t ua_catcher
, int sig
, int mask
, __unused
uint32_t code
)
141 struct mcontext_avx32 mctx_avx32
;
142 struct mcontext_avx64 mctx_avx64
;
143 } mctx_store
, *mctxp
= &mctx_store
;
149 user_addr_t ua_uctxp
;
150 user_addr_t ua_mctxp
;
151 user_siginfo_t sinfo64
;
153 struct sigacts
*ps
= p
->p_sigacts
;
154 int oonstack
, flavor
;
155 user_addr_t trampact
;
158 mach_msg_type_number_t state_count
;
163 int infostyle
= UC_TRAD
;
166 thread
= current_thread();
167 ut
= get_bsdthread_info(thread
);
169 if (p
->p_sigacts
->ps_siginfo
& sigmask(sig
))
170 infostyle
= UC_FLAVOR
;
172 oonstack
= ut
->uu_sigstk
.ss_flags
& SA_ONSTACK
;
173 trampact
= ps
->ps_trampact
[sig
];
174 sigonstack
= (ps
->ps_sigonstack
& sigmask(sig
));
181 bzero((caddr_t
)&sinfo64
, sizeof(sinfo64
));
182 sinfo64
.si_signo
= sig
;
184 bzero(mctxp
, sizeof(*mctxp
));
185 sig_avx
= ml_fpu_avx_enabled();
187 if (proc_is64bit(p
)) {
188 x86_thread_state64_t
*tstate64
;
189 struct user_ucontext64 uctx64
;
191 flavor
= x86_THREAD_STATE64
;
192 state_count
= x86_THREAD_STATE64_COUNT
;
193 state
= (void *)&mctxp
->mctx_avx64
.ss
;
194 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
198 flavor
= x86_AVX_STATE64
;
199 state_count
= x86_AVX_STATE64_COUNT
;
202 flavor
= x86_FLOAT_STATE64
;
203 state_count
= x86_FLOAT_STATE64_COUNT
;
205 state
= (void *)&mctxp
->mctx_avx64
.fs
;
206 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
209 flavor
= x86_EXCEPTION_STATE64
;
210 state_count
= x86_EXCEPTION_STATE64_COUNT
;
211 state
= (void *)&mctxp
->mctx_avx64
.es
;
212 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
215 tstate64
= &mctxp
->mctx_avx64
.ss
;
217 /* figure out where our new stack lives */
218 if ((ut
->uu_flag
& UT_ALTSTACK
) && !oonstack
&&
220 ua_sp
= ut
->uu_sigstk
.ss_sp
;
221 stack_size
= ut
->uu_sigstk
.ss_size
;
223 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
225 ua_sp
= tstate64
->rsp
;
227 ua_cr2
= mctxp
->mctx_avx64
.es
.faultvaddr
;
229 /* The x86_64 ABI defines a 128-byte red zone. */
230 ua_sp
-= C_64_REDZONE_LEN
;
232 ua_sp
-= sizeof (struct user_ucontext64
);
233 ua_uctxp
= ua_sp
; // someone tramples the first word!
235 ua_sp
-= sizeof (user64_siginfo_t
);
238 ua_sp
-= sizeof (struct mcontext_avx64
);
242 * Align the frame and stack pointers to 16 bytes for SSE.
243 * (Note that we use 'ua_fp' as the base of the stack going forward)
245 ua_fp
= TRUNC_DOWN64(ua_sp
, C_64_STK_ALIGN
);
248 * But we need to account for the return address so the alignment is
249 * truly "correct" at _sigtramp
251 ua_fp
-= sizeof(user_addr_t
);
254 * Build the signal context to be used by sigreturn.
256 bzero(&uctx64
, sizeof(uctx64
));
258 uctx64
.uc_onstack
= oonstack
;
259 uctx64
.uc_sigmask
= mask
;
260 uctx64
.uc_stack
.ss_sp
= ua_fp
;
261 uctx64
.uc_stack
.ss_size
= stack_size
;
264 uctx64
.uc_stack
.ss_flags
|= SS_ONSTACK
;
267 uctx64
.uc_mcsize
= sig_avx
? sizeof(struct mcontext_avx64
) : sizeof(struct mcontext64
);
268 uctx64
.uc_mcontext64
= ua_mctxp
;
270 if (copyout((caddr_t
)&uctx64
, ua_uctxp
, sizeof (uctx64
)))
273 if (copyout((caddr_t
)&mctxp
->mctx_avx64
, ua_mctxp
, sizeof (struct mcontext_avx64
)))
276 sinfo64
.pad
[0] = tstate64
->rsp
;
277 sinfo64
.si_addr
= tstate64
->rip
;
279 tstate64
->rip
= trampact
;
280 tstate64
->rsp
= ua_fp
;
281 tstate64
->rflags
= get_eflags_exportmask();
283 * JOE - might not need to set these
285 tstate64
->cs
= USER64_CS
;
286 tstate64
->fs
= NULL_SEG
;
287 tstate64
->gs
= USER_CTHREAD
;
290 * Build the argument list for the signal handler.
291 * Handler should call sigreturn to get out of it
293 tstate64
->rdi
= ua_catcher
;
294 tstate64
->rsi
= infostyle
;
296 tstate64
->rcx
= ua_sip
;
297 tstate64
->r8
= ua_uctxp
;
300 x86_thread_state32_t
*tstate32
;
301 struct user_ucontext32 uctx32
;
302 struct sigframe32 frame32
;
304 flavor
= x86_THREAD_STATE32
;
305 state_count
= x86_THREAD_STATE32_COUNT
;
306 state
= (void *)&mctxp
->mctx_avx32
.ss
;
307 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
311 flavor
= x86_AVX_STATE32
;
312 state_count
= x86_AVX_STATE32_COUNT
;
315 flavor
= x86_FLOAT_STATE32
;
316 state_count
= x86_FLOAT_STATE32_COUNT
;
319 state
= (void *)&mctxp
->mctx_avx32
.fs
;
320 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
323 flavor
= x86_EXCEPTION_STATE32
;
324 state_count
= x86_EXCEPTION_STATE32_COUNT
;
325 state
= (void *)&mctxp
->mctx_avx32
.es
;
326 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
329 tstate32
= &mctxp
->mctx_avx32
.ss
;
331 /* figure out where our new stack lives */
332 if ((ut
->uu_flag
& UT_ALTSTACK
) && !oonstack
&&
334 ua_sp
= ut
->uu_sigstk
.ss_sp
;
335 stack_size
= ut
->uu_sigstk
.ss_size
;
337 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
339 ua_sp
= tstate32
->esp
;
341 ua_cr2
= mctxp
->mctx_avx32
.es
.faultvaddr
;
343 ua_sp
-= sizeof (struct user_ucontext32
);
344 ua_uctxp
= ua_sp
; // someone tramples the first word!
346 ua_sp
-= sizeof (user32_siginfo_t
);
349 ua_sp
-= sizeof (struct mcontext_avx32
);
352 ua_sp
-= sizeof (struct sigframe32
);
356 * Align the frame and stack pointers to 16 bytes for SSE.
357 * (Note that we use 'fp' as the base of the stack going forward)
359 ua_fp
= TRUNC_DOWN32(ua_fp
, C_32_STK_ALIGN
);
362 * But we need to account for the return address so the alignment is
363 * truly "correct" at _sigtramp
365 ua_fp
-= sizeof(frame32
.retaddr
);
368 * Build the argument list for the signal handler.
369 * Handler should call sigreturn to get out of it
371 frame32
.retaddr
= -1;
372 frame32
.sigstyle
= infostyle
;
374 frame32
.catcher
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_catcher
);
375 frame32
.sinfo
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_sip
);
376 frame32
.uctx
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_uctxp
);
378 if (copyout((caddr_t
)&frame32
, ua_fp
, sizeof (frame32
)))
382 * Build the signal context to be used by sigreturn.
384 bzero(&uctx32
, sizeof(uctx32
));
386 uctx32
.uc_onstack
= oonstack
;
387 uctx32
.uc_sigmask
= mask
;
388 uctx32
.uc_stack
.ss_sp
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_fp
);
389 uctx32
.uc_stack
.ss_size
= stack_size
;
392 uctx32
.uc_stack
.ss_flags
|= SS_ONSTACK
;
395 uctx32
.uc_mcsize
= sig_avx
? sizeof(struct mcontext_avx32
) : sizeof(struct mcontext32
);
397 uctx32
.uc_mcontext
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_mctxp
);
399 if (copyout((caddr_t
)&uctx32
, ua_uctxp
, sizeof (uctx32
)))
402 if (copyout((caddr_t
)&mctxp
->mctx_avx32
, ua_mctxp
, sizeof (struct mcontext_avx32
)))
405 sinfo64
.pad
[0] = tstate32
->esp
;
406 sinfo64
.si_addr
= tstate32
->eip
;
411 switch (ut
->uu_code
) {
413 sinfo64
.si_code
= ILL_ILLOPC
;
416 sinfo64
.si_code
= ILL_NOOP
;
420 #define FP_IE 0 /* Invalid operation */
421 #define FP_DE 1 /* Denormalized operand */
422 #define FP_ZE 2 /* Zero divide */
423 #define FP_OE 3 /* overflow */
424 #define FP_UE 4 /* underflow */
425 #define FP_PE 5 /* precision */
426 if (ut
->uu_code
== EXC_I386_DIV
) {
427 sinfo64
.si_code
= FPE_INTDIV
;
429 else if (ut
->uu_code
== EXC_I386_INTO
) {
430 sinfo64
.si_code
= FPE_INTOVF
;
432 else if (ut
->uu_subcode
& (1 << FP_ZE
)) {
433 sinfo64
.si_code
= FPE_FLTDIV
;
434 } else if (ut
->uu_subcode
& (1 << FP_OE
)) {
435 sinfo64
.si_code
= FPE_FLTOVF
;
436 } else if (ut
->uu_subcode
& (1 << FP_UE
)) {
437 sinfo64
.si_code
= FPE_FLTUND
;
438 } else if (ut
->uu_subcode
& (1 << FP_PE
)) {
439 sinfo64
.si_code
= FPE_FLTRES
;
440 } else if (ut
->uu_subcode
& (1 << FP_IE
)) {
441 sinfo64
.si_code
= FPE_FLTINV
;
443 sinfo64
.si_code
= FPE_NOOP
;
447 sinfo64
.si_code
= BUS_ADRERR
;
448 sinfo64
.si_addr
= ua_cr2
;
451 sinfo64
.si_code
= TRAP_BRKPT
;
454 sinfo64
.si_addr
= ua_cr2
;
456 switch (ut
->uu_code
) {
458 /* CR2 is meaningless after GP fault */
459 /* XXX namespace clash! */
460 sinfo64
.si_addr
= 0ULL;
463 case KERN_PROTECTION_FAILURE
:
464 sinfo64
.si_code
= SEGV_ACCERR
;
466 case KERN_INVALID_ADDRESS
:
467 sinfo64
.si_code
= SEGV_MAPERR
;
470 sinfo64
.si_code
= FPE_NOOP
;
475 int status_and_exitcode
;
478 * All other signals need to fill out a minimum set of
479 * information for the siginfo structure passed into
480 * the signal handler, if SA_SIGINFO was specified.
482 * p->si_status actually contains both the status and
483 * the exit code; we save it off in its own variable
484 * for later breakdown.
487 sinfo64
.si_pid
= p
->si_pid
;
489 status_and_exitcode
= p
->si_status
;
491 sinfo64
.si_uid
= p
->si_uid
;
493 sinfo64
.si_code
= p
->si_code
;
496 if (sinfo64
.si_code
== CLD_EXITED
) {
497 if (WIFEXITED(status_and_exitcode
))
498 sinfo64
.si_code
= CLD_EXITED
;
499 else if (WIFSIGNALED(status_and_exitcode
)) {
500 if (WCOREDUMP(status_and_exitcode
)) {
501 sinfo64
.si_code
= CLD_DUMPED
;
502 status_and_exitcode
= W_EXITCODE(status_and_exitcode
,status_and_exitcode
);
504 sinfo64
.si_code
= CLD_KILLED
;
505 status_and_exitcode
= W_EXITCODE(status_and_exitcode
,status_and_exitcode
);
510 * The recorded status contains the exit code and the
511 * signal information, but the information to be passed
512 * in the siginfo to the handler is supposed to only
513 * contain the status, so we have to shift it out.
515 sinfo64
.si_status
= WEXITSTATUS(status_and_exitcode
);
519 if (proc_is64bit(p
)) {
520 user64_siginfo_t sinfo64_user64
;
522 bzero((caddr_t
)&sinfo64_user64
, sizeof(sinfo64_user64
));
524 siginfo_user_to_user64(&sinfo64
,&sinfo64_user64
);
527 bzero((caddr_t
)&(ut
->t_dtrace_siginfo
), sizeof(ut
->t_dtrace_siginfo
));
529 ut
->t_dtrace_siginfo
.si_signo
= sinfo64
.si_signo
;
530 ut
->t_dtrace_siginfo
.si_code
= sinfo64
.si_code
;
531 ut
->t_dtrace_siginfo
.si_pid
= sinfo64
.si_pid
;
532 ut
->t_dtrace_siginfo
.si_uid
= sinfo64
.si_uid
;
533 ut
->t_dtrace_siginfo
.si_status
= sinfo64
.si_status
;
534 /* XXX truncates faulting address to void * on K32 */
535 ut
->t_dtrace_siginfo
.si_addr
= CAST_DOWN(void *, sinfo64
.si_addr
);
537 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
539 case SIGILL
: case SIGBUS
: case SIGSEGV
: case SIGFPE
: case SIGTRAP
:
540 DTRACE_PROC2(fault
, int, (int)(ut
->uu_code
), siginfo_t
*, &(ut
->t_dtrace_siginfo
));
546 /* XXX truncates catcher address to uintptr_t */
547 DTRACE_PROC3(signal__handle
, int, sig
, siginfo_t
*, &(ut
->t_dtrace_siginfo
),
548 void (*)(void), CAST_DOWN(sig_t
, ua_catcher
));
549 #endif /* CONFIG_DTRACE */
551 if (copyout((caddr_t
)&sinfo64_user64
, ua_sip
, sizeof (sinfo64_user64
)))
554 flavor
= x86_THREAD_STATE64
;
555 state_count
= x86_THREAD_STATE64_COUNT
;
556 state
= (void *)&mctxp
->mctx_avx64
.ss
;
558 x86_thread_state32_t
*tstate32
;
559 user32_siginfo_t sinfo32
;
561 bzero((caddr_t
)&sinfo32
, sizeof(sinfo32
));
563 siginfo_user_to_user32(&sinfo64
,&sinfo32
);
566 bzero((caddr_t
)&(ut
->t_dtrace_siginfo
), sizeof(ut
->t_dtrace_siginfo
));
568 ut
->t_dtrace_siginfo
.si_signo
= sinfo32
.si_signo
;
569 ut
->t_dtrace_siginfo
.si_code
= sinfo32
.si_code
;
570 ut
->t_dtrace_siginfo
.si_pid
= sinfo32
.si_pid
;
571 ut
->t_dtrace_siginfo
.si_uid
= sinfo32
.si_uid
;
572 ut
->t_dtrace_siginfo
.si_status
= sinfo32
.si_status
;
573 ut
->t_dtrace_siginfo
.si_addr
= CAST_DOWN(void *, sinfo32
.si_addr
);
575 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
577 case SIGILL
: case SIGBUS
: case SIGSEGV
: case SIGFPE
: case SIGTRAP
:
578 DTRACE_PROC2(fault
, int, (int)(ut
->uu_code
), siginfo_t
*, &(ut
->t_dtrace_siginfo
));
584 DTRACE_PROC3(signal__handle
, int, sig
, siginfo_t
*, &(ut
->t_dtrace_siginfo
),
585 void (*)(void), CAST_DOWN(sig_t
, ua_catcher
));
586 #endif /* CONFIG_DTRACE */
588 if (copyout((caddr_t
)&sinfo32
, ua_sip
, sizeof (sinfo32
)))
591 tstate32
= &mctxp
->mctx_avx32
.ss
;
593 tstate32
->eip
= CAST_DOWN_EXPLICIT(user32_addr_t
, trampact
);
594 tstate32
->esp
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_fp
);
596 tstate32
->eflags
= get_eflags_exportmask();
598 tstate32
->cs
= USER_CS
;
599 tstate32
->ss
= USER_DS
;
600 tstate32
->ds
= USER_DS
;
601 tstate32
->es
= USER_DS
;
602 tstate32
->fs
= NULL_SEG
;
603 tstate32
->gs
= USER_CTHREAD
;
605 flavor
= x86_THREAD_STATE32
;
606 state_count
= x86_THREAD_STATE32_COUNT
;
607 state
= (void *)tstate32
;
609 if (thread_setstatus(thread
, flavor
, (thread_state_t
)state
, state_count
) != KERN_SUCCESS
)
611 ml_fp_setvalid(FALSE
);
613 /* Tell the PAL layer about the signal */
614 pal_set_signal_delivery( thread
);
623 SIGACTION(p
, SIGILL
) = SIG_DFL
;
624 sig
= sigmask(SIGILL
);
625 p
->p_sigignore
&= ~sig
;
626 p
->p_sigcatch
&= ~sig
;
627 ut
->uu_sigmask
&= ~sig
;
628 /* sendsig is called with signal lock held */
630 psignal_locked(p
, SIGILL
);
636 * System call to cleanup state after a signal
637 * has been taken. Reset signal mask and
638 * stack state from context left by sendsig (above).
639 * Return to previous pc and psl as specified by
640 * context left by sendsig. Check carefully to
641 * make sure that the user has not modified the
642 * psl to gain improper priviledges or to cause
647 sigreturn(struct proc
*p
, struct sigreturn_args
*uap
, __unused
int *retval
)
650 struct mcontext_avx32 mctx_avx32
;
651 struct mcontext_avx64 mctx_avx64
;
652 } mctx_store
, *mctxp
= &mctx_store
;
654 thread_t thread
= current_thread();
659 mach_msg_type_number_t ts_count
;
660 unsigned int ts_flavor
;
662 mach_msg_type_number_t fs_count
;
663 unsigned int fs_flavor
;
665 int rval
= EJUSTRETURN
;
668 ut
= (struct uthread
*)get_bsdthread_info(thread
);
671 * If we are being asked to change the altstack flag on the thread, we
672 * just set/reset it and return (the uap->uctx is not used).
674 if ((unsigned int)uap
->infostyle
== UC_SET_ALT_STACK
) {
675 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
677 } else if ((unsigned int)uap
->infostyle
== UC_RESET_ALT_STACK
) {
678 ut
->uu_sigstk
.ss_flags
&= ~SA_ONSTACK
;
682 bzero(mctxp
, sizeof(*mctxp
));
683 sig_avx
= ml_fpu_avx_enabled();
685 if (proc_is64bit(p
)) {
686 struct user_ucontext64 uctx64
;
688 if ((error
= copyin(uap
->uctx
, (void *)&uctx64
, sizeof (uctx64
))))
691 if ((error
= copyin(uctx64
.uc_mcontext64
, (void *)&mctxp
->mctx_avx64
, sizeof (struct mcontext_avx64
))))
694 onstack
= uctx64
.uc_onstack
& 01;
695 ut
->uu_sigmask
= uctx64
.uc_sigmask
& ~sigcantmask
;
697 ts_flavor
= x86_THREAD_STATE64
;
698 ts_count
= x86_THREAD_STATE64_COUNT
;
699 ts
= (void *)&mctxp
->mctx_avx64
.ss
;
702 fs_flavor
= x86_AVX_STATE64
;
703 fs_count
= x86_AVX_STATE64_COUNT
;
706 fs_flavor
= x86_FLOAT_STATE64
;
707 fs_count
= x86_FLOAT_STATE64_COUNT
;
710 fs
= (void *)&mctxp
->mctx_avx64
.fs
;
713 struct user_ucontext32 uctx32
;
715 if ((error
= copyin(uap
->uctx
, (void *)&uctx32
, sizeof (uctx32
))))
718 if ((error
= copyin(CAST_USER_ADDR_T(uctx32
.uc_mcontext
), (void *)&mctxp
->mctx_avx32
, sizeof (struct mcontext_avx32
))))
721 onstack
= uctx32
.uc_onstack
& 01;
722 ut
->uu_sigmask
= uctx32
.uc_sigmask
& ~sigcantmask
;
724 ts_flavor
= x86_THREAD_STATE32
;
725 ts_count
= x86_THREAD_STATE32_COUNT
;
726 ts
= (void *)&mctxp
->mctx_avx32
.ss
;
729 fs_flavor
= x86_AVX_STATE32
;
730 fs_count
= x86_AVX_STATE32_COUNT
;
733 fs_flavor
= x86_FLOAT_STATE32
;
734 fs_count
= x86_FLOAT_STATE32_COUNT
;
737 fs
= (void *)&mctxp
->mctx_avx32
.fs
;
741 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
743 ut
->uu_sigstk
.ss_flags
&= ~SA_ONSTACK
;
745 if (ut
->uu_siglist
& ~ut
->uu_sigmask
)
746 signal_setast(thread
);
748 * thread_set_state() does all the needed checks for the passed in
751 if (thread_setstatus(thread
, ts_flavor
, ts
, ts_count
) != KERN_SUCCESS
) {
756 ml_fp_setvalid(TRUE
);
758 if (thread_setstatus(thread
, fs_flavor
, fs
, fs_count
) != KERN_SUCCESS
) {
769 * machine_exception() performs MD translation
770 * of a mach exception to a unix signal and code.
776 mach_exception_code_t code
,
777 __unused mach_exception_subcode_t subcode
,
779 mach_exception_code_t
*unix_code
)
785 /* Map GP fault to SIGSEGV, otherwise defer to caller */
786 if (code
== EXC_I386_GPFLT
) {
787 *unix_signal
= SIGSEGV
;
793 case EXC_BAD_INSTRUCTION
:
794 *unix_signal
= SIGILL
;
799 *unix_signal
= SIGFPE
;
804 if (code
== EXC_I386_BOUND
) {
806 * Map #BR, the Bound Range Exceeded exception, to
809 *unix_signal
= SIGTRAP
;