2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1992 NeXT, Inc.
32 * 13 May 1992 ? at NeXT
36 #include <mach/mach_types.h>
37 #include <mach/exception.h>
39 #include <kern/thread.h>
41 #include <sys/systm.h>
42 #include <sys/param.h>
43 #include <sys/proc_internal.h>
45 #include <sys/sysproto.h>
46 #include <sys/sysent.h>
47 #include <sys/ucontext.h>
49 #include <mach/thread_act.h> /* for thread_abort_safely */
50 #include <mach/thread_status.h>
52 #include <i386/eflags.h>
54 #include <i386/machine_routines.h>
57 #include <machine/pal_routines.h>
59 #include <sys/kdebug.h>
64 extern boolean_t
machine_exception(int, mach_exception_code_t
,
65 mach_exception_subcode_t
, int *, mach_exception_subcode_t
*);
66 extern kern_return_t
thread_getstatus(register thread_t act
, int flavor
,
67 thread_state_t tstate
, mach_msg_type_number_t
*count
);
68 extern kern_return_t
thread_setstatus(thread_t thread
, int flavor
,
69 thread_state_t tstate
, mach_msg_type_number_t count
);
/* Signal handler flavors supported */
/* These defns should match the Libc implmn */
#define UC_SET_ALT_STACK    0x40000000
#define UC_RESET_ALT_STACK  0x80000000

/* Stack-frame alignment and the x86_64 ABI red zone (in bytes). */
#define C_32_STK_ALIGN      16
#define C_64_STK_ALIGN      16
#define C_64_REDZONE_LEN    128

/*
 * Subtract `c' from `a' and round the result down to a multiple of `c'
 * (`c' must be a power of two).  Used to carve aligned space off the top
 * of the user stack.
 *
 * Fix: the argument `a' is now fully parenthesized so that expression
 * arguments (casts, ternaries, arithmetic) expand correctly; previously
 * the cast bound only to the first token of `a' (CERT PRE01-C).
 */
#define TRUNC_DOWN32(a,c)   ((((uint32_t)(a))-(c)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a,c)   ((((uint64_t)(a))-(c)) & ((uint64_t)(-(c))))
85 * Send an interrupt to process.
87 * Stack is set up to allow sigcode stored
88 * in u. to call routine, followed by chmk
89 * to sigreturn routine below. After sigreturn
90 * resets the signal mask, the stack, the frame
91 * pointer, and the argument pointer, it returns
92 * to the user specified pc, psl.
96 user32_addr_t catcher
; /* sig_t */
99 user32_addr_t sinfo
; /* siginfo32_t* */
100 user32_addr_t uctx
; /* struct ucontext32 */
104 * NOTE: Source and target may *NOT* overlap!
105 * XXX: Unify with bsd/kern/kern_exit.c
108 siginfo_user_to_user32_x86(user_siginfo_t
*in
, user32_siginfo_t
*out
)
110 out
->si_signo
= in
->si_signo
;
111 out
->si_errno
= in
->si_errno
;
112 out
->si_code
= in
->si_code
;
113 out
->si_pid
= in
->si_pid
;
114 out
->si_uid
= in
->si_uid
;
115 out
->si_status
= in
->si_status
;
116 out
->si_addr
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_addr
);
117 /* following cast works for sival_int because of padding */
118 out
->si_value
.sival_ptr
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_value
.sival_ptr
);
119 out
->si_band
= in
->si_band
; /* range reduction */
120 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
124 siginfo_user_to_user64_x86(user_siginfo_t
*in
, user64_siginfo_t
*out
)
126 out
->si_signo
= in
->si_signo
;
127 out
->si_errno
= in
->si_errno
;
128 out
->si_code
= in
->si_code
;
129 out
->si_pid
= in
->si_pid
;
130 out
->si_uid
= in
->si_uid
;
131 out
->si_status
= in
->si_status
;
132 out
->si_addr
= in
->si_addr
;
133 out
->si_value
.sival_ptr
= in
->si_value
.sival_ptr
;
134 out
->si_band
= in
->si_band
; /* range reduction */
135 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
/*
 * sendsig -- deliver signal `sig' to process `p'.
 *
 * Snapshots the current thread's machine state (integer, FP/AVX and
 * exception state), builds a ucontext + siginfo + mcontext frame on the
 * user stack (on the alternate signal stack when requested and not
 * already there), fills in signal-specific siginfo fields, then rewrites
 * the thread state so that, on return to user space, execution resumes
 * at the user-mode signal trampoline with the handler's arguments in
 * place.  On unrecoverable error the process is forcibly killed with
 * SIGILL (the `bad:' path at the bottom).
 *
 * NOTE(review): this extract is incomplete.  Missing from the capture:
 * the function's return-type line, the `union {' opening of mctx_store,
 * several local declarations (thread, ut, ua_sp, ua_fp, ua_cr2, ua_sip,
 * stack_size, sigonstack, sig_avx, state, ...), most braces, the
 * `goto bad' / `goto bad;' error paths after each failed call, the
 * `if (sig_avx) ... else' selectors around the FP-flavor pairs, the
 * switch case labels (SIGILL/SIGFPE/SIGBUS/SIGTRAP/SIGSEGV/default),
 * the `#if CONFIG_DTRACE' guards matching the visible `#endif's, and the
 * `ua_sip'/`ua_mctxp'/`ua_fp = ua_sp' assignments.  Code tokens below
 * are preserved exactly as captured; only comments have been added.
 */
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
    /* Scratch machine-context storage; the 32- and 64-bit variants
     * overlay each other (the enclosing `union {' is not visible here). */
    struct mcontext_avx32 mctx_avx32;
    struct mcontext_avx64 mctx_avx64;
} mctx_store, *mctxp = &mctx_store;
user_addr_t ua_uctxp;               /* user address of the ucontext */
user_addr_t ua_mctxp;               /* user address of the mcontext */
user_siginfo_t sinfo64;             /* kernel-side siginfo, converted for user later */
struct sigacts *ps = p->p_sigacts;
int oonstack, flavor;
user_addr_t trampact;               /* user-mode trampoline entry point */
mach_msg_type_number_t state_count;
int infostyle = UC_TRAD;            /* default to the old-style handler ABI */
thread = current_thread();
ut = get_bsdthread_info(thread);
/* SA_SIGINFO handlers get the richer UC_FLAVOR frame. */
if (p->p_sigacts->ps_siginfo & sigmask(sig))
    infostyle = UC_FLAVOR;
oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
trampact = ps->ps_trampact[sig];
sigonstack = (ps->ps_sigonstack & sigmask(sig));
/* Start from a zeroed siginfo; only relevant fields are filled below. */
bzero((caddr_t)&sinfo64, sizeof(sinfo64));
sinfo64.si_signo = sig;
bzero(mctxp, sizeof(*mctxp));
/* Selects AVX vs. plain-FP save area layout for this CPU. */
sig_avx = ml_fpu_avx_enabled();
if (proc_is64bit(p)) {
    x86_thread_state64_t *tstate64;
    struct user_ucontext64 uctx64;
    /* Snapshot the integer register state. */
    flavor = x86_THREAD_STATE64;
    state_count = x86_THREAD_STATE64_COUNT;
    state = (void *)&mctxp->mctx_avx64.ss;
    if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
    /* NOTE(review): `goto bad' body of the check above is missing. */
    /* Snapshot FP/vector state; flavor pair chosen by sig_avx
     * (the selecting if/else is missing from this extract). */
    flavor = x86_AVX_STATE64;
    state_count = x86_AVX_STATE64_COUNT;
    flavor = x86_FLOAT_STATE64;
    state_count = x86_FLOAT_STATE64_COUNT;
    state = (void *)&mctxp->mctx_avx64.fs;
    if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
    /* Snapshot exception state (trap number, fault address). */
    flavor = x86_EXCEPTION_STATE64;
    state_count = x86_EXCEPTION_STATE64_COUNT;
    state = (void *)&mctxp->mctx_avx64.es;
    if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
    tstate64 = &mctxp->mctx_avx64.ss;
    /* figure out where our new stack lives */
    if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
        /* Use the alternate signal stack and mark it in use. */
        ua_sp = ut->uu_sigstk.ss_sp;
        stack_size = ut->uu_sigstk.ss_size;
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
    /* else: deliver on the interrupted thread's own stack. */
    ua_sp = tstate64->rsp;
    ua_cr2 = mctxp->mctx_avx64.es.faultvaddr;
    /* The x86_64 ABI defines a 128-byte red zone. */
    ua_sp -= C_64_REDZONE_LEN;
    /* Carve ucontext, siginfo and mcontext off the user stack, top down. */
    ua_sp -= sizeof (struct user_ucontext64);
    ua_uctxp = ua_sp;        // someone tramples the first word!
    ua_sp -= sizeof (user64_siginfo_t);
    ua_sp -= sizeof (struct mcontext_avx64);
    /*
     * Align the frame and stack pointers to 16 bytes for SSE.
     * (Note that we use 'ua_fp' as the base of the stack going forward)
     */
    ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);
    /*
     * But we need to account for the return address so the alignment is
     * truly "correct" at _sigtramp
     */
    ua_fp -= sizeof(user_addr_t);
    /*
     * Build the signal context to be used by sigreturn.
     */
    bzero(&uctx64, sizeof(uctx64));
    uctx64.uc_onstack = oonstack;
    uctx64.uc_sigmask = mask;
    uctx64.uc_stack.ss_sp = ua_fp;
    uctx64.uc_stack.ss_size = stack_size;
    /* Guarded by an `if (oonstack)' in the original (line missing here). */
    uctx64.uc_stack.ss_flags |= SS_ONSTACK;
    uctx64.uc_mcsize = sig_avx ? sizeof(struct mcontext_avx64) : sizeof(struct mcontext64);
    uctx64.uc_mcontext64 = ua_mctxp;
    /* Copy the ucontext and mcontext out to the user stack. */
    if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
    if (copyout((caddr_t)&mctxp->mctx_avx64, ua_mctxp, sizeof (struct mcontext_avx64)))
    /* Stash the interrupted SP/PC for the handler's siginfo. */
    sinfo64.pad[0] = tstate64->rsp;
    sinfo64.si_addr = tstate64->rip;
    /* Redirect the thread to the trampoline on the new frame. */
    tstate64->rip = trampact;
    tstate64->rsp = ua_fp;
    tstate64->rflags = get_eflags_exportmask();
    /*
     * JOE - might not need to set these
     */
    tstate64->cs = USER64_CS;
    tstate64->fs = NULL_SEG;
    tstate64->gs = USER_CTHREAD;
    /*
     * Build the argument list for the signal handler.
     * Handler should call sigreturn to get out of it
     * (SysV AMD64 args: rdi, rsi, rdx, rcx, r8).
     */
    tstate64->rdi = ua_catcher;
    tstate64->rsi = infostyle;
    tstate64->rcx = ua_sip;
    tstate64->r8 = ua_uctxp;
/* else: 32-bit process — same dance with ILP32 layouts. */
    x86_thread_state32_t *tstate32;
    struct user_ucontext32 uctx32;
    struct sigframe32 frame32;
    /* Snapshot integer state. */
    flavor = x86_THREAD_STATE32;
    state_count = x86_THREAD_STATE32_COUNT;
    state = (void *)&mctxp->mctx_avx32.ss;
    if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
    /* FP/vector state, AVX or plain depending on sig_avx. */
    flavor = x86_AVX_STATE32;
    state_count = x86_AVX_STATE32_COUNT;
    flavor = x86_FLOAT_STATE32;
    state_count = x86_FLOAT_STATE32_COUNT;
    state = (void *)&mctxp->mctx_avx32.fs;
    if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
    /* Exception state (trap number, fault address). */
    flavor = x86_EXCEPTION_STATE32;
    state_count = x86_EXCEPTION_STATE32_COUNT;
    state = (void *)&mctxp->mctx_avx32.es;
    if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
    tstate32 = &mctxp->mctx_avx32.ss;
    /* figure out where our new stack lives */
    if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
        ua_sp = ut->uu_sigstk.ss_sp;
        stack_size = ut->uu_sigstk.ss_size;
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
    ua_sp = tstate32->esp;
    ua_cr2 = mctxp->mctx_avx32.es.faultvaddr;
    /* Carve ucontext, siginfo, mcontext and the sigframe off the stack. */
    ua_sp -= sizeof (struct user_ucontext32);
    ua_uctxp = ua_sp;        // someone tramples the first word!
    ua_sp -= sizeof (user32_siginfo_t);
    ua_sp -= sizeof (struct mcontext_avx32);
    ua_sp -= sizeof (struct sigframe32);
    /*
     * Align the frame and stack pointers to 16 bytes for SSE.
     * (Note that we use 'fp' as the base of the stack going forward)
     * NOTE(review): the `ua_fp = ua_sp;' assignment preceding this
     * truncation is missing from the extract.
     */
    ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);
    /*
     * But we need to account for the return address so the alignment is
     * truly "correct" at _sigtramp
     */
    ua_fp -= sizeof(frame32.retaddr);
    /*
     * Build the argument list for the signal handler.
     * Handler should call sigreturn to get out of it
     * (i386: arguments passed in the sigframe on the stack).
     */
    frame32.retaddr = -1;                /* sentinel; handler must not return */
    frame32.sigstyle = infostyle;
    /* Narrow 64-bit kernel addresses into the 32-bit frame. */
    frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
    frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
    frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);
    if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
    /*
     * Build the signal context to be used by sigreturn.
     */
    bzero(&uctx32, sizeof(uctx32));
    uctx32.uc_onstack = oonstack;
    uctx32.uc_sigmask = mask;
    uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
    uctx32.uc_stack.ss_size = stack_size;
    /* Guarded by an `if (oonstack)' in the original (line missing here). */
    uctx32.uc_stack.ss_flags |= SS_ONSTACK;
    uctx32.uc_mcsize = sig_avx ? sizeof(struct mcontext_avx32) : sizeof(struct mcontext32);
    uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);
    if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
    if (copyout((caddr_t)&mctxp->mctx_avx32, ua_mctxp, sizeof (struct mcontext_avx32)))
    /* Record interrupted SP/PC for the siginfo. */
    sinfo64.pad[0] = tstate32->esp;
    sinfo64.si_addr = tstate32->eip;
/*
 * Fill in signal-specific siginfo fields.
 * NOTE(review): the `switch (sig)' and its case labels
 * (SIGILL/SIGFPE/SIGBUS/SIGTRAP/SIGSEGV/default) are missing from the
 * extract; only the per-case bodies survive below.
 */
/* SIGILL: classify from the Mach exception code. */
switch (ut->uu_code) {
    sinfo64.si_code = ILL_ILLOPC;
    sinfo64.si_code = ILL_NOOP;
/* SIGFPE: x87/SSE status-word bit positions used to decode uu_subcode. */
#define FP_IE 0 /* Invalid operation */
#define FP_DE 1 /* Denormalized operand */
#define FP_ZE 2 /* Zero divide */
#define FP_OE 3 /* overflow */
#define FP_UE 4 /* underflow */
#define FP_PE 5 /* precision */
if (ut->uu_code == EXC_I386_DIV) {
    sinfo64.si_code = FPE_INTDIV;
else if (ut->uu_code == EXC_I386_INTO) {
    sinfo64.si_code = FPE_INTOVF;
else if (ut->uu_subcode & (1 << FP_ZE)) {
    sinfo64.si_code = FPE_FLTDIV;
} else if (ut->uu_subcode & (1 << FP_OE)) {
    sinfo64.si_code = FPE_FLTOVF;
} else if (ut->uu_subcode & (1 << FP_UE)) {
    sinfo64.si_code = FPE_FLTUND;
} else if (ut->uu_subcode & (1 << FP_PE)) {
    sinfo64.si_code = FPE_FLTRES;
} else if (ut->uu_subcode & (1 << FP_IE)) {
    sinfo64.si_code = FPE_FLTINV;
    sinfo64.si_code = FPE_NOOP;
/* SIGBUS: fault address comes from CR2. */
    sinfo64.si_code = BUS_ADRERR;
    sinfo64.si_addr = ua_cr2;
/* SIGTRAP. */
    sinfo64.si_code = TRAP_BRKPT;
/* SIGSEGV: classify from the Mach VM return code. */
    sinfo64.si_addr = ua_cr2;
switch (ut->uu_code) {
    /* CR2 is meaningless after GP fault */
    /* XXX namespace clash! */
    sinfo64.si_addr = 0ULL;
case KERN_PROTECTION_FAILURE:
    sinfo64.si_code = SEGV_ACCERR;
case KERN_INVALID_ADDRESS:
    sinfo64.si_code = SEGV_MAPERR;
    sinfo64.si_code = FPE_NOOP;
/* default: */
    int status_and_exitcode;
    /*
     * All other signals need to fill out a minimum set of
     * information for the siginfo structure passed into
     * the signal handler, if SA_SIGINFO was specified.
     *
     * p->si_status actually contains both the status and
     * the exit code; we save it off in its own variable
     * for later breakdown.
     */
    sinfo64.si_pid = p->si_pid;
    status_and_exitcode = p->si_status;
    sinfo64.si_uid = p->si_uid;
    sinfo64.si_code = p->si_code;
    /* For SIGCHLD, refine the code from the recorded wait status. */
    if (sinfo64.si_code == CLD_EXITED) {
        if (WIFEXITED(status_and_exitcode))
            sinfo64.si_code = CLD_EXITED;
        else if (WIFSIGNALED(status_and_exitcode)) {
            if (WCOREDUMP(status_and_exitcode)) {
                sinfo64.si_code = CLD_DUMPED;
                status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                sinfo64.si_code = CLD_KILLED;
                status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
    /*
     * The recorded status contains the exit code and the
     * signal information, but the information to be passed
     * in the siginfo to the handler is supposed to only
     * contain the status, so we have to shift it out.
     */
    sinfo64.si_status = WEXITSTATUS(status_and_exitcode);
/* Convert the siginfo to the user's word size and copy it out. */
if (proc_is64bit(p)) {
    user64_siginfo_t sinfo64_user64;
    bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));
    siginfo_user_to_user64_x86(&sinfo64, &sinfo64_user64);
    /* DTrace gets its own copy of the siginfo (matching #if missing). */
    bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
    ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
    ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
    ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
    ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
    ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
    /* XXX truncates faulting address to void * on K32 */
    ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
    /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
    case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
        DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
    /* XXX truncates catcher address to uintptr_t */
    DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
        void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */
    if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof (sinfo64_user64)))
    /* Prepare to push the rewritten 64-bit thread state back. */
    flavor = x86_THREAD_STATE64;
    state_count = x86_THREAD_STATE64_COUNT;
    state = (void *)&mctxp->mctx_avx64.ss;
/* else: 32-bit user process. */
    x86_thread_state32_t *tstate32;
    user32_siginfo_t sinfo32;
    bzero((caddr_t)&sinfo32, sizeof(sinfo32));
    siginfo_user_to_user32_x86(&sinfo64, &sinfo32);
    /* DTrace copy of the 32-bit siginfo (matching #if missing). */
    bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
    ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo;
    ut->t_dtrace_siginfo.si_code = sinfo32.si_code;
    ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid;
    ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid;
    ut->t_dtrace_siginfo.si_status = sinfo32.si_status;
    ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr);
    /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
    case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
        DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
    DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
        void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */
    if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
    tstate32 = &mctxp->mctx_avx32.ss;
    /* Redirect the 32-bit thread to the trampoline on the new frame. */
    tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
    tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
    tstate32->eflags = get_eflags_exportmask();
    /* Reset segment registers to the standard user-mode selectors. */
    tstate32->cs = USER_CS;
    tstate32->ss = USER_DS;
    tstate32->ds = USER_DS;
    tstate32->es = USER_DS;
    tstate32->fs = NULL_SEG;
    tstate32->gs = USER_CTHREAD;
    flavor = x86_THREAD_STATE32;
    state_count = x86_THREAD_STATE32_COUNT;
    state = (void *)tstate32;
/* Install the redirected thread state; failure is fatal (bad path). */
if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
/* FP state was saved into the frame; mark the live FP state invalid. */
ml_fp_setvalid(FALSE);
/* Tell the PAL layer about the signal */
pal_set_signal_delivery( thread );
/*
 * bad: (label missing from extract) — delivery failed; force-default
 * SIGILL and kill the process with it so the failure is not silent.
 */
SIGACTION(p, SIGILL) = SIG_DFL;
sig = sigmask(SIGILL);
p->p_sigignore &= ~sig;
p->p_sigcatch &= ~sig;
ut->uu_sigmask &= ~sig;
/* sendsig is called with signal lock held */
psignal_locked(p, SIGILL);
637 * System call to cleanup state after a signal
638 * has been taken. Reset signal mask and
639 * stack state from context left by sendsig (above).
640 * Return to previous pc and psl as specified by
641 * context left by sendsig. Check carefully to
642 * make sure that the user has not modified the
643 * psl to gain improper priviledges or to cause
/*
 * sigreturn -- system call invoked by the signal trampoline when a
 * handler returns.  Copies the saved ucontext/mcontext back in from the
 * user stack, restores the signal mask and alternate-stack flag, and
 * reinstalls the saved thread state so execution resumes where the
 * signal interrupted it.  Also doubles as the set/reset-altstack
 * back door via the UC_SET_ALT_STACK / UC_RESET_ALT_STACK infostyles.
 *
 * NOTE(review): this extract is incomplete.  Missing: the return-type
 * line, the `union {' opening of mctx_store, declarations of error/ts/
 * fs/onstack/sig_avx, the `return' statements after the altstack cases,
 * the `goto bad' paths after the copyins, the `if (sig_avx) ... else'
 * selectors around the FP-flavor pairs, the `else' between the 64- and
 * 32-bit branches, the `if (onstack) ... else' around the ss_flags
 * updates, and the trailing error handling / `return(rval);'.  Code
 * tokens are preserved exactly as captured; only comments added.
 */
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
    /* Scratch machine context (union header not visible in extract). */
    struct mcontext_avx32 mctx_avx32;
    struct mcontext_avx64 mctx_avx64;
} mctx_store, *mctxp = &mctx_store;
thread_t thread = current_thread();
mach_msg_type_number_t ts_count;    /* integer thread-state count */
unsigned int ts_flavor;             /* integer thread-state flavor */
mach_msg_type_number_t fs_count;    /* FP-state count */
unsigned int fs_flavor;             /* FP-state flavor */
int rval = EJUSTRETURN;             /* registers restored; don't touch them */
ut = (struct uthread *)get_bsdthread_info(thread);
/*
 * If we are being asked to change the altstack flag on the thread, we
 * just set/reset it and return (the uap->uctx is not used).
 */
if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
    ut->uu_sigstk.ss_flags |= SA_ONSTACK;
} else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
    ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
bzero(mctxp, sizeof(*mctxp));
/* Must match the layout sendsig() used when saving. */
sig_avx = ml_fpu_avx_enabled();
if (proc_is64bit(p)) {
    struct user_ucontext64 uctx64;
    /* Pull in the ucontext, then the mcontext it points at. */
    if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
    if ((error = copyin(uctx64.uc_mcontext64, (void *)&mctxp->mctx_avx64, sizeof (struct mcontext_avx64))))
    onstack = uctx64.uc_onstack & 01;
    /* Restore the blocked-signal mask; never allow blocking KILL/STOP. */
    ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;
    ts_flavor = x86_THREAD_STATE64;
    ts_count = x86_THREAD_STATE64_COUNT;
    ts = (void *)&mctxp->mctx_avx64.ss;
    /* FP flavor pair; selecting if/else on sig_avx missing here. */
    fs_flavor = x86_AVX_STATE64;
    fs_count = x86_AVX_STATE64_COUNT;
    fs_flavor = x86_FLOAT_STATE64;
    fs_count = x86_FLOAT_STATE64_COUNT;
    fs = (void *)&mctxp->mctx_avx64.fs;
/* else: 32-bit process. */
    struct user_ucontext32 uctx32;
    if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
    if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)&mctxp->mctx_avx32, sizeof (struct mcontext_avx32))))
    onstack = uctx32.uc_onstack & 01;
    ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;
    ts_flavor = x86_THREAD_STATE32;
    ts_count = x86_THREAD_STATE32_COUNT;
    ts = (void *)&mctxp->mctx_avx32.ss;
    fs_flavor = x86_AVX_STATE32;
    fs_count = x86_AVX_STATE32_COUNT;
    fs_flavor = x86_FLOAT_STATE32;
    fs_count = x86_FLOAT_STATE32_COUNT;
    fs = (void *)&mctxp->mctx_avx32.fs;
/* Restore the altstack in-use flag (if (onstack)/else missing here). */
ut->uu_sigstk.ss_flags |= SA_ONSTACK;
ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
/* Unblocking may have made pending signals deliverable. */
if (ut->uu_siglist & ~ut->uu_sigmask)
    signal_setast(thread);
/*
 * thread_set_state() does all the needed checks for the passed in
 * content (so user-supplied register state cannot escalate privilege).
 */
if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS) {
/* FP state restored from the frame is now the live, valid state. */
ml_fp_setvalid(TRUE);
if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) {
770 * machine_exception() performs MD translation
771 * of a mach exception to a unix signal and code.
777 mach_exception_code_t code
,
778 __unused mach_exception_subcode_t subcode
,
780 mach_exception_code_t
*unix_code
)
786 /* Map GP fault to SIGSEGV, otherwise defer to caller */
787 if (code
== EXC_I386_GPFLT
) {
788 *unix_signal
= SIGSEGV
;
794 case EXC_BAD_INSTRUCTION
:
795 *unix_signal
= SIGILL
;
800 *unix_signal
= SIGFPE
;
805 if (code
== EXC_I386_BOUND
) {
807 * Map #BR, the Bound Range Exceeded exception, to
810 *unix_signal
= SIGTRAP
;