2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1992 NeXT, Inc.
32 * 13 May 1992 ? at NeXT
36 #include <mach/mach_types.h>
37 #include <mach/exception.h>
39 #include <kern/thread.h>
41 #include <sys/systm.h>
42 #include <sys/param.h>
43 #include <sys/proc_internal.h>
45 #include <sys/sysproto.h>
46 #include <sys/sysent.h>
47 #include <sys/ucontext.h>
49 #include <mach/thread_act.h> /* for thread_abort_safely */
50 #include <mach/thread_status.h>
52 #include <i386/eflags.h>
54 #include <i386/machine_routines.h>
58 #include <machine/pal_routines.h>
60 #include <sys/kdebug.h>
65 extern boolean_t
machine_exception(int, mach_exception_code_t
,
66 mach_exception_subcode_t
, int *, mach_exception_subcode_t
*);
67 extern kern_return_t
thread_getstatus(thread_t act
, int flavor
,
68 thread_state_t tstate
, mach_msg_type_number_t
*count
);
69 extern kern_return_t
thread_setstatus(thread_t thread
, int flavor
,
70 thread_state_t tstate
, mach_msg_type_number_t count
);
72 /* Signal handler flavors supported */
/* These definitions should match the Libc implementation */
76 #define UC_SET_ALT_STACK 0x40000000
77 #define UC_RESET_ALT_STACK 0x80000000
79 #define C_32_STK_ALIGN 16
80 #define C_64_STK_ALIGN 16
81 #define C_64_REDZONE_LEN 128
82 #define TRUNC_DOWN32(a,c) ((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
83 #define TRUNC_DOWN64(a,c) ((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
86 * Send an interrupt to process.
88 * Stack is set up to allow sigcode stored
89 * in u. to call routine, followed by chmk
90 * to sigreturn routine below. After sigreturn
91 * resets the signal mask, the stack, the frame
92 * pointer, and the argument pointer, it returns
93 * to the user specified pc, psl.
97 user32_addr_t catcher
; /* sig_t */
100 user32_addr_t sinfo
; /* siginfo32_t* */
101 user32_addr_t uctx
; /* struct ucontext32 */
105 * Declare table of structure flavors and sizes for 64-bit and 32-bit processes
106 * for the cases of extended states (plain FP, or AVX):
/*
 * Describes one extended-state (FP/AVX) thread-state flavor: the Mach
 * flavor constant, its state word count, and the size of the matching
 * user-visible mcontext structure.
 */
typedef struct {
	int		flavor;
	natural_t	state_count;
	size_t		mcontext_size;
} xstate_info_t;
111 static const xstate_info_t thread_state64
[] = {
112 [FP
] = { x86_FLOAT_STATE64
, x86_FLOAT_STATE64_COUNT
, sizeof(struct mcontext64
) },
113 [AVX
] = { x86_AVX_STATE64
, x86_AVX_STATE64_COUNT
, sizeof(struct mcontext_avx64
) },
114 #if !defined(RC_HIDE_XNU_J137)
115 [AVX512
] = { x86_AVX512_STATE64
, x86_AVX512_STATE64_COUNT
, sizeof(struct mcontext_avx512_64
) }
118 static const xstate_info_t thread_state32
[] = {
119 [FP
] = { x86_FLOAT_STATE32
, x86_FLOAT_STATE32_COUNT
, sizeof(struct mcontext32
) },
120 [AVX
] = { x86_AVX_STATE32
, x86_AVX_STATE32_COUNT
, sizeof(struct mcontext_avx32
) },
121 #if !defined(RC_HIDE_XNU_J137)
122 [AVX512
] = { x86_AVX512_STATE32
, x86_AVX512_STATE32_COUNT
, sizeof(struct mcontext_avx512_32
) }
127 * NOTE: Source and target may *NOT* overlap!
128 * XXX: Unify with bsd/kern/kern_exit.c
131 siginfo_user_to_user32_x86(user_siginfo_t
*in
, user32_siginfo_t
*out
)
133 out
->si_signo
= in
->si_signo
;
134 out
->si_errno
= in
->si_errno
;
135 out
->si_code
= in
->si_code
;
136 out
->si_pid
= in
->si_pid
;
137 out
->si_uid
= in
->si_uid
;
138 out
->si_status
= in
->si_status
;
139 out
->si_addr
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_addr
);
140 /* following cast works for sival_int because of padding */
141 out
->si_value
.sival_ptr
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_value
.sival_ptr
);
142 out
->si_band
= in
->si_band
; /* range reduction */
143 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
147 siginfo_user_to_user64_x86(user_siginfo_t
*in
, user64_siginfo_t
*out
)
149 out
->si_signo
= in
->si_signo
;
150 out
->si_errno
= in
->si_errno
;
151 out
->si_code
= in
->si_code
;
152 out
->si_pid
= in
->si_pid
;
153 out
->si_uid
= in
->si_uid
;
154 out
->si_status
= in
->si_status
;
155 out
->si_addr
= in
->si_addr
;
156 out
->si_value
.sival_ptr
= in
->si_value
.sival_ptr
;
157 out
->si_band
= in
->si_band
; /* range reduction */
158 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
162 sendsig(struct proc
*p
, user_addr_t ua_catcher
, int sig
, int mask
, __unused
uint32_t code
)
165 struct mcontext_avx32 mctx_avx32
;
166 struct mcontext_avx64 mctx_avx64
;
167 #if !defined(RC_HIDE_XNU_J137)
168 struct mcontext_avx512_32 mctx_avx512_32
;
169 struct mcontext_avx512_64 mctx_avx512_64
;
171 } mctx_store
, *mctxp
= &mctx_store
;
177 user_addr_t ua_uctxp
;
178 user_addr_t ua_mctxp
;
179 user_siginfo_t sinfo64
;
181 struct sigacts
*ps
= p
->p_sigacts
;
182 int oonstack
, flavor
;
183 user_addr_t trampact
;
186 mach_msg_type_number_t state_count
;
191 int infostyle
= UC_TRAD
;
194 thread
= current_thread();
195 ut
= get_bsdthread_info(thread
);
197 if (p
->p_sigacts
->ps_siginfo
& sigmask(sig
))
198 infostyle
= UC_FLAVOR
;
200 oonstack
= ut
->uu_sigstk
.ss_flags
& SA_ONSTACK
;
201 trampact
= ps
->ps_trampact
[sig
];
202 sigonstack
= (ps
->ps_sigonstack
& sigmask(sig
));
209 bzero((caddr_t
)&sinfo64
, sizeof(sinfo64
));
210 sinfo64
.si_signo
= sig
;
212 bzero(mctxp
, sizeof(*mctxp
));
214 sig_xstate
= current_xstate();
216 if (proc_is64bit(p
)) {
217 x86_thread_state64_t
*tstate64
;
218 struct user_ucontext64 uctx64
;
220 flavor
= x86_THREAD_STATE64
;
221 state_count
= x86_THREAD_STATE64_COUNT
;
222 state
= (void *)&mctxp
->mctx_avx64
.ss
;
223 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
226 flavor
= thread_state64
[sig_xstate
].flavor
;
227 state_count
= thread_state64
[sig_xstate
].state_count
;
228 state
= (void *)&mctxp
->mctx_avx64
.fs
;
229 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
232 flavor
= x86_EXCEPTION_STATE64
;
233 state_count
= x86_EXCEPTION_STATE64_COUNT
;
234 state
= (void *)&mctxp
->mctx_avx64
.es
;
235 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
238 tstate64
= &mctxp
->mctx_avx64
.ss
;
240 /* figure out where our new stack lives */
241 if ((ut
->uu_flag
& UT_ALTSTACK
) && !oonstack
&&
243 ua_sp
= ut
->uu_sigstk
.ss_sp
;
244 stack_size
= ut
->uu_sigstk
.ss_size
;
246 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
248 ua_sp
= tstate64
->rsp
;
250 ua_cr2
= mctxp
->mctx_avx64
.es
.faultvaddr
;
252 /* The x86_64 ABI defines a 128-byte red zone. */
253 ua_sp
-= C_64_REDZONE_LEN
;
255 ua_sp
-= sizeof (struct user_ucontext64
);
256 ua_uctxp
= ua_sp
; // someone tramples the first word!
258 ua_sp
-= sizeof (user64_siginfo_t
);
261 ua_sp
-= thread_state64
[sig_xstate
].mcontext_size
;
265 * Align the frame and stack pointers to 16 bytes for SSE.
266 * (Note that we use 'ua_fp' as the base of the stack going forward)
268 ua_fp
= TRUNC_DOWN64(ua_sp
, C_64_STK_ALIGN
);
271 * But we need to account for the return address so the alignment is
272 * truly "correct" at _sigtramp
274 ua_fp
-= sizeof(user_addr_t
);
277 * Build the signal context to be used by sigreturn.
279 bzero(&uctx64
, sizeof(uctx64
));
281 uctx64
.uc_onstack
= oonstack
;
282 uctx64
.uc_sigmask
= mask
;
283 uctx64
.uc_stack
.ss_sp
= ua_fp
;
284 uctx64
.uc_stack
.ss_size
= stack_size
;
287 uctx64
.uc_stack
.ss_flags
|= SS_ONSTACK
;
290 uctx64
.uc_mcsize
= thread_state64
[sig_xstate
].mcontext_size
;
291 uctx64
.uc_mcontext64
= ua_mctxp
;
293 if (copyout((caddr_t
)&uctx64
, ua_uctxp
, sizeof (uctx64
)))
296 if (copyout((caddr_t
)&mctx_store
, ua_mctxp
, thread_state64
[sig_xstate
].mcontext_size
))
299 sinfo64
.pad
[0] = tstate64
->rsp
;
300 sinfo64
.si_addr
= tstate64
->rip
;
302 tstate64
->rip
= trampact
;
303 tstate64
->rsp
= ua_fp
;
304 tstate64
->rflags
= get_eflags_exportmask();
306 * JOE - might not need to set these
308 tstate64
->cs
= USER64_CS
;
309 tstate64
->fs
= NULL_SEG
;
310 tstate64
->gs
= USER_CTHREAD
;
313 * Build the argument list for the signal handler.
314 * Handler should call sigreturn to get out of it
316 tstate64
->rdi
= ua_catcher
;
317 tstate64
->rsi
= infostyle
;
319 tstate64
->rcx
= ua_sip
;
320 tstate64
->r8
= ua_uctxp
;
323 x86_thread_state32_t
*tstate32
;
324 struct user_ucontext32 uctx32
;
325 struct sigframe32 frame32
;
327 flavor
= x86_THREAD_STATE32
;
328 state_count
= x86_THREAD_STATE32_COUNT
;
329 state
= (void *)&mctxp
->mctx_avx32
.ss
;
330 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
333 flavor
= thread_state32
[sig_xstate
].flavor
;
334 state_count
= thread_state32
[sig_xstate
].state_count
;
335 state
= (void *)&mctxp
->mctx_avx32
.fs
;
336 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
339 flavor
= x86_EXCEPTION_STATE32
;
340 state_count
= x86_EXCEPTION_STATE32_COUNT
;
341 state
= (void *)&mctxp
->mctx_avx32
.es
;
342 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
)
345 tstate32
= &mctxp
->mctx_avx32
.ss
;
347 /* figure out where our new stack lives */
348 if ((ut
->uu_flag
& UT_ALTSTACK
) && !oonstack
&&
350 ua_sp
= ut
->uu_sigstk
.ss_sp
;
351 stack_size
= ut
->uu_sigstk
.ss_size
;
353 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
355 ua_sp
= tstate32
->esp
;
357 ua_cr2
= mctxp
->mctx_avx32
.es
.faultvaddr
;
359 ua_sp
-= sizeof (struct user_ucontext32
);
360 ua_uctxp
= ua_sp
; // someone tramples the first word!
362 ua_sp
-= sizeof (user32_siginfo_t
);
365 ua_sp
-= thread_state32
[sig_xstate
].mcontext_size
;
368 ua_sp
-= sizeof (struct sigframe32
);
372 * Align the frame and stack pointers to 16 bytes for SSE.
373 * (Note that we use 'fp' as the base of the stack going forward)
375 ua_fp
= TRUNC_DOWN32(ua_fp
, C_32_STK_ALIGN
);
378 * But we need to account for the return address so the alignment is
379 * truly "correct" at _sigtramp
381 ua_fp
-= sizeof(frame32
.retaddr
);
384 * Build the argument list for the signal handler.
385 * Handler should call sigreturn to get out of it
387 frame32
.retaddr
= -1;
388 frame32
.sigstyle
= infostyle
;
390 frame32
.catcher
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_catcher
);
391 frame32
.sinfo
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_sip
);
392 frame32
.uctx
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_uctxp
);
394 if (copyout((caddr_t
)&frame32
, ua_fp
, sizeof (frame32
)))
398 * Build the signal context to be used by sigreturn.
400 bzero(&uctx32
, sizeof(uctx32
));
402 uctx32
.uc_onstack
= oonstack
;
403 uctx32
.uc_sigmask
= mask
;
404 uctx32
.uc_stack
.ss_sp
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_fp
);
405 uctx32
.uc_stack
.ss_size
= stack_size
;
408 uctx32
.uc_stack
.ss_flags
|= SS_ONSTACK
;
411 uctx32
.uc_mcsize
= thread_state64
[sig_xstate
].mcontext_size
;
413 uctx32
.uc_mcontext
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_mctxp
);
415 if (copyout((caddr_t
)&uctx32
, ua_uctxp
, sizeof (uctx32
)))
418 if (copyout((caddr_t
)&mctx_store
, ua_mctxp
, thread_state32
[sig_xstate
].mcontext_size
))
421 sinfo64
.pad
[0] = tstate32
->esp
;
422 sinfo64
.si_addr
= tstate32
->eip
;
427 switch (ut
->uu_code
) {
429 sinfo64
.si_code
= ILL_ILLOPC
;
432 sinfo64
.si_code
= ILL_NOOP
;
436 #define FP_IE 0 /* Invalid operation */
437 #define FP_DE 1 /* Denormalized operand */
438 #define FP_ZE 2 /* Zero divide */
439 #define FP_OE 3 /* overflow */
440 #define FP_UE 4 /* underflow */
441 #define FP_PE 5 /* precision */
442 if (ut
->uu_code
== EXC_I386_DIV
) {
443 sinfo64
.si_code
= FPE_INTDIV
;
445 else if (ut
->uu_code
== EXC_I386_INTO
) {
446 sinfo64
.si_code
= FPE_INTOVF
;
448 else if (ut
->uu_subcode
& (1 << FP_ZE
)) {
449 sinfo64
.si_code
= FPE_FLTDIV
;
450 } else if (ut
->uu_subcode
& (1 << FP_OE
)) {
451 sinfo64
.si_code
= FPE_FLTOVF
;
452 } else if (ut
->uu_subcode
& (1 << FP_UE
)) {
453 sinfo64
.si_code
= FPE_FLTUND
;
454 } else if (ut
->uu_subcode
& (1 << FP_PE
)) {
455 sinfo64
.si_code
= FPE_FLTRES
;
456 } else if (ut
->uu_subcode
& (1 << FP_IE
)) {
457 sinfo64
.si_code
= FPE_FLTINV
;
459 sinfo64
.si_code
= FPE_NOOP
;
463 sinfo64
.si_code
= BUS_ADRERR
;
464 sinfo64
.si_addr
= ua_cr2
;
467 sinfo64
.si_code
= TRAP_BRKPT
;
470 sinfo64
.si_addr
= ua_cr2
;
472 switch (ut
->uu_code
) {
474 /* CR2 is meaningless after GP fault */
475 /* XXX namespace clash! */
476 sinfo64
.si_addr
= 0ULL;
479 case KERN_PROTECTION_FAILURE
:
480 sinfo64
.si_code
= SEGV_ACCERR
;
482 case KERN_INVALID_ADDRESS
:
483 sinfo64
.si_code
= SEGV_MAPERR
;
486 sinfo64
.si_code
= FPE_NOOP
;
491 int status_and_exitcode
;
494 * All other signals need to fill out a minimum set of
495 * information for the siginfo structure passed into
496 * the signal handler, if SA_SIGINFO was specified.
498 * p->si_status actually contains both the status and
499 * the exit code; we save it off in its own variable
500 * for later breakdown.
503 sinfo64
.si_pid
= p
->si_pid
;
505 status_and_exitcode
= p
->si_status
;
507 sinfo64
.si_uid
= p
->si_uid
;
509 sinfo64
.si_code
= p
->si_code
;
512 if (sinfo64
.si_code
== CLD_EXITED
) {
513 if (WIFEXITED(status_and_exitcode
))
514 sinfo64
.si_code
= CLD_EXITED
;
515 else if (WIFSIGNALED(status_and_exitcode
)) {
516 if (WCOREDUMP(status_and_exitcode
)) {
517 sinfo64
.si_code
= CLD_DUMPED
;
518 status_and_exitcode
= W_EXITCODE(status_and_exitcode
,status_and_exitcode
);
520 sinfo64
.si_code
= CLD_KILLED
;
521 status_and_exitcode
= W_EXITCODE(status_and_exitcode
,status_and_exitcode
);
526 * The recorded status contains the exit code and the
527 * signal information, but the information to be passed
528 * in the siginfo to the handler is supposed to only
529 * contain the status, so we have to shift it out.
531 sinfo64
.si_status
= (WEXITSTATUS(status_and_exitcode
) & 0x00FFFFFF) | (((uint32_t)(p
->p_xhighbits
) << 24) & 0xFF000000);
536 if (proc_is64bit(p
)) {
537 user64_siginfo_t sinfo64_user64
;
539 bzero((caddr_t
)&sinfo64_user64
, sizeof(sinfo64_user64
));
541 siginfo_user_to_user64_x86(&sinfo64
,&sinfo64_user64
);
544 bzero((caddr_t
)&(ut
->t_dtrace_siginfo
), sizeof(ut
->t_dtrace_siginfo
));
546 ut
->t_dtrace_siginfo
.si_signo
= sinfo64
.si_signo
;
547 ut
->t_dtrace_siginfo
.si_code
= sinfo64
.si_code
;
548 ut
->t_dtrace_siginfo
.si_pid
= sinfo64
.si_pid
;
549 ut
->t_dtrace_siginfo
.si_uid
= sinfo64
.si_uid
;
550 ut
->t_dtrace_siginfo
.si_status
= sinfo64
.si_status
;
551 /* XXX truncates faulting address to void * on K32 */
552 ut
->t_dtrace_siginfo
.si_addr
= CAST_DOWN(void *, sinfo64
.si_addr
);
554 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
556 case SIGILL
: case SIGBUS
: case SIGSEGV
: case SIGFPE
: case SIGTRAP
:
557 DTRACE_PROC2(fault
, int, (int)(ut
->uu_code
), siginfo_t
*, &(ut
->t_dtrace_siginfo
));
563 /* XXX truncates catcher address to uintptr_t */
564 DTRACE_PROC3(signal__handle
, int, sig
, siginfo_t
*, &(ut
->t_dtrace_siginfo
),
565 void (*)(void), CAST_DOWN(sig_t
, ua_catcher
));
566 #endif /* CONFIG_DTRACE */
568 if (copyout((caddr_t
)&sinfo64_user64
, ua_sip
, sizeof (sinfo64_user64
)))
571 flavor
= x86_THREAD_STATE64
;
572 state_count
= x86_THREAD_STATE64_COUNT
;
573 state
= (void *)&mctxp
->mctx_avx64
.ss
;
575 x86_thread_state32_t
*tstate32
;
576 user32_siginfo_t sinfo32
;
578 bzero((caddr_t
)&sinfo32
, sizeof(sinfo32
));
580 siginfo_user_to_user32_x86(&sinfo64
,&sinfo32
);
583 bzero((caddr_t
)&(ut
->t_dtrace_siginfo
), sizeof(ut
->t_dtrace_siginfo
));
585 ut
->t_dtrace_siginfo
.si_signo
= sinfo32
.si_signo
;
586 ut
->t_dtrace_siginfo
.si_code
= sinfo32
.si_code
;
587 ut
->t_dtrace_siginfo
.si_pid
= sinfo32
.si_pid
;
588 ut
->t_dtrace_siginfo
.si_uid
= sinfo32
.si_uid
;
589 ut
->t_dtrace_siginfo
.si_status
= sinfo32
.si_status
;
590 ut
->t_dtrace_siginfo
.si_addr
= CAST_DOWN(void *, sinfo32
.si_addr
);
592 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
594 case SIGILL
: case SIGBUS
: case SIGSEGV
: case SIGFPE
: case SIGTRAP
:
595 DTRACE_PROC2(fault
, int, (int)(ut
->uu_code
), siginfo_t
*, &(ut
->t_dtrace_siginfo
));
601 DTRACE_PROC3(signal__handle
, int, sig
, siginfo_t
*, &(ut
->t_dtrace_siginfo
),
602 void (*)(void), CAST_DOWN(sig_t
, ua_catcher
));
603 #endif /* CONFIG_DTRACE */
605 if (copyout((caddr_t
)&sinfo32
, ua_sip
, sizeof (sinfo32
)))
608 tstate32
= &mctxp
->mctx_avx32
.ss
;
610 tstate32
->eip
= CAST_DOWN_EXPLICIT(user32_addr_t
, trampact
);
611 tstate32
->esp
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_fp
);
613 tstate32
->eflags
= get_eflags_exportmask();
615 tstate32
->cs
= USER_CS
;
616 tstate32
->ss
= USER_DS
;
617 tstate32
->ds
= USER_DS
;
618 tstate32
->es
= USER_DS
;
619 tstate32
->fs
= NULL_SEG
;
620 tstate32
->gs
= USER_CTHREAD
;
622 flavor
= x86_THREAD_STATE32
;
623 state_count
= x86_THREAD_STATE32_COUNT
;
624 state
= (void *)tstate32
;
626 if (thread_setstatus(thread
, flavor
, (thread_state_t
)state
, state_count
) != KERN_SUCCESS
)
628 ml_fp_setvalid(FALSE
);
630 /* Tell the PAL layer about the signal */
631 pal_set_signal_delivery( thread
);
640 SIGACTION(p
, SIGILL
) = SIG_DFL
;
641 sig
= sigmask(SIGILL
);
642 p
->p_sigignore
&= ~sig
;
643 p
->p_sigcatch
&= ~sig
;
644 ut
->uu_sigmask
&= ~sig
;
645 /* sendsig is called with signal lock held */
647 psignal_locked(p
, SIGILL
);
653 * System call to cleanup state after a signal
654 * has been taken. Reset signal mask and
655 * stack state from context left by sendsig (above).
656 * Return to previous pc and psl as specified by
657 * context left by sendsig. Check carefully to
658 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
664 sigreturn(struct proc
*p
, struct sigreturn_args
*uap
, __unused
int *retval
)
667 struct mcontext_avx32 mctx_avx32
;
668 struct mcontext_avx64 mctx_avx64
;
669 #if !defined(RC_HIDE_XNU_J137)
670 struct mcontext_avx512_32 mctx_avx512_32
;
671 struct mcontext_avx512_64 mctx_avx512_64
;
673 } mctx_store
, *mctxp
= &mctx_store
;
675 thread_t thread
= current_thread();
680 mach_msg_type_number_t ts_count
;
681 unsigned int ts_flavor
;
683 mach_msg_type_number_t fs_count
;
684 unsigned int fs_flavor
;
686 int rval
= EJUSTRETURN
;
689 ut
= (struct uthread
*)get_bsdthread_info(thread
);
692 * If we are being asked to change the altstack flag on the thread, we
693 * just set/reset it and return (the uap->uctx is not used).
695 if ((unsigned int)uap
->infostyle
== UC_SET_ALT_STACK
) {
696 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
698 } else if ((unsigned int)uap
->infostyle
== UC_RESET_ALT_STACK
) {
699 ut
->uu_sigstk
.ss_flags
&= ~SA_ONSTACK
;
703 bzero(mctxp
, sizeof(*mctxp
));
705 sig_xstate
= current_xstate();
707 if (proc_is64bit(p
)) {
708 struct user_ucontext64 uctx64
;
710 if ((error
= copyin(uap
->uctx
, (void *)&uctx64
, sizeof (uctx64
))))
713 if ((error
= copyin(uctx64
.uc_mcontext64
, (void *)mctxp
, thread_state64
[sig_xstate
].mcontext_size
)))
716 onstack
= uctx64
.uc_onstack
& 01;
717 ut
->uu_sigmask
= uctx64
.uc_sigmask
& ~sigcantmask
;
719 ts_flavor
= x86_THREAD_STATE64
;
720 ts_count
= x86_THREAD_STATE64_COUNT
;
721 ts
= (void *)&mctxp
->mctx_avx64
.ss
;
723 fs_flavor
= thread_state64
[sig_xstate
].flavor
;
724 fs_count
= thread_state64
[sig_xstate
].state_count
;
725 fs
= (void *)&mctxp
->mctx_avx64
.fs
;
728 struct user_ucontext32 uctx32
;
730 if ((error
= copyin(uap
->uctx
, (void *)&uctx32
, sizeof (uctx32
))))
733 if ((error
= copyin(CAST_USER_ADDR_T(uctx32
.uc_mcontext
), (void *)mctxp
, thread_state32
[sig_xstate
].mcontext_size
)))
736 onstack
= uctx32
.uc_onstack
& 01;
737 ut
->uu_sigmask
= uctx32
.uc_sigmask
& ~sigcantmask
;
739 ts_flavor
= x86_THREAD_STATE32
;
740 ts_count
= x86_THREAD_STATE32_COUNT
;
741 ts
= (void *)&mctxp
->mctx_avx32
.ss
;
743 fs_flavor
= thread_state32
[sig_xstate
].flavor
;
744 fs_count
= thread_state32
[sig_xstate
].state_count
;
745 fs
= (void *)&mctxp
->mctx_avx32
.fs
;
749 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
751 ut
->uu_sigstk
.ss_flags
&= ~SA_ONSTACK
;
753 if (ut
->uu_siglist
& ~ut
->uu_sigmask
)
754 signal_setast(thread
);
756 * thread_set_state() does all the needed checks for the passed in
759 if (thread_setstatus(thread
, ts_flavor
, ts
, ts_count
) != KERN_SUCCESS
) {
764 ml_fp_setvalid(TRUE
);
766 if (thread_setstatus(thread
, fs_flavor
, fs
, fs_count
) != KERN_SUCCESS
) {
777 * machine_exception() performs MD translation
778 * of a mach exception to a unix signal and code.
784 mach_exception_code_t code
,
785 __unused mach_exception_subcode_t subcode
,
787 mach_exception_code_t
*unix_code
)
793 /* Map GP fault to SIGSEGV, otherwise defer to caller */
794 if (code
== EXC_I386_GPFLT
) {
795 *unix_signal
= SIGSEGV
;
801 case EXC_BAD_INSTRUCTION
:
802 *unix_signal
= SIGILL
;
807 *unix_signal
= SIGFPE
;
812 if (code
== EXC_I386_BOUND
) {
814 * Map #BR, the Bound Range Exceeded exception, to
817 *unix_signal
= SIGTRAP
;