2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1992 NeXT, Inc.
32 * 13 May 1992 ? at NeXT
36 #include <mach/mach_types.h>
37 #include <mach/exception.h>
39 #include <kern/thread.h>
41 #include <sys/systm.h>
42 #include <sys/param.h>
43 #include <sys/proc_internal.h>
45 #include <sys/sysproto.h>
46 #include <sys/sysent.h>
47 #include <sys/ucontext.h>
50 #include <sys/ux_exception.h>
52 #include <mach/thread_act.h> /* for thread_abort_safely */
53 #include <mach/thread_status.h>
55 #include <i386/eflags.h>
57 #include <i386/machine_routines.h>
61 #include <machine/pal_routines.h>
63 #include <sys/kdebug.h>
68 extern kern_return_t
thread_getstatus(thread_t act
, int flavor
,
69 thread_state_t tstate
, mach_msg_type_number_t
*count
);
70 extern kern_return_t
thread_setstatus(thread_t thread
, int flavor
,
71 thread_state_t tstate
, mach_msg_type_number_t count
);
/* Signal handler flavors supported */
/* These definitions should match the Libc implementation */
#define UC_SET_ALT_STACK        0x40000000
#define UC_RESET_ALT_STACK      0x80000000

#define C_32_STK_ALIGN          16
#define C_64_STK_ALIGN          16
#define C_64_REDZONE_LEN        128

/*
 * Step the stack pointer down by one alignment unit, then truncate to
 * a multiple of the (power-of-two) alignment 'c'.  Used when carving
 * the signal frame out of the user stack.
 *
 * 'a' is now fully parenthesized: previously the cast bound only to
 * the first token of an expression argument (e.g. TRUNC_DOWN32(x - y, c)
 * expanded to ((uint32_t)x - y), casting x alone).
 */
#define TRUNC_DOWN32(a, c)      ((((uint32_t)(a)) - (c)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a, c)      ((((uint64_t)(a)) - (c)) & ((uint64_t)(-(c))))
 * Send an interrupt (signal) to a process.
 *
 * The stack is set up so that the signal trampoline (the sigcode kept
 * in the u-area) calls the handler, followed by a system call into the
 * sigreturn routine below.  After sigreturn resets the signal mask,
 * the stack, the frame pointer, and the argument pointer, it returns
 * to the user-specified pc and psl.
98 user32_addr_t catcher
; /* sig_t */
101 user32_addr_t sinfo
; /* siginfo32_t* */
102 user32_addr_t uctx
; /* struct ucontext32 */
107 * Declare table of structure flavors and sizes for 64-bit and 32-bit processes
108 * for the cases of extended states (plain FP, or AVX):
111 int flavor
; natural_t state_count
; size_t mcontext_size
;
113 static const xstate_info_t thread_state64
[] = {
114 [FP
] = { x86_FLOAT_STATE64
, x86_FLOAT_STATE64_COUNT
, sizeof(struct mcontext64
) },
115 [FP_FULL
] = { x86_FLOAT_STATE64
, x86_FLOAT_STATE64_COUNT
, sizeof(struct mcontext64_full
) },
116 [AVX
] = { x86_AVX_STATE64
, x86_AVX_STATE64_COUNT
, sizeof(struct mcontext_avx64
) },
117 [AVX_FULL
] = { x86_AVX_STATE64
, x86_AVX_STATE64_COUNT
, sizeof(struct mcontext_avx64_full
) },
118 [AVX512
] = { x86_AVX512_STATE64
, x86_AVX512_STATE64_COUNT
, sizeof(struct mcontext_avx512_64
) },
119 [AVX512_FULL
] = { x86_AVX512_STATE64
, x86_AVX512_STATE64_COUNT
, sizeof(struct mcontext_avx512_64_full
) }
121 static const xstate_info_t thread_state32
[] = {
122 [FP
] = { x86_FLOAT_STATE32
, x86_FLOAT_STATE32_COUNT
, sizeof(struct mcontext32
) },
123 [AVX
] = { x86_AVX_STATE32
, x86_AVX_STATE32_COUNT
, sizeof(struct mcontext_avx32
) },
124 [AVX512
] = { x86_AVX512_STATE32
, x86_AVX512_STATE32_COUNT
, sizeof(struct mcontext_avx512_32
) }
128 * NOTE: Source and target may *NOT* overlap!
129 * XXX: Unify with bsd/kern/kern_exit.c
132 siginfo_user_to_user32_x86(user_siginfo_t
*in
, user32_siginfo_t
*out
)
134 out
->si_signo
= in
->si_signo
;
135 out
->si_errno
= in
->si_errno
;
136 out
->si_code
= in
->si_code
;
137 out
->si_pid
= in
->si_pid
;
138 out
->si_uid
= in
->si_uid
;
139 out
->si_status
= in
->si_status
;
140 out
->si_addr
= CAST_DOWN_EXPLICIT(user32_addr_t
, in
->si_addr
);
141 /* following cast works for sival_int because of padding */
142 out
->si_value
.sival_ptr
= CAST_DOWN_EXPLICIT(user32_addr_t
, in
->si_value
.sival_ptr
);
143 out
->si_band
= in
->si_band
; /* range reduction */
144 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
148 siginfo_user_to_user64_x86(user_siginfo_t
*in
, user64_siginfo_t
*out
)
150 out
->si_signo
= in
->si_signo
;
151 out
->si_errno
= in
->si_errno
;
152 out
->si_code
= in
->si_code
;
153 out
->si_pid
= in
->si_pid
;
154 out
->si_uid
= in
->si_uid
;
155 out
->si_status
= in
->si_status
;
156 out
->si_addr
= in
->si_addr
;
157 out
->si_value
.sival_ptr
= in
->si_value
.sival_ptr
;
158 out
->si_band
= in
->si_band
; /* range reduction */
159 out
->__pad
[0] = in
->pad
[0]; /* mcontext.ss.r1 */
163 sendsig(struct proc
*p
, user_addr_t ua_catcher
, int sig
, int mask
, __unused
uint32_t code
)
166 struct mcontext_avx32 mctx_avx32
;
167 struct mcontext_avx64 mctx_avx64
;
168 struct mcontext_avx64_full mctx_avx64_full
;
169 struct mcontext_avx512_32 mctx_avx512_32
;
170 struct mcontext_avx512_64 mctx_avx512_64
;
171 struct mcontext_avx512_64_full mctx_avx512_64_full
;
172 } mctx_store
, *mctxp
= &mctx_store
;
178 user_addr_t ua_uctxp
;
179 user_addr_t ua_mctxp
;
180 user_siginfo_t sinfo64
;
182 struct sigacts
*ps
= p
->p_sigacts
;
183 int oonstack
, flavor
;
184 user_addr_t trampact
;
186 void * state
, *fpstate
;
187 mach_msg_type_number_t state_count
;
192 int infostyle
= UC_TRAD
;
194 user_addr_t token_uctx
;
196 boolean_t reset_ss
= TRUE
;
198 thread
= current_thread();
199 ut
= get_bsdthread_info(thread
);
201 if (p
->p_sigacts
->ps_siginfo
& sigmask(sig
)) {
202 infostyle
= UC_FLAVOR
;
205 oonstack
= ut
->uu_sigstk
.ss_flags
& SA_ONSTACK
;
206 trampact
= ps
->ps_trampact
[sig
];
207 sigonstack
= (ps
->ps_sigonstack
& sigmask(sig
));
214 bzero((caddr_t
)&sinfo64
, sizeof(sinfo64
));
215 sinfo64
.si_signo
= sig
;
217 bzero(mctxp
, sizeof(*mctxp
));
219 sig_xstate
= current_xstate();
221 if (proc_is64bit(p
)) {
222 x86_thread_state64_t
*tstate64
;
223 struct user_ucontext64 uctx64
;
225 int task_has_ldt
= thread_task_has_ldt(thread
);
228 flavor
= x86_THREAD_FULL_STATE64
;
229 state_count
= x86_THREAD_FULL_STATE64_COUNT
;
230 fpstate
= (void *)&mctxp
->mctx_avx64_full
.fs
;
231 sig_xstate
|= STATE64_FULL
;
233 flavor
= x86_THREAD_STATE64
;
234 state_count
= x86_THREAD_STATE64_COUNT
;
235 fpstate
= (void *)&mctxp
->mctx_avx64
.fs
;
237 state
= (void *)&mctxp
->mctx_avx64
.ss
;
240 * The state copying is performed with pointers to fields in the state
241 * struct. This works specifically because the mcontext is layed-out with the
242 * variable-sized FP-state as the last member. However, with the requirement
243 * to support passing "full" 64-bit state to the signal handler, that layout has now
244 * changed (since the "full" state has a larger "ss" member than the non-"full"
245 * structure. Because of this, and to retain the array-lookup method of determining
246 * structure sizes, we OR-in STATE64_FULL to sig_xstate to ensure the proper mcontext
250 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
) {
254 if ((sig_xstate
& STATE64_FULL
) && mctxp
->mctx_avx64
.ss
.cs
!= USER64_CS
) {
255 if ((ut
->uu_flag
& UT_ALTSTACK
) && !oonstack
&&
265 flavor
= thread_state64
[sig_xstate
].flavor
;
266 state_count
= thread_state64
[sig_xstate
].state_count
;
267 if (thread_getstatus(thread
, flavor
, (thread_state_t
)fpstate
, &state_count
) != KERN_SUCCESS
) {
271 flavor
= x86_EXCEPTION_STATE64
;
272 state_count
= x86_EXCEPTION_STATE64_COUNT
;
273 state
= (void *)&mctxp
->mctx_avx64
.es
;
274 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
) {
278 tstate64
= &mctxp
->mctx_avx64
.ss
;
280 /* figure out where our new stack lives */
281 if ((ut
->uu_flag
& UT_ALTSTACK
) && !oonstack
&&
283 ua_sp
= ut
->uu_sigstk
.ss_sp
;
284 stack_size
= ut
->uu_sigstk
.ss_size
;
286 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
288 if ((sig_xstate
& STATE64_FULL
) && tstate64
->cs
!= USER64_CS
) {
291 ua_sp
= tstate64
->rsp
;
293 ua_cr2
= mctxp
->mctx_avx64
.es
.faultvaddr
;
295 /* The x86_64 ABI defines a 128-byte red zone. */
296 ua_sp
-= C_64_REDZONE_LEN
;
298 ua_sp
-= sizeof(struct user_ucontext64
);
299 ua_uctxp
= ua_sp
; // someone tramples the first word!
301 ua_sp
-= sizeof(user64_siginfo_t
);
304 ua_sp
-= thread_state64
[sig_xstate
].mcontext_size
;
308 * Align the frame and stack pointers to 16 bytes for SSE.
309 * (Note that we use 'ua_fp' as the base of the stack going forward)
311 ua_fp
= TRUNC_DOWN64(ua_sp
, C_64_STK_ALIGN
);
314 * But we need to account for the return address so the alignment is
315 * truly "correct" at _sigtramp
317 ua_fp
-= sizeof(user_addr_t
);
320 * Generate the validation token for sigreturn
322 token_uctx
= ua_uctxp
;
323 kr
= machine_thread_siguctx_pointer_convert_to_user(thread
, &token_uctx
);
324 assert(kr
== KERN_SUCCESS
);
325 token
= (user64_addr_t
)token_uctx
^ (user64_addr_t
)ps
->ps_sigreturn_token
;
328 * Build the signal context to be used by sigreturn.
330 bzero(&uctx64
, sizeof(uctx64
));
332 uctx64
.uc_onstack
= oonstack
;
333 uctx64
.uc_sigmask
= mask
;
334 uctx64
.uc_stack
.ss_sp
= ua_fp
;
335 uctx64
.uc_stack
.ss_size
= stack_size
;
338 uctx64
.uc_stack
.ss_flags
|= SS_ONSTACK
;
342 uctx64
.uc_mcsize
= thread_state64
[sig_xstate
].mcontext_size
;
343 uctx64
.uc_mcontext64
= ua_mctxp
;
345 if (copyout((caddr_t
)&uctx64
, ua_uctxp
, sizeof(uctx64
))) {
349 if (copyout((caddr_t
)&mctx_store
, ua_mctxp
, thread_state64
[sig_xstate
].mcontext_size
)) {
353 sinfo64
.pad
[0] = tstate64
->rsp
;
354 sinfo64
.si_addr
= tstate64
->rip
;
356 tstate64
->rip
= trampact
;
357 tstate64
->rsp
= ua_fp
;
358 tstate64
->rflags
= get_eflags_exportmask();
361 * SETH - need to set these for processes with LDTs
363 tstate64
->cs
= USER64_CS
;
364 tstate64
->fs
= NULL_SEG
;
366 * Set gs to 0 here to prevent restoration of %gs on return-to-user. If we
367 * did NOT do that here and %gs was non-zero, we'd blow away gsbase when
368 * we restore %gs in the kernel exit trampoline.
372 if (sig_xstate
& STATE64_FULL
) {
373 /* Reset DS, ES, and possibly SS */
376 * Restore %ss if (a) an altstack was used for signal delivery
377 * or (b) %cs at the time of the signal was the default
380 mctxp
->mctx_avx64_full
.ss
.ss
= USER64_DS
;
382 mctxp
->mctx_avx64_full
.ss
.ds
= USER64_DS
;
383 mctxp
->mctx_avx64_full
.ss
.es
= 0;
387 * Build the argument list for the signal handler.
388 * Handler should call sigreturn to get out of it
390 tstate64
->rdi
= ua_catcher
;
391 tstate64
->rsi
= infostyle
;
393 tstate64
->rcx
= ua_sip
;
394 tstate64
->r8
= ua_uctxp
;
395 tstate64
->r9
= token
;
397 x86_thread_state32_t
*tstate32
;
398 struct user_ucontext32 uctx32
;
399 struct sigframe32 frame32
;
402 flavor
= x86_THREAD_STATE32
;
403 state_count
= x86_THREAD_STATE32_COUNT
;
404 state
= (void *)&mctxp
->mctx_avx32
.ss
;
405 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
) {
409 flavor
= thread_state32
[sig_xstate
].flavor
;
410 state_count
= thread_state32
[sig_xstate
].state_count
;
411 state
= (void *)&mctxp
->mctx_avx32
.fs
;
412 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
) {
416 flavor
= x86_EXCEPTION_STATE32
;
417 state_count
= x86_EXCEPTION_STATE32_COUNT
;
418 state
= (void *)&mctxp
->mctx_avx32
.es
;
419 if (thread_getstatus(thread
, flavor
, (thread_state_t
)state
, &state_count
) != KERN_SUCCESS
) {
423 tstate32
= &mctxp
->mctx_avx32
.ss
;
425 /* figure out where our new stack lives */
426 if ((ut
->uu_flag
& UT_ALTSTACK
) && !oonstack
&&
428 ua_sp
= ut
->uu_sigstk
.ss_sp
;
429 stack_size
= ut
->uu_sigstk
.ss_size
;
431 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
433 ua_sp
= tstate32
->esp
;
435 ua_cr2
= mctxp
->mctx_avx32
.es
.faultvaddr
;
437 ua_sp
-= sizeof(struct user_ucontext32
);
438 ua_uctxp
= ua_sp
; // someone tramples the first word!
440 ua_sp
-= sizeof(user32_siginfo_t
);
443 ua_sp
-= thread_state32
[sig_xstate
].mcontext_size
;
446 ua_sp
-= sizeof(struct sigframe32
);
450 * Align the frame and stack pointers to 16 bytes for SSE.
451 * (Note that we use 'fp' as the base of the stack going forward)
453 ua_fp
= TRUNC_DOWN32(ua_fp
, C_32_STK_ALIGN
);
456 * But we need to account for the return address so the alignment is
457 * truly "correct" at _sigtramp
459 ua_fp
-= sizeof(frame32
.retaddr
);
462 * Generate the validation token for sigreturn
464 token_uctx
= ua_uctxp
;
465 kr
= machine_thread_siguctx_pointer_convert_to_user(thread
, &token_uctx
);
466 assert(kr
== KERN_SUCCESS
);
467 token
= CAST_DOWN_EXPLICIT(user32_addr_t
, token_uctx
) ^
468 CAST_DOWN_EXPLICIT(user32_addr_t
, ps
->ps_sigreturn_token
);
471 * Build the argument list for the signal handler.
472 * Handler should call sigreturn to get out of it
474 frame32
.retaddr
= -1;
475 frame32
.sigstyle
= infostyle
;
477 frame32
.catcher
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_catcher
);
478 frame32
.sinfo
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_sip
);
479 frame32
.uctx
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_uctxp
);
480 frame32
.token
= token
;
482 if (copyout((caddr_t
)&frame32
, ua_fp
, sizeof(frame32
))) {
487 * Build the signal context to be used by sigreturn.
489 bzero(&uctx32
, sizeof(uctx32
));
491 uctx32
.uc_onstack
= oonstack
;
492 uctx32
.uc_sigmask
= mask
;
493 uctx32
.uc_stack
.ss_sp
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_fp
);
494 uctx32
.uc_stack
.ss_size
= stack_size
;
497 uctx32
.uc_stack
.ss_flags
|= SS_ONSTACK
;
501 uctx32
.uc_mcsize
= thread_state64
[sig_xstate
].mcontext_size
;
503 uctx32
.uc_mcontext
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_mctxp
);
505 if (copyout((caddr_t
)&uctx32
, ua_uctxp
, sizeof(uctx32
))) {
509 if (copyout((caddr_t
)&mctx_store
, ua_mctxp
, thread_state32
[sig_xstate
].mcontext_size
)) {
513 sinfo64
.pad
[0] = tstate32
->esp
;
514 sinfo64
.si_addr
= tstate32
->eip
;
519 switch (ut
->uu_code
) {
521 sinfo64
.si_code
= ILL_ILLOPC
;
524 sinfo64
.si_code
= ILL_NOOP
;
528 #define FP_IE 0 /* Invalid operation */
529 #define FP_DE 1 /* Denormalized operand */
530 #define FP_ZE 2 /* Zero divide */
531 #define FP_OE 3 /* overflow */
532 #define FP_UE 4 /* underflow */
533 #define FP_PE 5 /* precision */
534 if (ut
->uu_code
== EXC_I386_DIV
) {
535 sinfo64
.si_code
= FPE_INTDIV
;
536 } else if (ut
->uu_code
== EXC_I386_INTO
) {
537 sinfo64
.si_code
= FPE_INTOVF
;
538 } else if (ut
->uu_subcode
& (1 << FP_ZE
)) {
539 sinfo64
.si_code
= FPE_FLTDIV
;
540 } else if (ut
->uu_subcode
& (1 << FP_OE
)) {
541 sinfo64
.si_code
= FPE_FLTOVF
;
542 } else if (ut
->uu_subcode
& (1 << FP_UE
)) {
543 sinfo64
.si_code
= FPE_FLTUND
;
544 } else if (ut
->uu_subcode
& (1 << FP_PE
)) {
545 sinfo64
.si_code
= FPE_FLTRES
;
546 } else if (ut
->uu_subcode
& (1 << FP_IE
)) {
547 sinfo64
.si_code
= FPE_FLTINV
;
549 sinfo64
.si_code
= FPE_NOOP
;
553 sinfo64
.si_code
= BUS_ADRERR
;
554 sinfo64
.si_addr
= ua_cr2
;
557 sinfo64
.si_code
= TRAP_BRKPT
;
560 sinfo64
.si_addr
= ua_cr2
;
562 switch (ut
->uu_code
) {
564 /* CR2 is meaningless after GP fault */
565 /* XXX namespace clash! */
566 sinfo64
.si_addr
= 0ULL;
569 case KERN_PROTECTION_FAILURE
:
570 sinfo64
.si_code
= SEGV_ACCERR
;
572 case KERN_INVALID_ADDRESS
:
573 sinfo64
.si_code
= SEGV_MAPERR
;
576 sinfo64
.si_code
= FPE_NOOP
;
581 int status_and_exitcode
;
584 * All other signals need to fill out a minimum set of
585 * information for the siginfo structure passed into
586 * the signal handler, if SA_SIGINFO was specified.
588 * p->si_status actually contains both the status and
589 * the exit code; we save it off in its own variable
590 * for later breakdown.
593 sinfo64
.si_pid
= p
->si_pid
;
595 status_and_exitcode
= p
->si_status
;
597 sinfo64
.si_uid
= p
->si_uid
;
599 sinfo64
.si_code
= p
->si_code
;
602 if (sinfo64
.si_code
== CLD_EXITED
) {
603 if (WIFEXITED(status_and_exitcode
)) {
604 sinfo64
.si_code
= CLD_EXITED
;
605 } else if (WIFSIGNALED(status_and_exitcode
)) {
606 if (WCOREDUMP(status_and_exitcode
)) {
607 sinfo64
.si_code
= CLD_DUMPED
;
608 status_and_exitcode
= W_EXITCODE(status_and_exitcode
, status_and_exitcode
);
610 sinfo64
.si_code
= CLD_KILLED
;
611 status_and_exitcode
= W_EXITCODE(status_and_exitcode
, status_and_exitcode
);
616 * The recorded status contains the exit code and the
617 * signal information, but the information to be passed
618 * in the siginfo to the handler is supposed to only
619 * contain the status, so we have to shift it out.
621 sinfo64
.si_status
= (WEXITSTATUS(status_and_exitcode
) & 0x00FFFFFF) | (((uint32_t)(p
->p_xhighbits
) << 24) & 0xFF000000);
626 if (proc_is64bit(p
)) {
627 user64_siginfo_t sinfo64_user64
;
629 bzero((caddr_t
)&sinfo64_user64
, sizeof(sinfo64_user64
));
631 siginfo_user_to_user64_x86(&sinfo64
, &sinfo64_user64
);
634 bzero((caddr_t
)&(ut
->t_dtrace_siginfo
), sizeof(ut
->t_dtrace_siginfo
));
636 ut
->t_dtrace_siginfo
.si_signo
= sinfo64
.si_signo
;
637 ut
->t_dtrace_siginfo
.si_code
= sinfo64
.si_code
;
638 ut
->t_dtrace_siginfo
.si_pid
= sinfo64
.si_pid
;
639 ut
->t_dtrace_siginfo
.si_uid
= sinfo64
.si_uid
;
640 ut
->t_dtrace_siginfo
.si_status
= sinfo64
.si_status
;
641 /* XXX truncates faulting address to void * on K32 */
642 ut
->t_dtrace_siginfo
.si_addr
= CAST_DOWN(void *, sinfo64
.si_addr
);
644 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
646 case SIGILL
: case SIGBUS
: case SIGSEGV
: case SIGFPE
: case SIGTRAP
:
647 DTRACE_PROC2(fault
, int, (int)(ut
->uu_code
), siginfo_t
*, &(ut
->t_dtrace_siginfo
));
653 /* XXX truncates catcher address to uintptr_t */
654 DTRACE_PROC3(signal__handle
, int, sig
, siginfo_t
*, &(ut
->t_dtrace_siginfo
),
655 void (*)(void), CAST_DOWN(sig_t
, ua_catcher
));
656 #endif /* CONFIG_DTRACE */
658 if (copyout((caddr_t
)&sinfo64_user64
, ua_sip
, sizeof(sinfo64_user64
))) {
662 if (sig_xstate
& STATE64_FULL
) {
663 flavor
= x86_THREAD_FULL_STATE64
;
664 state_count
= x86_THREAD_FULL_STATE64_COUNT
;
666 flavor
= x86_THREAD_STATE64
;
667 state_count
= x86_THREAD_STATE64_COUNT
;
669 state
= (void *)&mctxp
->mctx_avx64
.ss
;
671 x86_thread_state32_t
*tstate32
;
672 user32_siginfo_t sinfo32
;
674 bzero((caddr_t
)&sinfo32
, sizeof(sinfo32
));
676 siginfo_user_to_user32_x86(&sinfo64
, &sinfo32
);
679 bzero((caddr_t
)&(ut
->t_dtrace_siginfo
), sizeof(ut
->t_dtrace_siginfo
));
681 ut
->t_dtrace_siginfo
.si_signo
= sinfo32
.si_signo
;
682 ut
->t_dtrace_siginfo
.si_code
= sinfo32
.si_code
;
683 ut
->t_dtrace_siginfo
.si_pid
= sinfo32
.si_pid
;
684 ut
->t_dtrace_siginfo
.si_uid
= sinfo32
.si_uid
;
685 ut
->t_dtrace_siginfo
.si_status
= sinfo32
.si_status
;
686 ut
->t_dtrace_siginfo
.si_addr
= CAST_DOWN(void *, sinfo32
.si_addr
);
688 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
690 case SIGILL
: case SIGBUS
: case SIGSEGV
: case SIGFPE
: case SIGTRAP
:
691 DTRACE_PROC2(fault
, int, (int)(ut
->uu_code
), siginfo_t
*, &(ut
->t_dtrace_siginfo
));
697 DTRACE_PROC3(signal__handle
, int, sig
, siginfo_t
*, &(ut
->t_dtrace_siginfo
),
698 void (*)(void), CAST_DOWN(sig_t
, ua_catcher
));
699 #endif /* CONFIG_DTRACE */
701 if (copyout((caddr_t
)&sinfo32
, ua_sip
, sizeof(sinfo32
))) {
705 tstate32
= &mctxp
->mctx_avx32
.ss
;
707 tstate32
->eip
= CAST_DOWN_EXPLICIT(user32_addr_t
, trampact
);
708 tstate32
->esp
= CAST_DOWN_EXPLICIT(user32_addr_t
, ua_fp
);
710 tstate32
->eflags
= get_eflags_exportmask();
712 tstate32
->cs
= USER_CS
;
713 tstate32
->ss
= USER_DS
;
714 tstate32
->ds
= USER_DS
;
715 tstate32
->es
= USER_DS
;
716 tstate32
->fs
= NULL_SEG
;
717 tstate32
->gs
= USER_CTHREAD
;
719 flavor
= x86_THREAD_STATE32
;
720 state_count
= x86_THREAD_STATE32_COUNT
;
721 state
= (void *)tstate32
;
723 if (thread_setstatus(thread
, flavor
, (thread_state_t
)state
, state_count
) != KERN_SUCCESS
) {
726 ml_fp_setvalid(FALSE
);
728 /* Tell the PAL layer about the signal */
729 pal_set_signal_delivery( thread
);
738 SIGACTION(p
, SIGILL
) = SIG_DFL
;
739 sig
= sigmask(SIGILL
);
740 p
->p_sigignore
&= ~sig
;
741 p
->p_sigcatch
&= ~sig
;
742 ut
->uu_sigmask
&= ~sig
;
743 /* sendsig is called with signal lock held */
745 psignal_locked(p
, SIGILL
);
751 * System call to cleanup state after a signal
752 * has been taken. Reset signal mask and
753 * stack state from context left by sendsig (above).
754 * Return to previous pc and psl as specified by
755 * context left by sendsig. Check carefully to
756 * make sure that the user has not modified the
757 * psl to gain improper priviledges or to cause
762 sigreturn(struct proc
*p
, struct sigreturn_args
*uap
, __unused
int *retval
)
765 struct mcontext_avx32 mctx_avx32
;
766 struct mcontext_avx64 mctx_avx64
;
767 struct mcontext_avx64_full mctx_avx64_full
;
768 struct mcontext_avx512_32 mctx_avx512_32
;
769 struct mcontext_avx512_64 mctx_avx512_64
;
770 struct mcontext_avx512_64_full mctx_avx512_64_full
;
771 } mctx_store
, *mctxp
= &mctx_store
;
773 thread_t thread
= current_thread();
775 struct sigacts
*ps
= p
->p_sigacts
;
779 mach_msg_type_number_t ts_count
;
780 unsigned int ts_flavor
;
782 mach_msg_type_number_t fs_count
;
783 unsigned int fs_flavor
;
785 int rval
= EJUSTRETURN
;
787 uint32_t sigreturn_validation
;
788 user_addr_t token_uctx
;
791 ut
= (struct uthread
*)get_bsdthread_info(thread
);
794 * If we are being asked to change the altstack flag on the thread, we
795 * just set/reset it and return (the uap->uctx is not used).
797 if ((unsigned int)uap
->infostyle
== UC_SET_ALT_STACK
) {
798 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
800 } else if ((unsigned int)uap
->infostyle
== UC_RESET_ALT_STACK
) {
801 ut
->uu_sigstk
.ss_flags
&= ~SA_ONSTACK
;
805 bzero(mctxp
, sizeof(*mctxp
));
807 sig_xstate
= current_xstate();
809 sigreturn_validation
= atomic_load_explicit(
810 &ps
->ps_sigreturn_validation
, memory_order_relaxed
);
811 token_uctx
= uap
->uctx
;
812 kr
= machine_thread_siguctx_pointer_convert_to_user(thread
, &token_uctx
);
813 assert(kr
== KERN_SUCCESS
);
815 if (proc_is64bit(p
)) {
816 struct user_ucontext64 uctx64
;
818 int task_has_ldt
= thread_task_has_ldt(thread
);
820 if ((error
= copyin(uap
->uctx
, (void *)&uctx64
, sizeof(uctx64
)))) {
824 onstack
= uctx64
.uc_onstack
& 01;
825 ut
->uu_sigmask
= uctx64
.uc_sigmask
& ~sigcantmask
;
828 ts_flavor
= x86_THREAD_FULL_STATE64
;
829 ts_count
= x86_THREAD_FULL_STATE64_COUNT
;
830 fs
= (void *)&mctxp
->mctx_avx64_full
.fs
;
831 sig_xstate
|= STATE64_FULL
;
833 ts_flavor
= x86_THREAD_STATE64
;
834 ts_count
= x86_THREAD_STATE64_COUNT
;
835 fs
= (void *)&mctxp
->mctx_avx64
.fs
;
838 if ((error
= copyin(uctx64
.uc_mcontext64
, (void *)mctxp
, thread_state64
[sig_xstate
].mcontext_size
))) {
842 ts
= (void *)&mctxp
->mctx_avx64
.ss
;
844 fs_flavor
= thread_state64
[sig_xstate
].flavor
;
845 fs_count
= thread_state64
[sig_xstate
].state_count
;
847 token
= (user64_addr_t
)token_uctx
^ (user64_addr_t
)ps
->ps_sigreturn_token
;
848 if ((user64_addr_t
)uap
->token
!= token
) {
849 #if DEVELOPMENT || DEBUG
850 printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
851 p
->p_comm
, p
->p_pid
, (user64_addr_t
)uap
->token
, token
);
852 #endif /* DEVELOPMENT || DEBUG */
853 if (sigreturn_validation
!= PS_SIGRETURN_VALIDATION_DISABLED
) {
858 struct user_ucontext32 uctx32
;
861 if ((error
= copyin(uap
->uctx
, (void *)&uctx32
, sizeof(uctx32
)))) {
865 if ((error
= copyin(CAST_USER_ADDR_T(uctx32
.uc_mcontext
), (void *)mctxp
, thread_state32
[sig_xstate
].mcontext_size
))) {
869 onstack
= uctx32
.uc_onstack
& 01;
870 ut
->uu_sigmask
= uctx32
.uc_sigmask
& ~sigcantmask
;
872 ts_flavor
= x86_THREAD_STATE32
;
873 ts_count
= x86_THREAD_STATE32_COUNT
;
874 ts
= (void *)&mctxp
->mctx_avx32
.ss
;
876 fs_flavor
= thread_state32
[sig_xstate
].flavor
;
877 fs_count
= thread_state32
[sig_xstate
].state_count
;
878 fs
= (void *)&mctxp
->mctx_avx32
.fs
;
880 token
= CAST_DOWN_EXPLICIT(user32_addr_t
, uap
->uctx
) ^
881 CAST_DOWN_EXPLICIT(user32_addr_t
, ps
->ps_sigreturn_token
);
882 if ((user32_addr_t
)uap
->token
!= token
) {
883 #if DEVELOPMENT || DEBUG
884 printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
885 p
->p_comm
, p
->p_pid
, (user32_addr_t
)uap
->token
, token
);
886 #endif /* DEVELOPMENT || DEBUG */
887 if (sigreturn_validation
!= PS_SIGRETURN_VALIDATION_DISABLED
) {
894 ut
->uu_sigstk
.ss_flags
|= SA_ONSTACK
;
896 ut
->uu_sigstk
.ss_flags
&= ~SA_ONSTACK
;
899 if (ut
->uu_siglist
& ~ut
->uu_sigmask
) {
900 signal_setast(thread
);
903 if (rval
== EINVAL
) {
908 * thread_set_state() does all the needed checks for the passed in
911 if (thread_setstatus(thread
, ts_flavor
, ts
, ts_count
) != KERN_SUCCESS
) {
913 #if DEVELOPMENT || DEBUG
914 printf("process %s[%d] sigreturn thread_setstatus error %d\n",
915 p
->p_comm
, p
->p_pid
, rval
);
916 #endif /* DEVELOPMENT || DEBUG */
920 ml_fp_setvalid(TRUE
);
922 if (thread_setstatus(thread
, fs_flavor
, fs
, fs_count
) != KERN_SUCCESS
) {
924 #if DEVELOPMENT || DEBUG
925 printf("process %s[%d] sigreturn thread_setstatus error %d\n",
926 p
->p_comm
, p
->p_pid
, rval
);
927 #endif /* DEVELOPMENT || DEBUG */
936 * machine_exception() performs machine-dependent translation
937 * of a mach exception to a unix signal.
940 machine_exception(int exception
,
941 mach_exception_code_t code
,
942 __unused mach_exception_subcode_t subcode
)
946 /* Map GP fault to SIGSEGV, otherwise defer to caller */
947 if (code
== EXC_I386_GPFLT
) {
952 case EXC_BAD_INSTRUCTION
:
959 if (code
== EXC_I386_BOUND
) {
961 * Map #BR, the Bound Range Exceeded exception, to