/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1992 NeXT, Inc.
 *
 * HISTORY
 * 13 May 1992 ? at NeXT
 *      Created.
 */

#include <mach/mach_types.h>
#include <mach/exception.h>

#include <kern/thread.h>
#include <kern/ast.h>

#include <sys/systm.h>
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/sysproto.h>
#include <sys/sysent.h>
#include <sys/ucontext.h>
#include <sys/wait.h>

#include <sys/ux_exception.h>

#include <mach/thread_act.h>    /* for thread_abort_safely */
#include <mach/thread_status.h>

#include <i386/eflags.h>
#include <i386/psl.h>
#include <i386/machine_routines.h>
#include <i386/seg.h>
#include <i386/fpu.h>

#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <sys/sdt.h>


/* Forward: */
extern kern_return_t thread_getstatus(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
    thread_state_t tstate, mach_msg_type_number_t count);

/* Signal handler flavors supported */
/* These definitions should match the Libc implementation */
#define UC_TRAD                 1
#define UC_FLAVOR               30
#define UC_SET_ALT_STACK        0x40000000
#define UC_RESET_ALT_STACK      0x80000000

#define C_32_STK_ALIGN          16
#define C_64_STK_ALIGN          16
#define C_64_REDZONE_LEN        128
#define TRUNC_DOWN32(a, c)      ((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a, c)      ((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
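
/*
 * Worked example (illustrative): TRUNC_DOWN64 subtracts the alignment
 * before masking, so the result lands strictly below the input even when
 * the input is already aligned:
 *
 *   TRUNC_DOWN64(0x7ffee3c8, 16) == ((0x7ffee3c8 - 16) & ~15) == 0x7ffee3b0
 *   TRUNC_DOWN64(0x7ffee3c0, 16) == ((0x7ffee3c0 - 16) & ~15) == 0x7ffee3b0
 *
 * i.e. the macros guarantee at least one full alignment unit of headroom
 * below the original pointer.
 */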

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below. After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
struct sigframe32 {
        int             retaddr;
        user32_addr_t   catcher;        /* sig_t */
        int             sigstyle;
        int             sig;
        user32_addr_t   sinfo;          /* siginfo32_t* */
        user32_addr_t   uctx;           /* struct ucontext32 */
        user32_addr_t   token;
};
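
/*
 * Layout note (illustrative): on i386 the signal trampoline is entered with
 * a cdecl-style frame, so sigframe32 mirrors the user stack at entry: a
 * dummy return address followed by the handler arguments. The user-side
 * prototype this assumes (a sketch; the authoritative declaration lives in
 * Libc, not here) is roughly:
 *
 *   void _sigtramp(sig_t catcher, int sigstyle, int sig,
 *                  siginfo_t *sinfo, struct ucontext *uctx);
 *
 * with the sigreturn validation token stored after the named arguments.
 */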

/*
 * Declare table of structure flavors and sizes for 64-bit and 32-bit processes
 * for the cases of extended states (plain FP, or AVX):
 */
typedef struct {
        int flavor; natural_t state_count; size_t mcontext_size;
} xstate_info_t;
static const xstate_info_t thread_state64[] = {
        [FP]          = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64) },
        [FP_FULL]     = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64_full) },
        [AVX]         = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64) },
        [AVX_FULL]    = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64_full) },
        [AVX512]      = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64) },
        [AVX512_FULL] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64_full) }
};
static const xstate_info_t thread_state32[] = {
        [FP]     = { x86_FLOAT_STATE32, x86_FLOAT_STATE32_COUNT, sizeof(struct mcontext32) },
        [AVX]    = { x86_AVX_STATE32, x86_AVX_STATE32_COUNT, sizeof(struct mcontext_avx32) },
        [AVX512] = { x86_AVX512_STATE32, x86_AVX512_STATE32_COUNT, sizeof(struct mcontext_avx512_32) }
};
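
/*
 * Usage sketch (illustrative): the xstate_t returned by current_xstate()
 * doubles as the index into these tables, so selecting the matching thread
 * flavor and mcontext size is a pair of array lookups, e.g.
 *
 *   xstate_t xs = current_xstate();            -- FP, AVX or AVX512
 *   flavor      = thread_state64[xs].flavor;
 *   mctx_size   = thread_state64[xs].mcontext_size;
 *
 * sendsig() and sigreturn() below both rely on this, so supporting a new
 * extended-state variant is mostly a matter of adding a table entry.
 */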

/*
 * NOTE: Source and target may *NOT* overlap!
 * XXX: Unify with bsd/kern/kern_exit.c
 */
static void
siginfo_user_to_user32_x86(user_siginfo_t *in, user32_siginfo_t *out)
{
        out->si_signo = in->si_signo;
        out->si_errno = in->si_errno;
        out->si_code = in->si_code;
        out->si_pid = in->si_pid;
        out->si_uid = in->si_uid;
        out->si_status = in->si_status;
        out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
        /* following cast works for sival_int because of padding */
        out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
        out->si_band = in->si_band;     /* range reduction */
        out->__pad[0] = in->pad[0];     /* mcontext.ss.r1 */
}

static void
siginfo_user_to_user64_x86(user_siginfo_t *in, user64_siginfo_t *out)
{
        out->si_signo = in->si_signo;
        out->si_errno = in->si_errno;
        out->si_code = in->si_code;
        out->si_pid = in->si_pid;
        out->si_uid = in->si_uid;
        out->si_status = in->si_status;
        out->si_addr = in->si_addr;
        out->si_value.sival_ptr = in->si_value.sival_ptr;
        out->si_band = in->si_band;     /* range reduction */
        out->__pad[0] = in->pad[0];     /* mcontext.ss.r1 */
}

void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code, sigset_t siginfo)
{
        union {
                struct mcontext_avx32           mctx_avx32;
                struct mcontext_avx64           mctx_avx64;
                struct mcontext_avx64_full      mctx_avx64_full;
                struct mcontext_avx512_32       mctx_avx512_32;
                struct mcontext_avx512_64       mctx_avx512_64;
                struct mcontext_avx512_64_full  mctx_avx512_64_full;
        } mctx_store, *mctxp = &mctx_store;

        user_addr_t     ua_sp;
        user_addr_t     ua_fp;
        user_addr_t     ua_cr2;
        user_addr_t     ua_sip;
        user_addr_t     ua_uctxp;
        user_addr_t     ua_mctxp;
        user_siginfo_t  sinfo64;

        struct sigacts *ps = p->p_sigacts;
        int oonstack, flavor;
        user_addr_t trampact;
        int sigonstack;
        void * state, *fpstate;
        mach_msg_type_number_t state_count;

        thread_t thread;
        struct uthread * ut;
        int stack_size = 0;
        int infostyle = UC_TRAD;
        xstate_t sig_xstate;
        user_addr_t token_uctx;
        kern_return_t kr;
        boolean_t reset_ss = TRUE;

        thread = current_thread();
        ut = get_bsdthread_info(thread);

        if (siginfo & sigmask(sig)) {
                infostyle = UC_FLAVOR;
        }

        oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
        trampact = ps->ps_trampact[sig];
        sigonstack = (ps->ps_sigonstack & sigmask(sig));

        /*
         * init siginfo
         */
        proc_unlock(p);

        bzero((caddr_t)&sinfo64, sizeof(sinfo64));
        sinfo64.si_signo = sig;

        bzero(mctxp, sizeof(*mctxp));

        sig_xstate = current_xstate();

        if (proc_is64bit(p)) {
                x86_thread_state64_t    *tstate64;
                struct user_ucontext64  uctx64;
                user64_addr_t token;
                int task_has_ldt = thread_task_has_ldt(thread);

                if (task_has_ldt) {
                        flavor = x86_THREAD_FULL_STATE64;
                        state_count = x86_THREAD_FULL_STATE64_COUNT;
                        fpstate = (void *)&mctxp->mctx_avx64_full.fs;
                        sig_xstate |= STATE64_FULL;
                } else {
                        flavor = x86_THREAD_STATE64;
                        state_count = x86_THREAD_STATE64_COUNT;
                        fpstate = (void *)&mctxp->mctx_avx64.fs;
                }
                state = (void *)&mctxp->mctx_avx64.ss;

                /*
                 * The state copying is performed with pointers to fields in the
                 * state struct. This works specifically because the mcontext is
                 * laid out with the variable-sized FP-state as the last member.
                 * However, with the requirement to support passing "full" 64-bit
                 * state to the signal handler, that layout has now changed (the
                 * "full" state has a larger "ss" member than the non-"full"
                 * structure). Because of this, and to retain the array-lookup
                 * method of determining structure sizes, we OR-in STATE64_FULL
                 * to sig_xstate to ensure the proper mcontext size is passed.
                 */
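
                /*
                 * Concretely (illustrative): if current_xstate() returned AVX
                 * and the task has an LDT, OR-ing in STATE64_FULL turns the
                 * index AVX into AVX_FULL, so the table lookups below yield
                 * x86_AVX_STATE64 paired with sizeof(struct mcontext_avx64_full)
                 * rather than sizeof(struct mcontext_avx64).
                 */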

                if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
                        goto bad;
                }

                if ((sig_xstate & STATE64_FULL) && mctxp->mctx_avx64.ss.cs != USER64_CS) {
                        if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
                            (sigonstack)) {
                                reset_ss = TRUE;
                        } else {
                                reset_ss = FALSE;
                        }
                } else {
                        reset_ss = FALSE;
                }

                flavor = thread_state64[sig_xstate].flavor;
                state_count = thread_state64[sig_xstate].state_count;
                if (thread_getstatus(thread, flavor, (thread_state_t)fpstate, &state_count) != KERN_SUCCESS) {
                        goto bad;
                }

                flavor = x86_EXCEPTION_STATE64;
                state_count = x86_EXCEPTION_STATE64_COUNT;
                state = (void *)&mctxp->mctx_avx64.es;
                if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
                        goto bad;
                }

                tstate64 = &mctxp->mctx_avx64.ss;

                /* figure out where our new stack lives */
                if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
                    (sigonstack)) {
                        ua_sp = ut->uu_sigstk.ss_sp;
                        stack_size = ut->uu_sigstk.ss_size;
                        ua_sp += stack_size;
                        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
                } else {
                        if ((sig_xstate & STATE64_FULL) && tstate64->cs != USER64_CS) {
                                reset_ss = FALSE;
                        }
                        ua_sp = tstate64->rsp;
                }
                ua_cr2 = mctxp->mctx_avx64.es.faultvaddr;

                /* The x86_64 ABI defines a 128-byte red zone. */
                ua_sp -= C_64_REDZONE_LEN;

                ua_sp -= sizeof(struct user_ucontext64);
                ua_uctxp = ua_sp;                       // someone tramples the first word!

                ua_sp -= sizeof(user64_siginfo_t);
                ua_sip = ua_sp;

                ua_sp -= thread_state64[sig_xstate].mcontext_size;
                ua_mctxp = ua_sp;

                /*
                 * Align the frame and stack pointers to 16 bytes for SSE.
                 * (Note that we use 'ua_fp' as the base of the stack going forward)
                 */
                ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);

                /*
                 * But we need to account for the return address so the alignment is
                 * truly "correct" at _sigtramp
                 */
                ua_fp -= sizeof(user_addr_t);
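
                /*
                 * Worked example (illustrative): TRUNC_DOWN64 leaves ua_fp with
                 * ua_fp % 16 == 0; subtracting the 8-byte return-address slot
                 * leaves ua_fp % 16 == 8, which is exactly how %rsp looks on
                 * entry to any function reached by a call instruction, so
                 * _sigtramp sees a System V ABI-conformant stack.
                 */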

                /*
                 * Generate the validation token for sigreturn
                 */
                token_uctx = ua_uctxp;
                kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
                assert(kr == KERN_SUCCESS);
                token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
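
                /*
                 * Sketch of the check this enables (see sigreturn() below): the
                 * kernel recomputes (user uctx pointer) ^ ps_sigreturn_token and
                 * fails with EINVAL when the caller-supplied token differs,
                 * unless sigreturn validation is disabled for the process.
                 */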

                /*
                 * Build the signal context to be used by sigreturn.
                 */
                bzero(&uctx64, sizeof(uctx64));

                uctx64.uc_onstack = oonstack;
                uctx64.uc_sigmask = mask;
                uctx64.uc_stack.ss_sp = ua_fp;
                uctx64.uc_stack.ss_size = stack_size;

                if (oonstack) {
                        uctx64.uc_stack.ss_flags |= SS_ONSTACK;
                }
                uctx64.uc_link = 0;

                uctx64.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
                uctx64.uc_mcontext64 = ua_mctxp;

                if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof(uctx64))) {
                        goto bad;
                }

                if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state64[sig_xstate].mcontext_size)) {
                        goto bad;
                }

                sinfo64.pad[0] = tstate64->rsp;
                sinfo64.si_addr = tstate64->rip;

                tstate64->rip = trampact;
                tstate64->rsp = ua_fp;
                tstate64->rflags = get_eflags_exportmask();

                /*
                 * SETH - need to set these for processes with LDTs
                 */
                tstate64->cs = USER64_CS;
                tstate64->fs = NULL_SEG;
                /*
                 * Set gs to 0 here to prevent restoration of %gs on return-to-user. If we
                 * did NOT do that here and %gs was non-zero, we'd blow away gsbase when
                 * we restore %gs in the kernel exit trampoline.
                 */
                tstate64->gs = 0;

                if (sig_xstate & STATE64_FULL) {
                        /* Reset DS, ES, and possibly SS */
                        if (reset_ss) {
                                /*
                                 * Restore %ss if (a) an altstack was used for signal delivery
                                 * or (b) %cs at the time of the signal was the default
                                 * (USER64_CS)
                                 */
                                mctxp->mctx_avx64_full.ss.ss = USER64_DS;
                        }
                        mctxp->mctx_avx64_full.ss.ds = USER64_DS;
                        mctxp->mctx_avx64_full.ss.es = 0;
                }

                /*
                 * Build the argument list for the signal handler.
                 * Handler should call sigreturn to get out of it
                 */
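                /*
                 * rdi/rsi/rdx/rcx/r8/r9 are the six System V AMD64 integer
                 * argument registers, so the trampoline starts as if called as
                 * (a sketch; the authoritative prototype lives in Libc):
                 *
                 *   void _sigtramp(sig_t catcher, int infostyle, int sig,
                 *                  siginfo_t *sinfo, void *uctx, uint64_t token);
                 */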
                tstate64->rdi = ua_catcher;
                tstate64->rsi = infostyle;
                tstate64->rdx = sig;
                tstate64->rcx = ua_sip;
                tstate64->r8 = ua_uctxp;
                tstate64->r9 = token;
        } else {
                x86_thread_state32_t    *tstate32;
                struct user_ucontext32  uctx32;
                struct sigframe32       frame32;
                user32_addr_t token;

                flavor = x86_THREAD_STATE32;
                state_count = x86_THREAD_STATE32_COUNT;
                state = (void *)&mctxp->mctx_avx32.ss;
                if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
                        goto bad;
                }

                flavor = thread_state32[sig_xstate].flavor;
                state_count = thread_state32[sig_xstate].state_count;
                state = (void *)&mctxp->mctx_avx32.fs;
                if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
                        goto bad;
                }

                flavor = x86_EXCEPTION_STATE32;
                state_count = x86_EXCEPTION_STATE32_COUNT;
                state = (void *)&mctxp->mctx_avx32.es;
                if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
                        goto bad;
                }

                tstate32 = &mctxp->mctx_avx32.ss;

                /* figure out where our new stack lives */
                if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
                    (sigonstack)) {
                        ua_sp = ut->uu_sigstk.ss_sp;
                        stack_size = ut->uu_sigstk.ss_size;
                        ua_sp += stack_size;
                        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
                } else {
                        ua_sp = tstate32->esp;
                }
                ua_cr2 = mctxp->mctx_avx32.es.faultvaddr;

                ua_sp -= sizeof(struct user_ucontext32);
                ua_uctxp = ua_sp;                       // someone tramples the first word!

                ua_sp -= sizeof(user32_siginfo_t);
                ua_sip = ua_sp;

                ua_sp -= thread_state32[sig_xstate].mcontext_size;
                ua_mctxp = ua_sp;

                ua_sp -= sizeof(struct sigframe32);
                ua_fp = ua_sp;

                /*
                 * Align the frame and stack pointers to 16 bytes for SSE.
                 * (Note that we use 'fp' as the base of the stack going forward)
                 */
                ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);

                /*
                 * But we need to account for the return address so the alignment is
                 * truly "correct" at _sigtramp
                 */
                ua_fp -= sizeof(frame32.retaddr);

                /*
                 * Generate the validation token for sigreturn
                 */
                token_uctx = ua_uctxp;
                kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
                assert(kr == KERN_SUCCESS);
                token = CAST_DOWN_EXPLICIT(user32_addr_t, token_uctx) ^
                    CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token);

                /*
                 * Build the argument list for the signal handler.
                 * Handler should call sigreturn to get out of it
                 */
                frame32.retaddr = -1;
                frame32.sigstyle = infostyle;
                frame32.sig = sig;
                frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
                frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
                frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);
                frame32.token = token;

                if (copyout((caddr_t)&frame32, ua_fp, sizeof(frame32))) {
                        goto bad;
                }

                /*
                 * Build the signal context to be used by sigreturn.
                 */
                bzero(&uctx32, sizeof(uctx32));

                uctx32.uc_onstack = oonstack;
                uctx32.uc_sigmask = mask;
                uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
                uctx32.uc_stack.ss_size = stack_size;

                if (oonstack) {
                        uctx32.uc_stack.ss_flags |= SS_ONSTACK;
                }
                uctx32.uc_link = 0;

                uctx32.uc_mcsize = thread_state32[sig_xstate].mcontext_size;

                uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);

                if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof(uctx32))) {
                        goto bad;
                }

                if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state32[sig_xstate].mcontext_size)) {
                        goto bad;
                }

                sinfo64.pad[0] = tstate32->esp;
                sinfo64.si_addr = tstate32->eip;
        }

        switch (sig) {
        case SIGILL:
                switch (ut->uu_code) {
                case EXC_I386_INVOP:
                        sinfo64.si_code = ILL_ILLOPC;
                        break;
                default:
                        sinfo64.si_code = ILL_NOOP;
                }
                break;
        case SIGFPE:
#define FP_IE 0 /* Invalid operation */
#define FP_DE 1 /* Denormalized operand */
#define FP_ZE 2 /* Zero divide */
#define FP_OE 3 /* overflow */
#define FP_UE 4 /* underflow */
#define FP_PE 5 /* precision */
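
                /*
                 * These bits mirror the x87/MXCSR exception-flag layout. Example
                 * (illustrative): a floating-point divide-by-zero sets the sticky
                 * ZE bit, so (uu_subcode & (1 << FP_ZE)) is non-zero and the
                 * handler sees si_code == FPE_FLTDIV. With several sticky bits
                 * set at once, the first matching test below decides si_code.
                 */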
                if (ut->uu_code == EXC_I386_DIV) {
                        sinfo64.si_code = FPE_INTDIV;
                } else if (ut->uu_code == EXC_I386_INTO) {
                        sinfo64.si_code = FPE_INTOVF;
                } else if (ut->uu_subcode & (1 << FP_ZE)) {
                        sinfo64.si_code = FPE_FLTDIV;
                } else if (ut->uu_subcode & (1 << FP_OE)) {
                        sinfo64.si_code = FPE_FLTOVF;
                } else if (ut->uu_subcode & (1 << FP_UE)) {
                        sinfo64.si_code = FPE_FLTUND;
                } else if (ut->uu_subcode & (1 << FP_PE)) {
                        sinfo64.si_code = FPE_FLTRES;
                } else if (ut->uu_subcode & (1 << FP_IE)) {
                        sinfo64.si_code = FPE_FLTINV;
                } else {
                        sinfo64.si_code = FPE_NOOP;
                }
                break;
        case SIGBUS:
                sinfo64.si_code = BUS_ADRERR;
                sinfo64.si_addr = ua_cr2;
                break;
        case SIGTRAP:
                sinfo64.si_code = TRAP_BRKPT;
                break;
        case SIGSEGV:
                sinfo64.si_addr = ua_cr2;

                switch (ut->uu_code) {
                case EXC_I386_GPFLT:
                        /* CR2 is meaningless after GP fault */
                        /* XXX namespace clash! */
                        sinfo64.si_addr = 0ULL;
                        sinfo64.si_code = 0;
                        break;
                case KERN_PROTECTION_FAILURE:
                        sinfo64.si_code = SEGV_ACCERR;
                        break;
                case KERN_INVALID_ADDRESS:
                        sinfo64.si_code = SEGV_MAPERR;
                        break;
                default:
                        sinfo64.si_code = FPE_NOOP;
                }
                break;
        default:
        {
                int status_and_exitcode;

                /*
                 * All other signals need to fill out a minimum set of
                 * information for the siginfo structure passed into
                 * the signal handler, if SA_SIGINFO was specified.
                 *
                 * p->si_status actually contains both the status and
                 * the exit code; we save it off in its own variable
                 * for later breakdown.
                 */
                proc_lock(p);
                sinfo64.si_pid = p->si_pid;
                p->si_pid = 0;
                status_and_exitcode = p->si_status;
                p->si_status = 0;
                sinfo64.si_uid = p->si_uid;
                p->si_uid = 0;
                sinfo64.si_code = p->si_code;
                p->si_code = 0;
                proc_unlock(p);
                if (sinfo64.si_code == CLD_EXITED) {
                        if (WIFEXITED(status_and_exitcode)) {
                                sinfo64.si_code = CLD_EXITED;
                        } else if (WIFSIGNALED(status_and_exitcode)) {
                                if (WCOREDUMP(status_and_exitcode)) {
                                        sinfo64.si_code = CLD_DUMPED;
                                        status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                                } else {
                                        sinfo64.si_code = CLD_KILLED;
                                        status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                                }
                        }
                }
                /*
                 * The recorded status contains the exit code and the
                 * signal information, but the information to be passed
                 * in the siginfo to the handler is supposed to only
                 * contain the status, so we have to shift it out.
                 */
                sinfo64.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
                p->p_xhighbits = 0;
                break;
        }
        }
        if (proc_is64bit(p)) {
                user64_siginfo_t sinfo64_user64;

                bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));

                siginfo_user_to_user64_x86(&sinfo64, &sinfo64_user64);

#if CONFIG_DTRACE
                bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

                ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
                ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
                ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
                ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
                ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
                /* XXX truncates faulting address to void * on K32 */
                ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);

                /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
                switch (sig) {
                case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
                        DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
                        break;
                default:
                        break;
                }

                /* XXX truncates catcher address to uintptr_t */
                DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
                    void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */

                if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof(sinfo64_user64))) {
                        goto bad;
                }

                if (sig_xstate & STATE64_FULL) {
                        flavor = x86_THREAD_FULL_STATE64;
                        state_count = x86_THREAD_FULL_STATE64_COUNT;
                } else {
                        flavor = x86_THREAD_STATE64;
                        state_count = x86_THREAD_STATE64_COUNT;
                }
                state = (void *)&mctxp->mctx_avx64.ss;
        } else {
                x86_thread_state32_t    *tstate32;
                user32_siginfo_t sinfo32;

                bzero((caddr_t)&sinfo32, sizeof(sinfo32));

                siginfo_user_to_user32_x86(&sinfo64, &sinfo32);

#if CONFIG_DTRACE
                bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

                ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo;
                ut->t_dtrace_siginfo.si_code = sinfo32.si_code;
                ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid;
                ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid;
                ut->t_dtrace_siginfo.si_status = sinfo32.si_status;
                ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr);

                /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
                switch (sig) {
                case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
                        DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
                        break;
                default:
                        break;
                }

                DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
                    void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */

                if (copyout((caddr_t)&sinfo32, ua_sip, sizeof(sinfo32))) {
                        goto bad;
                }

                tstate32 = &mctxp->mctx_avx32.ss;

                tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
                tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);

                tstate32->eflags = get_eflags_exportmask();

                tstate32->cs = USER_CS;
                tstate32->ss = USER_DS;
                tstate32->ds = USER_DS;
                tstate32->es = USER_DS;
                tstate32->fs = NULL_SEG;
                tstate32->gs = USER_CTHREAD;

                flavor = x86_THREAD_STATE32;
                state_count = x86_THREAD_STATE32_COUNT;
                state = (void *)tstate32;
        }
        if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS) {
                goto bad;
        }
        ml_fp_setvalid(FALSE);

        /* Tell the PAL layer about the signal */
        pal_set_signal_delivery( thread );

        proc_lock(p);

        return;

bad:

        proc_lock(p);
        SIGACTION(p, SIGILL) = SIG_DFL;
        sig = sigmask(SIGILL);
        p->p_sigignore &= ~sig;
        p->p_sigcatch &= ~sig;
        ut->uu_sigmask &= ~sig;
        /* sendsig is called with signal lock held */
        proc_unlock(p);
        psignal_locked(p, SIGILL);
        proc_lock(p);
        return;
}
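
/*
 * End-to-end sketch (illustrative, user-space view) of the frame built above:
 *
 *   _sigtramp(...)          -- entered with the register/stack arguments
 *                              set up by sendsig()
 *     -> handler(sig, info, uctx)
 *     -> sigreturn(uctx, infostyle, token)
 *                           -- restores the saved mcontext and resumes at
 *                              the interrupted pc
 */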

/*
 * System call to clean up state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */

int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
        union {
                struct mcontext_avx32           mctx_avx32;
                struct mcontext_avx64           mctx_avx64;
                struct mcontext_avx64_full      mctx_avx64_full;
                struct mcontext_avx512_32       mctx_avx512_32;
                struct mcontext_avx512_64       mctx_avx512_64;
                struct mcontext_avx512_64_full  mctx_avx512_64_full;
        } mctx_store, *mctxp = &mctx_store;

        thread_t thread = current_thread();
        struct uthread * ut;
        struct sigacts *ps = p->p_sigacts;
        int error;
        int onstack = 0;

        mach_msg_type_number_t ts_count;
        unsigned int ts_flavor;
        void * ts;
        mach_msg_type_number_t fs_count;
        unsigned int fs_flavor;
        void * fs;
        int rval = EJUSTRETURN;
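        /*
         * EJUSTRETURN tells the syscall return path not to overwrite the
         * thread's registers with the usual return-value convention: on
         * success, the state installed below is what resumes in user space.
         */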
        xstate_t sig_xstate;
        uint32_t sigreturn_validation;
        user_addr_t token_uctx;
        kern_return_t kr;

        ut = (struct uthread *)get_bsdthread_info(thread);

        /* see osfmk/kern/restartable.c */
        act_set_ast_reset_pcs(thread);
        /*
         * If we are being asked to change the altstack flag on the thread, we
         * just set/reset it and return (the uap->uctx is not used).
         */
        if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
                ut->uu_sigstk.ss_flags |= SA_ONSTACK;
                return 0;
        } else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
                ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
                return 0;
        }

        bzero(mctxp, sizeof(*mctxp));

        sig_xstate = current_xstate();

        sigreturn_validation = atomic_load_explicit(
                &ps->ps_sigreturn_validation, memory_order_relaxed);
        token_uctx = uap->uctx;
        kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
        assert(kr == KERN_SUCCESS);

        if (proc_is64bit(p)) {
                struct user_ucontext64  uctx64;
                user64_addr_t token;
                int task_has_ldt = thread_task_has_ldt(thread);

                if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof(uctx64)))) {
                        return error;
                }

                onstack = uctx64.uc_onstack & 01;
                ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;

                if (task_has_ldt) {
                        ts_flavor = x86_THREAD_FULL_STATE64;
                        ts_count = x86_THREAD_FULL_STATE64_COUNT;
                        fs = (void *)&mctxp->mctx_avx64_full.fs;
                        sig_xstate |= STATE64_FULL;
                } else {
                        ts_flavor = x86_THREAD_STATE64;
                        ts_count = x86_THREAD_STATE64_COUNT;
                        fs = (void *)&mctxp->mctx_avx64.fs;
                }

                if ((error = copyin(uctx64.uc_mcontext64, (void *)mctxp, thread_state64[sig_xstate].mcontext_size))) {
                        return error;
                }

                ts = (void *)&mctxp->mctx_avx64.ss;

                fs_flavor = thread_state64[sig_xstate].flavor;
                fs_count = thread_state64[sig_xstate].state_count;

                token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
                if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
                        printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
                            p->p_comm, p->p_pid, (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
                        if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
                                rval = EINVAL;
                        }
                }
        } else {
                struct user_ucontext32  uctx32;
                user32_addr_t token;

                if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof(uctx32)))) {
                        return error;
                }

                if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)mctxp, thread_state32[sig_xstate].mcontext_size))) {
                        return error;
                }

                onstack = uctx32.uc_onstack & 01;
                ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

                ts_flavor = x86_THREAD_STATE32;
                ts_count = x86_THREAD_STATE32_COUNT;
                ts = (void *)&mctxp->mctx_avx32.ss;

                fs_flavor = thread_state32[sig_xstate].flavor;
                fs_count = thread_state32[sig_xstate].state_count;
                fs = (void *)&mctxp->mctx_avx32.fs;

                token = CAST_DOWN_EXPLICIT(user32_addr_t, uap->uctx) ^
                    CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token);
                if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
                        printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
                            p->p_comm, p->p_pid, (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
                        if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
                                rval = EINVAL;
                        }
                }
        }

        if (onstack) {
                ut->uu_sigstk.ss_flags |= SA_ONSTACK;
        } else {
                ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
        }

        if (ut->uu_siglist & ~ut->uu_sigmask) {
                signal_setast(thread);
        }

        if (rval == EINVAL) {
                goto error_ret;
        }

        /*
         * thread_set_state() does all the needed checks for the passed in
         * content
         */
        if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS) {
                rval = EINVAL;
#if DEVELOPMENT || DEBUG
                printf("process %s[%d] sigreturn thread_setstatus error %d\n",
                    p->p_comm, p->p_pid, rval);
#endif /* DEVELOPMENT || DEBUG */
                goto error_ret;
        }

        ml_fp_setvalid(TRUE);

        if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) {
                rval = EINVAL;
#if DEVELOPMENT || DEBUG
                printf("process %s[%d] sigreturn thread_setstatus error %d\n",
                    p->p_comm, p->p_pid, rval);
#endif /* DEVELOPMENT || DEBUG */
                goto error_ret;
        }
error_ret:
        return rval;
}


/*
 * machine_exception() performs machine-dependent translation
 * of a mach exception to a unix signal.
 */
int
machine_exception(int exception,
    mach_exception_code_t code,
    __unused mach_exception_subcode_t subcode)
{
        switch (exception) {
        case EXC_BAD_ACCESS:
                /* Map GP fault to SIGSEGV, otherwise defer to caller */
                if (code == EXC_I386_GPFLT) {
                        return SIGSEGV;
                }
                break;

        case EXC_BAD_INSTRUCTION:
                return SIGILL;

        case EXC_ARITHMETIC:
                return SIGFPE;

        case EXC_SOFTWARE:
                if (code == EXC_I386_BOUND) {
                        /*
                         * Map #BR, the Bound Range Exceeded exception, to
                         * SIGTRAP.
                         */
                        return SIGTRAP;
                }
                break;
        }

        return 0;
}
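
/*
 * Example (illustrative): a user-mode integer divide-by-zero (#DE) is raised
 * as EXC_ARITHMETIC with code EXC_I386_DIV, which this routine maps to
 * SIGFPE; sendsig() above then refines si_code to FPE_INTDIV.
 */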