]> git.saurik.com Git - apple/xnu.git/blob - bsd/dev/i386/unix_signal.c
603b216142413035da1576fbb04286f6ee9bf85b
[apple/xnu.git] / bsd / dev / i386 / unix_signal.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1992 NeXT, Inc.
30 *
31 * HISTORY
32 * 13 May 1992 ? at NeXT
33 * Created.
34 */
35
36 #include <mach/mach_types.h>
37 #include <mach/exception.h>
38
39 #include <kern/thread.h>
40
41 #include <sys/systm.h>
42 #include <sys/param.h>
43 #include <sys/proc_internal.h>
44 #include <sys/user.h>
45 #include <sys/sysproto.h>
46 #include <sys/sysent.h>
47 #include <sys/ucontext.h>
48 #include <sys/wait.h>
49
50 #include <sys/ux_exception.h>
51
52 #include <mach/thread_act.h> /* for thread_abort_safely */
53 #include <mach/thread_status.h>
54
55 #include <i386/eflags.h>
56 #include <i386/psl.h>
57 #include <i386/machine_routines.h>
58 #include <i386/seg.h>
59 #include <i386/fpu.h>
60
61 #include <machine/pal_routines.h>
62
63 #include <sys/kdebug.h>
64 #include <sys/sdt.h>
65
66
67 /* Forward: */
68 extern kern_return_t thread_getstatus(thread_t act, int flavor,
69 thread_state_t tstate, mach_msg_type_number_t *count);
70 extern kern_return_t thread_setstatus(thread_t thread, int flavor,
71 thread_state_t tstate, mach_msg_type_number_t count);
72
/* Signal handler flavors supported */
/* These defns should match the Libc implmn */
#define UC_TRAD 1                       /* old-style 1-arg handler */
#define UC_FLAVOR 30                    /* SA_SIGINFO 3-arg handler */
/* Pseudo-infostyles passed to sigreturn() to toggle the altstack flag only */
#define UC_SET_ALT_STACK 0x40000000
#define UC_RESET_ALT_STACK 0x80000000

/* User stack must stay 16-byte aligned for SSE on both ABIs */
#define C_32_STK_ALIGN 16
#define C_64_STK_ALIGN 16
#define C_64_REDZONE_LEN 128            /* x86_64 ABI red zone below %rsp */
/* Round 'a' down to a multiple of 'c' (c must be a power of two) */
#define TRUNC_DOWN32(a, c) ((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a, c) ((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
85
86 /*
87 * Send an interrupt to process.
88 *
89 * Stack is set up to allow sigcode stored
90 * in u. to call routine, followed by chmk
91 * to sigreturn routine below. After sigreturn
92 * resets the signal mask, the stack, the frame
93 * pointer, and the argument pointer, it returns
94 * to the user specified pc, psl.
95 */
/*
 * Argument frame pushed on a 32-bit process's stack by sendsig();
 * field order is the cdecl argument layout _sigtramp expects.
 */
struct sigframe32 {
	int retaddr;            /* fake return address (sendsig stores -1) */
	user32_addr_t catcher;  /* sig_t - the user's handler */
	int sigstyle;           /* UC_TRAD or UC_FLAVOR */
	int sig;                /* signal number being delivered */
	user32_addr_t sinfo;    /* siginfo32_t* */
	user32_addr_t uctx;     /* struct ucontext32 */
	user32_addr_t token;    /* sigreturn validation token (uctx ^ per-proc secret) */
};
105
106 /*
107 * Declare table of structure flavors and sizes for 64-bit and 32-bit processes
108 * for the cases of extended states (plain FP, or AVX):
109 */
/*
 * Per-xstate description of the thread-state flavor, its count, and the
 * user-visible mcontext size. Indexed by xstate_t (FP/AVX/AVX512, with the
 * *_FULL variants used for 64-bit tasks that have an LDT).
 */
typedef struct {
	int flavor; natural_t state_count; size_t mcontext_size;
} xstate_info_t;
static const xstate_info_t thread_state64[] = {
	[FP] = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64) },
	[FP_FULL] = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64_full) },
	[AVX] = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64) },
	[AVX_FULL] = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64_full) },
	[AVX512] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64) },
	[AVX512_FULL] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64_full) }
};
/* 32-bit processes have no LDT "full" variants */
static const xstate_info_t thread_state32[] = {
	[FP] = { x86_FLOAT_STATE32, x86_FLOAT_STATE32_COUNT, sizeof(struct mcontext32) },
	[AVX] = { x86_AVX_STATE32, x86_AVX_STATE32_COUNT, sizeof(struct mcontext_avx32) },
	[AVX512] = { x86_AVX512_STATE32, x86_AVX512_STATE32_COUNT, sizeof(struct mcontext_avx512_32) }
};
126
127 /*
128 * NOTE: Source and target may *NOT* overlap!
129 * XXX: Unify with bsd/kern/kern_exit.c
130 */
131 static void
132 siginfo_user_to_user32_x86(user_siginfo_t *in, user32_siginfo_t *out)
133 {
134 out->si_signo = in->si_signo;
135 out->si_errno = in->si_errno;
136 out->si_code = in->si_code;
137 out->si_pid = in->si_pid;
138 out->si_uid = in->si_uid;
139 out->si_status = in->si_status;
140 out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
141 /* following cast works for sival_int because of padding */
142 out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
143 out->si_band = in->si_band; /* range reduction */
144 out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */
145 }
146
147 static void
148 siginfo_user_to_user64_x86(user_siginfo_t *in, user64_siginfo_t *out)
149 {
150 out->si_signo = in->si_signo;
151 out->si_errno = in->si_errno;
152 out->si_code = in->si_code;
153 out->si_pid = in->si_pid;
154 out->si_uid = in->si_uid;
155 out->si_status = in->si_status;
156 out->si_addr = in->si_addr;
157 out->si_value.sival_ptr = in->si_value.sival_ptr;
158 out->si_band = in->si_band; /* range reduction */
159 out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */
160 }
161
162 void
163 sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
164 {
165 union {
166 struct mcontext_avx32 mctx_avx32;
167 struct mcontext_avx64 mctx_avx64;
168 struct mcontext_avx64_full mctx_avx64_full;
169 struct mcontext_avx512_32 mctx_avx512_32;
170 struct mcontext_avx512_64 mctx_avx512_64;
171 struct mcontext_avx512_64_full mctx_avx512_64_full;
172 } mctx_store, *mctxp = &mctx_store;
173
174 user_addr_t ua_sp;
175 user_addr_t ua_fp;
176 user_addr_t ua_cr2;
177 user_addr_t ua_sip;
178 user_addr_t ua_uctxp;
179 user_addr_t ua_mctxp;
180 user_siginfo_t sinfo64;
181
182 struct sigacts *ps = p->p_sigacts;
183 int oonstack, flavor;
184 user_addr_t trampact;
185 int sigonstack;
186 void * state, *fpstate;
187 mach_msg_type_number_t state_count;
188
189 thread_t thread;
190 struct uthread * ut;
191 int stack_size = 0;
192 int infostyle = UC_TRAD;
193 xstate_t sig_xstate;
194 user_addr_t token_uctx;
195 kern_return_t kr;
196 boolean_t reset_ss = TRUE;
197
198 thread = current_thread();
199 ut = get_bsdthread_info(thread);
200
201 if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
202 infostyle = UC_FLAVOR;
203 }
204
205 oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
206 trampact = ps->ps_trampact[sig];
207 sigonstack = (ps->ps_sigonstack & sigmask(sig));
208
209 /*
210 * init siginfo
211 */
212 proc_unlock(p);
213
214 bzero((caddr_t)&sinfo64, sizeof(sinfo64));
215 sinfo64.si_signo = sig;
216
217 bzero(mctxp, sizeof(*mctxp));
218
219 sig_xstate = current_xstate();
220
221 if (proc_is64bit(p)) {
222 x86_thread_state64_t *tstate64;
223 struct user_ucontext64 uctx64;
224 user64_addr_t token;
225 int task_has_ldt = thread_task_has_ldt(thread);
226
227 if (task_has_ldt) {
228 flavor = x86_THREAD_FULL_STATE64;
229 state_count = x86_THREAD_FULL_STATE64_COUNT;
230 fpstate = (void *)&mctxp->mctx_avx64_full.fs;
231 sig_xstate |= STATE64_FULL;
232 } else {
233 flavor = x86_THREAD_STATE64;
234 state_count = x86_THREAD_STATE64_COUNT;
235 fpstate = (void *)&mctxp->mctx_avx64.fs;
236 }
237 state = (void *)&mctxp->mctx_avx64.ss;
238
239 /*
240 * The state copying is performed with pointers to fields in the state
241 * struct. This works specifically because the mcontext is layed-out with the
242 * variable-sized FP-state as the last member. However, with the requirement
243 * to support passing "full" 64-bit state to the signal handler, that layout has now
244 * changed (since the "full" state has a larger "ss" member than the non-"full"
245 * structure. Because of this, and to retain the array-lookup method of determining
246 * structure sizes, we OR-in STATE64_FULL to sig_xstate to ensure the proper mcontext
247 * size is passed.
248 */
249
250 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
251 goto bad;
252 }
253
254 if ((sig_xstate & STATE64_FULL) && mctxp->mctx_avx64.ss.cs != USER64_CS) {
255 if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
256 (sigonstack)) {
257 reset_ss = TRUE;
258 } else {
259 reset_ss = FALSE;
260 }
261 } else {
262 reset_ss = FALSE;
263 }
264
265 flavor = thread_state64[sig_xstate].flavor;
266 state_count = thread_state64[sig_xstate].state_count;
267 if (thread_getstatus(thread, flavor, (thread_state_t)fpstate, &state_count) != KERN_SUCCESS) {
268 goto bad;
269 }
270
271 flavor = x86_EXCEPTION_STATE64;
272 state_count = x86_EXCEPTION_STATE64_COUNT;
273 state = (void *)&mctxp->mctx_avx64.es;
274 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
275 goto bad;
276 }
277
278 tstate64 = &mctxp->mctx_avx64.ss;
279
280 /* figure out where our new stack lives */
281 if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
282 (sigonstack)) {
283 ua_sp = ut->uu_sigstk.ss_sp;
284 stack_size = ut->uu_sigstk.ss_size;
285 ua_sp += stack_size;
286 ut->uu_sigstk.ss_flags |= SA_ONSTACK;
287 } else {
288 if ((sig_xstate & STATE64_FULL) && tstate64->cs != USER64_CS) {
289 reset_ss = FALSE;
290 }
291 ua_sp = tstate64->rsp;
292 }
293 ua_cr2 = mctxp->mctx_avx64.es.faultvaddr;
294
295 /* The x86_64 ABI defines a 128-byte red zone. */
296 ua_sp -= C_64_REDZONE_LEN;
297
298 ua_sp -= sizeof(struct user_ucontext64);
299 ua_uctxp = ua_sp; // someone tramples the first word!
300
301 ua_sp -= sizeof(user64_siginfo_t);
302 ua_sip = ua_sp;
303
304 ua_sp -= thread_state64[sig_xstate].mcontext_size;
305 ua_mctxp = ua_sp;
306
307 /*
308 * Align the frame and stack pointers to 16 bytes for SSE.
309 * (Note that we use 'ua_fp' as the base of the stack going forward)
310 */
311 ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);
312
313 /*
314 * But we need to account for the return address so the alignment is
315 * truly "correct" at _sigtramp
316 */
317 ua_fp -= sizeof(user_addr_t);
318
319 /*
320 * Generate the validation token for sigreturn
321 */
322 token_uctx = ua_uctxp;
323 kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
324 assert(kr == KERN_SUCCESS);
325 token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
326
327 /*
328 * Build the signal context to be used by sigreturn.
329 */
330 bzero(&uctx64, sizeof(uctx64));
331
332 uctx64.uc_onstack = oonstack;
333 uctx64.uc_sigmask = mask;
334 uctx64.uc_stack.ss_sp = ua_fp;
335 uctx64.uc_stack.ss_size = stack_size;
336
337 if (oonstack) {
338 uctx64.uc_stack.ss_flags |= SS_ONSTACK;
339 }
340 uctx64.uc_link = 0;
341
342 uctx64.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
343 uctx64.uc_mcontext64 = ua_mctxp;
344
345 if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof(uctx64))) {
346 goto bad;
347 }
348
349 if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state64[sig_xstate].mcontext_size)) {
350 goto bad;
351 }
352
353 sinfo64.pad[0] = tstate64->rsp;
354 sinfo64.si_addr = tstate64->rip;
355
356 tstate64->rip = trampact;
357 tstate64->rsp = ua_fp;
358 tstate64->rflags = get_eflags_exportmask();
359
360 /*
361 * SETH - need to set these for processes with LDTs
362 */
363 tstate64->cs = USER64_CS;
364 tstate64->fs = NULL_SEG;
365 /*
366 * Set gs to 0 here to prevent restoration of %gs on return-to-user. If we
367 * did NOT do that here and %gs was non-zero, we'd blow away gsbase when
368 * we restore %gs in the kernel exit trampoline.
369 */
370 tstate64->gs = 0;
371
372 if (sig_xstate & STATE64_FULL) {
373 /* Reset DS, ES, and possibly SS */
374 if (reset_ss) {
375 /*
376 * Restore %ss if (a) an altstack was used for signal delivery
377 * or (b) %cs at the time of the signal was the default
378 * (USER64_CS)
379 */
380 mctxp->mctx_avx64_full.ss.ss = USER64_DS;
381 }
382 mctxp->mctx_avx64_full.ss.ds = USER64_DS;
383 mctxp->mctx_avx64_full.ss.es = 0;
384 }
385
386 /*
387 * Build the argument list for the signal handler.
388 * Handler should call sigreturn to get out of it
389 */
390 tstate64->rdi = ua_catcher;
391 tstate64->rsi = infostyle;
392 tstate64->rdx = sig;
393 tstate64->rcx = ua_sip;
394 tstate64->r8 = ua_uctxp;
395 tstate64->r9 = token;
396 } else {
397 x86_thread_state32_t *tstate32;
398 struct user_ucontext32 uctx32;
399 struct sigframe32 frame32;
400 user32_addr_t token;
401
402 flavor = x86_THREAD_STATE32;
403 state_count = x86_THREAD_STATE32_COUNT;
404 state = (void *)&mctxp->mctx_avx32.ss;
405 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
406 goto bad;
407 }
408
409 flavor = thread_state32[sig_xstate].flavor;
410 state_count = thread_state32[sig_xstate].state_count;
411 state = (void *)&mctxp->mctx_avx32.fs;
412 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
413 goto bad;
414 }
415
416 flavor = x86_EXCEPTION_STATE32;
417 state_count = x86_EXCEPTION_STATE32_COUNT;
418 state = (void *)&mctxp->mctx_avx32.es;
419 if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) {
420 goto bad;
421 }
422
423 tstate32 = &mctxp->mctx_avx32.ss;
424
425 /* figure out where our new stack lives */
426 if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
427 (sigonstack)) {
428 ua_sp = ut->uu_sigstk.ss_sp;
429 stack_size = ut->uu_sigstk.ss_size;
430 ua_sp += stack_size;
431 ut->uu_sigstk.ss_flags |= SA_ONSTACK;
432 } else {
433 ua_sp = tstate32->esp;
434 }
435 ua_cr2 = mctxp->mctx_avx32.es.faultvaddr;
436
437 ua_sp -= sizeof(struct user_ucontext32);
438 ua_uctxp = ua_sp; // someone tramples the first word!
439
440 ua_sp -= sizeof(user32_siginfo_t);
441 ua_sip = ua_sp;
442
443 ua_sp -= thread_state32[sig_xstate].mcontext_size;
444 ua_mctxp = ua_sp;
445
446 ua_sp -= sizeof(struct sigframe32);
447 ua_fp = ua_sp;
448
449 /*
450 * Align the frame and stack pointers to 16 bytes for SSE.
451 * (Note that we use 'fp' as the base of the stack going forward)
452 */
453 ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);
454
455 /*
456 * But we need to account for the return address so the alignment is
457 * truly "correct" at _sigtramp
458 */
459 ua_fp -= sizeof(frame32.retaddr);
460
461 /*
462 * Generate the validation token for sigreturn
463 */
464 token_uctx = ua_uctxp;
465 kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
466 assert(kr == KERN_SUCCESS);
467 token = CAST_DOWN_EXPLICIT(user32_addr_t, token_uctx) ^
468 CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token);
469
470 /*
471 * Build the argument list for the signal handler.
472 * Handler should call sigreturn to get out of it
473 */
474 frame32.retaddr = -1;
475 frame32.sigstyle = infostyle;
476 frame32.sig = sig;
477 frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
478 frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
479 frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);
480 frame32.token = token;
481
482 if (copyout((caddr_t)&frame32, ua_fp, sizeof(frame32))) {
483 goto bad;
484 }
485
486 /*
487 * Build the signal context to be used by sigreturn.
488 */
489 bzero(&uctx32, sizeof(uctx32));
490
491 uctx32.uc_onstack = oonstack;
492 uctx32.uc_sigmask = mask;
493 uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
494 uctx32.uc_stack.ss_size = stack_size;
495
496 if (oonstack) {
497 uctx32.uc_stack.ss_flags |= SS_ONSTACK;
498 }
499 uctx32.uc_link = 0;
500
501 uctx32.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
502
503 uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);
504
505 if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof(uctx32))) {
506 goto bad;
507 }
508
509 if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state32[sig_xstate].mcontext_size)) {
510 goto bad;
511 }
512
513 sinfo64.pad[0] = tstate32->esp;
514 sinfo64.si_addr = tstate32->eip;
515 }
516
517 switch (sig) {
518 case SIGILL:
519 switch (ut->uu_code) {
520 case EXC_I386_INVOP:
521 sinfo64.si_code = ILL_ILLOPC;
522 break;
523 default:
524 sinfo64.si_code = ILL_NOOP;
525 }
526 break;
527 case SIGFPE:
528 #define FP_IE 0 /* Invalid operation */
529 #define FP_DE 1 /* Denormalized operand */
530 #define FP_ZE 2 /* Zero divide */
531 #define FP_OE 3 /* overflow */
532 #define FP_UE 4 /* underflow */
533 #define FP_PE 5 /* precision */
534 if (ut->uu_code == EXC_I386_DIV) {
535 sinfo64.si_code = FPE_INTDIV;
536 } else if (ut->uu_code == EXC_I386_INTO) {
537 sinfo64.si_code = FPE_INTOVF;
538 } else if (ut->uu_subcode & (1 << FP_ZE)) {
539 sinfo64.si_code = FPE_FLTDIV;
540 } else if (ut->uu_subcode & (1 << FP_OE)) {
541 sinfo64.si_code = FPE_FLTOVF;
542 } else if (ut->uu_subcode & (1 << FP_UE)) {
543 sinfo64.si_code = FPE_FLTUND;
544 } else if (ut->uu_subcode & (1 << FP_PE)) {
545 sinfo64.si_code = FPE_FLTRES;
546 } else if (ut->uu_subcode & (1 << FP_IE)) {
547 sinfo64.si_code = FPE_FLTINV;
548 } else {
549 sinfo64.si_code = FPE_NOOP;
550 }
551 break;
552 case SIGBUS:
553 sinfo64.si_code = BUS_ADRERR;
554 sinfo64.si_addr = ua_cr2;
555 break;
556 case SIGTRAP:
557 sinfo64.si_code = TRAP_BRKPT;
558 break;
559 case SIGSEGV:
560 sinfo64.si_addr = ua_cr2;
561
562 switch (ut->uu_code) {
563 case EXC_I386_GPFLT:
564 /* CR2 is meaningless after GP fault */
565 /* XXX namespace clash! */
566 sinfo64.si_addr = 0ULL;
567 sinfo64.si_code = 0;
568 break;
569 case KERN_PROTECTION_FAILURE:
570 sinfo64.si_code = SEGV_ACCERR;
571 break;
572 case KERN_INVALID_ADDRESS:
573 sinfo64.si_code = SEGV_MAPERR;
574 break;
575 default:
576 sinfo64.si_code = FPE_NOOP;
577 }
578 break;
579 default:
580 {
581 int status_and_exitcode;
582
583 /*
584 * All other signals need to fill out a minimum set of
585 * information for the siginfo structure passed into
586 * the signal handler, if SA_SIGINFO was specified.
587 *
588 * p->si_status actually contains both the status and
589 * the exit code; we save it off in its own variable
590 * for later breakdown.
591 */
592 proc_lock(p);
593 sinfo64.si_pid = p->si_pid;
594 p->si_pid = 0;
595 status_and_exitcode = p->si_status;
596 p->si_status = 0;
597 sinfo64.si_uid = p->si_uid;
598 p->si_uid = 0;
599 sinfo64.si_code = p->si_code;
600 p->si_code = 0;
601 proc_unlock(p);
602 if (sinfo64.si_code == CLD_EXITED) {
603 if (WIFEXITED(status_and_exitcode)) {
604 sinfo64.si_code = CLD_EXITED;
605 } else if (WIFSIGNALED(status_and_exitcode)) {
606 if (WCOREDUMP(status_and_exitcode)) {
607 sinfo64.si_code = CLD_DUMPED;
608 status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
609 } else {
610 sinfo64.si_code = CLD_KILLED;
611 status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
612 }
613 }
614 }
615 /*
616 * The recorded status contains the exit code and the
617 * signal information, but the information to be passed
618 * in the siginfo to the handler is supposed to only
619 * contain the status, so we have to shift it out.
620 */
621 sinfo64.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
622 p->p_xhighbits = 0;
623 break;
624 }
625 }
626 if (proc_is64bit(p)) {
627 user64_siginfo_t sinfo64_user64;
628
629 bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));
630
631 siginfo_user_to_user64_x86(&sinfo64, &sinfo64_user64);
632
633 #if CONFIG_DTRACE
634 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
635
636 ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
637 ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
638 ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
639 ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
640 ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
641 /* XXX truncates faulting address to void * on K32 */
642 ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
643
644 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
645 switch (sig) {
646 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
647 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
648 break;
649 default:
650 break;
651 }
652
653 /* XXX truncates catcher address to uintptr_t */
654 DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
655 void (*)(void), CAST_DOWN(sig_t, ua_catcher));
656 #endif /* CONFIG_DTRACE */
657
658 if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof(sinfo64_user64))) {
659 goto bad;
660 }
661
662 if (sig_xstate & STATE64_FULL) {
663 flavor = x86_THREAD_FULL_STATE64;
664 state_count = x86_THREAD_FULL_STATE64_COUNT;
665 } else {
666 flavor = x86_THREAD_STATE64;
667 state_count = x86_THREAD_STATE64_COUNT;
668 }
669 state = (void *)&mctxp->mctx_avx64.ss;
670 } else {
671 x86_thread_state32_t *tstate32;
672 user32_siginfo_t sinfo32;
673
674 bzero((caddr_t)&sinfo32, sizeof(sinfo32));
675
676 siginfo_user_to_user32_x86(&sinfo64, &sinfo32);
677
678 #if CONFIG_DTRACE
679 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
680
681 ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo;
682 ut->t_dtrace_siginfo.si_code = sinfo32.si_code;
683 ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid;
684 ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid;
685 ut->t_dtrace_siginfo.si_status = sinfo32.si_status;
686 ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr);
687
688 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
689 switch (sig) {
690 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
691 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
692 break;
693 default:
694 break;
695 }
696
697 DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
698 void (*)(void), CAST_DOWN(sig_t, ua_catcher));
699 #endif /* CONFIG_DTRACE */
700
701 if (copyout((caddr_t)&sinfo32, ua_sip, sizeof(sinfo32))) {
702 goto bad;
703 }
704
705 tstate32 = &mctxp->mctx_avx32.ss;
706
707 tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
708 tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
709
710 tstate32->eflags = get_eflags_exportmask();
711
712 tstate32->cs = USER_CS;
713 tstate32->ss = USER_DS;
714 tstate32->ds = USER_DS;
715 tstate32->es = USER_DS;
716 tstate32->fs = NULL_SEG;
717 tstate32->gs = USER_CTHREAD;
718
719 flavor = x86_THREAD_STATE32;
720 state_count = x86_THREAD_STATE32_COUNT;
721 state = (void *)tstate32;
722 }
723 if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS) {
724 goto bad;
725 }
726 ml_fp_setvalid(FALSE);
727
728 /* Tell the PAL layer about the signal */
729 pal_set_signal_delivery( thread );
730
731 proc_lock(p);
732
733 return;
734
735 bad:
736
737 proc_lock(p);
738 SIGACTION(p, SIGILL) = SIG_DFL;
739 sig = sigmask(SIGILL);
740 p->p_sigignore &= ~sig;
741 p->p_sigcatch &= ~sig;
742 ut->uu_sigmask &= ~sig;
743 /* sendsig is called with signal lock held */
744 proc_unlock(p);
745 psignal_locked(p, SIGILL);
746 proc_lock(p);
747 return;
748 }
749
750 /*
751 * System call to cleanup state after a signal
752 * has been taken. Reset signal mask and
753 * stack state from context left by sendsig (above).
754 * Return to previous pc and psl as specified by
755 * context left by sendsig. Check carefully to
756 * make sure that the user has not modified the
757 * psl to gain improper priviledges or to cause
758 * a machine fault.
759 */
760
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	/* Large enough to hold any supported mcontext variant (32/64, FP/AVX/AVX512, full) */
	union {
		struct mcontext_avx32 mctx_avx32;
		struct mcontext_avx64 mctx_avx64;
		struct mcontext_avx64_full mctx_avx64_full;
		struct mcontext_avx512_32 mctx_avx512_32;
		struct mcontext_avx512_64 mctx_avx512_64;
		struct mcontext_avx512_64_full mctx_avx512_64_full;
	} mctx_store, *mctxp = &mctx_store;

	thread_t thread = current_thread();
	struct uthread * ut;
	struct sigacts *ps = p->p_sigacts;
	int error;
	int onstack = 0;

	/* General-register state to restore (flavor/count/pointer) */
	mach_msg_type_number_t ts_count;
	unsigned int ts_flavor;
	void * ts;
	/* Floating-point/extended state to restore */
	mach_msg_type_number_t fs_count;
	unsigned int fs_flavor;
	void * fs;
	int rval = EJUSTRETURN;         /* normal success: no register clobber on return */
	xstate_t sig_xstate;
	uint32_t sigreturn_validation;
	user_addr_t token_uctx;
	kern_return_t kr;

	ut = (struct uthread *)get_bsdthread_info(thread);

	/*
	 * If we are being asked to change the altstack flag on the thread, we
	 * just set/reset it and return (the uap->uctx is not used).
	 */
	if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		return 0;
	} else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		return 0;
	}

	bzero(mctxp, sizeof(*mctxp));

	sig_xstate = current_xstate();

	/* Per-process switch: is token mismatch fatal or advisory? */
	sigreturn_validation = atomic_load_explicit(
		&ps->ps_sigreturn_validation, memory_order_relaxed);
	token_uctx = uap->uctx;
	kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
	assert(kr == KERN_SUCCESS);

	if (proc_is64bit(p)) {
		struct user_ucontext64 uctx64;
		user64_addr_t token;
		int task_has_ldt = thread_task_has_ldt(thread);

		/* Pull in the user's ucontext, then the mcontext it points at */
		if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof(uctx64)))) {
			return error;
		}

		onstack = uctx64.uc_onstack & 01;
		ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;

		if (task_has_ldt) {
			/* LDT tasks saved the "full" layout (sendsig OR'd in STATE64_FULL) */
			ts_flavor = x86_THREAD_FULL_STATE64;
			ts_count = x86_THREAD_FULL_STATE64_COUNT;
			fs = (void *)&mctxp->mctx_avx64_full.fs;
			sig_xstate |= STATE64_FULL;
		} else {
			ts_flavor = x86_THREAD_STATE64;
			ts_count = x86_THREAD_STATE64_COUNT;
			fs = (void *)&mctxp->mctx_avx64.fs;
		}

		if ((error = copyin(uctx64.uc_mcontext64, (void *)mctxp, thread_state64[sig_xstate].mcontext_size))) {
			return error;
		}

		ts = (void *)&mctxp->mctx_avx64.ss;

		fs_flavor = thread_state64[sig_xstate].flavor;
		fs_count = thread_state64[sig_xstate].state_count;

		/* Validate the token sendsig generated (uctx pointer ^ per-proc secret) */
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
		if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
			    p->p_comm, p->p_pid, (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				rval = EINVAL;
			}
		}
	} else {
		struct user_ucontext32 uctx32;
		user32_addr_t token;

		if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof(uctx32)))) {
			return error;
		}

		if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)mctxp, thread_state32[sig_xstate].mcontext_size))) {
			return error;
		}

		onstack = uctx32.uc_onstack & 01;
		ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE32;
		ts_count = x86_THREAD_STATE32_COUNT;
		ts = (void *)&mctxp->mctx_avx32.ss;

		fs_flavor = thread_state32[sig_xstate].flavor;
		fs_count = thread_state32[sig_xstate].state_count;
		fs = (void *)&mctxp->mctx_avx32.fs;

		/* 32-bit token uses the truncated uctx pointer */
		token = CAST_DOWN_EXPLICIT(user32_addr_t, uap->uctx) ^
		    CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token);
		if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
			    p->p_comm, p->p_pid, (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				rval = EINVAL;
			}
		}
	}

	/* Restore the altstack flag recorded by sendsig */
	if (onstack) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
	}

	/* The restored mask may unblock pending signals; arrange to take them */
	if (ut->uu_siglist & ~ut->uu_sigmask) {
		signal_setast(thread);
	}

	/* Token mismatch (with validation enabled): fail after mask/stack restore */
	if (rval == EINVAL) {
		goto error_ret;
	}

	/*
	 * thread_set_state() does all the needed checks for the passed in
	 * content
	 */
	if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS) {
		rval = EINVAL;
#if DEVELOPMENT || DEBUG
		printf("process %s[%d] sigreturn thread_setstatus error %d\n",
		    p->p_comm, p->p_pid, rval);
#endif /* DEVELOPMENT || DEBUG */
		goto error_ret;
	}

	/* Mark the (about-to-be-restored) FP state as live */
	ml_fp_setvalid(TRUE);

	if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) {
		rval = EINVAL;
#if DEVELOPMENT || DEBUG
		printf("process %s[%d] sigreturn thread_setstatus error %d\n",
		    p->p_comm, p->p_pid, rval);
#endif /* DEVELOPMENT || DEBUG */
		goto error_ret;
	}
error_ret:
	return rval;
}
933
934
935 /*
936 * machine_exception() performs machine-dependent translation
937 * of a mach exception to a unix signal.
938 */
939 int
940 machine_exception(int exception,
941 mach_exception_code_t code,
942 __unused mach_exception_subcode_t subcode)
943 {
944 switch (exception) {
945 case EXC_BAD_ACCESS:
946 /* Map GP fault to SIGSEGV, otherwise defer to caller */
947 if (code == EXC_I386_GPFLT) {
948 return SIGSEGV;
949 }
950 break;
951
952 case EXC_BAD_INSTRUCTION:
953 return SIGILL;
954
955 case EXC_ARITHMETIC:
956 return SIGFPE;
957
958 case EXC_SOFTWARE:
959 if (code == EXC_I386_BOUND) {
960 /*
961 * Map #BR, the Bound Range Exceeded exception, to
962 * SIGTRAP.
963 */
964 return SIGTRAP;
965 }
966 break;
967 }
968
969 return 0;
970 }