]> git.saurik.com Git - apple/xnu.git/blob - bsd/dev/i386/unix_signal.c
4292d6515f660fb138d39f7bdb0130b1f337fe2d
[apple/xnu.git] / bsd / dev / i386 / unix_signal.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1992 NeXT, Inc.
30 *
31 * HISTORY
32 * 13 May 1992 ? at NeXT
33 * Created.
34 */
35
36 #include <mach/mach_types.h>
37 #include <mach/exception.h>
38
39 #include <kern/thread.h>
40
41 #include <sys/systm.h>
42 #include <sys/param.h>
43 #include <sys/proc_internal.h>
44 #include <sys/user.h>
45 #include <sys/sysproto.h>
46 #include <sys/sysent.h>
47 #include <sys/ucontext.h>
48 #include <sys/wait.h>
49 #include <mach/thread_act.h> /* for thread_abort_safely */
50 #include <mach/thread_status.h>
51
52 #include <i386/eflags.h>
53 #include <i386/psl.h>
54 #include <i386/machine_routines.h>
55 #include <i386/seg.h>
56
57 #include <machine/pal_routines.h>
58
59 #include <sys/kdebug.h>
60 #include <sys/sdt.h>
61
62
63 /* Forward: */
64 extern boolean_t machine_exception(int, mach_exception_code_t,
65 mach_exception_subcode_t, int *, mach_exception_subcode_t *);
66 extern kern_return_t thread_getstatus(register thread_t act, int flavor,
67 thread_state_t tstate, mach_msg_type_number_t *count);
68 extern kern_return_t thread_setstatus(thread_t thread, int flavor,
69 thread_state_t tstate, mach_msg_type_number_t count);
70
71 /* Signal handler flavors supported */
72 /* These defns should match the Libc implmn */
73 #define UC_TRAD 1
74 #define UC_FLAVOR 30
75 #define UC_SET_ALT_STACK 0x40000000
76 #define UC_RESET_ALT_STACK 0x80000000
77
78 #define C_32_STK_ALIGN 16
79 #define C_64_STK_ALIGN 16
80 #define C_64_REDZONE_LEN 128
81 #define TRUNC_DOWN32(a,c) ((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
82 #define TRUNC_DOWN64(a,c) ((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
83
84 /*
85 * Send an interrupt to process.
86 *
87 * Stack is set up to allow sigcode stored
88 * in u. to call routine, followed by chmk
89 * to sigreturn routine below. After sigreturn
90 * resets the signal mask, the stack, the frame
91 * pointer, and the argument pointer, it returns
92 * to the user specified pc, psl.
93 */
/*
 * 32-bit signal frame pushed onto the user stack by sendsig();
 * the i386 trampoline/handler takes its arguments from this layout.
 */
struct sigframe32 {
	int		retaddr;	/* filled with -1 by sendsig; handler exits via sigreturn */
	user32_addr_t	catcher;	/* sig_t -- user handler address */
	int		sigstyle;	/* UC_TRAD or UC_FLAVOR */
	int		sig;		/* signal number being delivered */
	user32_addr_t	sinfo;		/* siginfo32_t* */
	user32_addr_t	uctx;		/* struct ucontext32 */
};
102
/*
 * Convert an in-kernel user_siginfo_t into the 32-bit user-visible
 * siginfo layout, truncating pointer-sized fields to 32 bits.
 *
 * NOTE: Source and target may *NOT* overlap!
 */
static void
siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_addr);	/* truncated to 32 bits */
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_value.sival_ptr);
	out->si_band = in->si_band;			/* range reduction */
	/* pad[0] carries the interrupted stack pointer stashed by sendsig (esp/rsp) */
	out->__pad[0] = in->pad[0];
}
121
122 static void
123 siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
124 {
125 out->si_signo = in->si_signo;
126 out->si_errno = in->si_errno;
127 out->si_code = in->si_code;
128 out->si_pid = in->si_pid;
129 out->si_uid = in->si_uid;
130 out->si_status = in->si_status;
131 out->si_addr = in->si_addr;
132 out->si_value.sival_ptr = in->si_value.sival_ptr;
133 out->si_band = in->si_band; /* range reduction */
134 out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */
135 }
136
/*
 * sendsig -- deliver signal 'sig' to the current thread of process 'p'.
 *
 * Captures the interrupted thread/FP/exception state into an mcontext,
 * pushes a ucontext + siginfo (+ on i386, a struct sigframe32 argument
 * frame) onto the user stack or the alternate signal stack, then points
 * the thread at the user-mode trampoline 'trampact', which will invoke
 * the handler 'ua_catcher'.  'mask' is the signal mask that sigreturn
 * will restore (stored into uc_sigmask).
 *
 * Called, and returns, with the proc lock held; the lock is dropped for
 * the duration of the user-memory copyouts.  If any copyout or thread
 * state operation fails, delivery is abandoned and a default-action
 * SIGILL is forced instead (the "bad:" path).
 */
void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
{
	union {
		struct mcontext_avx32 mctx_avx32;
		struct mcontext_avx64 mctx_avx64;
	} mctx_store, *mctxp = &mctx_store;

	user_addr_t	ua_sp;
	user_addr_t	ua_fp;
	user_addr_t	ua_cr2;
	user_addr_t	ua_sip;
	user_addr_t	ua_uctxp;
	user_addr_t	ua_mctxp;
	user_siginfo_t	sinfo64;

	struct sigacts *ps = p->p_sigacts;
	int oonstack, flavor;
	user_addr_t trampact;
	int sigonstack;
	void * state;
	mach_msg_type_number_t state_count;

	thread_t thread;
	struct uthread * ut;
	int stack_size = 0;
	int infostyle = UC_TRAD;
	boolean_t	sig_avx;

	thread = current_thread();
	ut = get_bsdthread_info(thread);

	/* SA_SIGINFO handlers get the siginfo-carrying (UC_FLAVOR) style */
	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;

	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
	trampact = ps->ps_trampact[sig];
	sigonstack = (ps->ps_sigonstack & sigmask(sig));

	/*
	 * init siginfo
	 */
	proc_unlock(p);

	bzero((caddr_t)&sinfo64, sizeof(sinfo64));
	sinfo64.si_signo = sig;

	bzero(mctxp, sizeof(*mctxp));
	sig_avx = ml_fpu_avx_enabled();

	if (proc_is64bit(p)) {
		x86_thread_state64_t	*tstate64;
		struct user_ucontext64	uctx64;

		/* snapshot the interrupted thread state into the mcontext */
		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctxp->mctx_avx64.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		/* FP/vector state: AVX flavor when the CPU supports it */
		if (sig_avx) {
			flavor = x86_AVX_STATE64;
			state_count = x86_AVX_STATE64_COUNT;
		}
		else {
			flavor = x86_FLOAT_STATE64;
			state_count = x86_FLOAT_STATE64_COUNT;
		}
		state = (void *)&mctxp->mctx_avx64.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE64;
		state_count = x86_EXCEPTION_STATE64_COUNT;
		state = (void *)&mctxp->mctx_avx64.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate64 = &mctxp->mctx_avx64.ss;

		/* figure out where our new stack lives */
		if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
			(sigonstack)) {
			ua_sp = ut->uu_sigstk.ss_sp;
			stack_size = ut->uu_sigstk.ss_size;
			ua_sp += stack_size;
			ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		} else {
			ua_sp = tstate64->rsp;
		}
		ua_cr2 = mctxp->mctx_avx64.es.faultvaddr;

		/* The x86_64 ABI defines a 128-byte red zone. */
		ua_sp -= C_64_REDZONE_LEN;

		/* carve ucontext, siginfo, and mcontext out of the stack, top down */
		ua_sp -= sizeof (struct user_ucontext64);
		ua_uctxp = ua_sp;		 // someone tramples the first word!

		ua_sp -= sizeof (user64_siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext_avx64);
		ua_mctxp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'ua_fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(user_addr_t);

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx64, sizeof(uctx64));

		uctx64.uc_onstack = oonstack;
		uctx64.uc_sigmask = mask;
		uctx64.uc_stack.ss_sp = ua_fp;
		uctx64.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx64.uc_stack.ss_flags |= SS_ONSTACK;
		uctx64.uc_link = 0;

		/* advertise only the FP portion actually valid for this CPU */
		uctx64.uc_mcsize = sig_avx ? sizeof(struct mcontext_avx64) : sizeof(struct mcontext64);
		uctx64.uc_mcontext64 = ua_mctxp;

		if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
			goto bad;

		if (copyout((caddr_t)&mctxp->mctx_avx64, ua_mctxp, sizeof (struct mcontext_avx64)))
			goto bad;

		/* stash the interrupted SP/PC for siginfo before redirecting */
		sinfo64.pad[0] = tstate64->rsp;
		sinfo64.si_addr = tstate64->rip;

		tstate64->rip = trampact;
		tstate64->rsp = ua_fp;
		tstate64->rflags = get_eflags_exportmask();
		/*
		 * JOE - might not need to set these
		 */
		tstate64->cs = USER64_CS;
		tstate64->fs = NULL_SEG;
		tstate64->gs = USER_CTHREAD;

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 * (x86_64 ABI: args in rdi, rsi, rdx, rcx, r8)
		 */
		tstate64->rdi = ua_catcher;
		tstate64->rsi = infostyle;
		tstate64->rdx = sig;
		tstate64->rcx = ua_sip;
		tstate64->r8 = ua_uctxp;

	} else {
		x86_thread_state32_t	*tstate32;
		struct user_ucontext32	uctx32;
		struct sigframe32	frame32;

		/* snapshot the interrupted thread state into the mcontext */
		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)&mctxp->mctx_avx32.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		/* FP/vector state: AVX flavor when the CPU supports it */
		if (sig_avx) {
			flavor = x86_AVX_STATE32;
			state_count = x86_AVX_STATE32_COUNT;
		}
		else {
			flavor = x86_FLOAT_STATE32;
			state_count = x86_FLOAT_STATE32_COUNT;
		}

		state = (void *)&mctxp->mctx_avx32.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE32;
		state_count = x86_EXCEPTION_STATE32_COUNT;
		state = (void *)&mctxp->mctx_avx32.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate32 = &mctxp->mctx_avx32.ss;

		/* figure out where our new stack lives */
		if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
			(sigonstack)) {
			ua_sp = ut->uu_sigstk.ss_sp;
			stack_size = ut->uu_sigstk.ss_size;
			ua_sp += stack_size;
			ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		} else {
			ua_sp = tstate32->esp;
		}
		ua_cr2 = mctxp->mctx_avx32.es.faultvaddr;

		/* carve ucontext, siginfo, mcontext, and the argument frame, top down */
		ua_sp -= sizeof (struct user_ucontext32);
		ua_uctxp = ua_sp;		 // someone tramples the first word!

		ua_sp -= sizeof (user32_siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext_avx32);
		ua_mctxp = ua_sp;

		ua_sp -= sizeof (struct sigframe32);
		ua_fp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(frame32.retaddr);

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 */
		frame32.retaddr = -1;
		frame32.sigstyle = infostyle;
		frame32.sig = sig;
		frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
		frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
		frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);

		if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
			goto bad;

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx32, sizeof(uctx32));

		uctx32.uc_onstack = oonstack;
		uctx32.uc_sigmask = mask;
		uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
		uctx32.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx32.uc_stack.ss_flags |= SS_ONSTACK;
		uctx32.uc_link = 0;

		/* advertise only the FP portion actually valid for this CPU */
		uctx32.uc_mcsize = sig_avx ? sizeof(struct mcontext_avx32) : sizeof(struct mcontext32);

		uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);

		if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
			goto bad;

		if (copyout((caddr_t)&mctxp->mctx_avx32, ua_mctxp, sizeof (struct mcontext_avx32)))
			goto bad;

		/* stash the interrupted SP/PC for siginfo before redirecting */
		sinfo64.pad[0] = tstate32->esp;
		sinfo64.si_addr = tstate32->eip;
	}

	/* fill in the signal-specific si_code / si_addr details */
	switch (sig) {
		case SIGILL:
			switch (ut->uu_code) {
				case EXC_I386_INVOP:
					sinfo64.si_code = ILL_ILLOPC;
					break;
				default:
					sinfo64.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
/* x87 FPU status-word exception flag bit positions */
#define FP_IE 0 /* Invalid operation */
#define FP_DE 1 /* Denormalized operand */
#define FP_ZE 2 /* Zero divide */
#define FP_OE 3 /* overflow */
#define FP_UE 4 /* underflow */
#define FP_PE 5 /* precision */
			if (ut->uu_code == EXC_I386_DIV) {
				sinfo64.si_code = FPE_INTDIV;
			}
			else if (ut->uu_code == EXC_I386_INTO) {
				sinfo64.si_code = FPE_INTOVF;
			}
			else if (ut->uu_subcode & (1 << FP_ZE)) {
				sinfo64.si_code = FPE_FLTDIV;
			} else if (ut->uu_subcode & (1 << FP_OE)) {
				sinfo64.si_code = FPE_FLTOVF;
			} else if (ut->uu_subcode & (1 << FP_UE)) {
				sinfo64.si_code = FPE_FLTUND;
			} else if (ut->uu_subcode & (1 << FP_PE)) {
				sinfo64.si_code = FPE_FLTRES;
			} else if (ut->uu_subcode & (1 << FP_IE)) {
				sinfo64.si_code = FPE_FLTINV;
			} else {
				sinfo64.si_code = FPE_NOOP;
			}
			break;
		case SIGBUS:
			sinfo64.si_code = BUS_ADRERR;
			sinfo64.si_addr = ua_cr2;
			break;
		case SIGTRAP:
			sinfo64.si_code = TRAP_BRKPT;
			break;
		case SIGSEGV:
			sinfo64.si_addr = ua_cr2;

			switch (ut->uu_code) {
				case EXC_I386_GPFLT:
					/* CR2 is meaningless after GP fault */
					/* XXX namespace clash! */
					sinfo64.si_addr = 0ULL;
					sinfo64.si_code = 0;
					break;
				case KERN_PROTECTION_FAILURE:
					sinfo64.si_code = SEGV_ACCERR;
					break;
				case KERN_INVALID_ADDRESS:
					sinfo64.si_code = SEGV_MAPERR;
					break;
				default:
					/* NOTE(review): FPE_NOOP looks like a copy/paste of the
					 * SIGFPE default; a SEGV_* value would be expected here */
					sinfo64.si_code = FPE_NOOP;
			}
			break;
		default:
		{
			int status_and_exitcode;

			/*
			 * All other signals need to fill out a minimum set of
			 * information for the siginfo structure passed into
			 * the signal handler, if SA_SIGINFO was specified.
			 *
			 * p->si_status actually contains both the status and
			 * the exit code; we save it off in its own variable
			 * for later breakdown.
			 */
			proc_lock(p);
			sinfo64.si_pid = p->si_pid;
			p->si_pid =0;
			status_and_exitcode = p->si_status;
			p->si_status = 0;
			sinfo64.si_uid = p->si_uid;
			p->si_uid =0;
			sinfo64.si_code = p->si_code;
			p->si_code = 0;
			proc_unlock(p);
			if (sinfo64.si_code == CLD_EXITED) {
				if (WIFEXITED(status_and_exitcode))
					sinfo64.si_code = CLD_EXITED;
				else if (WIFSIGNALED(status_and_exitcode)) {
					if (WCOREDUMP(status_and_exitcode)) {
						sinfo64.si_code = CLD_DUMPED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
					} else {
						sinfo64.si_code = CLD_KILLED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
					}
				}
			}
			/*
			 * The recorded status contains the exit code and the
			 * signal information, but the information to be passed
			 * in the siginfo to the handler is supposed to only
			 * contain the status, so we have to shift it out.
			 */
			sinfo64.si_status = WEXITSTATUS(status_and_exitcode);
			break;
		}
	}
	/* copy the siginfo out in the process's native width, then redirect */
	if (proc_is64bit(p)) {
		user64_siginfo_t sinfo64_user64;

		bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));

		siginfo_user_to_user64(&sinfo64,&sinfo64_user64);

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
		ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
		ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
		ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
		ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
		/* XXX truncates faulting address to void * on K32 */
		ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (sig) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}

		/* XXX truncates catcher address to uintptr_t */
		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
			void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */

		if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof (sinfo64_user64)))
			goto bad;

		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctxp->mctx_avx64.ss;
	} else {
		x86_thread_state32_t	*tstate32;
		user32_siginfo_t sinfo32;

		bzero((caddr_t)&sinfo32, sizeof(sinfo32));

		siginfo_user_to_user32(&sinfo64,&sinfo32);

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo;
		ut->t_dtrace_siginfo.si_code = sinfo32.si_code;
		ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid;
		ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid;
		ut->t_dtrace_siginfo.si_status = sinfo32.si_status;
		ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (sig) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}

		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
			void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */

		if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
			goto bad;

		tstate32 = &mctxp->mctx_avx32.ss;

		/* redirect the thread to the trampoline with the new frame as SP */
		tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
		tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);

		tstate32->eflags = get_eflags_exportmask();

		tstate32->cs = USER_CS;
		tstate32->ss = USER_DS;
		tstate32->ds = USER_DS;
		tstate32->es = USER_DS;
		tstate32->fs = NULL_SEG;
		tstate32->gs = USER_CTHREAD;

		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)tstate32;
	}
	if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
		goto bad;
	/* mark the live FP state invalid -- presumably so the handler starts
	 * from a clean FPU context; see ml_fp_setvalid() (TODO confirm) */
	ml_fp_setvalid(FALSE);

	/* Tell the PAL layer about the signal */
	pal_set_signal_delivery( thread );

	proc_lock(p);

	return;

bad:
	/* delivery failed: reset SIGILL to its default action and force it,
	 * so the process terminates rather than looping on delivery */
	proc_lock(p);
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
	return;
}
634
/*
 * System call to cleanup state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 *
 * Returns EJUSTRETURN on success (the restored user context supplies
 * the return path), an errno from copyin on a bad user pointer, or
 * EINVAL when thread_setstatus rejects the restored state.
 */

int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	union {
		struct mcontext_avx32 mctx_avx32;
		struct mcontext_avx64 mctx_avx64;
	} mctx_store, *mctxp = &mctx_store;

	thread_t thread = current_thread();
	struct uthread * ut;
	int	error;
	int	onstack = 0;

	mach_msg_type_number_t ts_count;	/* integer thread state */
	unsigned int ts_flavor;
	void * ts;
	mach_msg_type_number_t fs_count;	/* FP/vector state */
	unsigned int fs_flavor;
	void * fs;
	int	rval = EJUSTRETURN;
	boolean_t	sig_avx;

	ut = (struct uthread *)get_bsdthread_info(thread);

	/*
	 * If we are being asked to change the altstack flag on the thread, we
	 * just set/reset it and return (the uap->uctx is not used).
	 */
	if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		return (0);
	} else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		return (0);
	}

	bzero(mctxp, sizeof(*mctxp));
	sig_avx = ml_fpu_avx_enabled();

	if (proc_is64bit(p)) {
		struct user_ucontext64	uctx64;

		/* pull the ucontext and the mcontext it points at back in
		 * from user space */
		if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
			return(error);

		if ((error = copyin(uctx64.uc_mcontext64, (void *)&mctxp->mctx_avx64, sizeof (struct mcontext_avx64))))
			return(error);

		onstack = uctx64.uc_onstack & 01;
		/* restore the blocked-signal mask; never allow the unmaskable
		 * signals to be blocked */
		ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE64;
		ts_count  = x86_THREAD_STATE64_COUNT;
		ts = (void *)&mctxp->mctx_avx64.ss;

		if (sig_avx) {
			fs_flavor = x86_AVX_STATE64;
			fs_count = x86_AVX_STATE64_COUNT;
		}
		else {
			fs_flavor = x86_FLOAT_STATE64;
			fs_count = x86_FLOAT_STATE64_COUNT;
		}

		fs = (void *)&mctxp->mctx_avx64.fs;

	} else {
		struct user_ucontext32	uctx32;

		/* pull the ucontext and the mcontext it points at back in
		 * from user space */
		if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
			return(error);

		if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)&mctxp->mctx_avx32, sizeof (struct mcontext_avx32))))
			return(error);

		onstack = uctx32.uc_onstack & 01;
		/* restore the blocked-signal mask; never allow the unmaskable
		 * signals to be blocked */
		ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE32;
		ts_count  = x86_THREAD_STATE32_COUNT;
		ts = (void *)&mctxp->mctx_avx32.ss;

		if (sig_avx) {
			fs_flavor = x86_AVX_STATE32;
			fs_count = x86_AVX_STATE32_COUNT;
		}
		else {
			fs_flavor = x86_FLOAT_STATE32;
			fs_count = x86_FLOAT_STATE32_COUNT;
		}

		fs = (void *)&mctxp->mctx_avx32.fs;
	}

	/* restore the alternate-stack flag recorded by sendsig */
	if (onstack)
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	else
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;

	/* the restored mask may have unblocked pending signals */
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(thread);
	/*
	 * thread_set_state() does all the needed checks for the passed in
	 * content
	 */
	if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS) {
		rval = EINVAL;
		goto error_ret;
	}

	ml_fp_setvalid(TRUE);

	if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) {
		rval = EINVAL;
		goto error_ret;

	}
	/* success falls through with rval still EJUSTRETURN */
error_ret:
	return rval;
}
766
767
768 /*
769 * machine_exception() performs MD translation
770 * of a mach exception to a unix signal and code.
771 */
772
773 boolean_t
774 machine_exception(
775 int exception,
776 mach_exception_code_t code,
777 __unused mach_exception_subcode_t subcode,
778 int *unix_signal,
779 mach_exception_code_t *unix_code)
780 {
781
782 switch(exception) {
783
784 case EXC_BAD_ACCESS:
785 /* Map GP fault to SIGSEGV, otherwise defer to caller */
786 if (code == EXC_I386_GPFLT) {
787 *unix_signal = SIGSEGV;
788 *unix_code = code;
789 break;
790 }
791 return(FALSE);
792
793 case EXC_BAD_INSTRUCTION:
794 *unix_signal = SIGILL;
795 *unix_code = code;
796 break;
797
798 case EXC_ARITHMETIC:
799 *unix_signal = SIGFPE;
800 *unix_code = code;
801 break;
802
803 case EXC_SOFTWARE:
804 if (code == EXC_I386_BOUND) {
805 /*
806 * Map #BR, the Bound Range Exceeded exception, to
807 * SIGTRAP.
808 */
809 *unix_signal = SIGTRAP;
810 *unix_code = code;
811 break;
812 }
813
814 default:
815 return(FALSE);
816 }
817
818 return(TRUE);
819 }
820