/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1992 NeXT, Inc.
 *
 * 13 May 1992 ? at NeXT
 *	Created.
 */
#include <mach/mach_types.h>
#include <mach/exception.h>

#include <kern/thread.h>

#include <sys/systm.h>
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/sysproto.h>
#include <sys/sysent.h>
#include <sys/ucontext.h>

#include <mach/thread_act.h>	/* for thread_abort_safely */
#include <mach/thread_status.h>
#include <i386/machine_routines.h>
#include <i386/eflags.h>

#include <sys/kdebug.h>
extern boolean_t machine_exception(int, int, int, int *, int *);
extern kern_return_t thread_getstatus(register thread_t act, int flavor,
			thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
			thread_state_t tstate, mach_msg_type_number_t count);
/* Signal handler flavors supported */
/* These definitions should match the Libc implementation */

#define	C_32_STK_ALIGN		16
#define	C_64_STK_ALIGN		16
#define	C_64_REDZONE_LEN	128
#define	TRUNC_DOWN32(a,c)	((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
#define	TRUNC_DOWN64(a,c)	((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
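
/*
 * TRUNC_DOWN32/TRUNC_DOWN64 first reserve 'c' bytes and then mask the result
 * down to a multiple of 'c' (assumed to be a power of two); they are used
 * below to keep the signal frame 16-byte aligned.
 */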
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
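/*
 * Overview: sendsig() snapshots the interrupted thread state, carves a
 * ucontext, siginfo and mcontext out of the user stack (or the alternate
 * signal stack), and points the thread at the signal trampoline, which
 * invokes the user handler and later re-enters the kernel via sigreturn().
 */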
struct sigframe32 {
	int			retaddr;
	sig_t			catcher;
	int			sigstyle;
	int			sig;
	siginfo_t		*sinfo;
	struct ucontext		*uctx;
};
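
/*
 * struct sigframe32 is the argument block copied onto a 32-bit process's
 * stack; 64-bit processes receive the same handler arguments in registers
 * instead (see below).
 */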
void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_long code)
{
	union {
		struct mcontext32 mctx32;
		struct mcontext64 mctx64;
	} mctx;
	user_addr_t	ua_sp;
	user_addr_t	ua_fp;
	user_addr_t	ua_cr2;
	user_addr_t	ua_sip;
	user_addr_t	ua_uctxp;
	user_addr_t	ua_mctxp;
	user_siginfo_t	sinfo64;

	struct sigacts *ps = p->p_sigacts;
	int oonstack, flavor;
	void *state;
	mach_msg_type_number_t state_count;
	int uthsigaltstack = 0;
	int altstack = 0;

	thread_t thread = current_thread();
	struct uthread *ut;
	int stack_size = 0;
	int infostyle = UC_TRAD;

	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;

	ut = get_bsdthread_info(thread);

	uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;
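
	/*
	 * When P_LTHSIGSTACK is set, the alternate signal stack state is
	 * tracked per thread (uu_sigstk/uu_flag) rather than per process
	 * (ps_sigstk/ps_flags).
	 */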
	if (uthsigaltstack != 0) {
		oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
		altstack = ut->uu_flag & UT_ALTSTACK;
	} else {
		oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;
		altstack = ps->ps_flags & SAS_ALTSTACK;
	}

	bzero((caddr_t)&sinfo64, sizeof(user_siginfo_t));
	sinfo64.si_signo = sig;

	if (proc_is64bit(p)) {
		x86_thread_state64_t	*tstate64;
		struct user_ucontext64	uctx64;

		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctx.mctx64.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_FLOAT_STATE64;
		state_count = x86_FLOAT_STATE64_COUNT;
		state = (void *)&mctx.mctx64.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE64;
		state_count = x86_EXCEPTION_STATE64_COUNT;
		state = (void *)&mctx.mctx64.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate64 = &mctx.mctx64.ss;
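
		/*
		 * At this point mctx.mctx64 holds the general-purpose,
		 * floating-point and exception state of the interrupted
		 * thread; it is what the user handler will see and what
		 * sigreturn will later restore.
		 */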

		if (altstack && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
			if (uthsigaltstack != 0) {
				ua_sp = ut->uu_sigstk.ss_sp;
				stack_size = ut->uu_sigstk.ss_size;
				ua_sp += stack_size;
				ut->uu_sigstk.ss_flags |= SA_ONSTACK;
			} else {
				ua_sp = ps->ps_sigstk.ss_sp;
				stack_size = ps->ps_sigstk.ss_size;
				ua_sp += stack_size;
				ps->ps_sigstk.ss_flags |= SA_ONSTACK;
			}
		} else
			ua_sp = tstate64->rsp;
		ua_cr2 = mctx.mctx64.es.faultvaddr;
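
		/*
		 * ua_sp now points either at the top of the alternate signal
		 * stack or at the interrupted thread's stack pointer;
		 * faultvaddr is kept so SIGSEGV/SIGBUS can report si_addr.
		 */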

		/* The x86_64 ABI defines a 128-byte red zone. */
		ua_sp -= C_64_REDZONE_LEN;

		ua_sp -= sizeof (struct user_ucontext64);
		ua_uctxp = ua_sp;		// someone tramples the first word!

		ua_sp -= sizeof (user_siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext64);
		ua_mctxp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'ua_fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(user_addr_t);
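
		/*
		 * Resulting layout, from high to low user addresses: red zone,
		 * user_ucontext64 (ua_uctxp), user_siginfo_t (ua_sip),
		 * mcontext64 (ua_mctxp), then the 16-byte-aligned ua_fp that
		 * becomes the handler's stack pointer.
		 */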

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx64, sizeof(uctx64));

		uctx64.uc_onstack = oonstack;
		uctx64.uc_sigmask = mask;
		uctx64.uc_stack.ss_sp = ua_fp;
		uctx64.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx64.uc_stack.ss_flags |= SS_ONSTACK;

		uctx64.uc_mcsize = sizeof(struct mcontext64);
		uctx64.uc_mcontext64 = ua_mctxp;

		if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
			goto bad;

		if (copyout((caddr_t)&mctx.mctx64, ua_mctxp, sizeof (struct mcontext64)))
			goto bad;

		sinfo64.pad[0]  = tstate64->rsp;
		sinfo64.si_addr = tstate64->rip;

		tstate64->rip = ps->ps_trampact[sig];
		tstate64->rsp = ua_fp;
		tstate64->rflags = get_eflags_exportmask();
		/*
		 * JOE - might not need to set these
		 */
		tstate64->cs = USER64_CS;
		tstate64->fs = NULL_SEG;
		tstate64->gs = USER_CTHREAD;

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 */
		tstate64->rdi = ua_catcher;
		tstate64->rsi = infostyle;
		tstate64->rdx = sig;
		tstate64->rcx = ua_sip;
		tstate64->r8  = ua_uctxp;
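
		/*
		 * Arguments are passed in rdi/rsi/rdx/rcx/r8 per the x86_64
		 * System V calling convention, so the trampoline receives
		 * (catcher, infostyle, sig, siginfo, ucontext) in registers.
		 */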
	} else {
		x86_thread_state32_t	*tstate32;
		struct ucontext		uctx32;
		struct sigframe32	frame32;

		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)&mctx.mctx32.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_FLOAT_STATE32;
		state_count = x86_FLOAT_STATE32_COUNT;
		state = (void *)&mctx.mctx32.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE32;
		state_count = x86_EXCEPTION_STATE32_COUNT;
		state = (void *)&mctx.mctx32.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate32 = &mctx.mctx32.ss;

		if (altstack && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
			if (uthsigaltstack != 0) {
				ua_sp = ut->uu_sigstk.ss_sp;
				stack_size = ut->uu_sigstk.ss_size;
				ua_sp += stack_size;
				ut->uu_sigstk.ss_flags |= SA_ONSTACK;
			} else {
				ua_sp = ps->ps_sigstk.ss_sp;
				stack_size = ps->ps_sigstk.ss_size;
				ua_sp += stack_size;
				ps->ps_sigstk.ss_flags |= SA_ONSTACK;
			}
		} else
			ua_sp = tstate32->esp;
		ua_cr2 = mctx.mctx32.es.faultvaddr;

		ua_sp -= sizeof (struct ucontext);
		ua_uctxp = ua_sp;		// someone tramples the first word!

		ua_sp -= sizeof (siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext32);
		ua_mctxp = ua_sp;

		ua_sp -= sizeof (struct sigframe32);
		ua_fp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(frame32.retaddr);

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 */
		frame32.retaddr = -1;
		frame32.sigstyle = infostyle;
		frame32.sig = sig;
		frame32.catcher = CAST_DOWN(sig_t, ua_catcher);
		frame32.sinfo = CAST_DOWN(siginfo_t *, ua_sip);
		frame32.uctx = CAST_DOWN(struct ucontext *, ua_uctxp);

		if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
			goto bad;
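
		/*
		 * frame32.retaddr is a deliberately bogus return address: the
		 * 32-bit trampoline takes its arguments from this in-memory
		 * frame and is expected to enter sigreturn rather than return
		 * through it.
		 */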

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx32, sizeof(uctx32));

		uctx32.uc_onstack = oonstack;
		uctx32.uc_sigmask = mask;
		uctx32.uc_stack.ss_sp = CAST_DOWN(char *, ua_fp);
		uctx32.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx32.uc_stack.ss_flags |= SS_ONSTACK;

		uctx32.uc_mcsize = sizeof(struct mcontext32);
		uctx32.uc_mcontext = CAST_DOWN(struct mcontext *, ua_mctxp);

		if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
			goto bad;

		if (copyout((caddr_t)&mctx.mctx32, ua_mctxp, sizeof (struct mcontext32)))
			goto bad;

		sinfo64.pad[0]  = tstate32->esp;
		sinfo64.si_addr = tstate32->eip;
	}

	switch (sig) {
		case SIGCHLD:
			sinfo64.si_pid = p->si_pid;
			sinfo64.si_status = p->si_status;
			sinfo64.si_uid = p->si_uid;
			sinfo64.si_code = p->si_code;
			if (sinfo64.si_code == CLD_EXITED) {
				if (WIFEXITED(sinfo64.si_status))
					sinfo64.si_code = CLD_EXITED;
				else if (WIFSIGNALED(sinfo64.si_status)) {
					if (WCOREDUMP(sinfo64.si_status))
						sinfo64.si_code = CLD_DUMPED;
					else
						sinfo64.si_code = CLD_KILLED;
				}
			}
			break;
		case SIGILL:
			switch (ut->uu_code) {
				case EXC_I386_INVOP:
					sinfo64.si_code = ILL_ILLOPC;
					break;
				case EXC_I386_GPFLT:
					sinfo64.si_code = ILL_PRVOPC;
					break;
				default:
					printf("unknown SIGILL code %d\n", ut->uu_code);
					sinfo64.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
#define FP_IE 0 /* Invalid operation */
#define FP_DE 1 /* Denormalized operand */
#define FP_ZE 2 /* Zero divide */
#define FP_OE 3 /* overflow */
#define FP_UE 4 /* underflow */
#define FP_PE 5 /* precision */
			if (ut->uu_subcode & (1 << FP_ZE)) {
				sinfo64.si_code = FPE_FLTDIV;
			} else if (ut->uu_subcode & (1 << FP_OE)) {
				sinfo64.si_code = FPE_FLTOVF;
			} else if (ut->uu_subcode & (1 << FP_UE)) {
				sinfo64.si_code = FPE_FLTUND;
			} else if (ut->uu_subcode & (1 << FP_PE)) {
				sinfo64.si_code = FPE_FLTRES;
			} else if (ut->uu_subcode & (1 << FP_IE)) {
				sinfo64.si_code = FPE_FLTINV;
			} else {
				printf("unknown SIGFPE code %d, subcode %x\n",
				       ut->uu_code, ut->uu_subcode);
				sinfo64.si_code = FPE_NOOP;
			}
			break;
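		/*
		 * The FP_* bit positions above match the exception flags in
		 * the low bits of the x87 status word (and MXCSR): invalid,
		 * denormal, zero-divide, overflow, underflow, precision.
		 */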
		case SIGBUS:
			sinfo64.si_code = BUS_ADRERR;
			sinfo64.si_addr = ua_cr2;
			break;
		case SIGTRAP:
			sinfo64.si_code = TRAP_BRKPT;
			break;
		case SIGSEGV:
			sinfo64.si_addr = ua_cr2;

			switch (ut->uu_code) {
				case KERN_PROTECTION_FAILURE:
					sinfo64.si_code = SEGV_ACCERR;
					break;
				case KERN_INVALID_ADDRESS:
					sinfo64.si_code = SEGV_MAPERR;
					break;
				default:
					printf("unknown SIGSEGV code %d\n", ut->uu_code);
					sinfo64.si_code = FPE_NOOP;
			}
			break;
		default:
			break;
	}

	if (proc_is64bit(p)) {
		if (copyout((caddr_t)&sinfo64, ua_sip, sizeof (sinfo64)))
			goto bad;

		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctx.mctx64.ss;
	} else {
		x86_thread_state32_t	*tstate32;
		siginfo_t		sinfo32;

		bzero((caddr_t)&sinfo32, sizeof(siginfo_t));

		sinfo32.si_signo = sinfo64.si_signo;
		sinfo32.si_code = sinfo64.si_code;
		sinfo32.si_pid = sinfo64.si_pid;
		sinfo32.si_uid = sinfo64.si_uid;
		sinfo32.si_status = sinfo64.si_status;
		sinfo32.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
		sinfo32.pad[0] = sinfo64.pad[0];

		if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
			goto bad;
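
		/*
		 * The siginfo was assembled in the 64-bit layout above; for a
		 * 32-bit process it is narrowed field by field into the
		 * 32-bit siginfo_t before being copied out.
		 */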

		tstate32 = &mctx.mctx32.ss;
		tstate32->eip = CAST_DOWN(unsigned int, ps->ps_trampact[sig]);
		tstate32->esp = CAST_DOWN(unsigned int, ua_fp);

		tstate32->eflags = get_eflags_exportmask();

		tstate32->cs = USER_CS;
		tstate32->ss = USER_DS;
		tstate32->ds = USER_DS;
		tstate32->es = USER_DS;
		tstate32->fs = NULL_SEG;
		tstate32->gs = USER_CTHREAD;

		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)tstate32;
	}
	if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
		goto bad;

	ml_fp_setvalid(FALSE);

	return;

bad:
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	psignal_lock(p, SIGILL, 0);
	return;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
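/*
 * sigreturn() copies the (possibly modified) ucontext and mcontext back in
 * from user space and hands the register state to thread_setstatus(), which
 * is relied upon to reject any privilege-violating values.
 */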
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	union {
		struct mcontext32 mctx32;
		struct mcontext64 mctx64;
	} mctx;
	thread_t thread = current_thread();
	struct uthread *ut;
	int	error;
	int	onstack = 0;
	int	uthsigaltstack = 0;

	void	*ts;
	mach_msg_type_number_t	ts_count;
	unsigned int		ts_flavor;
	void	*fs;
	mach_msg_type_number_t	fs_count;
	unsigned int		fs_flavor;

	ut = (struct uthread *)get_bsdthread_info(thread);
	uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;

	if (proc_is64bit(p)) {
		struct user_ucontext64	uctx64;

		if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
			return(error);

		if ((error = copyin(uctx64.uc_mcontext64, (void *)&mctx.mctx64, sizeof (struct mcontext64))))
			return(error);

		onstack = uctx64.uc_onstack & 01;
		ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE64;
		ts_count  = x86_THREAD_STATE64_COUNT;
		ts = (void *)&mctx.mctx64.ss;

		fs_flavor = x86_FLOAT_STATE64;
		fs_count = x86_FLOAT_STATE64_COUNT;
		fs = (void *)&mctx.mctx64.fs;
	} else {
		struct ucontext	uctx32;

		if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
			return(error);

		if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)&mctx.mctx32, sizeof (struct mcontext32))))
			return(error);

		onstack = uctx32.uc_onstack & 01;
		ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE32;
		ts_count  = x86_THREAD_STATE32_COUNT;
		ts = (void *)&mctx.mctx32.ss;

		fs_flavor = x86_FLOAT_STATE32;
		fs_count = x86_FLOAT_STATE32_COUNT;
		fs = (void *)&mctx.mctx32.fs;
	}

	if (onstack) {
		if (uthsigaltstack != 0)
			ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		else
			p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		if (uthsigaltstack != 0)
			ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		else
			p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	}

	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(thread);

	/*
	 * thread_set_state() does all the needed checks for the passed in content
	 */
	if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS)
		return(EINVAL);

	ml_fp_setvalid(TRUE);

	if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS)
		return(EINVAL);

	return (EJUSTRETURN);
}

/*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
 */
boolean_t
machine_exception(int exception, int code, __unused int subcode,
		  int *unix_signal, int *unix_code)
{
	switch (exception) {

	case EXC_BAD_INSTRUCTION:
		*unix_signal = SIGILL;
		*unix_code = code;
		break;

	case EXC_ARITHMETIC:
		*unix_signal = SIGFPE;
		*unix_code = code;
		break;

	default:
		return (FALSE);
	}
	return (TRUE);
}

#include <sys/systm.h>
#include <sys/sysent.h>

int __pthread_cset(struct sysent *);
void __pthread_creset(struct sysent *);
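
/*
 * Cancellation-point helpers: __pthread_cset() is intended to run before a
 * potentially cancellable system call and __pthread_creset() after it,
 * either marking the thread as a non-cancellation point or acting on a
 * pending pthread cancel according to the syscall's cancel classification.
 */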
int
__pthread_cset(struct sysent *callp)
{
	unsigned int cancel_enable;
	thread_t thread;
	struct uthread *uthread;

	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	cancel_enable = callp->sy_cancel;
	if (cancel_enable == _SYSCALL_CANCEL_NONE) {
		uthread->uu_flag |= UT_NOTCANCELPT;
	} else {
		if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			if (cancel_enable == _SYSCALL_CANCEL_PRE)
				return(EINTR);
			else
				thread_abort_safely(thread);
		}
	}
	return(0);
}

void
__pthread_creset(struct sysent *callp)
{
	unsigned int cancel_enable;
	thread_t thread;
	struct uthread *uthread;

	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	cancel_enable = callp->sy_cancel;

	if (!cancel_enable)
		uthread->uu_flag &= ~UT_NOTCANCELPT;
}