/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Copyright (c) 1992 NeXT, Inc.
 *
 * HISTORY
 * 13 May 1992 ? at NeXT
 *	Created.
 */

#include <mach/mach_types.h>
#include <mach/exception.h>

#include <kern/thread.h>

#include <sys/systm.h>
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/sysproto.h>
#include <sys/sysent.h>
#include <sys/ucontext.h>
#include <sys/wait.h>
#include <mach/thread_act.h>	/* for thread_abort_safely */
#include <mach/thread_status.h>
#include <i386/machine_routines.h>

#include <i386/eflags.h>
#include <i386/psl.h>
#include <i386/seg.h>

#include <sys/kdebug.h>

extern boolean_t machine_exception(int, int, int, int *, int *);
extern kern_return_t thread_getstatus(register thread_t act, int flavor,
			thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
			thread_state_t tstate, mach_msg_type_number_t count);

/* Signal handler flavors supported */
/* These defns should match the Libc implmn */
#define UC_TRAD			1
#define UC_FLAVOR		30

#define	C_32_STK_ALIGN		16
#define	C_64_STK_ALIGN		16
#define	C_64_REDZONE_LEN	128
#define	TRUNC_DOWN32(a,c)	((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
#define	TRUNC_DOWN64(a,c)	((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
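
/*
 * Worked example (illustrative, not from the original source): with a 16-byte
 * alignment, TRUNC_DOWN64(0x7fff5fbff9c8, 16) first subtracts 16, giving
 * 0x7fff5fbff9b8, then masks with ~15 to get 0x7fff5fbff9b0 -- an address
 * that is 16-byte aligned and strictly below the starting value, so at least
 * one alignment's worth of space is always reserved.
 */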

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by chmk
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, the frame
 * pointer, and the argument pointer, it returns
 * to the user specified pc, psl.
 */
struct sigframe32 {
	int		retaddr;
	sig_t		catcher;
	int		sigstyle;
	int		sig;
	siginfo_t	*sinfo;
	struct ucontext	*uctx;
};
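
/*
 * Descriptive sketch (added; not authoritative): for the 32-bit case the
 * frame that sendsig() carves out of the user stack, from higher to lower
 * addresses, is roughly
 *
 *	struct ucontext		<- ua_uctxp
 *	siginfo_t		<- ua_sip
 *	struct mcontext32	<- ua_mctxp
 *	struct sigframe32	<- ua_fp, 16-byte aligned
 *
 * Libc's _sigtramp runs with this sigframe32 at the top of the stack, calls
 * the user's handler, and finally enters sigreturn() below to restore the
 * interrupted state.
 */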

void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_long code)
{
	union {
		struct mcontext32	mctx32;
		struct mcontext64	mctx64;
	} mctx;
	user_addr_t	ua_sp;
	user_addr_t	ua_fp;
	user_addr_t	ua_cr2;
	user_addr_t	ua_sip;
	user_addr_t	ua_uctxp;
	user_addr_t	ua_mctxp;
	user_siginfo_t	sinfo64;

	struct sigacts *ps = p->p_sigacts;
	int oonstack, flavor;
	void * state;
	mach_msg_type_number_t state_count;
	int uthsigaltstack = 0;
	int altstack = 0;

	thread_t thread = current_thread();
	struct uthread * ut;
	int stack_size = 0;
	int infostyle = UC_TRAD;

	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;

	ut = get_bsdthread_info(thread);

	uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;

	if (uthsigaltstack != 0) {
		oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
		altstack = ut->uu_flag & UT_ALTSTACK;
	} else {
		oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;
		altstack = ps->ps_flags & SAS_ALTSTACK;
	}

	bzero((caddr_t)&sinfo64, sizeof(user_siginfo_t));
	sinfo64.si_signo = sig;

	if (proc_is64bit(p)) {
		x86_thread_state64_t	*tstate64;
		struct user_ucontext64	uctx64;

		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctx.mctx64.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_FLOAT_STATE64;
		state_count = x86_FLOAT_STATE64_COUNT;
		state = (void *)&mctx.mctx64.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE64;
		state_count = x86_EXCEPTION_STATE64_COUNT;
		state = (void *)&mctx.mctx64.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate64 = &mctx.mctx64.ss;

		if (altstack && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
			if (uthsigaltstack != 0) {
				ua_sp = ut->uu_sigstk.ss_sp;
				stack_size = ut->uu_sigstk.ss_size;
				ua_sp += stack_size;
				ut->uu_sigstk.ss_flags |= SA_ONSTACK;
			} else {
				ua_sp = ps->ps_sigstk.ss_sp;
				stack_size = ps->ps_sigstk.ss_size;
				ua_sp += stack_size;
				ps->ps_sigstk.ss_flags |= SA_ONSTACK;
			}
		} else
			ua_sp = tstate64->rsp;
		ua_cr2 = mctx.mctx64.es.faultvaddr;

		/* The x86_64 ABI defines a 128-byte red zone. */
		ua_sp -= C_64_REDZONE_LEN;
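		/*
		 * (Added background note:) the red zone is the 128 bytes below
		 * %rsp that the ABI lets leaf code use without moving the stack
		 * pointer, so the signal frame is built below it to avoid
		 * clobbering data the interrupted code may still be using.
		 */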

		ua_sp -= sizeof (struct user_ucontext64);
		ua_uctxp = ua_sp;			 // someone tramples the first word!

		ua_sp -= sizeof (user_siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext64);
		ua_mctxp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'ua_fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(user_addr_t);
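		/*
		 * (Added note:) the kernel does not 'call' into the trampoline --
		 * it points %rip at it directly -- so it reserves the 8 bytes a
		 * call instruction would have pushed.  _sigtramp therefore sees
		 * the same %rsp alignment it would after an ordinary function call.
		 */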

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx64, sizeof(uctx64));

		uctx64.uc_onstack = oonstack;
		uctx64.uc_sigmask = mask;
		uctx64.uc_stack.ss_sp = ua_fp;
		uctx64.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx64.uc_stack.ss_flags |= SS_ONSTACK;
		uctx64.uc_link = 0;

		uctx64.uc_mcsize = sizeof(struct mcontext64);
		uctx64.uc_mcontext64 = ua_mctxp;

		if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
			goto bad;

		if (copyout((caddr_t)&mctx.mctx64, ua_mctxp, sizeof (struct mcontext64)))
			goto bad;

		sinfo64.pad[0]  = tstate64->rsp;
		sinfo64.si_addr = tstate64->rip;

		tstate64->rip = ps->ps_trampact[sig];
		tstate64->rsp = ua_fp;
		tstate64->rflags = get_eflags_exportmask();
		/*
		 * JOE - might not need to set these
		 */
		tstate64->cs = USER64_CS;
		tstate64->fs = NULL_SEG;
		tstate64->gs = USER_CTHREAD;

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 */
		tstate64->rdi = ua_catcher;
		tstate64->rsi = infostyle;
		tstate64->rdx = sig;
		tstate64->rcx = ua_sip;
		tstate64->r8  = ua_uctxp;
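		/*
		 * (Added note, describing the Libc side; roughly:) the trampoline
		 * uses these register values -- catcher, style, signal number,
		 * siginfo and ucontext -- to invoke a handler installed with
		 * SA_SIGINFO as
		 *
		 *	void handler(int sig, siginfo_t *info, void *uctx);
		 */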

	} else {
		x86_thread_state32_t	*tstate32;
		struct ucontext		uctx32;
		struct sigframe32	frame32;

		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)&mctx.mctx32.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_FLOAT_STATE32;
		state_count = x86_FLOAT_STATE32_COUNT;
		state = (void *)&mctx.mctx32.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE32;
		state_count = x86_EXCEPTION_STATE32_COUNT;
		state = (void *)&mctx.mctx32.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate32 = &mctx.mctx32.ss;

		if (altstack && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
			if (uthsigaltstack != 0) {
				ua_sp = ut->uu_sigstk.ss_sp;
				stack_size = ut->uu_sigstk.ss_size;
				ua_sp += stack_size;
				ut->uu_sigstk.ss_flags |= SA_ONSTACK;
			} else {
				ua_sp = ps->ps_sigstk.ss_sp;
				stack_size = ps->ps_sigstk.ss_size;
				ua_sp += stack_size;
				ps->ps_sigstk.ss_flags |= SA_ONSTACK;
			}
		} else
			ua_sp = tstate32->esp;
		ua_cr2 = mctx.mctx32.es.faultvaddr;

		ua_sp -= sizeof (struct ucontext);
		ua_uctxp = ua_sp;			 // someone tramples the first word!

		ua_sp -= sizeof (siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext32);
		ua_mctxp = ua_sp;

		ua_sp -= sizeof (struct sigframe32);
		ua_fp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(frame32.retaddr);

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 */
		frame32.retaddr = -1;
		frame32.sigstyle = infostyle;
		frame32.sig = sig;
		frame32.catcher = CAST_DOWN(sig_t, ua_catcher);
		frame32.sinfo = CAST_DOWN(siginfo_t *, ua_sip);
		frame32.uctx = CAST_DOWN(struct ucontext *, ua_uctxp);

		if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
			goto bad;
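
		/*
		 * (Added note:) unlike the 64-bit path, which passes the handler
		 * arguments in registers, the i386 path passes them on the user
		 * stack in the sigframe32 just copied out; the trampoline reads
		 * catcher, style, signal number, siginfo and ucontext from fixed
		 * offsets off %esp.
		 */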

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx32, sizeof(uctx32));

		uctx32.uc_onstack = oonstack;
		uctx32.uc_sigmask = mask;
		uctx32.uc_stack.ss_sp = CAST_DOWN(char *, ua_fp);
		uctx32.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx32.uc_stack.ss_flags |= SS_ONSTACK;
		uctx32.uc_link = 0;

		uctx32.uc_mcsize = sizeof(struct mcontext32);

		uctx32.uc_mcontext = CAST_DOWN(struct mcontext *, ua_mctxp);

		if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
			goto bad;

		if (copyout((caddr_t)&mctx.mctx32, ua_mctxp, sizeof (struct mcontext32)))
			goto bad;

		sinfo64.pad[0]  = tstate32->esp;
		sinfo64.si_addr = tstate32->eip;
	}

	switch (sig) {
		case SIGCHLD:
			sinfo64.si_pid = p->si_pid;
			p->si_pid = 0;
			sinfo64.si_status = p->si_status;
			p->si_status = 0;
			sinfo64.si_uid = p->si_uid;
			p->si_uid = 0;
			sinfo64.si_code = p->si_code;
			p->si_code = 0;
			if (sinfo64.si_code == CLD_EXITED) {
				if (WIFEXITED(sinfo64.si_status))
					sinfo64.si_code = CLD_EXITED;
				else if (WIFSIGNALED(sinfo64.si_status)) {
					if (WCOREDUMP(sinfo64.si_status))
						sinfo64.si_code = CLD_DUMPED;
					else
						sinfo64.si_code = CLD_KILLED;
				}
			}
			break;
		case SIGILL:
			switch (ut->uu_code) {
				case EXC_I386_INVOP:
					sinfo64.si_code = ILL_ILLOPC;
					break;
				case EXC_I386_GPFLT:
					sinfo64.si_code = ILL_PRVOPC;
					break;
				default:
					printf("unknown SIGILL code %d\n", ut->uu_code);
					sinfo64.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
#define FP_IE 0 /* Invalid operation */
#define FP_DE 1 /* Denormalized operand */
#define FP_ZE 2 /* Zero divide */
#define FP_OE 3 /* overflow */
#define FP_UE 4 /* underflow */
#define FP_PE 5 /* precision */
			if (ut->uu_subcode & (1 << FP_ZE)) {
				sinfo64.si_code = FPE_FLTDIV;
			} else if (ut->uu_subcode & (1 << FP_OE)) {
				sinfo64.si_code = FPE_FLTOVF;
			} else if (ut->uu_subcode & (1 << FP_UE)) {
				sinfo64.si_code = FPE_FLTUND;
			} else if (ut->uu_subcode & (1 << FP_PE)) {
				sinfo64.si_code = FPE_FLTRES;
			} else if (ut->uu_subcode & (1 << FP_IE)) {
				sinfo64.si_code = FPE_FLTINV;
			} else {
				printf("unknown SIGFPE code %d, subcode %x\n",
					ut->uu_code, ut->uu_subcode);
				sinfo64.si_code = FPE_NOOP;
			}
			break;
		case SIGBUS:
			sinfo64.si_code = BUS_ADRERR;
			sinfo64.si_addr = ua_cr2;
			break;
		case SIGTRAP:
			sinfo64.si_code = TRAP_BRKPT;
			break;
		case SIGSEGV:
			sinfo64.si_addr = ua_cr2;

			switch (ut->uu_code) {
				case KERN_PROTECTION_FAILURE:
					sinfo64.si_code = SEGV_ACCERR;
					break;
				case KERN_INVALID_ADDRESS:
					sinfo64.si_code = SEGV_MAPERR;
					break;
				default:
					printf("unknown SIGSEGV code %d\n", ut->uu_code);
					sinfo64.si_code = FPE_NOOP;
			}
			break;
		default:
			break;
	}

	if (proc_is64bit(p)) {
		if (copyout((caddr_t)&sinfo64, ua_sip, sizeof (sinfo64)))
			goto bad;

		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctx.mctx64.ss;
	} else {
		x86_thread_state32_t	*tstate32;
		siginfo_t		sinfo32;

		bzero((caddr_t)&sinfo32, sizeof(siginfo_t));

		sinfo32.si_signo = sinfo64.si_signo;
		sinfo32.si_code = sinfo64.si_code;
		sinfo32.si_pid = sinfo64.si_pid;
		sinfo32.si_uid = sinfo64.si_uid;
		sinfo32.si_status = sinfo64.si_status;
		sinfo32.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
		sinfo32.pad[0] = sinfo64.pad[0];

		if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
			goto bad;

		tstate32 = &mctx.mctx32.ss;
		tstate32->eip = CAST_DOWN(unsigned int, ps->ps_trampact[sig]);
		tstate32->esp = CAST_DOWN(unsigned int, ua_fp);

		tstate32->eflags = get_eflags_exportmask();

		tstate32->cs = USER_CS;
		tstate32->ss = USER_DS;
		tstate32->ds = USER_DS;
		tstate32->es = USER_DS;
		tstate32->fs = NULL_SEG;
		tstate32->gs = USER_CTHREAD;

		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)tstate32;
	}
	if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
		goto bad;
	ml_fp_setvalid(FALSE);

	return;

bad:
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	psignal_lock(p, SIGILL, 0);
	return;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
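/*
 * (Added note, describing the usual path:) a process does not normally call
 * sigreturn directly; after the handler returns, Libc's _sigtramp passes the
 * ucontext it was handed back to this system call so the thread state saved
 * by sendsig() above can be restored.
 */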
int
sigreturn(
	struct proc *p,
	struct sigreturn_args *uap,
	__unused int *retval)
{
	union {
		struct mcontext32	mctx32;
		struct mcontext64	mctx64;
	} mctx;
	thread_t thread = current_thread();
	struct uthread * ut;
	int	error;
	int	uthsigaltstack = 0;
	int	onstack = 0;

	mach_msg_type_number_t ts_count;
	unsigned int           ts_flavor;
	void		      *ts;
	mach_msg_type_number_t fs_count;
	unsigned int           fs_flavor;
	void		      *fs;

	ut = (struct uthread *)get_bsdthread_info(thread);
	uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;

	if (proc_is64bit(p)) {
		struct user_ucontext64	uctx64;

		if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
			return(error);

		if ((error = copyin(uctx64.uc_mcontext64, (void *)&mctx.mctx64, sizeof (struct mcontext64))))
			return(error);

		onstack = uctx64.uc_onstack & 01;
		ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE64;
		ts_count  = x86_THREAD_STATE64_COUNT;
		ts = (void *)&mctx.mctx64.ss;

		fs_flavor = x86_FLOAT_STATE64;
		fs_count  = x86_FLOAT_STATE64_COUNT;
		fs = (void *)&mctx.mctx64.fs;
	} else {
		struct ucontext	uctx32;

		if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
			return(error);

		if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)&mctx.mctx32, sizeof (struct mcontext32))))
			return(error);

		onstack = uctx32.uc_onstack & 01;
		ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE32;
		ts_count  = x86_THREAD_STATE32_COUNT;
		ts = (void *)&mctx.mctx32.ss;

		fs_flavor = x86_FLOAT_STATE32;
		fs_count  = x86_FLOAT_STATE32_COUNT;
		fs = (void *)&mctx.mctx32.fs;
	}

	if (onstack) {
		if (uthsigaltstack != 0)
			ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		else
			p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		if (uthsigaltstack != 0)
			ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		else
			p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	}

	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(thread);

	/*
	 * thread_set_state() does all the needed checks for the passed in content
	 */
	if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS)
		return(EINVAL);

	ml_fp_setvalid(TRUE);

	if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS)
		return(EINVAL);

	return (EJUSTRETURN);
}

/*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
 */
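/*
 * (Added note:) the machine-independent exception path asks this routine
 * first; a FALSE return tells the caller to fall back to its generic
 * Mach-exception-to-signal translation.
 */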

boolean_t
machine_exception(
	int		exception,
	int		code,
	__unused int	subcode,
	int		*unix_signal,
	int		*unix_code)
{

	switch(exception) {

	case EXC_BAD_INSTRUCTION:
		*unix_signal = SIGILL;
		*unix_code = code;
		break;

	case EXC_ARITHMETIC:
		*unix_signal = SIGFPE;
		*unix_code = code;
		break;

	default:
		return(FALSE);
	}

	return(TRUE);
}

#include <sys/systm.h>
#include <sys/sysent.h>

int __pthread_cset(struct sysent *);
void __pthread_creset(struct sysent *);
int
__pthread_cset(struct sysent *callp)
{
	unsigned int cancel_enable;
	thread_t thread;
	struct uthread * uthread;

	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	cancel_enable = callp->sy_cancel;
	if (cancel_enable == _SYSCALL_CANCEL_NONE) {
		uthread->uu_flag |= UT_NOTCANCELPT;
	} else {
		if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			if (cancel_enable == _SYSCALL_CANCEL_PRE)
				return(EINTR);
			else
				thread_abort_safely(thread);
		}
	}
	return(0);
}

/*
 * __pthread_creset() undoes the entry-side marking on the way out of the
 * system call: for calls that are not cancellation points the UT_NOTCANCELPT
 * flag is cleared again.
 */
void
__pthread_creset(struct sysent *callp)
{
	unsigned int cancel_enable;
	thread_t thread;
	struct uthread * uthread;

	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	cancel_enable = callp->sy_cancel;

	if (!cancel_enable)
		uthread->uu_flag &= ~UT_NOTCANCELPT;
}