/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 */
#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ucontext.h>

#include <ppc/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>

#include <kern/thread.h>
#include <kern/thread_act.h>
#include <mach/ppc/thread_status.h>
#include <ppc/proc_reg.h>
#define C_REDZONE_LEN		224
#define C_STK_ALIGN		16
#define C_PARAMSAVE_LEN		64
#define C_LINKAGE_LEN		48
#define TRUNC_DOWN(a,b,c)	(((((unsigned)a)-(b))/(c)) * (c))
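/*
 * Illustrative note (not part of the original source): for a power-of-two
 * alignment c, TRUNC_DOWN(a, b, c) backs off b bytes from a and rounds the
 * result down to a c-byte boundary.  For example, with an assumed user
 * stack pointer of 0xbfff8a44:
 *
 *	TRUNC_DOWN(0xbfff8a44, C_REDZONE_LEN, C_STK_ALIGN)
 *	    = ((0xbfff8a44 - 224) / 16) * 16
 *	    = 0xbfff8960
 *
 * which is how sendsig() below steps over the red zone before carving out
 * the signal frame.
 */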
/*
 * The stack layout possibilities (info style); this needs to match the
 * signal trampoline code.
 *
 * Traditional:			 1
 * Traditional64:		20
 * Traditional64 with vec:	25
 * 32bit context:		30
 * 32bit context with vector:	35
 * 64bit context:		40
 * 64bit context with vector:	45
 * Dual context:		50
 * Dual context with vector:	55
 */

#define UC_TRAD			1
#define UC_TRAD64		20
#define UC_TRAD64_VEC		25
#define UC_FLAVOR		30
#define UC_FLAVOR_VEC		35
#define UC_FLAVOR64		40
#define UC_FLAVOR64_VEC		45
#define UC_DUAL			50
#define UC_DUAL_VEC		55
/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE		((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR_VEC_SIZE	((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_SIZE	((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_VEC_SIZE	((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
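/*
 * Sketch (derived from the sendsig() code below, not part of the original
 * source): the signal frame is carved out of the user stack from high to
 * low addresses, each step re-aligned with TRUNC_DOWN:
 *
 *	interrupted r1 (or top of the alternate signal stack)
 *	  | red zone                   (C_REDZONE_LEN bytes, preserved)
 *	  | mcontext64 and/or mcontext (saved register state)
 *	  | ucontext                   (points at the mcontext above)
 *	  | siginfo_t
 *	  | parameter save area        (C_PARAMSAVE_LEN)
 *	  | linkage area               (C_LINKAGE_LEN)
 *	new r1 handed to the signal trampoline
 */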
/*
 * Arrange for this process to run a signal handler
 */

void
sendsig(p, catcher, sig, mask, code)
	struct proc *p;
	sig_t catcher;
	int sig, mask;
	u_long code;
{
	kern_return_t kretn;
	struct mcontext mctx, *p_mctx;
	struct mcontext64 mctx64, *p_mctx64;
	struct ucontext uctx, *p_uctx;
	siginfo_t sinfo, *p_sinfo;
	struct sigacts *ps = p->p_sigacts;
	int oonstack;
	unsigned long sp;
	unsigned long state_count;
	thread_act_t th_act;
	struct uthread *ut;
	unsigned long paramp, linkp;
	int infostyle = UC_TRAD;
	int dualcontext = 0;
	sig_t trampact;
	int vec_used = 0;
	int stack_size = 0;
	void *tstate;
	int flavor;
	int ctx32 = 1;
	int is_64signalregset(void);
	th_act = current_act();
	ut = get_bsdthread_info(th_act);

	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		infostyle = UC_FLAVOR;
	}
	if (is_64signalregset() && (infostyle == UC_FLAVOR)) {
		dualcontext = 1;
		infostyle = UC_DUAL;
	}
	if (p->p_sigacts->ps_64regset & sigmask(sig)) {
		dualcontext = 0;
		infostyle = UC_FLAVOR64;
	}
	if (is_64signalregset() && (infostyle == UC_TRAD)) {
		infostyle = UC_TRAD64;
	}
	/* I need this for SIGINFO anyway */
	flavor = PPC_THREAD_STATE;
	tstate = (void *)&mctx.ss;
	state_count = PPC_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
		goto bad;
	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_THREAD_STATE64;
		tstate = (void *)&mctx64.ss;
		state_count = PPC_THREAD_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE;
		tstate = (void *)&mctx.es;
		state_count = PPC_EXCEPTION_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE64;
		tstate = (void *)&mctx64.es;
		state_count = PPC_EXCEPTION_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx64.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}
	if (find_user_vec_curr()) {
		vec_used = 1;

		if ((ctx32 == 1) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
		}

		if ((ctx32 == 0) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx64.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
		}
	}
	trampact = ps->ps_trampact[sig];
	oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

	/* figure out where our new stack lives */
	if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		sp = (unsigned long)(ps->ps_sigstk.ss_sp);
		sp += ps->ps_sigstk.ss_size;
		stack_size = ps->ps_sigstk.ss_size;
		ps->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		if (ctx32 == 0)
			sp = (unsigned int)mctx64.ss.r1;
		else
			sp = mctx.ss.r1;
	}
	/* put siginfo on top */

	/* preserve RED ZONE area */
	sp = TRUNC_DOWN(sp, C_REDZONE_LEN, C_STK_ALIGN);

	/* next are the saved registers */
	if ((ctx32 == 0) || dualcontext) {
		sp -= sizeof(*p_mctx64);
		p_mctx64 = (struct mcontext64 *)sp;
	}
	if ((ctx32 == 1) || dualcontext) {
		sp -= sizeof(*p_mctx);
		p_mctx = (struct mcontext *)sp;
	}

	/* context goes first on stack */
	sp -= sizeof(*p_uctx);
	p_uctx = (struct ucontext *)sp;

	/* this is where siginfo goes on stack */
	sp -= sizeof(*p_sinfo);
	p_sinfo = (siginfo_t *)sp;

	/* C calling conventions, create param save and linkage areas */
	sp = TRUNC_DOWN(sp, C_PARAMSAVE_LEN, C_STK_ALIGN);
	sp -= C_LINKAGE_LEN;
	uctx.uc_onstack = oonstack;
	uctx.uc_sigmask = mask;
	uctx.uc_stack.ss_sp = (char *)sp;
	uctx.uc_stack.ss_size = stack_size;
	if (oonstack)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;

	if (ctx32 == 0)
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
	else
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));

	if (vec_used)
		uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));

	if (ctx32 == 0)
		uctx.uc_mcontext = (void *)p_mctx64;
	else
		uctx.uc_mcontext = (void *)p_mctx;
	bzero((caddr_t)&sinfo, sizeof(siginfo_t));
	sinfo.si_signo = sig;
	sinfo.si_addr = (void *)mctx.ss.srr0;
	sinfo.pad[0] = (unsigned int)mctx.ss.r1;
	switch (sig) {
		case SIGCHLD:
			sinfo.si_pid = p->si_pid;
			sinfo.si_status = p->si_status;
			sinfo.si_uid = p->si_uid;
			sinfo.si_code = p->si_code;
			if (sinfo.si_code == CLD_EXITED) {
				if (WIFEXITED(sinfo.si_status))
					sinfo.si_code = CLD_EXITED;
				else if (WIFSIGNALED(sinfo.si_status)) {
					if (WCOREDUMP(sinfo.si_status))
						sinfo.si_code = CLD_DUMPED;
					else
						sinfo.si_code = CLD_KILLED;
				}
			}
			break;
		case SIGILL:
			sinfo.si_addr = (void *)mctx.ss.srr0;
			if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
				sinfo.si_code = ILL_ILLOPC;
			else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
				sinfo.si_code = ILL_PRVOPC;
			else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
				sinfo.si_code = ILL_ILLTRP;
			else
				sinfo.si_code = ILL_NOOP;
			break;
		case SIGFPE:
#define FPSCR_VX	2
#define FPSCR_OX	3
#define FPSCR_UX	4
#define FPSCR_ZX	5
#define FPSCR_XX	6
			sinfo.si_addr = (void *)mctx.ss.srr0;
			if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
				sinfo.si_code = FPE_FLTINV;
			else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
				sinfo.si_code = FPE_FLTOVF;
			else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
				sinfo.si_code = FPE_FLTUND;
			else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
				sinfo.si_code = FPE_FLTDIV;
			else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
				sinfo.si_code = FPE_FLTRES;
			else
				sinfo.si_code = FPE_NOOP;
			break;
		case SIGBUS:
			sinfo.si_addr = (void *)mctx.ss.srr0;
			/* on ppc we generate only if EXC_PPC_UNALIGNED */
			sinfo.si_code = BUS_ADRALN;
			break;
		case SIGSEGV:
			sinfo.si_addr = (void *)mctx.ss.srr0;
			/* First check in srr1 and then in dsisr */
			if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
				sinfo.si_code = SEGV_ACCERR;
			else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
				sinfo.si_code = SEGV_ACCERR;
			else
				sinfo.si_code = SEGV_MAPERR;
			break;
		default:
			break;
	}
	/* copy info out to user space */
	if (copyout((caddr_t)&uctx, (caddr_t)p_uctx, sizeof(struct ucontext)))
		goto bad;
	if (copyout((caddr_t)&sinfo, (caddr_t)p_sinfo, sizeof(siginfo_t)))
		goto bad;
	if ((ctx32 == 0) || dualcontext) {
		tstate = &mctx64;
		if (copyout((caddr_t)tstate, (caddr_t)p_mctx64, uctx.uc_mcsize))
			goto bad;
	}
	if ((ctx32 == 1) || dualcontext) {
		tstate = &mctx;
		if (copyout((caddr_t)tstate, (caddr_t)p_mctx, uctx.uc_mcsize))
			goto bad;
	}
	/* Place our arguments in arg registers: rtm dependent */
	mctx.ss.r3 = (unsigned long)catcher;
	mctx.ss.r4 = (unsigned long)infostyle;
	mctx.ss.r5 = (unsigned long)sig;
	mctx.ss.r6 = (unsigned long)p_sinfo;
	mctx.ss.r7 = (unsigned long)p_uctx;

	mctx.ss.srr0 = (unsigned long)trampact;
	mctx.ss.srr1 = get_msr_exportmask();	/* MSR_EXPORT_MASK_SET */
	mctx.ss.r1 = sp;
	state_count = PPC_THREAD_STATE_COUNT;
	if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, &mctx.ss, &state_count)) != KERN_SUCCESS) {
		panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
	}
	return;
bad:
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	psignal_lock(p, SIGILL, 0);
	return;
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
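/*
 * Illustrative note (not part of the original source): sendsig() above hands
 * the user-space trampoline its arguments in r3..r7 (handler, infostyle,
 * signal number, siginfo pointer, ucontext pointer).  Once the handler
 * returns, the trampoline is expected to re-enter the kernel through
 * sigreturn(uctx, infostyle) below, where ppc_checkthreadstate() revalidates
 * the srr1 mode bits before the saved register state is reloaded.
 */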
#define FOR64_TRANSITION 1
#ifdef FOR64_TRANSITION

struct osigreturn_args {
	struct ucontext *uctx;
};

int
osigreturn(p, uap, retval)
	struct proc *p;
	struct osigreturn_args *uap;
	int *retval;
{
	struct ucontext uctx;
	struct ucontext *p_uctx;
	struct mcontext64 mctx64;
	struct mcontext64 *p_64mctx;
	struct mcontext *p_mctx;
	int error;
	thread_act_t th_act;
	struct sigacts *ps = p->p_sigacts;
	sigset_t mask;
	register sig_t action;
	unsigned long state_count;
	unsigned int state_flavor;
	struct uthread *ut;
	int vec_used = 0;
	void *tsptr, *fptr, *vptr, *mactx;
	void ppc_checkthreadstate(void *, int);
	th_act = current_act();
	/* lets use the larger one */
	mactx = (void *)&mctx64;

	ut = (struct uthread *)get_bsdthread_info(th_act);
	if (error = copyin(uap->uctx, &uctx, sizeof(struct ucontext))) {
		return(error);
	}
	if (error = copyin(uctx.uc_mcontext, mactx, uctx.uc_mcsize)) {
		return(error);
	}

	if (uctx.uc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;

	ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(current_act());
	switch (uctx.uc_mcsize) {
		case UC_FLAVOR64_VEC_SIZE:
			vec_used = 1;
			/* FALLTHROUGH */
		case UC_FLAVOR64_SIZE: {
			p_64mctx = (struct mcontext64 *)mactx;
			tsptr = (void *)&p_64mctx->ss;
			fptr = (void *)&p_64mctx->fs;
			vptr = (void *)&p_64mctx->vs;
			state_flavor = PPC_THREAD_STATE64;
			state_count = PPC_THREAD_STATE64_COUNT;
			}
			break;
		case UC_FLAVOR_VEC_SIZE:
			vec_used = 1;
			/* FALLTHROUGH */
		case UC_FLAVOR_SIZE:
		default: {
			p_mctx = (struct mcontext *)mactx;
			tsptr = (void *)&p_mctx->ss;
			fptr = (void *)&p_mctx->fs;
			vptr = (void *)&p_mctx->vs;
			state_flavor = PPC_THREAD_STATE;
			state_count = PPC_THREAD_STATE_COUNT;
			}
			break;
	}
	/* validate the thread state, set/reset appropriate mode bits in srr1 */
	(void)ppc_checkthreadstate(tsptr, state_flavor);

	if (thread_setstatus(th_act, state_flavor, tsptr, &state_count) != KERN_SUCCESS) {
		return(EINVAL);
	}

	state_count = PPC_FLOAT_STATE_COUNT;
	if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, &state_count) != KERN_SUCCESS) {
		return(EINVAL);
	}

	mask = sigmask(SIGFPE);
	if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
		action = ps->ps_sigact[SIGFPE];
		if ((action != SIG_DFL) && (action != SIG_IGN)) {
			thread_enable_fpe(th_act, 1);
		}
	}

	if (vec_used) {
		state_count = PPC_VECTOR_STATE_COUNT;
		if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, &state_count) != KERN_SUCCESS) {
			return(EINVAL);
		}
	}

	return (EJUSTRETURN);
}
#endif /* FOR64_TRANSITION */
struct sigreturn_args {
	struct ucontext *uctx;
	int infostyle;
};

int
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	struct ucontext uctx;
	struct ucontext *p_uctx;
	char mactx[sizeof(struct mcontext64)];
	struct mcontext *p_mctx;
	struct mcontext64 *p_64mctx;
	int error;
	thread_act_t th_act;
	struct sigacts *ps = p->p_sigacts;
	sigset_t mask;
	register sig_t action;
	unsigned long state_count;
	unsigned int state_flavor;
	struct uthread *ut;
	int vec_used = 0;
	void *tsptr, *fptr, *vptr;
	int infostyle = uap->infostyle;
	void ppc_checkthreadstate(void *, int);
	th_act = current_act();

	ut = (struct uthread *)get_bsdthread_info(th_act);
	if (error = copyin(uap->uctx, &uctx, sizeof(struct ucontext))) {
		return(error);
	}
	if (error = copyin(uctx.uc_mcontext, mactx, uctx.uc_mcsize)) {
		return(error);
	}

	if (uctx.uc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;

	ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(current_act());
	switch (infostyle) {
		case UC_FLAVOR64_VEC:
		case UC_TRAD64_VEC:
			vec_used = 1;
			/* FALLTHROUGH */
		case UC_TRAD64:
		case UC_FLAVOR64: {
			p_64mctx = (struct mcontext64 *)mactx;
			tsptr = (void *)&p_64mctx->ss;
			fptr = (void *)&p_64mctx->fs;
			vptr = (void *)&p_64mctx->vs;
			state_flavor = PPC_THREAD_STATE64;
			state_count = PPC_THREAD_STATE64_COUNT;
			}
			break;
		case UC_FLAVOR_VEC:
			vec_used = 1;
			/* FALLTHROUGH */
		case UC_FLAVOR:
		default: {
			p_mctx = (struct mcontext *)mactx;
			tsptr = (void *)&p_mctx->ss;
			fptr = (void *)&p_mctx->fs;
			vptr = (void *)&p_mctx->vs;
			state_flavor = PPC_THREAD_STATE;
			state_count = PPC_THREAD_STATE_COUNT;
			}
			break;
	}
	/* validate the thread state, set/reset appropriate mode bits in srr1 */
	(void)ppc_checkthreadstate(tsptr, state_flavor);

	if (thread_setstatus(th_act, state_flavor, tsptr, &state_count) != KERN_SUCCESS) {
		return(EINVAL);
	}

	state_count = PPC_FLOAT_STATE_COUNT;
	if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, &state_count) != KERN_SUCCESS) {
		return(EINVAL);
	}

	mask = sigmask(SIGFPE);
	if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
		action = ps->ps_sigact[SIGFPE];
		if ((action != SIG_DFL) && (action != SIG_IGN)) {
			thread_enable_fpe(th_act, 1);
		}
	}

	if (vec_used) {
		state_count = PPC_VECTOR_STATE_COUNT;
		if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, &state_count) != KERN_SUCCESS) {
			return(EINVAL);
		}
	}

	return (EJUSTRETURN);
}
/*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
 */
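/*
 * Summary of the translation below (illustrative, not part of the original
 * source): EXC_BAD_INSTRUCTION maps to SIGILL, EXC_ARITHMETIC to SIGFPE, and
 * EXC_SOFTWARE with a code of EXC_PPC_TRAP to SIGTRAP, with the Mach code
 * value forwarded as the unix code.
 */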
boolean_t
machine_exception(
	int	exception,
	int	code,
	int	subcode,
	int	*unix_signal,
	int	*unix_code
)
{
	switch (exception) {

	case EXC_BAD_INSTRUCTION:
		*unix_signal = SIGILL;
		*unix_code = code;
		break;

	case EXC_ARITHMETIC:
		*unix_signal = SIGFPE;
		*unix_code = code;
		break;

	case EXC_SOFTWARE:
		if (code == EXC_PPC_TRAP) {
			*unix_signal = SIGTRAP;