/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 */
#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <ppc/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>

#include <kern/thread.h>
#include <mach/ppc/thread_status.h>
#include <ppc/proc_reg.h>

// #include <machine/thread.h>	XXX include path messed up for some reason...
/* XXX functions not in a Mach header */
extern kern_return_t thread_getstatus(register thread_t act, int flavor,
			thread_state_t tstate, mach_msg_type_number_t *count);
extern unsigned int get_msr_exportmask(void);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
			thread_state_t tstate, mach_msg_type_number_t count);
extern void ppc_checkthreadstate(void *, int);
extern struct savearea_vec *find_user_vec_curr(void);
extern int thread_enable_fpe(thread_t act, int onoff);
#define C_32_REDZONE_LEN	224
#define C_32_STK_ALIGN		16
#define C_32_PARAMSAVE_LEN	64
#define C_32_LINKAGE_LEN	48

#define C_64_REDZONE_LEN	320
#define C_64_STK_ALIGN		32
#define C_64_PARAMSAVE_LEN	64
#define C_64_LINKAGE_LEN	48

#define TRUNC_DOWN32(a,b,c)	((((uint32_t)a)-(b)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a,b,c)	((((uint64_t)a)-(b)) & ((uint64_t)(-(c))))
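
/*
 * Worked example (illustrative values only): TRUNC_DOWN32 reserves 'b' bytes
 * below 'a' and then rounds down to a 'c'-byte boundary, assuming 'c' is a
 * power of two so that (uint32_t)(-(c)) is the alignment mask:
 *
 *	TRUNC_DOWN32(0xbffff5d4, C_32_REDZONE_LEN, C_32_STK_ALIGN)
 *	  = (0xbffff5d4 - 224) & ~0xf
 *	  = 0xbffff4f4 & 0xfffffff0
 *	  = 0xbffff4f0
 */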
/*
 * The stack layout possibilities (info style); this needs to match the
 * signal trampoline code:
 *
 *	Traditional:			1
 *	Traditional with vector		6
 *	Traditional64:			20
 *	Traditional64 with vec:		25
 *	32bit context			30
 *	32bit context with vector	35
 *	64bit context			40
 *	64bit context with vector	45
 *	Dual context			50
 *	Dual context with vector	55
 */

#define UC_TRAD			1
#define UC_TRAD_VEC		6
#define UC_TRAD64		20
#define UC_TRAD64_VEC		25
#define UC_FLAVOR		30
#define UC_FLAVOR_VEC		35
#define UC_FLAVOR64		40
#define UC_FLAVOR64_VEC		45
#define UC_DUAL			50
#define UC_DUAL_VEC		55

#define UC_SET_ALT_STACK	0x40000000
#define UC_RESET_ALT_STACK	0x80000000
/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
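
/*
 * Note: these four sizes are the only uc_mcsize values accepted by the
 * validation switch in sigreturn() below; sendsig() computes uc_mcsize from
 * the same *_COUNT constants, so the two ends stay in agreement.
 */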
/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_32to64(struct ucontext64 *in, struct user_ucontext64 *out)
{
	out->uc_onstack = in->uc_onstack;
	out->uc_sigmask = in->uc_sigmask;

	/* internal "structure assign" */
	out->uc_stack.ss_sp = CAST_USER_ADDR_T(in->uc_stack.ss_sp);
	out->uc_stack.ss_size = in->uc_stack.ss_size;
	out->uc_stack.ss_flags = in->uc_stack.ss_flags;

	out->uc_link = CAST_USER_ADDR_T(in->uc_link);
	out->uc_mcsize = in->uc_mcsize;
	out->uc_mcontext64 = CAST_USER_ADDR_T(in->uc_mcontext64);
}
/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then its values of uc_stack.ss_size and uc_mcsize will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_64to32(struct user_ucontext64 *in, struct ucontext64 *out)
{
	out->uc_onstack = in->uc_onstack;
	out->uc_sigmask = in->uc_sigmask;

	/* internal "structure assign" */
	out->uc_stack.ss_sp = CAST_DOWN(void *, in->uc_stack.ss_sp);
	out->uc_stack.ss_size = in->uc_stack.ss_size;	/* range reduction */
	out->uc_stack.ss_flags = in->uc_stack.ss_flags;

	out->uc_link = CAST_DOWN(void *, in->uc_link);
	out->uc_mcsize = in->uc_mcsize;			/* range reduction */
	out->uc_mcontext64 = CAST_DOWN(void *, in->uc_mcontext64);
}
/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
	out->si_band = in->si_band;			/* range reduction */
	out->__pad[0] = in->pad[0];			/* mcontext.ss.r1 */
}
static void
siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = in->si_addr;
	out->si_value.sival_ptr = in->si_value.sival_ptr;
	out->si_band = in->si_band;			/* range reduction */
	out->__pad[0] = in->pad[0];			/* mcontext.ss.r1 */
}
/*
 * Arrange for this process to run a signal handler
 */

void
sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused uint32_t code)
{
	struct mcontext mctx;
	user_addr_t p_mctx = USER_ADDR_NULL;	/* mcontext dest. */
	struct mcontext64 mctx64;
	user_addr_t p_mctx64 = USER_ADDR_NULL;	/* mcontext dest. */
	struct user_ucontext64 uctx;
	user_addr_t p_uctx;			/* user stack addr top copy ucontext */
	user_siginfo_t sinfo;
	user_addr_t p_sinfo;			/* user stack addr top copy siginfo */
	struct sigacts *ps = p->p_sigacts;
	int oonstack;
	user_addr_t sp;
	mach_msg_type_number_t state_count;
	thread_t th_act;
	struct uthread *ut;
	int infostyle = UC_TRAD;
	int dualcontext = 0;
	user_addr_t trampact;
	int vec_used = 0;
	int stack_size = 0;
	void *tstate;
	int flavor;
	int ctx32 = 1;
	kern_return_t kretn;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);
	/*
	 * XXX We conditionalize type passed here based on SA_SIGINFO, but
	 * XXX we always send up all the information, regardless; perhaps
	 * XXX this should not be conditionalized?  Defer making this change
	 * XXX now, due to possible tools impact.
	 */
	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		/*
		 * If SA_SIGINFO is set, then we must provide the user
		 * process both a siginfo_t and a context argument.  We call
		 * this "FLAVORED", as opposed to "TRADITIONAL", which doesn't
		 * expect a context.  "DUAL" is a type of "FLAVORED".
		 */
		if (is_64signalregset()) {
			/*
			 * If this is a 64 bit CPU, we must include a 64 bit
			 * context in the data we pass to user space; we may
			 * or may not also include a 32 bit context at the
			 * same time, for non-leaf functions.
			 *
			 * The user may also explicitly choose to not receive
			 * a 32 bit context, at their option; we only allow
			 * this to happen on 64 bit processors, for obvious
			 * reasons.
			 */
			if (IS_64BIT_PROCESS(p) ||
			    (p->p_sigacts->ps_64regset & sigmask(sig))) {
				/*
				 * For a 64 bit process, there is no 32 bit
				 * context.
				 */
				ctx32 = 0;
				infostyle = UC_FLAVOR64;
			} else {
				/*
				 * For a 32 bit process on a 64 bit CPU, we
				 * may have 64 bit leaf functions, so we need
				 * both contexts.
				 */
				dualcontext = 1;
				infostyle = UC_DUAL;
			}
		} else {
			/*
			 * If this is a 32 bit CPU, then we only have a 32 bit
			 * context to contend with.
			 */
			infostyle = UC_FLAVOR;
		}
	} else {
		/*
		 * If SA_SIGINFO is not set, then we have a traditional style
		 * call which does not need additional context passed.  The
		 * default is 32 bit traditional.
		 *
		 * XXX The second check is redundant on PPC32; keep it anyway.
		 */
		if (is_64signalregset() || IS_64BIT_PROCESS(p)) {
			/*
			 * However, if this is a 64 bit CPU, we need to change
			 * this to 64 bit traditional, and drop the 32 bit
			 * context.
			 */
			ctx32 = 0;
			infostyle = UC_TRAD64;
		}
	}
	/* I need this for SIGINFO anyway */
	flavor = PPC_THREAD_STATE;
	tstate = (void *)&mctx.ss;
	state_count = PPC_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
		goto bad;
	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_THREAD_STATE64;
		tstate = (void *)&mctx64.ss;
		state_count = PPC_THREAD_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}
	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE;
		tstate = (void *)&mctx.es;
		state_count = PPC_EXCEPTION_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}
	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE64;
		tstate = (void *)&mctx64.es;
		state_count = PPC_EXCEPTION_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}
	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}
	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx64.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}
	if (find_user_vec_curr()) {
		vec_used = 1;

		if ((ctx32 == 1) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
		}

		if ((ctx32 == 0) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx64.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
		}
		infostyle += 5;		/* select the vector variant of the flavor */
	}
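
	/*
	 * At this point mctx (32 bit) and/or mctx64 (64 bit) hold the thread,
	 * exception, float and (optionally) vector state of the interrupted
	 * thread; which of the two is populated depends on ctx32/dualcontext
	 * above.  Everything below just decides where on the user stack these
	 * snapshots, the siginfo and the ucontext will be copied out.
	 */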
	trampact = ps->ps_trampact[sig];
	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;

	/* figure out where our new stack lives */
	if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		sp = ut->uu_sigstk.ss_sp;
		sp += ut->uu_sigstk.ss_size;
		stack_size = ut->uu_sigstk.ss_size;
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		if (ctx32 == 0)
			sp = mctx64.ss.r1;
		else
			sp = CAST_USER_ADDR_T(mctx.ss.r1);
	}
	/* put siginfo on top */

	/* preserve RED ZONE area */
	if (IS_64BIT_PROCESS(p))
		sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
	else
		sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);

	/* next are the saved registers */
	if ((ctx32 == 0) || dualcontext) {
		sp -= sizeof(struct mcontext64);
		p_mctx64 = sp;
	}
	if ((ctx32 == 1) || dualcontext) {
		sp -= sizeof(struct mcontext);
		p_mctx = sp;
	}

	if (IS_64BIT_PROCESS(p)) {
		/* context goes first on stack */
		sp -= sizeof(struct user_ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(user64_siginfo_t);
		p_sinfo = sp;

		sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
	} else {
		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyout().
		 */

		/* context goes first on stack */
		sp -= sizeof(struct ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(user32_siginfo_t);
		p_sinfo = sp;

		sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
	}
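
	/*
	 * Sketch of the resulting layout (addresses decreasing downward),
	 * derived from the adjustments above; exact offsets depend on the
	 * structure sizes and alignment in effect:
	 *
	 *	interrupted r1 (or top of the alternate stack)
	 *	red zone (C_64/C_32_REDZONE_LEN, skipped)
	 *	mcontext64 (p_mctx64)	if 64 bit or dual context
	 *	mcontext   (p_mctx)	if 32 bit or dual context
	 *	ucontext   (p_uctx)
	 *	siginfo    (p_sinfo)
	 *	param save + linkage area for the trampoline
	 *	new, aligned r1 (sp)
	 */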
	uctx.uc_onstack = oonstack;
	uctx.uc_sigmask = mask;
	uctx.uc_stack.ss_sp = sp;
	uctx.uc_stack.ss_size = stack_size;
	if (oonstack)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;
	uctx.uc_link = 0;

	if (ctx32 == 0)
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
	else
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));

	if (vec_used)
		uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));

	if (ctx32 == 0)
		uctx.uc_mcontext64 = p_mctx64;
	else
		uctx.uc_mcontext64 = p_mctx;
	bzero((caddr_t)&sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;

	if (ctx32 == 0) {
		sinfo.si_addr = mctx64.ss.srr0;
		sinfo.pad[0] = mctx64.ss.r1;
	} else {
		sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
		sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
	}
	switch (sig) {
		case SIGILL:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if (ctx32 == 0) {
				if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			} else {
				if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if (ctx32 == 0) {
				if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			} else {
				if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			}
			break;
		case SIGBUS:
			if (ctx32 == 0)
				sinfo.si_addr = mctx64.es.dar;
			else
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
			/* on ppc we generate only if EXC_PPC_UNALIGNED */
			sinfo.si_code = BUS_ADRALN;
			break;
		case SIGSEGV:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if (ctx32 == 0) {
				sinfo.si_addr = mctx64.es.dar;
				/* First check in srr1 and then in dsisr */
				if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			} else {
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
				/* First check in srr1 and then in dsisr */
				if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			}
			break;
		default:
		{
			int status_and_exitcode;

			/*
			 * All other signals need to fill out a minimum set of
			 * information for the siginfo structure passed into
			 * the signal handler, if SA_SIGINFO was specified.
			 *
			 * p->si_status actually contains both the status and
			 * the exit code; we save it off in its own variable
			 * for later breakdown.
			 */
			sinfo.si_pid = p->si_pid;
			status_and_exitcode = p->si_status;
			sinfo.si_uid = p->si_uid;
			sinfo.si_code = p->si_code;

			if (sinfo.si_code == CLD_EXITED) {
				if (WIFEXITED(status_and_exitcode))
					sinfo.si_code = CLD_EXITED;
				else if (WIFSIGNALED(status_and_exitcode)) {
					if (WCOREDUMP(status_and_exitcode)) {
						sinfo.si_code = CLD_DUMPED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
					} else {
						sinfo.si_code = CLD_KILLED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
					}
				}
			}
			/*
			 * The recorded status contains the exit code and the
			 * signal information, but the information to be passed
			 * in the siginfo to the handler is supposed to only
			 * contain the status, so we have to shift it out.
			 */
			sinfo.si_status = WEXITSTATUS(status_and_exitcode);
			break;
		}
	}
	/* copy info out to user space */
	if (IS_64BIT_PROCESS(p)) {
		user64_siginfo_t sinfo64;

		siginfo_user_to_user64(&sinfo, &sinfo64);

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = sinfo.si_signo;
		ut->t_dtrace_siginfo.si_code = sinfo.si_code;
		ut->t_dtrace_siginfo.si_pid = sinfo.si_pid;
		ut->t_dtrace_siginfo.si_uid = sinfo.si_uid;
		ut->t_dtrace_siginfo.si_status = sinfo.si_status;
		/* XXX truncates faulting address to void * on K32 */
		ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo.si_addr);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (sig) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}

		/* XXX truncates catcher address to uintptr_t */
		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
			void (*)(void), CAST_DOWN(sig_t, catcher));
#endif /* CONFIG_DTRACE */

		if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
			goto bad;
		if (copyout(&sinfo64, p_sinfo, sizeof(sinfo64)))
			goto bad;
	} else {
		struct ucontext64 uctx32;
		user32_siginfo_t sinfo32;

		ucontext_64to32(&uctx, &uctx32);
		siginfo_user_to_user32(&sinfo, &sinfo32);

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = sinfo.si_signo;
		ut->t_dtrace_siginfo.si_code = sinfo.si_code;
		ut->t_dtrace_siginfo.si_pid = sinfo.si_pid;
		ut->t_dtrace_siginfo.si_uid = sinfo.si_uid;
		ut->t_dtrace_siginfo.si_status = sinfo.si_status;
		ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo.si_addr);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (sig) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}

		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
			void (*)(void), CAST_DOWN(sig_t, catcher));
#endif /* CONFIG_DTRACE */

		if (copyout(&uctx32, p_uctx, sizeof(struct ucontext64)))
			goto bad;
		if (copyout(&sinfo32, p_sinfo, sizeof(sinfo32)))
			goto bad;
	}
	if ((ctx32 == 0) || dualcontext) {
		/*
		 * NOTE: Size of mcontext is not variant between 64bit and
		 * 32bit programs using 64bit registers.
		 */
		if (copyout(&mctx64, p_mctx64, (vec_used ? UC_FLAVOR64_VEC_SIZE : UC_FLAVOR64_SIZE)))
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		if (copyout(&mctx, p_mctx, uctx.uc_mcsize))
			goto bad;
	}
	/* Place our arguments in arg registers: rtm dependent */
	if (IS_64BIT_PROCESS(p)) {
		mctx64.ss.r3 = catcher;
		mctx64.ss.r4 = CAST_USER_ADDR_T(infostyle);
		mctx64.ss.r5 = CAST_USER_ADDR_T(sig);
		mctx64.ss.r6 = p_sinfo;
		mctx64.ss.r7 = p_uctx;

		mctx64.ss.srr0 = trampact;
		/* MSR_EXPORT_MASK_SET */
		mctx64.ss.srr1 = CAST_USER_ADDR_T(get_msr_exportmask());
		mctx64.ss.r1 = sp;
		state_count = PPC_THREAD_STATE64_COUNT;
		if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE64, (void *)&mctx64.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	} else {
		mctx.ss.r3 = CAST_DOWN(uint32_t, catcher);
		mctx.ss.r4 = (uint32_t)infostyle;
		mctx.ss.r5 = (uint32_t)sig;
		mctx.ss.r6 = CAST_DOWN(uint32_t, p_sinfo);
		mctx.ss.r7 = CAST_DOWN(uint32_t, p_uctx);

		mctx.ss.srr0 = CAST_DOWN(uint32_t, trampact);
		/* MSR_EXPORT_MASK_SET */
		mctx.ss.srr1 = get_msr_exportmask();
		mctx.ss.r1 = CAST_DOWN(uint32_t, sp);
		state_count = PPC_THREAD_STATE_COUNT;
		if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, (void *)&mctx.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	}
	return;
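
	/*
	 * Failure path: if any of the thread_getstatus() or copyout() calls
	 * above failed, we land here via 'goto bad': SIGILL is reset to its
	 * default action and delivered, which kills the process.
	 */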
bad:
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	psignal_locked(p, SIGILL);
	return;
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	struct user_ucontext64 uctx;

	char mactx[sizeof(struct mcontext64)];
	struct mcontext *p_mctx;
	struct mcontext64 *p_64mctx;
	int error;
	thread_t th_act;
	struct sigacts *ps = p->p_sigacts;
	sigset_t mask;
	user_addr_t action;
	uint32_t state_count;
	unsigned int state_flavor;
	struct uthread *ut;
	int vec_used = 0;
	void *tsptr, *fptr, *vptr;
	int infostyle = uap->infostyle;

	th_act = current_thread();

	ut = (struct uthread *)get_bsdthread_info(th_act);
	/*
	 * If we are being asked to change the altstack flag on the thread, we
	 * just set/reset it and return (the uap->uctx is not used).
	 */
	if (infostyle == UC_SET_ALT_STACK) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		return (0);
	} else if ((unsigned int)infostyle == UC_RESET_ALT_STACK) {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		return (0);
	}
	if (IS_64BIT_PROCESS(p)) {
		error = copyin(uap->uctx, &uctx, sizeof(struct user_ucontext64));
		if (error)
			return (error);
	} else {
		struct ucontext64 uctx32;

		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyin().
		 */
		error = copyin(uap->uctx, &uctx32, sizeof(struct ucontext));
		if (error)
			return (error);

		ucontext_32to64(&uctx32, &uctx);
	}
	/* validate the machine context size */
	switch (uctx.uc_mcsize) {
		case UC_FLAVOR64_VEC_SIZE:
		case UC_FLAVOR64_SIZE:
		case UC_FLAVOR_VEC_SIZE:
		case UC_FLAVOR_SIZE:
			break;
		default:
			return (EINVAL);
	}

	/*
	 * The 64 bit process mcontext is identical to the mcontext64, so
	 * there is no conversion necessary.
	 */
	error = copyin(uctx.uc_mcontext64, mactx, uctx.uc_mcsize);
	if (error)
		return (error);
	if ((uctx.uc_onstack & 01))
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	else
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;

	ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(current_thread());
	switch (infostyle) {
		case UC_FLAVOR64_VEC:
		case UC_TRAD64_VEC:
			vec_used = 1;
			/* FALLTHROUGH */
		case UC_TRAD64:
		case UC_FLAVOR64:
		{
			p_64mctx = (struct mcontext64 *)mactx;
			tsptr = (void *)&p_64mctx->ss;
			fptr = (void *)&p_64mctx->fs;
			vptr = (void *)&p_64mctx->vs;
			state_flavor = PPC_THREAD_STATE64;
			state_count = PPC_THREAD_STATE64_COUNT;
			break;
		}
		case UC_FLAVOR_VEC:
		case UC_TRAD_VEC:
			vec_used = 1;
			/* FALLTHROUGH */
		case UC_FLAVOR:
		case UC_TRAD:
		default:
		{
			p_mctx = (struct mcontext *)mactx;
			tsptr = (void *)&p_mctx->ss;
			fptr = (void *)&p_mctx->fs;
			vptr = (void *)&p_mctx->vs;
			state_flavor = PPC_THREAD_STATE;
			state_count = PPC_THREAD_STATE_COUNT;
			break;
		}
	} /* switch () */
	/* validate the thread state, set/reset appropriate mode bits in srr1 */
	(void)ppc_checkthreadstate(tsptr, state_flavor);

	if (thread_setstatus(th_act, state_flavor, tsptr, state_count) != KERN_SUCCESS) {
		return (EINVAL);
	}

	state_count = PPC_FLOAT_STATE_COUNT;
	if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, state_count) != KERN_SUCCESS) {
		return (EINVAL);
	}

	mask = sigmask(SIGFPE);
	if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
		action = ps->ps_sigact[SIGFPE];
		if ((action != SIG_DFL) && (action != SIG_IGN)) {
			thread_enable_fpe(th_act, 1);
		}
	}

	if (vec_used) {
		state_count = PPC_VECTOR_STATE_COUNT;
		if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, state_count) != KERN_SUCCESS) {
			return (EINVAL);
		}
	}
	return (EJUSTRETURN);
}
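
/*
 * Note: sigreturn() reports success as EJUSTRETURN because the register state
 * was already installed with thread_setstatus() above; the syscall return
 * path must leave the just-restored registers alone rather than writing a
 * conventional return value into them.
 */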
/*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
 */
boolean_t
machine_exception(int exception,
		  mach_exception_code_t code,
		  __unused mach_exception_subcode_t subcode,
		  int *unix_signal,
		  mach_exception_code_t *unix_code)
{
	switch (exception) {

	case EXC_BAD_INSTRUCTION:
		*unix_signal = SIGILL;
		*unix_code = code;
		break;

	case EXC_ARITHMETIC:
		*unix_signal = SIGFPE;
		*unix_code = code;
		break;

	case EXC_SOFTWARE:
		if (code == EXC_PPC_TRAP) {
			*unix_signal = SIGTRAP;