/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 */
#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <ppc/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>

#include <kern/thread.h>
#include <mach/ppc/thread_status.h>
#include <ppc/proc_reg.h>

// #include <machine/thread.h> XXX include path messed up for some reason...
/* XXX functions not declared in any Mach header */
extern kern_return_t thread_getstatus(register thread_t act, int flavor,
            thread_state_t tstate, mach_msg_type_number_t *count);
extern int is_64signalregset(void);
extern unsigned int get_msr_exportmask(void);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
            thread_state_t tstate, mach_msg_type_number_t count);
extern void ppc_checkthreadstate(void *, int);
extern struct savearea_vec *find_user_vec_curr(void);
extern int thread_enable_fpe(thread_t act, int onoff);
#define C_32_REDZONE_LEN    224
#define C_32_STK_ALIGN      16
#define C_32_PARAMSAVE_LEN  64
#define C_32_LINKAGE_LEN    48

#define C_64_REDZONE_LEN    320
#define C_64_STK_ALIGN      32
#define C_64_PARAMSAVE_LEN  64
#define C_64_LINKAGE_LEN    48
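
/*
 * TRUNC_DOWN32/64 step a stack pointer down past a red zone of 'b' bytes
 * and round the result down to a 'c'-byte boundary (c must be a power of
 * two).  For example, TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN)
 * yields the highest 16-byte-aligned address at least 224 bytes below sp.
 */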
#define TRUNC_DOWN32(a,b,c) ((((uint32_t)a)-(b)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a,b,c) ((((uint64_t)a)-(b)) & ((uint64_t)(-(c))))
/*
 * The stack layout possibilities (info style); this needs to match the
 * signal trampoline code:
 *
 *	Traditional64 with vec:		25
 *	32-bit context with vector:	35
 *	64-bit context with vector:	45
 *	Dual context with vector:	55
 */
#define UC_TRAD64_VEC    25
#define UC_FLAVOR_VEC    35
#define UC_FLAVOR64      40
#define UC_FLAVOR64_VEC  45
#define UC_DUAL_VEC      55
/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_32to64(struct ucontext64 *in, struct user_ucontext64 *out)
{
    out->uc_onstack = in->uc_onstack;
    out->uc_sigmask = in->uc_sigmask;

    /* internal "structure assign" */
    out->uc_stack.ss_sp = CAST_USER_ADDR_T(in->uc_stack.ss_sp);
    out->uc_stack.ss_size = in->uc_stack.ss_size;
    out->uc_stack.ss_flags = in->uc_stack.ss_flags;

    out->uc_link = CAST_USER_ADDR_T(in->uc_link);
    out->uc_mcsize = in->uc_mcsize;
    out->uc_mcontext64 = CAST_USER_ADDR_T(in->uc_mcontext64);
}
/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then its values of uc_stack.ss_size and uc_mcsize will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_64to32(struct user_ucontext64 *in, struct ucontext64 *out)
{
    out->uc_onstack = in->uc_onstack;
    out->uc_sigmask = in->uc_sigmask;

    /* internal "structure assign" */
    out->uc_stack.ss_sp = CAST_DOWN(void *,in->uc_stack.ss_sp);
    out->uc_stack.ss_size = in->uc_stack.ss_size;      /* range reduction */
    out->uc_stack.ss_flags = in->uc_stack.ss_flags;

    out->uc_link = CAST_DOWN(void *,in->uc_link);
    out->uc_mcsize = in->uc_mcsize;                    /* range reduction */
    out->uc_mcontext64 = CAST_DOWN(void *,in->uc_mcontext64);
}
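
/*
 * sendsig() and sigreturn() below always work with the user_ucontext64 /
 * user_siginfo_t forms internally; the converters above and below are used
 * only at the copyin()/copyout() boundary when the target is a 32-bit
 * process.
 */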
/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
{
    out->si_signo = in->si_signo;
    out->si_errno = in->si_errno;
    out->si_code = in->si_code;
    out->si_pid = in->si_pid;
    out->si_uid = in->si_uid;
    out->si_status = in->si_status;
    out->si_addr = CAST_DOWN(void *,in->si_addr);
    /* following cast works for sival_int because of padding */
    out->si_value.sival_ptr = CAST_DOWN(void *,in->si_value.sival_ptr);
    out->si_band = in->si_band;          /* range reduction */
    out->pad[0] = in->pad[0];            /* mcontext.ss.r1 */
}
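
/*
 * For reference, the frame built by sendsig() below is what a user-level
 * SA_SIGINFO handler ultimately observes.  A minimal user-space sketch
 * (not part of this file; the trampoline details live in libc, and the
 * handler prototype shown is the standard sigaction one):
 *
 *	#include <signal.h>
 *
 *	static void
 *	handler(int sig, siginfo_t *info, void *uap)
 *	{
 *		// info and uap point at the siginfo/ucontext images copied
 *		// out by sendsig(); the ucontext's uc_mcontext points at
 *		// the saved register state.
 *	}
 *
 *	struct sigaction sa;
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */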
/*
 * Arrange for this process to run a signal handler
 */

void
sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code)
{
    struct mcontext mctx;
    user_addr_t p_mctx = USER_ADDR_NULL;      /* mcontext dest. */
    struct mcontext64 mctx64;
    user_addr_t p_mctx64 = USER_ADDR_NULL;    /* mcontext dest. */
    struct user_ucontext64 uctx;
    user_addr_t p_uctx;          /* user stack addr to copy ucontext */
    user_siginfo_t sinfo;
    user_addr_t p_sinfo;         /* user stack addr to copy siginfo */
    struct sigacts *ps = p->p_sigacts;
    int oonstack;
    int flavor;
    void *tstate;
    mach_msg_type_number_t state_count;
    int ctx32 = 1;
    int dualcontext = 0;
    int vec_used = 0;
    int infostyle = UC_TRAD;
    user_addr_t sp;
    int stack_size = 0;
    user_addr_t trampact;
    kern_return_t kretn;
    thread_t th_act;
    struct uthread *ut;
    th_act = current_thread();
    ut = get_bsdthread_info(th_act);
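
    /*
     * Decide which context layout ("infostyle") the handler set-up will
     * use: SA_SIGINFO handlers get a full context, handlers that asked
     * for the 64-bit register set (or that run in 64-bit processes) get
     * the 64-bit thread state, and a dual context carries both forms.
     */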
    if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
        infostyle = UC_FLAVOR;
    }
    if (is_64signalregset() && (infostyle == UC_FLAVOR)) {
        dualcontext = 1;
        infostyle = UC_DUAL;
    }
    if (p->p_sigacts->ps_64regset & sigmask(sig)) {
        dualcontext = 0;
        ctx32 = 0;
        infostyle = UC_FLAVOR64;
    }
    /* treat 64 bit processes as having used 64 bit registers */
    if ((IS_64BIT_PROCESS(p) || is_64signalregset()) &&
        (infostyle == UC_TRAD)) {
        ctx32 = 0;
        infostyle = UC_TRAD64;
    }
    if (IS_64BIT_PROCESS(p)) {
        ctx32 = 0;
        dualcontext = 0;
    }
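
    /*
     * Snapshot the thread's machine state.  Depending on ctx32/dualcontext
     * this fills the 32-bit image (mctx), the 64-bit image (mctx64), or
     * both: general registers, exception state, floating point, and (if
     * the thread has used AltiVec) vector state.
     */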
    /* I need this for SIGINFO anyway */
    flavor = PPC_THREAD_STATE;
    tstate = (void *)&mctx.ss;
    state_count = PPC_THREAD_STATE_COUNT;
    if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
        goto bad;

    if ((ctx32 == 0) || dualcontext) {
        flavor = PPC_THREAD_STATE64;
        tstate = (void *)&mctx64.ss;
        state_count = PPC_THREAD_STATE64_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 1) || dualcontext) {
        flavor = PPC_EXCEPTION_STATE;
        tstate = (void *)&mctx.es;
        state_count = PPC_EXCEPTION_STATE_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 0) || dualcontext) {
        flavor = PPC_EXCEPTION_STATE64;
        tstate = (void *)&mctx64.es;
        state_count = PPC_EXCEPTION_STATE64_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 1) || dualcontext) {
        flavor = PPC_FLOAT_STATE;
        tstate = (void *)&mctx.fs;
        state_count = PPC_FLOAT_STATE_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 0) || dualcontext) {
        flavor = PPC_FLOAT_STATE;
        tstate = (void *)&mctx64.fs;
        state_count = PPC_FLOAT_STATE_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }
    if (find_user_vec_curr()) {
        vec_used = 1;

        if ((ctx32 == 1) || dualcontext) {
            flavor = PPC_VECTOR_STATE;
            tstate = (void *)&mctx.vs;
            state_count = PPC_VECTOR_STATE_COUNT;
            if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
                goto bad;
        }
        if ((ctx32 == 0) || dualcontext) {
            flavor = PPC_VECTOR_STATE;
            tstate = (void *)&mctx64.vs;
            state_count = PPC_VECTOR_STATE_COUNT;
            if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
                goto bad;
        }
    }
    trampact = ps->ps_trampact[sig];
    oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

    /* figure out where our new stack lives */
    if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
        (ps->ps_sigonstack & sigmask(sig))) {
        sp = ps->ps_sigstk.ss_sp;
        sp += ps->ps_sigstk.ss_size;
        stack_size = ps->ps_sigstk.ss_size;
        ps->ps_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        if (ctx32 == 0)
            sp = mctx64.ss.r1;
        else
            sp = CAST_USER_ADDR_T(mctx.ss.r1);
    }
    /* put siginfo on top */

    /* preserve RED ZONE area */
    if (IS_64BIT_PROCESS(p))
        sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
    else
        sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);

    /* next are the saved registers */
    if ((ctx32 == 0) || dualcontext) {
        sp -= sizeof(struct mcontext64);
        p_mctx64 = sp;
    }
    if ((ctx32 == 1) || dualcontext) {
        sp -= sizeof(struct mcontext);
        p_mctx = sp;
    }

    if (IS_64BIT_PROCESS(p)) {
        /* context goes first on stack */
        sp -= sizeof(struct user_ucontext64);
        p_uctx = sp;

        /* this is where siginfo goes on stack */
        sp -= sizeof(user_siginfo_t);
        p_sinfo = sp;

        sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
    } else {
        /*
         * struct ucontext and struct ucontext64 are identical in
         * size and content; the only difference is the internal
         * pointer type for the last element, which makes no
         * difference for the copyout().
         */

        /* context goes first on stack */
        sp -= sizeof(struct ucontext64);
        p_uctx = sp;

        /* this is where siginfo goes on stack */
        sp -= sizeof(siginfo_t);
        p_sinfo = sp;

        sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
    }
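
    /*
     * The frame carved out above the new sp now looks like this (higher
     * addresses first):
     *
     *	old sp (or top of the alternate stack)
     *	red zone                     (C_32/64_REDZONE_LEN)
     *	mcontext64 and/or mcontext   (p_mctx64 / p_mctx, saved registers)
     *	ucontext                     (p_uctx)
     *	siginfo                      (p_sinfo)
     *	param save + linkage area for the trampoline's first call
     *	new sp (aligned to C_32/64_STK_ALIGN)
     */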
    uctx.uc_onstack = oonstack;
    uctx.uc_sigmask = mask;
    uctx.uc_stack.ss_sp = sp;
    uctx.uc_stack.ss_size = stack_size;
    if (oonstack)
        uctx.uc_stack.ss_flags |= SS_ONSTACK;

    if (ctx32 == 0)
        uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
    else
        uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));

    if (vec_used)
        uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));

    if (ctx32 == 0)
        uctx.uc_mcontext64 = p_mctx64;
    else
        uctx.uc_mcontext64 = p_mctx;
    bzero((caddr_t)&sinfo, sizeof(user_siginfo_t));
    sinfo.si_signo = sig;
    if (ctx32 == 0) {
        sinfo.si_addr = mctx64.ss.srr0;
        sinfo.pad[0] = mctx64.ss.r1;
    } else {
        sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
        sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
    }
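
    /*
     * Refine si_code and si_addr per signal: SIGCHLD state comes from the
     * proc, while SIGILL/SIGFPE/SIGSEGV/SIGBUS are decoded from the saved
     * srr1, fpscr, dar and dsisr values captured above.
     */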
    switch (sig) {
    case SIGCHLD:
        sinfo.si_pid = p->si_pid;
        sinfo.si_status = p->si_status;
        sinfo.si_uid = p->si_uid;
        sinfo.si_code = p->si_code;
        if (sinfo.si_code == CLD_EXITED) {
            if (WIFEXITED(sinfo.si_status))
                sinfo.si_code = CLD_EXITED;
            else if (WIFSIGNALED(sinfo.si_status)) {
                if (WCOREDUMP(sinfo.si_status))
                    sinfo.si_code = CLD_DUMPED;
                else
                    sinfo.si_code = CLD_KILLED;
            }
        }
        break;
    case SIGILL:
        /*
         * If it's 64 bit and not a dual context, mctx will
         * contain uninitialized data, so we have to use mctx64.
         */
        if (ctx32 == 0) {
            if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
                sinfo.si_code = ILL_ILLOPC;
            else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
                sinfo.si_code = ILL_PRVOPC;
            else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
                sinfo.si_code = ILL_ILLTRP;
            else
                sinfo.si_code = ILL_NOOP;
        } else {
            if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
                sinfo.si_code = ILL_ILLOPC;
            else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
                sinfo.si_code = ILL_PRVOPC;
            else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
                sinfo.si_code = ILL_ILLTRP;
            else
                sinfo.si_code = ILL_NOOP;
        }
        break;
    case SIGFPE:
        /*
         * If it's 64 bit and not a dual context, mctx will
         * contain uninitialized data, so we have to use mctx64.
         */
        if (ctx32 == 0) {
            if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
                sinfo.si_code = FPE_FLTINV;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
                sinfo.si_code = FPE_FLTOVF;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
                sinfo.si_code = FPE_FLTUND;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
                sinfo.si_code = FPE_FLTDIV;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
                sinfo.si_code = FPE_FLTRES;
            else
                sinfo.si_code = FPE_NOOP;
        } else {
            if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
                sinfo.si_code = FPE_FLTINV;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
                sinfo.si_code = FPE_FLTOVF;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
                sinfo.si_code = FPE_FLTUND;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
                sinfo.si_code = FPE_FLTDIV;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
                sinfo.si_code = FPE_FLTRES;
            else
                sinfo.si_code = FPE_NOOP;
        }
        break;
    case SIGBUS:
        if (ctx32 == 0)
            sinfo.si_addr = mctx64.es.dar;
        else
            sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
        /* on ppc we generate only if EXC_PPC_UNALIGNED */
        sinfo.si_code = BUS_ADRALN;
        break;
    case SIGSEGV:
        /*
         * If it's 64 bit and not a dual context, mctx will
         * contain uninitialized data, so we have to use mctx64.
         */
        if (ctx32 == 0) {
            sinfo.si_addr = mctx64.es.dar;
            /* First check in srr1 and then in dsisr */
            if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else
                sinfo.si_code = SEGV_MAPERR;
        } else {
            sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
            /* First check in srr1 and then in dsisr */
            if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else
                sinfo.si_code = SEGV_MAPERR;
        }
        break;

    default:
        break;
    }
    /* copy info out to user space */
    if (IS_64BIT_PROCESS(p)) {
        if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
            goto bad;
        if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t)))
            goto bad;
    } else {
        struct ucontext64 uctx32;
        siginfo_t sinfo32;

        ucontext_64to32(&uctx, &uctx32);
        if (copyout(&uctx32, p_uctx, sizeof(struct ucontext64)))
            goto bad;

        siginfo_64to32(&sinfo, &sinfo32);
        if (copyout(&sinfo32, p_sinfo, sizeof(siginfo_t)))
            goto bad;
    }
    if ((ctx32 == 0) || dualcontext) {
        /*
         * NOTE: The mcontext size does not differ between 64-bit
         * programs and 32-bit programs using 64-bit registers.
         */
        if (copyout(&mctx64, p_mctx64, (vec_used ? UC_FLAVOR64_VEC_SIZE : UC_FLAVOR64_SIZE)))
            goto bad;
    }
    if ((ctx32 == 1) || dualcontext) {
        if (copyout(&mctx, p_mctx, uctx.uc_mcsize))
            goto bad;
    }
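
    /*
     * The trampoline and the handler see these values as their incoming
     * arguments: r3 = handler ("catcher"), r4 = infostyle, r5 = signal
     * number, r6 = siginfo pointer, r7 = ucontext pointer; srr0 is set to
     * the trampoline entry point and r1 to the new stack pointer.
     */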
    /* Place our arguments in arg registers: rtm dependent */
    if (IS_64BIT_PROCESS(p)) {
        mctx64.ss.r3 = catcher;
        mctx64.ss.r4 = CAST_USER_ADDR_T(infostyle);
        mctx64.ss.r5 = CAST_USER_ADDR_T(sig);
        mctx64.ss.r6 = p_sinfo;
        mctx64.ss.r7 = p_uctx;

        mctx64.ss.srr0 = trampact;
        /* MSR_EXPORT_MASK_SET */
        mctx64.ss.srr1 = CAST_USER_ADDR_T(get_msr_exportmask());
        mctx64.ss.r1 = sp;
        state_count = PPC_THREAD_STATE64_COUNT;
        if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE64, (void *)&mctx64.ss, state_count)) != KERN_SUCCESS) {
            panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
        }
    } else {
        mctx.ss.r3 = CAST_DOWN(unsigned long,catcher);
        mctx.ss.r4 = (unsigned long)infostyle;
        mctx.ss.r5 = (unsigned long)sig;
        mctx.ss.r6 = CAST_DOWN(unsigned long,p_sinfo);
        mctx.ss.r7 = CAST_DOWN(unsigned long,p_uctx);

        mctx.ss.srr0 = CAST_DOWN(unsigned long,trampact);
        /* MSR_EXPORT_MASK_SET */
        mctx.ss.srr1 = get_msr_exportmask();
        mctx.ss.r1 = CAST_DOWN(unsigned long,sp);
        state_count = PPC_THREAD_STATE_COUNT;
        if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, (void *)&mctx.ss, state_count)) != KERN_SUCCESS) {
            panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
        }
    }
    return;
bad:
    SIGACTION(p, SIGILL) = SIG_DFL;
    sig = sigmask(SIGILL);
    p->p_sigignore &= ~sig;
    p->p_sigcatch &= ~sig;
    ut->uu_sigmask &= ~sig;
    /* sendsig is called with signal lock held */
    psignal_lock(p, SIGILL, 0);
    return;
}
/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
    struct user_ucontext64 uctx;

    char mactx[sizeof(struct mcontext64)];
    struct mcontext *p_mctx;
    struct mcontext64 *p_64mctx;
    int error;
    thread_t th_act;
    struct sigacts *ps = p->p_sigacts;
    sigset_t mask;
    user_addr_t action;
    unsigned long state_count;
    unsigned int state_flavor;
    struct uthread *ut;
    int vec_used = 0;
    void *tsptr, *fptr, *vptr;
    int infostyle = uap->infostyle;
    th_act = current_thread();

    ut = (struct uthread *)get_bsdthread_info(th_act);
    if (IS_64BIT_PROCESS(p)) {
        error = copyin(uap->uctx, &uctx, sizeof(struct user_ucontext64));
        if (error)
            return(error);
    } else {
        struct ucontext64 uctx32;

        /*
         * struct ucontext and struct ucontext64 are identical in
         * size and content; the only difference is the internal
         * pointer type for the last element, which makes no
         * difference for the copyin().
         */
        error = copyin(uap->uctx, &uctx32, sizeof(struct ucontext));
        if (error)
            return(error);

        ucontext_32to64(&uctx32, &uctx);
    }
    /* validate the machine context size */
    switch (uctx.uc_mcsize) {
    case UC_FLAVOR64_VEC_SIZE:
    case UC_FLAVOR64_SIZE:
    case UC_FLAVOR_VEC_SIZE:
    case UC_FLAVOR_SIZE:
        break;
    default:
        return(EINVAL);
    }

    /*
     * The 64 bit process mcontext is identical to the mcontext64, so
     * there is no conversion necessary.
     */
    error = copyin(uctx.uc_mcontext64, mactx, uctx.uc_mcsize);
    if (error)
        return(error);
    if ((uctx.uc_onstack & 01))
        p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
    else
        p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;

    ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
    if (ut->uu_siglist & ~ut->uu_sigmask)
        signal_setast(current_thread());
    switch (infostyle) {
    case UC_FLAVOR64_VEC:
    case UC_TRAD64_VEC:
        vec_used = 1;
        /* FALLTHROUGH */
    case UC_TRAD64:
    case UC_FLAVOR64:
        p_64mctx = (struct mcontext64 *)mactx;
        tsptr = (void *)&p_64mctx->ss;
        fptr = (void *)&p_64mctx->fs;
        vptr = (void *)&p_64mctx->vs;
        state_flavor = PPC_THREAD_STATE64;
        state_count = PPC_THREAD_STATE64_COUNT;
        break;

    case UC_FLAVOR_VEC:
        vec_used = 1;
        /* FALLTHROUGH */
    default:
        p_mctx = (struct mcontext *)mactx;
        tsptr = (void *)&p_mctx->ss;
        fptr = (void *)&p_mctx->fs;
        vptr = (void *)&p_mctx->vs;
        state_flavor = PPC_THREAD_STATE;
        state_count = PPC_THREAD_STATE_COUNT;
        break;
    }   /* switch () */
    /* validate the thread state, set/reset appropriate mode bits in srr1 */
    (void)ppc_checkthreadstate(tsptr, state_flavor);

    if (thread_setstatus(th_act, state_flavor, tsptr, state_count) != KERN_SUCCESS) {
        return(EINVAL);
    }

    state_count = PPC_FLOAT_STATE_COUNT;
    if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, state_count) != KERN_SUCCESS) {
        return(EINVAL);
    }
    mask = sigmask(SIGFPE);
    if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
        action = ps->ps_sigact[SIGFPE];
        if ((action != SIG_DFL) && (action != SIG_IGN)) {
            thread_enable_fpe(th_act, 1);
        }
    }
    if (vec_used) {
        state_count = PPC_VECTOR_STATE_COUNT;
        if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, state_count) != KERN_SUCCESS) {
            return(EINVAL);
        }
    }
    return (EJUSTRETURN);
}
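
/*
 * sigreturn() returns EJUSTRETURN rather than 0: the user registers were
 * already installed with thread_setstatus() above, so the syscall return
 * path must not overwrite r3/r4 with an ordinary return value.
 */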
/*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
 */
boolean_t
machine_exception(
    int          exception,
    int          code,
    __unused int subcode,
    int          *unix_signal,
    int          *unix_code
)
{
    switch (exception) {

    case EXC_BAD_INSTRUCTION:
        *unix_signal = SIGILL;
        *unix_code = code;
        break;

    case EXC_ARITHMETIC:
        *unix_signal = SIGFPE;
        *unix_code = code;
        break;

    case EXC_SOFTWARE:
        if (code == EXC_PPC_TRAP) {
            *unix_signal = SIGTRAP;