/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 */
#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <ppc/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>

#include <kern/thread.h>
#include <mach/ppc/thread_status.h>
#include <ppc/proc_reg.h>

// #include <machine/thread.h> XXX include path messed up for some reason...
/* XXX functions not in any Mach header */
extern kern_return_t thread_getstatus(register thread_t act, int flavor,
			thread_state_t tstate, mach_msg_type_number_t *count);
extern int is_64signalregset(void);
extern unsigned int get_msr_exportmask(void);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
			thread_state_t tstate, mach_msg_type_number_t count);
extern void ppc_checkthreadstate(void *, int);
extern struct savearea_vec *find_user_vec_curr(void);
extern int thread_enable_fpe(thread_t act, int onoff);
#define	C_32_REDZONE_LEN	224
#define	C_32_STK_ALIGN		16
#define	C_32_PARAMSAVE_LEN	64
#define	C_32_LINKAGE_LEN	48

#define	C_64_REDZONE_LEN	320
#define	C_64_STK_ALIGN		32
#define	C_64_PARAMSAVE_LEN	64
#define	C_64_LINKAGE_LEN	48

#define	TRUNC_DOWN32(a,b,c)	((((uint32_t)a)-(b)) & ((uint32_t)(-(c))))
#define	TRUNC_DOWN64(a,b,c)	((((uint64_t)a)-(b)) & ((uint64_t)(-(c))))
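
/*
 * Worked example (added for illustration, with a hypothetical stack
 * pointer): the macros reserve 'b' bytes below 'a' and then round the
 * result down to a 'c'-byte boundary (c must be a power of two), so
 *
 *	TRUNC_DOWN32(0xbffff8a7, C_32_REDZONE_LEN, C_32_STK_ALIGN)
 *	    = (0xbffff8a7 - 224) & ~(16 - 1)
 *	    = 0xbffff7c7 & 0xfffffff0
 *	    = 0xbffff7c0
 *
 * which is how sendsig() below keeps the signal frame clear of the
 * caller's red zone.
 */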

/*
 * The stack layout possibilities (info style); this needs to match the
 * signal trampoline code.
 *
 * Traditional64 with vec:	25
 * 32-bit context with vector:	35
 * 64-bit context with vector:	45
 * Dual context with vector:	55
 */

#define UC_TRAD64_VEC	25
#define UC_FLAVOR_VEC	35
#define UC_FLAVOR64	40
#define UC_FLAVOR64_VEC	45
#define UC_DUAL_VEC	55
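
/*
 * Note (added for clarity): each "with vector" flavor is the matching base
 * flavor plus 5 (e.g. UC_FLAVOR64 is 40, UC_FLAVOR64_VEC is 45).  sendsig()
 * below picks the info style from the sigaction's siginfo and 64-bit
 * register settings and from whether the process itself is 64-bit.
 */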

/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
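
/*
 * Note (added for clarity): sendsig() below sizes the context it pushes
 * from these same PPC_*_COUNT constants, and sigreturn() only accepts a
 * machine context whose uc_mcsize matches one of these flavor sizes
 * before it restores any thread state.
 */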

/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_32to64(struct ucontext64 *in, struct user_ucontext64 *out)
{
	out->uc_onstack = in->uc_onstack;
	out->uc_sigmask = in->uc_sigmask;

	/* internal "structure assign" */
	out->uc_stack.ss_sp = CAST_USER_ADDR_T(in->uc_stack.ss_sp);
	out->uc_stack.ss_size = in->uc_stack.ss_size;
	out->uc_stack.ss_flags = in->uc_stack.ss_flags;

	out->uc_link = CAST_USER_ADDR_T(in->uc_link);
	out->uc_mcsize = in->uc_mcsize;
	out->uc_mcontext64 = CAST_USER_ADDR_T(in->uc_mcontext64);
}
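
/*
 * Note (added for clarity): this is the widening half of the conversion
 * pair; sigreturn() below uses it to promote a ucontext copied in from a
 * 32-bit caller into the user_ucontext64 form the kernel operates on.
 */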

/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then its values of uc_stack.ss_size and uc_mcsize will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_64to32(struct user_ucontext64 *in, struct ucontext64 *out)
{
	out->uc_onstack = in->uc_onstack;
	out->uc_sigmask = in->uc_sigmask;

	/* internal "structure assign" */
	out->uc_stack.ss_sp = CAST_DOWN(void *, in->uc_stack.ss_sp);
	out->uc_stack.ss_size = in->uc_stack.ss_size;	/* range reduction */
	out->uc_stack.ss_flags = in->uc_stack.ss_flags;

	out->uc_link = CAST_DOWN(void *, in->uc_link);
	out->uc_mcsize = in->uc_mcsize;			/* range reduction */
	out->uc_mcontext64 = CAST_DOWN(void *, in->uc_mcontext64);
}
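
/*
 * Note (added for clarity): the narrowing direction runs on the way out;
 * sendsig() builds a user_ucontext64 and converts it with ucontext_64to32()
 * before copying it onto a 32-bit process's stack.
 */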

/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = CAST_DOWN(void *, in->si_addr);
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr = CAST_DOWN(void *, in->si_value.sival_ptr);
	out->si_band = in->si_band;		/* range reduction */
	out->pad[0] = in->pad[0];		/* mcontext.ss.r1 */
}

/*
 * Arrange for this process to run a signal handler
 */

void
sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code)
{
	struct mcontext mctx;
	user_addr_t p_mctx = USER_ADDR_NULL;	/* mcontext dest. */
	struct mcontext64 mctx64;
	user_addr_t p_mctx64 = USER_ADDR_NULL;	/* mcontext dest. */
	struct user_ucontext64 uctx;
	user_addr_t p_uctx;			/* user stack addr to copy ucontext */
	user_siginfo_t sinfo;
	user_addr_t p_sinfo;			/* user stack addr to copy siginfo */
	struct sigacts *ps = p->p_sigacts;
	mach_msg_type_number_t state_count;
	int infostyle = UC_TRAD;
	user_addr_t trampact;
	int uthsigaltstack = 0;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		infostyle = UC_FLAVOR;
	}
	if (is_64signalregset() && (infostyle == UC_FLAVOR)) {
		dualcontext = 1;
		infostyle = UC_DUAL;
	}
	if (p->p_sigacts->ps_64regset & sigmask(sig)) {
		dualcontext = 0;
		ctx32 = 0;
		infostyle = UC_FLAVOR64;
	}
	/* treat 64 bit processes as having used 64 bit registers */
	if ((IS_64BIT_PROCESS(p) || is_64signalregset()) &&
	    (infostyle == UC_TRAD)) {
		ctx32 = 0;
		infostyle = UC_TRAD64;
	}
	if (IS_64BIT_PROCESS(p)) {
		ctx32 = 0;
	}

	/* I need this for SIGINFO anyway */
	flavor = PPC_THREAD_STATE;
	tstate = (void *)&mctx.ss;
	state_count = PPC_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
		goto bad;

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_THREAD_STATE64;
		tstate = (void *)&mctx64.ss;
		state_count = PPC_THREAD_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE;
		tstate = (void *)&mctx.es;
		state_count = PPC_EXCEPTION_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE64;
		tstate = (void *)&mctx64.es;
		state_count = PPC_EXCEPTION_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx64.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if (find_user_vec_curr()) {
		vec_used = 1;

		if ((ctx32 == 1) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
		}
		if ((ctx32 == 0) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx64.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
		}
	}

	trampact = ps->ps_trampact[sig];
	uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;

	if (uthsigaltstack != 0) {
		oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
		altstack = ut->uu_flag & UT_ALTSTACK;
	} else {
		oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;
		altstack = ps->ps_flags & SAS_ALTSTACK;
	}

	/* figure out where our new stack lives */
	if (altstack && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		if (uthsigaltstack != 0) {
			sp = ut->uu_sigstk.ss_sp;
			sp += ut->uu_sigstk.ss_size;
			stack_size = ut->uu_sigstk.ss_size;
			ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		} else {
			sp = ps->ps_sigstk.ss_sp;
			sp += ps->ps_sigstk.ss_size;
			stack_size = ps->ps_sigstk.ss_size;
			ps->ps_sigstk.ss_flags |= SA_ONSTACK;
		}
	} else {
		sp = CAST_USER_ADDR_T(mctx.ss.r1);
	}

	/* put siginfo on top */

	/* preserve RED ZONE area */
	if (IS_64BIT_PROCESS(p))
		sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
	else
		sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);

	/* next are the saved registers */
	if ((ctx32 == 0) || dualcontext) {
		sp -= sizeof(struct mcontext64);
		p_mctx64 = sp;
	}
	if ((ctx32 == 1) || dualcontext) {
		sp -= sizeof(struct mcontext);
		p_mctx = sp;
	}

	if (IS_64BIT_PROCESS(p)) {
		/* context goes first on stack */
		sp -= sizeof(struct user_ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(user_siginfo_t);
		p_sinfo = sp;

		sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
	} else {
		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyout().
		 */

		/* context goes first on stack */
		sp -= sizeof(struct ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(siginfo_t);
		p_sinfo = sp;

		sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
	}

	uctx.uc_onstack = oonstack;
	uctx.uc_sigmask = mask;
	uctx.uc_stack.ss_sp = sp;
	uctx.uc_stack.ss_size = stack_size;
	if (oonstack)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;

	if (ctx32 == 0)
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
	else
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));

	if (vec_used)
		uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));

	if (ctx32 == 0)
		uctx.uc_mcontext64 = p_mctx64;
	else
		uctx.uc_mcontext64 = p_mctx;

	bzero((caddr_t)&sinfo, sizeof(user_siginfo_t));
	sinfo.si_signo = sig;
	if (ctx32 == 0) {
		sinfo.si_addr = mctx64.ss.srr0;
		sinfo.pad[0] = mctx64.ss.r1;
	} else {
		sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
		sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
	}

	switch (sig) {
		case SIGCHLD:
			sinfo.si_pid = p->si_pid;
			sinfo.si_status = p->si_status;
			sinfo.si_uid = p->si_uid;
			sinfo.si_code = p->si_code;
			if (sinfo.si_code == CLD_EXITED) {
				if (WIFEXITED(sinfo.si_status))
					sinfo.si_code = CLD_EXITED;
				else if (WIFSIGNALED(sinfo.si_status)) {
					if (WCOREDUMP(sinfo.si_status))
						sinfo.si_code = CLD_DUMPED;
					else
						sinfo.si_code = CLD_KILLED;
				}
			}
			break;
		case SIGILL:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 instead.
			 */
			if (ctx32 == 0) {
				if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			} else {
				if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 instead.
			 */
			if (ctx32 == 0) {
				if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			} else {
				if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			}
			break;
		case SIGBUS:
			if (ctx32 == 0) {
				sinfo.si_addr = mctx64.es.dar;
			} else {
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
			}
			/* on ppc we generate only if EXC_PPC_UNALIGNED */
			sinfo.si_code = BUS_ADRALN;
			break;
		case SIGSEGV:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 instead.
			 */
			if (ctx32 == 0) {
				sinfo.si_addr = mctx64.es.dar;
				/* First check in srr1 and then in dsisr */
				if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			} else {
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
				/* First check in srr1 and then in dsisr */
				if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			}
			break;
		default:
			break;
	}

	/* copy info out to user space */
	if (IS_64BIT_PROCESS(p)) {
		if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
			goto bad;
		if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t)))
			goto bad;
	} else {
		struct ucontext64 uctx32;
		siginfo_t sinfo32;

		ucontext_64to32(&uctx, &uctx32);
		if (copyout(&uctx32, p_uctx, sizeof(struct ucontext64)))
			goto bad;

		siginfo_64to32(&sinfo, &sinfo32);
		if (copyout(&sinfo32, p_sinfo, sizeof(siginfo_t)))
			goto bad;
	}
	if ((ctx32 == 0) || dualcontext) {
		/*
		 * NOTE: Size of the mcontext does not vary between 64-bit and
		 * 32-bit programs using 64-bit registers.
		 */
		if (copyout(&mctx64, p_mctx64, (vec_used ? UC_FLAVOR64_VEC_SIZE : UC_FLAVOR64_SIZE)))
			goto bad;
	}
	if ((ctx32 == 1) || dualcontext) {
		if (copyout(&mctx, p_mctx, uctx.uc_mcsize))
			goto bad;
	}

	/* Place our arguments in arg registers: rtm dependent */
	if (IS_64BIT_PROCESS(p)) {
		mctx64.ss.r3 = catcher;
		mctx64.ss.r4 = CAST_USER_ADDR_T(infostyle);
		mctx64.ss.r5 = CAST_USER_ADDR_T(sig);
		mctx64.ss.r6 = p_sinfo;
		mctx64.ss.r7 = p_uctx;

		mctx64.ss.srr0 = trampact;
		/* MSR_EXPORT_MASK_SET */
		mctx64.ss.srr1 = CAST_USER_ADDR_T(get_msr_exportmask());
		mctx64.ss.r1 = sp;
		state_count = PPC_THREAD_STATE64_COUNT;
		if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE64, (void *)&mctx64.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	} else {
		mctx.ss.r3 = CAST_DOWN(unsigned long, catcher);
		mctx.ss.r4 = (unsigned long)infostyle;
		mctx.ss.r5 = (unsigned long)sig;
		mctx.ss.r6 = CAST_DOWN(unsigned long, p_sinfo);
		mctx.ss.r7 = CAST_DOWN(unsigned long, p_uctx);

		mctx.ss.srr0 = CAST_DOWN(unsigned long, trampact);
		/* MSR_EXPORT_MASK_SET */
		mctx.ss.srr1 = get_msr_exportmask();
		mctx.ss.r1 = CAST_DOWN(unsigned long, sp);
		state_count = PPC_THREAD_STATE_COUNT;
		if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, (void *)&mctx.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	}

	return;

bad:
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	psignal_lock(p, SIGILL, 0);
	return;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	struct user_ucontext64 uctx;

	char mactx[sizeof(struct mcontext64)];
	struct mcontext *p_mctx;
	struct mcontext64 *p_64mctx;
	struct sigacts *ps = p->p_sigacts;
	unsigned long state_count;
	unsigned int state_flavor;
	void *tsptr, *fptr, *vptr;
	int infostyle = uap->infostyle;
	int uthsigaltstack = 0;

	th_act = current_thread();
	ut = (struct uthread *)get_bsdthread_info(th_act);

	if (IS_64BIT_PROCESS(p)) {
		error = copyin(uap->uctx, &uctx, sizeof(struct user_ucontext64));
		if (error)
			return (error);
	} else {
		struct ucontext64 uctx32;

		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyin().
		 */
		error = copyin(uap->uctx, &uctx32, sizeof(struct ucontext));
		if (error)
			return (error);

		ucontext_32to64(&uctx32, &uctx);
	}

	/* validate the machine context size */
	switch (uctx.uc_mcsize) {
		case UC_FLAVOR64_VEC_SIZE:
		case UC_FLAVOR64_SIZE:
		case UC_FLAVOR_VEC_SIZE:
		case UC_FLAVOR_SIZE:
			break;
		default:
			return (EINVAL);
	}

	/*
	 * The 64 bit process mcontext is identical to the mcontext64, so
	 * there is no conversion necessary.
	 */
	error = copyin(uctx.uc_mcontext64, mactx, uctx.uc_mcsize);
	if (error)
		return (error);

	uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;

	if (uctx.uc_onstack & 01) {
		if (uthsigaltstack != 0)
			ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		else
			p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		if (uthsigaltstack != 0)
			ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		else
			p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	}

	ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(current_thread());

	switch (infostyle) {
		case UC_FLAVOR64_VEC:
		case UC_TRAD64_VEC:
			vec_used = 1;
			/* FALLTHROUGH */
		case UC_TRAD64:
		case UC_FLAVOR64:
			p_64mctx = (struct mcontext64 *)mactx;
			tsptr = (void *)&p_64mctx->ss;
			fptr = (void *)&p_64mctx->fs;
			vptr = (void *)&p_64mctx->vs;
			state_flavor = PPC_THREAD_STATE64;
			state_count = PPC_THREAD_STATE64_COUNT;
			break;
		case UC_FLAVOR_VEC:
		case UC_TRAD_VEC:
			vec_used = 1;
			/* FALLTHROUGH */
		default:
			p_mctx = (struct mcontext *)mactx;
			tsptr = (void *)&p_mctx->ss;
			fptr = (void *)&p_mctx->fs;
			vptr = (void *)&p_mctx->vs;
			state_flavor = PPC_THREAD_STATE;
			state_count = PPC_THREAD_STATE_COUNT;
			break;
	}

	/* validate the thread state, set/reset appropriate mode bits in srr1 */
	(void)ppc_checkthreadstate(tsptr, state_flavor);

	if (thread_setstatus(th_act, state_flavor, tsptr, state_count) != KERN_SUCCESS) {
		return (EINVAL);
	}

	state_count = PPC_FLOAT_STATE_COUNT;
	if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, state_count) != KERN_SUCCESS) {
		return (EINVAL);
	}

	mask = sigmask(SIGFPE);
	if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
		action = ps->ps_sigact[SIGFPE];
		if ((action != SIG_DFL) && (action != SIG_IGN)) {
			thread_enable_fpe(th_act, 1);
		}
	}

	if (vec_used) {
		state_count = PPC_VECTOR_STATE_COUNT;
		if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, state_count) != KERN_SUCCESS) {
			return (EINVAL);
		}
	}

	return (EJUSTRETURN);
}

/*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
 */
boolean_t
machine_exception(int exception, int code, __unused int subcode,
		  int *unix_signal, int *unix_code)
{
	switch (exception) {
		case EXC_BAD_INSTRUCTION:
			*unix_signal = SIGILL;
			*unix_code = code;
			break;
		case EXC_ARITHMETIC:
			*unix_signal = SIGFPE;
			*unix_code = code;
			break;
		case EXC_SOFTWARE:
			if (code == EXC_PPC_TRAP) {
				*unix_signal = SIGTRAP;
				*unix_code = code;
				break;
			}
			return (FALSE);
		default:
			return (FALSE);
	}
	return (TRUE);
}