/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <ppc/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/wait.h>
#include <kern/thread.h>
#include <mach/ppc/thread_status.h>
#include <ppc/proc_reg.h>

// #include <machine/thread.h> XXX include path messed up for some reason...

/* XXX functions not declared in a Mach header */
extern kern_return_t thread_getstatus(register thread_t act, int flavor,
        thread_state_t tstate, mach_msg_type_number_t *count);
extern int is_64signalregset(void);
extern unsigned int get_msr_exportmask(void);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
        thread_state_t tstate, mach_msg_type_number_t count);
extern void ppc_checkthreadstate(void *, int);
extern struct savearea_vec *find_user_vec_curr(void);
extern int thread_enable_fpe(thread_t act, int onoff);


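/*
 * Sizes used when carving the signal frame on the user stack: the red
 * zone that must be preserved below the interrupted stack pointer, the
 * required stack alignment, and the C parameter save and linkage areas
 * reserved below the signal data, for the 32-bit and 64-bit ABIs.
 */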
#define C_32_REDZONE_LEN	224
#define C_32_STK_ALIGN		16
#define C_32_PARAMSAVE_LEN	64
#define C_32_LINKAGE_LEN	48

#define C_64_REDZONE_LEN	320
#define C_64_STK_ALIGN		32
#define C_64_PARAMSAVE_LEN	64
#define C_64_LINKAGE_LEN	48

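/*
 * TRUNC_DOWN32/64(a, b, c): carve 'b' bytes off address 'a' and round
 * the result down to a multiple of 'c' (which must be a power of two).
 */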
#define TRUNC_DOWN32(a,b,c)	((((uint32_t)a)-(b)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a,b,c)	((((uint64_t)a)-(b)) & ((uint64_t)(-(c))))

/*
 * The stack layout possibilities (info style); this needs to match the
 * signal trampoline code.
 *
 * Traditional:			 1
 * Traditional with vector:	 6
 * Traditional64:		20
 * Traditional64 with vector:	25
 * 32-bit context:		30
 * 32-bit context with vector:	35
 * 64-bit context:		40
 * 64-bit context with vector:	45
 * Dual context:		50
 * Dual context with vector:	55
 */

#define UC_TRAD			1
#define UC_TRAD_VEC		6
#define UC_TRAD64		20
#define UC_TRAD64_VEC		25
#define UC_FLAVOR		30
#define UC_FLAVOR_VEC		35
#define UC_FLAVOR64		40
#define UC_FLAVOR64_VEC		45
#define UC_DUAL			50
#define UC_DUAL_VEC		55
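/*
 * Each *_VEC value is its base flavor plus 5; sendsig() below adds 5 to
 * infostyle when the thread has live vector (AltiVec) state.
 */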

/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))


/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_32to64(struct ucontext64 *in, struct user_ucontext64 *out)
{
    out->uc_onstack = in->uc_onstack;
    out->uc_sigmask = in->uc_sigmask;

    /* internal "structure assign" */
    out->uc_stack.ss_sp = CAST_USER_ADDR_T(in->uc_stack.ss_sp);
    out->uc_stack.ss_size = in->uc_stack.ss_size;
    out->uc_stack.ss_flags = in->uc_stack.ss_flags;

    out->uc_link = CAST_USER_ADDR_T(in->uc_link);
    out->uc_mcsize = in->uc_mcsize;
    out->uc_mcontext64 = CAST_USER_ADDR_T(in->uc_mcontext64);
}

/*
 * This conversion is safe, since if we are converting for a 32-bit process,
 * then its values of uc_stack.ss_size and uc_mcsize will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_64to32(struct user_ucontext64 *in, struct ucontext64 *out)
{
    out->uc_onstack = in->uc_onstack;
    out->uc_sigmask = in->uc_sigmask;

    /* internal "structure assign" */
    out->uc_stack.ss_sp = CAST_DOWN(void *,in->uc_stack.ss_sp);
    out->uc_stack.ss_size = in->uc_stack.ss_size;	/* range reduction */
    out->uc_stack.ss_flags = in->uc_stack.ss_flags;

    out->uc_link = CAST_DOWN(void *,in->uc_link);
    out->uc_mcsize = in->uc_mcsize;			/* range reduction */
    out->uc_mcontext64 = CAST_DOWN(void *,in->uc_mcontext64);
}

/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
{
    out->si_signo = in->si_signo;
    out->si_errno = in->si_errno;
    out->si_code = in->si_code;
    out->si_pid = in->si_pid;
    out->si_uid = in->si_uid;
    out->si_status = in->si_status;
    out->si_addr = CAST_DOWN(void *,in->si_addr);
    /* following cast works for sival_int because of padding */
    out->si_value.sival_ptr = CAST_DOWN(void *,in->si_value.sival_ptr);
    out->si_band = in->si_band;		/* range reduction */
    out->pad[0] = in->pad[0];		/* mcontext.ss.r1 */
}


/*
 * Arrange for this process to run a signal handler.
 */

void
sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code)
{
    kern_return_t kretn;
    struct mcontext mctx;
    user_addr_t p_mctx = USER_ADDR_NULL;	/* mcontext dest. */
    struct mcontext64 mctx64;
    user_addr_t p_mctx64 = USER_ADDR_NULL;	/* mcontext dest. */
    struct user_ucontext64 uctx;
    user_addr_t p_uctx;		/* user stack addr to copy the ucontext to */
    user_siginfo_t sinfo;
    user_addr_t p_sinfo;	/* user stack addr to copy the siginfo to */
    struct sigacts *ps = p->p_sigacts;
    int oonstack;
    user_addr_t sp;
    mach_msg_type_number_t state_count;
    thread_t th_act;
    struct uthread *ut;
    int infostyle = UC_TRAD;
    int dualcontext = 0;
    user_addr_t trampact;
    int vec_used = 0;
    int stack_size = 0;
    void * tstate;
    int flavor;
    int ctx32 = 1;

    th_act = current_thread();
    ut = get_bsdthread_info(th_act);

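    /*
     * Pick the signal frame format: SA_SIGINFO handlers get a full
     * context (UC_FLAVOR); if such a handler runs in a 32-bit process
     * whose thread carries 64-bit register state, both 32- and 64-bit
     * contexts are built (UC_DUAL); an explicit 64-bit register-set
     * request or a 64-bit process gets the 64-bit forms only.
     */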
    if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
        infostyle = UC_FLAVOR;
    }
    if (is_64signalregset() && (infostyle == UC_FLAVOR)) {
        dualcontext = 1;
        infostyle = UC_DUAL;
    }
    if (p->p_sigacts->ps_64regset & sigmask(sig)) {
        dualcontext = 0;
        ctx32 = 0;
        infostyle = UC_FLAVOR64;
    }
    /* treat 64 bit processes as having used 64 bit registers */
    if ((IS_64BIT_PROCESS(p) || is_64signalregset()) &&
        (infostyle == UC_TRAD)) {
        ctx32 = 0;
        infostyle = UC_TRAD64;
    }
    if (IS_64BIT_PROCESS(p)) {
        ctx32 = 0;
        dualcontext = 0;
    }

    /* I need this for SIGINFO anyway */
    flavor = PPC_THREAD_STATE;
    tstate = (void *)&mctx.ss;
    state_count = PPC_THREAD_STATE_COUNT;
    if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
        goto bad;

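    /*
     * Gather the remaining thread state.  When dualcontext is set, both
     * the 32-bit (mctx) and 64-bit (mctx64) images are filled in and
     * both are copied out to the user stack below.
     */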
    if ((ctx32 == 0) || dualcontext) {
        flavor = PPC_THREAD_STATE64;
        tstate = (void *)&mctx64.ss;
        state_count = PPC_THREAD_STATE64_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 1) || dualcontext) {
        flavor = PPC_EXCEPTION_STATE;
        tstate = (void *)&mctx.es;
        state_count = PPC_EXCEPTION_STATE_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 0) || dualcontext) {
        flavor = PPC_EXCEPTION_STATE64;
        tstate = (void *)&mctx64.es;
        state_count = PPC_EXCEPTION_STATE64_COUNT;

        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 1) || dualcontext) {
        flavor = PPC_FLOAT_STATE;
        tstate = (void *)&mctx.fs;
        state_count = PPC_FLOAT_STATE_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 0) || dualcontext) {
        flavor = PPC_FLOAT_STATE;
        tstate = (void *)&mctx64.fs;
        state_count = PPC_FLOAT_STATE_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if (find_user_vec_curr()) {
        vec_used = 1;

        if ((ctx32 == 1) || dualcontext) {
            flavor = PPC_VECTOR_STATE;
            tstate = (void *)&mctx.vs;
            state_count = PPC_VECTOR_STATE_COUNT;
            if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
                goto bad;
            infostyle += 5;
        }

        if ((ctx32 == 0) || dualcontext) {
            flavor = PPC_VECTOR_STATE;
            tstate = (void *)&mctx64.vs;
            state_count = PPC_VECTOR_STATE_COUNT;
            if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
                goto bad;
            infostyle += 5;
        }
    }

    trampact = ps->ps_trampact[sig];
    oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

    /* figure out where our new stack lives */
    if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
        (ps->ps_sigonstack & sigmask(sig))) {
        sp = ps->ps_sigstk.ss_sp;
        sp += ps->ps_sigstk.ss_size;
        stack_size = ps->ps_sigstk.ss_size;
        ps->ps_sigstk.ss_flags |= SA_ONSTACK;
    }
    else {
        if (ctx32 == 0)
            sp = mctx64.ss.r1;
        else
            sp = CAST_USER_ADDR_T(mctx.ss.r1);
    }

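    /*
     * Lay out the signal frame, working down from sp: skip the red
     * zone, then place the mcontext64 and/or mcontext, the ucontext,
     * and the siginfo, and finally leave room for the C parameter
     * save and linkage areas, keeping the ABI stack alignment.
     */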
    /* put siginfo on top */

    /* preserve RED ZONE area */
    if (IS_64BIT_PROCESS(p))
        sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
    else
        sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);

    /* next are the saved registers */
    if ((ctx32 == 0) || dualcontext) {
        sp -= sizeof(struct mcontext64);
        p_mctx64 = sp;
    }
    if ((ctx32 == 1) || dualcontext) {
        sp -= sizeof(struct mcontext);
        p_mctx = sp;
    }

    if (IS_64BIT_PROCESS(p)) {
        /* context goes first on stack */
        sp -= sizeof(struct user_ucontext64);
        p_uctx = sp;

        /* this is where siginfo goes on stack */
        sp -= sizeof(user_siginfo_t);
        p_sinfo = sp;

        sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
    } else {
        /*
         * struct ucontext and struct ucontext64 are identical in
         * size and content; the only difference is the internal
         * pointer type for the last element, which makes no
         * difference for the copyout().
         */

        /* context goes first on stack */
        sp -= sizeof(struct ucontext64);
        p_uctx = sp;

        /* this is where siginfo goes on stack */
        sp -= sizeof(siginfo_t);
        p_sinfo = sp;

        sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
    }

    uctx.uc_onstack = oonstack;
    uctx.uc_sigmask = mask;
    uctx.uc_stack.ss_sp = sp;
    uctx.uc_stack.ss_size = stack_size;
    if (oonstack)
        uctx.uc_stack.ss_flags |= SS_ONSTACK;

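    /*
     * uc_mcsize must end up equal to one of the UC_FLAVOR*_SIZE values
     * defined above; sigreturn() rejects any other size with EINVAL.
     */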
    uctx.uc_link = 0;
    if (ctx32 == 0)
        uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
    else
        uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));

    if (vec_used)
        uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));

    if (ctx32 == 0)
        uctx.uc_mcontext64 = p_mctx64;
    else
        uctx.uc_mcontext64 = p_mctx;

    /* setup siginfo */
    bzero((caddr_t)&sinfo, sizeof(user_siginfo_t));
    sinfo.si_signo = sig;
    if (ctx32 == 0) {
        sinfo.si_addr = mctx64.ss.srr0;
        sinfo.pad[0] = mctx64.ss.r1;
    } else {
        sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
        sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
    }

    switch (sig) {
    case SIGCHLD:
        sinfo.si_pid = p->si_pid;
        p->si_pid = 0;
        sinfo.si_status = p->si_status;
        p->si_status = 0;
        sinfo.si_uid = p->si_uid;
        p->si_uid = 0;
        sinfo.si_code = p->si_code;
        p->si_code = 0;
        if (sinfo.si_code == CLD_EXITED) {
            if (WIFEXITED(sinfo.si_status))
                sinfo.si_code = CLD_EXITED;
            else if (WIFSIGNALED(sinfo.si_status)) {
                if (WCOREDUMP(sinfo.si_status))
                    sinfo.si_code = CLD_DUMPED;
                else
                    sinfo.si_code = CLD_KILLED;
            }
        }
        break;
    case SIGILL:
        /*
         * If it's 64 bit and not a dual context, mctx will
         * contain uninitialized data, so we have to use
         * mctx64 here.
         */
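        /*
         * SRR1 bits are numbered in IBM (big-endian) convention, so
         * bit n is tested with 1 << (31 - n).
         */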
        if (ctx32 == 0) {
            if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
                sinfo.si_code = ILL_ILLOPC;
            else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
                sinfo.si_code = ILL_PRVOPC;
            else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
                sinfo.si_code = ILL_ILLTRP;
            else
                sinfo.si_code = ILL_NOOP;
        } else {
            if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
                sinfo.si_code = ILL_ILLOPC;
            else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
                sinfo.si_code = ILL_PRVOPC;
            else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
                sinfo.si_code = ILL_ILLTRP;
            else
                sinfo.si_code = ILL_NOOP;
        }
        break;
    case SIGFPE:
#define FPSCR_VX	2
#define FPSCR_OX	3
#define FPSCR_UX	4
#define FPSCR_ZX	5
#define FPSCR_XX	6
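        /*
         * FPSCR bits likewise use IBM bit numbering (bit 0 is the MSB),
         * hence the 1 << (31 - FPSCR_x) tests below.
         */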
        /*
         * If it's 64 bit and not a dual context, mctx will
         * contain uninitialized data, so we have to use
         * mctx64 here.
         */
        if (ctx32 == 0) {
            if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
                sinfo.si_code = FPE_FLTINV;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
                sinfo.si_code = FPE_FLTOVF;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
                sinfo.si_code = FPE_FLTUND;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
                sinfo.si_code = FPE_FLTDIV;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
                sinfo.si_code = FPE_FLTRES;
            else
                sinfo.si_code = FPE_NOOP;
        } else {
            if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
                sinfo.si_code = FPE_FLTINV;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
                sinfo.si_code = FPE_FLTOVF;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
                sinfo.si_code = FPE_FLTUND;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
                sinfo.si_code = FPE_FLTDIV;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
                sinfo.si_code = FPE_FLTRES;
            else
                sinfo.si_code = FPE_NOOP;
        }
        break;

    case SIGBUS:
        if (ctx32 == 0) {
            sinfo.si_addr = mctx64.es.dar;
        } else {
            sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
        }
        /* on ppc we generate only if EXC_PPC_UNALIGNED */
        sinfo.si_code = BUS_ADRALN;
        break;

    case SIGSEGV:
        /*
         * If it's 64 bit and not a dual context, mctx will
         * contain uninitialized data, so we have to use
         * mctx64 here.
         */
        if (ctx32 == 0) {
            sinfo.si_addr = mctx64.es.dar;
            /* First check in srr1 and then in dsisr */
            if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else
                sinfo.si_code = SEGV_MAPERR;
        } else {
            sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
            /* First check in srr1 and then in dsisr */
            if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else
                sinfo.si_code = SEGV_MAPERR;
        }
        break;
    default:
        break;
    }


    /* copy info out to user space */
    if (IS_64BIT_PROCESS(p)) {
        if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
            goto bad;
        if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t)))
            goto bad;
    } else {
        struct ucontext64 uctx32;
        siginfo_t sinfo32;

        ucontext_64to32(&uctx, &uctx32);
        if (copyout(&uctx32, p_uctx, sizeof(struct ucontext64)))
            goto bad;

        siginfo_64to32(&sinfo, &sinfo32);
        if (copyout(&sinfo32, p_sinfo, sizeof(siginfo_t)))
            goto bad;
    }
    if ((ctx32 == 0) || dualcontext) {
        /*
         * NOTE: The size of the mcontext does not vary between 64-bit
         * processes and 32-bit programs using 64-bit registers.
         */
        if (copyout(&mctx64, p_mctx64, (vec_used ? UC_FLAVOR64_VEC_SIZE : UC_FLAVOR64_SIZE)))
            goto bad;
    }
    if ((ctx32 == 1) || dualcontext) {
        if (copyout(&mctx, p_mctx, uctx.uc_mcsize))
            goto bad;
    }


    /* Place our arguments in arg registers: rtm dependent */
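    /*
     * r3..r7 are the first five integer argument registers in the
     * PowerPC calling convention, so the user-mode trampoline at
     * 'trampact' starts as if called as (illustrative prototype only)
     *
     *     trampoline(catcher, infostyle, sig, siginfo, ucontext);
     *
     * it is expected to invoke the handler and then enter sigreturn()
     * with the same infostyle.
     */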
    if (IS_64BIT_PROCESS(p)) {
        mctx64.ss.r3 = catcher;
        mctx64.ss.r4 = CAST_USER_ADDR_T(infostyle);
        mctx64.ss.r5 = CAST_USER_ADDR_T(sig);
        mctx64.ss.r6 = p_sinfo;
        mctx64.ss.r7 = p_uctx;

        mctx64.ss.srr0 = trampact;
        /* MSR_EXPORT_MASK_SET */
        mctx64.ss.srr1 = CAST_USER_ADDR_T(get_msr_exportmask());
        mctx64.ss.r1 = sp;
        state_count = PPC_THREAD_STATE64_COUNT;
        if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE64, (void *)&mctx64.ss, state_count)) != KERN_SUCCESS) {
            panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
        }
    } else {
        mctx.ss.r3 = CAST_DOWN(unsigned long, catcher);
        mctx.ss.r4 = (unsigned long)infostyle;
        mctx.ss.r5 = (unsigned long)sig;
        mctx.ss.r6 = CAST_DOWN(unsigned long, p_sinfo);
        mctx.ss.r7 = CAST_DOWN(unsigned long, p_uctx);

        mctx.ss.srr0 = CAST_DOWN(unsigned long, trampact);
        /* MSR_EXPORT_MASK_SET */
        mctx.ss.srr1 = get_msr_exportmask();
        mctx.ss.r1 = CAST_DOWN(unsigned long, sp);
        state_count = PPC_THREAD_STATE_COUNT;
        if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, (void *)&mctx.ss, state_count)) != KERN_SUCCESS) {
            panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
        }
    }
    return;

bad:
    SIGACTION(p, SIGILL) = SIG_DFL;
    sig = sigmask(SIGILL);
    p->p_sigignore &= ~sig;
    p->p_sigcatch &= ~sig;
    ut->uu_sigmask &= ~sig;
    /* sendsig is called with signal lock held */
    psignal_lock(p, SIGILL, 0);
    return;
}

/*
 * System call to clean up state after a signal has been taken.  Reset
 * the signal mask and stack state from the context left by sendsig
 * (above).  Return to the previous pc and psl as specified by that
 * context.  Check carefully to make sure that the user has not
 * modified the psl to gain improper privileges or to cause a machine
 * fault.
 */

/* ARGSUSED */
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
    struct user_ucontext64 uctx;

    char mactx[sizeof(struct mcontext64)];
    struct mcontext *p_mctx;
    struct mcontext64 *p_64mctx;
    int error;
    thread_t th_act;
    struct sigacts *ps = p->p_sigacts;
    sigset_t mask;
    user_addr_t action;
    unsigned long state_count;
    unsigned int state_flavor;
    struct uthread * ut;
    int vec_used = 0;
    void *tsptr, *fptr, *vptr;
    int infostyle = uap->infostyle;

    th_act = current_thread();

    ut = (struct uthread *)get_bsdthread_info(th_act);
    if (IS_64BIT_PROCESS(p)) {
        error = copyin(uap->uctx, &uctx, sizeof(struct user_ucontext64));
        if (error)
            return(error);
    } else {
        struct ucontext64 uctx32;

        /*
         * struct ucontext and struct ucontext64 are identical in
         * size and content; the only difference is the internal
         * pointer type for the last element, which makes no
         * difference for the copyin().
         */
        error = copyin(uap->uctx, &uctx32, sizeof(struct ucontext));
        if (error)
            return(error);
        ucontext_32to64(&uctx32, &uctx);
    }


    /* validate the machine context size */
    switch (uctx.uc_mcsize) {
    case UC_FLAVOR64_VEC_SIZE:
    case UC_FLAVOR64_SIZE:
    case UC_FLAVOR_VEC_SIZE:
    case UC_FLAVOR_SIZE:
        break;
    default:
        return(EINVAL);
    }

    /*
     * The 64 bit process mcontext is identical to the mcontext64, so
     * there is no conversion necessary.
     */
    error = copyin(uctx.uc_mcontext64, mactx, uctx.uc_mcsize);
    if (error)
        return(error);

    if ((uctx.uc_onstack & 01))
        p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
    else
        p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;

    ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
    if (ut->uu_siglist & ~ut->uu_sigmask)
        signal_setast(current_thread());

    vec_used = 0;
    switch (infostyle) {
    case UC_FLAVOR64_VEC:
    case UC_TRAD64_VEC:
        vec_used = 1;
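        /* FALLTHROUGH */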
    case UC_TRAD64:
    case UC_FLAVOR64: {
        p_64mctx = (struct mcontext64 *)mactx;
        tsptr = (void *)&p_64mctx->ss;
        fptr = (void *)&p_64mctx->fs;
        vptr = (void *)&p_64mctx->vs;
        state_flavor = PPC_THREAD_STATE64;
        state_count = PPC_THREAD_STATE64_COUNT;
        }
        break;
    case UC_FLAVOR_VEC:
    case UC_TRAD_VEC:
        vec_used = 1;
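        /* FALLTHROUGH */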
    case UC_FLAVOR:
    case UC_TRAD:
    default: {
        p_mctx = (struct mcontext *)mactx;
        tsptr = (void *)&p_mctx->ss;
        fptr = (void *)&p_mctx->fs;
        vptr = (void *)&p_mctx->vs;
        state_flavor = PPC_THREAD_STATE;
        state_count = PPC_THREAD_STATE_COUNT;
        }
        break;
    } /* switch () */

    /* validate the thread state, set/reset appropriate mode bits in srr1 */
    (void)ppc_checkthreadstate(tsptr, state_flavor);

    if (thread_setstatus(th_act, state_flavor, tsptr, state_count) != KERN_SUCCESS) {
        return(EINVAL);
    }

    state_count = PPC_FLOAT_STATE_COUNT;
    if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, state_count) != KERN_SUCCESS) {
        return(EINVAL);
    }

    mask = sigmask(SIGFPE);
    if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
        action = ps->ps_sigact[SIGFPE];
        if ((action != SIG_DFL) && (action != SIG_IGN)) {
            thread_enable_fpe(th_act, 1);
        }
    }

    if (vec_used) {
        state_count = PPC_VECTOR_STATE_COUNT;
        if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, state_count) != KERN_SUCCESS) {
            return(EINVAL);
        }
    }
    return (EJUSTRETURN);
}

/*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
 */

boolean_t
machine_exception(
    int		exception,
    int		code,
    __unused int subcode,
    int		*unix_signal,
    int		*unix_code
)
{
    switch (exception) {

    case EXC_BAD_INSTRUCTION:
        *unix_signal = SIGILL;
        *unix_code = code;
        break;

    case EXC_ARITHMETIC:
        *unix_signal = SIGFPE;
        *unix_code = code;
        break;

    case EXC_SOFTWARE:
        if (code == EXC_PPC_TRAP) {
            *unix_signal = SIGTRAP;
            *unix_code = code;
            break;
        } else
            return(FALSE);

    default:
        return(FALSE);
    }

    return(TRUE);
}