]> git.saurik.com Git - apple/xnu.git/blob - bsd/dev/ppc/unix_signal.c
xnu-792.2.4.tar.gz
[apple/xnu.git] / bsd / dev / ppc / unix_signal.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
24 */
25
26 #include <mach/mach_types.h>
27 #include <mach/exception_types.h>
28
29 #include <sys/param.h>
30 #include <sys/proc_internal.h>
31 #include <sys/user.h>
32 #include <sys/ucontext.h>
33 #include <sys/sysproto.h>
34 #include <sys/systm.h>
35 #include <sys/ux_exception.h>
36
37 #include <ppc/signal.h>
38 #include <sys/signalvar.h>
39 #include <sys/kdebug.h>
40 #include <sys/wait.h>
41 #include <kern/thread.h>
42 #include <mach/ppc/thread_status.h>
43 #include <ppc/proc_reg.h>
44
45 // #include <machine/thread.h> XXX include path messed up for some reason...
46
47 /* XXX functions not in a Mach headers */
48 extern kern_return_t thread_getstatus(register thread_t act, int flavor,
49 thread_state_t tstate, mach_msg_type_number_t *count);
50 extern int is_64signalregset(void);
51 extern unsigned int get_msr_exportmask(void);
52 extern kern_return_t thread_setstatus(thread_t thread, int flavor,
53 thread_state_t tstate, mach_msg_type_number_t count);
54 extern void ppc_checkthreadstate(void *, int);
55 extern struct savearea_vec *find_user_vec_curr(void);
56 extern int thread_enable_fpe(thread_t act, int onoff);
57
58
59
60 #define C_32_REDZONE_LEN 224
61 #define C_32_STK_ALIGN 16
62 #define C_32_PARAMSAVE_LEN 64
63 #define C_32_LINKAGE_LEN 48
64
65 #define C_64_REDZONE_LEN 320
66 #define C_64_STK_ALIGN 32
67 #define C_64_PARAMSAVE_LEN 64
68 #define C_64_LINKAGE_LEN 48
69
70 #define TRUNC_DOWN32(a,b,c) ((((uint32_t)a)-(b)) & ((uint32_t)(-(c))))
71 #define TRUNC_DOWN64(a,b,c) ((((uint64_t)a)-(b)) & ((uint64_t)(-(c))))
72
/*
 * The stack layout possibilities (info style); this needs to match the
 * signal trampoline code
 *
 * Traditional:			1
 * Traditional with vec:	6
 * Traditional64:		20
 * Traditional64 with vec:	25
 * 32bit context		30
 * 32bit context with vector	35
 * 64bit context		40
 * 64bit context with vector	45
 * Dual context			50
 * Dual context with vector	55
 *
 */
87
88 #define UC_TRAD 1
89 #define UC_TRAD_VEC 6
90 #define UC_TRAD64 20
91 #define UC_TRAD64_VEC 25
92 #define UC_FLAVOR 30
93 #define UC_FLAVOR_VEC 35
94 #define UC_FLAVOR64 40
95 #define UC_FLAVOR64_VEC 45
96 #define UC_DUAL 50
97 #define UC_DUAL_VEC 55
98
99 /* The following are valid mcontext sizes */
100 #define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
101
102 #define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
103
104 #define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
105
106 #define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
107
108
109 /*
110 * NOTE: Source and target may *NOT* overlap!
111 */
112 static void
113 ucontext_32to64(struct ucontext64 *in, struct user_ucontext64 *out)
114 {
115 out->uc_onstack = in->uc_onstack;
116 out->uc_sigmask = in->uc_sigmask;
117
118 /* internal "structure assign" */
119 out->uc_stack.ss_sp = CAST_USER_ADDR_T(in->uc_stack.ss_sp);
120 out->uc_stack.ss_size = in->uc_stack.ss_size;
121 out->uc_stack.ss_flags = in->uc_stack.ss_flags;
122
123 out->uc_link = CAST_USER_ADDR_T(in->uc_link);
124 out->uc_mcsize = in->uc_mcsize;
125 out->uc_mcontext64 = CAST_USER_ADDR_T(in->uc_mcontext64);
126 }
127
128 /*
129 * This conversion is safe, since if we are converting for a 32 bit process,
130 * then it's values of uc-stack.ss_size and uc_mcsize will never exceed 4G.
131 *
132 * NOTE: Source and target may *NOT* overlap!
133 */
134 static void
135 ucontext_64to32(struct user_ucontext64 *in, struct ucontext64 *out)
136 {
137 out->uc_onstack = in->uc_onstack;
138 out->uc_sigmask = in->uc_sigmask;
139
140 /* internal "structure assign" */
141 out->uc_stack.ss_sp = CAST_DOWN(void *,in->uc_stack.ss_sp);
142 out->uc_stack.ss_size = in->uc_stack.ss_size; /* range reduction */
143 out->uc_stack.ss_flags = in->uc_stack.ss_flags;
144
145 out->uc_link = CAST_DOWN(void *,in->uc_link);
146 out->uc_mcsize = in->uc_mcsize; /* range reduction */
147 out->uc_mcontext64 = CAST_DOWN(void *,in->uc_mcontext64);
148 }
149
150 /*
151 * NOTE: Source and target may *NOT* overlap!
152 */
153 static void
154 siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
155 {
156 out->si_signo = in->si_signo;
157 out->si_errno = in->si_errno;
158 out->si_code = in->si_code;
159 out->si_pid = in->si_pid;
160 out->si_uid = in->si_uid;
161 out->si_status = in->si_status;
162 out->si_addr = CAST_DOWN(void *,in->si_addr);
163 /* following cast works for sival_int because of padding */
164 out->si_value.sival_ptr = CAST_DOWN(void *,in->si_value.sival_ptr);
165 out->si_band = in->si_band; /* range reduction */
166 out->pad[0] = in->pad[0]; /* mcontext.ss.r1 */
167 }
168
169
/*
 * sendsig
 *
 * Arrange for this process to run a signal handler in user space.
 *
 * Builds the handler's stack frame -- siginfo, ucontext, and one or two
 * machine contexts -- copies it out to the user stack (or the alternate
 * signal stack), and points the thread at the user-space signal
 * trampoline with the handler arguments in r3..r7.
 *
 *	p	process receiving the signal
 *	catcher	user address of the signal handler
 *	sig	signal number being delivered
 *	mask	signal mask to be restored by sigreturn()
 *	code	unused
 *
 * If any thread state cannot be fetched or any copyout fails, the
 * process is given a fresh SIGILL with default action (the "bad:" path).
 */

void
sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code)
{
	kern_return_t kretn;
	struct mcontext mctx;
	user_addr_t p_mctx = USER_ADDR_NULL;		/* mcontext dest. */
	struct mcontext64 mctx64;
	user_addr_t p_mctx64 = USER_ADDR_NULL;		/* mcontext dest. */
	struct user_ucontext64 uctx;
	user_addr_t p_uctx;		/* user stack addr top copy ucontext */
	user_siginfo_t sinfo;
	user_addr_t p_sinfo;		/* user stack addr top copy siginfo */
	struct sigacts *ps = p->p_sigacts;
	int oonstack;
	user_addr_t sp;
	mach_msg_type_number_t state_count;
	thread_t th_act;
	struct uthread *ut;
	int infostyle = UC_TRAD;
	int dualcontext =0;
	user_addr_t trampact;
	int vec_used = 0;
	int stack_size = 0;
	void * tstate;
	int flavor;
	int ctx32 = 1;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	/*
	 * Choose the context layout ("infostyle", see the UC_* table above)
	 * the trampoline will be told about.  ctx32 tracks whether the
	 * 32-bit register set is the primary one; dualcontext means both a
	 * 32-bit and a 64-bit mcontext get pushed on the user stack.
	 */
	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		infostyle = UC_FLAVOR;
	}
	if(is_64signalregset() && (infostyle == UC_FLAVOR)) {
		dualcontext = 1;
		infostyle = UC_DUAL;
	}
	if (p->p_sigacts->ps_64regset & sigmask(sig)) {
		dualcontext = 0;
		ctx32 = 0;
		infostyle = UC_FLAVOR64;
	}
	/* treat 64 bit processes as having used 64 bit registers */
	if ((IS_64BIT_PROCESS(p) || is_64signalregset()) &&
	    (infostyle == UC_TRAD)) {
		ctx32=0;
		infostyle = UC_TRAD64;
	}
	if (IS_64BIT_PROCESS(p)) {
		ctx32=0;
		dualcontext = 0;
	}

	/*
	 * Snapshot the interrupted thread's register state into the local
	 * mctx / mctx64 staging buffers.  The 32-bit general state is
	 * fetched unconditionally because SIGINFO decoding below reads it.
	 */
	/* I need this for SIGINFO anyway */
	flavor = PPC_THREAD_STATE;
	tstate = (void *)&mctx.ss;
	state_count = PPC_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
		goto bad;

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_THREAD_STATE64;
		tstate = (void *)&mctx64.ss;
		state_count = PPC_THREAD_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE;
		tstate = (void *)&mctx.es;
		state_count = PPC_EXCEPTION_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE64;
		tstate = (void *)&mctx64.es;
		state_count = PPC_EXCEPTION_STATE64_COUNT;

		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;

	}


	/* float state is the same flavor for both context widths */
	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx64.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;

	}


	/* vector (AltiVec) state is included only if the thread used it */
	if (find_user_vec_curr()) {
		vec_used = 1;

		/*
		 * NOTE(review): when dualcontext is set, BOTH of the
		 * following branches run, so infostyle gets "+= 5" twice
		 * (UC_DUAL 50 -> 60), whereas UC_DUAL_VEC is 55.  Verify
		 * against the trampoline's expectations -- this looks like
		 * a double increment.
		 */
		if ((ctx32 == 1) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
			infostyle += 5;
		}

		if ((ctx32 == 0) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx64.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
			infostyle += 5;
		}
	}

	trampact = ps->ps_trampact[sig];
	oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

	/* figure out where our new stack lives */
	if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
		(ps->ps_sigonstack & sigmask(sig))) {
		/* alternate signal stack: start at its top */
		sp = ps->ps_sigstk.ss_sp;
		sp += ps->ps_sigstk.ss_size;
		stack_size = ps->ps_sigstk.ss_size;
		ps->ps_sigstk.ss_flags |= SA_ONSTACK;
	}
	else {
		/* otherwise build below the interrupted stack pointer (r1) */
		if (ctx32 == 0)
			sp = mctx64.ss.r1;
		else
			sp = CAST_USER_ADDR_T(mctx.ss.r1);
	}


	/* put siginfo on top */

	/* preserve RED ZONE area (ABI scratch space below the SP) */
	if (IS_64BIT_PROCESS(p))
		sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
	else
		sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);

	/* next are the saved registers */
	if ((ctx32 == 0) || dualcontext) {
		sp -= sizeof(struct mcontext64);
		p_mctx64 = sp;
	}
	if ((ctx32 == 1) || dualcontext) {
		sp -= sizeof(struct mcontext);
		p_mctx = sp;
	}

	if (IS_64BIT_PROCESS(p)) {
		/* context goes first on stack */
		sp -= sizeof(struct user_ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(user_siginfo_t);
		p_sinfo = sp;

		/* leave room for the callee's parameter save + linkage area */
		sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
	} else {
		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyout().
		 */

		/* context goes first on stack */
		sp -= sizeof(struct ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(siginfo_t);
		p_sinfo = sp;

		/* leave room for the callee's parameter save + linkage area */
		sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
	}

	/* fill in the ucontext the handler (and later sigreturn) will see */
	uctx.uc_onstack = oonstack;
	uctx.uc_sigmask = mask;
	uctx.uc_stack.ss_sp = sp;
	uctx.uc_stack.ss_size = stack_size;
	/*
	 * NOTE(review): uctx is an uninitialized stack local, so this |=
	 * folds whatever garbage is in uc_stack.ss_flags into the result
	 * (and when !oonstack the field is never written at all).  A plain
	 * assignment "= SS_ONSTACK" / "= 0" looks intended -- confirm
	 * against the trampoline/sigreturn consumers.
	 */
	if (oonstack)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;

	uctx.uc_link = 0;
	/* mcsize matches the state flavors copied out below */
	if (ctx32 == 0)
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
	else
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));

	if (vec_used)
		uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));

	if (ctx32 == 0)
		uctx.uc_mcontext64 = p_mctx64;
	else
		uctx.uc_mcontext64 = p_mctx;

	/* setup siginfo */
	bzero((caddr_t)&sinfo, sizeof(user_siginfo_t));
	sinfo.si_signo = sig;
	if (ctx32 == 0) {
		sinfo.si_addr = mctx64.ss.srr0;
		sinfo.pad[0] = mctx64.ss.r1;
	} else {
		sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
		sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
	}

	/* per-signal si_code / si_addr refinement */
	switch (sig) {
		case SIGCHLD:
			/* consume-and-clear the child status stashed on the proc */
			sinfo.si_pid = p->si_pid;
			p->si_pid =0;
			sinfo.si_status = p->si_status;
			p->si_status = 0;
			sinfo.si_uid = p->si_uid;
			p->si_uid =0;
			sinfo.si_code = p->si_code;
			p->si_code = 0;
			if (sinfo.si_code == CLD_EXITED) {
				/* refine CLD_EXITED using the wait(2) status encoding */
				if (WIFEXITED(sinfo.si_status))
					sinfo.si_code = CLD_EXITED;
				else if (WIFSIGNALED(sinfo.si_status)) {
					if (WCOREDUMP(sinfo.si_status))
						sinfo.si_code = CLD_DUMPED;
					else
						sinfo.si_code = CLD_KILLED;
				}
			}
			break;
		case SIGILL:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if(ctx32 == 0) {
				if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			} else {
				if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
/* FPSCR exception-summary bit positions (big-endian bit numbering) */
#define FPSCR_VX	2
#define FPSCR_OX	3
#define FPSCR_UX	4
#define FPSCR_ZX	5
#define FPSCR_XX	6
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if(ctx32 == 0) {
				if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			} else {
				if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			}
			break;

		case SIGBUS:
			if (ctx32 == 0) {
				sinfo.si_addr = mctx64.es.dar;
			} else {
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
			}
			/* on ppc we generate only if EXC_PPC_UNALIGNED */
			sinfo.si_code = BUS_ADRALN;
			break;

		case SIGSEGV:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if (ctx32 == 0) {
				sinfo.si_addr = mctx64.es.dar;
				/* First check in srr1 and then in dsisr */
				if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			} else {
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
				/* First check in srr1 and then in dsisr */
				if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			}
			break;
		default:
			break;
	}


	/* copy info out to user space */
	if (IS_64BIT_PROCESS(p)) {
		if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
			goto bad;
		if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t)))
			goto bad;
	} else {
		struct ucontext64 uctx32;
		siginfo_t sinfo32;

		/* 32-bit process: narrow both structures before copyout */
		ucontext_64to32(&uctx, &uctx32);
		if (copyout(&uctx32, p_uctx, sizeof(struct ucontext64)))
			goto bad;

		siginfo_64to32(&sinfo,&sinfo32);
		if (copyout(&sinfo32, p_sinfo, sizeof(siginfo_t)))
			goto bad;
	}
	if ((ctx32 == 0) || dualcontext) {
		/*
		 * NOTE: Size of mcontext is not variant between 64bit and
		 * 32bit programs using 64bit registers.
		 */
		if (copyout(&mctx64, p_mctx64, (vec_used? UC_FLAVOR64_VEC_SIZE: UC_FLAVOR64_SIZE)))
			goto bad;
	}
	if ((ctx32 == 1) || dualcontext) {
		if (copyout(&mctx, p_mctx, uctx.uc_mcsize))
			goto bad;
	}


	/* Place our arguments in arg registers: rtm dependent */
	if(IS_64BIT_PROCESS(p)) {
		/* handler(sig, siginfo, ucontext) reached via trampoline(catcher, infostyle, ...) */
		mctx64.ss.r3 = catcher;
		mctx64.ss.r4 = CAST_USER_ADDR_T(infostyle);
		mctx64.ss.r5 = CAST_USER_ADDR_T(sig);
		mctx64.ss.r6 = p_sinfo;
		mctx64.ss.r7 = p_uctx;

		mctx64.ss.srr0 = trampact;
		/* MSR_EXPORT_MASK_SET */
		mctx64.ss.srr1 = CAST_USER_ADDR_T(get_msr_exportmask());
		mctx64.ss.r1 = sp;
		state_count = PPC_THREAD_STATE64_COUNT;
		if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE64, (void *)&mctx64.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	} else {
		mctx.ss.r3 = CAST_DOWN(unsigned long,catcher);
		mctx.ss.r4 = (unsigned long)infostyle;
		mctx.ss.r5 = (unsigned long)sig;
		mctx.ss.r6 = CAST_DOWN(unsigned long,p_sinfo);
		mctx.ss.r7 = CAST_DOWN(unsigned long,p_uctx);

		mctx.ss.srr0 = CAST_DOWN(unsigned long,trampact);
		/* MSR_EXPORT_MASK_SET */
		mctx.ss.srr1 = get_msr_exportmask();
		mctx.ss.r1 = CAST_DOWN(unsigned long,sp);
		state_count = PPC_THREAD_STATE_COUNT;
		if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, (void *)&mctx.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	}
	return;

bad:
	/*
	 * Delivery failed: force a default-action SIGILL so the process
	 * dies rather than silently losing the signal.
	 */
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	psignal_lock(p, SIGILL, 0);
	return;
}
604
/*
 * System call to cleanup state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 *
 *	uap->uctx	user address of the ucontext written by sendsig()
 *	uap->infostyle	layout tag (UC_* above) passed to the trampoline
 *
 * Returns EJUSTRETURN on success (registers already set), EINVAL for a
 * bad context, or the copyin error.
 */

/* ARGSUSED */
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	struct user_ucontext64 uctx;

	/* staging buffer large enough for either mcontext width */
	char mactx[sizeof(struct mcontext64)];
	struct mcontext *p_mctx;
	struct mcontext64 *p_64mctx;
	int error;
	thread_t th_act;
	struct sigacts *ps = p->p_sigacts;
	sigset_t mask;
	user_addr_t action;
	unsigned long state_count;
	unsigned int state_flavor;
	struct uthread * ut;
	int vec_used = 0;
	void *tsptr, *fptr, *vptr;
	int infostyle = uap->infostyle;

	th_act = current_thread();

	ut = (struct uthread *)get_bsdthread_info(th_act);
	/* fetch the ucontext in the caller's native width */
	if (IS_64BIT_PROCESS(p)) {
		error = copyin(uap->uctx, &uctx, sizeof(struct user_ucontext64));
		if (error)
			return(error);
	} else {
		struct ucontext64 uctx32;

		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyin().
		 */
		error = copyin(uap->uctx, &uctx32, sizeof(struct ucontext));
		if (error)
			return(error);
		ucontext_32to64(&uctx32, &uctx);
	}


	/* validate the machine context size */
	switch (uctx.uc_mcsize) {
	case UC_FLAVOR64_VEC_SIZE:
	case UC_FLAVOR64_SIZE:
	case UC_FLAVOR_VEC_SIZE:
	case UC_FLAVOR_SIZE:
		break;
	default:
		return(EINVAL);
	}

	/*
	 * The 64 bit process mcontext is identical to the mcontext64, so
	 * there is no conversion necessary.
	 */
	error = copyin(uctx.uc_mcontext64, mactx, uctx.uc_mcsize);
	if (error)
		return(error);

	/* restore the alternate-stack flag the handler frame recorded */
	if ((uctx.uc_onstack & 01))
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;

	/* restore the signal mask; SIGKILL/SIGSTOP can never be masked */
	ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(current_thread());

	/*
	 * Map the (user-supplied) infostyle onto the layout of the copied-in
	 * mcontext.  NOTE(review): infostyle is not cross-checked against
	 * uc_mcsize, so a vector infostyle with a vector-less context reads
	 * uninitialized (but in-bounds) bytes of mactx -- presumably benign;
	 * confirm.
	 */
	vec_used = 0;
	switch (infostyle)  {
		case UC_FLAVOR64_VEC:
		case UC_TRAD64_VEC:
			vec_used = 1;
			/* FALLTHROUGH */
		case UC_TRAD64:
		case UC_FLAVOR64: {
			p_64mctx = (struct mcontext64 *)mactx;
			tsptr = (void *)&p_64mctx->ss;
			fptr = (void *)&p_64mctx->fs;
			vptr = (void *)&p_64mctx->vs;
			state_flavor = PPC_THREAD_STATE64;
			state_count = PPC_THREAD_STATE64_COUNT;
		}
		break;
		case UC_FLAVOR_VEC :
		case UC_TRAD_VEC :
			vec_used = 1;
			/* FALLTHROUGH */
		case UC_FLAVOR :
		case UC_TRAD :
		default: {
			p_mctx = (struct mcontext *)mactx;
			tsptr = (void *)&p_mctx->ss;
			fptr = (void *)&p_mctx->fs;
			vptr = (void *)&p_mctx->vs;
			state_flavor = PPC_THREAD_STATE;
			state_count = PPC_THREAD_STATE_COUNT;
		}
		break;
	} /* switch () */

	/* validate the thread state, set/reset appropriate mode bits in srr1 */
	(void)ppc_checkthreadstate(tsptr, state_flavor);

	if (thread_setstatus(th_act, state_flavor, tsptr, state_count) != KERN_SUCCESS) {
		return(EINVAL);
	}

	state_count = PPC_FLOAT_STATE_COUNT;
	if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, state_count) != KERN_SUCCESS) {
		return(EINVAL);
	}

	/* re-enable FP exceptions if a real SIGFPE handler is installed */
	mask = sigmask(SIGFPE);
	if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
		action = ps->ps_sigact[SIGFPE];
		if((action != SIG_DFL) && (action != SIG_IGN)) {
			thread_enable_fpe(th_act, 1);
		}
	}

	if (vec_used) {
		state_count = PPC_VECTOR_STATE_COUNT;
		if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, state_count) != KERN_SUCCESS) {
			return(EINVAL);
		}
	}
	/* EJUSTRETURN: registers already hold the restored user state */
	return (EJUSTRETURN);
}
747
748 /*
749 * machine_exception() performs MD translation
750 * of a mach exception to a unix signal and code.
751 */
752
753 boolean_t
754 machine_exception(
755 int exception,
756 int code,
757 __unused int subcode,
758 int *unix_signal,
759 int *unix_code
760 )
761 {
762 switch(exception) {
763
764 case EXC_BAD_INSTRUCTION:
765 *unix_signal = SIGILL;
766 *unix_code = code;
767 break;
768
769 case EXC_ARITHMETIC:
770 *unix_signal = SIGFPE;
771 *unix_code = code;
772 break;
773
774 case EXC_SOFTWARE:
775 if (code == EXC_PPC_TRAP) {
776 *unix_signal = SIGTRAP;
777 *unix_code = code;
778 break;
779 } else
780 return(FALSE);
781
782 default:
783 return(FALSE);
784 }
785
786 return(TRUE);
787 }
788