[apple/xnu.git] bsd/dev/ppc/unix_signal.c (xnu-1228.15.4)
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
30 */
31
32 #include <mach/mach_types.h>
33 #include <mach/exception_types.h>
34
35 #include <sys/param.h>
36 #include <sys/proc_internal.h>
37 #include <sys/user.h>
38 #include <sys/ucontext.h>
39 #include <sys/sysproto.h>
40 #include <sys/systm.h>
41 #include <sys/ux_exception.h>
42
43 #include <ppc/signal.h>
44 #include <sys/signalvar.h>
45 #include <sys/kdebug.h>
46 #include <sys/wait.h>
47 #include <kern/thread.h>
48 #include <mach/ppc/thread_status.h>
49 #include <ppc/proc_reg.h>
50
51 #include <sys/sdt.h>
52
53 // #include <machine/thread.h> XXX include path messed up for some reason...
54
55 /* XXX functions not in Mach headers */
56 extern kern_return_t thread_getstatus(register thread_t act, int flavor,
57 thread_state_t tstate, mach_msg_type_number_t *count);
58 extern unsigned int get_msr_exportmask(void);
59 extern kern_return_t thread_setstatus(thread_t thread, int flavor,
60 thread_state_t tstate, mach_msg_type_number_t count);
61 extern void ppc_checkthreadstate(void *, int);
62 extern struct savearea_vec *find_user_vec_curr(void);
63 extern int thread_enable_fpe(thread_t act, int onoff);
64
65
66
67 #define C_32_REDZONE_LEN 224
68 #define C_32_STK_ALIGN 16
69 #define C_32_PARAMSAVE_LEN 64
70 #define C_32_LINKAGE_LEN 48
71
72 #define C_64_REDZONE_LEN 320
73 #define C_64_STK_ALIGN 32
74 #define C_64_PARAMSAVE_LEN 64
75 #define C_64_LINKAGE_LEN 48
76
77 #define TRUNC_DOWN32(a,b,c) ((((uint32_t)a)-(b)) & ((uint32_t)(-(c))))
78 #define TRUNC_DOWN64(a,b,c) ((((uint64_t)a)-(b)) & ((uint64_t)(-(c))))
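/*
 * Worked example of the alignment arithmetic (the stack pointer value is
 * hypothetical, for illustration only):
 *
 *   TRUNC_DOWN32(0xbffff8a4, C_32_REDZONE_LEN, C_32_STK_ALIGN)
 *     = (0xbffff8a4 - 224) & (uint32_t)(-16)
 *     = 0xbffff7c4 & 0xfffffff0
 *     = 0xbffff7c0
 *
 * i.e. the macro first steps below the red zone, then rounds the result
 * down to the required stack alignment.
 */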
79
80 /*
81  * The stack layout possibilities (info style); this needs to match the signal trampoline code
82 *
83 * Traditional: 1
84 * Traditional64: 20
85  * Traditional64 with vector: 25
86 * 32bit context 30
87 * 32bit context with vector 35
88 * 64bit context 40
89 * 64bit context with vector 45
90 * Dual context 50
91 * Dual context with vector 55
92 *
93 */
94
95 #define UC_TRAD 1
96 #define UC_TRAD_VEC 6
97 #define UC_TRAD64 20
98 #define UC_TRAD64_VEC 25
99 #define UC_FLAVOR 30
100 #define UC_FLAVOR_VEC 35
101 #define UC_FLAVOR64 40
102 #define UC_FLAVOR64_VEC 45
103 #define UC_DUAL 50
104 #define UC_DUAL_VEC 55
105 #define UC_SET_ALT_STACK 0x40000000
106 #define UC_RESET_ALT_STACK 0x80000000
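/*
 * Note that every "with vector" style is its base style plus 5
 * (e.g. UC_FLAVOR + 5 == UC_FLAVOR_VEC); the "infostyle += 5"
 * adjustments in sendsig() depend on this spacing.
 */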
107
108 /* The following are valid mcontext sizes */
109 #define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
110
111 #define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
112
113 #define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
114
115 #define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
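/*
 * These four sizes are exactly the uc_mcsize values that sendsig() can
 * produce (thread + exception + float state, optionally plus vector
 * state, in 32 bit or 64 bit form); sigreturn() rejects anything else.
 */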
116
117
118 /*
119 * NOTE: Source and target may *NOT* overlap!
120 */
121 static void
122 ucontext_32to64(struct ucontext64 *in, struct user_ucontext64 *out)
123 {
124 out->uc_onstack = in->uc_onstack;
125 out->uc_sigmask = in->uc_sigmask;
126
127 /* internal "structure assign" */
128 out->uc_stack.ss_sp = CAST_USER_ADDR_T(in->uc_stack.ss_sp);
129 out->uc_stack.ss_size = in->uc_stack.ss_size;
130 out->uc_stack.ss_flags = in->uc_stack.ss_flags;
131
132 out->uc_link = CAST_USER_ADDR_T(in->uc_link);
133 out->uc_mcsize = in->uc_mcsize;
134 out->uc_mcontext64 = CAST_USER_ADDR_T(in->uc_mcontext64);
135 }
136
137 /*
138  * This conversion is safe: when converting for a 32 bit process,
139  * its values of uc_stack.ss_size and uc_mcsize can never exceed 4G.
140 *
141 * NOTE: Source and target may *NOT* overlap!
142 */
143 static void
144 ucontext_64to32(struct user_ucontext64 *in, struct ucontext64 *out)
145 {
146 out->uc_onstack = in->uc_onstack;
147 out->uc_sigmask = in->uc_sigmask;
148
149 /* internal "structure assign" */
150 out->uc_stack.ss_sp = CAST_DOWN(void *,in->uc_stack.ss_sp);
151 out->uc_stack.ss_size = in->uc_stack.ss_size; /* range reduction */
152 out->uc_stack.ss_flags = in->uc_stack.ss_flags;
153
154 out->uc_link = CAST_DOWN(void *,in->uc_link);
155 out->uc_mcsize = in->uc_mcsize; /* range reduction */
156 out->uc_mcontext64 = CAST_DOWN(void *,in->uc_mcontext64);
157 }
158
159 /*
160 * NOTE: Source and target may *NOT* overlap!
161 */
162 static void
163 siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
164 {
165 out->si_signo = in->si_signo;
166 out->si_errno = in->si_errno;
167 out->si_code = in->si_code;
168 out->si_pid = in->si_pid;
169 out->si_uid = in->si_uid;
170 out->si_status = in->si_status;
171 out->si_addr = CAST_DOWN(void *,in->si_addr);
172 /* following cast works for sival_int because of padding */
173 out->si_value.sival_ptr = CAST_DOWN(void *,in->si_value.sival_ptr);
174 out->si_band = in->si_band; /* range reduction */
175 out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */
176 }
177
178
179 /*
180 * Arrange for this process to run a signal handler
181 */
182
183 void
184 sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code)
185 {
186 kern_return_t kretn;
187 struct mcontext mctx;
188 user_addr_t p_mctx = USER_ADDR_NULL; /* mcontext dest. */
189 struct mcontext64 mctx64;
190 user_addr_t p_mctx64 = USER_ADDR_NULL; /* mcontext dest. */
191 struct user_ucontext64 uctx;
192 user_addr_t p_uctx; /* user stack addr to copy ucontext to */
193 user_siginfo_t sinfo;
194 user_addr_t p_sinfo; /* user stack addr to copy siginfo to */
195 struct sigacts *ps = p->p_sigacts;
196 int oonstack;
197 user_addr_t sp;
198 mach_msg_type_number_t state_count;
199 thread_t th_act;
200 struct uthread *ut;
201 int infostyle = UC_TRAD;
202 int dualcontext =0;
203 user_addr_t trampact;
204 int vec_used = 0;
205 int stack_size = 0;
206 void * tstate;
207 int flavor;
208 int ctx32 = 1;
209
210 th_act = current_thread();
211 ut = get_bsdthread_info(th_act);
212
213 /*
214 * XXX We conditionalize type passed here based on SA_SIGINFO, but
215 * XXX we always send up all the information, regardless; perhaps
216 * XXX this should not be conditionalized? Defer making this change
217 * XXX now, due to possible tools impact.
218 */
219 if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
220 /*
221 * If SA_SIGINFO is set, then we must provide the user
222 * process both a siginfo_t and a context argument. We call
223 * this "FLAVORED", as opposed to "TRADITIONAL", which doesn't
224 * expect a context. "DUAL" is a type of "FLAVORED".
225 */
226 if (is_64signalregset()) {
227 /*
228 * If this is a 64 bit CPU, we must include a 64 bit
229 * context in the data we pass to user space; we may
230 * or may not also include a 32 bit context at the
231 * same time, for non-leaf functions.
232 *
233 * The user may also explicitly choose to not receive
234 * a 32 bit context, at their option; we only allow
235 * this to happen on 64 bit processors, for obvious
236 * reasons.
237 */
238 if (IS_64BIT_PROCESS(p) ||
239 (p->p_sigacts->ps_64regset & sigmask(sig))) {
240 /*
241 * For a 64 bit process, there is no 32 bit
242 * context.
243 */
244 ctx32 = 0;
245 infostyle = UC_FLAVOR64;
246 } else {
247 /*
248 * For a 32 bit process on a 64 bit CPU, we
249 * may have 64 bit leaf functions, so we need
250 * both contexts.
251 */
252 dualcontext = 1;
253 infostyle = UC_DUAL;
254 }
255 } else {
256 /*
257 * If this is a 32 bit CPU, then we only have a 32 bit
258 * context to contend with.
259 */
260 infostyle = UC_FLAVOR;
261 }
262 } else {
263 /*
264 * If SA_SIGINFO is not set, then we have a traditional style
265 * call which does not need additional context passed. The
266 * default is 32 bit traditional.
267 *
268 * XXX The second check is redundant on PPC32; keep it anyway.
269 */
270 if (is_64signalregset() || IS_64BIT_PROCESS(p)) {
271 /*
272 * However, if this is a 64 bit CPU, we need to change
273 * this to 64 bit traditional, and drop the 32 bit
274 * context.
275 */
276 ctx32 = 0;
277 infostyle = UC_TRAD64;
278 }
279 }
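/*
 * Summary of the choice made above (the "+ 5" vector adjustment, if any,
 * is applied later once the AltiVec state has been looked at):
 *
 *   SA_SIGINFO set,   64 bit regset, 64 bit proc or ps_64regset -> UC_FLAVOR64
 *   SA_SIGINFO set,   64 bit regset, 32 bit proc                -> UC_DUAL
 *   SA_SIGINFO set,   32 bit regset                             -> UC_FLAVOR
 *   SA_SIGINFO clear, 64 bit regset or 64 bit proc              -> UC_TRAD64
 *   SA_SIGINFO clear, otherwise                                 -> UC_TRAD
 */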
280
281 proc_unlock(p);
282
283 /* I need this for SIGINFO anyway */
284 flavor = PPC_THREAD_STATE;
285 tstate = (void *)&mctx.ss;
286 state_count = PPC_THREAD_STATE_COUNT;
287 if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
288 goto bad;
289
290 if ((ctx32 == 0) || dualcontext) {
291 flavor = PPC_THREAD_STATE64;
292 tstate = (void *)&mctx64.ss;
293 state_count = PPC_THREAD_STATE64_COUNT;
294 if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
295 goto bad;
296 }
297
298 if ((ctx32 == 1) || dualcontext) {
299 flavor = PPC_EXCEPTION_STATE;
300 tstate = (void *)&mctx.es;
301 state_count = PPC_EXCEPTION_STATE_COUNT;
302 if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
303 goto bad;
304 }
305
306 if ((ctx32 == 0) || dualcontext) {
307 flavor = PPC_EXCEPTION_STATE64;
308 tstate = (void *)&mctx64.es;
309 state_count = PPC_EXCEPTION_STATE64_COUNT;
310
311 if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
312 goto bad;
313
314 }
315
316
317 if ((ctx32 == 1) || dualcontext) {
318 flavor = PPC_FLOAT_STATE;
319 tstate = (void *)&mctx.fs;
320 state_count = PPC_FLOAT_STATE_COUNT;
321 if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
322 goto bad;
323 }
324
325 if ((ctx32 == 0) || dualcontext) {
326 flavor = PPC_FLOAT_STATE;
327 tstate = (void *)&mctx64.fs;
328 state_count = PPC_FLOAT_STATE_COUNT;
329 if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
330 goto bad;
331
332 }
333
334
335 if (find_user_vec_curr()) {
336 vec_used = 1;
337
338 if ((ctx32 == 1) || dualcontext) {
339 flavor = PPC_VECTOR_STATE;
340 tstate = (void *)&mctx.vs;
341 state_count = PPC_VECTOR_STATE_COUNT;
342 if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
343 goto bad;
344 infostyle += 5;
345 }
346
347 if ((ctx32 == 0) || dualcontext) {
348 flavor = PPC_VECTOR_STATE;
349 tstate = (void *)&mctx64.vs;
350 state_count = PPC_VECTOR_STATE_COUNT;
351 if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
352 goto bad;
353 infostyle += 5;
354 }
355 }
356
357 trampact = ps->ps_trampact[sig];
358 oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
359
360 /* figure out where our new stack lives */
361 if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
362 (ps->ps_sigonstack & sigmask(sig))) {
363 sp = ut->uu_sigstk.ss_sp;
364 sp += ut->uu_sigstk.ss_size;
365 stack_size = ut->uu_sigstk.ss_size;
366 ut->uu_sigstk.ss_flags |= SA_ONSTACK;
367 }
368 else {
369 if (ctx32 == 0)
370 sp = mctx64.ss.r1;
371 else
372 sp = CAST_USER_ADDR_T(mctx.ss.r1);
373 }
374
375
376 /* put siginfo on top */
377
378 /* preserve RED ZONE area */
379 if (IS_64BIT_PROCESS(p))
380 sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
381 else
382 sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);
383
384 /* next are the saved registers */
385 if ((ctx32 == 0) || dualcontext) {
386 sp -= sizeof(struct mcontext64);
387 p_mctx64 = sp;
388 }
389 if ((ctx32 == 1) || dualcontext) {
390 sp -= sizeof(struct mcontext);
391 p_mctx = sp;
392 }
393
394 if (IS_64BIT_PROCESS(p)) {
395 /* context goes first on stack */
396 sp -= sizeof(struct user_ucontext64);
397 p_uctx = sp;
398
399 /* this is where siginfo goes on stack */
400 sp -= sizeof(user_siginfo_t);
401 p_sinfo = sp;
402
403 sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
404 } else {
405 /*
406 * struct ucontext and struct ucontext64 are identical in
407 * size and content; the only difference is the internal
408 * pointer type for the last element, which makes no
409 * difference for the copyout().
410 */
411
412 /* context goes first on stack */
413 sp -= sizeof(struct ucontext64);
414 p_uctx = sp;
415
416 /* this is where siginfo goes on stack */
417 sp -= sizeof(siginfo_t);
418 p_sinfo = sp;
419
420 sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
421 }
422
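/*
 * The frame built above, from higher to lower addresses (parenthesized
 * pieces are present only for the corresponding context styles):
 *
 *   interrupted r1 (or top of the alternate signal stack)
 *   red zone               (224 / 320 bytes, left untouched)
 *   (struct mcontext64)    <- p_mctx64   64 bit register state
 *   (struct mcontext)      <- p_mctx     32 bit register state
 *   ucontext               <- p_uctx
 *   siginfo                <- p_sinfo
 *   parameter save + linkage area
 *   new, aligned r1        <- sp
 */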
423 uctx.uc_onstack = oonstack;
424 uctx.uc_sigmask = mask;
425 uctx.uc_stack.ss_sp = sp;
426 uctx.uc_stack.ss_size = stack_size;
427 if (oonstack)
428 uctx.uc_stack.ss_flags |= SS_ONSTACK;
429
430 uctx.uc_link = 0;
431 if (ctx32 == 0)
432 uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
433 else
434 uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
435
436 if (vec_used)
437 uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));
438
439 if (ctx32 == 0)
440 uctx.uc_mcontext64 = p_mctx64;
441 else
442 uctx.uc_mcontext64 = p_mctx;
443
444 /* setup siginfo */
445 bzero((caddr_t)&sinfo, sizeof(user_siginfo_t));
446 sinfo.si_signo = sig;
447 if (ctx32 == 0) {
448 sinfo.si_addr = mctx64.ss.srr0;
449 sinfo.pad[0] = mctx64.ss.r1;
450 } else {
451 sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
452 sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
453 }
454
455 switch (sig) {
456 case SIGILL:
457 /*
458 * If it's 64 bit and not a dual context, mctx will
459 * contain uninitialized data, so we have to use
460 * mctx64 here.
461 */
462 if(ctx32 == 0) {
463 if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
464 sinfo.si_code = ILL_ILLOPC;
465 else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
466 sinfo.si_code = ILL_PRVOPC;
467 else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
468 sinfo.si_code = ILL_ILLTRP;
469 else
470 sinfo.si_code = ILL_NOOP;
471 } else {
472 if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
473 sinfo.si_code = ILL_ILLOPC;
474 else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
475 sinfo.si_code = ILL_PRVOPC;
476 else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
477 sinfo.si_code = ILL_ILLTRP;
478 else
479 sinfo.si_code = ILL_NOOP;
480 }
481 break;
482 case SIGFPE:
483 #define FPSCR_VX 2
484 #define FPSCR_OX 3
485 #define FPSCR_UX 4
486 #define FPSCR_ZX 5
487 #define FPSCR_XX 6
488 /*
489 * If it's 64 bit and not a dual context, mctx will
490 * contain uninitialized data, so we have to use
491 * mctx64 here.
492 */
493 if(ctx32 == 0) {
494 if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
495 sinfo.si_code = FPE_FLTINV;
496 else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
497 sinfo.si_code = FPE_FLTOVF;
498 else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
499 sinfo.si_code = FPE_FLTUND;
500 else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
501 sinfo.si_code = FPE_FLTDIV;
502 else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
503 sinfo.si_code = FPE_FLTRES;
504 else
505 sinfo.si_code = FPE_NOOP;
506 } else {
507 if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
508 sinfo.si_code = FPE_FLTINV;
509 else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
510 sinfo.si_code = FPE_FLTOVF;
511 else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
512 sinfo.si_code = FPE_FLTUND;
513 else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
514 sinfo.si_code = FPE_FLTDIV;
515 else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
516 sinfo.si_code = FPE_FLTRES;
517 else
518 sinfo.si_code = FPE_NOOP;
519 }
520 break;
521
522 case SIGBUS:
523 if (ctx32 == 0) {
524 sinfo.si_addr = mctx64.es.dar;
525 } else {
526 sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
527 }
528 /* on ppc we generate SIGBUS only for EXC_PPC_UNALIGNED */
529 sinfo.si_code = BUS_ADRALN;
530 break;
531
532 case SIGSEGV:
533 /*
534 * If it's 64 bit and not a dual context, mctx will
535 * contain uninitialized data, so we have to use
536 * mctx64 here.
537 */
538 if (ctx32 == 0) {
539 sinfo.si_addr = mctx64.es.dar;
540 /* First check in srr1 and then in dsisr */
541 if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
542 sinfo.si_code = SEGV_ACCERR;
543 else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
544 sinfo.si_code = SEGV_ACCERR;
545 else
546 sinfo.si_code = SEGV_MAPERR;
547 } else {
548 sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
549 /* First check in srr1 and then in dsisr */
550 if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
551 sinfo.si_code = SEGV_ACCERR;
552 else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
553 sinfo.si_code = SEGV_ACCERR;
554 else
555 sinfo.si_code = SEGV_MAPERR;
556 }
557 break;
558 default:
559 {
560 int status_and_exitcode;
561
562 /*
563 * All other signals need to fill out a minimum set of
564 * information for the siginfo structure passed into
565 * the signal handler, if SA_SIGINFO was specified.
566 *
567 * p->si_status actually contains both the status and
568 * the exit code; we save it off in its own variable
569 * for later breakdown.
570 */
571 proc_lock(p);
572 sinfo.si_pid = p->si_pid;
573 p->si_pid = 0;
574 status_and_exitcode = p->si_status;
575 p->si_status = 0;
576 sinfo.si_uid = p->si_uid;
577 p->si_uid = 0;
578 sinfo.si_code = p->si_code;
579 p->si_code = 0;
580 proc_unlock(p);
581 if (sinfo.si_code == CLD_EXITED) {
582 if (WIFEXITED(status_and_exitcode))
583 sinfo.si_code = CLD_EXITED;
584 else if (WIFSIGNALED(status_and_exitcode)) {
585 if (WCOREDUMP(status_and_exitcode)) {
586 sinfo.si_code = CLD_DUMPED;
587 status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
588 } else {
589 sinfo.si_code = CLD_KILLED;
590 status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
591 }
592 }
593 }
594 /*
595 * The recorded status contains the exit code and the
596 * signal information, but the information to be passed
597 * in the siginfo to the handler is supposed to only
598 * contain the status, so we have to shift it out.
599 */
600 sinfo.si_status = WEXITSTATUS(status_and_exitcode);
601 break;
602 }
603 }
604
605
606 /* copy info out to user space */
607 if (IS_64BIT_PROCESS(p)) {
608
609 /* XXX truncates catcher address to uintptr_t */
610 DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &sinfo,
611 void (*)(void), CAST_DOWN(sig_t, catcher));
612
613 if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
614 goto bad;
615 if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t)))
616 goto bad;
617 } else {
618 struct ucontext64 uctx32;
619 siginfo_t sinfo32;
620
621 ucontext_64to32(&uctx, &uctx32);
622 siginfo_64to32(&sinfo,&sinfo32);
623
624 DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &sinfo32,
625 void (*)(void), CAST_DOWN(sig_t, catcher));
626
627 if (copyout(&uctx32, p_uctx, sizeof(struct ucontext64)))
628 goto bad;
629
630 if (copyout(&sinfo32, p_sinfo, sizeof(siginfo_t)))
631 goto bad;
632 }
633 if ((ctx32 == 0) || dualcontext) {
634 /*
635  * NOTE: The size of the mcontext does not vary between 64bit
636  * and 32bit programs using 64bit registers.
637 */
638 if (copyout(&mctx64, p_mctx64, (vec_used? UC_FLAVOR64_VEC_SIZE: UC_FLAVOR64_SIZE)))
639 goto bad;
640 }
641 if ((ctx32 == 1) || dualcontext) {
642 if (copyout(&mctx, p_mctx, uctx.uc_mcsize))
643 goto bad;
644 }
645
646
647 /* Place our arguments in arg registers: rtm dependent */
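/*
 * Both branches below pass the same five arguments to the user-space
 * trampoline: r3 = handler, r4 = infostyle, r5 = signal number,
 * r6 = siginfo address, r7 = ucontext address.  srr0 is pointed at the
 * trampoline and r1 at the frame built above, so the thread resumes in
 * the trampoline the next time it runs in user space.
 */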
648 if(IS_64BIT_PROCESS(p)) {
649 mctx64.ss.r3 = catcher;
650 mctx64.ss.r4 = CAST_USER_ADDR_T(infostyle);
651 mctx64.ss.r5 = CAST_USER_ADDR_T(sig);
652 mctx64.ss.r6 = p_sinfo;
653 mctx64.ss.r7 = p_uctx;
654
655 mctx64.ss.srr0 = trampact;
656 /* MSR_EXPORT_MASK_SET */
657 mctx64.ss.srr1 = CAST_USER_ADDR_T(get_msr_exportmask());
658 mctx64.ss.r1 = sp;
659 state_count = PPC_THREAD_STATE64_COUNT;
660 if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE64, (void *)&mctx64.ss, state_count)) != KERN_SUCCESS) {
661 panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
662 }
663 } else {
664 mctx.ss.r3 = CAST_DOWN(unsigned long,catcher);
665 mctx.ss.r4 = (unsigned long)infostyle;
666 mctx.ss.r5 = (unsigned long)sig;
667 mctx.ss.r6 = CAST_DOWN(unsigned long,p_sinfo);
668 mctx.ss.r7 = CAST_DOWN(unsigned long,p_uctx);
669
670 mctx.ss.srr0 = CAST_DOWN(unsigned long,trampact);
671 /* MSR_EXPORT_MASK_SET */
672 mctx.ss.srr1 = get_msr_exportmask();
673 mctx.ss.r1 = CAST_DOWN(unsigned long,sp);
674 state_count = PPC_THREAD_STATE_COUNT;
675 if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, (void *)&mctx.ss, state_count)) != KERN_SUCCESS) {
676 panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
677 }
678 }
679
680 proc_lock(p);
681 return;
682
683 bad:
684 proc_lock(p);
685 SIGACTION(p, SIGILL) = SIG_DFL;
686 sig = sigmask(SIGILL);
687 p->p_sigignore &= ~sig;
688 p->p_sigcatch &= ~sig;
689 ut->uu_sigmask &= ~sig;
690 /* sendsig is called with signal lock held */
691 proc_unlock(p);
692 psignal_locked(p, SIGILL);
693 proc_lock(p);
694 return;
695 }
696
697 /*
698 * System call to cleanup state after a signal
699 * has been taken. Reset signal mask and
700 * stack state from context left by sendsig (above).
701 * Return to previous pc and psl as specified by
702 * context left by sendsig. Check carefully to
703 * make sure that the user has not modified the
704  * psl to gain improper privileges or to cause
705 * a machine fault.
706 */
707
708 /* ARGSUSED */
709 int
710 sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
711 {
712 struct user_ucontext64 uctx;
713
714 char mactx[sizeof(struct mcontext64)];
715 struct mcontext *p_mctx;
716 struct mcontext64 *p_64mctx;
717 int error;
718 thread_t th_act;
719 struct sigacts *ps = p->p_sigacts;
720 sigset_t mask;
721 user_addr_t action;
722 unsigned long state_count;
723 unsigned int state_flavor;
724 struct uthread * ut;
725 int vec_used = 0;
726 void *tsptr, *fptr, *vptr;
727 int infostyle = uap->infostyle;
728
729 th_act = current_thread();
730
731 ut = (struct uthread *)get_bsdthread_info(th_act);
732
733 /*
734 * If we are being asked to change the altstack flag on the thread, we
735  * just set or clear it and return (the uap->uctx is not used).
736 */
737 if (infostyle == UC_SET_ALT_STACK) {
738 ut->uu_sigstk.ss_flags |= SA_ONSTACK;
739 return (0);
740 } else if ((unsigned int)infostyle == UC_RESET_ALT_STACK) {
741 ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
742 return (0);
743 }
744
745 if (IS_64BIT_PROCESS(p)) {
746 error = copyin(uap->uctx, &uctx, sizeof(struct user_ucontext64));
747 if (error)
748 return(error);
749 } else {
750 struct ucontext64 uctx32;
751
752 /*
753 * struct ucontext and struct ucontext64 are identical in
754 * size and content; the only difference is the internal
755 * pointer type for the last element, which makes no
756 * difference for the copyin().
757 */
758 error = copyin(uap->uctx, &uctx32, sizeof(struct ucontext));
759 if (error)
760 return(error);
761 ucontext_32to64(&uctx32, &uctx);
762 }
763
764
765 /* validate the machine context size */
766 switch (uctx.uc_mcsize) {
767 case UC_FLAVOR64_VEC_SIZE:
768 case UC_FLAVOR64_SIZE:
769 case UC_FLAVOR_VEC_SIZE:
770 case UC_FLAVOR_SIZE:
771 break;
772 default:
773 return(EINVAL);
774 }
775
776 /*
777 * The 64 bit process mcontext is identical to the mcontext64, so
778 * there is no conversion necessary.
779 */
780 error = copyin(uctx.uc_mcontext64, mactx, uctx.uc_mcsize);
781 if (error)
782 return(error);
783
784 if ((uctx.uc_onstack & 01))
785 ut->uu_sigstk.ss_flags |= SA_ONSTACK;
786 else
787 ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
788
789 ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
790 if (ut->uu_siglist & ~ut->uu_sigmask)
791 signal_setast(current_thread());
792
793 vec_used = 0;
794 switch (infostyle) {
795 case UC_FLAVOR64_VEC:
796 case UC_TRAD64_VEC:
797 vec_used = 1;
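/* FALLTHROUGH: the *_VEC cases share the 64 bit setup below */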
798 case UC_TRAD64:
799 case UC_FLAVOR64: {
800 p_64mctx = (struct mcontext64 *)mactx;
801 tsptr = (void *)&p_64mctx->ss;
802 fptr = (void *)&p_64mctx->fs;
803 vptr = (void *)&p_64mctx->vs;
804 state_flavor = PPC_THREAD_STATE64;
805 state_count = PPC_THREAD_STATE64_COUNT;
806 }
807 break;
808 case UC_FLAVOR_VEC :
809 case UC_TRAD_VEC :
810 vec_used = 1;
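/* FALLTHROUGH: the *_VEC cases share the 32 bit setup below */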
811 case UC_FLAVOR :
812 case UC_TRAD :
813 default: {
814 p_mctx = (struct mcontext *)mactx;
815 tsptr = (void *)&p_mctx->ss;
816 fptr = (void *)&p_mctx->fs;
817 vptr = (void *)&p_mctx->vs;
818 state_flavor = PPC_THREAD_STATE;
819 state_count = PPC_THREAD_STATE_COUNT;
820 }
821 break;
822 } /* switch () */
823
824 /* validate the thread state, set/reset appropriate mode bits in srr1 */
825 (void)ppc_checkthreadstate(tsptr, state_flavor);
826
827 if (thread_setstatus(th_act, state_flavor, tsptr, state_count) != KERN_SUCCESS) {
828 return(EINVAL);
829 }
830
831 state_count = PPC_FLOAT_STATE_COUNT;
832 if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, state_count) != KERN_SUCCESS) {
833 return(EINVAL);
834 }
835
836 mask = sigmask(SIGFPE);
837 if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
838 action = ps->ps_sigact[SIGFPE];
839 if((action != SIG_DFL) && (action != SIG_IGN)) {
840 thread_enable_fpe(th_act, 1);
841 }
842 }
843
844 if (vec_used) {
845 state_count = PPC_VECTOR_STATE_COUNT;
846 if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, state_count) != KERN_SUCCESS) {
847 return(EINVAL);
848 }
849 }
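/*
 * EJUSTRETURN: no error, and the normal syscall return path must not
 * overwrite the register state installed above.
 */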
850 return (EJUSTRETURN);
851 }
852
853 /*
854 * machine_exception() performs MD translation
855 * of a mach exception to a unix signal and code.
856 */
857
858 boolean_t
859 machine_exception(
860 int exception,
861 mach_exception_code_t code,
862 __unused mach_exception_subcode_t subcode,
863 int *unix_signal,
864 mach_exception_code_t *unix_code)
865 {
866 switch(exception) {
867
868 case EXC_BAD_INSTRUCTION:
869 *unix_signal = SIGILL;
870 *unix_code = code;
871 break;
872
873 case EXC_ARITHMETIC:
874 *unix_signal = SIGFPE;
875 *unix_code = code;
876 break;
877
878 case EXC_SOFTWARE:
879 if (code == EXC_PPC_TRAP) {
880 *unix_signal = SIGTRAP;
881 *unix_code = code;
882 break;
883 } else
884 return(FALSE);
885
886 default:
887 return(FALSE);
888 }
889
890 return(TRUE);
891 }
892