/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <ppc/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/wait.h>
#include <kern/thread.h>
#include <mach/ppc/thread_status.h>
#include <ppc/proc_reg.h>

#include <sys/sdt.h>

// #include <machine/thread.h> XXX include path messed up for some reason...

/* XXX functions not in Mach headers */
extern kern_return_t thread_getstatus(register thread_t act, int flavor,
		thread_state_t tstate, mach_msg_type_number_t *count);
extern unsigned int get_msr_exportmask(void);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
		thread_state_t tstate, mach_msg_type_number_t count);
extern void ppc_checkthreadstate(void *, int);
extern struct savearea_vec *find_user_vec_curr(void);
extern int thread_enable_fpe(thread_t act, int onoff);


#define C_32_REDZONE_LEN	224
#define C_32_STK_ALIGN		16
#define C_32_PARAMSAVE_LEN	64
#define C_32_LINKAGE_LEN	48

#define C_64_REDZONE_LEN	320
#define C_64_STK_ALIGN		32
#define C_64_PARAMSAVE_LEN	64
#define C_64_LINKAGE_LEN	48

#define TRUNC_DOWN32(a,b,c)	((((uint32_t)a)-(b)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a,b,c)	((((uint64_t)a)-(b)) & ((uint64_t)(-(c))))
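/*
 * TRUNC_DOWN{32,64}(a, b, c) drop the stack pointer 'a' by at least 'b'
 * bytes and then round the result down to a multiple of 'c'; masking
 * with -(c) assumes the alignment 'c' is a power of two.
 */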

/*
 * The stack layout possibilities (info style); this needs to match the
 * signal trampoline code.
 *
 * Traditional:			1
 * Traditional with vec:	6
 * Traditional64:		20
 * Traditional64 with vec:	25
 * 32bit context:		30
 * 32bit context with vector:	35
 * 64bit context:		40
 * 64bit context with vector:	45
 * Dual context:		50
 * Dual context with vector:	55
 */

#define UC_TRAD			1
#define UC_TRAD_VEC		6
#define UC_TRAD64		20
#define UC_TRAD64_VEC		25
#define UC_FLAVOR		30
#define UC_FLAVOR_VEC		35
#define UC_FLAVOR64		40
#define UC_FLAVOR64_VEC		45
#define UC_DUAL			50
#define UC_DUAL_VEC		55
#define UC_SET_ALT_STACK	0x40000000
#define UC_RESET_ALT_STACK	0x80000000

/* The following are the valid mcontext sizes; sigreturn() rejects any other uc_mcsize */
#define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))


/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_32to64(struct ucontext64 *in, struct user_ucontext64 *out)
{
	out->uc_onstack = in->uc_onstack;
	out->uc_sigmask = in->uc_sigmask;

	/* internal "structure assign" */
	out->uc_stack.ss_sp = CAST_USER_ADDR_T(in->uc_stack.ss_sp);
	out->uc_stack.ss_size = in->uc_stack.ss_size;
	out->uc_stack.ss_flags = in->uc_stack.ss_flags;

	out->uc_link = CAST_USER_ADDR_T(in->uc_link);
	out->uc_mcsize = in->uc_mcsize;
	out->uc_mcontext64 = CAST_USER_ADDR_T(in->uc_mcontext64);
}

/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then its values of uc_stack.ss_size and uc_mcsize will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_64to32(struct user_ucontext64 *in, struct ucontext64 *out)
{
	out->uc_onstack = in->uc_onstack;
	out->uc_sigmask = in->uc_sigmask;

	/* internal "structure assign" */
	out->uc_stack.ss_sp = CAST_DOWN(void *,in->uc_stack.ss_sp);
	out->uc_stack.ss_size = in->uc_stack.ss_size;	/* range reduction */
	out->uc_stack.ss_flags = in->uc_stack.ss_flags;

	out->uc_link = CAST_DOWN(void *,in->uc_link);
	out->uc_mcsize = in->uc_mcsize;			/* range reduction */
	out->uc_mcontext64 = CAST_DOWN(void *,in->uc_mcontext64);
}

/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_addr);
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_value.sival_ptr);
	out->si_band = in->si_band;	/* range reduction */
	out->__pad[0] = in->pad[0];	/* mcontext.ss.r1 */
}

static void
siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = in->si_addr;
	out->si_value.sival_ptr = in->si_value.sival_ptr;
	out->si_band = in->si_band;	/* range reduction */
	out->__pad[0] = in->pad[0];	/* mcontext.ss.r1 */
}


/*
 * Arrange for this process to run a signal handler
 */

void
sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused uint32_t code)
{
	kern_return_t kretn;
	struct mcontext mctx;
	user_addr_t p_mctx = USER_ADDR_NULL;	/* mcontext dest. */
	struct mcontext64 mctx64;
	user_addr_t p_mctx64 = USER_ADDR_NULL;	/* mcontext dest. */
	struct user_ucontext64 uctx;
	user_addr_t p_uctx;		/* user stack addr to copy the ucontext to */
	user_siginfo_t sinfo;
	user_addr_t p_sinfo;		/* user stack addr to copy the siginfo to */
	struct sigacts *ps = p->p_sigacts;
	int oonstack;
	user_addr_t sp;
	mach_msg_type_number_t state_count;
	thread_t th_act;
	struct uthread *ut;
	int infostyle = UC_TRAD;
	int dualcontext = 0;
	user_addr_t trampact;
	int vec_used = 0;
	int stack_size = 0;
	void *tstate;
	int flavor;
	int ctx32 = 1;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	/*
	 * XXX We conditionalize type passed here based on SA_SIGINFO, but
	 * XXX we always send up all the information, regardless; perhaps
	 * XXX this should not be conditionalized?  Defer making this change
	 * XXX now, due to possible tools impact.
	 */
	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		/*
		 * If SA_SIGINFO is set, then we must provide the user
		 * process both a siginfo_t and a context argument.  We call
		 * this "FLAVORED", as opposed to "TRADITIONAL", which doesn't
		 * expect a context.  "DUAL" is a type of "FLAVORED".
		 */
		if (is_64signalregset()) {
			/*
			 * If this is a 64 bit CPU, we must include a 64 bit
			 * context in the data we pass to user space; we may
			 * or may not also include a 32 bit context at the
			 * same time, for non-leaf functions.
			 *
			 * The user may also explicitly choose to not receive
			 * a 32 bit context, at their option; we only allow
			 * this to happen on 64 bit processors, for obvious
			 * reasons.
			 */
			if (IS_64BIT_PROCESS(p) ||
			    (p->p_sigacts->ps_64regset & sigmask(sig))) {
				/*
				 * For a 64 bit process, there is no 32 bit
				 * context.
				 */
				ctx32 = 0;
				infostyle = UC_FLAVOR64;
			} else {
				/*
				 * For a 32 bit process on a 64 bit CPU, we
				 * may have 64 bit leaf functions, so we need
				 * both contexts.
				 */
				dualcontext = 1;
				infostyle = UC_DUAL;
			}
		} else {
			/*
			 * If this is a 32 bit CPU, then we only have a 32 bit
			 * context to contend with.
			 */
			infostyle = UC_FLAVOR;
		}
	} else {
		/*
		 * If SA_SIGINFO is not set, then we have a traditional style
		 * call which does not need additional context passed.  The
		 * default is 32 bit traditional.
		 *
		 * XXX The second check is redundant on PPC32; keep it anyway.
		 */
		if (is_64signalregset() || IS_64BIT_PROCESS(p)) {
			/*
			 * However, if this is a 64 bit CPU, we need to change
			 * this to 64 bit traditional, and drop the 32 bit
			 * context.
			 */
			ctx32 = 0;
			infostyle = UC_TRAD64;
		}
	}

	proc_unlock(p);

	/* I need this for SIGINFO anyway */
	flavor = PPC_THREAD_STATE;
	tstate = (void *)&mctx.ss;
	state_count = PPC_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
		goto bad;

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_THREAD_STATE64;
		tstate = (void *)&mctx64.ss;
		state_count = PPC_THREAD_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE;
		tstate = (void *)&mctx.es;
		state_count = PPC_EXCEPTION_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE64;
		tstate = (void *)&mctx64.es;
		state_count = PPC_EXCEPTION_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx64.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if (find_user_vec_curr()) {
		vec_used = 1;

		if ((ctx32 == 1) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
			infostyle += 5;
		}

		if ((ctx32 == 0) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx64.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
			infostyle += 5;
		}
	}
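	/*
	 * At this point mctx and/or mctx64 hold the interrupted thread's
	 * general, exception, floating point and (if live) vector state,
	 * as selected by ctx32/dualcontext.
	 */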

	trampact = ps->ps_trampact[sig];
	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;

	/* figure out where our new stack lives */
	if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		sp = ut->uu_sigstk.ss_sp;
		sp += ut->uu_sigstk.ss_size;
		stack_size = ut->uu_sigstk.ss_size;
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		if (ctx32 == 0)
			sp = mctx64.ss.r1;
		else
			sp = CAST_USER_ADDR_T(mctx.ss.r1);
	}

	/* put siginfo on top */

	/* preserve RED ZONE area */
	if (IS_64BIT_PROCESS(p))
		sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
	else
		sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);

	/* next are the saved registers */
	if ((ctx32 == 0) || dualcontext) {
		sp -= sizeof(struct mcontext64);
		p_mctx64 = sp;
	}
	if ((ctx32 == 1) || dualcontext) {
		sp -= sizeof(struct mcontext);
		p_mctx = sp;
	}

	if (IS_64BIT_PROCESS(p)) {
		/* context goes first on stack */
		sp -= sizeof(struct user_ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(user64_siginfo_t);
		p_sinfo = sp;

		sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
	} else {
		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyout().
		 */

		/* context goes first on stack */
		sp -= sizeof(struct ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(user32_siginfo_t);
		p_sinfo = sp;

		sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
	}
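	/*
	 * Rough layout of the frame built above, from higher to lower
	 * addresses:
	 *
	 *	interrupted user frame / red zone
	 *	mcontext64	(64 bit or dual context only)
	 *	mcontext	(32 bit or dual context only)
	 *	ucontext
	 *	siginfo
	 *	param save + linkage area for the trampoline
	 *	sp -> handed to the signal trampoline in r1
	 */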

	uctx.uc_onstack = oonstack;
	uctx.uc_sigmask = mask;
	uctx.uc_stack.ss_sp = sp;
	uctx.uc_stack.ss_size = stack_size;
	if (oonstack)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;

	uctx.uc_link = 0;
	if (ctx32 == 0)
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
	else
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));

	if (vec_used)
		uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));

	if (ctx32 == 0)
		uctx.uc_mcontext64 = p_mctx64;
	else
		uctx.uc_mcontext64 = p_mctx;

	/* setup siginfo */
	bzero((caddr_t)&sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;
	if (ctx32 == 0) {
		sinfo.si_addr = mctx64.ss.srr0;
		sinfo.pad[0] = mctx64.ss.r1;
	} else {
		sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
		sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
	}

	switch (sig) {
	case SIGILL:
		/*
		 * If it's 64 bit and not a dual context, mctx will
		 * contain uninitialized data, so we have to use
		 * mctx64 here.
		 */
		if (ctx32 == 0) {
			if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
				sinfo.si_code = ILL_ILLOPC;
			else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
				sinfo.si_code = ILL_PRVOPC;
			else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
				sinfo.si_code = ILL_ILLTRP;
			else
				sinfo.si_code = ILL_NOOP;
		} else {
			if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
				sinfo.si_code = ILL_ILLOPC;
			else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
				sinfo.si_code = ILL_PRVOPC;
			else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
				sinfo.si_code = ILL_ILLTRP;
			else
				sinfo.si_code = ILL_NOOP;
		}
		break;
	case SIGFPE:
#define FPSCR_VX	2
#define FPSCR_OX	3
#define FPSCR_UX	4
#define FPSCR_ZX	5
#define FPSCR_XX	6
		/*
		 * If it's 64 bit and not a dual context, mctx will
		 * contain uninitialized data, so we have to use
		 * mctx64 here.
		 */
		if (ctx32 == 0) {
			if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
				sinfo.si_code = FPE_FLTINV;
			else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
				sinfo.si_code = FPE_FLTOVF;
			else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
				sinfo.si_code = FPE_FLTUND;
			else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
				sinfo.si_code = FPE_FLTDIV;
			else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
				sinfo.si_code = FPE_FLTRES;
			else
				sinfo.si_code = FPE_NOOP;
		} else {
			if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
				sinfo.si_code = FPE_FLTINV;
			else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
				sinfo.si_code = FPE_FLTOVF;
			else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
				sinfo.si_code = FPE_FLTUND;
			else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
				sinfo.si_code = FPE_FLTDIV;
			else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
				sinfo.si_code = FPE_FLTRES;
			else
				sinfo.si_code = FPE_NOOP;
		}
		break;

	case SIGBUS:
		if (ctx32 == 0) {
			sinfo.si_addr = mctx64.es.dar;
		} else {
			sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
		}
		/* on ppc we generate only if EXC_PPC_UNALIGNED */
		sinfo.si_code = BUS_ADRALN;
		break;

	case SIGSEGV:
		/*
		 * If it's 64 bit and not a dual context, mctx will
		 * contain uninitialized data, so we have to use
		 * mctx64 here.
		 */
		if (ctx32 == 0) {
			sinfo.si_addr = mctx64.es.dar;
			/* First check in srr1 and then in dsisr */
			if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
				sinfo.si_code = SEGV_ACCERR;
			else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
				sinfo.si_code = SEGV_ACCERR;
			else
				sinfo.si_code = SEGV_MAPERR;
		} else {
			sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
			/* First check in srr1 and then in dsisr */
			if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
				sinfo.si_code = SEGV_ACCERR;
			else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
				sinfo.si_code = SEGV_ACCERR;
			else
				sinfo.si_code = SEGV_MAPERR;
		}
		break;
	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode))
				sinfo.si_code = CLD_EXITED;
			else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo.si_code = CLD_DUMPED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				} else {
					sinfo.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo.si_status = WEXITSTATUS(status_and_exitcode);
		break;
	}
	}

	/* copy info out to user space */
	if (IS_64BIT_PROCESS(p)) {
		user64_siginfo_t sinfo64;

		siginfo_user_to_user64(&sinfo, &sinfo64);

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = sinfo.si_signo;
		ut->t_dtrace_siginfo.si_code = sinfo.si_code;
		ut->t_dtrace_siginfo.si_pid = sinfo.si_pid;
		ut->t_dtrace_siginfo.si_uid = sinfo.si_uid;
		ut->t_dtrace_siginfo.si_status = sinfo.si_status;
		/* XXX truncates faulting address to void * on K32 */
		ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo.si_addr);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (sig) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}

		/* XXX truncates catcher address to uintptr_t */
		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
		    void (*)(void), CAST_DOWN(sig_t, catcher));
#endif /* CONFIG_DTRACE */

		if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
			goto bad;
		if (copyout(&sinfo64, p_sinfo, sizeof(sinfo64)))
			goto bad;
	} else {
		struct ucontext64 uctx32;
		user32_siginfo_t sinfo32;

		ucontext_64to32(&uctx, &uctx32);
		siginfo_user_to_user32(&sinfo, &sinfo32);

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = sinfo.si_signo;
		ut->t_dtrace_siginfo.si_code = sinfo.si_code;
		ut->t_dtrace_siginfo.si_pid = sinfo.si_pid;
		ut->t_dtrace_siginfo.si_uid = sinfo.si_uid;
		ut->t_dtrace_siginfo.si_status = sinfo.si_status;
		ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo.si_addr);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (sig) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}

		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
		    void (*)(void), CAST_DOWN(sig_t, catcher));
#endif /* CONFIG_DTRACE */

		if (copyout(&uctx32, p_uctx, sizeof(struct ucontext64)))
			goto bad;

		if (copyout(&sinfo32, p_sinfo, sizeof(sinfo32)))
			goto bad;
	}
	if ((ctx32 == 0) || dualcontext) {
		/*
		 * NOTE: The size of the mcontext does not vary between 64 bit
		 * processes and 32 bit programs using 64 bit registers.
		 */
		if (copyout(&mctx64, p_mctx64, (vec_used ? UC_FLAVOR64_VEC_SIZE : UC_FLAVOR64_SIZE)))
			goto bad;
	}
	if ((ctx32 == 1) || dualcontext) {
		if (copyout(&mctx, p_mctx, uctx.uc_mcsize))
			goto bad;
	}

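	/*
	 * The trampoline at 'trampact' is entered with the PowerPC argument
	 * registers already loaded: r3 = handler, r4 = info style,
	 * r5 = signal number, r6 = siginfo address, r7 = ucontext address,
	 * and r1 = the new stack pointer built above.
	 */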
	/* Place our arguments in arg registers: rtm dependent */
	if (IS_64BIT_PROCESS(p)) {
		mctx64.ss.r3 = catcher;
		mctx64.ss.r4 = CAST_USER_ADDR_T(infostyle);
		mctx64.ss.r5 = CAST_USER_ADDR_T(sig);
		mctx64.ss.r6 = p_sinfo;
		mctx64.ss.r7 = p_uctx;

		mctx64.ss.srr0 = trampact;
		/* MSR_EXPORT_MASK_SET */
		mctx64.ss.srr1 = CAST_USER_ADDR_T(get_msr_exportmask());
		mctx64.ss.r1 = sp;
		state_count = PPC_THREAD_STATE64_COUNT;
		if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE64, (void *)&mctx64.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	} else {
		mctx.ss.r3 = CAST_DOWN(uint32_t,catcher);
		mctx.ss.r4 = (uint32_t)infostyle;
		mctx.ss.r5 = (uint32_t)sig;
		mctx.ss.r6 = CAST_DOWN(uint32_t,p_sinfo);
		mctx.ss.r7 = CAST_DOWN(uint32_t,p_uctx);

		mctx.ss.srr0 = CAST_DOWN(uint32_t,trampact);
		/* MSR_EXPORT_MASK_SET */
		mctx.ss.srr1 = get_msr_exportmask();
		mctx.ss.r1 = CAST_DOWN(uint32_t,sp);
		state_count = PPC_THREAD_STATE_COUNT;
		if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, (void *)&mctx.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	}

	proc_lock(p);
	return;

bad:
	proc_lock(p);
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
	return;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */

/* ARGSUSED */
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	struct user_ucontext64 uctx;

	char mactx[sizeof(struct mcontext64)];
	struct mcontext *p_mctx;
	struct mcontext64 *p_64mctx;
	int error;
	thread_t th_act;
	struct sigacts *ps = p->p_sigacts;
	sigset_t mask;
	user_addr_t action;
	uint32_t state_count;
	unsigned int state_flavor;
	struct uthread *ut;
	int vec_used = 0;
	void *tsptr, *fptr, *vptr;
	int infostyle = uap->infostyle;

	th_act = current_thread();

	ut = (struct uthread *)get_bsdthread_info(th_act);

	/*
	 * If we are being asked to change the altstack flag on the thread,
	 * we just update the flag and return (the uap->uctx is not used).
	 */
	if (infostyle == UC_SET_ALT_STACK) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		return (0);
	} else if ((unsigned int)infostyle == UC_RESET_ALT_STACK) {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		return (0);
	}

	if (IS_64BIT_PROCESS(p)) {
		error = copyin(uap->uctx, &uctx, sizeof(struct user_ucontext64));
		if (error)
			return (error);
	} else {
		struct ucontext64 uctx32;

		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyin().
		 */
		error = copyin(uap->uctx, &uctx32, sizeof(struct ucontext));
		if (error)
			return (error);
		ucontext_32to64(&uctx32, &uctx);
	}

	/* validate the machine context size */
	switch (uctx.uc_mcsize) {
	case UC_FLAVOR64_VEC_SIZE:
	case UC_FLAVOR64_SIZE:
	case UC_FLAVOR_VEC_SIZE:
	case UC_FLAVOR_SIZE:
		break;
	default:
		return (EINVAL);
	}

	/*
	 * The 64 bit process mcontext is identical to the mcontext64, so
	 * there is no conversion necessary.
	 */
	error = copyin(uctx.uc_mcontext64, mactx, uctx.uc_mcsize);
	if (error)
		return (error);

	if ((uctx.uc_onstack & 01))
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	else
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;

	ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(current_thread());

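	/*
	 * Pick which saved flavor to restore; the "_VEC" info styles fall
	 * through to their base styles after noting that vector state must
	 * be restored as well.
	 */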
	vec_used = 0;
	switch (infostyle) {
	case UC_FLAVOR64_VEC:
	case UC_TRAD64_VEC:
		vec_used = 1;
	case UC_TRAD64:
	case UC_FLAVOR64: {
		p_64mctx = (struct mcontext64 *)mactx;
		tsptr = (void *)&p_64mctx->ss;
		fptr = (void *)&p_64mctx->fs;
		vptr = (void *)&p_64mctx->vs;
		state_flavor = PPC_THREAD_STATE64;
		state_count = PPC_THREAD_STATE64_COUNT;
		break;
	}
	case UC_FLAVOR_VEC:
	case UC_TRAD_VEC:
		vec_used = 1;
	case UC_FLAVOR:
	case UC_TRAD:
	default: {
		p_mctx = (struct mcontext *)mactx;
		tsptr = (void *)&p_mctx->ss;
		fptr = (void *)&p_mctx->fs;
		vptr = (void *)&p_mctx->vs;
		state_flavor = PPC_THREAD_STATE;
		state_count = PPC_THREAD_STATE_COUNT;
		break;
	}
	} /* switch () */

	/* validate the thread state, set/reset appropriate mode bits in srr1 */
	(void)ppc_checkthreadstate(tsptr, state_flavor);

	if (thread_setstatus(th_act, state_flavor, tsptr, state_count) != KERN_SUCCESS) {
		return (EINVAL);
	}

	state_count = PPC_FLOAT_STATE_COUNT;
	if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, state_count) != KERN_SUCCESS) {
		return (EINVAL);
	}

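	/*
	 * If the process is still catching SIGFPE (not blocked, not ignored,
	 * and not defaulted), re-enable floating point exceptions for this
	 * thread.
	 */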
	mask = sigmask(SIGFPE);
	if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
		action = ps->ps_sigact[SIGFPE];
		if ((action != SIG_DFL) && (action != SIG_IGN)) {
			thread_enable_fpe(th_act, 1);
		}
	}

	if (vec_used) {
		state_count = PPC_VECTOR_STATE_COUNT;
		if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, state_count) != KERN_SUCCESS) {
			return (EINVAL);
		}
	}
	return (EJUSTRETURN);
}

/*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
 */

boolean_t
machine_exception(
	int exception,
	mach_exception_code_t code,
	__unused mach_exception_subcode_t subcode,
	int *unix_signal,
	mach_exception_code_t *unix_code)
{
	switch (exception) {

	case EXC_BAD_INSTRUCTION:
		*unix_signal = SIGILL;
		*unix_code = code;
		break;

	case EXC_ARITHMETIC:
		*unix_signal = SIGFPE;
		*unix_code = code;
		break;

	case EXC_SOFTWARE:
		if (code == EXC_PPC_TRAP) {
			*unix_signal = SIGTRAP;
			*unix_code = code;
			break;
		} else
			return (FALSE);

	default:
		return (FALSE);
	}

	return (TRUE);
}