/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <ppc/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/wait.h>
#include <kern/thread.h>
#include <mach/ppc/thread_status.h>
#include <ppc/proc_reg.h>

// #include <machine/thread.h> XXX include path messed up for some reason...

/* XXX functions not in Mach headers */
extern kern_return_t thread_getstatus(register thread_t act, int flavor,
        thread_state_t tstate, mach_msg_type_number_t *count);
extern int is_64signalregset(void);
extern unsigned int get_msr_exportmask(void);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
        thread_state_t tstate, mach_msg_type_number_t count);
extern void ppc_checkthreadstate(void *, int);
extern struct savearea_vec *find_user_vec_curr(void);
extern int thread_enable_fpe(thread_t act, int onoff);


#define C_32_REDZONE_LEN 224
#define C_32_STK_ALIGN 16
#define C_32_PARAMSAVE_LEN 64
#define C_32_LINKAGE_LEN 48

#define C_64_REDZONE_LEN 320
#define C_64_STK_ALIGN 32
#define C_64_PARAMSAVE_LEN 64
#define C_64_LINKAGE_LEN 48

#define TRUNC_DOWN32(a,b,c) ((((uint32_t)a)-(b)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a,b,c) ((((uint64_t)a)-(b)) & ((uint64_t)(-(c))))
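
/*
 * TRUNC_DOWN32/64 drop the stack pointer by 'b' bytes (for example, the
 * red zone) and then round down to a 'c'-byte boundary; 'c' must be a
 * power of two, so (uint32_t)(-(c)) is simply the alignment mask.  A
 * worked example for the 32-bit case:
 *
 *     TRUNC_DOWN32(0xbffff8a4, C_32_REDZONE_LEN, C_32_STK_ALIGN)
 *         = (0xbffff8a4 - 224) & 0xfffffff0
 *         = 0xbffff7c0
 */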

/*
 * The stack layout possibilities (info style); this needs to match the
 * signal trampoline code.
 *
 * Traditional:                  1
 * Traditional with vec:         6
 * Traditional64:               20
 * Traditional64 with vec:      25
 * 32bit context:               30
 * 32bit context with vector:   35
 * 64bit context:               40
 * 64bit context with vector:   45
 * Dual context:                50
 * Dual context with vector:    55
 */

#define UC_TRAD 1
#define UC_TRAD_VEC 6
#define UC_TRAD64 20
#define UC_TRAD64_VEC 25
#define UC_FLAVOR 30
#define UC_FLAVOR_VEC 35
#define UC_FLAVOR64 40
#define UC_FLAVOR64_VEC 45
#define UC_DUAL 50
#define UC_DUAL_VEC 55
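
/*
 * Each "with vec" variant is its base style plus 5 (UC_TRAD/UC_TRAD_VEC,
 * UC_FLAVOR/UC_FLAVOR_VEC, and so on); sendsig() relies on that spacing
 * when it bumps infostyle by 5 after gathering the vector state.
 */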

/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))

#define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
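
/*
 * These four sizes are exactly the uc_mcsize values that sigreturn()
 * will accept; anything else fails its validation with EINVAL.
 */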

/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_32to64(struct ucontext64 *in, struct user_ucontext64 *out)
{
    out->uc_onstack = in->uc_onstack;
    out->uc_sigmask = in->uc_sigmask;

    /* internal "structure assign" */
    out->uc_stack.ss_sp = CAST_USER_ADDR_T(in->uc_stack.ss_sp);
    out->uc_stack.ss_size = in->uc_stack.ss_size;
    out->uc_stack.ss_flags = in->uc_stack.ss_flags;

    out->uc_link = CAST_USER_ADDR_T(in->uc_link);
    out->uc_mcsize = in->uc_mcsize;
    out->uc_mcontext64 = CAST_USER_ADDR_T(in->uc_mcontext64);
}

/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then its values of uc_stack.ss_size and uc_mcsize will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap!
 */
static void
ucontext_64to32(struct user_ucontext64 *in, struct ucontext64 *out)
{
    out->uc_onstack = in->uc_onstack;
    out->uc_sigmask = in->uc_sigmask;

    /* internal "structure assign" */
    out->uc_stack.ss_sp = CAST_DOWN(void *, in->uc_stack.ss_sp);
    out->uc_stack.ss_size = in->uc_stack.ss_size;	/* range reduction */
    out->uc_stack.ss_flags = in->uc_stack.ss_flags;

    out->uc_link = CAST_DOWN(void *, in->uc_link);
    out->uc_mcsize = in->uc_mcsize;			/* range reduction */
    out->uc_mcontext64 = CAST_DOWN(void *, in->uc_mcontext64);
}

/*
 * NOTE: Source and target may *NOT* overlap!
 */
static void
siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
{
    out->si_signo = in->si_signo;
    out->si_errno = in->si_errno;
    out->si_code = in->si_code;
    out->si_pid = in->si_pid;
    out->si_uid = in->si_uid;
    out->si_status = in->si_status;
    out->si_addr = CAST_DOWN(void *, in->si_addr);
    /* following cast works for sival_int because of padding */
    out->si_value.sival_ptr = CAST_DOWN(void *, in->si_value.sival_ptr);
    out->si_band = in->si_band;	/* range reduction */
    out->pad[0] = in->pad[0];	/* mcontext.ss.r1 */
}


/*
 * Arrange for this process to run a signal handler
 */

void
sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code)
{
    kern_return_t kretn;
    struct mcontext mctx;
    user_addr_t p_mctx = USER_ADDR_NULL;	/* mcontext dest. */
    struct mcontext64 mctx64;
    user_addr_t p_mctx64 = USER_ADDR_NULL;	/* mcontext dest. */
    struct user_ucontext64 uctx;
    user_addr_t p_uctx;		/* user stack addr to copy the ucontext to */
    user_siginfo_t sinfo;
    user_addr_t p_sinfo;	/* user stack addr to copy the siginfo to */
    struct sigacts *ps = p->p_sigacts;
    int oonstack;
    user_addr_t sp;
    mach_msg_type_number_t state_count;
    thread_t th_act;
    struct uthread *ut;
    int infostyle = UC_TRAD;
    int dualcontext = 0;
    user_addr_t trampact;
    int vec_used = 0;
    int stack_size = 0;
    void *tstate;
    int flavor;
    int ctx32 = 1;

    th_act = current_thread();
    ut = get_bsdthread_info(th_act);


    if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
        infostyle = UC_FLAVOR;
    }
    if (is_64signalregset() && (infostyle == UC_FLAVOR)) {
        dualcontext = 1;
        infostyle = UC_DUAL;
    }
    if (p->p_sigacts->ps_64regset & sigmask(sig)) {
        dualcontext = 0;
        ctx32 = 0;
        infostyle = UC_FLAVOR64;
    }
    /* treat 64 bit processes as having used 64 bit registers */
    if ((IS_64BIT_PROCESS(p) || is_64signalregset()) &&
        (infostyle == UC_TRAD)) {
        ctx32 = 0;
        infostyle = UC_TRAD64;
    }
    if (IS_64BIT_PROCESS(p)) {
        ctx32 = 0;
        dualcontext = 0;
    }

    /* I need this for SIGINFO anyway */
    flavor = PPC_THREAD_STATE;
    tstate = (void *)&mctx.ss;
    state_count = PPC_THREAD_STATE_COUNT;
    if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
        goto bad;

    if ((ctx32 == 0) || dualcontext) {
        flavor = PPC_THREAD_STATE64;
        tstate = (void *)&mctx64.ss;
        state_count = PPC_THREAD_STATE64_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 1) || dualcontext) {
        flavor = PPC_EXCEPTION_STATE;
        tstate = (void *)&mctx.es;
        state_count = PPC_EXCEPTION_STATE_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 0) || dualcontext) {
        flavor = PPC_EXCEPTION_STATE64;
        tstate = (void *)&mctx64.es;
        state_count = PPC_EXCEPTION_STATE64_COUNT;

        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 1) || dualcontext) {
        flavor = PPC_FLOAT_STATE;
        tstate = (void *)&mctx.fs;
        state_count = PPC_FLOAT_STATE_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if ((ctx32 == 0) || dualcontext) {
        flavor = PPC_FLOAT_STATE;
        tstate = (void *)&mctx64.fs;
        state_count = PPC_FLOAT_STATE_COUNT;
        if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
            goto bad;
    }

    if (find_user_vec_curr()) {
        vec_used = 1;

        if ((ctx32 == 1) || dualcontext) {
            flavor = PPC_VECTOR_STATE;
            tstate = (void *)&mctx.vs;
            state_count = PPC_VECTOR_STATE_COUNT;
            if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
                goto bad;
            infostyle += 5;
        }

        if ((ctx32 == 0) || dualcontext) {
            flavor = PPC_VECTOR_STATE;
            tstate = (void *)&mctx64.vs;
            state_count = PPC_VECTOR_STATE_COUNT;
            if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
                goto bad;
            infostyle += 5;
        }
    }

    trampact = ps->ps_trampact[sig];
    oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

    /* figure out where our new stack lives */
    if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
        (ps->ps_sigonstack & sigmask(sig))) {
        sp = ps->ps_sigstk.ss_sp;
        sp += ps->ps_sigstk.ss_size;
        stack_size = ps->ps_sigstk.ss_size;
        ps->ps_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        if (ctx32 == 0)
            sp = mctx64.ss.r1;
        else
            sp = CAST_USER_ADDR_T(mctx.ss.r1);
    }

    /* put siginfo on top */

    /* preserve RED ZONE area */
    if (IS_64BIT_PROCESS(p))
        sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
    else
        sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);

    /* next are the saved registers */
    if ((ctx32 == 0) || dualcontext) {
        sp -= sizeof(struct mcontext64);
        p_mctx64 = sp;
    }
    if ((ctx32 == 1) || dualcontext) {
        sp -= sizeof(struct mcontext);
        p_mctx = sp;
    }

    if (IS_64BIT_PROCESS(p)) {
        /* context goes first on stack */
        sp -= sizeof(struct user_ucontext64);
        p_uctx = sp;

        /* this is where siginfo goes on stack */
        sp -= sizeof(user_siginfo_t);
        p_sinfo = sp;

        sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
    } else {
        /*
         * struct ucontext and struct ucontext64 are identical in
         * size and content; the only difference is the internal
         * pointer type for the last element, which makes no
         * difference for the copyout().
         */

        /* context goes first on stack */
        sp -= sizeof(struct ucontext64);
        p_uctx = sp;

        /* this is where siginfo goes on stack */
        sp -= sizeof(siginfo_t);
        p_sinfo = sp;

        sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
    }

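    /*
     * At this point the signal frame has been carved out of the user
     * stack.  From high to low addresses it looks roughly like this
     * (an illustrative sketch; the exact pieces present depend on the
     * context flavor chosen above):
     *
     *    interrupted sp (or top of the alternate signal stack)
     *    red zone (C_32/C_64_REDZONE_LEN bytes, left untouched)
     *    struct mcontext64   <- p_mctx64  (64-bit and/or dual context)
     *    struct mcontext     <- p_mctx    (32-bit and/or dual context)
     *    ucontext            <- p_uctx
     *    siginfo             <- p_sinfo
     *    param save + linkage area for the call into the trampoline
     *    new, aligned sp     -> becomes the handler's r1
     */
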
    uctx.uc_onstack = oonstack;
    uctx.uc_sigmask = mask;
    uctx.uc_stack.ss_sp = sp;
    uctx.uc_stack.ss_size = stack_size;
    if (oonstack)
        uctx.uc_stack.ss_flags |= SS_ONSTACK;

    uctx.uc_link = 0;
    if (ctx32 == 0)
        uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
    else
        uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));

    if (vec_used)
        uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));

    if (ctx32 == 0)
        uctx.uc_mcontext64 = p_mctx64;
    else
        uctx.uc_mcontext64 = p_mctx;

    /* setup siginfo */
    bzero((caddr_t)&sinfo, sizeof(user_siginfo_t));
    sinfo.si_signo = sig;
    if (ctx32 == 0) {
        sinfo.si_addr = mctx64.ss.srr0;
        sinfo.pad[0] = mctx64.ss.r1;
    } else {
        sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
        sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
    }

    switch (sig) {
    case SIGCHLD:
        sinfo.si_pid = p->si_pid;
        p->si_pid = 0;
        sinfo.si_status = p->si_status;
        p->si_status = 0;
        sinfo.si_uid = p->si_uid;
        p->si_uid = 0;
        sinfo.si_code = p->si_code;
        p->si_code = 0;
        if (sinfo.si_code == CLD_EXITED) {
            if (WIFEXITED(sinfo.si_status))
                sinfo.si_code = CLD_EXITED;
            else if (WIFSIGNALED(sinfo.si_status)) {
                if (WCOREDUMP(sinfo.si_status))
                    sinfo.si_code = CLD_DUMPED;
                else
                    sinfo.si_code = CLD_KILLED;
            }
        }
        break;
    case SIGILL:
        /*
         * If it's 64 bit and not a dual context, mctx will
         * contain uninitialized data, so we have to use
         * mctx64 here.
         */
        if (ctx32 == 0) {
            if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
                sinfo.si_code = ILL_ILLOPC;
            else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
                sinfo.si_code = ILL_PRVOPC;
            else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
                sinfo.si_code = ILL_ILLTRP;
            else
                sinfo.si_code = ILL_NOOP;
        } else {
            if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
                sinfo.si_code = ILL_ILLOPC;
            else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
                sinfo.si_code = ILL_PRVOPC;
            else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
                sinfo.si_code = ILL_ILLTRP;
            else
                sinfo.si_code = ILL_NOOP;
        }
        break;
    case SIGFPE:
#define FPSCR_VX 2
#define FPSCR_OX 3
#define FPSCR_UX 4
#define FPSCR_ZX 5
#define FPSCR_XX 6
        /*
         * If it's 64 bit and not a dual context, mctx will
         * contain uninitialized data, so we have to use
         * mctx64 here.
         */
        if (ctx32 == 0) {
            if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
                sinfo.si_code = FPE_FLTINV;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
                sinfo.si_code = FPE_FLTOVF;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
                sinfo.si_code = FPE_FLTUND;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
                sinfo.si_code = FPE_FLTDIV;
            else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
                sinfo.si_code = FPE_FLTRES;
            else
                sinfo.si_code = FPE_NOOP;
        } else {
            if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
                sinfo.si_code = FPE_FLTINV;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
                sinfo.si_code = FPE_FLTOVF;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
                sinfo.si_code = FPE_FLTUND;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
                sinfo.si_code = FPE_FLTDIV;
            else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
                sinfo.si_code = FPE_FLTRES;
            else
                sinfo.si_code = FPE_NOOP;
        }
        break;

    case SIGBUS:
        if (ctx32 == 0) {
            sinfo.si_addr = mctx64.es.dar;
        } else {
            sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
        }
        /* on ppc we generate only if EXC_PPC_UNALIGNED */
        sinfo.si_code = BUS_ADRALN;
        break;

    case SIGSEGV:
        /*
         * If it's 64 bit and not a dual context, mctx will
         * contain uninitialized data, so we have to use
         * mctx64 here.
         */
        if (ctx32 == 0) {
            sinfo.si_addr = mctx64.es.dar;
            /* First check in srr1 and then in dsisr */
            if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else
                sinfo.si_code = SEGV_MAPERR;
        } else {
            sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
            /* First check in srr1 and then in dsisr */
            if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
                sinfo.si_code = SEGV_ACCERR;
            else
                sinfo.si_code = SEGV_MAPERR;
        }
        break;
    default:
        break;
    }

    /* copy info out to user space */
    if (IS_64BIT_PROCESS(p)) {
        if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
            goto bad;
        if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t)))
            goto bad;
    } else {
        struct ucontext64 uctx32;
        siginfo_t sinfo32;

        ucontext_64to32(&uctx, &uctx32);
        if (copyout(&uctx32, p_uctx, sizeof(struct ucontext64)))
            goto bad;

        siginfo_64to32(&sinfo, &sinfo32);
        if (copyout(&sinfo32, p_sinfo, sizeof(siginfo_t)))
            goto bad;
    }
    if ((ctx32 == 0) || dualcontext) {
        /*
         * NOTE: The size of the mcontext does not differ between
         * 64-bit processes and 32-bit programs using 64-bit registers.
         */
        if (copyout(&mctx64, p_mctx64, (vec_used ? UC_FLAVOR64_VEC_SIZE : UC_FLAVOR64_SIZE)))
            goto bad;
    }
    if ((ctx32 == 1) || dualcontext) {
        if (copyout(&mctx, p_mctx, uctx.uc_mcsize))
            goto bad;
    }


    /* Place our arguments in arg registers: rtm dependent */
    if (IS_64BIT_PROCESS(p)) {
        mctx64.ss.r3 = catcher;
        mctx64.ss.r4 = CAST_USER_ADDR_T(infostyle);
        mctx64.ss.r5 = CAST_USER_ADDR_T(sig);
        mctx64.ss.r6 = p_sinfo;
        mctx64.ss.r7 = p_uctx;

        mctx64.ss.srr0 = trampact;
        /* MSR_EXPORT_MASK_SET */
        mctx64.ss.srr1 = CAST_USER_ADDR_T(get_msr_exportmask());
        mctx64.ss.r1 = sp;
        state_count = PPC_THREAD_STATE64_COUNT;
        if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE64, (void *)&mctx64.ss, state_count)) != KERN_SUCCESS) {
            panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
        }
    } else {
        mctx.ss.r3 = CAST_DOWN(unsigned long, catcher);
        mctx.ss.r4 = (unsigned long)infostyle;
        mctx.ss.r5 = (unsigned long)sig;
        mctx.ss.r6 = CAST_DOWN(unsigned long, p_sinfo);
        mctx.ss.r7 = CAST_DOWN(unsigned long, p_uctx);

        mctx.ss.srr0 = CAST_DOWN(unsigned long, trampact);
        /* MSR_EXPORT_MASK_SET */
        mctx.ss.srr1 = get_msr_exportmask();
        mctx.ss.r1 = CAST_DOWN(unsigned long, sp);
        state_count = PPC_THREAD_STATE_COUNT;
        if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, (void *)&mctx.ss, state_count)) != KERN_SUCCESS) {
            panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
        }
    }
    return;

bad:
    SIGACTION(p, SIGILL) = SIG_DFL;
    sig = sigmask(SIGILL);
    p->p_sigignore &= ~sig;
    p->p_sigcatch &= ~sig;
    ut->uu_sigmask &= ~sig;
    /* sendsig is called with signal lock held */
    psignal_lock(p, SIGILL, 0);
    return;
}
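
/*
 * Note on the return path: the user-space signal trampoline installed at
 * ps_trampact runs with r3..r7 set up as above (handler, info style,
 * signal number, siginfo and ucontext pointers) and is expected to invoke
 * the handler and then call sigreturn() below with the ucontext pointer
 * and the info style so the saved machine state can be restored.
 */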

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */

/* ARGSUSED */
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
    struct user_ucontext64 uctx;

    char mactx[sizeof(struct mcontext64)];
    struct mcontext *p_mctx;
    struct mcontext64 *p_64mctx;
    int error;
    thread_t th_act;
    struct sigacts *ps = p->p_sigacts;
    sigset_t mask;
    user_addr_t action;
    unsigned long state_count;
    unsigned int state_flavor;
    struct uthread *ut;
    int vec_used = 0;
    void *tsptr, *fptr, *vptr;
    int infostyle = uap->infostyle;

    th_act = current_thread();

    ut = (struct uthread *)get_bsdthread_info(th_act);
    if (IS_64BIT_PROCESS(p)) {
        error = copyin(uap->uctx, &uctx, sizeof(struct user_ucontext64));
        if (error)
            return(error);
    } else {
        struct ucontext64 uctx32;

        /*
         * struct ucontext and struct ucontext64 are identical in
         * size and content; the only difference is the internal
         * pointer type for the last element, which makes no
         * difference for the copyin().
         */
        error = copyin(uap->uctx, &uctx32, sizeof(struct ucontext));
        if (error)
            return(error);
        ucontext_32to64(&uctx32, &uctx);
    }


    /* validate the machine context size */
    switch (uctx.uc_mcsize) {
    case UC_FLAVOR64_VEC_SIZE:
    case UC_FLAVOR64_SIZE:
    case UC_FLAVOR_VEC_SIZE:
    case UC_FLAVOR_SIZE:
        break;
    default:
        return(EINVAL);
    }

    /*
     * The 64 bit process mcontext is identical to the mcontext64, so
     * there is no conversion necessary.
     */
    error = copyin(uctx.uc_mcontext64, mactx, uctx.uc_mcsize);
    if (error)
        return(error);

    if ((uctx.uc_onstack & 01))
        p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
    else
        p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;

    ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask;
    if (ut->uu_siglist & ~ut->uu_sigmask)
        signal_setast(current_thread());

    vec_used = 0;
    switch (infostyle) {
    case UC_FLAVOR64_VEC:
    case UC_TRAD64_VEC:
        vec_used = 1;
        /* FALLTHROUGH */
    case UC_TRAD64:
    case UC_FLAVOR64: {
        p_64mctx = (struct mcontext64 *)mactx;
        tsptr = (void *)&p_64mctx->ss;
        fptr = (void *)&p_64mctx->fs;
        vptr = (void *)&p_64mctx->vs;
        state_flavor = PPC_THREAD_STATE64;
        state_count = PPC_THREAD_STATE64_COUNT;
        }
        break;
    case UC_FLAVOR_VEC:
    case UC_TRAD_VEC:
        vec_used = 1;
        /* FALLTHROUGH */
    case UC_FLAVOR:
    case UC_TRAD:
    default: {
        p_mctx = (struct mcontext *)mactx;
        tsptr = (void *)&p_mctx->ss;
        fptr = (void *)&p_mctx->fs;
        vptr = (void *)&p_mctx->vs;
        state_flavor = PPC_THREAD_STATE;
        state_count = PPC_THREAD_STATE_COUNT;
        }
        break;
    } /* switch () */

    /* validate the thread state, set/reset appropriate mode bits in srr1 */
    (void)ppc_checkthreadstate(tsptr, state_flavor);

    if (thread_setstatus(th_act, state_flavor, tsptr, state_count) != KERN_SUCCESS) {
        return(EINVAL);
    }

    state_count = PPC_FLOAT_STATE_COUNT;
    if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, state_count) != KERN_SUCCESS) {
        return(EINVAL);
    }

    mask = sigmask(SIGFPE);
    if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) {
        action = ps->ps_sigact[SIGFPE];
        if ((action != SIG_DFL) && (action != SIG_IGN)) {
            thread_enable_fpe(th_act, 1);
        }
    }

    if (vec_used) {
        state_count = PPC_VECTOR_STATE_COUNT;
        if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, state_count) != KERN_SUCCESS) {
            return(EINVAL);
        }
    }
    return (EJUSTRETURN);
}

/*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
 */

boolean_t
machine_exception(
    int exception,
    int code,
    __unused int subcode,
    int *unix_signal,
    int *unix_code
)
{
    switch (exception) {

    case EXC_BAD_INSTRUCTION:
        *unix_signal = SIGILL;
        *unix_code = code;
        break;

    case EXC_ARITHMETIC:
        *unix_signal = SIGFPE;
        *unix_code = code;
        break;

    case EXC_SOFTWARE:
        if (code == EXC_PPC_TRAP) {
            *unix_signal = SIGTRAP;
            *unix_code = code;
            break;
        } else
            return(FALSE);

    default:
        return(FALSE);

    }

    return(TRUE);
}