/* apple/xnu (xnu-4570.1.46): bsd/dev/arm/unix_signal.c */
/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <arm/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/sdt.h>
#include <sys/wait.h>
#include <kern/thread.h>
#include <mach/arm/thread_status.h>
#include <arm/proc_reg.h>

#include <kern/assert.h>
#include <pexpert/pexpert.h>

extern struct arm_saved_state *get_user_regs(thread_t);
extern user_addr_t thread_get_cthread_self(void);
extern kern_return_t thread_getstatus(thread_t act, int flavor,
	thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
/* XXX Put these someplace smarter... */
typedef struct mcontext32 mcontext32_t;
typedef struct mcontext64 mcontext64_t;

/* Signal handler flavors supported */
/* These definitions should match the Libc implementation */
#define UC_TRAD 1
#define UC_FLAVOR 30

/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE32 ((ARM_THREAD_STATE_COUNT + ARM_EXCEPTION_STATE_COUNT + ARM_VFP_STATE_COUNT) * sizeof(int))
#define UC_FLAVOR_SIZE64 ((ARM_THREAD_STATE64_COUNT + ARM_EXCEPTION_STATE64_COUNT + ARM_NEON_STATE64_COUNT) * sizeof(int))

#if __arm64__
#define C_64_REDZONE_LEN 128
#endif

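/*
 * Capture the 32-bit thread, exception, and VFP state of the target
 * thread into the mcontext that will be copied out to user space.
 */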
static int
sendsig_get_state32(thread_t th_act, mcontext32_t *mcp)
{
	void *tstate;
	mach_msg_type_number_t state_count;

	assert(!proc_is64bit(current_proc()));

	tstate = (void *) &mcp->ss;
	state_count = ARM_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS)
		return EINVAL;

	tstate = (void *) &mcp->es;
	state_count = ARM_EXCEPTION_STATE_COUNT;
	if (thread_getstatus(th_act, ARM_EXCEPTION_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS)
		return EINVAL;

	tstate = (void *) &mcp->fs;
	state_count = ARM_VFP_STATE_COUNT;
	if (thread_getstatus(th_act, ARM_VFP_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS)
		return EINVAL;

	return 0;
}

#if defined(__arm64__)
struct user_sigframe64 {
	/* We can pass the last arg in a register for ARM64 */
	user64_siginfo_t sinfo;
	struct user_ucontext64 uctx;
	mcontext64_t mctx;
};

static int
sendsig_get_state64(thread_t th_act, mcontext64_t *mcp)
{
	void *tstate;
	mach_msg_type_number_t state_count;

	assert(proc_is64bit(current_proc()));

	tstate = (void *) &mcp->ss;
	state_count = ARM_THREAD_STATE64_COUNT;
	if (thread_getstatus(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS)
		return EINVAL;

	tstate = (void *) &mcp->es;
	state_count = ARM_EXCEPTION_STATE64_COUNT;
	if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS)
		return EINVAL;

	tstate = (void *) &mcp->ns;
	state_count = ARM_NEON_STATE64_COUNT;
	if (thread_getstatus(th_act, ARM_NEON_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS)
		return EINVAL;

	return 0;
}

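/*
 * Populate the 64-bit user ucontext: record the signal mask, the
 * (alternate) stack in use, and the user-space address and size of
 * the mcontext that sendsig() copies out alongside it.
 */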
static void
sendsig_fill_uctx64(user_ucontext64_t *uctx, int oonstack, int mask, user64_addr_t sp, user64_size_t stack_size, user64_addr_t p_mctx)
{
	bzero(uctx, sizeof(*uctx));
	uctx->uc_onstack = oonstack;
	uctx->uc_sigmask = mask;
	uctx->uc_stack.ss_sp = sp;
	uctx->uc_stack.ss_size = stack_size;
	if (oonstack)
		uctx->uc_stack.ss_flags |= SS_ONSTACK;
	uctx->uc_link = (user64_addr_t)0;
	uctx->uc_mcsize = (user64_size_t) UC_FLAVOR_SIZE64;
	uctx->uc_mcontext64 = (user64_addr_t) p_mctx;
}

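/*
 * Point the 64-bit thread at the signal trampoline: the handler
 * arguments go in x0-x4, pc is set to the trampoline, and sp to the
 * newly built signal frame.
 */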
static kern_return_t
sendsig_set_thread_state64(arm_thread_state64_t *regs,
	user64_addr_t catcher, int infostyle, int sig, user64_addr_t p_sinfo,
	user64_addr_t p_uctx, user64_addr_t trampact, user64_addr_t sp, thread_t th_act)
{
	assert(proc_is64bit(current_proc()));

	regs->x[0] = catcher;
	regs->x[1] = infostyle;
	regs->x[2] = sig;
	regs->x[3] = p_sinfo;
	regs->x[4] = p_uctx;
	regs->pc = trampact;
	regs->cpsr = PSR64_USER64_DEFAULT;
	regs->sp = sp;

	return thread_setstatus(th_act, ARM_THREAD_STATE64, (void *)regs, ARM_THREAD_STATE64_COUNT);
}
#endif /* defined(__arm64__) */

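/* 32-bit counterpart of sendsig_fill_uctx64() above. */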
static void
sendsig_fill_uctx32(user_ucontext32_t *uctx, int oonstack, int mask, user_addr_t sp, user_size_t stack_size, user_addr_t p_mctx)
{
	bzero(uctx, sizeof(*uctx));
	uctx->uc_onstack = oonstack;
	uctx->uc_sigmask = mask;
	uctx->uc_stack.ss_sp = (user32_addr_t) sp;
	uctx->uc_stack.ss_size = (user32_size_t) stack_size;
	if (oonstack)
		uctx->uc_stack.ss_flags |= SS_ONSTACK;
	uctx->uc_link = (user32_addr_t)0;
	uctx->uc_mcsize = (user32_size_t) UC_FLAVOR_SIZE32;
	uctx->uc_mcontext = (user32_addr_t) p_mctx;
}

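/*
 * Redirect the 32-bit thread to the signal trampoline.  Bit 0 of the
 * trampoline address selects Thumb mode, as in an ARM interworking
 * branch.
 */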
static kern_return_t
sendsig_set_thread_state32(arm_thread_state_t *regs,
	user32_addr_t catcher, int infostyle, int sig, user32_addr_t p_sinfo,
	user32_addr_t trampact, user32_addr_t sp, thread_t th_act)
{
	assert(!proc_is64bit(current_proc()));

	regs->r[0] = catcher;
	regs->r[1] = infostyle;
	regs->r[2] = sig;
	regs->r[3] = p_sinfo;
	if (trampact & 1) {
		regs->pc = trampact & ~1;
#if defined(__arm64__)
		regs->cpsr = PSR64_USER32_DEFAULT | PSR64_MODE_USER32_THUMB;
#elif defined(__arm__)
		regs->cpsr = PSR_USERDFLT | PSR_TF;
#else
#error Unknown architecture.
#endif
	} else {
		regs->pc = trampact;
		regs->cpsr = PSR_USERDFLT;
	}
	regs->sp = sp;

	return thread_setstatus(th_act, ARM_THREAD_STATE, (void *)regs, ARM_THREAD_STATE_COUNT);
}

#if CONFIG_DTRACE
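/* Mirror the delivered siginfo into the uthread and fire the DTrace proc probes. */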
static void
sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catcher)
{
	bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

	ut->t_dtrace_siginfo.si_signo = sinfo->si_signo;
	ut->t_dtrace_siginfo.si_code = sinfo->si_code;
	ut->t_dtrace_siginfo.si_pid = sinfo->si_pid;
	ut->t_dtrace_siginfo.si_uid = sinfo->si_uid;
	ut->t_dtrace_siginfo.si_status = sinfo->si_status;
	/* XXX truncates faulting address to void * */
	ut->t_dtrace_siginfo.si_addr = CAST_DOWN_EXPLICIT(void *, sinfo->si_addr);

	/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
	switch (sig) {
	case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
		DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
		break;
	default:
		break;
	}

	/* XXX truncates faulting address to uintptr_t */
	DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
	    void (*)(void), CAST_DOWN(sig_t, catcher));
}
#endif

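/*
 * 32-bit signal frame.  The ucontext pointer (puctx) is carried in the
 * frame itself; the 64-bit ABI passes it in a register instead (see
 * user_sigframe64 above).
 */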
struct user_sigframe32 {
	user32_addr_t puctx;
	user32_siginfo_t sinfo;
	struct user_ucontext32 uctx;
	mcontext32_t mctx;
};

/*
 * Send an interrupt to process.
 */
void
sendsig(
	struct proc * p,
	user_addr_t catcher,
	int sig,
	int mask,
	__unused uint32_t code
	)
{
	union {
		struct user_sigframe32 uf32;
#if defined(__arm64__)
		struct user_sigframe64 uf64;
#endif
	} user_frame;

	user_siginfo_t sinfo;
	user_addr_t sp = 0, trampact;
	struct sigacts *ps = p->p_sigacts;
	int oonstack, infostyle;
	thread_t th_act;
	struct uthread *ut;
	user_size_t stack_size = 0;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	bzero(&user_frame, sizeof(user_frame));

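	/*
	 * Select the handler flavor: UC_FLAVOR when the handler was installed
	 * with SA_SIGINFO, UC_TRAD otherwise.
	 */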
	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;
	else
		infostyle = UC_TRAD;

	trampact = ps->ps_trampact[sig];
	oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

	/*
	 * Get sundry thread state.
	 */
	if (proc_is64bit(p)) {
#ifdef __arm64__
		if (sendsig_get_state64(th_act, &user_frame.uf64.mctx) != 0) {
			goto bad2;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		if (sendsig_get_state32(th_act, &user_frame.uf32.mctx) != 0) {
			goto bad2;
		}
	}

	/*
	 * Figure out where our new stack lives.
	 */
	if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		sp = ps->ps_sigstk.ss_sp;
		sp += ps->ps_sigstk.ss_size;
		stack_size = ps->ps_sigstk.ss_size;
		ps->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		/*
		 * Get stack pointer, and allocate enough space
		 * for signal handler data.
		 */
		if (proc_is64bit(p)) {
#if defined(__arm64__)
			sp = CAST_USER_ADDR_T(user_frame.uf64.mctx.ss.sp);
			sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN) & ~0xf; /* Make sure to align to 16 bytes and respect red zone */
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sp = CAST_USER_ADDR_T(user_frame.uf32.mctx.ss.sp);
			sp -= sizeof(user_frame.uf32);
#if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4)
			sp &= ~0xf; /* Make sure to align to 16 bytes for armv7k */
#endif
		}
	}

	proc_unlock(p);

	/*
	 * Fill in ucontext (points to mcontext, i.e. thread states).
	 */
	if (proc_is64bit(p)) {
#if defined(__arm64__)
		sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size,
		    (user64_addr_t)&((struct user_sigframe64*)sp)->mctx);
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size,
		    (user32_addr_t)&((struct user_sigframe32*)sp)->mctx);
	}

	/*
	 * Setup siginfo.
	 */
	bzero((caddr_t) & sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;

	if (proc_is64bit(p)) {
#if defined(__arm64__)
		sinfo.si_addr = user_frame.uf64.mctx.ss.pc;
		sinfo.pad[0] = user_frame.uf64.mctx.ss.sp;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sinfo.si_addr = user_frame.uf32.mctx.ss.pc;
		sinfo.pad[0] = user_frame.uf32.mctx.ss.sp;
	}

	switch (sig) {
	case SIGILL:
#ifdef BER_XXX
		if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
			sinfo.si_code = ILL_ILLOPC;
		else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
			sinfo.si_code = ILL_PRVOPC;
		else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
			sinfo.si_code = ILL_ILLTRP;
		else
			sinfo.si_code = ILL_NOOP;
#else
		sinfo.si_code = ILL_ILLTRP;
#endif
		break;

	case SIGFPE:
		break;

	case SIGBUS:
		if (proc_is64bit(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

		sinfo.si_code = BUS_ADRALN;
		break;

	case SIGSEGV:
		if (proc_is64bit(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

#ifdef BER_XXX
		/* First check in srr1 and then in dsisr */
		if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
			sinfo.si_code = SEGV_ACCERR;
		else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
			sinfo.si_code = SEGV_ACCERR;
		else
			sinfo.si_code = SEGV_MAPERR;
#else
		sinfo.si_code = SEGV_ACCERR;
#endif
		break;

	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode))
				sinfo.si_code = CLD_EXITED;
			else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo.si_code = CLD_DUMPED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
				} else {
					sinfo.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
		p->p_xhighbits = 0;
		break;
	}
	}

#if CONFIG_DTRACE
	sendsig_do_dtrace(ut, &sinfo, sig, catcher);
#endif /* CONFIG_DTRACE */

	/*
	 * Copy signal-handling frame out to user space, set thread state.
	 */
	if (proc_is64bit(p)) {
#if defined(__arm64__)
		/*
		 * mctx filled in when we get state. uctx filled in by
		 * sendsig_fill_uctx64(). We fill in the sinfo now.
		 */
		siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);

		if (copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64)) != 0) {
			goto bad;
		}

		if (sendsig_set_thread_state64(&user_frame.uf64.mctx.ss,
		    catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo,
		    (user64_addr_t)&((struct user_sigframe64*)sp)->uctx, trampact, sp, th_act) != KERN_SUCCESS)
			goto bad;

#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		/*
		 * mctx filled in when we get state. uctx filled in by
		 * sendsig_fill_uctx32(). We fill in the sinfo and *pointer*
		 * to uctx now.
		 */
		siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);
		user_frame.uf32.puctx = (user32_addr_t) &((struct user_sigframe32*)sp)->uctx;

		if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
			goto bad;
		}

		if (sendsig_set_thread_state32(&user_frame.uf32.mctx.ss,
		    CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo,
		    CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS)
			goto bad;
	}

	proc_lock(p);
	return;

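/*
 * Error path: the signal frame could not be captured, built, or copied
 * out, so fall back to delivering a default-action SIGILL instead.
 */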
bad:
	proc_lock(p);
bad2:
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
}

/*
 * System call to clean up state after a signal has been taken.
 * Reset the signal mask and stack state from the context left by
 * sendsig (above), and return to the previous context.  Check
 * carefully to make sure that the user has not modified the
 * processor status register (cpsr) to gain improper privileges.
 */

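/*
 * Copy in the 32-bit ucontext and its mcontext from user space,
 * rejecting any machine-context size other than UC_FLAVOR_SIZE32.
 */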
static int
sigreturn_copyin_ctx32(struct user_ucontext32 *uctx, mcontext32_t *mctx, user_addr_t uctx_addr)
{
	int error;

	assert(!proc_is64bit(current_proc()));

	error = copyin(uctx_addr, uctx, sizeof(*uctx));
	if (error) {
		return (error);
	}

	/* validate the machine context size */
	switch (uctx->uc_mcsize) {
	case UC_FLAVOR_SIZE32:
		break;
	default:
		return (EINVAL);
	}

	assert(uctx->uc_mcsize == sizeof(*mctx));
	error = copyin((user_addr_t)uctx->uc_mcontext, mctx, uctx->uc_mcsize);
	if (error) {
		return (error);
	}

	return 0;
}

static int
sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx)
{
	assert(!proc_is64bit(current_proc()));

	/* validate the thread state, set/reset appropriate mode bits in cpsr */
#if defined(__arm__)
	mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR_MODE_MASK) | PSR_USERDFLT;
#elif defined(__arm64__)
	mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER32_DEFAULT;
#else
#error Unknown architecture.
#endif

	if (thread_setstatus(th_act, ARM_THREAD_STATE, (void *)&mctx->ss, ARM_THREAD_STATE_COUNT) != KERN_SUCCESS) {
		return (EINVAL);
	}
	if (thread_setstatus(th_act, ARM_VFP_STATE, (void *)&mctx->fs, ARM_VFP_STATE_COUNT) != KERN_SUCCESS) {
		return (EINVAL);
	}

	return 0;
}

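/* 64-bit counterparts of the sigreturn copyin/set-state helpers above. */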
#if defined(__arm64__)
static int
sigreturn_copyin_ctx64(struct user_ucontext64 *uctx, mcontext64_t *mctx, user_addr_t uctx_addr)
{
	int error;

	assert(proc_is64bit(current_proc()));

	error = copyin(uctx_addr, uctx, sizeof(*uctx));
	if (error) {
		return (error);
	}

	/* validate the machine context size */
	switch (uctx->uc_mcsize) {
	case UC_FLAVOR_SIZE64:
		break;
	default:
		return (EINVAL);
	}

	assert(uctx->uc_mcsize == sizeof(*mctx));
	error = copyin((user_addr_t)uctx->uc_mcontext64, mctx, uctx->uc_mcsize);
	if (error) {
		return (error);
	}

	return 0;
}

static int
sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx)
{
	assert(proc_is64bit(current_proc()));

	/* validate the thread state, set/reset appropriate mode bits in cpsr */
	mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER64_DEFAULT;

	if (thread_setstatus(th_act, ARM_THREAD_STATE64, (void *)&mctx->ss, ARM_THREAD_STATE64_COUNT) != KERN_SUCCESS) {
		return (EINVAL);
	}
	if (thread_setstatus(th_act, ARM_NEON_STATE64, (void *)&mctx->ns, ARM_NEON_STATE64_COUNT) != KERN_SUCCESS) {
		return (EINVAL);
	}

	return 0;
}
#endif /* defined(__arm64__) */

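/*
 * sigreturn() restores the signal mask, alternate-stack state, and the
 * saved machine context, then resumes the interrupted user context.
 */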
/* ARGSUSED */
int
sigreturn(
	struct proc * p,
	struct sigreturn_args * uap,
	__unused int *retval)
{
	union {
		user_ucontext32_t uc32;
#if defined(__arm64__)
		user_ucontext64_t uc64;
#endif
	} uctx;

	union {
		mcontext32_t mc32;
#if defined(__arm64__)
		mcontext64_t mc64;
#endif
	} mctx;

	int error, sigmask = 0, onstack = 0;
	thread_t th_act;
	struct uthread *ut;

	th_act = current_thread();
	ut = (struct uthread *) get_bsdthread_info(th_act);

	if (proc_is64bit(p)) {
#if defined(__arm64__)
		error = sigreturn_copyin_ctx64(&uctx.uc64, &mctx.mc64, uap->uctx);
		if (error != 0) {
			return error;
		}

		onstack = uctx.uc64.uc_onstack;
		sigmask = uctx.uc64.uc_sigmask;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		error = sigreturn_copyin_ctx32(&uctx.uc32, &mctx.mc32, uap->uctx);
		if (error != 0) {
			return error;
		}

		onstack = uctx.uc32.uc_onstack;
		sigmask = uctx.uc32.uc_sigmask;
	}

	if ((onstack & 01))
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;

	ut->uu_sigmask = sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(current_thread());

	if (proc_is64bit(p)) {
#if defined(__arm64__)
		error = sigreturn_set_state64(th_act, &mctx.mc64);
		if (error != 0) {
			return error;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		error = sigreturn_set_state32(th_act, &mctx.mc32);
		if (error != 0) {
			return error;
		}
	}

	return (EJUSTRETURN);
}

/*
 * machine_exception() performs the machine-dependent translation of a
 * Mach exception into a Unix signal and code.
 */

boolean_t
machine_exception(
	int exception,
	mach_exception_subcode_t code,
	__unused mach_exception_subcode_t subcode,
	int *unix_signal,
	mach_exception_subcode_t * unix_code
	)
{
	switch (exception) {
	case EXC_BAD_INSTRUCTION:
		*unix_signal = SIGILL;
		*unix_code = code;
		break;

	case EXC_ARITHMETIC:
		*unix_signal = SIGFPE;
		*unix_code = code;
		break;

	default:
		return (FALSE);
	}
	return (TRUE);
}