/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <arm/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/sdt.h>
#include <sys/wait.h>
#include <kern/thread.h>
#include <mach/arm/thread_status.h>
#include <arm/proc_reg.h>

#include <kern/assert.h>
#include <kern/ast.h>
#include <pexpert/pexpert.h>

extern struct arm_saved_state *get_user_regs(thread_t);
extern user_addr_t thread_get_cthread_self(void);
extern kern_return_t thread_getstatus(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_getstatus_to_user(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t machine_thread_state_convert_to_user(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
    thread_state_t tstate, mach_msg_type_number_t count);
extern kern_return_t thread_setstatus_from_user(thread_t thread, int flavor,
    thread_state_t tstate, mach_msg_type_number_t count);

/* XXX Put these someplace smarter... */
typedef struct mcontext32 mcontext32_t;
typedef struct mcontext64 mcontext64_t;

/* Signal handler flavors supported */
/* These definitions should match the Libc implementation */
#define UC_TRAD         1
#define UC_FLAVOR       30

/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE32 ((ARM_THREAD_STATE_COUNT + ARM_EXCEPTION_STATE_COUNT + ARM_VFP_STATE_COUNT) * sizeof(int))
#define UC_FLAVOR_SIZE64 ((ARM_THREAD_STATE64_COUNT + ARM_EXCEPTION_STATE64_COUNT + ARM_NEON_STATE64_COUNT) * sizeof(int))

#if __arm64__
#define C_64_REDZONE_LEN 128
#endif

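/*
 * Gather the 32-bit thread, exception and VFP state of the target thread
 * and stash it in the mcontext that will be copied out to user space.  The
 * general-purpose register state is additionally converted to its user
 * representation before it is exposed to the signal handler.
 */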
static int
sendsig_get_state32(thread_t th_act, arm_thread_state_t *ts, mcontext32_t *mcp)
{
    void *tstate;
    mach_msg_type_number_t state_count;

    assert(!proc_is64bit_data(current_proc()));

    tstate = (void *) ts;
    state_count = ARM_THREAD_STATE_COUNT;
    if (thread_getstatus(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    mcp->ss = *ts;
    tstate = (void *) &mcp->ss;
    state_count = ARM_THREAD_STATE_COUNT;
    if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->es;
    state_count = ARM_EXCEPTION_STATE_COUNT;
    if (thread_getstatus(th_act, ARM_EXCEPTION_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->fs;
    state_count = ARM_VFP_STATE_COUNT;
    if (thread_getstatus_to_user(th_act, ARM_VFP_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}

#if defined(__arm64__)
struct user_sigframe64 {
    /* We can pass the last two args in registers for ARM64 */
    user64_siginfo_t sinfo;
    struct user_ucontext64 uctx;
    mcontext64_t mctx;
};

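/*
 * 64-bit counterpart of sendsig_get_state32(): capture the thread,
 * exception and NEON state of the target thread into the 64-bit mcontext,
 * converting the general-purpose register state to its user representation.
 */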
static int
sendsig_get_state64(thread_t th_act, arm_thread_state64_t *ts, mcontext64_t *mcp)
{
    void *tstate;
    mach_msg_type_number_t state_count;

    assert(proc_is64bit_data(current_proc()));

    tstate = (void *) ts;
    state_count = ARM_THREAD_STATE64_COUNT;
    if (thread_getstatus(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    mcp->ss = *ts;
    tstate = (void *) &mcp->ss;
    state_count = ARM_THREAD_STATE64_COUNT;
    if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->es;
    state_count = ARM_EXCEPTION_STATE64_COUNT;
    if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->ns;
    state_count = ARM_NEON_STATE64_COUNT;
    if (thread_getstatus_to_user(th_act, ARM_NEON_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}

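/*
 * Populate the 64-bit user ucontext: record the signal mask and alternate
 * stack information, and point uc_mcontext64 at the machine context that
 * sendsig() places in the user signal frame.
 */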
static void
sendsig_fill_uctx64(user_ucontext64_t *uctx, int oonstack, int mask, user64_addr_t sp, user64_size_t stack_size, user64_addr_t p_mctx)
{
    bzero(uctx, sizeof(*uctx));
    uctx->uc_onstack = oonstack;
    uctx->uc_sigmask = mask;
    uctx->uc_stack.ss_sp = sp;
    uctx->uc_stack.ss_size = stack_size;
    if (oonstack) {
        uctx->uc_stack.ss_flags |= SS_ONSTACK;
    }
    uctx->uc_link = (user64_addr_t)0;
    uctx->uc_mcsize = (user64_size_t) UC_FLAVOR_SIZE64;
    uctx->uc_mcontext64 = (user64_addr_t) p_mctx;
}

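/*
 * Redirect the 64-bit thread to the signal trampoline: the handler, signal
 * number, siginfo/ucontext pointers and the sigreturn token are passed in
 * x0-x5, pc is set to the trampoline and sp to the newly built signal frame.
 */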
static kern_return_t
sendsig_set_thread_state64(arm_thread_state64_t *regs,
    user64_addr_t catcher, int infostyle, int sig, user64_addr_t p_sinfo,
    user64_addr_t p_uctx, user64_addr_t token, user64_addr_t trampact, user64_addr_t sp, thread_t th_act)
{
    assert(proc_is64bit_data(current_proc()));

    regs->x[0] = catcher;
    regs->x[1] = infostyle;
    regs->x[2] = sig;
    regs->x[3] = p_sinfo;
    regs->x[4] = p_uctx;
    regs->x[5] = token;
    regs->pc = trampact;
    regs->cpsr = PSR64_USER64_DEFAULT;
    regs->sp = sp;

    return thread_setstatus(th_act, ARM_THREAD_STATE64, (void *)regs, ARM_THREAD_STATE64_COUNT);
}
#endif /* defined(__arm64__) */

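/*
 * Populate the 32-bit user ucontext, truncating the stack address and size
 * to their user32 types and pointing uc_mcontext at the machine context in
 * the user signal frame.
 */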
static void
sendsig_fill_uctx32(user_ucontext32_t *uctx, int oonstack, int mask, user_addr_t sp, user_size_t stack_size, user_addr_t p_mctx)
{
    bzero(uctx, sizeof(*uctx));
    uctx->uc_onstack = oonstack;
    uctx->uc_sigmask = mask;
    uctx->uc_stack.ss_sp = (user32_addr_t) sp;
    uctx->uc_stack.ss_size = (user32_size_t) stack_size;
    if (oonstack) {
        uctx->uc_stack.ss_flags |= SS_ONSTACK;
    }
    uctx->uc_link = (user32_addr_t)0;
    uctx->uc_mcsize = (user32_size_t) UC_FLAVOR_SIZE32;
    uctx->uc_mcontext = (user32_addr_t) p_mctx;
}

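/*
 * Redirect the 32-bit thread to the signal trampoline.  The first four
 * arguments travel in r0-r3; the remaining values live in the signal frame.
 * Bit 0 of the trampoline address selects Thumb mode, so it is cleared from
 * pc and reflected in the Thumb bit of the saved cpsr instead.
 */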
static kern_return_t
sendsig_set_thread_state32(arm_thread_state_t *regs,
    user32_addr_t catcher, int infostyle, int sig, user32_addr_t p_sinfo,
    user32_addr_t trampact, user32_addr_t sp, thread_t th_act)
{
    assert(!proc_is64bit_data(current_proc()));

    regs->r[0] = catcher;
    regs->r[1] = infostyle;
    regs->r[2] = sig;
    regs->r[3] = p_sinfo;
    if (trampact & 1) {
        regs->pc = trampact & ~1;
#if defined(__arm64__)
        regs->cpsr = PSR64_USER32_DEFAULT | PSR64_MODE_USER32_THUMB;
#elif defined(__arm__)
        regs->cpsr = PSR_USERDFLT | PSR_TF;
#else
#error Unknown architecture.
#endif
    } else {
        regs->pc = trampact;
        regs->cpsr = PSR_USERDFLT;
    }
    regs->sp = sp;

    return thread_setstatus(th_act, ARM_THREAD_STATE, (void *)regs, ARM_THREAD_STATE_COUNT);
}

#if CONFIG_DTRACE
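/*
 * Mirror the siginfo into the uthread's DTrace buffer and fire the
 * proc:::fault probe for hardware-generated signals before the
 * signal-handle probe is fired for the delivery itself.
 */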
static void
sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catcher)
{
    bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

    ut->t_dtrace_siginfo.si_signo = sinfo->si_signo;
    ut->t_dtrace_siginfo.si_code = sinfo->si_code;
    ut->t_dtrace_siginfo.si_pid = sinfo->si_pid;
    ut->t_dtrace_siginfo.si_uid = sinfo->si_uid;
    ut->t_dtrace_siginfo.si_status = sinfo->si_status;
    /* XXX truncates faulting address to void * */
    ut->t_dtrace_siginfo.si_addr = CAST_DOWN_EXPLICIT(void *, sinfo->si_addr);

    /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
    switch (sig) {
    case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
        DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
        break;
    default:
        break;
    }

    /* XXX truncates faulting address to uintptr_t */
    DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
        void (*)(void), CAST_DOWN(sig_t, catcher));
}
#endif /* CONFIG_DTRACE */

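/*
 * 32-bit signal frame as laid out on the user stack.  Unlike ARM64, the
 * ucontext pointer and sigreturn token are carried in the frame itself
 * rather than in argument registers.
 */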
struct user_sigframe32 {
    user32_addr_t puctx;
    user32_addr_t token;
    user32_siginfo_t sinfo;
    struct user_ucontext32 uctx;
    mcontext32_t mctx;
};

/*
 * Send an interrupt to process.
 *
 */
void
sendsig(
    struct proc * p,
    user_addr_t catcher,
    int sig,
    int mask,
    __unused uint32_t code,
    sigset_t siginfo
    )
{
    union {
        struct ts32 {
            arm_thread_state_t ss;
        } ts32;
#if defined(__arm64__)
        struct ts64 {
            arm_thread_state64_t ss;
        } ts64;
#endif
    } ts;
    union {
        struct user_sigframe32 uf32;
#if defined(__arm64__)
        struct user_sigframe64 uf64;
#endif
    } user_frame;

    user_siginfo_t sinfo;
    user_addr_t sp = 0, trampact;
    struct sigacts *ps = p->p_sigacts;
    int oonstack, infostyle;
    thread_t th_act;
    struct uthread *ut;
    user_size_t stack_size = 0;
    user_addr_t p_uctx, token_uctx;
    kern_return_t kr;

    th_act = current_thread();
    ut = get_bsdthread_info(th_act);

    bzero(&ts, sizeof(ts));
    bzero(&user_frame, sizeof(user_frame));

    if (siginfo & sigmask(sig)) {
        infostyle = UC_FLAVOR;
    } else {
        infostyle = UC_TRAD;
    }

    trampact = ps->ps_trampact[sig];
    oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

    /*
     * Get sundry thread state.
     */
    if (proc_is64bit_data(p)) {
#ifdef __arm64__
        if (sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx) != 0) {
            goto bad2;
        }
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        if (sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx) != 0) {
            goto bad2;
        }
    }

    /*
     * Figure out where our new stack lives.
     */
    if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
        (ps->ps_sigonstack & sigmask(sig))) {
        sp = ps->ps_sigstk.ss_sp;
        sp += ps->ps_sigstk.ss_size;
        stack_size = ps->ps_sigstk.ss_size;
        ps->ps_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        /*
         * Get stack pointer, and allocate enough space
         * for signal handler data.
         */
        if (proc_is64bit_data(p)) {
#if defined(__arm64__)
            sp = CAST_USER_ADDR_T(ts.ts64.ss.sp);
            sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN) & ~0xf; /* Make sure to align to 16 bytes and respect red zone */
#else
            panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
        } else {
            sp = CAST_USER_ADDR_T(ts.ts32.ss.sp);
            sp -= sizeof(user_frame.uf32);
#if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4)
            sp &= ~0xf; /* Make sure to align to 16 bytes for armv7k */
#endif
        }
    }

    proc_unlock(p);

    /*
     * Fill in ucontext (points to mcontext, i.e. thread states).
     */
    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size,
            (user64_addr_t)&((struct user_sigframe64*)sp)->mctx);
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size,
            (user32_addr_t)&((struct user_sigframe32*)sp)->mctx);
    }

    /*
     * Setup siginfo.
     */
    bzero((caddr_t) &sinfo, sizeof(sinfo));
    sinfo.si_signo = sig;

    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        sinfo.si_addr = ts.ts64.ss.pc;
        sinfo.pad[0] = ts.ts64.ss.sp;
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        sinfo.si_addr = ts.ts32.ss.pc;
        sinfo.pad[0] = ts.ts32.ss.sp;
    }

    switch (sig) {
    case SIGILL:
#ifdef BER_XXX
        if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) {
            sinfo.si_code = ILL_ILLOPC;
        } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) {
            sinfo.si_code = ILL_PRVOPC;
        } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) {
            sinfo.si_code = ILL_ILLTRP;
        } else {
            sinfo.si_code = ILL_NOOP;
        }
#else
        sinfo.si_code = ILL_ILLTRP;
#endif
        break;

    case SIGFPE:
        switch (ut->uu_code) {
        case EXC_ARM_FP_UF:
            sinfo.si_code = FPE_FLTUND;
            break;
        case EXC_ARM_FP_OF:
            sinfo.si_code = FPE_FLTOVF;
            break;
        case EXC_ARM_FP_IO:
            sinfo.si_code = FPE_FLTINV;
            break;
        case EXC_ARM_FP_DZ:
            sinfo.si_code = FPE_FLTDIV;
            break;
        case EXC_ARM_FP_ID:
            sinfo.si_code = FPE_FLTINV;
            break;
        case EXC_ARM_FP_IX:
            sinfo.si_code = FPE_FLTRES;
            break;
        default:
            sinfo.si_code = FPE_NOOP;
            break;
        }

        break;

    case SIGBUS:
        if (proc_is64bit_data(p)) {
#if defined(__arm64__)
            sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
            panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
        } else {
            sinfo.si_addr = user_frame.uf32.mctx.es.far;
        }

        sinfo.si_code = BUS_ADRALN;
        break;

    case SIGSEGV:
        if (proc_is64bit_data(p)) {
#if defined(__arm64__)
            sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
            panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
        } else {
            sinfo.si_addr = user_frame.uf32.mctx.es.far;
        }

#ifdef BER_XXX
        /* First check in srr1 and then in dsisr */
        if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) {
            sinfo.si_code = SEGV_ACCERR;
        } else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) {
            sinfo.si_code = SEGV_ACCERR;
        } else {
            sinfo.si_code = SEGV_MAPERR;
        }
#else
        sinfo.si_code = SEGV_ACCERR;
#endif
        break;

    default:
    {
        int status_and_exitcode;

        /*
         * All other signals need to fill out a minimum set of
         * information for the siginfo structure passed into
         * the signal handler, if SA_SIGINFO was specified.
         *
         * p->si_status actually contains both the status and
         * the exit code; we save it off in its own variable
         * for later breakdown.
         */
        proc_lock(p);
        sinfo.si_pid = p->si_pid;
        p->si_pid = 0;
        status_and_exitcode = p->si_status;
        p->si_status = 0;
        sinfo.si_uid = p->si_uid;
        p->si_uid = 0;
        sinfo.si_code = p->si_code;
        p->si_code = 0;
        proc_unlock(p);
        if (sinfo.si_code == CLD_EXITED) {
            if (WIFEXITED(status_and_exitcode)) {
                sinfo.si_code = CLD_EXITED;
            } else if (WIFSIGNALED(status_and_exitcode)) {
                if (WCOREDUMP(status_and_exitcode)) {
                    sinfo.si_code = CLD_DUMPED;
                    status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                } else {
                    sinfo.si_code = CLD_KILLED;
                    status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                }
            }
        }
        /*
         * The recorded status contains the exit code and the
         * signal information, but the information to be passed
         * in the siginfo to the handler is supposed to only
         * contain the status, so we have to shift it out.
         */
        sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
        p->p_xhighbits = 0;
        break;
    }
    }

#if CONFIG_DTRACE
    sendsig_do_dtrace(ut, &sinfo, sig, catcher);
#endif /* CONFIG_DTRACE */

    /*
     * Copy signal-handling frame out to user space, set thread state.
     */
    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        user64_addr_t token;

        /*
         * mctx filled in when we get state.  uctx filled in by
         * sendsig_fill_uctx64().  We fill in the sinfo now.
         */
        siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);

        p_uctx = (user_addr_t)&((struct user_sigframe64*)sp)->uctx;
        /*
         * Generate the validation token for sigreturn
         */
        token_uctx = p_uctx;
        kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
        assert(kr == KERN_SUCCESS);
        token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;

        if (copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64)) != 0) {
            goto bad;
        }

        if (sendsig_set_thread_state64(&ts.ts64.ss,
            catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo,
            (user64_addr_t)p_uctx, token, trampact, sp, th_act) != KERN_SUCCESS) {
            goto bad;
        }

#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        user32_addr_t token;

        /*
         * mctx filled in when we get state.  uctx filled in by
         * sendsig_fill_uctx32().  We fill in the sinfo, *pointer*
         * to uctx and token now.
         */
        siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);

        p_uctx = (user_addr_t)&((struct user_sigframe32*)sp)->uctx;
        /*
         * Generate the validation token for sigreturn
         */
        token_uctx = (user_addr_t)p_uctx;
        kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
        assert(kr == KERN_SUCCESS);
        token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;

        user_frame.uf32.puctx = (user32_addr_t)p_uctx;
        user_frame.uf32.token = token;

        if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
            goto bad;
        }

        if (sendsig_set_thread_state32(&ts.ts32.ss,
            CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo,
            CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS) {
            goto bad;
        }
    }

    proc_lock(p);
    return;

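/*
 * Error path: if the frame could not be built or copied out, fall back to
 * delivering SIGILL with its default action instead of resuming the thread
 * with an incomplete signal context.
 */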
bad:
    proc_lock(p);
bad2:
    SIGACTION(p, SIGILL) = SIG_DFL;
    sig = sigmask(SIGILL);
    p->p_sigignore &= ~sig;
    p->p_sigcatch &= ~sig;
    ut->uu_sigmask &= ~sig;
    /* sendsig is called with signal lock held */
    proc_unlock(p);
    psignal_locked(p, SIGILL);
    proc_lock(p);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous context left by sendsig.
 * Check carefully to make sure that the user has not
 * modified the spr to gain improper privileges.
 */

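/*
 * Copy the 32-bit ucontext in from user space and, after validating the
 * advertised machine-context size, copy in the mcontext it points to.
 */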
static int
sigreturn_copyin_ctx32(struct user_ucontext32 *uctx, mcontext32_t *mctx, user_addr_t uctx_addr)
{
    int error;

    assert(!proc_is64bit_data(current_proc()));

    error = copyin(uctx_addr, uctx, sizeof(*uctx));
    if (error) {
        return error;
    }

    /* validate the machine context size */
    switch (uctx->uc_mcsize) {
    case UC_FLAVOR_SIZE32:
        break;
    default:
        return EINVAL;
    }

    assert(uctx->uc_mcsize == sizeof(*mctx));
    error = copyin((user_addr_t)uctx->uc_mcontext, mctx, uctx->uc_mcsize);
    if (error) {
        return error;
    }

    return 0;
}

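/*
 * Install the user-supplied 32-bit register and VFP state.  The cpsr mode
 * bits are forced back to user mode first, so a handler cannot smuggle a
 * privileged processor state back in through sigreturn.
 */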
static int
sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx)
{
    assert(!proc_is64bit_data(current_proc()));

    /* validate the thread state, set/reset appropriate mode bits in cpsr */
#if defined(__arm__)
    mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR_MODE_MASK) | PSR_USERDFLT;
#elif defined(__arm64__)
    mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER32_DEFAULT;
#else
#error Unknown architecture.
#endif

    if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE, (void *)&mctx->ss, ARM_THREAD_STATE_COUNT) != KERN_SUCCESS) {
        return EINVAL;
    }
    if (thread_setstatus_from_user(th_act, ARM_VFP_STATE, (void *)&mctx->fs, ARM_VFP_STATE_COUNT) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}

#if defined(__arm64__)
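/*
 * Copy the 64-bit ucontext in from user space and, after validating the
 * advertised machine-context size, copy in the mcontext it points to.
 */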
static int
sigreturn_copyin_ctx64(struct user_ucontext64 *uctx, mcontext64_t *mctx, user_addr_t uctx_addr)
{
    int error;

    assert(proc_is64bit_data(current_proc()));

    error = copyin(uctx_addr, uctx, sizeof(*uctx));
    if (error) {
        return error;
    }

    /* validate the machine context size */
    switch (uctx->uc_mcsize) {
    case UC_FLAVOR_SIZE64:
        break;
    default:
        return EINVAL;
    }

    assert(uctx->uc_mcsize == sizeof(*mctx));
    error = copyin((user_addr_t)uctx->uc_mcontext64, mctx, uctx->uc_mcsize);
    if (error) {
        return error;
    }

    return 0;
}

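/*
 * Install the user-supplied 64-bit register and NEON state.  As in the
 * 32-bit path, the cpsr mode bits are reset to the user default before the
 * state is applied to the thread.
 */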
static int
sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx)
{
    assert(proc_is64bit_data(current_proc()));

    /* validate the thread state, set/reset appropriate mode bits in cpsr */
    mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER64_DEFAULT;

    if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE64, (void *)&mctx->ss, ARM_THREAD_STATE64_COUNT) != KERN_SUCCESS) {
        return EINVAL;
    }
    if (thread_setstatus_from_user(th_act, ARM_NEON_STATE64, (void *)&mctx->ns, ARM_NEON_STATE64_COUNT) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}
#endif /* defined(__arm64__) */

/* ARGSUSED */
int
sigreturn(
    struct proc * p,
    struct sigreturn_args * uap,
    __unused int *retval)
{
    union {
        user_ucontext32_t uc32;
#if defined(__arm64__)
        user_ucontext64_t uc64;
#endif
    } uctx;

    union {
        mcontext32_t mc32;
#if defined(__arm64__)
        mcontext64_t mc64;
#endif
    } mctx;

    struct sigacts *ps = p->p_sigacts;
    int error, sigmask = 0, onstack = 0;
    thread_t th_act;
    struct uthread *ut;
    uint32_t sigreturn_validation;
    user_addr_t token_uctx;
    kern_return_t kr;

    th_act = current_thread();
    ut = (struct uthread *) get_bsdthread_info(th_act);

    /* see osfmk/kern/restartable.c */
    act_set_ast_reset_pcs(th_act);

    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        error = sigreturn_copyin_ctx64(&uctx.uc64, &mctx.mc64, uap->uctx);
        if (error != 0) {
            return error;
        }

        onstack = uctx.uc64.uc_onstack;
        sigmask = uctx.uc64.uc_sigmask;
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        error = sigreturn_copyin_ctx32(&uctx.uc32, &mctx.mc32, uap->uctx);
        if (error != 0) {
            return error;
        }

        onstack = uctx.uc32.uc_onstack;
        sigmask = uctx.uc32.uc_sigmask;
    }

    if ((onstack & 01)) {
        p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
    }

    ut->uu_sigmask = sigmask & ~sigcantmask;
    if (ut->uu_siglist & ~ut->uu_sigmask) {
        signal_setast(current_thread());
    }

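/*
 * Recompute the sigreturn token from the user's ucontext pointer and the
 * process's ps_sigreturn_token, and compare it against the token passed in
 * by the trampoline.  A mismatch is rejected unless token validation has
 * been disabled for this process.
 */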
    sigreturn_validation = atomic_load_explicit(
        &ps->ps_sigreturn_validation, memory_order_relaxed);
    token_uctx = uap->uctx;
    kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
    assert(kr == KERN_SUCCESS);

    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        user64_addr_t token;
        token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
        if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
                p->p_comm, p->p_pid, (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
            if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
                return EINVAL;
            }
        }
        error = sigreturn_set_state64(th_act, &mctx.mc64);
        if (error != 0) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn set_state64 error %d\n",
                p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
            return error;
        }
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        user32_addr_t token;
        token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;
        if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
                p->p_comm, p->p_pid, (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
            if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
                return EINVAL;
            }
        }
        error = sigreturn_set_state32(th_act, &mctx.mc32);
        if (error != 0) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn sigreturn_set_state32 error %d\n",
                p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
            return error;
        }
    }

    return EJUSTRETURN;
}

/*
 * machine_exception() performs machine-dependent translation
 * of a mach exception to a unix signal.
 */
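/*
 * Exceptions with no mapping here return 0, leaving the signal choice to
 * the machine-independent exception-translation code.
 */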
int
machine_exception(int exception,
    __unused mach_exception_code_t code,
    __unused mach_exception_subcode_t subcode)
{
    switch (exception) {
    case EXC_BAD_INSTRUCTION:
        return SIGILL;

    case EXC_ARITHMETIC:
        return SIGFPE;
    }

    return 0;
}