/*
 * git.saurik.com mirror of apple/xnu — blob 12d7b69f7dddf180d2d0b51a1f9abf376131e642
 * bsd/dev/arm/unix_signal.c
 */
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 */
4
5 #include <mach/mach_types.h>
6 #include <mach/exception_types.h>
7
8 #include <sys/param.h>
9 #include <sys/proc_internal.h>
10 #include <sys/user.h>
11 #include <sys/signal.h>
12 #include <sys/ucontext.h>
13 #include <sys/sysproto.h>
14 #include <sys/systm.h>
15 #include <sys/ux_exception.h>
16
17 #include <arm/signal.h>
18 #include <sys/signalvar.h>
19 #include <sys/kdebug.h>
20 #include <sys/sdt.h>
21 #include <sys/wait.h>
22 #include <kern/thread.h>
23 #include <mach/arm/thread_status.h>
24 #include <arm/proc_reg.h>
25
26 #include <kern/assert.h>
27 #include <pexpert/pexpert.h>
28
29 extern struct arm_saved_state *get_user_regs(thread_t);
30 extern user_addr_t thread_get_cthread_self(void);
31 extern kern_return_t thread_getstatus(thread_t act, int flavor,
32 thread_state_t tstate, mach_msg_type_number_t *count);
33 extern kern_return_t thread_getstatus_to_user(thread_t act, int flavor,
34 thread_state_t tstate, mach_msg_type_number_t *count);
35 extern kern_return_t machine_thread_state_convert_to_user(thread_t act, int flavor,
36 thread_state_t tstate, mach_msg_type_number_t *count);
37 extern kern_return_t thread_setstatus(thread_t thread, int flavor,
38 thread_state_t tstate, mach_msg_type_number_t count);
39 extern kern_return_t thread_setstatus_from_user(thread_t thread, int flavor,
40 thread_state_t tstate, mach_msg_type_number_t count);
41 /* XXX Put these someplace smarter... */
42 typedef struct mcontext32 mcontext32_t;
43 typedef struct mcontext64 mcontext64_t;
44
45 /* Signal handler flavors supported */
46 /* These defns should match the Libc implmn */
47 #define UC_TRAD 1
48 #define UC_FLAVOR 30
49
50 /* The following are valid mcontext sizes */
51 #define UC_FLAVOR_SIZE32 ((ARM_THREAD_STATE_COUNT + ARM_EXCEPTION_STATE_COUNT + ARM_VFP_STATE_COUNT) * sizeof(int))
52 #define UC_FLAVOR_SIZE64 ((ARM_THREAD_STATE64_COUNT + ARM_EXCEPTION_STATE64_COUNT + ARM_NEON_STATE64_COUNT) * sizeof(int))
53
54 #if __arm64__
55 #define C_64_REDZONE_LEN 128
56 #endif
57
58 static int
59 sendsig_get_state32(thread_t th_act, arm_thread_state_t *ts, mcontext32_t *mcp)
60 {
61 void *tstate;
62 mach_msg_type_number_t state_count;
63
64 assert(!proc_is64bit_data(current_proc()));
65
66 tstate = (void *) ts;
67 state_count = ARM_THREAD_STATE_COUNT;
68 if (thread_getstatus(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
69 return EINVAL;
70 }
71
72 mcp->ss = *ts;
73 tstate = (void *) &mcp->ss;
74 state_count = ARM_THREAD_STATE_COUNT;
75 if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
76 return EINVAL;
77 }
78
79 tstate = (void *) &mcp->es;
80 state_count = ARM_EXCEPTION_STATE_COUNT;
81 if (thread_getstatus(th_act, ARM_EXCEPTION_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
82 return EINVAL;
83 }
84
85 tstate = (void *) &mcp->fs;
86 state_count = ARM_VFP_STATE_COUNT;
87 if (thread_getstatus_to_user(th_act, ARM_VFP_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
88 return EINVAL;
89 }
90
91 return 0;
92 }
93
94 #if defined(__arm64__)
/*
 * 64-bit signal frame laid out on the user stack by sendsig().
 * Unlike the 32-bit frame, there are no puctx/token fields here:
 * those two values are passed to the trampoline in registers.
 */
struct user_sigframe64 {
	/* We can pass the last two args in registers for ARM64 */
	user64_siginfo_t sinfo;		/* signal information delivered to the handler */
	struct user_ucontext64 uctx;	/* user context; points at mctx below */
	mcontext64_t mctx;		/* saved machine state (thread/exception/NEON) */
};
101
102 static int
103 sendsig_get_state64(thread_t th_act, arm_thread_state64_t *ts, mcontext64_t *mcp)
104 {
105 void *tstate;
106 mach_msg_type_number_t state_count;
107
108 assert(proc_is64bit_data(current_proc()));
109
110 tstate = (void *) ts;
111 state_count = ARM_THREAD_STATE64_COUNT;
112 if (thread_getstatus(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
113 return EINVAL;
114 }
115
116 mcp->ss = *ts;
117 tstate = (void *) &mcp->ss;
118 state_count = ARM_THREAD_STATE64_COUNT;
119 if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
120 return EINVAL;
121 }
122
123 tstate = (void *) &mcp->es;
124 state_count = ARM_EXCEPTION_STATE64_COUNT;
125 if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
126 return EINVAL;
127 }
128
129 tstate = (void *) &mcp->ns;
130 state_count = ARM_NEON_STATE64_COUNT;
131 if (thread_getstatus_to_user(th_act, ARM_NEON_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
132 return EINVAL;
133 }
134
135 return 0;
136 }
137
138 static void
139 sendsig_fill_uctx64(user_ucontext64_t *uctx, int oonstack, int mask, user64_addr_t sp, user64_size_t stack_size, user64_addr_t p_mctx)
140 {
141 bzero(uctx, sizeof(*uctx));
142 uctx->uc_onstack = oonstack;
143 uctx->uc_sigmask = mask;
144 uctx->uc_stack.ss_sp = sp;
145 uctx->uc_stack.ss_size = stack_size;
146 if (oonstack) {
147 uctx->uc_stack.ss_flags |= SS_ONSTACK;
148 }
149 uctx->uc_link = (user64_addr_t)0;
150 uctx->uc_mcsize = (user64_size_t) UC_FLAVOR_SIZE64;
151 uctx->uc_mcontext64 = (user64_addr_t) p_mctx;
152 }
153
154 static kern_return_t
155 sendsig_set_thread_state64(arm_thread_state64_t *regs,
156 user64_addr_t catcher, int infostyle, int sig, user64_addr_t p_sinfo,
157 user64_addr_t p_uctx, user64_addr_t token, user64_addr_t trampact, user64_addr_t sp, thread_t th_act)
158 {
159 assert(proc_is64bit_data(current_proc()));
160
161 regs->x[0] = catcher;
162 regs->x[1] = infostyle;
163 regs->x[2] = sig;
164 regs->x[3] = p_sinfo;
165 regs->x[4] = p_uctx;
166 regs->x[5] = token;
167 regs->pc = trampact;
168 regs->cpsr = PSR64_USER64_DEFAULT;
169 regs->sp = sp;
170
171 return thread_setstatus(th_act, ARM_THREAD_STATE64, (void *)regs, ARM_THREAD_STATE64_COUNT);
172 }
173 #endif /* defined(__arm64__) */
174
175 static void
176 sendsig_fill_uctx32(user_ucontext32_t *uctx, int oonstack, int mask, user_addr_t sp, user_size_t stack_size, user_addr_t p_mctx)
177 {
178 bzero(uctx, sizeof(*uctx));
179 uctx->uc_onstack = oonstack;
180 uctx->uc_sigmask = mask;
181 uctx->uc_stack.ss_sp = (user32_addr_t) sp;
182 uctx->uc_stack.ss_size = (user32_size_t) stack_size;
183 if (oonstack) {
184 uctx->uc_stack.ss_flags |= SS_ONSTACK;
185 }
186 uctx->uc_link = (user32_addr_t)0;
187 uctx->uc_mcsize = (user32_size_t) UC_FLAVOR_SIZE32;
188 uctx->uc_mcontext = (user32_addr_t) p_mctx;
189 }
190
191 static kern_return_t
192 sendsig_set_thread_state32(arm_thread_state_t *regs,
193 user32_addr_t catcher, int infostyle, int sig, user32_addr_t p_sinfo,
194 user32_addr_t trampact, user32_addr_t sp, thread_t th_act)
195 {
196 assert(!proc_is64bit_data(current_proc()));
197
198 regs->r[0] = catcher;
199 regs->r[1] = infostyle;
200 regs->r[2] = sig;
201 regs->r[3] = p_sinfo;
202 if (trampact & 1) {
203 regs->pc = trampact & ~1;
204 #if defined(__arm64__)
205 regs->cpsr = PSR64_USER32_DEFAULT | PSR64_MODE_USER32_THUMB;
206 #elif defined(__arm__)
207 regs->cpsr = PSR_USERDFLT | PSR_TF;
208 #else
209 #error Unknown architeture.
210 #endif
211 } else {
212 regs->pc = trampact;
213 regs->cpsr = PSR_USERDFLT;
214 }
215 regs->sp = sp;
216
217 return thread_setstatus(th_act, ARM_THREAD_STATE, (void *)regs, ARM_THREAD_STATE_COUNT);
218 }
219
220 #if CONFIG_DTRACE
/*
 * Mirror the pending siginfo into the uthread's DTrace siginfo area
 * and fire the DTrace probes associated with signal delivery:
 * proc:::fault for hardware-generated signals, and
 * proc:::signal-handle for every delivered signal.
 */
static void
sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catcher)
{
	bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

	ut->t_dtrace_siginfo.si_signo = sinfo->si_signo;
	ut->t_dtrace_siginfo.si_code = sinfo->si_code;
	ut->t_dtrace_siginfo.si_pid = sinfo->si_pid;
	ut->t_dtrace_siginfo.si_uid = sinfo->si_uid;
	ut->t_dtrace_siginfo.si_status = sinfo->si_status;
	/* XXX truncates faulting address to void * */
	ut->t_dtrace_siginfo.si_addr = CAST_DOWN_EXPLICIT(void *, sinfo->si_addr);

	/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
	switch (sig) {
	case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
		DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
		break;
	default:
		break;
	}

	/* XXX truncates faulting address to uintptr_t */
	DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
	    void (*)(void), CAST_DOWN(sig_t, catcher));
}
247 #endif
248
/*
 * 32-bit signal frame laid out on the user stack by sendsig().
 * Only four arguments fit in r0-r3, so the ucontext pointer and the
 * sigreturn validation token are stored in the frame itself.
 */
struct user_sigframe32 {
	user32_addr_t puctx;		/* user-space address of uctx below */
	user32_addr_t token;		/* sigreturn validation token */
	user32_siginfo_t sinfo;		/* signal information delivered to the handler */
	struct user_ucontext32 uctx;	/* user context; points at mctx below */
	mcontext32_t mctx;		/* saved machine state (thread/exception/VFP) */
};
256
/*
 * Send an interrupt to process.
 *
 * Builds a signal frame (siginfo + ucontext + mcontext) on the user
 * stack (or the alternate signal stack, if configured and requested),
 * copies it out, and redirects the thread to the user-space signal
 * trampoline so that `catcher' runs for signal `sig' with mask `mask'.
 *
 * Called with the proc lock held; the lock is dropped around the
 * copyout and retaken before returning.  On any failure the handler
 * is abandoned and the process is instead sent SIGILL with default
 * disposition (see bad/bad2 below).
 */
void
sendsig(
	struct proc * p,
	user_addr_t catcher,
	int sig,
	int mask,
	__unused uint32_t code
	)
{
	union {
		struct ts32 {
			arm_thread_state_t ss;
		} ts32;
#if defined(__arm64__)
		struct ts64 {
			arm_thread_state64_t ss;
		} ts64;
#endif
	} ts;
	union {
		struct user_sigframe32 uf32;
#if defined(__arm64__)
		struct user_sigframe64 uf64;
#endif
	} user_frame;

	user_siginfo_t sinfo;
	user_addr_t sp = 0, trampact;
	struct sigacts *ps = p->p_sigacts;
	int oonstack, infostyle;
	thread_t th_act;
	struct uthread *ut;
	user_size_t stack_size = 0;
	user_addr_t p_uctx, token_uctx;
	kern_return_t kr;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	bzero(&ts, sizeof(ts));
	bzero(&user_frame, sizeof(user_frame));

	/* SA_SIGINFO handlers get the flavored (full-context) style. */
	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		infostyle = UC_FLAVOR;
	} else {
		infostyle = UC_TRAD;
	}

	trampact = ps->ps_trampact[sig];
	oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

	/*
	 * Get sundry thread state.
	 */
	if (proc_is64bit_data(p)) {
#ifdef __arm64__
		if (sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx) != 0) {
			goto bad2;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		if (sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx) != 0) {
			goto bad2;
		}
	}

	/*
	 * Figure out where our new stack lives.
	 */
	if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		/* Alternate signal stack: start at its top (stacks grow down). */
		sp = ps->ps_sigstk.ss_sp;
		sp += ps->ps_sigstk.ss_size;
		stack_size = ps->ps_sigstk.ss_size;
		ps->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		/*
		 * Get stack pointer, and allocate enough space
		 * for signal handler data.
		 */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sp = CAST_USER_ADDR_T(ts.ts64.ss.sp);
			sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN) & ~0xf; /* Make sure to align to 16 bytes and respect red zone */
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sp = CAST_USER_ADDR_T(ts.ts32.ss.sp);
			sp -= sizeof(user_frame.uf32);
#if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4)
			sp &= ~0xf; /* Make sure to align to 16 bytes for armv7k */
#endif
		}
	}

	/* Drop the proc lock across user-context setup and copyout. */
	proc_unlock(p);

	/*
	 * Fill in ucontext (points to mcontext, i.e. thread states).
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size,
		    (user64_addr_t)&((struct user_sigframe64*)sp)->mctx);
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size,
		    (user32_addr_t)&((struct user_sigframe32*)sp)->mctx);
	}

	/*
	 * Setup siginfo.
	 */
	bzero((caddr_t) &sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;

	/* Default si_addr is the interrupted pc; pad[0] carries the sp. */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sinfo.si_addr = ts.ts64.ss.pc;
		sinfo.pad[0] = ts.ts64.ss.sp;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sinfo.si_addr = ts.ts32.ss.pc;
		sinfo.pad[0] = ts.ts32.ss.sp;
	}

	switch (sig) {
	case SIGILL:
#ifdef BER_XXX
		if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) {
			sinfo.si_code = ILL_ILLOPC;
		} else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) {
			sinfo.si_code = ILL_PRVOPC;
		} else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) {
			sinfo.si_code = ILL_ILLTRP;
		} else {
			sinfo.si_code = ILL_NOOP;
		}
#else
		sinfo.si_code = ILL_ILLTRP;
#endif
		break;

	case SIGFPE:
		break;

	case SIGBUS:
		/* Report the fault address captured in the exception state. */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

		sinfo.si_code = BUS_ADRALN;
		break;

	case SIGSEGV:
		/* Report the fault address captured in the exception state. */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

#ifdef BER_XXX
		/* First check in srr1 and then in dsisr */
		if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) {
			sinfo.si_code = SEGV_ACCERR;
		} else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) {
			sinfo.si_code = SEGV_ACCERR;
		} else {
			sinfo.si_code = SEGV_MAPERR;
		}
#else
		sinfo.si_code = SEGV_ACCERR;
#endif
		break;

	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode)) {
				sinfo.si_code = CLD_EXITED;
			} else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo.si_code = CLD_DUMPED;
					/* NOTE(review): both W_EXITCODE args are the same value — confirm intentional. */
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				} else {
					sinfo.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
		p->p_xhighbits = 0;
		break;
	}
	}

#if CONFIG_DTRACE
	sendsig_do_dtrace(ut, &sinfo, sig, catcher);
#endif /* CONFIG_DTRACE */

	/*
	 * Copy signal-handling frame out to user space, set thread state.
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		user64_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by
		 * sendsig_fill_uctx64().  We fill in the sinfo now.
		 */
		siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);

		p_uctx = (user_addr_t)&((struct user_sigframe64*)sp)->uctx;
		/*
		 * Generate the validation token for sigreturn:
		 * user-visible uctx pointer XOR the per-process secret.
		 */
		token_uctx = p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;

		if (copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64)) != 0) {
			goto bad;
		}

		if (sendsig_set_thread_state64(&ts.ts64.ss,
		    catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo,
		    (user64_addr_t)p_uctx, token, trampact, sp, th_act) != KERN_SUCCESS) {
			goto bad;
		}

#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		user32_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by
		 * sendsig_fill_uctx32().  We fill in the sinfo, *pointer*
		 * to uctx and token now.
		 */
		siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);

		p_uctx = (user_addr_t)&((struct user_sigframe32*)sp)->uctx;
		/*
		 * Generate the validation token for sigreturn:
		 * user-visible uctx pointer XOR the per-process secret.
		 */
		token_uctx = (user_addr_t)p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;

		user_frame.uf32.puctx = (user32_addr_t)p_uctx;
		user_frame.uf32.token = token;

		if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
			goto bad;
		}

		if (sendsig_set_thread_state32(&ts.ts32.ss,
		    CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo,
		    CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS) {
			goto bad;
		}
	}

	proc_lock(p);
	return;

bad:
	proc_lock(p);
bad2:
	/*
	 * Delivery failed: reset SIGILL to the default disposition and
	 * raise it, so the process dies rather than looping on an
	 * undeliverable signal.  Note `sig' is reused here as a mask.
	 */
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
}
589
/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to the previous context left by sendsig.
 * Check carefully to make sure that the user has not
 * modified the processor state to gain improper privileges.
 */
598
599 static int
600 sigreturn_copyin_ctx32(struct user_ucontext32 *uctx, mcontext32_t *mctx, user_addr_t uctx_addr)
601 {
602 int error;
603
604 assert(!proc_is64bit_data(current_proc()));
605
606 error = copyin(uctx_addr, uctx, sizeof(*uctx));
607 if (error) {
608 return error;
609 }
610
611 /* validate the machine context size */
612 switch (uctx->uc_mcsize) {
613 case UC_FLAVOR_SIZE32:
614 break;
615 default:
616 return EINVAL;
617 }
618
619 assert(uctx->uc_mcsize == sizeof(*mctx));
620 error = copyin((user_addr_t)uctx->uc_mcontext, mctx, uctx->uc_mcsize);
621 if (error) {
622 return error;
623 }
624
625 return 0;
626 }
627
628 static int
629 sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx)
630 {
631 assert(!proc_is64bit_data(current_proc()));
632
633 /* validate the thread state, set/reset appropriate mode bits in cpsr */
634 #if defined(__arm__)
635 mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR_MODE_MASK) | PSR_USERDFLT;
636 #elif defined(__arm64__)
637 mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER32_DEFAULT;
638 #else
639 #error Unknown architecture.
640 #endif
641
642 if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE, (void *)&mctx->ss, ARM_THREAD_STATE_COUNT) != KERN_SUCCESS) {
643 return EINVAL;
644 }
645 if (thread_setstatus_from_user(th_act, ARM_VFP_STATE, (void *)&mctx->fs, ARM_VFP_STATE_COUNT) != KERN_SUCCESS) {
646 return EINVAL;
647 }
648
649 return 0;
650 }
651
652 #if defined(__arm64__)
653 static int
654 sigreturn_copyin_ctx64(struct user_ucontext64 *uctx, mcontext64_t *mctx, user_addr_t uctx_addr)
655 {
656 int error;
657
658 assert(proc_is64bit_data(current_proc()));
659
660 error = copyin(uctx_addr, uctx, sizeof(*uctx));
661 if (error) {
662 return error;
663 }
664
665 /* validate the machine context size */
666 switch (uctx->uc_mcsize) {
667 case UC_FLAVOR_SIZE64:
668 break;
669 default:
670 return EINVAL;
671 }
672
673 assert(uctx->uc_mcsize == sizeof(*mctx));
674 error = copyin((user_addr_t)uctx->uc_mcontext64, mctx, uctx->uc_mcsize);
675 if (error) {
676 return error;
677 }
678
679 return 0;
680 }
681
682 static int
683 sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx)
684 {
685 assert(proc_is64bit_data(current_proc()));
686
687 /* validate the thread state, set/reset appropriate mode bits in cpsr */
688 mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER64_DEFAULT;
689
690 if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE64, (void *)&mctx->ss, ARM_THREAD_STATE64_COUNT) != KERN_SUCCESS) {
691 return EINVAL;
692 }
693 if (thread_setstatus_from_user(th_act, ARM_NEON_STATE64, (void *)&mctx->ns, ARM_NEON_STATE64_COUNT) != KERN_SUCCESS) {
694 return EINVAL;
695 }
696
697 return 0;
698 }
699 #endif /* defined(__arm64__) */
700
/* ARGSUSED */
/*
 * sigreturn system call: copy in the ucontext saved by sendsig(),
 * restore alternate-stack status and the signal mask, validate the
 * sigreturn token (uctx address XOR per-process secret), and install
 * the sanitized machine context.  Returns EJUSTRETURN on success so
 * the restored registers are used directly; an errno value otherwise.
 */
int
sigreturn(
	struct proc * p,
	struct sigreturn_args * uap,
	__unused int *retval)
{
	union {
		user_ucontext32_t uc32;
#if defined(__arm64__)
		user_ucontext64_t uc64;
#endif
	} uctx;

	union {
		mcontext32_t mc32;
#if defined(__arm64__)
		mcontext64_t mc64;
#endif
	} mctx;

	struct sigacts *ps = p->p_sigacts;
	int error, sigmask = 0, onstack = 0;
	thread_t th_act;
	struct uthread *ut;
	uint32_t sigreturn_validation;
	user_addr_t token_uctx;
	kern_return_t kr;

	th_act = current_thread();
	ut = (struct uthread *) get_bsdthread_info(th_act);

	/* Copy in and validate the user context saved by sendsig(). */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		error = sigreturn_copyin_ctx64(&uctx.uc64, &mctx.mc64, uap->uctx);
		if (error != 0) {
			return error;
		}

		onstack = uctx.uc64.uc_onstack;
		sigmask = uctx.uc64.uc_sigmask;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		error = sigreturn_copyin_ctx32(&uctx.uc32, &mctx.mc32, uap->uctx);
		if (error != 0) {
			return error;
		}

		onstack = uctx.uc32.uc_onstack;
		sigmask = uctx.uc32.uc_sigmask;
	}

	/* Restore alternate-signal-stack status. */
	if ((onstack & 01)) {
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	}

	/* Restore the signal mask; unmaskable signals stay unmasked. */
	ut->uu_sigmask = sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask) {
		/* Newly unblocked signals are pending: take an AST. */
		signal_setast(current_thread());
	}

	/*
	 * Recompute the token sendsig() generated and compare it to the
	 * one supplied by user space (unless validation is disabled).
	 */
	sigreturn_validation = atomic_load_explicit(
		&ps->ps_sigreturn_validation, memory_order_relaxed);
	token_uctx = uap->uctx;
	kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
	assert(kr == KERN_SUCCESS);

	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		user64_addr_t token;
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
		if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
			    p->p_comm, p->p_pid, (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				return EINVAL;
			}
		}
		error = sigreturn_set_state64(th_act, &mctx.mc64);
		if (error != 0) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn set_state64 error %d\n",
			    p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
			return error;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		user32_addr_t token;
		token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;
		if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
			    p->p_comm, p->p_pid, (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				return EINVAL;
			}
		}
		error = sigreturn_set_state32(th_act, &mctx.mc32);
		if (error != 0) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn sigreturn_set_state32 error %d\n",
			    p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
			return error;
		}
	}

	/* EJUSTRETURN: registers were fully set above; skip the normal return path. */
	return EJUSTRETURN;
}
820
821 /*
822 * machine_exception() performs machine-dependent translation
823 * of a mach exception to a unix signal.
824 */
825 int
826 machine_exception(int exception,
827 __unused mach_exception_code_t code,
828 __unused mach_exception_subcode_t subcode)
829 {
830 switch (exception) {
831 case EXC_BAD_INSTRUCTION:
832 return SIGILL;
833
834 case EXC_ARITHMETIC:
835 return SIGFPE;
836 }
837
838 return 0;
839 }