/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 */
#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/proc_internal.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <arm/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>

#include <kern/thread.h>
#include <mach/arm/thread_status.h>
#include <arm/proc_reg.h>

#include <kern/assert.h>

#include <pexpert/pexpert.h>
extern struct arm_saved_state *get_user_regs(thread_t);
extern user_addr_t thread_get_cthread_self(void);
extern kern_return_t thread_getstatus(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_getstatus_to_user(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t machine_thread_state_convert_to_user(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
    thread_state_t tstate, mach_msg_type_number_t count);
extern kern_return_t thread_setstatus_from_user(thread_t thread, int flavor,
    thread_state_t tstate, mach_msg_type_number_t count);
/* XXX Put these someplace smarter... */
typedef struct mcontext32 mcontext32_t;
typedef struct mcontext64 mcontext64_t;
/* Signal handler flavors supported */
/* These definitions should match the libplatform implementation */
#define UC_SET_ALT_STACK        0x40000000
#define UC_RESET_ALT_STACK      0x80000000
/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE32 ((ARM_THREAD_STATE_COUNT + ARM_EXCEPTION_STATE_COUNT + ARM_VFP_STATE_COUNT) * sizeof(int))
#define UC_FLAVOR_SIZE64 ((ARM_THREAD_STATE64_COUNT + ARM_EXCEPTION_STATE64_COUNT + ARM_NEON_STATE64_COUNT) * sizeof(int))

#define C_64_REDZONE_LEN        128

#define TRUNC_TO_16_BYTES(addr) (addr & ~0xf)
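/*
 * Collect the 32-bit thread, exception, and VFP state of the target
 * thread into the mcontext that will be pushed onto the user stack.
 */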
static int
sendsig_get_state32(thread_t th_act, arm_thread_state_t *ts, mcontext32_t *mcp)
{
	void *tstate;
	mach_msg_type_number_t state_count;

	assert(!proc_is64bit_data(current_proc()));

	tstate = (void *) ts;
	state_count = ARM_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	mcp->ss = *ts;
	tstate = (void *) &mcp->ss;
	state_count = ARM_THREAD_STATE_COUNT;
	if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	tstate = (void *) &mcp->es;
	state_count = ARM_EXCEPTION_STATE_COUNT;
	if (thread_getstatus(th_act, ARM_EXCEPTION_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	tstate = (void *) &mcp->fs;
	state_count = ARM_VFP_STATE_COUNT;
	if (thread_getstatus_to_user(th_act, ARM_VFP_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	return 0;
}
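/*
 * Layout of the 64-bit signal frame that sendsig() copies out to the
 * user stack for the signal trampoline.
 */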
#if defined(__arm64__)
struct user_sigframe64 {
	/* We can pass the last two args in registers for ARM64 */
	user64_siginfo_t sinfo;
	struct user_ucontext64 uctx;
	mcontext64_t mctx;
};
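/*
 * Collect the 64-bit thread, exception, and NEON state of the target
 * thread into the mcontext that will be pushed onto the user stack.
 */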
static int
sendsig_get_state64(thread_t th_act, arm_thread_state64_t *ts, mcontext64_t *mcp)
{
	void *tstate;
	mach_msg_type_number_t state_count;

	assert(proc_is64bit_data(current_proc()));

	tstate = (void *) ts;
	state_count = ARM_THREAD_STATE64_COUNT;
	if (thread_getstatus(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	mcp->ss = *ts;
	tstate = (void *) &mcp->ss;
	state_count = ARM_THREAD_STATE64_COUNT;
	if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	tstate = (void *) &mcp->es;
	state_count = ARM_EXCEPTION_STATE64_COUNT;
	if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	tstate = (void *) &mcp->ns;
	state_count = ARM_NEON_STATE64_COUNT;
	if (thread_getstatus_to_user(th_act, ARM_NEON_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
		return EINVAL;
	}

	return 0;
}
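/*
 * Populate the 64-bit ucontext: signal stack info, saved signal mask,
 * and the size/location of the machine context.
 */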
static void
sendsig_fill_uctx64(user_ucontext64_t *uctx, int oonstack, int mask, user64_addr_t sp, user64_size_t stack_size,
    user64_addr_t p_mctx)
{
	bzero(uctx, sizeof(*uctx));
	uctx->uc_onstack = oonstack;
	uctx->uc_sigmask = mask;
	uctx->uc_stack.ss_sp = sp;
	uctx->uc_stack.ss_size = stack_size;
	if (oonstack) {
		uctx->uc_stack.ss_flags |= SS_ONSTACK;
	}
	uctx->uc_link = (user64_addr_t)0;
	uctx->uc_mcsize = (user64_size_t) UC_FLAVOR_SIZE64;
	uctx->uc_mcontext64 = (user64_addr_t) p_mctx;
}
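/*
 * Arrange for the 64-bit thread to enter the signal trampoline: handler
 * arguments in x0-x5, pc at the trampoline, and the new stack pointer.
 */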
static kern_return_t
sendsig_set_thread_state64(arm_thread_state64_t *regs,
    user64_addr_t catcher, int infostyle, int sig, user64_addr_t p_sinfo,
    user64_addr_t p_uctx, user64_addr_t token, user64_addr_t trampact, user64_addr_t sp, thread_t th_act)
{
	assert(proc_is64bit_data(current_proc()));

	regs->x[0] = catcher;
	regs->x[1] = infostyle;
	regs->x[2] = sig;
	regs->x[3] = p_sinfo;
	regs->x[4] = p_uctx;
	regs->x[5] = token;
	regs->pc = trampact;
	regs->cpsr = PSR64_USER64_DEFAULT;
	regs->sp = sp;

	return thread_setstatus(th_act, ARM_THREAD_STATE64, (void *)regs, ARM_THREAD_STATE64_COUNT);
}
#endif /* defined(__arm64__) */
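/*
 * Populate the 32-bit ucontext: signal stack info, saved signal mask,
 * and the size/location of the machine context.
 */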
static void
sendsig_fill_uctx32(user_ucontext32_t *uctx, int oonstack, int mask, user_addr_t sp, user_size_t stack_size,
    user_addr_t p_mctx)
{
	bzero(uctx, sizeof(*uctx));
	uctx->uc_onstack = oonstack;
	uctx->uc_sigmask = mask;
	uctx->uc_stack.ss_sp = (user32_addr_t) sp;
	uctx->uc_stack.ss_size = (user32_size_t) stack_size;
	if (oonstack) {
		uctx->uc_stack.ss_flags |= SS_ONSTACK;
	}
	uctx->uc_link = (user32_addr_t)0;
	uctx->uc_mcsize = (user32_size_t) UC_FLAVOR_SIZE32;
	uctx->uc_mcontext = (user32_addr_t) p_mctx;
}
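/*
 * Arrange for the 32-bit thread to enter the signal trampoline: handler
 * arguments in r0-r3, pc at the trampoline (with the Thumb bit reflected
 * in cpsr), and the new stack pointer.
 */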
static kern_return_t
sendsig_set_thread_state32(arm_thread_state_t *regs,
    user32_addr_t catcher, int infostyle, int sig, user32_addr_t p_sinfo,
    user32_addr_t trampact, user32_addr_t sp, thread_t th_act)
{
	assert(!proc_is64bit_data(current_proc()));

	regs->r[0] = catcher;
	regs->r[1] = infostyle;
	regs->r[2] = sig;
	regs->r[3] = p_sinfo;
	if (trampact & 1) {
		regs->pc = trampact & ~1;
#if defined(__arm64__)
		regs->cpsr = PSR64_USER32_DEFAULT | PSR64_MODE_USER32_THUMB;
#elif defined(__arm__)
		regs->cpsr = PSR_USERDFLT | PSR_TF;
#else
#error Unknown architecture.
#endif
	} else {
		regs->pc = trampact;
		regs->cpsr = PSR_USERDFLT;
	}
	regs->sp = sp;

	return thread_setstatus(th_act, ARM_THREAD_STATE, (void *)regs, ARM_THREAD_STATE_COUNT);
}
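/*
 * Mirror the siginfo into the uthread and fire the DTrace proc:::fault
 * and proc:::signal-handle probes.
 */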
#if CONFIG_DTRACE
static void
sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catcher)
{
	bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

	ut->t_dtrace_siginfo.si_signo = sinfo->si_signo;
	ut->t_dtrace_siginfo.si_code = sinfo->si_code;
	ut->t_dtrace_siginfo.si_pid = sinfo->si_pid;
	ut->t_dtrace_siginfo.si_uid = sinfo->si_uid;
	ut->t_dtrace_siginfo.si_status = sinfo->si_status;
	/* XXX truncates faulting address to void * */
	ut->t_dtrace_siginfo.si_addr = CAST_DOWN_EXPLICIT(void *, sinfo->si_addr);

	/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
	switch (sig) {
	case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
		DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
		break;
	default:
		break;
	}

	/* XXX truncates faulting address to uintptr_t */
	DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
	    void (*)(void), CAST_DOWN(uintptr_t, catcher));
}
#endif /* CONFIG_DTRACE */
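/*
 * Layout of the 32-bit signal frame that sendsig() copies out to the
 * user stack; puctx and token are handed to the trampoline through it.
 */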
struct user_sigframe32 {
	user32_addr_t puctx;
	user32_addr_t token;
	user32_siginfo_t sinfo;
	struct user_ucontext32 uctx;
	mcontext32_t mctx;
};
/*
 * Send an interrupt to process.
 */
void
sendsig(struct proc * p, user_addr_t catcher, int sig, int mask, __unused uint32_t code, sigset_t siginfo)
{
	union {
		struct ts32 {
			arm_thread_state_t ss;
		} ts32;
#if defined(__arm64__)
		struct ts64 {
			arm_thread_state64_t ss;
		} ts64;
#endif
	} ts;
	union {
		struct user_sigframe32 uf32;
#if defined(__arm64__)
		struct user_sigframe64 uf64;
#endif
	} user_frame;

	user_siginfo_t sinfo;
	user_addr_t sp = 0, trampact;
	struct sigacts *ps = p->p_sigacts;
	int oonstack, infostyle;
	thread_t th_act;
	struct uthread *ut;
	user_size_t stack_size = 0;
	user_addr_t p_uctx, token_uctx;
	kern_return_t kr;
	int ret;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	bzero(&ts, sizeof(ts));
	bzero(&user_frame, sizeof(user_frame));

	if (siginfo & sigmask(sig)) {
		infostyle = UC_FLAVOR;
	} else {
		infostyle = UC_TRAD;
	}

	trampact = ps->ps_trampact[sig];
	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
	/*
	 * Get sundry thread state.
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		if ((ret = sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx)) != 0) {
#if DEVELOPMENT || DEBUG
			printf("process [%s][%d] sendsig_get_state64 failed with ret %d, expected 0", p->p_comm, p->p_pid, ret);
#endif
			goto bad;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		if ((ret = sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx)) != 0) {
#if DEVELOPMENT || DEBUG
			printf("process [%s][%d] sendsig_get_state32 failed with ret %d, expected 0", p->p_comm, p->p_pid, ret);
#endif
			goto bad;
		}
	}
	/*
	 * Figure out where our new stack lives.
	 */
	if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		sp = ut->uu_sigstk.ss_sp;
		stack_size = ut->uu_sigstk.ss_size;
		sp += stack_size;
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		/*
		 * Get stack pointer, and allocate enough space
		 * for signal handler data.
		 */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sp = CAST_USER_ADDR_T(ts.ts64.ss.sp);
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sp = CAST_USER_ADDR_T(ts.ts32.ss.sp);
		}
	}
	/* Make sure to move stack pointer down for room for metadata */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN);
		sp = TRUNC_TO_16_BYTES(sp);
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sp -= sizeof(user_frame.uf32);
#if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4)
		sp = TRUNC_TO_16_BYTES(sp); /* Only for armv7k */
#endif
	}
	/*
	 * Fill in ucontext (points to mcontext, i.e. thread states).
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size,
		    (user64_addr_t)&((struct user_sigframe64 *)sp)->mctx);
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size,
		    (user32_addr_t)&((struct user_sigframe32 *)sp)->mctx);
	}
	/* Fill in the siginfo passed to the handler. */
	bzero((caddr_t) &sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;

	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sinfo.si_addr = ts.ts64.ss.pc;
		sinfo.pad[0] = ts.ts64.ss.sp;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sinfo.si_addr = ts.ts32.ss.pc;
		sinfo.pad[0] = ts.ts32.ss.sp;
	}
	switch (sig) {
	case SIGILL:
#if 0
		/* srr1/dsisr-based classification inherited from the PowerPC port; not used on ARM. */
		if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) {
			sinfo.si_code = ILL_ILLOPC;
		} else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) {
			sinfo.si_code = ILL_PRVOPC;
		} else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) {
			sinfo.si_code = ILL_ILLTRP;
		} else {
			sinfo.si_code = ILL_NOOP;
		}
#else
		sinfo.si_code = ILL_ILLTRP;
#endif
		break;
	case SIGFPE:
		switch (ut->uu_code) {
		case EXC_ARM_FP_UF:
			sinfo.si_code = FPE_FLTUND;
			break;
		case EXC_ARM_FP_OF:
			sinfo.si_code = FPE_FLTOVF;
			break;
		case EXC_ARM_FP_IO:
			sinfo.si_code = FPE_FLTINV;
			break;
		case EXC_ARM_FP_DZ:
			sinfo.si_code = FPE_FLTDIV;
			break;
		case EXC_ARM_FP_ID:
			sinfo.si_code = FPE_FLTINV;
			break;
		case EXC_ARM_FP_IX:
			sinfo.si_code = FPE_FLTRES;
			break;
		default:
			sinfo.si_code = FPE_NOOP;
			break;
		}
		break;
	case SIGBUS:
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

		sinfo.si_code = BUS_ADRALN;
		break;
	case SIGSEGV:
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

#if 0
		/* First check in srr1 and then in dsisr */
		if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) {
			sinfo.si_code = SEGV_ACCERR;
		} else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) {
			sinfo.si_code = SEGV_ACCERR;
		} else {
			sinfo.si_code = SEGV_MAPERR;
		}
#else
		sinfo.si_code = SEGV_ACCERR;
#endif
		break;
	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		sinfo.si_pid = p->si_pid;
		status_and_exitcode = p->si_status;
		sinfo.si_uid = p->si_uid;
		sinfo.si_code = p->si_code;

		if (sinfo.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode)) {
				sinfo.si_code = CLD_EXITED;
			} else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo.si_code = CLD_DUMPED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				} else {
					sinfo.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				}
			}
		}

		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
		break;
	}
	}
#if CONFIG_DTRACE
	sendsig_do_dtrace(ut, &sinfo, sig, catcher);
#endif /* CONFIG_DTRACE */
	/*
	 * Copy signal-handling frame out to user space, set thread state.
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		user64_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by
		 * sendsig_fill_uctx64(). We fill in the sinfo now.
		 */
		siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);

		p_uctx = (user_addr_t)&((struct user_sigframe64 *)sp)->uctx;

		/*
		 * Generate the validation token for sigreturn
		 */
		token_uctx = (user_addr_t)p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;

		if ((ret = copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64))) != 0) {
#if DEVELOPMENT || DEBUG
			printf("process [%s][%d] copyout of user_frame to (sp, size) = (0x%llx, %zu) failed with ret %d, expected 0\n", p->p_comm, p->p_pid, sp, sizeof(user_frame.uf64), ret);
#endif
			goto bad;
		}

		if ((kr = sendsig_set_thread_state64(&ts.ts64.ss,
		    catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64 *)sp)->sinfo,
		    (user64_addr_t)p_uctx, token, trampact, sp, th_act)) != KERN_SUCCESS) {
#if DEVELOPMENT || DEBUG
			printf("process [%s][%d] sendsig_set_thread_state64 failed with kr %d, expected 0", p->p_comm, p->p_pid, kr);
#endif
			goto bad;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		user32_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by
		 * sendsig_fill_uctx32(). We fill in the sinfo, *pointer*
		 * to uctx and token now.
		 */
		siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);

		p_uctx = (user_addr_t)&((struct user_sigframe32 *)sp)->uctx;

		/*
		 * Generate the validation token for sigreturn
		 */
		token_uctx = (user_addr_t)p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;

		user_frame.uf32.puctx = (user32_addr_t)p_uctx;
		user_frame.uf32.token = token;

		if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
			goto bad;
		}

		if (sendsig_set_thread_state32(&ts.ts32.ss,
		    CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32 *)sp)->sinfo,
		    CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS) {
			goto bad;
		}
	}

	return;
bad:
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	psignal_locked(p, SIGILL);
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous context left by sendsig.
 * Check carefully to make sure that the user has not
 * modified the spr to gain improper privileges.
 */
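/*
 * Copy the 32-bit ucontext and its mcontext in from user space,
 * validating the advertised mcontext size.
 */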
static int
sigreturn_copyin_ctx32(struct user_ucontext32 *uctx, mcontext32_t *mctx, user_addr_t uctx_addr)
{
	int error;

	assert(!proc_is64bit_data(current_proc()));

	error = copyin(uctx_addr, uctx, sizeof(*uctx));
	if (error) {
		return error;
	}

	/* validate the machine context size */
	switch (uctx->uc_mcsize) {
	case UC_FLAVOR_SIZE32:
		break;
	default:
		return EINVAL;
	}

	assert(uctx->uc_mcsize == sizeof(*mctx));
	error = copyin((user_addr_t)uctx->uc_mcontext, mctx, uctx->uc_mcsize);
	if (error) {
		return error;
	}

	return 0;
}
static int
sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx)
{
	assert(!proc_is64bit_data(current_proc()));

	/* validate the thread state, set/reset appropriate mode bits in cpsr */
#if defined(__arm__)
	mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR_MODE_MASK) | PSR_USERDFLT;
#elif defined(__arm64__)
	mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER32_DEFAULT;
#else
#error Unknown architecture.
#endif

	if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE, (void *)&mctx->ss, ARM_THREAD_STATE_COUNT) != KERN_SUCCESS) {
		return EINVAL;
	}
	if (thread_setstatus_from_user(th_act, ARM_VFP_STATE, (void *)&mctx->fs, ARM_VFP_STATE_COUNT) != KERN_SUCCESS) {
		return EINVAL;
	}

	return 0;
}
#if defined(__arm64__)
static int
sigreturn_copyin_ctx64(struct user_ucontext64 *uctx, mcontext64_t *mctx, user_addr_t uctx_addr)
{
	int error;

	assert(proc_is64bit_data(current_proc()));

	error = copyin(uctx_addr, uctx, sizeof(*uctx));
	if (error) {
		return error;
	}

	/* validate the machine context size */
	switch (uctx->uc_mcsize) {
	case UC_FLAVOR_SIZE64:
		break;
	default:
		return EINVAL;
	}

	assert(uctx->uc_mcsize == sizeof(*mctx));
	error = copyin((user_addr_t)uctx->uc_mcontext64, mctx, uctx->uc_mcsize);
	if (error) {
		return error;
	}

	return 0;
}
static int
sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx)
{
	assert(proc_is64bit_data(current_proc()));

	/* validate the thread state, set/reset appropriate mode bits in cpsr */
	mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER64_DEFAULT;

	if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE64, (void *)&mctx->ss, ARM_THREAD_STATE64_COUNT) != KERN_SUCCESS) {
		return EINVAL;
	}
	if (thread_setstatus_from_user(th_act, ARM_NEON_STATE64, (void *)&mctx->ns, ARM_NEON_STATE64_COUNT) != KERN_SUCCESS) {
		return EINVAL;
	}

	return 0;
}
#endif /* defined(__arm64__) */
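/*
 * sigreturn: restore the alternate-stack flag and signal mask saved by
 * sendsig(), then verify the caller's sigreturn token before reloading
 * the saved machine context.
 */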
int
sigreturn(struct proc * p, struct sigreturn_args * uap, __unused int *retval)
{
	union {
		user_ucontext32_t uc32;
#if defined(__arm64__)
		user_ucontext64_t uc64;
#endif
	} uctx;

	union {
		mcontext32_t mc32;
#if defined(__arm64__)
		mcontext64_t mc64;
#endif
	} mctx;

	struct sigacts *ps = p->p_sigacts;
	int error, sigmask = 0, onstack = 0;
	thread_t th_act;
	struct uthread *ut;
	uint32_t sigreturn_validation;
	user_addr_t token_uctx;
	kern_return_t kr;

	th_act = current_thread();
	ut = (struct uthread *) get_bsdthread_info(th_act);

	/* see osfmk/kern/restartable.c */
	act_set_ast_reset_pcs(th_act);
	/*
	 * If we are being asked to change the altstack flag on the thread, we
	 * just set/reset it and return (the uap->uctx is not used).
	 */
	if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		return 0;
	} else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		return 0;
	}
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		error = sigreturn_copyin_ctx64(&uctx.uc64, &mctx.mc64, uap->uctx);
		if (error != 0) {
			return error;
		}

		onstack = uctx.uc64.uc_onstack;
		sigmask = uctx.uc64.uc_sigmask;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		error = sigreturn_copyin_ctx32(&uctx.uc32, &mctx.mc32, uap->uctx);
		if (error != 0) {
			return error;
		}

		onstack = uctx.uc32.uc_onstack;
		sigmask = uctx.uc32.uc_sigmask;
	}
	if ((onstack & 01)) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
	}

	ut->uu_sigmask = sigmask & ~sigcantmask;
	if (ut->uu_siglist & ~ut->uu_sigmask) {
		signal_setast(current_thread());
	}
	sigreturn_validation = atomic_load_explicit(
		&ps->ps_sigreturn_validation, memory_order_relaxed);
	token_uctx = uap->uctx;
	kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
	assert(kr == KERN_SUCCESS);
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		user64_addr_t token;

		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
		if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
			    p->p_comm, p->p_pid, (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				return EINVAL;
			}
		}

		error = sigreturn_set_state64(th_act, &mctx.mc64);
		if (error != 0) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn set_state64 error %d\n",
			    p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
			return error;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		user32_addr_t token;

		token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;
		if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
			    p->p_comm, p->p_pid, (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				return EINVAL;
			}
		}

		error = sigreturn_set_state32(th_act, &mctx.mc32);
		if (error != 0) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn sigreturn_set_state32 error %d\n",
			    p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
			return error;
		}
	}

	return EJUSTRETURN;
}
/*
 * machine_exception() performs machine-dependent translation
 * of a mach exception to a unix signal.
 */
int
machine_exception(int exception,
    __unused mach_exception_code_t code,
    __unused mach_exception_subcode_t subcode)
{
	switch (exception) {
	case EXC_BAD_INSTRUCTION: