/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <arm/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/sdt.h>
#include <sys/wait.h>
#include <kern/thread.h>
#include <mach/arm/thread_status.h>
#include <arm/proc_reg.h>

#include <kern/assert.h>
#include <kern/ast.h>
#include <pexpert/pexpert.h>

extern struct arm_saved_state *get_user_regs(thread_t);
extern user_addr_t thread_get_cthread_self(void);
extern kern_return_t thread_getstatus(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_getstatus_to_user(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t machine_thread_state_convert_to_user(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
    thread_state_t tstate, mach_msg_type_number_t count);
extern kern_return_t thread_setstatus_from_user(thread_t thread, int flavor,
    thread_state_t tstate, mach_msg_type_number_t count);

/* XXX Put these someplace smarter... */
typedef struct mcontext32 mcontext32_t;
typedef struct mcontext64 mcontext64_t;
/* Signal handler flavors supported */
/* These definitions should match the libplatform implementation */
#define UC_TRAD                 1
#define UC_FLAVOR               30
#define UC_SET_ALT_STACK        0x40000000
#define UC_RESET_ALT_STACK      0x80000000

/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE32 ((ARM_THREAD_STATE_COUNT + ARM_EXCEPTION_STATE_COUNT + ARM_VFP_STATE_COUNT) * sizeof(int))
#define UC_FLAVOR_SIZE64 ((ARM_THREAD_STATE64_COUNT + ARM_EXCEPTION_STATE64_COUNT + ARM_NEON_STATE64_COUNT) * sizeof(int))
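/*
 * The *_STATE_COUNT constants are counts of 32-bit words, so multiplying by
 * sizeof(int) yields the mcontext size in bytes.  sigreturn() below rejects
 * any uc_mcsize that does not match one of these values.
 */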

#if __arm64__
#define C_64_REDZONE_LEN 128
#endif

#define TRUNC_TO_16_BYTES(addr) (addr & ~0xf)

static int
sendsig_get_state32(thread_t th_act, arm_thread_state_t *ts, mcontext32_t *mcp)
{
    void *tstate;
    mach_msg_type_number_t state_count;

    assert(!proc_is64bit_data(current_proc()));

    tstate = (void *) ts;
    state_count = ARM_THREAD_STATE_COUNT;
    if (thread_getstatus(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    mcp->ss = *ts;
    tstate = (void *) &mcp->ss;
    state_count = ARM_THREAD_STATE_COUNT;
    if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->es;
    state_count = ARM_EXCEPTION_STATE_COUNT;
    if (thread_getstatus(th_act, ARM_EXCEPTION_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->fs;
    state_count = ARM_VFP_STATE_COUNT;
    if (thread_getstatus_to_user(th_act, ARM_VFP_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}

#if defined(__arm64__)
struct user_sigframe64 {
    /* We can pass the last two args in registers for ARM64 */
    user64_siginfo_t sinfo;
    struct user_ucontext64 uctx;
    mcontext64_t mctx;
};

static int
sendsig_get_state64(thread_t th_act, arm_thread_state64_t *ts, mcontext64_t *mcp)
{
    void *tstate;
    mach_msg_type_number_t state_count;

    assert(proc_is64bit_data(current_proc()));

    tstate = (void *) ts;
    state_count = ARM_THREAD_STATE64_COUNT;
    if (thread_getstatus(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    mcp->ss = *ts;
    tstate = (void *) &mcp->ss;
    state_count = ARM_THREAD_STATE64_COUNT;
    if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->es;
    state_count = ARM_EXCEPTION_STATE64_COUNT;
    if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->ns;
    state_count = ARM_NEON_STATE64_COUNT;
    if (thread_getstatus_to_user(th_act, ARM_NEON_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}

static void
sendsig_fill_uctx64(user_ucontext64_t *uctx, int oonstack, int mask, user64_addr_t sp, user64_size_t stack_size, user64_addr_t p_mctx)
{
    bzero(uctx, sizeof(*uctx));
    uctx->uc_onstack = oonstack;
    uctx->uc_sigmask = mask;
    uctx->uc_stack.ss_sp = sp;
    uctx->uc_stack.ss_size = stack_size;
    if (oonstack) {
        uctx->uc_stack.ss_flags |= SS_ONSTACK;
    }
    uctx->uc_link = (user64_addr_t)0;
    uctx->uc_mcsize = (user64_size_t) UC_FLAVOR_SIZE64;
    uctx->uc_mcontext64 = (user64_addr_t) p_mctx;
}

static kern_return_t
sendsig_set_thread_state64(arm_thread_state64_t *regs,
    user64_addr_t catcher, int infostyle, int sig, user64_addr_t p_sinfo,
    user64_addr_t p_uctx, user64_addr_t token, user64_addr_t trampact, user64_addr_t sp, thread_t th_act)
{
    assert(proc_is64bit_data(current_proc()));

    regs->x[0] = catcher;
    regs->x[1] = infostyle;
    regs->x[2] = sig;
    regs->x[3] = p_sinfo;
    regs->x[4] = p_uctx;
    regs->x[5] = token;
    regs->pc = trampact;
    regs->cpsr = PSR64_USER64_DEFAULT;
    regs->sp = sp;

    return thread_setstatus(th_act, ARM_THREAD_STATE64, (void *)regs, ARM_THREAD_STATE64_COUNT);
}
#endif /* defined(__arm64__) */

static void
sendsig_fill_uctx32(user_ucontext32_t *uctx, int oonstack, int mask, user_addr_t sp, user_size_t stack_size, user_addr_t p_mctx)
{
    bzero(uctx, sizeof(*uctx));
    uctx->uc_onstack = oonstack;
    uctx->uc_sigmask = mask;
    uctx->uc_stack.ss_sp = (user32_addr_t) sp;
    uctx->uc_stack.ss_size = (user32_size_t) stack_size;
    if (oonstack) {
        uctx->uc_stack.ss_flags |= SS_ONSTACK;
    }
    uctx->uc_link = (user32_addr_t)0;
    uctx->uc_mcsize = (user32_size_t) UC_FLAVOR_SIZE32;
    uctx->uc_mcontext = (user32_addr_t) p_mctx;
}

static kern_return_t
sendsig_set_thread_state32(arm_thread_state_t *regs,
    user32_addr_t catcher, int infostyle, int sig, user32_addr_t p_sinfo,
    user32_addr_t trampact, user32_addr_t sp, thread_t th_act)
{
    assert(!proc_is64bit_data(current_proc()));

    regs->r[0] = catcher;
    regs->r[1] = infostyle;
    regs->r[2] = sig;
    regs->r[3] = p_sinfo;
    if (trampact & 1) {
        regs->pc = trampact & ~1;
#if defined(__arm64__)
        regs->cpsr = PSR64_USER32_DEFAULT | PSR64_MODE_USER32_THUMB;
#elif defined(__arm__)
        regs->cpsr = PSR_USERDFLT | PSR_TF;
#else
#error Unknown architecture.
#endif
    } else {
        regs->pc = trampact;
        regs->cpsr = PSR_USERDFLT;
    }
    regs->sp = sp;

    return thread_setstatus(th_act, ARM_THREAD_STATE, (void *)regs, ARM_THREAD_STATE_COUNT);
}

#if CONFIG_DTRACE
static void
sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catcher)
{
    bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

    ut->t_dtrace_siginfo.si_signo = sinfo->si_signo;
    ut->t_dtrace_siginfo.si_code = sinfo->si_code;
    ut->t_dtrace_siginfo.si_pid = sinfo->si_pid;
    ut->t_dtrace_siginfo.si_uid = sinfo->si_uid;
    ut->t_dtrace_siginfo.si_status = sinfo->si_status;
    /* XXX truncates faulting address to void * */
    ut->t_dtrace_siginfo.si_addr = CAST_DOWN_EXPLICIT(void *, sinfo->si_addr);

    /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
    switch (sig) {
    case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
        DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
        break;
    default:
        break;
    }

    /* XXX truncates faulting address to uintptr_t */
    DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
        void (*)(void), CAST_DOWN(uintptr_t, catcher));
}
#endif

struct user_sigframe32 {
    user32_addr_t puctx;
    user32_addr_t token;
    user32_siginfo_t sinfo;
    struct user_ucontext32 uctx;
    mcontext32_t mctx;
};
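
/*
 * Layout note: the 32-bit frame carries the ucontext pointer and sigreturn
 * token in memory (sendsig() stores them into puctx/token below), while the
 * 64-bit frame omits them because sendsig_set_thread_state64() passes the
 * ucontext pointer and token in x4/x5, as the comment in struct
 * user_sigframe64 above notes.
 */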

/*
 * Send an interrupt to process.
 */
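/*
 * Overview of the code below: capture the interrupted thread state, pick a
 * stack (the alternate signal stack if requested and not already in use,
 * otherwise the current sp), build a user_sigframe holding the mcontext,
 * ucontext and siginfo, copy it out to user space, and point the thread at
 * the user trampoline with the handler, signal number, siginfo/ucontext
 * pointers and sigreturn token as arguments.
 */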
void
sendsig(
    struct proc * p,
    user_addr_t catcher,
    int sig,
    int mask,
    __unused uint32_t code,
    sigset_t siginfo
    )
{
    union {
        struct ts32 {
            arm_thread_state_t ss;
        } ts32;
#if defined(__arm64__)
        struct ts64 {
            arm_thread_state64_t ss;
        } ts64;
#endif
    } ts;
    union {
        struct user_sigframe32 uf32;
#if defined(__arm64__)
        struct user_sigframe64 uf64;
#endif
    } user_frame;

    user_siginfo_t sinfo;
    user_addr_t sp = 0, trampact;
    struct sigacts *ps = p->p_sigacts;
    int oonstack, infostyle;
    thread_t th_act;
    struct uthread *ut;
    user_size_t stack_size = 0;
    user_addr_t p_uctx, token_uctx;
    kern_return_t kr;

    th_act = current_thread();
    ut = get_bsdthread_info(th_act);

    bzero(&ts, sizeof(ts));
    bzero(&user_frame, sizeof(user_frame));

    if (siginfo & sigmask(sig)) {
        infostyle = UC_FLAVOR;
    } else {
        infostyle = UC_TRAD;
    }

    trampact = ps->ps_trampact[sig];
    oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;

    /*
     * Get sundry thread state.
     */
    if (proc_is64bit_data(p)) {
#ifdef __arm64__
        int ret = 0;
        if ((ret = sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx)) != 0) {
#if DEVELOPMENT || DEBUG
            printf("process [%s][%d] sendsig_get_state64 failed with ret %d, expected 0", p->p_comm, p->p_pid, ret);
#endif
            goto bad2;
        }
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        int ret = 0;
        if ((ret = sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx)) != 0) {
#if DEVELOPMENT || DEBUG
            printf("process [%s][%d] sendsig_get_state32 failed with ret %d, expected 0", p->p_comm, p->p_pid, ret);
#endif
            goto bad2;
        }
    }

    /*
     * Figure out where our new stack lives.
     */
    if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
        (ps->ps_sigonstack & sigmask(sig))) {
        sp = ut->uu_sigstk.ss_sp;
        stack_size = ut->uu_sigstk.ss_size;

        sp += stack_size;
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        /*
         * Get stack pointer, and allocate enough space
         * for signal handler data.
         */
        if (proc_is64bit_data(p)) {
#if defined(__arm64__)
            sp = CAST_USER_ADDR_T(ts.ts64.ss.sp);
#else
            panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
        } else {
            sp = CAST_USER_ADDR_T(ts.ts32.ss.sp);
        }
    }

    /* Make sure to move stack pointer down for room for metadata */
    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN);
        sp = TRUNC_TO_16_BYTES(sp);
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        sp -= sizeof(user_frame.uf32);
#if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4)
        sp = TRUNC_TO_16_BYTES(sp); /* Only for armv7k */
#endif
    }
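
    /*
     * sp now points at the base of the signal frame: sizeof(user_frame)
     * below the chosen stack (plus a 128-byte red zone for 64-bit
     * processes), 16-byte aligned where required.  The frame is built in
     * the local user_frame union and copied out to sp further below.
     */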

    proc_unlock(p);

    /*
     * Fill in ucontext (points to mcontext, i.e. thread states).
     */
    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size,
            (user64_addr_t)&((struct user_sigframe64*)sp)->mctx);
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size,
            (user32_addr_t)&((struct user_sigframe32*)sp)->mctx);
    }

    /*
     * Setup siginfo.
     */
    bzero((caddr_t) &sinfo, sizeof(sinfo));
    sinfo.si_signo = sig;

    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        sinfo.si_addr = ts.ts64.ss.pc;
        sinfo.pad[0] = ts.ts64.ss.sp;
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        sinfo.si_addr = ts.ts32.ss.pc;
        sinfo.pad[0] = ts.ts32.ss.sp;
    }

    switch (sig) {
    case SIGILL:
#ifdef BER_XXX
        if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) {
            sinfo.si_code = ILL_ILLOPC;
        } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) {
            sinfo.si_code = ILL_PRVOPC;
        } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) {
            sinfo.si_code = ILL_ILLTRP;
        } else {
            sinfo.si_code = ILL_NOOP;
        }
#else
        sinfo.si_code = ILL_ILLTRP;
#endif
        break;

    case SIGFPE:
        switch (ut->uu_code) {
        case EXC_ARM_FP_UF:
            sinfo.si_code = FPE_FLTUND;
            break;
        case EXC_ARM_FP_OF:
            sinfo.si_code = FPE_FLTOVF;
            break;
        case EXC_ARM_FP_IO:
            sinfo.si_code = FPE_FLTINV;
            break;
        case EXC_ARM_FP_DZ:
            sinfo.si_code = FPE_FLTDIV;
            break;
        case EXC_ARM_FP_ID:
            sinfo.si_code = FPE_FLTINV;
            break;
        case EXC_ARM_FP_IX:
            sinfo.si_code = FPE_FLTRES;
            break;
        default:
            sinfo.si_code = FPE_NOOP;
            break;
        }

        break;

    case SIGBUS:
        if (proc_is64bit_data(p)) {
#if defined(__arm64__)
            sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
            panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
        } else {
            sinfo.si_addr = user_frame.uf32.mctx.es.far;
        }

        sinfo.si_code = BUS_ADRALN;
        break;

    case SIGSEGV:
        if (proc_is64bit_data(p)) {
#if defined(__arm64__)
            sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
            panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
        } else {
            sinfo.si_addr = user_frame.uf32.mctx.es.far;
        }

#ifdef BER_XXX
        /* First check in srr1 and then in dsisr */
        if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) {
            sinfo.si_code = SEGV_ACCERR;
        } else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) {
            sinfo.si_code = SEGV_ACCERR;
        } else {
            sinfo.si_code = SEGV_MAPERR;
        }
#else
        sinfo.si_code = SEGV_ACCERR;
#endif
        break;

    default:
    {
        int status_and_exitcode;

        /*
         * All other signals need to fill out a minimum set of
         * information for the siginfo structure passed into
         * the signal handler, if SA_SIGINFO was specified.
         *
         * p->si_status actually contains both the status and
         * the exit code; we save it off in its own variable
         * for later breakdown.
         */
        proc_lock(p);
        sinfo.si_pid = p->si_pid;
        p->si_pid = 0;
        status_and_exitcode = p->si_status;
        p->si_status = 0;
        sinfo.si_uid = p->si_uid;
        p->si_uid = 0;
        sinfo.si_code = p->si_code;
        p->si_code = 0;
        proc_unlock(p);
        if (sinfo.si_code == CLD_EXITED) {
            if (WIFEXITED(status_and_exitcode)) {
                sinfo.si_code = CLD_EXITED;
            } else if (WIFSIGNALED(status_and_exitcode)) {
                if (WCOREDUMP(status_and_exitcode)) {
                    sinfo.si_code = CLD_DUMPED;
                    status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                } else {
                    sinfo.si_code = CLD_KILLED;
                    status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                }
            }
        }
        /*
         * The recorded status contains the exit code and the
         * signal information, but the information to be passed
         * in the siginfo to the handler is supposed to only
         * contain the status, so we have to shift it out.
         */
        sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
        p->p_xhighbits = 0;
        break;
    }
    }

#if CONFIG_DTRACE
    sendsig_do_dtrace(ut, &sinfo, sig, catcher);
#endif /* CONFIG_DTRACE */

    /*
     * Copy signal-handling frame out to user space, set thread state.
     */
    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        user64_addr_t token;

        /*
         * mctx filled in when we get state. uctx filled in by
         * sendsig_fill_uctx64(). We fill in the sinfo now.
         */
        siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);

        p_uctx = (user_addr_t)&((struct user_sigframe64*)sp)->uctx;
        /*
         * Generate the validation token for sigreturn
         */
        token_uctx = p_uctx;
        kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
        assert(kr == KERN_SUCCESS);
        token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;

        int ret = 0;
        if ((ret = copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64))) != 0) {
#if DEVELOPMENT || DEBUG
            printf("process [%s][%d] copyout of user_frame to (sp, size) = (0x%llx, %zu) failed with ret %d, expected 0\n", p->p_comm, p->p_pid, sp, sizeof(user_frame.uf64), ret);
#endif
            goto bad;
        }

        if ((kr = sendsig_set_thread_state64(&ts.ts64.ss,
            catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo,
            (user64_addr_t)p_uctx, token, trampact, sp, th_act)) != KERN_SUCCESS) {
#if DEVELOPMENT || DEBUG
            printf("process [%s][%d] sendsig_set_thread_state64 failed with kr %d, expected 0", p->p_comm, p->p_pid, kr);
#endif
            goto bad;
        }

#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        user32_addr_t token;

        /*
         * mctx filled in when we get state. uctx filled in by
         * sendsig_fill_uctx32(). We fill in the sinfo, *pointer*
         * to uctx and token now.
         */
        siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);

        p_uctx = (user_addr_t)&((struct user_sigframe32*)sp)->uctx;
        /*
         * Generate the validation token for sigreturn
         */
        token_uctx = (user_addr_t)p_uctx;
        kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
        assert(kr == KERN_SUCCESS);
        token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;

        user_frame.uf32.puctx = (user32_addr_t)p_uctx;
        user_frame.uf32.token = token;

        if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
            goto bad;
        }

        if (sendsig_set_thread_state32(&ts.ts32.ss,
            CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo,
            CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS) {
            goto bad;
        }
    }

    proc_lock(p);
    return;

bad:
    proc_lock(p);
bad2:
    SIGACTION(p, SIGILL) = SIG_DFL;
    sig = sigmask(SIGILL);
    p->p_sigignore &= ~sig;
    p->p_sigcatch &= ~sig;
    ut->uu_sigmask &= ~sig;
    /* sendsig is called with signal lock held */
    proc_unlock(p);
    psignal_locked(p, SIGILL);
    proc_lock(p);
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous context left by sendsig.
 * Check carefully to make sure that the user has not
 * modified the cpsr to gain improper privileges.
 */
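
/*
 * Note on validation: sendsig() hands the user trampoline a token equal to
 * the (user-visible) ucontext pointer XORed with the per-process
 * ps_sigreturn_token.  sigreturn() recomputes that value from uap->uctx and
 * rejects the call with EINVAL on mismatch, unless validation is disabled
 * for the process (PS_SIGRETURN_VALIDATION_DISABLED).
 */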

static int
sigreturn_copyin_ctx32(struct user_ucontext32 *uctx, mcontext32_t *mctx, user_addr_t uctx_addr)
{
    int error;

    assert(!proc_is64bit_data(current_proc()));

    error = copyin(uctx_addr, uctx, sizeof(*uctx));
    if (error) {
        return error;
    }

    /* validate the machine context size */
    switch (uctx->uc_mcsize) {
    case UC_FLAVOR_SIZE32:
        break;
    default:
        return EINVAL;
    }

    assert(uctx->uc_mcsize == sizeof(*mctx));
    error = copyin((user_addr_t)uctx->uc_mcontext, mctx, uctx->uc_mcsize);
    if (error) {
        return error;
    }

    return 0;
}

static int
sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx)
{
    assert(!proc_is64bit_data(current_proc()));

    /* validate the thread state, set/reset appropriate mode bits in cpsr */
#if defined(__arm__)
    mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR_MODE_MASK) | PSR_USERDFLT;
#elif defined(__arm64__)
    mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER32_DEFAULT;
#else
#error Unknown architecture.
#endif

    if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE, (void *)&mctx->ss, ARM_THREAD_STATE_COUNT) != KERN_SUCCESS) {
        return EINVAL;
    }
    if (thread_setstatus_from_user(th_act, ARM_VFP_STATE, (void *)&mctx->fs, ARM_VFP_STATE_COUNT) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}

#if defined(__arm64__)
static int
sigreturn_copyin_ctx64(struct user_ucontext64 *uctx, mcontext64_t *mctx, user_addr_t uctx_addr)
{
    int error;

    assert(proc_is64bit_data(current_proc()));

    error = copyin(uctx_addr, uctx, sizeof(*uctx));
    if (error) {
        return error;
    }

    /* validate the machine context size */
    switch (uctx->uc_mcsize) {
    case UC_FLAVOR_SIZE64:
        break;
    default:
        return EINVAL;
    }

    assert(uctx->uc_mcsize == sizeof(*mctx));
    error = copyin((user_addr_t)uctx->uc_mcontext64, mctx, uctx->uc_mcsize);
    if (error) {
        return error;
    }

    return 0;
}

static int
sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx)
{
    assert(proc_is64bit_data(current_proc()));

    /* validate the thread state, set/reset appropriate mode bits in cpsr */
    mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER64_DEFAULT;

    if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE64, (void *)&mctx->ss, ARM_THREAD_STATE64_COUNT) != KERN_SUCCESS) {
        return EINVAL;
    }
    if (thread_setstatus_from_user(th_act, ARM_NEON_STATE64, (void *)&mctx->ns, ARM_NEON_STATE64_COUNT) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}
#endif /* defined(__arm64__) */

/* ARGSUSED */
int
sigreturn(
    struct proc * p,
    struct sigreturn_args * uap,
    __unused int *retval)
{
    union {
        user_ucontext32_t uc32;
#if defined(__arm64__)
        user_ucontext64_t uc64;
#endif
    } uctx;

    union {
        mcontext32_t mc32;
#if defined(__arm64__)
        mcontext64_t mc64;
#endif
    } mctx;

    struct sigacts *ps = p->p_sigacts;
    int error, sigmask = 0, onstack = 0;
    thread_t th_act;
    struct uthread *ut;
    uint32_t sigreturn_validation;
    user_addr_t token_uctx;
    kern_return_t kr;

    th_act = current_thread();
    ut = (struct uthread *) get_bsdthread_info(th_act);

    /* see osfmk/kern/restartable.c */
    act_set_ast_reset_pcs(th_act);
    /*
     * If we are being asked to change the altstack flag on the thread, we
     * just set/reset it and return (the uap->uctx is not used).
     */
    if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
        return 0;
    } else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
        return 0;
    }

    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        error = sigreturn_copyin_ctx64(&uctx.uc64, &mctx.mc64, uap->uctx);
        if (error != 0) {
            return error;
        }

        onstack = uctx.uc64.uc_onstack;
        sigmask = uctx.uc64.uc_sigmask;
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        error = sigreturn_copyin_ctx32(&uctx.uc32, &mctx.mc32, uap->uctx);
        if (error != 0) {
            return error;
        }

        onstack = uctx.uc32.uc_onstack;
        sigmask = uctx.uc32.uc_sigmask;
    }

    if ((onstack & 01)) {
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
    }

    ut->uu_sigmask = sigmask & ~sigcantmask;
    if (ut->uu_siglist & ~ut->uu_sigmask) {
        signal_setast(current_thread());
    }

    sigreturn_validation = atomic_load_explicit(
        &ps->ps_sigreturn_validation, memory_order_relaxed);
    token_uctx = uap->uctx;
    kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
    assert(kr == KERN_SUCCESS);
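
    /*
     * Recompute the sigreturn token from the (converted) ucontext pointer
     * and compare it with what user space passed in uap->token.  Note that
     * the signal mask and SA_ONSTACK state above have already been updated
     * by this point, even if the token check below fails.
     */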

    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        user64_addr_t token;
        token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
        if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
                p->p_comm, p->p_pid, (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
            if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
                return EINVAL;
            }
        }
        error = sigreturn_set_state64(th_act, &mctx.mc64);
        if (error != 0) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn set_state64 error %d\n",
                p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
            return error;
        }
#else
        panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
    } else {
        user32_addr_t token;
        token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;
        if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
                p->p_comm, p->p_pid, (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
            if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
                return EINVAL;
            }
        }
        error = sigreturn_set_state32(th_act, &mctx.mc32);
        if (error != 0) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn sigreturn_set_state32 error %d\n",
                p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
            return error;
        }
    }

    return EJUSTRETURN;
}

/*
 * machine_exception() performs machine-dependent translation
 * of a mach exception to a unix signal.
 */
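/*
 * Exceptions not handled here return 0; the caller then falls back to the
 * machine-independent Unix-exception translation (see <sys/ux_exception.h>,
 * included above).
 */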
int
machine_exception(int exception,
    __unused mach_exception_code_t code,
    __unused mach_exception_subcode_t subcode)
{
    switch (exception) {
    case EXC_BAD_INSTRUCTION:
        return SIGILL;

    case EXC_ARITHMETIC:
        return SIGFPE;
    }

    return 0;
}